
HUE-8744 [task] Adding result storage cache service for query results

Romain committed 6 years ago
commit 967df969fc

+ 7 - 1
desktop/core/src/desktop/conf.py

@@ -1711,10 +1711,16 @@ TASK_SERVER = ConfigSection(
       type=coerce_positive_integer,
       help=_('Number of query results rows to fetch into the result storage.')
     ),
+    RESULT_CACHE = Config(
+      key='result_cache',
+      type=str,
+      help=_('Django file cache class to use to temporarily store query results'),
+      default='{"BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://localhost:6379/0", "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},"KEY_PREFIX": "queries"}'
+    ),
     RESULT_STORAGE = Config(
       key='result_storage',
       type=str,
-      help=_('Django file storage class to use to temporarily store query results'),
+      help=_('Django file storage class to use to persist query results'),
       default='{"backend": "django.core.files.storage.FileSystemStorage", "properties": {"location": "./logs"}}'
     ),
     EXECUTION_STORAGE = Config(

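The new result_cache option stores a JSON string, so any value set in hue.ini must decode into a Django cache definition. A minimal sketch of that round-trip, using the default shown above (the json.loads call mirrors what settings.py does below):

import json

# Default value of the result_cache option, as defined in the hunk above.
raw = ('{"BACKEND": "django_redis.cache.RedisCache", '
       '"LOCATION": "redis://localhost:6379/0", '
       '"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"}, '
       '"KEY_PREFIX": "queries"}')

cache_def = json.loads(raw)  # must yield a dict that Django's CACHES accepts
assert cache_def['BACKEND'] == 'django_redis.cache.RedisCache'
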
+ 2 - 2
desktop/core/src/desktop/lib/export_csvxls.py

@@ -47,8 +47,8 @@ def file_reader(fh):
   while True:
     chunk = fh.read(DOWNLOAD_CHUNK_SIZE)
     if chunk == '':
-        fh.close()
-        break
+      fh.close()
+      break
     yield chunk
 
 def encode_row(row, encoding=None, make_excel_links=False):

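The export_csvxls.py change is indentation only: fh.close() and break belong directly under the empty-chunk check, not one level deeper. A standalone sketch of the generator after the fix (the chunk size value here is assumed for illustration; the module defines its own DOWNLOAD_CHUNK_SIZE):

import io

DOWNLOAD_CHUNK_SIZE = 64 * 1024  # assumed size; the real constant lives in the module

def file_reader(fh):
  # Yield fixed-size chunks until read() returns an empty string, then close.
  while True:
    chunk = fh.read(DOWNLOAD_CHUNK_SIZE)
    if chunk == '':
      fh.close()
      break
    yield chunk

print(sum(len(c) for c in file_reader(io.StringIO('x' * 200000))))  # 200000
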
+ 4 - 1
desktop/core/src/desktop/settings.py

@@ -382,7 +382,7 @@ CACHES = {
     'default': {
         'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', # TODO: Parameterize here for all the caches
         'LOCATION': 'unique-hue'
-    }
+    },
 }
 CACHES_HIVE_DISCOVERY_KEY = 'hive_discovery'
 CACHES[CACHES_HIVE_DISCOVERY_KEY] = {
@@ -391,8 +391,11 @@ CACHES[CACHES_HIVE_DISCOVERY_KEY] = {
 }
 
 CACHES_CELERY_KEY = 'celery'
+CACHES_CELERY_QUERY_RESULT_KEY = 'celery_query_results'
 if desktop.conf.TASK_SERVER.ENABLED.get():
   CACHES[CACHES_CELERY_KEY] = json.loads(desktop.conf.TASK_SERVER.EXECUTION_STORAGE.get())
+  if desktop.conf.TASK_SERVER.RESULT_CACHE.get():
+    CACHES[CACHES_CELERY_QUERY_RESULT_KEY] = json.loads(desktop.conf.TASK_SERVER.RESULT_CACHE.get())
 
 # Configure sessions
 SESSION_COOKIE_NAME = desktop.conf.SESSION.COOKIE_NAME.get()

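With the task server enabled and result_cache set, settings.py registers a second named cache next to the Celery execution one. A sketch of the resulting CACHES mapping, assuming result_cache keeps its default from conf.py above (the 'celery' entry is omitted because EXECUTION_STORAGE's value is not part of this diff):

CACHES = {
  'default': {
    'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    'LOCATION': 'unique-hue',
  },
  'celery_query_results': {
    'BACKEND': 'django_redis.cache.RedisCache',
    'LOCATION': 'redis://localhost:6379/0',
    'OPTIONS': {'CLIENT_CLASS': 'django_redis.client.DefaultClient'},
    'KEY_PREFIX': 'queries',
  },
}

Task code then reaches this cache through django.core.cache.caches[CACHES_CELERY_QUERY_RESULT_KEY], as the tasks.py hunks below do.
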
+ 26 - 16
desktop/libs/notebook/src/notebook/tasks.py

@@ -39,7 +39,7 @@ from desktop.celery import app
 from desktop.conf import TASK_SERVER
 from desktop.lib import export_csvxls
 from desktop.lib import fsmanager
-from desktop.settings import CACHES_CELERY_KEY
+from desktop.settings import CACHES_CELERY_KEY, CACHES_CELERY_QUERY_RESULT_KEY
 
 from notebook.connectors.base import get_api, QueryExpired, ExecutionWrapper
 from notebook.sql_utils import get_current_statement
@@ -101,7 +101,7 @@ def download_to_file(notebook, snippet, file_format='csv', max_rows=-1, **kwargs
 
   with storage.open(_log_key(notebook), 'wb') as f_log:
     result_wrapper = ExecutionWrapper(api, notebook, snippet, ExecutionWrapperCallback(notebook['uuid'], meta, f_log))
-    content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, store_data_type_in_header=True) #TODO: Move FETCH_RESULT_LIMIT to front end
+    content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, store_data_type_in_header=True) # TODO: Move FETCH_RESULT_LIMIT to front end
     response = export_csvxls.create_generator(content_generator, file_format)
 
     with storage.open(_result_key(notebook), 'wb') as f:
@@ -111,6 +111,11 @@ def download_to_file(notebook, snippet, file_format='csv', max_rows=-1, **kwargs
         meta['truncated'] = content_generator.is_truncated
         download_to_file.update_state(task_id=notebook['uuid'], state='AVAILABLE', meta=meta)
 
+    if TASK_SERVER.RESULT_CACHE.get():
+      with storage.open(_result_key(notebook)) as f:
+        csv_reader = csv.reader(f, delimiter=','.encode('utf-8'))
+        caches[CACHES_CELERY_QUERY_RESULT_KEY].set(_result_key(notebook), [row for row in csv_reader], 60 * 5)
+
   return meta
 
 @app.task(ignore_result=True)
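download_to_file now re-reads the result file it just wrote and stores the parsed rows, header included, in the query-result cache with a five-minute TTL. A minimal sketch of that set/get pattern against the named Django cache (cache_result_rows and get_result_rows are hypothetical helpers; the cache alias and timeout come from this diff):

from django.core.cache import caches

CACHES_CELERY_QUERY_RESULT_KEY = 'celery_query_results'

def cache_result_rows(result_key, rows):
  # Hypothetical helper: keep the parsed rows for five minutes (60 * 5 seconds).
  caches[CACHES_CELERY_QUERY_RESULT_KEY].set(result_key, rows, 60 * 5)

def get_result_rows(result_key):
  # Returns None once the entry expires; the TODO in fetch_result below
  # notes that the expired case still needs handling.
  return caches[CACHES_CELERY_QUERY_RESULT_KEY].get(result_key)
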
@@ -307,20 +312,25 @@ def fetch_result(notebook, snippet, rows, start_over, **kwargs):
     csv.field_size_limit(sys.maxsize)
     count = 0
     with storage.open(_result_key(notebook)) as f:
-      csv_reader = csv.reader(f, delimiter=','.encode('utf-8'))
-      first = next(csv_reader, None)
-      if first: # else no data to read
-        for col in first:
-          split = col.split('|')
-          split_type = split[1] if len(split) > 1 else 'STRING_TYPE'
-          cols.append({'name': split[0], 'type': split_type, 'comment': None})
-        for row in csv_reader:
-          count += 1
-          if count <= skip:
-            continue
-          data.append(row)
-          if count >= target:
-            break
+      if TASK_SERVER.RESULT_CACHE.get():
+        csv_reader = caches[CACHES_CELERY_QUERY_RESULT_KEY].get(_result_key(notebook)) # TODO check if expired
+        headers = csv_reader[0] # TODO check size
+        csv_reader = csv_reader[1:]
+      else:
+        csv_reader = csv.reader(f, delimiter=','.encode('utf-8'))
+        headers = next(csv_reader, [])
+
+      for col in headers:
+        split = col.split('|')
+        split_type = split[1] if len(split) > 1 else 'STRING_TYPE'
+        cols.append({'name': split[0], 'type': split_type, 'comment': None})
+      for row in csv_reader:
+        count += 1
+        if count <= skip: # TODO: seek(skip) or [skip:]
+          continue
+        data.append(row)
+        if count >= target:
+          break
 
     caches[CACHES_CELERY_KEY].set(_fetch_progress_key(notebook), count, timeout=None)
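
Both branches of fetch_result now feed the same header loop, which splits each header cell on '|' to recover the type recorded by store_data_type_in_header. A minimal sketch of that convention:

def parse_header(cell):
  # Header cells are written as 'name|TYPE'; a bare name defaults to STRING_TYPE.
  split = cell.split('|')
  split_type = split[1] if len(split) > 1 else 'STRING_TYPE'
  return {'name': split[0], 'type': split_type, 'comment': None}

assert parse_header('id|INT_TYPE') == {'name': 'id', 'type': 'INT_TYPE', 'comment': None}
assert parse_header('label') == {'name': 'label', 'type': 'STRING_TYPE', 'comment': None}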