HUE-8747 [editor] Fix download arguments for task.

jdesjean committed 03af72ffc5 (6 years ago)

apps/beeswax/src/beeswax/data_export.py (+14, -20)

@@ -23,7 +23,7 @@ import types
 from django.utils.translation import ugettext as _
 
 from desktop.lib import export_csvxls
-from beeswax import common, conf
+from beeswax import common
 
 
 LOG = logging.getLogger(__name__)
@@ -32,8 +32,7 @@ LOG = logging.getLogger(__name__)
 FETCH_SIZE = 1000
 DOWNLOAD_COOKIE_AGE = 1800 # 30 minutes
 
-
-def download(handle, format, db, id=None, file_name='query_result', user_agent=None, callback=None, max_rows=None, store_data_type_in_header=False):
+def download(format, db, id=None, file_name='query_result', user_agent=None, max_rows=-1, max_bytes=-1, store_data_type_in_header=False, start_over=True):
   """
   download(query_model, format) -> HttpResponse
 
@@ -43,10 +42,7 @@ def download(handle, format, db, id=None, file_name='query_result', user_agent=N
     LOG.error('Unknown download format "%s"' % (format,))
     return
 
-  max_rows = max_rows if max_rows else conf.DOWNLOAD_ROW_LIMIT.get()
-  max_bytes = -1 if max_rows else conf.DOWNLOAD_BYTES_LIMIT.get()
-
-  content_generator = HS2DataAdapter(handle, db, max_rows=max_rows, start_over=True, max_bytes=max_bytes, callback=callback, store_data_type_in_header=store_data_type_in_header)
+  content_generator = DataAdapter(db, max_rows=max_rows, start_over=start_over, max_bytes=max_bytes, store_data_type_in_header=store_data_type_in_header)
   generator = export_csvxls.create_generator(content_generator, format)
 
   resp = export_csvxls.make_response(generator, format, file_name, user_agent=user_agent)
@@ -75,16 +71,15 @@ def upload(path, handle, user, db, fs, max_rows=-1, max_bytes=-1):
   else:
     fs.do_as_user(user.username, fs.create, path)
 
-  content_generator = HS2DataAdapter(handle, db, max_rows=max_rows, start_over=True, max_bytes=max_bytes)
+  content_generator = DataAdapter(db, max_rows=max_rows, start_over=True, max_bytes=max_bytes)
   for header, data in content_generator:
     dataset = export_csvxls.dataset(None, data)
     fs.do_as_user(user.username, fs.append, path, dataset.csv)
 
 
-class HS2DataAdapter:
+class DataAdapter:
 
-  def __init__(self, handle, db, max_rows=-1, start_over=True, max_bytes=-1, callback=None, store_data_type_in_header=False):
-    self.handle = handle
+  def __init__(self, db, max_rows=-1, start_over=True, max_bytes=-1, store_data_type_in_header=False):
     self.db = db
     self.max_rows = max_rows
     self.max_bytes = max_bytes
@@ -92,7 +87,6 @@ class HS2DataAdapter:
     self.fetch_size = FETCH_SIZE
     self.limit_rows = max_rows > -1
     self.limit_bytes = max_bytes > -1
-    self.callback = callback
 
     self.first_fetched = True
     self.headers = None
@@ -135,15 +129,16 @@ class HS2DataAdapter:
     return size
 
   def next(self):
-    results = self.db.fetch(self.handle, start_over=self.start_over, rows=self.fetch_size)
+    results = self.db.fetch(start_over=self.start_over, rows=self.fetch_size)
 
     if self.first_fetched:
       self.first_fetched = False
       self.start_over = False
-      self.headers = results.cols()
-      self.num_cols = len(self.headers)
+      self.num_cols = len(results['meta'])
       if self.store_data_type_in_header:
-        self.headers = [column['name'] + '|' + column['type'] for column in results.full_cols()]
+        self.headers = [column['name'] + '|' + column['type'] for column in results['meta']]
+      else:
+        self.headers = [column['name'] for column in results['meta']]
       if self.limit_bytes:
         self.bytes_counter += max(self.num_cols - 1, 0)
         for header in self.headers:
@@ -155,10 +150,10 @@ class HS2DataAdapter:
         self.fetch_size = 100
 
     if self.has_more and not self.is_truncated:
-      self.has_more = results.has_more
+      self.has_more = results['has_more']
       data = []
 
-      for row in results.rows():
+      for row in results['data']:
         self.row_counter += 1
         if self.limit_bytes:
           self.bytes_counter += self._getsizeofascii(row)
@@ -175,6 +170,5 @@ class HS2DataAdapter:
 
       return self.headers, data
     else:
-      if self.callback:
-        self.callback()
+      self.db.close()
       raise StopIteration
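
The reworked DataAdapter no longer holds a handle: it drives any object exposing fetch(start_over=..., rows=...) that returns a plain dict with 'meta', 'data' and 'has_more' keys, plus close(). A minimal sketch of that contract, assuming DataAdapter keeps the __iter__ of the old HS2DataAdapter; StubResultWrapper is illustrative only, the real wrapper being notebook.connectors.base.ResultWrapper:

from beeswax import data_export

class StubResultWrapper(object):
  """Fakes the fetch()/close() interface that DataAdapter now drives."""
  def fetch(self, start_over=None, rows=None):
    # Plain dict instead of the old result object with cols()/rows()/full_cols().
    return {
      'meta': [{'name': 'id', 'type': 'INT'}, {'name': 'name', 'type': 'STRING'}],
      'data': [[1, 'a'], [2, 'b']],
      'has_more': False,
    }
  def close(self):
    print('fetch finished, wrapper closed')

adapter = data_export.DataAdapter(StubResultWrapper(), max_rows=-1, max_bytes=-1)
for headers, rows in adapter:
  print(headers)  # ['id', 'name']
  print(rows)     # [[1, 'a'], [2, 'b']]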

apps/filebrowser/src/filebrowser/views.py (+2, -12)

@@ -52,6 +52,7 @@ from desktop.lib import i18n
 from desktop.lib.conf import coerce_bool
 from desktop.lib.django_util import render, format_preserving_redirect
 from desktop.lib.django_util import JsonResponse
+from desktop.lib.export_csvxls import file_reader
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.fs import splitpath
 from desktop.lib.i18n import smart_str
@@ -77,7 +78,6 @@ from desktop.auth.backend import is_admin
 
 DEFAULT_CHUNK_SIZE_BYTES = 1024 * 4 # 4KB
 MAX_CHUNK_SIZE_BYTES = 1024 * 1024 # 1MB
-DOWNLOAD_CHUNK_SIZE = 1 * 1024 * 1024 # 1MB
 
 # Defaults for "xxd"-style output.
 # Sentences refer to groups of bytes printed together, within a line.
@@ -118,16 +118,6 @@ def index(request):
   return view(request, path)
 
 
-def _file_reader(fh):
-    """Generator that reads a file, chunk-by-chunk."""
-    while True:
-        chunk = fh.read(DOWNLOAD_CHUNK_SIZE)
-        if chunk == '':
-            fh.close()
-            break
-        yield chunk
-
-
 def download(request, path):
     """
     Downloads a file.
@@ -165,7 +155,7 @@ def download(request, path):
       response = HttpResponseRedirect(fh.read_url())
       setattr(response, 'redirect_override', True)
     else:
-      response = StreamingHttpResponse(_file_reader(fh), content_type=content_type)
+      response = StreamingHttpResponse(file_reader(fh), content_type=content_type)
       response["Last-Modified"] = http_date(stats['mtime'])
       response["Content-Length"] = stats['size']
       response['Content-Disposition'] = request.GET.get('disposition', 'attachment') if _can_inline_display(path) else 'attachment'
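
Since _file_reader is not filebrowser-specific, it moves to desktop.lib.export_csvxls where the task server can reuse it; the view keeps the same streaming behaviour. A minimal sketch under Python 2, where read() on a file opened in 'rb' mode returns '' at EOF (the sentinel file_reader checks); the path is illustrative, and filebrowser actually passes an HDFS file handle with the same read()/close() interface:

from django.http import StreamingHttpResponse
from desktop.lib.export_csvxls import file_reader

fh = open('/tmp/example.csv', 'rb')  # illustrative; any handle with read()/close() works
response = StreamingHttpResponse(file_reader(fh), content_type='text/csv')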

desktop/core/src/desktop/lib/export_csvxls.py (+10, -1)

@@ -35,12 +35,21 @@ from desktop.lib import i18n
 
 LOG = logging.getLogger(__name__)
 
+DOWNLOAD_CHUNK_SIZE = 1 * 1024 * 1024 # 1MB
 ILLEGAL_CHARS = r'[\000-\010]|[\013-\014]|[\016-\037]'
 FORMAT_TO_CONTENT_TYPE = {'csv': 'application/csv', 'xls': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'json': 'application/json'}
 
 def nullify(cell):
   return cell if cell is not None else "NULL"
 
+def file_reader(fh):
+  """Generator that reads a file, chunk-by-chunk."""
+  while True:
+    chunk = fh.read(DOWNLOAD_CHUNK_SIZE)
+    if chunk == '':
+      fh.close()
+      break
+    yield chunk
 
 def encode_row(row, encoding=None, make_excel_links=False):
   encoded_row = []
@@ -113,7 +122,7 @@ def create_generator(content_generator, format, encoding=None):
     raise Exception("Unknown format: %s" % format)
 
 
-def make_response(generator, format, name, encoding=None, user_agent=None):
+def make_response(generator, format, name, encoding=None, user_agent=None): #TODO: Add support for 3rd party (e.g. nginx file serving)
   """
   @param data An iterator of rows, where every row is a list of strings
   @param format Either "csv" or "xls"
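
file_reader complements the existing create_generator/make_response pair: create_generator consumes (headers, rows) pairs and formats them, and make_response wraps the formatted stream into a download response. A toy sketch of that pipeline inside a configured Hue environment; toy_content is illustrative, standing in for a DataAdapter:

from desktop.lib.export_csvxls import create_generator, make_response

def toy_content():
  # Same (headers, rows) shape that beeswax.data_export.DataAdapter yields.
  yield ['a', 'b'], [[1, 2], [3, 4]]

generator = create_generator(toy_content(), 'csv')
response = make_response(generator, 'csv', 'example')  # CSV attachment named "example.csv"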

desktop/libs/notebook/src/notebook/connectors/base.py (+67, -2)

@@ -23,6 +23,7 @@ import uuid
 from django.utils.translation import ugettext as _
 
 from desktop.conf import has_multi_cluster
+from desktop.lib import export_csvxls
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.i18n import smart_unicode
 
@@ -431,11 +432,23 @@ class Api(object):
   def fetch_result(self, notebook, snippet, rows, start_over):
     pass
 
+  def can_start_over(self, notebook, snippet):
+    return False
+
   def fetch_result_size(self, notebook, snippet):
     raise OperationNotSupported()
 
-  def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
-    pass
+  def download(self, notebook, snippet, file_format='csv'):
+    from beeswax import data_export #TODO: Move to notebook?
+    from beeswax import conf
+
+    result_wrapper = ResultWrapper(self, notebook, snippet)
+
+    max_rows = conf.DOWNLOAD_ROW_LIMIT.get()
+    max_bytes = conf.DOWNLOAD_BYTES_LIMIT.get()
+
+    content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, max_bytes=max_bytes)
+    return export_csvxls.create_generator(content_generator, file_format)
 
   def get_log(self, notebook, snippet, startFrom=None, size=None):
     return 'No logs'
@@ -463,6 +476,58 @@ class Api(object):
 
   def statement_similarity(self, notebook, snippet, source_platform, target_platform): raise NotImplementedError()
 
+class ResultWrapper(object):
+  def __init__(self, api, notebook, snippet, callback=None):
+    self.api = api
+    self.notebook = notebook
+    self.snippet = snippet
+    self.callback = callback
+    self.should_close = False
+
+  def fetch(self, start_over=None, rows=None):
+    if start_over:
+      if not self.snippet['result']['handle'] or not self.api.can_start_over(self.notebook, self.snippet):
+        start_over = False
+        handle = self.api.execute(self.notebook, self.snippet)
+        self.snippet['result']['handle'] = handle
+        if self.callback and hasattr(self.callback, 'on_execute'):
+          self.callback.on_execute(handle)
+        self.should_close = True
+        self._until_available()
+    if self.snippet['result']['handle'].get('sync', False):
+      return self.snippet['result']['handle']['result']
+    else:
+      result = self.api.fetch_result(self.notebook, self.snippet, rows, start_over)
+    return result
+
+  def _until_available(self):
+    if self.snippet['result']['handle'].get('sync', False):
+      return # Request is already completed
+    count = 0
+    sleep_seconds = 1
+    check_status_count = 0
+    while True:
+      response = self.api.check_status(self.notebook, self.snippet)
+      if self.callback and hasattr(self.callback, 'on_status'):
+        self.callback.on_status(response['status'])
+      if self.callback and hasattr(self.callback, 'on_log'):
+        log = self.api.get_log(self.notebook, self.snippet, startFrom=count)
+        self.callback.on_log(log)
+        count += log.count('\n')
+
+      if response['status'] not in ['waiting', 'running', 'submitted']:
+        break
+      check_status_count += 1
+      if check_status_count > 10:
+        sleep_seconds = 10
+      elif check_status_count > 5:
+        sleep_seconds = 5
+      time.sleep(sleep_seconds)
+
+  def close(self):
+    if self.should_close:
+      self.should_close = False
+      self.api.close_statement(self.notebook, self.snippet)
 
 def _get_snippet_name(notebook, unique=False, table_format=False):
   name = (('%(name)s' + ('-%(id)s' if unique else '') if notebook.get('name') else '%(type)s-%(id)s') % notebook)
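
ResultWrapper centralizes the execute/poll/fetch/close lifecycle that each connector's download() used to reimplement, and its callback is duck-typed: on_execute, on_status and on_log are each optional (checked with hasattr). A minimal self-contained sketch; StubApi, LoggingCallback and the payload dicts are illustrative, and Api is assumed to be constructible with just a user:

from notebook.connectors.base import Api, ResultWrapper

class StubApi(Api):
  """Illustrative connector returning one synchronous handle."""
  def execute(self, notebook, snippet):
    return {'sync': True, 'result': {'meta': [{'name': 'x', 'type': 'INT'}], 'data': [[1]]}}
  def close_statement(self, notebook, snippet):
    pass

class LoggingCallback(object):
  """Implement any subset of on_execute/on_status/on_log."""
  def on_execute(self, handle):
    print('started with handle %s' % handle)

notebook = {'uuid': 'abc', 'name': 'demo'}                       # hypothetical payloads
snippet = {'id': '1', 'type': 'hive', 'result': {'handle': {}}}

wrapper = ResultWrapper(StubApi(None), notebook, snippet, callback=LoggingCallback())
print(wrapper.fetch(start_over=True, rows=10))  # sync handle: its inline result is returned
wrapper.close()                                 # closes only because this wrapper executed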

desktop/libs/notebook/src/notebook/connectors/hiveserver2.py (+5, -20)

@@ -381,31 +381,16 @@ class HS2Api(Api):
       return {'status': -1}  # skipped
 
 
-  @query_error_handler
-  def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
+  def can_start_over(self, notebook, snippet):
     try:
       db = self._get_db(snippet, cluster=self.cluster)
       handle = self._get_handle(snippet)
       # Test handle to verify if still valid
       db.fetch(handle, start_over=True, rows=1)
-
-      file_name = _get_snippet_name(notebook)
-
-      return data_export.download(handle, format, db, id=snippet['id'], file_name=file_name, user_agent=user_agent, max_rows=max_rows, store_data_type_in_header=store_data_type_in_header)
-    except Exception, e:
-      title = 'The query result cannot be downloaded.'
-      LOG.exception(title)
-
-      if hasattr(e, 'message') and e.message:
-        if 'generic failure: Unable to find a callback: 32775' in e.message:
-          message = e.message + " " + _("Increase the sasl_max_buffer value in hue.ini")
-        elif 'query result cache exceeded its limit' in e.message:
-          message = e.message.replace("Restarting the fetch is not possible.", _("Please execute the query again."))
-        else:
-          message = e.message
-      else:
-        message = e
-      raise PopupException(_(title), detail=message)
+      can_start_over = True
+    except Exception:
+      can_start_over = False  # Handle is no longer valid; the caller re-executes the query
+    return can_start_over
 
 
   @query_error_handler

desktop/libs/notebook/src/notebook/connectors/jdbc.py (+0, -6)

@@ -124,12 +124,6 @@ class JdbcApi(Api):
   def cancel(self, notebook, snippet):
     return {'status': 0}
 
-  def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
-    file_name = _get_snippet_name(notebook)
-    data, description = query_and_fetch(self.db, snippet['statement'])
-    db = FixedResult(data, description)
-    return data_export.download(None, format, db, id=snippet['id'], file_name=file_name)
-
   def progress(self, snippet, logs):
     return 50
 

desktop/libs/notebook/src/notebook/connectors/rdbms.py (+0, -10)

@@ -106,16 +106,6 @@ class RdbmsApi(Api):
     return 'No logs'
 
 
-  @query_error_handler
-  def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
-
-    file_name = _get_snippet_name(notebook)
-    results = self._execute(notebook, snippet)
-    db = FixedResult(results)
-
-    return data_export.download(None, format, db, id=snippet['id'], file_name=file_name, user_agent=user_agent)
-
-
   @query_error_handler
   def close_statement(self, notebook, snippet):
     return {'status': -1}

desktop/libs/notebook/src/notebook/connectors/solr.py (+0, -5)

@@ -128,11 +128,6 @@ class SolrApi(Api):
   def get_log(self, notebook, snippet, startFrom=None, size=None):
     return 'No logs'
 
-
-  def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
-    raise PopupException('Downloading is not supported yet')
-
-
   @query_error_handler
   def close_statement(self, notebook, snippet):
     return {'status': -1}

desktop/libs/notebook/src/notebook/connectors/spark_shell.py (+1, -10)

@@ -241,6 +241,7 @@ class SparkApi(Api):
       return {
           'id': response['id'],
           'has_result_set': True,
+          'sync': False
       }
     except Exception, e:
       message = force_unicode(str(e)).lower()
@@ -326,16 +327,6 @@ class SparkApi(Api):
 
       raise QueryError(msg)
 
-  def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
-    try:
-      api = get_spark_api(self.user)
-      session = _get_snippet_session(notebook, snippet)
-      cell = snippet['result']['handle']['id']
-
-      return spark_download(api, session['id'], cell, format, user_agent=None)
-    except Exception, e:
-      raise PopupException(e)
-
   def cancel(self, notebook, snippet):
     api = get_spark_api(self.user)
     session = _get_snippet_session(notebook, snippet)
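
The explicit 'sync': False matters because ResultWrapper.fetch() and _until_available() short-circuit on handles flagged as synchronous; Spark handles now opt out of that path explicitly. An illustrative check mirroring the one in ResultWrapper:

def is_synchronous(handle):
  # Mirrors handle.get('sync', False) in ResultWrapper; sync handles carry
  # their result inline and skip api.fetch_result() and status polling.
  return handle.get('sync', False)

assert not is_synchronous({'id': 7, 'has_result_set': True, 'sync': False})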

desktop/libs/notebook/src/notebook/connectors/sqlalchemyapi.py (+1, -49)

@@ -209,30 +209,6 @@ class SqlAlchemyApi(Api):
     return ''
 
 
-  @query_error_handler
-  def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
-    file_name = _get_snippet_name(notebook)
-    guid = uuid.uuid4().hex
-
-    engine = self._create_engine()
-    connection = engine.connect()
-    result = connection.execution_options(stream_results=True).execute(snippet['statement'])
-
-    CONNECTION_CACHE[guid] = {
-      'connection': connection,
-      'result': result
-    }
-    db = FixedResult([col[0] if type(col) is dict or type(col) is tuple else col for col in result.cursor.description])
-
-    def callback():
-      connection = CONNECTION_CACHE.get(guid)
-      if connection:
-        connection['connection'].close()
-        del CONNECTION_CACHE[guid]
-
-    return data_export.download({'guid': guid}, format, db, id=snippet['id'], file_name=file_name, callback=callback)
-
-
   @query_error_handler
   def close_statement(self, notebook, snippet):
     result = {'status': -1}
@@ -344,28 +320,4 @@ class Assist():
       result = connection.execution_options(stream_results=True).execute(statement)
       return result.cursor.description, result.fetchall()
     finally:
-      connection.close()
-
-class FixedResultSet():
-  def __init__(self, metadata, data, has_more):
-    self.metadata = metadata
-    self.data = data
-    self.has_more = has_more
-
-  def cols(self):
-    return self.metadata
-
-  def rows(self):
-    return self.data if self.data is not None else []
-
-class FixedResult():
-  def __init__(self, metadata):
-    self.metadata = metadata
-
-  def fetch(self, handle=None, start_over=None, rows=None):
-    connection = CONNECTION_CACHE.get(handle['guid'])
-    if connection:
-      data = connection['result'].fetchmany(rows)
-      return FixedResultSet(self.metadata, data, data is not None and len(data) >= rows)
-    else:
-      return FixedResultSet([], [])
+      connection.close()

desktop/libs/notebook/src/notebook/tasks.py (+39, -62)

@@ -33,13 +33,12 @@ from django.http import FileResponse, HttpRequest
 from desktop.auth.backend import rewrite_user
 from desktop.celery import app
 from desktop.conf import TASK_SERVER
-from desktop.lib.export_csvxls import FORMAT_TO_CONTENT_TYPE
+from desktop.lib import export_csvxls
 
-from notebook.connectors.base import get_api, QueryExpired
+from notebook.connectors.base import get_api, QueryExpired, ResultWrapper
 
 LOG_TASK = get_task_logger(__name__)
 LOG = logging.getLogger(__name__)
-DOWNLOAD_COOKIE_AGE = 3600
 STATE_MAP = {
   'SUBMITTED': 'waiting',
   states.RECEIVED: 'waiting',
@@ -55,41 +54,56 @@ STATE_MAP = {
   states.IGNORED: 'ignored'
 }
 
+class ResultWrapperCallback(object):
+  def __init__(self, uuid, meta, log_file_handle):
+    self.meta = meta
+    self.uuid = uuid
+    self.log_file_handle = log_file_handle
+
+  def on_execute(self, handle):
+    if handle.get('sync', False) and handle['result'].get('data'):
+      handle_without_data = handle.copy()
+      handle_without_data['result'] = {}
+      for key in filter(lambda x: x != 'data', list(handle['result'].keys())):
+        handle_without_data['result'][key] = handle['result'][key]
+    else:
+      handle_without_data = handle
+    self.meta['handle'] = handle_without_data
+
+  def on_log(self, log):
+    os.write(self.log_file_handle, log)
+
+  def on_status(self, status):
+    self.meta['status'] = status
+    download_to_file.update_state(task_id=self.uuid, state='PROGRESS', meta=self.meta)
+
 #TODO: Add periodic cleanup task
 #TODO: move file paths to a file like API so we can change implementation
 #TODO: UI should be able to close a query that is available, but not expired
 @app.task()
-def download_to_file(notebook, snippet, file_format='csv', user_agent=None, postdict=None, user_id=None, create=False, store_data_type_in_header=False):
+def download_to_file(notebook, snippet, file_format='csv', postdict=None, user_id=None, max_rows=-1):
+  from beeswax import data_export
   download_to_file.update_state(task_id=notebook['uuid'], state='STARTED', meta={})
   request = _get_request(postdict, user_id)
   api = get_api(request, snippet)
-  if create:
-    handle = api.execute(notebook, snippet)
-  else:
-    handle = snippet['result']['handle']
 
   f, path = tempfile.mkstemp()
   f_log, path_log = tempfile.mkstemp()
   f_progress, path_progress = tempfile.mkstemp()
   try:
     os.write(f_progress, '0')
-    meta = {'row_counter': 0, 'file_path': path, 'handle': handle, 'log_path': path_log, 'progress_path': path_progress, 'status': 'running', 'truncated': False} #TODO: Truncated
-    download_to_file.update_state(task_id=notebook['uuid'], state='PROGRESS', meta=meta)
-    _until_available(notebook, snippet, api, f_log, handle, meta)
 
-    snippet['result']['handle'] = handle.copy()
-    #TODO: Move PREFETCH_RESULT_COUNT to front end
-    response = api.download(notebook, snippet, file_format, user_agent=user_agent, max_rows=TASK_SERVER.PREFETCH_RESULT_COUNT.get(), store_data_type_in_header=store_data_type_in_header)
+    meta = {'row_counter': 0, 'file_path': path, 'handle': {}, 'log_path': path_log, 'progress_path': path_progress, 'status': 'running', 'truncated': False} #TODO: Truncated
+
+    result_wrapper = ResultWrapper(api, notebook, snippet, ResultWrapperCallback(notebook['uuid'], meta, f_log))
+    content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, store_data_type_in_header=True) #TODO: Move PREFETCH_RESULT_COUNT to front end
+    response = export_csvxls.create_generator(content_generator, file_format)
 
-    row_count = 0
     for chunk in response:
       os.write(f, chunk)
-      row_count += chunk.count('\n')
-      meta['row_counter'] = row_count - 1
+      meta['row_counter'] = content_generator.row_counter
       download_to_file.update_state(task_id=notebook['uuid'], state='AVAILABLE', meta=meta)
 
-    snippet['result']['handle'] = handle.copy()
-    api.close_statement(notebook, snippet)
   finally:
     os.close(f)
     os.close(f_log)
@@ -106,29 +120,6 @@ def close_statement_async(notebook, snippet, postdict=None, user_id=None):
   request = _get_request(postdict, user_id)
   get_api(request, snippet).close_statement(notebook, snippet)
 
-def _until_available(notebook, snippet, api, f, handle, meta):
-  count = 0
-  sleep_seconds = 1
-  check_status_count = 0
-  while True:
-    snippet['result']['handle'] = handle.copy()
-    response = api.check_status(notebook, snippet)
-    meta['status'] = response['status']
-    download_to_file.update_state(task_id=notebook['uuid'], state='PROGRESS', meta=meta)
-    snippet['result']['handle'] = handle.copy()
-    log = api.get_log(notebook, snippet, startFrom=count)
-    os.write(f, log)
-    count += log.count('\n')
-
-    if response['status'] not in ['waiting', 'running', 'submitted']:
-      break
-    check_status_count += 1
-    if check_status_count > 5:
-      sleep_seconds = 5
-    elif check_status_count > 10:
-      sleep_seconds = 10
-    time.sleep(sleep_seconds)
-
 #TODO: Convert csv to excel if needed
 def download(*args, **kwargs):
   result = download_to_file.AsyncResult(args[0]['uuid'])
@@ -138,18 +129,9 @@ def download(*args, **kwargs):
   elif state in states.EXCEPTION_STATES:
     result.maybe_reraise()
 
-  info = result.wait()
-  response = FileResponse(open(info['file_path'], 'rb'), content_type=FORMAT_TO_CONTENT_TYPE.get('csv', 'application/octet-stream'))
-  response['Content-Disposition'] = 'attachment; filename="%s.%s"' % (args[0]['uuid'], 'csv') #TODO: Add support for 3rd party (e.g. nginx file serving)
-  response.set_cookie(
-      'download-%s' % args[1]['id'],
-      json.dumps({
-        'truncated': info.get('truncated', False),
-        'row_counter': info.get('row_counter', 0)
-      }),
-      max_age=DOWNLOAD_COOKIE_AGE
-    )
-  return response
+  info = result.wait() # TODO: Start returning data even if we're not done
+
+  return export_csvxls.file_reader(open(info['file_path'], 'rb'))
 
 # Why we need this:
 # 1) There is no way in celery to differentiate between a task that was submitted, but not yet started and a task that has been GCed.
@@ -160,8 +142,7 @@ def _patch_status(notebook):
 
 def execute(*args, **kwargs):
   notebook = args[0]
-  kwargs['create'] = True
-  kwargs['store_data_type_in_header'] = True
+  kwargs['max_rows'] = TASK_SERVER.PREFETCH_RESULT_COUNT.get()
   _patch_status(notebook)
   download_to_file.apply_async(args=args, kwargs=kwargs, task_id=notebook['uuid'])
   return {'sync': False,
@@ -184,12 +165,7 @@ def check_status(*args, **kwargs):
   elif state in states.EXCEPTION_STATES:
     result.maybe_reraise()
 
-  info = result.info
-  if not info or not info.get('status'):
-    status = STATE_MAP[state]
-  else:
-    status = info.get('status')
-  return {'status': status}
+  return {'status': STATE_MAP[state]}
 
 def get_log(notebook, snippet, startFrom=None, size=None, postdict=None, user_id=None):
   result = download_to_file.AsyncResult(notebook['uuid'])
@@ -357,6 +333,7 @@ def _close_statement_async_id(notebook):
 def _get_request(postdict=None, user_id=None):
   request = HttpRequest()
   request.POST = postdict
+  LOG.info('Fetching user with id %s' % user_id)
   user = User.objects.get(id=user_id)
   user = rewrite_user(user)
   request.user = user
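
With ResultWrapper doing the execute/poll/fetch work, tasks.download() now returns a plain chunk generator over the temp file (via export_csvxls.file_reader) instead of a finished FileResponse, so the HTTP wrapping and cookie happen in the notebook view. A sketch of the task-server flow inside a running Hue/celery environment, with hypothetical notebook/snippet payloads:

from notebook import tasks

# Hypothetical inputs; real ones come from the editor's POST payload.
notebook = {'uuid': 'abc123', 'name': 'demo'}
snippet = {'id': '1', 'type': 'hive', 'statement': 'SELECT 1', 'result': {'handle': {}}}

tasks.execute(notebook, snippet, postdict={}, user_id=1)  # enqueues download_to_file
status = tasks.check_status(notebook, snippet)            # mapped from the celery task state
chunks = tasks.download(notebook, snippet)                # generator over the result file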

desktop/libs/notebook/src/notebook/views.py (+18, -3)

@@ -18,6 +18,8 @@
 import json
 import logging
 
+from beeswax.data_export import DOWNLOAD_COOKIE_AGE
+
 from django.urls import reverse
 from django.db.models import Q
 from django.shortcuts import redirect
@@ -25,6 +27,7 @@ from django.utils.translation import ugettext as _
 from django.views.decorators.clickjacking import xframe_options_exempt
 
 from desktop.conf import ENABLE_DOWNLOAD, USE_NEW_EDITOR, TASK_SERVER
+from desktop.lib import export_csvxls
 from desktop.lib.django_util import render, JsonResponse
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.json_utils import JSONEncoderForHTML
@@ -337,9 +340,21 @@ def download(request):
   notebook = json.loads(request.POST.get('notebook', '{}'))
   snippet = json.loads(request.POST.get('snippet', '{}'))
   file_format = request.POST.get('format', 'csv')
-
-  response = get_api(request, snippet).download(notebook, snippet, format=file_format, user_agent=request.META.get('HTTP_USER_AGENT'))
-
+  user_agent = request.META.get('HTTP_USER_AGENT')
+  file_name = _get_snippet_name(notebook)
+
+  content_generator = get_api(request, snippet).download(notebook, snippet, file_format=file_format)
+  response = export_csvxls.make_response(content_generator, file_format, file_name, user_agent=user_agent)
+
+  if snippet.get('id'):
+    response.set_cookie(
+      'download-%s' % snippet['id'],
+      json.dumps({
+        'truncated': False,
+        'row_counter': 0
+      }),
+      max_age=DOWNLOAD_COOKIE_AGE
+    )
   if response:
     request.audit = {
       'operation': 'DOWNLOAD',