Browse files

Adding Task server backend changes for Hue (#3687)

This change enables the task server backend in Hue; the following hue.ini parameters are needed.

[desktop]
enable_chunked_file_uploader=true
[filebrowser]
max_file_size_upload_limit=-1 # Bytes
[[task_server]]
enabled=True
beat_enabled=True
fetch_result_limit=20000
result_storage='{"BACKEND": "django.core.files.storage.FileSystemStorage", "properties": {"location": "./logs"}}'
broker_url="redis://localhost:6379/0"
celery_result_backend="redis://localhost:6379/0"
celeryd_opts='--time-limit=300'
execution_storage='{"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "celery-hue"}'
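Since both broker_url and celery_result_backend point at Redis, it is worth confirming the Redis instance is reachable before enabling the task server. A minimal sketch, assuming the redis-py client is installed and using the broker URL from the config above:

import redis

broker_url = "redis://localhost:6379/0"  # broker_url from hue.ini above

try:
  # ping() raises ConnectionError when the broker is unreachable
  redis.Redis.from_url(broker_url).ping()
  print("Redis broker reachable at %s" % broker_url)
except redis.exceptions.ConnectionError as e:
  print("Redis broker unreachable: %s" % e)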

Testing Done:
Tested file upload on HDFS, S3, ABFS, and Ozone.
Prakash Ranade 1 year ago
parent
commit
c5c6d8e98e

+ 30 - 13
apps/filebrowser/src/filebrowser/views.py

@@ -53,8 +53,7 @@ from django.utils.html import escape
 from aws.s3.s3fs import S3FileSystemException, S3ListAllBucketsException, get_s3_home_directory
 from desktop import appmanager
 from desktop.auth.backend import is_admin
-from desktop.conf import RAZ
-from desktop.conf import ENABLE_NEW_STORAGE_BROWSER
+from desktop.conf import RAZ, ENABLE_NEW_STORAGE_BROWSER, TASK_SERVER
 from desktop.lib import fsmanager
 from desktop.lib import i18n
 from desktop.lib.conf import coerce_bool
@@ -1458,10 +1457,11 @@ def _create_response(request, _fs, result="success", data="Success"):
       'success': True,
       'uuid': _fs.qquuid,
       'status': 0,
-      'data': data
+      'data': data,
+      'task_id': _fs.qquuid
   }
 
-def perform_upload(request, *args, **kwargs):
+def perform_upload_task(request, *args, **kwargs):
   """
   Uploads a file to the specified destination.
   Args:
@@ -1479,12 +1479,29 @@ def perform_upload(request, *args, **kwargs):
   """
   scheme = get_scheme(kwargs['dest'])
   upload_class = UPLOAD_CLASSES.get(scheme, LocalFineUploaderChunkedUpload)
-  _fs = upload_class(request, **kwargs)
-  _fs.upload()
-  if scheme == 'hdfs':
-    result = _massage_stats(request, stat_absolute_path(_fs.filepath, request.fs.stats(_fs.filepath)))
+  result = None
+
+  if TASK_SERVER.ENABLED.get():
+    # If the task server is enabled, hand the upload off to a Celery task.
+    logger.debug("Uploading file to task server")
+    _fs = upload_class(request, **kwargs)
+    _fs.check_access()
+    from filebrowser.tasks import upload_file_task, error_handler
+    kwargs["user_id"] = request.user.id
+    kwargs["scheme"] = scheme
+    kwargs["filepath"] = _fs.filepath
+    kwargs["chunk_size"] = _fs.chunk_size
+    task_id = kwargs.get("qquuid")
+    upload_file_task.apply_async(task_id=task_id, args=(), kwargs=kwargs, link_error=error_handler.s(), queue="default")
+    result = "task started %s" % task_id
+    logger.info("Task started %s" % task_id)
   else:
-    result = "success"
+    _fs = upload_class(request, **kwargs)
+    _fs.upload()
+    if scheme == 'hdfs':
+      result = _massage_stats(request, stat_absolute_path(_fs.filepath, request.fs.stats(_fs.filepath)))
+    else:
+      result = "success"
   return _create_response(request, _fs, result=result, data="Success")
 
 def extract_upload_data(request, method):
@@ -1525,13 +1542,13 @@ def upload_chunks(request):
     return JsonResponse({'success': True, 'uuid': request.GET.get('qquuid')})
 
   # case where file is smaller than the chunk size
-  if int(request.GET.get("qqtotalparts", 0)) == 0 and int(request.GET.get("qqtotalfilesize", 0)) <= 2000000:
+  if int(request.GET.get("qqtotalparts", 0)) == 0:
     chunks = extract_upload_data(request, "GET")
     try:
-      response = perform_upload(request, **chunks)
+      response = perform_upload_task(request, **chunks)
       return JsonResponse(response)
     except Exception as e:
-      return JsonResponse({'success': False, 'error': 'Error in upload'})
+      return JsonResponse({'success': False, 'error': 'Error in upload %s' % str(e)})
   return JsonResponse({'success': False, 'error': 'Unsupported request method'})
 
 @require_http_methods(["POST"])
@@ -1545,7 +1562,7 @@ def upload_complete(request):
   """
   chunks = extract_upload_data(request, "POST")
   try:
-    response = perform_upload(request, **chunks)
+    response = perform_upload_task(request, **chunks)
     return JsonResponse(response)
   except Exception as e:
     return JsonResponse({'success': False, 'error': 'Error in upload'})

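perform_upload_task() dispatches to upload_file_task and error_handler in filebrowser.tasks, a module this commit does not show. A plausible shape for those tasks, reconstructed from the call site as a hedged sketch (the decorators and bodies are assumptions, not the actual implementation):

import logging

from celery import shared_task

LOG = logging.getLogger()

@shared_task
def upload_file_task(**kwargs):
  # The view serializes user_id, scheme, filepath and chunk_size into kwargs;
  # the worker would rebuild the matching uploader class from kwargs["scheme"]
  # and run its chunked upload outside the request/response cycle.
  LOG.info("upload_file_task: uploading %s" % kwargs.get("filepath"))

@shared_task
def error_handler(request, exc, traceback):
  # Standard Celery errback signature for tasks linked via error_handler.s().
  LOG.error("Task %s raised %r" % (request.id, exc))

Because apply_async() is called with task_id=kwargs.get("qquuid"), the Celery task id equals the FineUploader uuid, which is why _create_response can return the same value as both uuid and task_id.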
+ 0 - 3
apps/filebrowser/src/filebrowser/views_test.py

@@ -1574,9 +1574,6 @@ class UploadChunksTestCase(TestCase):
       assert e.json()['success'] == False
       assert e.json()['error'] == 'Error in upload'
     assert response.status_code == 200
-    assert response.json()['success'] == False
-    assert response.json()['error'] == 'Error in upload'
-
 
 @pytest.mark.django_db
 class TestOFSAccessPermissions(object):

+ 25 - 0
desktop/core/src/desktop/lib/fs/ozone/upload.py

@@ -33,6 +33,7 @@ else:
 
 from desktop.lib.exceptions_renderable import PopupException
 from filebrowser.utils import generate_chunks, calculate_total_size
+from desktop.conf import TASK_SERVER
 
 LOG = logging.getLogger()
 
@@ -45,6 +46,8 @@ class OFSFineUploaderChunkedUpload(object):
     if self.file_name:
       self.file_name = unicodedata.normalize('NFC', self.file_name) # Normalize unicode
     self.chunk_size = UPLOAD_CHUNK_SIZE.get()
+    if kwargs.get('chunk_size', None) != None:
+      self.chunk_size = kwargs.get('chunk_size')
     self.destination = kwargs.get('dest', None)  # GET param avoids infinite looping
     self.target_path = None
     self.file = None
@@ -64,6 +67,7 @@ class OFSFineUploaderChunkedUpload(object):
       LOG.debug("Chunk size = %d" % UPLOAD_CHUNK_SIZE.get())
       LOG.info('OFSFineUploaderChunkedUpload: inside check_access function.')
       self.target_path = self._fs.join(self.destination, self.file_name)
+      self.filepath = self.target_path
 
     if self.totalfilesize != calculate_total_size(self.qquuid, self.qqtotalparts):
       raise PopupException(_('OFSFineUploaderChunkedUpload: Sorry, the file size is not correct. %(name)s %(qquuid)s %(size)s') %
@@ -71,6 +75,27 @@ class OFSFineUploaderChunkedUpload(object):
 
   def upload_chunks(self):
     LOG.debug("OFSFineUploaderChunkedUpload: upload_chunks")
+
+    if TASK_SERVER.ENABLED.get():
+      if self._is_ofs_upload():
+        self._fs = self._get_ofs(self._request)
+
+        # Verify that the path exists
+        try:
+          self._fs.stats(self.destination)
+        except Exception as e:
+          raise PopupException(_('Destination path does not exist: %s' % self.destination))
+
+        LOG.debug("Chunk size = %d" % UPLOAD_CHUNK_SIZE.get())
+        LOG.info('OFSFineUploaderChunkedUpload: resolving target path in upload_chunks.')
+        self.target_path = self._fs.join(self.destination, self.file_name)
+        self.filepath = self.target_path
+
+      if self.totalfilesize != calculate_total_size(self.qquuid, self.qqtotalparts):
+        raise PopupException(
+          _('OFSFineUploaderChunkedUpload: Sorry, the file size is not correct. %(name)s %(qquuid)s %(size)s') %
+          {'name': self.file_name, 'qquuid': self.qquuid, 'size': self.totalfilesize})
+
     try:
       LOG.debug("OFSFineUploaderChunkedUpload: uploading file part with size: %s" % self._part_size)
       fp = io.BytesIO()

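The task-server branch of upload_chunks() repeats the destination check, logging, and target-path resolution already performed in check_access(). A shared helper would keep the two code paths from drifting; a sketch (the method name is hypothetical, and it assumes the module's existing PopupException and gettext imports):

  def _resolve_target(self):
    # Shared by check_access() and upload_chunks(): verify the destination
    # exists and compute the target/file path once.
    self._fs = self._get_ofs(self._request)
    try:
      self._fs.stats(self.destination)
    except Exception:
      raise PopupException(_('Destination path does not exist: %s') % self.destination)
    self.target_path = self._fs.join(self.destination, self.file_name)
    self.filepath = self.target_path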
+ 19 - 0
desktop/core/src/desktop/settings.py

@@ -738,8 +738,27 @@ if desktop.conf.TASK_SERVER.ENABLED.get() or desktop.conf.TASK_SERVER.BEAT_ENABL
   CELERY_ACCEPT_CONTENT = ['json']
   CELERY_RESULT_BACKEND = desktop.conf.TASK_SERVER.CELERY_RESULT_BACKEND.get()
   CELERY_TASK_SERIALIZER = 'json'
+  CELERY_ENABLE_UTC = True
+  CELERY_TIMEZONE = "America/Los_Angeles"
 
   CELERYD_OPTS = desktop.conf.TASK_SERVER.RESULT_CELERYD_OPTS.get()
+  CELERY_TASK_DEFAULT_QUEUE = 'default'
+
+  CELERY_TASK_QUEUES = {
+    'low_priority': {
+        'exchange': 'low_priority', # unused
+        'routing_key': 'low_priority',
+    },
+    'high_priority': {
+        'exchange': 'high_priority', # unused
+        'routing_key': 'high_priority',
+    },
+    'default': {
+         'exchange': 'default',
+         'routing_key': 'default'
+    },
+  }
+
 
 # %n will be replaced with the first part of the nodename.
 # CELERYD_LOG_FILE="/var/log/celery/%n%I.log"

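With the Redis result backend configured above and the default queue that perform_upload_task() targets, a client can poll the task_id returned in the upload response. A minimal sketch, assuming the Celery app produced by these settings:

from celery.result import AsyncResult

def upload_status(task_id):
  # State progresses PENDING -> STARTED -> SUCCESS/FAILURE for a normal run.
  result = AsyncResult(task_id)
  return {'task_id': task_id, 'state': result.state, 'ready': result.ready()}

The low_priority/high_priority queues declared here are not used by this commit; routing a task to one of them would just mean passing queue='high_priority' to apply_async(), mirroring the queue="default" call in views.py.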
+ 15 - 3
desktop/libs/aws/src/aws/s3/upload.py

@@ -36,6 +36,7 @@ else:
 from django.core.files.uploadedfile import SimpleUploadedFile
 from django.core.files.uploadhandler import FileUploadHandler, SkipFile, StopFutureHandlers, StopUpload, UploadFileException
 
+from desktop.conf import TASK_SERVER
 from desktop.lib.fsmanager import get_client
 from aws.s3 import parse_uri
 from aws.s3.s3fs import S3FileSystemException
@@ -54,7 +55,6 @@ from filebrowser.utils import generate_chunks, calculate_total_size
 
 class S3FineUploaderChunkedUpload(object):
   def __init__(self, request, *args, **kwargs):
-    self._part_num = 1
     self._mp = None
     self._request = request
     self.qquuid = kwargs.get('qquuid')
@@ -64,13 +64,14 @@ class S3FineUploaderChunkedUpload(object):
     if self.file_name:
       self.file_name = unicodedata.normalize('NFC', self.file_name) # Normalize unicode
     self.destination = kwargs.get('dest', None)  # GET param avoids infinite looping
-    self.file_name = kwargs.get('qqfilename')
     self._fs = get_client(fs='s3a', user=self._request.user.username)
     self.bucket_name, self.key_name = parse_uri(self.destination)[:2]
     # Verify that the path exists
     self._fs._stats(self.destination)
     self._bucket = self._fs._get_bucket(self.bucket_name)
     self.filepath = self._fs.join(self.key_name, self.file_name)
+    if kwargs.get('chunk_size', None) != None:
+      self.chunk_size = kwargs.get('chunk_size')
 
   def check_access(self):
     if self._is_s3_upload():
@@ -85,13 +86,24 @@ class S3FineUploaderChunkedUpload(object):
         self.request.META['upload_failed'] = e
         raise PopupException("S3FineUploaderChunkedUpload: Initiating S3 multipart upload to target path: %s failed" % self.filepath)
 
+    self.chunk_size = DEFAULT_WRITE_SIZE
+    logging.debug("Chunk size = %d" % self.chunk_size)
+
     if self.totalfilesize != calculate_total_size(self.qquuid, self.qqtotalparts):
       raise PopupException(_('S3FineUploaderChunkedUpload: Sorry, the file size is not correct. %(name)s %(qquuid)s %(size)s') %
                             {'name': self.file_name, 'qquuid': self.qquuid, 'size': self.totalfilesize})
 
   def upload_chunks(self):
+    if TASK_SERVER.ENABLED.get():
+      try:
+        self._mp = self._bucket.initiate_multipart_upload(self.filepath)
+      except (S3FileUploadError, S3FileSystemException) as e:
+        LOG.error("S3FineUploaderChunkedUpload: Encountered error in S3UploadHandler check_access: %s" % e)
+        self.request.META['upload_failed'] = e
+        raise PopupException("S3FineUploaderChunkedUpload: Initiating S3 multipart upload to target path: %s failed" % self.filepath)
+
     try:
-      for i, (chunk, total) in enumerate(generate_chunks(self.qquuid, self.qqtotalparts, default_write_size=DEFAULT_WRITE_SIZE), 1):
+      for i, (chunk, total) in enumerate(generate_chunks(self.qquuid, self.qqtotalparts, default_write_size=self.chunk_size), 1):
         LOG.debug("S3FineUploaderChunkedUpload: uploading file %s, part %d, size %d, dest: %s" %
                   (self.file_name, i, total, self.destination))
         self._mp.upload_part_from_file(fp=chunk, part_num=i)

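The upload loop relies on boto's (v2) multipart contract: part numbers start at 1 (hence enumerate(..., 1)) and the upload must be completed or aborted explicitly. A condensed sketch of that lifecycle, assuming a boto Bucket handle like self._bucket and an iterable of file-like chunks:

def multipart_upload(bucket, key_name, chunks):
  mp = bucket.initiate_multipart_upload(key_name)
  try:
    for part_num, fp in enumerate(chunks, 1):  # S3 part numbers are 1-based
      mp.upload_part_from_file(fp=fp, part_num=part_num)
    mp.complete_upload()
  except Exception:
    mp.cancel_upload()  # abort so incomplete parts are not retained
    raise

Note that check_access() unconditionally resets self.chunk_size to DEFAULT_WRITE_SIZE, so a chunk_size passed through kwargs in __init__ only takes effect on code paths that skip check_access().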
+ 23 - 7
desktop/libs/azure/src/azure/abfs/upload.py

@@ -26,6 +26,7 @@ else:
 
 from django.core.files.uploadedfile import SimpleUploadedFile
 from django.core.files.uploadhandler import FileUploadHandler, SkipFile, StopFutureHandlers, StopUpload, UploadFileException
+from desktop.conf import TASK_SERVER
 
 from desktop.lib.exceptions_renderable import PopupException
 from desktop.lib.fsmanager import get_client
@@ -45,35 +46,39 @@ from filebrowser.utils import generate_chunks, calculate_total_size
 
 class ABFSFineUploaderChunkedUpload(object):
   def __init__(self, request, *args, **kwargs):
-    self.destination = kwargs.get('dest', None)  # GET param avoids infinite looping
-    self.target_path = None
-    self.file = None
+    self._mp = None
     self._request = request
-    self._part_size = DEFAULT_WRITE_SIZE
-
     self.qquuid = kwargs.get('qquuid')
     self.qqtotalparts = kwargs.get('qqtotalparts')
     self.totalfilesize = kwargs.get('qqtotalfilesize')
     self.file_name = kwargs.get('qqfilename')
     if self.file_name:
       self.file_name = unicodedata.normalize('NFC', self.file_name) # Normalize unicode
+    self.destination = kwargs.get('dest', None)  # GET param avoids infinite looping
+    self.target_path = None
+
+    if kwargs.get('chunk_size', None) != None:
+      self.chunk_size = kwargs.get('chunk_size')
 
     if self._is_abfs_upload():
       self._fs = self._get_abfs(request)
       self.filesystem, self.directory = parse_uri(self.destination)[:2]
        # Verify that the path exists
       self._fs.stats(self.destination)
-    LOG.debug("Chunk size = %d" % DEFAULT_WRITE_SIZE)
 
   def check_access(self):
    LOG.info('ABFSFineUploaderChunkedUpload: handle file upload with temp file %s.' % self.file_name)
     self.target_path = self._fs.join(self.destination, self.file_name)
+    self.filepath = self.target_path
+    self.chunk_size = DEFAULT_WRITE_SIZE
+    logging.debug("Chunk size = %d" % self.chunk_size)
 
     try:
       # Check access permissions before attempting upload
       #self._check_access() #implement later
       LOG.debug("ABFSFineUploaderChunkedUpload: Initiating ABFS upload to target path: %s" % self.target_path)
-      self._fs.create(self.target_path)
+      if not TASK_SERVER.ENABLED.get():
+        self._fs.create(self.target_path)
     except (ABFSFileUploadError, ABFSFileSystemException) as e:
       LOG.error("ABFSFineUploaderChunkedUpload: Encountered error in ABFSUploadHandler check_access: %s" % e)
       self.request.META['upload_failed'] = e
@@ -84,6 +89,16 @@ class ABFSFineUploaderChunkedUpload(object):
                             {'name': self.file_name, 'qquuid': self.qquuid, 'size': self.totalfilesize})
 
   def upload_chunks(self):
+    if TASK_SERVER.ENABLED.get():
+      self.target_path = self._fs.join(self.destination, self.file_name)
+      try:
+        LOG.debug("ABFSFineUploaderChunkedUpload: Initiating ABFS upload to target path: %s" % self.target_path)
+        self._fs.create(self.target_path)
+      except (ABFSFileUploadError, ABFSFileSystemException) as e:
+        LOG.error("ABFSFineUploaderChunkedUpload: Encountered error in ABFSUploadHandler check_access: %s" % e)
+        self.request.META['upload_failed'] = e
+        raise PopupException("ABFSFineUploaderChunkedUpload: Initiating ABFS upload to target path: %s failed %s" % (self.target_path, e))
+
     try:
       for i, (chunk, total) in enumerate(generate_chunks(self.qquuid, self.qqtotalparts, default_write_size=DEFAULT_WRITE_SIZE), 1):
         LOG.debug("ABFSFineUploaderChunkedUpload: uploading file %s, part %d, size %d, dest: %s" %
@@ -100,6 +115,7 @@ class ABFSFineUploaderChunkedUpload(object):
       LOG.debug("%s" % self._fs.stats(self.target_path))
 
   def upload(self):
+    self.filepath = self.target_path
     self.check_access()
     self.upload_chunks()
 

+ 9 - 5
desktop/libs/hadoop/src/hadoop/fs/upload.py

@@ -50,13 +50,10 @@ if sys.version_info[0] > 2:
 else:
   from django.utils.translation import ugettext as _
 
-
 LOG = logging.getLogger()
 
-
 UPLOAD_SUBDIR = 'hue-uploads'
 
-
 class LocalFineUploaderChunkedUpload(object):
   def __init__(self, request, *args, **kwargs):
     self._request = request
@@ -94,9 +91,14 @@ class HDFSFineUploaderChunkedUpload(object):
       self.file_name = unicodedata.normalize('NFC', self.file_name) # Normalize unicode
     self.dest = kwargs.get('dest')
     self.file_name = kwargs.get('qqfilename')
-    self.filepath = request.fs.join(self.dest, self.file_name)
+    if kwargs.get('filepath', None) != None:
+      self.filepath = kwargs.get('filepath')
+    else:
+      self.filepath = request.fs.join(self.dest, self.file_name)
+      kwargs['filepath'] = self.filepath
     self._file = None
-    self.chunk_size = 0
+    if kwargs.get('chunk_size', None) != None:
+      self.chunk_size = kwargs.get('chunk_size')
 
   def check_access(self):
     if self._request.fs.isdir(self.dest) and posixpath.sep in self.file_name:
@@ -123,6 +125,8 @@ class HDFSFineUploaderChunkedUpload(object):
       logging.debug("HDFSFineUploaderChunkedUpload: uploading file %s, part %d, size %d, dest: %s" %
                     (self.file_name, i, total, self.dest))
       self._file.write(chunk)
+      percentcomplete = int((total * 100) / self.totalfilesize)
+      logging.debug("HDFSFineUploaderChunkedUpload: progress %d" % percentcomplete)
     self._file.flush()
     self._file.finish_upload(self.totalfilesize)
     self._file._do_cleanup = False

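The new progress logging divides by self.totalfilesize, which is zero for an empty file. A guarded variant of the calculation, purely as a sketch:

def percent_complete(written, total_size):
  # Avoid ZeroDivisionError for zero-byte uploads.
  if not total_size:
    return 100  # an empty file is trivially complete
  return int(written * 100 / total_size)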
+ 9 - 9
desktop/libs/notebook/src/notebook/models.py

@@ -570,15 +570,15 @@ class ApiWrapper():
     self.api = _get_api(request, snippet)
 
   def __getattr__(self, name):
-    if TASK_SERVER.ENABLED.get():
-      from notebook import tasks as ntasks
-      if hasattr(ntasks, name):
-        attr = getattr(ntasks, name)
-        def _method(*args, **kwargs):
-          return attr(*args, **dict(kwargs, postdict=self.request.POST, user_id=self.request.user.id))
-        return _method
-      else:
-        LOG.debug('Skipping Task Server call %s' % name)
+    # if TASK_SERVER.ENABLED.get():
+    #   from notebook import tasks as ntasks
+    #   if hasattr(ntasks, name):
+    #     attr = getattr(ntasks, name)
+    #     def _method(*args, **kwargs):
+    #       return attr(*args, **dict(kwargs, postdict=self.request.POST, user_id=self.request.user.id))
+    #     return _method
+    #   else:
+    #     LOG.debug('Skipping Task Server call %s' % name)
     return getattr(self.api, name)