HUE-8908 [fb] ABFS in Hue (#932)

* Adding placeholder functions and methods for the ABFS file system

* Minor changes to ABFS files

* Fully added listdir_stats and listdir
Added test cases to abfs_test
Added methods to abfs/__init__.py

* Added Stats to ABFS
Added a test case to hadoop
In process of adding more methods

* Added make directory and delete directory
Changed status method
Condensed code

* Made methods rely more on other methods

* Added more test conditions to ABFS
Fixed Bugs

* Improved path exists method
Created a shell for append and flush
Created Access Control Methods (like chmod and chown)

* Moved around functions for organizational purposes
Edited read to take in more parameters
Added copy functions
More comments for clarity

* Fixed Append and Flush
Minor changes to test case concerning create

* Added more test conditions
Changed methods related to create to take more parameters
Attempted to create methods that deal with URIs

* Changed Stats and List Methods to properly take in Parameters
Changed Comments
Changed the regex statement for ABFS
Changed Join statement

* Added an upload class
Added the upload test_case to ABFS_testcases

* Added Code to views to make upload work properly
Revised some code in __init__ to work better with upload

* Added _formatted stats to abfs
Minor improvements

* Added stats class to comply with upload
Changed the stats method to use stats class

* Changed code so that upload can work

* Minor changes to code
Deleted unnecessary code

* Revised Copy functions
Added test cases for Copy

* Made Stats compatible with root directory

* Minor changes to ABFS to comply with views test
Added test Permissions for ABFS

* Minor Bug fixes in ABFS

* Added test permissions for ADLS in views test

* Added conditions for test permissions

* Added ABFS to HUE UI

* Improved access time of ABFS by changing how filesystem_stats worked
Changes to ABFSStats

* Overhauled listdir_stats to run faster
Made list functions rely on list_stats

* Added ABFS file to read files

* Made Some directories work with HUE UI
Minor changes to ABFSstats

* Fixed bugs concerning directories showing up as files
Fixed bug where filesystem does not go to root

* Made Files Readable on HUE using ABFS

* Added the same icon as ADLS to make ABFS work

* Changed some text to fit the current ABFS system

* Fixed bug concerning ABFS sidebar not lighting up
Slight comment changes
Made ABFS files downloadable

* Disabled upload for ABFS filesystems
Removed Home in ABFS

* Changed indentation in js files

* Added a space in code to comply with tests

* Fixed bug concerning editing files

* Added ABFS Functionality to the assist panel

* Changed format slightly to comply with checks

* Disabled home button and add file on the side bar for ABFS

* Fixed Access Time and Modified Time in ABFS_Stats

* Changes to code to improve readability
Changed format of code

* Minor changes to code
Implemented CopyFromLocal

* Added upload button to sidebar

* Added condition requiring the file system name to be at least 3 characters

* Added the ability for ABFS to move files
travisle22 6 years ago
parent
commit
fae59a5bc1

+ 3 - 0
apps/filebrowser/src/filebrowser/forms.py

@@ -29,6 +29,7 @@ from django.forms import FileField, CharField, BooleanField, Textarea
 from django.forms.formsets import formset_factory, BaseFormSet
 
 from aws.s3 import S3A_ROOT, normpath as s3_normpath
+from azure.abfs.__init__ import ABFS_ROOT, normpath as abfs_normpath
 from desktop.lib import i18n
 from hadoop.fs import normpath
 from filebrowser.lib import rwx
@@ -73,6 +74,8 @@ class PathField(CharField):
     cleaned_path = CharField.clean(self, value)
     if value.lower().startswith(S3A_ROOT):
       cleaned_path = s3_normpath(cleaned_path)
+    elif value.lower().startswith(ABFS_ROOT):
+      cleaned_path = abfs_normpath(cleaned_path)
     else:
       cleaned_path = normpath(cleaned_path)
     return cleaned_path
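
The clean() override above dispatches on the URI scheme, so each filesystem keeps its own normalization rules. A small sketch of the intended behavior, using the normalizers imported in this file (the normalized forms shown are indicative, not verified output):

from filebrowser.forms import PathField

field = PathField()
field.clean('abfs://testfs//dir/')  # routed to abfs_normpath -> 'abfs://testfs/dir'
field.clean('s3a://bucket//key/')   # routed to s3_normpath   -> 's3a://bucket/key'
field.clean('/user//test/')         # hadoop normpath         -> '/user/test'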

+ 6 - 0
apps/filebrowser/src/filebrowser/templates/fb_components.mako

@@ -39,6 +39,12 @@ from aws.conf import get_default_region
               <svg class="hi"><use xlink:href='#hi-adls'></use></svg>
             </span>
           </li>
+        %elif path.lower().find('abfs://') == 0:
+          <li style="padding-top: 12px">
+            <span class="breadcrumb-link homeLink">
+              <svg class="hi"><use xlink:href='#hi-adls'></use></svg>
+            </span>
+          </li>
         %else:
           <li><a class="pointer breadcrumb-link homeLink" data-bind="click: $root.openHome, attr:{'href': window.HUE_BASE_URL + '/filebrowser/view=${ urllib.quote(path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS) }?default_to_home'}">
             <i class="fa fa-home"></i> ${_('Home')}</a>

+ 14 - 6
apps/filebrowser/src/filebrowser/templates/listdir.mako

@@ -44,7 +44,7 @@ ${ fb_components.menubar() }
   %endif
 </style>
 
-<div id="${ path.startswith('S3A://') and 'filebrowser_s3Components' or 'filebrowserComponents' }" class="container-fluid filebrowser">
+<div id="${ path.startswith('S3A://') and 'filebrowser_s3Components' or path.startswith('abfs://') and 'filebrowser_abfsComponents' or 'filebrowserComponents' }" class="container-fluid filebrowser">
   <div class="card card-small">
     <div class="actionbar">
     <%actionbar:render>
@@ -56,7 +56,7 @@ ${ fb_components.menubar() }
         <div class="btn-toolbar" style="display: inline; vertical-align: middle">
           <div id="ch-dropdown" class="btn-group" style="vertical-align: middle">
             <button class="btn dropdown-toggle" title="${_('Actions')}" data-toggle="dropdown"
-            data-bind="visible: !inTrash(), enable: selectedFiles().length > 0 && (!isS3() || (isS3() && !isS3Root()))">
+            data-bind="visible: !inTrash(), enable: selectedFiles().length > 0 && ((!isS3() && !isABFS()) || (isS3() && !isS3Root()) || (isABFS() && !isABFSRoot()))">
               <i class="fa fa-cog"></i> ${_('Actions')}
               <span class="caret" style="line-height: 15px"></span>
             </button>
@@ -155,9 +155,12 @@ ${ fb_components.menubar() }
           <!-- ko if: isS3 -->
             <a class="btn fileToolbarBtn" title="${_('Upload files')}" data-bind="visible: !inTrash(), css: {'disabled': isS3Root()}, click: function(){ if (!isS3Root()) { uploadFile() }}"><i class="fa fa-arrow-circle-o-up"></i> ${_('Upload')}</a>
           <!-- /ko -->
-          <!-- ko ifnot: isS3 -->
+          <!-- ko if: isABFS -->
+            <a class="btn fileToolbarBtn" title="${_('Upload files')}" data-bind="visible: !inTrash(), css: {'disabled': isABFSRoot()}, click: function(){ if (!isABFSRoot()) { uploadFile() }}"><i class="fa fa-arrow-circle-o-up"></i> ${_('Upload')}</a>
+          <!-- /ko -->
+          <!-- ko ifnot: isS3() || isABFS() -->
           <div id="upload-dropdown" class="btn-group" style="vertical-align: middle">
-            <a href="javascript: void(0)" class="btn upload-link dropdown-toggle" title="${_('Upload')}" data-bind="click: uploadFile, visible: !inTrash(), css: {'disabled': isS3() && isS3Root()}">
+            <a href="javascript: void(0)" class="btn upload-link dropdown-toggle" title="${_('Upload')}" data-bind="click: uploadFile, visible: !inTrash(), css: {'disabled': isS3() && isS3Root() || isABFS() && isABFSRoot()}">
               <i class="fa fa-arrow-circle-o-up"></i> ${_('Upload')}
             </a>
           </div>
@@ -169,8 +172,13 @@ ${ fb_components.menubar() }
               <span class="caret"></span>
             </a>
             <ul class="dropdown-menu pull-right" style="top: auto">
-              <li data-bind="visible: !isS3() || isS3() && !isS3Root()"><a href="javascript: void(0)" class="create-file-link" title="${_('File')}"><i class="fa fa-file-o"></i> ${_('File')}</a></li>
-              <li><a href="javascript: void(0)" class="create-directory-link" title="${_('Directory')}"><i class="fa fa-folder"></i> <span data-bind="visible: !isS3() || isS3() && !isS3Root()">${_('Directory')}</span><span data-bind="visible: isS3() && isS3Root()">${_('Bucket')}</span></a></li>
+              <li data-bind="visible: !isS3() && !isABFS() || isS3() && !isS3Root() || isABFS() && !isABFSRoot()"><a href="javascript: void(0)" class="create-file-link" title="${_('File')}"><i class="fa fa-file-o"></i> ${_('File')}</a></li>
+              <li><a href="javascript: void(0)" class="create-directory-link" title="${_('Directory')}">
+                <i class="fa fa-folder"></i> 
+                <span data-bind="visible: !isS3() && !isABFS() || isS3() && !isS3Root() || isABFS() && !isABFSRoot()">${_('Directory')}</span>
+                <span data-bind="visible: isS3() && isS3Root()">${_('Bucket')}</span>
+                <span data-bind="visible: isABFS() && isABFSRoot()">${_('File System')}</span>
+              </a></li>
             </ul>
           </div>
         </div>

+ 33 - 7
apps/filebrowser/src/filebrowser/templates/listdir_components.mako

@@ -455,21 +455,27 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
     <div id="createDirectoryModal" class="modal hide fade">
       <div class="modal-header">
         <button type="button" class="close" data-dismiss="modal" aria-label="${ _('Close') }"><span aria-hidden="true">&times;</span></button>
-        <!-- ko if: !isS3() || (isS3() && !isS3Root()) -->
+        <!-- ko if: (!isS3() && !isABFS()) || (isS3() && !isS3Root()) || (isABFS() && !isABFSRoot()) -->
         <h2 class="modal-title">${_('Create Directory')}</h2>
         <!-- /ko -->
         <!-- ko if: isS3() && isS3Root() -->
         <h2 class="modal-title">${_('Create Bucket')}</h2>
         <!-- /ko -->
+        <!-- ko if: isABFS() && isABFSRoot() -->
+        <h2 class="modal-title">${_('Create File System')}</h2>
+        <!-- /ko -->
       </div>
       <div class="modal-body">
         <label>
-          <!-- ko if: !isS3() || (isS3() && !isS3Root()) -->
+          <!-- ko if: (!isS3() && !isABFS()) || (isS3() && !isS3Root()) || (isABFS() && !isABFSRoot()) -->
           ${_('Directory Name')}
           <!-- /ko -->
           <!-- ko if: isS3() && isS3Root() -->
           ${_('Bucket Name')}
           <!-- /ko -->
+          <!-- ko if: isABFS() && isABFSRoot() -->
+          ${_('File System Name')}
+          <!-- /ko -->
           <input id="newDirectoryNameInput" name="name" value="" type="text" class="input-xlarge"/></label>
           <input type="hidden" name="path" data-bind="value: currentPath"/>
       </div>
@@ -480,6 +486,9 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
         <div id="directoryNameExistsAlert" class="hide" style="position: absolute; left: 10px;">
           <span class="label label-important"><span class="newName"></span> ${_('already exists.')}</span>
         </div>
+        <div id="smallFileSystemNameAlert" class="hide" style="position: absolute; left: 10px;">
+          <span class="label label-important"><span class="newName"></span> ${_('File system name must be 3 or more characters')}</span>
+        </div>
         <a class="btn" href="#" data-dismiss="modal">${_('Cancel')}</a>
         <input class="btn btn-primary" type="submit" value="${_('Create')}" />
       </div>
@@ -560,12 +569,12 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
   <!-- actions context menu -->
   <ul class="context-menu dropdown-menu">
   <!-- ko ifnot: $root.inTrash -->
-    <li data-bind="visible: !isS3() || (isS3() && !isS3Root()), css: {'disabled': $root.selectedFiles().length != 1 || isCurrentDirSelected().length > 0}">
+    <li data-bind="visible: (!isS3() && !isABFS()) || (isS3() && !isS3Root()) || (isABFS() && !isABFSRoot()), css: {'disabled': $root.selectedFiles().length != 1 || isCurrentDirSelected().length > 0}">
     <a href="javascript: void(0)" title="${_('Rename')}" data-bind="click: ($root.selectedFiles().length == 1 && isCurrentDirSelected().length == 0) ? $root.renameFile: void(0)"><i class="fa fa-fw fa-font"></i>
     ${_('Rename')}</a></li>
-    <li data-bind="visible: !isS3() || (isS3() && !isS3Root()), css: {'disabled': $root.selectedFiles().length == 0 || isCurrentDirSelected().length > 0}">
+    <li data-bind="visible: (!isS3() && !isABFS()) || (isS3() && !isS3Root()) || (isABFS() && !isABFSRoot()), css: {'disabled': $root.selectedFiles().length == 0 || isCurrentDirSelected().length > 0}">
     <a href="javascript: void(0)" title="${_('Move')}" data-bind="click: ( $root.selectedFiles().length > 0 && isCurrentDirSelected().length == 0) ? $root.move: void(0)"><i class="fa fa-fw fa-random"></i> ${_('Move')}</a></li>
-    <li data-bind="visible: !isS3() || (isS3() && !isS3Root()), css: {'disabled': $root.selectedFiles().length == 0 || isCurrentDirSelected().length > 0}">
+    <li data-bind="visible: (!isS3() && !isABFS()) || (isS3() && !isS3Root()) || (isABFS() && !isABFSRoot()), css: {'disabled': $root.selectedFiles().length == 0 || isCurrentDirSelected().length > 0}">
     <a href="javascript: void(0)" title="${_('Copy')}" data-bind="click: ($root.selectedFiles().length > 0 && isCurrentDirSelected().length == 0) ? $root.copy: void(0)"><i class="fa fa-fw fa-files-o"></i> ${_('Copy')}</a></li>
     % if show_download_button:
     <li data-bind="css: {'disabled': $root.inTrash() || $root.selectedFiles().length != 1 || selectedFile().type != 'file'}">
@@ -1050,6 +1059,10 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
       self.isAdls = ko.pureComputed(function () {
         return self.currentPath().toLowerCase().indexOf('adl:/') === 0;
       });
+      
+      self.isABFS = ko.pureComputed(function () {
+        return self.currentPath().toLowerCase().indexOf('abfs://') === 0;
+      });
 
       self.scheme = ko.pureComputed(function () {
         var path = self.currentPath();
@@ -1075,6 +1088,8 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
           return 's3a://';
         } else if (path.indexOf('adl:/') >= 0) {
           return 'adl:/';
+        } else if (path.indexOf('abfs://') >= 0) {
+          return 'abfs://';
         } else {
           return '/';
         }
@@ -1093,13 +1108,13 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
         return currentPath.indexOf('/') === 0 || currentPath.indexOf('hdfs') === 0
       });
       self.isCompressEnabled = ko.pureComputed(function () {
-        return !self.isS3() && !self.isAdls();
+        return !self.isS3() && !self.isAdls() && !self.isABFS();
       });
       self.isSummaryEnabled = ko.pureComputed(function () {
         return self.isHdfs();
       });
       self.isPermissionEnabled = ko.pureComputed(function () {
-        return !self.isS3();
+        return !self.isS3() && !self.isABFS();
       });
       self.isReplicationEnabled = ko.pureComputed(function () {
         return self.isHdfs();
@@ -1114,6 +1129,10 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
       self.isS3Root = ko.pureComputed(function () {
         return self.isS3() && self.currentPath().toLowerCase() === 's3a://';
       });
+      
+      self.isABFSRoot = ko.pureComputed(function () {
+        return self.isABFS() && self.currentPath().toLowerCase() === 'abfs://';
+      });
 
       self.inTrash = ko.computed(function() {
         return self.currentPath().match(/^\/user\/.+?\/\.Trash/);
@@ -1787,6 +1806,12 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
           resetPrimaryButtonsStatus(); //globally available
           return false;
         }
+        if ($("#newDirectoryNameInput").val().length < 3 && self.isABFSRoot()) {
+          $("#smallFileSystemNameAlert").show();
+          $("#newDirectoryNameInput").addClass("fieldError");
+          resetPrimaryButtonsStatus(); //globally available
+          return false;
+        }
         $(formElement).ajaxSubmit({
           dataType:  'json',
           success: function() {
@@ -2368,6 +2393,7 @@ from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE
         $("#newDirectoryNameInput").removeClass("fieldError");
         $("#directoryNameRequiredAlert").hide();
         $("#directoryNameExistsAlert").hide();
+        $("#smallFileSystemNameAlert").hide();
       });
 
       $("#newFileNameInput").focus(function () {

+ 1 - 1
apps/filebrowser/src/filebrowser/views.py

@@ -1339,7 +1339,7 @@ def _upload_file(request):
           'result': _massage_stats(request, stat_absolute_path(filepath, request.fs.stats(filepath))),
           'next': request.GET.get("next")
         })
-
+ 
         return response
     else:
         raise PopupException(_("Error in upload form: %s") % (form.errors,))

+ 80 - 1
apps/filebrowser/src/filebrowser/views_test.py

@@ -39,6 +39,7 @@ from avro import schema, datafile, io
 
 from aws.s3.s3fs import S3FileSystemException
 from aws.s3.s3test_utils import get_test_bucket
+from azure.conf import is_abfs_enabled, is_adls_enabled
 from django.contrib.auth.models import User
 from django.urls import reverse
 from django.utils.encoding import smart_str
@@ -1197,7 +1198,6 @@ def test_location_to_url():
   assert_equal(prefix + '/', location_to_url('hdfs://localhost:8020'))
   assert_equal(prefix + 's3a://bucket/key', location_to_url('s3a://bucket/key'))
 
-
 class TestS3AccessPermissions(object):
 
   def setUp(self):
@@ -1243,3 +1243,82 @@ class TestS3AccessPermissions(object):
       assert_equal(200, response.status_code)
     finally:
       remove_from_group(self.user.username, 'has_s3')
+      
+class TestABFSAccessPermissions(object):
+
+  def setUp(self):
+    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
+    grant_access('test', 'test', 'filebrowser')
+    add_to_group('test')
+
+    self.user = User.objects.get(username="test")
+
+  def test_no_default_permissions(self):
+    if not is_abfs_enabled():
+      raise SkipTest
+    response = self.client.get('/filebrowser/view=ABFS://')
+    assert_equal(500, response.status_code)
+
+    # 500 for real currently
+#     with tempfile.NamedTemporaryFile() as local_file: # Flaky
+#       DEST_DIR = 'S3A://bucket/hue'
+#       LOCAL_FILE = local_file.name
+#       assert_raises(S3FileSystemException, self.client.post, '/filebrowser/upload/file?dest=%s' % DEST_DIR, dict(dest=DEST_DIR, hdfs_file=file(LOCAL_FILE)))
+
+  def test_has_default_permissions(self):
+    if not is_abfs_enabled():
+      raise SkipTest
+    add_permission(self.user.username, 'has_abfs', permname='abfs_access', appname='filebrowser')
+
+    try:
+      response = self.client.get('/filebrowser/view=ABFS://')
+      assert_equal(200, response.status_code)
+    finally:
+      remove_from_group(self.user.username, 'has_abfs')
+      
+class TestADLSAccessPermissions(object):
+
+  def setUp(self):
+    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
+    grant_access('test', 'test', 'filebrowser')
+    add_to_group('test')
+
+    self.user = User.objects.get(username="test")
+
+  def test_no_default_permissions(self):
+    if not is_adls_enabled():
+      raise SkipTest
+    response = self.client.get('/filebrowser/view=ADL://')
+    assert_equal(500, response.status_code)
+    
+    response = self.client.get('/filebrowser/view=ADL://hue_adls_testing')
+    assert_equal(500, response.status_code)
+
+    response = self.client.get('/filebrowser/view=adl://hue_adls_testing')
+    assert_equal(500, response.status_code)
+
+    response = self.client.get('/filebrowser/view=ADL://hue_adls_testing/ADLS_tables')
+    assert_equal(500, response.status_code)
+
+    response = self.client.post('/filebrowser/rmtree', dict(path=['ADL://hue-test-01']))
+    assert_equal(500, response.status_code)
+
+    # 500 for real currently
+    assert_raises(IOError, self.client.get, '/filebrowser/edit=ADL://hue-test-01')
+
+    # 500 for real currently
+#     with tempfile.NamedTemporaryFile() as local_file: # Flaky
+#       DEST_DIR = 'S3A://bucket/hue'
+#       LOCAL_FILE = local_file.name
+#       assert_raises(S3FileSystemException, self.client.post, '/filebrowser/upload/file?dest=%s' % DEST_DIR, dict(dest=DEST_DIR, hdfs_file=file(LOCAL_FILE)))
+
+  def test_has_default_permissions(self):
+    if not is_adls_enabled():
+      raise SkipTest
+    add_permission(self.user.username, 'has_adls', permname='adls_access', appname='filebrowser')
+
+    try:
+      response = self.client.get('/filebrowser/view=ADL://')
+      assert_equal(200, response.status_code)
+    finally:
+      remove_from_group(self.user.username, 'has_adls')

+ 73 - 1
desktop/core/src/desktop/js/api/apiHelper.js

@@ -31,6 +31,7 @@ const DOCUMENTS_SEARCH_API = '/desktop/api2/docs/';
 const FETCH_CONFIG = '/desktop/api2/get_config/';
 const HDFS_API_PREFIX = '/filebrowser/view=' + encodeURIComponent('/');
 const ADLS_API_PREFIX = '/filebrowser/view=' + encodeURIComponent('adl:/');
+const ABFS_API_PREFIX = '/filebrowser/view=' + encodeURIComponent('ABFS://');
 const GIT_API_PREFIX = '/desktop/api/vcs/contents/';
 const S3_API_PREFIX = '/filebrowser/view=' + encodeURIComponent('S3A://');
 const IMPALA_INVALIDATE_API = '/impala/api/invalidate';
@@ -196,6 +197,7 @@ class ApiHelper {
       });
       $.totalStorage(self.getAssistCacheIdentifier({ sourceType: 'hdfs' }), {});
       $.totalStorage(self.getAssistCacheIdentifier({ sourceType: 'adls' }), {});
+      $.totalStorage(self.getAssistCacheIdentifier({ sourceType: 'abfs' }), {});
       $.totalStorage(self.getAssistCacheIdentifier({ sourceType: 'git' }), {});
       $.totalStorage(self.getAssistCacheIdentifier({ sourceType: 's3' }), {});
       $.totalStorage(self.getAssistCacheIdentifier({ sourceType: 'collections' }), {});
@@ -527,7 +529,7 @@ class ApiHelper {
    *
    * @param {Object} options
    * @param {string[]} options.path
-   * @param {string} options.type - 's3', 'adls' or 'hdfs'
+   * @param {string} options.type - 's3', 'adls', 'abfs' or 'hdfs'
    * @param {number} [options.offset]
    * @param {number} [options.length]
    * @param {boolean} [options.silenceErrors]
@@ -539,6 +541,8 @@ class ApiHelper {
       url = S3_API_PREFIX;
     } else if (options.type === 'adls') {
       url = ADLS_API_PREFIX;
+    } else if (options.type === 'abfs') {
+      url = ABFS_API_PREFIX;
     } else {
       url = HDFS_API_PREFIX;
     }
@@ -709,6 +713,74 @@ class ApiHelper {
     );
   }
 
+  /**
+   * @param {Object} options
+   * @param {Function} options.successCallback
+   * @param {Function} [options.errorCallback]
+   * @param {boolean} [options.silenceErrors]
+   * @param {Number} [options.timeout]
+   * @param {Object} [options.editor] - Ace editor
+   *
+   * @param {string[]} options.pathParts
+   * @param {number} [options.pageSize] - Default 500
+   * @param {number} [options.page] - Default 1
+   * @param {string} [options.filter]
+   */
+  fetchAbfsPath(options) {
+    const self = this;
+    options.pathParts.shift();
+    let url =
+      ABFS_API_PREFIX +
+      encodeURI(options.pathParts.join('/')) +
+      '?format=json&sortby=name&descending=false&pagesize=' +
+      (options.pageSize || 500) +
+      '&pagenum=' +
+      (options.page || 1);
+    if (options.filter) {
+      url += '&filter=' + options.filter;
+    }
+    const fetchFunction = function(storeInCache) {
+      if (options.timeout === 0) {
+        self.assistErrorCallback(options)({ status: -1 });
+        return;
+      }
+      return $.ajax({
+        dataType: 'json',
+        url: url,
+        timeout: options.timeout,
+        success: function(data) {
+          if (
+            !data.error &&
+            !self.successResponseIsError(data) &&
+            typeof data.files !== 'undefined' &&
+            data.files !== null
+          ) {
+            if (data.files.length > 2 && !options.filter) {
+              storeInCache(data);
+            }
+            options.successCallback(data);
+          } else {
+            self.assistErrorCallback(options)(data);
+          }
+        }
+      })
+        .fail(self.assistErrorCallback(options))
+        .always(() => {
+          if (typeof options.editor !== 'undefined' && options.editor !== null) {
+            options.editor.hideSpinner();
+          }
+        });
+    };
+
+    return fetchCached.bind(self)(
+      $.extend({}, options, {
+        sourceType: 'abfs',
+        url: url,
+        fetchFunction: fetchFunction
+      })
+    );
+  }
+
   /**
    * @param {Object} options
    * @param {Function} options.successCallback

+ 17 - 0
desktop/core/src/desktop/js/jquery/plugins/jquery.filechooser.js

@@ -106,6 +106,23 @@ const pluginName = 'jHueFileChooser',
           home: '',
           name: 'ADLS'
         }
+      },
+      abfs: {
+        scheme: 'abfs',
+        root: 'abfs://',
+        home: 'abfs://',
+        icon: {
+          svg: {
+            brand: '#hi-adls',
+            home: '#hi-adls'
+          },
+          brand: 'fa-windows',
+          home: 'fa-windows'
+        },
+        label: {
+          home: '',
+          name: 'ABFS'
+        }
       }
     },
     fsSelected: 'hdfs',

+ 16 - 1
desktop/core/src/desktop/js/ko/bindings/ko.aceEditor.js

@@ -641,6 +641,16 @@ ko.bindingHandlers.aceEditor = {
       dblClickAdlsItemSub.remove();
     });
 
+    const dblClickAbfsItemSub = huePubSub.subscribe('assist.dblClickAbfsItem', assistHdfsEntry => {
+      if ($el.data('last-active-editor')) {
+        editor.session.insert(editor.getCursorPosition(), 'abfs://' + assistHdfsEntry.path + "'");
+      }
+    });
+
+    disposeFunctions.push(() => {
+      dblClickAbfsItemSub.remove();
+    });
+
     const dblClickGitItemSub = huePubSub.subscribe('assist.dblClickGitItem', assistGitEntry => {
       if ($el.data('last-active-editor')) {
         editor.session.setValue(assistGitEntry.fileContent());
@@ -802,7 +812,12 @@ ko.bindingHandlers.aceEditor = {
       drop: function(e, ui) {
         const position = editor.renderer.screenToTextCoordinates(e.clientX, e.clientY);
         let text = ui.helper.text();
-        if (lastMeta.type === 's3' || lastMeta.type === 'hdfs' || lastMeta.type === 'adls') {
+        if (
+          lastMeta.type === 's3' ||
+          lastMeta.type === 'hdfs' ||
+          lastMeta.type === 'adls' ||
+          lastMeta.type === 'abfs'
+        ) {
           text = "'" + lastMeta.definition.path + "'";
         }
         editor.moveCursorToPosition(position);

+ 5 - 0
desktop/core/src/desktop/js/ko/components/assist/assistStorageEntry.js

@@ -27,6 +27,10 @@ const TYPE_SPECIFICS = {
     apiHelperFetchFunction: 'fetchAdlsPath',
     dblClickPubSubId: 'assist.dblClickAdlsItem'
   },
+  abfs: {
+    apiHelperFetchFunction: 'fetchAbfsPath',
+    dblClickPubSubId: 'assist.dblClickAbfsItem'
+  },
   hdfs: {
     apiHelperFetchFunction: 'fetchHdfsPath',
     dblClickPubSubId: 'assist.dblClickHdfsItem'
@@ -328,6 +332,7 @@ class AssistStorageEntry {
     type = typeMatch ? typeMatch[1] : type || 'hdfs';
     type = type.replace(/s3.*/i, 's3');
     type = type.replace(/adl.*/i, 'adls');
+    type = type.replace(/abfs.*/i, 'abfs');
 
     const rootEntry = new AssistStorageEntry({
       type: type.toLowerCase(),

+ 4 - 1
desktop/core/src/desktop/js/ko/components/assist/ko.assistPanel.js

@@ -125,7 +125,10 @@ class AssistPanel {
           if (appConfig.browser && appConfig.browser.interpreter_names) {
             const storageBrowsers = appConfig.browser.interpreter_names.filter(
               interpreter =>
-                interpreter === 'adls' || interpreter === 'hdfs' || interpreter === 's3'
+                interpreter === 'adls' ||
+                interpreter === 'hdfs' ||
+                interpreter === 's3' ||
+                interpreter === 'abfs'
             );
 
             if (storageBrowsers.length) {

+ 17 - 2
desktop/core/src/desktop/js/ko/components/assist/ko.assistStoragePanel.js

@@ -44,7 +44,7 @@ const TEMPLATE = `
 
   <script type="text/html" id="assist-storage-header-actions">
     <div class="assist-db-header-actions">
-      <!-- ko if: type !== 's3' -->
+      <!-- ko if: type !== 's3' && type !== 'abfs' -->
       <a class="inactive-action" href="javascript:void(0)" data-bind="click: goHome, attr: { title: I18n('Go to ' + window.USER_HOME_DIR) }"><i class="pointer fa fa-home"></i></a>
       <!-- ko if: window.SHOW_UPLOAD_BUTTON -->
       <a class="inactive-action" data-bind="dropzone: {
@@ -61,6 +61,20 @@ const TEMPLATE = `
       </a>
       <!-- /ko -->
       <!-- /ko -->
+      <!-- ko if: type === 'abfs' && path !== '/' && window.SHOW_UPLOAD_BUTTON -->
+      <a class="inactive-action" data-bind="dropzone: {
+            url: '/filebrowser/upload/file?dest=' + 'abfs:/' + path,
+            params: { dest: 'abfs:/' + path },
+            paramName: 'hdfs_file',
+            onError: function(x, e){ $(document).trigger('error', e); },
+            onComplete: function () { huePubSub.publish('assist.storage.refresh'); } }" title="${I18n(
+              'Upload file'
+            )}" href="javascript:void(0)">
+        <div class="dz-message inline" data-dz-message><i class="pointer fa fa-plus" title="${I18n(
+          'Upload file'
+        )}"></i></div>
+      </a>
+      <!-- /ko -->
       <a class="inactive-action" href="javascript:void(0)" data-bind="click: function () { huePubSub.publish('assist.storage.refresh'); }" title="${I18n(
         'Manual refresh'
       )}"><i class="pointer fa fa-refresh" data-bind="css: { 'fa-spin blue' : loading }"></i></a>
@@ -198,7 +212,8 @@ class AssistStoragePanel {
     });
 
     huePubSub.subscribe('assist.storage.go.home', () => {
-      const path = this.activeSource() === 's3' ? '/' : window.USER_HOME_DIR;
+      const path =
+        this.activeSource() === 's3' || this.activeSource() === 'abfs' ? '/' : window.USER_HOME_DIR;
       this.loadPath(path);
       apiHelper.setInTotalStorage('assist', 'currentStoragePath_' + this.activeSource(), path);
     });

+ 2 - 0
desktop/core/src/desktop/js/ko/components/ko.sidebar.js

@@ -221,6 +221,8 @@ class Sidebar {
               active = child.type === 's3';
             } else if (location.href.indexOf('=adl') !== -1) {
               active = child.type === 'adls';
+            } else if (location.href.indexOf('=abfs') !== -1) {
+              active = child.type === 'abfs';
             } else {
               active = child.type === 'hdfs';
             }

+ 9 - 0
desktop/core/src/desktop/models.py

@@ -1787,6 +1787,15 @@ class ClusterConfig():
         'tooltip': _('ADLS'),
         'page': '/filebrowser/view=' + urllib.quote('adl:/'.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
       })
+      
+    if 'filebrowser' in self.apps and ANALYTIC_DB not in self.cluster_type and fsmanager.is_enabled_and_has_access('abfs', self.user):
+      interpreters.append({
+        'type': 'abfs',
+        'displayName': _('ABFS'),
+        'buttonName': _('Browse'),
+        'tooltip': _('ABFS'),
+        'page': '/filebrowser/view=' + urllib.quote('abfs://'.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
+      })
 
     if 'metastore' in self.apps:
       interpreters.append({

+ 3 - 0
desktop/core/src/desktop/settings.py

@@ -36,6 +36,7 @@ from desktop.lib.paths import get_desktop_root
 from desktop.lib.python_util import force_dict_to_strings
 
 from aws.conf import is_enabled as is_s3_enabled
+from azure.conf import is_abfs_enabled
 
 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
 BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '..', '..', '..'))
@@ -569,6 +570,8 @@ file_upload_handlers = [
 if is_s3_enabled():
   file_upload_handlers.insert(0, 'aws.s3.upload.S3FileUploadHandler')
 
+if is_abfs_enabled():
+  file_upload_handlers.insert(0, 'azure.abfs.upload.ABFSFileUploadHandler')
 FILE_UPLOAD_HANDLERS = tuple(file_upload_handlers)
 
 ############################################################

+ 124 - 0
desktop/libs/azure/src/azure/abfs/__init__.py

@@ -13,3 +13,127 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import absolute_import
+
+import calendar
+import errno
+import re
+import logging
+import tempfile
+import posixpath
+import time
+
+from nose.tools import assert_not_equal
+from hadoop.fs import normpath as fs_normpath
+
+LOG = logging.getLogger(__name__)
+
+ABFS_PATH_RE = re.compile('^/*[aA][bB][fF][sS]{1,2}://([$a-z0-9](?!.*--)[-a-z0-9]{1,61}[a-z0-9])(/(.*?)/?)?$') # filesystem name: 3-63 chars of lowercase letters, digits or '-'
+ABFS_ROOT_S = 'abfss://'
+ABFS_ROOT = 'abfs://'
+
+def parse_uri(uri):
+  """
+  Returns filesystem_name, direct_name, base_direct_name
+  Raises ValueError if invalid ABFS URI is passed.
+  """
+  match = ABFS_PATH_RE.match(uri)
+  if not match:
+    raise ValueError("Invalid ABFS URI: %s" % uri)
+  direct_name = match.group(3) or ''
+  base_direct_name = match.group(2) or ''
+  return match.group(1), direct_name, base_direct_name
+
+def is_root(uri):
+  """
+  Checks if Uri is the Root Directory
+  """
+  return uri.lower() == ABFS_ROOT or uri.lower() == ABFS_ROOT_S
+
+
+def strip_scheme(path):
+  """
+  Returns the path without abfss:// or abfs://
+  """
+  try:
+    filesystem, file_path = parse_uri(path)[:2]
+  except ValueError:
+    return path
+  assert_not_equal(filesystem, '', 'File System must be Specified')
+  path = filesystem + '/' + file_path
+  return path
+  
+def strip_path(path):
+  """
+  Returns only the last component of the given path
+  """
+  if is_root(path):
+    return path
+  split_path = path.split('/')
+  return split_path[-1]
+
+def normpath(path):
+  """
+  Return the normalized path, but ignore the leading prefix if it exists
+  """
+  if is_root(path):
+    return path
+  elif path.lower().startswith(ABFS_ROOT):
+    normalized = '%s%s' % (ABFS_ROOT, fs_normpath(path[len(ABFS_ROOT):]))
+  elif path.lower().startswith(ABFS_ROOT_S):
+    normalized = '%s%s' % (ABFS_ROOT_S, fs_normpath(path[len(ABFS_ROOT_S):]))
+  else:
+    normalized = fs_normpath(path)
+  return normalized
+
+def parent_path(path):
+  """
+  Returns the parent of the specified folder
+  """
+  if is_root(path):
+    return "abfs://"
+  filesystem, directory_name, other = parse_uri(path)
+  parent = None
+  if directory_name == "":
+    if path.lower() == ABFS_ROOT_S:
+      return ABFS_ROOT_S
+    return ABFS_ROOT
+  else:
+    parent = '/'.join(directory_name.split('/')[:-1])
+  if path.lower().startswith(ABFS_ROOT):
+    return normpath(ABFS_ROOT + filesystem + '/' + parent)
+  return normpath(ABFS_ROOT_S + filesystem + '/' + parent)
+
+def join(first, *complist):
+  """
+  Joins a path onto another path
+  """
+  def _prep(uri):
+    try:
+      return '/%s/%s' % parse_uri(uri)[:2]
+    except ValueError:
+      return '/' if is_root(uri) else uri
+  listings = [first]
+  listings.extend(complist)
+  joined = posixpath.join(*list(map(_prep, listings)))
+  if joined and joined[0] == '/':
+    if first.startswith(ABFS_ROOT_S):
+      joined = 'abfss:/%s' % joined
+    else:
+      joined = 'abfs:/%s' % joined
+  return joined
+
+
+def abfsdatetime_to_timestamp(datetime):
+  """
+  Returns a timestamp (seconds) from the datetime strings in ABFS API responses.
+  The ABFS REST API returns one type of datetime string:
+  * `Thu, 26 Feb 2015 20:42:07 GMT` for Object HEAD requests
+    (see http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html);
+  """
+  # There is a chance (depending on platform) of getting a
+  # `'z' is a bad directive in format ...` error (see https://bugs.python.org/issue6641)
+  stripped = time.strptime(datetime[:-4], '%a, %d %b %Y %H:%M:%S')
+  assert datetime[-4:] == ' GMT', 'Time [%s] is not in GMT.' % datetime
+  return int(calendar.timegm(stripped))
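
The path helpers above are pure string handling, so their behavior can be sketched without an Azure account. The values below are illustrative, inferred from the regex and functions in this file (note that ABFS_PATH_RE requires a filesystem name of at least 3 characters, which is why 'testfs' is used):

from azure.abfs import parse_uri, parent_path, join, normpath

# parse_uri returns (filesystem_name, direct_name, base_direct_name)
parse_uri('abfs://testfs/dir/f.txt')    # -> ('testfs', 'dir/f.txt', '/dir/f.txt')
parent_path('abfs://testfs/dir/f.txt')  # -> 'abfs://testfs/dir'
join('abfs://testfs', 'dir', 'f.txt')   # -> 'abfs://testfs/dir/f.txt'
normpath('abfs://testfs//dir/')         # -> 'abfs://testfs/dir'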

+ 509 - 80
desktop/libs/azure/src/azure/abfs/abfs.py

@@ -22,16 +22,22 @@ from future import standard_library
 standard_library.install_aliases()
 from builtins import object
 import logging
+import os
 import threading
-
+from math import ceil
+from posixpath import join
 from urllib.parse import urlparse
-from azure.conf import PERMISSION_ACTION_ABFS
-from hadoop.hdfs_site import get_umask_mode
 
+from hadoop.hdfs_site import get_umask_mode
 from hadoop.fs.exceptions import WebHdfsException
 
 from desktop.lib.rest import http_client, resource
 
+import azure.abfs.__init__ as Init_ABFS
+from azure.abfs.abfsfile import ABFSFile
+from azure.abfs.abfsstats import ABFSStat
+from azure.conf import PERMISSION_ACTION_ABFS
+
 
 LOG = logging.getLogger(__name__)
 
@@ -39,6 +45,12 @@ LOG = logging.getLogger(__name__)
 UPLOAD_CHUCK_SIZE = 30 * 1000 * 1000
 
 
+class ABFSFileSystemException(IOError):
+
+  def __init__(self, *args, **kwargs):
+    super(ABFSFileSystemException, self).__init__(*args, **kwargs)
+    
+
 class ABFS(object):
 
   def __init__(self, url,
@@ -92,110 +104,527 @@ class ABFS(object):
     return {
       "Authorization": self._auth_provider.get_token(),
     }
-
+  
+  # Parse info about filesystems, directories, and files
+  # --------------------------------
   def isdir(self, path):
-    raise NotImplementedError("")
+    """
+    Checks if the path is a directory (note: disabled because filebrowser/views is bugged)
+    """
+    resp = self.stats(path)
+    #LOG.debug("checking directory or not")
+    return resp.isDir
 
   def isfile(self, path):
-    raise NotImplementedError("")
-
-  def stats(self, path):
-    raise NotImplementedError("")
-
-  def listdir_stats(self, path, **kwargs):
-    raise NotImplementedError("")
-
-  def listdir(self, path, glob=None):
-    raise NotImplementedError("") # e.g. self._root.get('/', {'resource': 'account'}, self._getheaders())
-
-  def normpath(self, path):
-    raise NotImplementedError("")
-
-  def netnormpath(self, path):
-    raise NotImplementedError("")
-
-  def open(self, path, *args, **kwargs):
-    raise NotImplementedError("")
-
+    """
+    Checks if the path is a file
+    """
+    return not self.isdir(path) 
+  
   def exists(self, path):
-    raise NotImplementedError("")
-
-  def isroot(self, path):
-    raise NotImplementedError("")
-
-  def parent_path(self, path):
-    raise NotImplementedError("")
-
-  def join(self, first, *comp_list):
-    raise NotImplementedError("")
-
-  def mkdir(self, path, *args, **kwargs):
-    raise NotImplementedError("")
-
-  def read(self, path, *args, **kwargs):
-    raise NotImplementedError("")
-
-  def append(self, path, *args, **kwargs):
-    raise NotImplementedError("")
-
-  def rmtree(self, path, *args, **kwargs):
-    raise NotImplementedError("")
-
-  def remove(self, path, skip_trash=False):
-    raise NotImplementedError("")
-
-  def restore(self, path):
-    raise NotImplementedError("")
-
-  def create(self, path, *args, **kwargs):
-    raise NotImplementedError("")
+    """
+    Test if a path exists
+    """
+    try:
+      #LOG.debug("checking existence")
+      if ABFS.isroot(path):
+        return True
+      self.stats(path)
+    except WebHdfsException as e:
+      if e.code == 404:
+        return False
+      raise e
+    return True
+
+  def stats(self, path, params=None, **kwargs):
+    """
+    Lists the stats of the actual file/directory
+    Returns an ABFSStat object
+    """
+    if ABFS.isroot(path):
+      return ABFSStat.for_root()
+    file_system, dir_name = Init_ABFS.parse_uri(path)[:2]
+    if dir_name == '':
+      LOG.debug("Path being called is a Filesystem")
+      return ABFSStat.for_filesystem(self._statsf(file_system, params, **kwargs), path)
+    return ABFSStat.for_single(self._stats(file_system + '/' + dir_name, params, **kwargs), path)
+  
+  def listdir_stats(self, path, params=None, **kwargs):
+    """
+    List the stats for the directories inside the specified path
+    Returns multiple ABFSStat objects # note: change later for recursive cases
+    """
+    if ABFS.isroot(path):
+      LOG.warn("Path: %s is the root; listing filesystems" % path)
+      return self.listfilesystems_stats(params=params, **kwargs)
+    dir_stats = []
+    file_system, directory_name = Init_ABFS.parse_uri(path)[:2]
+    if params is None:
+      params = {}
+    if 'recursive' not in params:
+      params['recursive'] = 'false'
+    params['resource'] = 'filesystem'
+    if directory_name != "":
+      params['directory'] = directory_name
+    res = self._root._invoke("GET",file_system, params, headers= self._getheaders(), **kwargs)
+    resp = self._root._format_response(res)
+    for x in resp['paths']:
+      dir_stats.append(ABFSStat.for_directory(res.headers, x, Init_ABFS.ABFS_ROOT +file_system + "/" + x['name']))
+    return dir_stats
+  
+  def listfilesystems_stats(self, params=None, **kwargs):
+    """
+    Lists the stats of the File Systems; params currently has no effect
+    """
+    stats = []
+    if params is None:
+      params = {}
+    params["resource"] = "account"
+    res = self._root._invoke("GET", params=params, headers=self._getheaders())
+    resp = self._root._format_response(res)
+    for x in resp['filesystems']:
+      stats.append(ABFSStat.for_filesystems(res.headers, x))
+    return stats
+  
+  def _stats(self, schemeless_path, params=None, **kwargs):
+    """
+    Helper function for stats on files and directories,
+    Returns the headers of the response
+    """
+    if params is None:
+      params = {}
+    params['action'] = 'getStatus'
+    res = self._root._invoke('HEAD', schemeless_path, params, headers=self._getheaders(), **kwargs)
+    return res.headers
+
+  def _statsf(self, schemeless_path, params=None, **kwargs):
+    """
+    Helper function for stats, but for a file system
+    Returns the headers of the response
+    """
+    if params is None:
+      params = {}
+    params['resource'] = 'filesystem'
+    res = self._root._invoke('HEAD', schemeless_path, params, headers=self._getheaders(), **kwargs)
+    return res.headers
+    
+  def listdir(self, path, params=None, glob=None, **kwargs):
+    """
+    Lists the names inside the given directory
+    """
+    if ABFS.isroot(path):
+      LOG.warn("Path being called is the root; listing filesystems")
+      return self.listfilesystems(params, **kwargs)
+    listofDir = self.listdir_stats(path, params)
+    return [x.name for x in listofDir]
+
+
+  def listfilesystems(self, params=None, **kwargs):
+    """
+    Lists the names of the File Systems, limited arguments
+    """
+    listofFileSystems = self.listfilesystems_stats(params)
+    return [x.name for x in listofFileSystems]
+  
+  # Find or alter information about the URI path
+  # --------------------------------
+  @staticmethod
+  def isroot(path):
+    """
+    Checks if the path is the root path
+    """
+    return Init_ABFS.is_root(path)  
+  
+  @staticmethod
+  def normpath(path):
+    """
+    Normalizes a path
+    """
+    resp = Init_ABFS.normpath(path)
+    return resp
+
+  @staticmethod
+  def netnormpath(path):
+    """
+    Normalizes a path
+    """
+    return Init_ABFS.normpath(path)
+
+  def open(self, path, option='r', *args, **kwargs):
+    return ABFSFile(self, path, option)
+  
+  @staticmethod
+  def parent_path(path):
+    """
+    Returns the Parent Path
+    """
+    return Init_ABFS.parent_path(path)
+
+  @staticmethod
+  def join(first, *comp_list):
+    """
+    Joins two paths together
+    """
+    return Init_ABFS.join(first,*comp_list)
+
+  # Create Files,directories, or File Systems
+  # --------------------------------
+  def mkdir(self, path, params=None, headers=None, *args, **kwargs):
+    """
+    Makes a directory
+    """
+    if params is None:
+      params = {}
+    params['resource'] = 'directory'
+    self._create_path(path, params=params, headers=headers, overwrite=False)
+
+  def create(self, path, overwrite=False, data=None, headers=None, *args, **kwargs):
+    """
+    Makes a File (put text in data if adding data)
+    """
+    params = {'resource': 'file'}
+    self._create_path(path, params=params, headers=headers, overwrite=overwrite)
+    if data:
+      self._writedata(path, data, len(data))
 
   def create_home_dir(self, home_path=None):
+    raise NotImplementedError("File System not named")
+  
+  def _create_path(self, path, params=None, headers=None, overwrite=False):
+    """
+    Container method for create
+    """
+    file_system, dir_name = Init_ABFS.parse_uri(path)[:2]
+    if dir_name == '':
+      return self._create_fs(file_system)
+    no_scheme = file_system + '/' + dir_name
+    additional_header = self._getheaders()
+    if headers is not None:
+      additional_header.update(headers)
+    if not overwrite:
+      additional_header['If-None-Match'] = '*'
+    self._root.put(no_scheme, params, headers=additional_header)
+    
+  def _create_fs(self, file_system):
+    """
+    Creates a File System
+    """
+    self._root.put(file_system, {'resource': 'filesystem'}, headers=self._getheaders())
+
+  # Read Files
+  # --------------------------------
+  def read(self, path, offset='0', length=0, *args, **kwargs):
+    """
+    Read data from a file
+    """
+    path = Init_ABFS.strip_scheme(path)
+    headers = self._getheaders()
+    if length != 0 and length != '0':
+      # HTTP Range is inclusive, so the last byte is offset + length - 1
+      headers['range'] = 'bytes=%s-%s' % (str(offset), str(int(offset) + int(length) - 1))
+    return self._root.get(path, headers=headers)
+  
+  # Alter Files
+  # --------------------------------
+  def append(self, path, data, size=0, offset=0, params=None, **kwargs):
+    """
+    Appends the data to a file
+    """
+    path = Init_ABFS.strip_scheme(path)
+    if params is None:
+      LOG.warn("Params not specified; append will take longer")
+      resp = self._stats(path)
+      params = {'position': int(resp['Content-Length']) + offset, 'action': 'append'}
+      LOG.debug("%s" % params)
+    else:
+      params['action'] = 'append'
+    headers = {}
+    if size == 0:
+      headers['Content-Length'] = str(len(data))
+    else:
+      headers['Content-Length'] = str(size)
+    LOG.debug("%s" % headers['Content-Length'])
+    return self._patching_sl(path, params, data, headers, **kwargs)
+  
+  def flush(self, path, params=None, headers=None, **kwargs):
+    """
+    Flushes the data (i.e. writes appended data to the file)
+    """
+    path = Init_ABFS.strip_scheme(path)
+    if params is None:
+      LOG.warn("Params not specified")
+      params = {'position': 0}
+    if 'position' not in params:
+      LOG.warn("Position is not specified")
+      params['position'] = 0
+    params['action'] = 'flush'
+    if headers is None:
+      headers = {}
+    headers['Content-Length'] = '0'
+    self._patching_sl(path, params, header=headers, **kwargs)
+
+  # Remove Filesystems, directories. or Files
+  # --------------------------------
+  def remove(self, path, skip_trash=True):
+    """
+    Removes the item indicated by the path
+    Also removes empty directories
+    """
+    self._delete(path, recursive='false', skip_trash=skip_trash)
+
+  def rmtree(self, path, skip_trash=True):
+    """
+    Removes everything in a given directory
+    """
+    self._delete(path, recursive='true', skip_trash=skip_trash)
+
+  def _delete(self, path, recursive='false', skip_trash=True):
+    """
+    Wrapper function for calling delete; no support for trash
+    """
+    if not skip_trash:
+      raise NotImplementedError("Trash not implemented for ABFS")
+    if ABFS.isroot(path):
+      raise RuntimeError("Cannot Remove Root")
+    file_system, dir_name = Init_ABFS.parse_uri(path)[:2]
+    if dir_name == '':
+      return self._root.delete(file_system, {'resource': 'filesystem'}, headers=self._getheaders())
+    new_path = file_system + '/' + dir_name
+    param = None
+    if self.isdir(path):
+      param = {'recursive': recursive}
+    self._root.delete(new_path, param, headers=self._getheaders())
+    
+  def restore(self, path):
     raise NotImplementedError("")
-
-  def chown(self, path, *args, **kwargs):
-    raise NotImplementedError("")
-
-  def chmod(self, path, *args, **kwargs):
-    raise NotImplementedError("")
-
-  def copyFromLocal(self, local_src, remote_dst, *args, **kwargs):
-    raise NotImplementedError("")
+  
+  # Edit permissions of Filesystems, directories. or Files
+  # --------------------------------
+  def chown(self, path, user=None, group=None, *args, **kwargs):
+    """
+    Changes ownership (not implemented)
+    """
+    headers = {}
+    if user is not None:
+      headers['x-ms-owner'] = user
+    if group is not None:
+      headers['x-ms-group'] = group
+    self.setAccessControl(path, headers=headers, **kwargs)
+
+  def chmod(self, path, permissionNumber=None, *args, **kwargs):
+    """
+    Set File Permissions (not implemented)
+    """
+    header = {}
+    if permissionNumber is not None:
+      header['x-ms-permissions'] = str(permissionNumber)
+    self.setAccessControl(path, headers=header)
+
+  def setAccessControl(self, path, headers, **kwargs):
+    """
+    Set Access Controls (can do both chmod and chown) (not implemented)
+    """
+    path = Init_ABFS.strip_scheme(path)
+    params = {'action': 'setAccessControl'}
+    if headers is None:
+      headers = {}
+    self._patching_sl(path, params, header=headers, **kwargs)
 
   def mktemp(self, subdir='', prefix='tmp', basedir=None):
     raise NotImplementedError("")
 
   def purge_trash(self):
     raise NotImplementedError("")
-
+  
   # Handle file systems interactions
   # --------------------------------
   def copy(self, src, dst, *args, **kwargs):
-    raise NotImplementedError("")
-
+    """
+    General Copying
+    """
+    if self.isfile(src):
+      return self.copyfile(src, dst)
+    self.copy_remote_dir(src, dst)
+          
+  def copyfile(self, src, dst, *args, **kwargs):
+    """
+    Copies a File to another location
+    """
+    new_path = dst + '/' + Init_ABFS.strip_path(src)
+    self.create(new_path)
+    data = self.read(src)
+    size = len(data)
+    self._writedata(new_path, data, size)
+    self._writedata(new_path, file, size)
 
   def copy_remote_dir(self, src, dst, *args, **kwargs):
-    raise NotImplementedError("")
-
-  def rename(self, old, new):
-    raise NotImplementedError("")
+    """
+    Copies the entire contents of a directory to another location
+    """
+    dst = dst + '/' + Init_ABFS.strip_path(src)
+    LOG.debug("%s" %dst)
+    self.mkdir(dst)
+    other_files = self.listdir(src)
+    for x in other_files:
+      x = src + '/' + Init_ABFS.strip_path(x)
+      LOG.debug("%s" %x)
+      self.copy(x, dst)
+
+  def rename(self, old, new):
+    """
+    Renames a file
+    """
+    LOG.debug("%s\n%s" % (old, new))
+    headers = {'x-ms-rename-source': '/' + Init_ABFS.strip_scheme(old)}
+    try:
+      self._create_path(new, headers=headers, overwrite=True)
+    except WebHdfsException as e:
+      if e.code == 409:
+        self.copy(old, new)
+        self.rmtree(old)
+      else:
+        raise e
 
   def rename_star(self, old_dir, new_dir):
-    raise NotImplementedError("")
+    """
+    Renames a directory
+    """
+    self.rename(old_dir, new_dir)
 
   def upload(self, file, path, *args, **kwargs):
-    raise NotImplementedError("")
+    """
+    Upload is done by the client
+    """
+    pass
+  
+  def copyFromLocal(self, local_src, remote_dst, *args, **kwargs):
+    """
+    Copy a directory or file from Local (Testing)
+    """
+    local_src = local_src.endswith('/') and local_src[:-1] or local_src
+    remote_dst = remote_dst.endswith('/') and remote_dst[:-1] or remote_dst
+    
+    if os.path.isdir(local_src):
+      self._local_copy_dir(local_src,remote_dst)
+    else:
+      (basename, filename) = os.path.split(local_src)
+      self._local_copy_file(local_src, self.isdir(remote_dst) and self.join(remote_dst, filename) or remote_dst)
+
+  def _local_copy_dir(self, local_dir, remote_dir):
+    """
+    A wrapper function for copying local directories
+    """
+    self.mkdir(remote_dir)
+
+    for f in os.listdir(local_dir):
+      local_src = os.path.join(local_dir, f)
+      remote_dst = self.join(remote_dir, f)
+
+      if os.path.isdir(local_src):
+        self._local_copy_dir(local_src, remote_dst)
+      else:
+        self._local_copy_file(local_src, remote_dst)
+    
+  def _local_copy_file(self, local_src, remote_dst, chunk_size=UPLOAD_CHUCK_SIZE):
+    """
+    A wrapper function for copying local Files
+    """
+    if os.path.isfile(local_src):
+      if self.exists(remote_dst):
+        LOG.info('%s already exists. Skipping.' % remote_dst)
+        return
+      else:
+        LOG.info('%s does not exist. Trying to copy.' % remote_dst)
+
+      src = open(local_src, 'rb')
+      try:
+        try:
+          self.create(remote_dst)
+          chunk = src.read(chunk_size)
+          offset = 0
+          while chunk:
+            size = len(chunk)
+            self.append(remote_dst, chunk, size=size, params={'position': offset})
+            offset += size
+            chunk = src.read(chunk_size)
+          self.flush(remote_dst, params={'position': offset})
+          LOG.info('Copied %s -> %s.' % (local_src, remote_dst))
+        except:
+          LOG.exception('Copying %s -> %s failed.' % (local_src, remote_dst))
+          raise
+      finally:
+        src.close()
+    else:
+      LOG.info('Skipping %s (not a file).' % local_src)
 
   def check_access(self, path, *args, **kwargs):
-    raise NotImplementedError("")
+    """
+    Check access of a file/directory (Work in Progress/Not Ready)
+    """
+    raise NotImplementedError("")
+    # Draft logic (currently unreachable):
+    try:
+      status = self.stats(path)
+      if 'x-ms-permissions' not in status.keys():
+        LOG.debug("Permissions have not been set")
+    except Exception:
+      LOG.exception("Failed to check access for %s" % path)
 
   def mkswap(self, filename, subdir='', suffix='swp', basedir=None):
-    raise NotImplementedError("")
+    """
+    Makes a directory and returns a potential filename for that directory
+    """
+    base = self.join(basedir or self._temp_dir, subdir)
+    if not self.isdir(base):
+      self.mkdir(base)
+
+    candidate = self.join(base, "%s.%s" % (filename, suffix))
+    return candidate
   
   def setuser(self, user):
+    """
+    Changes the User
+    """
     self._user = user
-
-  def get_upload_chuck_size(self, path):
-    raise NotImplementedError("")
+  
+  def get_upload_chuck_size(self):
+    """
+    Gets the maximum size allowed to upload
+    """
+    return UPLOAD_CHUCK_SIZE
+  
+  def filebrowser_action(self):
+    return self._filebrowser_action
+  
+  #Other Methods to condense stuff
+  #----------------------------
+  # Write Files on creation
+  #----------------------------
+  def _writedata(self, path, data, size):
+    """
+    Adds text to a given file
+    """
+    chunk_size = self.get_upload_chuck_size()
+    cycles = int(ceil(float(size) / chunk_size))
+    for i in range(0, cycles):
+      chunk = size % chunk_size
+      # the last chunk may be shorter than chunk_size (unless size is an exact multiple)
+      if i != cycles - 1 or chunk == 0:
+        length = chunk_size
+      else:
+        length = chunk
+      self.append(path, data[i * chunk_size:i * chunk_size + length], length)
+    self.flush(path, {'position': int(size)})
+  
+  # Use Patch HTTP request
+  #----------------------------
+  def _patching_sl(self, schemeless_path, param, data=None, header=None, **kwargs):
+    """
+    A wrapper function for PATCH requests
+    """
+    if header is None:
+      header = {}
+    header.update(self._getheaders())
+    LOG.debug("%s" % kwargs)
+    return self._root.invoke('PATCH', schemeless_path, param, data, headers=header, **kwargs)
+      
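
Taken together, create, append, and flush implement the three-step ABFS write protocol: create the file, PATCH the bytes at a position, then flush at the final offset. A minimal usage sketch for the client above, assuming ABFS is configured in hue.ini and that the testfs filesystem and paths below are hypothetical (construction mirrors the setUp in abfs_test.py that follows):

from azure.abfs.abfs import ABFS
from azure.active_directory import ActiveDirectory
from azure.conf import ABFS_CLUSTERS, AZURE_ACCOUNTS

client = ABFS.from_config(ABFS_CLUSTERS['default'],
                          ActiveDirectory.from_config(AZURE_ACCOUNTS['default'], version='v2.0'))

client.mkdir('abfs://testfs')                          # PUT ?resource=filesystem
client.mkdir('abfs://testfs/demo')                     # PUT ?resource=directory
client.create('abfs://testfs/demo/a.txt', data='hi')   # PUT ?resource=file, then append + flush
print(client.listdir('abfs://testfs/demo'))            # GET ?resource=filesystem
print(client.read('abfs://testfs/demo/a.txt'))         # GET, with an optional Range header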

+ 262 - 7
desktop/libs/azure/src/azure/abfs/abfs_test.py

@@ -17,13 +17,24 @@
 from __future__ import absolute_import
 
 import logging
+import json
+import os
 import unittest
+import tempfile
+import time
+
+from django.contrib.auth.models import User
+from nose.plugins.skip import SkipTest
+from nose.tools import assert_true, assert_false, assert_equal
+
+from desktop.lib.django_test_util import make_logged_in_client
+from desktop.lib.test_utils import grant_access, add_to_group, add_permission, remove_from_group
 
 from azure.abfs.abfs import ABFS
 from azure.active_directory import ActiveDirectory
-from azure.conf import ABFS_CLUSTERS, is_abfs_enabled
+from azure.conf import ABFS_CLUSTERS, AZURE_ACCOUNTS, is_abfs_enabled
 
-from nose.plugins.skip import SkipTest
+from azure.abfs.upload import DEFAULT_WRITE_SIZE
 
 LOG = logging.getLogger(__name__)
 
@@ -36,11 +47,255 @@ class ABFSTestBase(unittest.TestCase):
   def setUp(self):
     if not is_abfs_enabled():
       raise SkipTest
-    self.client = ABFS.from_config(ABFS_CLUSTERS['default'], ActiveDirectory.from_config(None, version='v2.0'))
+    self.client = ABFS.from_config(ABFS_CLUSTERS['default'], ActiveDirectory.from_config(AZURE_ACCOUNTS['default'], version='v2.0'))
+    self.c = make_logged_in_client(username='test', is_superuser=False)
+    grant_access('test', 'test', 'filebrowser')
+    add_to_group('test')
+    self.user = User.objects.get(username="test")
+      
+    self.test_fs = 'abfs://testfs' + str(int(time.time()))
+    LOG.debug("%s" % self.test_fs)
+    self.client.mkdir(self.test_fs)
 
   def tearDown(self):
-    pass
-
+    self.client.rmtree(self.test_fs)
+    
   def test_list(self):
-    self.client.listdir('abfs://')
-    pass
+    filesystems = self.client.listdir('abfs://')
+    LOG.debug("%s" %filesystems)
+    assert_true(filesystems is not None, filesystems)
+    
+    pathing = self.client.listdir('abfs://' + filesystems[0],  {"recursive" : "true"} )
+    LOG.debug("%s" %pathing)
+    assert_true(pathing is not None, pathing)
+    
+    directory = self.client.listdir('abfs://' + filesystems[0] + '/' + pathing[0])
+    LOG.debug("%s" %directory)
+    assert_true(directory is not None, directory)
+    
+    directory = self.client.listdir(self.test_fs)
+    LOG.debug("%s" %directory)
+    assert_true(directory is not None, directory)
+    
+    # Hard-coded indices (e.g. filesystems[276]) assume a specific account
+    # layout; probe the first and last filesystems so the test is portable.
+    pathing = self.client._statsf(filesystems[0])
+    LOG.debug("%s" % pathing)
+    assert_true(pathing is not None, pathing)
+    
+    pathing = self.client._statsf(filesystems[-1])
+    LOG.debug("%s" % pathing)
+    assert_true(pathing is not None, pathing)
+    
+    
+  def test_existence(self):
+    test_fs = self.test_fs
+    test_dir = test_fs + '/test_existence'
+    test_file = test_dir + '/test.txt'
+    self.client.mkdir(test_dir)
+    self.client.create(test_file)
+    
+    #Testing root and filesystems
+    assert_true(self.client.exists('abfs://'))
+    assert_true(self.client.exists(test_fs))
+    
+    #testing created directories and files
+    assert_true(self.client.exists(test_dir))
+    assert_true(self.client.exists(test_file))
+    assert_false(self.client.exists(test_dir + 'a'))
+     
+  def test_stat_output(self):
+    """
+    Only tests if the stat outputs something
+    """
+    test_fs = self.test_fs
+    test_dir = test_fs + '/test_stats'
+    test_dir2 = test_dir + '/test2'
+    test_dir3 = test_dir2 + '/test3'
+    self.client.mkdir(test_dir)
+    self.client.mkdir(test_dir2)
+    self.client.mkdir(test_dir3)
+    
+    #testing filesystems
+    result = self.client.stats(test_fs)
+    LOG.debug("%s" %result)
+    assert_true(result is not None, result)
+    result = self.client.listdir_stats(test_fs)
+    LOG.debug("%s" %result)
+    
+    #testing directories
+    result = self.client.stats(test_dir)
+    LOG.debug("%s" %result)
+    result = self.client.listdir_stats(test_dir)
+    LOG.debug("%s" %result)
+    
+    result = self.client.stats(test_dir2)
+    LOG.debug("%s" %result)
+    result = self.client.listdir_stats(test_dir2)
+    LOG.debug("%s" %result)
+    
+    result = self.client.stats(test_dir3)
+    LOG.debug("%s" %result)
+    result = self.client.listdir_stats(test_dir3)
+    LOG.debug("%s" %result)
+    
+  def test_mkdir(self):
+    test_dir = self.test_fs + '/test_mkdir'
+    assert_false(self.client.exists(test_dir))
+    
+    self.client.mkdir(test_dir)
+    assert_true(self.client.exists(test_dir))
+    self.client.isdir(test_dir)
+    
+    
+  def test_append_and_flush(self):
+    test_fs = self.test_fs
+    test_file = test_fs + '/test.txt'
+    self.client.create(test_file)
+    
+    test_string = "This is a test."
+    test_len = len(test_string)
+    resp = self.client.append(test_file, test_string) #only works with strings
+    LOG.debug("%s" %self.client.stats(test_file))
+    try:
+      LOG.debug("%s" %resp)
+      resp = self.client.read(test_file, length = test_len)
+    except Exception:
+      LOG.debug("Not written yet")
+    
+    self.client.flush(test_file, {"position" : test_len} )
+    resp = self.client.read(test_file)
+    assert_true(resp == test_string)
+    self.client.remove(test_file)
+  
+  def test_rename(self):
+    test_fs = self.test_fs
+    test_dir = test_fs + '/test'
+    test_dir2 = test_fs + '/test2'
+    test_file = test_fs + '/test.txt'
+    test_file2 = test_fs + '/test2.txt'
+    
+    self.client.mkdir(test_dir)
+    assert_true(self.client.exists(test_dir))
+    assert_false(self.client.exists(test_dir2))
+    
+    self.client.rename(test_dir, test_dir2)
+    assert_false(self.client.exists(test_dir))
+    assert_true(self.client.exists(test_dir2))
+    
+    self.client.create(test_file)
+    assert_true(self.client.exists(test_file))
+    assert_false(self.client.exists(test_file2))
+    
+    self.client.rename(test_file, test_file2)
+    assert_false(self.client.exists(test_file))
+    assert_true(self.client.exists(test_file2))
+    
+  def test_chmod(self):
+    test_dir = self.test_fs + '/test_chmod'
+    self.client.mkdir(test_dir)
+    test_dir_permission = test_dir +'/test'
+    test_file_permission = test_dir +'/test.txt'
+    
+    self.client.create(test_file_permission)
+    self.client.chmod(test_file_permission, '0777')
+    self.client.stats(test_file_permission)
+    
+    self.client.mkdir(test_dir_permission)
+    self.client.chmod(test_dir_permission, '0777')
+    self.client.stats(test_dir_permission)
+    
+  def test_chown(self):
+    test_dir = self.test_fs + '/test_chown'
+    self.client.mkdir(test_dir)
+    test_dir_permission = test_dir +'/test'
+    test_file_permission = test_dir +'/test.txt'
+    
+    self.client.create(test_file_permission)
+    self.client.chown(test_file_permission, 'temp')
+    self.client.stats(test_file_permission)
+    
+    self.client.mkdir(test_dir_permission)
+    self.client.chown(test_dir_permission, 'temp')
+    self.client.stats(test_dir_permission)
+    
+  def test_create_with_file_permissions(self):
+    test_dir = self.test_fs + '/test_chown'
+    test_file = test_dir + '/test.txt'
+    self.client.mkdir(test_dir)
+    self.client.create(test_file, headers = {'x-ms-permissions' : '0777'})
+    
+  def test_upload(self):
+    with tempfile.NamedTemporaryFile() as local_file:
+      # Make sure we can upload larger than the UPLOAD chunk size
+      file_size = DEFAULT_WRITE_SIZE * 2
+      local_file.write('0' * file_size)
+      local_file.flush()
+      self.client.mkdir(self.test_fs + '/test_upload')
+      dest_dir = self.test_fs + '/test_upload'
+      local_file = local_file.name
+      dest_path = '%s/%s' % (dest_dir, os.path.basename(local_file))
+      
+      add_permission(self.user.username, 'has_abfs', permname='abfs_access', appname='filebrowser')
+      # Upload the temp file through the filebrowser endpoint
+      try:
+        resp = self.c.post('/filebrowser/upload/file?dest=%s' % dest_dir, dict(dest=dest_dir, hdfs_file=file(local_file)))
+        response = json.loads(resp.content)
+      finally:
+        remove_from_group(self.user.username, 'has_abfs')
+      
+      assert_equal(0, response['status'], response)
+      stats = self.client.stats(dest_path)
+
+      actual = self.client.read(dest_path)
+      expected = file(local_file).read()
+      assert_equal(actual, expected, 'files do not match: %s != %s' % (len(actual), len(expected)))
+   
+   
+  def test_copy_file(self):
+    test_fs = self.test_fs
+    testdir1 = test_fs + '/testcpy1'
+    testdir2 = test_fs + '/testcpy2'
+    test_file = testdir1 + '/test.txt'
+    self.client.mkdir(testdir1)
+    self.client.mkdir(testdir2)
+    self.client.create(test_file)
+    
+    test_string = "This is a test."
+    test_len = len(test_string)
+    resp = self.client.append(test_file, test_string)
+    self.client.flush(test_file, {"position" : test_len} )
+    
+    self.client.copy(test_file, testdir2)
+    self.client.stats(testdir2 + '/test.txt')
+    resp = self.client.read(testdir2 + '/test.txt')
+    resp2 = self.client.read(test_file)
+    assert_equal(resp, resp2, "Files %s and %s are not equal" %(test_file, testdir2 + '/test.txt'))
+    
+  
+  def test_copy_dir(self):
+    test_fs = self.test_fs
+    testdir1 = test_fs + '/testcpy1'
+    testdir2 = test_fs + '/testcpy2'
+    test_dir3 = testdir1 + '/test'
+    test_dir4 = test_dir3 + '/test2'
+    self.client.mkdir(testdir1)
+    self.client.mkdir(testdir2)
+    self.client.mkdir(test_dir3)
+    self.client.mkdir(test_dir4)
+    
+    
+    self.client.copy(test_dir3, testdir2)
+    self.client.stats(testdir2 + '/test')
+    self.client.stats(testdir2 + '/test/test2')
+    
+  @staticmethod
+  def test_static_methods():
+    test_dir = 'abfss://testfs/test_static/'
+    LOG.debug("%s" %test_dir)
+    norm_path = ABFS.normpath(test_dir)
+    LOG.debug("%s" %norm_path)
+    parent = ABFS.parent_path(test_dir)
+    LOG.debug("%s" %parent)
+    join_path = ABFS.join(test_dir, 'test1')
+    LOG.debug("%s" %join_path)
+
+    
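test_static_methods only logs what the path helpers return; as a hedged reference, the intended semantics (not asserted by the test) are roughly:

  ABFS.normpath('abfss://testfs/test_static/')      # strips the trailing slash
  ABFS.parent_path('abfss://testfs/test_static')    # climbs one level, toward the filesystem root
  ABFS.join('abfss://testfs/test_static', 'test1')  # -> 'abfss://testfs/test_static/test1'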

+ 93 - 0
desktop/libs/azure/src/azure/abfs/abfsfile.py

@@ -0,0 +1,93 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import errno
+import logging
+import os
+
+from hadoop.fs.hadoopfs import require_open
+from azure.abfs.__init__ import normpath
+
+LOG = logging.getLogger(__name__)
+
+SEEK_SET, SEEK_CUR, SEEK_END = os.SEEK_SET, os.SEEK_CUR, os.SEEK_END
+
+class ABFSFile(object):
+  """ Represents an open file on ABFS. """
+
+  def __init__(self, fs, path, mode="r"):
+    self.fs = fs
+    self.path = normpath(path)
+    self.pos = 0
+    self.closed = False
+
+    if mode != "r":
+      raise Exception("buffering and write support not yet implemented") # NYI
+
+    stat = self._stat()
+
+    if stat is None:
+      raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
+    if stat.isDir:
+      raise IOError(errno.EISDIR, "Is a directory: '%s'" % path)
+    #TODO(todd) somehow we need to check permissions here - maybe we need an access() call?
+
+  # Minimal context manager implementation.
+  # See: http://www.python.org/doc/2.5.2/lib/typecontextmanager.html
+  def __enter__(self):
+    return self
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    self.close()
+    return False # don't suppress exceptions.
+
+  @require_open
+  def seek(self, offset, whence=0):
+    """ Set the file pointer to the given spot. @see file.seek """
+    if whence == SEEK_SET:
+      self.pos = offset
+    elif whence == SEEK_CUR:
+      self.pos += offset
+    elif whence == SEEK_END:
+      self.pos = self._stat().length + offset
+    else:
+      raise IOError(errno.EINVAL, "Invalid argument to seek for whence")
+
+  @require_open
+  def tell(self):
+    return self.pos
+
+  @require_open
+  def read(self, length=0):
+    """
+    Read the given number of bytes from this file.
+    If EOF has been reached, returns the empty string.
+
+    @param length the number of bytes wanted
+    """
+    resp = ""
+    try:
+      resp = self.fs.read(self.path, offset = self.pos, length = str(length))
+      self.pos += length
+    except:
+      resp =''
+    return resp
+    
+  def close(self):
+    self.closed = True
+
+  def _stat(self):
+    if not hasattr(self, "_stat_cache"):
+      self._stat_cache = self.fs.stats(self.path)
+    return self._stat_cache
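A minimal usage sketch for this class, assuming fs is an ABFS client and the (illustrative) path exists:

  with ABFSFile(fs, 'abfs://testfs/test.txt') as f:
    f.seek(0, SEEK_SET)
    head = f.read(1024)  # delegates to fs.read(path, offset=..., length=...)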

+ 108 - 0
desktop/libs/azure/src/azure/abfs/abfsstats.py

@@ -0,0 +1,108 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+
+import stat
+
+from azure.abfs.__init__ import strip_path, abfsdatetime_to_timestamp
+from django.utils.encoding import smart_str
+
+class ABFSStat(object):
+  DIR_MODE = 0o777 | stat.S_IFDIR
+  FILE_MODE = 0o666 | stat.S_IFREG
+
+  def __init__(self, isDir, atime, mtime, size, path):
+    self.name = strip_path(path)
+    self.path = path
+    self.isDir = isDir
+    self.type = 'DIRECTORY' if isDir else 'FILE'
+    try:
+      self.atime = abfsdatetime_to_timestamp(atime) if atime else None
+      self.mtime = abfsdatetime_to_timestamp(mtime) if mtime else None
+    except:
+      self.atime = 0
+      self.mtime = 0
+    self.size = size
+    
+  def __getitem__(self, key):
+    try:
+      return getattr(self, key)
+    except AttributeError:
+      raise KeyError(key)
+  
+  def __setitem__(self, key, value):
+    # What about derivable values?
+    setattr(self, key, value)
+    
+  def __repr__(self):
+    return smart_str("<abfsStat %s>" % (self.path,))
+    
+  @property
+  def mode(self):
+    return ABFSStat.DIR_MODE if self.isDir else ABFSStat.FILE_MODE
+  
+  @property
+  def user(self):
+    return ''
+
+  @property
+  def group(self):
+    return ''
+  
+  @property
+  def aclBit(self):
+    return False
+  
+  @classmethod
+  def for_root(cls):
+    return cls(True, 0, 0, 0, 'abfs://')
+  
+  @classmethod
+  def for_filesystems(cls, headers, resp):
+    return cls(True, headers['date'], resp['lastModified'], 0, 'abfs://' + resp['name'])
+
+  @classmethod
+  def for_directory(cls, headers, resp, path):
+    try:
+      size = int(resp['contentLength'])
+    except:
+      size = 0
+    try:
+      isDir = resp['isDirectory'] == 'true'
+    except:
+      isDir = False
+    return cls(isDir, headers['date'], resp['lastModified'], size, path)
+  
+  @classmethod
+  def for_single(cls, resp, path):
+    size = int(resp['Content-Length'])
+    isDir = resp['x-ms-resource-type'] == 'directory'
+    return cls(isDir, resp['date'], resp['Last-Modified'], size, path)
+  
+  @classmethod
+  def for_filesystem(cls, resp, path):
+    return cls(True, resp['date'], resp['Last-Modified'], 0, path)
+    
+  def to_json_dict(self):
+    """
+    Returns a dictionary for easy serialization
+    """
+    keys = ('path', 'size', 'atime', 'mtime', 'mode', 'user', 'group', 'aclBit')
+    res = {}
+    for k in keys:
+      res[k] = self[k]
+    return res
+    
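As an illustration of how the factory methods and to_json_dict fit together (the dict below mimics the Azure REST headers consumed above; the values are made up):

  resp = {'Content-Length': '4', 'x-ms-resource-type': 'file',
          'date': 'Mon, 01 Jul 2019 00:00:00 GMT',
          'Last-Modified': 'Mon, 01 Jul 2019 00:00:00 GMT'}
  st = ABFSStat.for_single(resp, 'abfs://testfs/test.txt')
  st.to_json_dict()  # -> {'path': 'abfs://testfs/test.txt', 'size': 4, 'atime': ..., ...}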

+ 131 - 0
desktop/libs/azure/src/azure/abfs/upload.py

@@ -0,0 +1,131 @@
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from future import standard_library
+standard_library.install_aliases()
+import logging
+import sys
+
+if sys.version_info[0] > 2:
+  from io import StringIO as string_io
+else:
+  from cStringIO import StringIO as string_io
+
+from django.core.files.uploadedfile import SimpleUploadedFile
+from django.core.files.uploadhandler import FileUploadHandler, SkipFile, StopFutureHandlers, StopUpload, UploadFileException
+from django.utils.translation import ugettext as _
+
+from azure.client import get_client_abfs
+from azure.abfs.__init__ import parse_uri
+from azure.abfs.abfs import ABFSFileSystemException
+
+DEFAULT_WRITE_SIZE = 30 * 1000 * 1000 # TODO: set in configuration
+
+LOG = logging.getLogger(__name__)
+
+
+class ABFSFileUploadError(UploadFileException):
+  pass
+
+
+class ABFSFileUploadHandler(FileUploadHandler):
+  """
+  This handler is triggered by any upload field whose destination path starts with "ABFS" (case insensitive).
+
+  Streams data chunks directly to ABFS
+  """
+  def __init__(self, request):
+    super(ABFSFileUploadHandler, self).__init__(request)
+    self.chunk_size = DEFAULT_WRITE_SIZE
+    self.destination = request.GET.get('dest', None)  # GET param avoids infinite looping
+    self.target_path = None
+    self.file = None
+    self._request = request
+    self._part_size = DEFAULT_WRITE_SIZE
+    
+    if self._is_abfs_upload():
+      self._fs = self._get_abfs(request)
+      self.filesystem, self.directory = parse_uri(self.destination)[:2]
+       # Verify that the path exists
+      self._fs.stats(self.destination)
+      
+    LOG.debug("Chunk size = %d" %DEFAULT_WRITE_SIZE)
+
+
+  def new_file(self, field_name, file_name, *args, **kwargs):
+    if self._is_abfs_upload():
+      super(ABFSFileUploadHandler, self).new_file(field_name, file_name, *args, **kwargs)
+
+      LOG.info('Using ABFSFileUploadHandler to handle file upload with temp file %s.' % file_name)
+      self.target_path = self._fs.join(self.destination, file_name)
+      
+      try:
+        # Check access permissions before attempting upload
+        #self._check_access() #implement later
+        LOG.debug("Initiating ABFS upload to target path: %s" % self.target_path)
+        self._fs.create(self.target_path)
+        self.file = SimpleUploadedFile(name=file_name, content='')
+        raise StopFutureHandlers()
+      except (ABFSFileUploadError, ABFSFileSystemException) as e:
+        LOG.error("Encountered error in ABFSUploadHandler check_access: %s" % e)
+        self.request.META['upload_failed'] = e
+        raise StopUpload()
+
+
+  def receive_data_chunk(self, raw_data, start):
+    if self._is_abfs_upload():
+      try:
+        LOG.debug("ABFSFileUploadHandler uploading file part with size: %s" %self._part_size)
+        self._fs.append(self.target_path, raw_data, params = {'position' : int(start)})
+        return None
+      except Exception as e:
+        self._fs.remove(self.target_path)
+        LOG.exception('Failed to upload file to ABFS at %s: %s' % (self.target_path, e))
+        raise StopUpload()
+    else:
+      return raw_data
+
+  def file_complete(self, file_size):
+    if self._is_abfs_upload():
+      #finish the upload
+      self._fs.flush(self.target_path, {'position' : int(file_size)})
+      LOG.info("ABFSFileUploadHandler has completed file upload to ABFS, total file size is: %d." % file_size)
+      self.file.size = file_size
+      LOG.debug("%s" %self._fs.stats(self.target_path))
+      return self.file
+    else:
+      return None
+
+  def _get_abfs(self, request):
+    fs = get_client_abfs()
+    
+    if not fs:
+      raise ABFSFileUploadError(_("No ABFS filesystem found"))
+    
+    return fs
+  
+  def _is_abfs_upload(self):
+    return self._get_scheme() and self._get_scheme().startswith('ABFS')
+  
+  def _get_scheme(self):
+    if self.destination:
+      dst_parts = self.destination.split('://')
+      if len(dst_parts) > 1:
+        return dst_parts[0].upper()
+      else:
+        return None  # no scheme separator, so this is not an ABFS destination
+    else:
+      return None
+  

+ 2 - 2
desktop/libs/azure/src/azure/client.py

@@ -65,8 +65,8 @@ def _make_adls_client(identifier):
 
 def _make_abfs_client(identifier):
   client_conf = conf.ABFS_CLUSTERS[identifier]
-  azure_client = CLIENT_CACHE["azure"][identifier]
-  return ABFS.from_config(client_conf, azure_client)
+  azure_client_conf = conf.AZURE_ACCOUNTS[identifier]
+  return ABFS.from_config(client_conf, ActiveDirectory.from_config(azure_client_conf, version='v2.0'))#temporary fix
 
 def _make_azure_client(identifier):
   client_conf = conf.AZURE_ACCOUNTS[identifier]

+ 24 - 0
desktop/libs/hadoop/src/hadoop/fs/test_webhdfs.py

@@ -575,3 +575,27 @@ class WebhdfsTests(unittest.TestCase):
     # Set user to non-authorized, non-superuser user
     self.cluster.fs.setuser('nonadmin')
     assert_raises(WebHdfsException, self.cluster.fs.check_access, path='/user/test', aclspec='rw-')
+    
+  def test_list(self):
+    test_file = self.prefix + "/fortest.txt"
+    test_dir = self.prefix + "/temp2"
+    f = self.cluster.fs.open(test_file, "w")
+    f.write("ok")
+    f.close()
+    
+    resp = self.cluster.fs.listdir(self.prefix)
+    LOG.debug("%s" %resp)
+    
+    test_dir = self.prefix + "/temp2"
+    self.cluster.fs.mkdir(test_dir, 0333)
+    test_file2 = test_dir + "/fortest.txt"
+    f = self.cluster.fs.open(test_file2, "w")
+    f.write("ok")
+    f.close()
+    
+    resp = self.cluster.fs.listdir(self.prefix)
+    LOG.debug("%s" %resp)
+    resp = self.cluster.fs.listdir_stats(self.prefix)
+    LOG.debug("%s" %resp)
+    self.cluster.fs.remove(test_file)
+    self.cluster.fs.remove(test_file2)