
[abfs] Enhance _writedata method to write data in chunks and improve flushing logic (#4189)

* [abfs] Enhance _writedata method to write data in chunks and improve flushing logic

- Updated the _writedata method to split input data into chunks based on the maximum allowed upload size.
- Improved handling of the last chunk to ensure only the remaining data is written.
- Added detailed docstring to clarify method functionality and parameters.
- Ensured data is only appended if there is content to write (the chunking arithmetic is sketched below).
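
A minimal standalone sketch of the chunking arithmetic described above (illustrative only; the actual implementation is the `_writedata` method in the diff below, and `split_into_chunks` is a hypothetical helper, not Hue code):

```python
def split_into_chunks(data: bytes, chunk_size: int) -> list:
  """Slice data the way the rewritten _writedata does (sketch, not Hue code)."""
  size = len(data)
  cycles = (size + chunk_size - 1) // chunk_size  # integer ceiling division
  chunks = []
  for i in range(cycles):
    start = i * chunk_size
    # The last chunk carries only the remaining bytes; all others are full-sized.
    length = size - start if i == cycles - 1 else chunk_size
    chunks.append(data[start:start + length])
  return chunks

assert split_into_chunks(b"x" * 3500, 1024) == [b"x" * 1024] * 3 + [b"x" * 428]
assert split_into_chunks(b"", 1024) == []  # empty input: no appends, flush only
```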
Harsh Gupta, 4 months ago
parent commit 409416fc17
2 changed files with 368 additions and 139 deletions
  1. desktop/libs/azure/src/azure/abfs/abfs.py (+45 -29)
  2. desktop/libs/azure/src/azure/abfs/abfs_test.py (+323 -110)

+ 45 - 29
desktop/libs/azure/src/azure/abfs/abfs.py

@@ -19,14 +19,10 @@
 Interfaces for ABFS
 """
 
-import os
 import logging
+import os
 import threading
-import urllib.error
-import urllib.request
 from builtins import object
-from math import ceil
-from posixpath import join
 from urllib.parse import quote as urllib_quote, urlparse as lib_urlparse
 
 from django.http.multipartparser import MultiPartParser
@@ -34,7 +30,7 @@ from django.http.multipartparser import MultiPartParser
 import azure.abfs.__init__ as Init_ABFS
 from azure.abfs.abfsfile import ABFSFile
 from azure.abfs.abfsstats import ABFSStat
-from azure.conf import PERMISSION_ACTION_ABFS, is_raz_abfs
+from azure.conf import is_raz_abfs, PERMISSION_ACTION_ABFS
 from desktop.conf import RAZ
 from desktop.lib.rest import http_client, resource
 from desktop.lib.rest.raz_http_client import RazHttpClient
@@ -531,7 +527,7 @@ class ABFS(object):
     """
     header = {}
     if permissionNumber is not None:
-      if isinstance(permissionNumber, basestring):
+      if isinstance(permissionNumber, str):
         header['x-ms-permissions'] = str(permissionNumber)
       else:
         header['x-ms-permissions'] = oct(permissionNumber)
@@ -571,7 +567,6 @@ class ABFS(object):
     """
     new_path = dst + '/' + Init_ABFS.strip_path(src)
     self.create(new_path)
-    chunk_size = self.get_upload_chuck_size()
     file = self.read(src)
     size = len(file)
     self._writedata(new_path, file, size)
@@ -641,16 +636,16 @@ class ABFS(object):
     """
 A wrapper function for copying local directories
     """
-    self.mkdir(remote_dir)
+    self.mkdir(remote_dst)
 
-    for f in os.listdir(local_dir):
-      local_src = os.path.join(local_dir, f)
-      remote_dst = self.join(remote_dir, f)
+    for f in os.listdir(local_src):
+      src_path = os.path.join(local_src, f)  # fresh names; rebinding local_src/remote_dst would break later iterations
+      dst_path = self.join(remote_dst, f)
 
-      if os.path.isdir(local_src):
-        self._copy_dir(local_src, remote_dst, mode)
+      if os.path.isdir(src_path):
+        self._local_copy_dir(src_path, dst_path)
       else:
-        self._copy_file(local_src, remote_dst)
+        self._local_copy_file(src_path, dst_path)
 
   def _local_copy_file(self, local_src, remote_dst, chunk_size=UPLOAD_CHUCK_SIZE):
     """
@@ -658,10 +653,10 @@ class ABFS(object):
     """
     if os.path.isfile(local_src):
       if self.exists(remote_dst):
-        LOG.info('%s already exists. Skipping.' % remote_dst)
+        LOG.info(f'{remote_dst} already exists. Skipping.')
         return
 
-      src = file(local_src)
+      src = open(local_src, 'rb')
       try:
         try:
           self.create(remote_dst)
@@ -674,12 +669,12 @@ class ABFS(object):
             chunk = src.read(chunk_size)
           self.flush(remote_dst, params={'position': offset})
         except Exception:
-          LOG.exception(_('Copying %s -> %s failed.') % (local_src, remote_dst))
+          LOG.exception(f'Copying {local_src} -> {remote_dst} failed.')
           raise
       finally:
         src.close()
     else:
-      LOG.info(_('Skipping %s (not a file).') % local_src)
+      LOG.info(f'Skipping {local_src} (not a file).')
 
   def check_access(self, path, *args, **kwargs):
     """
@@ -715,22 +710,43 @@ class ABFS(object):
 
   # Other Methods to condense stuff
   # ----------------------------
-  # Write Files on creation
-  # ----------------------------
   def _writedata(self, path, data, size):
     """
-    Adds text to a given file
+    Write data to a file in chunks.
+
+    This method splits the input data into chunks of the maximum allowed upload size and appends each chunk to the specified path.
+    After all chunks are written, it flushes the file to ensure all data is committed.
+
+    Args:
+      path (str): The destination file path in ABFS.
+      data (bytes or bytearray): The data to be written.
+      size (int): The total size of the data to be written.
+
+    Returns:
+      None
     """
     chunk_size = self.get_upload_chuck_size()
-    cycles = ceil(float(size) / chunk_size)
-    for i in range(0, cycles):
-      chunk = size % chunk_size
-      if i != cycles or chunk == 0:
-        length = chunk_size
+    # Calculate number of chunks needed using integer ceiling division
+    cycles = (size + chunk_size - 1) // chunk_size
+
+    for i in range(cycles):
+      start = i * chunk_size
+      if i == cycles - 1:  # Last chunk
+        # For the last chunk, only write the remaining data
+        length = size - start
       else:
-        length = chunk
-      self._append(path, data[i * chunk_size : i * chunk_size + length], length)
-    self.flush(path, {'position': int(size)})
+        # For all other chunks, write full chunk size
+        length = chunk_size
+
+      end = start + length
+      chunk_data = data[start:end]
+
+      # Only append if we have data to write
+      if chunk_data:
+        self._append(path, chunk_data, size=length, params={"position": start})
+
+    # Flush at the end with the total size
+    self.flush(path, {"position": int(size)})
 
   # Use Patch HTTP request
   # ----------------------------

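For context, the `_append`/`flush` pair used above maps onto the ADLS Gen2 "Path - Update" REST operation: each append is a PATCH with `action=append` at an explicit byte position, and a final PATCH with `action=flush` at the total size commits the staged bytes. A minimal sketch of those two calls, using `requests` directly with a placeholder URL and bearer token instead of Hue's REST client:

```python
import requests

# Placeholders for illustration; account, container, path and token are assumptions.
FILE_URL = "https://hue_storage.dfs.core.windows.net/hue_container/test/file.txt"

def append_chunk(chunk: bytes, position: int, token: str) -> None:
  # action=append stages bytes at a byte offset; nothing is visible to readers yet.
  resp = requests.patch(
    f"{FILE_URL}?action=append&position={position}",
    data=chunk,
    headers={"Authorization": f"Bearer {token}"},
  )
  resp.raise_for_status()

def flush_file(total_size: int, token: str) -> None:
  # action=flush at the total size commits every staged append in one step.
  resp = requests.patch(
    f"{FILE_URL}?action=flush&position={total_size}",
    headers={"Authorization": f"Bearer {token}"},
  )
  resp.raise_for_status()
```

This is why the rewritten `_writedata` passes `params={"position": start}` on every append and flushes exactly once with the total size.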
+ 323 - 110
desktop/libs/azure/src/azure/abfs/abfs_test.py

@@ -15,11 +15,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import json
-import time
 import logging
+import os
 import tempfile
+import time
+from unittest.mock import Mock
 
 import pytest
 from django.contrib.auth.models import User
@@ -38,109 +39,323 @@ from filebrowser.conf import REMOTE_STORAGE_HOME
 LOG = logging.getLogger()
 
 
-@pytest.mark.django_db
-def test_get_abfs_home_directory():
-  client = make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
-  user = User.objects.get(username="test")
-
-  client_not_me = make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
-  user_not_me = User.objects.get(username="test_not_me")
-
-  # When REMOTE_STORAGE_HOME ends with /user in RAZ ABFS environment.
-  resets = [RAZ.IS_ENABLED.set_for_testing(True), REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user')]
-
-  try:
-    default_abfs_home_path = get_abfs_home_directory(user)
-    assert default_abfs_home_path == 'abfs://gethue-container/user/test'
-
-    default_abfs_home_path = get_abfs_home_directory(user_not_me)
-    assert default_abfs_home_path == 'abfs://gethue-container/user/test_not_me'
-  finally:
-    for reset in resets:
-      reset()
-
-  # When ABFS filesystem's DEFAULT_HOME_PATH ends with /user in RAZ ABFS environment.
-  resets = [
-    RAZ.IS_ENABLED.set_for_testing(True),
-    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/user'}}),
-  ]
-
-  try:
-    default_abfs_home_path = get_abfs_home_directory(user)
-    assert default_abfs_home_path == 'abfs://gethue-other-container/user/test'
-
-    default_abfs_home_path = get_abfs_home_directory(user_not_me)
-    assert default_abfs_home_path == 'abfs://gethue-other-container/user/test_not_me'
-  finally:
-    for reset in resets:
-      reset()
-
-  # When ABFS filesystem's DEFAULT_HOME_PATH is set in non-RAZ ABFS environment.
-  resets = [
-    RAZ.IS_ENABLED.set_for_testing(False),
-    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/test-dir'}}),
-  ]
-
-  try:
-    default_abfs_home_path = get_abfs_home_directory(user)
-    assert default_abfs_home_path == 'abfs://gethue-other-container/test-dir'
-
-    default_abfs_home_path = get_abfs_home_directory(user_not_me)
-    assert default_abfs_home_path == 'abfs://gethue-other-container/test-dir'
-  finally:
-    for reset in resets:
-      reset()
-
-  # When both REMOTE_STORAGE_HOME and ABFS filesystem's DEFAULT_HOME_PATH are set in RAZ ABFS environment.
-  resets = [
-    RAZ.IS_ENABLED.set_for_testing(True),
-    REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user'),
-    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/user'}}),
-  ]
-
-  try:
-    # Gives preference to REMOTE_STORAGE_HOME for of backward compatibility.
-    default_abfs_home_path = get_abfs_home_directory(user)
-    assert default_abfs_home_path == 'abfs://gethue-container/user/test'
-
-    default_abfs_home_path = get_abfs_home_directory(user_not_me)
-    assert default_abfs_home_path == 'abfs://gethue-container/user/test_not_me'
-  finally:
-    for reset in resets:
-      reset()
-
-  # When ABFS filesystem's DEFAULT_HOME_PATH is set but path does not end with ../user or ../user/ in RAZ ABFS environment.
-  resets = [
-    RAZ.IS_ENABLED.set_for_testing(True),
-    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 'abfs://gethue-other-container/dir'}}),
-  ]
-
-  try:
-    default_abfs_home_path = get_abfs_home_directory(user)
-    assert default_abfs_home_path == 'abfs://gethue-other-container/dir'
-
-    default_abfs_home_path = get_abfs_home_directory(user_not_me)
-    assert default_abfs_home_path == 'abfs://gethue-other-container/dir'
-  finally:
-    for reset in resets:
-      reset()
-
-  # When some different path is set in both RAZ and non-RAZ ABFS environment.
-  resets = [
-    RAZ.IS_ENABLED.set_for_testing(True),
-    REMOTE_STORAGE_HOME.set_for_testing('s3a://gethue-bucket/user'),
-    ABFS_CLUSTERS.set_for_testing({'default': {'default_home_path': 's3a://gethue-other-bucket/dir'}}),
-  ]
-
-  try:
-    default_abfs_home_path = get_abfs_home_directory(user)
-    assert default_abfs_home_path == 'abfs://'
-
-    default_abfs_home_path = get_abfs_home_directory(user_not_me)
-    assert default_abfs_home_path == 'abfs://'
-  finally:
-    for reset in resets:
-      reset()
+class TestABFS:
+  @pytest.mark.django_db
+  def test_get_abfs_home_directory(self):
+    make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
+    user = User.objects.get(username="test")
+
+    make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
+    user_not_me = User.objects.get(username="test_not_me")
+
+    # When REMOTE_STORAGE_HOME ends with /user in RAZ ABFS environment.
+    resets = [RAZ.IS_ENABLED.set_for_testing(True), REMOTE_STORAGE_HOME.set_for_testing("abfs://gethue-container/user")]
+
+    try:
+      default_abfs_home_path = get_abfs_home_directory(user)
+      assert default_abfs_home_path == "abfs://gethue-container/user/test"
+
+      default_abfs_home_path = get_abfs_home_directory(user_not_me)
+      assert default_abfs_home_path == "abfs://gethue-container/user/test_not_me"
+    finally:
+      for reset in resets:
+        reset()
+
+    # When ABFS filesystem's DEFAULT_HOME_PATH ends with /user in RAZ ABFS environment.
+    resets = [
+      RAZ.IS_ENABLED.set_for_testing(True),
+      ABFS_CLUSTERS.set_for_testing({"default": {"default_home_path": "abfs://gethue-other-container/user"}}),
+    ]
+
+    try:
+      default_abfs_home_path = get_abfs_home_directory(user)
+      assert default_abfs_home_path == "abfs://gethue-other-container/user/test"
+
+      default_abfs_home_path = get_abfs_home_directory(user_not_me)
+      assert default_abfs_home_path == "abfs://gethue-other-container/user/test_not_me"
+    finally:
+      for reset in resets:
+        reset()
+
+    # When ABFS filesystem's DEFAULT_HOME_PATH is set in non-RAZ ABFS environment.
+    resets = [
+      RAZ.IS_ENABLED.set_for_testing(False),
+      ABFS_CLUSTERS.set_for_testing({"default": {"default_home_path": "abfs://gethue-other-container/test-dir"}}),
+    ]
+
+    try:
+      default_abfs_home_path = get_abfs_home_directory(user)
+      assert default_abfs_home_path == "abfs://gethue-other-container/test-dir"
+
+      default_abfs_home_path = get_abfs_home_directory(user_not_me)
+      assert default_abfs_home_path == "abfs://gethue-other-container/test-dir"
+    finally:
+      for reset in resets:
+        reset()
+
+    # When both REMOTE_STORAGE_HOME and ABFS filesystem's DEFAULT_HOME_PATH are set in RAZ ABFS environment.
+    resets = [
+      RAZ.IS_ENABLED.set_for_testing(True),
+      REMOTE_STORAGE_HOME.set_for_testing("abfs://gethue-container/user"),
+      ABFS_CLUSTERS.set_for_testing({"default": {"default_home_path": "abfs://gethue-other-container/user"}}),
+    ]
+
+    try:
+      # Gives preference to REMOTE_STORAGE_HOME for backward compatibility.
+      default_abfs_home_path = get_abfs_home_directory(user)
+      assert default_abfs_home_path == "abfs://gethue-container/user/test"
+
+      default_abfs_home_path = get_abfs_home_directory(user_not_me)
+      assert default_abfs_home_path == "abfs://gethue-container/user/test_not_me"
+    finally:
+      for reset in resets:
+        reset()
+
+    # When ABFS filesystem's DEFAULT_HOME_PATH is set but path does not end with ../user or ../user/ in RAZ ABFS environment.
+    resets = [
+      RAZ.IS_ENABLED.set_for_testing(True),
+      ABFS_CLUSTERS.set_for_testing({"default": {"default_home_path": "abfs://gethue-other-container/dir"}}),
+    ]
+
+    try:
+      default_abfs_home_path = get_abfs_home_directory(user)
+      assert default_abfs_home_path == "abfs://gethue-other-container/dir"
+
+      default_abfs_home_path = get_abfs_home_directory(user_not_me)
+      assert default_abfs_home_path == "abfs://gethue-other-container/dir"
+    finally:
+      for reset in resets:
+        reset()
+
+    # When some different path is set in both RAZ and non-RAZ ABFS environment.
+    resets = [
+      RAZ.IS_ENABLED.set_for_testing(True),
+      REMOTE_STORAGE_HOME.set_for_testing("s3a://gethue-bucket/user"),
+      ABFS_CLUSTERS.set_for_testing({"default": {"default_home_path": "s3a://gethue-other-bucket/dir"}}),
+    ]
+
+    try:
+      default_abfs_home_path = get_abfs_home_directory(user)
+      assert default_abfs_home_path == "abfs://"
+
+      default_abfs_home_path = get_abfs_home_directory(user_not_me)
+      assert default_abfs_home_path == "abfs://"
+    finally:
+      for reset in resets:
+        reset()
+
+  @pytest.mark.parametrize(
+    "data_size,chunk_size,expected_chunks",
+    [
+      # Small data - single chunk
+      (100, 1024, 1),
+      # Exact chunk size
+      (1024, 1024, 1),
+      # One byte over chunk size
+      (1025, 1024, 2),
+      # Multiple full chunks
+      (3072, 1024, 3),
+      # Multiple chunks with partial last chunk
+      (3500, 1024, 4),
+      # Large data
+      (1048576, 65536, 16),  # 1MB data, 64KB chunks
+      # Zero size
+      (0, 1024, 0),
+    ],
+  )
+  def test_writedata_various_sizes(self, data_size, chunk_size, expected_chunks):
+    """Test _writedata with various data sizes and chunk configurations."""
+
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=chunk_size)
+    abfs._append = Mock()
+    abfs.flush = Mock()
+
+    path = "abfs://hue_container/test/file.txt"
+    data = b"x" * data_size
+
+    abfs._writedata(path, data, data_size)
+
+    assert abfs._append.call_count == expected_chunks
+
+    # Verify each chunk call
+    for i in range(expected_chunks):
+      start = i * chunk_size
+      if i == expected_chunks - 1:  # Last chunk
+        length = data_size - start
+      else:
+        length = chunk_size
+
+      expected_chunk_data = data[start : start + length]
+      abfs._append.assert_any_call(path, expected_chunk_data, size=length, params={"position": start})
+
+    # Verify flush is called once with final position
+    abfs.flush.assert_called_once_with(path, {"position": data_size})
+
+  def test_writedata_empty_data(self):
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=1024)
+    abfs._append = Mock()
+    abfs.flush = Mock()
+
+    path = "abfs://hue_container/test/empty.txt"
+    data = b""
+
+    abfs._writedata(path, data, 0)
+
+    abfs._append.assert_not_called()  # No append calls for empty data
+    abfs.flush.assert_called_once_with(path, {"position": 0})
+
+  def test_writedata_single_byte(self):
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=1024)
+    abfs._append = Mock()
+    abfs.flush = Mock()
+
+    path = "abfs://hue_container/test/single.txt"
+    data = b"a"
+
+    abfs._writedata(path, data, 1)
+
+    abfs._append.assert_called_once_with(path, b"a", size=1, params={"position": 0})
+    abfs.flush.assert_called_once_with(path, {"position": 1})
+
+  def test_writedata_boundary_conditions(self):
+    """Test _writedata with data at chunk size boundaries."""
+
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    chunk_size = 1000
+    abfs.get_upload_chuck_size = Mock(return_value=chunk_size)
+    abfs._append = Mock()
+    abfs.flush = Mock()
+
+    test_cases = [
+      (chunk_size - 1, 1),  # 999 bytes = 1 chunk
+      (chunk_size, 1),  # 1000 bytes = 1 chunk
+      (chunk_size + 1, 2),  # 1001 bytes = 2 chunks
+    ]
+
+    for data_size, expected_chunks in test_cases:
+      # Reset mocks for each test case
+      abfs._append.reset_mock()
+      abfs.flush.reset_mock()
+
+      path = f"abfs://hue_container/test/boundary_{data_size}.txt"
+      data = b"x" * data_size
+
+      abfs._writedata(path, data, data_size)
+
+      assert abfs._append.call_count == expected_chunks, (
+        f"Expected {expected_chunks} chunks for {data_size} bytes, got {abfs._append.call_count}"
+      )
+
+  def test_writedata_append_exception_handling(self):
+    """Test _writedata behavior when _append raises an exception."""
+
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=1024)
+    abfs._append = Mock(side_effect=Exception("Network error"))
+    abfs.flush = Mock()
+
+    path = "abfs://hue_container/test/error.txt"
+    data = b"x" * 2048  # 2 chunks
+
+    with pytest.raises(Exception) as exc_info:
+      abfs._writedata(path, data, 2048)
+
+    assert str(exc_info.value) == "Network error"
+    abfs._append.assert_called_once()  # Should fail on first append
+    abfs.flush.assert_not_called()  # Should not reach flush
+
+  def test_writedata_unicode_path(self):
+    """Test _writedata with Unicode characters in path."""
+
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=1024)
+    abfs._append = Mock()
+    abfs.flush = Mock()
+
+    path = "abfs://hue_container/test/文件名.txt"
+    data = b"Hello World"
+
+    abfs._writedata(path, data, 11)
+
+    abfs._append.assert_called_once_with(path, b"Hello World", size=11, params={"position": 0})
+    abfs.flush.assert_called_once_with(path, {"position": 11})
+
+  def test_writedata_order_of_operations(self):
+    """Test that _append calls happen in correct order and flush is called last."""
+
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=10)
+    all_calls = []  # single shared log so the relative order of append and flush is actually recorded
+
+    def track_append(*args, **kwargs):
+      all_calls.append(("append", args, kwargs))
+
+    def track_flush(*args, **kwargs):
+      all_calls.append(("flush", args, kwargs))
+
+    abfs._append = Mock(side_effect=track_append)
+    abfs.flush = Mock(side_effect=track_flush)
+
+    path = "abfs://hue_container/test/order.txt"
+    data = b"0123456789abcdefghij"  # 20 bytes = 2 chunks of 10
+
+    abfs._writedata(path, data, 20)
+
+    # Verify order: append(chunk1), append(chunk2), flush
+    assert len(all_calls) == 3
+    assert all_calls[0][0] == "append"
+    assert all_calls[0][2]["params"]["position"] == 0
+    assert all_calls[1][0] == "append"
+    assert all_calls[1][2]["params"]["position"] == 10
+    assert all_calls[2][0] == "flush"
+
+  @pytest.mark.parametrize(
+    "data_type",
+    [
+      b"bytes data",
+      bytearray(b"bytearray data"),
+    ],
+  )
+  def test_writedata_different_data_types(self, data_type):
+    """Test _writedata with different data types (bytes and bytearray)."""
+
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=1024)
+    abfs._append = Mock()
+    abfs.flush = Mock()
+
+    path = "abfs://hue_container/test/types.txt"
+    size = len(data_type)
+
+    abfs._writedata(path, data_type, size)
+
+    abfs._append.assert_called_once()
+    # Check that the data was passed correctly (may be converted internally)
+    call_args = abfs._append.call_args
+    assert len(call_args[0][1]) == size  # Second argument is the data
+
+  def test_writedata_large_chunk_size(self):
+    """Test _writedata with very large chunk size (larger than data)."""
+
+    abfs = ABFS(url="abfs://hue_container@hue_storage.dfs.core.windows.net", fs_defaultfs="https://hue_storage.dfs.core.windows.net/")
+    abfs.get_upload_chuck_size = Mock(return_value=1048576)  # 1MB chunk
+    abfs._append = Mock()
+    abfs.flush = Mock()
+
+    path = "abfs://hue_container/test/small.txt"
+    data = b"Small data"
+
+    abfs._writedata(path, data, 10)
+
+    # Should still make one append call even though chunk size >> data size
+    abfs._append.assert_called_once_with(path, b"Small data", size=10, params={"position": 0})
+    abfs.flush.assert_called_once_with(path, {"position": 10})
 
 
 @pytest.mark.integration
@@ -277,7 +492,6 @@ class ABFSTestBase(TestCase):
     test_dir3 = test_fs + '/test 3'
     test_file = test_fs + '/test.txt'
     test_file2 = test_fs + '/test2.txt'
-    test_file3 = test_fs + '/test 3.txt'
 
     self.client.mkdir(test_dir)
     assert self.client.exists(test_dir)
@@ -342,7 +556,7 @@ class ABFSTestBase(TestCase):
     with tempfile.NamedTemporaryFile() as local_file:
       # Make sure we can upload larger than the UPLOAD chunk size
       file_size = DEFAULT_WRITE_SIZE * 2
-      local_file.write('0' * file_size)
+      local_file.write(b'0' * file_size)
       local_file.flush()
       self.client.mkdir(self.test_fs + '/test_upload')
       dest_dir = self.test_fs + '/test_upload'
@@ -352,16 +566,15 @@ class ABFSTestBase(TestCase):
       add_permission(self.user.username, 'has_abfs', permname='abfs_access', appname='filebrowser')
       # Upload the temp file written above
       try:
-        resp = self.c.post('/filebrowser/upload/file?dest=%s' % dest_dir, dict(dest=dest_dir, hdfs_file=file(local_file)))
+        resp = self.c.post('/filebrowser/upload/file?dest=%s' % dest_dir, dict(dest=dest_dir, hdfs_file=open(local_file.name, 'rb')))
         response = json.loads(resp.content)
       finally:
         remove_from_group(self.user.username, 'has_abfs')
 
       assert 0 == response['status'], response
-      stats = self.client.stats(dest_path)
 
       actual = self.client.read(dest_path)
-      expected = file(local_file).read()
+      expected = open(local_file.name, 'rb').read()
       assert actual == expected, 'files do not match: %s != %s' % (len(actual), len(expected))
 
   def test_copy_file(self):
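
For reference, a standalone comparison of the removed slicing logic against the new one shows why the rewrite was needed: in the old loop, `i != cycles` is always true for `i in range(cycles)`, so the `length = chunk` branch was unreachable and the last partial chunk was declared at the full `chunk_size` (Python's short slice masked it, but the length passed to `_append` was wrong). The harness below is hypothetical; only the two length computations come from the diff:

```python
from math import ceil

def old_lengths(size: int, chunk_size: int) -> list:
  # Removed logic: `i != cycles` always holds for i in range(cycles),
  # so `length` is always chunk_size, even for a partial last chunk.
  cycles = ceil(float(size) / chunk_size)
  chunk = size % chunk_size
  return [chunk_size if (i != cycles or chunk == 0) else chunk for i in range(cycles)]

def new_lengths(size: int, chunk_size: int) -> list:
  # New logic: the last chunk carries only the remaining bytes.
  cycles = (size + chunk_size - 1) // chunk_size
  return [size - i * chunk_size if i == cycles - 1 else chunk_size for i in range(cycles)]

print(old_lengths(3500, 1024))  # [1024, 1024, 1024, 1024] -> 4096 bytes declared for 3500
print(new_lengths(3500, 1024))  # [1024, 1024, 1024, 428]  -> sums to exactly 3500
```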