
[importer] Integrate Pydantic schemas for file operations and enhance validation (#4183)

- Added Pydantic schemas for local file uploads, file metadata guessing, file previews, and SQL type mapping to improve data validation and structure.
- Updated the `local_file_upload`, `guess_file_metadata`, `preview_file`, `guess_file_header`, and `get_sql_type_mapping` functions to utilize these schemas, enhancing type safety and clarity.
- Refactored serializers to leverage Pydantic for validation, simplifying the validation logic and improving error handling.
- Included the `pydantic` dependency in requirements for schema validation support.
- Added comprehensive unit tests for all changes.
Harsh Gupta 6 months ago
commit f20fa80d4e
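
The new `schemas.py` module itself is not shown on this page. The sketch below is only an illustration of what such Pydantic models could look like, based on the class names imported in `operations.py` further down and the fields the refactored code reads (`data.file`, `data.filename`, `data.file_path`, `data.import_type`, and so on); the field types, defaults, and constraints are assumptions rather than the commit's actual code.

```python
# Illustrative sketch only -- not the actual schemas.py from this commit.
from typing import Literal, Optional

from django.core.files.uploadedfile import UploadedFile
from pydantic import BaseModel, ConfigDict


class LocalFileUploadSchema(BaseModel):
  # Django's UploadedFile is not a Pydantic-native type, so arbitrary types must be allowed.
  model_config = ConfigDict(arbitrary_types_allowed=True)

  file: UploadedFile  # the uploaded file object
  filename: str       # original file name, e.g. "test.csv"
  filesize: int       # size in bytes


class GuessFileMetadataSchema(BaseModel):
  file_path: str
  import_type: Literal["local", "remote"]


class PreviewFileSchema(GuessFileMetadataSchema):
  file_type: Literal["excel", "csv", "tsv", "delimiter_format"]
  sql_dialect: Literal["hive", "impala", "trino", "phoenix", "sparksql"]
  has_header: bool = False
  sheet_name: Optional[str] = None
  field_separator: Optional[str] = ","
  quote_char: Optional[str] = '"'
  record_separator: Optional[str] = "\n"
```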

+ 1 - 0
desktop/core/base_requirements.txt

@@ -45,6 +45,7 @@ polars[calamine]==1.8.2  # Python >= 3.8
 prompt-toolkit==3.0.39
 protobuf==3.20.3
 pyarrow==17.0.0
+pydantic==2.10.6
 pyformance==0.3.2
 python-dateutil==2.8.2
 python-daemon==2.2.4

+ 1 - 0
desktop/core/generate_requirements.py

@@ -92,6 +92,7 @@ class RequirementsGenerator:
       "protobuf==3.20.3",
       "psutil==5.8.0",
       "pyarrow==17.0.0",
+      "pydantic==2.10.6",
       "pyformance==0.3.2",
       "PyJWT==2.4.0",
       "python-daemon==2.2.4",

+ 11 - 59
desktop/core/src/desktop/lib/importer/api.py

@@ -79,10 +79,10 @@ def local_file_upload(request: Request) -> Response:
   if not serializer.is_valid():
     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
 
-  uploaded_file = serializer.validated_data["file"]
+  upload_data = serializer.validated_data
 
-  LOG.info(f"User {request.user.username} is uploading a local file: {uploaded_file.name}")
-  result = operations.local_file_upload(uploaded_file, request.user.username)
+  LOG.info(f"User {request.user.username} is uploading a local file: {upload_data.filename}")
+  result = operations.local_file_upload(upload_data, request.user.username)
 
   return Response(result, status=status.HTTP_201_CREATED)
 
@@ -114,15 +114,10 @@ def guess_file_metadata(request: Request) -> Response:
   if not serializer.is_valid():
     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
 
-  validated_data = serializer.validated_data
-  file_path = validated_data["file_path"]
-  import_type = validated_data["import_type"]
+  metadata_params = serializer.validated_data
 
   try:
-    metadata = operations.guess_file_metadata(
-      file_path=file_path, import_type=import_type, fs=request.fs if import_type == "remote" else None
-    )
-
+    metadata = operations.guess_file_metadata(data=metadata_params, fs=request.fs if metadata_params.import_type == "remote" else None)
     return Response(metadata, status=status.HTTP_200_OK)
 
   except ValueError as e:
@@ -152,44 +147,10 @@ def preview_file(request: Request) -> Response:
   if not serializer.is_valid():
     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
 
-  # Get validated data
-  validated_data = serializer.validated_data
-  file_path = validated_data["file_path"]
-  file_type = validated_data["file_type"]
-  import_type = validated_data["import_type"]
-  sql_dialect = validated_data["sql_dialect"]
-  has_header = validated_data.get("has_header")
+  preview_params = serializer.validated_data
 
   try:
-    if file_type == "excel":
-      sheet_name = validated_data.get("sheet_name")
-
-      preview = operations.preview_file(
-        file_path=file_path,
-        file_type=file_type,
-        import_type=import_type,
-        sql_dialect=sql_dialect,
-        has_header=has_header,
-        sheet_name=sheet_name,
-        fs=request.fs if import_type == "remote" else None,
-      )
-    else:  # Delimited file types
-      field_separator = validated_data.get("field_separator")
-      quote_char = validated_data.get("quote_char")
-      record_separator = validated_data.get("record_separator")
-
-      preview = operations.preview_file(
-        file_path=file_path,
-        file_type=file_type,
-        import_type=import_type,
-        sql_dialect=sql_dialect,
-        has_header=has_header,
-        field_separator=field_separator,
-        quote_char=quote_char,
-        record_separator=record_separator,
-        fs=request.fs if import_type == "remote" else None,
-      )
-
+    preview = operations.preview_file(data=preview_params, fs=request.fs if preview_params.import_type == "remote" else None)
     return Response(preview, status=status.HTTP_200_OK)
 
   except ValueError as e:
@@ -224,17 +185,10 @@ def guess_file_header(request: Request) -> Response:
   if not serializer.is_valid():
     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
 
-  validated_data = serializer.validated_data
+  header_params = serializer.validated_data
 
   try:
-    has_header = operations.guess_file_header(
-      file_path=validated_data["file_path"],
-      file_type=validated_data["file_type"],
-      import_type=validated_data["import_type"],
-      sheet_name=validated_data.get("sheet_name"),
-      fs=request.fs if validated_data["import_type"] == "remote" else None,
-    )
-
+    has_header = operations.guess_file_header(data=header_params, fs=request.fs if header_params.import_type == "remote" else None)
     return Response({"has_header": has_header}, status=status.HTTP_200_OK)
 
   except ValueError as e:
@@ -267,13 +221,11 @@ def get_sql_type_mapping(request: Request) -> Response:
   if not serializer.is_valid():
     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
 
-  validated_data = serializer.validated_data
-  sql_dialect = validated_data["sql_dialect"]
+  type_mapping_params = serializer.validated_data
 
   try:
-    type_mapping = operations.get_sql_type_mapping(sql_dialect)
+    type_mapping = operations.get_sql_type_mapping(type_mapping_params)
     return Response(type_mapping, status=status.HTTP_200_OK)
-
   except ValueError as e:
     return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)
   except Exception as e:
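
The refactored `serializers.py` is not included in this diff. One possible shape of the pattern it relies on, shown here only as an assumption, is a DRF serializer whose `validate()` returns a Pydantic model, which is what lets the views above use attribute access on `serializer.validated_data` (for example `metadata_params.import_type`):

```python
# Hypothetical sketch of the serializer pattern; the commit's actual
# serializers.py may differ.
from pydantic import ValidationError
from rest_framework import serializers

from desktop.lib.importer.schemas import GuessFileMetadataSchema


class GuessFileMetadataSerializer(serializers.Serializer):
  file_path = serializers.CharField()
  import_type = serializers.ChoiceField(choices=["local", "remote"])

  def validate(self, attrs):
    try:
      # Returning the schema instance makes serializer.validated_data a
      # Pydantic object rather than a plain dict.
      return GuessFileMetadataSchema(**attrs)
    except ValidationError as e:
      raise serializers.ValidationError(e.errors())
```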

+ 192 - 176
desktop/core/src/desktop/lib/importer/api_tests.py

@@ -28,7 +28,14 @@ class TestLocalFileUploadAPI:
   @patch("desktop.lib.importer.api.LocalFileUploadSerializer")
   @patch("desktop.lib.importer.api.operations.local_file_upload")
   def test_local_file_upload_success(self, mock_local_file_upload, mock_serializer_class):
-    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data={"file": SimpleUploadedFile("test.csv", b"content")})
+    # Create a mock schema object that will be returned by the serializer
+    mock_file = SimpleUploadedFile("test.csv", b"content")
+    mock_schema = MagicMock()
+    mock_schema.file = mock_file
+    mock_schema.filename = "test.csv"
+    mock_schema.filesize = 7
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_local_file_upload.return_value = {"file_path": "/tmp/user_12345_test.csv"}
@@ -40,7 +47,7 @@ class TestLocalFileUploadAPI:
 
     assert response.status_code == status.HTTP_201_CREATED
     assert response.data == {"file_path": "/tmp/user_12345_test.csv"}
-    mock_local_file_upload.assert_called_once_with(mock_serializer.validated_data["file"], "test_user")
+    mock_local_file_upload.assert_called_once_with(mock_schema, "test_user")
 
   @patch("desktop.lib.importer.api.LocalFileUploadSerializer")
   def test_local_file_upload_invalid_data(self, mock_serializer_class):
@@ -58,7 +65,14 @@ class TestLocalFileUploadAPI:
   @patch("desktop.lib.importer.api.LocalFileUploadSerializer")
   @patch("desktop.lib.importer.api.operations.local_file_upload")
   def test_local_file_upload_operation_error(self, mock_local_file_upload, mock_serializer_class):
-    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data={"file": SimpleUploadedFile("test.csv", b"content")})
+    # Create a mock schema object that will be returned by the serializer
+    mock_file = SimpleUploadedFile("test.csv", b"content")
+    mock_schema = MagicMock()
+    mock_schema.file = mock_file
+    mock_schema.filename = "test.csv"
+    mock_schema.filesize = 7
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_local_file_upload.side_effect = IOError("Operation error")
@@ -76,9 +90,12 @@ class TestGuessFileMetadataAPI:
   @patch("desktop.lib.importer.api.GuessFileMetadataSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_metadata")
   def test_guess_csv_file_metadata_success(self, mock_guess_file_metadata, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "/path/to/test.csv", "import_type": "local"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.import_type = "local"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_metadata.return_value = {"type": "csv", "field_separator": ",", "quote_char": '"', "record_separator": "\n"}
@@ -92,14 +109,17 @@ class TestGuessFileMetadataAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"type": "csv", "field_separator": ",", "quote_char": '"', "record_separator": "\n"}
-    mock_guess_file_metadata.assert_called_once_with(file_path="/path/to/test.csv", import_type="local", fs=None)
+    mock_guess_file_metadata.assert_called_once_with(data=mock_schema, fs=None)
 
   @patch("desktop.lib.importer.api.GuessFileMetadataSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_metadata")
   def test_guess_excel_file_metadata_success(self, mock_guess_file_metadata, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "/path/to/test.xlsx", "import_type": "local"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.xlsx"
+    mock_schema.import_type = "local"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_metadata.return_value = {"type": "excel", "sheet_names": ["Sheet1", "Sheet2"]}
@@ -113,14 +133,17 @@ class TestGuessFileMetadataAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"type": "excel", "sheet_names": ["Sheet1", "Sheet2"]}
-    mock_guess_file_metadata.assert_called_once_with(file_path="/path/to/test.xlsx", import_type="local", fs=None)
+    mock_guess_file_metadata.assert_called_once_with(data=mock_schema, fs=None)
 
   @patch("desktop.lib.importer.api.GuessFileMetadataSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_metadata")
   def test_guess_file_metadata_remote_csv_file(self, mock_guess_file_metadata, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "s3a://bucket/user/test_user/test.csv", "import_type": "remote"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "s3a://bucket/user/test_user/test.csv"
+    mock_schema.import_type = "remote"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_metadata.return_value = {"type": "csv", "field_separator": ",", "quote_char": '"', "record_separator": "\n"}
@@ -135,7 +158,7 @@ class TestGuessFileMetadataAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"type": "csv", "field_separator": ",", "quote_char": '"', "record_separator": "\n"}
-    mock_guess_file_metadata.assert_called_once_with(file_path="s3a://bucket/user/test_user/test.csv", import_type="remote", fs=mock_fs)
+    mock_guess_file_metadata.assert_called_once_with(data=mock_schema, fs=mock_fs)
 
   @patch("desktop.lib.importer.api.GuessFileMetadataSerializer")
   def test_guess_file_metadata_invalid_data(self, mock_serializer_class):
@@ -154,9 +177,12 @@ class TestGuessFileMetadataAPI:
   @patch("desktop.lib.importer.api.GuessFileMetadataSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_metadata")
   def test_guess_file_metadata_value_error(self, mock_guess_file_metadata, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "/path/to/test.csv", "import_type": "local"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.import_type = "local"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_metadata.side_effect = ValueError("File does not exist")
@@ -174,9 +200,12 @@ class TestGuessFileMetadataAPI:
   @patch("desktop.lib.importer.api.GuessFileMetadataSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_metadata")
   def test_guess_file_metadata_operation_error(self, mock_guess_file_metadata, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "/path/to/test.csv", "import_type": "local"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.import_type = "local"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_metadata.side_effect = RuntimeError("Operation error")
@@ -196,19 +225,18 @@ class TestPreviewFileAPI:
   @patch("desktop.lib.importer.api.PreviewFileSerializer")
   @patch("desktop.lib.importer.api.operations.preview_file")
   def test_preview_csv_file_success(self, mock_preview_file, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={
-        "file_path": "/path/to/test.csv",
-        "file_type": "csv",
-        "import_type": "local",
-        "sql_dialect": "hive",
-        "has_header": True,
-        "field_separator": ",",
-        "quote_char": '"',
-        "record_separator": "\n",
-      },
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "local"
+    mock_schema.sql_dialect = "hive"
+    mock_schema.has_header = True
+    mock_schema.field_separator = ","
+    mock_schema.quote_char = '"'
+    mock_schema.record_separator = "\n"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_preview_result = {
@@ -227,32 +255,21 @@ class TestPreviewFileAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == mock_preview_result
-    mock_preview_file.assert_called_once_with(
-      file_path="/path/to/test.csv",
-      file_type="csv",
-      import_type="local",
-      sql_dialect="hive",
-      has_header=True,
-      field_separator=",",
-      quote_char='"',
-      record_separator="\n",
-      fs=None,
-    )
+    mock_preview_file.assert_called_once_with(data=mock_schema, fs=None)
 
   @patch("desktop.lib.importer.api.PreviewFileSerializer")
   @patch("desktop.lib.importer.api.operations.preview_file")
   def test_preview_excel_file_success(self, mock_preview_file, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={
-        "file_path": "/path/to/test.xlsx",
-        "file_type": "excel",
-        "import_type": "local",
-        "sql_dialect": "hive",
-        "has_header": True,
-        "sheet_name": "Sheet1",
-      },
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.xlsx"
+    mock_schema.file_type = "excel"
+    mock_schema.import_type = "local"
+    mock_schema.sql_dialect = "hive"
+    mock_schema.has_header = True
+    mock_schema.sheet_name = "Sheet1"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_preview_result = {
@@ -271,32 +288,23 @@ class TestPreviewFileAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == mock_preview_result
-    mock_preview_file.assert_called_once_with(
-      file_path="/path/to/test.xlsx",
-      file_type="excel",
-      import_type="local",
-      sql_dialect="hive",
-      has_header=True,
-      sheet_name="Sheet1",
-      fs=None,
-    )
+    mock_preview_file.assert_called_once_with(data=mock_schema, fs=None)
 
   @patch("desktop.lib.importer.api.PreviewFileSerializer")
   @patch("desktop.lib.importer.api.operations.preview_file")
   def test_preview_tsv_file_success(self, mock_preview_file, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={
-        "file_path": "/path/to/test.tsv",
-        "file_type": "tsv",
-        "import_type": "local",
-        "sql_dialect": "impala",
-        "has_header": True,
-        "field_separator": "\t",
-        "quote_char": '"',
-        "record_separator": "\n",
-      },
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.tsv"
+    mock_schema.file_type = "tsv"
+    mock_schema.import_type = "local"
+    mock_schema.sql_dialect = "impala"
+    mock_schema.has_header = True
+    mock_schema.field_separator = "\t"
+    mock_schema.quote_char = '"'
+    mock_schema.record_separator = "\n"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_preview_result = {
@@ -315,34 +323,23 @@ class TestPreviewFileAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == mock_preview_result
-    mock_preview_file.assert_called_once_with(
-      file_path="/path/to/test.tsv",
-      file_type="tsv",
-      import_type="local",
-      sql_dialect="impala",
-      has_header=True,
-      field_separator="\t",
-      quote_char='"',
-      record_separator="\n",
-      fs=None,
-    )
+    mock_preview_file.assert_called_once_with(data=mock_schema, fs=None)
 
   @patch("desktop.lib.importer.api.PreviewFileSerializer")
   @patch("desktop.lib.importer.api.operations.preview_file")
   def test_preview_remote_csv_file_success(self, mock_preview_file, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={
-        "file_path": "s3a://bucket/user/test_user/test.csv",
-        "file_type": "csv",
-        "import_type": "remote",
-        "sql_dialect": "hive",
-        "has_header": True,
-        "field_separator": ",",
-        "quote_char": '"',
-        "record_separator": "\n",
-      },
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "s3a://bucket/user/test_user/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "remote"
+    mock_schema.sql_dialect = "hive"
+    mock_schema.has_header = True
+    mock_schema.field_separator = ","
+    mock_schema.quote_char = '"'
+    mock_schema.record_separator = "\n"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_preview_result = {
@@ -363,17 +360,7 @@ class TestPreviewFileAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == mock_preview_result
-    mock_preview_file.assert_called_once_with(
-      file_path="s3a://bucket/user/test_user/test.csv",
-      file_type="csv",
-      import_type="remote",
-      sql_dialect="hive",
-      has_header=True,
-      field_separator=",",
-      quote_char='"',
-      record_separator="\n",
-      fs=mock_fs,
-    )
+    mock_preview_file.assert_called_once_with(data=mock_schema, fs=mock_fs)
 
   @patch("desktop.lib.importer.api.PreviewFileSerializer")
   def test_preview_file_invalid_data(self, mock_serializer_class):
@@ -406,19 +393,18 @@ class TestPreviewFileAPI:
   @patch("desktop.lib.importer.api.PreviewFileSerializer")
   @patch("desktop.lib.importer.api.operations.preview_file")
   def test_preview_file_value_error(self, mock_preview_file, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={
-        "file_path": "/path/to/test.csv",
-        "file_type": "csv",
-        "import_type": "local",
-        "sql_dialect": "hive",
-        "has_header": True,
-        "field_separator": ",",
-        "quote_char": '"',
-        "record_separator": "\n",
-      },
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "local"
+    mock_schema.sql_dialect = "hive"
+    mock_schema.has_header = True
+    mock_schema.field_separator = ","
+    mock_schema.quote_char = '"'
+    mock_schema.record_separator = "\n"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_preview_file.side_effect = ValueError("File does not exist")
@@ -436,19 +422,18 @@ class TestPreviewFileAPI:
   @patch("desktop.lib.importer.api.PreviewFileSerializer")
   @patch("desktop.lib.importer.api.operations.preview_file")
   def test_preview_file_operation_error(self, mock_preview_file, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={
-        "file_path": "/path/to/test.csv",
-        "file_type": "csv",
-        "import_type": "local",
-        "sql_dialect": "hive",
-        "has_header": True,
-        "field_separator": ",",
-        "quote_char": '"',
-        "record_separator": "\n",
-      },
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "local"
+    mock_schema.sql_dialect = "hive"
+    mock_schema.has_header = True
+    mock_schema.field_separator = ","
+    mock_schema.quote_char = '"'
+    mock_schema.record_separator = "\n"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_preview_file.side_effect = RuntimeError("Operation error")
@@ -468,9 +453,14 @@ class TestGuessFileHeaderAPI:
   @patch("desktop.lib.importer.api.GuessFileHeaderSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_header")
   def test_guess_csv_file_header_success(self, mock_guess_file_header, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "/path/to/test.csv", "file_type": "csv", "import_type": "local"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "local"
+    mock_schema.sheet_name = None
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_header.return_value = True
@@ -484,17 +474,19 @@ class TestGuessFileHeaderAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"has_header": True}
-    mock_guess_file_header.assert_called_once_with(
-      file_path="/path/to/test.csv", file_type="csv", import_type="local", sheet_name=None, fs=None
-    )
+    mock_guess_file_header.assert_called_once_with(data=mock_schema, fs=None)
 
   @patch("desktop.lib.importer.api.GuessFileHeaderSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_header")
   def test_guess_excel_file_header_success(self, mock_guess_file_header, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={"file_path": "/path/to/test.xlsx", "file_type": "excel", "import_type": "local", "sheet_name": "Sheet1"},
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.xlsx"
+    mock_schema.file_type = "excel"
+    mock_schema.import_type = "local"
+    mock_schema.sheet_name = "Sheet1"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_header.return_value = True
@@ -508,17 +500,19 @@ class TestGuessFileHeaderAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"has_header": True}
-    mock_guess_file_header.assert_called_once_with(
-      file_path="/path/to/test.xlsx", file_type="excel", import_type="local", sheet_name="Sheet1", fs=None
-    )
+    mock_guess_file_header.assert_called_once_with(data=mock_schema, fs=None)
 
   @patch("desktop.lib.importer.api.GuessFileHeaderSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_header")
   def test_guess_remote_csv_file_header_success(self, mock_guess_file_header, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={"file_path": "s3a://bucket/user/test_user/test.csv", "file_type": "csv", "import_type": "remote"},
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "s3a://bucket/user/test_user/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "remote"
+    mock_schema.sheet_name = None
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_header.return_value = True
@@ -533,17 +527,19 @@ class TestGuessFileHeaderAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"has_header": True}
-    mock_guess_file_header.assert_called_once_with(
-      file_path="s3a://bucket/user/test_user/test.csv", file_type="csv", import_type="remote", sheet_name=None, fs=mock_fs
-    )
+    mock_guess_file_header.assert_called_once_with(data=mock_schema, fs=mock_fs)
 
   @patch("desktop.lib.importer.api.GuessFileHeaderSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_header")
   def test_guess_remote_csv_file_header_success_false_value(self, mock_guess_file_header, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True),
-      validated_data={"file_path": "s3a://bucket/user/test_user/test.csv", "file_type": "csv", "import_type": "remote"},
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "s3a://bucket/user/test_user/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "remote"
+    mock_schema.sheet_name = None
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_header.return_value = False
@@ -558,9 +554,7 @@ class TestGuessFileHeaderAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"has_header": False}
-    mock_guess_file_header.assert_called_once_with(
-      file_path="s3a://bucket/user/test_user/test.csv", file_type="csv", import_type="remote", sheet_name=None, fs=mock_fs
-    )
+    mock_guess_file_header.assert_called_once_with(data=mock_schema, fs=mock_fs)
 
   @patch("desktop.lib.importer.api.GuessFileHeaderSerializer")
   def test_guess_file_header_invalid_data(self, mock_serializer_class):
@@ -579,9 +573,14 @@ class TestGuessFileHeaderAPI:
   @patch("desktop.lib.importer.api.GuessFileHeaderSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_header")
   def test_guess_file_header_value_error(self, mock_guess_file_header, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "/path/to/test.csv", "file_type": "csv", "import_type": "local"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "local"
+    mock_schema.sheet_name = None
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_header.side_effect = ValueError("File does not exist")
@@ -599,9 +598,14 @@ class TestGuessFileHeaderAPI:
   @patch("desktop.lib.importer.api.GuessFileHeaderSerializer")
   @patch("desktop.lib.importer.api.operations.guess_file_header")
   def test_guess_file_header_operation_error(self, mock_guess_file_header, mock_serializer_class):
-    mock_serializer = MagicMock(
-      is_valid=MagicMock(return_value=True), validated_data={"file_path": "/path/to/test.csv", "file_type": "csv", "import_type": "local"}
-    )
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.file_path = "/path/to/test.csv"
+    mock_schema.file_type = "csv"
+    mock_schema.import_type = "local"
+    mock_schema.sheet_name = None
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_guess_file_header.side_effect = RuntimeError("Operation error")
@@ -621,7 +625,11 @@ class TestSqlTypeMappingAPI:
   @patch("desktop.lib.importer.api.SqlTypeMapperSerializer")
   @patch("desktop.lib.importer.api.operations.get_sql_type_mapping")
   def test_get_sql_type_mapping_success(self, mock_get_sql_type_mapping, mock_serializer_class):
-    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data={"sql_dialect": "hive"})
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.sql_dialect = "hive"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_get_sql_type_mapping.return_value = {"Int32": "INT", "Utf8": "STRING", "Float64": "DOUBLE", "Boolean": "BOOLEAN"}
@@ -634,7 +642,7 @@ class TestSqlTypeMappingAPI:
 
     assert response.status_code == status.HTTP_200_OK
     assert response.data == {"Int32": "INT", "Utf8": "STRING", "Float64": "DOUBLE", "Boolean": "BOOLEAN"}
-    mock_get_sql_type_mapping.assert_called_once_with("hive")
+    mock_get_sql_type_mapping.assert_called_once_with(mock_schema)
 
   @patch("desktop.lib.importer.api.SqlTypeMapperSerializer")
   def test_get_sql_type_mapping_invalid_dialect(self, mock_serializer_class):
@@ -653,7 +661,11 @@ class TestSqlTypeMappingAPI:
   @patch("desktop.lib.importer.api.SqlTypeMapperSerializer")
   @patch("desktop.lib.importer.api.operations.get_sql_type_mapping")
   def test_get_sql_type_mapping_value_error(self, mock_get_sql_type_mapping, mock_serializer_class):
-    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data={"sql_dialect": "hive"})
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.sql_dialect = "hive"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_get_sql_type_mapping.side_effect = ValueError("Unsupported dialect")
@@ -670,7 +682,11 @@ class TestSqlTypeMappingAPI:
   @patch("desktop.lib.importer.api.SqlTypeMapperSerializer")
   @patch("desktop.lib.importer.api.operations.get_sql_type_mapping")
   def test_get_sql_type_mapping_operation_error(self, mock_get_sql_type_mapping, mock_serializer_class):
-    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data={"sql_dialect": "hive"})
+    # Create a mock schema object that will be returned by the serializer
+    mock_schema = MagicMock()
+    mock_schema.sql_dialect = "hive"
+
+    mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=mock_schema)
     mock_serializer_class.return_value = mock_serializer
 
     mock_get_sql_type_mapping.side_effect = RuntimeError("Operation error")
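
The updated tests stand in for the schema objects with plain `MagicMock` instances whose attributes are assigned by hand. Since Pydantic models are cheap to construct, an alternative (not used in this commit, and assuming the schema takes exactly these fields) is to hand the mocked serializer a real schema instance:

```python
# Optional alternative to the hand-built MagicMock schema objects above.
from unittest.mock import MagicMock

from desktop.lib.importer.schemas import GuessFileMetadataSchema

schema = GuessFileMetadataSchema(file_path="/path/to/test.csv", import_type="local")
mock_serializer = MagicMock(is_valid=MagicMock(return_value=True), validated_data=schema)
```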

+ 97 - 169
desktop/core/src/desktop/lib/importer/operations.py

@@ -14,19 +14,26 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import codecs
 import csv
 import logging
 import os
+import shutil
 import tempfile
-import uuid
 import xml.etree.ElementTree as ET
 import zipfile
 from io import BytesIO
-from typing import Any, BinaryIO, Dict, List, Optional, Union
+from typing import Any, BinaryIO, Dict, List, Union
 
 import polars as pl
 
+from desktop.lib.importer.schemas import (
+  GuessFileHeaderSchema,
+  GuessFileMetadataSchema,
+  LocalFileUploadSchema,
+  PreviewFileSchema,
+  SqlTypeMapperSchema,
+)
+
 LOG = logging.getLogger()
 
 try:
@@ -106,7 +113,7 @@ SQL_TYPE_DIALECT_OVERRIDES = {
 }
 
 
-def local_file_upload(upload_file, username: str) -> Dict[str, str]:
+def local_file_upload(data: LocalFileUploadSchema, username: str) -> Dict[str, str]:
   """Uploads a local file to a temporary directory with a unique filename.
 
   This function takes an uploaded file and username, generates a unique filename,
@@ -114,7 +121,7 @@ def local_file_upload(upload_file, username: str) -> Dict[str, str]:
   the username, a unique ID, and a sanitized version of the original filename.
 
   Args:
-    upload_file: The uploaded file object from Django's file upload handling.
+    data: A Pydantic schema containing the file to upload.
     username: The username of the user uploading the file.
 
   Returns:
@@ -122,33 +129,32 @@ def local_file_upload(upload_file, username: str) -> Dict[str, str]:
       - file_path: The full path where the file was saved
 
   Raises:
-    ValueError: If upload_file or username is None/empty
+    ValueError: If username is None/empty
     Exception: If there are issues with file operations
 
   Example:
-    >>> result = upload_local_file(request.FILES["file"], "hue_user")
+    >>> result = upload_local_file(data, "hue_user")
     >>> print(result)
     {'file_path': '/tmp/hue_user_a1b2c3d4_myfile.txt'}
   """
-  if not upload_file:
-    raise ValueError("Upload file cannot be None or empty.")
-
   if not username:
     raise ValueError("Username cannot be None or empty.")
 
-  # Generate a unique filename
-  unique_id = uuid.uuid4().hex[:8]
-  filename = f"{username}_{unique_id}_{upload_file.name}"
+  upload_file = data.file
+  sanitized_filename = os.path.basename(data.filename)
 
-  # Create a temporary file with our generated filename
-  temp_dir = tempfile.gettempdir()
-  destination_path = os.path.join(temp_dir, filename)
+  destination_file = tempfile.NamedTemporaryFile(
+    mode="wb",
+    delete=False,
+    prefix=f"{username}_",
+    suffix=f"_{sanitized_filename}",
+    dir=tempfile.gettempdir(),
+  )
+  destination_path = destination_file.name
 
   try:
-    # Simply write the file content to temporary location
-    with open(destination_path, "wb") as destination:
-      for chunk in upload_file.chunks():
-        destination.write(chunk)
+    with destination_file:
+      shutil.copyfileobj(upload_file, destination_file)
 
     return {"file_path": destination_path}
 
@@ -159,12 +165,11 @@ def local_file_upload(upload_file, username: str) -> Dict[str, str]:
     raise e
 
 
-def guess_file_metadata(file_path: str, import_type: str, fs=None) -> Dict[str, Any]:
+def guess_file_metadata(data: GuessFileMetadataSchema, fs=None) -> Dict[str, Any]:
   """Guess the metadata of a file based on its content or extension.
 
   Args:
-    file_path: Path to the file to analyze
-    import_type: Type of import ('local' or 'remote')
+    data: A Pydantic schema containing file_path and import_type.
     fs: File system object for remote files (default: None)
 
   Returns:
@@ -179,23 +184,17 @@ def guess_file_metadata(file_path: str, import_type: str, fs=None) -> Dict[str,
     ValueError: If the file does not exist or parameters are invalid
     Exception: For various file processing errors
   """
-  if not file_path:
-    raise ValueError("File path cannot be empty")
-
-  if import_type not in ["local", "remote"]:
-    raise ValueError(f"Unsupported import type: {import_type}")
-
-  if import_type == "remote" and fs is None:
+  if data.import_type == "remote" and fs is None:
     raise ValueError("File system object is required for remote import type")
 
   # Check if file exists based on import type
-  if import_type == "local" and not os.path.exists(file_path):
-    raise ValueError(f"Local file does not exist: {file_path}")
-  elif import_type == "remote" and fs and not fs.exists(file_path):
-    raise ValueError(f"Remote file does not exist: {file_path}")
+  if data.import_type == "local" and not os.path.exists(data.file_path):
+    raise ValueError(f"Local file does not exist: {data.file_path}")
+  elif data.import_type == "remote" and fs and not fs.exists(data.file_path):
+    raise ValueError(f"Remote file does not exist: {data.file_path}")
 
-  error_occurred = False
-  fh = open(file_path, "rb") if import_type == "local" else fs.open(file_path, "rb")
+  should_cleanup = False
+  fh = open(data.file_path, "rb") if data.import_type == "local" else fs.open(data.file_path, "rb")
 
   try:
     sample = fh.read(16 * 1024)  # Read 16 KiB sample
@@ -217,45 +216,25 @@ def guess_file_metadata(file_path: str, import_type: str, fs=None) -> Dict[str,
     return metadata
 
   except Exception as e:
-    error_occurred = True
+    should_cleanup = True
     LOG.exception(f"Error guessing file metadata: {e}", exc_info=True)
     raise e
 
   finally:
     fh.close()
-    if import_type == "local" and error_occurred and os.path.exists(file_path):
-      LOG.debug(f"Due to error in guess_file_metadata, cleaning up uploaded local file: {file_path}")
-      os.remove(file_path)
-
-
-def preview_file(
-  file_path: str,
-  file_type: str,
-  import_type: str,
-  sql_dialect: str,
-  has_header: bool = False,
-  sheet_name: Optional[str] = None,
-  field_separator: Optional[str] = ",",
-  quote_char: Optional[str] = '"',
-  record_separator: Optional[str] = "\n",
-  fs=None,
-  preview_rows: int = 50,
-) -> Dict[str, Any]:
+    if data.import_type == "local" and should_cleanup and os.path.exists(data.file_path):
+      LOG.debug(f"Due to error in guess_file_metadata, cleaning up uploaded local file: {data.file_path}")
+      os.remove(data.file_path)
+
+
+def preview_file(data: PreviewFileSchema, fs=None, preview_rows: int = 50) -> Dict[str, Any]:
   """Generate a preview of a file's content with column type mapping.
 
   This method reads a file and returns a preview of its contents, along with
   column information and metadata for creating tables or further processing.
 
   Args:
-    file_path: Path to the file to preview
-    file_type: Type of file ('excel', 'csv', 'tsv', 'delimiter_format')
-    import_type: Type of import ('local' or 'remote')
-    sql_dialect: SQL dialect for type mapping ('hive', 'impala', etc.)
-    has_header: Whether the file has a header row or not
-    sheet_name: Sheet name for Excel files (required for Excel)
-    field_separator: Field separator character for delimited files
-    quote_char: Quote character for delimited files
-    record_separator: Record separator for delimited files
+    data: A Pydantic schema with all the required parameters.
     fs: File system object for remote files (default: None)
     preview_rows: Number of rows to include in preview (default: 50)
 
@@ -269,65 +248,37 @@ def preview_file(
     ValueError: If the file does not exist or parameters are invalid
     Exception: For various file processing errors
   """
-  if not file_path:
-    raise ValueError("File path cannot be empty")
-
-  if sql_dialect.lower() not in ["hive", "impala", "trino", "phoenix", "sparksql"]:
-    raise ValueError(f"Unsupported SQL dialect: {sql_dialect}")
-
-  if file_type not in ["excel", "csv", "tsv", "delimiter_format"]:
-    raise ValueError(f"Unsupported file type: {file_type}")
-
-  if import_type not in ["local", "remote"]:
-    raise ValueError(f"Unsupported import type: {import_type}")
-
-  if import_type == "remote" and fs is None:
+  if data.import_type == "remote" and fs is None:
     raise ValueError("File system object is required for remote import type")
 
   # Check if file exists based on import type
-  if import_type == "local" and not os.path.exists(file_path):
-    raise ValueError(f"Local file does not exist: {file_path}")
-  elif import_type == "remote" and fs and not fs.exists(file_path):
-    raise ValueError(f"Remote file does not exist: {file_path}")
+  if data.import_type == "local" and not os.path.exists(data.file_path):
+    raise ValueError(f"Local file does not exist: {data.file_path}")
+  elif data.import_type == "remote" and fs and not fs.exists(data.file_path):
+    raise ValueError(f"Remote file does not exist: {data.file_path}")
 
 
-  error_occurred = False
-  fh = open(file_path, "rb") if import_type == "local" else fs.open(file_path, "rb")
+  should_cleanup = False
+  fh = open(data.file_path, "rb") if data.import_type == "local" else fs.open(data.file_path, "rb")
 
 
   try:
   try:
-    if file_type == "excel":
-      if not sheet_name:
-        raise ValueError("Sheet name is required for Excel files.")
-
-      preview = _preview_excel_file(fh, file_type, sheet_name, sql_dialect, has_header, preview_rows)
-    elif file_type in ["csv", "tsv", "delimiter_format"]:
-      # Process escapable characters
-      try:
-        if field_separator:
-          field_separator = codecs.decode(field_separator, "unicode_escape")
-        if quote_char:
-          quote_char = codecs.decode(quote_char, "unicode_escape")
-        if record_separator:
-          record_separator = codecs.decode(record_separator, "unicode_escape")
-
-      except Exception as e:
-        LOG.exception(f"Error decoding escape characters: {e}", exc_info=True)
-        raise ValueError("Invalid escape characters in field_separator, quote_char, or record_separator.")
-
-      preview = _preview_delimited_file(fh, file_type, field_separator, quote_char, record_separator, sql_dialect, has_header, preview_rows)
+    if data.file_type == "excel":
+      preview = _preview_excel_file(fh, data, preview_rows)
+    elif data.file_type in ["csv", "tsv", "delimiter_format"]:
+      preview = _preview_delimited_file(fh, data, preview_rows)
     else:
     else:
-      raise ValueError(f"Unsupported file type: {file_type}")
+      raise ValueError(f"Unsupported file type: {data.file_type}")
 
 
     return preview
     return preview
   except Exception as e:
   except Exception as e:
-    error_occurred = True
+    should_cleanup = True
     LOG.exception(f"Error previewing file: {e}", exc_info=True)
     LOG.exception(f"Error previewing file: {e}", exc_info=True)
     raise e
     raise e
 
 
   finally:
   finally:
     fh.close()
     fh.close()
-    if import_type == "local" and error_occurred and os.path.exists(file_path):
-      LOG.debug(f"Due to error in preview_file, cleaning up uploaded local file: {file_path}")
-      os.remove(file_path)
+    if data.import_type == "local" and should_cleanup and os.path.exists(data.file_path):
+      LOG.debug(f"Due to error in preview_file, cleaning up uploaded local file: {data.file_path}")
+      os.remove(data.file_path)
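A rough sketch of driving the schema-based preview_file, again with a hypothetical local CSV path; the separator defaults noted in the comments are the ones applied by PreviewFileSchema's validators:

  from desktop.lib.importer import operations
  from desktop.lib.importer.schemas import PreviewFileSchema

  schema = PreviewFileSchema(
    file_path="/tmp/sales.csv",  # hypothetical path, e.g. produced by local_file_upload()
    file_type="csv",
    import_type="local",
    sql_dialect="hive",
    has_header=True,
    # field_separator, quote_char and record_separator default to comma, double quote and newline for CSV
  )

  preview = operations.preview_file(data=schema, preview_rows=10)
  # preview["columns"] is a list of {"name": ..., "type": ...} dicts,
  # preview["preview_data"] maps column names to lists of values.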
 
 
 
 
 def _detect_file_type(file_sample: bytes) -> str:
 def _detect_file_type(file_sample: bytes) -> str:
@@ -476,17 +427,12 @@ def _get_delimited_metadata(file_sample: Union[bytes, str], file_type: str) -> D
   }
   }
 
 
 
 
-def _preview_excel_file(
-  fh: BinaryIO, file_type: str, sheet_name: str, dialect: str, has_header: bool, preview_rows: int = 50
-) -> Dict[str, Any]:
+def _preview_excel_file(fh: BinaryIO, data: PreviewFileSchema, preview_rows: int = 50) -> Dict[str, Any]:
   """Preview an Excel file (.xlsx, .xls)
   """Preview an Excel file (.xlsx, .xls)
 
 
   Args:
   Args:
     fh: File handle for the Excel file
     fh: File handle for the Excel file
-    file_type: Type of file ('excel')
-    sheet_name: Name of the sheet to preview
-    dialect: SQL dialect for type mapping
-    has_header: Whether the file has a header row or not
+    data: PreviewFileSchema carrying the sheet name, SQL dialect and header flag to apply.
     preview_rows: Number of rows to include in preview (default: 50)
     preview_rows: Number of rows to include in preview (default: 50)
 
 
   Returns:
   Returns:
@@ -502,12 +448,16 @@ def _preview_excel_file(
     fh.seek(0)
     fh.seek(0)
 
 
     df = pl.read_excel(
     df = pl.read_excel(
-      BytesIO(fh.read()), sheet_name=sheet_name, has_header=has_header, read_options={"n_rows": preview_rows}, infer_schema_length=10000
+      BytesIO(fh.read()),
+      sheet_name=data.sheet_name,
+      has_header=data.has_header,
+      read_options={"n_rows": preview_rows},
+      infer_schema_length=10000,
     )
     )
 
 
     # Return empty result if the df is empty
     # Return empty result if the df is empty
     if df.height == 0:
     if df.height == 0:
-      return {"type": file_type, "columns": [], "preview_data": {}}
+      return {"type": data.file_type, "columns": [], "preview_data": {}}
 
 
     schema = df.schema
     schema = df.schema
     preview_data = df.to_dict(as_series=False)
     preview_data = df.to_dict(as_series=False)
@@ -516,11 +466,11 @@ def _preview_excel_file(
     columns = []
     columns = []
     for col in df.columns:
     for col in df.columns:
       col_type = str(schema[col])
       col_type = str(schema[col])
-      sql_type = _map_polars_dtype_to_sql_type(dialect, col_type)
+      sql_type = _map_polars_dtype_to_sql_type(data.sql_dialect, col_type)
 
 
       columns.append({"name": col, "type": sql_type})
       columns.append({"name": col, "type": sql_type})
 
 
-    result = {"type": file_type, "columns": columns, "preview_data": preview_data}
+    result = {"type": data.file_type, "columns": columns, "preview_data": preview_data}
 
 
     return result
     return result
 
 
@@ -533,24 +483,14 @@ def _preview_excel_file(
 
 
 def _preview_delimited_file(
 def _preview_delimited_file(
   fh: BinaryIO,
   fh: BinaryIO,
-  file_type: str,
-  field_separator: str,
-  quote_char: str,
-  record_separator: str,
-  dialect: str,
-  has_header: bool,
+  data: PreviewFileSchema,
   preview_rows: int = 50,
   preview_rows: int = 50,
 ) -> Dict[str, Any]:
 ) -> Dict[str, Any]:
   """Preview a delimited file (CSV, TSV, etc.)
   """Preview a delimited file (CSV, TSV, etc.)
 
 
   Args:
   Args:
     fh: File handle for the delimited file
     fh: File handle for the delimited file
-    file_type: Type of file ('csv', 'tsv', 'delimiter_format')
-    field_separator: Field separator character
-    quote_char: Quote character
-    record_separator: Record separator character
-    dialect: SQL dialect for type mapping
-    has_header: Whether the file has a header row or not
+    data: PreviewFileSchema carrying the separators, quote character, SQL dialect and header flag.
     preview_rows: Number of rows to include in preview (default: 50)
     preview_rows: Number of rows to include in preview (default: 50)
 
 
   Returns:
   Returns:
@@ -567,10 +507,10 @@ def _preview_delimited_file(
 
 
     df = pl.read_csv(
     df = pl.read_csv(
       BytesIO(fh.read()),
       BytesIO(fh.read()),
-      separator=field_separator,
-      quote_char=quote_char,
-      eol_char="\n" if record_separator == "\r\n" else record_separator,
-      has_header=has_header,
+      separator=data.field_separator,
+      quote_char=data.quote_char,
+      eol_char=data.record_separator,
+      has_header=data.has_header,
       infer_schema_length=10000,
       infer_schema_length=10000,
       n_rows=preview_rows,
       n_rows=preview_rows,
       ignore_errors=True,
       ignore_errors=True,
@@ -578,7 +518,7 @@ def _preview_delimited_file(
 
 
     # Return empty result if the df is empty
     # Return empty result if the df is empty
     if df.height == 0:
     if df.height == 0:
-      return {"type": file_type, "columns": [], "preview_data": {}}
+      return {"type": data.file_type, "columns": [], "preview_data": {}}
 
 
     schema = df.schema
     schema = df.schema
     preview_data = df.to_dict(as_series=False)
     preview_data = df.to_dict(as_series=False)
@@ -587,11 +527,11 @@ def _preview_delimited_file(
     columns = []
     columns = []
     for col in df.columns:
     for col in df.columns:
       col_type = str(schema[col])
       col_type = str(schema[col])
-      sql_type = _map_polars_dtype_to_sql_type(dialect, col_type)
+      sql_type = _map_polars_dtype_to_sql_type(data.sql_dialect, col_type)
 
 
       columns.append({"name": col, "type": sql_type})
       columns.append({"name": col, "type": sql_type})
 
 
-    result = {"type": file_type, "columns": columns, "preview_data": preview_data}
+    result = {"type": data.file_type, "columns": columns, "preview_data": preview_data}
 
 
     return result
     return result
 
 
@@ -602,17 +542,14 @@ def _preview_delimited_file(
     raise Exception(message)
     raise Exception(message)
 
 
 
 
-def guess_file_header(file_path: str, file_type: str, import_type: str, sheet_name: Optional[str] = None, fs=None) -> bool:
+def guess_file_header(data: GuessFileHeaderSchema, fs=None) -> bool:
   """Guess whether a file has a header row.
   """Guess whether a file has a header row.
 
 
   This function analyzes a file to determine if it contains a header row based on the
   This function analyzes a file to determine if it contains a header row based on the
   content pattern. It works for both Excel files and delimited text files (CSV, TSV, etc.).
   content pattern. It works for both Excel files and delimited text files (CSV, TSV, etc.).
 
 
   Args:
   Args:
-    file_path: Path to the file to analyze
-    file_type: Type of file ('excel', 'csv', 'tsv', 'delimiter_format')
-    import_type: Type of import ('local' or 'remote')
-    sheet_name: Sheet name for Excel files (required for Excel)
+    data: GuessFileHeaderSchema carrying the file path, file/import type and optional sheet name.
     fs: File system object for remote files (default: None)
     fs: File system object for remote files (default: None)
 
 
   Returns:
   Returns:
@@ -622,39 +559,30 @@ def guess_file_header(file_path: str, file_type: str, import_type: str, sheet_na
     ValueError: If the file does not exist or parameters are invalid
     ValueError: If the file does not exist or parameters are invalid
     Exception: For various file processing errors
     Exception: For various file processing errors
   """
   """
-  if not file_path:
-    raise ValueError("File path cannot be empty")
-
-  if file_type not in ["excel", "csv", "tsv", "delimiter_format"]:
-    raise ValueError(f"Unsupported file type: {file_type}")
-
-  if import_type not in ["local", "remote"]:
-    raise ValueError(f"Unsupported import type: {import_type}")
-
-  if import_type == "remote" and fs is None:
+  if data.import_type == "remote" and fs is None:
     raise ValueError("File system object is required for remote import type")
     raise ValueError("File system object is required for remote import type")
 
 
   # Check if file exists based on import type
   # Check if file exists based on import type
-  if import_type == "local" and not os.path.exists(file_path):
-    raise ValueError(f"Local file does not exist: {file_path}")
-  elif import_type == "remote" and fs and not fs.exists(file_path):
-    raise ValueError(f"Remote file does not exist: {file_path}")
+  if data.import_type == "local" and not os.path.exists(data.file_path):
+    raise ValueError(f"Local file does not exist: {data.file_path}")
+  elif data.import_type == "remote" and fs and not fs.exists(data.file_path):
+    raise ValueError(f"Remote file does not exist: {data.file_path}")
 
 
-  fh = open(file_path, "rb") if import_type == "local" else fs.open(file_path, "rb")
+  fh = open(data.file_path, "rb") if data.import_type == "local" else fs.open(data.file_path, "rb")
 
 
   has_header = False
   has_header = False
 
 
   try:
   try:
-    if file_type == "excel":
-      if not sheet_name:
-        raise ValueError("Sheet name is required for Excel files.")
-
+    if data.file_type == "excel":
       # Convert excel sample to CSV for header detection
       # Convert excel sample to CSV for header detection
       try:
       try:
         fh.seek(0)
         fh.seek(0)
 
 
         csv_snippet = pl.read_excel(
         csv_snippet = pl.read_excel(
-          source=BytesIO(fh.read()), sheet_name=sheet_name, infer_schema_length=10000, read_options={"n_rows": 20}
+          source=BytesIO(fh.read()),
+          sheet_name=data.sheet_name,
+          infer_schema_length=10000,
+          read_options={"n_rows": 20},
         ).write_csv(file=None)
         ).write_csv(file=None)
 
 
         if isinstance(csv_snippet, bytes):
         if isinstance(csv_snippet, bytes):
@@ -669,7 +597,7 @@ def guess_file_header(file_path: str, file_type: str, import_type: str, sheet_na
 
 
         raise Exception(message)
         raise Exception(message)
 
 
-    elif file_type in ["csv", "tsv", "delimiter_format"]:
+    elif data.file_type in ["csv", "tsv", "delimiter_format"]:
       try:
       try:
         # Reset file position
         # Reset file position
         fh.seek(0)
         fh.seek(0)
@@ -692,14 +620,14 @@ def guess_file_header(file_path: str, file_type: str, import_type: str, sheet_na
     fh.close()
     fh.close()
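As a sketch, calling guess_file_header for an Excel file now looks roughly like this (hypothetical path and sheet name; the schema itself enforces that sheet_name is present for Excel):

  from desktop.lib.importer import operations
  from desktop.lib.importer.schemas import GuessFileHeaderSchema

  schema = GuessFileHeaderSchema(
    file_path="/tmp/report.xlsx",  # hypothetical path
    file_type="excel",
    import_type="local",
    sheet_name="Sheet1",
  )

  has_header = operations.guess_file_header(data=schema)  # returns a bool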
 
 
 
 
-def get_sql_type_mapping(dialect: str) -> Dict[str, str]:
+def get_sql_type_mapping(data: SqlTypeMapperSchema) -> Dict[str, str]:
   """Get all type mappings from Polars dtypes to SQL types for a given SQL dialect.
   """Get all type mappings from Polars dtypes to SQL types for a given SQL dialect.
 
 
   This function returns a dictionary mapping of all Polars data types to their
   This function returns a dictionary mapping of all Polars data types to their
   corresponding SQL types for a specific dialect.
   corresponding SQL types for a specific dialect.
 
 
   Args:
   Args:
-    dialect: One of "hive", "impala", "trino", "phoenix", "sparksql".
+    data: SqlTypeMapperSchema specifying the target SQL dialect ("hive", "impala", "trino", "phoenix" or "sparksql").
 
 
   Returns:
   Returns:
     A dict mapping Polars dtype names to SQL type names.
     A dict mapping Polars dtype names to SQL type names.
@@ -707,9 +635,9 @@ def get_sql_type_mapping(dialect: str) -> Dict[str, str]:
   Raises:
   Raises:
     ValueError: If the dialect is not supported.
     ValueError: If the dialect is not supported.
   """
   """
-  dl = dialect.lower()
+  dl = data.sql_dialect.lower()
   if dl not in SQL_TYPE_DIALECT_OVERRIDES:
   if dl not in SQL_TYPE_DIALECT_OVERRIDES:
-    raise ValueError(f"Unsupported dialect: {dialect}")
+    raise ValueError(f"Unsupported dialect: {data.sql_dialect}")
 
 
   # Merge base_map and overrides[dl] into a new dict, giving precedence to any overlapping keys in overrides[dl]
   # Merge base_map and overrides[dl] into a new dict, giving precedence to any overlapping keys in overrides[dl]
   return {**SQL_TYPE_BASE_MAP, **SQL_TYPE_DIALECT_OVERRIDES[dl]}
   return {**SQL_TYPE_BASE_MAP, **SQL_TYPE_DIALECT_OVERRIDES[dl]}
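A small illustration of the dialect overrides, using values that the tests below also assert:

  from desktop.lib.importer import operations
  from desktop.lib.importer.schemas import SqlTypeMapperSchema

  hive_map = operations.get_sql_type_mapping(SqlTypeMapperSchema(sql_dialect="hive"))
  trino_map = operations.get_sql_type_mapping(SqlTypeMapperSchema(sql_dialect="trino"))

  assert hive_map["Utf8"] == "STRING"
  assert trino_map["Utf8"] == "VARCHAR"  # Trino overrides the base mapping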
@@ -728,7 +656,7 @@ def _map_polars_dtype_to_sql_type(dialect: str, polars_type: str) -> str:
   Raises:
   Raises:
     ValueError: If the dialect or polars_type is not supported.
     ValueError: If the dialect or polars_type is not supported.
   """
   """
-  mapping = get_sql_type_mapping(dialect)
+  mapping = get_sql_type_mapping(SqlTypeMapperSchema(sql_dialect=dialect))
 
 
   if polars_type not in mapping:
   if polars_type not in mapping:
     raise ValueError(f"No mapping for Polars dtype {polars_type} in dialect {dialect}")
     raise ValueError(f"No mapping for Polars dtype {polars_type} in dialect {dialect}")
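For context, a hypothetical glue function (not part of the change) chaining the upload and preview operations the way the API layer does, with django_file standing in for any Django UploadedFile:

  from desktop.lib.importer import operations
  from desktop.lib.importer.schemas import LocalFileUploadSchema, PreviewFileSchema

  def upload_and_preview(django_file, username):
    """Hypothetical helper chaining upload and preview for a local CSV."""
    upload = LocalFileUploadSchema(file=django_file, filename=django_file.name, filesize=django_file.size)
    uploaded = operations.local_file_upload(upload, username)

    preview_schema = PreviewFileSchema(
      file_path=uploaded["file_path"],
      file_type="csv",
      import_type="local",
      sql_dialect="hive",
      has_header=True,
    )
    return operations.preview_file(data=preview_schema)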

+ 401 - 162
desktop/core/src/desktop/lib/importer/operations_tests.py

@@ -18,65 +18,139 @@
 import os
 import os
 import tempfile
 import tempfile
 import zipfile
 import zipfile
-from unittest.mock import MagicMock, mock_open, patch
+from unittest.mock import MagicMock, patch
 
 
 import pytest
 import pytest
 from django.core.files.uploadedfile import SimpleUploadedFile
 from django.core.files.uploadedfile import SimpleUploadedFile
 
 
+from desktop.conf import IMPORTER
 from desktop.lib.importer import operations
 from desktop.lib.importer import operations
+from desktop.lib.importer.schemas import (
+  GuessFileHeaderSchema,
+  GuessFileMetadataSchema,
+  LocalFileUploadSchema,
+  PreviewFileSchema,
+  SqlTypeMapperSchema,
+)
 
 
 
 
 class TestLocalFileUpload:
 class TestLocalFileUpload:
-  @patch("uuid.uuid4")
-  def test_local_file_upload_success(self, mock_uuid):
-    # Mock uuid to get a predictable filename
-    mock_uuid.return_value.hex = "12345678"
+  def test_local_file_upload_success(self):
+    resets = [
+      IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.set_for_testing([".exe", ".bat"]),
+      IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.set_for_testing(10 * 1024 * 1024),  # 10 MiB limit
+    ]
 
 
-    test_file = SimpleUploadedFile(name="test_file.csv", content=b"header1,header2\nvalue1,value2", content_type="text/csv")
+    try:
+      test_file = SimpleUploadedFile(name="test_file.csv", content=b"header1,header2\nvalue1,value2", content_type="text/csv")
 
 
-    result = operations.local_file_upload(test_file, "test_user")
+      # Create schema object
+      schema = LocalFileUploadSchema(file=test_file, filename="test_file.csv", filesize=test_file.size)
 
 
-    # Get the expected file path
-    temp_dir = tempfile.gettempdir()
-    expected_path = os.path.join(temp_dir, "test_user_12345678_test_file.csv")
+      result = operations.local_file_upload(schema, "test_user")
 
 
-    try:
       assert "file_path" in result
       assert "file_path" in result
-      assert result["file_path"] == expected_path
+      file_path = result["file_path"]
+
+      # Verify the file path contains expected components
+      assert "test_user_" in file_path
+      assert "_test_file.csv" in file_path
+      assert file_path.startswith(tempfile.gettempdir())
 
 
       # Verify the file was created and has the right content
       # Verify the file was created and has the right content
-      assert os.path.exists(expected_path)
-      with open(expected_path, "rb") as f:
+      assert os.path.exists(file_path)
+      with open(file_path, "rb") as f:
         assert f.read() == b"header1,header2\nvalue1,value2"
         assert f.read() == b"header1,header2\nvalue1,value2"
 
 
     finally:
     finally:
-      # Clean up the file
-      if os.path.exists(expected_path):
-        os.remove(expected_path)
+      # Clean up in case assertion fails
+      if os.path.exists(result["file_path"]):
+        os.remove(result["file_path"])
 
 
-      assert not os.path.exists(expected_path), "Temporary file was not cleaned up properly"
+      for reset in resets:
+        reset()
 
 
-  def test_local_file_upload_none_file(self):
-    with pytest.raises(ValueError, match="Upload file cannot be None or empty."):
-      operations.local_file_upload(None, "test_user")
-
-  def test_local_file_upload_none_username(self):
+  def test_local_file_upload_empty_username(self):
     test_file = SimpleUploadedFile(name="test_file.csv", content=b"header1,header2\nvalue1,value2", content_type="text/csv")
     test_file = SimpleUploadedFile(name="test_file.csv", content=b"header1,header2\nvalue1,value2", content_type="text/csv")
 
 
+    # Create schema object
+    schema = LocalFileUploadSchema(file=test_file, filename="test_file.csv", filesize=test_file.size)
+
     with pytest.raises(ValueError, match="Username cannot be None or empty."):
     with pytest.raises(ValueError, match="Username cannot be None or empty."):
-      operations.local_file_upload(test_file, None)
+      operations.local_file_upload(schema, "")
+
+  @patch("tempfile.NamedTemporaryFile")
+  @patch("shutil.copyfileobj")
+  def test_local_file_upload_exception_handling_with_cleanup(self, mock_copyfileobj, mock_tempfile):
+    resets = [
+      IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.set_for_testing([".exe", ".bat"]),
+      IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.set_for_testing(10 * 1024 * 1024),  # 10 MiB limit
+    ]
 
 
-  @patch("os.path.join")
-  @patch("builtins.open", new_callable=mock_open)
-  def test_local_file_upload_exception_handling(self, mock_file_open, mock_join):
-    # Setup mocks to raise an exception when opening the file
-    mock_file_open.side_effect = IOError("Test IO Error")
-    mock_join.return_value = "/tmp/test_user_12345678_test_file.csv"
+    # Mock the temporary file
+    mock_file = MagicMock()
+    mock_file.name = "/tmp/test_user_12345678_test_file.csv"
+    mock_tempfile.return_value = mock_file
+    mock_file.__enter__.return_value = mock_file
+
+    # Make copyfileobj raise an exception
+    mock_copyfileobj.side_effect = IOError("Test IO Error")
 
 
     test_file = SimpleUploadedFile(name="test_file.csv", content=b"header1,header2\nvalue1,value2", content_type="text/csv")
     test_file = SimpleUploadedFile(name="test_file.csv", content=b"header1,header2\nvalue1,value2", content_type="text/csv")
 
 
-    with pytest.raises(Exception, match="Test IO Error"):
-      operations.local_file_upload(test_file, "test_user")
+    # Create schema object
+    schema = LocalFileUploadSchema(file=test_file, filename="test_file.csv", filesize=test_file.size)
+
+    # Create the temp file for testing cleanup
+    with open(mock_file.name, "w") as f:
+      f.write("temp")
+
+    try:
+      with pytest.raises(IOError, match="Test IO Error"):
+        operations.local_file_upload(schema, "test_user")
+
+      # Verify the file was cleaned up after the exception
+      assert not os.path.exists(mock_file.name), "Temporary file was not cleaned up after exception"
+
+    finally:
+      # Clean up in case assertion fails
+      if os.path.exists(mock_file.name):
+        os.remove(mock_file.name)
+
+      for reset in resets:
+        reset()
+
+  def test_local_file_upload_special_characters_in_filename(self):
+    resets = [
+      IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.set_for_testing([".exe", ".bat"]),
+      IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.set_for_testing(10 * 1024 * 1024),  # 10 MiB limit
+    ]
+
+    # Test with special characters in filename
+    test_file = SimpleUploadedFile(name="test file (with) [special] {chars} & symbols!.csv", content=b"data", content_type="text/csv")
+
+    # Create schema object
+    schema = LocalFileUploadSchema(file=test_file, filename="test file (with) [special] {chars} & symbols!.csv", filesize=test_file.size)
+
+    result = operations.local_file_upload(schema, "test_user")
+
+    try:
+      assert "file_path" in result
+      file_path = result["file_path"]
+
+      # Verify the file was created
+      assert os.path.exists(file_path)
+
+      # Verify the original filename, special characters included, is preserved in the generated path
+      assert "_test file (with) [special] {chars} & symbols!.csv" in file_path
+
+    finally:
+      # Clean up the file
+      if os.path.exists(result["file_path"]):
+        os.remove(result["file_path"])
+
+      for reset in resets:
+        reset()
 
 
 
 
 @pytest.mark.usefixtures("cleanup_temp_files")
 @pytest.mark.usefixtures("cleanup_temp_files")
@@ -107,7 +181,10 @@ class TestGuessFileMetadata:
     # Mock magic.from_buffer to return text/csv MIME type
     # Mock magic.from_buffer to return text/csv MIME type
     mock_magic.from_buffer.return_value = "text/plain"
     mock_magic.from_buffer.return_value = "text/plain"
 
 
-    result = operations.guess_file_metadata(temp_file.name, "local")
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path=temp_file.name, import_type="local")
+
+    result = operations.guess_file_metadata(data=schema)
 
 
     assert result == {
     assert result == {
       "type": "csv",
       "type": "csv",
@@ -130,7 +207,10 @@ class TestGuessFileMetadata:
     # Mock magic.from_buffer to return text/plain MIME type
     # Mock magic.from_buffer to return text/plain MIME type
     mock_magic.from_buffer.return_value = "text/plain"
     mock_magic.from_buffer.return_value = "text/plain"
 
 
-    result = operations.guess_file_metadata(temp_file.name, "local")
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path=temp_file.name, import_type="local")
+
+    result = operations.guess_file_metadata(data=schema)
 
 
     assert result == {
     assert result == {
       "type": "tsv",
       "type": "tsv",
@@ -165,7 +245,10 @@ class TestGuessFileMetadata:
     # Mock _get_sheet_names_xlsx to return sheet names
     # Mock _get_sheet_names_xlsx to return sheet names
     mock_get_sheet_names.return_value = ["Sheet1", "Sheet2", "Sheet3"]
     mock_get_sheet_names.return_value = ["Sheet1", "Sheet2", "Sheet3"]
 
 
-    result = operations.guess_file_metadata(temp_file.name, "local")
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path=temp_file.name, import_type="local")
+
+    result = operations.guess_file_metadata(data=schema)
 
 
     assert result == {
     assert result == {
       "type": "excel",
       "type": "excel",
@@ -184,21 +267,25 @@ class TestGuessFileMetadata:
     # Mock magic.from_buffer to return an unsupported MIME type
     # Mock magic.from_buffer to return an unsupported MIME type
     mock_magic.from_buffer.return_value = "application/octet-stream"
     mock_magic.from_buffer.return_value = "application/octet-stream"
 
 
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path=temp_file.name, import_type="local")
+
     with pytest.raises(ValueError, match="Unable to detect file format."):
     with pytest.raises(ValueError, match="Unable to detect file format."):
-      operations.guess_file_metadata(temp_file.name, "local")
+      operations.guess_file_metadata(data=schema)
 
 
   def test_guess_file_metadata_nonexistent_file(self):
   def test_guess_file_metadata_nonexistent_file(self):
-    file_path = "/path/to/nonexistent/file.csv"
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path="/path/to/nonexistent/file.csv", import_type="local")
 
 
     with pytest.raises(ValueError, match="Local file does not exist."):
     with pytest.raises(ValueError, match="Local file does not exist."):
-      operations.guess_file_metadata(file_path, "local")
+      operations.guess_file_metadata(data=schema)
 
 
   def test_guess_remote_file_metadata_no_fs(self):
   def test_guess_remote_file_metadata_no_fs(self):
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path="s3a://bucket/user/test_user/test.csv", import_type="remote")
+
     with pytest.raises(ValueError, match="File system object is required for remote import type"):
     with pytest.raises(ValueError, match="File system object is required for remote import type"):
-      operations.guess_file_metadata(
-        file_path="s3a://bucket/user/test_user/test.csv",  # Remote file path
-        import_type="remote",  # Remote file but no fs provided
-      )
+      operations.guess_file_metadata(data=schema, fs=None)
 
 
   def test_guess_file_metadata_empty_file(self, cleanup_temp_files):
   def test_guess_file_metadata_empty_file(self, cleanup_temp_files):
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file = tempfile.NamedTemporaryFile(delete=False)
@@ -206,8 +293,11 @@ class TestGuessFileMetadata:
 
 
     cleanup_temp_files.append(temp_file.name)
     cleanup_temp_files.append(temp_file.name)
 
 
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path=temp_file.name, import_type="local")
+
     with pytest.raises(ValueError, match="File is empty, cannot detect file format."):
     with pytest.raises(ValueError, match="File is empty, cannot detect file format."):
-      operations.guess_file_metadata(temp_file.name, "local")
+      operations.guess_file_metadata(data=schema)
 
 
   @patch("desktop.lib.importer.operations.is_magic_lib_available", False)
   @patch("desktop.lib.importer.operations.is_magic_lib_available", False)
   def test_guess_file_metadata_no_magic_lib(self, cleanup_temp_files):
   def test_guess_file_metadata_no_magic_lib(self, cleanup_temp_files):
@@ -217,8 +307,11 @@ class TestGuessFileMetadata:
 
 
     cleanup_temp_files.append(temp_file.name)
     cleanup_temp_files.append(temp_file.name)
 
 
+    # Create schema object
+    schema = GuessFileMetadataSchema(file_path=temp_file.name, import_type="local")
+
     with pytest.raises(RuntimeError, match="Unable to guess file type. python-magic or its dependency libmagic is not installed."):
     with pytest.raises(RuntimeError, match="Unable to guess file type. python-magic or its dependency libmagic is not installed."):
-      operations.guess_file_metadata(temp_file.name, "local")
+      operations.guess_file_metadata(data=schema)
 
 
 
 
 @pytest.mark.usefixtures("cleanup_temp_files")
 @pytest.mark.usefixtures("cleanup_temp_files")
@@ -260,10 +353,13 @@ class TestPreviewFile:
 
 
     mock_pl.read_excel.return_value = mock_df
     mock_pl.read_excel.return_value = mock_df
 
 
-    result = operations.preview_file(
+    # Create schema object
+    schema = PreviewFileSchema(
       file_path=temp_file.name, file_type="excel", import_type="local", sql_dialect="hive", has_header=True, sheet_name="Sheet1"
       file_path=temp_file.name, file_type="excel", import_type="local", sql_dialect="hive", has_header=True, sheet_name="Sheet1"
     )
     )
 
 
+    result = operations.preview_file(data=schema)
+
     assert result == {
     assert result == {
       "type": "excel",
       "type": "excel",
       "columns": [
       "columns": [
@@ -282,7 +378,8 @@ class TestPreviewFile:
 
 
     cleanup_temp_files.append(temp_file.name)
     cleanup_temp_files.append(temp_file.name)
 
 
-    result = operations.preview_file(
+    # Create schema object
+    schema = PreviewFileSchema(
       file_path=temp_file.name,
       file_path=temp_file.name,
       file_type="csv",
       file_type="csv",
       import_type="local",
       import_type="local",
@@ -293,6 +390,8 @@ class TestPreviewFile:
       record_separator="\n",
       record_separator="\n",
     )
     )
 
 
+    result = operations.preview_file(data=schema)
+
     assert result == {
     assert result == {
       "type": "csv",
       "type": "csv",
       "columns": [
       "columns": [
@@ -311,7 +410,8 @@ class TestPreviewFile:
 
 
     cleanup_temp_files.append(temp_file.name)
     cleanup_temp_files.append(temp_file.name)
 
 
-    result = operations.preview_file(
+    # Create schema object
+    schema = PreviewFileSchema(
       file_path=temp_file.name,
       file_path=temp_file.name,
       file_type="csv",
       file_type="csv",
       import_type="local",
       import_type="local",
@@ -322,6 +422,8 @@ class TestPreviewFile:
       record_separator="\n",
       record_separator="\n",
     )
     )
 
 
+    result = operations.preview_file(data=schema)
+
     assert result == {
     assert result == {
       "type": "csv",
       "type": "csv",
       "columns": [{"name": "column_1", "type": "STRING"}, {"name": "column_2", "type": "STRING"}],
       "columns": [{"name": "column_1", "type": "STRING"}, {"name": "column_2", "type": "STRING"}],
@@ -336,7 +438,8 @@ class TestPreviewFile:
 
 
     cleanup_temp_files.append(temp_file.name)
     cleanup_temp_files.append(temp_file.name)
 
 
-    result = operations.preview_file(
+    # Create schema object
+    schema = PreviewFileSchema(
       file_path=temp_file.name,
       file_path=temp_file.name,
       file_type="csv",
       file_type="csv",
       import_type="local",
       import_type="local",
@@ -347,54 +450,34 @@ class TestPreviewFile:
       record_separator="\n",
       record_separator="\n",
     )
     )
 
 
+    result = operations.preview_file(data=schema)
+
     assert result == {
     assert result == {
       "type": "csv",
       "type": "csv",
       "columns": [],
       "columns": [],
       "preview_data": {},
       "preview_data": {},
     }
     }
 
 
-  def test_preview_invalid_file_path(self):
-    with pytest.raises(ValueError, match="File path cannot be empty"):
-      operations.preview_file(file_path="", file_type="csv", import_type="local", sql_dialect="hive", has_header=True)
-
-  def test_preview_unsupported_file_type(self):
-    with pytest.raises(ValueError, match="Unsupported file type: json"):
-      operations.preview_file(
-        file_path="/path/to/test.json",
-        file_type="json",  # Unsupported type
-        import_type="local",
-        sql_dialect="hive",
-        has_header=True,
-      )
-
-  def test_preview_unsupported_sql_dialect(self):
-    with pytest.raises(ValueError, match="Unsupported SQL dialect: mysql"):
-      operations.preview_file(
-        file_path="/path/to/test.csv",
-        file_type="csv",
-        import_type="local",
-        sql_dialect="mysql",  # Unsupported dialect
-        has_header=True,
-      )
-
   def test_preview_remote_file_no_fs(self):
   def test_preview_remote_file_no_fs(self):
+    # Create schema object
+    schema = PreviewFileSchema(
+      file_path="s3a://bucket/user/test_user/test.csv", file_type="csv", import_type="remote", sql_dialect="hive", has_header=True
+    )
+
     with pytest.raises(ValueError, match="File system object is required for remote import type"):
     with pytest.raises(ValueError, match="File system object is required for remote import type"):
-      operations.preview_file(
-        file_path="s3a://bucket/user/test_user/test.csv",  # Remote file path
-        file_type="csv",
-        import_type="remote",  # Remote file but no fs provided
-        sql_dialect="hive",
-        has_header=True,
-      )
+      operations.preview_file(data=schema, fs=None)
 
 
   @patch("os.path.exists")
   @patch("os.path.exists")
   def test_preview_nonexistent_local_file(self, mock_exists):
   def test_preview_nonexistent_local_file(self, mock_exists):
     mock_exists.return_value = False
     mock_exists.return_value = False
 
 
+    # Create schema object
+    schema = PreviewFileSchema(
+      file_path="/path/to/nonexistent.csv", file_type="csv", import_type="local", sql_dialect="hive", has_header=True
+    )
+
     with pytest.raises(ValueError, match="Local file does not exist: /path/to/nonexistent.csv"):
     with pytest.raises(ValueError, match="Local file does not exist: /path/to/nonexistent.csv"):
-      operations.preview_file(
-        file_path="/path/to/nonexistent.csv", file_type="csv", import_type="local", sql_dialect="hive", has_header=True
-      )
+      operations.preview_file(data=schema)
 
 
   def test_preview_trino_dialect_type_mapping(self, cleanup_temp_files):
   def test_preview_trino_dialect_type_mapping(self, cleanup_temp_files):
     test_content = "string_col\nfoo\nbar"
     test_content = "string_col\nfoo\nbar"
@@ -404,7 +487,8 @@ class TestPreviewFile:
 
 
     cleanup_temp_files.append(temp_file.name)
     cleanup_temp_files.append(temp_file.name)
 
 
-    result = operations.preview_file(
+    # Create schema object
+    schema = PreviewFileSchema(
       file_path=temp_file.name,
       file_path=temp_file.name,
       file_type="csv",
       file_type="csv",
       import_type="local",
       import_type="local",
@@ -413,6 +497,8 @@ class TestPreviewFile:
       field_separator=",",
       field_separator=",",
     )
     )
 
 
+    result = operations.preview_file(data=schema)
+
     # Check the result for Trino-specific type mapping
     # Check the result for Trino-specific type mapping
     assert result["columns"][0]["type"] == "VARCHAR"  # Not STRING
     assert result["columns"][0]["type"] == "VARCHAR"  # Not STRING
 
 
@@ -439,7 +525,10 @@ class TestGuessFileHeader:
 
 
     cleanup_temp_files.append(temp_file.name)
     cleanup_temp_files.append(temp_file.name)
 
 
-    result = operations.guess_file_header(file_path=temp_file.name, file_type="csv", import_type="local")
+    # Create schema object
+    schema = GuessFileHeaderSchema(file_path=temp_file.name, file_type="csv", import_type="local")
+
+    result = operations.guess_file_header(data=schema)
 
 
     assert result
     assert result
 
 
@@ -467,120 +556,270 @@ class TestGuessFileHeader:
     mock_sniffer_instance.has_header.return_value = True
     mock_sniffer_instance.has_header.return_value = True
     mock_sniffer.return_value = mock_sniffer_instance
     mock_sniffer.return_value = mock_sniffer_instance
 
 
-    result = operations.guess_file_header(file_path=temp_file.name, file_type="excel", import_type="local", sheet_name="Sheet1")
-
-    assert result
-
-  def test_guess_header_excel_no_sheet_name(self, cleanup_temp_files):
-    test_content = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-    <workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
-    </workbook>"""
-
-    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".xlsx")
-    temp_file.write(test_content.encode("utf-8"))
-    temp_file.close()
+    # Create schema object
+    schema = GuessFileHeaderSchema(file_path=temp_file.name, file_type="excel", import_type="local", sheet_name="Sheet1")
 
 
-    cleanup_temp_files.append(temp_file.name)
+    result = operations.guess_file_header(data=schema)
 
 
-    with pytest.raises(ValueError, match="Sheet name is required for Excel files"):
-      operations.guess_file_header(
-        file_path=temp_file.name,
-        file_type="excel",
-        import_type="local",
-        # Missing sheet_name
-      )
-
-  def test_guess_header_invalid_path(self):
-    with pytest.raises(ValueError, match="File path cannot be empty"):
-      operations.guess_file_header(file_path="", file_type="csv", import_type="local")
-
-  def test_guess_header_unsupported_file_type(self):
-    with pytest.raises(ValueError, match="Unsupported file type: json"):
-      operations.guess_file_header(
-        file_path="/path/to/test.json",
-        file_type="json",  # Unsupported type
-        import_type="local",
-      )
+    assert result
 
 
   def test_guess_header_nonexistent_local_file(self):
   def test_guess_header_nonexistent_local_file(self):
+    # Create schema object
+    schema = GuessFileHeaderSchema(file_path="/path/to/nonexistent/file.csv", file_type="csv", import_type="local")
+
     with pytest.raises(ValueError, match="Local file does not exist"):
     with pytest.raises(ValueError, match="Local file does not exist"):
-      operations.guess_file_header(file_path="/path/to/nonexistent.csv", file_type="csv", import_type="local")
+      operations.guess_file_header(data=schema)
 
 
   def test_guess_header_remote_file_no_fs(self):
   def test_guess_header_remote_file_no_fs(self):
+    # Create schema object
+    schema = GuessFileHeaderSchema(file_path="s3a://bucket/user/test_user/test.csv", file_type="csv", import_type="remote")
+
     with pytest.raises(ValueError, match="File system object is required for remote import type"):
     with pytest.raises(ValueError, match="File system object is required for remote import type"):
-      operations.guess_file_header(
-        file_path="hdfs:///path/to/test.csv",
-        file_type="csv",
-        import_type="remote",  # Remote but no fs provided
-      )
+      operations.guess_file_header(data=schema, fs=None)
 
 
 
 
 class TestSqlTypeMapping:
 class TestSqlTypeMapping:
   def test_get_sql_type_mapping_hive(self):
   def test_get_sql_type_mapping_hive(self):
-    mappings = operations.get_sql_type_mapping("hive")
+    # Create schema object
+    schema = SqlTypeMapperSchema(sql_dialect="hive")
 
 
-    # Check some key mappings for Hive
-    assert mappings["Int32"] == "INT"
-    assert mappings["Utf8"] == "STRING"
-    assert mappings["Float64"] == "DOUBLE"
-    assert mappings["Boolean"] == "BOOLEAN"
-    assert mappings["Decimal"] == "DECIMAL"
+    result = operations.get_sql_type_mapping(schema)
 
 
-  def test_get_sql_type_mapping_trino(self):
-    mappings = operations.get_sql_type_mapping("trino")
+    # Test all integer types (signed and unsigned)
+    assert result["Int8"] == "TINYINT"
+    assert result["Int16"] == "SMALLINT"
+    assert result["Int32"] == "INT"
+    assert result["Int64"] == "BIGINT"
+    assert result["UInt8"] == "TINYINT"  # Unsigned mapped to signed in Hive
+    assert result["UInt16"] == "SMALLINT"
+    assert result["UInt32"] == "INT"
+    assert result["UInt64"] == "BIGINT"
+
+    # Test floating point and decimal types
+    assert result["Float32"] == "FLOAT"
+    assert result["Float64"] == "DOUBLE"
+    assert result["Decimal"] == "DECIMAL"
 
 
-    # Check some key mappings for Trino that differ from Hive
-    assert mappings["Int32"] == "INTEGER"
-    assert mappings["Utf8"] == "VARCHAR"
-    assert mappings["Binary"] == "VARBINARY"
-    assert mappings["Float32"] == "REAL"
-    assert mappings["Struct"] == "ROW"
-    assert mappings["Object"] == "JSON"
+    # Test boolean, string, and binary types
+    assert result["Boolean"] == "BOOLEAN"
+    assert result["Utf8"] == "STRING"
+    assert result["String"] == "STRING"
+    assert result["Categorical"] == "STRING"
+    assert result["Enum"] == "STRING"
+    assert result["Binary"] == "BINARY"
+
+    # Test temporal types
+    assert result["Date"] == "DATE"
+    assert result["Time"] == "TIMESTAMP"  # No pure TIME type in Hive
+    assert result["Datetime"] == "TIMESTAMP"
+    assert result["Duration"] == "INTERVAL DAY TO SECOND"
+
+    # Test nested and other types
+    assert result["Array"] == "ARRAY"
+    assert result["List"] == "ARRAY"
+    assert result["Struct"] == "STRUCT"
+    assert result["Object"] == "STRING"
+    assert result["Null"] == "STRING"
+    assert result["Unknown"] == "STRING"
+
+  def test_get_sql_type_mapping_trino(self):
+    # Create schema object
+    schema = SqlTypeMapperSchema(sql_dialect="trino")
+
+    result = operations.get_sql_type_mapping(schema)
+
+    # Test Trino-specific overrides
+    assert result["Int32"] == "INTEGER"  # Not INT
+    assert result["UInt32"] == "INTEGER"  # Not INT
+    assert result["Float32"] == "REAL"  # Not FLOAT
+    assert result["Utf8"] == "VARCHAR"  # Not STRING
+    assert result["String"] == "VARCHAR"  # Not STRING
+    assert result["Binary"] == "VARBINARY"  # Not BINARY
+    assert result["Struct"] == "ROW"  # Not STRUCT
+    assert result["Object"] == "JSON"  # Not STRING
+    assert result["Duration"] == "INTERVAL DAY TO SECOND"
+
+    # Test types that remain the same as base mapping
+    assert result["Int8"] == "TINYINT"
+    assert result["Int16"] == "SMALLINT"
+    assert result["Int64"] == "BIGINT"
+    assert result["Float64"] == "DOUBLE"
+    assert result["Boolean"] == "BOOLEAN"
+    assert result["Date"] == "DATE"
+    assert result["Time"] == "TIMESTAMP"
+    assert result["Datetime"] == "TIMESTAMP"
 
 
   def test_get_sql_type_mapping_phoenix(self):
   def test_get_sql_type_mapping_phoenix(self):
-    mappings = operations.get_sql_type_mapping("phoenix")
+    # Create schema object
+    schema = SqlTypeMapperSchema(sql_dialect="phoenix")
+
+    result = operations.get_sql_type_mapping(schema)
+
+    # Test Phoenix-specific unsigned integer mappings
+    assert result["UInt8"] == "UNSIGNED_TINYINT"
+    assert result["UInt16"] == "UNSIGNED_SMALLINT"
+    assert result["UInt32"] == "UNSIGNED_INT"
+    assert result["UInt64"] == "UNSIGNED_LONG"
+
+    # Test other Phoenix-specific overrides
+    assert result["Utf8"] == "VARCHAR"  # Not STRING
+    assert result["String"] == "VARCHAR"  # Not STRING
+    assert result["Binary"] == "VARBINARY"  # Not BINARY
+    assert result["Duration"] == "STRING"  # Phoenix treats durations as strings
+    assert result["Struct"] == "STRING"  # No native STRUCT type
+    assert result["Object"] == "VARCHAR"  # Not STRING
+    assert result["Time"] == "TIME"  # Phoenix has its own TIME type
+    assert result["Decimal"] == "DECIMAL"
+
+    # Test signed integers (use base mapping)
+    assert result["Int8"] == "TINYINT"
+    assert result["Int16"] == "SMALLINT"
+    assert result["Int32"] == "INT"
+    assert result["Int64"] == "BIGINT"
 
 
-    # Check some key mappings for Phoenix
-    assert mappings["UInt32"] == "UNSIGNED_INT"
-    assert mappings["Utf8"] == "VARCHAR"
-    assert mappings["Time"] == "TIME"
-    assert mappings["Struct"] == "STRING"  # Phoenix treats structs as strings
-    assert mappings["Duration"] == "STRING"  # Phoenix treats durations as strings
+    # Test other types that remain the same
+    assert result["Float32"] == "FLOAT"
+    assert result["Float64"] == "DOUBLE"
+    assert result["Boolean"] == "BOOLEAN"
+    assert result["Date"] == "DATE"
+    assert result["Datetime"] == "TIMESTAMP"
 
 
   def test_get_sql_type_mapping_impala(self):
   def test_get_sql_type_mapping_impala(self):
-    result = operations.get_sql_type_mapping("impala")
+    # Create schema object
+    schema = SqlTypeMapperSchema(sql_dialect="impala")
+
+    result = operations.get_sql_type_mapping(schema)
 
 
-    # Impala uses the base mappings, so check those
+    # Impala uses all base mappings (no overrides)
+    # Test a comprehensive set to ensure no overrides are applied
+    assert result["Int8"] == "TINYINT"
+    assert result["Int16"] == "SMALLINT"
     assert result["Int32"] == "INT"
     assert result["Int32"] == "INT"
     assert result["Int64"] == "BIGINT"
     assert result["Int64"] == "BIGINT"
+    assert result["UInt8"] == "TINYINT"
+    assert result["UInt16"] == "SMALLINT"
+    assert result["UInt32"] == "INT"
+    assert result["UInt64"] == "BIGINT"
+    assert result["Float32"] == "FLOAT"
     assert result["Float64"] == "DOUBLE"
     assert result["Float64"] == "DOUBLE"
+    assert result["Decimal"] == "DECIMAL"
+    assert result["Boolean"] == "BOOLEAN"
     assert result["Utf8"] == "STRING"
     assert result["Utf8"] == "STRING"
+    assert result["String"] == "STRING"
+    assert result["Binary"] == "BINARY"
+    assert result["Date"] == "DATE"
+    assert result["Time"] == "TIMESTAMP"
+    assert result["Datetime"] == "TIMESTAMP"
+    assert result["Duration"] == "INTERVAL DAY TO SECOND"
+    assert result["Array"] == "ARRAY"
+    assert result["Struct"] == "STRUCT"
+    assert result["Object"] == "STRING"
 
 
   def test_get_sql_type_mapping_sparksql(self):
   def test_get_sql_type_mapping_sparksql(self):
-    result = operations.get_sql_type_mapping("sparksql")
+    # Create schema object
+    schema = SqlTypeMapperSchema(sql_dialect="sparksql")
 
 
-    # SparkSQL uses the base mappings, so check those
+    result = operations.get_sql_type_mapping(schema)
+
+    # SparkSQL uses all base mappings (no overrides)
+    # Test a comprehensive set to ensure no overrides are applied
+    assert result["Int8"] == "TINYINT"
+    assert result["Int16"] == "SMALLINT"
     assert result["Int32"] == "INT"
     assert result["Int32"] == "INT"
     assert result["Int64"] == "BIGINT"
     assert result["Int64"] == "BIGINT"
+    assert result["UInt8"] == "TINYINT"
+    assert result["UInt16"] == "SMALLINT"
+    assert result["UInt32"] == "INT"
+    assert result["UInt64"] == "BIGINT"
+    assert result["Float32"] == "FLOAT"
     assert result["Float64"] == "DOUBLE"
     assert result["Float64"] == "DOUBLE"
+    assert result["Decimal"] == "DECIMAL"
+    assert result["Boolean"] == "BOOLEAN"
     assert result["Utf8"] == "STRING"
     assert result["Utf8"] == "STRING"
-
-  def test_get_sql_type_mapping_unsupported_dialect(self):
-    with pytest.raises(ValueError, match="Unsupported dialect: mysql"):
-      operations.get_sql_type_mapping("mysql")
+    assert result["String"] == "STRING"
+    assert result["Binary"] == "BINARY"
+    assert result["Date"] == "DATE"
+    assert result["Time"] == "TIMESTAMP"
+    assert result["Datetime"] == "TIMESTAMP"
+    assert result["Duration"] == "INTERVAL DAY TO SECOND"
+    assert result["Array"] == "ARRAY"
+    assert result["Struct"] == "STRUCT"
+    assert result["Object"] == "STRING"
+
+  def test_get_sql_type_mapping_all_dialects_consistency(self):
+    # Test that all dialects return mappings for all base types
+    dialects = ["hive", "impala", "sparksql", "trino", "phoenix"]
+    base_types = [
+      "Int8",
+      "Int16",
+      "Int32",
+      "Int64",
+      "UInt8",
+      "UInt16",
+      "UInt32",
+      "UInt64",
+      "Float32",
+      "Float64",
+      "Decimal",
+      "Boolean",
+      "Utf8",
+      "String",
+      "Categorical",
+      "Enum",
+      "Binary",
+      "Date",
+      "Time",
+      "Datetime",
+      "Duration",
+      "Array",
+      "List",
+      "Struct",
+      "Object",
+      "Null",
+      "Unknown",
+    ]
+
+    for dialect in dialects:
+      schema = SqlTypeMapperSchema(sql_dialect=dialect)
+      result = operations.get_sql_type_mapping(schema)
+
+      # Ensure all base types have mappings
+      for base_type in base_types:
+        assert base_type in result, f"Missing mapping for {base_type} in {dialect} dialect"
+        assert isinstance(result[base_type], str), f"Invalid mapping type for {base_type} in {dialect} dialect"
+        assert len(result[base_type]) > 0, f"Empty mapping for {base_type} in {dialect} dialect"
 
 
   def test_map_polars_dtype_to_sql_type(self):
   def test_map_polars_dtype_to_sql_type(self):
-    # Test with Hive dialect
-    assert operations._map_polars_dtype_to_sql_type("hive", "Int64") == "BIGINT"
-    assert operations._map_polars_dtype_to_sql_type("hive", "Float32") == "FLOAT"
-
-    # Test with Trino dialect
-    assert operations._map_polars_dtype_to_sql_type("trino", "Int64") == "BIGINT"
+    # Test comprehensive type mapping for each dialect
+
+    # Hive dialect tests
+    assert operations._map_polars_dtype_to_sql_type("hive", "Int8") == "TINYINT"
+    assert operations._map_polars_dtype_to_sql_type("hive", "Int32") == "INT"
+    assert operations._map_polars_dtype_to_sql_type("hive", "Float64") == "DOUBLE"
+    assert operations._map_polars_dtype_to_sql_type("hive", "Utf8") == "STRING"
+    assert operations._map_polars_dtype_to_sql_type("hive", "Boolean") == "BOOLEAN"
+    assert operations._map_polars_dtype_to_sql_type("hive", "Date") == "DATE"
+    assert operations._map_polars_dtype_to_sql_type("hive", "Array") == "ARRAY"
+
+    # Trino dialect tests (with overrides)
+    assert operations._map_polars_dtype_to_sql_type("trino", "Int32") == "INTEGER"
     assert operations._map_polars_dtype_to_sql_type("trino", "Float32") == "REAL"
     assert operations._map_polars_dtype_to_sql_type("trino", "Float32") == "REAL"
-
-    # Test unsupported type
+    assert operations._map_polars_dtype_to_sql_type("trino", "Utf8") == "VARCHAR"
+    assert operations._map_polars_dtype_to_sql_type("trino", "Binary") == "VARBINARY"
+    assert operations._map_polars_dtype_to_sql_type("trino", "Struct") == "ROW"
+    assert operations._map_polars_dtype_to_sql_type("trino", "Object") == "JSON"
+
+    # Phoenix dialect tests (with unsigned types)
+    assert operations._map_polars_dtype_to_sql_type("phoenix", "UInt8") == "UNSIGNED_TINYINT"
+    assert operations._map_polars_dtype_to_sql_type("phoenix", "UInt32") == "UNSIGNED_INT"
+    assert operations._map_polars_dtype_to_sql_type("phoenix", "UInt64") == "UNSIGNED_LONG"
+    assert operations._map_polars_dtype_to_sql_type("phoenix", "Time") == "TIME"
+    assert operations._map_polars_dtype_to_sql_type("phoenix", "Duration") == "STRING"
+    assert operations._map_polars_dtype_to_sql_type("phoenix", "Struct") == "STRING"
+
+    # Test error for unknown type
     with pytest.raises(ValueError, match="No mapping for Polars dtype"):
     with pytest.raises(ValueError, match="No mapping for Polars dtype"):
-      operations._map_polars_dtype_to_sql_type("hive", "NonExistentType")
+      operations._map_polars_dtype_to_sql_type("hive", "UnknownType")
 
 
 
 
 @pytest.mark.usefixtures("cleanup_temp_files")
 @pytest.mark.usefixtures("cleanup_temp_files")
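Since the serializers now delegate to Pydantic, invalid input surfaces as a pydantic.ValidationError; a minimal sketch of inspecting it, mirroring what the schema tests further down assert:

  from pydantic import ValidationError

  from desktop.lib.importer.schemas import PreviewFileSchema

  try:
    PreviewFileSchema(
      file_path="/tmp/report.xlsx",  # hypothetical path
      file_type="excel",
      import_type="local",
      sql_dialect="hive",
      has_header=True,
      # sheet_name intentionally omitted: the model validator rejects Excel files without a sheet name
    )
  except ValidationError as e:
    for err in e.errors():
      print(err["loc"], err["msg"])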

+ 142 - 0
desktop/core/src/desktop/lib/importer/schemas.py

@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import codecs
+import os
+from typing import Any, Literal, Optional
+
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
+
+from desktop.conf import IMPORTER
+
+
+class LocalFileUploadSchema(BaseModel):
+  model_config = ConfigDict(arbitrary_types_allowed=True)
+
+  file: Any = Field(..., description="CSV or Excel file to upload and process")
+  filename: str = Field(..., description="The name of the file")
+  filesize: int = Field(..., description="The size of the file in bytes")
+
+  @field_validator("filename")
+  @classmethod
+  def validate_filename(cls, v: str) -> str:
+    # Check if the file type is restricted
+    _, file_type = os.path.splitext(v)
+    restricted_extensions = IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.get()
+    if restricted_extensions and file_type.lower() in [ext.lower() for ext in restricted_extensions]:
+      raise ValueError(f'Uploading files with type "{file_type}" is not allowed. Hue is configured to restrict this type.')
+    return v
+
+  @field_validator("filesize")
+  @classmethod
+  def validate_filesize(cls, v: int) -> int:
+    # Check file size
+    max_size = IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.get()
+    if v > max_size:
+      max_size_mib = max_size / (1024 * 1024)
+      raise ValueError(f"File too large. Maximum file size is {max_size_mib:.0f} MiB.")
+    return v
+
+
+class GuessFileMetadataSchema(BaseModel):
+  file_path: str = Field(..., description="Full path to the file to analyze")
+  import_type: Literal["local", "remote"] = Field(..., description="Whether the file is local or on a remote filesystem")
+
+  @field_validator("file_path")
+  @classmethod
+  def file_path_not_blank(cls, v: str) -> str:
+    if not v or v.strip() == "":
+      raise ValueError("File path cannot be empty or whitespace.")
+    return v
+
+
+class PreviewFileSchema(BaseModel):
+  file_path: str = Field(..., description="Full path to the file to preview")
+  file_type: Literal["csv", "tsv", "excel", "delimiter_format"] = Field(..., description="Type of file (csv, tsv, excel, delimiter_format)")
+  import_type: Literal["local", "remote"] = Field(..., description="Whether the file is local or on a remote filesystem")
+  sql_dialect: Literal["hive", "impala", "trino", "phoenix", "sparksql"] = Field(..., description="SQL dialect for mapping column types")
+  has_header: bool = Field(..., description="Whether the file has a header row or not")
+  sheet_name: Optional[str] = Field(None, description="Sheet name for Excel files")
+  field_separator: Optional[str] = Field(None, description="Field separator character")
+  quote_char: Optional[str] = Field(None, description="Quote character")
+  record_separator: Optional[str] = Field(None, description="Record separator character")
+
+  @field_validator("file_path")
+  @classmethod
+  def file_path_not_blank(cls, v: str) -> str:
+    if not v or v.strip() == "":
+      raise ValueError("File path cannot be empty or whitespace.")
+    return v
+
+  @field_validator("field_separator", "quote_char", "record_separator", mode="before")
+  @classmethod
+  def decode_escape_sequences(cls, value: Optional[str]) -> Optional[str]:
+    if value is None:
+      return value
+    return codecs.decode(value, "unicode_escape")
+
+  @model_validator(mode="after")
+  def set_defaults_and_validate_dependencies(self) -> "PreviewFileSchema":
+    # Validate sheet_name dependency for excel files
+    if self.file_type == "excel" and not self.sheet_name:
+      raise ValueError("Sheet name is required for Excel files.")
+
+    # Normalize record separator
+    if self.record_separator == "\r\n":
+      self.record_separator = "\n"
+
+    # Set defaults for delimited files
+    if self.file_type in ["csv", "tsv", "delimiter_format"]:
+      if self.field_separator is None:
+        if self.file_type == "csv":
+          self.field_separator = ","
+        elif self.file_type == "tsv":
+          self.field_separator = "\t"
+        else:
+          raise ValueError("Field separator is required for delimited files")
+
+      if self.quote_char is None:
+        self.quote_char = '"'
+
+      if self.record_separator is None:
+        self.record_separator = "\n"
+
+    return self
+
+
+class SqlTypeMapperSchema(BaseModel):
+  sql_dialect: Literal["hive", "impala", "trino", "phoenix", "sparksql"] = Field(..., description="SQL dialect for mapping column types")
+
+
+class GuessFileHeaderSchema(BaseModel):
+  file_path: str = Field(..., description="Full path to the file to analyze")
+  file_type: Literal["csv", "tsv", "excel", "delimiter_format"] = Field(..., description="Type of file (csv, tsv, excel, delimiter_format)")
+  import_type: Literal["local", "remote"] = Field(..., description="Whether the file is local or on a remote filesystem")
+  sheet_name: Optional[str] = Field(None, description="Sheet name for Excel files")
+
+  @field_validator("file_path")
+  @classmethod
+  def file_path_not_blank(cls, v: str) -> str:
+    if not v or v.strip() == "":
+      raise ValueError("File path cannot be empty or whitespace.")
+    return v
+
+  @model_validator(mode="after")
+  def sheet_name_required_for_excel(self) -> "GuessFileHeaderSchema":
+    if self.file_type == "excel" and not self.sheet_name:
+      raise ValueError("Sheet name is required for Excel files.")
+    return self
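For reference, a minimal sketch of how the new PreviewFileSchema behaves when constructed directly, assuming it is imported from desktop.lib.importer.schemas as added above; the file path and separator values are illustrative only:

from desktop.lib.importer.schemas import PreviewFileSchema

# Separators may arrive from the client as escape sequences; the "before" field validator
# decodes them, and the model validator fills in defaults for omitted delimited-file fields.
schema = PreviewFileSchema(
  file_path="/tmp/example.csv",  # hypothetical path
  file_type="csv",
  import_type="local",
  sql_dialect="hive",
  has_header=True,
  field_separator="\\t",  # decoded to a real tab character
)

assert schema.field_separator == "\t"
assert schema.quote_char == '"'  # default applied by the model validator
assert schema.record_separator == "\n"  # default applied by the model validator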

+ 540 - 0
desktop/core/src/desktop/lib/importer/schemas_tests.py

@@ -0,0 +1,540 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest.mock import MagicMock
+
+import pytest
+from pydantic import ValidationError
+
+from desktop.conf import IMPORTER
+from desktop.lib.importer.schemas import (
+  GuessFileHeaderSchema,
+  GuessFileMetadataSchema,
+  LocalFileUploadSchema,
+  PreviewFileSchema,
+  SqlTypeMapperSchema,
+)
+
+
+class TestLocalFileUploadSchema:
+  def test_valid_file_upload(self):
+    resets = [
+      IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.set_for_testing([".exe", ".bat"]),
+      IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.set_for_testing(10 * 1024 * 1024),  # 10 MiB limit
+    ]
+    try:
+      mock_file = MagicMock()
+      schema = LocalFileUploadSchema(file=mock_file, filename="test.csv", filesize=1024)
+
+      assert schema.file == mock_file
+      assert schema.filename == "test.csv"
+      assert schema.filesize == 1024
+    finally:
+      for reset in resets:
+        reset()
+
+  def test_restricted_file_extension(self):
+    resets = [IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.set_for_testing([".exe", ".bat", ".csv"])]
+    try:
+      mock_file = MagicMock()
+
+      with pytest.raises(ValidationError) as exc_info:
+        LocalFileUploadSchema(file=mock_file, filename="test.csv", filesize=1024)
+
+      errors = exc_info.value.errors()
+      assert len(errors) == 1
+      assert "filename" in str(errors[0]["loc"])
+      assert "not allowed" in str(errors[0]["msg"])
+    finally:
+      for reset in resets:
+        reset()
+
+  def test_file_too_large(self):
+    resets = [IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.set_for_testing(1024 * 1024)]  # 1 MiB
+    try:
+      mock_file = MagicMock()
+
+      with pytest.raises(ValidationError) as exc_info:
+        LocalFileUploadSchema(file=mock_file, filename="test.csv", filesize=1024 * 1024 * 2)  # 2 MiB
+
+      errors = exc_info.value.errors()
+      assert len(errors) == 1
+      assert "filesize" in str(errors[0]["loc"])
+      assert "too large" in str(errors[0]["msg"])
+    finally:
+      for reset in resets:
+        reset()
+
+  def test_case_insensitive_extension_check(self):
+    resets = [
+      IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.set_for_testing([".EXE", ".BAT"]),
+      IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.set_for_testing(1024 * 1024 * 10),  # 10 MiB limit
+    ]
+    try:
+      mock_file = MagicMock()
+
+      with pytest.raises(ValidationError) as exc_info:
+        LocalFileUploadSchema(file=mock_file, filename="test.exe", filesize=1024)
+
+      errors = exc_info.value.errors()
+      assert len(errors) == 1
+      assert "filename" in str(errors[0]["loc"])
+      assert "not allowed" in str(errors[0]["msg"])
+    finally:
+      for reset in resets:
+        reset()
+
+  def test_missing_file(self):
+    with pytest.raises(ValidationError) as exc_info:
+      LocalFileUploadSchema()
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 3
+    assert "file" in str(errors[0]["loc"])
+    assert "filename" in str(errors[1]["loc"])
+    assert "filesize" in str(errors[2]["loc"])
+
+
+class TestGuessFileMetadataSchema:
+  def test_valid_local_file_metadata(self):
+    schema = GuessFileMetadataSchema(file_path="/path/to/test.csv", import_type="local")
+
+    assert schema.file_path == "/path/to/test.csv"
+    assert schema.import_type == "local"
+
+  def test_valid_remote_file_metadata(self):
+    schema = GuessFileMetadataSchema(file_path="s3a://bucket/test.csv", import_type="remote")
+
+    assert schema.file_path == "s3a://bucket/test.csv"
+    assert schema.import_type == "remote"
+
+  def test_invalid_import_type(self):
+    with pytest.raises(ValidationError) as exc_info:
+      GuessFileMetadataSchema(file_path="/path/to/test.csv", import_type="invalid")
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "import_type" in str(errors[0]["loc"])
+
+  def test_missing_required_fields(self):
+    with pytest.raises(ValidationError) as exc_info:
+      GuessFileMetadataSchema()
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 2  # file_path and import_type are required
+
+  def test_invalid_file_path(self):
+    with pytest.raises(ValidationError) as exc_info:
+      GuessFileMetadataSchema(file_path="", import_type="local")
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "file_path" in str(errors[0]["loc"])
+
+
+class TestPreviewFileSchema:
+  def test_valid_csv_preview(self):
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      field_separator=",",
+      quote_char='"',
+      record_separator="\n",
+    )
+
+    assert schema.file_path == "/path/to/test.csv"
+    assert schema.file_type == "csv"
+    assert schema.import_type == "local"
+    assert schema.sql_dialect == "hive"
+    assert schema.has_header is True
+    assert schema.field_separator == ","
+    assert schema.quote_char == '"'
+    assert schema.record_separator == "\n"
+
+  def test_csv_default_values(self):
+    schema = PreviewFileSchema(file_path="/path/to/test.csv", file_type="csv", import_type="local", sql_dialect="hive", has_header=False)
+
+    assert schema.field_separator == ","
+    assert schema.quote_char == '"'
+    assert schema.record_separator == "\n"
+
+  def test_tsv_default_values(self):
+    schema = PreviewFileSchema(file_path="/path/to/test.tsv", file_type="tsv", import_type="local", sql_dialect="hive", has_header=False)
+
+    assert schema.field_separator == "\t"
+    assert schema.quote_char == '"'
+    assert schema.record_separator == "\n"
+
+  def test_excel_requires_sheet_name(self):
+    with pytest.raises(ValidationError) as exc_info:
+      PreviewFileSchema(file_path="/path/to/test.xlsx", file_type="excel", import_type="local", sql_dialect="hive", has_header=False)
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "Sheet name is required for Excel files" in str(errors[0]["msg"])
+
+  def test_valid_excel_preview(self):
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.xlsx",
+      file_type="excel",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      sheet_name="Sheet1",
+    )
+
+    assert schema.file_type == "excel"
+    assert schema.sheet_name == "Sheet1"
+
+  def test_delimiter_format_requires_field_separator(self):
+    with pytest.raises(ValidationError) as exc_info:
+      PreviewFileSchema(
+        file_path="/path/to/test.txt", file_type="delimiter_format", import_type="local", sql_dialect="hive", has_header=False
+      )
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "Field separator is required for delimited files" in str(errors[0]["msg"])
+
+  def test_valid_delimiter_format(self):
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.txt",
+      file_type="delimiter_format",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=False,
+      field_separator="|",
+    )
+
+    assert schema.field_separator == "|"
+    assert schema.quote_char == '"'
+    assert schema.record_separator == "\n"
+
+  def test_invalid_file_type(self):
+    with pytest.raises(ValidationError) as exc_info:
+      PreviewFileSchema(file_path="/path/to/test.pdf", file_type="pdf", import_type="local", sql_dialect="hive", has_header=False)
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "file_type" in str(errors[0]["loc"])
+
+  def test_invalid_file_path(self):
+    with pytest.raises(ValidationError) as exc_info:
+      PreviewFileSchema(file_path="", file_type="csv", import_type="local", sql_dialect="hive", has_header=False)
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "file_path" in str(errors[0]["loc"])
+
+  def test_invalid_sql_dialect(self):
+    with pytest.raises(ValidationError) as exc_info:
+      PreviewFileSchema(file_path="/path/to/test.csv", file_type="csv", import_type="local", sql_dialect="mysql", has_header=False)
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "sql_dialect" in str(errors[0]["loc"])
+
+  def test_decode_escape_sequences_field_separator(self):
+    # Test tab escape sequence
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.txt",
+      file_type="delimiter_format",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      field_separator="\\t",  # Tab character as escape sequence
+    )
+
+    assert schema.field_separator == "\t"  # Should be decoded to actual tab
+
+    # Test newline escape sequence
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.txt",
+      file_type="delimiter_format",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      field_separator="\\n",
+    )
+    assert schema.field_separator == "\n"
+
+    # Test backslash escape sequence
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.txt",
+      file_type="delimiter_format",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      field_separator="\\\\",
+    )
+    assert schema.field_separator == "\\"
+
+  def test_decode_escape_sequences_quote_char(self):
+    # Test double quote escape sequence
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      quote_char='\\"',  # Escaped double quote
+    )
+
+    assert schema.quote_char == '"'
+
+    # Test single quote escape sequence
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      quote_char="\\'",
+    )
+    assert schema.quote_char == "'"
+
+    # Test Unicode escape sequence
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      quote_char="\\u0022",  # Unicode for double quote
+    )
+    assert schema.quote_char == '"'
+
+  def test_decode_escape_sequences_record_separator(self):
+    # Test newline escape sequence
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      record_separator="\\n",
+    )
+
+    assert schema.record_separator == "\n"
+
+    # Test carriage return + newline (should be normalized to just newline)
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      record_separator="\\r\\n",
+    )
+    assert schema.record_separator == "\n"  # Should be normalized
+
+    # Test just carriage return
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      record_separator="\\r",
+    )
+    assert schema.record_separator == "\r"
+
+  def test_decode_escape_sequences_with_none_values(self):
+    # Test that None values are handled properly and defaults are set
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      field_separator=None,
+      quote_char=None,
+      record_separator=None,
+    )
+
+    # CSV defaults should be applied
+    assert schema.field_separator == ","
+    assert schema.quote_char == '"'
+    assert schema.record_separator == "\n"
+
+  def test_decode_escape_sequences_with_complex_patterns(self):
+    # Test multiple escape sequences in a single value
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.txt",
+      file_type="delimiter_format",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      field_separator="\\t\\t",  # Double tab
+      quote_char="\\x22",  # Hex escape for double quote
+      record_separator="\\x0A",  # Hex escape for newline
+    )
+
+    assert schema.field_separator == "\t\t"
+    assert schema.quote_char == '"'
+    assert schema.record_separator == "\n"
+
+  def test_decode_escape_sequences_preserves_regular_strings(self):
+    # Test that regular strings without escape sequences are preserved
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.txt",
+      file_type="delimiter_format",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      field_separator="|",
+      quote_char="'",
+      record_separator="###",
+    )
+
+    assert schema.field_separator == "|"
+    assert schema.quote_char == "'"
+    assert schema.record_separator == "###"
+
+  def test_decode_escape_sequences_mixed_scenarios(self):
+    # Test various escape sequence patterns
+    test_cases = [
+      ("\\x09", "\t"),  # Hex escape for tab
+      ("\\u0009", "\t"),  # Unicode escape for tab
+      ("\\a", "\a"),  # Bell/alert character
+      ("\\b", "\b"),  # Backspace
+      ("\\f", "\f"),  # Form feed
+      ("\\v", "\v"),  # Vertical tab
+      ("\\0", "\0"),  # Null character
+    ]
+
+    for input_val, expected_val in test_cases:
+      schema = PreviewFileSchema(
+        file_path="/path/to/test.txt",
+        file_type="delimiter_format",
+        import_type="local",
+        sql_dialect="hive",
+        has_header=True,
+        field_separator=input_val,
+      )
+      assert schema.field_separator == expected_val, f"Failed for input: {repr(input_val)}"
+
+  def test_record_separator_normalization(self):
+    # Test that \r\n is normalized to \n
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      record_separator="\r\n",  # Already decoded value
+    )
+    assert schema.record_separator == "\n"  # Should be normalized
+
+    # But \r alone should remain as \r
+    schema = PreviewFileSchema(
+      file_path="/path/to/test.csv",
+      file_type="csv",
+      import_type="local",
+      sql_dialect="hive",
+      has_header=True,
+      record_separator="\r",
+    )
+    assert schema.record_separator == "\r"  # Should NOT be normalized
+
+
+class TestSqlTypeMapperSchema:
+  def test_valid_sql_dialects(self):
+    valid_dialects = ["hive", "impala", "trino", "phoenix", "sparksql"]
+
+    for dialect in valid_dialects:
+      schema = SqlTypeMapperSchema(sql_dialect=dialect)
+      assert schema.sql_dialect == dialect
+
+  def test_invalid_sql_dialect(self):
+    with pytest.raises(ValidationError) as exc_info:
+      SqlTypeMapperSchema(sql_dialect="mysql")
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "sql_dialect" in str(errors[0]["loc"])
+
+  def test_missing_sql_dialect(self):
+    with pytest.raises(ValidationError) as exc_info:
+      SqlTypeMapperSchema()
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "sql_dialect" in str(errors[0]["loc"])
+
+
+class TestGuessFileHeaderSchema:
+  def test_valid_csv_header_guess(self):
+    schema = GuessFileHeaderSchema(file_path="/path/to/test.csv", file_type="csv", import_type="local")
+
+    assert schema.file_path == "/path/to/test.csv"
+    assert schema.file_type == "csv"
+    assert schema.import_type == "local"
+    assert schema.sheet_name is None
+
+  def test_excel_requires_sheet_name(self):
+    with pytest.raises(ValidationError) as exc_info:
+      GuessFileHeaderSchema(file_path="/path/to/test.xlsx", file_type="excel", import_type="local")
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "Sheet name is required for Excel files" in str(errors[0]["msg"])
+
+  def test_valid_excel_header_guess(self):
+    schema = GuessFileHeaderSchema(file_path="/path/to/test.xlsx", file_type="excel", import_type="local", sheet_name="Sheet1")
+
+    assert schema.file_type == "excel"
+    assert schema.sheet_name == "Sheet1"
+
+  def test_valid_remote_file_header_guess(self):
+    schema = GuessFileHeaderSchema(file_path="s3a://bucket/test.csv", file_type="csv", import_type="remote")
+
+    assert schema.file_path == "s3a://bucket/test.csv"
+    assert schema.import_type == "remote"
+
+  def test_all_file_types(self):
+    valid_file_types = ["csv", "tsv", "delimiter_format"]  # excel is covered separately since it requires sheet_name
+
+    for file_type in valid_file_types:
+      schema = GuessFileHeaderSchema(file_path=f"/path/to/test.{file_type}", file_type=file_type, import_type="local")
+      assert schema.file_type == file_type
+
+  def test_invalid_file_type(self):
+    with pytest.raises(ValidationError) as exc_info:
+      GuessFileHeaderSchema(file_path="/path/to/test.pdf", file_type="pdf", import_type="local")
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "file_type" in str(errors[0]["loc"])
+
+  def test_missing_required_fields(self):
+    with pytest.raises(ValidationError) as exc_info:
+      GuessFileHeaderSchema(file_path="/path/to/test.csv")
+
+    errors = exc_info.value.errors()
+    assert len(errors) >= 2  # At least file_type and import_type are missing
+
+  def test_invalid_file_path(self):
+    with pytest.raises(ValidationError) as exc_info:
+      GuessFileHeaderSchema(file_path="", file_type="csv", import_type="local")
+
+    errors = exc_info.value.errors()
+    assert len(errors) == 1
+    assert "file_path" in str(errors[0]["loc"])
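Because the tests above rely on Pydantic's error reporting, here is a minimal sketch of what a failed direct validation looks like, assuming the schema is imported from desktop.lib.importer.schemas; note that Pydantic embeds the custom validator message inside msg:

from pydantic import ValidationError

from desktop.lib.importer.schemas import GuessFileMetadataSchema

try:
  GuessFileMetadataSchema(file_path="", import_type="local")
except ValidationError as e:
  for err in e.errors():
    # err["loc"] is ("file_path",) and err["msg"] contains
    # "File path cannot be empty or whitespace."
    print(err["loc"], err["msg"])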

+ 41 - 54
desktop/core/src/desktop/lib/importer/serializers.py

@@ -14,11 +14,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # limitations under the License.
-import os
 
 
+from pydantic import ValidationError
 from rest_framework import serializers
 from rest_framework import serializers
 
 
-from desktop.conf import IMPORTER
+from desktop.lib.importer.schemas import (
+  GuessFileHeaderSchema,
+  GuessFileMetadataSchema,
+  LocalFileUploadSchema,
+  PreviewFileSchema,
+  SqlTypeMapperSchema,
+)
 
 
 
 
 class LocalFileUploadSerializer(serializers.Serializer):
 class LocalFileUploadSerializer(serializers.Serializer):
@@ -33,20 +39,14 @@ class LocalFileUploadSerializer(serializers.Serializer):
 
 
   file = serializers.FileField(required=True, help_text="CSV or Excel file to upload and process")
   file = serializers.FileField(required=True, help_text="CSV or Excel file to upload and process")
 
 
-  def validate_file(self, value):
-    # Check if the file type is restricted
-    _, file_type = os.path.splitext(value.name)
-    restricted_extensions = IMPORTER.RESTRICT_LOCAL_FILE_EXTENSIONS.get()
-    if restricted_extensions and file_type.lower() in [ext.lower() for ext in restricted_extensions]:
-      raise serializers.ValidationError(f'Uploading files with type "{file_type}" is not allowed. Hue is configured to restrict this type.')
-
-    # Check file size
-    max_size = IMPORTER.MAX_LOCAL_FILE_SIZE_UPLOAD_LIMIT.get()
-    if value.size > max_size:
-      max_size_mib = max_size / (1024 * 1024)
-      raise serializers.ValidationError(f"File too large. Maximum file size is {max_size_mib:.0f} MiB.")
-
-    return value
+  def validate(self, data):
+    uploaded_file = data["file"]
+    schema_data = {"file": uploaded_file, "filename": uploaded_file.name, "filesize": uploaded_file.size}
+    try:
+      # The serializer now directly returns the validated Pydantic model instance.
+      return LocalFileUploadSchema.model_validate(schema_data)
+    except ValidationError as e:
+      raise serializers.ValidationError(e.errors())
 
 
 
 
 class GuessFileMetadataSerializer(serializers.Serializer):
 class GuessFileMetadataSerializer(serializers.Serializer):
@@ -64,6 +64,12 @@ class GuessFileMetadataSerializer(serializers.Serializer):
     choices=["local", "remote"], required=True, help_text="Whether the file is local or on a remote filesystem"
     choices=["local", "remote"], required=True, help_text="Whether the file is local or on a remote filesystem"
   )
   )
 
 
+  def validate(self, data):
+    try:
+      return GuessFileMetadataSchema.model_validate(data)
+    except ValidationError as e:
+      raise serializers.ValidationError(e.errors())
+
 
 
 class PreviewFileSerializer(serializers.Serializer):
 class PreviewFileSerializer(serializers.Serializer):
   """Serializer for file preview request validation.
   """Serializer for file preview request validation.
@@ -92,40 +98,18 @@ class PreviewFileSerializer(serializers.Serializer):
   sql_dialect = serializers.ChoiceField(
   sql_dialect = serializers.ChoiceField(
     choices=["hive", "impala", "trino", "phoenix", "sparksql"], required=True, help_text="SQL dialect for mapping column types"
     choices=["hive", "impala", "trino", "phoenix", "sparksql"], required=True, help_text="SQL dialect for mapping column types"
   )
   )
-
   has_header = serializers.BooleanField(required=True, help_text="Whether the file has a header row or not")
   has_header = serializers.BooleanField(required=True, help_text="Whether the file has a header row or not")
-
-  # Excel-specific fields
   sheet_name = serializers.CharField(required=False, help_text="Sheet name for Excel files")
   sheet_name = serializers.CharField(required=False, help_text="Sheet name for Excel files")
-
-  # Delimited file-specific fields
-  field_separator = serializers.CharField(required=False, help_text="Field separator character")
-  quote_char = serializers.CharField(required=False, help_text="Quote character")
-  record_separator = serializers.CharField(required=False, help_text="Record separator character")
+  field_separator = serializers.CharField(required=False, allow_null=True, help_text="Field separator character")
+  quote_char = serializers.CharField(required=False, allow_null=True, help_text="Quote character")
+  record_separator = serializers.CharField(required=False, allow_null=True, help_text="Record separator character")
 
 
   def validate(self, data):
   def validate(self, data):
-    """Validate the complete data set with interdependent field validation."""
-
-    if data.get("file_type") == "excel" and not data.get("sheet_name"):
-      raise serializers.ValidationError({"sheet_name": "Sheet name is required for Excel files."})
-
-    if data.get("file_type") in ["csv", "tsv", "delimiter_format"]:
-      if not data.get("field_separator"):
-        # If not provided, set default value based on file type
-        if data.get("file_type") == "csv":
-          data["field_separator"] = ","
-        elif data.get("file_type") == "tsv":
-          data["field_separator"] = "\t"
-        else:
-          raise serializers.ValidationError({"field_separator": "Field separator is required for delimited files"})
-
-      if not data.get("quote_char"):
-        data["quote_char"] = '"'  # Default quote character
-
-      if not data.get("record_separator"):
-        data["record_separator"] = "\n"  # Default record separator
-
-    return data
+    try:
+      # Pydantic will handle interdependent validation
+      return PreviewFileSchema.model_validate(data)
+    except ValidationError as e:
+      raise serializers.ValidationError(e.errors())
 
 
 
 
 class SqlTypeMapperSerializer(serializers.Serializer):
 class SqlTypeMapperSerializer(serializers.Serializer):
@@ -142,6 +126,12 @@ class SqlTypeMapperSerializer(serializers.Serializer):
     choices=["hive", "impala", "trino", "phoenix", "sparksql"], required=True, help_text="SQL dialect for mapping column types"
     choices=["hive", "impala", "trino", "phoenix", "sparksql"], required=True, help_text="SQL dialect for mapping column types"
   )
   )
 
 
+  def validate(self, data):
+    try:
+      return SqlTypeMapperSchema.model_validate(data)
+    except ValidationError as e:
+      raise serializers.ValidationError(e.errors())
+
 
 
 class GuessFileHeaderSerializer(serializers.Serializer):
 class GuessFileHeaderSerializer(serializers.Serializer):
   """Serializer for file header guessing request validation.
   """Serializer for file header guessing request validation.
@@ -162,14 +152,11 @@ class GuessFileHeaderSerializer(serializers.Serializer):
   import_type = serializers.ChoiceField(
   import_type = serializers.ChoiceField(
     choices=["local", "remote"], required=True, help_text="Whether the file is local or on a remote filesystem"
     choices=["local", "remote"], required=True, help_text="Whether the file is local or on a remote filesystem"
   )
   )
-
-  # Excel-specific fields
   sheet_name = serializers.CharField(required=False, help_text="Sheet name for Excel files")
   sheet_name = serializers.CharField(required=False, help_text="Sheet name for Excel files")
 
 
   def validate(self, data):
   def validate(self, data):
-    """Validate the complete data set with interdependent field validation."""
-
-    if data.get("file_type") == "excel" and not data.get("sheet_name"):
-      raise serializers.ValidationError({"sheet_name": "Sheet name is required for Excel files."})
-
-    return data
+    try:
+      # Pydantic will handle interdependent validation
+      return GuessFileHeaderSchema.model_validate(data)
+    except ValidationError as e:
+      raise serializers.ValidationError(e.errors())
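A minimal sketch of the new calling convention for these serializers, assuming request data shaped like the API views use; the file path is illustrative. Since validate() now returns the Pydantic model, callers use attribute access instead of dictionary lookups:

from desktop.lib.importer.serializers import GuessFileMetadataSerializer

serializer = GuessFileMetadataSerializer(data={"file_path": "/tmp/example.csv", "import_type": "local"})

if serializer.is_valid():
  params = serializer.validated_data  # a GuessFileMetadataSchema instance, not a dict
  print(params.file_path, params.import_type)
else:
  print(serializer.errors)  # Pydantic errors surfaced as a DRF ValidationError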

+ 231 - 36
desktop/core/src/desktop/lib/importer/serializers_tests.py

@@ -19,6 +19,13 @@
 from django.core.files.uploadedfile import SimpleUploadedFile
 from django.core.files.uploadedfile import SimpleUploadedFile
 
 
 from desktop.conf import IMPORTER
 from desktop.conf import IMPORTER
+from desktop.lib.importer.schemas import (
+  GuessFileHeaderSchema,
+  GuessFileMetadataSchema,
+  LocalFileUploadSchema,
+  PreviewFileSchema,
+  SqlTypeMapperSchema,
+)
 from desktop.lib.importer.serializers import (
 from desktop.lib.importer.serializers import (
   GuessFileHeaderSerializer,
   GuessFileHeaderSerializer,
   GuessFileMetadataSerializer,
   GuessFileMetadataSerializer,
@@ -40,7 +47,11 @@ class TestLocalFileUploadSerializer:
       serializer = LocalFileUploadSerializer(data={"file": test_file})
       serializer = LocalFileUploadSerializer(data={"file": test_file})
 
 
       assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
       assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
-      assert serializer.validated_data["file"] == test_file
+      # validated_data now returns a schema object
+      assert isinstance(serializer.validated_data, LocalFileUploadSchema)
+      assert serializer.validated_data.file == test_file
+      assert serializer.validated_data.filename == "test_file.csv"
+      assert serializer.validated_data.filesize == test_file.size
     finally:
     finally:
       for reset in resets:
       for reset in resets:
         reset()
         reset()
@@ -58,8 +69,10 @@ class TestLocalFileUploadSerializer:
       serializer = LocalFileUploadSerializer(data={"file": test_file})
       serializer = LocalFileUploadSerializer(data={"file": test_file})
 
 
       assert not serializer.is_valid()
       assert not serializer.is_valid()
-      assert "file" in serializer.errors
-      assert serializer.errors["file"][0] == 'Uploading files with type ".exe" is not allowed. Hue is configured to restrict this type.'
+      assert len(serializer.errors) > 0
+      # Pydantic reports errors in a different structure, so check the message text instead of a specific field key
+      error_messages = str(serializer.errors)
+      assert "not allowed" in error_messages or "restrict" in error_messages.lower()
     finally:
     finally:
       for reset in resets:
       for reset in resets:
         reset()
         reset()
@@ -77,8 +90,10 @@ class TestLocalFileUploadSerializer:
       serializer = LocalFileUploadSerializer(data={"file": test_file})
       serializer = LocalFileUploadSerializer(data={"file": test_file})
 
 
       assert not serializer.is_valid()
       assert not serializer.is_valid()
-      assert "file" in serializer.errors
-      assert serializer.errors["file"][0] == "File too large. Maximum file size is 0 MiB."  # 10 bytes is very less than 1 MiB
+      assert len(serializer.errors) > 0
+      # Check for file size error
+      error_messages = str(serializer.errors)
+      assert "too large" in error_messages.lower() or "maximum" in error_messages.lower()
     finally:
     finally:
       for reset in resets:
       for reset in resets:
         reset()
         reset()
@@ -93,50 +108,61 @@ class TestLocalFileUploadSerializer:
 
 
 class TestGuessFileMetadataSerializer:
 class TestGuessFileMetadataSerializer:
   def test_valid_data(self):
   def test_valid_data(self):
-    # Test with local import type
+    # Scenario 1: Test with local import type
     local_valid_data = {"file_path": "/path/to/file.csv", "import_type": "local"}
     local_valid_data = {"file_path": "/path/to/file.csv", "import_type": "local"}
 
 
     serializer = GuessFileMetadataSerializer(data=local_valid_data)
     serializer = GuessFileMetadataSerializer(data=local_valid_data)
 
 
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
-    assert serializer.validated_data == local_valid_data
+    assert isinstance(serializer.validated_data, GuessFileMetadataSchema)
+    assert serializer.validated_data.file_path == "/path/to/file.csv"
+    assert serializer.validated_data.import_type == "local"
 
 
-    # Test with remote import type
+    # Scenario 2: Test with remote import type
     remote_valid_data = {"file_path": "s3a://bucket/user/test_user/file.csv", "import_type": "remote"}
     remote_valid_data = {"file_path": "s3a://bucket/user/test_user/file.csv", "import_type": "remote"}
 
 
     serializer = GuessFileMetadataSerializer(data=remote_valid_data)
     serializer = GuessFileMetadataSerializer(data=remote_valid_data)
 
 
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
-    assert serializer.validated_data == remote_valid_data
+    assert isinstance(serializer.validated_data, GuessFileMetadataSchema)
+    assert serializer.validated_data.file_path == "s3a://bucket/user/test_user/file.csv"
+    assert serializer.validated_data.import_type == "remote"
 
 
   def test_missing_required_fields(self):
   def test_missing_required_fields(self):
-    # Test missing file_path
+    # Scenario 1: Test missing file_path
     invalid_data = {"import_type": "local"}
     invalid_data = {"import_type": "local"}
 
 
     serializer = GuessFileMetadataSerializer(data=invalid_data)
     serializer = GuessFileMetadataSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "file_path" in serializer.errors
+    assert len(serializer.errors) > 0
+    # Check for file_path error in serializer errors
+    error_messages = str(serializer.errors)
+    assert "file_path" in error_messages
 
 
-    # Test missing import_type
+    # Scenario 2: Test missing import_type
     invalid_data = {"file_path": "/path/to/file.csv"}
     invalid_data = {"file_path": "/path/to/file.csv"}
 
 
     serializer = GuessFileMetadataSerializer(data=invalid_data)
     serializer = GuessFileMetadataSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "import_type" in serializer.errors
+    assert len(serializer.errors) > 0
+    # Check for import_type error in serializer errors
+    error_messages = str(serializer.errors)
+    assert "import_type" in error_messages
 
 
   def test_invalid_import_type(self):
   def test_invalid_import_type(self):
     invalid_data = {
     invalid_data = {
       "file_path": "/path/to/file.csv",
       "file_path": "/path/to/file.csv",
-      "import_type": "invalid_type",  # Not one of 'local' or 'remote'
+      "import_type": "invalid_type",  # Not one of 'local' or 'remote' (invalid_type)
     }
     }
 
 
     serializer = GuessFileMetadataSerializer(data=invalid_data)
     serializer = GuessFileMetadataSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "import_type" in serializer.errors
-    assert serializer.errors["import_type"][0] == '"invalid_type" is not a valid choice.'
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "import_type" in error_messages
 
 
 
 
 class TestPreviewFileSerializer:
 class TestPreviewFileSerializer:
@@ -153,9 +179,13 @@ class TestPreviewFileSerializer:
     serializer = PreviewFileSerializer(data=valid_data)
     serializer = PreviewFileSerializer(data=valid_data)
 
 
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    # validated_data now returns a schema object
+    assert isinstance(serializer.validated_data, PreviewFileSchema)
+    assert serializer.validated_data.file_path == "/path/to/file.csv"
+    assert serializer.validated_data.file_type == "csv"
     # Check that default values are set for quote_char and record_separator
     # Check that default values are set for quote_char and record_separator
-    assert serializer.validated_data["quote_char"] == '"'
-    assert serializer.validated_data["record_separator"] == "\n"
+    assert serializer.validated_data.quote_char == '"'
+    assert serializer.validated_data.record_separator == "\n"
 
 
   def test_valid_excel_data(self):
   def test_valid_excel_data(self):
     valid_data = {
     valid_data = {
@@ -170,6 +200,9 @@ class TestPreviewFileSerializer:
     serializer = PreviewFileSerializer(data=valid_data)
     serializer = PreviewFileSerializer(data=valid_data)
 
 
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    assert isinstance(serializer.validated_data, PreviewFileSchema)
+    assert serializer.validated_data.file_type == "excel"
+    assert serializer.validated_data.sheet_name == "Sheet1"
 
 
   def test_missing_required_fields(self):
   def test_missing_required_fields(self):
     # Test with minimal data
     # Test with minimal data
@@ -196,8 +229,9 @@ class TestPreviewFileSerializer:
     serializer = PreviewFileSerializer(data=invalid_data)
     serializer = PreviewFileSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "file_type" in serializer.errors
-    assert serializer.errors["file_type"][0] == '"json" is not a valid choice.'
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "file_type" in error_messages
 
 
   def test_excel_without_sheet_name(self):
   def test_excel_without_sheet_name(self):
     invalid_data = {
     invalid_data = {
@@ -212,8 +246,9 @@ class TestPreviewFileSerializer:
     serializer = PreviewFileSerializer(data=invalid_data)
     serializer = PreviewFileSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "sheet_name" in serializer.errors
-    assert serializer.errors["sheet_name"][0] == "Sheet name is required for Excel files."
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "Sheet name is required for Excel files" in error_messages
 
 
   def test_delimited_without_field_separator(self):
   def test_delimited_without_field_separator(self):
     # For delimiter_format type (not csv/tsv) field separator is required
     # For delimiter_format type (not csv/tsv) field separator is required
@@ -228,7 +263,9 @@ class TestPreviewFileSerializer:
 
 
     serializer = PreviewFileSerializer(data=invalid_data)
     serializer = PreviewFileSerializer(data=invalid_data)
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "field_separator" in serializer.errors
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "Field separator is required for delimited files" in error_messages
 
 
   def test_default_separators_by_file_type(self):
   def test_default_separators_by_file_type(self):
     # For CSV, field_separator should default to ','
     # For CSV, field_separator should default to ','
@@ -237,7 +274,8 @@ class TestPreviewFileSerializer:
     serializer = PreviewFileSerializer(data=csv_data)
     serializer = PreviewFileSerializer(data=csv_data)
 
 
     assert serializer.is_valid(), f"CSV serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"CSV serializer validation failed: {serializer.errors}"
-    assert serializer.validated_data["field_separator"] == ","
+    assert isinstance(serializer.validated_data, PreviewFileSchema)
+    assert serializer.validated_data.field_separator == ","
 
 
     # For TSV, field_separator should default to '\t'
     # For TSV, field_separator should default to '\t'
     tsv_data = {"file_path": "/path/to/file.tsv", "file_type": "tsv", "import_type": "local", "sql_dialect": "hive", "has_header": True}
     tsv_data = {"file_path": "/path/to/file.tsv", "file_type": "tsv", "import_type": "local", "sql_dialect": "hive", "has_header": True}
@@ -245,7 +283,153 @@ class TestPreviewFileSerializer:
     serializer = PreviewFileSerializer(data=tsv_data)
     serializer = PreviewFileSerializer(data=tsv_data)
 
 
     assert serializer.is_valid(), f"TSV serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"TSV serializer validation failed: {serializer.errors}"
-    assert serializer.validated_data["field_separator"] == "\t"
+    assert isinstance(serializer.validated_data, PreviewFileSchema)
+    assert serializer.validated_data.field_separator == "\t"
+
+  def test_decode_escape_sequences_field_separator(self):
+    # Test tab escape sequence
+    data = {
+      "file_path": "/path/to/file.csv",
+      "file_type": "delimiter_format",
+      "import_type": "local",
+      "sql_dialect": "hive",
+      "has_header": True,
+      "field_separator": "\\t",  # Tab character as escape sequence
+    }
+
+    serializer = PreviewFileSerializer(data=data)
+
+    assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    assert serializer.validated_data.field_separator == "\t"  # Should be decoded to actual tab
+
+    # Test newline escape sequence
+    data["field_separator"] = "\\n"
+    serializer = PreviewFileSerializer(data=data)
+    assert serializer.is_valid()
+    assert serializer.validated_data.field_separator == "\n"
+
+    # Test backslash escape sequence
+    data["field_separator"] = "\\\\"
+    serializer = PreviewFileSerializer(data=data)
+    assert serializer.is_valid()
+    assert serializer.validated_data.field_separator == "\\"
+
+  def test_decode_escape_sequences_quote_char(self):
+    # Test double quote escape sequence
+    data = {
+      "file_path": "/path/to/file.csv",
+      "file_type": "csv",
+      "import_type": "local",
+      "sql_dialect": "hive",
+      "has_header": True,
+      "quote_char": '\\"',  # Escaped double quote
+    }
+
+    serializer = PreviewFileSerializer(data=data)
+
+    assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    assert serializer.validated_data.quote_char == '"'
+
+    # Test single quote escape sequence
+    data["quote_char"] = "\\'"
+    serializer = PreviewFileSerializer(data=data)
+    assert serializer.is_valid()
+    assert serializer.validated_data.quote_char == "'"
+
+    # Test Unicode escape sequence
+    data["quote_char"] = "\\u0022"  # Unicode for double quote
+    serializer = PreviewFileSerializer(data=data)
+    assert serializer.is_valid()
+    assert serializer.validated_data.quote_char == '"'
+
+  def test_decode_escape_sequences_record_separator(self):
+    # Test newline escape sequence
+    data = {
+      "file_path": "/path/to/file.csv",
+      "file_type": "csv",
+      "import_type": "local",
+      "sql_dialect": "hive",
+      "has_header": True,
+      "record_separator": "\\n",
+    }
+
+    serializer = PreviewFileSerializer(data=data)
+
+    assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    assert serializer.validated_data.record_separator == "\n"
+
+    # Test carriage return + newline (should be normalized to just newline)
+    data["record_separator"] = "\\r\\n"
+    serializer = PreviewFileSerializer(data=data)
+    assert serializer.is_valid()
+    assert serializer.validated_data.record_separator == "\n"  # Should be normalized
+
+    # Test just carriage return
+    data["record_separator"] = "\\r"
+    serializer = PreviewFileSerializer(data=data)
+    assert serializer.is_valid()
+    assert serializer.validated_data.record_separator == "\r"
+
+  def test_decode_escape_sequences_with_none_values(self):
+    # Test that None values are handled and CSV defaults are applied
+    data = {
+      "file_path": "/path/to/file.csv",
+      "file_type": "csv",
+      "import_type": "local",
+      "sql_dialect": "hive",
+      "has_header": True,
+      "field_separator": None,
+      "quote_char": None,
+      "record_separator": None,
+    }
+
+    serializer = PreviewFileSerializer(data=data)
+
+    assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    # CSV defaults should be applied after None is processed
+    assert serializer.validated_data.field_separator == ","
+    assert serializer.validated_data.quote_char == '"'
+    assert serializer.validated_data.record_separator == "\n"
+
+  def test_decode_escape_sequences_with_complex_patterns(self):
+    # Test multiple escape sequences in a single value
+    data = {
+      "file_path": "/path/to/file.csv",
+      "file_type": "delimiter_format",
+      "import_type": "local",
+      "sql_dialect": "hive",
+      "has_header": True,
+      "field_separator": "\\t\\t",  # Double tab
+      "quote_char": "\\x22",  # Hex escape for double quote
+      "record_separator": "\\x0A",  # Hex escape for newline
+    }
+
+    serializer = PreviewFileSerializer(data=data)
+
+    assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    assert serializer.validated_data.field_separator == "\t\t"
+    assert serializer.validated_data.quote_char == '"'
+    assert serializer.validated_data.record_separator == "\n"
+
+  def test_decode_escape_sequences_preserves_regular_strings(self):
+    # Test that regular strings without escape sequences are preserved
+    data = {
+      "file_path": "/path/to/file.csv",
+      "file_type": "delimiter_format",
+      "import_type": "local",
+      "sql_dialect": "hive",
+      "has_header": True,
+      "field_separator": "|",
+      "quote_char": "'",
+      "record_separator": "###",
+    }
+
+    serializer = PreviewFileSerializer(data=data)
+
+    assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    assert serializer.validated_data.field_separator == "|"
+    assert serializer.validated_data.quote_char == "'"
+    assert serializer.validated_data.record_separator == "###"
 
 
 
 
 class TestSqlTypeMapperSerializer:
 class TestSqlTypeMapperSerializer:
@@ -256,7 +440,8 @@ class TestSqlTypeMapperSerializer:
       serializer = SqlTypeMapperSerializer(data=valid_data)
       serializer = SqlTypeMapperSerializer(data=valid_data)
 
 
       assert serializer.is_valid(), f"Failed for dialect '{dialect}': {serializer.errors}"
       assert serializer.is_valid(), f"Failed for dialect '{dialect}': {serializer.errors}"
-      assert serializer.validated_data["sql_dialect"] == dialect
+      assert isinstance(serializer.validated_data, SqlTypeMapperSchema)
+      assert serializer.validated_data.sql_dialect == dialect
 
 
   def test_invalid_sql_dialect(self):
   def test_invalid_sql_dialect(self):
     invalid_data = {"sql_dialect": "invalid_dialect"}
     invalid_data = {"sql_dialect": "invalid_dialect"}
@@ -264,8 +449,9 @@ class TestSqlTypeMapperSerializer:
     serializer = SqlTypeMapperSerializer(data=invalid_data)
     serializer = SqlTypeMapperSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "sql_dialect" in serializer.errors
-    assert serializer.errors["sql_dialect"][0] == '"invalid_dialect" is not a valid choice.'
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "sql_dialect" in error_messages
 
 
   def test_missing_sql_dialect(self):
   def test_missing_sql_dialect(self):
     invalid_data = {}  # Empty data
     invalid_data = {}  # Empty data
@@ -284,7 +470,10 @@ class TestGuessFileHeaderSerializer:
     serializer = GuessFileHeaderSerializer(data=valid_data)
     serializer = GuessFileHeaderSerializer(data=valid_data)
 
 
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
-    assert serializer.validated_data == valid_data
+    assert isinstance(serializer.validated_data, GuessFileHeaderSchema)
+    assert serializer.validated_data.file_path == "/path/to/file.csv"
+    assert serializer.validated_data.file_type == "csv"
+    assert serializer.validated_data.import_type == "local"
 
 
   def test_valid_data_excel(self):
   def test_valid_data_excel(self):
     valid_data = {"file_path": "/path/to/file.xlsx", "file_type": "excel", "import_type": "local", "sheet_name": "Sheet1"}
     valid_data = {"file_path": "/path/to/file.xlsx", "file_type": "excel", "import_type": "local", "sheet_name": "Sheet1"}
@@ -292,7 +481,9 @@ class TestGuessFileHeaderSerializer:
     serializer = GuessFileHeaderSerializer(data=valid_data)
     serializer = GuessFileHeaderSerializer(data=valid_data)
 
 
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
-    assert serializer.validated_data == valid_data
+    assert isinstance(serializer.validated_data, GuessFileHeaderSchema)
+    assert serializer.validated_data.file_type == "excel"
+    assert serializer.validated_data.sheet_name == "Sheet1"
 
 
   def test_missing_required_fields(self):
   def test_missing_required_fields(self):
     # Missing file_path
     # Missing file_path
@@ -330,8 +521,9 @@ class TestGuessFileHeaderSerializer:
     serializer = GuessFileHeaderSerializer(data=invalid_data)
     serializer = GuessFileHeaderSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "sheet_name" in serializer.errors
-    assert serializer.errors["sheet_name"][0] == "Sheet name is required for Excel files."
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "Sheet name is required for Excel files" in error_messages
 
 
   def test_non_excel_with_sheet_name(self):
   def test_non_excel_with_sheet_name(self):
     # This should pass, as sheet_name is optional for non-Excel files
     # This should pass, as sheet_name is optional for non-Excel files
@@ -345,6 +537,7 @@ class TestGuessFileHeaderSerializer:
     serializer = GuessFileHeaderSerializer(data=valid_data)
     serializer = GuessFileHeaderSerializer(data=valid_data)
 
 
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
     assert serializer.is_valid(), f"Serializer validation failed: {serializer.errors}"
+    assert isinstance(serializer.validated_data, GuessFileHeaderSchema)
 
 
   def test_invalid_file_type(self):
   def test_invalid_file_type(self):
     invalid_data = {
     invalid_data = {
@@ -356,8 +549,9 @@ class TestGuessFileHeaderSerializer:
     serializer = GuessFileHeaderSerializer(data=invalid_data)
     serializer = GuessFileHeaderSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "file_type" in serializer.errors
-    assert serializer.errors["file_type"][0] == '"json" is not a valid choice.'
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "file_type" in error_messages
 
 
   def test_invalid_import_type(self):
   def test_invalid_import_type(self):
     invalid_data = {
     invalid_data = {
@@ -369,5 +563,6 @@ class TestGuessFileHeaderSerializer:
     serializer = GuessFileHeaderSerializer(data=invalid_data)
     serializer = GuessFileHeaderSerializer(data=invalid_data)
 
 
     assert not serializer.is_valid()
     assert not serializer.is_valid()
-    assert "import_type" in serializer.errors
-    assert serializer.errors["import_type"][0] == '"invalid_type" is not a valid choice.'
+    assert len(serializer.errors) > 0
+    error_messages = str(serializer.errors)
+    assert "import_type" in error_messages