@@ -15,19 +15,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import re
 import csv
 import json
-import uuid
 import logging
+import re
 import tempfile
-import urllib.error
-from builtins import zip
-from io import StringIO as string_io
-from urllib.parse import unquote as urllib_unquote, urlparse
+import uuid
+from urllib.parse import unquote as urllib_unquote
 
 import pandas as pd
-import openpyxl
 from django.urls import reverse
 from django.utils.translation import gettext as _
 from django.views.decorators.http import require_POST
@@ -41,21 +37,20 @@ from desktop.models import Document2
 from filebrowser.forms import UploadLocalFileForm
 from indexer.controller import CollectionManagerController
 from indexer.fields import Field, guess_field_type_from_samples
-from indexer.file_format import HiveFormat
-from indexer.indexers.base import get_api
+from indexer.file_format import get_file_format_instance, HiveFormat
 from indexer.indexers.envelope import _envelope_job
 from indexer.indexers.flink_sql import FlinkIndexer
 from indexer.indexers.flume import FlumeIndexer
-from indexer.indexers.morphline import MorphlineIndexer, _create_solr_collection
+from indexer.indexers.morphline import _create_solr_collection, MorphlineIndexer
 from indexer.indexers.phoenix_sql import PhoenixIndexer
 from indexer.indexers.rdbms import _get_api, run_sqoop
 from indexer.indexers.sql import _create_database, _create_table, _create_table_from_local
 from indexer.models import _save_pipeline
 from indexer.solr_client import MAX_UPLOAD_SIZE, SolrClient
 from kafka.kafka_api import get_topic_data, get_topics
-from notebook.connectors.base import Notebook, get_api
+from notebook.connectors.base import get_api, Notebook
 from notebook.decorators import api_error_handler
-from notebook.models import MockedDjangoRequest, escape_rows
+from notebook.models import escape_rows, MockedDjangoRequest
 
 LOG = logging.getLogger()
 
@@ -67,17 +62,22 @@ except ImportError:
 
 try:
   from beeswax.server import dbms
-except ImportError as e:
+except ImportError:
   LOG.warning('Hive and HiveServer2 interfaces are not enabled')
 
 try:
-  from filebrowser.views import detect_parquet
-except ImportError as e:
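+  # detect_parquet is a function on the views module, not a submodule, so probe for the attribute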
+  from filebrowser import views as filebrowser_views
+  if not hasattr(filebrowser_views, 'detect_parquet'):
+    LOG.warning('detect_parquet function not available in filebrowser.views')
+except ImportError:
   LOG.warning('File Browser interface is not enabled')
 
 try:
-  from search.conf import SOLR_URL
-except ImportError as e:
+  import search.conf
+  if not hasattr(search.conf, 'SOLR_URL'):
+    LOG.warning('SOLR_URL not available in search.conf')
+except ImportError:
   LOG.warning('Solr Search interface is not enabled')
 
 
@@ -137,17 +137,15 @@ def guess_format(request):
       path = excel_to_csv_file_name_change(path)
       request.fs.create(path, overwrite=True, data=_csv_data)
 
-    indexer = MorphlineIndexer(request.user, request.fs)
     if not request.fs.isfile(path):
      raise PopupException(_('Path %(path)s is not a file') % file_format)
 
     stream = request.fs.open(path)
-    format_ = indexer.guess_format({
-      "file": {
-        "stream": stream,
-        "name": path
-      }
-    })
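+    # Build a format instance from the open stream and derive the format from it directly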
+    format_ = get_file_format_instance({
+      "stream": stream,
+      "name": path
+    }).get_format()
     _convert_format(format_)
 
     if file_format["path"][-3:] == 'xls' or file_format["path"][-4:] == 'xlsx':
@@ -277,7 +275,6 @@ def guess_field_types(request):
     }
 
   elif file_format['inputFormat'] == 'file':
-    indexer = MorphlineIndexer(request.user, request.fs)
     path = file_format["path"]
 
     if path[-3:] == 'xls' or path[-4:] == 'xlsx':
@@ -288,13 +285,9 @@ def guess_field_types(request):
       stream.seek(0)
     _convert_format(file_format["format"], inverse=True)
 
-    format_ = indexer.guess_field_types({
-      "file": {
-        "stream": stream,
-        "name": path
-      },
-      "format": file_format['format']
-    })
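+    # get_file_format_instance can return None for an unrecognized format, so fall back to an empty column list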
+    file_format_instance = get_file_format_instance({"stream": stream, "name": path}, file_format['format'])
+    format_ = file_format_instance.get_fields() if file_format_instance else {'columns': []}
 
     # Note: Would also need to set charset to table (only supported in Hive)
     if 'sample' in format_ and format_['sample']:
@@ -361,7 +354,6 @@ def guess_field_types(request):
     )
 
     kafkaFieldNames = [col['name'] for col in data['full_headers']]
-    kafkaFieldTypes = [col['type'] for col in data['full_headers']]
     topics_data = data['rows']
 
     format_ = {
@@ -634,7 +626,8 @@ def _small_indexing(user, fs, client, source, destination, index_name):
         rows=rows,
         start_over=start_over
       )
-      rows = searcher.update_data_from_hive(
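+      # Called for its side effect of indexing the fetched rows; the return value is unused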
+      searcher.update_data_from_hive(
         index_name,
         columns,
         fetch_handle=fetch_handle,
|