
[libs] Upgrade phoenixdb to 1.1.0.dev0 for Python 2

Romain Rigaux, 4 years ago
Parent
Current commit
8f358c6d4b
52 files changed, with 1856 insertions and 303 deletions
  1. desktop/core/ext-py/phoenixdb-1.0.1/PKG-INFO (+0 -164)
  2. desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/meta.py (+0 -96)
  3. desktop/core/ext-py/phoenixdb-1.0.1/setup.cfg (+0 -24)
  4. desktop/core/ext-py/phoenixdb-1.1.0.dev0/.gitignore (+13 -0)
  5. desktop/core/ext-py/phoenixdb-1.1.0.dev0/.gitlab-ci.yml (+149 -0)
  6. desktop/core/ext-py/phoenixdb-1.1.0.dev0/Dockerfile (+24 -0)
  7. desktop/core/ext-py/phoenixdb-1.1.0.dev0/Dockerfile-pqs (+24 -0)
  8. desktop/core/ext-py/phoenixdb-1.1.0.dev0/LICENSE (+0 -0)
  9. desktop/core/ext-py/phoenixdb-1.1.0.dev0/NEWS.rst (+70 -0)
  10. desktop/core/ext-py/phoenixdb-1.1.0.dev0/NOTICE (+0 -0)
  11. desktop/core/ext-py/phoenixdb-1.1.0.dev0/README.rst (+5 -5)
  12. desktop/core/ext-py/phoenixdb-1.1.0.dev0/RELEASING.rst (+12 -0)
  13. desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/build-env/Dockerfile (+22 -0)
  14. desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/phoenix/Dockerfile (+48 -0)
  15. desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/phoenix/docker-entrypoint.sh (+39 -0)
  16. desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/phoenix/hbase-site.xml (+29 -0)
  17. desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/cache-apache-project-artifact.sh (+139 -0)
  18. desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/make_rc.sh (+96 -0)
  19. desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/rat-excludes.txt (+4 -0)
  20. desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/run-source-ratcheck.sh (+44 -0)
  21. desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/Makefile (+207 -0)
  22. desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/api.rst (+46 -0)
  23. desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/conf.py (+302 -0)
  24. desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/index.rst (+41 -0)
  25. desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/versions.rst (+19 -0)
  26. desktop/core/ext-py/phoenixdb-1.1.0.dev0/examples/basic.py (+27 -0)
  27. desktop/core/ext-py/phoenixdb-1.1.0.dev0/gen-protobuf.sh (+59 -0)
  28. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/__init__.py (+0 -0)
  29. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/__init__.py (+0 -0)
  30. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/client.py (+10 -0)
  31. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/__init__.py (+0 -0)
  32. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/common_pb2.py (+0 -0)
  33. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/requests_pb2.py (+0 -0)
  34. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/responses_pb2.py (+0 -0)
  35. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/connection.py (+0 -0)
  36. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/cursor.py (+15 -0)
  37. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/errors.py (+0 -0)
  38. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/meta.py (+210 -0)
  39. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/sqlalchemy_phoenix.py (+36 -10)
  40. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/__init__.py (+0 -0)
  41. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/dbapi20.py (+0 -0)
  42. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_avatica.py (+0 -0)
  43. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_db.py (+60 -1)
  44. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_dbapi20.py (+0 -0)
  45. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_errors.py (+0 -0)
  46. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_sqlalchemy.py (+21 -1)
  47. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_types.py (+2 -0)
  48. desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/types.py (+2 -1)
  49. desktop/core/ext-py/phoenixdb-1.1.0.dev0/requirements.txt (+21 -0)
  50. desktop/core/ext-py/phoenixdb-1.1.0.dev0/setup.cfg (+34 -0)
  51. desktop/core/ext-py/phoenixdb-1.1.0.dev0/setup.py (+1 -1)
  52. desktop/core/ext-py/phoenixdb-1.1.0.dev0/tox.ini (+25 -0)

+ 0 - 164
desktop/core/ext-py/phoenixdb-1.0.1/PKG-INFO

@@ -1,164 +0,0 @@
-Metadata-Version: 2.1
-Name: phoenixdb
-Version: 1.0.1
-Summary: Phoenix database adapter for Python
-Home-page: http://phoenix.apache.org/python.html
-Author: Apache Software Foundation
-Author-email: dev@phoenix.apache.org
-License: Apache 2
-Description: Phoenix database adapter for Python
-        ===================================
-        
-        ``phoenixdb`` is a Python library for accessing 
-        `Apache Phoenix <http://phoenix.apache.org/>`_
-        using the
-        `remote query server <http://phoenix.apache.org/server.html>`_.
-        This library implements the
-        standard `DB API 2.0 <https://www.python.org/dev/peps/pep-0249/>`_ interface and a
-        subset of `SQLAlchemy <https://www.sqlalchemy.org/>`_, either of which should be familiar
-        to most Python programmers.
-        
-        Installation
-        ------------
-        
-        The source code is part of the phoenix-queryserver source distribution.
-        You can download it from <https://phoenix.apache.org/>, or get the latest development version
-        from <https://github.com/apache/phoenix-queryserver>
-        
-        Extract the archive and then install it manually::
-        
-            cd /path/to/phoenix-queryserver-x.y.z/python/phoenixdb
-            python setup.py install
-        
-        Usage
-        -----
-        
-        The library implements the standard DB API 2.0 interface, so it can be
-        used the same way you would use any other SQL database from Python, for example::
-        
-            import phoenixdb
-            import phoenixdb.cursor
-        
-            database_url = 'http://localhost:8765/'
-            conn = phoenixdb.connect(database_url, autocommit=True)
-        
-            cursor = conn.cursor()
-            cursor.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username VARCHAR)")
-            cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
-            cursor.execute("SELECT * FROM users")
-            print(cursor.fetchall())
-        
-            cursor = conn.cursor(cursor_factory=phoenixdb.cursor.DictCursor)
-            cursor.execute("SELECT * FROM users WHERE id=1")
-            print(cursor.fetchone()['USERNAME'])
-        
-        
-        Setting up a development environment
-        ------------------------------------
-        
-        If you want to quickly try out the included examples, you can set up a
-        local `virtualenv <https://virtualenv.pypa.io/en/latest/>`_ with all the
-        necessary requirements::
-        
-            virtualenv e
-            source e/bin/activate
-            pip install -r requirements.txt
-            python setup.py develop
-        
-        You can start a Phoenix QueryServer instance on http://localhost:8765 for testing by running
-        the following command in the phoenix-queryserver directory::
-        
-            mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
-            -Dit.test=QueryServerBasicsIT\#startLocalPQS \
-            -Ddo.not.randomize.pqs.port=true -Dstart.unsecure.pqs=true
-        
-        You can start a secure (https+kerberos) Phoenix QueryServer instance on https://localhost:8765
-        for testing by running the following command in the phoenix-queryserver directory::
-        
-            mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
-            -Dit.test=SecureQueryServerPhoenixDBIT\#startLocalPQS \
-            -Ddo.not.randomize.pqs.port=true -Dstart.secure.pqs=true
-        
-        this will also create a shell script in phoenix-queryserver-it/target/krb_setup.sh, that you can use to set
-        up the environment for the tests.
-        
-        If you want to use the library without installing the phoenixdb library, you can use
-        the `PYTHONPATH` environment variable to point to the library directly::
-        
-            cd $PHOENIX_HOME/python
-            python setup.py build
-            cd ~/my_project
-            PYTHONPATH=$PHOENIX_HOME/build/lib python my_app.py
-        
-        Don't forget to run flake8 on your changes.
-        
-        Running the test suite
-        ----------------------
-        
-        The library comes with a test suite for testing Python DB API 2.0 compliance and
-        various Phoenix-specific features. In order to run the test suite, you need a
-        working Phoenix database and set the ``PHOENIXDB_TEST_DB_URL`` environment variable::
-        
-            export PHOENIXDB_TEST_DB_URL='http://localhost:8765/'
-            nosetests
-        
-        If you use a secure PQS server, you can set the connection parameters via the following environment
-        variables:
-        
-        - PHOENIXDB_TEST_DB_TRUSTSTORE
-        - PHOENIXDB_TEST_DB_AUTHENTICATION
-        - PHOENIXDB_TEST_DB_AVATICA_USER
-        - PHOENIXDB_TEST_DB_AVATICA_PASSWORD
-        
-        Similarly, tox can be used to run the test suite against multiple Python versions::
-        
-            pyenv install 3.5.5
-            pyenv install 3.6.4
-            pyenv install 2.7.14
-            pyenv global 2.7.14 3.5.5 3.6.4
-            PHOENIXDB_TEST_DB_URL='http://localhost:8765' tox
-        
-        You can use tox and docker to run the tests on all supported python versions without installing the
-        environments locally::
-        
-            docker build -t toxtest .
-            docker run --rm  -v `pwd`:/src toxtest
-        
-        You can also run the test suite from maven as part of the Java build by setting the 
-        run.full.python.testsuite property. You DO NOT need to set the PHOENIXDB_* environment variables,
-        maven will set them up for you. The output of the test run will be saved in
-        phoenix-queryserver/phoenix-queryserver-it/target/python-stdout.log and python-stderr.log::
-        
-            mvn clean verify -Drun.full.python.testsuite=true
-        
-        Known issues
-        ------------
-        
-        - TIME and DATE columns in Phoenix are stored as full timestamps with a millisecond accuracy,
-          but the remote protocol only exposes the time (hour/minute/second) or date (year/month/day)
-          parts of the columns. (`CALCITE-797 <https://issues.apache.org/jira/browse/CALCITE-797>`_, `CALCITE-798 <https://issues.apache.org/jira/browse/CALCITE-798>`_)
-        - TIMESTAMP columns in Phoenix are stored with a nanosecond accuracy, but the remote protocol truncates them to milliseconds. (`CALCITE-796 <https://issues.apache.org/jira/browse/CALCITE-796>`_)
-        
-        
-        SQLAlchemy feature support
-        --------------------------
-        
-        SQLAlchemy has a wide breadth of API, ranging from basic SQL commands to object-relational mapping support.
-        
-        Today, python-phoenixdb only supports the following subset of the complete SQLAlchemy API:
-        
-        - `Textual SQL <https://docs.sqlalchemy.org/en/13/core/tutorial.html#using-textual-sql>`_
-        
-        All other API should be considered not implemented.
-        
-Platform: UNKNOWN
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Provides-Extra: SQLAlchemy
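
The removed PKG-INFO above already shows DB API 2.0 usage, but it only names the SQLAlchemy "Textual SQL" subset without showing it. A minimal sketch of that subset, assuming the dialect shipped in sqlalchemy_phoenix.py is registered under the name ``phoenix`` and takes the Query Server address in a ``phoenix://host:port/`` URL (both details are assumptions, not confirmed by this diff)::

    from sqlalchemy import create_engine, text

    # Assumed URL form; point it at your Phoenix Query Server.
    engine = create_engine('phoenix://localhost:8765/')

    with engine.connect() as conn:
        # Textual SQL only -- no ORM or expression-language support.
        rows = conn.execute(text("SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 5"))
        for row in rows:
            print(row)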

+ 0 - 96
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/meta.py

@@ -1,96 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import logging
-
-from phoenixdb.errors import ProgrammingError
-from phoenixdb.cursor import DictCursor
-
-
-__all__ = ['Meta']
-
-logger = logging.getLogger(__name__)
-
-
-class Meta(object):
-    """Database meta for querying MetaData
-    """
-
-    def __init__(self, connection):
-        self._connection = connection
-
-    def get_catalogs(self):
-        if self._connection._closed:
-            raise ProgrammingError('The connection is already closed.')
-        result = self._connection._client.get_catalogs(self._connection._id)
-        with DictCursor(self._connection) as cursor:
-            cursor._process_result(result)
-            return cursor.fetchall()
-
-    def get_schemas(self, catalog=None, schemaPattern=None):
-        if self._connection._closed:
-            raise ProgrammingError('The connection is already closed.')
-        result = self._connection._client.get_schemas(self._connection._id, catalog, schemaPattern)
-        with DictCursor(self._connection) as cursor:
-            cursor._process_result(result)
-            return self._fix_default(cursor.fetchall(), schemaPattern=schemaPattern)
-
-    def get_tables(self, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
-        if self._connection._closed:
-            raise ProgrammingError('The connection is already closed.')
-        result = self._connection._client.get_tables(
-            self._connection._id, catalog, schemaPattern, tableNamePattern, typeList=typeList)
-        with DictCursor(self._connection) as cursor:
-            cursor._process_result(result)
-            return self._fix_default(cursor.fetchall(), catalog, schemaPattern)
-
-    def get_columns(self, catalog=None, schemaPattern=None, tableNamePattern=None,
-                    columnNamePattern=None):
-        if self._connection._closed:
-            raise ProgrammingError('The connection is already closed.')
-        result = self._connection._client.get_columns(
-            self._connection._id, catalog, schemaPattern, tableNamePattern, columnNamePattern)
-        with DictCursor(self._connection) as cursor:
-            cursor._process_result(result)
-            return self._fix_default(cursor.fetchall(), catalog, schemaPattern)
-
-    def get_table_types(self):
-        if self._connection._closed:
-            raise ProgrammingError('The connection is already closed.')
-        result = self._connection._client.get_table_types(self._connection._id)
-        with DictCursor(self._connection) as cursor:
-            cursor._process_result(result)
-            return cursor.fetchall()
-
-    def get_type_info(self):
-        if self._connection._closed:
-            raise ProgrammingError('The connection is already closed.')
-        result = self._connection._client.get_type_info(self._connection._id)
-        with DictCursor(self._connection) as cursor:
-            cursor._process_result(result)
-            return cursor.fetchall()
-
-    def _fix_default(self, rows, catalog=None, schemaPattern=None):
-        '''Workaround for PHOENIX-6003'''
-        if schemaPattern == '':
-            rows = [row for row in rows if row['TABLE_SCHEM'] is None]
-        if catalog == '':
-            rows = [row for row in rows if row['TABLE_CATALOG'] is None]
-        # Couldn't find a sane way to do it that works on 2 and 3
-        if sys.version_info.major == 3:
-            return [{k: v or '' for k, v in row.items()} for row in rows]
-        else:
-            return [{k: v or '' for k, v in row.iteritems()} for row in rows]
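
The Meta helper deleted here (and re-added, expanded to 210 lines, under phoenixdb-1.1.0.dev0 later in this change) is the programmatic entry point for catalog metadata. A hedged sketch of how it is typically reached, assuming the connection object exposes it through a ``meta()`` factory as in the 1.x line::

    import phoenixdb

    conn = phoenixdb.connect('http://localhost:8765/', autocommit=True)

    meta = conn.meta()  # assumed accessor returning the Meta helper shown above
    print(meta.get_table_types())
    # An empty schemaPattern restricts results to the default schema,
    # which is what the PHOENIX-6003 workaround (_fix_default) normalizes.
    print(meta.get_tables(schemaPattern=''))

    conn.close()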

+ 0 - 24
desktop/core/ext-py/phoenixdb-1.0.1/setup.cfg

@@ -1,24 +0,0 @@
-[nosetests]
-verbosity = 2
-testmatch = ^test_.+
-where = phoenixdb/tests
-
-[build_sphinx]
-source-dir = doc
-build-dir = doc/build
-all_files = 1
-
-[upload_sphinx]
-upload-dir = doc/build/html
-
-[flake8]
-max-line-length = 140
-exclude = 
-	e,e3,env,venv,doc,build,dist,.tox,.idea,
-	./phoenixdb/tests/dbapi20.py,
-	./phoenixdb/avatica/proto/*_pb2.py
-
-[egg_info]
-tag_build = 
-tag_date = 0
-

+ 13 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/.gitignore

@@ -0,0 +1,13 @@
+/dist/
+/build/
+/doc/_build/
+/doc/build/
+*.pyc
+*.egg-info/
+.vagrant/
+.tox
+dev-support/artifacts
+dev-support/work
+phoenixdb/.eggs
+phoenixdb/build
+phoenixdb/e

+ 149 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/.gitlab-ci.yml

@@ -0,0 +1,149 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+stages:
+  - prepare
+  - test
+
+build build-env image:
+  stage: prepare
+  script:
+    - cd ci/build-env
+    - docker build -t ${CI_REGISTRY_IMAGE}/build-env .
+    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
+    - docker push $CI_REGISTRY_IMAGE/build-env
+  tags:
+    - docker-host
+  only:
+    - master@lukas/python-phoenixdb
+
+.build-phoenix-image: &build_phoenix_image
+  stage: prepare
+  script:
+    - JOB_NAME=($CI_JOB_NAME)
+    - cd ci/phoenix
+    - docker build -t ${CI_REGISTRY_IMAGE}/phoenix:${JOB_NAME[2]}
+        --build-arg PHOENIX_VERSION=$PHOENIX_VERSION
+        --build-arg HBASE_VERSION=$HBASE_VERSION
+        --build-arg HBASE_DIR=$HBASE_DIR
+        .
+    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
+    - docker push $CI_REGISTRY_IMAGE/phoenix:${JOB_NAME[2]}
+  tags:
+    - docker-host
+
+build phoenix 5.0.0-alpha-HBase-2.0 image:
+  <<: *build_phoenix_image
+  variables:
+    PHOENIX_VERSION: 5.0.0-alpha-HBase-2.0
+    HBASE_VERSION: 2.0.0-beta-1
+    HBASE_DIR: hbase-2.0.0-beta-1
+
+build phoenix 4.13 image:
+  <<: *build_phoenix_image
+  variables:
+    PHOENIX_VERSION: 4.13.1-HBase-1.3
+    HBASE_VERSION: 1.3.1
+    HBASE_DIR: 1.3.1
+
+build phoenix 4.12 image:
+  <<: *build_phoenix_image
+  variables:
+    PHOENIX_VERSION: 4.12.0-HBase-1.3
+    HBASE_VERSION: 1.3.1
+    HBASE_DIR: 1.3.1
+
+build phoenix 4.11 image:
+  <<: *build_phoenix_image
+  variables:
+    PHOENIX_VERSION: 4.11.0-HBase-1.3
+    HBASE_VERSION: 1.3.1
+    HBASE_DIR: 1.3.1
+
+build phoenix 4.10 image:
+  <<: *build_phoenix_image
+  variables:
+    PHOENIX_VERSION: 4.10.0-HBase-1.2
+    HBASE_VERSION: 1.2.6
+    HBASE_DIR: 1.2.6
+
+build phoenix 4.9 image:
+  <<: *build_phoenix_image
+  variables:
+    PHOENIX_VERSION: 4.9.0-HBase-1.2
+    HBASE_VERSION: 1.2.6
+    HBASE_DIR: 1.2.6
+
+build phoenix 4.8 image:
+  <<: *build_phoenix_image
+  variables:
+    PHOENIX_VERSION: 4.8.2-HBase-1.2
+    HBASE_VERSION: 1.2.6
+    HBASE_DIR: 1.2.6
+
+.test: &test
+  image: $CI_REGISTRY_IMAGE/build-env
+  variables:
+    PHOENIXDB_TEST_DB_URL: http://phoenix:8765/
+    PIP_CACHE_DIR: $CI_PROJECT_DIR/cache/
+  script:
+    - tox -e py27,py35
+  cache:
+    paths:
+      - cache/
+  tags:
+    - docker
+
+test phoenix 5.0.0-alpha-HBase-2.0:
+  <<: *test
+  services:
+    - name: $CI_REGISTRY_IMAGE/phoenix:5.0.0-alpha-HBase-2.0
+      alias: phoenix
+
+test phoenix 4.13:
+  <<: *test
+  services:
+    - name: $CI_REGISTRY_IMAGE/phoenix:4.13
+      alias: phoenix
+
+test phoenix 4.12:
+  <<: *test
+  services:
+    - name: $CI_REGISTRY_IMAGE/phoenix:4.12
+      alias: phoenix
+
+test phoenix 4.11:
+  <<: *test
+  services:
+    - name: $CI_REGISTRY_IMAGE/phoenix:4.11
+      alias: phoenix
+
+test phoenix 4.10:
+  <<: *test
+  services:
+    - name: $CI_REGISTRY_IMAGE/phoenix:4.10
+      alias: phoenix
+
+test phoenix 4.9:
+  <<: *test
+  services:
+    - name: $CI_REGISTRY_IMAGE/phoenix:4.9
+      alias: phoenix
+
+test phoenix 4.8:
+  <<: *test
+  services:
+    - name: $CI_REGISTRY_IMAGE/phoenix:4.8
+      alias: phoenix

+ 24 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/Dockerfile

@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from themattrix/tox-base
+
+RUN apt-get update && apt-get install -y krb5-user libkrb5-dev
+
+ENV PHOENIXDB_TEST_DB_URL=http://host.docker.internal:8765
+ENV PHOENIXDB_TEST_DB_TRUSTSTORE=
+ENV PHOENIXDB_TEST_DB_AUTHENTICATION=
+ENV PHOENIXDB_TEST_DB_AVATICA_USER=
+ENV PHOENIXDB_TEST_DB_AVATICA_PASSWORD=

+ 24 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/Dockerfile-pqs

@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from maven:3-jdk-8
+
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq krb5-user libkrb5-dev
+
+EXPOSE 8765
+
+# copy all the files to the container
+
+CMD mvn clean verify -am -pl queryserver-it -Dtest=foo -Dit.test=QueryServerBasicsIT#startLocalPQS -Ddo.not.randomize.pqs.port=true -Dstart.unsecure.pqs=true

+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/LICENSE → desktop/core/ext-py/phoenixdb-1.1.0.dev0/LICENSE


+ 70 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/NEWS.rst

@@ -0,0 +1,70 @@
+Changelog
+=========
+
+Version 1.0.1
+-------------
+
+- Use HTTP sessions to enable sticky load balancers (PHOENIX-6459)
+- Revert default GSSAPI OID to SPNEGO to improve compatibility (PHOENIX-6414)
+
+Version 1.0.0
+-------------
+
+- Replaced bundled requests_kerberos with request_gssapi library
+- Use default SPNEGO Auth settings from request_gssapi
+- Refactored authentication code
+- Added support for specifying server certificate
+- Added support for BASIC and DIGEST authentication
+- Fixed HTTP error parsing
+- Added transaction support
+- Added list support
+- Rewritten type handling
+- Refactored test suite
+- Removed shell example, as it was python2 only
+- Updated documentation
+- Added SQLAlchemy dialect
+- Implemented Avatica Metadata API
+- Misc fixes
+- Licensing cleanup
+
+Version 0.7
+-----------
+
+- Added DictCursor for easier access to columns by their names.
+- Support for Phoenix versions from 4.8 to 4.11.
+
+Version 0.6
+-----------
+
+- Fixed result fetching when using a query with parameters.
+- Support for Phoenix 4.9.
+
+Version 0.5
+-----------
+
+- Added support for Python 3.
+- Switched from the JSON serialization to Protocol Buffers, improved compatibility with Phoenix 4.8.
+- Phoenix 4.6 and older are no longer supported.
+
+Version 0.4
+-----------
+
+- Fixes for the final version of Phoenix 4.7.
+
+Version 0.3
+-----------
+
+- Compatible with Phoenix 4.7.
+
+Version 0.2
+-----------
+
+- Added (configurable) retry on connection errors.
+- Added Vagrantfile for easier testing.
+- Compatible with Phoenix 4.6.
+
+Version 0.1
+-----------
+
+- Initial release.
+- Compatible with Phoenix 4.4.
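
The 1.0.0 entry above lists BASIC and DIGEST authentication and server-certificate support, and the removed PKG-INFO names the matching test variables (PHOENIXDB_TEST_DB_AUTHENTICATION, ..._AVATICA_USER, ..._AVATICA_PASSWORD, ..._TRUSTSTORE). A hedged sketch of a secure connection, assuming ``phoenixdb.connect()`` accepts keyword arguments mirroring those variable names; the host, credentials and certificate path are placeholders::

    import phoenixdb

    # Keyword names are assumed to mirror the PHOENIXDB_TEST_DB_* variables.
    conn = phoenixdb.connect(
        'https://pqs.example.com:8765/',
        autocommit=True,
        authentication='BASIC',
        avatica_user='alice',
        avatica_password='secret',
        truststore='/etc/ssl/certs/pqs-ca.pem',
    )

    with conn.cursor() as cursor:
        cursor.execute("SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 1")
        print(cursor.fetchone())

    conn.close()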

+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/NOTICE → desktop/core/ext-py/phoenixdb-1.1.0.dev0/NOTICE


+ 5 - 5
desktop/core/ext-py/phoenixdb-1.0.1/README.rst → desktop/core/ext-py/phoenixdb-1.1.0.dev0/README.rst

@@ -58,14 +58,14 @@ necessary requirements::

     python setup.py develop

 You can start a Phoenix QueryServer instance on http://localhost:8765 for testing by running
-the following command in the phoenix-queryserver directory::
+the following command in the phoenix-queryserver-parent directory::

     mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
     -Dit.test=QueryServerBasicsIT\#startLocalPQS \
     -Ddo.not.randomize.pqs.port=true -Dstart.unsecure.pqs=true

 You can start a secure (https+kerberos) Phoenix QueryServer instance on https://localhost:8765
-for testing by running the following command in the phoenix-queryserver directory::
+for testing by running the following command in the phoenix-queryserver-parent directory::

     mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
     -Dit.test=SecureQueryServerPhoenixDBIT\#startLocalPQS \
@@ -77,7 +77,7 @@ up the environment for the tests.
 If you want to use the library without installing the phoenixdb library, you can use
 the `PYTHONPATH` environment variable to point to the library directly::

-    cd $PHOENIX_HOME/python
+    cd phoenix-queryserver-parent/python-phoenixdb
     python setup.py build
     cd ~/my_project
     PYTHONPATH=$PHOENIX_HOME/build/lib python my_app.py
@@ -110,8 +110,8 @@ Similarly, tox can be used to run the test suite against multiple Python versions::
     pyenv global 2.7.14 3.5.5 3.6.4
     PHOENIXDB_TEST_DB_URL='http://localhost:8765' tox

-You can use tox and docker to run the tests on all supported python versions without installing the
-environments locally::
+You can use tox and docker to run the tests on supported python versions up to 3.8 without
+installing the environments locally::

     docker build -t toxtest .
     docker run --rm  -v `pwd`:/src toxtest

+ 12 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/RELEASING.rst

@@ -0,0 +1,12 @@
+Releasing a new version
+=======================
+
+Change the version number ``setup.py`` and ``NEWS.rst``.
+
+Commit the changes and tag the repository::
+
+    git tag -s vX.Y
+
+Upload the package to PyPI::
+
+    python setup.py clean sdist upload

+ 22 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/build-env/Dockerfile

@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:xenial
+
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y python-dev python3-dev tox
+
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y git

+ 48 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/phoenix/Dockerfile

@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM openjdk:8
+
+ARG HBASE_VERSION
+ARG HBASE_DIR
+ARG PHOENIX_VERSION
+ARG PHOENIX_NAME=apache-phoenix
+
+ENV HBASE_URL https://archive.apache.org/dist/hbase/$HBASE_DIR/hbase-$HBASE_VERSION-bin.tar.gz
+
+RUN wget --no-verbose -O hbase.tar.gz "$HBASE_URL" && \
+    mkdir /opt/hbase && \
+    tar xf hbase.tar.gz --strip-components=1 -C /opt/hbase && \
+    rm hbase.tar.gz
+
+ENV PHOENIX_URL https://archive.apache.org/dist/phoenix/apache-phoenix-$PHOENIX_VERSION/bin/apache-phoenix-$PHOENIX_VERSION-bin.tar.gz
+
+RUN wget --no-verbose -O phoenix.tar.gz "$PHOENIX_URL" && \
+    mkdir /opt/phoenix && \
+    tar xf phoenix.tar.gz --strip-components=1 -C /opt/phoenix && \
+    rm phoenix.tar.gz
+
+RUN ln -sv /opt/phoenix/phoenix-*-server.jar /opt/hbase/lib/
+
+ADD hbase-site.xml /opt/hbase/conf/hbase-site.xml
+
+ENV HBASE_CONF_DIR /opt/hbase/conf
+ENV HBASE_CP /opt/hbase/lib
+ENV HBASE_HOME /opt/hbase
+
+EXPOSE 8765
+
+COPY docker-entrypoint.sh /usr/local/bin/
+ENTRYPOINT ["docker-entrypoint.sh"]

+ 39 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/phoenix/docker-entrypoint.sh

@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+pids=()
+
+/opt/hbase/bin/hbase-daemon.sh foreground_start master &
+pids+=($!)
+
+/opt/phoenix/bin/queryserver.py &
+pids+=($!)
+
+cleanup() {
+    if [ ${#pids[@]} -ne 0 ]
+    then
+        pids=($(ps -o pid= -p "${pids[@]}"))
+        if [ ${#pids[@]} -ne 0 ]
+        then
+            kill "${pids[@]}"
+        fi
+    fi
+}
+
+trap cleanup SIGCHLD SIGINT SIGTERM
+
+wait

+ 29 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/ci/phoenix/hbase-site.xml

@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+ 
+      http://www.apache.org/licenses/LICENSE-2.0
+ 
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>hbase.regionserver.wal.codec</name>
+        <value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
+    </property>
+	<property>
+		<name>phoenix.schema.isNamespaceMappingEnabled</name>
+		<value>true</value>
+	</property>
+</configuration>

+ 139 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/cache-apache-project-artifact.sh

@@ -0,0 +1,139 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This was lovingly copied from Apache HBase
+
+set -e
+function usage {
+  echo "Usage: ${0} [options] /path/to/download/file.tar.gz download/fragment/eg/project/subdir/some-artifact-version.tar.gz"
+  echo ""
+  echo "    --force                       for a redownload even if /path/to/download/file.tar.gz exists."
+  echo "    --working-dir /path/to/use    Path for writing tempfiles. must exist."
+  echo "                                  defaults to making a directory via mktemp that we clean."
+  echo "    --keys url://to/project/KEYS  where to get KEYS. needed to check signature on download."
+  echo ""
+  exit 1
+}
+# if no args specified, show usage
+if [ $# -lt 2 ]; then
+  usage
+fi
+
+
+# Get arguments
+declare done_if_cached="true"
+declare working_dir
+declare cleanup="true"
+declare keys
+while [ $# -gt 0 ]
+do
+  case "$1" in
+    --force) shift; done_if_cached="false";;
+    --working-dir) shift; working_dir=$1; cleanup="false"; shift;;
+    --keys) shift; keys=$1; shift;;
+    --) shift; break;;
+    -*) usage ;;
+    *)  break;;  # terminate while loop
+  esac
+done
+
+# should still have required args
+if [ $# -lt 2 ]; then
+  usage
+fi
+
+target="$1"
+artifact="$2"
+
+if [ -f "${target}" ] && [ "true" = "${done_if_cached}" ]; then
+  echo "Reusing existing download of '${artifact}'."
+  exit 0
+fi
+
+if [ -z "${working_dir}" ]; then
+  if ! working_dir="$(mktemp -d -t hbase-download-apache-artifact)" ; then
+    echo "Failed to create temporary working directory. Please specify via --working-dir" >&2
+    exit 1
+  fi
+else
+  # absolutes please
+  working_dir="$(cd "$(dirname "${working_dir}")"; pwd)/$(basename "${working_dir}")"
+  if [ ! -d "${working_dir}" ]; then
+    echo "passed working directory '${working_dir}' must already exist." >&2
+    exit 1
+  fi
+fi
+
+function cleanup {
+  if [ -n "${keys}" ]; then
+    echo "Stopping gpg agent daemon"
+    gpgconf --homedir "${working_dir}/.gpg" --kill gpg-agent
+    echo "Stopped gpg agent daemon"
+  fi
+
+  if [ "true" = "${cleanup}" ]; then
+    echo "cleaning up temp space."
+    rm -rf "${working_dir}"
+  fi
+}
+trap cleanup EXIT SIGQUIT
+
+echo "New download of '${artifact}'"
+
+# N.B. this comes first so that if gpg falls over we skip the expensive download.
+if [ -n "${keys}" ]; then
+  if [ ! -d "${working_dir}/.gpg" ]; then
+    rm -rf "${working_dir}/.gpg"
+    mkdir -p "${working_dir}/.gpg"
+    chmod -R 700 "${working_dir}/.gpg"
+  fi
+
+  echo "installing project KEYS"
+  curl -L --fail -o "${working_dir}/KEYS" "${keys}"
+  if ! gpg --homedir "${working_dir}/.gpg" --import "${working_dir}/KEYS" ; then
+    echo "ERROR importing the keys via gpg failed. If the output above mentions this error:" >&2
+    echo "    gpg: can't connect to the agent: File name too long" >&2
+    # we mean to give them the command to run, not to run it.
+    #shellcheck disable=SC2016
+    echo 'then you prolly need to create /var/run/user/$(id -u)' >&2
+    echo "see this thread on gnupg-users: https://s.apache.org/uI7x" >&2
+    exit 2
+  fi
+
+  echo "downloading signature"
+  curl -L --fail -o "${working_dir}/artifact.asc" "https://archive.apache.org/dist/${artifact}.asc"
+fi
+
+echo "downloading artifact"
+if ! curl --dump-header "${working_dir}/artifact_download_headers.txt" -L --fail -o "${working_dir}/artifact" "https://www.apache.org/dyn/closer.lua?filename=${artifact}&action=download" ; then
+  echo "Artifact wasn't in mirror system. falling back to archive.a.o."
+  curl --dump-header "${working_dir}/artifact_fallback_headers.txt" -L --fail -o "${working_dir}/artifact" "http://archive.apache.org/dist/${artifact}"
+fi
+
+if [ -n "${keys}" ]; then
+  echo "verifying artifact signature"
+  gpg --homedir "${working_dir}/.gpg" --verify "${working_dir}/artifact.asc"
+  echo "signature good."
+fi
+
+echo "moving artifact into place at '${target}'"
+# ensure we're on the same filesystem
+mv "${working_dir}/artifact" "${target}.copying"
+# attempt atomic move
+mv "${target}.copying" "${target}"
+echo "all done!"

+ 96 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/make_rc.sh

@@ -0,0 +1,96 @@
+#!/bin/bash
+############################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+############################################################################
+set -e
+
+echo "Script that assembles all you need to make an RC."
+echo "It generates source tar in release directory"
+echo "Presumes that you can sign a release as described at https://www.apache.org/dev/release-signing.html"
+echo ""
+echo "Continuing will overwrite all uncommitted changes under the phoenix-queryserver repository."
+
+read -p "Y to continue or any other key to quit" prompt
+if [[ ! $prompt =~ [yY](es)* ]]
+then
+  echo "Aborting."
+  exit
+ fi
+
+echo "Starting...";sleep 2s
+
+# Set directory variables
+DIR_ROOT="$(cd $(dirname $0);pwd)/.."
+cd $DIR_ROOT
+
+VERSION=$(grep '^version = ".*"$' setup.py | grep -o '".*"' | sed 's/"//g')
+
+DIR_REL_BASE=$DIR_ROOT/release
+DIR_REL_ROOT=$DIR_REL_BASE/python-phoenixdb-$VERSION
+REL_SRC=python-phoenixdb-$VERSION-src
+DIR_REL_SRC_TAR_PATH=$DIR_REL_ROOT/src
+
+git clean -fx .
+
+# Generate src tar
+ln -s . $REL_SRC; tar cvzf $REL_SRC.tar.gz --exclude="$REL_SRC/$REL_SRC" $REL_SRC/*; rm $REL_SRC;
+
+# Generate directory structure
+mkdir $DIR_REL_BASE;
+mkdir $DIR_REL_ROOT;
+mkdir $DIR_REL_SRC_TAR_PATH;
+
+# Move src tar
+mv $REL_SRC.tar.gz $DIR_REL_SRC_TAR_PATH;
+
+echo "DONE generating  source tar in release directory."
+echo "Now signing source  tar"
+
+# Sign
+function_sign() {
+  phoenix_tar=$(find python-phoenixdb-*.gz);
+
+  # if on MAC OS
+  if [[ "$OSTYPE" == "darwin"* ]]; then
+    gpg --armor --output $phoenix_tar.asc --detach-sig $phoenix_tar;
+    openssl dgst -sha512 $phoenix_tar > $phoenix_tar.sha512;
+    openssl dgst -sha256 $phoenix_tar >> $phoenix_tar.sha256;
+  # all other OS
+  else
+    gpg --armor --output $phoenix_tar.asc --detach-sig $phoenix_tar;
+    sha512sum -b $phoenix_tar > $phoenix_tar.sha512;
+    sha256sum -b $phoenix_tar >> $phoenix_tar.sha256;
+  fi
+}
+
+cd $DIR_REL_SRC_TAR_PATH; function_sign;
+
+# Tag
+read -p "Do you want add tag for this RC in GIT? (Y for yes or any other key to continue)" prompt
+if [[ $prompt =~ [yY](es)* ]]
+then
+  echo "Tagging..."
+  read -p "Enter tag (Example python-phoenixdb-1.0.0.rc0):" prompt
+  echo "Setting tag: $prompt";sleep 5s
+  git tag -a $prompt -m "$prompt"; git push origin $prompt
+  mv $DIR_REL_ROOT $DIR_REL_BASE/$prompt
+fi
+
+echo "DONE."
+echo "If all looks good in release directory then commit RC at https://dist.apache.org/repos/dist/dev/phoenix/python-phoenixdb"

+ 4 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/rat-excludes.txt

@@ -0,0 +1,4 @@
+.*\.pyc
+NEWS\.rst
+RELEASING\.rst
+README\.rst

+ 44 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/dev-support/run-source-ratcheck.sh

@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+# Catch some more errors
+set -eu
+set -o pipefail
+
+# The name of the Apache RAT CLI binary file
+RAT_BINARY_NAME="apache-rat-0.13-bin.tar.gz"
+# The relative path on the ASF mirrors for the RAT binary file
+RAT_BINARY_MIRROR_NAME="creadur/apache-rat-0.13/$RAT_BINARY_NAME"
+RAT_BINARY_DIR="apache-rat-0.13"
+RAT_JAR="$RAT_BINARY_DIR.jar"
+
+# Constants
+DEV_SUPPORT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+ARTIFACTS_DIR="$DEV_SUPPORT/artifacts"
+WORK_DIR="$DEV_SUPPORT/work"
+
+mkdir -p "$WORK_DIR" "$ARTIFACTS_DIR"
+
+# Cache the RAT binary artifacts
+if [[ ! -f "$ARTIFACTS_DIR/$RAT_BINARY_NAME" ]]; then
+  echo "$ARTIFACTS_DIR/$RAT_BINARY_NAME does not exist, downloading it"
+  $DEV_SUPPORT/cache-apache-project-artifact.sh --working-dir "$WORK_DIR" --keys https://www.apache.org/dist/creadur/KEYS \
+    "$ARTIFACTS_DIR/$RAT_BINARY_NAME" "$RAT_BINARY_MIRROR_NAME"
+fi
+
+# Extract the RAT binary artifacts
+if [[ ! -d "$ARTIFACTS_DIR/$RAT_BINARY_DIR" ]]; then
+  echo "$ARTIFACTS_DIR/$RAT_BINARY_DIR does not exist, extracting $ARTIFACTS_DIR/$RAT_BINARY_NAME"
+  tar xf $ARTIFACTS_DIR/$RAT_BINARY_NAME -C $ARTIFACTS_DIR
+fi
+
+echo "RAT binary installation localized, running RAT check"
+
+# Run the RAT check, excluding pyc files
+for src in 'phoenixdb' 'ci' 'examples' 'doc'; do 
+  echo "Running RAT check over $src"
+  java -jar "$ARTIFACTS_DIR/$RAT_BINARY_DIR/$RAT_JAR" -d "$DEV_SUPPORT/../$src" -E "$DEV_SUPPORT/rat-excludes.txt"
+  if [[ $? -ne 0 ]]; then
+    echo "Failed RAT check over $src"
+    exit 1
+  fi
+done

+ 207 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/Makefile

@@ -0,0 +1,207 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  applehelp  to make an Apple Help Book"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+	@echo "  coverage   to run coverage check of the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/phoenixdb.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/phoenixdb.qhc"
+
+applehelp:
+	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+	@echo
+	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+	@echo "N.B. You won't be able to view it unless you put it in" \
+	      "~/Library/Documentation/Help or install it in your application" \
+	      "bundle."
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/phoenixdb"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/phoenixdb"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+	@echo "Testing of coverage in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

+ 46 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/api.rst

@@ -0,0 +1,46 @@
+..
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+API Reference
+=============
+
+phoenixdb module
+----------------
+
+.. automodule:: phoenixdb
+    :members:
+    :undoc-members:
+
+phoenixdb.connection module
+---------------------------
+
+.. automodule:: phoenixdb.connection
+    :members:
+    :undoc-members:
+
+phoenixdb.cursor module
+-----------------------
+
+.. automodule:: phoenixdb.cursor
+    :members:
+    :undoc-members:
+
+phoenixdb.avatica module
+------------------------
+
+.. automodule:: phoenixdb.avatica
+    :members:
+    :undoc-members:

+ 302 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/conf.py

@@ -0,0 +1,302 @@
+# -*- coding: utf-8 -*-
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# phoenixdb documentation build configuration file, created by
+# sphinx-quickstart on Sun Jun 28 18:07:35 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../phoenixdb'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.doctest',
+    'sphinx.ext.intersphinx',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'phoenixdb'
+copyright = u'2015, Lukas Lalinsky'
+author = u'Lukas Lalinsky'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'classic'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = False
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'phoenixdbdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+#latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+#}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+#latex_documents = [
+#  (master_doc, 'phoenixdb.tex', u'phoenixdb Documentation',
+#   u'Lukas Lalinsky', 'manual'),
+#]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'phoenixdb', u'phoenixdb Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  (master_doc, 'phoenixdb', u'phoenixdb Documentation',
+   author, 'phoenixdb', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'https://docs.python.org/': None}

+ 41 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/index.rst

@@ -0,0 +1,41 @@
+..
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+.. include:: ../README.rst
+
+API Reference
+-------------
+
+.. toctree::
+   :maxdepth: 2
+
+   api
+
+Changelog
+-------------
+
+.. toctree::
+   :maxdepth: 2
+
+   versions
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+.. _

+ 19 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/doc/versions.rst

@@ -0,0 +1,19 @@
+..
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+.. include:: ../NEWS.rst
+
+.. _

+ 27 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/examples/basic.py

@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import phoenixdb
+
+with phoenixdb.connect('http://localhost:8765/', autocommit=True) as connection:
+    with connection.cursor() as cursor:
+        cursor.execute("DROP TABLE IF EXISTS test")
+        cursor.execute("CREATE TABLE test (id INTEGER PRIMARY KEY, text VARCHAR)")
+        cursor.executemany("UPSERT INTO test VALUES (?, ?)", [[1, 'hello'], [2, 'world']])
+        cursor.execute("SELECT * FROM test ORDER BY id")
+        for row in cursor:
+            print(row)

+ 59 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/gen-protobuf.sh

@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+AVATICA_VER=rel/avatica-1.10.0
+
+set -e
+
+rm -rf avatica-tmp
+
+mkdir avatica-tmp
+cd avatica-tmp
+wget -O avatica.tar.gz https://github.com/apache/calcite-avatica/archive/$AVATICA_VER.tar.gz
+tar -x --strip-components=1 -f avatica.tar.gz
+
+cd ..
+rm -f phoenixdb/avatica/proto/*_pb2.py
+protoc --proto_path=avatica-tmp/core/src/main/protobuf/ --python_out=phoenixdb/avatica/proto avatica-tmp/core/src/main/protobuf/*.proto
+if [[ "$(uname)" == "Darwin" ]]; then
+  sed -i '' 's/import common_pb2/from . import common_pb2/' phoenixdb/avatica/proto/*_pb2.py
+else
+  sed -i 's/import common_pb2/from . import common_pb2/' phoenixdb/avatica/proto/*_pb2.py
+fi
+
+for f in $(find phoenixdb/avatica/proto -name '*.py'); do
+  cat << EOF > ${f}-with-header
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+EOF
+  cat $f >> ${f}-with-header
+done
+
+rm -rf avatica-tmp

+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/__init__.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/__init__.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/avatica/__init__.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/__init__.py


+ 10 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/avatica/client.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/client.py

@@ -310,6 +310,16 @@ class AvaticaClient(object):
         response.ParseFromString(response_data)
         return response
 
+    def get_sync_results(self, connection_id, statement_id, state):
+        request = requests_pb2.SyncResultsRequest()
+        request.connection_id = connection_id
+        request.statement_id = statement_id
+        request.state.CopyFrom(state)
+        response_data = self._apply(request, 'SyncResultsResponse')
+        syncResultResponse = responses_pb2.SyncResultsResponse()
+        syncResultResponse.ParseFromString(response_data)
+        return syncResultResponse
+
     def connection_sync_dict(self, connection_id, connProps=None):
         conn_props = self.connection_sync(connection_id, connProps)
         return {

+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/avatica/proto/__init__.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/__init__.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/avatica/proto/common_pb2.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/common_pb2.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/avatica/proto/requests_pb2.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/requests_pb2.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/avatica/proto/responses_pb2.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/avatica/proto/responses_pb2.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/connection.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/connection.py


+ 15 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/cursor.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/cursor.py

@@ -246,6 +246,21 @@ class Cursor(object):
             self._connection._id, self._id,
             [self._transform_parameters(p) for p in seq_of_parameters])
 
+    def get_sync_results(self, state):
+        if self._closed:
+            raise ProgrammingError('The cursor is already closed.')
+        if self._id is None:
+            self._set_id(self._connection._client.create_statement(self._connection._id))
+        return self._connection._client.get_sync_results(self._connection._id, self._id, state)
+
+    def fetch(self, signature):
+        if self._closed:
+            raise ProgrammingError('The cursor is already closed.')
+        self._updatecount = -1
+        self._set_signature(signature)
+        frame = self._connection._client.fetch(self._connection._id, self._id, 0, self.itersize)
+        self._set_frame(frame)
+
     def _transform_row(self, row):
         """Transforms a Row into Python values.
 

+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/errors.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/errors.py


+ 210 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/meta.py

@@ -0,0 +1,210 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import logging
+
+from phoenixdb.avatica.proto import common_pb2
+from phoenixdb.errors import ProgrammingError
+from phoenixdb.cursor import DictCursor
+
+
+__all__ = ['Meta']
+
+logger = logging.getLogger(__name__)
+
+
+class Meta(object):
+    """Database meta for querying MetaData
+    """
+
+    def __init__(self, connection):
+        self._connection = connection
+
+    def get_catalogs(self):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_catalogs(self._connection._id)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return cursor.fetchall()
+
+    def get_schemas(self, catalog=None, schemaPattern=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_schemas(self._connection._id, catalog, schemaPattern)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return self._fix_default(cursor.fetchall(), schemaPattern=schemaPattern)
+
+    def get_tables(self, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_tables(
+            self._connection._id, catalog, schemaPattern, tableNamePattern, typeList=typeList)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return self._fix_default(cursor.fetchall(), catalog, schemaPattern)
+
+    def get_columns(self, catalog=None, schemaPattern=None, tableNamePattern=None,
+                    columnNamePattern=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_columns(
+            self._connection._id, catalog, schemaPattern, tableNamePattern, columnNamePattern)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return self._fix_default(cursor.fetchall(), catalog, schemaPattern)
+
+    def get_table_types(self):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_table_types(self._connection._id)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return cursor.fetchall()
+
+    def get_type_info(self):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_type_info(self._connection._id)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return cursor.fetchall()
+
+    def get_primary_keys(self, catalog=None, schema=None, table=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+
+        state = common_pb2.QueryState()
+        state.type = common_pb2.StateType.METADATA
+        state.op = common_pb2.MetaDataOperation.GET_PRIMARY_KEYS
+        state.has_args = True
+        state.has_op = True
+
+        catalog_arg = self._moa_string_arg_factory(catalog)
+        schema_arg = self._moa_string_arg_factory(schema)
+        table_arg = self._moa_string_arg_factory(table)
+        state.args.extend([catalog_arg, schema_arg, table_arg])
+
+        with DictCursor(self._connection) as cursor:
+            syncResultResponse = cursor.get_sync_results(state)
+            if not syncResultResponse.more_results:
+                return []
+
+            signature = common_pb2.Signature()
+            signature.columns.append(self._column_meta_data_factory(1, 'TABLE_CAT', 12))
+            signature.columns.append(self._column_meta_data_factory(2, 'TABLE_SCHEM', 12))
+            signature.columns.append(self._column_meta_data_factory(3, 'TABLE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(4, 'COLUMN_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(5, 'KEY_SEQ', 5))
+            signature.columns.append(self._column_meta_data_factory(6, 'PK_NAME', 12))
+            # The following are non-standard Phoenix extensions
+            # This returns '\x00\x00\x00A' or '\x00\x00\x00D' , but that's consistent with Java
+            signature.columns.append(self._column_meta_data_factory(7, 'ASC_OR_DESC', 12))
+            signature.columns.append(self._column_meta_data_factory(8, 'DATA_TYPE', 5))
+            signature.columns.append(self._column_meta_data_factory(9, 'TYPE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(10, 'COLUMN_SIZE', 5))
+            signature.columns.append(self._column_meta_data_factory(11, 'TYPE_ID', 5))
+            signature.columns.append(self._column_meta_data_factory(12, 'VIEW_CONSTANT', 12))
+
+            cursor.fetch(signature)
+            return cursor.fetchall()
+
+    def get_index_info(self, catalog=None, schema=None, table=None, unique=False, approximate=False):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+
+        state = common_pb2.QueryState()
+        state.type = common_pb2.StateType.METADATA
+        state.op = common_pb2.MetaDataOperation.GET_INDEX_INFO
+        state.has_args = True
+        state.has_op = True
+
+        catalog_arg = self._moa_string_arg_factory(catalog)
+        schema_arg = self._moa_string_arg_factory(schema)
+        table_arg = self._moa_string_arg_factory(table)
+        unique_arg = self._moa_bool_arg_factory(unique)
+        approximate_arg = self._moa_bool_arg_factory(approximate)
+
+        state.args.extend([catalog_arg, schema_arg, table_arg, unique_arg, approximate_arg])
+
+        with DictCursor(self._connection) as cursor:
+            syncResultResponse = cursor.get_sync_results(state)
+            if not syncResultResponse.more_results:
+                return []
+
+            signature = common_pb2.Signature()
+            signature.columns.append(self._column_meta_data_factory(1, 'TABLE_CAT', 12))
+            signature.columns.append(self._column_meta_data_factory(2, 'TABLE_SCHEM', 12))
+            signature.columns.append(self._column_meta_data_factory(3, 'TABLE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(4, 'NON_UNIQUE', 16))
+            signature.columns.append(self._column_meta_data_factory(5, 'INDEX_QUALIFIER', 12))
+            signature.columns.append(self._column_meta_data_factory(6, 'INDEX_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(7, 'TYPE', 5))
+            signature.columns.append(self._column_meta_data_factory(8, 'ORDINAL_POSITION', 5))
+            signature.columns.append(self._column_meta_data_factory(9, 'COLUMN_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(10, 'ASC_OR_DESC', 12))
+            signature.columns.append(self._column_meta_data_factory(11, 'CARDINALITY', 5))
+            signature.columns.append(self._column_meta_data_factory(12, 'PAGES', 5))
+            signature.columns.append(self._column_meta_data_factory(13, 'FILTER_CONDITION', 12))
+            # The following are non-standard Phoenix extensions
+            signature.columns.append(self._column_meta_data_factory(14, 'DATA_TYPE', 5))
+            signature.columns.append(self._column_meta_data_factory(15, 'TYPE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(16, 'TYPE_ID', 5))
+            signature.columns.append(self._column_meta_data_factory(17, 'COLUMN_FAMILY', 12))
+            signature.columns.append(self._column_meta_data_factory(18, 'COLUMN_SIZE', 5))
+            signature.columns.append(self._column_meta_data_factory(19, 'ARRAY_SIZE', 5))
+
+            cursor.fetch(signature)
+            return cursor.fetchall()
+
+    def _column_meta_data_factory(self, ordinal, column_name, jdbc_code):
+        cmd = common_pb2.ColumnMetaData()
+        cmd.ordinal = ordinal
+        cmd.column_name = column_name
+        cmd.type.id = jdbc_code
+        cmd.nullable = 2
+        return cmd
+
+    def _moa_string_arg_factory(self, arg):
+        moa = common_pb2.MetaDataOperationArgument()
+        if arg is None:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.NULL
+        else:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.STRING
+            moa.string_value = arg
+        return moa
+
+    def _moa_bool_arg_factory(self, arg):
+        moa = common_pb2.MetaDataOperationArgument()
+        if arg is None:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.NULL
+        else:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.BOOL
+            moa.bool_value = arg
+        return moa
+
+    def _fix_default(self, rows, catalog=None, schemaPattern=None):
+        '''Workaround for PHOENIX-6003'''
+        if schemaPattern == '':
+            rows = [row for row in rows if row['TABLE_SCHEM'] is None]
+        if catalog == '':
+            rows = [row for row in rows if row['TABLE_CATALOG'] is None]
+        # Couldn't find a sane way to do it that works on 2 and 3
+        if sys.version_info.major == 3:
+            return [{k: v or '' for k, v in row.items()} for row in rows]
+        else:
+            return [{k: v or '' for k, v in row.iteritems()} for row in rows]

+ 36 - 10
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/sqlalchemy_phoenix.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/sqlalchemy_phoenix.py

@@ -166,19 +166,45 @@ class PhoenixDialect(DefaultDialect):
     def get_pk_constraint(self, connection, table_name, schema=None, **kw):
         if schema is None:
             schema = ''
-        columns = connection.connect().connection.meta().get_columns(
-            schemaPattern=schema, tableNamePattern=table_name, *kw)
-        pk_columns = [col['COLUMN_NAME'] for col in columns if col['KEY_SEQ'] > 0]
-        return {'constrained_columns': pk_columns}
-
-    def get_indexes(self, conn, table_name, schema=None, **kw):
-        '''This information does not seem to be exposed via Avatica
-        TODO: Implement by directly querying SYSTEM tables ? '''
-        return []
+        raw = connection.connect().connection.meta().get_primary_keys(
+            schema=schema, table=table_name)
+        cooked = {
+            'constrained_columns': []
+        }
+        if raw:
+            cooked['name'] = raw[0]['PK_NAME']
+            for row in raw:
+                cooked['constrained_columns'].insert(row['KEY_SEQ'] - 1, row['COLUMN_NAME'])
+        return cooked
+
+    def get_indexes(self, connection, table_name, schema=None, **kw):
+        if schema is None:
+            schema = ''
+        raw = connection.connect().connection.meta().get_index_info(schema=schema, table=table_name)
+        # We know that Phoenix returns the rows ordered by INDEX_NAME and ORDINAL_POSITION
+        cooked = []
+        current = None
+        for row in raw:
+            if current is None or row['INDEX_NAME'] != current['name']:
+                current = {
+                    'name': row['INDEX_NAME'],
+                    'unique': row['NON_UNIQUE'] is not False,
+                    'column_names': [],
+                }
+                cooked.append(current)
+            # Phoenix returns the column names in its internal representation here
+            # Remove the default CF prefix
+            canonical_name = row['COLUMN_NAME']
+            if canonical_name.startswith('0:'):
+                canonical_name = canonical_name[len('0:'):]
+            if canonical_name.startswith(':'):
+                canonical_name = canonical_name[len(':'):]
+            current['column_names'].append(canonical_name)
+        return cooked
 
     def get_foreign_keys(self, conn, table_name, schema=None, **kw):
         '''Foreign keys are a foreign concept to Phoenix,
-        but SqlAlchemy cannot parse the DB schema if it's not implemented '''
+        and SqlAlchemy cannot parse the DB schema if it's not implemented '''
         return []
 
     def _map_column(self, raw):
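With the dialect changes above, primary key and index reflection becomes available through SQLAlchemy's generic inspector. A rough sketch, assuming the phoenix:// URL scheme registered by this package and reusing the US_POPULATION table from the tests (both illustrative here):

    import sqlalchemy as db

    engine = db.create_engine('phoenix://localhost:8765/')
    inspector = db.inspect(engine)

    # Primary key columns are returned in KEY_SEQ order, with the PK name attached.
    print(inspector.get_pk_constraint('US_POPULATION'))
    # Index rows are regrouped by INDEX_NAME into SQLAlchemy's reflection format.
    print(inspector.get_indexes('US_POPULATION'))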

+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/__init__.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/__init__.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/dbapi20.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/dbapi20.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/test_avatica.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_avatica.py


+ 60 - 1
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/test_db.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_db.py

@@ -17,7 +17,7 @@ import unittest
 
 import phoenixdb.cursor
 from phoenixdb.connection import Connection
-from phoenixdb.errors import InternalError
+from phoenixdb.errors import InternalError, ProgrammingError
 from phoenixdb.tests import DatabaseTestCase, TEST_DB_URL
 
 
@@ -172,6 +172,65 @@ class PhoenixDatabaseTest(DatabaseTestCase):
                     {'TABLE_TYPE': 'VIEW'}]))
 
                 self.assertEqual(meta.get_type_info(), [])
+
+            finally:
+                cursor.execute('drop table if exists DEFAULT_TABLE')
+                cursor.execute('drop table if exists A_SCHEMA.A_TABLE')
+                cursor.execute('drop table if exists B_SCHEMA.B_TABLE')
+
+    def test_meta2(self):
+        with self.conn.cursor() as cursor:
+            try:
+                cursor.execute('drop table if exists DEFAULT_TABLE')
+                cursor.execute('drop table if exists A_SCHEMA.A_TABLE')
+                cursor.execute('drop table if exists B_SCHEMA.B_TABLE')
+
+                cursor.execute('''create table DEFAULT_TABLE (ID integer not null, ID2 varchar not null,
+                V1 integer, V2 varchar, constraint PK PRIMARY KEY (ID DESC, ID2 ASC))''')
+                cursor.execute('CREATE INDEX GLOBAL_IDX ON DEFAULT_TABLE (V1) INCLUDE (V2)')
+                cursor.execute('CREATE LOCAL INDEX LOCAL_IDX ON DEFAULT_TABLE (V1)')
+                cursor.execute('create table A_SCHEMA.A_TABLE (ID_A integer primary key)')
+                cursor.execute('create table B_SCHEMA.B_TABLE (ID_B integer primary key)')
+
+                meta = self.conn.meta()
+                self.assertTrue(len(meta.get_primary_keys(table='DEFAULT_TABLE')),
+                                [{'ASC_OR_DESC': '\x00\x00\x00D',
+                                  'COLUMN_NAME': 'ID',
+                                  'COLUMN_SIZE': None,
+                                  'DATA_TYPE': 4,
+                                  'KEY_SEQ': 1,
+                                  'PK_NAME': 'PK',
+                                  'TABLE_CAT': None,
+                                  'TABLE_NAME': 'DEFAULT_TABLE',
+                                  'TABLE_SCHEM': None,
+                                  'TYPE_ID': 4,
+                                  'TYPE_NAME': 'INTEGER',
+                                  'VIEW_CONSTANT': None},
+                                 {'ASC_OR_DESC': '\x00\x00\x00A',
+                                  'COLUMN_NAME': 'ID2',
+                                  'COLUMN_SIZE': None,
+                                  'DATA_TYPE': 12,
+                                  'KEY_SEQ': 2,
+                                  'PK_NAME': 'PK',
+                                  'TABLE_CAT': None,
+                                  'TABLE_NAME': 'DEFAULT_TABLE',
+                                  'TABLE_SCHEM': None,
+                                  'TYPE_ID': 12,
+                                  'TYPE_NAME': 'VARCHAR',
+                                  'VIEW_CONSTANT': None}])
+                self.assertEqual(len(meta.get_primary_keys(schema='A_SCHEMA', table='A_TABLE')), 1)
+                try:
+                    self.assertEqual(len(meta.get_primary_keys(schema='A_SCHEMA', table='B_TABLE')), 0)
+                    self.assertTrue(False)
+                except ProgrammingError:
+                    pass
+
+                self.maxDiff = None
+
+                self.assertEqual(meta.get_index_info(table='NON_EXISTENT'), [])
+
+                self.assertTrue(len(meta.get_index_info(table='DEFAULT_TABLE')) > 1)
+
             finally:
                 cursor.execute('drop table if exists DEFAULT_TABLE')
                 cursor.execute('drop table if exists A_SCHEMA.A_TABLE')

+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/test_dbapi20.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_dbapi20.py


+ 0 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/test_errors.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_errors.py


+ 21 - 1
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/test_sqlalchemy.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_sqlalchemy.py

@@ -18,6 +18,7 @@ import unittest
 
 import sqlalchemy as db
 from sqlalchemy import text
+from sqlalchemy.types import BIGINT, CHAR, VARCHAR
 
 from . import TEST_DB_AUTHENTICATION, TEST_DB_AVATICA_PASSWORD, TEST_DB_AVATICA_USER, \
     TEST_DB_TRUSTSTORE, TEST_DB_URL
@@ -103,8 +104,27 @@ class SQLAlchemyTest(unittest.TestCase):
                 city VARCHAR NOT NULL,
                 population BIGINT
                 CONSTRAINT my_pk PRIMARY KEY (state, city))'''))
+                connection.execute('CREATE INDEX GLOBAL_IDX ON US_POPULATION (state) INCLUDE (city)')
+                connection.execute('CREATE LOCAL INDEX LOCAL_IDX ON US_POPULATION (population)')
+
                 columns_result = inspector.get_columns('US_POPULATION')
-                self.assertEqual(len(columns_result), 3)
+                # The list is not equal to its representation
+                self.assertTrue(str(columns_result),
+                                str([{'name': 'STATE', 'type': CHAR(), 'nullable': True,
+                                      'autoincrement': False, 'comment': '', 'default': None},
+                                    {'name': 'CITY', 'type': VARCHAR(), 'nullable': True,
+                                    'autoincrement': False, 'comment': '', 'default': None},
+                                     {'name': 'POPULATION', 'type': BIGINT(), 'nullable': True,
+                                     'autoincrement': False, 'comment': '', 'default': None}]))
+
+                indexes_result = inspector.get_indexes('US_POPULATION')
+                self.assertTrue(indexes_result,
+                                [{'name': 'GLOBAL_IDX', 'unique': False, 'column_names': ['STATE', 'CITY']},
+                                 {'name': 'LOCAL_IDX', 'unique': False, 'column_names': ['_INDEX_ID', 'POPULATION', 'STATE', 'CITY']}])
+
+                pk_result = inspector.get_pk_constraint('US_POPULATION')
+                self.assertTrue(pk_result, {'constrained_columns': ['STATE', 'CITY'], 'name': 'MY_PK'})
+
             finally:
                 connection.execute('drop table if exists us_population')
 

+ 2 - 0
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/tests/test_types.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/tests/test_types.py

@@ -350,6 +350,8 @@ class TypesTest(DatabaseTestCase):
     def test_array(self):
         self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val integer[])")
         with self.conn.cursor() as cursor:
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [])
             cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ARRAY[1, 2])")
             cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ARRAY[1, 2])")
             cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [[2, 3]])
             cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [[2, 3]])
             cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [[4]])
             cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [[4]])

+ 2 - 1
desktop/core/ext-py/phoenixdb-1.0.1/phoenixdb/types.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/phoenixdb/types.py

@@ -221,10 +221,11 @@ JDBC_TO_REP = dict([
     (-16, common_pb2.STRING),  # LONGNVARCHAR
     (2011, common_pb2.STRING),  # NCLOB
     (2009, common_pb2.STRING),  # SQLXML
+    # Returned by Avatica for Arrays in EMPTY resultsets
+    (2000, common_pb2.BYTE_STRING)  # JAVA_OBJECT
     # These are defined by JDBC, but cannot be mapped
     # NULL
     # OTHER
-    # JAVA_OBJECT
     # DISTINCT
     # STRUCT
     # ARRAY 2003 - We are handling this as a special case
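The new JAVA_OBJECT (2000) entry pairs with the test_array change above: on an empty result set Avatica reports ARRAY columns as JAVA_OBJECT, so a mapping is needed for the column metadata to be processed. A small sketch of the case it covers; the URL and table name are illustrative:

    import phoenixdb

    with phoenixdb.connect('http://localhost:8765/', autocommit=True) as connection:
        with connection.cursor() as cursor:
            cursor.execute('CREATE TABLE IF NOT EXISTS ARRAY_EXAMPLE '
                           '(ID INTEGER PRIMARY KEY, VAL INTEGER[])')
            # Selecting from the still-empty table exercises the JAVA_OBJECT mapping;
            # fetchall() returns [] instead of tripping over the unmapped JDBC type.
            cursor.execute('SELECT ID, VAL FROM ARRAY_EXAMPLE ORDER BY ID')
            print(cursor.fetchall())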

+ 21 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/requirements.txt

@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+protobuf>=3.0.0
+requests
+requests-gssapi

+ 34 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/setup.cfg

@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[nosetests]
+verbosity=2
+testmatch=^test_.+
+where=phoenixdb/tests
+
+[build_sphinx]
+source-dir = doc
+build-dir = doc/build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/build/html
+
+[flake8]
+max-line-length = 140
+exclude =
+  e,e3,env,venv,doc,build,dist,.tox,.idea,
+  ./phoenixdb/tests/dbapi20.py,
+  ./phoenixdb/avatica/proto/*_pb2.py

+ 1 - 1
desktop/core/ext-py/phoenixdb-1.0.1/setup.py → desktop/core/ext-py/phoenixdb-1.1.0.dev0/setup.py

@@ -59,7 +59,7 @@ else:
         'Sphinx;python_version>="3.6"',
         'Sphinx;python_version>="3.6"',
     ],
     ],
 
 
-version = "1.0.1"
+version = "1.1.0.dev0"
 
 
 setup(
 setup(
     name="phoenixdb",
     name="phoenixdb",

+ 25 - 0
desktop/core/ext-py/phoenixdb-1.1.0.dev0/tox.ini

@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[tox]
+envlist = py27,py34,py35,py36,py37,py38
+[testenv]
+passenv = PHOENIXDB_TEST_DB_URL
+commands =
+  flake8 phoenixdb  
+  nosetests -v
+deps = -rrequirements.txt
+  nose
+  flake8