Browse Source

[phoenixdb] adding modified phoenixdb lib in ext-py3 folder (#2904)

* [phoenixdb] adding phoenixdb lib in ext-py3 folder

* fixing get_view_names() function
Ayush Goyal 3 years ago
parent
commit
38d9c9f662
30 changed files with 5107 additions and 1 deletion
  1. Makefile.sdk (+1 -0)
  2. Makefile.vars (+1 -0)
  3. desktop/core/ext-py3/phoenixdb-1.1.0/LICENSE (+202 -0)
  4. desktop/core/ext-py3/phoenixdb-1.1.0/NOTICE (+10 -0)
  5. desktop/core/ext-py3/phoenixdb-1.1.0/PKG-INFO (+164 -0)
  6. desktop/core/ext-py3/phoenixdb-1.1.0/README.rst (+144 -0)
  7. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/__init__.py (+195 -0)
  8. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/__init__.py (+16 -0)
  9. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/client.py (+618 -0)
  10. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/__init__.py (+14 -0)
  11. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/common_pb2.py (+36 -0)
  12. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/requests_pb2.py (+36 -0)
  13. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/responses_pb2.py (+36 -0)
  14. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/connection.py (+209 -0)
  15. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/cursor.py (+390 -0)
  16. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/errors.py (+93 -0)
  17. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/meta.py (+210 -0)
  18. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/sqlalchemy_phoenix.py (+290 -0)
  19. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/__init__.py (+64 -0)
  20. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/dbapi20.py (+857 -0)
  21. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_avatica.py (+52 -0)
  22. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_db.py (+308 -0)
  23. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_dbapi20.py (+125 -0)
  24. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_errors.py (+60 -0)
  25. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_sqlalchemy.py (+159 -0)
  26. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_types.py (+380 -0)
  27. desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/types.py (+305 -0)
  28. desktop/core/ext-py3/phoenixdb-1.1.0/setup.cfg (+24 -0)
  29. desktop/core/ext-py3/phoenixdb-1.1.0/setup.py (+108 -0)
  30. desktop/core/requirements.txt (+0 -1)

+ 1 - 0
Makefile.sdk

@@ -126,6 +126,7 @@ ext-env-pip-install:
 	@$(ENV_PIP) install -e $(PYSAML2_PKG)
 	@$(ENV_PIP) install -e $(DJANGOSAML2_PKG)
 	@$(ENV_PIP) install -e $(DJANGO_AXES_PKG)
+	@$(ENV_PIP) install -e $(PHOENIXDB_PKG)
 	@echo '--- Finished installing ext-py3 into virtual-env via $(ENV_PIP)'
 	@touch $@
 

+ 1 - 0
Makefile.vars

@@ -147,3 +147,4 @@ REQUEST_PKG := $(ROOT)/desktop/core/ext-py3/requests-2.27.1
 PYSAML2_PKG := $(ROOT)/desktop/core/ext-py3/pysaml2-5.0.0
 DJANGOSAML2_PKG := $(ROOT)/desktop/core/ext-py3/djangosaml2-0.18.0
 DJANGO_AXES_PKG := $(ROOT)/desktop/core/ext-py3/django-axes-5.13.0
+PHOENIXDB_PKG := $(ROOT)/desktop/core/ext-py3/phoenixdb-1.1.0

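With the new PHOENIXDB_PKG variable in place, the ext-env-pip-install target above installs the vendored package into Hue's virtual-env in editable mode. As a quick, hypothetical sanity check (a sketch; the two constants are module-level values defined in phoenixdb/__init__.py later in this commit), something like the following can be run inside the virtual-env::

    import phoenixdb

    # Both constants are defined in phoenixdb/__init__.py below.
    print(phoenixdb.apilevel)    # '2.0'   -- the module implements DB API 2.0
    print(phoenixdb.paramstyle)  # 'qmark' -- queries use '?' placeholders
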
+ 202 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 10 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/NOTICE

@@ -0,0 +1,10 @@
+Apache Phoenix -- PhoenixDB
+Copyright 2020 The Apache Software Foundation
+
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
+
+This project was originally created by Lukas Lalinsky, copyright 2015.
+
+This project contains phoenixdb/phoenixdb/sqlalchemy_phoenix.py which is a modification from
+https://github.com/Pirionfr/pyPhoenix, authored by Dimitri Capitaine, copyright 2017.

+ 164 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/PKG-INFO

@@ -0,0 +1,164 @@
+Metadata-Version: 2.1
+Name: phoenixdb
+Version: 1.1.0
+Summary: Phoenix database adapter for Python
+Home-page: http://phoenix.apache.org/python.html
+Author: Apache Software Foundation
+Author-email: dev@phoenix.apache.org
+License: Apache 2
+Description: Phoenix database adapter for Python
+        ===================================
+        
+        ``phoenixdb`` is a Python library for accessing 
+        `Apache Phoenix <http://phoenix.apache.org/>`_
+        using the
+        `remote query server <http://phoenix.apache.org/server.html>`_.
+        This library implements the
+        standard `DB API 2.0 <https://www.python.org/dev/peps/pep-0249/>`_ interface and a
+        subset of `SQLAlchemy <https://www.sqlalchemy.org/>`_, either of which should be familiar
+        to most Python programmers.
+        
+        Installation
+        ------------
+        
+        The source code is part of the phoenix-queryserver source distribution.
+        You can download it from <https://phoenix.apache.org/>, or get the latest development version
+        from <https://github.com/apache/phoenix-queryserver>.
+        
+        Extract the archive and then install it manually::
+        
+            cd /path/to/phoenix-queryserver-x.y.z/python/phoenixdb
+            python setup.py install
+        
+        Usage
+        -----
+        
+        The library implements the standard DB API 2.0 interface, so it can be
+        used the same way you would use any other SQL database from Python, for example::
+        
+            import phoenixdb
+            import phoenixdb.cursor
+        
+            database_url = 'http://localhost:8765/'
+            conn = phoenixdb.connect(database_url, autocommit=True)
+        
+            cursor = conn.cursor()
+            cursor.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username VARCHAR)")
+            cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
+            cursor.execute("SELECT * FROM users")
+            print(cursor.fetchall())
+        
+            cursor = conn.cursor(cursor_factory=phoenixdb.cursor.DictCursor)
+            cursor.execute("SELECT * FROM users WHERE id=1")
+            print(cursor.fetchone()['USERNAME'])
+        
+        
+        Setting up a development environment
+        ------------------------------------
+        
+        If you want to quickly try out the included examples, you can set up a
+        local `virtualenv <https://virtualenv.pypa.io/en/latest/>`_ with all the
+        necessary requirements::
+        
+            virtualenv e
+            source e/bin/activate
+            pip install -r requirements.txt
+            python setup.py develop
+        
+        You can start a Phoenix QueryServer instance on http://localhost:8765 for testing by running
+        the following command in the phoenix-queryserver-parent directory::
+        
+            mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
+            -Dit.test=QueryServerBasicsIT\#startLocalPQS \
+            -Ddo.not.randomize.pqs.port=true -Dstart.unsecure.pqs=true
+        
+        You can start a secure (https+kerberos) Phoenix QueryServer instance on https://localhost:8765
+        for testing by running the following command in the phoenix-queryserver-parent directory::
+        
+            mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
+            -Dit.test=SecureQueryServerPhoenixDBIT\#startLocalPQS \
+            -Ddo.not.randomize.pqs.port=true -Dstart.secure.pqs=true
+        
+        This will also create a shell script at phoenix-queryserver-it/target/krb_setup.sh, which you can use to set
+        up the environment for the tests.
+        
+        If you want to use the library without installing it, you can use
+        the `PYTHONPATH` environment variable to point to the library directly::
+        
+            cd phoenix-queryserver-parent/python-phoenixdb
+            python setup.py build
+            cd ~/my_project
+            PYTHONPATH=$PHOENIX_HOME/build/lib python my_app.py
+        
+        Don't forget to run flake8 on your changes.
+        
+        Running the test suite
+        ----------------------
+        
+        The library comes with a test suite for testing Python DB API 2.0 compliance and
+        various Phoenix-specific features. In order to run the test suite, you need a
+        working Phoenix database and have to set the ``PHOENIXDB_TEST_DB_URL`` environment variable::
+        
+            export PHOENIXDB_TEST_DB_URL='http://localhost:8765/'
+            nosetests
+        
+        If you use a secure PQS server, you can set the connection parameters via the following environment
+        variables:
+        
+        - PHOENIXDB_TEST_DB_TRUSTSTORE
+        - PHOENIXDB_TEST_DB_AUTHENTICATION
+        - PHOENIXDB_TEST_DB_AVATICA_USER
+        - PHOENIXDB_TEST_DB_AVATICA_PASSWORD
+        
+        Similarly, tox can be used to run the test suite against multiple Python versions::
+        
+            pyenv install 3.5.5
+            pyenv install 3.6.4
+            pyenv install 2.7.14
+            pyenv global 2.7.14 3.5.5 3.6.4
+            PHOENIXDB_TEST_DB_URL='http://localhost:8765' tox
+        
+        You can use tox and Docker to run the tests on supported Python versions up to 3.8 without
+        installing the environments locally::
+        
+            docker build -t toxtest .
+            docker run --rm  -v `pwd`:/src toxtest
+        
+        You can also run the test suite from Maven as part of the Java build by setting the
+        run.full.python.testsuite property. You DO NOT need to set the PHOENIXDB_* environment variables;
+        Maven will set them up for you. The output of the test run will be saved in
+        phoenix-queryserver/phoenix-queryserver-it/target/python-stdout.log and python-stderr.log::
+        
+            mvn clean verify -Drun.full.python.testsuite=true
+        
+        Known issues
+        ------------
+        
+        - TIME and DATE columns in Phoenix are stored as full timestamps with a millisecond accuracy,
+          but the remote protocol only exposes the time (hour/minute/second) or date (year/month/day)
+          parts of the columns. (`CALCITE-797 <https://issues.apache.org/jira/browse/CALCITE-797>`_, `CALCITE-798 <https://issues.apache.org/jira/browse/CALCITE-798>`_)
+        - TIMESTAMP columns in Phoenix are stored with a nanosecond accuracy, but the remote protocol truncates them to milliseconds. (`CALCITE-796 <https://issues.apache.org/jira/browse/CALCITE-796>`_)
+        
+        
+        SQLAlchemy feature support
+        --------------------------
+        
+        SQLAlchemy has a broad API, ranging from basic SQL commands to object-relational mapping support.
+        
+        Today, python-phoenixdb only supports the following subset of the complete SQLAlchemy API:
+        
+        - `Textual SQL <https://docs.sqlalchemy.org/en/13/core/tutorial.html#using-textual-sql>`_
+        
+        All other APIs should be considered not implemented.
+        
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Provides-Extra: SQLAlchemy

+ 144 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/README.rst

@@ -0,0 +1,144 @@
+Phoenix database adapter for Python
+===================================
+
+``phoenixdb`` is a Python library for accessing 
+`Apache Phoenix <http://phoenix.apache.org/>`_
+using the
+`remote query server <http://phoenix.apache.org/server.html>`_.
+This library implements the
+standard `DB API 2.0 <https://www.python.org/dev/peps/pep-0249/>`_ interface and a
+subset of `SQLAlchemy <https://www.sqlalchemy.org/>`_, either of which should be familiar
+to most Python programmers.
+
+Installation
+------------
+
+The source code is part of the phoenix-queryserver source distribution.
+You can download it from <https://phoenix.apache.org/>, or get the latest development version
+from <https://github.com/apache/phoenix-queryserver>.
+
+Extract the archive and then install it manually::
+
+    cd /path/to/phoenix-queryserver-x.y.z/python/phoenixdb
+    python setup.py install
+
+Usage
+-----
+
+The library implements the standard DB API 2.0 interface, so it can be
+used the same way you would use any other SQL database from Python, for example::
+
+    import phoenixdb
+    import phoenixdb.cursor
+
+    database_url = 'http://localhost:8765/'
+    conn = phoenixdb.connect(database_url, autocommit=True)
+
+    cursor = conn.cursor()
+    cursor.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username VARCHAR)")
+    cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
+    cursor.execute("SELECT * FROM users")
+    print(cursor.fetchall())
+
+    cursor = conn.cursor(cursor_factory=phoenixdb.cursor.DictCursor)
+    cursor.execute("SELECT * FROM users WHERE id=1")
+    print(cursor.fetchone()['USERNAME'])
+
+
+Setting up a development environment
+------------------------------------
+
+If you want to quickly try out the included examples, you can set up a
+local `virtualenv <https://virtualenv.pypa.io/en/latest/>`_ with all the
+necessary requirements::
+
+    virtualenv e
+    source e/bin/activate
+    pip install -r requirements.txt
+    python setup.py develop
+
+You can start a Phoenix QueryServer instance on http://localhost:8765 for testing by running
+the following command in the phoenix-queryserver-parent directory::
+
+    mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
+    -Dit.test=QueryServerBasicsIT\#startLocalPQS \
+    -Ddo.not.randomize.pqs.port=true -Dstart.unsecure.pqs=true
+
+You can start a secure (https+kerberos) Phoenix QueryServer instance on https://localhost:8765
+for testing by running the following command in the phoenix-queryserver-parent directory::
+
+    mvn clean verify -am -pl phoenix-queryserver-it -Dtest=foo \
+    -Dit.test=SecureQueryServerPhoenixDBIT\#startLocalPQS \
+    -Ddo.not.randomize.pqs.port=true -Dstart.secure.pqs=true
+
+This will also create a shell script at phoenix-queryserver-it/target/krb_setup.sh, which you can use to set
+up the environment for the tests.
+
+If you want to use the library without installing it, you can use
+the `PYTHONPATH` environment variable to point to the library directly::
+
+    cd phoenix-queryserver-parent/python-phoenixdb
+    python setup.py build
+    cd ~/my_project
+    PYTHONPATH=$PHOENIX_HOME/build/lib python my_app.py
+
+Don't forget to run flake8 on your changes.
+
+Running the test suite
+----------------------
+
+The library comes with a test suite for testing Python DB API 2.0 compliance and
+various Phoenix-specific features. In order to run the test suite, you need a
+working Phoenix database and have to set the ``PHOENIXDB_TEST_DB_URL`` environment variable::
+
+    export PHOENIXDB_TEST_DB_URL='http://localhost:8765/'
+    nosetests
+
+If you use a secure PQS server, you can set the connection parameters via the following environment
+variables:
+
+- PHOENIXDB_TEST_DB_TRUSTSTORE
+- PHOENIXDB_TEST_DB_AUTHENTICATION
+- PHOENIXDB_TEST_DB_AVATICA_USER
+- PHOENIXDB_TEST_DB_AVATICA_PASSWORD
+
+Similarly, tox can be used to run the test suite against multiple Python versions::
+
+    pyenv install 3.5.5
+    pyenv install 3.6.4
+    pyenv install 2.7.14
+    pyenv global 2.7.14 3.5.5 3.6.4
+    PHOENIXDB_TEST_DB_URL='http://localhost:8765' tox
+
+You can use tox and Docker to run the tests on supported Python versions up to 3.8 without
+installing the environments locally::
+
+    docker build -t toxtest .
+    docker run --rm  -v `pwd`:/src toxtest
+
+You can also run the test suite from Maven as part of the Java build by setting the
+run.full.python.testsuite property. You DO NOT need to set the PHOENIXDB_* environment variables;
+Maven will set them up for you. The output of the test run will be saved in
+phoenix-queryserver/phoenix-queryserver-it/target/python-stdout.log and python-stderr.log::
+
+    mvn clean verify -Drun.full.python.testsuite=true
+
+Known issues
+------------
+
+- TIME and DATE columns in Phoenix are stored as full timestamps with a millisecond accuracy,
+  but the remote protocol only exposes the time (hour/minute/second) or date (year/month/day)
+  parts of the columns. (`CALCITE-797 <https://issues.apache.org/jira/browse/CALCITE-797>`_, `CALCITE-798 <https://issues.apache.org/jira/browse/CALCITE-798>`_)
+- TIMESTAMP columns in Phoenix are stored with a nanosecond accuracy, but the remote protocol truncates them to milliseconds. (`CALCITE-796 <https://issues.apache.org/jira/browse/CALCITE-796>`_)
+
+
+SQLAlchemy feature support
+--------------------------
+
+SQLAlchemy has a broad API, ranging from basic SQL commands to object-relational mapping support.
+
+Today, python-phoenixdb only supports the following subset of the complete SQLAlchemy API:
+
+- `Textual SQL <https://docs.sqlalchemy.org/en/13/core/tutorial.html#using-textual-sql>`_
+
+All other APIs should be considered not implemented.

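Since only textual SQL is implemented, SQLAlchemy usage with this dialect stays close to raw statements. A minimal sketch, assuming the dialect is registered under the phoenix:// URL scheme (as in the upstream phoenixdb documentation) and a query server is running locally; the table and column names are illustrative::

    from sqlalchemy import create_engine, text

    # The phoenix:// scheme is handled by phoenixdb/sqlalchemy_phoenix.py.
    engine = create_engine('phoenix://localhost:8765/')

    with engine.connect() as conn:
        # Textual SQL is the supported subset; SQLAlchemy binds :id to the
        # DB API's qmark placeholder before handing the query to phoenixdb.
        rows = conn.execute(text("SELECT * FROM users WHERE id = :id"), id=1)
        print(rows.fetchall())
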
+ 195 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/__init__.py

@@ -0,0 +1,195 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import sys
+
+import gssapi
+
+from phoenixdb import errors, types
+from phoenixdb.avatica import AvaticaClient
+from phoenixdb.connection import Connection
+from phoenixdb.errors import *  # noqa: F401,F403
+from phoenixdb.types import *  # noqa: F401,F403
+
+from requests.auth import HTTPBasicAuth, HTTPDigestAuth
+
+from requests_gssapi import HTTPSPNEGOAuth
+
+if sys.version_info.major == 3:
+    from urllib.parse import urlencode, urlparse, urlunparse, parse_qs
+else:
+    from urllib import urlencode
+    from urlparse import urlparse, urlunparse, parse_qs
+
+__all__ = ['connect', 'apilevel', 'threadsafety', 'paramstyle'] + types.__all__ + errors.__all__
+
+
+apilevel = "2.0"
+"""
+This module supports the `DB API 2.0 interface <https://www.python.org/dev/peps/pep-0249/>`_.
+"""
+
+threadsafety = 1
+"""
+Multiple threads can share the module, but neither connections nor cursors.
+"""
+
+paramstyle = 'qmark'
+"""
+Parametrized queries should use the question mark as the parameter placeholder.
+
+For example::
+
+ cursor.execute("SELECT * FROM table WHERE id = ?", [my_id])
+"""
+
+
+def connect(url, max_retries=None, auth=None, authentication=None, avatica_user=None, avatica_password=None,
+            truststore=None, verify=None, do_as=None, user=None, password=None, **kwargs):
+    """Connects to a Phoenix query server.
+
+    :param url:
+        URL to the Phoenix query server, e.g. ``http://localhost:8765/``
+
+    :param autocommit:
+        Switch the connection to autocommit mode.
+
+    :param readonly:
+        Switch the connection to readonly mode.
+
+    :param max_retries:
+        The maximum number of retries in case there is a connection error.
+
+    :param cursor_factory:
+        If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory`
+        is set to it.
+
+    :param auth:
+        Authentication configuration object as expected by the underlying python_requests and
+        python_requests_gssapi libraries.
+
+    :param verify:
+        The path to the PEM file for verifying the server's certificate. It is passed directly to
+        the `~verify` parameter of the underlying python_requests library.
+        Setting it to False disables the server certificate verification.
+
+    :param do_as:
+        Username to impersonate (sets the Hadoop doAs URL parameter)
+
+    :param authentication:
+        Alternative way to specify the authentication mechanism that mimics
+        the semantics of the JDBC driver.
+
+    :param avatica_user:
+        Username for BASIC or DIGEST authentication. Use in conjunction with the
+        `~authentication` option.
+
+    :param avatica_password:
+        Password for BASIC or DIGEST authentication. Use in conjunction with the
+        `~authentication` option.
+
+    :param user:
+        If `~authentication` is BASIC or DIGEST, then this is an alias for `~avatica_user`.
+        If `~authentication` is NONE or SPNEGO, then this is an alias for `~do_as`.
+
+    :param password:
+        If `~authentication` is BASIC or DIGEST, then this is an alias for `~avatica_password`.
+
+    :param truststore:
+        Alias for verify
+
+    :returns:
+        :class:`~phoenixdb.connection.Connection` object.
+    """
+
+    (url, auth, verify) = _process_args(
+        url, auth=auth, authentication=authentication,
+        avatica_user=avatica_user, avatica_password=avatica_password,
+        truststore=truststore, verify=verify, do_as=do_as, user=user, password=password)
+
+    client = AvaticaClient(url, max_retries=max_retries, auth=auth, verify=verify)
+    client.connect()
+    return Connection(client, **kwargs)
+
+
+def _process_args(
+        url, auth=None, authentication=None, avatica_user=None, avatica_password=None,
+        truststore=None, verify=None, do_as=None, user=None, password=None):
+    url_parsed = urlparse(url)
+    url_params = parse_qs(url_parsed.query, keep_blank_values=True)
+
+    # Parse supported JDBC-compatible parameters from the URL. Explicit args take precedence.
+    # Unlike the JDBC driver, we expect these as query params, as the Avatica Java client
+    # has a different idea of what a URL param is than urlparse. (urlparse seems just broken
+    # in this regard)
+    params_changed = False
+    if auth is None and authentication is None and 'authentication' in url_params:
+        authentication = url_params['authentication'][0]
+        del url_params['authentication']
+        params_changed = True
+
+    if avatica_user is None and 'avatica_user' in url_params:
+        avatica_user = url_params['avatica_user'][0]
+        del url_params['avatica_user']
+        params_changed = True
+
+    if avatica_password is None and 'avatica_password' in url_params:
+        avatica_password = url_params['avatica_password'][0]
+        del url_params['avatica_password']
+        params_changed = True
+
+    if verify is None and truststore is None and 'truststore' in url_params:
+        truststore = url_params['truststore'][0]
+        del url_params['truststore']
+        params_changed = True
+
+    if authentication == 'BASIC' or authentication == 'DIGEST':
+        # Handle standard user and password parameters
+        if user is not None and avatica_user is None:
+            avatica_user = user
+        if password is not None and avatica_password is None:
+            avatica_password = password
+    else:
+        # interpret standard user parameter as do_as for SPNEGO and NONE
+        if user is not None and do_as is None:
+            do_as = user
+
+    # Add doAs
+    if do_as:
+        url_params['doAs'] = do_as
+        params_changed = True
+
+    if params_changed:
+        url_parsed = url_parsed._replace(query=urlencode(url_params))
+        url = urlunparse(url_parsed)
+
+    if auth == "SPNEGO":
+        # Special case for backwards compatibility
+        auth = HTTPSPNEGOAuth(opportunistic_auth=True)
+    elif auth is None and authentication is not None:
+        if authentication == "SPNEGO":
+            try:
+                spnego = gssapi.mechs.Mechanism.from_sasl_name("SPNEGO")
+            except AttributeError:
+                spnego = gssapi.OID.from_int_seq("1.3.6.1.5.5.2")
+            auth = HTTPSPNEGOAuth(opportunistic_auth=True, mech=spnego)
+        elif authentication == "BASIC" and avatica_user is not None and avatica_password is not None:
+            auth = HTTPBasicAuth(avatica_user, avatica_password)
+        elif authentication == "DIGEST" and avatica_user is not None and avatica_password is not None:
+            auth = HTTPDigestAuth(avatica_user, avatica_password)
+
+    if verify is None and truststore is not None:
+        verify = truststore
+
+    return (url, auth, verify)

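Taken together, _process_args() lets the same connection be configured either through keyword arguments or through JDBC-style query parameters on the URL, with explicit arguments taking precedence. A short sketch of both forms (host, credentials, and certificate path are placeholders)::

    import phoenixdb

    # Keyword form: BASIC auth plus server certificate verification.
    conn = phoenixdb.connect(
        'https://pqs.example.com:8765/',
        authentication='BASIC',
        avatica_user='alice',
        avatica_password='secret',
        verify='/etc/ssl/pqs-ca.pem',  # truststore=... is an alias for verify
        autocommit=True)

    # URL form: the same settings as query parameters; _process_args()
    # strips them from the URL and builds the equivalent HTTPBasicAuth.
    conn = phoenixdb.connect(
        'https://pqs.example.com:8765/?authentication=BASIC'
        '&avatica_user=alice&avatica_password=secret')
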
+ 16 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/__init__.py

@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .client import AvaticaClient  # noqa: F401

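For completeness, the AvaticaClient re-exported here can also be driven directly, although the class docstring in client.py below steers users toward phoenixdb.connect() instead. A rough sketch of the RPC flow against a hypothetical local query server (connection IDs are chosen client-side; SYSTEM.CATALOG is a Phoenix system table)::

    import uuid

    from phoenixdb.avatica import AvaticaClient

    client = AvaticaClient('http://localhost:8765/')
    client.connect()  # a no-op; the HTTP session is opened on first use

    cid = str(uuid.uuid4())  # the caller picks the connection ID
    client.open_connection(cid)
    sid = client.create_statement(cid)
    results = client.prepare_and_execute(
        cid, sid, 'SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 1')
    client.close_statement(cid, sid)
    client.close_connection(cid)
    client.close()
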
+ 618 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/client.py

@@ -0,0 +1,618 @@
+# Copyright 2015 Lukas Lalinsky
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of the PROTOBUF-over-HTTP RPC protocol used by Avatica."""
+
+import logging
+import math
+import pprint
+import re
+import time
+
+from phoenixdb import errors
+from phoenixdb.avatica.proto import common_pb2, requests_pb2, responses_pb2
+
+import requests
+
+try:
+    import urlparse
+except ImportError:
+    import urllib.parse as urlparse
+
+try:
+    from HTMLParser import HTMLParser
+except ImportError:
+    from html.parser import HTMLParser
+
+__all__ = ['AvaticaClient']
+
+logger = logging.getLogger(__name__)
+
+
+class JettyErrorPageParser(HTMLParser):
+
+    def __init__(self):
+        HTMLParser.__init__(self)
+        self.path = []
+        self.title = []
+        self.message = []
+
+    def handle_starttag(self, tag, attrs):
+        self.path.append(tag)
+
+    def handle_endtag(self, tag):
+        self.path.pop()
+
+    def handle_data(self, data):
+        if len(self.path) > 2 and self.path[0] == 'html' and self.path[1] == 'body':
+            if len(self.path) == 3 and self.path[2] == 'h2':
+                self.title.append(data.strip())
+            elif len(self.path) == 4 and self.path[2] == 'p' and self.path[3] == 'pre':
+                self.message.append(data.strip())
+
+
+def parse_url(url):
+    url = urlparse.urlparse(url)
+    if not url.scheme and not url.netloc and url.path:
+        netloc = url.path
+        if ':' not in netloc:
+            netloc = '{}:8765'.format(netloc)
+        return urlparse.ParseResult('http', netloc, '/', '', '', '')
+    return url
+
+
+# Defined in phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+SQLSTATE_ERROR_CLASSES = [
+    ('08', errors.OperationalError),  # Connection Exception
+    ('22018', errors.IntegrityError),  # Constraint violation.
+    ('22', errors.DataError),  # Data Exception
+    ('23', errors.IntegrityError),  # Constraint Violation
+    ('24', errors.InternalError),  # Invalid Cursor State
+    ('25', errors.InternalError),  # Invalid Transaction State
+    ('42', errors.ProgrammingError),  # Syntax Error or Access Rule Violation
+    ('XLC', errors.OperationalError),  # Execution exceptions
+    ('INT', errors.InternalError),  # Phoenix internal error
+]
+
+
+def raise_sql_error(code, sqlstate, message):
+    for prefix, error_class in SQLSTATE_ERROR_CLASSES:
+        if sqlstate.startswith(prefix):
+            raise error_class(message, code, sqlstate)
+    raise errors.InternalError(message, code, sqlstate)
+
+
+def parse_and_raise_sql_error(message):
+    match = re.findall(r'(?:([^ ]+): )?ERROR (\d+) \(([0-9A-Z]{5})\): (.*?) ->', message)
+    if match:
+        exception, code, sqlstate, message = match[0]
+        raise_sql_error(int(code), sqlstate, message)
+
+
+def parse_error_page(html):
+    parser = JettyErrorPageParser()
+    parser.feed(html)
+    if parser.title == ['HTTP ERROR: 500']:
+        message = ' '.join(parser.message).strip()
+        parse_and_raise_sql_error(message)
+        raise errors.InternalError(message)
+
+
+def parse_error_protobuf(text):
+    try:
+        message = common_pb2.WireMessage()
+        message.ParseFromString(text)
+
+        err = responses_pb2.ErrorResponse()
+        if not err.ParseFromString(message.wrapped_message):
+            raise Exception('No error message found')
+    except Exception:
+        # Not a protobuf error, fall through
+        return
+
+    parse_and_raise_sql_error(err.error_message)
+    raise_sql_error(err.error_code, err.sql_state, err.error_message)
+    # Not a protobuf error, fall through
+
+
+class AvaticaClient(object):
+    """Client for Avatica's RPC server.
+
+    This exposes all low-level functionality that the Avatica
+    server provides, using the native terminology. You most likely
+    do not want to use this class directly, but rather connect
+    to a server using :func:`phoenixdb.connect`.
+    """
+
+    def __init__(self, url, max_retries=None, auth=None, verify=None):
+        """Constructs a new client object.
+
+        :param url:
+            URL of an Avatica RPC server.
+        """
+        self.url = parse_url(url)
+        self.max_retries = max_retries if max_retries is not None else 3
+        self.auth = auth
+        self.verify = verify
+        self.session = None
+
+    def __del__(self):
+        """Finalizer. Calls close() to close any open sessions"""
+        self.close()
+
+    def connect(self):
+        """Open the session on the the first request instead"""
+        pass
+
+    def close(self):
+        if self.session:
+            self.session.close()
+            self.session = None
+
+    def _post_request(self, body, headers):
+        # Create the session if we haven't before
+        if not self.session:
+            logger.debug("Creating a new Session")
+            self.session = requests.Session()
+            self.session.headers.update(headers)
+            self.session.stream = True
+            if self.auth is not None:
+                self.session.auth = self.auth
+
+        retry_count = self.max_retries
+        while True:
+            logger.debug("POST %s %r %r", self.url.geturl(), body, self.session.headers)
+
+            requestArgs = {'data': body}
+
+            # Setting verify on the Session is not the same as setting it
+            # as a request arg
+            if self.verify is not None:
+                requestArgs.update(verify=self.verify)
+
+            try:
+                response = self.session.post(self.url.geturl(), **requestArgs)
+
+            except requests.HTTPError as e:
+                if retry_count > 0:
+                    delay = math.exp(-retry_count)
+                    logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
+                    time.sleep(delay)
+                    retry_count -= 1
+                    continue
+                raise errors.InterfaceError('RPC request failed', cause=e)
+            else:
+                if response.status_code == requests.codes.service_unavailable:
+                    if retry_count > 0:
+                        delay = math.exp(-retry_count)
+                        logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
+                        time.sleep(delay)
+                        retry_count -= 1
+                        continue
+                return response
+
+    def _apply(self, request_data, expected_response_type=None):
+        logger.debug("Sending request\n%s", pprint.pformat(request_data))
+
+        request_name = request_data.__class__.__name__
+        message = common_pb2.WireMessage()
+        message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
+        message.wrapped_message = request_data.SerializeToString()
+        body = message.SerializeToString()
+        headers = {'content-type': 'application/x-google-protobuf'}
+
+        response = self._post_request(body, headers)
+        response_body = response.raw.read()
+
+        if response.status_code != requests.codes.ok:
+            logger.debug("Received response\n%s", response_body)
+            if b'<html>' in response_body:
+                parse_error_page(response_body.decode(response.encoding))
+            else:
+                # assume the response is in protobuf format
+                parse_error_protobuf(response_body)
+            raise errors.InterfaceError('RPC request returned invalid status code', response.status_code)
+
+        message = common_pb2.WireMessage()
+        message.ParseFromString(response_body)
+
+        logger.debug("Received response\n%s", message)
+
+        if expected_response_type is None:
+            expected_response_type = request_name.replace('Request', 'Response')
+
+        expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
+        if message.name != expected_response_type:
+            raise errors.InterfaceError('unexpected response type "{}" expected "{}"'.format(message.name, expected_response_type))
+
+        return message.wrapped_message
+
+    def get_catalogs(self, connection_id):
+        request = requests_pb2.CatalogsRequest()
+        request.connection_id = connection_id
+        response_data = self._apply(request, 'ResultSetResponse')
+        response = responses_pb2.ResultSetResponse()
+        response.ParseFromString(response_data)
+        return response
+
+    def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
+        request = requests_pb2.SchemasRequest()
+        request.connection_id = connection_id
+        if catalog is not None:
+            request.catalog = catalog
+        if schemaPattern is not None:
+            request.schema_pattern = schemaPattern
+        response_data = self._apply(request, 'ResultSetResponse')
+        response = responses_pb2.ResultSetResponse()
+        response.ParseFromString(response_data)
+        return response
+
+    def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
+        request = requests_pb2.TablesRequest()
+        request.connection_id = connection_id
+        if catalog is not None:
+            request.catalog = catalog
+        if schemaPattern is not None:
+            request.schema_pattern = schemaPattern
+        if tableNamePattern is not None:
+            request.table_name_pattern = tableNamePattern
+        if typeList is not None:
+            request.type_list.extend(typeList)
+        request.has_type_list = typeList is not None
+        response_data = self._apply(request, 'ResultSetResponse')
+        response = responses_pb2.ResultSetResponse()
+        response.ParseFromString(response_data)
+        return response
+
+    def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
+        request = requests_pb2.ColumnsRequest()
+        request.connection_id = connection_id
+        if catalog is not None:
+            request.catalog = catalog
+        if schemaPattern is not None:
+            request.schema_pattern = schemaPattern
+        if tableNamePattern is not None:
+            request.table_name_pattern = tableNamePattern
+        if columnNamePattern is not None:
+            request.column_name_pattern = columnNamePattern
+        response_data = self._apply(request, 'ResultSetResponse')
+        response = responses_pb2.ResultSetResponse()
+        response.ParseFromString(response_data)
+        return response
+
+    def get_table_types(self, connection_id):
+        request = requests_pb2.TableTypesRequest()
+        request.connection_id = connection_id
+        response_data = self._apply(request, 'ResultSetResponse')
+        response = responses_pb2.ResultSetResponse()
+        response.ParseFromString(response_data)
+        return response
+
+    def get_type_info(self, connection_id):
+        request = requests_pb2.TypeInfoRequest()
+        request.connection_id = connection_id
+        response_data = self._apply(request, 'ResultSetResponse')
+        response = responses_pb2.ResultSetResponse()
+        response.ParseFromString(response_data)
+        return response
+
+    def get_sync_results(self, connection_id, statement_id, state):
+        request = requests_pb2.SyncResultsRequest()
+        request.connection_id = connection_id
+        request.statement_id = statement_id
+        request.state.CopyFrom(state)
+        response_data = self._apply(request, 'SyncResultsResponse')
+        syncResultResponse = responses_pb2.SyncResultsResponse()
+        syncResultResponse.ParseFromString(response_data)
+        return syncResultResponse
+
+    def connection_sync_dict(self, connection_id, connProps=None):
+        conn_props = self.connection_sync(connection_id, connProps)
+        return {
+            'autoCommit': conn_props.auto_commit,
+            'readOnly': conn_props.read_only,
+            'transactionIsolation': conn_props.transaction_isolation,
+            'catalog': conn_props.catalog,
+            'schema': conn_props.schema}
+
+    def connection_sync(self, connection_id, connProps=None):
+        """Synchronizes connection properties with the server.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :param connProps:
+            Dictionary with the properties that should be changed.
+
+        :returns:
+            A ``common_pb2.ConnectionProperties`` object.
+        """
+        if connProps:
+            props = connProps.copy()
+        else:
+            props = {}
+
+        request = requests_pb2.ConnectionSyncRequest()
+        request.connection_id = connection_id
+        request.conn_props.has_auto_commit = True
+        request.conn_props.has_read_only = True
+        if 'autoCommit' in props:
+            request.conn_props.auto_commit = props.pop('autoCommit')
+        if 'readOnly' in props:
+            request.conn_props.read_only = props.pop('readOnly')
+        if 'transactionIsolation' in props:
+            request.conn_props.transaction_isolation = props.pop('transactionIsolation', None)
+        if 'catalog' in props:
+            request.conn_props.catalog = props.pop('catalog', None)
+        if 'schema' in props:
+            request.conn_props.schema = props.pop('schema', None)
+
+        if props:
+            logger.warning("Unhandled connection property:" + props)
+
+        response_data = self._apply(request)
+        response = responses_pb2.ConnectionSyncResponse()
+        response.ParseFromString(response_data)
+        return response.conn_props
+
+    def open_connection(self, connection_id, info=None):
+        """Opens a new connection.
+
+        :param connection_id:
+            ID of the connection to open.
+        """
+        request = requests_pb2.OpenConnectionRequest()
+        request.connection_id = connection_id
+        if info is not None:
+            # Info is a list of repeated pairs, setting a dict directly fails
+            for k, v in info.items():
+                request.info[k] = v
+
+        response_data = self._apply(request)
+        response = responses_pb2.OpenConnectionResponse()
+        response.ParseFromString(response_data)
+
+    def close_connection(self, connection_id):
+        """Closes a connection.
+
+        :param connection_id:
+            ID of the connection to close.
+        """
+        request = requests_pb2.CloseConnectionRequest()
+        request.connection_id = connection_id
+        self._apply(request)
+
+    def create_statement(self, connection_id):
+        """Creates a new statement.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :returns:
+            New statement ID.
+        """
+        request = requests_pb2.CreateStatementRequest()
+        request.connection_id = connection_id
+
+        response_data = self._apply(request)
+        response = responses_pb2.CreateStatementResponse()
+        response.ParseFromString(response_data)
+        return response.statement_id
+
+    def close_statement(self, connection_id, statement_id):
+        """Closes a statement.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :param statement_id:
+            ID of the statement to close.
+        """
+        request = requests_pb2.CloseStatementRequest()
+        request.connection_id = connection_id
+        request.statement_id = statement_id
+
+        self._apply(request)
+
+    def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
+        """Prepares and immediately executes a statement.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :param statement_id:
+            ID of the statement to prepare.
+
+        :param sql:
+            SQL query.
+
+        :param max_rows_total:
+            The maximum number of rows that will be allowed for this query.
+
+        :param first_frame_max_size:
+            The maximum number of rows that will be returned in the first Frame returned for this query.
+
+        :returns:
+            Result set with the signature of the prepared statement and the first frame data.
+        """
+        request = requests_pb2.PrepareAndExecuteRequest()
+        request.connection_id = connection_id
+        request.statement_id = statement_id
+        request.sql = sql
+        if max_rows_total is not None:
+            request.max_rows_total = max_rows_total
+        if first_frame_max_size is not None:
+            request.first_frame_max_size = first_frame_max_size
+
+        response_data = self._apply(request, 'ExecuteResponse')
+        response = responses_pb2.ExecuteResponse()
+        response.ParseFromString(response_data)
+        return response.results
+
+    def prepare(self, connection_id, sql, max_rows_total=None):
+        """Prepares a statement.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :param sql:
+            SQL query.
+
+        :param max_rows_total:
+            The maximum number of rows that will be allowed for this query.
+
+        :returns:
+            Signature of the prepared statement.
+        """
+        request = requests_pb2.PrepareRequest()
+        request.connection_id = connection_id
+        request.sql = sql
+        if max_rows_total is not None:
+            request.max_rows_total = max_rows_total
+
+        response_data = self._apply(request)
+        response = responses_pb2.PrepareResponse()
+        response.ParseFromString(response_data)
+        return response.statement
+
+    def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
+        """Returns a frame of rows.
+
+        The frame describes whether there may be another frame. If there is not
+        another frame, the current iteration is done when we have finished the
+        rows in this frame.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :param statement_id:
+            ID of the statement to fetch rows from.
+
+        :param signature:
+            common_pb2.Signature object
+
+        :param parameter_values:
+            A list of parameter values, if statement is to be executed; otherwise ``None``.
+
+        :param first_frame_max_size:
+            The maximum number of rows that will be returned in the first Frame returned for this query.
+
+        :returns:
+            Result set with the signature of the prepared statement and the first frame data.
+        """
+        request = requests_pb2.ExecuteRequest()
+        request.statementHandle.id = statement_id
+        request.statementHandle.connection_id = connection_id
+        request.statementHandle.signature.CopyFrom(signature)
+        if parameter_values is not None:
+            request.parameter_values.extend(parameter_values)
+            request.has_parameter_values = True
+        if first_frame_max_size is not None:
+            request.deprecated_first_frame_max_size = first_frame_max_size
+            request.first_frame_max_size = first_frame_max_size
+
+        response_data = self._apply(request)
+        response = responses_pb2.ExecuteResponse()
+        response.ParseFromString(response_data)
+        return response.results
+
+    def execute_batch(self, connection_id, statement_id, rows):
+        """Returns an array of update counts corresponding to each row written.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :param statement_id:
+            ID of the statement to execute the batch against.
+
+        :param rows:
+            A list of lists corresponding to the columns to bind to the statement
+            for many rows.
+
+        :returns:
+            Update counts for the writes.
+        """
+        request = requests_pb2.ExecuteBatchRequest()
+        request.statement_id = statement_id
+        request.connection_id = connection_id
+        if rows is not None:
+            for row in rows:
+                batch = requests_pb2.UpdateBatch()
+                for col in row:
+                    batch.parameter_values.append(col)
+                request.updates.append(batch)
+
+        response_data = self._apply(request)
+        response = responses_pb2.ExecuteBatchResponse()
+        response.ParseFromString(response_data)
+        if response.missing_statement:
+            raise errors.DatabaseError('ExecuteBatch reported missing statement', -1)
+        return response.update_counts
+
+    def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
+        """Returns a frame of rows.
+
+        The frame describes whether there may be another frame. If there is not
+        another frame, the current iteration is done when we have finished the
+        rows in this frame.
+
+        :param connection_id:
+            ID of the current connection.
+
+        :param statement_id:
+            ID of the statement to fetch rows from.
+
+        :param offset:
+            Zero-based offset of first row in the requested frame.
+
+        :param frame_max_size:
+            Maximum number of rows to return; negative means no limit.
+
+        :returns:
+            Frame data, or ``None`` if there are no more.
+        """
+        request = requests_pb2.FetchRequest()
+        request.connection_id = connection_id
+        request.statement_id = statement_id
+        request.offset = offset
+        if frame_max_size is not None:
+            request.frame_max_size = frame_max_size
+
+        response_data = self._apply(request)
+        response = responses_pb2.FetchResponse()
+        response.ParseFromString(response_data)
+        return response.frame
+
+    def commit(self, connection_id):
+        """TODO Commits the transaction
+
+        :param connection_id:
+            ID of the current connection.
+        """
+        request = requests_pb2.CommitRequest()
+        request.connection_id = connection_id
+        return self._apply(request)
+
+    def rollback(self, connection_id):
+        """TODO Rolls back the transaction
+
+        :param connection_id:
+            ID of the current connection.
+        """
+        request = requests_pb2.RollbackRequest()
+        request.connection_id = connection_id
+        return self._apply(request)
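
A minimal sketch of the RPC sequence that the cursor layer drives through the client methods above (create_statement, prepare_and_execute, fetch, close_statement). It assumes an already-constructed client, an already-opened Avatica connection id, and a SELECT that returns at least one frame; only the method calls themselves are taken from this file::

    def run_query(client, conn_id, sql):
        # Mirrors what Cursor.execute() does for a parameterless statement.
        stmt_id = client.create_statement(conn_id)
        try:
            results = client.prepare_and_execute(
                conn_id, stmt_id, sql, first_frame_max_size=100)
            frame = results[0].first_frame
            rows = list(frame.rows)
            while not frame.done:
                # Page through the remaining frames, like Cursor._fetch_next_frame().
                frame = client.fetch(conn_id, stmt_id,
                                     offset=frame.offset + len(frame.rows),
                                     frame_max_size=100)
                rows.extend(frame.rows)
            return rows
        finally:
            client.close_statement(conn_id, stmt_id)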

+ 14 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/__init__.py

@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

File diff too large to display
+ 36 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/common_pb2.py


File diff too large to display
+ 36 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/requests_pb2.py


File diff too large to display
+ 36 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/avatica/proto/responses_pb2.py


+ 209 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/connection.py

@@ -0,0 +1,209 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import uuid
+import weakref
+
+from phoenixdb import errors
+from phoenixdb.cursor import Cursor
+from phoenixdb.errors import ProgrammingError
+from phoenixdb.meta import Meta
+
+__all__ = ['Connection']
+
+logger = logging.getLogger(__name__)
+
+AVATICA_PROPERTIES = ('autoCommit', 'autocommit', 'readOnly', 'readonly', 'transactionIsolation',
+                      'catalog', 'schema')
+
+
+class Connection(object):
+    """Database connection.
+
+    You should not construct this object manually, use :func:`~phoenixdb.connect` instead.
+    """
+
+    cursor_factory = None
+    """
+    The default cursor factory used by :meth:`cursor` if the parameter is not specified.
+    """
+
+    def __init__(self, client, cursor_factory=None, **kwargs):
+        self._client = client
+        self._closed = False
+        if cursor_factory is not None:
+            self.cursor_factory = cursor_factory
+        else:
+            self.cursor_factory = Cursor
+        self._cursors = []
+        self._phoenix_props, avatica_props_init = Connection._map_conn_props(kwargs)
+        self.open()
+
+        # TODO we could probably optimize it away if the defaults are not changed
+        self.set_session(**avatica_props_init)
+
+    def __del__(self):
+        if not self._closed:
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if not self._closed:
+            self.close()
+
+    @staticmethod
+    def _default_avatica_props():
+        return {'autoCommit': False,
+                'readOnly': False,
+                'transactionIsolation': 0,
+                'catalog': '',
+                'schema': ''}
+
+    @staticmethod
+    def _map_conn_props(conn_props):
+        """Sorts and preprocesses args that should be passed to Phoenix and Avatica."""
+
+        avatica_props = {k: v for k, v in conn_props.items() if k in AVATICA_PROPERTIES}
+        phoenix_props = {k: v for k, v in conn_props.items() if k not in AVATICA_PROPERTIES}
+        avatica_props = Connection._map_legacy_avatica_props(avatica_props)
+
+        return (phoenix_props, avatica_props)
+
+    @staticmethod
+    def _map_legacy_avatica_props(props):
+        if 'autocommit' in props:
+            props['autoCommit'] = bool(props.pop('autocommit'))
+        if 'readonly' in props:
+            props['readOnly'] = bool(props.pop('readonly'))
+        return props
+
+    def open(self):
+        """Opens the connection."""
+        self._id = str(uuid.uuid4())
+        self._client.open_connection(self._id, info=self._phoenix_props)
+
+    def close(self):
+        """Closes the connection.
+        No further operations are allowed, either on the connection or any
+        of its cursors, once the connection is closed.
+
+        If the connection is used in a ``with`` statement, this method will
+        be automatically called at the end of the ``with`` block.
+        """
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        for cursor_ref in self._cursors:
+            cursor = cursor_ref()
+            if cursor is not None and not cursor._closed:
+                cursor.close()
+        self._client.close_connection(self._id)
+        self._client.close()
+        self._closed = True
+
+    @property
+    def closed(self):
+        """Read-only attribute specifying if the connection is closed or not."""
+        return self._closed
+
+    def commit(self):
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        self._client.commit(self._id)
+
+    def rollback(self):
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        self._client.rollback(self._id)
+
+    def cursor(self, cursor_factory=None):
+        """Creates a new cursor.
+
+        :param cursor_factory:
+            This argument can be used to create non-standard cursors.
+            The class returned must be a subclass of
+            :class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
+            A default factory for the connection can also be specified using the
+            :attr:`cursor_factory` attribute.
+
+        :returns:
+            A :class:`~phoenixdb.cursor.Cursor` object.
+        """
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        cursor = (cursor_factory or self.cursor_factory)(self)
+        self._cursors.append(weakref.ref(cursor, self._cursors.remove))
+        return cursor
+
+    def set_session(self, **props):
+        """Sets one or more parameters in the current connection.
+
+        :param autocommit:
+            Switch the connection to autocommit mode.
+
+        :param readonly:
+            Switch the connection to read-only mode.
+        """
+        props = Connection._map_legacy_avatica_props(props)
+        self._avatica_props = self._client.connection_sync_dict(self._id, props)
+
+    @property
+    def autocommit(self):
+        """Read/write attribute for switching the connection's autocommit mode."""
+        return self._avatica_props['autoCommit']
+
+    @autocommit.setter
+    def autocommit(self, value):
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        self._avatica_props = self._client.connection_sync_dict(self._id, {'autoCommit': bool(value)})
+
+    @property
+    def readonly(self):
+        """Read/write attribute for switching the connection's readonly mode."""
+        return self._avatica_props['readOnly']
+
+    @readonly.setter
+    def readonly(self, value):
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        self._avatica_props = self._client.connection_sync_dict(self._id, {'readOnly': bool(value)})
+
+    @property
+    def transactionisolation(self):
+        return self._avatica_props['transactionIsolation']
+
+    @transactionisolation.setter
+    def transactionisolation(self, value):
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        self._avatica_props = self._client.connection_sync_dict(self._id, {'transactionIsolation': int(value)})
+
+    def meta(self):
+        """Creates a new meta.
+
+        :returns:
+            A :class:`~phoenixdb.meta` object.
+        """
+        if self._closed:
+            raise ProgrammingError('The connection is already closed.')
+        meta = Meta(self)
+        return meta
+
+
+for name in errors.__all__:
+    setattr(Connection, name, getattr(errors, name))
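
A minimal usage sketch for the Connection class above, driven through phoenixdb.connect() from the package __init__ (the same entry point the test suite uses); the server URL is an assumption for a local Avatica endpoint::

    import phoenixdb

    with phoenixdb.connect('http://localhost:8765/', autocommit=True) as conn:
        print(conn.autocommit)   # reads the cached Avatica 'autoCommit' property
        conn.readonly = True     # round-trips through connection_sync_dict()
        conn.set_session(autocommit=False, readonly=False)
        with conn.cursor() as cursor:
            pass                 # cursors are tracked via weakrefs and closed with the connection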

+ 390 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/cursor.py

@@ -0,0 +1,390 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import logging
+
+from phoenixdb.avatica.proto import common_pb2
+from phoenixdb.errors import InternalError, ProgrammingError
+from phoenixdb.types import TypeHelper
+
+__all__ = ['Cursor', 'ColumnDescription', 'DictCursor']
+
+logger = logging.getLogger(__name__)
+
+# TODO see note in Cursor.rowcount()
+MAX_INT = 2 ** 64 - 1
+
+ColumnDescription = collections.namedtuple('ColumnDescription', 'name type_code display_size internal_size precision scale null_ok')
+"""Named tuple for representing results from :attr:`Cursor.description`."""
+
+
+class Cursor(object):
+    """Database cursor for executing queries and iterating over results.
+
+    You should not construct this object manually, use :meth:`Connection.cursor() <phoenixdb.connection.Connection.cursor>` instead.
+    """
+
+    arraysize = 1
+    """
+    Read/write attribute specifying the number of rows to fetch
+    at a time with :meth:`fetchmany`. It defaults to 1 meaning to
+    fetch a single row at a time.
+    """
+
+    itersize = 2000
+    """
+    Read/write attribute specifying the number of rows to fetch
+    from the backend at each network roundtrip during iteration
+    on the cursor. The default is 2000.
+    """
+
+    def __init__(self, connection, id=None):
+        self._connection = connection
+        self._id = id
+        self._signature = None
+        self._column_data_types = []
+        self._frame = None
+        self._pos = None
+        self._closed = False
+        self.arraysize = self.__class__.arraysize
+        self.itersize = self.__class__.itersize
+        self._updatecount = -1
+
+    def __del__(self):
+        if not self._connection._closed and not self._closed:
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if not self._closed:
+            self.close()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        row = self.fetchone()
+        if row is None:
+            raise StopIteration
+        return row
+
+    next = __next__
+
+    def close(self):
+        """Closes the cursor.
+        No further operations are allowed once the cursor is closed.
+
+        If the cursor is used in a ``with`` statement, this method will
+        be automatically called at the end of the ``with`` block.
+        """
+        if self._closed:
+            raise ProgrammingError('The cursor is already closed.')
+        if self._id is not None:
+            self._connection._client.close_statement(self._connection._id, self._id)
+            self._id = None
+        self._signature = None
+        self._column_data_types = []
+        self._frame = None
+        self._pos = None
+        self._closed = True
+
+    @property
+    def closed(self):
+        """Read-only attribute specifying if the cursor is closed or not."""
+        return self._closed
+
+    @property
+    def description(self):
+        if self._signature is None:
+            return None
+        description = []
+        for column in self._signature.columns:
+            description.append(ColumnDescription(
+                column.column_name,
+                column.type.name,
+                column.display_size,
+                None,
+                column.precision,
+                column.scale,
+                None if column.nullable == 2 else bool(column.nullable),
+            ))
+        return description
+
+    def _set_id(self, id):
+        if self._id is not None and self._id != id:
+            self._connection._client.close_statement(self._connection._id, self._id)
+        self._id = id
+
+    def _set_signature(self, signature):
+        self._signature = signature
+        self._column_data_types = []
+        self._parameter_data_types = []
+        if signature is None:
+            return
+
+        for column in signature.columns:
+            dtype = TypeHelper.from_column(column)
+            self._column_data_types.append(dtype)
+
+        for parameter in signature.parameters:
+            dtype = TypeHelper.from_param(parameter)
+            self._parameter_data_types.append(dtype)
+
+    def _set_frame(self, frame):
+        self._frame = frame
+        self._pos = None
+
+        if frame is not None:
+            if frame.rows:
+                self._pos = 0
+            elif not frame.done:
+                raise InternalError('Got an empty frame, but the statement is not done yet.')
+
+    def _fetch_next_frame(self):
+        offset = self._frame.offset + len(self._frame.rows)
+        frame = self._connection._client.fetch(
+            self._connection._id, self._id,
+            offset=offset, frame_max_size=self.itersize)
+        self._set_frame(frame)
+
+    def _process_result(self, result):
+        if result.own_statement:
+            self._set_id(result.statement_id)
+        self._set_signature(result.signature if result.HasField('signature') else None)
+        self._set_frame(result.first_frame if result.HasField('first_frame') else None)
+        self._updatecount = result.update_count
+
+    def _process_results(self, results):
+        if results:
+            return self._process_result(results[0])
+
+    def _transform_parameters(self, parameters):
+        if len(parameters) != len(self._parameter_data_types):
+            raise ProgrammingError('Number of placeholders (?) must match number of parameters.'
+                                   ' Number of placeholders: {0}. Number of parameters: {1}'
+                                   .format(len(self._parameter_data_types), len(parameters)))
+        typed_parameters = []
+        for value, data_type in zip(parameters, self._parameter_data_types):
+            field_name, rep, mutate_to, cast_from, is_array = data_type
+            typed_value = common_pb2.TypedValue()
+
+            if value is None:
+                typed_value.null = True
+                typed_value.type = common_pb2.NULL
+            else:
+                typed_value.null = False
+                if is_array:
+                    if type(value) in [list, tuple]:
+                        for element in value:
+                            if mutate_to is not None:
+                                element = mutate_to(element)
+                            typed_element = common_pb2.TypedValue()
+                            if element is None:
+                                typed_element.null = True
+                            else:
+                                typed_element.type = rep
+                                setattr(typed_element, field_name, element)
+                            typed_value.array_value.append(typed_element)
+                        typed_value.type = common_pb2.ARRAY
+                        typed_value.component_type = rep
+                    else:
+                        raise ProgrammingError('Scalar value specified for array parameter.')
+                else:
+                    if mutate_to is not None:
+                        value = mutate_to(value)
+                    typed_value.type = rep
+                    setattr(typed_value, field_name, value)
+
+            typed_parameters.append(typed_value)
+        return typed_parameters
+
+    def execute(self, operation, parameters=None):
+        if self._closed:
+            raise ProgrammingError('The cursor is already closed.')
+        self._updatecount = -1
+        self._set_frame(None)
+        if parameters is None:
+            if self._id is None:
+                self._set_id(self._connection._client.create_statement(self._connection._id))
+            results = self._connection._client.prepare_and_execute(
+                self._connection._id, self._id,
+                operation, first_frame_max_size=self.itersize)
+            self._process_results(results)
+        else:
+            statement = self._connection._client.prepare(
+                self._connection._id, operation)
+            self._set_id(statement.id)
+            self._set_signature(statement.signature)
+
+            results = self._connection._client.execute(
+                self._connection._id, self._id,
+                statement.signature, self._transform_parameters(parameters),
+                first_frame_max_size=self.itersize)
+            self._process_results(results)
+
+    def executemany(self, operation, seq_of_parameters):
+        if self._closed:
+            raise ProgrammingError('The cursor is already closed.')
+        self._updatecount = -1
+        self._set_frame(None)
+        statement = self._connection._client.prepare(
+            self._connection._id, operation, max_rows_total=0)
+        self._set_id(statement.id)
+        self._set_signature(statement.signature)
+        return self._connection._client.execute_batch(
+            self._connection._id, self._id,
+            [self._transform_parameters(p) for p in seq_of_parameters])
+
+    def get_sync_results(self, state):
+        if self._closed:
+            raise ProgrammingError('The cursor is already closed.')
+        if self._id is None:
+            self._set_id(self._connection._client.create_statement(self._connection._id))
+        return self._connection._client.get_sync_results(self._connection._id, self._id, state)
+
+    def fetch(self, signature):
+        if self._closed:
+            raise ProgrammingError('The cursor is already closed.')
+        self._updatecount = -1
+        self._set_signature(signature)
+        frame = self._connection._client.fetch(self._connection._id, self._id, 0, self.itersize)
+        self._set_frame(frame)
+
+    def _transform_row(self, row):
+        """Transforms a Row into Python values.
+
+        :param row:
+            A ``common_pb2.Row`` object.
+
+        :returns:
+            A list of values casted into the correct Python types.
+
+        :raises:
+            NotImplementedError
+        """
+        tmp_row = []
+
+        for i, column in enumerate(row.value):
+            if column.scalar_value.null:
+                tmp_row.append(None)
+            elif column.has_array_value:
+                field_name, rep, mutate_to, cast_from = self._column_data_types[i]
+
+                list_value = []
+                for j, typed_value in enumerate(column.array_value):
+                    value = getattr(typed_value, field_name)
+                    if cast_from is not None:
+                        value = cast_from(value)
+                    list_value.append(value)
+
+                tmp_row.append(list_value)
+            else:
+                field_name, rep, mutate_to, cast_from = self._column_data_types[i]
+
+                # get the value from the field_name
+                value = getattr(column.scalar_value, field_name)
+
+                # cast the value
+                if cast_from is not None:
+                    value = cast_from(value)
+
+                tmp_row.append(value)
+        return tmp_row
+
+    def fetchone(self):
+        if self._frame is None:
+            raise ProgrammingError('No select statement was executed.')
+        if self._pos is None:
+            return None
+        rows = self._frame.rows
+        row = self._transform_row(rows[self._pos])
+        self._pos += 1
+        if self._pos >= len(rows):
+            self._pos = None
+            if not self._frame.done:
+                self._fetch_next_frame()
+        return row
+
+    def fetchmany(self, size=None):
+        if size is None:
+            size = self.arraysize
+        rows = []
+        while size > 0:
+            row = self.fetchone()
+            if row is None:
+                break
+            rows.append(row)
+            size -= 1
+        return rows
+
+    def fetchall(self):
+        rows = []
+        while True:
+            row = self.fetchone()
+            if row is None:
+                break
+            rows.append(row)
+        return rows
+
+    def setinputsizes(self, sizes):
+        pass
+
+    def setoutputsize(self, size, column=None):
+        pass
+
+    @property
+    def connection(self):
+        """Read-only attribute providing access to the :class:`Connection <phoenixdb.connection.Connection>`
+        object this cursor was created from."""
+        return self._connection
+
+    @property
+    def rowcount(self):
+        """Read-only attribute specifying the number of rows affected by
+        the last executed DML statement or -1 if the number cannot be
+        determined. Note that this will always be set to -1 for select
+        queries."""
+        # TODO instead of -1, this ends up being set to Integer.MAX_VALUE
+        if self._updatecount == MAX_INT:
+            return -1
+        return self._updatecount
+
+    @property
+    def rownumber(self):
+        """Read-only attribute providing the current 0-based index of the
+        cursor in the result set or ``None`` if the index cannot be
+        determined.
+
+        The index can be seen as index of the cursor in a sequence
+        (the result set). The next fetch operation will fetch the
+        row indexed by :attr:`rownumber` in that sequence.
+        """
+        if self._frame is not None and self._pos is not None:
+            return self._frame.offset + self._pos
+        return self._pos
+
+
+class DictCursor(Cursor):
+    """A cursor which returns results as a dictionary"""
+
+    def _transform_row(self, row):
+        row = super(DictCursor, self)._transform_row(row)
+        d = {}
+        for ind, val in enumerate(row):
+            d[self._signature.columns[ind].column_name] = val
+        return d
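
A short sketch of the cursor in use, assuming an open connection `conn` and a hypothetical USERS table; DictCursor returns each row keyed by column name::

    from phoenixdb.cursor import DictCursor

    with conn.cursor() as cursor:
        cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
        print(cursor.rowcount)    # update count of the DML, or -1

    with conn.cursor(cursor_factory=DictCursor) as cursor:
        cursor.execute("SELECT id, username FROM users WHERE id = ?", (1,))
        print(cursor.fetchall())  # e.g. [{'ID': 1, 'USERNAME': 'admin'}]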

+ 93 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/errors.py

@@ -0,0 +1,93 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+    'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError',
+    'OperationalError', 'IntegrityError', 'InternalError',
+    'ProgrammingError', 'NotSupportedError',
+]
+
+try:
+    _StandardError = StandardError
+except NameError:
+    _StandardError = Exception
+
+
+class Warning(_StandardError):
+    """Not used by this package, only defined for compatibility
+    with DB API 2.0."""
+
+
+class Error(_StandardError):
+    """Exception that is the base class of all other error exceptions.
+    You can use this to catch all errors with one single except statement."""
+
+    def __init__(self, message, code=None, sqlstate=None, cause=None):
+        super(_StandardError, self).__init__(message, code, sqlstate, cause)
+
+    @property
+    def message(self):
+        return self.args[0]
+
+    @property
+    def code(self):
+        return self.args[1]
+
+    @property
+    def sqlstate(self):
+        return self.args[2]
+
+    @property
+    def cause(self):
+        return self.args[3]
+
+
+class InterfaceError(Error):
+    """Exception raised for errors that are related to the database
+    interface rather than the database itself."""
+
+
+class DatabaseError(Error):
+    """Exception raised for errors that are related to the database."""
+
+
+class DataError(DatabaseError):
+    """Exception raised for errors that are due to problems with the
+    processed data like division by zero, numeric value out of range,
+    etc."""
+
+
+class OperationalError(DatabaseError):
+    """Raised for errors that are related to the database's operation and not
+    necessarily under the control of the programmer, e.g. an unexpected
+    disconnect occurs, the data source name is not found, a transaction could
+    not be processed, a memory allocation error occurred during
+    processing, etc."""
+
+
+class IntegrityError(DatabaseError):
+    """Raised when the relational integrity of the database is affected, e.g. a foreign key check fails."""
+
+
+class InternalError(DatabaseError):
+    """Raised when the database encounters an internal problem."""
+
+
+class ProgrammingError(DatabaseError):
+    """Raises for programming errors, e.g. table not found, syntax error, etc."""
+
+
+class NotSupportedError(DatabaseError):
+    """Raised when using an API that is not supported by the database."""
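
Since the classes above follow the DB API 2.0 hierarchy, callers can catch broadly with Error or narrowly with a subclass. A sketch, given a cursor from an open connection; the table name is hypothetical::

    from phoenixdb.errors import Error, ProgrammingError

    try:
        cursor.execute("SELECT * FROM no_such_table")
    except ProgrammingError as e:
        # The positional args are exposed as properties on Error.
        print(e.message, e.code, e.sqlstate)
    except Error:
        raise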

+ 210 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/meta.py

@@ -0,0 +1,210 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import logging
+
+from phoenixdb.avatica.proto import common_pb2
+from phoenixdb.errors import ProgrammingError
+from phoenixdb.cursor import DictCursor
+
+
+__all__ = ['Meta']
+
+logger = logging.getLogger(__name__)
+
+
+class Meta(object):
+    """Database meta for querying metadata."""
+
+    def __init__(self, connection):
+        self._connection = connection
+
+    def get_catalogs(self):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_catalogs(self._connection._id)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return cursor.fetchall()
+
+    def get_schemas(self, catalog=None, schemaPattern=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_schemas(self._connection._id, catalog, schemaPattern)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return self._fix_default(cursor.fetchall(), schemaPattern=schemaPattern)
+
+    def get_tables(self, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_tables(
+            self._connection._id, catalog, schemaPattern, tableNamePattern, typeList=typeList)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return self._fix_default(cursor.fetchall(), catalog, schemaPattern)
+
+    def get_columns(self, catalog=None, schemaPattern=None, tableNamePattern=None,
+                    columnNamePattern=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_columns(
+            self._connection._id, catalog, schemaPattern, tableNamePattern, columnNamePattern)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return self._fix_default(cursor.fetchall(), catalog, schemaPattern)
+
+    def get_table_types(self):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_table_types(self._connection._id)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return cursor.fetchall()
+
+    def get_type_info(self):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+        result = self._connection._client.get_type_info(self._connection._id)
+        with DictCursor(self._connection) as cursor:
+            cursor._process_result(result)
+            return cursor.fetchall()
+
+    def get_primary_keys(self, catalog=None, schema=None, table=None):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+
+        state = common_pb2.QueryState()
+        state.type = common_pb2.StateType.METADATA
+        state.op = common_pb2.MetaDataOperation.GET_PRIMARY_KEYS
+        state.has_args = True
+        state.has_op = True
+
+        catalog_arg = self._moa_string_arg_factory(catalog)
+        schema_arg = self._moa_string_arg_factory(schema)
+        table_arg = self._moa_string_arg_factory(table)
+        state.args.extend([catalog_arg, schema_arg, table_arg])
+
+        with DictCursor(self._connection) as cursor:
+            syncResultResponse = cursor.get_sync_results(state)
+            if not syncResultResponse.more_results:
+                return []
+
+            signature = common_pb2.Signature()
+            signature.columns.append(self._column_meta_data_factory(1, 'TABLE_CAT', 12))
+            signature.columns.append(self._column_meta_data_factory(2, 'TABLE_SCHEM', 12))
+            signature.columns.append(self._column_meta_data_factory(3, 'TABLE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(4, 'COLUMN_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(5, 'KEY_SEQ', 5))
+            signature.columns.append(self._column_meta_data_factory(6, 'PK_NAME', 12))
+            # The following are non-standard Phoenix extensions
+            # This returns '\x00\x00\x00A' or '\x00\x00\x00D', but that's consistent with Java
+            signature.columns.append(self._column_meta_data_factory(7, 'ASC_OR_DESC', 12))
+            signature.columns.append(self._column_meta_data_factory(8, 'DATA_TYPE', 5))
+            signature.columns.append(self._column_meta_data_factory(9, 'TYPE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(10, 'COLUMN_SIZE', 5))
+            signature.columns.append(self._column_meta_data_factory(11, 'TYPE_ID', 5))
+            signature.columns.append(self._column_meta_data_factory(12, 'VIEW_CONSTANT', 12))
+
+            cursor.fetch(signature)
+            return cursor.fetchall()
+
+    def get_index_info(self, catalog=None, schema=None, table=None, unique=False, approximate=False):
+        if self._connection._closed:
+            raise ProgrammingError('The connection is already closed.')
+
+        state = common_pb2.QueryState()
+        state.type = common_pb2.StateType.METADATA
+        state.op = common_pb2.MetaDataOperation.GET_INDEX_INFO
+        state.has_args = True
+        state.has_op = True
+
+        catalog_arg = self._moa_string_arg_factory(catalog)
+        schema_arg = self._moa_string_arg_factory(schema)
+        table_arg = self._moa_string_arg_factory(table)
+        unique_arg = self._moa_bool_arg_factory(unique)
+        approximate_arg = self._moa_bool_arg_factory(approximate)
+
+        state.args.extend([catalog_arg, schema_arg, table_arg, unique_arg, approximate_arg])
+
+        with DictCursor(self._connection) as cursor:
+            syncResultResponse = cursor.get_sync_results(state)
+            if not syncResultResponse.more_results:
+                return []
+
+            signature = common_pb2.Signature()
+            signature.columns.append(self._column_meta_data_factory(1, 'TABLE_CAT', 12))
+            signature.columns.append(self._column_meta_data_factory(2, 'TABLE_SCHEM', 12))
+            signature.columns.append(self._column_meta_data_factory(3, 'TABLE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(4, 'NON_UNIQUE', 16))
+            signature.columns.append(self._column_meta_data_factory(5, 'INDEX_QUALIFIER', 12))
+            signature.columns.append(self._column_meta_data_factory(6, 'INDEX_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(7, 'TYPE', 5))
+            signature.columns.append(self._column_meta_data_factory(8, 'ORDINAL_POSITION', 5))
+            signature.columns.append(self._column_meta_data_factory(9, 'COLUMN_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(10, 'ASC_OR_DESC', 12))
+            signature.columns.append(self._column_meta_data_factory(11, 'CARDINALITY', 5))
+            signature.columns.append(self._column_meta_data_factory(12, 'PAGES', 5))
+            signature.columns.append(self._column_meta_data_factory(13, 'FILTER_CONDITION', 12))
+            # The following are non-standard Phoenix extensions
+            signature.columns.append(self._column_meta_data_factory(14, 'DATA_TYPE', 5))
+            signature.columns.append(self._column_meta_data_factory(15, 'TYPE_NAME', 12))
+            signature.columns.append(self._column_meta_data_factory(16, 'TYPE_ID', 5))
+            signature.columns.append(self._column_meta_data_factory(17, 'COLUMN_FAMILY', 12))
+            signature.columns.append(self._column_meta_data_factory(18, 'COLUMN_SIZE', 5))
+            signature.columns.append(self._column_meta_data_factory(19, 'ARRAY_SIZE', 5))
+
+            cursor.fetch(signature)
+            return cursor.fetchall()
+
+    def _column_meta_data_factory(self, ordinal, column_name, jdbc_code):
+        cmd = common_pb2.ColumnMetaData()
+        cmd.ordinal = ordinal
+        cmd.column_name = column_name
+        cmd.type.id = jdbc_code
+        cmd.nullable = 2
+        return cmd
+
+    def _moa_string_arg_factory(self, arg):
+        moa = common_pb2.MetaDataOperationArgument()
+        if arg is None:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.NULL
+        else:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.STRING
+            moa.string_value = arg
+        return moa
+
+    def _moa_bool_arg_factory(self, arg):
+        moa = common_pb2.MetaDataOperationArgument()
+        if arg is None:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.NULL
+        else:
+            moa.type = common_pb2.MetaDataOperationArgument.ArgumentType.BOOL
+            moa.bool_value = arg
+        return moa
+
+    def _fix_default(self, rows, catalog=None, schemaPattern=None):
+        '''Workaround for PHOENIX-6003'''
+        if schemaPattern == '':
+            rows = [row for row in rows if row['TABLE_SCHEM'] is None]
+        if catalog == '':
+            rows = [row for row in rows if row['TABLE_CATALOG'] is None]
+        # Couldn't find a sane way to do it that works on 2 and 3
+        if sys.version_info.major == 3:
+            return [{k: v or '' for k, v in row.items()} for row in rows]
+        else:
+            return [{k: v or '' for k, v in row.iteritems()} for row in rows]
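
A minimal sketch of the Meta API above, assuming an open connection `conn`; the USERS table is hypothetical::

    meta = conn.meta()
    print([s['TABLE_SCHEM'] for s in meta.get_schemas()])
    print([t['TABLE_NAME'] for t in meta.get_tables(schemaPattern='', typeList=('TABLE',))])
    print(meta.get_primary_keys(schema='', table='USERS'))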

+ 290 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/sqlalchemy_phoenix.py

@@ -0,0 +1,290 @@
+# Copyright 2017 Dimitri Capitaine
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import sys
+
+import phoenixdb
+
+from sqlalchemy import types
+from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext
+from sqlalchemy.exc import CompileError
+from sqlalchemy.sql.compiler import DDLCompiler
+from sqlalchemy.types import BIGINT, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, INTEGER, NUMERIC,\
+    SMALLINT, TIME, TIMESTAMP, VARBINARY, VARCHAR
+
+if sys.version_info.major == 3:
+    from urllib.parse import urlunsplit, SplitResult, urlencode
+else:
+    from urllib import urlencode
+    from urlparse import urlunsplit, SplitResult
+
+
+class PhoenixDDLCompiler(DDLCompiler):
+
+    def visit_primary_key_constraint(self, constraint):
+        if constraint.name is None:
+            raise CompileError("Can't create primary key without a name.")
+        return DDLCompiler.visit_primary_key_constraint(self, constraint)
+
+
+AUTOCOMMIT_REGEXP = re.compile(
+    r"\s*(?:UPDATE|UPSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
+)
+
+
+class PhoenixExecutionContext(DefaultExecutionContext):
+
+    def should_autocommit_text(self, statement):
+        return AUTOCOMMIT_REGEXP.match(statement)
+
+
+class PhoenixDialect(DefaultDialect):
+    '''Phoenix dialect
+
+    .. dialect:: phoenix
+        :name: Phoenix
+
+    .. note::
+
+        The Phoenix dialect for SQLAlchemy is incomplete. It implements the
+        functions required by Hue for basic operation, but little else.
+
+    Connecting
+    ----------
+
+    The connection URL has the format phoenix://host:port
+
+    This format does not allow for specifying the HTTP scheme, or the URL path that the server uses.
+    Setting tls=True sets the server URL scheme to https.
+    If the path arg is set, it is used as the path of the server URL.
+
+    The phoenix-specific authentication options can be set via the standard connect_args argument.
+
+    Connecting to an unsecured server::
+
+        create_engine('phoenix://localhost:8765')
+
+    Connecting to a secure server via SPNEGO (after kinit)::
+
+        create_engine('phoenix://localhost:8765', tls=True, connect_args={'authentication': 'SPNEGO'})
+
+    Connecting to a secure server via Knox::
+
+        create_engine('phoenix://localhost:8765', tls=True, path='/gateway/avatica/', \
+            connect_args={'authentication': 'BASIC', 'avatica_user': 'user', 'avatica_password': 'password'})
+    '''
+
+    name = "phoenix"
+
+    driver = "phoenixdb"
+
+    ddl_compiler = PhoenixDDLCompiler
+
+    execution_ctx_cls = PhoenixExecutionContext
+
+    def __init__(self, tls=False, path='/', **opts):
+        '''
+        :param tls:
+            If True, then use https for connecting, otherwise use http
+
+        :param path:
+            The path component of the connection URL
+        '''
+        # There is no way to pass these via the SqlAlchemy url object
+        self.tls = tls
+        self.path = path
+        super(PhoenixDialect, self).__init__(**opts)
+
+    @classmethod
+    def dbapi(cls):
+        return phoenixdb
+
+    def create_connect_args(self, url):
+        connect_args = dict()
+        if url.username is not None:
+            connect_args['user'] = url.username
+            if url.password is not None:
+                connect_args['password'] = url.password
+        phoenix_url = urlunsplit(SplitResult(
+            scheme='https' if self.tls else 'http',
+            netloc='{}:{}'.format(url.host, 8765 if url.port is None else url.port),
+            path=self.path,
+            query=urlencode(url.query),
+            fragment='',
+        ))
+        return [phoenix_url], connect_args
+
+    def has_table(self, connection, table_name, schema=None, **kw):
+        if schema is None:
+            schema = ''
+        return bool(connection.connect().connection.meta().get_tables(
+            tableNamePattern=table_name,
+            schemaPattern=schema,
+            typeList=('TABLE', 'SYSTEM TABLE')))
+
+    def get_schema_names(self, connection, **kw):
+        schemas = connection.connect().connection.meta().get_schemas()
+        schema_names = [schema['TABLE_SCHEM'] for schema in schemas]
+        # Phoenix won't return the default schema if there aren't any tables in it
+        if '' not in schema_names:
+            schema_names.insert(0, '')
+        return schema_names
+
+    def get_table_names(self, connection, schema=None, order_by=None, **kw):
+        '''order_by is ignored'''
+        if schema is None:
+            schema = ''
+        tables = connection.connect().connection.meta().get_tables(
+            schemaPattern=schema, typeList=('TABLE', 'SYSTEM TABLE'))
+        return [table['TABLE_NAME'] for table in tables]
+
+    def get_view_names(self, connection, schema=None, **kw):
+        if schema is None:
+            schema = ''
+        views = connection.connect().connection.meta().get_tables(schemaPattern=schema, typeList=('VIEW',))
+        return [view['TABLE_NAME'] for view in views]
+
+    def get_columns(self, connection, table_name, schema=None, **kw):
+        if schema is None:
+            schema = ''
+        raw = connection.connect().connection.meta().get_columns(
+            schemaPattern=schema, tableNamePattern=table_name)
+        return [self._map_column(row) for row in raw]
+
+    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+        if schema is None:
+            schema = ''
+        raw = connection.connect().connection.meta().get_primary_keys(
+            schema=schema, table=table_name)
+        cooked = {
+            'constrained_columns': []
+        }
+        if raw:
+            cooked['name'] = raw[0]['PK_NAME']
+            for row in raw:
+                cooked['constrained_columns'].insert(row['KEY_SEQ'] - 1, row['COLUMN_NAME'])
+        return cooked
+
+    def get_indexes(self, connection, table_name, schema=None, **kw):
+        if schema is None:
+            schema = ''
+        raw = connection.connect().connection.meta().get_index_info(schema=schema, table=table_name)
+        # We know that Phoenix returns the rows ordered by INDEX_NAME and ORDINAL_POSITION
+        cooked = []
+        current = None
+        for row in raw:
+            if current is None or row['INDEX_NAME'] != current['name']:
+                current = {
+                    'name': row['INDEX_NAME'],
+                    'unique': not row['NON_UNIQUE'],
+                    'column_names': [],
+                }
+                cooked.append(current)
+            # Phoenix returns the column names in its internal representation here
+            # Remove the default CF prefix
+            canonical_name = row['COLUMN_NAME']
+            if canonical_name.startswith('0:'):
+                canonical_name = canonical_name[len('0:'):]
+            if canonical_name.startswith(':'):
+                canonical_name = canonical_name[len(':'):]
+            current['column_names'].append(canonical_name)
+        return cooked
+
+    def get_foreign_keys(self, conn, table_name, schema=None, **kw):
+        '''Foreign keys are a foreign concept to Phoenix,
+        and SqlAlchemy cannot parse the DB schema if it's not implemented '''
+        return []
+
+    def _map_column(self, raw):
+        cooked = {}
+        cooked['name'] = raw['COLUMN_NAME']
+        cooked['type'] = COLUMN_DATA_TYPE[raw['TYPE_ID']]
+        cooked['nullable'] = bool(raw['IS_NULLABLE'])
+        cooked['autoincrement'] = bool(raw['IS_AUTOINCREMENT'])
+        cooked['comment'] = raw['REMARKS']
+        cooked['default'] = None  # Not apparent how to get this from the metadata
+        return cooked
+
+
+class TINYINT(types.Integer):
+    __visit_name__ = "SMALLINT"
+
+
+class UNSIGNED_TINYINT(types.Integer):
+    __visit_name__ = "SMALLINT"
+
+
+class UNSIGNED_INTEGER(types.Integer):
+    __visit_name__ = "INTEGER"
+
+
+class DOUBLE(types.FLOAT):
+    __visit_name__ = "FLOAT"
+
+
+class UNSIGNED_DOUBLE(types.FLOAT):
+    __visit_name__ = "FLOAT"
+
+
+class UNSIGNED_FLOAT(types.FLOAT):
+    __visit_name__ = "FLOAT"
+
+
+class UNSIGNED_LONG(types.BIGINT):
+    __visit_name__ = "BIGINT"
+
+
+class UNSIGNED_TIME(types.TIME):
+    __visit_name__ = "TIME"
+
+
+class UNSIGNED_DATE(types.DATE):
+    __visit_name__ = "DATE"
+
+
+class UNSIGNED_TIMESTAMP(types.TIMESTAMP):
+    __visit_name__ = "TIMESTAMP"
+
+
+class ROWID(types.String):
+    __visit_name__ = "VARCHAR"
+
+
+COLUMN_DATA_TYPE = {
+    -6: TINYINT,
+    -5: BIGINT,
+    -3: VARBINARY,
+    1: CHAR,
+    2: NUMERIC,
+    3: DECIMAL,
+    4: INTEGER,
+    5: SMALLINT,
+    6: FLOAT,
+    8: DOUBLE,
+    9: UNSIGNED_INTEGER,
+    10: UNSIGNED_LONG,
+    11: UNSIGNED_TINYINT,
+    12: VARCHAR,
+    13: ROWID,
+    14: UNSIGNED_FLOAT,
+    15: UNSIGNED_DOUBLE,
+    16: BOOLEAN,
+    18: UNSIGNED_TIME,
+    19: UNSIGNED_DATE,
+    20: UNSIGNED_TIMESTAMP,
+    91: DATE,
+    92: TIME,
+    93: TIMESTAMP
+}
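
A usage sketch for the dialect through SQLAlchemy, assuming the 'phoenix' name is registered as an entry point by this package's setup.py and that a local server is running::

    from sqlalchemy import create_engine, inspect

    engine = create_engine('phoenix://localhost:8765', tls=False, path='/')
    insp = inspect(engine)
    print(insp.get_schema_names())        # always contains the default '' schema
    print(insp.get_table_names(schema=''))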

+ 64 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/__init__.py

@@ -0,0 +1,64 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import unittest
+
+import phoenixdb
+
+TEST_DB_URL = os.environ.get('PHOENIXDB_TEST_DB_URL', "http://localhost:8765")
+TEST_DB_TRUSTSTORE = os.environ.get('PHOENIXDB_TEST_DB_TRUSTSTORE')
+TEST_DB_AUTHENTICATION = os.environ.get('PHOENIXDB_TEST_DB_AUTHENTICATION')
+TEST_DB_AVATICA_USER = os.environ.get('PHOENIXDB_TEST_DB_AVATICA_USER')
+TEST_DB_AVATICA_PASSWORD = os.environ.get('PHOENIXDB_TEST_DB_AVATICA_PASSWORD')
+
+httpArgs = {}
+if TEST_DB_TRUSTSTORE is not None:
+    httpArgs.update(verify=TEST_DB_TRUSTSTORE)
+if TEST_DB_AUTHENTICATION is not None:
+    httpArgs.update(authentication=TEST_DB_AUTHENTICATION)
+if TEST_DB_AVATICA_USER is not None:
+    httpArgs.update(avatica_user=TEST_DB_AVATICA_USER)
+if TEST_DB_AVATICA_PASSWORD is not None:
+    httpArgs.update(avatica_password=TEST_DB_AVATICA_PASSWORD)
+
+
+@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
+class DatabaseTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.conn = phoenixdb.connect(TEST_DB_URL, autocommit=True, **httpArgs)
+
+        def closeDb():
+            self.conn.close()
+        self.addCleanup(closeDb)
+
+    def reopen(self, **avaticaArgs):
+        self.conn.close()
+        kwargs = avaticaArgs.copy()
+        kwargs.update(httpArgs)
+        self.conn = phoenixdb.connect(TEST_DB_URL, **kwargs)
+
+    def addTableCleanup(self, name):
+        def dropTable():
+            with self.conn.cursor() as cursor:
+                cursor.execute("DROP TABLE IF EXISTS {table}".format(table=name))
+        self.addCleanup(dropTable)
+
+    def createTable(self, name, statement):
+        with self.conn.cursor() as cursor:
+            cursor.execute("DROP TABLE IF EXISTS {table}".format(table=name))
+            cursor.execute(statement.format(table=name))
+            self.addTableCleanup(name)
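
A sketch of a concrete test built on DatabaseTestCase above; the table name is hypothetical, and PHOENIXDB_TEST_DB_URL must point at a clean test server::

    from phoenixdb.tests import DatabaseTestCase

    class SmokeTest(DatabaseTestCase):
        def test_roundtrip(self):
            self.createTable('phoenixdb_test_smoke',
                             'CREATE TABLE {table} (id INTEGER NOT NULL PRIMARY KEY)')
            with self.conn.cursor() as cursor:
                cursor.execute('UPSERT INTO phoenixdb_test_smoke VALUES (1)')
                cursor.execute('SELECT id FROM phoenixdb_test_smoke')
                self.assertEqual(cursor.fetchall(), [[1]])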

+ 857 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/dbapi20.py

@@ -0,0 +1,857 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+''' Python DB API 2.0 driver compliance unit test suite. 
+    
+    This software is Public Domain and may be used without restrictions.
+
+ "Now we have booze and barflies entering the discussion, plus rumours of
+  DBAs on drugs... and I won't tell you what flashes through my mind each
+  time I read the subject line with 'Anal Compliance' in it.  All around
+  this is turning out to be a thoroughly unwholesome unit test."
+
+    -- Ian Bicking
+'''
+
+__version__ = '1.14.3'
+
+import unittest
+import time
+import sys
+
+if sys.version[0] >= '3': #python 3.x
+    _BaseException = Exception
+    def _failUnless(self, expr, msg=None):
+        self.assertTrue(expr, msg)
+else:                   #python 2.x
+    from exceptions import StandardError as _BaseException
+    def _failUnless(self, expr, msg=None):
+        self.failUnless(expr, msg)  ## deprecated since Python 2.6
+
+def str2bytes(sval):
+    if sys.version_info < (3,0) and isinstance(sval, str):
+        sval = sval.decode("latin1")
+    return sval.encode("latin1") #python 3 make unicode into bytes
+
+class DatabaseAPI20Test(unittest.TestCase):
+    ''' Test a database self.driver for DB API 2.0 compatibility.
+        This implementation tests Gadfly, but the TestCase
+        is structured so that other self.drivers can subclass this 
+        test case to ensure compliance with the DB-API. It is 
+        expected that this TestCase may be expanded in the future
+        if ambiguities or edge conditions are discovered.
+
+        The 'Optional Extensions' are not yet being tested.
+
+        self.drivers should subclass this test, overriding setUp, tearDown,
+        self.driver, connect_args and connect_kw_args. Class specification
+        should be as follows:
+
+        import dbapi20 
+        class mytest(dbapi20.DatabaseAPI20Test):
+           [...] 
+
+        Don't 'import DatabaseAPI20Test from dbapi20', or you will
+        confuse the unit tester - just 'import dbapi20'.
+    '''
+
+    # The self.driver module. This should be the module where the 'connect'
+    # method is to be found
+    driver = None
+    connect_args = () # List of arguments to pass to connect
+    connect_kw_args = {} # Keyword arguments for connect
+    table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
+
+    ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
+    ddl2 = 'create table %sbarflys (name varchar(20), drink varchar(30))' % table_prefix
+    xddl1 = 'drop table %sbooze' % table_prefix
+    xddl2 = 'drop table %sbarflys' % table_prefix
+    insert = 'insert'
+
+    lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase (unused; see lower_func below)
+        
+    # Some drivers may need to override these helpers, for example adding
+    # a 'commit' after the execute.
+    def executeDDL1(self,cursor):
+        cursor.execute(self.ddl1)
+
+    def executeDDL2(self,cursor):
+        cursor.execute(self.ddl2)
+
+    def setUp(self):
+        ''' self.drivers should override this method to perform required setup
+            if any is necessary, such as creating the database.
+        '''
+        pass
+
+    def tearDown(self):
+        ''' self.drivers should override this method to perform required cleanup
+            if any is necessary, such as deleting the test database.
+            The default drops the tables that may be created.
+        '''
+        try:
+            con = self._connect()
+            try:
+                cur = con.cursor()
+                for ddl in (self.xddl1,self.xddl2):
+                    try:
+                        cur.execute(ddl)
+                        con.commit()
+                    except self.driver.Error:
+                        # Assume table didn't exist. Other tests will check if
+                        # execute is busted.
+                        pass
+            finally:
+                con.close()
+        except _BaseException:
+            pass
+
+    def _connect(self):
+        try:
+            r = self.driver.connect(
+                *self.connect_args, **self.connect_kw_args
+                )
+        except AttributeError:
+            self.fail("No connect method found in self.driver module")
+        return r
+
+    def test_connect(self):
+        con = self._connect()
+        con.close()
+
+    def test_apilevel(self):
+        try:
+            # Must exist
+            apilevel = self.driver.apilevel
+            # Must equal 2.0
+            self.assertEqual(apilevel,'2.0')
+        except AttributeError:
+            self.fail("Driver doesn't define apilevel")
+
+    def test_threadsafety(self):
+        try:
+            # Must exist
+            threadsafety = self.driver.threadsafety
+            # Must be a valid value
+            _failUnless(self, threadsafety in (0,1,2,3))
+        except AttributeError:
+            self.fail("Driver doesn't define threadsafety")
+
+    def test_paramstyle(self):
+        try:
+            # Must exist
+            paramstyle = self.driver.paramstyle
+            # Must be a valid value
+            _failUnless(self, paramstyle in (
+                'qmark','numeric','named','format','pyformat'
+                ))
+        except AttributeError:
+            self.fail("Driver doesn't define paramstyle")
+
+    def test_Exceptions(self):
+        # Make sure required exceptions exist, and are in the
+        # defined hierarchy.
+        if sys.version[0] == '3': # under Python 3, StandardError no longer exists
+            self.assertTrue(issubclass(self.driver.Warning,Exception))
+            self.assertTrue(issubclass(self.driver.Error,Exception))
+        else:
+            self.failUnless(issubclass(self.driver.Warning,StandardError))
+            self.failUnless(issubclass(self.driver.Error,StandardError))
+
+        _failUnless(self,
+            issubclass(self.driver.InterfaceError,self.driver.Error)
+            )
+        _failUnless(self,
+            issubclass(self.driver.DatabaseError,self.driver.Error)
+            )
+        _failUnless(self,
+            issubclass(self.driver.OperationalError,self.driver.Error)
+            )
+        _failUnless(self,
+            issubclass(self.driver.IntegrityError,self.driver.Error)
+            )
+        _failUnless(self,
+            issubclass(self.driver.InternalError,self.driver.Error)
+            )
+        _failUnless(self,
+            issubclass(self.driver.ProgrammingError,self.driver.Error)
+            )
+        _failUnless(self,
+            issubclass(self.driver.NotSupportedError,self.driver.Error)
+            )
+
+    def test_ExceptionsAsConnectionAttributes(self):
+        # OPTIONAL EXTENSION
+        # Test for the optional DB API 2.0 extension, where the exceptions
+        # are exposed as attributes on the Connection object
+        # I figure this optional extension will be implemented by any
+        # driver author who is using this test suite, so it is enabled
+        # by default.
+        con = self._connect()
+        drv = self.driver
+        _failUnless(self,con.Warning is drv.Warning)
+        _failUnless(self,con.Error is drv.Error)
+        _failUnless(self,con.InterfaceError is drv.InterfaceError)
+        _failUnless(self,con.DatabaseError is drv.DatabaseError)
+        _failUnless(self,con.OperationalError is drv.OperationalError)
+        _failUnless(self,con.IntegrityError is drv.IntegrityError)
+        _failUnless(self,con.InternalError is drv.InternalError)
+        _failUnless(self,con.ProgrammingError is drv.ProgrammingError)
+        _failUnless(self,con.NotSupportedError is drv.NotSupportedError)
+
+
+    def test_commit(self):
+        con = self._connect()
+        try:
+            # Commit must work, even if it doesn't do anything
+            con.commit()
+        finally:
+            con.close()
+
+    def test_rollback(self):
+        con = self._connect()
+        # If rollback is defined, it should either work or throw
+        # the documented exception
+        if hasattr(con,'rollback'):
+            try:
+                con.rollback()
+            except self.driver.NotSupportedError:
+                pass
+    
+    def test_cursor(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+        finally:
+            con.close()
+
+    def test_cursor_isolation(self):
+        con = self._connect()
+        try:
+            # Make sure cursors created from the same connection have
+            # the documented transaction isolation level
+            cur1 = con.cursor()
+            cur2 = con.cursor()
+            self.executeDDL1(cur1)
+            cur1.execute("%s into %sbooze values ('Victoria Bitter')" % (
+                self.insert, self.table_prefix
+                ))
+            cur2.execute("select name from %sbooze" % self.table_prefix)
+            booze = cur2.fetchall()
+            self.assertEqual(len(booze),1)
+            self.assertEqual(len(booze[0]),1)
+            self.assertEqual(booze[0][0],'Victoria Bitter')
+        finally:
+            con.close()
+
+    def test_description(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self.executeDDL1(cur)
+            self.assertEqual(cur.description,None,
+                'cursor.description should be None after executing a '
+                'statement that can return no rows (such as DDL)'
+                )
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            self.assertEqual(len(cur.description),1,
+                'cursor.description describes too many columns'
+                )
+            self.assertEqual(len(cur.description[0]),7,
+                'cursor.description[x] tuples must have 7 elements'
+                )
+            self.assertEqual(cur.description[0][0].lower(),'name',
+                'cursor.description[x][0] must return column name'
+                )
+            self.assertEqual(cur.description[0][1],self.driver.STRING,
+                'cursor.description[x][1] must return column type. Got %r'
+                    % cur.description[0][1]
+                )
+
+            # Make sure self.description gets reset
+            self.executeDDL2(cur)
+            self.assertEqual(cur.description,None,
+                'cursor.description not being set to None when executing '
+                'no-result statements (eg. DDL)'
+                )
+        finally:
+            con.close()
+
+    def test_rowcount(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self.executeDDL1(cur)
+            _failUnless(self,cur.rowcount in (-1,0),   # Bug #543885
+                'cursor.rowcount should be -1 or 0 after executing no-result '
+                'statements'
+                )
+            cur.execute("%s into %sbooze values ('Victoria Bitter')" % (
+                self.insert, self.table_prefix
+                ))
+            _failUnless(self,cur.rowcount in (-1,1),
+                'cursor.rowcount should == number of rows inserted, or '
+                'set to -1 after executing an insert statement'
+                )
+            cur.execute("select name from %sbooze" % self.table_prefix)
+            _failUnless(self,cur.rowcount in (-1,1),
+                'cursor.rowcount should == number of rows returned, or '
+                'set to -1 after executing a select statement'
+                )
+            self.executeDDL2(cur)
+            _failUnless(self,cur.rowcount in (-1,0),   # Bug #543885
+                'cursor.rowcount should be -1 or 0 after executing no-result '
+                'statements'
+                )
+        finally:
+            con.close()
+
+    lower_func = 'lower'
+    def test_callproc(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            if self.lower_func and hasattr(cur,'callproc'):
+                r = cur.callproc(self.lower_func,('FOO',))
+                self.assertEqual(len(r),1)
+                self.assertEqual(r[0],'FOO')
+                r = cur.fetchall()
+                self.assertEqual(len(r),1,'callproc produced no result set')
+                self.assertEqual(len(r[0]),1,
+                    'callproc produced invalid result set'
+                    )
+                self.assertEqual(r[0][0],'foo',
+                    'callproc produced invalid results'
+                    )
+        finally:
+            con.close()
+
+    def test_close(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+        finally:
+            con.close()
+
+        # cursor.execute should raise an Error if called after connection
+        # closed
+        self.assertRaises(self.driver.Error,self.executeDDL1,cur)
+
+        # connection.commit should raise an Error if called after connection
+        # closed.
+        self.assertRaises(self.driver.Error,con.commit)
+
+    def test_non_idempotent_close(self):
+        con = self._connect()
+        con.close()
+        # connection.close should raise an Error if called more than once
+        #!!! reasonable persons differ about the usefulness of this test and this feature !!!
+        self.assertRaises(self.driver.Error,con.close)
+
+    def test_execute(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self._paraminsert(cur)
+        finally:
+            con.close()
+
+    def _paraminsert(self,cur):
+        self.executeDDL2(cur)
+        cur.execute("%s into %sbarflys values ('Victoria Bitter', 'thi%%s :may ca%%(u)se? troub:1e')" % (
+            self.insert, self.table_prefix
+            ))
+        _failUnless(self,cur.rowcount in (-1,1))
+
+        if self.driver.paramstyle == 'qmark':
+            cur.execute(
+                "%s into %sbarflys values (?, 'thi%%s :may ca%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
+                ("Cooper's",)
+                )
+        elif self.driver.paramstyle == 'numeric':
+            cur.execute(
+                "%s into %sbarflys values (:1, 'thi%%s :may ca%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
+                ("Cooper's",)
+                )
+        elif self.driver.paramstyle == 'named':
+            cur.execute(
+                "%s into %sbarflys values (:beer, 'thi%%s :may ca%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
+                {'beer':"Cooper's"}
+                )
+        elif self.driver.paramstyle == 'format':
+            cur.execute(
+                "%s into %sbarflys values (%%s, 'thi%%%%s :may ca%%%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
+                ("Cooper's",)
+                )
+        elif self.driver.paramstyle == 'pyformat':
+            cur.execute(
+                "%s into %sbarflys values (%%(beer)s, 'thi%%%%s :may ca%%%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
+                {'beer':"Cooper's"}
+                )
+        else:
+            self.fail('Invalid paramstyle')
+        _failUnless(self,cur.rowcount in (-1,1))
+
+        cur.execute('select name, drink from %sbarflys' % self.table_prefix)
+        res = cur.fetchall()
+        self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
+        beers = [res[0][0],res[1][0]]
+        beers.sort()
+        self.assertEqual(beers[0],"Cooper's",
+            'cursor.fetchall retrieved incorrect data, or data inserted '
+            'incorrectly'
+            )
+        self.assertEqual(beers[1],"Victoria Bitter",
+            'cursor.fetchall retrieved incorrect data, or data inserted '
+            'incorrectly'
+            )
+        trouble = "thi%s :may ca%(u)se? troub:1e"
+        self.assertEqual(res[0][1], trouble,
+            'cursor.fetchall retrieved incorrect data, or data inserted '
+            'incorrectly. Got=%s, Expected=%s' % (repr(res[0][1]), repr(trouble)))      
+        self.assertEqual(res[1][1], trouble,
+            'cursor.fetchall retrieved incorrect data, or data inserted '
+            'incorrectly. Got=%s, Expected=%s' % (repr(res[1][1]), repr(trouble)
+            ))
+        
+    def test_executemany(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self.executeDDL1(cur)
+            largs = [ ("Cooper's",) , ("Boag's",) ]
+            margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
+            if self.driver.paramstyle == 'qmark':
+                cur.executemany(
+                    '%s into %sbooze values (?)' % (self.insert, self.table_prefix),
+                    largs
+                    )
+            elif self.driver.paramstyle == 'numeric':
+                cur.executemany(
+                    '%s into %sbooze values (:1)' % (self.insert, self.table_prefix),
+                    largs
+                    )
+            elif self.driver.paramstyle == 'named':
+                cur.executemany(
+                    '%s into %sbooze values (:beer)' % (self.insert, self.table_prefix),
+                    margs
+                    )
+            elif self.driver.paramstyle == 'format':
+                cur.executemany(
+                    '%s into %sbooze values (%%s)' % (self.insert, self.table_prefix),
+                    largs
+                    )
+            elif self.driver.paramstyle == 'pyformat':
+                cur.executemany(
+                    '%s into %sbooze values (%%(beer)s)' % (
+                        self.insert, self.table_prefix
+                        ),
+                    margs
+                    )
+            else:
+                self.fail('Unknown paramstyle')
+            _failUnless(self,cur.rowcount in (-1,2),
+                'insert using cursor.executemany set cursor.rowcount to '
+                'incorrect value %r' % cur.rowcount
+                )
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            res = cur.fetchall()
+            self.assertEqual(len(res),2,
+                'cursor.fetchall retrieved incorrect number of rows'
+                )
+            beers = [res[0][0],res[1][0]]
+            beers.sort()
+            self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
+            self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
+        finally:
+            con.close()
+
+    def test_fetchone(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+
+            # cursor.fetchone should raise an Error if called before
+            # executing a select-type query
+            self.assertRaises(self.driver.Error,cur.fetchone)
+
+            # cursor.fetchone should raise an Error if called after
+            # executing a query that cannot return rows
+            self.executeDDL1(cur)
+            self.assertRaises(self.driver.Error,cur.fetchone)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            self.assertEqual(cur.fetchone(),None,
+                'cursor.fetchone should return None if a query retrieves '
+                'no rows'
+                )
+            _failUnless(self,cur.rowcount in (-1,0))
+
+            # cursor.fetchone should raise an Error if called after
+            # executing a query that cannot return rows
+            cur.execute("%s into %sbooze values ('Victoria Bitter')" % (
+                self.insert, self.table_prefix
+                ))
+            self.assertRaises(self.driver.Error,cur.fetchone)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            r = cur.fetchone()
+            self.assertEqual(len(r),1,
+                'cursor.fetchone should have retrieved a single row'
+                )
+            self.assertEqual(r[0],'Victoria Bitter',
+                'cursor.fetchone retrieved incorrect data'
+                )
+            self.assertEqual(cur.fetchone(),None,
+                'cursor.fetchone should return None if no more rows available'
+                )
+            _failUnless(self,cur.rowcount in (-1,1))
+        finally:
+            con.close()
+
+    samples = [
+        'Carlton Cold',
+        'Carlton Draft',
+        'Mountain Goat',
+        'Redback',
+        'Victoria Bitter',
+        'XXXX'
+        ]
+
+    def _populate(self):
+        ''' Return a list of sql commands to setup the DB for the fetch
+            tests.
+        '''
+        populate = [
+            "%s into %sbooze values ('%s')" % (self.insert, self.table_prefix, s)
+                for s in self.samples
+            ]
+        return populate
+
+    def test_fetchmany(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+
+            # cursor.fetchmany should raise an Error if called without
+            #issuing a query
+            self.assertRaises(self.driver.Error,cur.fetchmany,4)
+
+            self.executeDDL1(cur)
+            for sql in self._populate():
+                cur.execute(sql)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            r = cur.fetchmany()
+            self.assertEqual(len(r),1,
+                'cursor.fetchmany retrieved incorrect number of rows, '
+                'default of arraysize is one.'
+                )
+            cur.arraysize=10
+            r = cur.fetchmany(3) # Should get 3 rows
+            self.assertEqual(len(r),3,
+                'cursor.fetchmany retrieved incorrect number of rows'
+                )
+            r = cur.fetchmany(4) # Should get 2 more
+            self.assertEqual(len(r),2,
+                'cursor.fetchmany retrieved incorrect number of rows'
+                )
+            r = cur.fetchmany(4) # Should be an empty sequence
+            self.assertEqual(len(r),0,
+                'cursor.fetchmany should return an empty sequence after '
+                'results are exhausted'
+            )
+            _failUnless(self,cur.rowcount in (-1,6))
+
+            # Same as above, using cursor.arraysize
+            cur.arraysize=4
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            r = cur.fetchmany() # Should get 4 rows
+            self.assertEqual(len(r),4,
+                'cursor.arraysize not being honoured by fetchmany'
+                )
+            r = cur.fetchmany() # Should get 2 more
+            self.assertEqual(len(r),2)
+            r = cur.fetchmany() # Should be an empty sequence
+            self.assertEqual(len(r),0)
+            _failUnless(self,cur.rowcount in (-1,6))
+
+            cur.arraysize=6
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            rows = cur.fetchmany() # Should get all rows
+            _failUnless(self,cur.rowcount in (-1,6))
+            self.assertEqual(len(rows),6)
+            rows = [r[0] for r in rows]
+            rows.sort()
+          
+            # Make sure we get the right data back out
+            for i in range(0,6):
+                self.assertEqual(rows[i],self.samples[i],
+                    'incorrect data retrieved by cursor.fetchmany'
+                    )
+
+            rows = cur.fetchmany() # Should return an empty list
+            self.assertEqual(len(rows),0,
+                'cursor.fetchmany should return an empty sequence if '
+                'called after the whole result set has been fetched'
+                )
+            _failUnless(self,cur.rowcount in (-1,6))
+
+            self.executeDDL2(cur)
+            cur.execute('select name from %sbarflys' % self.table_prefix)
+            r = cur.fetchmany() # Should get empty sequence
+            self.assertEqual(len(r),0,
+                'cursor.fetchmany should return an empty sequence if '
+                'query retrieved no rows'
+                )
+            _failUnless(self,cur.rowcount in (-1,0))
+
+        finally:
+            con.close()
+
+    def test_fetchall(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            # cursor.fetchall should raise an Error if called
+            # without executing a query that may return rows (such
+            # as a select)
+            self.assertRaises(self.driver.Error, cur.fetchall)
+
+            self.executeDDL1(cur)
+            for sql in self._populate():
+                cur.execute(sql)
+
+            # cursor.fetchall should raise an Error if called
+            # after executing a statement that cannot return rows
+            self.assertRaises(self.driver.Error,cur.fetchall)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            rows = cur.fetchall()
+            _failUnless(self,cur.rowcount in (-1,len(self.samples)))
+            self.assertEqual(len(rows),len(self.samples),
+                'cursor.fetchall did not retrieve all rows'
+                )
+            rows = [r[0] for r in rows]
+            rows.sort()
+            for i in range(0,len(self.samples)):
+                self.assertEqual(rows[i],self.samples[i],
+                'cursor.fetchall retrieved incorrect rows'
+                )
+            rows = cur.fetchall()
+            self.assertEqual(
+                len(rows),0,
+                'cursor.fetchall should return an empty list if called '
+                'after the whole result set has been fetched'
+                )
+            _failUnless(self,cur.rowcount in (-1,len(self.samples)))
+
+            self.executeDDL2(cur)
+            cur.execute('select name from %sbarflys' % self.table_prefix)
+            rows = cur.fetchall()
+            _failUnless(self,cur.rowcount in (-1,0))
+            self.assertEqual(len(rows),0,
+                'cursor.fetchall should return an empty list if '
+                'a select query returns no rows'
+                )
+            
+        finally:
+            con.close()
+    
+    def test_mixedfetch(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self.executeDDL1(cur)
+            for sql in self._populate():
+                cur.execute(sql)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            rows1  = cur.fetchone()
+            rows23 = cur.fetchmany(2)
+            rows4  = cur.fetchone()
+            rows56 = cur.fetchall()
+            _failUnless(self,cur.rowcount in (-1,6))
+            self.assertEqual(len(rows23),2,
+                'fetchmany returned incorrect number of rows'
+                )
+            self.assertEqual(len(rows56),2,
+                'fetchall returned incorrect number of rows'
+                )
+
+            rows = [rows1[0]]
+            rows.extend([rows23[0][0],rows23[1][0]])
+            rows.append(rows4[0])
+            rows.extend([rows56[0][0],rows56[1][0]])
+            rows.sort()
+            for i in range(0,len(self.samples)):
+                self.assertEqual(rows[i],self.samples[i],
+                    'incorrect data retrieved or inserted'
+                    )
+        finally:
+            con.close()
+
+    def help_nextset_setUp(self,cur):
+        ''' Should create a procedure called deleteme
+            that returns two result sets, first the
+            number of rows in booze, then "name from booze"
+        '''
+        raise NotImplementedError('Helper not implemented')
+        #sql="""
+        #    create procedure deleteme as
+        #    begin
+        #        select count(*) from booze
+        #        select name from booze
+        #    end
+        #"""
+        #cur.execute(sql)
+
+    def help_nextset_tearDown(self,cur):
+        'If cleaning up is needed after nextSetTest'
+        raise NotImplementedError('Helper not implemented')
+        #cur.execute("drop procedure deleteme")
+
+    def test_nextset(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            if not hasattr(cur,'nextset'):
+                return
+
+            try:
+                self.executeDDL1(cur)
+                for sql in self._populate():
+                    cur.execute(sql)
+
+                self.help_nextset_setUp(cur)
+
+                cur.callproc('deleteme')
+                numberofrows = cur.fetchone()
+                assert numberofrows[0] == len(self.samples)
+                assert cur.nextset()
+                names = cur.fetchall()
+                assert len(names) == len(self.samples)
+                s = cur.nextset()
+                assert s is None, 'No more return sets, should return None'
+            finally:
+                self.help_nextset_tearDown(cur)
+
+        finally:
+            con.close()
+
+    # This second definition deliberately shadows the generic test above;
+    # drivers are expected to override it with their own nextset test.
+    def test_nextset(self):
+        raise NotImplementedError('Drivers need to override this test')
+
+    def test_arraysize(self):
+        # Not much here - rest of the tests for this are in test_fetchmany
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            _failUnless(self,hasattr(cur,'arraysize'),
+                'cursor.arraysize must be defined'
+                )
+        finally:
+            con.close()
+
+    def test_setinputsizes(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            cur.setinputsizes( (25,) )
+            self._paraminsert(cur) # Make sure cursor still works
+        finally:
+            con.close()
+
+    def test_setoutputsize_basic(self):
+        # Basic test is to make sure setoutputsize doesn't blow up
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            cur.setoutputsize(1000)
+            cur.setoutputsize(2000,0)
+            self._paraminsert(cur) # Make sure the cursor still works
+        finally:
+            con.close()
+
+    def test_setoutputsize(self):
+        # Real test for setoutputsize is driver dependent
+        raise NotImplementedError('Drivers need to override this test')
+
+    def test_None(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self.executeDDL2(cur)  # barflys (ddl2) is the two-column table used below
+            cur.execute("%s into %sbarflys values ('a', NULL)" % (self.insert, self.table_prefix))
+            cur.execute('select drink from %sbarflys' % self.table_prefix)
+            r = cur.fetchall()
+            self.assertEqual(len(r),1)
+            self.assertEqual(len(r[0]),1)
+            self.assertEqual(r[0][0],None,'NULL value not returned as None')
+        finally:
+            con.close()
+
+    def test_Date(self):
+        d1 = self.driver.Date(2002,12,25)
+        d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
+        # Can we assume this? API doesn't specify, but it seems implied
+        # self.assertEqual(str(d1),str(d2))
+
+    def test_Time(self):
+        t1 = self.driver.Time(13,45,30)
+        t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
+        # Can we assume this? API doesn't specify, but it seems implied
+        # self.assertEqual(str(t1),str(t2))
+
+    def test_Timestamp(self):
+        t1 = self.driver.Timestamp(2002,12,25,13,45,30)
+        t2 = self.driver.TimestampFromTicks(
+            time.mktime((2002,12,25,13,45,30,0,0,0))
+            )
+        # Can we assume this? API doesn't specify, but it seems implied
+        # self.assertEqual(str(t1),str(t2))
+
+    def test_Binary(self):
+        b = self.driver.Binary(str2bytes('Something'))
+        b = self.driver.Binary(str2bytes(''))
+
+    def test_STRING(self):
+        _failUnless(self, hasattr(self.driver,'STRING'),
+            'module.STRING must be defined'
+            )
+
+    def test_BINARY(self):
+        _failUnless(self, hasattr(self.driver,'BINARY'),
+            'module.BINARY must be defined.'
+            )
+
+    def test_NUMBER(self):
+        _failUnless(self, hasattr(self.driver,'NUMBER'),
+            'module.NUMBER must be defined.'
+            )
+
+    def test_DATETIME(self):
+        _failUnless(self, hasattr(self.driver,'DATETIME'),
+            'module.DATETIME must be defined.'
+            )
+
+    def test_ROWID(self):
+        _failUnless(self, hasattr(self.driver,'ROWID'),
+            'module.ROWID must be defined.'
+            )
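
Every Phoenix-specific suite below is gated on the PHOENIXDB_TEST_DB_URL environment variable. A plausible way to drive one programmatically (the queryserver URL is illustrative; the variable must be set before phoenixdb.tests is imported, since the test modules read it at import time):

    import os
    import unittest

    # Illustrative URL for a local Phoenix Query Server.
    os.environ.setdefault('PHOENIXDB_TEST_DB_URL', 'http://localhost:8765')

    # Import happens inside unittest.main, after the variable is set.
    unittest.main(module='phoenixdb.tests.test_dbapi20', exit=False)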

+ 52 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_avatica.py

@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import phoenixdb
+from phoenixdb.avatica.client import parse_url, urlparse
+
+from requests.auth import HTTPBasicAuth
+
+
+class ParseUrlTest(unittest.TestCase):
+
+    def test_parse_url(self):
+        self.assertEqual(urlparse.urlparse('http://localhost:8765/'), parse_url('localhost'))
+        # Python 3.9 will interpret "localhost:" as a scheme. Arguably, it is right.
+        # self.assertEqual(urlparse.urlparse('http://localhost:2222/'), parse_url('localhost:2222'))
+        self.assertEqual(urlparse.urlparse('http://localhost:2222/'), parse_url('http://localhost:2222/'))
+
+    def test_url_params(self):
+        (url, auth, verify) = phoenixdb._process_args((
+            "https://localhost:8765?authentication=BASIC&"
+            "avatica_user=user&avatica_password=password&truststore=truststore"))
+        self.assertEqual("https://localhost:8765", url)
+        self.assertEqual("truststore", verify)
+        self.assertEqual(auth, HTTPBasicAuth("user", "password"))
+
+        (url, auth, verify) = phoenixdb._process_args(
+            "http://localhost:8765", authentication='BASIC', user='user', password='password',
+            truststore='truststore')
+        self.assertEqual("http://localhost:8765", url)
+        self.assertEqual("truststore", verify)
+        self.assertEqual(auth, HTTPBasicAuth("user", "password"))
+
+        (url, auth, verify) = phoenixdb._process_args(
+            "https://localhost:8765", authentication='SPNEGO', user='user', truststore='truststore')
+        self.assertEqual("https://localhost:8765?doAs=user", url)
+        self.assertEqual("truststore", verify)
+        # SPNEGO auth objects seem to have no working __eq__
+        # self.assertEqual(auth, HTTPSPNEGOAuth(opportunistic_auth=True))

+ 308 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_db.py

@@ -0,0 +1,308 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import phoenixdb.cursor
+from phoenixdb.connection import Connection
+from phoenixdb.errors import InternalError, ProgrammingError
+from phoenixdb.tests import DatabaseTestCase, TEST_DB_URL
+
+
+@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
+class PhoenixDatabaseTest(DatabaseTestCase):
+
+    def test_select_literal(self):
+        with self.conn.cursor() as cursor:
+            self.createTable("test", "CREATE TABLE {table} (id INTEGER PRIMARY KEY, text VARCHAR)")
+            cursor.executemany("UPSERT INTO test VALUES (?, ?)", [[i, 'text {}'.format(i)] for i in range(10)])
+
+        with self.conn.cursor() as cursor:
+            cursor.itersize = 4
+            cursor.execute("SELECT * FROM test WHERE id>1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[i, 'text {}'.format(i)] for i in range(2, 10)])
+
+    def test_select_parameter(self):
+        with self.conn.cursor() as cursor:
+            self.createTable("test", "CREATE TABLE {table} (id INTEGER PRIMARY KEY, text VARCHAR)")
+            cursor.executemany("UPSERT INTO test VALUES (?, ?)", [[i, 'text {}'.format(i)] for i in range(10)])
+
+        with self.conn.cursor() as cursor:
+            cursor.itersize = 4
+            cursor.execute("SELECT * FROM test WHERE id>? ORDER BY id", [1])
+            self.assertEqual(cursor.fetchall(), [[i, 'text {}'.format(i)] for i in range(2, 10)])
+
+    def _check_dict_cursor(self, cursor):
+        self.createTable("test", "CREATE TABLE {table} (id INTEGER PRIMARY KEY, text VARCHAR)")
+        cursor.execute("UPSERT INTO test VALUES (?, ?)", [1, 'text 1'])
+        cursor.execute("SELECT * FROM test ORDER BY id")
+        self.assertEqual(cursor.fetchall(), [{'ID': 1, 'TEXT': 'text 1'}])
+
+    def test_dict_cursor_default_parameter(self):
+        self.reopen(autocommit=True, cursor_factory=phoenixdb.cursor.DictCursor)
+
+        with self.conn.cursor() as cursor:
+            self._check_dict_cursor(cursor)
+
+    def test_dict_cursor_default_attribute(self):
+        self.conn.cursor_factory = phoenixdb.cursor.DictCursor
+
+        with self.conn.cursor() as cursor:
+            self._check_dict_cursor(cursor)
+
+    def test_dict_cursor(self):
+        self.reopen(autocommit=True, cursor_factory=phoenixdb.cursor.DictCursor)
+
+        with self.conn.cursor(cursor_factory=phoenixdb.cursor.DictCursor) as cursor:
+            self._check_dict_cursor(cursor)
+
+    def test_schema(self):
+
+        with self.conn.cursor() as cursor:
+            try:
+                cursor.execute("CREATE SCHEMA IF NOT EXISTS test_schema")
+            except InternalError as e:
+                if "phoenix.schema.isNamespaceMappingEnabled" in e.message:
+                    self.skipTest(e.message)
+                raise
+
+            self.createTable("test_schema.test", "CREATE TABLE {table} (id INTEGER PRIMARY KEY, text VARCHAR)")
+            cursor.execute("UPSERT INTO test_schema.test VALUES (?, ?)", [1, 'text 1'])
+            cursor.execute("SELECT * FROM test_schema.test ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, 'text 1']])
+
+    def test_transaction(self):
+        self.reopen(autocommit=False)
+        with self.conn.cursor() as cursor:
+            self.createTable("test", "CREATE TABLE {table} (id INTEGER PRIMARY KEY, text VARCHAR)")
+
+            cursor.execute("UPSERT INTO test VALUES (?, ?)", [1, 'one'])
+            cursor.execute("SELECT * FROM test ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [])
+
+            self.conn.commit()
+            cursor.execute("SELECT * FROM test ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, 'one']])
+            self.assertEqual(self.conn.autocommit, False)
+
+            cursor.execute("UPSERT INTO test VALUES (?, ?)", [2, 'two'])
+            self.conn.rollback()
+            cursor.execute("SELECT * FROM test ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, 'one']])
+            self.assertEqual(self.conn.autocommit, False)
+
+            cursor.execute("UPSERT INTO test VALUES (?, ?)", [2, 'two'])
+            # Since we expose the JDBC semantics, this is an implicit commit
+            self.conn.autocommit = True
+            cursor.execute("SELECT * FROM test ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, 'one'], [2, 'two']])
+
+    def test_conn_props(self):
+        phoenix_args, avatica_args = Connection._map_conn_props(
+            {'autoCommit': True,
+             'readonly': True,
+             'transactionIsolation': 3,
+             'schema': 'bubu',
+             'phoenixArg': 'phoenixArg'})
+        self.assertEqual(phoenix_args, {'phoenixArg': 'phoenixArg'})
+        self.assertEqual(avatica_args, {'autoCommit': True,
+                                        'readOnly': True,
+                                        'transactionIsolation': 3,
+                                        'schema': 'bubu'})
+
+    def test_meta(self):
+        with self.conn.cursor() as cursor:
+            try:
+                cursor.execute('drop table if exists DEFAULT_TABLE')
+                cursor.execute('drop table if exists A_SCHEMA.A_TABLE')
+                cursor.execute('drop table if exists B_SCHEMA.B_TABLE')
+
+                cursor.execute('create table DEFAULT_TABLE (ID integer primary key)')
+                cursor.execute('create table A_SCHEMA.A_TABLE (ID_A integer primary key)')
+                cursor.execute('create table B_SCHEMA.B_TABLE (ID_B integer primary key)')
+
+                meta = self.conn.meta()
+
+                self.assertEqual(meta.get_catalogs(), [])
+
+                self.assertEqual(meta.get_schemas(), [
+                    {'TABLE_SCHEM': '', 'TABLE_CATALOG': ''},
+                    {'TABLE_SCHEM': 'A_SCHEMA', 'TABLE_CATALOG': ''},
+                    {'TABLE_SCHEM': 'B_SCHEMA', 'TABLE_CATALOG': ''},
+                    {'TABLE_SCHEM': 'SYSTEM', 'TABLE_CATALOG': ''}])
+
+                self.assertEqual(meta.get_schemas(schemaPattern=''), [
+                    {'TABLE_SCHEM': '', 'TABLE_CATALOG': ''}])
+
+                self.assertEqual(meta.get_schemas(schemaPattern='A_SCHEMA'), [
+                    {'TABLE_SCHEM': 'A_SCHEMA', 'TABLE_CATALOG': ''}])
+
+                a_tables = meta.get_tables()
+                self.assertTrue(len(a_tables) > 3)  # Don't know how many tables SYSTEM has
+
+                a_tables = meta.get_tables(schemaPattern='')
+                self.assertEqual(len(a_tables), 1)
+                self.assertTrue(a_tables[0]['TABLE_NAME'] == 'DEFAULT_TABLE')
+
+                a_tables = meta.get_tables(schemaPattern='A_SCHEMA')
+                self.assertEqual(len(a_tables), 1)
+                self.assertTrue(a_tables[0]['TABLE_NAME'] == 'A_TABLE')
+
+                a_columns = meta.get_columns(schemaPattern='A_SCHEMA', tableNamePattern='A_TABLE')
+                self.assertEqual(len(a_columns), 1)
+                self.assertTrue(a_columns[0]['COLUMN_NAME'] == 'ID_A')
+
+                self.assertTrue(all(elem in meta.get_table_types() for elem in [
+                    {'TABLE_TYPE': 'INDEX'},
+                    {'TABLE_TYPE': 'SEQUENCE'},
+                    {'TABLE_TYPE': 'SYSTEM TABLE'},
+                    {'TABLE_TYPE': 'TABLE'},
+                    {'TABLE_TYPE': 'VIEW'}]))
+
+                self.assertEqual(meta.get_type_info(), [])
+
+            finally:
+                cursor.execute('drop table if exists DEFAULT_TABLE')
+                cursor.execute('drop table if exists A_SCHEMA.A_TABLE')
+                cursor.execute('drop table if exists B_SCHEMA.B_TABLE')
+
+    def test_meta2(self):
+        with self.conn.cursor() as cursor:
+            try:
+                cursor.execute('drop table if exists DEFAULT_TABLE')
+                cursor.execute('drop table if exists A_SCHEMA.A_TABLE')
+                cursor.execute('drop table if exists B_SCHEMA.B_TABLE')
+
+                cursor.execute('''create table DEFAULT_TABLE (ID integer not null, ID2 varchar not null,
+                V1 integer, V2 varchar, constraint PK PRIMARY KEY (ID DESC, ID2 ASC))''')
+                cursor.execute('CREATE INDEX GLOBAL_IDX ON DEFAULT_TABLE (V1) INCLUDE (V2)')
+                cursor.execute('CREATE LOCAL INDEX LOCAL_IDX ON DEFAULT_TABLE (V1)')
+                cursor.execute('create table A_SCHEMA.A_TABLE (ID_A integer primary key)')
+                cursor.execute('create table B_SCHEMA.B_TABLE (ID_B integer primary key)')
+
+                meta = self.conn.meta()
+                self.assertEqual(meta.get_primary_keys(table='DEFAULT_TABLE'),
+                                [{'ASC_OR_DESC': '\x00\x00\x00D',
+                                  'COLUMN_NAME': 'ID',
+                                  'COLUMN_SIZE': None,
+                                  'DATA_TYPE': 4,
+                                  'KEY_SEQ': 1,
+                                  'PK_NAME': 'PK',
+                                  'TABLE_CAT': None,
+                                  'TABLE_NAME': 'DEFAULT_TABLE',
+                                  'TABLE_SCHEM': None,
+                                  'TYPE_ID': 4,
+                                  'TYPE_NAME': 'INTEGER',
+                                  'VIEW_CONSTANT': None},
+                                 {'ASC_OR_DESC': '\x00\x00\x00A',
+                                  'COLUMN_NAME': 'ID2',
+                                  'COLUMN_SIZE': None,
+                                  'DATA_TYPE': 12,
+                                  'KEY_SEQ': 2,
+                                  'PK_NAME': 'PK',
+                                  'TABLE_CAT': None,
+                                  'TABLE_NAME': 'DEFAULT_TABLE',
+                                  'TABLE_SCHEM': None,
+                                  'TYPE_ID': 12,
+                                  'TYPE_NAME': 'VARCHAR',
+                                  'VIEW_CONSTANT': None}])
+                self.assertEqual(len(meta.get_primary_keys(schema='A_SCHEMA', table='A_TABLE')), 1)
+                try:
+                    self.assertEqual(len(meta.get_primary_keys(schema='A_SCHEMA', table='B_TABLE')), 0)
+                    self.fail('expected ProgrammingError for a table missing from A_SCHEMA')
+                except ProgrammingError:
+                    pass
+
+                self.maxDiff = None
+
+                self.assertEqual(meta.get_index_info(table='NON_EXISTENT'), [])
+
+                self.assertTrue(len(meta.get_index_info(table='DEFAULT_TABLE')) > 1)
+
+            finally:
+                cursor.execute('drop table if exists DEFAULT_TABLE')
+                cursor.execute('drop table if exists A_SCHEMA.A_TABLE')
+                cursor.execute('drop table if exists B_SCHEMA.B_TABLE')
+
+    @unittest.skip("https://issues.apache.org/jira/browse/PHOENIX-6004")
+    def test_case_sensitivity(self):
+        with self.conn.cursor() as cursor:
+            try:
+                cursor.execute('drop table if exists AAA')
+                cursor.execute('drop table if exists "aaa"')
+                cursor.execute('drop table if exists "Aaa"')
+
+                cursor.execute('create table AAA (ID integer primary key, YYY integer)')
+                cursor.execute('create table "aaa" ("ID_x" integer primary key, YYY integer, "Yyy" integer, "yyy" integer)')
+                cursor.execute('create table "Aaa" (ID_X integer primary key, ZZZ integer, "Zzz" integer, "zzz" integer)')
+
+                cursor.execute('upsert into AAA values (1, 2)')
+                cursor.execute('upsert into "aaa" values (11, 12, 13, 14)')
+                cursor.execute('upsert into "Aaa" values (21, 22, 23, 24)')
+
+                cursor.execute('select YYY from AAA')
+                self.assertEqual(cursor.fetchone(), [2])
+
+                cursor.execute('select YYY from "aaa"')
+                self.assertEqual(cursor.fetchone(), [12])
+
+                cursor.execute('select "YYY" from "aaa"')
+                self.assertEqual(cursor.fetchone(), [12])
+
+                cursor.execute('select "Yyy" from "aaa"')
+                self.assertEqual(cursor.fetchone(), [13])
+
+                meta = self.conn.meta()
+
+                self.assertEqual(len(meta.get_tables(schemaPattern='')), 3)
+
+                print(meta.get_columns(schemaPattern='',
+                                       tableNamePattern='"aaa"'))
+
+                self.assertEqual(len(meta.get_tables(schemaPattern='',
+                                                     tableNamePattern='AAA')), 1)
+                self.assertEqual(len(meta.get_tables(schemaPattern='',
+                                                     tableNamePattern='"aaa"')), 1)
+                self.assertEqual(len(meta.get_columns(tableNamePattern='AAA',
+                                                      columnNamePattern='YYY')), 1)
+                self.assertEqual(len(meta.get_columns(tableNamePattern='AAA',
+                                                      columnNamePattern='yyy')), 1)
+                self.assertEqual(len(meta.get_columns(tableNamePattern='AAA',
+                                                      columnNamePattern='"yyy"')), 0)
+            finally:
+                cursor.execute('drop table if exists AAA')
+                cursor.execute('drop table if exists "aaa"')
+                cursor.execute('drop table if exists "Aaa"')
+
+    def test_param_number_mismatch(self):
+        self.createTable("phoenixdb_test_param_number", "CREATE TABLE {table} (id INTEGER PRIMARY KEY, username VARCHAR, name VARCHAR)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_param_number VALUES (?, ?, ?)", (123, 'John Doe', 'Doe'))
+            cursor.execute("SELECT * FROM phoenixdb_test_param_number")
+            self.assertEqual(cursor.fetchall(), [
+                [123, 'John Doe', 'Doe']
+            ])
+            with self.assertRaises(ProgrammingError) as cm:
+                cursor.execute("UPSERT INTO phoenixdb_test_param_number VALUES (?, ?)", (123, 'John Doe', 'admin'))
+            self.assertEqual("Number of placeholders (?) must match number of parameters."
+                             " Number of placeholders: 2. Number of parameters: 3", cm.exception.message)
+            with self.assertRaises(ProgrammingError) as cm:
+                cursor.execute("UPSERT INTO phoenixdb_test_param_number VALUES (?, ?, ?)", (123, 'John Doe', 'admin', 'asd'))
+            self.assertEqual("Number of placeholders (?) must match number of parameters."
+                             " Number of placeholders: 3. Number of parameters: 4", cm.exception.message)
+            with self.assertRaises(ProgrammingError) as cm:
+                cursor.execute("UPSERT INTO phoenixdb_test_param_number VALUES (?, ?, ?)", (123, 'John Doe'))
+            self.assertEqual("Number of placeholders (?) must match number of parameters."
+                             " Number of placeholders: 3. Number of parameters: 2", cm.exception.message)

+ 125 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_dbapi20.py

@@ -0,0 +1,125 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import phoenixdb
+from phoenixdb.tests import TEST_DB_URL, httpArgs
+
+from . import dbapi20
+
+
+@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
+class PhoenixDatabaseAPI20Test(dbapi20.DatabaseAPI20Test):
+    driver = phoenixdb
+    connect_args = (TEST_DB_URL,)
+    connect_kw_args = httpArgs
+
+    ddl1 = 'create table %sbooze (name varchar(20) primary key)' % dbapi20.DatabaseAPI20Test.table_prefix
+    ddl2 = 'create table %sbarflys (name varchar(20) primary key, drink varchar(30))' % dbapi20.DatabaseAPI20Test.table_prefix
+    insert = 'upsert'
+
+    def test_nextset(self):
+        pass
+
+    def test_setoutputsize(self):
+        pass
+
+    def _connect(self):
+        con = dbapi20.DatabaseAPI20Test._connect(self)
+        con.autocommit = True
+        return con
+
+    def test_None(self):
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            self.executeDDL2(cur)
+            cur.execute("%s into %sbarflys values ('a', NULL)" % (self.insert, self.table_prefix))
+            cur.execute('select drink from %sbarflys' % self.table_prefix)
+            r = cur.fetchall()
+            self.assertEqual(len(r), 1)
+            self.assertEqual(len(r[0]), 1)
+            self.assertEqual(r[0][0], None, 'NULL value not returned as None')
+        finally:
+            con.close()
+
+    def test_autocommit(self):
+        con = dbapi20.DatabaseAPI20Test._connect(self)
+        self.assertFalse(con.autocommit)
+        con.autocommit = True
+        self.assertTrue(con.autocommit)
+        con.autocommit = False
+        self.assertFalse(con.autocommit)
+        con.close()
+
+    def test_readonly(self):
+        con = dbapi20.DatabaseAPI20Test._connect(self)
+        self.assertFalse(con.readonly)
+        con.readonly = True
+        self.assertTrue(con.readonly)
+        con.readonly = False
+        self.assertFalse(con.readonly)
+        con.close()
+
+    def test_iter(self):
+        # https://www.python.org/dev/peps/pep-0249/#iter
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            if hasattr(cur, '__iter__'):
+                self.assertIs(cur, iter(cur))
+        finally:
+            con.close()
+
+    def test_next(self):
+        # https://www.python.org/dev/peps/pep-0249/#next
+        con = self._connect()
+        try:
+            cur = con.cursor()
+            if not hasattr(cur, 'next'):
+                return
+
+            # cursor.next should raise an Error if called before
+            # executing a select-type query
+            self.assertRaises(self.driver.Error, cur.next)
+
+            # cursor.next should raise an Error if called after
+            # executing a query that cannot return rows
+            self.executeDDL1(cur)
+            self.assertRaises(self.driver.Error, cur.next)
+
+            # cursor.next should raise StopIteration if a query retrieves
+            # no rows
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            self.assertRaises(StopIteration, cur.next)
+            self.assertTrue(cur.rowcount in (-1, 0))
+
+            # cursor.next should raise an Error if called after
+            # executing a query that cannot return rows
+            cur.execute("%s into %sbooze values ('Victoria Bitter')" % (
+                self.insert, self.table_prefix
+                ))
+            self.assertRaises(self.driver.Error, cur.next)
+
+            cur.execute('select name from %sbooze' % self.table_prefix)
+            r = cur.next()
+            self.assertEqual(len(r), 1, 'cursor.next should have retrieved a row with one column')
+            self.assertEqual(r[0], 'Victoria Bitter', 'cursor.next retrieved incorrect data')
+            # cursor.next should raise StopIteration if no more rows available
+            self.assertRaises(StopIteration, cur.next)
+            self.assertTrue(cur.rowcount in (-1, 1))
+        finally:
+            con.close()
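
test_iter and test_next above cover the optional PEP 249 iteration extension, so a result set can be consumed directly in a for loop. A sketch (URL and table are placeholders):

    import phoenixdb

    conn = phoenixdb.connect('http://localhost:8765', autocommit=True)
    with conn.cursor() as cursor:
        cursor.execute("SELECT name FROM dbapi20test_booze")
        # Equivalent to calling fetchone() until StopIteration.
        for row in cursor:
            print(row[0])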

+ 60 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_errors.py

@@ -0,0 +1,60 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from phoenixdb.tests import DatabaseTestCase
+
+
+class ProgrammingErrorTest(DatabaseTestCase):
+
+    def test_invalid_sql(self):
+        with self.conn.cursor() as cursor:
+            with self.assertRaises(self.conn.ProgrammingError) as cm:
+                cursor.execute("UPS")
+            self.assertEqual("Syntax error. Encountered \"UPS\" at line 1, column 1.", cm.exception.message)
+            self.assertEqual(601, cm.exception.code)
+            self.assertEqual("42P00", cm.exception.sqlstate)
+
+
+class IntegrityErrorTest(DatabaseTestCase):
+
+    def test_null_in_pk(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key)")
+        with self.conn.cursor() as cursor:
+            with self.assertRaises(self.conn.IntegrityError) as cm:
+                cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (NULL)")
+            self.assertEqual("Constraint violation. PHOENIXDB_TEST_TBL1.ID may not be null", cm.exception.message)
+            self.assertEqual(218, cm.exception.code)
+            self.assertIn(cm.exception.sqlstate, ("22018", "23018"))
+
+
+class DataErrorTest(DatabaseTestCase):
+
+    def test_number_outside_of_range(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id tinyint primary key)")
+        with self.conn.cursor() as cursor:
+            with self.assertRaises(self.conn.DataError) as cm:
+                cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (10000)")
+            self.assertEqual("Type mismatch. TINYINT and INTEGER for 10000", cm.exception.message)
+            self.assertEqual(203, cm.exception.code)
+            self.assertEqual("22005", cm.exception.sqlstate)
+
+    def test_division_by_zero(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key)")
+        with self.conn.cursor() as cursor:
+            with self.assertRaises(self.conn.DataError) as cm:
+                cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2/0)")
+            self.assertEqual("Divide by zero.", cm.exception.message)
+            self.assertEqual(202, cm.exception.code)
+            self.assertEqual("22012", cm.exception.sqlstate)

+ 159 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_sqlalchemy.py

@@ -0,0 +1,159 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+import sqlalchemy as db
+from sqlalchemy import text
+from sqlalchemy.types import BIGINT, CHAR, VARCHAR
+
+from . import TEST_DB_AUTHENTICATION, TEST_DB_AVATICA_PASSWORD, TEST_DB_AVATICA_USER, \
+    TEST_DB_TRUSTSTORE, TEST_DB_URL
+
+if sys.version_info.major == 3:
+    from urllib.parse import urlparse, urlunparse
+else:
+    from urlparse import urlparse, urlunparse
+
+
+@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
+class SQLAlchemyTest(unittest.TestCase):
+
+    def test_connection(self):
+        engine = self._create_engine()
+        # connection = engine.connect()
+        metadata = db.MetaData()
+        catalog = db.Table('CATALOG', metadata, schema='SYSTEM', autoload=True, autoload_with=engine)
+        self.assertIn('TABLE_NAME', catalog.columns.keys())
+
+    def test_textual(self):
+        engine = self._create_engine()
+        with engine.connect() as connection:
+            try:
+                connection.execute('drop table if exists ALCHEMY_TEST')
+                connection.execute(text('create table ALCHEMY_TEST (id integer primary key)'))
+                connection.execute(text('upsert into ALCHEMY_TEST values (42)'))
+                # SQLAlchemy autocommit should kick in
+                result = connection.execute(text('select * from ALCHEMY_TEST'))
+                row = result.fetchone()
+                self.assertEqual(row[0], 42)
+            finally:
+                connection.execute('drop table if exists ALCHEMY_TEST')
+
+    def test_schema_filtering(self):
+        engine = self._create_engine()
+        with engine.connect() as connection:
+            try:
+                inspector = db.inspect(engine)
+
+                connection.execute('drop table if exists ALCHEMY_TEST')
+                connection.execute('drop table if exists A.ALCHEMY_TEST_A')
+                connection.execute('drop table if exists B.ALCHEMY_TEST_B')
+
+                self.assertEqual(inspector.get_schema_names(), ['', 'SYSTEM'])
+
+                connection.execute(text('create table ALCHEMY_TEST (ID integer primary key)'))
+                connection.execute(text('create table A.ALCHEMY_TEST_A (ID_A integer primary key)'))
+                connection.execute(text('create table B.ALCHEMY_TEST_B (ID_B integer primary key)'))
+                connection.execute(text('create view ALCHEMY_TEST_VIEW as select * from ALCHEMY_TEST'))
+
+                self.assertEqual(inspector.get_schema_names(), ['', 'A', 'B', 'SYSTEM'])
+
+                self.assertEqual(inspector.get_table_names(), ['ALCHEMY_TEST'])
+                self.assertEqual(inspector.get_table_names(''), ['ALCHEMY_TEST'])
+                self.assertEqual(inspector.get_table_names('A'), ['ALCHEMY_TEST_A'])
+                self.assertEqual(inspector.get_table_names('B'), ['ALCHEMY_TEST_B'])
+
+                self.assertEqual(inspector.get_view_names(), ['ALCHEMY_TEST_VIEW'])
+
+                self.assertEqual(inspector.get_columns('ALCHEMY_TEST').pop()['name'], 'ID')
+                self.assertEqual(
+                    inspector.get_columns('ALCHEMY_TEST', '').pop()['name'], 'ID')
+                self.assertEqual(
+                    inspector.get_columns('ALCHEMY_TEST_A', 'A').pop()['name'], 'ID_A')
+
+                self.assertTrue(engine.has_table('ALCHEMY_TEST'))
+                self.assertFalse(engine.has_table('ALCHEMY_TEST', 'A'))
+                self.assertTrue(engine.has_table('ALCHEMY_TEST_A', 'A'))
+                self.assertFalse(engine.has_table('ALCHEMY_TEST_A'))
+            finally:
+                connection.execute('drop view if exists ALCHEMY_TEST_VIEW')
+                connection.execute('drop table if exists ALCHEMY_TEST')
+                connection.execute('drop table if exists A.ALCHEMY_TEST_A')
+                connection.execute('drop table if exists B.ALCHEMY_TEST_B')
+
+    def test_reflection(self):
+        engine = self._create_engine()
+        with engine.connect() as connection:
+            try:
+                inspector = db.inspect(engine)
+                columns_result = inspector.get_columns('DOES_NOT_EXIST')
+                self.assertEqual([], columns_result)
+                connection.execute('drop table if exists us_population')
+                connection.execute(text('''create table if not exists US_POPULATION (
+                state CHAR(2) NOT NULL,
+                city VARCHAR NOT NULL,
+                population BIGINT
+                CONSTRAINT my_pk PRIMARY KEY (state, city))'''))
+                connection.execute('CREATE INDEX GLOBAL_IDX ON US_POPULATION (state) INCLUDE (city)')
+                connection.execute('CREATE LOCAL INDEX LOCAL_IDX ON US_POPULATION (population)')
+
+                columns_result = inspector.get_columns('US_POPULATION')
+                # The reflected list does not compare equal to a literal, so only the
+                # string representations are compared
+                self.assertTrue(str(columns_result),
+                                str([{'name': 'STATE', 'type': CHAR(), 'nullable': True,
+                                      'autoincrement': False, 'comment': '', 'default': None},
+                                    {'name': 'CITY', 'type': VARCHAR(), 'nullable': True,
+                                    'autoincrement': False, 'comment': '', 'default': None},
+                                     {'name': 'POPULATION', 'type': BIGINT(), 'nullable': True,
+                                     'autoincrement': False, 'comment': '', 'default': None}]))
+
+                indexes_result = inspector.get_indexes('US_POPULATION')
+                self.assertTrue(indexes_result,
+                                [{'name': 'GLOBAL_IDX', 'unique': False, 'column_names': ['STATE', 'CITY']},
+                                 {'name': 'LOCAL_IDX', 'unique': False, 'column_names': ['_INDEX_ID', 'POPULATION', 'STATE', 'CITY']}])
+
+                pk_result = inspector.get_pk_constraint('US_POPULATION')
+                self.assertTrue(pk_result, {'constrained_columns': ['STATE', 'CITY'], 'name': 'MY_PK'})
+
+            finally:
+                connection.execute('drop table if exists us_population')
+
+    @unittest.skip("ORM feature not implemented")
+    def test_orm(self):
+        pass
+
+    def _create_engine(self):
+        '''Massage the properties that we use for the DBAPI tests so that they apply to
+        SQLAlchemy'''
+
+        url_parts = urlparse(TEST_DB_URL)
+
+        tls = url_parts.scheme.lower() == 'https'
+
+        url_parts = url_parts._replace(scheme='phoenix')
+
+        connect_args = dict()
+        if TEST_DB_AUTHENTICATION:
+            connect_args.update(authentication=TEST_DB_AUTHENTICATION)
+        if TEST_DB_AVATICA_USER:
+            connect_args.update(avatica_user=TEST_DB_AVATICA_USER)
+        if TEST_DB_AVATICA_PASSWORD:
+            connect_args.update(avatica_password=TEST_DB_AVATICA_PASSWORD)
+        if TEST_DB_TRUSTSTORE:
+            connect_args.update(truststore=TEST_DB_TRUSTSTORE)
+
+        return db.create_engine(urlunparse(url_parts), tls=tls, connect_args=connect_args)

+ 380 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/tests/test_types.py

@@ -0,0 +1,380 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import sys
+import unittest
+from decimal import Decimal
+
+import phoenixdb
+from phoenixdb.tests import DatabaseTestCase
+
+
+class TypesTest(DatabaseTestCase):
+
+    def checkIntType(self, type_name, min_value, max_value):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val {})".format(type_name, table="{table}"))
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 1)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [1])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [min_value])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [max_value])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.description[1].type_code, phoenixdb.NUMBER)
+            self.assertEqual(cursor.fetchall(), [[1, 1], [2, None], [3, 1], [4, None], [5, min_value], [6, max_value]])
+
+            self.assertRaises(
+                self.conn.DatabaseError, cursor.execute,
+                "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, {})".format(min_value - 1))
+
+            self.assertRaises(
+                self.conn.DatabaseError, cursor.execute,
+                "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, {})".format(max_value + 1))
+
+            # XXX The server silently truncates the values
+#            self.assertRaises(self.conn.DatabaseError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, ?)", [min_value - 1])
+#            self.assertRaises(self.conn.DatabaseError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, ?)", [max_value + 1])
+
+    def test_integer(self):
+        self.checkIntType("integer", -2147483648, 2147483647)
+
+    def test_unsigned_int(self):
+        self.checkIntType("unsigned_int", 0, 2147483647)
+
+    def test_bigint(self):
+        self.checkIntType("bigint", -9223372036854775808, 9223372036854775807)
+
+    def test_unsigned_long(self):
+        self.checkIntType("unsigned_long", 0, 9223372036854775807)
+
+    def test_tinyint(self):
+        self.checkIntType("tinyint", -128, 127)
+
+    def test_unsigned_tinyint(self):
+        self.checkIntType("unsigned_tinyint", 0, 127)
+
+    def test_smallint(self):
+        self.checkIntType("smallint", -32768, 32767)
+
+    def test_unsigned_smallint(self):
+        self.checkIntType("unsigned_smallint", 0, 32767)
+
+    def checkFloatType(self, type_name, min_value, max_value):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val {})".format(type_name, table="{table}"))
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 1)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [1])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [min_value])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [max_value])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.description[1].type_code, phoenixdb.NUMBER)
+            rows = cursor.fetchall()
+            self.assertEqual([r[0] for r in rows], [1, 2, 3, 4, 5, 6])
+            self.assertEqual(rows[0][1], 1.0)
+            self.assertEqual(rows[1][1], None)
+            self.assertEqual(rows[2][1], 1.0)
+            self.assertEqual(rows[3][1], None)
+            self.assertAlmostEqual(rows[4][1], min_value)
+            self.assertAlmostEqual(rows[5][1], max_value)
+
+    def test_float(self):
+        self.checkFloatType("float", -3.4028234663852886e+38, 3.4028234663852886e+38)
+
+    def test_unsigned_float(self):
+        self.checkFloatType("unsigned_float", 0, 3.4028234663852886e+38)
+
+    def test_double(self):
+        self.checkFloatType("double", -1.7976931348623158E+308, 1.7976931348623158E+308)
+
+    def test_unsigned_double(self):
+        self.checkFloatType("unsigned_double", 0, 1.7976931348623158E+308)
+
+    def test_decimal(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val decimal(8,3))")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 33333.333)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [33333.333])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [Decimal('33333.333')])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [None])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.description[1].type_code, phoenixdb.NUMBER)
+            rows = cursor.fetchall()
+            self.assertEqual([r[0] for r in rows], [1, 2, 3, 4, 5])
+            self.assertEqual(rows[0][1], Decimal('33333.333'))
+            self.assertEqual(rows[1][1], None)
+            self.assertEqual(rows[2][1], Decimal('33333.333'))
+            self.assertEqual(rows[3][1], Decimal('33333.333'))
+            self.assertEqual(rows[4][1], None)
+            self.assertRaises(
+                self.conn.DatabaseError, cursor.execute,
+                "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, ?)", [Decimal('1234567890')])
+            self.assertRaises(
+                self.conn.DatabaseError, cursor.execute,
+                "UPSERT INTO phoenixdb_test_tbl1 VALUES (101, ?)", [Decimal('123456.789')])
+
+    def test_boolean(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val boolean)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, TRUE)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, FALSE)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [True])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [False])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [None])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.description[1].type_code, phoenixdb.BOOLEAN)
+            self.assertEqual(cursor.fetchall(), [[1, True], [2, False], [3, None], [4, True], [5, False], [6, None]])
+
+    @unittest.skip("https://issues.apache.org/jira/browse/PHOENIX-4664")
+    def test_time(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val time)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '1970-01-01 12:01:02')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [phoenixdb.Time(12, 1, 2)])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [datetime.time(12, 1, 2)])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [None])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, datetime.time(12, 1, 2)],
+                [2, None],
+                [3, datetime.time(12, 1, 2)],
+                [4, datetime.time(12, 1, 2)],
+                [5, None],
+            ])
+
+    @unittest.skip("https://issues.apache.org/jira/browse/CALCITE-797")
+    def test_time_full(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val time)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
+                [2, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
+            ])
+
+    @unittest.skip("https://issues.apache.org/jira/browse/PHOENIX-4664")
+    def test_date(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val date)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 00:00:00')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [phoenixdb.Date(2015, 7, 12)])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [datetime.date(2015, 7, 12)])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, datetime.date(2015, 7, 12)],
+                [3, datetime.date(2015, 7, 12)],
+                [4, datetime.date(2015, 7, 12)],
+            ])
+
+    @unittest.skip("https://issues.apache.org/jira/browse/CALCITE-798")
+    def test_date_full(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val date)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
+                [2, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
+            ])
+
+    def test_date_null(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val date)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [None])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")  # raises NullPointerException on the server
+            self.assertEqual(cursor.fetchall(), [
+                [1, None],
+                [2, None],
+            ])
+
+    @unittest.skip("https://issues.apache.org/jira/browse/PHOENIX-4664")
+    def test_timestamp(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val timestamp)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [phoenixdb.Timestamp(2015, 7, 12, 13, 1, 2)])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [None])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
+                [2, None],
+                [3, datetime.datetime(2015, 7, 12, 13, 1, 2)],
+                [4, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
+                [5, None],
+            ])
+
+    # Minimal date/time/timestamp type test that doesn't trigger PHOENIX-4664
+    def test_time_minimal(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val1 date, val2 time, val3 timestamp)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12', '2015-07-12 13:01:02', '2015-07-12 13:01:02.123')")
+            cursor.execute("SELECT * FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, datetime.date(2015, 7, 12), datetime.time(13, 1, 2), datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)]
+            ])
+
+    @unittest.skip("https://issues.apache.org/jira/browse/CALCITE-796")
+    def test_timestamp_full(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val timestamp)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123456789')")
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123456789)],
+            ])
+
+    def test_varchar(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val varchar)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'abc')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", ['abc'])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, '')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [''])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, 'abc'], [2, None], [3, 'abc'], [4, None], [5, None], [6, None]])
+
+    @unittest.skipIf(sys.version_info[0] < 3, "phoenixdb doesn't support unicode strings in Python2")
+    def test_unicode(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val varchar)")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, \
+            '\u00E1rv\u00EDzt\u0171r\u0151 t\u00FCk\u00F6rf\u00FAr\u00F3g\u00E9p')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, '\u265E')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, '\U0001F600')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)",
+                           ['\u00E1rv\u00EDzt\u0171r\u0151 t\u00FCk\u00F6rf\u00FAr\u00F3g\u00E9p'])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", ['\u265E'])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", ['\U0001F600'])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(),
+                             [[1, '\u00E1rv\u00EDzt\u0171r\u0151 t\u00FCk\u00F6rf\u00FAr\u00F3g\u00E9p'],
+                              [2, '\u265E'], [3, '\U0001F600'],
+                              [4, '\u00E1rv\u00EDzt\u0171r\u0151 t\u00FCk\u00F6rf\u00FAr\u00F3g\u00E9p'],
+                              [5, '\u265E'], [6, '\U0001F600']])
+
+    def test_varchar_very_long(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val varchar)")
+        with self.conn.cursor() as cursor:
+            value = '1234567890' * 1000
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ?)", [value])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, value]])
+
+    def test_varchar_limited(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val varchar(2))")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'ab')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", ['ab'])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, '')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [''])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, 'ab'], [2, None], [3, 'ab'], [4, None], [5, None], [6, None]])
+            self.assertRaises(self.conn.DataError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, 'abc')")
+
+    def test_char_null(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val char(2))")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, '')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [''])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[2, None], [4, None], [5, None], [6, None]])
+            self.assertRaises(self.conn.DataError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, 'abc')")
+
+    def test_char(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val char(2))")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'ab')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", ['ab'])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, 'a')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", ['b'])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, 'ab'], [2, 'ab'], [3, 'a'], [4, 'b']])
+            self.assertRaises(self.conn.DataError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, 'abc')")
+
+    def test_binary(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val binary(2))")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'ab')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [phoenixdb.Binary(b'ab')])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, '\x01\x00')")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [phoenixdb.Binary(b'\x01\x00')])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, b'ab'],
+                [2, b'ab'],
+                [3, b'\x01\x00'],
+                [4, b'\x01\x00'],
+            ])
+
+    def test_binary_all_bytes(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val binary(256))")
+        with self.conn.cursor() as cursor:
+            if sys.version_info[0] < 3:
+                value = ''.join(map(chr, range(256)))
+            else:
+                value = bytes(range(256))
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ?)", [phoenixdb.Binary(value)])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [[1, value]])
+
+    def test_array(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val integer[])")
+        with self.conn.cursor() as cursor:
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ARRAY[1, 2])")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [[2, 3]])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [[4]])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, [1, 2]],
+                [2, [2, 3]],
+                [3, [4]],
+            ])
+
+    def test_array_boolean(self):
+        self.createTable("phoenixdb_test_tbl1", "CREATE TABLE {table} (id integer primary key, val boolean[])")
+        with self.conn.cursor() as cursor:
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ARRAY[TRUE, TRUE, FALSE])")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", ((1, 0, 1),))
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [[True, True, True]])
+            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [[]])
+            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
+            self.assertEqual(cursor.fetchall(), [
+                [1, [True, True, False]],
+                [2, None],
+                [3, [True, False, True]],
+                [4, [True, True, True]],
+                [5, None]
+            ])

+ 305 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/phoenixdb/types.py

@@ -0,0 +1,305 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import sys
+import time
+from decimal import Decimal
+
+from phoenixdb.avatica.proto import common_pb2
+
+
+__all__ = [
+    'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
+    'Binary', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOLEAN',
+    'TypeHelper',
+]
+
+
+def Date(year, month, day):
+    """Constructs an object holding a date value."""
+    return datetime.date(year, month, day)
+
+
+def Time(hour, minute, second):
+    """Constructs an object holding a time value."""
+    return datetime.time(hour, minute, second)
+
+
+def Timestamp(year, month, day, hour, minute, second):
+    """Constructs an object holding a datetime/timestamp value."""
+    return datetime.datetime(year, month, day, hour, minute, second)
+
+
+def DateFromTicks(ticks):
+    """Constructs an object holding a date value from the given UNIX timestamp."""
+    return Date(*time.localtime(ticks)[:3])
+
+
+def TimeFromTicks(ticks):
+    """Constructs an object holding a time value from the given UNIX timestamp."""
+    return Time(*time.localtime(ticks)[3:6])
+
+
+def TimestampFromTicks(ticks):
+    """Constructs an object holding a datetime/timestamp value from the given UNIX timestamp."""
+    return Timestamp(*time.localtime(ticks)[:6])
+
+
+def Binary(value):
+    """Constructs an object capable of holding a binary (long) string value."""
+    return bytes(value)
+
+
+def time_from_java_sql_time(n):
+    dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=n)
+    return dt.time()
+
+
+def time_to_java_sql_time(t):
+    return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000 + t.microsecond // 1000
+
+
+def date_from_java_sql_date(n):
+    return datetime.date(1970, 1, 1) + datetime.timedelta(days=n)
+
+
+def date_to_java_sql_date(d):
+    if isinstance(d, datetime.datetime):
+        d = d.date()
+    td = d - datetime.date(1970, 1, 1)
+    return td.days
+
+
+def datetime_from_java_sql_timestamp(n):
+    return datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=n)
+
+
+def datetime_to_java_sql_timestamp(d):
+    td = d - datetime.datetime(1970, 1, 1)
+    return td.microseconds // 1000 + (td.seconds + td.days * 24 * 3600) * 1000
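+
+# A quick sanity check of the epoch arithmetic above (illustrative only, not
+# part of the library): Java SQL timestamps are milliseconds since the epoch,
+# so datetime_to_java_sql_timestamp(datetime.datetime(1970, 1, 2)) == 86400000
+# (one day), and datetime_from_java_sql_timestamp(86400000) round-trips it.
+# Times are millisecond offsets from midnight, e.g.
+# time_to_java_sql_time(datetime.time(0, 0, 1)) == 1000.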
+
+
+# FIXME This doesn't seem to be used anywhere in the code
+class ColumnType(object):
+
+    def __init__(self, eq_types):
+        self.eq_types = tuple(eq_types)
+        self.eq_types_set = set(eq_types)
+
+    def __eq__(self, other):
+        return other in self.eq_types_set
+
+    def __cmp__(self, other):
+        if other in self.eq_types_set:
+            return 0
+        if other < self.eq_types:
+            return 1
+        else:
+            return -1
+
+
+STRING = ColumnType(['VARCHAR', 'CHAR'])
+"""Type object that can be used to describe string-based columns."""
+
+BINARY = ColumnType(['BINARY', 'VARBINARY'])
+"""Type object that can be used to describe (long) binary columns."""
+
+NUMBER = ColumnType([
+    'INTEGER', 'UNSIGNED_INT', 'BIGINT', 'UNSIGNED_LONG', 'TINYINT', 'UNSIGNED_TINYINT',
+    'SMALLINT', 'UNSIGNED_SMALLINT', 'FLOAT', 'UNSIGNED_FLOAT', 'DOUBLE', 'UNSIGNED_DOUBLE', 'DECIMAL'
+])
+"""Type object that can be used to describe numeric columns."""
+
+DATETIME = ColumnType(['TIME', 'DATE', 'TIMESTAMP', 'UNSIGNED_TIME', 'UNSIGNED_DATE', 'UNSIGNED_TIMESTAMP'])
+"""Type object that can be used to describe date/time columns."""
+
+ROWID = ColumnType([])
+"""Only implemented for DB API 2.0 compatibility, not used."""
+
+BOOLEAN = ColumnType(['BOOLEAN'])
+"""Type object that can be used to describe boolean columns. This is a phoenixdb-specific extension."""
+
+if sys.version_info[0] < 3:
+    _long = long  # noqa: F821
+else:
+    _long = int
+
+FIELD_MAP = {
+    'bool_value': [
+        (common_pb2.BOOLEAN, None, None),
+        (common_pb2.PRIMITIVE_BOOLEAN, None, None),
+    ],
+    'string_value': [
+        (common_pb2.CHARACTER, None, None),
+        (common_pb2.PRIMITIVE_CHAR, None, None),
+        (common_pb2.STRING, None, None),
+        (common_pb2.BIG_DECIMAL, str, Decimal),
+    ],
+    'number_value': [
+        (common_pb2.INTEGER, None, int),
+        (common_pb2.PRIMITIVE_INT, None, int),
+        (common_pb2.SHORT, None, int),
+        (common_pb2.PRIMITIVE_SHORT, None, int),
+        (common_pb2.LONG, None, _long),
+        (common_pb2.PRIMITIVE_LONG, None, _long),
+        (common_pb2.BYTE, None, int),
+        (common_pb2.JAVA_SQL_TIME, time_to_java_sql_time, time_from_java_sql_time),
+        (common_pb2.JAVA_SQL_DATE, date_to_java_sql_date, date_from_java_sql_date),
+        (common_pb2.JAVA_SQL_TIMESTAMP, datetime_to_java_sql_timestamp, datetime_from_java_sql_timestamp),
+    ],
+    'bytes_value': [
+        (common_pb2.BYTE_STRING, Binary, None),
+    ],
+    'double_value': [
+        (common_pb2.DOUBLE, float, float),
+        (common_pb2.PRIMITIVE_DOUBLE, float, float)
+    ]
+}
+"""The master map that describes how to handle types, keyed by TypedData field"""
+
+REP_MAP = dict((v[0], (k, v[0], v[1], v[2])) for k in FIELD_MAP for v in FIELD_MAP[k])
+"""Flips the available types to allow for faster lookup by protobuf Rep
+
+This mapping should be structured as:
+    {
+        common_pb2.BIG_DECIMAL: ('string_value', common_pb2.BIG_DECIMAL, str, Decimal),
+        ...
+        <Rep enum>: (<field_name>, <Rep enum>, <mutate_to function>, <cast_from function>),
+    }
+"""
+
+JDBC_TO_REP = dict([
+    # These are the standard types that are used in Phoenix
+    (-6, common_pb2.BYTE),  # TINYINT
+    (5, common_pb2.SHORT),  # SMALLINT
+    (4, common_pb2.INTEGER),  # INTEGER
+    (-5, common_pb2.LONG),  # BIGINT
+    (6, common_pb2.DOUBLE),  # FLOAT
+    (8, common_pb2.DOUBLE),  # DOUBLE
+    (2, common_pb2.BIG_DECIMAL),  # NUMERIC
+    (1, common_pb2.STRING),  # CHAR
+    (91, common_pb2.JAVA_SQL_DATE),  # DATE
+    (92, common_pb2.JAVA_SQL_TIME),  # TIME
+    (93, common_pb2.JAVA_SQL_TIMESTAMP),  # TIMESTAMP
+    (-2, common_pb2.BYTE_STRING),  # BINARY
+    (-3, common_pb2.BYTE_STRING),  # VARBINARY
+    (16, common_pb2.BOOLEAN),  # BOOLEAN
+    # These are the Non-standard types defined by Phoenix
+    (19, common_pb2.JAVA_SQL_DATE),  # UNSIGNED_DATE
+    (15, common_pb2.DOUBLE),  # UNSIGNED_DOUBLE
+    (14, common_pb2.DOUBLE),  # UNSIGNED_FLOAT
+    (9, common_pb2.INTEGER),  # UNSIGNED_INT
+    (10, common_pb2.LONG),  # UNSIGNED_LONG
+    (13, common_pb2.SHORT),  # UNSIGNED_SMALLINT
+    (20, common_pb2.JAVA_SQL_TIMESTAMP),  # UNSIGNED_TIMESTAMP
+    (11, common_pb2.BYTE),  # UNSIGNED_TINYINT
+    # The following are not used by Phoenix, but some of these are used by Avatica for
+    # parameter types
+    (-7, common_pb2.BOOLEAN),  # BIT
+    (7, common_pb2.DOUBLE),  # REAL
+    (3, common_pb2.BIG_DECIMAL),  # DECIMAL
+    (12, common_pb2.STRING),  # VARCHAR
+    (-1, common_pb2.STRING),  # LONGVARCHAR
+    (-4, common_pb2.BYTE_STRING),  # LONGVARBINARY
+    (2004, common_pb2.BYTE_STRING),  # BLOB
+    (2005, common_pb2.STRING),  # CLOB
+    (-15, common_pb2.STRING),  # NCHAR
+    (-9, common_pb2.STRING),  # NVARCHAR
+    (-16, common_pb2.STRING),  # LONGNVARCHAR
+    (2011, common_pb2.STRING),  # NCLOB
+    (2009, common_pb2.STRING),  # SQLXML
+    # Returned by Avatica for Arrays in EMPTY resultsets
+    (2000, common_pb2.BYTE_STRING)  # JAVA_OBJECT
+    # These are defined by JDBC, but cannot be mapped
+    # NULL
+    # OTHER
+    # DISTINCT
+    # STRUCT
+    # ARRAY 2003 - We are handling this as a special case
+    # REF
+    # DATALINK
+    # ROWID
+    # REF_CURSOR
+    # TIME WITH TIMEZONE
+    # TIMESTAMP WITH TIMEZONE
+
+])
+"""Maps the JDBC Type IDs to Protobuf Reps"""
+
+JDBC_MAP = {}
+for k, v in JDBC_TO_REP.items():
+    JDBC_MAP[k & 0xffffffff] = REP_MAP[v]
+"""Flips the available types to allow for faster lookup by JDBC type ID
+
+It has the same format as REP_MAP, but is keyed by JDBC type ID
+"""
+
+
+class TypeHelper(object):
+
+    @staticmethod
+    def from_param(param):
+        """Retrieves a field name and functions to cast to/from based on an AvaticaParameter object
+
+        :param param:
+            Protobuf AvaticaParameter object
+
+        :returns: tuple ``(field_name, rep, mutate_to, cast_from, is_array)``
+            WHERE
+            ``field_name`` is the attribute in ``common_pb2.TypedValue``
+            ``rep`` is the common_pb2.Rep enum
+            ``mutate_to`` is the function to cast values into Phoenix values, if any
+            ``cast_from`` is the function to cast from the Phoenix value to the Python value, if any
+            ``is_array`` is True if the param expects an array instead of a scalar
+
+        :raises:
+            NotImplementedError
+        """
+        jdbc_code = param.parameter_type
+        if 2900 < jdbc_code < 3100:
+            return TypeHelper._from_jdbc(jdbc_code - 3000) + (True,)
+        else:
+            return TypeHelper._from_jdbc(jdbc_code) + (False,)
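+        # Illustrative example (not from the source): Avatica encodes ARRAY
+        # parameter types as component JDBC code + 3000, so parameter_type
+        # 3004 (ARRAY of INTEGER, JDBC code 4) resolves to
+        # ('number_value', common_pb2.INTEGER, None, int, True).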
+
+    @staticmethod
+    def from_column(column):
+        """Retrieves a field name and functions to cast to/from based on a TypedValue object
+
+        :param column:
+            Protobuf TypedValue object
+
+        :returns: tuple ``(field_name, rep, mutate_to, cast_from)``
+            WHERE
+            ``field_name`` is the attribute in ``common_pb2.TypedValue``
+            ``rep`` is the common_pb2.Rep enum
+            ``mutate_to`` is the function to cast values into Phoenix values, if any
+            ``cast_from`` is the function to cast from the Phoenix value to the Python value, if any
+
+        :raises:
+            NotImplementedError
+        """
+        if column.type.id == 2003:
+            return TypeHelper._from_jdbc(column.type.component.id)
+        else:
+            return TypeHelper._from_jdbc(column.type.id)
+
+    @staticmethod
+    def _from_jdbc(jdbc_code):
+        if jdbc_code not in JDBC_MAP:
+            # This should not happen. It's either a bug, or Avatica has added new types
+            raise NotImplementedError('JDBC TYPE CODE {} is not supported'.format(jdbc_code))
+
+        return JDBC_MAP[jdbc_code]

+ 24 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/setup.cfg

@@ -0,0 +1,24 @@
+[nosetests]
+verbosity = 2
+testmatch = ^test_.+
+where = phoenixdb/tests
+
+[build_sphinx]
+source-dir = doc
+build-dir = doc/build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/build/html
+
+[flake8]
+max-line-length = 140
+exclude = 
+	e,e3,env,venv,doc,build,dist,.tox,.idea,
+	./phoenixdb/tests/dbapi20.py,
+	./phoenixdb/avatica/proto/*_pb2.py
+
+[egg_info]
+tag_build = 
+tag_date = 0
+

+ 108 - 0
desktop/core/ext-py3/phoenixdb-1.1.0/setup.py

@@ -0,0 +1,108 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from setuptools import setup, find_packages
+import setuptools
+import sys
+
+cmdclass = {}
+
+try:
+    from sphinx.setup_command import BuildDoc
+    cmdclass['build_sphinx'] = BuildDoc
+except ImportError:
+    pass
+
+
+def readme():
+    with open('README.rst') as f:
+        return f.read()
+
+
+if setuptools.__version__ < '20.8.1':
+    # Workaround for source install on old setuptools.
+    # This won't be able to create a proper multi-version package.
+    install_requires = [
+        'protobuf>=3.0.0',
+        'requests',
+        'requests-gssapi',
+        'SQLAlchemy'
+    ]
+    if sys.version_info < (3, 6):
+        install_requires.append('gssapi<1.6.0')
+    # Don't build the docs on an old stack
+    setup_requires = []
+else:
+    install_requires = [
+        'protobuf>=3.0.0',
+        'requests',
+        'requests-gssapi',
+        'gssapi<1.6.0;python_version<"3.6"',
+        'SQLAlchemy'
+    ]
+    setup_requires = [
+        'Sphinx;python_version>="3.6"',
+    ]
+
+version = "1.1.0"
+
+setup(
+    name="phoenixdb",
+    version=version,
+    description="Phoenix database adapter for Python",
+    long_description=readme(),
+    author="Apache Software Foundation",
+    author_email="dev@phoenix.apache.org",
+    url="http://phoenix.apache.org/python.html",
+    license="Apache 2",
+    packages=find_packages(),
+    include_package_data=True,
+    cmdclass=cmdclass,
+    command_options={
+        'build_sphinx': {
+            'version': ('setup.py', version),
+            'release': ('setup.py', version),
+        },
+    },
+    classifiers=[
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+    ],
+    install_requires=install_requires,
+    extras_require={
+        'SQLAlchemy': ['SQLAlchemy'],
+    },
+    tests_require=[
+        'SQLAlchemy',
+        'nose',
+        'flake8'
+    ],
+    setup_requires=setup_requires,
+    entry_points={
+        "sqlalchemy.dialects": [
+            "phoenix = phoenixdb.sqlalchemy_phoenix:PhoenixDialect"
+        ]
+    },
+)
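+
+# With the "sqlalchemy.dialects" entry point registered above, SQLAlchemy can
+# resolve phoenix:// engine URLs to PhoenixDialect without an explicit
+# registration call, e.g. (illustrative):
+#   sqlalchemy.create_engine('phoenix://localhost:8765/')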

+ 0 - 1
desktop/core/requirements.txt

@@ -43,7 +43,6 @@ Markdown==3.1
 nose==1.3.7
 openpyxl==3.0.9
 pandas==1.4.2
-phoenixdb==1.1.0
 prompt-toolkit==2.0.10
 protobuf==3.17.0
 pyformance==0.3.2

Some files were not shown because too many files changed in this diff