
HUE-7860 [core] Integrate non-IO-blocking Python Webserver
Add eventlet 0.21.0 and enum-compat 0.0.2

Ying Chen 8 years ago
parent
commit
167bb907e6
100 changed files with 14624 additions and 0 deletions
  1. + 28 - 0  desktop/core/ext-py/enum-compat-0.0.2/PKG-INFO
  2. + 5 - 0  desktop/core/ext-py/enum-compat-0.0.2/setup.cfg
  3. + 46 - 0  desktop/core/ext-py/enum-compat-0.0.2/setup.py
  4. + 152 - 0  desktop/core/ext-py/eventlet-0.21.0/AUTHORS
  5. + 23 - 0  desktop/core/ext-py/eventlet-0.21.0/LICENSE
  6. + 4 - 0  desktop/core/ext-py/eventlet-0.21.0/MANIFEST.in
  7. + 636 - 0  desktop/core/ext-py/eventlet-0.21.0/NEWS
  8. + 99 - 0  desktop/core/ext-py/eventlet-0.21.0/PKG-INFO
  9. + 73 - 0  desktop/core/ext-py/eventlet-0.21.0/README.rst
  10. + 26 - 0  desktop/core/ext-py/eventlet-0.21.0/benchmarks/__init__.py
  11. + 117 - 0  desktop/core/ext-py/eventlet-0.21.0/benchmarks/localhost_socket.py
  12. + 86 - 0  desktop/core/ext-py/eventlet-0.21.0/benchmarks/spawn.py
  13. + 100 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/Makefile
  14. + 4 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/authors.rst
  15. + 83 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/basic_usage.rst
  16. + 4 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/common.txt
  17. + 203 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/conf.py
  18. + 112 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/design_patterns.rst
  19. + 21 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/environment.rst
  20. + 106 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/examples.rst
  21. + 10 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/history.rst
  22. + 54 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/hubs.rst
  23. BIN  desktop/core/ext-py/eventlet-0.21.0/doc/images/threading_illustration.png
  24. + 55 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/index.rst
  25. + 21 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules.rst
  26. + 27 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/backdoor.rst
  27. + 6 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/corolocal.rst
  28. + 493 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/dagpool.rst
  29. + 61 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/db_pool.rst
  30. + 5 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/debug.rst
  31. + 5 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/event.rst
  32. + 6 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/greenpool.rst
  33. + 5 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/greenthread.rst
  34. + 5 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/pools.rst
  35. + 5 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/queue.rst
  36. + 11 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/semaphore.rst
  37. + 92 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/timeout.rst
  38. + 36 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/websocket.rst
  39. + 130 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/wsgi.rst
  40. + 30 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/modules/zmq.rst
  41. + 70 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/patching.rst
  42. + 92 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/ssl.rst
  43. + 94 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/testing.rst
  44. + 30 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/threading.rst
  45. + 29 - 0  desktop/core/ext-py/eventlet-0.21.0/doc/zeromq.rst
  46. + 60 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/__init__.py
  47. + 136 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/backdoor.py
  48. + 157 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/convenience.py
  49. + 53 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/corolocal.py
  50. + 61 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/coros.py
  51. + 602 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/dagpool.py
  52. + 461 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/db_pool.py
  53. + 174 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/debug.py
  54. + 213 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/event.py
  55. + 16 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/BaseHTTPServer.py
  56. + 19 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/CGIHTTPServer.py
  57. + 37 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/MySQLdb.py
  58. + 124 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/SSL.py
  59. + 5 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/__init__.py
  60. + 1 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/crypto.py
  61. + 1 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/rand.py
  62. + 1 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/tsafe.py
  63. + 1 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/version.py
  64. + 32 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/Queue.py
  65. + 14 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/SimpleHTTPServer.py
  66. + 15 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/SocketServer.py
  67. + 1 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/__init__.py
  68. + 33 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/_socket_nodns.py
  69. + 11 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/asynchat.py
  70. + 13 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/asyncore.py
  71. + 47 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/builtin.py
  72. + 13 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/ftplib.py
  73. + 191 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/__init__.py
  74. + 1557 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/client.py
  75. + 2152 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/cookiejar.py
  76. + 691 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/cookies.py
  77. + 1266 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/server.py
  78. + 22 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/httplib.py
  79. + 111 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/os.py
  80. + 257 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/profile.py
  81. + 86 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/select.py
  82. + 34 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/selectors.py
  83. + 63 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/socket.py
  84. + 439 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/ssl.py
  85. + 135 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/subprocess.py
  86. + 113 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/thread.py
  87. + 120 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/threading.py
  88. + 6 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/time.py
  89. + 40 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/__init__.py
  90. + 4 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/error.py
  91. + 3 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/parse.py
  92. + 50 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/request.py
  93. + 3 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/response.py
  94. + 20 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib2.py
  95. + 468 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/green/zmq.py
  96. + 8 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/__init__.py
  97. + 494 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/base.py
  98. + 226 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/py2.py
  99. + 213 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/py3.py
  100. + 251 - 0  desktop/core/ext-py/eventlet-0.21.0/eventlet/greenpool.py
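A minimal sketch (not part of this commit's file list; the app and port are illustrative) of the kind of non-blocking serving the vendored eventlet package enables, using eventlet's documented monkey_patch()/listen()/wsgi.server() API::

    import eventlet

    # Cooperative green threads replace blocking OS threads: monkey_patch()
    # swaps the stdlib socket/select/thread modules for green versions.
    eventlet.monkey_patch()

    from eventlet import wsgi


    def hello_app(environ, start_response):
        # Trivial WSGI app; each request is served in its own green thread.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello from a non-blocking server\n']


    # listen() returns a green socket; wsgi.server() keeps accepting new
    # connections instead of blocking the whole process on slow clients.
    wsgi.server(eventlet.listen(('127.0.0.1', 8080)), hello_app)

The actual Hue wiring is outside this excerpt; the sketch only illustrates the server model the commit message refers to.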

+ 28 - 0
desktop/core/ext-py/enum-compat-0.0.2/PKG-INFO

@@ -0,0 +1,28 @@
+Metadata-Version: 1.1
+Name: enum-compat
+Version: 0.0.2
+Summary: enum/enum34 compatibility package
+Home-page: https://github.com/jstasiak/enum-compat
+Author: Jakub Stasiak
+Author-email: jakub@stasiak.at
+License: MIT
+Description: 
+        enum-compat
+        ===========
+        
+        This is a virtual package, its whole purpose is to install enum34 on
+        Python older than 3.4. On Python 3.4+ it's a no-op.
+        
+        
+Keywords: enum,compatibility,enum34
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5

+ 5 - 0
desktop/core/ext-py/enum-compat-0.0.2/setup.cfg

@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+

+ 46 - 0
desktop/core/ext-py/enum-compat-0.0.2/setup.py

@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from setuptools import setup
+
+has_enum = sys.version_info >= (3, 4)
+
+readme = """
+enum-compat
+===========
+
+This is a virtual package, its whole purpose is to install enum34 on
+Python older than 3.4. On Python 3.4+ it's a no-op.
+
+"""
+
+if __name__ == '__main__':
+    setup(
+        name='enum-compat',
+        version='0.0.2',
+        description='enum/enum34 compatibility package',
+        long_description=readme,
+        author='Jakub Stasiak',
+        author_email='jakub@stasiak.at',
+        url='https://github.com/jstasiak/enum-compat',
+        license='MIT',
+        zip_safe=False,
+        classifiers=[
+            'Intended Audience :: Developers',
+            'Topic :: Software Development :: Libraries',
+            'Programming Language :: Python',
+            'Programming Language :: Python :: 2',
+            'Programming Language :: Python :: 2.6',
+            'Programming Language :: Python :: 2.7',
+            'Programming Language :: Python :: 3',
+            'Programming Language :: Python :: 3.3',
+            'Programming Language :: Python :: 3.4',
+            'Programming Language :: Python :: 3.5',
+        ],
+        keywords=[
+            'enum', 'compatibility', 'enum34',
+        ],
+        install_requires=[] if has_enum else ['enum34'],
+    )
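As the setup.py above shows, enum-compat ships no code of its own: its install_requires only pulls in enum34 on Python older than 3.4. A small sketch of what that guarantees for dependent code::

    # With enum-compat installed, this import works the same everywhere:
    # on Python < 3.4 it resolves to the enum34 backport, on 3.4+ to the
    # stdlib enum module.
    from enum import Enum


    class Color(Enum):
        RED = 1
        GREEN = 2


    assert Color.RED.value == 1
    print(Color.RED)  # Color.RED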

+ 152 - 0
desktop/core/ext-py/eventlet-0.21.0/AUTHORS

@@ -0,0 +1,152 @@
+Maintainer (i.e., Who To Hassle If You Find Bugs)
+-------------------------------------------------
+Sergey Shepelev, temoto on Freenode, temotor@gmail.com
+
+Original Authors
+----------------
+* Bob Ippolito
+* Donovan Preston
+
+Contributors
+------------
+* AG Projects
+* Chris AtLee
+* R\. Tyler Ballance
+* Denis Bilenko
+* Mike Barton
+* Patrick Carlisle
+* Ben Ford
+* Andrew Godwin
+* Brantley Harris
+* Gregory Holt
+* Joe Malicki
+* Chet Murthy
+* Eugene Oden
+* radix
+* Scott Robinson
+* Tavis Rudd
+* Sergey Shepelev
+* Chuck Thier
+* Nick V
+* Daniele Varrazzo
+* Ryan Williams
+* Geoff Salmon
+* Edward George
+* Floris Bruynooghe
+* Paul Oppenheim
+* Jakub Stasiak
+* Aldona Majorek
+* Victor Sergeyev
+* David Szotten
+* Victor Stinner
+* Samuel Merritt
+* Eric Urban
+
+Linden Lab Contributors
+-----------------------
+* John Beisley
+* Tess Chu
+* Nat Goodspeed
+* Dave Kaprielian
+* Kartic Krishnamurthy
+* Bryan O'Sullivan
+* Kent Quirk
+* Ryan Williams
+
+Thanks To
+---------
+* AdamKG, giving the hint that invalid argument errors were introduced post-0.9.0
+* Luke Tucker, bug report regarding wsgi + webob
+* Taso Du Val, reproing an exception squelching bug, saving children's lives  ;-)
+* Luci Stanescu, for reporting twisted hub bug
+* Marcus Cavanaugh, for test case code that has been incredibly useful in tracking down bugs
+* Brian Brunswick, for many helpful questions and suggestions on the mailing list
+* Cesar Alaniz, for uncovering bugs of great import
+* the grugq, for contributing patches, suggestions, and use cases
+* Ralf Schmitt, for wsgi/webob incompatibility bug report and suggested fix
+* Benoit Chesneau, bug report on green.os and patch to fix it
+* Slant, better iterator implementation in tpool
+* Ambroff, nice pygtk hub example
+* Michael Carter, websocket patch to improve location handling
+* Marcin Bachry, nice repro of a bug and good diagnosis leading to the fix
+* David Ziegler, reporting issue #53
+* Favo Yang, twisted hub patch
+* Schmir, patch that fixes readline method with chunked encoding in wsgi.py, advice on patcher
+* Slide, for open-sourcing gogreen
+* Holger Krekel, websocket example small fix
+* mikepk, debugging MySQLdb/tpool issues
+* Malcolm Cleaton, patch for Event exception handling
+* Alexey Borzenkov, for finding and fixing issues with Windows error detection (#66, #69), reducing dependencies in zeromq hub (#71)
+* Anonymous, finding and fixing error in websocket chat example (#70)
+* Edward George, finding and fixing an issue in the [e]poll hubs (#74), and in convenience (#86)
+* Ruijun Luo, figuring out incorrect openssl import for wrap_ssl (#73)
+* rfk, patch to get green zmq to respect noblock flag.
+* Soren Hansen, finding and fixing issue in subprocess (#77)
+* Stefano Rivera, making tests pass in absence of postgres (#78)
+* Joshua Kwan, fixing busy-wait in eventlet.green.ssl.
+* Nick Vatamaniuc, Windows SO_REUSEADDR patch (#83)
+* Clay Gerrard, wsgi handle socket closed by client (#95)
+* Eric Windisch, zmq getsockopt(EVENTS) wake correct threads (pull request 22)
+* Raymond Lu, fixing busy-wait in eventlet.green.ssl.socket.sendall()
+* Thomas Grainger, webcrawler example small fix, "requests" library import bug report, Travis integration
+* Peter Portante, save syscalls in socket.dup(), environ[REMOTE_PORT] in wsgi
+* Peter Skirko, fixing socket.settimeout(0) bug
+* Derk Tegeler, Pre-cache proxied GreenSocket methods (Bitbucket #136)
+* David Malcolm, optional "timeout" argument to the subprocess module (Bitbucket #89)
+* David Goetz, wsgi: Allow minimum_chunk_size to be overridden on a per-request basis
+* Dmitry Orlov, websocket: accept Upgrade: websocket (lowercase)
+* Zhang Hua, profile: accumulate results between runs (Bitbucket #162)
+* Astrum Kuo, python3 compatibility fixes; greenthread.unlink() method
+* Davanum Srinivas, Python3 compatibility fixes
+* Dmitriy Kruglyak, PyPy 2.3 compatibility fix
+* Jan Grant, Michael Kerrin, second simultaneous read (GH-94)
+* Simon Jagoe, Python3 octal literal fix
+* Tushar Gohad, wsgi: Support optional headers w/ "100 Continue" responses
+* raylu, fixing operator precedence bug in eventlet.wsgi
+* Christoph Gysin, PEP 8 conformance
+* Andrey Gubarev
+* Corey Wright
+* Deva
+* Johannes Erdfelt
+* Kevin
+* QthCN
+* Steven Hardy
+* Stuart McLaren
+* Tomaz Muraus
+* ChangBo Guo(gcb), fixing typos in the documentation (GH-194)
+* Marc Abramowitz, fixing the README so it renders correctly on PyPI (GH-183)
+* Shaun Stanworth, equal chance to acquire semaphore from different greenthreads (GH-136)
+* Lior Neudorfer, Make sure SSL retries are done using the exact same data buffer
+* Sean Dague, wsgi: Provide python logging compatibility
+* Tim Simmons, Use _socket_nodns and select in dnspython support
+* Antonio Cuni, fix fd double close on PyPy
+* Seyeong Kim
+* Ihar Hrachyshka
+* Janusz Harkot
+* Fukuchi Daisuke
+* Ramakrishnan G
+* ashutosh-mishra
+* Azhar Hussain
+* Josh VanderLinden
+* Levente Polyak
+* Phus Lu
+* Collin Stocks, fixing eventlet.green.urllib2.urlopen() so it accepts cafile, capath, or cadefault arguments
+* Alexis Lee
+* Steven Erenst
+* Piët Delport
+* Alex Villacís Lasso
+* Yashwardhan Singh
+* Tim Burke
+* Ondřej Nový
+* Jarrod Johnson
+* Whitney Young
+* Matthew D. Pagel
+* Matt Yule-Bennett
+* Artur Stawiarski
+* Tal Wrii
+* Roman Podoliaka
+* Gevorg Davoian
+* Ondřej Kobližek
+* Yuichi Bando
+* Feng
+* Aayush Kasurde

+ 23 - 0
desktop/core/ext-py/eventlet-0.21.0/LICENSE

@@ -0,0 +1,23 @@
+Unless otherwise noted, the files in Eventlet are under the following MIT license:
+
+Copyright (c) 2005-2006, Bob Ippolito
+Copyright (c) 2007-2010, Linden Research, Inc.
+Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 4 - 0
desktop/core/ext-py/eventlet-0.21.0/MANIFEST.in

@@ -0,0 +1,4 @@
+recursive-include tests *.py *.crt *.key
+recursive-include doc *.rst *.txt *.py Makefile *.png
+recursive-include examples *.py *.html
+include MANIFEST.in NEWS AUTHORS LICENSE README.rst

+ 636 - 0
desktop/core/ext-py/eventlet-0.21.0/NEWS

@@ -0,0 +1,636 @@
+0.21.0
+======
+* New timeout error API: .is_timeout=True on exception object
+  It's now easy to test if network error is transient and retry is appropriate.
+  Please spread the word and invite other libraries to support this interface.
+* hubs: use monotonic clock by default (bundled package); Thanks to Roman Podoliaka and Victor Stinner
+* dns: EVENTLET_NO_GREENDNS option is back, green is still default
+* dns: hosts file was consulted after nameservers
+* ssl: RecursionError on Python3.6+; Thanks to justdoit0823@github and Gevent developers
+* wsgi: log_output=False was not disabling startup and accepted messages
+* greenio: Fixed OSError: [WinError 10038] Socket operation on nonsocket
+* dns: EAI_NODATA was removed from RFC3493 and FreeBSD
+* green.select: fix mark_as_closed() wrong number of args
+* green.zmq: socket.{recv,send}_* signatures did not match recent upstream pyzmq
+* New feature: Add zipkin tracing to eventlet
+* db_pool: proxy Connection.set_isolation_level()
+* green.zmq: support RCVTIMEO (receive timeout)
+* green.profile: Python3 compatibility; Thanks to Artur Stawiarski
+* support: upgrade bundled six to 1.10 (dbfbfc818e3d)
+* python3.6: http.client.request support chunked_encoding
+
+0.20.1
+======
+* dns: try unqualified queries as top level
+* test_import_patched_defaults bent to play with pyopenssl>=16.1.0
+* Explicit environ flag for importing eventlet.__version__ without ignoring import errors
+* Type check Semaphore, GreenPool arguments; Thanks to Matthew D. Pagel
+
+0.20.0
+======
+* IMPORTANT: removed select.poll() function
+* DNS resolving is always green with dnspython bundled in
+* greenio: only trampoline when we block
+* convenience: listen() sets SO_REUSEPORT when available; Thanks to Zhengwei Gao
+* ssl: Fix "TypeError: read() argument 2 must be read-write bytes-like object, not None"
+* greenio: _recv_loop behaviour with recv_into on closed sock
+* ipv6: getaddrinfo would fail with scope index
+* green.zmq: Support {send,recv}_{string,json,pyobj} wrappers
+* greendns: Return answers from /etc/hosts despite nameserver errors
+* patcher: fixed green existing locks fail (Python3)
+* Add DAGPool, a dependency-driven greenthread pool
+* wsgi: Unix socket address representation; Thanks to Samuel Merritt
+* tpool: isolate internal socket from default timeout; Thanks to Alex Villacís Lasso
+* wsgi: only skip Content-Type and Content-Length headers (GH-327)
+* wsgi: 400 on blank Content-Length headers (GH-334)
+* greenio: makefile related pypy socket ref counting
+* ssl: Fix recv_into blocking when reading chunks of data
+* websocket: support Gunicorn environ['gunicorn.socket']
+
+0.19.0
+======
+* ssl: IMPORTANT DoS FIX do_handshake_connect=False in server accept(); Thanks to Garth Mollett
+* patcher: patch existing threading locks; Thanks to Alexis Lee
+* green.urllib2: missing patched ssl module; Thanks to Collin RM Stocks
+* wsgi: environ[headers_raw] tuple of unmodified name: value pairs
+* test against modern pyopenssl 16.0.0 for Python 2.7+; Thanks to Victor Stinner
+* wsgi: document compatibility with python `logging`
+* Minor grammatical improvements and typo fixes to the docs; Thanks to Steven Erenst
+
+0.18.4
+======
+* wsgi: change TCP_NODELAY to TCP_QUICKACK, ignore socket error when not available
+
+0.18.3
+======
+* wsgi: Use buffered writes - fixes partial socket.send without custom
+  writelines(); Github issue #295
+* wsgi: TCP_NODELAY enabled by default
+
+0.18.2
+======
+* wsgi: Fix data loss on partial writes (socket.send); Thanks to Jakub Stasiak
+
+0.18.1
+======
+* IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1
+* patcher: Fix AttributeError in subprocess communicate()
+* greenio: Fix "TypeError: an integer is required" in sendto()
+
+0.18.0
+======
+* IMPORTANT: do not use Eventlet 0.18.0 and 0.18.1
+* greenio: Fixed a bug that could cause send() to start an endless loop on
+  ENOTCONN; Thanks to Seyeong Kim
+* wsgi: Fixed UNIX socket address being trimmed in "wsgi starting" log; Thanks
+  to Ihar Hrachyshka
+* ssl: Ported eventlet.green.OpenSSL to Python 3; Thanks to Victor Stinner
+* greenio: Made read() support buflen=-1 and added readall() (Python 3);
+  Thanks to David Szotten
+* wsgi: Made the error raised in case of chunk read failures more precise (this
+  should be backwards compatible as the new exception class,
+  wsgi.ChunkReadError, is a subclass of ValueError which was being used there
+  before); Thanks to Samuel Merritt
+* greenio: Fixed socket.recv() sometimes returning str instead of bytes on
+  Python 3; Thanks to Janusz Harkot
+* wsgi: Improved request body discarding
+* websocket: Fixed TypeError on empty websocket message (Python 3); Thanks to
+  Fukuchi Daisuke
+* subprocess: Fixed universal_newlines support
+* wsgi: Output of 0-byte chunks is now suppressed; Thanks to Samuel Merritt
+* Improved the documentation; Thanks to Ramakrishnan G, ashutosh-mishra and
+  Azhar Hussain
+* greenio: Changed GreenFileIO.write() (Python 3) to always write all data to
+  match the behavior on Python 2; Thanks to Victor Stinner
+* subprocess: Fixed missing subprocess.mswindows attribute on Python 3.5;
+  Thanks to Josh VanderLinden
+* ssl/monkey patching: Fixed a bug that would cause merely importing eventlet
+  to monkey patch the ssl module; Thanks to David Szotten
+* documentation: Added support for building plain text documentation; thanks
+  to Levente Polyak
+* greenio: Fixed handling blocking IO errors in various GreenSocket methods;
+  Thanks to Victor Stinner
+* greenio: Fixed GreenPipe ignoring the bufsize parameter on Python 2; Thanks
+  to Phus Lu
+* backdoor: Added Unix and IPv6 socket support; Thanks to Eric Urban
+
+Backwards incompatible:
+
+* monkey patching: The following select methods and selector classes are now
+  removed, instead of being left in their respective modules after patching
+  even though they are not green (this also fixes HTTPServer.serve_forever()
+  blocking whole process on Python 3):
+
+  * select.poll
+  * select.epoll
+  * select.devpoll
+  * select.kqueue
+  * select.kevent
+  * selectors.PollSelector
+  * selectors.EpollSelector
+  * selectors.DevpollSelector
+  * selectors.KqueueSelector
+
+  Additionally selectors.DefaultSelector points to a green SelectSelector
+
+* greenio: Fixed send() to no longer behave like sendall() which makes it
+  consistent with Python standard library and removes a source of very subtle
+  errors
+
+0.17.4
+======
+* ssl: incorrect initialization of default context; Thanks to stuart-mclaren
+
+0.17.3
+======
+* green.thread: Python3.3+ fixes; Thanks to Victor Stinner
+* Semaphore.acquire() accepts timeout=-1; Thanks to Victor Stinner
+
+0.17.2
+======
+* wsgi: Provide python logging compatibility; Thanks to Sean Dague
+* greendns: fix premature connection closing in DNS proxy; Thanks to Tim Simmons
+* greenio: correct fd close; Thanks to Antonio Cuni and Victor Sergeyev
+* green.ssl: HTTPS client Python 2.7.9+ compatibility
+* setup: tests.{isolated,manual} polluted top-level packages
+
+0.17.1
+======
+* greendns: fix dns.name import and Python3 compatibility
+
+0.17
+====
+* Full Python3 compatibility; Thanks to Jakub Stasiak
+* greendns: IPv6 support, improved handling of /etc/hosts; Thanks to Floris Bruynooghe
+* tpool: make sure we return results during killall; Thanks to David Szotten
+* semaphore: Don't hog a semaphore if someone else is waiting for it; Thanks to Shaun Stanworth
+* green.socket: create_connection() was wrapping all exceptions in socket.error; Thanks to Donagh McCabe
+* Make sure SSL retries are done using the exact same data buffer; Thanks to Lior Neudorfer
+* greenio: shutdown already closed sockets without error; Thanks to David Szotten
+
+0.16.1
+======
+* Wheel build 0.16.0 incorrectly shipped removed module eventlet.util.
+
+0.16.0
+======
+* Fix SSL socket wrapping and Python 2.7.9 compatibility; Thanks to Jakub Stasiak
+* Fix monkey_patch() on Python 3; Thanks to Victor Stinner
+* Fix "maximum recursion depth exceeded in GreenSocket.__del__"; Thanks to Jakub Stasiak
+* db_pool: BaseConnectionPool.clear updates .current_size #139; Thanks to Andrey Gubarev
+* Fix __str__ method on the TimeoutExpired exception class.; Thanks to Tomaz Muraus
+* hubs: drop Twisted support
+* Removed deprecated modules: api, most of coros, pool, proc, processes and util
+* Improved Python 3 compatibility (including patch by raylu); Thanks to Jakub Stasiak
+* Allow more graceful shutdown of wsgi server; Thanks to Stuart McLaren
+* wsgi.input: Make send_hundred_continue_headers() a public API; Thanks to Tushar Gohad
+* tpool: Windows compatibility, fix ResourceWarning. Thanks to Victor Stinner
+* tests: Fix timers not cleaned up on MySQL test skips; Thanks to Corey Wright
+
+0.15.2
+======
+* greenio: fixed memory leak, introduced in 0.15.1; Thanks to Michael Kerrin, Tushar Gohad
+* wsgi: Support optional headers w/ "100 Continue" responses; Thanks to Tushar Gohad
+
+0.15.1
+======
+* greenio: Fix second simultaneous read (parallel paramiko issue); Thanks to Jan Grant, Michael Kerrin
+* db_pool: customizable connection cleanup function; Thanks to Avery Fay
+
+0.15
+====
+* Python3 compatibility -- **not ready yet**; Thanks to Astrum Kuo, Davanum Srinivas, Jakub Stasiak, Victor Sergeyev
+* coros: remove Actor which was deprecated in 2010-01
+* saranwrap: remove saranwrap which was deprecated in 2010-02
+* PyPy compatibility fixes; Thanks to Dmitriy Kruglyak, Jakub Stasiak
+* green.profile: accumulate results between runs; Thanks to Zhang Hua
+* greenthread: add .unlink() method; Thanks to Astrum Kuo
+* packaging: Generate universal wheels; Thanks to Jakub Stasiak
+* queue: Make join not wait if there are no unfinished tasks; Thanks to Jakub Stasiak
+* tpool: proxy __enter__, __exit__ fixes Bitbucket-158; Thanks to Eric Urban
+* websockets: Add websockets13 support; handle lack of Upgrade header; Thanks to Edward George
+* wsgi: capitalize_response_headers option
+
+0.14
+====
+* wsgi: handle connection socket timeouts; Thanks to Paul Oppenheim
+* wsgi: close timed out client connections
+* greenio: socket pypy compatibility; Thanks to Alex Gaynor
+* wsgi: env['wsgi.input'] was returning 1 byte strings; Thanks to Eric Urban
+* green.ssl: fix NameError; Github #17; Thanks to Jakub Stasiak
+* websocket: allow "websocket" in lowercase in Upgrade header; Compatibility with current Google Chrome; Thanks to Dmitry Orlov
+* wsgi: allow minimum_chunk_size to be overridden on a per-request basis; Thanks to David Goetz
+* wsgi: configurable socket_timeout
+
+0.13
+====
+* hubs: kqueue support! Thanks to YAMAMOTO Takashi, Edward George
+* greenio: Fix AttributeError on MacOSX; Bitbucket #136; Thanks to Derk Tegeler
+* green: subprocess: Fix subprocess.communicate() block on Python 2.7; Thanks to Edward George
+* green: select: ensure that hub can .wait() at least once before timeout; Thanks to YAMAMOTO Takashi
+* tpool: single request queue to avoid deadlocks; Bitbucket pull request 31,32; Thanks to Edward George
+* zmq: pyzmq 13.x compatibility; Thanks to Edward George
+* green: subprocess: Popen.wait() accepts new `timeout` kwarg; Python 3.3 and RHEL 6.1 compatibility
+* hubs: EVENTLET_HUB can point to external modules; Thanks to Edward George
+* semaphore: support timeout for acquire(); Thanks to Justin Patrin
+* support: do not clear sys.exc_info if can be preserved (greenlet >= 0.3.2); Thanks to Edward George
+* Travis continuous integration; Thanks to Thomas Grainger, Jakub Stasiak
+* wsgi: minimum_chunk_size of last Server altered all previous (global variable); Thanks to Jakub Stasiak
+* doc: hubs: Point to the correct function in exception message; Thanks to Floris Bruynooghe
+
+0.12
+====
+* zmq: Fix 100% busy CPU in idle after .bind(PUB) (thanks to Geoff Salmon)
+* greenio: Fix socket.settimeout() did not switch back to blocking mode (thanks to Peter Skirko)
+* greenio: socket.dup() made excess fcntl syscalls (thanks to Peter Portante)
+* setup: Remove legacy --without-greenlet option and unused httplib2 dependency (thanks to Thomas Grainger)
+* wsgi: environ[REMOTE_PORT], also available in log_format, log accept event (thanks to Peter Portante)
+* tests: Support libzmq 3.0 SNDHWM option (thanks to Geoff Salmon)
+
+0.11
+====
+* ssl: Fix 100% busy CPU in socket.sendall() (thanks to Raymon Lu)
+* zmq: Return linger argument to Socket.close() (thanks to Eric Windisch)
+* tests: SSL tests were always skipped due to bug in skip_if_no_ssl decorator
+
+0.10
+====
+* greenio: Fix relative seek() (thanks to AlanP)
+* db_pool: Fix pool.put() TypeError with min_size > 1 (thanks to Jessica Qi)
+* greenthread: Prevent infinite recursion with linking to current greenthread (thanks to Edward George)
+* zmq: getsockopt(EVENTS) wakes correct threads (thanks to Eric Windisch)
+* wsgi: Handle client disconnect while sending response (thanks to Clay Gerrard)
+* hubs: Ensure that new hub greenlet is parent of old one (thanks to Edward George)
+* os: Fix waitpid() returning (0, 0) (thanks to Vishvananda Ishaya)
+* tpool: Add set_num_threads() method to set the number of tpool threads (thanks to David Ibarra)
+* threading, zmq: Fix Python 2.5 support (thanks to Floris Bruynooghe)
+* tests: tox configuration for all supported Python versions (thanks to Floris Bruynooghe)
+* tests: Fix zmq._QueueLock test in Python2.6
+* tests: Fix patcher_test on Darwin (/bin/true issue) (thanks to Edward George)
+* tests: Skip SSL tests when not available (thanks to Floris Bruynooghe)
+* greenio: Remove deprecated GreenPipe.xreadlines() method, was broken anyway
+
+0.9.17
+======
+* ZeroMQ support calling send and recv from multiple greenthreads (thanks to Geoff Salmon)
+* SSL: unwrap() sends data, and so it needs trampolining (#104 thanks to Brandon Rhodes)
+* hubs.epolls: Fix imports for exception handler (#123 thanks to Johannes Erdfelt)
+* db_pool: Fix .clear() when min_size > 0
+* db_pool: Add MySQL's insert_id() method (thanks to Peter Scott)
+* db_pool: Close connections after timeout, fix get-after-close race condition with using TpooledConnectionPool (thanks to Peter Scott)
+* threading monkey patch fixes (#115 thanks to Johannes Erdfelt)
+* pools: Better accounting of current_size in pools.Pool (#91 thanks to Brett Hoerner)
+* wsgi: environ['RAW_PATH_INFO'] with request path as received from client (thanks to dweimer)
+* wsgi: log_output flag (thanks to Juan Manuel Garcia)
+* wsgi: Limit HTTP header size (thanks to Gregory Holt)
+* wsgi: Configurable maximum URL length (thanks to Tomas Sedovic)
+
+0.9.16
+======
+* SO_REUSEADDR now correctly set.
+
+0.9.15
+======
+* ZeroMQ support without an explicit hub now implemented!  Thanks to Zed Shaw for the patch.
+* zmq module supports the NOBLOCK flag, thanks to rfk. (#76)
+* eventlet.wsgi has a debug flag which can be set to false to not send tracebacks to the client (per redbo's request)
+* Recursive GreenPipe madness forestalled by Soren Hansen (#77)
+* eventlet.green.ssl no longer busywaits on send()
+* EEXIST ignored in epoll hub (#80)
+* eventlet.listen's behavior on Windows improved, thanks to Nick Vatamaniuc (#83)
+* Timeouts raised within tpool.execute are propagated back to the caller (thanks again to redbo for being the squeaky wheel)
+
+0.9.14
+======
+* Many fixes to the ZeroMQ hub, which now requires version 2.0.10 or later.  Thanks to Ben Ford.
+* ZeroMQ hub no longer depends on pollhub, and thus works on Windows (thanks, Alexey Borzenkov)
+* Better handling of connect errors on Windows, thanks again to Alexey Borzenkov.
+* More-robust Event delivery, thanks to Malcolm Cleaton
+* wsgi.py now distinguishes between an empty query string ("") and a non-existent query string (no entry in environ).
+* wsgi.py handles ipv6 correctly (thanks, redbo)
+* Better behavior in tpool when you give it nonsensical numbers, thanks to R. Tyler for the nonsense.  :)
+* Fixed importing on 2.5 (#73, thanks to Ruijun Luo)
+* Hub doesn't hold on to invalid fds (#74, thanks to Edward George)
+* Documentation for eventlet.green.zmq, courtesy of Ben Ford
+
+0.9.13
+======
+* ZeroMQ hub, and eventlet.green.zmq make supersockets green.  Thanks to Ben Ford!
+* eventlet.green.MySQLdb added.  It's an interface to MySQLdb that uses tpool to make it appear nonblocking
+* Greenthread affinity in tpool.  Each greenthread is assigned to the same thread when using tpool, making it easier to work with non-thread-safe libraries.
+* Eventlet now depends on greenlet 0.3 or later.
+* Fixed a hang when using tpool during an import causes another import.  Thanks to mikepk for tracking that down.
+* Improved websocket draft 76 compliance, thanks to Nick V.
+* Rare greenthread.kill() bug fixed, which was probably brought about by a bugfix in greenlet 0.3.
+* Easy_installing eventlet should no longer print an ImportError about greenlet
+* Support for serving up SSL websockets, thanks to chwagssd for reporting #62
+* eventlet.wsgi properly sets 'wsgi.url_scheme' environment variable to 'https', and 'HTTPS' to 'on' if serving over ssl
+* Blocking detector uses setitimer on 2.6 or later, allowing for sub-second block detection, thanks to rtyler.
+* Blocking detector is documented now, too
+* socket.create_connection properly uses dnspython for nonblocking dns.  Thanks to rtyler.
+* Removed EVENTLET_TPOOL_DNS, nobody liked that.  But if you were using it, install dnspython instead.  Thanks to pigmej and gholt.
+* Removed _main_wrapper from greenthread, thanks to Ambroff adding keyword arguments to switch() in 0.3!
+
+0.9.12
+======
+* Eventlet no longer uses the Twisted hub if Twisted is imported -- you must call eventlet.hubs.use_hub('twistedr') if you want to use it.  This prevents strange race conditions for those who want to use both Twisted and Eventlet separately.
+* Removed circular import in twistedr.py
+* Added websocket multi-user chat example
+* Not using exec() in green modules anymore.
+* eventlet.green.socket now contains all attributes of the stdlib socket module, even those that were left out by bugs.
+* Eventlet.wsgi doesn't call print anymore, instead uses the logfiles for everything (it used to print exceptions in one place).
+* Eventlet.wsgi properly closes the connection when an error is raised
+* Better documentation on eventlet.event.Event.send_exception
+* Adding websocket.html to tarball so that you can run the examples without checking out the source
+
+0.9.10
+======
+* Greendns: if dnspython is installed, Eventlet will automatically use it to provide non-blocking DNS queries.  Set the environment variable 'EVENTLET_NO_GREENDNS' if you don't want greendns but have dnspython installed.
+* Full test suite passes on Python 2.7.
+* Tests no longer depend on simplejson for >2.6.
+* Potential-bug fixes in patcher (thanks to Schmir, and thanks to Hudson)
+* Websockets work with query strings (thanks to mcarter)
+* WSGI posthooks that get called after the request completed (thanks to gholt, nice docs, too)
+* Blocking detector merged -- use it to detect places where your code is not yielding to the hub for > 1 second.
+* tpool.Proxy can wrap callables
+* Tweaked Timeout class to do something sensible when True is passed to the constructor
+
+0.9.9
+=====
+* A fix for monkeypatching on systems with psycopg version 2.0.14.
+* Improved support for chunked transfers in wsgi, plus a bunch of tests from schmir (ported from gevent by redbo)
+* A fix for the twisted hub from Favo Yang
+
+0.9.8
+=====
+* Support for psycopg2's asynchronous mode, from Daniele Varrazzo
+* websocket module is now part of core Eventlet with 100% unit test coverage thanks to Ben Ford.  See its documentation at http://eventlet.net/doc/modules/websocket.html
+* Added wrap_ssl convenience method, meaning that we truly no longer need api or util modules.
+* Multiple-reader detection code protects against the common mistake of having multiple greenthreads read from the same socket at the same time, which can be overridden if you know what you're doing.
+* Cleaner monkey_patch API: the "all" keyword is no longer necessary.
+* Pool objects have a more convenient constructor -- no more need to subclass
+* amajorek's reimplementation of GreenPipe
+* Many bug fixes, major and minor.
+
+0.9.7
+=====
+* GreenPipe is now a context manager (thanks, quad)
+* tpool.Proxy supports iterators properly
+* bug fixes in eventlet.green.os (thanks, Benoit)
+* much code cleanup from Tavis
+* a few more example apps
+* multitudinous improvements in Py3k compatibility from amajorek
+
+
+0.9.6
+=====
+* new EVENTLET_HUB environment variable allows you to select a hub without code
+* improved GreenSocket and GreenPipe compatibility with stdlib
+* bugfixes on GreenSocket and GreenPipe objects
+* code coverage increased across the board
+* Queue resizing
+* internal DeprecationWarnings largely eliminated
+* tpool is now reentrant (i.e., can call tpool.execute(tpool.execute(foo)))
+* more reliable access to unpatched modules reduces some race conditions when monkeypatching
+* completely threading-compatible corolocal implementation, plus tests and enthusiastic adoption
+* tests stomp on each others' toes less
+* performance improvements in timers, hubs, greenpool
+* Greenlet-aware profile module courtesy of CCP
+* support for select26 module's epoll
+* better PEP-8 compliance and import cleanup
+* new eventlet.serve convenience function for easy TCP servers
+
+
+0.9.5
+=====
+* support psycopg in db_pool
+* smart patcher that does the right patching when importing without needing to understand plumbing of patched module
+* patcher.monkey_patch() method replacing util.wrap_*
+* monkeypatch threading support
+* removed api.named
+* imported timeout module from gevent, replace exc_after and with_timeout()
+* replace call_after with spawn_after; this is so that users don't see the Timer class
+* added cancel() method to GreenThread to support the semantic of "abort if not already in the middle of something"
+* eventlet.green.os with patched read() and write(), etc
+* moved stuff from wrap_pipes_with_coroutine_pipe into green.os
+* eventlet.green.subprocess instead of eventlet.processes
+* improve patching docs, explaining more about patcher and why you'd use eventlet.green
+* better documentation on greenpiles
+* deprecate api.py completely
+* deprecate util.py completely
+* deprecate saranwrap
+* performance improvements in the hubs
+* much better documentation overall
+* new convenience functions: eventlet.connect and eventlet.listen.  Thanks, Sergey!
+
+
+0.9.4
+=====
+* Deprecated coros.Queue and coros.Channel (use queue.Queue instead)
+* Added putting and getting methods to queue.Queue.
+* Added eventlet.green.Queue which is a greened clone of stdlib Queue, along with stdlib tests.
+* Changed __init__.py so that the version number is readable even if greenlet's not installed.
+* Bugfixes in wsgi, greenpool
+
+0.9.3
+=====
+
+* Moved primary api module to __init__ from api.  It shouldn't be necessary to import eventlet.api anymore; import eventlet should do the same job.
+* Proc module deprecated in favor of greenthread
+* New module greenthread, with new class GreenThread.
+* New GreenPool class that replaces pool.Pool.
+* Deprecated proc module (use greenthread module instead)
+* tpooled gethostbyname is configurable via environment variable EVENTLET_TPOOL_GETHOSTBYNAME
+* Removed greenio.Green_fileobject and refactored the code therein to be more efficient.  Only call makefile() on sockets now; makeGreenFile() is deprecated.  The main loss here is that of the readuntil method.  Also, Green_fileobjects used to be auto-flushing; flush() must be called explicitly now.
+* Added epoll support
+* Improved documentation across the board.
+* New queue module, API-compatible with stdlib Queue
+* New debug module, used for enabling verbosity within Eventlet that can help debug applications or Eventlet itself.
+* Bugfixes in tpool, green.select, patcher
+* Deprecated coros.execute (use eventlet.spawn instead)
+* Deprecated coros.semaphore (use semaphore.Semaphore or semaphore.BoundedSemaphore instead)
+* Moved coros.BoundedSemaphore to semaphore.BoundedSemaphore
+* Moved coros.Semaphore to semaphore.Semaphore
+* Moved coros.event to event.Event
+* Deprecated api.tcp_listener, api.connect_tcp, api.ssl_listener
+* Moved get_hub, use_hub, get_default_hub from eventlet.api to eventlet.hubs
+* Renamed libevent hub to pyevent.
+* Removed previously-deprecated features tcp_server, GreenSSL, erpc, and trap_errors.
+* Removed saranwrap as an option for making db connections nonblocking in db_pool.
+
+0.9.2
+=====
+
+* Bugfix for wsgi.py where it was improperly expecting the environ variable to be a constant when passed to the application.
+* Tpool.py now passes its tests on Windows.
+* Fixed minor performance issue in wsgi.
+
+0.9.1
+=====
+
+* PyOpenSSL is no longer required for Python 2.6: use the eventlet.green.ssl module. 2.5 and 2.4 still require PyOpenSSL.
+* Cleaned up the eventlet.green packages and their associated tests, this should result in fewer version-dependent bugs with these modules.
+* PyOpenSSL is now fully wrapped in eventlet.green.OpenSSL; using it is therefore more consistent with using other green modules.
+* Documentation on using SSL added.
+* New green modules: asyncore, asynchat, SimpleHTTPServer, CGIHTTPServer, ftplib.
+* Fuller thread/threading compatibility: patching threadlocal with corolocal so coroutines behave even more like threads.
+* Improved Windows compatibility for tpool.py
+* With-statement compatibility for pools.Pool objects.
+* Refactored copyrights in the files, added LICENSE and AUTHORS files.
+* Added support for logging x-forwarded-for header in wsgi.
+* api.tcp_server is now deprecated, will be removed in a future release.
+* Added instructions on how to generate coverage reports to the documentation.
+* Renamed GreenFile to Green_fileobject, to better reflect its purpose.
+* Deprecated erpc method in tpool.py
+* Bug fixes in: wsgi.py, twistedr.py, poll.py, greenio.py, util.py, select.py, processes.py, selects.py
+
+0.9.0
+=====
+
+* Full-duplex sockets (simultaneous readers and writers in the same process).
+* Remove modules that distract from the core mission of making it straightforward to write event-driven networking apps:
+    httpd, httpc, channel, greenlib, httpdate, jsonhttp, logutil
+* Removed test dependency on sqlite, using nose instead.
+* Marked known-broken tests using nose's mechanism (most of these are not broken but are simply run in the incorrect context, such as threading-related tests that are incompatible with the libevent hub).
+* Remove copied code from python standard libs (in tests).
+* Added eventlet.patcher which can be used to import "greened" modules.
+
+0.8.16
+======
+* GreenSSLObject properly masks ZeroReturnErrors with an empty read; with unit test.
+* Fixed 2.6 SSL compatibility issue.
+
+0.8.15
+======
+
+* GreenSSL object no longer converts ZeroReturnErrors into empty reads, because that is more compatible with the underlying SSLConnection object.
+* Fixed issue caused by SIGCHLD handler in processes.py
+* Stopped supporting string exceptions in saranwrap and fixed a few test failures.
+
+0.8.14
+======
+* Fixed some more Windows compatibility problems, resolving EVT-37 :
+http://jira.secondlife.com/browse/EVT-37
+* waiting() method on Pool class, which was lost when the Pool implementation
+replaced CoroutinePool.
+
+0.8.13
+======
+* 2.6 SSL compatibility patch by Marcus Cavanaugh.
+* Added greenlet and pyopenssl as dependencies in setup.py.
+
+0.8.12
+======
+
+* The ability to resize() pools of coroutines, which was lost when the
+Pool implementation replaced CoroutinePool.
+* Fixed Cesar's issue with SSL connections, and furthermore did a
+complete overhaul of SSL handling in eventlet so that it's much closer
+to the behavior of the built-in libraries.  In particular, users of
+GreenSSL sockets must now call shutdown() before close(), exactly
+like SSL.Connection objects.
+* A small patch that makes Eventlet work on Windows.  This is the first
+release of Eventlet that works on Windows.
+
+0.8.11
+======
+
+Eventlet can now run on top of twisted reactor. Twisted-based hub is enabled automatically if
+twisted.internet.reactor is imported. It is also possible to "embed" eventlet into a twisted
+application via eventlet.twistedutil.join_reactor. See the examples for details.
+
+A new package, eventlet.twistedutil, is added that makes integration of twisted and eventlet
+easier. It has a block_on function that allows waiting for a Deferred to fire and it wraps
+twisted's Protocol in a synchronous interface. This is similar to and is inspired by Christopher
+Armstrong's corotwine library. Thanks to Dan Pascu for reviewing the package.
+
+Another new package, eventlet.green, was added to provide some of the standard modules
+that are fixed not to block other greenlets. This is an alternative to monkey-patching
+the socket, which is impossible to do if you are running twisted reactor.
+The package includes socket, httplib, urllib2.
+
+Much of the core functionality has been refactored and cleaned up, including the removal
+of eventlet.greenlib. This means that it is now possible to use plain greenlets without
+modification in eventlet, and the subclasses of greenlet instead of the old
+eventlet.greenlib.GreenletContext. Calling eventlet.api.get_hub().switch() now checks to
+see whether the current greenlet has a "switch_out" method and calls it if so, providing the
+same functionality that the GreenletContext.swap_out used to. The swap_in behavior can be
+duplicated by overriding the switch method, and the finalize functionality can be duplicated
+by having a try: finally: block around the greenlet's main implementation. The eventlet.backdoor
+module has been ported to this new scheme, although its signature had to change slightly so
+existing code that used the backdoor will have to be modified.
+
+A number of bugs related to improper scheduling of switch calls have been fixed.
+The fixed functions and classes include api.trampoline, api.sleep, coros.event,
+coros.semaphore, coros.queue.
+
+Many methods of greenio.GreenSocket were fixed to make its behavior more like that of a regular
+socket. Thanks to Marcin Bachry for fixing GreenSocket.dup to preserve the timeout.
+
+Added proc module which provides an easy way to subscribe to coroutine's results. This makes
+it easy to wait for a single greenlet or for a set of greenlets to complete.
+
+wsgi.py now supports chunked transfer requests (patch by Mike Barton)
+
+The following modules were deprecated or removed because they were broken:
+hubs.nginx, hubs.libev, support.pycurls, support.twisteds, cancel method of coros.event class
+
+The following classes are still present but will be removed in the future version:
+- channel.channel (use coros.Channel)
+- coros.CoroutinePool (use pool.Pool)
+
+saranwrap.py now correctly closes the child process when the referring object is deleted,
+received some fixes to its detection of child process death, now correctly deals with the in
+keyword, and it is now possible to use coroutines in a non-blocking fashion in the child process.
+
+Time-based expiry added to db_pool.  This adds the ability to expire connections both by idleness
+and also by total time open.  There is also a connection timeout option.
+
+A small bug in httpd's error method was fixed.
+
+Python 2.3 is no longer supported.
+
+A number of tests were added along with a script to run all of them for all the configurations.
+The script generates an html page with the results.
+
+Thanks to Brian Brunswick for investigation of popen4 badness (eventlet.process)
+Thanks to Marcus Cavanaugh for pointing out some coros.queue(0) bugs.
+
+The twisted integration as well as many other improvements were funded by AG Projects (http://ag-projects.com), thanks!
+
+0.8.x
+=====
+
+Fix a CPU leak that would cause the poll hub to consume 100% CPU in certain conditions, for example the echoserver example. (Donovan Preston)
+
+Fix the libev hub to match libev's callback signature. (Patch by grugq)
+
+Add a backlog argument to api.tcp_listener (Patch by grugq)
+
+0.7.x
+=====
+
+Fix a major memory leak when using the libevent or libev hubs. Timers were not being removed from the hub after they fired. (Thanks Agusto Becciu and the grugq). Also, make it possible to call wrap_socket_with_coroutine_socket without using the threadpool to make dns operations non-blocking (Thanks the grugq).
+
+It's now possible to use eventlet's SSL client to talk to eventlet's SSL server. (Thanks to Ryan Williams)
+
+Fixed a major CPU leak when using select hub. When adding a descriptor to the hub, entries were made in all three dictionaries, readers, writers, and exc, even if the callback is None. Thus every fd would be passed into all three lists when calling select regardless of whether there was a callback for that event or not. When reading the next request out of a keepalive socket, the socket would come back as ready for writing, the hub would notice the callback is None and ignore it, and then loop as fast as possible consuming CPU.
+
+0.6.x
+=====
+
+Fixes some long-standing bugs where sometimes failures in accept() or connect() would cause the coroutine that was waiting to be double-resumed, most often resulting in SwitchingToDeadGreenlet exceptions as well as weird tuple-unpacking exceptions in the CoroutinePool main loop.
+
+0.6.1: Added eventlet.tpool.killall. Blocks until all of the threadpool threads have been told to exit and join()ed. Meant to be used to clean up the threadpool on exit or if calling execv. Used by Spawning.
+
+0.5.x
+=====
+
+"The Pycon 2008 Refactor": The first release which incorporates libevent support. Also comes with significant refactoring and code cleanup, especially to the eventlet.wsgi http server. Docstring coverage is much higher and there is new extensive documentation: http://wiki.secondlife.com/wiki/Eventlet/Documentation
+
+The point releases of 0.5.x fixed some bugs in the wsgi server, most notably handling of Transfer-Encoding: chunked; previously, it would happily send chunked encoding to clients which asked for HTTP/1.0, which isn't legal.
+
+0.2
+=====
+
+Initial re-release of forked linden branch.
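The 0.21.0 entry at the top of this changelog introduces the is_timeout attribute on timeout exceptions. A hedged sketch of the retry pattern it enables (call_with_retry is an illustrative helper, not an eventlet API)::

    import eventlet


    def call_with_retry(fn, retries=3):
        """Retry fn() when the failure is a transient network timeout."""
        for attempt in range(retries):
            try:
                return fn()
            except Exception as exc:
                # Per the 0.21.0 API, timeout exceptions expose is_timeout;
                # getattr() keeps this safe for exceptions that lack it.
                if getattr(exc, 'is_timeout', False) and attempt < retries - 1:
                    eventlet.sleep(0.1 * (attempt + 1))  # simple backoff
                    continue
                raise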

+ 99 - 0
desktop/core/ext-py/eventlet-0.21.0/PKG-INFO

@@ -0,0 +1,99 @@
+Metadata-Version: 1.1
+Name: eventlet
+Version: 0.21.0
+Summary: Highly concurrent networking library
+Home-page: http://eventlet.net
+Author: Linden Lab
+Author-email: eventletdev@lists.secondlife.com
+License: UNKNOWN
+Description: Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.
+        
+        It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
+        
+        It's easy to get started using Eventlet, and easy to convert existing
+        applications to use it.  Start off by looking at the `examples`_,
+        `common design patterns`_, and the list of `basic API primitives`_.
+        
+        .. _examples: http://eventlet.net/doc/examples.html
+        .. _common design patterns: http://eventlet.net/doc/design_patterns.html
+        .. _basic API primitives: http://eventlet.net/doc/basic_usage.html
+        
+        
+        Quick Example
+        ===============
+        
+        Here's something you can try right on the command line::
+        
+            % python
+            >>> import eventlet
+            >>> from eventlet.green import urllib2
+            >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net')
+            >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com')
+            >>> gt2.wait()
+            >>> gt.wait()
+        
+        
+        Getting Eventlet
+        ==================
+        
+        The easiest way to get Eventlet is to use pip::
+        
+          pip install -U eventlet
+        
+        To install the latest development version once::
+        
+          pip install -U https://github.com/eventlet/eventlet/archive/master.zip
+        
+        
+        Building the Docs Locally
+        =========================
+        
+        To build a complete set of HTML documentation, you must have Sphinx, which can be found at http://sphinx.pocoo.org/ (or installed with `pip install Sphinx`)::
+        
+          cd doc
+          make html
+        
+        The built html files can be found in doc/_build/html afterward.
+        
+        
+        Twisted
+        =======
+        
+        Eventlet had a Twisted hub in the past, but community interest in this integration has dropped over time;
+        it is no longer supported, so with apologies for any inconvenience we have discontinued Twisted integration.
+        
+        If you have a project that uses Eventlet with Twisted, your options are:
+        
+        * use last working release eventlet==0.14
+        * start a new project with only Twisted hub code, identify and fix problems. As of eventlet 0.13, `EVENTLET_HUB` environment variable can point to external modules.
+        * fork Eventlet, revert Twisted removal, identify and fix problems. This work may be merged back into the main project.
+        
+        Apologies for any inconvenience.
+        
+        
+        Flair
+        =====
+        
+        .. image:: https://travis-ci.org/eventlet/eventlet.svg?branch=master
+            :target: https://travis-ci.org/eventlet/eventlet
+        
+        .. image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg
+            :target: https://codecov.io/gh/eventlet/eventlet
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries :: Python Modules

+ 73 - 0
desktop/core/ext-py/eventlet-0.21.0/README.rst

@@ -0,0 +1,73 @@
+Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.
+
+It uses epoll or libevent for highly scalable non-blocking I/O.  Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O.  The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
+
+It's easy to get started using Eventlet, and easy to convert existing
+applications to use it.  Start off by looking at the `examples`_,
+`common design patterns`_, and the list of `basic API primitives`_.
+
+.. _examples: http://eventlet.net/doc/examples.html
+.. _common design patterns: http://eventlet.net/doc/design_patterns.html
+.. _basic API primitives: http://eventlet.net/doc/basic_usage.html
+
+
+Quick Example
+===============
+
+Here's something you can try right on the command line::
+
+    % python
+    >>> import eventlet
+    >>> from eventlet.green import urllib2
+    >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net')
+    >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com')
+    >>> gt2.wait()
+    >>> gt.wait()
+
+
+Getting Eventlet
+==================
+
+The easiest way to get Eventlet is to use pip::
+
+  pip install -U eventlet
+
+        To install the latest development version once::
+
+  pip install -U https://github.com/eventlet/eventlet/archive/master.zip
+
+
+Building the Docs Locally
+=========================
+
+To build a complete set of HTML documentation, you must have Sphinx, which can be found at http://sphinx.pocoo.org/ (or installed with `pip install Sphinx`)::
+
+  cd doc
+  make html
+
+The built html files can be found in doc/_build/html afterward.
+
+
+Twisted
+=======
+
+Eventlet once had a Twisted hub, but community interest in that integration dropped
+over time; it is no longer supported, so, with apologies for any inconvenience,
+Twisted integration has been discontinued.
+
+If you have a project that uses Eventlet with Twisted, your options are:
+
+* use the last working release, eventlet==0.14
+* start a new project containing only the Twisted hub code, then identify and fix problems. As of eventlet 0.13, the `EVENTLET_HUB` environment variable can point to external modules.
+* fork Eventlet, revert the Twisted removal, and identify and fix problems. This work may be merged back into the main project.
+
+
+Flair
+=====
+
+.. image:: https://travis-ci.org/eventlet/eventlet.svg?branch=master
+    :target: https://travis-ci.org/eventlet/eventlet
+
+.. image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg
+    :target: https://codecov.io/gh/eventlet/eventlet

+ 26 - 0
desktop/core/ext-py/eventlet-0.21.0/benchmarks/__init__.py

@@ -0,0 +1,26 @@
+import gc
+import timeit
+import random
+
+from eventlet.support import six
+
+
+def measure_best(repeat, iters,
+                 common_setup='pass',
+                 common_cleanup=lambda: None,
+                 *funcs):
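+    """Time each callable in *funcs* and return {func: best_time}.
+
+    Each function is timed for `iters` iterations per round, over `repeat`
+    rounds run in shuffled order; the minimum round time per function is
+    kept.  `common_setup` is passed to timeit.Timer, and `common_cleanup`
+    is called after timing each function.
+    """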
+    funcs = list(funcs)
+    results = dict([(f, []) for f in funcs])
+
+    for i in six.moves.range(repeat):
+        random.shuffle(funcs)
+        for func in funcs:
+            gc.collect()
+            t = timeit.Timer(func, setup=common_setup)
+            results[func].append(t.timeit(iters))
+            common_cleanup()
+
+    best_results = {}
+    for func, times in six.iteritems(results):
+        best_results[func] = min(times)
+    return best_results

+ 117 - 0
desktop/core/ext-py/eventlet-0.21.0/benchmarks/localhost_socket.py

@@ -0,0 +1,117 @@
+"""Benchmark evaluating eventlet's performance at speaking to itself over a localhost socket."""
+from __future__ import print_function
+
+import socket
+import threading
+
+import benchmarks
+import eventlet
+import eventlet.green.socket
+from eventlet import debug
+from eventlet.support import six
+
+debug.hub_exceptions(True)
+
+
+BYTES = 1000
+SIZE = 1
+CONCURRENCY = 50
+TRIES = 5
+
+
+def reader(sock):
+    expect = BYTES
+    while expect > 0:
+        d = sock.recv(min(expect, SIZE))
+        expect -= len(d)
+
+
+def writer(addr, socket_impl):
+    sock = socket_impl(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect(addr)
+    sent = 0
+    while sent < BYTES:
+        d = b'xy' * max(min(SIZE // 2, BYTES - sent), 1)
+        sock.sendall(d)
+        sent += len(d)
+
+
+def green_accepter(server_sock, pool):
+    for i in six.moves.range(CONCURRENCY):
+        sock, addr = server_sock.accept()
+        pool.spawn_n(reader, sock)
+
+
+def heavy_accepter(server_sock, pool):
+    for i in six.moves.range(CONCURRENCY):
+        sock, addr = server_sock.accept()
+        t = threading.Thread(None, reader, "reader thread", (sock,))
+        t.start()
+        pool.append(t)
+
+
+def launch_green_threads():
+    pool = eventlet.GreenPool(CONCURRENCY * 2 + 1)
+    server_sock = eventlet.green.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    server_sock.bind(('localhost', 0))
+    server_sock.listen(50)
+    addr = ('localhost', server_sock.getsockname()[1])
+    pool.spawn_n(green_accepter, server_sock, pool)
+    for i in six.moves.range(CONCURRENCY):
+        pool.spawn_n(writer, addr, eventlet.green.socket.socket)
+    pool.waitall()
+
+
+def launch_heavy_threads():
+    threads = []
+    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    server_sock.bind(('localhost', 0))
+    server_sock.listen(50)
+    addr = ('localhost', server_sock.getsockname()[1])
+    accepter_thread = threading.Thread(
+        None, heavy_accepter, "accepter thread", (server_sock, threads))
+    accepter_thread.start()
+    threads.append(accepter_thread)
+    for i in six.moves.range(CONCURRENCY):
+        client_thread = threading.Thread(None, writer, "writer thread", (addr, socket.socket))
+        client_thread.start()
+        threads.append(client_thread)
+    for t in threads:
+        t.join()
+
+
+if __name__ == "__main__":
+    import optparse
+    parser = optparse.OptionParser()
+    parser.add_option('--compare-threading', action='store_true', dest='threading', default=False)
+    parser.add_option('-b', '--bytes', type='int', dest='bytes',
+                      default=BYTES)
+    parser.add_option('-s', '--size', type='int', dest='size',
+                      default=SIZE)
+    parser.add_option('-c', '--concurrency', type='int', dest='concurrency',
+                      default=CONCURRENCY)
+    parser.add_option('-t', '--tries', type='int', dest='tries',
+                      default=TRIES)
+
+    opts, args = parser.parse_args()
+    BYTES = opts.bytes
+    SIZE = opts.size
+    CONCURRENCY = opts.concurrency
+    TRIES = opts.tries
+
+    funcs = [launch_green_threads]
+    if opts.threading:
+        funcs = [launch_green_threads, launch_heavy_threads]
+    results = benchmarks.measure_best(TRIES, 3,
+                                      lambda: None, lambda: None,
+                                      *funcs)
+    print("green:", results[launch_green_threads])
+    if opts.threading:
+        print("threads:", results[launch_heavy_threads])
+        print("%", (results[launch_green_threads] - results[launch_heavy_threads]
+                    ) / results[launch_heavy_threads] * 100)

+ 86 - 0
desktop/core/ext-py/eventlet-0.21.0/benchmarks/spawn.py

@@ -0,0 +1,86 @@
+"""Compare spawn to spawn_n"""
+from __future__ import print_function
+
+import eventlet
+import benchmarks
+
+
+def cleanup():
+    eventlet.sleep(0.2)
+
+
+iters = 10000
+best = benchmarks.measure_best(
+    5, iters,
+    'pass',
+    cleanup,
+    eventlet.sleep)
+print("eventlet.sleep (main)", best[eventlet.sleep])
+
+gt = eventlet.spawn(
+    benchmarks.measure_best, 5, iters,
+    'pass',
+    cleanup,
+    eventlet.sleep)
+best = gt.wait()
+print("eventlet.sleep (gt)", best[eventlet.sleep])
+
+
+def dummy(i=None):
+    return i
+
+
+def run_spawn():
+    eventlet.spawn(dummy, 1)
+
+
+def run_spawn_n():
+    eventlet.spawn_n(dummy, 1)
+
+
+def run_spawn_n_kw():
+    eventlet.spawn_n(dummy, i=1)
+
+
+best = benchmarks.measure_best(
+    5, iters,
+    'pass',
+    cleanup,
+    run_spawn_n,
+    run_spawn,
+    run_spawn_n_kw)
+print("eventlet.spawn", best[run_spawn])
+print("eventlet.spawn_n", best[run_spawn_n])
+print("eventlet.spawn_n(**kw)", best[run_spawn_n_kw])
+print("%% %0.1f" % ((best[run_spawn] - best[run_spawn_n]) / best[run_spawn_n] * 100))
+
+pool = None
+
+
+def setup():
+    global pool
+    pool = eventlet.GreenPool(iters)
+
+
+def run_pool_spawn():
+    pool.spawn(dummy, 1)
+
+
+def run_pool_spawn_n():
+    pool.spawn_n(dummy, 1)
+
+
+def cleanup_pool():
+    pool.waitall()
+
+
+best = benchmarks.measure_best(
+    3, iters,
+    setup,
+    cleanup_pool,
+    run_pool_spawn,
+    run_pool_spawn_n,
+)
+print("eventlet.GreenPool.spawn", best[run_pool_spawn])
+print("eventlet.GreenPool.spawn_n", best[run_pool_spawn_n])
+print("%% %0.1f" % ((best[run_pool_spawn] - best[run_pool_spawn_n]) / best[run_pool_spawn_n] * 100))

+ 100 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/Makefile

@@ -0,0 +1,100 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = PYTHONPATH=../:$(PYTHONPATH) sphinx-build
+PAPER         =
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean text html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest coverage
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  text      to make text files"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+	@echo "  coverage  to generate a docstring coverage report"
+
+clean:
+	-rm -rf _build/*
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) _build/text
+	@echo
+	@echo "Build finished. The text files are in _build/text."
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html
+	@echo
+	@echo "Build finished. The HTML pages are in _build/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in _build/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in _build/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in _build/qthelp, like this:"
+	@echo "# qcollectiongenerator _build/qthelp/Eventlet.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile _build/qthelp/Eventlet.qhc"
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in _build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes
+	@echo
+	@echo "The overview file is in _build/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in _build/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in _build/doctest/output.txt."
+
+coverage:
+	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) _build/coverage
+	@echo "Coverage report finished, look at the " \
+		  "results in _build/coverage/python.txt."

+ 4 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/authors.rst

@@ -0,0 +1,4 @@
+Authors
+=======
+
+.. include:: ../AUTHORS

+ 83 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/basic_usage.rst

@@ -0,0 +1,83 @@
+Basic Usage
+=============
+
+If this is your first time using Eventlet, you may find the worked examples in the :ref:`design-patterns` document to be a good starting point.
+
+Eventlet is built around the concept of green threads (i.e. coroutines, we use the terms interchangeably) that are launched to do network-related work.  Green threads differ from normal threads in two main ways:
+
+* Green threads are so cheap they are nearly free.  You do not have to conserve green threads like you would normal threads.  In general, there will be at least one green thread per network connection.
+* Green threads cooperatively yield to each other instead of being preemptively scheduled.  The major advantage of this behavior is that shared data structures don't need locks: another green thread can only access a data structure after the current one explicitly yields.  It is also possible to inspect primitives such as queues to see if they have any pending data.
+
+Primary API
+===========
+
+The design goal for Eventlet's API is simplicity and readability.  You should be able to read its code and understand what it's doing.  Fewer lines of code are preferred over excessively clever implementations.  `Like Python itself <http://www.python.org/dev/peps/pep-0020/>`_, there should be one, and only one obvious way to do it in Eventlet!
+
+Though Eventlet has many modules, much of the most-used stuff is accessible simply by doing ``import eventlet``.  Here's a quick summary of the functionality available in the ``eventlet`` module, with links to more verbose documentation on each.
+
+Greenthread Spawn
+-----------------------
+
+.. function:: eventlet.spawn(func, *args, **kw)
+   
+   This launches a greenthread to call *func*.  Spawning off multiple greenthreads gets work done in parallel.  The return value from ``spawn`` is a :class:`greenthread.GreenThread` object, which can be used to retrieve the return value of *func*.  See :func:`spawn <eventlet.greenthread.spawn>` for more details.
+   
+.. function:: eventlet.spawn_n(func, *args, **kw)
+   
+   The same as :func:`spawn`, but it's not possible to know how the function terminated (i.e. no return value or exceptions).  This makes execution faster.  See :func:`spawn_n <eventlet.greenthread.spawn_n>` for more details.
+
+.. function:: eventlet.spawn_after(seconds, func, *args, **kw)
+   
+    Spawns *func* after *seconds* have elapsed; a delayed version of :func:`spawn`.   To abort the spawn and prevent *func* from being called, call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`.  See :func:`spawn_after <eventlet.greenthread.spawn_after>` for more details.
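+
+A minimal sketch of all three spawn variants (the ``work`` function and
+the half-second delay are arbitrary)::
+
+    import eventlet
+
+    def work(n):
+        return n * 2
+
+    gt = eventlet.spawn(work, 21)
+    print(gt.wait())                  # 42, collected from the GreenThread
+    eventlet.spawn_n(work, 21)        # fire-and-forget, result discarded
+    later = eventlet.spawn_after(0.5, work, 21)
+    later.cancel()                    # prevent work() from ever running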
+
+Greenthread Control
+-----------------------
+
+.. function:: eventlet.sleep(seconds=0)
+
+    Suspends the current greenthread and allows others a chance to process.  See :func:`sleep <eventlet.greenthread.sleep>` for more details.
+
+.. class:: eventlet.GreenPool
+
+   Pools control concurrency.  It's very common in applications to want to consume only a finite amount of memory, or to restrict the amount of connections that one part of the code holds open so as to leave more for the rest, or to behave consistently in the face of unpredictable input data.  GreenPools provide this control.  See :class:`GreenPool <eventlet.greenpool.GreenPool>` for more on how to use these.
+
+.. class:: eventlet.GreenPile
+
+    GreenPile objects represent chunks of work.  In essence a GreenPile is an iterator that can be stuffed with work, and the results read out later. See :class:`GreenPile <eventlet.greenpool.GreenPile>` for more details.
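+
+    A tiny sketch (the ``double`` function is just a stand-in)::
+
+        import eventlet
+
+        def double(n):
+            return n * 2
+
+        pile = eventlet.GreenPile()
+        for x in (1, 2, 3):
+            pile.spawn(double, x)
+        print(list(pile))             # [2, 4, 6], results in spawn order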
+    
+.. class:: eventlet.Queue
+
+    Queues are a fundamental construct for communicating data between execution units.  Eventlet's Queue class is used to communicate between greenthreads, and provides a bunch of useful features for doing that.  See :class:`Queue <eventlet.queue.Queue>` for more details.
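+
+    A minimal producer/consumer sketch::
+
+        import eventlet
+
+        q = eventlet.Queue()
+
+        def producer():
+            q.put("hello")
+
+        eventlet.spawn_n(producer)
+        print(q.get())                # blocks until the producer runs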
+    
+.. class:: eventlet.Timeout
+
+    This class is a way to add timeouts to anything.  It raises *exception* in the current greenthread after *timeout* seconds.  When *exception* is omitted or ``None``, the Timeout instance itself is raised.
+    
+    Timeout objects are context managers, and so can be used in with statements.
+    See :class:`Timeout <eventlet.timeout.Timeout>` for more details.
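+
+    A minimal sketch as a context manager (the one-second limit is arbitrary)::
+
+        import eventlet
+
+        try:
+            with eventlet.Timeout(1):
+                eventlet.sleep(10)    # stands in for a slow operation
+        except eventlet.Timeout:
+            print("timed out")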
+
+Patching Functions
+---------------------
+    
+.. function:: eventlet.import_patched(modulename, *additional_modules, **kw_additional_modules)
+
+    Imports a module in a way that ensures that the module uses "green" versions of the standard library modules, so that everything works nonblockingly.  The only required argument is the name of the module to be imported.  For more information see :ref:`import-green`.
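+
+    For example (``feedparser`` stands in for any pure-Python module you have installed)::
+
+        import eventlet
+        feedparser = eventlet.import_patched('feedparser')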
+
+.. function:: eventlet.monkey_patch(all=True, os=False, select=False, socket=False, thread=False, time=False)
+
+    Globally patches certain system modules to be greenthread-friendly. The keyword arguments afford some control over which modules are patched. If *all* is True, then all modules are patched regardless of the other arguments. If it's False, then the rest of the keyword arguments control patching of specific subsections of the standard library.  Most patch the single module of the same name (os, time, select).  The exceptions are socket, which also patches the ssl module if present; and thread, which patches thread, threading, and Queue.  It's safe to call monkey_patch multiple times.  For more information see :ref:`monkey-patch`.
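+
+    A minimal sketch; the patch call should come before other imports::
+
+        import eventlet
+        eventlet.monkey_patch()
+
+        import socket                 # now resolves to the green version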
+
+Network Convenience Functions
+------------------------------
+
+.. autofunction:: eventlet.connect
+
+.. autofunction:: eventlet.listen
+
+.. autofunction:: eventlet.wrap_ssl
+
+.. autofunction:: eventlet.serve
+
+.. autoclass:: eventlet.StopServe
+    
+These are the basic primitives of Eventlet; there are a lot more out there in the other Eventlet modules; check out the :doc:`modules`.

+ 4 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/common.txt

@@ -0,0 +1,4 @@
+.. |internal| replace::
+
+        This is considered an internal API, and it might change 
+        unexpectedly without being deprecated first.

+ 203 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/conf.py

@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+#
+# Eventlet documentation build configuration file, created by
+# sphinx-quickstart on Sat Jul  4 19:48:27 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 
+              'sphinx.ext.intersphinx']
+
+# If this is True, '.. todo::' and '.. todolist::' produce output, else they produce
+# nothing. The default is False.
+todo_include_todos = True
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Eventlet'
+copyright = u'2005-2010, Eventlet Contributors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+import eventlet
+# The short X.Y version.
+version = '%s.%s' % (eventlet.version_info[0], eventlet.version_info[1])
+# The full version, including alpha/beta/rc tags.
+release = eventlet.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# Intersphinx references
+intersphinx_mapping = {'http://docs.python.org/': None}
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Eventletdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'Eventlet.tex', u'Eventlet Documentation',
+   u'<eventlet contributors>', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True

+ 112 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/design_patterns.rst

@@ -0,0 +1,112 @@
+.. _design-patterns:
+
+Design Patterns
+=================
+
+There are a bunch of basic patterns that Eventlet usage falls into.  Here are a few examples that show their basic structure.
+
+Client Pattern
+--------------------
+
+The canonical client-side example is a web crawler.  This use case is given a list of urls and wants to retrieve their bodies for later processing.  Here is a very simple example::
+
+    import eventlet
+    from eventlet.green import urllib2
+
+    urls = ["http://www.google.com/intl/en_ALL/images/logo.gif",
+           "https://www.python.org/static/img/python-logo.png",
+           "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"]
+
+    def fetch(url):
+        return urllib2.urlopen(url).read()
+
+    pool = eventlet.GreenPool()
+    for body in pool.imap(fetch, urls):
+        print("got body", len(body))
+
+There is a slightly more complex version of this in the :ref:`web crawler example <web_crawler_example>`.  Here's a tour of the interesting lines in this crawler.
+
+``from eventlet.green import urllib2`` is how you import a cooperatively-yielding version of urllib2.  It is the same in all respects to the standard version, except that it uses green sockets for its communication.  This is an example of the :ref:`import-green` pattern.
+
+``pool = eventlet.GreenPool()`` constructs a :class:`GreenPool <eventlet.greenpool.GreenPool>` of a thousand green threads.  Using a pool is good practice because it provides an upper limit on the amount of work that this crawler will be doing simultaneously, which comes in handy when the input data changes dramatically.
+
+``for body in pool.imap(fetch, urls):`` iterates over the results of calling the fetch function in parallel.  :meth:`imap <eventlet.greenpool.GreenPool.imap>` makes the function calls in parallel, and the results are returned in the same order as their arguments were submitted.
+
+The key aspect of the client pattern is that it involves collecting the results of each function call; the fact that each fetch is done concurrently is essentially an invisible optimization.  Note also that imap is memory-bounded and won't consume gigabytes of memory if the list of urls grows to the tens of thousands (yes, we had that problem in production once!).
+
+
+Server Pattern
+--------------------
+
+Here's a simple server-side example, a simple echo server::
+
+    import eventlet
+
+    def handle(client):
+        while True:
+            c = client.recv(1)
+            if not c: break
+            client.sendall(c)
+
+    server = eventlet.listen(('0.0.0.0', 6000))
+    pool = eventlet.GreenPool(10000)
+    while True:
+        new_sock, address = server.accept()
+        pool.spawn_n(handle, new_sock)
+
+The file :ref:`echo server example <echo_server_example>` contains a somewhat more robust and complex version of this example.
+
+``server = eventlet.listen(('0.0.0.0', 6000))`` uses a convenience function to create a listening socket.
+
+``pool = eventlet.GreenPool(10000)`` creates a pool of green threads that could handle ten thousand clients.
+
+``pool.spawn_n(handle, new_sock)`` launches a green thread to handle the new client.  The accept loop doesn't care about the return value of the ``handle`` function, so it uses :meth:`spawn_n <eventlet.greenpool.GreenPool.spawn_n>`, instead of :meth:`spawn <eventlet.greenpool.GreenPool.spawn>`.
+
+The difference between the server and the client patterns boils down to the fact that the server has a ``while`` loop calling ``accept()`` repeatedly, and that it hands off the client socket completely to the ``handle()`` function, rather than collecting the results.
+
+Dispatch Pattern
+-------------------
+
+One common use case that Linden Lab runs into all the time is a "dispatch" design pattern.  This is a server that is also a client of some other services.  Proxies, aggregators, job workers, and so on are all terms that apply here.  This is the use case that the :class:`GreenPile <eventlet.greenpool.GreenPile>` was designed for.
+
+Here's a somewhat contrived example: a server that receives POSTs from clients that contain a list of urls of RSS feeds.  The server fetches all the feeds concurrently and responds with a list of their titles to the client.  It's easy to imagine it doing something more complex than this, and this could be easily modified to become a Reader-style application::
+
+    import eventlet
+    feedparser = eventlet.import_patched('feedparser')
+
+    pool = eventlet.GreenPool()
+
+    def fetch_title(url):
+        d = feedparser.parse(url)
+        return d.feed.get('title', '')
+
+    def app(environ, start_response):
+        pile = eventlet.GreenPile(pool)
+        for url in environ['wsgi.input'].readlines():
+            pile.spawn(fetch_title, url)
+        titles = '\n'.join(pile)
+        start_response('200 OK', [('Content-type', 'text/plain')])
+        return [titles]
+
+The full version of this example is in the :ref:`feed_scraper_example`, which includes code to start the WSGI server on a particular port.
+
+This example uses a global (gasp) :class:`GreenPool <eventlet.greenpool.GreenPool>` to control concurrency.  If we didn't have a global limit on the number of outgoing requests, then a client could cause the server to open tens of thousands of concurrent connections to external servers, thereby getting feedscraper's IP banned, or various other accidental-or-on-purpose bad behavior.  The pool isn't a complete DoS protection, but it's the bare minimum.
+
+.. highlight:: python
+    :linenothreshold: 1
+
+The interesting lines are in the app function::
+
+    pile = eventlet.GreenPile(pool)
+    for url in environ['wsgi.input'].readlines():
+        pile.spawn(fetch_title, url)
+    titles = '\n'.join(pile)
+
+.. highlight:: python
+    :linenothreshold: 1000
+
+Note that in line 1, the Pile is constructed using the global pool as its argument.  That ties the Pile's concurrency to the global pool's.  If there are already 1000 concurrent fetches from other clients of feedscraper, this one will block until some of those complete.  Limitations are good!
+
+Line 3 is just a spawn, but note that we don't store any return value from it.  This is because the return value is kept in the Pile itself.  This becomes evident in the next line...
+
+Line 4 is where we use the fact that the Pile is an iterator.  Each element in the iterator is one of the return values from the fetch_title function, which are strings.  We can use a normal Python idiom (:func:`join`) to concatenate these incrementally as they happen.

+ 21 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/environment.rst

@@ -0,0 +1,21 @@
+.. _env_vars:
+
+Environment Variables
+======================
+
+Eventlet's behavior can be controlled by a few environment variables.
+These are only for the advanced user.
+
+EVENTLET_HUB 
+
+   Used to force Eventlet to use the specified hub instead of the
+   optimal one.  See :ref:`understanding_hubs` for the list of
+   acceptable hubs and what they mean (note that picking a hub not on
+   the list will silently fail).  Equivalent to calling
+   :meth:`eventlet.hubs.use_hub` at the beginning of the program.
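+
+   For example, running a program with ``EVENTLET_HUB=selects`` set in the
+   environment is equivalent to starting the program with::
+
+       from eventlet import hubs
+       hubs.use_hub("selects")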
+
+EVENTLET_THREADPOOL_SIZE
+
+   The size of the threadpool in :mod:`~eventlet.tpool`.  This is an
+   environment variable because tpool constructs its pool on first
+   use, so any control of the pool size needs to happen before then.

+ 106 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/examples.rst

@@ -0,0 +1,106 @@
+Examples
+========
+
+Here are a bunch of small example programs that use Eventlet.  All of these examples can be found in the ``examples`` directory of a source copy of Eventlet.
+
+.. _web_crawler_example:
+
+Web Crawler
+------------
+``examples/webcrawler.py``
+
+.. literalinclude:: ../examples/webcrawler.py
+
+.. _wsgi_server_example:
+
+WSGI Server
+------------
+``examples/wsgi.py``
+
+.. literalinclude:: ../examples/wsgi.py
+
+.. _echo_server_example:
+
+Echo Server
+-----------
+``examples/echoserver.py``
+
+.. literalinclude:: ../examples/echoserver.py
+
+.. _socket_connect_example:
+
+Socket Connect
+--------------
+``examples/connect.py``
+
+.. literalinclude:: ../examples/connect.py
+
+.. _chat_server_example:
+
+Multi-User Chat Server
+-----------------------
+``examples/chat_server.py``
+
+This is a little different from the echo server, in that it broadcasts the 
+messages to all participants, not just the sender.
+        
+.. literalinclude:: ../examples/chat_server.py
+
+.. _feed_scraper_example:
+
+Feed Scraper
+-----------------------
+``examples/feedscraper.py``
+
+This example requires `Feedparser <http://www.feedparser.org/>`_ to be installed or on the PYTHONPATH.
+
+.. literalinclude:: ../examples/feedscraper.py
+
+.. _forwarder_example:
+
+Port Forwarder
+-----------------------
+``examples/forwarder.py``
+
+.. literalinclude:: ../examples/forwarder.py
+
+.. _recursive_crawler_example:
+
+Recursive Web Crawler
+-----------------------------------------
+``examples/recursive_crawler.py``
+
+This is an example recursive web crawler that fetches linked pages from a seed url.
+
+.. literalinclude:: ../examples/recursive_crawler.py
+
+.. _producer_consumer_example:
+
+Producer Consumer Web Crawler
+-----------------------------------------
+``examples/producer_consumer.py``
+
+This is an example implementation of the producer/consumer pattern as well as being identical in functionality to the recursive web crawler.
+
+.. literalinclude:: ../examples/producer_consumer.py
+
+.. _websocket_example:
+
+Websocket Server Example
+--------------------------
+``examples/websocket.py``
+
+This exercises some of the features of the websocket server
+implementation.
+
+.. literalinclude:: ../examples/websocket.py
+
+.. _websocket_chat_example:
+
+Websocket Multi-User Chat Example
+-----------------------------------
+``examples/websocket_chat.py``
+
+This is a mashup of the websocket example and the multi-user chat example, showing how you can do the same sorts of things with websockets that you can do with regular sockets.
+
+.. literalinclude:: ../examples/websocket_chat.py

+ 10 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/history.rst

@@ -0,0 +1,10 @@
+History
+-------
+
+Eventlet began life as Donovan Preston was talking to Bob Ippolito about coroutine-based non-blocking networking frameworks in Python. Most non-blocking frameworks require you to run the "main loop" in order to perform all network operations, but Donovan wondered if a library written using a trampolining style could get away with transparently running the main loop any time i/o was required, stopping the main loop once no more i/o was scheduled. Bob spent a few days during PyCon 2006 writing a proof-of-concept. He named it eventlet, after the coroutine implementation it used, `greenlet <http://cheeseshop.python.org/pypi/greenlet>`_. Donovan began using eventlet as a light-weight network library for his spare-time project `Pavel <http://soundfarmer.com/Pavel/trunk/>`_, and also began writing some unit tests.
+
+* http://svn.red-bean.com/bob/eventlet/trunk/
+
+When Donovan started at Linden Lab in May of 2006, he added eventlet as an svn external in the ``indra/lib/python`` directory, to be a dependency of the yet-to-be-named backbone project (at the time, it was named restserv). However, including eventlet as an svn external meant that any time the externally hosted project had hosting issues, Linden developers were not able to perform svn updates. Thus, the eventlet source was imported into the linden source tree at the same location, and became a fork.
+
+Bob Ippolito has ceased working on eventlet and has stated his desire for Linden to take its fork forward to the open source world as "the" eventlet.

+ 54 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/hubs.rst

@@ -0,0 +1,54 @@
+.. _understanding_hubs:
+
+Understanding Eventlet Hubs
+===========================
+
+A hub forms the basis of Eventlet's event loop, which dispatches I/O events and schedules greenthreads.  It is the existence of the hub that promotes coroutines (which can be tricky to program with) into greenthreads (which are easy).
+
+Eventlet has multiple hub implementations, and when you start using it, it tries to select the best hub implementation for your system.  The hubs that it supports are (in order of preference):
+
+**epolls**
+    Requires Python 2.6 or the `python-epoll <http://pypi.python.org/pypi/python-epoll/1.0>`_ package, and Linux.  This is the fastest pure-Python hub.
+**poll**
+    Available on platforms that support it.
+**selects**
+    Lowest-common-denominator, available everywhere.
+**pyevent**
+    This is a libevent-based backend and is thus the fastest.  It's disabled by default, because it does not support native threads, but you can enable it yourself if your use case doesn't require them.  (You have to install pyevent, too.)
+
+If the selected hub is not ideal for the application, another can be selected.  You can make the selection either with the environment variable :ref:`EVENTLET_HUB <env_vars>`, or with use_hub.
+
+.. function:: eventlet.hubs.use_hub(hub=None)
+
+    Use this to control which hub Eventlet selects.  Call it with the name of the desired hub module.  Make sure to do this before the application starts doing any I/O!  Calling use_hub completely eliminates the old hub, and any file descriptors or timers that it had been managing will be forgotten.  Put the call as one of the first lines in the main module::
+    
+        """ This is the main module """
+        from eventlet import hubs
+        hubs.use_hub("pyevent")
+    
+    Hubs are implemented as thread-local class instances.  :func:`eventlet.hubs.use_hub` only operates on the current thread.  When using multiple threads that each need their own hub, call :func:`eventlet.hubs.use_hub` at the beginning of each thread function that needs a specific hub.  In practice, it may not be necessary to specify a hub in each thread; it works to use one special hub for the main thread, and let other threads use the default hub; this hybrid hub configuration will work fine.
+    
+    It is also possible to use a third-party hub module in place of one of the built-in ones.  Simply pass the module itself to :func:`eventlet.hubs.use_hub`.  The task of writing such a hub is a little beyond the scope of this document; it's probably a good idea to simply inspect the code of the existing hubs to see how they work::
+
+         from eventlet import hubs    
+         from mypackage import myhub
+         hubs.use_hub(myhub)
+    
+    Supplying None as the argument to :func:`eventlet.hubs.use_hub` causes it to select the default hub.
+
+
+How the Hubs Work
+-----------------
+
+The hub has a main greenlet, MAINLOOP.  When one of the running coroutines needs
+to do some I/O, it registers a listener with the hub (so that the hub knows when to wake it up again), and then switches to MAINLOOP (via ``get_hub().switch()``).  If there are other coroutines that are ready to run, MAINLOOP switches to them, and when they complete or need to do more I/O, they switch back to the MAINLOOP.  In this manner, MAINLOOP ensures that every coroutine gets scheduled when it has some work to do.
+
+MAINLOOP is launched only when the first I/O operation happens, and it is not the same greenlet that __main__ is running in.  This lazy launching is why it's not necessary to explicitly call a dispatch() method like other frameworks, which in turn means that code can start using Eventlet without needing to be substantially restructured.
+
+More Hub-Related Functions
+---------------------------
+
+.. autofunction:: eventlet.hubs.get_hub
+.. autofunction:: eventlet.hubs.get_default_hub
+.. autofunction:: eventlet.hubs.trampoline
+

BIN
desktop/core/ext-py/eventlet-0.21.0/doc/images/threading_illustration.png


+ 55 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/index.rst

@@ -0,0 +1,55 @@
+Eventlet Documentation
+====================================
+
+Code talks!  This is a simple web crawler that fetches a bunch of urls concurrently:
+
+.. code-block:: python
+
+    urls = [
+        "http://www.google.com/intl/en_ALL/images/logo.gif",
+        "http://python.org/images/python-logo.gif",
+        "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif",
+    ]
+
+    import eventlet
+    from eventlet.green import urllib2
+
+    def fetch(url):
+        return urllib2.urlopen(url).read()
+
+    pool = eventlet.GreenPool()
+    for body in pool.imap(fetch, urls):
+        print("got body", len(body))
+
+Contents
+=========
+
+.. toctree::
+   :maxdepth: 2
+
+   basic_usage
+   design_patterns
+   patching
+   examples
+   ssl
+   threading
+   zeromq
+   hubs
+   testing
+   environment
+
+   modules
+
+   authors
+   history
+
+License
+---------
+Eventlet is made available under the terms of the open source `MIT license <http://www.opensource.org/licenses/mit-license.php>`_
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`

+ 21 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules.rst

@@ -0,0 +1,21 @@
+Module Reference
+======================
+
+.. toctree::
+   :maxdepth: 2
+
+   modules/backdoor
+   modules/corolocal
+   modules/dagpool
+   modules/debug
+   modules/db_pool
+   modules/event
+   modules/greenpool
+   modules/greenthread
+   modules/pools
+   modules/queue
+   modules/semaphore
+   modules/timeout
+   modules/websocket
+   modules/wsgi
+   modules/zmq

+ 27 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/backdoor.rst

@@ -0,0 +1,27 @@
+:mod:`backdoor` -- Python interactive interpreter within a running process
+===============================================================================
+
+The backdoor module is convenient for inspecting the state of a long-running process.  It supplies the normal Python interactive interpreter in a way that does not block the normal operation of the application.  This can be useful for debugging, performance tuning, or simply learning about how things behave in situ.
+
+In the application, spawn a greenthread running backdoor_server on a listening socket::
+    
+    eventlet.spawn(backdoor.backdoor_server, eventlet.listen(('localhost', 3000)))
+    
+When this is running, the backdoor is accessible via telnet to the specified port.
+
+.. code-block:: sh
+
+  $ telnet localhost 3000
+  Python 2.6.2 (r262:71600, Apr 16 2009, 09:17:39) 
+  [GCC 4.0.1 (Apple Computer, Inc. build 5250)] on darwin
+  Type "help", "copyright", "credits" or "license" for more information.
+  >>> import myapp
+  >>> dir(myapp)
+  ['__all__', '__doc__', '__name__', 'myfunc']
+  >>>
+  
+The backdoor cooperatively yields to the rest of the application between commands, so on a running server continuously serving requests, you can observe the internal state changing between interpreter commands.
+
+.. automodule:: eventlet.backdoor
+	:members:
+

+ 6 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/corolocal.rst

@@ -0,0 +1,6 @@
+:mod:`corolocal` -- Coroutine local storage
+=============================================
+
+.. automodule:: eventlet.corolocal
+	:members:
+	:undoc-members:

+ 493 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/dagpool.rst

@@ -0,0 +1,493 @@
+:mod:`dagpool` -- Dependency-Driven Greenthreads
+================================================
+
+Rationale
+*********
+
+The dagpool module provides the :class:`DAGPool <eventlet.dagpool.DAGPool>`
+class, which addresses situations in which the value produced by one
+greenthread might be consumed by several others -- while at the same time a
+consuming greenthread might depend on the output from several different
+greenthreads.
+
+If you have a tree with strict many-to-one dependencies -- each producer
+greenthread provides results to exactly one consumer, though a given consumer
+may depend on multiple producers -- that could be addressed by recursively
+constructing a :class:`GreenPool <eventlet.greenpool.GreenPool>` of producers
+for each consumer, then :meth:`waiting <eventlet.greenpool.GreenPool.waitall>`
+for all producers.
+
+If you have a tree with strict one-to-many dependencies -- each consumer
+greenthread depends on exactly one producer, though a given producer may
+provide results to multiple consumers -- that could be addressed by causing
+each producer to finish by launching a :class:`GreenPool
+<eventlet.greenpool.GreenPool>` of consumers.
+
+But when you have many-to-many dependencies, a tree doesn't suffice. This is
+known as a
+`Directed Acyclic Graph <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_,
+or DAG.
+
+You might consider sorting the greenthreads into dependency order
+(`topological sort <https://en.wikipedia.org/wiki/Topological_sorting>`_) and
+launching them in a GreenPool. But the concurrency of the GreenPool must be
+strictly constrained to ensure that no greenthread is launched before all its
+upstream producers have completed -- and the appropriate pool size is
+data-dependent. Only a pool of size 1 (serializing all the greenthreads)
+guarantees that a topological sort will produce correct results.
+
+Even if you do serialize all the greenthreads, how do you pass results from
+each producer to all its consumers, which might start at very different points
+in time?
+
+One answer is to associate each greenthread with a distinct key, and store its
+result in a common dict. Then each consumer greenthread can identify its
+direct upstream producers by their keys, and find their results in that dict.
+
+This is the essence of DAGPool.
+
+A DAGPool instance owns a dict, and stores greenthread results in that dict.
+You :meth:`spawn <eventlet.dagpool.DAGPool.spawn>` *all* greenthreads in the
+DAG, specifying for each its own key -- the key with which its result will be
+stored on completion -- plus the keys of the upstream producer greenthreads on
+whose results it directly depends.
+
+Keys need only be unique within the DAGPool instance; they need not be UUIDs.
+A key can be any type that can be used as a dict key. String keys make it
+easier to reason about a DAGPool's behavior, but are by no means required.
+
+The DAGPool passes to each greenthread an iterable of (key, value) pairs.
+The key in each pair is the key of one of the greenthread's specified upstream
+producers; the value is the value returned by that producer greenthread. Pairs
+are delivered in the order results become available; the consuming greenthread
+blocks until the next result can be delivered.
+
+Tutorial
+********
+Example
+-------
+
+Consider a couple of programs in some compiled language that depend on a set
+of precompiled libraries. Suppose every such build requires as input the
+specific set of library builds on which it directly depends.
+
+::
+
+    a  zlib
+    | /  |
+    |/   |
+    b    c
+    |   /|
+    |  / |
+    | /  |
+    |/   |
+    d    e
+
+We can't run the build for program d until we have the build results for both
+b and c. We can't run the build for library b until we have build results for
+a and zlib. We can, however, immediately run the builds for a and zlib.
+
+So we can use a DAGPool instance to spawn greenthreads running a function such
+as this:
+
+::
+
+    def builder(key, upstream):
+        for libname, product in upstream:
+            # ... configure build for 'key' to use 'product' for 'libname'
+        # all upstream builds have completed
+        # ... run build for 'key'
+        return build_product_for_key
+
+:meth:`spawn <eventlet.dagpool.DAGPool.spawn>` all these greenthreads:
+
+::
+
+    pool = DAGPool()
+    # the upstream producer keys passed to spawn() can be from any iterable,
+    # including a generator
+    pool.spawn("d", ("b", "c"), builder)
+    pool.spawn("e", ["c"], builder)
+    pool.spawn("b", ("a", "zlib"), builder)
+    pool.spawn("c", ["zlib"], builder)
+    pool.spawn("a", (), builder)
+
+As with :func:`eventlet.spawn() <eventlet.spawn>`, if you need to pass special
+build flags to some set of builds, these can be passed as either positional or
+keyword arguments:
+
+::
+
+    def builder(key, upstream, cflags="", linkflags=""):
+        ...
+
+    pool.spawn("d", ("b", "c"), builder, "-o2")
+    pool.spawn("e", ["c"], builder, linkflags="-pie")
+
+However, if the arguments to each builder() call are uniform (as in the
+original example), you could alternatively build a dict of the dependencies
+and call :meth:`spawn_many() <eventlet.dagpool.DAGPool.spawn_many>`:
+
+::
+
+    deps = dict(d=("b", "c"),
+                e=["c"],
+                b=("a", "zlib"),
+                c=["zlib"],
+                a=())
+    pool.spawn_many(deps, builder)
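+
+A complete toy version of the above, runnable as-is (the "build" step here
+just records the dependency structure as a string)::
+
+    from eventlet.dagpool import DAGPool
+
+    def builder(key, upstream):
+        inputs = [product for libname, product in upstream]
+        return "{0}({1})".format(key, ",".join(sorted(inputs)))
+
+    pool = DAGPool()
+    pool.spawn_many(dict(d=("b", "c"), e=["c"], b=("a", "zlib"),
+                         c=["zlib"], a=(), zlib=()), builder)
+    print(pool.waitall()["d"])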
+
+From outside the DAGPool, you can obtain the results for d and e (or in fact
+for any of the build greenthreads) in any of several ways.
+
+:meth:`pool.waitall() <eventlet.dagpool.DAGPool.waitall>` waits until the last of the spawned
+greenthreads has completed, and returns a dict containing results for *all* of
+them:
+
+::
+
+    final = pool.waitall()
+    print("for d: {0}".format(final["d"]))
+    print("for e: {0}".format(final["e"]))
+
+waitall() is an alias for :meth:`wait() <eventlet.dagpool.DAGPool.wait>` with no arguments:
+
+::
+
+    final = pool.wait()
+    print("for d: {0}".format(final["d"]))
+    print("for e: {0}".format(final["e"]))
+
+Or you can specifically wait for only the final programs:
+
+::
+
+    final = pool.wait(["d", "e"])
+
+The returned dict will contain only the specified keys. The keys may be passed
+into wait() from any iterable, including a generator.
+
+You can wait for any specified set of greenthreads; they need not be
+topologically last:
+
+::
+
+    # returns as soon as both a and zlib have returned results, regardless of
+    # what else is still running
+    leaves = pool.wait(["a", "zlib"])
+
+Suppose you want to wait specifically for just *one* of the final programs:
+
+::
+
+    final = pool.wait(["d"])
+    dprog = final["d"]
+
+The above wait() call will return as soon as greenthread d returns a result --
+regardless of whether greenthread e has finished.
+
+:meth:`__getitem__() <eventlet.dagpool.DAGPool.__getitem__>` is shorthand for
+obtaining a single result:
+
+::
+
+    # waits until greenthread d returns its result
+    dprog = pool["d"]
+
+In contrast, :meth:`get() <eventlet.dagpool.DAGPool.get>` returns immediately,
+whether or not a result is ready:
+
+::
+
+    # returns immediately
+    if pool.get("d") is None:
+        ...
+
+Of course, your greenthread might not include an explicit return statement and
+hence might implicitly return None. You might have to test some other value.
+
+::
+
+    # returns immediately
+    if pool.get("d", "notdone") == "notdone":
+        ...
+
+Suppose you want to process each of the final programs in some way (upload
+it?), but you don't want to have to wait until they've both finished. You
+don't have to poll get() calls -- use :meth:`wait_each()
+<eventlet.dagpool.DAGPool.wait_each>`:
+
+::
+
+    for key, result in pool.wait_each(["d", "e"]):
+        # key will be d or e, in completion order
+        # process result...
+
+As with :meth:`wait() <eventlet.dagpool.DAGPool.wait>`, if you omit the
+argument to wait_each(), it delivers results for all the greenthreads of which
+it's aware:
+
+::
+
+    for key, result in pool.wait_each():
+        # key will be a, zlib, b, c, d, e, in whatever order each completes
+        # process its result...
+
+Introspection
+-------------
+
+Let's say you have set up a :class:`DAGPool <eventlet.dagpool.DAGPool>` with
+the dependencies shown above. To your consternation, your :meth:`waitall()
+<eventlet.dagpool.DAGPool.waitall>` call does not return! The DAGPool instance
+is stuck!
+
+You could change waitall() to :meth:`wait_each()
+<eventlet.dagpool.DAGPool.wait_each>`, and print each key as it becomes
+available:
+
+::
+
+    for key, result in pool.wait_each():
+        print("got result for {0}".format(key))
+        # ... process ...
+
+Once the build for a has completed, this produces:
+
+::
+
+    got result for a
+
+and then stops. Hmm!
+
+You can check the number of :meth:`running <eventlet.dagpool.DAGPool.running>`
+greenthreads:
+
+::
+
+    >>> print(pool.running())
+    4
+
+and the number of :meth:`waiting <eventlet.dagpool.DAGPool.waiting>`
+greenthreads:
+
+::
+
+    >>> print(pool.waiting())
+    4
+
+It's often more informative to ask *which* greenthreads are :meth:`still
+running <eventlet.dagpool.DAGPool.running_keys>`:
+
+::
+
+    >>> print(pool.running_keys())
+    ('c', 'b', 'e', 'd')
+
+but in this case, we already know a has completed.
+
+We can ask for all available results:
+
+::
+
+    >>> print(pool.keys())
+    ('a',)
+    >>> print(pool.items())
+    (('a', result_from_a),)
+
+The :meth:`keys() <eventlet.dagpool.DAGPool.keys>` and :meth:`items()
+<eventlet.dagpool.DAGPool.items>` methods only return keys and items for
+which results are actually available, reflecting the underlying dict.
+
+But what's blocking the works? What are we :meth:`waiting for
+<eventlet.dagpool.DAGPool.waiting_for>`?
+
+::
+
+    >>> print(pool.waiting_for("d"))
+    set(['c', 'b'])
+
+(waiting_for()'s optional argument is a *single* key.)
+
+That doesn't help much yet...
+
+::
+
+    >>> print(pool.waiting_for("b"))
+    set(['zlib'])
+    >>> print(pool.waiting_for("zlib"))
+    KeyError: 'zlib'
+
+Aha! We forgot to even include the zlib build when we were originally
+configuring this DAGPool!
+
+(For non-interactive use, it would be more informative to omit waiting_for()'s
+argument. This usage returns a dict indicating, for each greenthread key,
+which other keys it's waiting for.)
+
+::
+
+    from pprint import pprint
+    pprint(pool.waiting_for())
+
+    {'b': set(['zlib']), 'c': set(['zlib']), 'd': set(['b', 'c']), 'e': set(['c'])}
+
+In this case, a reasonable fix would be to spawn the zlib greenthread:
+
+::
+
+    pool.spawn("zlib", (), builder)
+
+Even if this is the last method call on this DAGPool instance, it should
+unblock all the rest of the DAGPool greenthreads.
+
+Posting
+-------
+
+If we happen to have zlib build results in hand already, though, we could
+:meth:`post() <eventlet.dagpool.DAGPool.post>` that result instead of
+rebuilding the library:
+
+::
+
+    pool.post("zlib", result_from_zlib)
+
+This, too, should unblock the rest of the DAGPool greenthreads.
+
+Preloading
+----------
+
+If rebuilding takes nontrivial realtime, it might be useful to record partial
+results, so that in case of interruption you can restart from where you left
+off rather than having to rebuild everything prior to that point.
+
+You could iteratively :meth:`post() <eventlet.dagpool.DAGPool.post>` those
+prior results into a new DAGPool instance; alternatively you can
+:meth:`preload <eventlet.dagpool.DAGPool.__init__>` the :class:`DAGPool
+<eventlet.dagpool.DAGPool>` from an existing dict:
+
+::
+
+    pool = DAGPool(dict(a=result_from_a, zlib=result_from_zlib))
+
+Any DAGPool greenthreads that depend on either a or zlib can immediately
+consume those results.
+
+It also works to construct DAGPool with an iterable of (key, result) pairs.
+
+Exception Propagation
+---------------------
+
+But what if we spawn a zlib build that fails? Suppose the zlib greenthread
+terminates with an exception. In that case none of b, c, d, or e can proceed!
+Nor do we want to wait forever for them.
+
+::
+
+    dprog = pool["d"]
+    eventlet.dagpool.PropagateError: PropagateError(d): PropagateError: PropagateError(c): PropagateError: PropagateError(zlib): OriginalError
+
+DAGPool provides a :class:`PropagateError <eventlet.dagpool.PropagateError>`
+exception specifically to wrap such failures. If a DAGPool greenthread
+terminates with an Exception subclass, the DAGPool wraps that exception in a
+PropagateError instance whose *key* attribute is the key of the failing
+greenthread and whose *exc* attribute is the exception that terminated it.
+This PropagateError is stored as the result from that greenthread.
+
+Attempting to consume the result from a greenthread for which a PropagateError
+was stored raises that PropagateError.
+
+::
+
+    pool["zlib"]
+    eventlet.dagpool.PropagateError: PropagateError(zlib): OriginalError
+
+Thus, when greenthread c attempts to consume the result from zlib, the
+PropagateError for zlib is raised. Unless the builder function for greenthread
+c handles that PropagateError exception, that greenthread will itself
+terminate. That PropagateError will be wrapped in another PropagateError whose
+*key* attribute is c and whose *exc* attribute is the PropagateError for zlib.
+
+Similarly, when greenthread d attempts to consume the result from c, the
+PropagateError for c is raised. This in turn is wrapped in a PropagateError
+whose *key* is d and whose *exc* is the PropagateError for c.
+
+When someone attempts to consume the result from d, as shown above, the
+PropagateError for d is raised.
+
+You can programmatically chase the failure path to determine the original
+failure if desired:
+
+::
+
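+    # 'err' is a PropagateError caught earlier, e.g. from pool["d"]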
+    orig_err = err
+    key = "unknown"
+    while isinstance(orig_err, PropagateError):
+        key = orig_err.key
+        orig_err = orig_err.exc
+
+Scanning for Success / Exceptions
+---------------------------------
+
+Exception propagation means that we neither perform useless builds nor wait for
+results that will never arrive.
+
+However, it does make it difficult to obtain *partial* results for builds that
+*did* succeed.
+
+For that you can call :meth:`wait_each_success()
+<eventlet.dagpool.DAGPool.wait_each_success>`:
+
+::
+
+    for key, result in pool.wait_each_success():
+        print("{0} succeeded".format(key))
+        # ... process result ...
+
+    a succeeded
+
+Another problem is that although five different greenthreads failed in the
+example, we only see one chain of failures. You can enumerate the bad news
+with :meth:`wait_each_exception() <eventlet.dagpool.DAGPool.wait_each_exception>`:
+
+::
+
+    for key, err in pool.wait_each_exception():
+        print("{0} failed with {1}".format(key, err.exc.__class__.__name__))
+
+    c failed with PropagateError
+    b failed with PropagateError
+    e failed with PropagateError
+    d failed with PropagateError
+    zlib failed with OriginalError
+
+wait_each_exception() yields each PropagateError wrapper as if it were the
+result, rather than raising it as an exception.
+
+Notice that we print :code:`err.exc.__class__.__name__` because
+:code:`err.__class__.__name__` is always PropagateError.
+
+Both wait_each_success() and wait_each_exception() can accept an iterable of
+keys to report:
+
+::
+
+    for key, result in pool.wait_each_success(["d", "e"]):
+        print("{0} succeeded".format(key))
+
+    (no output)
+
+    for key, err in pool.wait_each_exception(["d", "e"]):
+        print("{0} failed with {1}".format(key, err.exc.__class__.__name__))
+
+    e failed with PropagateError
+    d failed with PropagateError
+
+Both wait_each_success() and wait_each_exception() must wait until the
+greenthreads for all specified keys (or all keys) have terminated, one way or
+the other, because of course we can't know until then how to categorize each.
+
+Module Contents
+===============
+
+.. automodule:: eventlet.dagpool
+	:members:

+ 61 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/db_pool.rst

@@ -0,0 +1,61 @@
+:mod:`db_pool` -- DBAPI 2 database connection pooling
+========================================================
+
+The db_pool module is useful for managing database connections.  It provides three primary benefits: cooperative yielding during database operations, concurrency limiting to a database host, and connection reuse.  db_pool is intended to be database-agnostic, compatible with any DB-API 2.0 database module.
+
+*It has currently been tested and used with both MySQLdb and psycopg2.*
+
+A ConnectionPool object represents a pool of connections open to a particular database.  The arguments to the constructor include the database-software-specific module, the host name, and the credentials required for authentication.  After construction, the ConnectionPool object decides when to create and sever connections with the target database.
+
+>>> import MySQLdb
+>>> cp = ConnectionPool(MySQLdb, host='localhost', user='root', passwd='')
+
+Once you have this pool object, you connect to the database by calling :meth:`~eventlet.db_pool.ConnectionPool.get` on it:
+
+>>> conn = cp.get()
+
+This call either creates a new connection or reuses an existing open one, depending on whether the pool has a connection available.  You can then use the connection object as normal.  When done, you must return the connection to the pool:
+
+>>> conn = cp.get()
+>>> try:
+...     result = conn.cursor().execute('SELECT NOW()')
+... finally:
+...     cp.put(conn)
+
+After you've returned a connection object to the pool, it becomes useless and will raise exceptions if any of its methods are called.
+
+Constructor Arguments
+----------------------
+
+In addition to the database credentials, the ConnectionPool constructor accepts a number of useful keyword arguments; a short example follows the list.
+
+* min_size, max_size : The normal Pool arguments.  max_size is the most important constructor argument -- it determines how many concurrent connections can be open to the destination database.  min_size is not very useful.
+* max_idle : Connections are only allowed to remain unused in the pool for a limited amount of time.  An asynchronous timer periodically wakes up and closes any connections in the pool that have been idle for longer than they are supposed to be.  Without this parameter, the pool would tend to have a 'high-water mark', where the number of connections open at a given time corresponds to the peak historical demand.  This number only has effect on the connections in the pool itself -- if you take a connection out of the pool, you can hold on to it for as long as you want.  If this is set to 0, every connection is closed upon its return to the pool.
+* max_age : The lifespan of a connection.  This works much like max_idle, but the timer is measured from the connection's creation time, and is tracked throughout the connection's life.  This means that if you take a connection out of the pool and hold on to it for some lengthy operation that exceeds max_age, upon putting the connection back into the pool, it will be closed.  Like max_idle, max_age will not close connections that are taken out of the pool, and, if set to 0, will cause every connection to be closed when put back in the pool.
+* connect_timeout : How long to wait before raising an exception on connect().  If the database module's connect() method takes too long, it raises a ConnectTimeout exception from the get() method on the pool.
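+
+For example, here is a sketch of a pool capped at eight concurrent connections, closing connections idle longer than ten seconds or older than thirty seconds (the values are illustrative, not recommendations):
+
+>>> cp = ConnectionPool(MySQLdb, host='localhost', user='root', passwd='',
+...                     min_size=0, max_size=8,
+...                     max_idle=10, max_age=30,
+...                     connect_timeout=5)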
+
+DatabaseConnector
+-----------------
+
+If you want to connect to multiple databases easily (and who doesn't), the DatabaseConnector is for you.  It's a pool of pools, containing a ConnectionPool for every host you connect to.
+
+The constructor arguments are:
+
+* module : database module, e.g. MySQLdb.  This is simply passed through to the ConnectionPool.
+* credentials : A dictionary, or dictionary-alike, mapping hostname to connection-argument-dictionary.  This is used for the constructors of the ConnectionPool objects.  Example:
+
+>>> dc = DatabaseConnector(MySQLdb,
+...      {'db.internal.example.com': {'user': 'internal', 'passwd': 's33kr1t'},
+...       'localhost': {'user': 'root', 'passwd': ''}})
+
+If the credentials contain a host named 'default', then the value for 'default' is used whenever trying to connect to a host that has no explicit entry in the credentials dictionary.  This is useful if there is some pool of hosts that share arguments.
+
+* conn_pool : The connection pool class to use.  Defaults to db_pool.ConnectionPool.
+
+The rest of the arguments to the DatabaseConnector constructor are passed on to the ConnectionPool.
+
+*Caveat: The DatabaseConnector is a bit unfinished; it only suits a subset of use cases.*
+
+.. automodule:: eventlet.db_pool
+	:members:
+	:undoc-members:

+ 5 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/debug.rst

@@ -0,0 +1,5 @@
+:mod:`debug` -- Debugging tools for Eventlet
+==================================================
+
+.. automodule:: eventlet.debug
+	:members:

+ 5 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/event.rst

@@ -0,0 +1,5 @@
+:mod:`event` -- Cross-greenthread primitive
+==================================================
+
+.. automodule:: eventlet.event
+	:members:

+ 6 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/greenpool.rst

@@ -0,0 +1,6 @@
+:mod:`greenpool` -- Green Thread Pools
+========================================
+
+.. automodule:: eventlet.greenpool
+	:members:
+

+ 5 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/greenthread.rst

@@ -0,0 +1,5 @@
+:mod:`greenthread` -- Green Thread Implementation
+==================================================
+
+.. automodule:: eventlet.greenthread
+	:members:

+ 5 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/pools.rst

@@ -0,0 +1,5 @@
+:mod:`pools` - Generic pools of resources 
+==========================================
+
+.. automodule:: eventlet.pools
+	:members:

+ 5 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/queue.rst

@@ -0,0 +1,5 @@
+:mod:`queue` -- Queue class
+========================================
+
+.. automodule:: eventlet.queue
+	:members:

+ 11 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/semaphore.rst

@@ -0,0 +1,11 @@
+:mod:`semaphore` -- Semaphore classes
+==================================================
+
+.. autoclass:: eventlet.semaphore.Semaphore
+	:members:
+
+.. autoclass:: eventlet.semaphore.BoundedSemaphore
+	:members:
+	
+.. autoclass:: eventlet.semaphore.CappedSemaphore
+	:members:

+ 92 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/timeout.rst

@@ -0,0 +1,92 @@
+:mod:`timeout` -- Universal Timeouts
+========================================
+
+.. class:: eventlet.timeout.Timeout
+
+    Raises *exception* in the current greenthread after *timeout* seconds::
+
+        timeout = Timeout(seconds, exception)
+        try:
+            ... # execution here is limited by timeout
+        finally:
+            timeout.cancel()
+
+    When *exception* is omitted or is ``None``, the :class:`Timeout` instance
+    itself is raised:
+
+        >>> Timeout(0.1)
+        >>> eventlet.sleep(0.2)
+        Traceback (most recent call last):
+         ...
+        Timeout: 0.1 seconds
+
+    You can use the  ``with`` statement for additional convenience::
+
+        with Timeout(seconds, exception) as timeout:
+            pass # ... code block ...
+
+    This is equivalent to the try/finally block in the first example.
+
+    There is an additional feature when using the ``with`` statement: if
+    *exception* is ``False``, the timeout is still raised, but the with
+    statement suppresses it, so the code outside the with-block won't see it::
+
+        data = None
+        with Timeout(5, False):
+            data = mysock.makefile().readline()
+        if data is None:
+            ... # 5 seconds passed without reading a line
+        else:
+            ... # a line was read within 5 seconds
+
+    As a very special case, if *seconds* is ``None``, the timer is not
+    scheduled; such a Timeout is only useful if you're planning to raise it
+    directly.
+
+    There are two Timeout caveats to be aware of:
+
+    * If the code block in the try/finally or with-block never cooperatively yields, the timeout cannot be raised.  In Eventlet, this should rarely be a problem, but be aware that you cannot time out CPU-only operations with this class.
+    * If the code block catches and doesn't re-raise :class:`BaseException`  (for example, with ``except:``), then it will catch the Timeout exception, and might not abort as intended.
+
+    When catching timeouts, keep in mind that the one you catch may not be the
+    one you set; if you plan on silencing a timeout, always check that it's the
+    same instance that you set::
+
+        timeout = Timeout(1)
+        try:
+            ...
+        except Timeout as t:
+            if t is not timeout:
+                raise # not my timeout
+
+    .. automethod:: cancel
+    .. autoattribute:: pending
+
+
+.. function:: eventlet.timeout.with_timeout(seconds, function, *args, **kwds)
+
+    Wrap a call to some (yielding) function with a timeout; if the called
+    function fails to return before the timeout, cancel it and return a flag
+    value.
+
+    :param seconds: seconds before timeout occurs
+    :type seconds: int or float
+    :param function: the callable to execute with a timeout; it must
+      cooperatively yield, or else the timeout will not be able to trigger
+    :param \*args: positional arguments to pass to *function*
+    :param \*\*kwds: keyword arguments to pass to *function*
+    :param timeout_value: value to return if timeout occurs (by default raises
+      :class:`Timeout`)
+
+    :rtype: Value returned by *function* if it returns before *seconds*, else
+      *timeout_value* if provided, else raises :class:`Timeout`.
+
+    :exception Timeout: if *function* times out and no ``timeout_value`` has
+      been provided.
+    :exception: Any exception raised by *function*
+
+    Example::
+
+        data = with_timeout(30, urllib2.urlopen, 'http://www.google.com/', timeout_value="")
+
+    Here *data* is either the result of the ``urlopen()`` call, or the empty
+    string if it took too long to return.  Any exception raised by the
+    ``urlopen()`` call is passed through to the caller.

+ 36 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/websocket.rst

@@ -0,0 +1,36 @@
+:mod:`websocket` -- Websocket Server
+=====================================
+
+This module provides a simple way to create a `websocket
+<http://dev.w3.org/html5/websockets/>`_ server.  It works with a few
+tweaks in the :mod:`~eventlet.wsgi` module that allow websockets to
+coexist with other WSGI applications.
+
+To create a websocket server, simply decorate a handler method with
+:class:`WebSocketWSGI` and use it as a wsgi application::
+
+    from eventlet import wsgi, websocket
+    import eventlet
+    
+    @websocket.WebSocketWSGI
+    def hello_world(ws):
+        ws.send("hello world")
+    
+    wsgi.server(eventlet.listen(('', 8090)), hello_world)
+
+.. note::
+
+    Please see graceful termination warning in :func:`~eventlet.wsgi.server`
+    documentation
+
+
+You can find a slightly more elaborate version of this code in the file
+``examples/websocket.py``.
+
+As of version 0.9.13, eventlet.websocket supports SSL websockets; all that's necessary is to use an :ref:`SSL wsgi server <wsgi_ssl>`.
+
+.. note :: The web socket spec is still under development, and it will be necessary to change the way that this module works in response to spec changes.
+
+
+.. automodule:: eventlet.websocket
+	:members:

+ 130 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/wsgi.rst

@@ -0,0 +1,130 @@
+:mod:`wsgi` -- WSGI server
+===========================
+
+The wsgi module provides a simple and easy way to start an event-driven
+`WSGI <http://wsgi.org/wsgi/>`_ server.  This can serve as an embedded
+web server in an application, or as the basis for a more full-featured web
+server package.  One such package is `Spawning <http://pypi.python.org/pypi/Spawning/>`_.
+
+To launch a wsgi server, simply create a socket and call :func:`eventlet.wsgi.server` with it::
+
+    from eventlet import wsgi
+    import eventlet
+
+    def hello_world(env, start_response):
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return ['Hello, World!\r\n']
+
+    wsgi.server(eventlet.listen(('', 8090)), hello_world)
+
+
+You can find a slightly more elaborate version of this code in the file
+``examples/wsgi.py``.
+
+.. automodule:: eventlet.wsgi
+	:members:
+
+.. _wsgi_ssl:
+
+SSL
+---
+
+Creating a secure server is only slightly more involved than the base example.  All that's needed is to pass an SSL-wrapped socket to the :func:`~eventlet.wsgi.server` method::
+
+    wsgi.server(eventlet.wrap_ssl(eventlet.listen(('', 8090)),
+                                  certfile='cert.crt',
+                                  keyfile='private.key',
+                                  server_side=True),
+                hello_world)
+
+Applications can detect whether they are inside a secure server by the value of the ``env['wsgi.url_scheme']`` environment variable.
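+
+For example, a handler might branch on the scheme (a minimal sketch)::
+
+    def hello_world(env, start_response):
+        secure = env['wsgi.url_scheme'] == 'https'
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return ['Hello, %s world!\r\n' % ('secure' if secure else 'plain')]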
+
+
+Non-Standard Extension to Support Post Hooks
+--------------------------------------------
+Eventlet's WSGI server supports a non-standard extension to the WSGI
+specification where :samp:`env['eventlet.posthooks']` contains an array of
+`post hooks` that will be called after fully sending a response. Each post hook
+is a tuple of :samp:`(func, args, kwargs)` and the `func` will be called with
+the WSGI environment dictionary, followed by the `args` and then the `kwargs`
+in the post hook.
+
+For example::
+
+    from eventlet import wsgi
+    import eventlet
+
+    def hook(env, arg1, arg2, kwarg3=None, kwarg4=None):
+        print('Hook called: %s %s %s %s %s' % (env, arg1, arg2, kwarg3, kwarg4))
+
+    def hello_world(env, start_response):
+        env['eventlet.posthooks'].append(
+            (hook, ('arg1', 'arg2'), {'kwarg3': 3, 'kwarg4': 4}))
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return ['Hello, World!\r\n']
+
+    wsgi.server(eventlet.listen(('', 8090)), hello_world)
+
+The above code will print the WSGI environment and the other passed function
+arguments for every request processed.
+
+Post hooks are useful when code needs to be executed after a response has been
+fully sent to the client (or when the client disconnects early). One example is
+for more accurate logging of bandwidth used, as client disconnects use less
+bandwidth than the actual Content-Length.
+
+
+"100 Continue" Response Headers
+-------------------------------
+
+Eventlet's WSGI server supports sending (optional) headers with HTTP "100 Continue"
+provisional responses.  This is useful where a WSGI server expects to complete a
+PUT request as a single HTTP request/response pair, and also wants to communicate
+back to the client as part of the same HTTP transaction.  For example, the server
+may pass a hint in a header of the accompanying "100 Continue" response indicating
+whether or not it can accept encrypted data payloads, so the client can make the
+encrypted-vs-unencrypted decision before starting to send the data.
+
+This works well for WSGI servers, as the WSGI specification mandates the HTTP
+expect/continue mechanism (PEP 333).
+
+To define the "100 Continue" response headers, one may call
+:func:`set_hundred_continue_response_header` on :samp:`env['wsgi.input']`
+as shown in the following example::
+
+    from eventlet import wsgi
+    import eventlet
+
+    def wsgi_app(env, start_response):
+        # Define "100 Continue" response headers
+        env['wsgi.input'].set_hundred_continue_response_headers(
+            [('Hundred-Continue-Header-1', 'H1'),
+             ('Hundred-Continue-Header-k', 'Hk')])
+        # The following read() causes a "100 Continue" response to be
+        # sent to the client.  Headers 'Hundred-Continue-Header-1' and
+        # 'Hundred-Continue-Header-k' are sent with the response,
+        # following the "HTTP/1.1 100 Continue\r\n" status line
+        text = env['wsgi.input'].read()
+        start_response('200 OK', [('Content-Length', str(len(text)))])
+        return [text]
+
+You can find a more elaborate example in the file:
+``tests/wsgi_test.py``, :func:`test_024a_expect_100_continue_with_headers`.
+
+
+Per HTTP RFC 7231 (http://tools.ietf.org/html/rfc7231#section-6.2) a client is
+required to be able to process one or more 100 continue responses.  A sample
+use case might be a user protocol where the server may want to use a 100-continue
+response to indicate to a client that it is working on a request and the
+client should not time out.
+
+To support multiple 100-continue responses, eventlet's wsgi module exports
+the API :func:`send_hundred_continue_response`.
+
+Sample use cases for chunked and non-chunked HTTP scenarios are included
+in the wsgi test case ``tests/wsgi_test.py``,
+:func:`test_024b_expect_100_continue_with_headers_multiple_chunked` and
+:func:`test_024c_expect_100_continue_with_headers_multiple_nonchunked`.
+

+ 30 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/modules/zmq.rst

@@ -0,0 +1,30 @@
+:mod:`eventlet.green.zmq` -- ØMQ support
+========================================
+
+:mod:`pyzmq <zmq>` [1]_ is a python binding to the C++ ØMQ [2]_ library written in Cython [3]_.
+:mod:`eventlet.green.zmq` is a greenthread-aware version of `pyzmq`.
+
+.. automodule:: eventlet.green.zmq
+    :show-inheritance:
+
+.. currentmodule:: eventlet.green.zmq
+
+.. autoclass:: Context
+    :show-inheritance:
+
+    .. automethod:: socket
+
+.. autoclass:: Socket
+    :show-inheritance:
+    :inherited-members:
+
+    .. automethod:: recv
+
+    .. automethod:: send
+
+.. module:: zmq
+
+
+.. [1] http://github.com/zeromq/pyzmq
+.. [2] http://www.zeromq.com
+.. [3] http://www.cython.org

+ 70 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/patching.rst

@@ -0,0 +1,70 @@
+Greening The World
+==================
+
+One of the challenges of writing a library like Eventlet is that the built-in networking libraries don't natively support the sort of cooperative yielding that we need.  What we must do instead is patch standard library modules in certain key places so that they do cooperatively yield.  We've in the past considered doing this automatically upon importing Eventlet, but have decided against that course of action because it is un-Pythonic to change the behavior of module A simply by importing module B.
+
+Therefore, the application using Eventlet must explicitly green the world for itself, using one or both of the convenient methods provided.
+
+.. _import-green:
+
+Import Green
+--------------
+
+The first way of greening an application is to import networking-related libraries from the ``eventlet.green`` package.  It contains libraries that have the same interfaces as common standard ones, but they are modified to behave well with green threads.  Using this method is a good engineering practice, because the true dependencies are apparent in every file::
+
+  from eventlet.green import socket
+  from eventlet.green import threading
+  from eventlet.green import asyncore
+  
+This works best if every library can be imported green in this manner.  If ``eventlet.green`` lacks a module (for example, non-python-standard modules), then the :func:`~eventlet.patcher.import_patched` function can come to the rescue.  It is a replacement for the builtin import statement that greens any module on import.
+
+.. function:: eventlet.patcher.import_patched(module_name, *additional_modules, **kw_additional_modules)
+
+    Imports a module in a greened manner, so that the module's use of networking libraries like socket will use Eventlet's green versions instead.  The only required argument is the name of the module to be imported::
+    
+        import eventlet
+        httplib2 = eventlet.import_patched('httplib2')
+        
+    Under the hood, it works by temporarily swapping out the "normal" versions of the libraries in sys.modules for an eventlet.green equivalent.  When the import of the to-be-patched module completes, the state of sys.modules is restored.  Therefore, if the patched module contains the statement 'import socket', import_patched will have it reference eventlet.green.socket.  One weakness of this approach is that it doesn't work for late binding (i.e. imports that happen during runtime).  Late binding of imports is fortunately rarely done (it's slow and against `PEP-8 <http://www.python.org/dev/peps/pep-0008/>`_), so in most cases import_patched will work just fine.
+    
+    One other aspect of import_patched is the ability to specify exactly which modules are patched.  Doing so may provide a slight performance benefit since only the needed modules are imported, whereas import_patched with no arguments imports a bunch of modules in case they're needed.  The *additional_modules* and *kw_additional_modules* arguments are both sequences of name/module pairs.  Either or both can be used::
+    
+        from eventlet.green import socket
+        from eventlet.green import SocketServer        
+        BaseHTTPServer = eventlet.import_patched('BaseHTTPServer',
+                                ('socket', socket),
+                                ('SocketServer', SocketServer))
+        BaseHTTPServer = eventlet.import_patched('BaseHTTPServer',
+                                socket=socket, SocketServer=SocketServer)
+
+.. _monkey-patch:
+
+Monkeypatching the Standard Library
+----------------------------------------
+
+The other way of greening an application is simply to monkeypatch the standard
+library.  This has the disadvantage of appearing quite magical, but the advantage of avoiding the late-binding problem.
+
+.. function:: eventlet.patcher.monkey_patch(os=None, select=None, socket=None, thread=None, time=None, psycopg=None)
+
+    This function monkeypatches the key system modules by replacing their key elements with green equivalents.  If no arguments are specified, everything is patched::
+    
+        import eventlet
+        eventlet.monkey_patch()
+
+    The keyword arguments afford some control over which modules are patched, in case that's important.  Most patch the single module of the same name (e.g. time=True means that the time module is patched [time.sleep is patched by eventlet.sleep]).  The exceptions to this rule are *socket*, which also patches the :mod:`ssl` module if present; and *thread*, which patches :mod:`thread`, :mod:`threading`, and :mod:`Queue`.
+    
+    Here's an example of using monkey_patch to patch only a few modules::
+    
+        import eventlet
+        eventlet.monkey_patch(socket=True, select=True)
+         
+    It is important to call :func:`~eventlet.patcher.monkey_patch` as early in the lifetime of the application as possible.  Try to do it as one of the first lines in the main module.  The reason for this is that sometimes there is a class that inherits from a class that needs to be greened -- e.g. a class that inherits from socket.socket -- and inheritance is done at import time, so the monkeypatching should happen before the derived class is defined.  It's safe to call monkey_patch multiple times.
+
+    The psycopg monkeypatching relies on Daniele Varrazzo's green psycopg2 branch; see `the announcement <https://lists.secondlife.com/pipermail/eventletdev/2010-April/000800.html>`_ for more information.
+
+.. function:: eventlet.patcher.is_monkey_patched(module)
+
+    Returns whether or not the specified module is currently monkeypatched.  *module* can either be the module itself or the module's name.
+
+    This check is based entirely on the name of the module, so if you import a module some other way than with the import keyword (including :func:`~eventlet.patcher.import_patched`), is_monkey_patched might not be correct about that particular module.
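+
+    For example (a quick sketch; the output assumes the patch call succeeded)::
+
+        >>> import eventlet
+        >>> eventlet.monkey_patch(socket=True)
+        >>> eventlet.patcher.is_monkey_patched('socket')
+        True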

+ 92 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/ssl.rst

@@ -0,0 +1,92 @@
+Using SSL With Eventlet
+========================
+
+Eventlet makes it easy to use non-blocking SSL sockets.  If you're using Python 2.6 or later, you're all set: eventlet wraps the built-in ssl module.  If on Python 2.5 or 2.4, you have to install pyOpenSSL_ to use eventlet.
+
+In either case, the ``green`` modules handle SSL sockets transparently, just like their standard counterparts.  As an example, :mod:`eventlet.green.urllib2` can be used to fetch https urls in as non-blocking a fashion as you please::
+
+    from eventlet.green import urllib2
+    from eventlet import spawn
+    bodies = [spawn(urllib2.urlopen, url)
+         for url in ("https://secondlife.com","https://google.com")]
+    for b in bodies:
+        print(b.wait().read())
+
+
+With Python 2.6
+----------------
+
+To use ssl sockets directly in Python 2.6, use :mod:`eventlet.green.ssl`, which is a non-blocking wrapper around the standard Python :mod:`ssl` module, and which has the same interface.  See the standard documentation for instructions on use.
+
+With Python 2.5 or Earlier
+---------------------------
+
+Prior to Python 2.6, there is no :mod:`ssl`, so SSL support is much weaker.  Eventlet relies on pyOpenSSL to implement its SSL support on these older versions, so be sure to install pyOpenSSL, or you'll get an ImportError whenever your system tries to make an SSL connection.
+
+Once pyOpenSSL is installed, you can then use the ``eventlet.green`` modules, like :mod:`eventlet.green.httplib` to fetch https urls.  You can also use :func:`eventlet.green.socket.ssl`, which is a nonblocking wrapper for :func:`socket.ssl`.
+
+PyOpenSSL
+----------
+
+:mod:`eventlet.green.OpenSSL` has exactly the same interface as pyOpenSSL_ `(docs) <http://pyopenssl.sourceforge.net/pyOpenSSL.html/>`_, and works in all versions of Python.  This module is much more powerful than :func:`socket.ssl`, and may have some advantages over :mod:`ssl`, depending on your needs.
+
+For testing purposes, first create a self-signed certificate using the following commands::
+
+    $ openssl genrsa 1024 > server.key
+    $ openssl req -new -x509 -nodes -sha1 -days 365 -key server.key > server.cert 
+
+Keep the private key and the self-signed certificate in the same directory as `server.py` and `client.py` for simplicity's sake.
+
+Here's an example of a server (`server.py`) ::
+
+    from eventlet.green import socket
+    from eventlet.green.OpenSSL import SSL
+
+    # insecure context, only for example purposes
+    context = SSL.Context(SSL.SSLv23_METHOD)
+    # Pass server's private key created
+    context.use_privatekey_file('server.key')
+    # Pass self-signed certificate created
+    context.use_certificate_file('server.cert')
+
+    # create underlying green socket and wrap it in ssl
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    connection = SSL.Connection(context, sock)
+
+    # configure as server
+    connection.set_accept_state()
+    connection.bind(('127.0.0.1', 8443))
+    connection.listen(50)
+
+    # accept one client connection then close up shop
+    client_conn, addr = connection.accept()
+    print(client_conn.read(100))
+    client_conn.shutdown()
+    client_conn.close()
+    connection.close()
+
+Here's an example of a client (`client.py`)::
+
+    import socket
+    # Create socket
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    # Connect to server
+    s.connect(('127.0.0.1', 8443))
+    sslSocket = socket.ssl(s)
+    print(repr(sslSocket.server()))
+    print(repr(sslSocket.issuer()))
+    sslSocket.write('Hello secure socket\n')
+    # Close client
+    s.close()
+
+To run the example, start the server in one terminal::
+
+    $ python server.py
+
+then run the client in another terminal::
+
+    $ python client.py
+
+.. _pyOpenSSL: https://launchpad.net/pyopenssl

+ 94 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/testing.rst

@@ -0,0 +1,94 @@
+Testing Eventlet
+================
+
+Eventlet is tested using `Nose <http://somethingaboutorange.com/mrl/projects/nose/>`_.  To run tests, simply install nose, and then, in the eventlet tree, do:
+
+.. code-block:: sh
+
+  $ python setup.py test
+
+If you want access to all the nose plugins via command line, you can run:
+
+.. code-block:: sh
+
+  $ python setup.py nosetests
+
+Lastly, you can just use nose directly if you want:
+
+.. code-block:: sh
+
+  $ nosetests
+
+That's it!  The output from running nose is the same as unittest's output, if the entire directory was one big test file.
+
+Many tests are skipped based on environmental factors; for example, it makes no sense to test kqueue-specific functionality when your OS does not support it.  These are printed as S's during execution, and in the summary printed after the tests run it will tell you how many were skipped.
+
+Doctests
+--------
+
+To run the doctests included in many of the eventlet modules, use this command:
+
+.. code-block :: sh
+
+  $ nosetests --with-doctest eventlet/*.py
+
+Currently there are 16 doctests.
+
+Standard Library Tests
+----------------------
+
+Eventlet provides the ability to test itself with the standard Python networking tests.  This verifies that the libraries it wraps work at least as well as the standard ones do.  The directory tests/stdlib contains a bunch of stubs that import the standard lib tests from your system and run them.  If you do not have any tests in your python distribution, they'll simply fail to import.
+
+There's a convenience module called all.py designed to handle the impedance mismatch between Nose and the standard tests:
+
+.. code-block:: sh
+
+  $ nosetests tests/stdlib/all.py
+
+That will run all the tests, though the output will be a little weird because it will look like Nose is running about 20 tests, each of which consists of a bunch of sub-tests.  Not all test modules are present in all versions of Python, so there will be an occasional printout of "Not importing %s, it doesn't exist in this installation/version of Python".
+
+If you see "Ran 0 tests in 0.001s", it means that your Python installation lacks its own tests.  This is usually the case for Linux distributions.  One way to get the missing tests is to download a source tarball (of the same version you have installed on your system!) and copy its Lib/test directory into the correct place on your PYTHONPATH.
+
+
+Testing Eventlet Hubs
+---------------------
+
+When you run the tests, Eventlet will use the most appropriate hub for the current platform to do its dispatch.  It's sometimes useful when making changes to Eventlet to test those changes on hubs other than the default.  You can do this with the ``EVENTLET_HUB`` environment variable.
+
+.. code-block:: sh
+
+ $ EVENTLET_HUB=epolls nosetests
+
+See :ref:`understanding_hubs` for the full list of hubs.
+
+
+Writing Tests
+-------------
+
+What follows are some notes on writing tests, in no particular order.
+
+The filename convention when writing a test for module `foo` is to name the test `foo_test.py`.  We don't yet have a convention for tests that are of finer granularity, but a sensible one might be `foo_class_test.py`.
+
+If you are writing a test that involves a client connecting to a spawned server, it is best to not use a hardcoded port because that makes it harder to parallelize tests.  Instead bind the server to 0, and then look up its port when connecting the client, like this::
+
+  server_sock = eventlet.listen(('127.0.0.1', 0))
+  client_sock = eventlet.connect(('localhost', server_sock.getsockname()[1]))
+
+Coverage
+--------
+
+Coverage.py is an awesome tool for evaluating how much code was exercised by unit tests.  Nose supports it if both are installed, so it's easy to generate coverage reports for eventlet.  Here's how:
+
+.. code-block:: sh
+
+ nosetests --with-coverage --cover-package=eventlet
+
+After running the tests to completion, this will emit a huge wodge of module names and line numbers.  For some reason, the ``--cover-inclusive`` option breaks everything rather than serving its purpose of limiting the coverage to the local files, so don't use that.
+
+The html option is quite useful because it generates nicely-formatted HTML files that are much easier to read than line-number soup.  Here's a command that generates the annotation, dumping the html files into a directory called "cover":
+
+.. code-block:: sh
+
+  coverage html -d cover --omit='tempmod,<console>,tests'
+
+(``tempmod`` and ``console`` are omitted because they get thrown away at the completion of their unit tests and coverage.py isn't smart enough to detect this.)

+ 30 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/threading.rst

@@ -0,0 +1,30 @@
+Threads
+========
+
+Eventlet is thread-safe and can be used in conjunction with normal Python threads.  The way this works is that coroutines are confined to their 'parent' Python thread.  It's like each thread contains its own little world of coroutines that can switch between themselves but not between coroutines in other threads.
+
+.. image:: /images/threading_illustration.png
+
+You can only communicate cross-thread using the "real" thread primitives and pipes.  Fortunately, there's little reason to use threads for concurrency when you're already using coroutines.
+
+The vast majority of the times you'll want to use threads are to wrap some operation that is not "green", such as a C library that uses its own OS calls to do socket operations.  The :mod:`~eventlet.tpool` module is provided to make these uses simpler.
+
+The optional :ref:`pyevent hub <understanding_hubs>` is not compatible with threads.
+
+Tpool - Simple thread pool
+---------------------------
+
+The simplest thing to do with :mod:`~eventlet.tpool` is to :func:`~eventlet.tpool.execute` a function with it.  The function will be run in a random thread in the pool, while the calling coroutine blocks on its completion::
+
+ >>> import thread
+ >>> from eventlet import tpool
+ >>> def my_func(starting_ident):
+ ...     print("running in new thread:", starting_ident != thread.get_ident())
+ ...
+ >>> tpool.execute(my_func, thread.get_ident())
+ running in new thread: True
+
+By default there are 20 threads in the pool, but you can configure this by setting the environment variable ``EVENTLET_THREADPOOL_SIZE`` to the desired pool size before importing tpool.
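+
+For example, to run an application with fifty pool threads (``myapp.py`` stands in for your own program)::
+
+  $ EVENTLET_THREADPOOL_SIZE=50 python myapp.py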
+
+.. automodule:: eventlet.tpool
+	:members:

+ 29 - 0
desktop/core/ext-py/eventlet-0.21.0/doc/zeromq.rst

@@ -0,0 +1,29 @@
+Zeromq
+######
+
+What is ØMQ?
+============
+
+"A ØMQ socket is what you get when you take a normal TCP socket, inject it with a mix of radioactive isotopes stolen
+from a secret Soviet atomic research project, bombard it with 1950-era cosmic rays, and put it into the hands of a drug-addled
+comic book author with a badly-disguised fetish for bulging muscles clad in spandex."
+
+Key differences to conventional sockets
+=======================================
+
+Generally speaking, conventional sockets present a synchronous interface to either connection-oriented reliable byte streams (SOCK_STREAM),
+or connection-less unreliable datagrams (SOCK_DGRAM). In comparison, 0MQ sockets present an abstraction of an asynchronous message queue,
+with the exact queueing semantics depending on the socket type in use. Where conventional sockets transfer streams of bytes or discrete datagrams,
+0MQ sockets transfer discrete messages.
+
+0MQ sockets being asynchronous means that the timings of the physical connection setup and teardown,
+reconnect and effective delivery are transparent to the user and organized by 0MQ itself.
+Further, messages may be queued in the event that a peer is unavailable to receive them.
+
+Conventional sockets allow only strict one-to-one (two peers), many-to-one (many clients, one server),
+or in some cases one-to-many (multicast) relationships. With the exception of ZMQ::PAIR,
+0MQ sockets may be connected to multiple endpoints using connect(),
+while simultaneously accepting incoming connections from multiple endpoints bound to the socket using bind(), thus allowing many-to-many relationships.
+
+API documentation
+=================
+
+ØMQ support is provided in the :mod:`eventlet.green.zmq` module.
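+
+A minimal request/reply sketch (the socket types and ``ipc`` endpoint are
+illustrative; this assumes pyzmq is installed)::
+
+    import eventlet
+    from eventlet.green import zmq
+
+    CTX = zmq.Context(1)
+
+    def echo_server():
+        # REP socket: receive a message, send it straight back
+        sock = CTX.socket(zmq.REP)
+        sock.bind("ipc:///tmp/zmq-demo")
+        while True:
+            sock.send(sock.recv())
+
+    eventlet.spawn(echo_server)
+
+    # REQ socket: send a request, block (greenly) for the reply
+    sock = CTX.socket(zmq.REQ)
+    sock.connect("ipc:///tmp/zmq-demo")
+    sock.send(b"hello")
+    print(sock.recv())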

+ 60 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/__init__.py

@@ -0,0 +1,60 @@
+import os
+
+
+version_info = (0, 21, 0)
+__version__ = '.'.join(map(str, version_info))
+# This is to make Debian packaging easier, it ignores import
+# errors of greenlet so that the packager can still at least
+# access the version.  Also this makes easy_install a little quieter
+if os.environ.get('EVENTLET_IMPORT_VERSION_ONLY') != '1':
+    from eventlet import convenience
+    from eventlet import event
+    from eventlet import greenpool
+    from eventlet import greenthread
+    from eventlet import patcher
+    from eventlet import queue
+    from eventlet import semaphore
+    from eventlet import support
+    from eventlet import timeout
+    import greenlet
+
+    connect = convenience.connect
+    listen = convenience.listen
+    serve = convenience.serve
+    StopServe = convenience.StopServe
+    wrap_ssl = convenience.wrap_ssl
+
+    Event = event.Event
+
+    GreenPool = greenpool.GreenPool
+    GreenPile = greenpool.GreenPile
+
+    sleep = greenthread.sleep
+    spawn = greenthread.spawn
+    spawn_n = greenthread.spawn_n
+    spawn_after = greenthread.spawn_after
+    kill = greenthread.kill
+
+    import_patched = patcher.import_patched
+    monkey_patch = patcher.monkey_patch
+
+    Queue = queue.Queue
+
+    Semaphore = semaphore.Semaphore
+    CappedSemaphore = semaphore.CappedSemaphore
+    BoundedSemaphore = semaphore.BoundedSemaphore
+
+    Timeout = timeout.Timeout
+    with_timeout = timeout.with_timeout
+    wrap_is_timeout = timeout.wrap_is_timeout
+    is_timeout = timeout.is_timeout
+
+    getcurrent = greenlet.greenlet.getcurrent
+
+    # deprecated
+    TimeoutError, exc_after, call_after_global = (
+        support.wrap_deprecated(old, new)(fun) for old, new, fun in (
+            ('TimeoutError', 'Timeout', Timeout),
+            ('exc_after', 'greenthread.exc_after', greenthread.exc_after),
+            ('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global),
+        ))

+ 136 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/backdoor.py

@@ -0,0 +1,136 @@
+from __future__ import print_function
+
+from code import InteractiveConsole
+import errno
+import socket
+import sys
+import traceback
+
+import eventlet
+from eventlet import hubs
+from eventlet.support import greenlets, get_errno
+
+try:
+    sys.ps1
+except AttributeError:
+    sys.ps1 = '>>> '
+try:
+    sys.ps2
+except AttributeError:
+    sys.ps2 = '... '
+
+
+class FileProxy(object):
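+    # Wrap the console's file-like socket object: claim to be a tty so the
+    # interpreter prints prompts, flush after every write, and normalize
+    # CRLF line endings coming from the remote client.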
+    def __init__(self, f):
+        self.f = f
+
+    def isatty(self):
+        return True
+
+    def flush(self):
+        pass
+
+    def write(self, data, *a, **kw):
+        self.f.write(data, *a, **kw)
+        self.f.flush()
+
+    def readline(self, *a):
+        return self.f.readline(*a).replace('\r\n', '\n')
+
+    def __getattr__(self, attr):
+        return getattr(self.f, attr)
+
+
+# @@tavis: the `locals` args below mask the built-in function.  Should
+# be renamed.
+class SocketConsole(greenlets.greenlet):
+    def __init__(self, desc, hostport, locals):
+        self.hostport = hostport
+        self.locals = locals
+        # mangle the socket
+        self.desc = FileProxy(desc)
+        greenlets.greenlet.__init__(self)
+
+    def run(self):
+        try:
+            console = InteractiveConsole(self.locals)
+            console.interact()
+        finally:
+            self.switch_out()
+            self.finalize()
+
+    def switch(self, *args, **kw):
+        self.saved = sys.stdin, sys.stderr, sys.stdout
+        sys.stdin = sys.stdout = sys.stderr = self.desc
+        greenlets.greenlet.switch(self, *args, **kw)
+
+    def switch_out(self):
+        sys.stdin, sys.stderr, sys.stdout = self.saved
+
+    def finalize(self):
+        # restore the state of the socket
+        self.desc = None
+        if len(self.hostport) >= 2:
+            host = self.hostport[0]
+            port = self.hostport[1]
+            print("backdoor closed to %s:%s" % (host, port,))
+        else:
+            print('backdoor closed')
+
+
+def backdoor_server(sock, locals=None):
+    """ Blocking function that runs a backdoor server on the socket *sock*,
+    accepting connections and running backdoor consoles for each client that
+    connects.
+
+    The *locals* argument is a dictionary that will be included in the locals()
+    of the interpreters.  It can be convenient to stick important application
+    variables in here.
+    """
+    listening_on = sock.getsockname()
+    if sock.family == socket.AF_INET:
+        # Expand result to IP + port
+        listening_on = '%s:%s' % listening_on
+    elif sock.family == socket.AF_INET6:
+        ip, port, _, _ = listening_on
+        listening_on = '%s:%s' % (ip, port,)
+    # No action needed if sock.family == socket.AF_UNIX
+
+    print("backdoor server listening on %s" % (listening_on,))
+    try:
+        try:
+            while True:
+                socketpair = sock.accept()
+                backdoor(socketpair, locals)
+        except socket.error as e:
+            # Broken pipe means it was shutdown
+            if get_errno(e) != errno.EPIPE:
+                raise
+    finally:
+        sock.close()
+
+
+def backdoor(conn_info, locals=None):
+    """Sets up an interactive console on a socket with a single connected
+    client.  This does not block the caller, as it spawns a new greenlet to
+    handle the console.  This is meant to be called from within an accept loop
+    (such as backdoor_server).
+    """
+    conn, addr = conn_info
+    if conn.family == socket.AF_INET:
+        host, port = addr
+        print("backdoor to %s:%s" % (host, port))
+    elif conn.family == socket.AF_INET6:
+        host, port, _, _ = addr
+        print("backdoor to %s:%s" % (host, port))
+    else:
+        print('backdoor opened')
+    fl = conn.makefile("rw")
+    console = SocketConsole(fl, addr, locals)
+    hub = hubs.get_hub()
+    hub.schedule_call_global(0, console.switch)
+
+
+if __name__ == '__main__':
+    backdoor_server(eventlet.listen(('127.0.0.1', 9000)), {})

+ 157 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/convenience.py

@@ -0,0 +1,157 @@
+import sys
+
+from eventlet import greenio
+from eventlet import greenpool
+from eventlet import greenthread
+from eventlet.green import socket
+from eventlet.support import greenlets as greenlet
+
+
+def connect(addr, family=socket.AF_INET, bind=None):
+    """Convenience function for opening client sockets.
+
+    :param addr: Address of the server to connect to.  For TCP sockets, this is a (host, port) tuple.
+    :param family: Socket family, optional.  See :mod:`socket` documentation for available families.
+    :param bind: Local address to bind to, optional.
+    :return: The connected green socket object.
+    """
+    sock = socket.socket(family, socket.SOCK_STREAM)
+    if bind is not None:
+        sock.bind(bind)
+    sock.connect(addr)
+    return sock
+
+
+def listen(addr, family=socket.AF_INET, backlog=50):
+    """Convenience function for opening server sockets.  This
+    socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.
+
+    Sets SO_REUSEADDR on the socket to save on annoyance.
+
+    :param addr: Address to listen on.  For TCP sockets, this is a (host, port)  tuple.
+    :param family: Socket family, optional.  See :mod:`socket` documentation for available families.
+    :param backlog:
+
+        The maximum number of queued connections. Should be at least 1; the maximum
+        value is system-dependent.
+
+    :return: The listening green socket object.
+    """
+    sock = socket.socket(family, socket.SOCK_STREAM)
+    if sys.platform[:3] != "win":
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    if hasattr(socket, 'SO_REUSEPORT'):
+        # NOTE(zhengwei): linux kernel >= 3.9
+        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+    sock.bind(addr)
+    sock.listen(backlog)
+    return sock
+
+
+class StopServe(Exception):
+    """Exception class used for quitting :func:`~eventlet.serve` gracefully."""
+    pass
+
+
+def _stop_checker(t, server_gt, conn):
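+    # Wait for the handler greenthread to finish, always closing its client
+    # socket; if the handler died with an unexpected exception, kill the
+    # server greenthread with that same exception.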
+    try:
+        try:
+            t.wait()
+        finally:
+            conn.close()
+    except greenlet.GreenletExit:
+        pass
+    except Exception:
+        greenthread.kill(server_gt, *sys.exc_info())
+
+
+def serve(sock, handle, concurrency=1000):
+    """Runs a server on the supplied socket.  Calls the function *handle* in a
+    separate greenthread for every incoming client connection.  *handle* takes
+    two arguments: the client socket object, and the client address::
+
+        def myhandle(client_sock, client_addr):
+            print("client connected", client_addr)
+
+        eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)
+
+    Returning from *handle* closes the client socket.
+
+    :func:`serve` blocks the calling greenthread; it won't return until
+    the server completes.  If you desire an immediate return,
+    spawn a new greenthread for :func:`serve`.
+
+    Any uncaught exceptions raised in *handle* are raised as exceptions
+    from :func:`serve`, terminating the server, so be sure to be aware of the
+    exceptions your application can raise.  The return value of *handle* is
+    ignored.
+
+    Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
+    server -- that's the only way to get the server() function to return rather
+    than raise.
+
+    The value in *concurrency* controls the maximum number of
+    greenthreads that will be open at any time handling requests.  When
+    the server hits the concurrency limit, it stops accepting new
+    connections until the existing ones complete.
+    """
+    pool = greenpool.GreenPool(concurrency)
+    server_gt = greenthread.getcurrent()
+
+    while True:
+        try:
+            conn, addr = sock.accept()
+            gt = pool.spawn(handle, conn, addr)
+            gt.link(_stop_checker, server_gt, conn)
+            conn, addr, gt = None, None, None
+        except StopServe:
+            return
+
+
+def wrap_ssl(sock, *a, **kw):
+    """Convenience function for converting a regular socket into an
+    SSL socket.  Has the same interface as :func:`ssl.wrap_socket`,
+    but can also use PyOpenSSL. Though, note that it ignores the
+    `cert_reqs`, `ssl_version`, `ca_certs`, `do_handshake_on_connect`,
+    and `suppress_ragged_eofs` arguments when using PyOpenSSL.
+
+    The preferred idiom is to call wrap_ssl directly on the creation
+    method, e.g., ``wrap_ssl(connect(addr))`` or
+    ``wrap_ssl(listen(addr), server_side=True)``. This way there is
+    no "naked" socket sitting around to accidentally corrupt the SSL
+    session.
+
+    :return: Green SSL object.
+    """
+    return wrap_ssl_impl(sock, *a, **kw)
+
+try:
+    from eventlet.green import ssl
+    wrap_ssl_impl = ssl.wrap_socket
+except ImportError:
+    # trying PyOpenSSL
+    try:
+        from eventlet.green.OpenSSL import SSL
+    except ImportError:
+        def wrap_ssl_impl(*a, **kw):
+            raise ImportError(
+                "To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.6 or later.")
+    else:
+        def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False,
+                          cert_reqs=None, ssl_version=None, ca_certs=None,
+                          do_handshake_on_connect=True,
+                          suppress_ragged_eofs=True, ciphers=None):
+            # theoretically the ssl_version could be respected in this line
+            context = SSL.Context(SSL.SSLv23_METHOD)
+            if certfile is not None:
+                context.use_certificate_file(certfile)
+            if keyfile is not None:
+                context.use_privatekey_file(keyfile)
+            context.set_verify(SSL.VERIFY_NONE, lambda *x: True)
+
+            connection = SSL.Connection(context, sock)
+            if server_side:
+                connection.set_accept_state()
+            else:
+                connection.set_connect_state()
+            return connection

+ 53 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/corolocal.py

@@ -0,0 +1,53 @@
+import weakref
+
+from eventlet import greenthread
+
+__all__ = ['get_ident', 'local']
+
+
+def get_ident():
+    """ Returns ``id()`` of current greenlet.  Useful for debugging."""
+    return id(greenthread.getcurrent())
+
+
+# the entire purpose of this class is to store off the constructor
+# arguments in a local variable without calling __init__ directly
+class _localbase(object):
+    __slots__ = '_local__args', '_local__greens'
+
+    def __new__(cls, *args, **kw):
+        self = object.__new__(cls)
+        object.__setattr__(self, '_local__args', (args, kw))
+        object.__setattr__(self, '_local__greens', weakref.WeakKeyDictionary())
+        if (args or kw) and (cls.__init__ is object.__init__):
+            raise TypeError("Initialization arguments are not supported")
+        return self
+
+
+def _patch(thrl):
+    greens = object.__getattribute__(thrl, '_local__greens')
+    # until we can store the localdict on greenlets themselves,
+    # we store it in _local__greens on the local object
+    cur = greenthread.getcurrent()
+    if cur not in greens:
+        # must be the first time we've seen this greenlet, call __init__
+        greens[cur] = {}
+        cls = type(thrl)
+        if cls.__init__ is not object.__init__:
+            args, kw = object.__getattribute__(thrl, '_local__args')
+            thrl.__init__(*args, **kw)
+    object.__setattr__(thrl, '__dict__', greens[cur])
+
+
+class local(_localbase):
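+    # Greenthread-local storage, analogous to threading.local: attributes
+    # set on an instance are visible only within the greenthread that set
+    # them.  A quick sketch:
+    #   data = local()
+    #   data.x = 1   # only the current greenthread sees data.x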
+    def __getattribute__(self, attr):
+        _patch(self)
+        return object.__getattribute__(self, attr)
+
+    def __setattr__(self, attr, value):
+        _patch(self)
+        return object.__setattr__(self, attr, value)
+
+    def __delattr__(self, attr):
+        _patch(self)
+        return object.__delattr__(self, attr)

+ 61 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/coros.py

@@ -0,0 +1,61 @@
+from __future__ import print_function
+
+from eventlet import event as _event
+
+
+class metaphore(object):
+    """This is sort of an inverse semaphore: a counter that starts at 0 and
+    waits only if nonzero. It's used to implement a "wait for all" scenario.
+
+    >>> from eventlet import coros, spawn_n
+    >>> count = coros.metaphore()
+    >>> count.wait()
+    >>> def decrementer(count, id):
+    ...     print("{0} decrementing".format(id))
+    ...     count.dec()
+    ...
+    >>> _ = spawn_n(decrementer, count, 'A')
+    >>> _ = spawn_n(decrementer, count, 'B')
+    >>> count.inc(2)
+    >>> count.wait()
+    A decrementing
+    B decrementing
+    """
+
+    def __init__(self):
+        self.counter = 0
+        self.event = _event.Event()
+        # send() right away, else we'd wait on the default 0 count!
+        self.event.send()
+
+    def inc(self, by=1):
+        """Increment our counter. If this transitions the counter from zero to
+        nonzero, make any subsequent :meth:`wait` call wait.
+        """
+        assert by > 0
+        self.counter += by
+        if self.counter == by:
+            # If we just incremented self.counter by 'by', and the new count
+            # equals 'by', then the old value of self.counter was 0.
+            # Transitioning from 0 to a nonzero value means wait() must
+            # actually wait.
+            self.event.reset()
+
+    def dec(self, by=1):
+        """Decrement our counter. If this transitions the counter from nonzero
+        to zero, a current or subsequent wait() call need no longer wait.
+        """
+        assert by > 0
+        self.counter -= by
+        if self.counter <= 0:
+            # Don't leave self.counter < 0, that will screw things up in
+            # future calls.
+            self.counter = 0
+            # Transitioning from nonzero to 0 means wait() need no longer wait.
+            self.event.send()
+
+    def wait(self):
+        """Suspend the caller only if our count is nonzero. In that case,
+        resume the caller once the count decrements to zero again.
+        """
+        self.event.wait()

+ 602 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/dagpool.py

@@ -0,0 +1,602 @@
+# @file   dagpool.py
+# @author Nat Goodspeed
+# @date   2016-08-08
+# @brief  Provide DAGPool class
+
+from eventlet.event import Event
+from eventlet import greenthread
+from eventlet.support import six
+import collections
+
+
+# value distinguished from any other Python value including None
+_MISSING = object()
+
+
+class Collision(Exception):
+    """
+    DAGPool raises Collision when you try to launch two greenthreads with the
+    same key, or post() a result for a key that belongs to a running
+    greenthread, or post() twice for the same key. As with KeyError,
+    str(collision) names the key in question.
+    """
+    pass
+
+
+class PropagateError(Exception):
+    """
+    When a DAGPool greenthread terminates with an exception instead of
+    returning a result, attempting to retrieve its value raises
+    PropagateError.
+
+    Attributes:
+
+    key
+        the key of the greenthread which raised the exception
+
+    exc
+        the exception object raised by the greenthread
+    """
+    def __init__(self, key, exc):
+        # initialize base class with a reasonable string message
+        msg = "PropagateError({0}): {1}: {2}" \
+              .format(key, exc.__class__.__name__, exc)
+        super(PropagateError, self).__init__(msg)
+        self.msg = msg
+        # Unless we set args, this is unpickleable:
+        # https://bugs.python.org/issue1692335
+        self.args = (key, exc)
+        self.key = key
+        self.exc = exc
+
+    def __str__(self):
+        return self.msg
+
+
+class DAGPool(object):
+    """
+    A DAGPool is a pool that constrains greenthreads, not by max concurrency,
+    but by data dependencies.
+
+    This is a way to implement general DAG dependencies. A simple dependency
+    tree (flowing in either direction) can straightforwardly be implemented
+    using recursion and (e.g.)
+    :meth:`GreenThread.imap() <eventlet.greenthread.GreenThread.imap>`.
+    What gets complicated is when a given node depends on several other nodes
+    as well as contributing to several other nodes.
+
+    With DAGPool, you concurrently launch all applicable greenthreads; each
+    will proceed as soon as it has all required inputs. The DAG is implicit in
+    which items are required by each greenthread.
+
+    Each greenthread is launched in a DAGPool with a key: any value that can
+    serve as a Python dict key. The caller also specifies an iterable of other
+    keys on which this greenthread depends. This iterable may be empty.
+
+    The greenthread callable must accept (key, results), where:
+
+    key
+        is its own key
+
+    results
+        is an iterable of (key, value) pairs.
+
+    A newly-launched DAGPool greenthread is entered immediately, and can
+    perform any necessary setup work. At some point it will iterate over the
+    (key, value) pairs from the passed 'results' iterable. Doing so blocks the
+    greenthread until a value is available for each of the keys specified in
+    its initial dependencies iterable. These (key, value) pairs are delivered
+    in chronological order, *not* the order in which they are initially
+    specified: each value will be delivered as soon as it becomes available.
+
+    The value returned by a DAGPool greenthread becomes the value for its
+    key, which unblocks any other greenthreads waiting on that key.
+
+    If a DAGPool greenthread terminates with an exception instead of returning
+    a value, attempting to retrieve the value raises :class:`PropagateError`,
+    which binds the key of the original greenthread and the original
+    exception. Unless the greenthread attempting to retrieve the value handles
+    PropagateError, that exception will in turn be wrapped in a PropagateError
+    of its own, and so forth. The code that ultimately handles PropagateError
+    can follow the chain of PropagateError.exc attributes to discover the flow
+    of that exception through the DAG of greenthreads.
+
+    External greenthreads may also interact with a DAGPool. See :meth:`wait_each`,
+    :meth:`waitall`, :meth:`post`.
+
+    It is not recommended to constrain external DAGPool producer greenthreads
+    in a :class:`GreenPool <eventlet.greenpool.GreenPool>`: it may be hard to
+    provably avoid deadlock.
+
+    .. automethod:: __init__
+    .. automethod:: __getitem__
+    """
+
+    _Coro = collections.namedtuple("_Coro", ("greenthread", "pending"))
+
+    def __init__(self, preload={}):
+        """
+        DAGPool can be prepopulated with an initial dict or iterable of (key,
+        value) pairs. These (key, value) pairs are of course immediately
+        available for any greenthread that depends on any of those keys.
+        """
+        try:
+            # If a dict is passed, copy it. Don't risk a subsequent
+            # modification to passed dict affecting our internal state.
+            iteritems = six.iteritems(preload)
+        except AttributeError:
+            # Not a dict, just an iterable of (key, value) pairs
+            iteritems = preload
+
+        # Load the initial dict
+        self.values = dict(iteritems)
+
+        # track greenthreads
+        self.coros = {}
+
+        # The key to blocking greenthreads is the Event.
+        self.event = Event()
+
+    def waitall(self):
+        """
+        waitall() blocks the calling greenthread until there is a value for
+        every DAGPool greenthread launched by :meth:`spawn`. It returns a dict
+        containing all :class:`preload data <DAGPool>`, all data from
+        :meth:`post` and all values returned by spawned greenthreads.
+
+        See also :meth:`wait`.
+        """
+        # waitall() is an alias for compatibility with GreenPool
+        return self.wait()
+
+    def wait(self, keys=_MISSING):
+        """
+        *keys* is an optional iterable of keys. If you omit the argument, it
+        waits for all the keys from :class:`preload data <DAGPool>`, from
+        :meth:`post` calls and from :meth:`spawn` calls: in other words, all
+        the keys of which this DAGPool is aware.
+
+        wait() blocks the calling greenthread until all of the relevant keys
+        have values. wait() returns a dict whose keys are the relevant keys,
+        and whose values come from the *preload* data, from values returned by
+        DAGPool greenthreads or from :meth:`post` calls.
+
+        If a DAGPool greenthread terminates with an exception, wait() will
+        raise :class:`PropagateError` wrapping that exception. If more than
+        one greenthread terminates with an exception, it is indeterminate
+        which one wait() will raise.
+
+        If an external greenthread posts a :class:`PropagateError` instance,
+        wait() will raise that PropagateError. If more than one greenthread
+        posts PropagateError, it is indeterminate which one wait() will raise.
+
+        See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
+        """
+        # This is mostly redundant with wait_each() functionality.
+        return dict(self.wait_each(keys))
+
+    def wait_each(self, keys=_MISSING):
+        """
+        *keys* is an optional iterable of keys. If you omit the argument, it
+        waits for all the keys from :class:`preload data <DAGPool>`, from
+        :meth:`post` calls and from :meth:`spawn` calls: in other words, all
+        the keys of which this DAGPool is aware.
+
+        wait_each() is a generator producing (key, value) pairs as a value
+        becomes available for each requested key. wait_each() blocks the
+        calling greenthread until the next value becomes available. If the
+        DAGPool was prepopulated with values for any of the relevant keys, of
+        course those can be delivered immediately without waiting.
+
+        Delivery order is intentionally decoupled from the initial sequence of
+        keys: each value is delivered as soon as it becomes available. If
+        multiple keys are available at the same time, wait_each() delivers
+        each of the ready ones in arbitrary order before blocking again.
+
+        The DAGPool does not distinguish between a value returned by one of
+        its own greenthreads and one provided by a :meth:`post` call or *preload* data.
+
+        The wait_each() generator terminates (raises StopIteration) when all
+        specified keys have been delivered. Thus, typical usage might be:
+
+        ::
+
+            for key, value in dagpool.wait_each(keys):
+                # process this ready key and value
+            # continue processing now that we've gotten values for all keys
+
+        By implication, if you pass wait_each() an empty iterable of keys, it
+        returns immediately without yielding anything.
+
+        If the value to be delivered is a :class:`PropagateError` exception object, the
+        generator raises that PropagateError instead of yielding it.
+
+        See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
+        """
+        # Build a local set() and then call _wait_each().
+        return self._wait_each(self._get_keyset_for_wait_each(keys))
+
+    def wait_each_success(self, keys=_MISSING):
+        """
+        wait_each_success() filters results so that only success values are
+        yielded. In other words, unlike :meth:`wait_each`, wait_each_success()
+        will not raise :class:`PropagateError`. Not every provided (or
+        defaulted) key will necessarily be represented, though naturally the
+        generator will not finish until all have completed.
+
+        In all other respects, wait_each_success() behaves like :meth:`wait_each`.
+        """
+        for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
+            if not isinstance(value, PropagateError):
+                yield key, value
+
+    def wait_each_exception(self, keys=_MISSING):
+        """
+        wait_each_exception() filters results so that only exceptions are
+        yielded. Not every provided (or defaulted) key will necessarily be
+        represented, though naturally the generator will not finish until
+        all have completed.
+
+        Unlike other DAGPool methods, wait_each_exception() simply yields
+        :class:`PropagateError` instances as values rather than raising them.
+
+        In all other respects, wait_each_exception() behaves like :meth:`wait_each`.
+        """
+        for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
+            if isinstance(value, PropagateError):
+                yield key, value
+
+    def _get_keyset_for_wait_each(self, keys):
+        """
+        wait_each(), wait_each_success() and wait_each_exception() promise
+        that if you pass an iterable of keys, the method will wait for results
+        from those keys -- but if you omit the keys argument, the method will
+        wait for results from all known keys. This helper implements that
+        distinction, returning a set() of the relevant keys.
+        """
+        if keys is not _MISSING:
+            return set(keys)
+        else:
+            # keys arg omitted -- use all the keys we know about
+            return set(six.iterkeys(self.coros)) | set(six.iterkeys(self.values))
+
+    def _wait_each(self, pending):
+        """
+        When _wait_each() encounters a value of PropagateError, it raises it.
+
+        In all other respects, _wait_each() behaves like _wait_each_raw().
+        """
+        for key, value in self._wait_each_raw(pending):
+            yield key, self._value_or_raise(value)
+
+    @staticmethod
+    def _value_or_raise(value):
+        # Most methods attempting to deliver PropagateError should raise that
+        # instead of simply returning it.
+        if isinstance(value, PropagateError):
+            raise value
+        return value
+
+    def _wait_each_raw(self, pending):
+        """
+        pending is a set() of keys for which we intend to wait. THIS SET WILL
+        BE DESTRUCTIVELY MODIFIED: as each key acquires a value, that key will
+        be removed from the passed 'pending' set.
+
+        _wait_each_raw() does not treat a PropagateError instance specially:
+        it will be yielded to the caller like any other value.
+
+        In all other respects, _wait_each_raw() behaves like wait_each().
+        """
+        while True:
+            # Before even waiting, show caller any (key, value) pairs that
+            # are already available. Copy 'pending' because we want to be able
+            # to remove items from the original set while iterating.
+            for key in pending.copy():
+                value = self.values.get(key, _MISSING)
+                if value is not _MISSING:
+                    # found one, it's no longer pending
+                    pending.remove(key)
+                    yield (key, value)
+
+            if not pending:
+                # Once we've yielded all the caller's keys, done.
+                break
+
+            # There are still more keys pending, so wait.
+            self.event.wait()
+
+    def spawn(self, key, depends, function, *args, **kwds):
+        """
+        Launch the passed *function(key, results, ...)* as a greenthread,
+        passing it:
+
+        - the specified *key*
+        - an iterable of (key, value) pairs
+        - whatever other positional args or keywords you specify.
+
+        Iterating over the *results* iterable behaves like calling
+        :meth:`wait_each(depends) <DAGPool.wait_each>`.
+
+        Returning from *function()* behaves like
+        :meth:`post(key, return_value) <DAGPool.post>`.
+
+        If *function()* terminates with an exception, that exception is wrapped
+        in :class:`PropagateError` with the greenthread's *key* and (effectively) posted
+        as the value for that key. Attempting to retrieve that value will
+        raise that PropagateError.
+
+        Thus, if the greenthread with key 'a' terminates with an exception,
+        and greenthread 'b' depends on 'a', when greenthread 'b' attempts to
+        iterate through its *results* argument, it will encounter
+        PropagateError. So by default, an uncaught exception will propagate
+        through all the downstream dependencies.
+
+        If you pass :meth:`spawn` a key already passed to spawn() or :meth:`post`, spawn()
+        raises :class:`Collision`.
+        """
+        if key in self.coros or key in self.values:
+            raise Collision(key)
+
+        # The order is a bit tricky. First construct the set() of keys.
+        pending = set(depends)
+        # It's important that we pass to _wait_each() the same 'pending' set()
+        # that we store in self.coros for this key. The generator-iterator
+        # returned by _wait_each() becomes the function's 'results' iterable.
+        newcoro = greenthread.spawn(self._wrapper, function, key,
+                                    self._wait_each(pending),
+                                    *args, **kwds)
+        # Also capture the same (!) set in the new _Coro object for this key.
+        # We must be able to observe ready keys being removed from the set.
+        self.coros[key] = self._Coro(newcoro, pending)
+
+    def _wrapper(self, function, key, results, *args, **kwds):
+        """
+        This wrapper runs the top-level function in a DAGPool greenthread,
+        posting its return value (or PropagateError) to the DAGPool.
+        """
+        try:
+            # call our passed function
+            result = function(key, results, *args, **kwds)
+        except Exception as err:
+            # Wrap any exception it may raise in a PropagateError.
+            result = PropagateError(key, err)
+        finally:
+            # function() has returned (or terminated with an exception). We no
+            # longer need to track this greenthread in self.coros. Remove it
+            # first so post() won't complain about a running greenthread.
+            del self.coros[key]
+
+        try:
+            # as advertised, try to post() our return value
+            self.post(key, result)
+        except Collision:
+            # if we've already post()ed a result, oh well
+            pass
+
+        # also, in case anyone cares...
+        return result
+
+    def spawn_many(self, depends, function, *args, **kwds):
+        """
+        spawn_many() accepts a single *function* whose parameters are the same
+        as for :meth:`spawn`.
+
+        The difference is that spawn_many() accepts a dependency dict
+        *depends*. A new greenthread is spawned for each key in the dict. That
+        dict key's value should be an iterable of other keys on which this
+        greenthread depends.
+
+        If the *depends* dict contains any key already passed to :meth:`spawn`
+        or :meth:`post`, spawn_many() raises :class:`Collision`. It is
+        indeterminate how many of the other keys in *depends* will have
+        successfully spawned greenthreads.
+        """
+        # Iterate over 'depends' items, relying on self.spawn() not to
+        # context-switch so no one can modify 'depends' along the way.
+        for key, deps in six.iteritems(depends):
+            self.spawn(key, deps, function, *args, **kwds)
+
+    def kill(self, key):
+        """
+        Kill the greenthread that was spawned with the specified *key*.
+
+        If no such greenthread was spawned, raise KeyError.
+        """
+        # let KeyError, if any, propagate
+        self.coros[key].greenthread.kill()
+        # once killed, remove it
+        del self.coros[key]
+
+    def post(self, key, value, replace=False):
+        """
+        post(key, value) stores the passed *value* for the passed *key*. It
+        then causes each greenthread blocked on its results iterable, or on
+        :meth:`wait_each(keys) <DAGPool.wait_each>`, to check for new values.
+        A waiting greenthread might not literally resume on every single
+        post() of a relevant key, but the first post() of a relevant key
+        ensures that it will resume eventually, and when it does it will catch
+        up with all relevant post() calls.
+
+        Calling post(key, value) when there is a running greenthread with that
+        same *key* raises :class:`Collision`. If you must post(key, value) instead of
+        letting the greenthread run to completion, you must first call
+        :meth:`kill(key) <DAGPool.kill>`.
+
+        The DAGPool implicitly post()s the return value from each of its
+        greenthreads. But a greenthread may explicitly post() a value for its
+        own key, which will cause its return value to be discarded.
+
+        Calling post(key, value, replace=False) (the default *replace*) when a
+        value for that key has already been posted, by any means, raises
+        :class:`Collision`.
+
+        Calling post(key, value, replace=True) when a value for that key has
+        already been posted, by any means, replaces the previously-stored
+        value. However, that may make it complicated to reason about the
+        behavior of greenthreads waiting on that key.
+
+        After a post(key, value1) followed by post(key, value2, replace=True),
+        it is unspecified which pending :meth:`wait_each([key...]) <DAGPool.wait_each>`
+        calls (or greenthreads iterating over *results* involving that key)
+        will observe *value1* versus *value2*. It is guaranteed that
+        subsequent wait_each([key...]) calls (or greenthreads spawned after
+        that point) will observe *value2*.
+
+        A successful call to
+        post(key, :class:`PropagateError(key, ExceptionSubclass) <PropagateError>`)
+        ensures that any subsequent attempt to retrieve that key's value will
+        raise that PropagateError instance.
+        """
+        # First, check if we're trying to post() to a key with a running
+        # greenthread.
+        # A DAGPool greenthread is explicitly permitted to post() to its
+        # OWN key.
+        coro = self.coros.get(key, _MISSING)
+        if coro is not _MISSING and coro.greenthread is not greenthread.getcurrent():
+            # oh oh, trying to post a value for running greenthread from
+            # some other greenthread
+            raise Collision(key)
+
+        # Here, either we're posting a value for a key with no greenthread or
+        # we're posting from that greenthread itself.
+
+        # Has somebody already post()ed a value for this key?
+        # Unless replace == True, this is a problem.
+        if key in self.values and not replace:
+            raise Collision(key)
+
+        # Either we've never before posted a value for this key, or we're
+        # posting with replace == True.
+
+        # update our database
+        self.values[key] = value
+        # and wake up pending waiters
+        self.event.send()
+        # The comment in Event.reset() says: "it's better to create a new
+        # event rather than reset an old one". Okay, fine. We do want to be
+        # able to support new waiters, so create a new Event.
+        self.event = Event()
+
+    def __getitem__(self, key):
+        """
+        __getitem__(key) (aka dagpool[key]) blocks until *key* has a value,
+        then delivers that value.
+        """
+        # This is a degenerate case of wait_each(). Construct a tuple
+        # containing only this 'key'. wait_each() will yield exactly one (key,
+        # value) pair. Return just its value.
+        for _, value in self.wait_each((key,)):
+            return value
+
+    def get(self, key, default=None):
+        """
+        get() returns the value for *key*. If *key* does not yet have a value,
+        get() returns *default*.
+        """
+        return self._value_or_raise(self.values.get(key, default))
+
+    def keys(self):
+        """
+        Return a snapshot tuple of keys for which we currently have values.
+        """
+        # Explicitly return a copy rather than an iterator: don't assume our
+        # caller will finish iterating before new values are posted.
+        return tuple(six.iterkeys(self.values))
+
+    def items(self):
+        """
+        Return a snapshot tuple of currently-available (key, value) pairs.
+        """
+        # Don't assume our caller will finish iterating before new values are
+        # posted.
+        return tuple((key, self._value_or_raise(value))
+                     for key, value in six.iteritems(self.values))
+
+    def running(self):
+        """
+        Return number of running DAGPool greenthreads. This includes
+        greenthreads blocked while iterating through their *results* iterable,
+        that is, greenthreads waiting on values from other keys.
+        """
+        return len(self.coros)
+
+    def running_keys(self):
+        """
+        Return keys for running DAGPool greenthreads. This includes
+        greenthreads blocked while iterating through their *results* iterable,
+        that is, greenthreads waiting on values from other keys.
+        """
+        # return snapshot; don't assume caller will finish iterating before we
+        # next modify self.coros
+        return tuple(six.iterkeys(self.coros))
+
+    def waiting(self):
+        """
+        Return number of waiting DAGPool greenthreads, that is, greenthreads
+        still waiting on values from other keys. This explicitly does *not*
+        include external greenthreads waiting on :meth:`wait`,
+        :meth:`waitall`, :meth:`wait_each`.
+        """
+        # n.b. if Event would provide a count of its waiters, we could say
+        # something about external greenthreads as well.
+        # The logic to determine this count is exactly the same as the general
+        # waiting_for() call.
+        return len(self.waiting_for())
+
+    # Use _MISSING instead of None as the default 'key' param so we can permit
+    # None as a supported key.
+    def waiting_for(self, key=_MISSING):
+        """
+        waiting_for(key) returns a set() of the keys for which the DAGPool
+        greenthread spawned with that *key* is still waiting. If you pass a
+        *key* for which no greenthread was spawned, waiting_for() raises
+        KeyError.
+
+        waiting_for() without argument returns a dict. Its keys are the keys
+        of DAGPool greenthreads still waiting on one or more values. In the
+        returned dict, the value of each such key is the set of other keys for
+        which that greenthread is still waiting.
+
+        This method allows diagnosing a "hung" DAGPool. If certain
+        greenthreads are making no progress, it's possible that they are
+        waiting on keys for which there is no greenthread and no :meth:`post` data.
+        """
+        # We may have greenthreads whose 'pending' entry indicates they're
+        # waiting on some keys even though values have now been posted for
+        # some or all of those keys, because those greenthreads have not yet
+        # regained control since values were posted. So make a point of
+        # excluding values that are now available.
+        available = set(six.iterkeys(self.values))
+
+        if key is not _MISSING:
+            # waiting_for(key) is semantically different than waiting_for().
+            # It's just that they both seem to want the same method name.
+            coro = self.coros.get(key, _MISSING)
+            if coro is _MISSING:
+                # Hmm, no running greenthread with this key. But was there
+                # EVER a greenthread with this key? If not, let KeyError
+                # propagate.
+                self.values[key]
+                # Oh good, there's a value for this key. Either the
+                # greenthread finished, or somebody posted a value. Just say
+                # the greenthread isn't waiting for anything.
+                return set()
+            else:
+                # coro is the _Coro for the running greenthread with the
+                # specified key.
+                return coro.pending - available
+
+        # This is a waiting_for() call, i.e. a general query rather than for a
+        # specific key.
+
+        # Start by iterating over (key, coro) pairs in self.coros. Generate
+        # (key, pending) pairs in which 'pending' is the set of keys on which
+        # the greenthread believes it's waiting, minus the set of keys that
+        # are now available. Filter out any pair in which 'pending' is empty,
+        # that is, that greenthread will be unblocked next time it resumes.
+        # Make a dict from those pairs.
+        return dict((key, pending)
+                    for key, pending in ((key, (coro.pending - available))
+                                         for key, coro in six.iteritems(self.coros))
+                    if pending)
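
Taken together, spawn_many(), the results iterable and waitall() support a
dependency-driven workflow. A minimal sketch, not taken from the module
itself (the keys and the summing node function are illustrative):

    from eventlet.dagpool import DAGPool

    def node(key, results):
        # blocks until every upstream dependency has posted a value
        return sum(value for _, value in results) + 1

    pool = DAGPool(preload={'seed': 0})
    # 'a' depends on 'seed'; 'b' and 'c' depend on 'a'; 'd' needs both
    pool.spawn_many(
        {'a': ('seed',), 'b': ('a',), 'c': ('a',), 'd': ('b', 'c')}, node)
    # {'seed': 0, 'a': 1, 'b': 2, 'c': 2, 'd': 5} (key order may vary)
    print(pool.waitall())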

+ 461 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/db_pool.py

@@ -0,0 +1,461 @@
+from __future__ import print_function
+
+from collections import deque
+from contextlib import contextmanager
+import sys
+import time
+
+from eventlet.pools import Pool
+from eventlet import timeout
+from eventlet import hubs
+from eventlet.hubs.timer import Timer
+from eventlet.greenthread import GreenThread
+
+
+_MISSING = object()
+
+
+class ConnectTimeout(Exception):
+    pass
+
+
+def cleanup_rollback(conn):
+    conn.rollback()
+
+
+class BaseConnectionPool(Pool):
+    def __init__(self, db_module,
+                 min_size=0, max_size=4,
+                 max_idle=10, max_age=30,
+                 connect_timeout=5,
+                 cleanup=cleanup_rollback,
+                 *args, **kwargs):
+        """
+        Constructs a pool with at least *min_size* connections and at most
+        *max_size* connections.  Uses *db_module* to construct new connections.
+
+        The *max_idle* parameter determines how long pooled connections can
+        remain idle, in seconds.  After *max_idle* seconds have elapsed
+        without the connection being used, the pool closes the connection.
+
+        *max_age* is how long any particular connection is allowed to live.
+        Connections that have been open for longer than *max_age* seconds are
+        closed, regardless of idle time.  If *max_age* is 0, all connections are
+        closed on return to the pool, reducing it to a concurrency limiter.
+
+        *connect_timeout* is the duration in seconds that the pool will wait
+        before timing out on connect() to the database.  If triggered, the
+        timeout will raise a ConnectTimeout from get().
+
+        The remainder of the arguments are used as parameters to the
+        *db_module*'s connection constructor.
+        """
+        assert db_module
+        self._db_module = db_module
+        self._args = args
+        self._kwargs = kwargs
+        self.max_idle = max_idle
+        self.max_age = max_age
+        self.connect_timeout = connect_timeout
+        self._expiration_timer = None
+        self.cleanup = cleanup
+        super(BaseConnectionPool, self).__init__(min_size=min_size,
+                                                 max_size=max_size,
+                                                 order_as_stack=True)
+
+    def _schedule_expiration(self):
+        """Sets up a timer that will call _expire_old_connections when the
+        oldest connection currently in the free pool is ready to expire.  This
+        is the earliest possible time that a connection could expire, thus, the
+        timer will be running as infrequently as possible without missing a
+        possible expiration.
+
+        If this function is called when a timer is already scheduled, it does
+        nothing.
+
+        If max_age or max_idle is 0, _schedule_expiration likewise does nothing.
+        """
+        if self.max_age == 0 or self.max_idle == 0:
+            # expiration is unnecessary because all connections will be expired
+            # on put
+            return
+
+        if (self._expiration_timer is not None
+                and not getattr(self._expiration_timer, 'called', False)):
+            # the next timer is already scheduled
+            return
+
+        try:
+            now = time.time()
+            self._expire_old_connections(now)
+            # the last item in the list, because of the stack ordering,
+            # is going to be the most-idle
+            idle_delay = (self.free_items[-1][0] - now) + self.max_idle
+            oldest = min([t[1] for t in self.free_items])
+            age_delay = (oldest - now) + self.max_age
+
+            next_delay = min(idle_delay, age_delay)
+        except (IndexError, ValueError):
+            # no free items, unschedule ourselves
+            self._expiration_timer = None
+            return
+
+        if next_delay > 0:
+            # set up a continuous self-calling loop
+            self._expiration_timer = Timer(next_delay, GreenThread(hubs.get_hub().greenlet).switch,
+                                           self._schedule_expiration, [], {})
+            self._expiration_timer.schedule()
+
+    def _expire_old_connections(self, now):
+        """Iterates through the open connections contained in the pool, closing
+        ones that have remained idle for longer than max_idle seconds, or have
+        been in existence for longer than max_age seconds.
+
+        *now* is the current time, as returned by time.time().
+        """
+        original_count = len(self.free_items)
+        expired = [
+            conn
+            for last_used, created_at, conn in self.free_items
+            if self._is_expired(now, last_used, created_at)]
+
+        new_free = [
+            (last_used, created_at, conn)
+            for last_used, created_at, conn in self.free_items
+            if not self._is_expired(now, last_used, created_at)]
+        self.free_items.clear()
+        self.free_items.extend(new_free)
+
+        # adjust the current size counter to account for expired
+        # connections
+        self.current_size -= original_count - len(self.free_items)
+
+        for conn in expired:
+            self._safe_close(conn, quiet=True)
+
+    def _is_expired(self, now, last_used, created_at):
+        """Returns true and closes the connection if it's expired.
+        """
+        if (self.max_idle <= 0 or self.max_age <= 0
+                or now - last_used > self.max_idle
+                or now - created_at > self.max_age):
+            return True
+        return False
+
+    def _unwrap_connection(self, conn):
+        """If the connection was wrapped by a subclass of
+        BaseConnectionWrapper and is still functional (as determined
+        by the __nonzero__, or __bool__ in python3, method), returns
+        the unwrapped connection.  If anything goes wrong with this
+        process, returns None.
+        """
+        base = None
+        try:
+            if conn:
+                base = conn._base
+                conn._destroy()
+            else:
+                base = None
+        except AttributeError:
+            pass
+        return base
+
+    def _safe_close(self, conn, quiet=False):
+        """Closes the (already unwrapped) connection, squelching any
+        exceptions.
+        """
+        try:
+            conn.close()
+        except AttributeError:
+            pass  # conn is None, or junk
+        except Exception:
+            if not quiet:
+                print("Connection.close raised: %s" % (sys.exc_info()[1]))
+
+    def get(self):
+        conn = super(BaseConnectionPool, self).get()
+
+        # None is a flag value that means that put got called with
+        # something it couldn't use
+        if conn is None:
+            try:
+                conn = self.create()
+            except Exception:
+                # unconditionally decrement current_size to release the
+                # reserved slot; even if there are waiters, doing a full put
+                # would incur a greenlet switch and thus lose the
+                # exception stack
+                self.current_size -= 1
+                raise
+
+        # if the call to get() draws from the free pool, it will come
+        # back as a tuple
+        if isinstance(conn, tuple):
+            _last_used, created_at, conn = conn
+        else:
+            created_at = time.time()
+
+        # wrap the connection so the consumer can call close() safely
+        wrapped = PooledConnectionWrapper(conn, self)
+        # annotating the wrapper so that when it gets put in the pool
+        # again, we'll know how old it is
+        wrapped._db_pool_created_at = created_at
+        return wrapped
+
+    def put(self, conn, cleanup=_MISSING):
+        created_at = getattr(conn, '_db_pool_created_at', 0)
+        now = time.time()
+        conn = self._unwrap_connection(conn)
+
+        if self._is_expired(now, now, created_at):
+            self._safe_close(conn, quiet=False)
+            conn = None
+        elif cleanup is not None:
+            if cleanup is _MISSING:
+                cleanup = self.cleanup
+            # by default, call rollback in case the connection is in the middle
+            # of a transaction. However, rollback has performance implications
+            # so optionally do nothing or call something else like ping
+            try:
+                if conn:
+                    cleanup(conn)
+            except Exception as e:
+                # we don't care what the exception was, we just know the
+                # connection is dead
+                print("WARNING: cleanup %s raised: %s" % (cleanup, e))
+                conn = None
+            except:
+                conn = None
+                raise
+
+        if conn is not None:
+            super(BaseConnectionPool, self).put((now, created_at, conn))
+        else:
+            # wake up any waiters with a flag value that indicates
+            # they need to manufacture a connection
+            if self.waiting() > 0:
+                super(BaseConnectionPool, self).put(None)
+            else:
+                # no waiters -- just change the size
+                self.current_size -= 1
+        self._schedule_expiration()
+
+    @contextmanager
+    def item(self, cleanup=_MISSING):
+        conn = self.get()
+        try:
+            yield conn
+        finally:
+            self.put(conn, cleanup=cleanup)
+
+    def clear(self):
+        """Close all connections that this pool still holds a reference to,
+        and removes all references to them.
+        """
+        if self._expiration_timer:
+            self._expiration_timer.cancel()
+        free_items, self.free_items = self.free_items, deque()
+        for item in free_items:
+            # Free items created using min_size>0 are not tuples.
+            conn = item[2] if isinstance(item, tuple) else item
+            self._safe_close(conn, quiet=True)
+            self.current_size -= 1
+
+    def __del__(self):
+        self.clear()
+
+
+class TpooledConnectionPool(BaseConnectionPool):
+    """A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
+    connections.
+    """
+
+    def create(self):
+        now = time.time()
+        return now, now, self.connect(
+            self._db_module, self.connect_timeout, *self._args, **self._kwargs)
+
+    @classmethod
+    def connect(cls, db_module, connect_timeout, *args, **kw):
+        t = timeout.Timeout(connect_timeout, ConnectTimeout())
+        try:
+            from eventlet import tpool
+            conn = tpool.execute(db_module.connect, *args, **kw)
+            return tpool.Proxy(conn, autowrap_names=('cursor',))
+        finally:
+            t.cancel()
+
+
+class RawConnectionPool(BaseConnectionPool):
+    """A pool which gives out plain database connections.
+    """
+
+    def create(self):
+        now = time.time()
+        return now, now, self.connect(
+            self._db_module, self.connect_timeout, *self._args, **self._kwargs)
+
+    @classmethod
+    def connect(cls, db_module, connect_timeout, *args, **kw):
+        t = timeout.Timeout(connect_timeout, ConnectTimeout())
+        try:
+            return db_module.connect(*args, **kw)
+        finally:
+            t.cancel()
+
+
+# default connection pool is the tpool one
+ConnectionPool = TpooledConnectionPool
+
+
+class GenericConnectionWrapper(object):
+    def __init__(self, baseconn):
+        self._base = baseconn
+
+    # Proxy all method calls to self._base
+    # FIXME: remove repetition; options to consider:
+    # * for name in (...):
+    #     setattr(class, name, lambda self, *a, **kw: getattr(self._base, name)(*a, **kw))
+    # * def __getattr__(self, name): if name in (...): return getattr(self._base, name)
+    # * other?
+    def __enter__(self):
+        return self._base.__enter__()
+
+    def __exit__(self, exc, value, tb):
+        return self._base.__exit__(exc, value, tb)
+
+    def __repr__(self):
+        return self._base.__repr__()
+
+    _proxy_funcs = (
+        'affected_rows',
+        'autocommit',
+        'begin',
+        'change_user',
+        'character_set_name',
+        'close',
+        'commit',
+        'cursor',
+        'dump_debug_info',
+        'errno',
+        'error',
+        'errorhandler',
+        'insert_id',
+        'literal',
+        'ping',
+        'query',
+        'rollback',
+        'select_db',
+        'server_capabilities',
+        'set_character_set',
+        'set_isolation_level',
+        'set_server_option',
+        'set_sql_mode',
+        'show_warnings',
+        'shutdown',
+        'sqlstate',
+        'stat',
+        'store_result',
+        'string_literal',
+        'thread_id',
+        'use_result',
+        'warning_count',
+    )
+for _proxy_fun in GenericConnectionWrapper._proxy_funcs:
+    # excess wrapper for early binding (closure by value)
+    def _wrapper(_proxy_fun=_proxy_fun):
+        def _proxy_method(self, *args, **kwargs):
+            return getattr(self._base, _proxy_fun)(*args, **kwargs)
+        _proxy_method.func_name = _proxy_fun
+        _proxy_method.__name__ = _proxy_fun
+        _proxy_method.__qualname__ = 'GenericConnectionWrapper.' + _proxy_fun
+        return _proxy_method
+    setattr(GenericConnectionWrapper, _proxy_fun, _wrapper(_proxy_fun))
+del GenericConnectionWrapper._proxy_funcs
+del _proxy_fun
+del _wrapper
+
+
+class PooledConnectionWrapper(GenericConnectionWrapper):
+    """A connection wrapper where:
+    - the close method returns the connection to the pool instead of closing it directly
+    - ``bool(conn)`` returns a reasonable value
+    - returns itself to the pool if it gets garbage collected
+    """
+
+    def __init__(self, baseconn, pool):
+        super(PooledConnectionWrapper, self).__init__(baseconn)
+        self._pool = pool
+
+    def __nonzero__(self):
+        return (hasattr(self, '_base') and bool(self._base))
+
+    __bool__ = __nonzero__
+
+    def _destroy(self):
+        self._pool = None
+        try:
+            del self._base
+        except AttributeError:
+            pass
+
+    def close(self):
+        """Return the connection to the pool, and remove the
+        reference to it so that you can't use it again through this
+        wrapper object.
+        """
+        if self and self._pool:
+            self._pool.put(self)
+        self._destroy()
+
+    def __del__(self):
+        # Calling self.close() here causes some issues if __del__ is
+        # called in the main coroutine, so for now it is disabled.
+        return
+        # self.close()
+
+
+class DatabaseConnector(object):
+    """
+    This is an object which will maintain a collection of database
+    connection pools on a per-host basis.
+    """
+
+    def __init__(self, module, credentials,
+                 conn_pool=None, *args, **kwargs):
+        """constructor
+        *module*
+            Database module to use.
+        *credentials*
+            Mapping of hostname to connect arguments (e.g. username and password)
+        """
+        assert module
+        self._conn_pool_class = conn_pool
+        if self._conn_pool_class is None:
+            self._conn_pool_class = ConnectionPool
+        self._module = module
+        self._args = args
+        self._kwargs = kwargs
+        # this is a map of hostname to username/password
+        self._credentials = credentials
+        self._databases = {}
+
+    def credentials_for(self, host):
+        if host in self._credentials:
+            return self._credentials[host]
+        else:
+            return self._credentials.get('default', None)
+
+    def get(self, host, dbname):
+        """Returns a ConnectionPool to the target host and schema.
+        """
+        key = (host, dbname)
+        if key not in self._databases:
+            new_kwargs = self._kwargs.copy()
+            new_kwargs['db'] = dbname
+            new_kwargs['host'] = host
+            new_kwargs.update(self.credentials_for(host))
+            dbpool = self._conn_pool_class(
+                self._module, *self._args, **new_kwargs)
+            self._databases[key] = dbpool
+
+        return self._databases[key]
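
In typical use one pool is built per database, and connections are borrowed
through get()/put() or the item() context manager. A minimal sketch, assuming
a MySQLdb-compatible driver; the host, credentials and query are illustrative:

    import MySQLdb
    from eventlet import db_pool

    pool = db_pool.ConnectionPool(
        MySQLdb, host='localhost', user='hue', passwd='secret', db='hue',
        max_size=8, max_idle=10)

    with pool.item() as conn:   # borrows a PooledConnectionWrapper
        cursor = conn.cursor()
        cursor.execute("SELECT 1")
        print(cursor.fetchall())
    # leaving the block put()s the connection back; rollback is the
    # default cleanup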

+ 174 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/debug.py

@@ -0,0 +1,174 @@
+"""The debug module contains utilities and functions for better
+debugging Eventlet-powered applications."""
+from __future__ import print_function
+
+import os
+import sys
+import linecache
+import re
+import inspect
+
+__all__ = ['spew', 'unspew', 'format_hub_listeners', 'format_hub_timers',
+           'hub_listener_stacks', 'hub_exceptions', 'tpool_exceptions',
+           'hub_prevent_multiple_readers', 'hub_timer_stacks',
+           'hub_blocking_detection']
+
+_token_splitter = re.compile(r'\W+')
+
+
+class Spew(object):
+
+    def __init__(self, trace_names=None, show_values=True):
+        self.trace_names = trace_names
+        self.show_values = show_values
+
+    def __call__(self, frame, event, arg):
+        if event == 'line':
+            lineno = frame.f_lineno
+            if '__file__' in frame.f_globals:
+                filename = frame.f_globals['__file__']
+                if (filename.endswith('.pyc') or
+                        filename.endswith('.pyo')):
+                    filename = filename[:-1]
+                name = frame.f_globals['__name__']
+                line = linecache.getline(filename, lineno)
+            else:
+                name = '[unknown]'
+                try:
+                    # getsourcelines() returns (lines, first_lineno), with
+                    # first_lineno == 0 for whole modules
+                    lines, first_lineno = inspect.getsourcelines(frame)
+                    line = lines[lineno - (first_lineno or 1)]
+                except IOError:
+                    line = 'Unknown code named [%s].  VM instruction #%d' % (
+                        frame.f_code.co_name, frame.f_lasti)
+            if self.trace_names is None or name in self.trace_names:
+                print('%s:%s: %s' % (name, lineno, line.rstrip()))
+                if not self.show_values:
+                    return self
+                details = []
+                tokens = _token_splitter.split(line)
+                for tok in tokens:
+                    if tok in frame.f_globals:
+                        details.append('%s=%r' % (tok, frame.f_globals[tok]))
+                    if tok in frame.f_locals:
+                        details.append('%s=%r' % (tok, frame.f_locals[tok]))
+                if details:
+                    print("\t%s" % ' '.join(details))
+        return self
+
+
+def spew(trace_names=None, show_values=False):
+    """Install a trace hook which writes incredibly detailed logs
+    about what code is being executed to stdout.
+    """
+    sys.settrace(Spew(trace_names, show_values))
+
+
+def unspew():
+    """Remove the trace hook installed by spew.
+    """
+    sys.settrace(None)
+
+
+def format_hub_listeners():
+    """ Returns a formatted string of the current listeners on the current
+    hub.  This can be useful in determining what's going on in the event system,
+    especially when used in conjunction with :func:`hub_listener_stacks`.
+    """
+    from eventlet import hubs
+    hub = hubs.get_hub()
+    result = ['READERS:']
+    for l in hub.get_readers():
+        result.append(repr(l))
+    result.append('WRITERS:')
+    for l in hub.get_writers():
+        result.append(repr(l))
+    return os.linesep.join(result)
+
+
+def format_hub_timers():
+    """ Returns a formatted string of the current timers on the current
+    hub.  This can be useful in determining what's going on in the event system,
+    especially when used in conjunction with :func:`hub_timer_stacks`.
+    """
+    from eventlet import hubs
+    hub = hubs.get_hub()
+    result = ['TIMERS:']
+    for l in hub.timers:
+        result.append(repr(l))
+    return os.linesep.join(result)
+
+
+def hub_listener_stacks(state=False):
+    """Toggles whether or not the hub records the stack when clients register
+    listeners on file descriptors.  This can be useful when trying to figure
+    out what the hub is up to at any given moment.  To inspect the stacks
+    of the current listeners, call :func:`format_hub_listeners` at critical
+    junctures in the application logic.
+    """
+    from eventlet import hubs
+    hubs.get_hub().set_debug_listeners(state)
+
+
+def hub_timer_stacks(state=False):
+    """Toggles whether or not the hub records the stack when timers are set.
+    To inspect the stacks of the current timers, call :func:`format_hub_timers`
+    at critical junctures in the application logic.
+    """
+    from eventlet.hubs import timer
+    timer._g_debug = state
+
+
+def hub_prevent_multiple_readers(state=True):
+    """Toggle prevention of multiple greenlets reading from a socket
+
+    When multiple greenlets read from the same socket it is often hard
+    to predict which greenlet will receive what data.  To achieve
+    resource sharing consider using ``eventlet.pools.Pool`` instead.
+
+    But if you really know what you are doing you can change the state
+    to ``False`` to stop the hub from protecting against this mistake.
+    """
+    from eventlet.hubs import hub
+    hub.g_prevent_multiple_readers = state
+
+
+def hub_exceptions(state=True):
+    """Toggles whether the hub prints exceptions that are raised from its
+    timers.  This can be useful to see how greenthreads are terminating.
+    """
+    from eventlet import hubs
+    hubs.get_hub().set_timer_exceptions(state)
+    from eventlet import greenpool
+    greenpool.DEBUG = state
+
+
+def tpool_exceptions(state=False):
+    """Toggles whether tpool itself prints exceptions that are raised from
+    functions that are executed in it, in addition to raising them like
+    it normally does."""
+    from eventlet import tpool
+    tpool.QUIET = not state
+
+
+def hub_blocking_detection(state=False, resolution=1):
+    """Toggles whether Eventlet makes an effort to detect blocking
+    behavior in an application.
+
+    It does this by telling the kernel to raise a SIGALRM after a
+    short timeout, and clearing the timeout every time the hub
+    greenlet is resumed.  Therefore, any code that runs for a long
+    time without yielding to the hub will get interrupted by the
+    blocking detector (don't use it in production!).
+
+    The *resolution* argument governs how long the SIGALRM timeout
+    waits, in seconds; because the implementation uses
+    :func:`signal.setitimer`, it may be specified as a floating-point
+    value.  The shorter the resolution, the greater the chance of
+    false positives.
+    """
+    from eventlet import hubs
+    assert resolution > 0
+    hubs.get_hub().debug_blocking = state
+    hubs.get_hub().debug_blocking_resolution = resolution
+    if not state:
+        hubs.get_hub().block_detect_post()
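
A minimal sketch of wiring these helpers into an application at startup; the
resolution value is illustrative and, as the docstring warns, blocking
detection belongs in development only:

    import eventlet
    from eventlet import debug

    # print exceptions raised from hub timers while diagnosing
    # dying greenthreads
    debug.hub_exceptions(True)

    # SIGALRM-based detection of code that blocks the hub for more
    # than half a second
    debug.hub_blocking_detection(True, resolution=0.5)

    def worker():
        eventlet.sleep(1)   # yields to the hub, so no alarm fires

    eventlet.spawn(worker).wait()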

+ 213 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/event.py

@@ -0,0 +1,213 @@
+from __future__ import print_function
+
+from eventlet import hubs
+from eventlet.support import greenlets as greenlet
+
+__all__ = ['Event']
+
+
+class NOT_USED:
+    def __repr__(self):
+        return 'NOT_USED'
+
+NOT_USED = NOT_USED()
+
+
+class Event(object):
+    """An abstraction where an arbitrary number of coroutines
+    can wait for one event from another.
+
+    Events are similar to a Queue that can only hold one item, but differ
+    in two important ways:
+
+    1. calling :meth:`send` never unschedules the current greenthread
+    2. :meth:`send` can only be called once; create a new event to send again.
+
+    They are good for communicating results between coroutines, and
+    are the basis for how
+    :meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
+    is implemented.
+
+    >>> from eventlet import event
+    >>> import eventlet
+    >>> evt = event.Event()
+    >>> def baz(b):
+    ...     evt.send(b + 1)
+    ...
+    >>> _ = eventlet.spawn_n(baz, 3)
+    >>> evt.wait()
+    4
+    """
+    _result = None
+    _exc = None
+
+    def __init__(self):
+        self._waiters = set()
+        self.reset()
+
+    def __str__(self):
+        params = (self.__class__.__name__, hex(id(self)),
+                  self._result, self._exc, len(self._waiters))
+        return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params
+
+    def reset(self):
+        # this is kind of a misfeature and doesn't work perfectly well,
+        # it's better to create a new event rather than reset an old one
+        # removing documentation so that we don't get new use cases for it
+        assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
+        self._result = NOT_USED
+        self._exc = None
+
+    def ready(self):
+        """ Return true if the :meth:`wait` call will return immediately.
+        Used to avoid waiting for things that might take a while to time out.
+        For example, you can put a bunch of events into a list, and then visit
+        them all repeatedly, calling :meth:`ready` until one returns ``True``,
+        and then you can :meth:`wait` on that one."""
+        return self._result is not NOT_USED
+
+    def has_exception(self):
+        return self._exc is not None
+
+    def has_result(self):
+        return self._result is not NOT_USED and self._exc is None
+
+    def poll(self, notready=None):
+        if self.ready():
+            return self.wait()
+        return notready
+
+    # QQQ make it return tuple (type, value, tb) instead of raising
+    # because
+    # 1) "poll" does not imply raising
+    # 2) it's better not to screw up caller's sys.exc_info() by default
+    #    (e.g. if the caller wants to call the function in except or finally)
+    def poll_exception(self, notready=None):
+        if self.has_exception():
+            return self.wait()
+        return notready
+
+    def poll_result(self, notready=None):
+        if self.has_result():
+            return self.wait()
+        return notready
+
+    def wait(self):
+        """Wait until another coroutine calls :meth:`send`.
+        Returns the value the other coroutine passed to
+        :meth:`send`.
+
+        >>> from eventlet import event
+        >>> import eventlet
+        >>> evt = event.Event()
+        >>> def wait_on():
+        ...    retval = evt.wait()
+        ...    print("waited for {0}".format(retval))
+        >>> _ = eventlet.spawn(wait_on)
+        >>> evt.send('result')
+        >>> eventlet.sleep(0)
+        waited for result
+
+        Returns immediately if the event has already
+        occurred.
+
+        >>> evt.wait()
+        'result'
+        """
+        current = greenlet.getcurrent()
+        if self._result is NOT_USED:
+            self._waiters.add(current)
+            try:
+                return hubs.get_hub().switch()
+            finally:
+                self._waiters.discard(current)
+        if self._exc is not None:
+            current.throw(*self._exc)
+        return self._result
+
+    def send(self, result=None, exc=None):
+        """Makes arrangements for the waiters to be woken with the
+        result and then returns immediately to the parent.
+
+        >>> from eventlet import event
+        >>> import eventlet
+        >>> evt = event.Event()
+        >>> def waiter():
+        ...     print('about to wait')
+        ...     result = evt.wait()
+        ...     print('waited for {0}'.format(result))
+        >>> _ = eventlet.spawn(waiter)
+        >>> eventlet.sleep(0)
+        about to wait
+        >>> evt.send('a')
+        >>> eventlet.sleep(0)
+        waited for a
+
+        It is an error to call :meth:`send` multiple times on the same event.
+
+        >>> evt.send('whoops')
+        Traceback (most recent call last):
+        ...
+        AssertionError: Trying to re-send() an already-triggered event.
+
+        Use :meth:`reset` between :meth:`send` calls to reuse an event object.
+        """
+        assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
+        self._result = result
+        if exc is not None and not isinstance(exc, tuple):
+            exc = (exc, )
+        self._exc = exc
+        hub = hubs.get_hub()
+        for waiter in self._waiters:
+            hub.schedule_call_global(
+                0, self._do_send, self._result, self._exc, waiter)
+
+    def _do_send(self, result, exc, waiter):
+        if waiter in self._waiters:
+            if exc is None:
+                waiter.switch(result)
+            else:
+                waiter.throw(*exc)
+
+    def send_exception(self, *args):
+        """Same as :meth:`send`, but sends an exception to waiters.
+
+        The arguments to send_exception are the same as the arguments
+        to ``raise``.  If a single exception object is passed in, it
+        will be re-raised when :meth:`wait` is called, generating a
+        new stacktrace.
+
+           >>> from eventlet import event
+           >>> evt = event.Event()
+           >>> evt.send_exception(RuntimeError())
+           >>> evt.wait()
+           Traceback (most recent call last):
+             File "<stdin>", line 1, in <module>
+             File "eventlet/event.py", line 120, in wait
+               current.throw(*self._exc)
+           RuntimeError
+
+        If it's important to preserve the entire original stack trace,
+        you must pass in the entire :func:`sys.exc_info` tuple.
+
+           >>> import sys
+           >>> evt = event.Event()
+           >>> try:
+           ...     raise RuntimeError()
+           ... except RuntimeError:
+           ...     evt.send_exception(*sys.exc_info())
+           ...
+           >>> evt.wait()
+           Traceback (most recent call last):
+             File "<stdin>", line 1, in <module>
+             File "eventlet/event.py", line 120, in wait
+               current.throw(*self._exc)
+             File "<stdin>", line 2, in <module>
+           RuntimeError
+
+        Note that doing so stores a traceback object directly on the
+        Event object, which may cause reference cycles. See the
+        :func:`sys.exc_info` documentation.
+        """
+        # the arguments are the same as for greenlet.throw
+        return self.send(None, args)
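
A minimal sketch of reusing an Event via reset(), composed from the send()/wait() semantics documented above (reset() is referenced in the send() docstring and belongs to the same Event class):

    import eventlet
    from eventlet import event

    evt = event.Event()

    def waiter(label):
        # wait() parks this green thread until send() supplies a result
        print(label, evt.wait())

    eventlet.spawn(waiter, 'first:')
    eventlet.sleep(0)      # let the waiter block on the event
    evt.send('one')
    eventlet.sleep(0)      # prints: first: one

    evt.reset()            # clear the result so send() may be called again
    eventlet.spawn(waiter, 'second:')
    eventlet.sleep(0)
    evt.send('two')
    eventlet.sleep(0)      # prints: second: two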

+ 16 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/BaseHTTPServer.py

@@ -0,0 +1,16 @@
+from eventlet import patcher
+from eventlet.green import socket
+from eventlet.green import SocketServer
+from eventlet.support import six
+
+patcher.inject(
+    'BaseHTTPServer' if six.PY2 else 'http.server',
+    globals(),
+    ('socket', socket),
+    ('SocketServer', SocketServer),
+    ('socketserver', SocketServer))
+
+del patcher
+
+if __name__ == '__main__':
+    test()
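
The injected module is a drop-in replacement for the stdlib one: its globals are copied wholesale, with the blocking socket modules swapped for green equivalents. A usage sketch (the handler class and port are illustrative, not part of the vendored file):

    from eventlet.green import BaseHTTPServer

    class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            # served on a green socket, so it cooperates with the hub
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'ok')

    # HTTPServer is the stdlib class, re-bound to eventlet.green.socket
    BaseHTTPServer.HTTPServer(('127.0.0.1', 8080), Handler).serve_forever()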

+ 19 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/CGIHTTPServer.py

@@ -0,0 +1,19 @@
+from eventlet import patcher
+from eventlet.green import BaseHTTPServer
+from eventlet.green import SimpleHTTPServer
+from eventlet.green import urllib
+from eventlet.green import select
+
+test = None  # bind prior to patcher.inject to silence pyflakes warning below
+patcher.inject(
+    'CGIHTTPServer',
+    globals(),
+    ('BaseHTTPServer', BaseHTTPServer),
+    ('SimpleHTTPServer', SimpleHTTPServer),
+    ('urllib', urllib),
+    ('select', select))
+
+del patcher
+
+if __name__ == '__main__':
+    test()  # pyflakes false alarm here unless test = None above

+ 37 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/MySQLdb.py

@@ -0,0 +1,37 @@
+__MySQLdb = __import__('MySQLdb')
+
+__all__ = __MySQLdb.__all__
+__patched__ = ["connect", "Connect", 'Connection', 'connections']
+
+from eventlet.patcher import slurp_properties
+slurp_properties(
+    __MySQLdb, globals(),
+    ignore=__patched__, srckeys=dir(__MySQLdb))
+
+from eventlet import tpool
+
+__orig_connections = __import__('MySQLdb.connections').connections
+
+
+def Connection(*args, **kw):
+    conn = tpool.execute(__orig_connections.Connection, *args, **kw)
+    return tpool.Proxy(conn, autowrap_names=('cursor',))
+connect = Connect = Connection
+
+
+# replicate the MySQLdb.connections module but with a tpooled Connection factory
+class MySQLdbConnectionsModule(object):
+    pass
+
+connections = MySQLdbConnectionsModule()
+for var in dir(__orig_connections):
+    if not var.startswith('__'):
+        setattr(connections, var, getattr(__orig_connections, var))
+connections.Connection = Connection
+
+cursors = __import__('MySQLdb.cursors').cursors
+converters = __import__('MySQLdb.converters').converters
+
+# TODO support instantiating cursors.FooCursor objects directly
+# TODO though this is a low priority, it would be nice if we supported
+# subclassing eventlet.green.MySQLdb.connections.Connection
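
Because the Connection factory runs through tpool.execute() and the result is wrapped in a tpool.Proxy (with cursor() autowrapped as well), each blocking MySQL call executes in the thread pool rather than stalling the hub. A usage sketch (connection parameters are placeholders):

    from eventlet.green import MySQLdb

    conn = MySQLdb.connect(host='localhost', user='hue',
                           passwd='secret', db='hue')
    cur = conn.cursor()       # proxied, per autowrap_names=('cursor',)
    cur.execute('SELECT 1')   # runs in a native thread via tpool
    print(cur.fetchall())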

+ 124 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/SSL.py

@@ -0,0 +1,124 @@
+from OpenSSL import SSL as orig_SSL
+from OpenSSL.SSL import *
+from eventlet.support import get_errno
+from eventlet import greenio
+from eventlet.hubs import trampoline
+import socket
+
+
+class GreenConnection(greenio.GreenSocket):
+    """ Nonblocking wrapper for SSL.Connection objects.
+    """
+
+    def __init__(self, ctx, sock=None):
+        if sock is not None:
+            fd = orig_SSL.Connection(ctx, sock)
+        else:
+            # if we're given a Connection object directly, use it;
+            # this is used in the inherited accept() method
+            fd = ctx
+        super(ConnectionType, self).__init__(fd)
+
+    def do_handshake(self):
+        """ Perform an SSL handshake (usually called after renegotiate or one of
+        set_accept_state or set_connect_state). This can raise the same exceptions as
+        send and recv. """
+        if self.act_non_blocking:
+            return self.fd.do_handshake()
+        while True:
+            try:
+                return self.fd.do_handshake()
+            except WantReadError:
+                trampoline(self.fd.fileno(),
+                           read=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+            except WantWriteError:
+                trampoline(self.fd.fileno(),
+                           write=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+
+    def dup(self):
+        raise NotImplementedError("Dup not supported on SSL sockets")
+
+    def makefile(self, mode='r', bufsize=-1):
+        raise NotImplementedError("Makefile not supported on SSL sockets")
+
+    def read(self, size):
+        """Works like a blocking call to SSL_read(), whose behavior is
+        described here:  http://www.openssl.org/docs/ssl/SSL_read.html"""
+        if self.act_non_blocking:
+            return self.fd.read(size)
+        while True:
+            try:
+                return self.fd.read(size)
+            except WantReadError:
+                trampoline(self.fd.fileno(),
+                           read=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+            except WantWriteError:
+                trampoline(self.fd.fileno(),
+                           write=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+            except SysCallError as e:
+                if get_errno(e) == -1 or get_errno(e) > 0:
+                    return ''
+
+    recv = read
+
+    def write(self, data):
+        """Works like a blocking call to SSL_write(), whose behavior is
+        described here:  http://www.openssl.org/docs/ssl/SSL_write.html"""
+        if not data:
+            return 0  # calling SSL_write() with 0 bytes to be sent is undefined
+        if self.act_non_blocking:
+            return self.fd.write(data)
+        while True:
+            try:
+                return self.fd.write(data)
+            except WantReadError:
+                trampoline(self.fd.fileno(),
+                           read=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+            except WantWriteError:
+                trampoline(self.fd.fileno(),
+                           write=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+
+    send = write
+
+    def sendall(self, data):
+        """Send "all" data on the connection. This calls send() repeatedly until
+        all data is sent. If an error occurs, it's impossible to tell how much data
+        has been sent.
+
+        No return value."""
+        tail = self.send(data)
+        while tail < len(data):
+            tail += self.send(data[tail:])
+
+    def shutdown(self):
+        if self.act_non_blocking:
+            return self.fd.shutdown()
+        while True:
+            try:
+                return self.fd.shutdown()
+            except WantReadError:
+                trampoline(self.fd.fileno(),
+                           read=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+            except WantWriteError:
+                trampoline(self.fd.fileno(),
+                           write=True,
+                           timeout=self.gettimeout(),
+                           timeout_exc=socket.timeout)
+
+Connection = ConnectionType = GreenConnection
+
+del greenio
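
A client-side sketch of the wrapper (host, port, and method constant are illustrative; Context and the method constants come from pyOpenSSL itself, and this assumes GreenSocket's attribute delegation forwards set_connect_state() to the underlying Connection):

    import socket
    from eventlet.green.OpenSSL import SSL

    ctx = SSL.Context(SSL.SSLv23_METHOD)
    conn = SSL.Connection(ctx, socket.socket())  # the GreenConnection alias
    conn.connect(('example.com', 443))
    conn.set_connect_state()   # delegated to the pyOpenSSL Connection
    conn.do_handshake()        # yields to the hub on WantRead/WantWrite
    conn.shutdown()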

+ 5 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/__init__.py

@@ -0,0 +1,5 @@
+from . import rand
+from . import crypto
+from . import SSL
+from . import tsafe
+from .version import __version__

+ 1 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/crypto.py

@@ -0,0 +1 @@
+from OpenSSL.crypto import *

+ 1 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/rand.py

@@ -0,0 +1 @@
+from OpenSSL.rand import *

+ 1 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/tsafe.py

@@ -0,0 +1 @@
+from OpenSSL.tsafe import *

+ 1 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/OpenSSL/version.py

@@ -0,0 +1 @@
+from OpenSSL.version import __version__, __doc__

+ 32 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/Queue.py

@@ -0,0 +1,32 @@
+from eventlet import queue
+
+__all__ = ['Empty', 'Full', 'LifoQueue', 'PriorityQueue', 'Queue']
+
+__patched__ = ['LifoQueue', 'PriorityQueue', 'Queue']
+
+# these classes exist to paper over the major operational difference between
+# eventlet.queue.Queue and the stdlib equivalents
+
+
+class Queue(queue.Queue):
+    def __init__(self, maxsize=0):
+        if maxsize == 0:
+            maxsize = None
+        super(Queue, self).__init__(maxsize)
+
+
+class PriorityQueue(queue.PriorityQueue):
+    def __init__(self, maxsize=0):
+        if maxsize == 0:
+            maxsize = None
+        super(PriorityQueue, self).__init__(maxsize)
+
+
+class LifoQueue(queue.LifoQueue):
+    def __init__(self, maxsize=0):
+        if maxsize == 0:
+            maxsize = None
+        super(LifoQueue, self).__init__(maxsize)
+
+Empty = queue.Empty
+Full = queue.Full
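
The shims exist because the stdlib spells "unbounded" as maxsize=0 while eventlet.queue spells it as maxsize=None. A quick illustration:

    from eventlet import queue as eventlet_queue
    from eventlet.green import Queue

    q = Queue.Queue(0)      # stdlib spelling of "no size limit"
    q.put('item')           # never blocks; maxsize=0 was mapped to None
    print(q.get())          # item
    assert Queue.Empty is eventlet_queue.Empty   # same exception classes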

+ 14 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/SimpleHTTPServer.py

@@ -0,0 +1,14 @@
+from eventlet import patcher
+from eventlet.green import BaseHTTPServer
+from eventlet.green import urllib
+
+patcher.inject(
+    'SimpleHTTPServer',
+    globals(),
+    ('BaseHTTPServer', BaseHTTPServer),
+    ('urllib', urllib))
+
+del patcher
+
+if __name__ == '__main__':
+    test()

+ 15 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/SocketServer.py

@@ -0,0 +1,15 @@
+from eventlet import patcher
+
+from eventlet.green import socket
+from eventlet.green import select
+from eventlet.green import threading
+from eventlet.support import six
+
+patcher.inject(
+    'SocketServer' if six.PY2 else 'socketserver',
+    globals(),
+    ('socket', socket),
+    ('select', select),
+    ('threading', threading))
+
+# QQQ ForkingMixIn should be fixed to use green waitpid?

+ 1 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/__init__.py

@@ -0,0 +1 @@
+# this package contains modules from the standard library converted to use eventlet

+ 33 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/_socket_nodns.py

@@ -0,0 +1,33 @@
+__socket = __import__('socket')
+
+__all__ = __socket.__all__
+__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket', 'timeout']
+
+import eventlet.patcher
+eventlet.patcher.slurp_properties(__socket, globals(), ignore=__patched__, srckeys=dir(__socket))
+
+os = __import__('os')
+import sys
+from eventlet import greenio
+
+
+socket = greenio.GreenSocket
+_GLOBAL_DEFAULT_TIMEOUT = greenio._GLOBAL_DEFAULT_TIMEOUT
+timeout = greenio.socket_timeout
+
+try:
+    __original_fromfd__ = __socket.fromfd
+
+    def fromfd(*args):
+        return socket(__original_fromfd__(*args))
+except AttributeError:
+    pass
+
+try:
+    __original_socketpair__ = __socket.socketpair
+
+    def socketpair(*args):
+        one, two = __original_socketpair__(*args)
+        return socket(one), socket(two)
+except AttributeError:
+    pass
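
Both wrappers re-wrap the stdlib result in a GreenSocket so it cooperates with the hub; the same patched names are re-exported through eventlet.green.socket, which this sketch uses:

    from eventlet.green import socket

    a, b = socket.socketpair()   # both ends come back as GreenSocket
    a.sendall(b'ping')
    print(b.recv(4))             # b'ping' on Python 3, yielding to the hub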

+ 11 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/asynchat.py

@@ -0,0 +1,11 @@
+from eventlet import patcher
+from eventlet.green import asyncore
+from eventlet.green import socket
+
+patcher.inject(
+    'asynchat',
+    globals(),
+    ('asyncore', asyncore),
+    ('socket', socket))
+
+del patcher

+ 13 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/asyncore.py

@@ -0,0 +1,13 @@
+from eventlet import patcher
+from eventlet.green import select
+from eventlet.green import socket
+from eventlet.green import time
+
+patcher.inject(
+    "asyncore",
+    globals(),
+    ('select', select),
+    ('socket', socket),
+    ('time', time))
+
+del patcher

+ 47 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/builtin.py

@@ -0,0 +1,47 @@
+"""
+In order to detect a filehandle that's been closed, our only clue may be
+the operating system returning the same filehandle in response to some
+other operation.
+
+The builtins 'file' and 'open' are patched to collaborate with the
+notify_opened protocol.
+"""
+
+builtins_orig = __builtins__
+
+from eventlet import hubs
+from eventlet.hubs import hub
+from eventlet.patcher import slurp_properties
+import sys
+
+__all__ = dir(builtins_orig)
+__patched__ = ['file', 'open']
+
+slurp_properties(builtins_orig, globals(),
+                 ignore=__patched__, srckeys=dir(builtins_orig))
+
+hubs.get_hub()
+
+__original_file = file
+
+
+class file(__original_file):
+    def __init__(self, *args, **kwargs):
+        super(file, self).__init__(*args, **kwargs)
+        hubs.notify_opened(self.fileno())
+
+__original_open = open
+__opening = False
+
+
+def open(*args):
+    global __opening
+    result = __original_open(*args)
+    if not __opening:
+        # This is incredibly ugly. 'open' is used under the hood by
+        # the import process. So, ensure we don't wind up in an
+        # infinite loop.
+        __opening = True
+        hubs.notify_opened(result.fileno())
+        __opening = False
+    return result
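
The patched open() only adds the notify_opened() call; everything else is the original builtin (the file subclass above applies to Python 2 only, since Python 3 has no file builtin). A sketch with a placeholder path:

    from eventlet.green import builtin

    f = builtin.open('/tmp/example.txt', 'w')  # fileno reported to the hub
    f.write('hello')
    f.close()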

+ 13 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/ftplib.py

@@ -0,0 +1,13 @@
+from eventlet import patcher
+
+# *NOTE: there might be some funny business with the "SOCKS" module
+# if it even still exists
+from eventlet.green import socket
+
+patcher.inject('ftplib', globals(), ('socket', socket))
+
+del patcher
+
+# Run test program when run as a script
+if __name__ == '__main__':
+    test()

+ 191 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/__init__.py

@@ -0,0 +1,191 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee.  This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+from eventlet.support import six
+assert six.PY3, 'This is a Python 3 module'
+
+from enum import IntEnum
+
+__all__ = ['HTTPStatus']
+
+class HTTPStatus(IntEnum):
+    """HTTP status codes and reason phrases
+
+    Status codes from the following RFCs are all observed:
+
+        * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
+        * RFC 6585: Additional HTTP Status Codes
+        * RFC 3229: Delta encoding in HTTP
+        * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
+        * RFC 5842: Binding Extensions to WebDAV
+        * RFC 7238: Permanent Redirect
+        * RFC 2295: Transparent Content Negotiation in HTTP
+        * RFC 2774: An HTTP Extension Framework
+    """
+    def __new__(cls, value, phrase, description=''):
+        obj = int.__new__(cls, value)
+        obj._value_ = value
+
+        obj.phrase = phrase
+        obj.description = description
+        return obj
+
+    # informational
+    CONTINUE = 100, 'Continue', 'Request received, please continue'
+    SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
+            'Switching to new protocol; obey Upgrade header')
+    PROCESSING = 102, 'Processing'
+
+    # success
+    OK = 200, 'OK', 'Request fulfilled, document follows'
+    CREATED = 201, 'Created', 'Document created, URL follows'
+    ACCEPTED = (202, 'Accepted',
+        'Request accepted, processing continues off-line')
+    NON_AUTHORITATIVE_INFORMATION = (203,
+        'Non-Authoritative Information', 'Request fulfilled from cache')
+    NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
+    RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
+    PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
+    MULTI_STATUS = 207, 'Multi-Status'
+    ALREADY_REPORTED = 208, 'Already Reported'
+    IM_USED = 226, 'IM Used'
+
+    # redirection
+    MULTIPLE_CHOICES = (300, 'Multiple Choices',
+        'Object has several resources -- see URI list')
+    MOVED_PERMANENTLY = (301, 'Moved Permanently',
+        'Object moved permanently -- see URI list')
+    FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
+    SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
+    NOT_MODIFIED = (304, 'Not Modified',
+        'Document has not changed since given time')
+    USE_PROXY = (305, 'Use Proxy',
+        'You must use proxy specified in Location to access this resource')
+    TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
+        'Object moved temporarily -- see URI list')
+    PERMANENT_REDIRECT = (308, 'Permanent Redirect',
+        'Object moved permanently -- see URI list')
+
+    # client error
+    BAD_REQUEST = (400, 'Bad Request',
+        'Bad request syntax or unsupported method')
+    UNAUTHORIZED = (401, 'Unauthorized',
+        'No permission -- see authorization schemes')
+    PAYMENT_REQUIRED = (402, 'Payment Required',
+        'No payment -- see charging schemes')
+    FORBIDDEN = (403, 'Forbidden',
+        'Request forbidden -- authorization will not help')
+    NOT_FOUND = (404, 'Not Found',
+        'Nothing matches the given URI')
+    METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
+        'Specified method is invalid for this resource')
+    NOT_ACCEPTABLE = (406, 'Not Acceptable',
+        'URI not available in preferred format')
+    PROXY_AUTHENTICATION_REQUIRED = (407,
+        'Proxy Authentication Required',
+        'You must authenticate with this proxy before proceeding')
+    REQUEST_TIMEOUT = (408, 'Request Timeout',
+        'Request timed out; try again later')
+    CONFLICT = 409, 'Conflict', 'Request conflict'
+    GONE = (410, 'Gone',
+        'URI no longer exists and has been permanently removed')
+    LENGTH_REQUIRED = (411, 'Length Required',
+        'Client must specify Content-Length')
+    PRECONDITION_FAILED = (412, 'Precondition Failed',
+        'Precondition in headers is false')
+    REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
+        'Entity is too large')
+    REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
+        'URI is too long')
+    UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
+        'Entity body in unsupported format')
+    REQUESTED_RANGE_NOT_SATISFIABLE = (416,
+        'Requested Range Not Satisfiable',
+        'Cannot satisfy request range')
+    EXPECTATION_FAILED = (417, 'Expectation Failed',
+        'Expect condition could not be satisfied')
+    UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
+    LOCKED = 423, 'Locked'
+    FAILED_DEPENDENCY = 424, 'Failed Dependency'
+    UPGRADE_REQUIRED = 426, 'Upgrade Required'
+    PRECONDITION_REQUIRED = (428, 'Precondition Required',
+        'The origin server requires the request to be conditional')
+    TOO_MANY_REQUESTS = (429, 'Too Many Requests',
+        'The user has sent too many requests in '
+        'a given amount of time ("rate limiting")')
+    REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
+        'Request Header Fields Too Large',
+        'The server is unwilling to process the request because its header '
+        'fields are too large')
+
+    # server errors
+    INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
+        'Server got itself in trouble')
+    NOT_IMPLEMENTED = (501, 'Not Implemented',
+        'Server does not support this operation')
+    BAD_GATEWAY = (502, 'Bad Gateway',
+        'Invalid responses from another server/proxy')
+    SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
+        'The server cannot process the request due to a high load')
+    GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
+        'The gateway server did not receive a timely response')
+    HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
+        'Cannot fulfill request')
+    VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
+    INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
+    LOOP_DETECTED = 508, 'Loop Detected'
+    NOT_EXTENDED = 510, 'Not Extended'
+    NETWORK_AUTHENTICATION_REQUIRED = (511,
+        'Network Authentication Required',
+        'The client needs to authenticate to gain network access')
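
Each member is a real int carrying the extra phrase/description attributes attached in __new__, so existing integer comparisons keep working (Python 3 only, per the assert at the top of the module):

    from eventlet.green.http import HTTPStatus

    s = HTTPStatus.NOT_FOUND
    print(int(s))           # 404
    print(s.phrase)         # Not Found
    print(s.description)    # Nothing matches the given URI
    assert s == 404         # IntEnum members compare equal to plain ints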

+ 1557 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/client.py

@@ -0,0 +1,1557 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee.  This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+"""HTTP/1.1 client library
+
+<intro stuff goes here>
+<other stuff, too>
+
+HTTPConnection goes through a number of "states", which define when a client
+may legally make another request or fetch the response for a particular
+request. This diagram details these state transitions:
+
+    (null)
+      |
+      | HTTPConnection()
+      v
+    Idle
+      |
+      | putrequest()
+      v
+    Request-started
+      |
+      | ( putheader() )*  endheaders()
+      v
+    Request-sent
+      |\_____________________________
+      |                              | getresponse() raises
+      | response = getresponse()     | ConnectionError
+      v                              v
+    Unread-response                Idle
+    [Response-headers-read]
+      |\____________________
+      |                     |
+      | response.read()     | putrequest()
+      v                     v
+    Idle                  Req-started-unread-response
+                     ______/|
+                   /        |
+   response.read() |        | ( putheader() )*  endheaders()
+                   v        v
+       Request-started    Req-sent-unread-response
+                            |
+                            | response.read()
+                            v
+                          Request-sent
+
+This diagram presents the following rules:
+  -- a second request may not be started until {response-headers-read}
+  -- a response [object] cannot be retrieved until {request-sent}
+  -- there is no differentiation between an unread response body and a
+     partially read response body
+
+Note: this enforcement is applied by the HTTPConnection class. The
+      HTTPResponse class does not enforce this state machine, which
+      implies sophisticated clients may accelerate the request/response
+      pipeline. Caution should be taken, though: accelerating the states
+      beyond the above pattern may imply knowledge of the server's
+      connection-close behavior for certain requests. For example, it
+      is impossible to tell whether the server will close the connection
+      UNTIL the response headers have been read; this means that further
+      requests cannot be placed into the pipeline until it is known that
+      the server will NOT be closing the connection.
+
+Logical State                  __state            __response
+-------------                  -------            ----------
+Idle                           _CS_IDLE           None
+Request-started                _CS_REQ_STARTED    None
+Request-sent                   _CS_REQ_SENT       None
+Unread-response                _CS_IDLE           <response_class>
+Req-started-unread-response    _CS_REQ_STARTED    <response_class>
+Req-sent-unread-response       _CS_REQ_SENT       <response_class>
+"""
+
+import email.parser
+import email.message
+import io
+import re
+import collections
+from urllib.parse import urlsplit
+
+from eventlet.green import http, os, socket
+
+# HTTPMessage, parse_headers(), and the HTTP status code constants are
+# intentionally omitted for simplicity
+__all__ = ["HTTPResponse", "HTTPConnection",
+           "HTTPException", "NotConnected", "UnknownProtocol",
+           "UnknownTransferEncoding", "UnimplementedFileMode",
+           "IncompleteRead", "InvalidURL", "ImproperConnectionState",
+           "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
+           "BadStatusLine", "LineTooLong", "RemoteDisconnected", "error",
+           "responses"]
+
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+_UNKNOWN = 'UNKNOWN'
+
+# connection states
+_CS_IDLE = 'Idle'
+_CS_REQ_STARTED = 'Request-started'
+_CS_REQ_SENT = 'Request-sent'
+
+
+# hack to maintain backwards compatibility
+globals().update(http.HTTPStatus.__members__)
+
+# another hack to maintain backwards compatibility
+# Mapping status codes to official W3C names
+responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()}
+
+# maximal amount of data to read at one time in _safe_read
+MAXAMOUNT = 1048576
+
+# maximal line length when calling readline().
+_MAXLINE = 65536
+_MAXHEADERS = 100
+
+# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
+#
+# VCHAR          = %x21-7E
+# obs-text       = %x80-FF
+# header-field   = field-name ":" OWS field-value OWS
+# field-name     = token
+# field-value    = *( field-content / obs-fold )
+# field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+# field-vchar    = VCHAR / obs-text
+#
+# obs-fold       = CRLF 1*( SP / HTAB )
+#                ; obsolete line folding
+#                ; see Section 3.2.4
+
+# token          = 1*tchar
+#
+# tchar          = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+#                / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+#                / DIGIT / ALPHA
+#                ; any VCHAR, except delimiters
+#
+# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
+
+# the patterns for both name and value are more lenient than RFC
+# definitions to allow for backwards compatibility
+# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
+_is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*\Z').match
+_is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
+
+# We always set the Content-Length header for these methods because some
+# servers will otherwise respond with a 411
+_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
+
+
+def _encode(data, name='data'):
+    """Call data.encode("latin-1") but show a better error message."""
+    try:
+        return data.encode("latin-1")
+    except UnicodeEncodeError as err:
+        raise UnicodeEncodeError(
+            err.encoding,
+            err.object,
+            err.start,
+            err.end,
+            "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
+            "if you want to send it encoded in UTF-8." %
+            (name.title(), data[err.start:err.end], name)) from None
+
+
+class HTTPMessage(email.message.Message):
+    # XXX The only usage of this method is in
+    # http.server.CGIHTTPRequestHandler.  Maybe move the code there so
+    # that it doesn't need to be part of the public API.  The API has
+    # never been defined so this could cause backwards compatibility
+    # issues.
+
+    def getallmatchingheaders(self, name):
+        """Find all header lines matching a given header name.
+
+        Look through the list of headers and find all lines matching a given
+        header name (and their continuation lines).  A list of the lines is
+        returned, without interpretation.  If the header does not occur, an
+        empty list is returned.  If the header occurs multiple times, all
+        occurrences are returned.  Case is not important in the header name.
+
+        """
+        name = name.lower() + ':'
+        n = len(name)
+        lst = []
+        hit = 0
+        for line in self.keys():
+            if line[:n].lower() == name:
+                hit = 1
+            elif not line[:1].isspace():
+                hit = 0
+            if hit:
+                lst.append(line)
+        return lst
+
+def parse_headers(fp, _class=HTTPMessage):
+    """Parses only RFC2822 headers from a file pointer.
+
+    email Parser wants to see strings rather than bytes.
+    But a TextIOWrapper around self.rfile would buffer too many bytes
+    from the stream, bytes which we later need to read as bytes.
+    So we read the correct bytes here, as bytes, for email Parser
+    to parse.
+
+    """
+    headers = []
+    while True:
+        line = fp.readline(_MAXLINE + 1)
+        if len(line) > _MAXLINE:
+            raise LineTooLong("header line")
+        headers.append(line)
+        if len(headers) > _MAXHEADERS:
+            raise HTTPException("got more than %d headers" % _MAXHEADERS)
+        if line in (b'\r\n', b'\n', b''):
+            break
+    hstring = b''.join(headers).decode('iso-8859-1')
+    return email.parser.Parser(_class=_class).parsestr(hstring)
+
+
+class HTTPResponse(io.BufferedIOBase):
+
+    # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
+
+    # The bytes from the socket object are iso-8859-1 strings.
+    # See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
+    # text following RFC 2047.  The basic status line parsing only
+    # accepts iso-8859-1.
+
+    def __init__(self, sock, debuglevel=0, method=None, url=None):
+        # If the response includes a content-length header, we need to
+        # make sure that the client doesn't read more than the
+        # specified number of bytes.  If it does, it will block until
+        # the server times out and closes the connection.  This will
+        # happen if a self.fp.read() is done (without a size) whether
+        # self.fp is buffered or not.  So, no self.fp.read() by
+        # clients unless they know what they are doing.
+        self.fp = sock.makefile("rb")
+        self.debuglevel = debuglevel
+        self._method = method
+
+        # The HTTPResponse object is returned via urllib.  The clients
+        # of http and urllib expect different attributes for the
+        # headers.  headers is used here and supports urllib.  msg is
+        # provided as a backwards compatibility layer for http
+        # clients.
+
+        self.headers = self.msg = None
+
+        # from the Status-Line of the response
+        self.version = _UNKNOWN # HTTP-Version
+        self.status = _UNKNOWN  # Status-Code
+        self.reason = _UNKNOWN  # Reason-Phrase
+
+        self.chunked = _UNKNOWN         # is "chunked" being used?
+        self.chunk_left = _UNKNOWN      # bytes left to read in current chunk
+        self.length = _UNKNOWN          # number of bytes left in response
+        self.will_close = _UNKNOWN      # conn will close at end of response
+
+    def _read_status(self):
+        line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
+        if len(line) > _MAXLINE:
+            raise LineTooLong("status line")
+        if self.debuglevel > 0:
+            print("reply:", repr(line))
+        if not line:
+            # Presumably, the server closed the connection before
+            # sending a valid response.
+            raise RemoteDisconnected("Remote end closed connection without"
+                                     " response")
+        try:
+            version, status, reason = line.split(None, 2)
+        except ValueError:
+            try:
+                version, status = line.split(None, 1)
+                reason = ""
+            except ValueError:
+                # empty version will cause next test to fail.
+                version = ""
+        if not version.startswith("HTTP/"):
+            self._close_conn()
+            raise BadStatusLine(line)
+
+        # The status code is a three-digit number
+        try:
+            status = int(status)
+            if status < 100 or status > 999:
+                raise BadStatusLine(line)
+        except ValueError:
+            raise BadStatusLine(line)
+        return version, status, reason
+
+    def begin(self):
+        if self.headers is not None:
+            # we've already started reading the response
+            return
+
+        # read until we get a non-100 response
+        while True:
+            version, status, reason = self._read_status()
+            if status != CONTINUE:
+                break
+            # skip the header from the 100 response
+            while True:
+                skip = self.fp.readline(_MAXLINE + 1)
+                if len(skip) > _MAXLINE:
+                    raise LineTooLong("header line")
+                skip = skip.strip()
+                if not skip:
+                    break
+                if self.debuglevel > 0:
+                    print("header:", skip)
+
+        self.code = self.status = status
+        self.reason = reason.strip()
+        if version in ("HTTP/1.0", "HTTP/0.9"):
+            # Some servers might still return "0.9", treat it as 1.0 anyway
+            self.version = 10
+        elif version.startswith("HTTP/1."):
+            self.version = 11   # use HTTP/1.1 code for HTTP/1.x where x>=1
+        else:
+            raise UnknownProtocol(version)
+
+        self.headers = self.msg = parse_headers(self.fp)
+
+        if self.debuglevel > 0:
+            for hdr in self.headers:
+                print("header:", hdr, end=" ")
+
+        # are we using the chunked-style of transfer encoding?
+        tr_enc = self.headers.get("transfer-encoding")
+        if tr_enc and tr_enc.lower() == "chunked":
+            self.chunked = True
+            self.chunk_left = None
+        else:
+            self.chunked = False
+
+        # will the connection close at the end of the response?
+        self.will_close = self._check_close()
+
+        # do we have a Content-Length?
+        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
+        self.length = None
+        length = self.headers.get("content-length")
+
+        if length and not self.chunked:
+            try:
+                self.length = int(length)
+            except ValueError:
+                self.length = None
+            else:
+                if self.length < 0:  # ignore nonsensical negative lengths
+                    self.length = None
+        else:
+            self.length = None
+
+        # does the body have a fixed length? (of zero)
+        if (status == NO_CONTENT or status == NOT_MODIFIED or
+            100 <= status < 200 or      # 1xx codes
+            self._method == "HEAD"):
+            self.length = 0
+
+        # if the connection remains open, and we aren't using chunked, and
+        # a content-length was not provided, then assume that the connection
+        # WILL close.
+        if (not self.will_close and
+            not self.chunked and
+            self.length is None):
+            self.will_close = True
+
+    def _check_close(self):
+        conn = self.headers.get("connection")
+        if self.version == 11:
+            # An HTTP/1.1 proxy is assumed to stay open unless
+            # explicitly closed.
+            conn = self.headers.get("connection")
+            if conn and "close" in conn.lower():
+                return True
+            return False
+
+        # Some HTTP/1.0 implementations have support for persistent
+        # connections, using rules different than HTTP/1.1.
+
+        # For older HTTP, Keep-Alive indicates persistent connection.
+        if self.headers.get("keep-alive"):
+            return False
+
+        # At least Akamai returns a "Connection: Keep-Alive" header,
+        # which was supposed to be sent by the client.
+        if conn and "keep-alive" in conn.lower():
+            return False
+
+        # Proxy-Connection is a netscape hack.
+        pconn = self.headers.get("proxy-connection")
+        if pconn and "keep-alive" in pconn.lower():
+            return False
+
+        # otherwise, assume it will close
+        return True
+
+    def _close_conn(self):
+        fp = self.fp
+        self.fp = None
+        fp.close()
+
+    def close(self):
+        try:
+            super().close() # set "closed" flag
+        finally:
+            if self.fp:
+                self._close_conn()
+
+    # These implementations are for the benefit of io.BufferedReader.
+
+    # XXX This class should probably be revised to act more like
+    # the "raw stream" that BufferedReader expects.
+
+    def flush(self):
+        super().flush()
+        if self.fp:
+            self.fp.flush()
+
+    def readable(self):
+        """Always returns True"""
+        return True
+
+    # End of "raw stream" methods
+
+    def isclosed(self):
+        """True if the connection is closed."""
+        # NOTE: it is possible that we will not ever call self.close(). This
+        #       case occurs when will_close is TRUE, length is None, and we
+        #       read up to the last byte, but NOT past it.
+        #
+        # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
+        #          called, meaning self.isclosed() is meaningful.
+        return self.fp is None
+
+    def read(self, amt=None):
+        if self.fp is None:
+            return b""
+
+        if self._method == "HEAD":
+            self._close_conn()
+            return b""
+
+        if amt is not None:
+            # Amount is given, implement using readinto
+            b = bytearray(amt)
+            n = self.readinto(b)
+            return memoryview(b)[:n].tobytes()
+        else:
+            # Amount is not given (unbounded read) so we must check self.length
+            # and self.chunked
+
+            if self.chunked:
+                return self._readall_chunked()
+
+            if self.length is None:
+                s = self.fp.read()
+            else:
+                try:
+                    s = self._safe_read(self.length)
+                except IncompleteRead:
+                    self._close_conn()
+                    raise
+                self.length = 0
+            self._close_conn()        # we read everything
+            return s
+
+    def readinto(self, b):
+        """Read up to len(b) bytes into bytearray b and return the number
+        of bytes read.
+        """
+
+        if self.fp is None:
+            return 0
+
+        if self._method == "HEAD":
+            self._close_conn()
+            return 0
+
+        if self.chunked:
+            return self._readinto_chunked(b)
+
+        if self.length is not None:
+            if len(b) > self.length:
+                # clip the read to the "end of response"
+                b = memoryview(b)[0:self.length]
+
+        # we do not use _safe_read() here because this may be a .will_close
+        # connection, and the user is reading more bytes than will be provided
+        # (for example, reading in 1k chunks)
+        n = self.fp.readinto(b)
+        if not n and b:
+            # Ideally, we would raise IncompleteRead if the content-length
+            # wasn't satisfied, but it might break compatibility.
+            self._close_conn()
+        elif self.length is not None:
+            self.length -= n
+            if not self.length:
+                self._close_conn()
+        return n
+
+    def _read_next_chunk_size(self):
+        # Read the next chunk size from the file
+        line = self.fp.readline(_MAXLINE + 1)
+        if len(line) > _MAXLINE:
+            raise LineTooLong("chunk size")
+        i = line.find(b";")
+        if i >= 0:
+            line = line[:i] # strip chunk-extensions
+        try:
+            return int(line, 16)
+        except ValueError:
+            # close the connection as protocol synchronisation is
+            # probably lost
+            self._close_conn()
+            raise
+
+    def _read_and_discard_trailer(self):
+        # read and discard trailer up to the CRLF terminator
+        ### note: we shouldn't have any trailers!
+        while True:
+            line = self.fp.readline(_MAXLINE + 1)
+            if len(line) > _MAXLINE:
+                raise LineTooLong("trailer line")
+            if not line:
+                # a vanishingly small number of sites EOF without
+                # sending the trailer
+                break
+            if line in (b'\r\n', b'\n', b''):
+                break
+
+    def _get_chunk_left(self):
+        # return self.chunk_left, reading a new chunk if necessary.
+        # chunk_left == 0: at the end of the current chunk, need to close it
+        # chunk_left == None: No current chunk, should read next.
+        # This function returns a non-zero value, or None once the last
+        # chunk has been read.
+        chunk_left = self.chunk_left
+        if not chunk_left: # Can be 0 or None
+            if chunk_left is not None:
+                # We are at the end of the chunk; discard the chunk end
+                self._safe_read(2)  # toss the CRLF at the end of the chunk
+            try:
+                chunk_left = self._read_next_chunk_size()
+            except ValueError:
+                raise IncompleteRead(b'')
+            if chunk_left == 0:
+                # last chunk: 1*("0") [ chunk-extension ] CRLF
+                self._read_and_discard_trailer()
+                # we read everything; close the "file"
+                self._close_conn()
+                chunk_left = None
+            self.chunk_left = chunk_left
+        return chunk_left
+
+    def _readall_chunked(self):
+        assert self.chunked != _UNKNOWN
+        value = []
+        try:
+            while True:
+                chunk_left = self._get_chunk_left()
+                if chunk_left is None:
+                    break
+                value.append(self._safe_read(chunk_left))
+                self.chunk_left = 0
+            return b''.join(value)
+        except IncompleteRead:
+            raise IncompleteRead(b''.join(value))
+
+    def _readinto_chunked(self, b):
+        assert self.chunked != _UNKNOWN
+        total_bytes = 0
+        mvb = memoryview(b)
+        try:
+            while True:
+                chunk_left = self._get_chunk_left()
+                if chunk_left is None:
+                    return total_bytes
+
+                if len(mvb) <= chunk_left:
+                    n = self._safe_readinto(mvb)
+                    self.chunk_left = chunk_left - n
+                    return total_bytes + n
+
+                temp_mvb = mvb[:chunk_left]
+                n = self._safe_readinto(temp_mvb)
+                mvb = mvb[n:]
+                total_bytes += n
+                self.chunk_left = 0
+
+        except IncompleteRead:
+            raise IncompleteRead(bytes(b[0:total_bytes]))
+
+    def _safe_read(self, amt):
+        """Read the number of bytes requested, compensating for partial reads.
+
+        Normally, we have a blocking socket, but a read() can be interrupted
+        by a signal (resulting in a partial read).
+
+        Note that we cannot distinguish between EOF and an interrupt when zero
+        bytes have been read. IncompleteRead() will be raised in this
+        situation.
+
+        This function should be used when <amt> bytes "should" be present for
+        reading. If the bytes are truly not available (due to EOF), then the
+        IncompleteRead exception can be used to detect the problem.
+        """
+        s = []
+        while amt > 0:
+            chunk = self.fp.read(min(amt, MAXAMOUNT))
+            if not chunk:
+                raise IncompleteRead(b''.join(s), amt)
+            s.append(chunk)
+            amt -= len(chunk)
+        return b"".join(s)
+
+    def _safe_readinto(self, b):
+        """Same as _safe_read, but for reading into a buffer."""
+        total_bytes = 0
+        mvb = memoryview(b)
+        while total_bytes < len(b):
+            if MAXAMOUNT < len(mvb):
+                temp_mvb = mvb[0:MAXAMOUNT]
+                n = self.fp.readinto(temp_mvb)
+            else:
+                n = self.fp.readinto(mvb)
+            if not n:
+                raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
+            mvb = mvb[n:]
+            total_bytes += n
+        return total_bytes
+
+    def read1(self, n=-1):
+        """Read with at most one underlying system call.  If at least one
+        byte is buffered, return that instead.
+        """
+        if self.fp is None or self._method == "HEAD":
+            return b""
+        if self.chunked:
+            return self._read1_chunked(n)
+        if self.length is not None and (n < 0 or n > self.length):
+            n = self.length
+        try:
+            result = self.fp.read1(n)
+        except ValueError:
+            if n >= 0:
+                raise
+            # some implementations, like BufferedReader, don't support -1
+            # Read an arbitrarily selected largeish chunk.
+            result = self.fp.read1(16*1024)
+        if not result and n:
+            self._close_conn()
+        elif self.length is not None:
+            self.length -= len(result)
+        return result
+
+    def peek(self, n=-1):
+        # Having this enables IOBase.readline() to read more than one
+        # byte at a time
+        if self.fp is None or self._method == "HEAD":
+            return b""
+        if self.chunked:
+            return self._peek_chunked(n)
+        return self.fp.peek(n)
+
+    def readline(self, limit=-1):
+        if self.fp is None or self._method == "HEAD":
+            return b""
+        if self.chunked:
+            # Fallback to IOBase readline which uses peek() and read()
+            return super().readline(limit)
+        if self.length is not None and (limit < 0 or limit > self.length):
+            limit = self.length
+        result = self.fp.readline(limit)
+        if not result and limit:
+            self._close_conn()
+        elif self.length is not None:
+            self.length -= len(result)
+        return result
+
+    def _read1_chunked(self, n):
+        # Strictly speaking, _get_chunk_left() may cause more than one read,
+        # but that is ok, since that is to satisfy the chunked protocol.
+        chunk_left = self._get_chunk_left()
+        if chunk_left is None or n == 0:
+            return b''
+        if not (0 <= n <= chunk_left):
+            n = chunk_left # if n is negative or larger than chunk_left
+        read = self.fp.read1(n)
+        self.chunk_left -= len(read)
+        if not read:
+            raise IncompleteRead(b"")
+        return read
+
+    def _peek_chunked(self, n):
+        # Strictly speaking, _get_chunk_left() may cause more than one read,
+        # but that is ok, since that is to satisfy the chunked protocol.
+        try:
+            chunk_left = self._get_chunk_left()
+        except IncompleteRead:
+            return b'' # peek doesn't worry about protocol
+        if chunk_left is None:
+            return b'' # eof
+        # peek is allowed to return more than requested.  Just request the
+        # entire chunk, and truncate what we get.
+        return self.fp.peek(chunk_left)[:chunk_left]
+
+    def fileno(self):
+        return self.fp.fileno()
+
+    def getheader(self, name, default=None):
+        '''Returns the value of the header matching *name*.
+
+        If there are multiple matching headers, the values are
+        combined into a single string separated by commas and spaces.
+
+        If no matching header is found, returns *default* or None if
+        the *default* is not specified.
+
+        If the headers are unknown, raises http.client.ResponseNotReady.
+
+        '''
+        if self.headers is None:
+            raise ResponseNotReady()
+        headers = self.headers.get_all(name) or default
+        if isinstance(headers, str) or not hasattr(headers, '__iter__'):
+            return headers
+        else:
+            return ', '.join(headers)
+
+    def getheaders(self):
+        """Return list of (header, value) tuples."""
+        if self.headers is None:
+            raise ResponseNotReady()
+        return list(self.headers.items())
+
+    # We override IOBase.__iter__ so that it doesn't check for closed-ness
+
+    def __iter__(self):
+        return self
+
+    # For compatibility with old-style urllib responses.
+
+    def info(self):
+        '''Returns an instance of the class mimetools.Message containing
+        meta-information associated with the URL.
+
+        When the method is HTTP, these headers are those returned by
+        the server at the head of the retrieved HTML page (including
+        Content-Length and Content-Type).
+
+        When the method is FTP, a Content-Length header will be
+        present if (as is now usual) the server passed back a file
+        length in response to the FTP retrieval request. A
+        Content-Type header will be present if the MIME type can be
+        guessed.
+
+        When the method is local-file, returned headers will include
+        a Date representing the file's last-modified time, a
+        Content-Length giving file size, and a Content-Type
+        containing a guess at the file's type. See also the
+        description of the mimetools module.
+
+        '''
+        return self.headers
+
+    def geturl(self):
+        '''Return the real URL of the page.
+
+        In some cases, the HTTP server redirects a client to another
+        URL. The urlopen() function handles this transparently, but in
+        some cases the caller needs to know which URL the client was
+        redirected to. The geturl() method can be used to get at this
+        redirected URL.
+
+        '''
+        return self.url
+
+    def getcode(self):
+        '''Return the HTTP status code that was sent with the response,
+        or None if the URL is not an HTTP URL.
+
+        '''
+        return self.status
+
+class HTTPConnection:
+
+    _http_vsn = 11
+    _http_vsn_str = 'HTTP/1.1'
+
+    response_class = HTTPResponse
+    default_port = HTTP_PORT
+    auto_open = 1
+    debuglevel = 0
+
+    @staticmethod
+    def _is_textIO(stream):
+        """Test whether a file-like object is a text or a binary stream.
+        """
+        return isinstance(stream, io.TextIOBase)
+
+    @staticmethod
+    def _get_content_length(body, method):
+        """Get the content-length based on the body.
+
+        If the body is None, we set Content-Length: 0 for methods that expect
+        a body (RFC 7230, Section 3.3.2). We also set the Content-Length for
+        any method if the body is a str or bytes-like object and not a file.
+        """
+        if body is None:
+            # do an explicit check for not None here to distinguish
+            # between unset and set but empty
+            if method.upper() in _METHODS_EXPECTING_BODY:
+                return 0
+            else:
+                return None
+
+        if hasattr(body, 'read'):
+            # file-like object.
+            return None
+
+        try:
+            # does it implement the buffer protocol (bytes, bytearray, array)?
+            mv = memoryview(body)
+            return mv.nbytes
+        except TypeError:
+            pass
+
+        if isinstance(body, str):
+            return len(body)
+
+        return None
+
+    def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+                 source_address=None):
+        self.timeout = timeout
+        self.source_address = source_address
+        self.sock = None
+        self._buffer = []
+        self.__response = None
+        self.__state = _CS_IDLE
+        self._method = None
+        self._tunnel_host = None
+        self._tunnel_port = None
+        self._tunnel_headers = {}
+
+        (self.host, self.port) = self._get_hostport(host, port)
+
+        # This is stored as an instance variable to allow unit
+        # tests to replace it with a suitable mockup
+        self._create_connection = socket.create_connection
+
+    def set_tunnel(self, host, port=None, headers=None):
+        """Set up host and port for HTTP CONNECT tunnelling.
+
+        In a connection that uses HTTP CONNECT tunneling, the host passed to the
+        constructor is used as a proxy server that relays all communication to
+        the endpoint passed to `set_tunnel`. This is done by sending an HTTP
+        CONNECT request to the proxy server when the connection is established.
+
+        This method must be called before the HTTP connection has been
+        established.
+
+        The headers argument should be a mapping of extra HTTP headers to send
+        with the CONNECT request.
+        """
+
+        if self.sock:
+            raise RuntimeError("Can't set up tunnel for established connection")
+
+        self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
+        if headers:
+            self._tunnel_headers = headers
+        else:
+            self._tunnel_headers.clear()
+
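A minimal sketch of CONNECT tunnelling through a forward proxy (the proxy
host and credentials here are hypothetical):

    from eventlet.green.http.client import HTTPSConnection

    # The constructor names the *proxy*; set_tunnel() names the real endpoint.
    conn = HTTPSConnection("proxy.example.com", 3128)
    conn.set_tunnel("www.example.com", 443,
                    headers={"Proxy-Authorization": "Basic dXNlcjpwYXNz"})
    conn.request("GET", "/")   # the CONNECT request is sent first
    print(conn.getresponse().status)
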
+    def _get_hostport(self, host, port):
+        if port is None:
+            i = host.rfind(':')
+            j = host.rfind(']')         # ipv6 addresses have [...]
+            if i > j:
+                try:
+                    port = int(host[i+1:])
+                except ValueError:
+                    if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
+                        port = self.default_port
+                    else:
+                        raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
+                host = host[:i]
+            else:
+                port = self.default_port
+            if host and host[0] == '[' and host[-1] == ']':
+                host = host[1:-1]
+
+        return (host, port)
+
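The parser above accepts "host", "host:port", and bracketed IPv6 literals;
a trailing bare colon falls back to the default port. For illustration
(again a private helper):

    from eventlet.green.http.client import HTTPConnection

    c = HTTPConnection("example.com")   # default_port is 80
    assert c._get_hostport("example.com:8080", None) == ("example.com", 8080)
    assert c._get_hostport("example.com:", None) == ("example.com", 80)
    assert c._get_hostport("[::1]:8080", None) == ("::1", 8080)
    assert c._get_hostport("[::1]", None) == ("::1", 80)
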
+    def set_debuglevel(self, level):
+        self.debuglevel = level
+
+    def _tunnel(self):
+        connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
+            self._tunnel_port)
+        connect_bytes = connect_str.encode("ascii")
+        self.send(connect_bytes)
+        for header, value in self._tunnel_headers.items():
+            header_str = "%s: %s\r\n" % (header, value)
+            header_bytes = header_str.encode("latin-1")
+            self.send(header_bytes)
+        self.send(b'\r\n')
+
+        response = self.response_class(self.sock, method=self._method)
+        (version, code, message) = response._read_status()
+
+        if code != http.HTTPStatus.OK:
+            self.close()
+            raise OSError("Tunnel connection failed: %d %s" % (code,
+                                                               message.strip()))
+        while True:
+            line = response.fp.readline(_MAXLINE + 1)
+            if len(line) > _MAXLINE:
+                raise LineTooLong("header line")
+            if not line:
+                # for sites which EOF without sending a trailer
+                break
+            if line in (b'\r\n', b'\n', b''):
+                break
+
+            if self.debuglevel > 0:
+                print('header:', line.decode())
+
+    def connect(self):
+        """Connect to the host and port specified in __init__."""
+        self.sock = self._create_connection(
+            (self.host, self.port), self.timeout, self.source_address)
+        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+        if self._tunnel_host:
+            self._tunnel()
+
+    def close(self):
+        """Close the connection to the HTTP server."""
+        self.__state = _CS_IDLE
+        try:
+            sock = self.sock
+            if sock:
+                self.sock = None
+                sock.close()   # close it manually... there may be other refs
+        finally:
+            response = self.__response
+            if response:
+                self.__response = None
+                response.close()
+
+    def send(self, data):
+        """Send `data' to the server.
+        ``data`` can be a string object, a bytes object, an array object, a
+        file-like object that supports a .read() method, or an iterable object.
+        """
+
+        if self.sock is None:
+            if self.auto_open:
+                self.connect()
+            else:
+                raise NotConnected()
+
+        if self.debuglevel > 0:
+            print("send:", repr(data))
+        blocksize = 8192
+        if hasattr(data, "read"):
+            if self.debuglevel > 0:
+                print("sendIng a read()able")
+            encode = False
+            try:
+                mode = data.mode
+            except AttributeError:
+                # io.BytesIO and other file-like objects don't have a `mode`
+                # attribute.
+                pass
+            else:
+                if "b" not in mode:
+                    encode = True
+                    if self.debuglevel > 0:
+                        print("encoding file using iso-8859-1")
+            while 1:
+                datablock = data.read(blocksize)
+                if not datablock:
+                    break
+                if encode:
+                    datablock = datablock.encode("iso-8859-1")
+                self.sock.sendall(datablock)
+            return
+        try:
+            self.sock.sendall(data)
+        except TypeError:
+            # collections.Iterable was removed in Python 3.10; import the
+            # collections.abc spelling explicitly so the check keeps working.
+            from collections.abc import Iterable as _Iterable
+            if isinstance(data, _Iterable):
+                for d in data:
+                    self.sock.sendall(d)
+            else:
+                raise TypeError("data should be a bytes-like object "
+                                "or an iterable, got %r" % type(data))
+
+    def _output(self, s):
+        """Add a line of output to the current request buffer.
+
+        Assumes that the line does *not* end with \\r\\n.
+        """
+        self._buffer.append(s)
+
+    def _read_readable(self, readable):
+        blocksize = 8192
+        if self.debuglevel > 0:
+            print("sendIng a read()able")
+        encode = self._is_textIO(readable)
+        if encode and self.debuglevel > 0:
+            print("encoding file using iso-8859-1")
+        while True:
+            datablock = readable.read(blocksize)
+            if not datablock:
+                break
+            if encode:
+                datablock = datablock.encode("iso-8859-1")
+            yield datablock
+
+    def _send_output(self, message_body=None, encode_chunked=False):
+        """Send the currently buffered request and clear the buffer.
+
+        Appends an extra \\r\\n to the buffer.
+        A message_body may be specified, to be appended to the request.
+        """
+        self._buffer.extend((b"", b""))
+        msg = b"\r\n".join(self._buffer)
+        del self._buffer[:]
+        self.send(msg)
+
+        if message_body is not None:
+
+            # create a consistent interface to message_body
+            if hasattr(message_body, 'read'):
+                # Let file-like take precedence over byte-like.  This
+                # is needed to allow the current position of mmap'ed
+                # files to be taken into account.
+                chunks = self._read_readable(message_body)
+            else:
+                try:
+                    # this is solely to check to see if message_body
+                    # implements the buffer API.  it /would/ be easier
+                    # to capture if PyObject_CheckBuffer was exposed
+                    # to Python.
+                    memoryview(message_body)
+                except TypeError:
+                    try:
+                        chunks = iter(message_body)
+                    except TypeError:
+                        raise TypeError("message_body should be a bytes-like "
+                                        "object or an iterable, got %r"
+                                        % type(message_body))
+                else:
+                    # the object implements the buffer interface and
+                    # can be passed directly into socket methods
+                    chunks = (message_body,)
+
+            for chunk in chunks:
+                if not chunk:
+                    if self.debuglevel > 0:
+                        print('Zero length chunk ignored')
+                    continue
+
+                if encode_chunked and self._http_vsn == 11:
+                    # chunked encoding
+                    chunk = '{0:X}\r\n'.format(len(chunk)).encode('ascii') + chunk + b'\r\n'
+                self.send(chunk)
+
+            if encode_chunked and self._http_vsn == 11:
+                # end chunked transfer
+                self.send(b'0\r\n\r\n')
+
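Each chunk is framed as a hexadecimal length line, the payload, and CRLF,
with a zero-length chunk terminating the stream. The framing for a single
5-byte chunk, mirroring the expression above:

    frame = '{0:X}\r\n'.format(len(b'hello')).encode('ascii') + b'hello' + b'\r\n'
    assert frame == b'5\r\nhello\r\n'
    terminator = b'0\r\n\r\n'   # ends the chunked body
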
+    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
+        """Send a request to the server.
+
+        `method' specifies an HTTP request method, e.g. 'GET'.
+        `url' specifies the object being requested, e.g. '/index.html'.
+        `skip_host' if True does not automatically add a 'Host:' header
+        `skip_accept_encoding' if True does not automatically add an
+           'Accept-Encoding:' header
+        """
+
+        # if a prior response has been completed, then forget about it.
+        if self.__response and self.__response.isclosed():
+            self.__response = None
+
+        # in certain cases, we cannot issue another request on this connection.
+        # this occurs when:
+        #   1) we are in the process of sending a request.   (_CS_REQ_STARTED)
+        #   2) a response to a previous request has signalled that it is going
+        #      to close the connection upon completion.
+        #   3) the headers for the previous response have not been read, thus
+        #      we cannot determine whether point (2) is true.   (_CS_REQ_SENT)
+        #
+        # if there is no prior response, then we can request at will.
+        #
+        # if point (2) is true, then we will have passed the socket to the
+        # response (effectively meaning, "there is no prior response"), and
+        # will open a new one when a new request is made.
+        #
+        # Note: if a prior response exists, then we *can* start a new request.
+        #       We are not allowed to begin fetching the response to this new
+        #       request, however, until that prior response is complete.
+        #
+        if self.__state == _CS_IDLE:
+            self.__state = _CS_REQ_STARTED
+        else:
+            raise CannotSendRequest(self.__state)
+
+        # Save the method we use, we need it later in the response phase
+        self._method = method
+        if not url:
+            url = '/'
+        request = '%s %s %s' % (method, url, self._http_vsn_str)
+
+        # Non-ASCII characters should have been eliminated earlier
+        self._output(request.encode('ascii'))
+
+        if self._http_vsn == 11:
+            # Issue some standard headers for better HTTP/1.1 compliance
+
+            if not skip_host:
+                # this header is issued *only* for HTTP/1.1
+                # connections. more specifically, this means it is
+                # only issued when the client uses the new
+                # HTTPConnection() class. backwards-compat clients
+                # will be using HTTP/1.0 and those clients may be
+                # issuing this header themselves. we should NOT issue
+                # it twice; some web servers (such as Apache) barf
+                # when they see two Host: headers
+
+                # If we need a non-standard port, include it in the
+                # header.  If the request is going through a proxy,
+                # use the host of the actual URL, not the host of the
+                # proxy.
+
+                netloc = ''
+                if url.startswith('http'):
+                    nil, netloc, nil, nil, nil = urlsplit(url)
+
+                if netloc:
+                    try:
+                        netloc_enc = netloc.encode("ascii")
+                    except UnicodeEncodeError:
+                        netloc_enc = netloc.encode("idna")
+                    self.putheader('Host', netloc_enc)
+                else:
+                    if self._tunnel_host:
+                        host = self._tunnel_host
+                        port = self._tunnel_port
+                    else:
+                        host = self.host
+                        port = self.port
+
+                    try:
+                        host_enc = host.encode("ascii")
+                    except UnicodeEncodeError:
+                        host_enc = host.encode("idna")
+
+                    # As per RFC 2732, IPv6 addresses should be wrapped with []
+                    # when used as Host header
+
+                    if host.find(':') >= 0:
+                        host_enc = b'[' + host_enc + b']'
+
+                    if port == self.default_port:
+                        self.putheader('Host', host_enc)
+                    else:
+                        host_enc = host_enc.decode("ascii")
+                        self.putheader('Host', "%s:%s" % (host_enc, port))
+
+            # note: we are assuming that clients will not attempt to set these
+            #       headers since *this* library must deal with the
+            #       consequences. this also means that when the supporting
+            #       libraries are updated to recognize other forms, then this
+            #       code should be changed (removed or updated).
+
+            # we only want a Content-Encoding of "identity" since we don't
+            # support encodings such as x-gzip or x-deflate.
+            if not skip_accept_encoding:
+                self.putheader('Accept-Encoding', 'identity')
+
+            # we can accept "chunked" Transfer-Encodings, but no others
+            # NOTE: no TE header implies *only* "chunked"
+            #self.putheader('TE', 'chunked')
+
+            # if TE is supplied in the header, then it must appear in a
+            # Connection header.
+            #self.putheader('Connection', 'TE')
+
+        else:
+            # For HTTP/1.0, the server will assume "not chunked"
+            pass
+
+    def putheader(self, header, *values):
+        """Send a request header line to the server.
+
+        For example: h.putheader('Accept', 'text/html')
+        """
+        if self.__state != _CS_REQ_STARTED:
+            raise CannotSendHeader()
+
+        if hasattr(header, 'encode'):
+            header = header.encode('ascii')
+
+        if not _is_legal_header_name(header):
+            raise ValueError('Invalid header name %r' % (header,))
+
+        values = list(values)
+        for i, one_value in enumerate(values):
+            if hasattr(one_value, 'encode'):
+                values[i] = one_value.encode('latin-1')
+            elif isinstance(one_value, int):
+                values[i] = str(one_value).encode('ascii')
+
+            if _is_illegal_header_value(values[i]):
+                raise ValueError('Invalid header value %r' % (values[i],))
+
+        value = b'\r\n\t'.join(values)
+        header = header + b': ' + value
+        self._output(header)
+
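putrequest(), putheader() and endheaders() are the low-level interface
underneath request(). A sketch of composing a request by hand, assuming a
reachable host:

    from eventlet.green.http.client import HTTPConnection

    conn = HTTPConnection("example.com")
    body = b'{"ok": true}'
    conn.putrequest("POST", "/submit")   # Host/Accept-Encoding added for us
    conn.putheader("Content-Type", "application/json")
    conn.putheader("Content-Length", len(body))  # ints become ASCII digits
    conn.endheaders(body)                # flush the buffered request + body
    print(conn.getresponse().status)
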
+    def endheaders(self, message_body=None, *, encode_chunked=False):
+        """Indicate that the last header line has been sent to the server.
+
+        This method sends the request to the server.  The optional message_body
+        argument can be used to pass a message body associated with the
+        request.
+        """
+        if self.__state == _CS_REQ_STARTED:
+            self.__state = _CS_REQ_SENT
+        else:
+            raise CannotSendHeader()
+        self._send_output(message_body, encode_chunked=encode_chunked)
+
+    def request(self, method, url, body=None, headers={}, *,
+                encode_chunked=False):
+        """Send a complete request to the server."""
+        self._send_request(method, url, body, headers, encode_chunked)
+
+    def _set_content_length(self, body, method):
+        # Set the content-length based on the body. If the body is "empty", we
+        # set Content-Length: 0 for methods that expect a body (RFC 7230,
+        # Section 3.3.2). If the body is set for other methods, we set the
+        # header provided we can figure out what the length is.
+        thelen = None
+        method_expects_body = method.upper() in _METHODS_EXPECTING_BODY
+        if body is None and method_expects_body:
+            thelen = '0'
+        elif body is not None:
+            try:
+                thelen = str(len(body))
+            except TypeError:
+                # If this is a file-like object, try to
+                # fstat its file descriptor
+                try:
+                    thelen = str(os.fstat(body.fileno()).st_size)
+                except (AttributeError, OSError):
+                    # Don't send a length if this failed
+                    if self.debuglevel > 0: print("Cannot stat!!")
+
+        if thelen is not None:
+            self.putheader('Content-Length', thelen)
+
+    def _send_request(self, method, url, body, headers, encode_chunked):
+        # Honor explicitly requested Host: and Accept-Encoding: headers.
+        header_names = frozenset(k.lower() for k in headers)
+        skips = {}
+        if 'host' in header_names:
+            skips['skip_host'] = 1
+        if 'accept-encoding' in header_names:
+            skips['skip_accept_encoding'] = 1
+
+        self.putrequest(method, url, **skips)
+
+        # chunked encoding will happen if HTTP/1.1 is used and either
+        # the caller passes encode_chunked=True or the following
+        # conditions hold:
+        # 1. content-length has not been explicitly set
+        # 2. the body is a file or iterable, but not a str or bytes-like
+        # 3. Transfer-Encoding has NOT been explicitly set by the caller
+
+        if 'content-length' not in header_names:
+            # only chunk body if not explicitly set for backwards
+            # compatibility, assuming the client code is already handling the
+            # chunking
+            if 'transfer-encoding' not in header_names:
+                # if content-length cannot be automatically determined, fall
+                # back to chunked encoding
+                encode_chunked = False
+                content_length = self._get_content_length(body, method)
+                if content_length is None:
+                    if body is not None:
+                        if self.debuglevel > 0:
+                            print('Unable to determine size of %r' % body)
+                        encode_chunked = True
+                        self.putheader('Transfer-Encoding', 'chunked')
+                else:
+                    self.putheader('Content-Length', str(content_length))
+        else:
+            encode_chunked = False
+
+        for hdr, value in headers.items():
+            self.putheader(hdr, value)
+        if isinstance(body, str):
+            # RFC 2616 Section 3.7.1 says that text media types have a
+            # default charset of iso-8859-1.
+            body = _encode(body, 'body')
+        self.endheaders(body, encode_chunked=encode_chunked)
+
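In practice this means a file-like body with no explicit Content-Length is
streamed with Transfer-Encoding: chunked on HTTP/1.1. A sketch (the path
is hypothetical):

    from eventlet.green.http.client import HTTPConnection

    conn = HTTPConnection("example.com")
    with open("/tmp/upload.bin", "rb") as f:
        # body is file-like and no Content-Length was given -> chunked
        conn.request("PUT", "/upload", body=f)
    print(conn.getresponse().status)
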
+    def getresponse(self):
+        """Get the response from the server.
+
+        If the HTTPConnection is in the correct state, returns an
+        instance of HTTPResponse or of whatever object is returned by
+        the response_class variable.
+
+        If a request has not been sent or if a previous response has
+        not been handled, ResponseNotReady is raised.  If the HTTP
+        response indicates that the connection should be closed, then
+        it will be closed before the response is returned.  When the
+        connection is closed, the underlying socket is closed.
+        """
+
+        # if a prior response has been completed, then forget about it.
+        if self.__response and self.__response.isclosed():
+            self.__response = None
+
+        # if a prior response exists, then it must be completed (otherwise, we
+        # cannot read this response's header to determine the connection-close
+        # behavior)
+        #
+        # note: if a prior response existed, but was connection-close, then the
+        # socket and response were made independent of this HTTPConnection
+        # object since a new request requires that we open a whole new
+        # connection
+        #
+        # this means the prior response had one of two states:
+        #   1) will_close: this connection was reset and the prior socket and
+        #                  response operate independently
+        #   2) persistent: the response was retained and we await its
+        #                  isclosed() status to become true.
+        #
+        if self.__state != _CS_REQ_SENT or self.__response:
+            raise ResponseNotReady(self.__state)
+
+        if self.debuglevel > 0:
+            response = self.response_class(self.sock, self.debuglevel,
+                                           method=self._method)
+        else:
+            response = self.response_class(self.sock, method=self._method)
+
+        try:
+            try:
+                response.begin()
+            except ConnectionError:
+                self.close()
+                raise
+            assert response.will_close != _UNKNOWN
+            self.__state = _CS_IDLE
+
+            if response.will_close:
+                # this effectively passes the connection to the response
+                self.close()
+            else:
+                # remember this, so we can tell when it is complete
+                self.__response = response
+
+            return response
+        except:
+            response.close()
+            raise
+
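Because persistent connections are reused, each response must be read to
completion before the next one can be fetched; an unread prior response
makes getresponse() raise ResponseNotReady. A sketch of two sequential
requests on one connection:

    from eventlet.green.http.client import HTTPConnection

    conn = HTTPConnection("example.com")
    for path in ("/a", "/b"):
        conn.request("GET", path)
        resp = conn.getresponse()
        resp.read()   # drain the body, marking the response closed
    conn.close()
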
+try:
+    import ssl
+except ImportError:
+    pass
+else:
+    class HTTPSConnection(HTTPConnection):
+        "This class allows communication via SSL."
+
+        default_port = HTTPS_PORT
+
+        # XXX Should key_file and cert_file be deprecated in favour of context?
+
+        def __init__(self, host, port=None, key_file=None, cert_file=None,
+                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+                     source_address=None, *, context=None,
+                     check_hostname=None):
+            super(HTTPSConnection, self).__init__(host, port, timeout,
+                                                  source_address)
+            self.key_file = key_file
+            self.cert_file = cert_file
+            if context is None:
+                context = ssl._create_default_https_context()
+            will_verify = context.verify_mode != ssl.CERT_NONE
+            if check_hostname is None:
+                check_hostname = context.check_hostname
+            if check_hostname and not will_verify:
+                raise ValueError("check_hostname needs a SSL context with "
+                                 "either CERT_OPTIONAL or CERT_REQUIRED")
+            if key_file or cert_file:
+                context.load_cert_chain(cert_file, key_file)
+            self._context = context
+            self._check_hostname = check_hostname
+
+        def connect(self):
+            "Connect to a host on a given (SSL) port."
+
+            super().connect()
+
+            if self._tunnel_host:
+                server_hostname = self._tunnel_host
+            else:
+                server_hostname = self.host
+
+            self.sock = self._context.wrap_socket(self.sock,
+                                                  server_hostname=server_hostname)
+            if not self._context.check_hostname and self._check_hostname:
+                try:
+                    ssl.match_hostname(self.sock.getpeercert(), server_hostname)
+                except Exception:
+                    self.sock.shutdown(socket.SHUT_RDWR)
+                    self.sock.close()
+                    raise
+
+    __all__.append("HTTPSConnection")
+
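A sketch of the context-based configuration, which the constructor prefers
over the legacy key_file/cert_file pair (stdlib ssl shown for brevity;
under eventlet the green ssl module is typically monkey-patched in):

    import ssl
    from eventlet.green.http.client import HTTPSConnection

    ctx = ssl.create_default_context()   # CERT_REQUIRED + hostname checking
    conn = HTTPSConnection("example.com", 443, context=ctx)
    conn.request("GET", "/")
    print(conn.getresponse().status)
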
+class HTTPException(Exception):
+    # Subclasses that define an __init__ must call Exception.__init__
+    # or define self.args.  Otherwise, str() will fail.
+    pass
+
+class NotConnected(HTTPException):
+    pass
+
+class InvalidURL(HTTPException):
+    pass
+
+class UnknownProtocol(HTTPException):
+    def __init__(self, version):
+        self.args = version,
+        self.version = version
+
+class UnknownTransferEncoding(HTTPException):
+    pass
+
+class UnimplementedFileMode(HTTPException):
+    pass
+
+class IncompleteRead(HTTPException):
+    def __init__(self, partial, expected=None):
+        self.args = partial,
+        self.partial = partial
+        self.expected = expected
+    def __repr__(self):
+        if self.expected is not None:
+            e = ', %i more expected' % self.expected
+        else:
+            e = ''
+        return '%s(%i bytes read%s)' % (self.__class__.__name__,
+                                        len(self.partial), e)
+    def __str__(self):
+        return repr(self)
+
+class ImproperConnectionState(HTTPException):
+    pass
+
+class CannotSendRequest(ImproperConnectionState):
+    pass
+
+class CannotSendHeader(ImproperConnectionState):
+    pass
+
+class ResponseNotReady(ImproperConnectionState):
+    pass
+
+class BadStatusLine(HTTPException):
+    def __init__(self, line):
+        if not line:
+            line = repr(line)
+        self.args = line,
+        self.line = line
+
+class LineTooLong(HTTPException):
+    def __init__(self, line_type):
+        HTTPException.__init__(self, "got more than %d bytes when reading %s"
+                                     % (_MAXLINE, line_type))
+
+class RemoteDisconnected(ConnectionResetError, BadStatusLine):
+    def __init__(self, *pos, **kw):
+        BadStatusLine.__init__(self, "")
+        ConnectionResetError.__init__(self, *pos, **kw)
+
+# for backwards compatibility
+error = HTTPException

+ 2152 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/cookiejar.py

@@ -0,0 +1,2152 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee.  This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+r"""HTTP cookie handling for web clients.
+
+This module has (now fairly distant) origins in Gisle Aas' Perl module
+HTTP::Cookies, from the libwww-perl library.
+
+Docstrings, comments and debug strings in this code refer to the
+attributes of the HTTP cookie system as cookie-attributes, to distinguish
+them clearly from Python attributes.
+
+Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
+distributed with the Python standard library, but are available from
+http://wwwsearch.sf.net/):
+
+                        CookieJar____
+                        /     \      \
+            FileCookieJar      \      \
+             /    |   \         \      \
+ MozillaCookieJar | LWPCookieJar \      \
+                  |               |      \
+                  |   ---MSIEBase |       \
+                  |  /      |     |        \
+                  | /   MSIEDBCookieJar BSDDBCookieJar
+                  |/
+               MSIECookieJar
+
+"""
+
+__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
+           'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
+
+import copy
+import datetime
+import re
+import time
+# Eventlet change: urllib.request used to be imported here but it's not used,
+# removed for clarity
+import urllib.parse
+from calendar import timegm
+
+from eventlet.green import threading as _threading, time
+from eventlet.green.http import client as http_client  # only for the default HTTP port
+
+debug = False   # set to True to enable debugging via the logging module
+logger = None
+
+def _debug(*args):
+    if not debug:
+        return
+    global logger
+    if not logger:
+        import logging
+        logger = logging.getLogger("http.cookiejar")
+    return logger.debug(*args)
+
+
+DEFAULT_HTTP_PORT = str(http_client.HTTP_PORT)
+MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
+                         "instance initialised with one)")
+
+def _warn_unhandled_exception():
+    # There are a few catch-all except: statements in this module, for
+    # catching input that's bad in unexpected ways.  Warn if any
+    # exceptions are caught there.
+    import io, warnings, traceback
+    f = io.StringIO()
+    traceback.print_exc(None, f)
+    msg = f.getvalue()
+    warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2)
+
+
+# Date/time conversion
+# -----------------------------------------------------------------------------
+
+EPOCH_YEAR = 1970
+def _timegm(tt):
+    year, month, mday, hour, min, sec = tt[:6]
+    if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
+        (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
+        return timegm(tt)
+    else:
+        return None
+
+DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
+          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+MONTHS_LOWER = []
+for month in MONTHS: MONTHS_LOWER.append(month.lower())
+
+def time2isoz(t=None):
+    """Return a string representing time in seconds since epoch, t.
+
+    If the function is called without an argument, it will use the current
+    time.
+
+    The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
+    representing Universal Time (UTC, aka GMT).  An example of this format is:
+
+    1994-11-24 08:49:37Z
+
+    """
+    if t is None:
+        dt = datetime.datetime.utcnow()
+    else:
+        dt = datetime.datetime.utcfromtimestamp(t)
+    return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
+        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
+
+def time2netscape(t=None):
+    """Return a string representing time in seconds since epoch, t.
+
+    If the function is called without an argument, it will use the current
+    time.
+
+    The format of the returned string is like this:
+
+    Wed, DD-Mon-YYYY HH:MM:SS GMT
+
+    """
+    if t is None:
+        dt = datetime.datetime.utcnow()
+    else:
+        dt = datetime.datetime.utcfromtimestamp(t)
+    return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
+        DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1],
+        dt.year, dt.hour, dt.minute, dt.second)
+
+
+UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
+
+TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII)
+def offset_from_tz_string(tz):
+    offset = None
+    if tz in UTC_ZONES:
+        offset = 0
+    else:
+        m = TIMEZONE_RE.search(tz)
+        if m:
+            offset = 3600 * int(m.group(2))
+            if m.group(3):
+                offset = offset + 60 * int(m.group(3))
+            if m.group(1) == '-':
+                offset = -offset
+    return offset
+
+def _str2time(day, mon, yr, hr, min, sec, tz):
+    yr = int(yr)
+    if yr > datetime.MAXYEAR:
+        return None
+
+    # translate month name to number
+    # month numbers start with 1 (January)
+    try:
+        mon = MONTHS_LOWER.index(mon.lower())+1
+    except ValueError:
+        # maybe it's already a number
+        try:
+            imon = int(mon)
+        except ValueError:
+            return None
+        if 1 <= imon <= 12:
+            mon = imon
+        else:
+            return None
+
+    # make sure clock elements are defined
+    if hr is None: hr = 0
+    if min is None: min = 0
+    if sec is None: sec = 0
+
+    day = int(day)
+    hr = int(hr)
+    min = int(min)
+    sec = int(sec)
+
+    if yr < 1000:
+        # find "obvious" year
+        cur_yr = time.localtime(time.time())[0]
+        m = cur_yr % 100
+        tmp = yr
+        yr = yr + cur_yr - m
+        m = m - tmp
+        if abs(m) > 50:
+            if m > 0: yr = yr + 100
+            else: yr = yr - 100
+
+    # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
+    t = _timegm((yr, mon, day, hr, min, sec, tz))
+
+    if t is not None:
+        # adjust time using timezone string, to get absolute time since epoch
+        if tz is None:
+            tz = "UTC"
+        tz = tz.upper()
+        offset = offset_from_tz_string(tz)
+        if offset is None:
+            return None
+        t = t - offset
+
+    return t
+
+STRICT_DATE_RE = re.compile(
+    r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
+    "(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII)
+WEEKDAY_RE = re.compile(
+    r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII)
+LOOSE_HTTP_DATE_RE = re.compile(
+    r"""^
+    (\d\d?)            # day
+       (?:\s+|[-\/])
+    (\w+)              # month
+        (?:\s+|[-\/])
+    (\d+)              # year
+    (?:
+          (?:\s+|:)    # separator before clock
+       (\d\d?):(\d\d)  # hour:min
+       (?::(\d\d))?    # optional seconds
+    )?                 # optional clock
+       \s*
+    ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
+       \s*
+    (?:\(\w+\))?       # ASCII representation of timezone in parens.
+       \s*$""", re.X | re.ASCII)
+def http2time(text):
+    """Returns time in seconds since epoch of time represented by a string.
+
+    Return value is an integer.
+
+    None is returned if the format of str is unrecognized, the time is outside
+    the representable range, or the timezone string is not recognized.  If the
+    string contains no timezone, UTC is assumed.
+
+    The timezone in the string may be numerical (like "-0800" or "+0100") or a
+    string timezone (like "UTC", "GMT", "BST" or "EST").  Currently, only the
+    timezone strings equivalent to UTC (zero offset) are known to the function.
+
+    The function loosely parses the following formats:
+
+    Wed, 09 Feb 1994 22:23:32 GMT       -- HTTP format
+    Tuesday, 08-Feb-94 14:15:29 GMT     -- old rfc850 HTTP format
+    Tuesday, 08-Feb-1994 14:15:29 GMT   -- broken rfc850 HTTP format
+    09 Feb 1994 22:23:32 GMT            -- HTTP format (no weekday)
+    08-Feb-94 14:15:29 GMT              -- rfc850 format (no weekday)
+    08-Feb-1994 14:15:29 GMT            -- broken rfc850 format (no weekday)
+
+    The parser ignores leading and trailing whitespace.  The time may be
+    absent.
+
+    If the year is given with only 2 digits, the function will select the
+    century that makes the year closest to the current date.
+
+    """
+    # fast exit for strictly conforming string
+    m = STRICT_DATE_RE.search(text)
+    if m:
+        g = m.groups()
+        mon = MONTHS_LOWER.index(g[1].lower()) + 1
+        tt = (int(g[2]), mon, int(g[0]),
+              int(g[3]), int(g[4]), float(g[5]))
+        return _timegm(tt)
+
+    # No, we need some messy parsing...
+
+    # clean up
+    text = text.lstrip()
+    text = WEEKDAY_RE.sub("", text, 1)  # Useless weekday
+
+    # tz is time zone specifier string
+    day, mon, yr, hr, min, sec, tz = [None]*7
+
+    # loose regexp parse
+    m = LOOSE_HTTP_DATE_RE.search(text)
+    if m is not None:
+        day, mon, yr, hr, min, sec, tz = m.groups()
+    else:
+        return None  # bad format
+
+    return _str2time(day, mon, yr, hr, min, sec, tz)
+
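Both the strict and loose paths resolve the RFC 1123 example to the same
epoch value; a quick sketch:

    from eventlet.green.http.cookiejar import http2time

    assert http2time("Wed, 09 Feb 1994 22:23:32 GMT") == 760832612  # strict
    assert http2time("09 Feb 1994 22:23:32 GMT") == 760832612       # loose
    assert http2time("not a date") is None
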
+ISO_DATE_RE = re.compile(
+    """^
+    (\d{4})              # year
+       [-\/]?
+    (\d\d?)              # numerical month
+       [-\/]?
+    (\d\d?)              # day
+   (?:
+         (?:\s+|[-:Tt])  # separator before clock
+      (\d\d?):?(\d\d)    # hour:min
+      (?::?(\d\d(?:\.\d*)?))?  # optional seconds (and fractional)
+   )?                    # optional clock
+      \s*
+   ([-+]?\d\d?:?(:?\d\d)?
+    |Z|z)?               # timezone  (Z is "zero meridian", i.e. GMT)
+      \s*$""", re.X | re. ASCII)
+def iso2time(text):
+    """
+    As for http2time, but parses the ISO 8601 formats:
+
+    1994-02-03 14:15:29 -0100    -- ISO 8601 format
+    1994-02-03 14:15:29          -- zone is optional
+    1994-02-03                   -- only date
+    1994-02-03T14:15:29          -- Use T as separator
+    19940203T141529Z             -- ISO 8601 compact format
+    19940203                     -- only date
+
+    """
+    # clean up
+    text = text.lstrip()
+
+    # tz is time zone specifier string
+    day, mon, yr, hr, min, sec, tz = [None]*7
+
+    # loose regexp parse
+    m = ISO_DATE_RE.search(text)
+    if m is not None:
+        # XXX there's an extra bit of the timezone I'm ignoring here: is
+        #   this the right thing to do?
+        yr, mon, day, hr, min, sec, tz, _ = m.groups()
+    else:
+        return None  # bad format
+
+    return _str2time(day, mon, yr, hr, min, sec, tz)
+
+
+# Header parsing
+# -----------------------------------------------------------------------------
+
+def unmatched(match):
+    """Return unmatched part of re.Match object."""
+    start, end = match.span(0)
+    return match.string[:start]+match.string[end:]
+
+HEADER_TOKEN_RE =        re.compile(r"^\s*([^=\s;,]+)")
+HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
+HEADER_VALUE_RE =        re.compile(r"^\s*=\s*([^\s;,]*)")
+HEADER_ESCAPE_RE = re.compile(r"\\(.)")
+def split_header_words(header_values):
+    r"""Parse header values into a list of lists containing key,value pairs.
+
+    The function knows how to deal with ",", ";" and "=" as well as quoted
+    values after "=".  A list of space separated tokens are parsed as if they
+    were separated by ";".
+
+    If the header_values passed as argument contains multiple values, then they
+    are treated as if they were a single value separated by comma ",".
+
+    This means that this function is useful for parsing header fields that
+    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
+    the requirement for tokens).
+
+      headers           = #header
+      header            = (token | parameter) *( [";"] (token | parameter))
+
+      token             = 1*<any CHAR except CTLs or separators>
+      separators        = "(" | ")" | "<" | ">" | "@"
+                        | "," | ";" | ":" | "\" | <">
+                        | "/" | "[" | "]" | "?" | "="
+                        | "{" | "}" | SP | HT
+
+      quoted-string     = ( <"> *(qdtext | quoted-pair ) <"> )
+      qdtext            = <any TEXT except <">>
+      quoted-pair       = "\" CHAR
+
+      parameter         = attribute "=" value
+      attribute         = token
+      value             = token | quoted-string
+
+    Each header is represented by a list of key/value pairs.  The value for a
+    simple token (not part of a parameter) is None.  Syntactically incorrect
+    headers will not necessarily be parsed as you would want.
+
+    This is easier to describe with some examples:
+
+    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
+    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
+    >>> split_header_words(['text/html; charset="iso-8859-1"'])
+    [[('text/html', None), ('charset', 'iso-8859-1')]]
+    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
+    [[('Basic', None), ('realm', '"foobar"')]]
+
+    """
+    assert not isinstance(header_values, str)
+    result = []
+    for text in header_values:
+        orig_text = text
+        pairs = []
+        while text:
+            m = HEADER_TOKEN_RE.search(text)
+            if m:
+                text = unmatched(m)
+                name = m.group(1)
+                m = HEADER_QUOTED_VALUE_RE.search(text)
+                if m:  # quoted value
+                    text = unmatched(m)
+                    value = m.group(1)
+                    value = HEADER_ESCAPE_RE.sub(r"\1", value)
+                else:
+                    m = HEADER_VALUE_RE.search(text)
+                    if m:  # unquoted value
+                        text = unmatched(m)
+                        value = m.group(1)
+                        value = value.rstrip()
+                    else:
+                        # no value, a lone token
+                        value = None
+                pairs.append((name, value))
+            elif text.lstrip().startswith(","):
+                # concatenated headers, as per RFC 2616 section 4.2
+                text = text.lstrip()[1:]
+                if pairs: result.append(pairs)
+                pairs = []
+            else:
+                # skip junk
+                non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
+                assert nr_junk_chars > 0, (
+                    "split_header_words bug: '%s', '%s', %s" %
+                    (orig_text, text, pairs))
+                text = non_junk
+        if pairs: result.append(pairs)
+    return result
+
+HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
+def join_header_words(lists):
+    """Do the inverse (almost) of the conversion done by split_header_words.
+
+    Takes a list of lists of (key, value) pairs and produces a single header
+    value.  Attribute values are quoted if needed.
+
+    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859-1")]])
+    'text/plain; charset="iso-8859-1"'
+    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859-1")]])
+    'text/plain, charset="iso-8859-1"'
+
+    """
+    headers = []
+    for pairs in lists:
+        attr = []
+        for k, v in pairs:
+            if v is not None:
+                if not re.search(r"^\w+$", v):
+                    v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v)  # escape " and \
+                    v = '"%s"' % v
+                k = "%s=%s" % (k, v)
+            attr.append(k)
+        if attr: headers.append("; ".join(attr))
+    return ", ".join(headers)
+
+def strip_quotes(text):
+    if text.startswith('"'):
+        text = text[1:]
+    if text.endswith('"'):
+        text = text[:-1]
+    return text
+
+def parse_ns_headers(ns_headers):
+    """Ad-hoc parser for Netscape protocol cookie-attributes.
+
+    The old Netscape cookie format for Set-Cookie can for instance contain
+    an unquoted "," in the expires field, so we have to use this ad-hoc
+    parser instead of split_header_words.
+
+    XXX This may not make the best possible effort to parse all the crap
+    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
+    parser is probably better, so we could do worse than following that if
+    this ever gives any trouble.
+
+    Currently, this is also used for parsing RFC 2109 cookies.
+
+    """
+    known_attrs = ("expires", "domain", "path", "secure",
+                   # RFC 2109 attrs (may turn up in Netscape cookies, too)
+                   "version", "port", "max-age")
+
+    result = []
+    for ns_header in ns_headers:
+        pairs = []
+        version_set = False
+
+        # XXX: The following does not strictly adhere to RFCs in that empty
+        # names and values are legal (the former will only appear once and will
+        # be overwritten if multiple occurrences are present). This is
+        # mostly to deal with backwards compatibility.
+        for ii, param in enumerate(ns_header.split(';')):
+            param = param.strip()
+
+            key, sep, val = param.partition('=')
+            key = key.strip()
+
+            if not key:
+                if ii == 0:
+                    break
+                else:
+                    continue
+
+            # allow for a distinction between present and empty and missing
+            # altogether
+            val = val.strip() if sep else None
+
+            if ii != 0:
+                lc = key.lower()
+                if lc in known_attrs:
+                    key = lc
+
+                if key == "version":
+                    # This is an RFC 2109 cookie.
+                    if val is not None:
+                        val = strip_quotes(val)
+                    version_set = True
+                elif key == "expires":
+                    # convert expires date to seconds since epoch
+                    if val is not None:
+                        val = http2time(strip_quotes(val))  # None if invalid
+            pairs.append((key, val))
+
+        if pairs:
+            if not version_set:
+                pairs.append(("version", "0"))
+            result.append(pairs)
+
+    return result
+
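A sketch of what the parser produces for a typical Netscape-style
Set-Cookie value; note the lower-cased known attributes, the expires value
converted to an epoch time, and the synthesized version attribute:

    from eventlet.green.http.cookiejar import parse_ns_headers

    parsed = parse_ns_headers(
        ['sid=abc123; Path=/; expires=Wed, 09 Feb 1994 22:23:32 GMT; secure'])
    assert parsed == [[('sid', 'abc123'), ('path', '/'),
                       ('expires', 760832612.0), ('secure', None),
                       ('version', '0')]]
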
+
+IPV4_RE = re.compile(r"\.\d+$", re.ASCII)
+def is_HDN(text):
+    """Return True if text is a host domain name."""
+    # XXX
+    # This may well be wrong.  Which RFC is HDN defined in, if any (for
+    #  the purposes of RFC 2965)?
+    # For the current implementation, what about IPv6?  Remember to look
+    #  at other uses of IPV4_RE also, if change this.
+    if IPV4_RE.search(text):
+        return False
+    if text == "":
+        return False
+    if text[0] == "." or text[-1] == ".":
+        return False
+    return True
+
+def domain_match(A, B):
+    """Return True if domain A domain-matches domain B, according to RFC 2965.
+
+    A and B may be host domain names or IP addresses.
+
+    RFC 2965, section 1:
+
+    Host names can be specified either as an IP address or a HDN string.
+    Sometimes we compare one host name with another.  (Such comparisons SHALL
+    be case-insensitive.)  Host A's name domain-matches host B's if
+
+         *  their host name strings string-compare equal; or
+
+         * A is a HDN string and has the form NB, where N is a non-empty
+            name string, B has the form .B', and B' is a HDN string.  (So,
+            x.y.com domain-matches .Y.com but not Y.com.)
+
+    Note that domain-match is not a commutative operation: a.b.c.com
+    domain-matches .c.com, but not the reverse.
+
+    """
+    # Note that, if A or B are IP addresses, the only relevant part of the
+    # definition of the domain-match algorithm is the direct string-compare.
+    A = A.lower()
+    B = B.lower()
+    if A == B:
+        return True
+    if not is_HDN(A):
+        return False
+    i = A.rfind(B)
+    if i == -1 or i == 0:
+        # A does not have form NB, or N is the empty string
+        return False
+    if not B.startswith("."):
+        return False
+    if not is_HDN(B[1:]):
+        return False
+    return True
+
+def liberal_is_HDN(text):
+    """Return True if text is a sort-of-like a host domain name.
+
+    For accepting/blocking domains.
+
+    """
+    if IPV4_RE.search(text):
+        return False
+    return True
+
+def user_domain_match(A, B):
+    """For blocking/accepting domains.
+
+    A and B may be host domain names or IP addresses.
+
+    """
+    A = A.lower()
+    B = B.lower()
+    if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
+        if A == B:
+            # equal IP addresses
+            return True
+        return False
+    initial_dot = B.startswith(".")
+    if initial_dot and A.endswith(B):
+        return True
+    if not initial_dot and A == B:
+        return True
+    return False
+
+cut_port_re = re.compile(r":\d+$", re.ASCII)
+def request_host(request):
+    """Return request-host, as defined by RFC 2965.
+
+    Variation from RFC: returned value is lowercased, for convenient
+    comparison.
+
+    """
+    url = request.get_full_url()
+    host = urllib.parse.urlparse(url)[1]
+    if host == "":
+        host = request.get_header("Host", "")
+
+    # remove port, if present
+    host = cut_port_re.sub("", host, 1)
+    return host.lower()
+
+def eff_request_host(request):
+    """Return a tuple (request-host, effective request-host name).
+
+    As defined by RFC 2965, except both are lowercased.
+
+    """
+    erhn = req_host = request_host(request)
+    if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
+        erhn = req_host + ".local"
+    return req_host, erhn
+
+def request_path(request):
+    """Path component of request-URI, as defined by RFC 2965."""
+    url = request.get_full_url()
+    parts = urllib.parse.urlsplit(url)
+    path = escape_path(parts.path)
+    if not path.startswith("/"):
+        # fix bad RFC 2396 absoluteURI
+        path = "/" + path
+    return path
+
+def request_port(request):
+    host = request.host
+    i = host.find(':')
+    if i >= 0:
+        port = host[i+1:]
+        try:
+            int(port)
+        except ValueError:
+            _debug("nonnumeric port: '%s'", port)
+            return None
+    else:
+        port = DEFAULT_HTTP_PORT
+    return port
+
+# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
+# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
+HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
+ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
+def uppercase_escaped_char(match):
+    return "%%%s" % match.group(1).upper()
+def escape_path(path):
+    """Escape any invalid characters in HTTP URL, and uppercase all escapes."""
+    # There's no knowing what character encoding was used to create URLs
+    # containing %-escapes, but since we have to pick one to escape invalid
+    # path characters, we pick UTF-8, as recommended in the HTML 4.0
+    # specification:
+    # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
+    # And here, kind of: draft-fielding-uri-rfc2396bis-03
+    # (And in draft IRI specification: draft-duerst-iri-05)
+    # (And here, for new URI schemes: RFC 2718)
+    path = urllib.parse.quote(path, HTTP_PATH_SAFE)
+    path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
+    return path
+
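For example, a space is escaped while characters in HTTP_PATH_SAFE pass
through, and pre-existing escapes are only uppercased, not re-escaped:

    from eventlet.green.http.cookiejar import escape_path

    assert escape_path("/acme/price list") == "/acme/price%20list"
    assert escape_path("/a%2fb") == "/a%2Fb"
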
+def reach(h):
+    """Return reach of host h, as defined by RFC 2965, section 1.
+
+    The reach R of a host name H is defined as follows:
+
+       *  If
+
+          -  H is the host domain name of a host; and,
+
+          -  H has the form A.B; and
+
+          -  A has no embedded (that is, interior) dots; and
+
+          -  B has at least one embedded dot, or B is the string "local".
+             then the reach of H is .B.
+
+       *  Otherwise, the reach of H is H.
+
+    >>> reach("www.acme.com")
+    '.acme.com'
+    >>> reach("acme.com")
+    'acme.com'
+    >>> reach("acme.local")
+    '.local'
+
+    """
+    i = h.find(".")
+    if i >= 0:
+        #a = h[:i]  # this line is only here to show what a is
+        b = h[i+1:]
+        i = b.find(".")
+        if is_HDN(h) and (i >= 0 or b == "local"):
+            return "."+b
+    return h
+
+def is_third_party(request):
+    """
+
+    RFC 2965, section 3.3.6:
+
+        An unverifiable transaction is to a third-party host if its request-
+        host U does not domain-match the reach R of the request-host O in the
+        origin transaction.
+
+    """
+    req_host = request_host(request)
+    if not domain_match(req_host, reach(request.origin_req_host)):
+        return True
+    else:
+        return False
+
+
+class Cookie:
+    """HTTP Cookie.
+
+    This class represents both Netscape and RFC 2965 cookies.
+
+    This is deliberately a very simple class.  It just holds attributes.  It's
+    possible to construct Cookie instances that don't comply with the cookie
+    standards.  CookieJar.make_cookies is the factory function for Cookie
+    objects -- it deals with cookie parsing, supplying defaults, and
+    normalising to the representation used in this class.  CookiePolicy is
+    responsible for checking them to see whether they should be accepted from
+    and returned to the server.
+
+    Note that the port may be present in the headers, but unspecified ("Port"
+    rather than"Port=80", for example); if this is the case, port is None.
+
+    """
+
+    def __init__(self, version, name, value,
+                 port, port_specified,
+                 domain, domain_specified, domain_initial_dot,
+                 path, path_specified,
+                 secure,
+                 expires,
+                 discard,
+                 comment,
+                 comment_url,
+                 rest,
+                 rfc2109=False,
+                 ):
+
+        if version is not None: version = int(version)
+        if expires is not None: expires = int(float(expires))
+        if port is None and port_specified is True:
+            raise ValueError("if port is None, port_specified must be false")
+
+        self.version = version
+        self.name = name
+        self.value = value
+        self.port = port
+        self.port_specified = port_specified
+        # normalise case, as per RFC 2965 section 3.3.3
+        self.domain = domain.lower()
+        self.domain_specified = domain_specified
+        # Sigh.  We need to know whether the domain given in the
+        # cookie-attribute had an initial dot, in order to follow RFC 2965
+        # (as clarified in draft errata).  Needed for the returned $Domain
+        # value.
+        self.domain_initial_dot = domain_initial_dot
+        self.path = path
+        self.path_specified = path_specified
+        self.secure = secure
+        self.expires = expires
+        self.discard = discard
+        self.comment = comment
+        self.comment_url = comment_url
+        self.rfc2109 = rfc2109
+
+        self._rest = copy.copy(rest)
+
+    def has_nonstandard_attr(self, name):
+        return name in self._rest
+    def get_nonstandard_attr(self, name, default=None):
+        return self._rest.get(name, default)
+    def set_nonstandard_attr(self, name, value):
+        self._rest[name] = value
+
+    def is_expired(self, now=None):
+        if now is None: now = time.time()
+        if (self.expires is not None) and (self.expires <= now):
+            return True
+        return False
+
+    def __str__(self):
+        if self.port is None: p = ""
+        else: p = ":"+self.port
+        limit = self.domain + p + self.path
+        if self.value is not None:
+            namevalue = "%s=%s" % (self.name, self.value)
+        else:
+            namevalue = self.name
+        return "<Cookie %s for %s>" % (namevalue, limit)
+
+    def __repr__(self):
+        args = []
+        for name in ("version", "name", "value",
+                     "port", "port_specified",
+                     "domain", "domain_specified", "domain_initial_dot",
+                     "path", "path_specified",
+                     "secure", "expires", "discard", "comment", "comment_url",
+                     ):
+            attr = getattr(self, name)
+            args.append("%s=%s" % (name, repr(attr)))
+        args.append("rest=%s" % repr(self._rest))
+        args.append("rfc2109=%s" % repr(self.rfc2109))
+        return "%s(%s)" % (self.__class__.__name__, ", ".join(args))
+
+
+class CookiePolicy:
+    """Defines which cookies get accepted from and returned to server.
+
+    May also modify cookies, though this is probably a bad idea.
+
+    The subclass DefaultCookiePolicy defines the standard rules for Netscape
+    and RFC 2965 cookies -- override that if you want a customised policy.
+
+    """
+    def set_ok(self, cookie, request):
+        """Return true if (and only if) cookie should be accepted from server.
+
+        Currently, pre-expired cookies never get this far -- the CookieJar
+        class deletes such cookies itself.
+
+        """
+        raise NotImplementedError()
+
+    def return_ok(self, cookie, request):
+        """Return true if (and only if) cookie should be returned to server."""
+        raise NotImplementedError()
+
+    def domain_return_ok(self, domain, request):
+        """Return false if cookies should not be returned, given cookie domain.
+        """
+        return True
+
+    def path_return_ok(self, path, request):
+        """Return false if cookies should not be returned, given cookie path.
+        """
+        return True
+
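+# A minimal sketch of the customisation route described in the CookiePolicy
+# docstring: subclass DefaultCookiePolicy (below), call its set_ok() first,
+# and return false whenever it does.  HttpsOnlyPolicy is a hypothetical name.
+#
+#     class HttpsOnlyPolicy(DefaultCookiePolicy):
+#         def set_ok(self, cookie, request):
+#             if not DefaultCookiePolicy.set_ok(self, cookie, request):
+#                 return False
+#             return request.type == "https"  # accept cookies over HTTPS only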
+
+class DefaultCookiePolicy(CookiePolicy):
+    """Implements the standard rules for accepting and returning cookies."""
+
+    DomainStrictNoDots = 1
+    DomainStrictNonDomain = 2
+    DomainRFC2965Match = 4
+
+    DomainLiberal = 0
+    DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
+
+    def __init__(self,
+                 blocked_domains=None, allowed_domains=None,
+                 netscape=True, rfc2965=False,
+                 rfc2109_as_netscape=None,
+                 hide_cookie2=False,
+                 strict_domain=False,
+                 strict_rfc2965_unverifiable=True,
+                 strict_ns_unverifiable=False,
+                 strict_ns_domain=DomainLiberal,
+                 strict_ns_set_initial_dollar=False,
+                 strict_ns_set_path=False,
+                 ):
+        """Constructor arguments should be passed as keyword arguments only."""
+        self.netscape = netscape
+        self.rfc2965 = rfc2965
+        self.rfc2109_as_netscape = rfc2109_as_netscape
+        self.hide_cookie2 = hide_cookie2
+        self.strict_domain = strict_domain
+        self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
+        self.strict_ns_unverifiable = strict_ns_unverifiable
+        self.strict_ns_domain = strict_ns_domain
+        self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
+        self.strict_ns_set_path = strict_ns_set_path
+
+        if blocked_domains is not None:
+            self._blocked_domains = tuple(blocked_domains)
+        else:
+            self._blocked_domains = ()
+
+        if allowed_domains is not None:
+            allowed_domains = tuple(allowed_domains)
+        self._allowed_domains = allowed_domains
+
+    def blocked_domains(self):
+        """Return the sequence of blocked domains (as a tuple)."""
+        return self._blocked_domains
+    def set_blocked_domains(self, blocked_domains):
+        """Set the sequence of blocked domains."""
+        self._blocked_domains = tuple(blocked_domains)
+
+    def is_blocked(self, domain):
+        for blocked_domain in self._blocked_domains:
+            if user_domain_match(domain, blocked_domain):
+                return True
+        return False
+
+    def allowed_domains(self):
+        """Return None, or the sequence of allowed domains (as a tuple)."""
+        return self._allowed_domains
+    def set_allowed_domains(self, allowed_domains):
+        """Set the sequence of allowed domains, or None."""
+        if allowed_domains is not None:
+            allowed_domains = tuple(allowed_domains)
+        self._allowed_domains = allowed_domains
+
+    def is_not_allowed(self, domain):
+        if self._allowed_domains is None:
+            return False
+        for allowed_domain in self._allowed_domains:
+            if user_domain_match(domain, allowed_domain):
+                return False
+        return True
+
+    def set_ok(self, cookie, request):
+        """
+        If you override .set_ok(), be sure to call this method.  If it returns
+        false, so should your subclass (assuming your subclass wants to be more
+        strict about which cookies to accept).
+
+        """
+        _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
+
+        assert cookie.name is not None
+
+        for n in "version", "verifiability", "name", "path", "domain", "port":
+            fn_name = "set_ok_"+n
+            fn = getattr(self, fn_name)
+            if not fn(cookie, request):
+                return False
+
+        return True
+
+    def set_ok_version(self, cookie, request):
+        if cookie.version is None:
+            # Version is always set to 0 by parse_ns_headers if it's a Netscape
+            # cookie, so this must be an invalid RFC 2965 cookie.
+            _debug("   Set-Cookie2 without version attribute (%s=%s)",
+                   cookie.name, cookie.value)
+            return False
+        if cookie.version > 0 and not self.rfc2965:
+            _debug("   RFC 2965 cookies are switched off")
+            return False
+        elif cookie.version == 0 and not self.netscape:
+            _debug("   Netscape cookies are switched off")
+            return False
+        return True
+
+    def set_ok_verifiability(self, cookie, request):
+        if request.unverifiable and is_third_party(request):
+            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
+                _debug("   third-party RFC 2965 cookie during "
+                             "unverifiable transaction")
+                return False
+            elif cookie.version == 0 and self.strict_ns_unverifiable:
+                _debug("   third-party Netscape cookie during "
+                             "unverifiable transaction")
+                return False
+        return True
+
+    def set_ok_name(self, cookie, request):
+        # Try and stop servers setting V0 cookies designed to hack other
+        # servers that know both V0 and V1 protocols.
+        if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
+            cookie.name.startswith("$")):
+            _debug("   illegal name (starts with '$'): '%s'", cookie.name)
+            return False
+        return True
+
+    def set_ok_path(self, cookie, request):
+        if cookie.path_specified:
+            req_path = request_path(request)
+            if ((cookie.version > 0 or
+                 (cookie.version == 0 and self.strict_ns_set_path)) and
+                not req_path.startswith(cookie.path)):
+                _debug("   path attribute %s is not a prefix of request "
+                       "path %s", cookie.path, req_path)
+                return False
+        return True
+
+    def set_ok_domain(self, cookie, request):
+        if self.is_blocked(cookie.domain):
+            _debug("   domain %s is in user block-list", cookie.domain)
+            return False
+        if self.is_not_allowed(cookie.domain):
+            _debug("   domain %s is not in user allow-list", cookie.domain)
+            return False
+        if cookie.domain_specified:
+            req_host, erhn = eff_request_host(request)
+            domain = cookie.domain
+            if self.strict_domain and (domain.count(".") >= 2):
+                # XXX This should probably be compared with the Konqueror
+                # (kcookiejar.cpp) and Mozilla implementations, but it's a
+                # losing battle.
+                i = domain.rfind(".")
+                j = domain.rfind(".", 0, i)
+                if j == 0:  # domain like .foo.bar
+                    tld = domain[i+1:]
+                    sld = domain[j+1:i]
+                    if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
+                       "gov", "mil", "int", "aero", "biz", "cat", "coop",
+                       "info", "jobs", "mobi", "museum", "name", "pro",
+                       "travel", "eu") and len(tld) == 2:
+                        # domain like .co.uk
+                        _debug("   country-code second level domain %s", domain)
+                        return False
+            if domain.startswith("."):
+                undotted_domain = domain[1:]
+            else:
+                undotted_domain = domain
+            embedded_dots = (undotted_domain.find(".") >= 0)
+            if not embedded_dots and domain != ".local":
+                _debug("   non-local domain %s contains no embedded dot",
+                       domain)
+                return False
+            if cookie.version == 0:
+                if (not erhn.endswith(domain) and
+                    (not erhn.startswith(".") and
+                     not ("."+erhn).endswith(domain))):
+                    _debug("   effective request-host %s (even with added "
+                           "initial dot) does not end with %s",
+                           erhn, domain)
+                    return False
+            if (cookie.version > 0 or
+                (self.strict_ns_domain & self.DomainRFC2965Match)):
+                if not domain_match(erhn, domain):
+                    _debug("   effective request-host %s does not domain-match "
+                           "%s", erhn, domain)
+                    return False
+            if (cookie.version > 0 or
+                (self.strict_ns_domain & self.DomainStrictNoDots)):
+                host_prefix = req_host[:-len(domain)]
+                if (host_prefix.find(".") >= 0 and
+                    not IPV4_RE.search(req_host)):
+                    _debug("   host prefix %s for domain %s contains a dot",
+                           host_prefix, domain)
+                    return False
+        return True
+
+    def set_ok_port(self, cookie, request):
+        if cookie.port_specified:
+            req_port = request_port(request)
+            if req_port is None:
+                req_port = "80"
+            else:
+                req_port = str(req_port)
+            for p in cookie.port.split(","):
+                try:
+                    int(p)
+                except ValueError:
+                    _debug("   bad port %s (not numeric)", p)
+                    return False
+                if p == req_port:
+                    break
+            else:
+                _debug("   request port (%s) not found in %s",
+                       req_port, cookie.port)
+                return False
+        return True
+
+    def return_ok(self, cookie, request):
+        """
+        If you override .return_ok(), be sure to call this method.  If it
+        returns false, so should your subclass (assuming your subclass wants to
+        be more strict about which cookies to return).
+
+        """
+        # Path has already been checked by .path_return_ok(), and domain
+        # blocking done by .domain_return_ok().
+        _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
+
+        for n in "version", "verifiability", "secure", "expires", "port", "domain":
+            fn_name = "return_ok_"+n
+            fn = getattr(self, fn_name)
+            if not fn(cookie, request):
+                return False
+        return True
+
+    def return_ok_version(self, cookie, request):
+        if cookie.version > 0 and not self.rfc2965:
+            _debug("   RFC 2965 cookies are switched off")
+            return False
+        elif cookie.version == 0 and not self.netscape:
+            _debug("   Netscape cookies are switched off")
+            return False
+        return True
+
+    def return_ok_verifiability(self, cookie, request):
+        if request.unverifiable and is_third_party(request):
+            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
+                _debug("   third-party RFC 2965 cookie during unverifiable "
+                       "transaction")
+                return False
+            elif cookie.version == 0 and self.strict_ns_unverifiable:
+                _debug("   third-party Netscape cookie during unverifiable "
+                       "transaction")
+                return False
+        return True
+
+    def return_ok_secure(self, cookie, request):
+        if cookie.secure and request.type != "https":
+            _debug("   secure cookie with non-secure request")
+            return False
+        return True
+
+    def return_ok_expires(self, cookie, request):
+        if cookie.is_expired(self._now):
+            _debug("   cookie expired")
+            return False
+        return True
+
+    def return_ok_port(self, cookie, request):
+        if cookie.port:
+            req_port = request_port(request)
+            if req_port is None:
+                req_port = "80"
+            for p in cookie.port.split(","):
+                if p == req_port:
+                    break
+            else:
+                _debug("   request port %s does not match cookie port %s",
+                       req_port, cookie.port)
+                return False
+        return True
+
+    def return_ok_domain(self, cookie, request):
+        req_host, erhn = eff_request_host(request)
+        domain = cookie.domain
+
+        # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
+        if (cookie.version == 0 and
+            (self.strict_ns_domain & self.DomainStrictNonDomain) and
+            not cookie.domain_specified and domain != erhn):
+            _debug("   cookie with unspecified domain does not string-compare "
+                   "equal to request domain")
+            return False
+
+        if cookie.version > 0 and not domain_match(erhn, domain):
+            _debug("   effective request-host name %s does not domain-match "
+                   "RFC 2965 cookie domain %s", erhn, domain)
+            return False
+        if cookie.version == 0 and not ("."+erhn).endswith(domain):
+            _debug("   request-host %s does not match Netscape cookie domain "
+                   "%s", req_host, domain)
+            return False
+        return True
+
+    def domain_return_ok(self, domain, request):
+        # Liberal check of domain.  This is here as an optimization to avoid
+        # having to load lots of MSIE cookie files unless necessary.
+        req_host, erhn = eff_request_host(request)
+        if not req_host.startswith("."):
+            req_host = "."+req_host
+        if not erhn.startswith("."):
+            erhn = "."+erhn
+        if not (req_host.endswith(domain) or erhn.endswith(domain)):
+            #_debug("   request domain %s does not match cookie domain %s",
+            #       req_host, domain)
+            return False
+
+        if self.is_blocked(domain):
+            _debug("   domain %s is in user block-list", domain)
+            return False
+        if self.is_not_allowed(domain):
+            _debug("   domain %s is not in user allow-list", domain)
+            return False
+
+        return True
+
+    def path_return_ok(self, path, request):
+        _debug("- checking cookie path=%s", path)
+        req_path = request_path(request)
+        if not req_path.startswith(path):
+            _debug("  %s does not path-match %s", req_path, path)
+            return False
+        return True
+
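+# A minimal sketch of constructing the policy above; as its docstring notes,
+# arguments should be passed as keywords only.  The domain is illustrative.
+#
+#     policy = DefaultCookiePolicy(
+#         blocked_domains=["ads.example.com"],
+#         strict_ns_domain=DefaultCookiePolicy.DomainStrict)
+#     policy.is_blocked("ads.example.com")  # -> True, via user_domain_match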
+
+def vals_sorted_by_key(adict):
+    keys = sorted(adict.keys())
+    return map(adict.get, keys)
+
+def deepvalues(mapping):
+    """Iterates over nested mapping, depth-first, in sorted order by key."""
+    values = vals_sorted_by_key(mapping)
+    for obj in values:
+        mapping = False
+        try:
+            obj.items
+        except AttributeError:
+            pass
+        else:
+            mapping = True
+            yield from deepvalues(obj)
+        if not mapping:
+            yield obj
+
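+# For example, deepvalues({"b": {"y": 2, "x": 1}, "a": 0}) yields 0, 1, 2:
+# top-level keys are visited in sorted order and any nested mapping is
+# flattened depth-first before moving on.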
+
+# Used as second parameter to dict.get() method, to distinguish absent
+# dict key from one with a None value.
+class Absent: pass
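+# For example, in _cookie_from_cookie_tuple below, standard.get("port", Absent)
+# distinguishes a header with no Port attribute at all (Absent) from one with
+# a bare "Port" attribute carrying no value (None).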
+
+class CookieJar:
+    """Collection of HTTP cookies.
+
+    You may not need to know about this class: try
+    urllib.request.build_opener(HTTPCookieProcessor).open(url).
+    """
+
+    non_word_re = re.compile(r"\W")
+    quote_re = re.compile(r"([\"\\])")
+    strict_domain_re = re.compile(r"\.?[^.]*")
+    domain_re = re.compile(r"[^.]*")
+    dots_re = re.compile(r"^\.+")
+
+    magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII)
+
+    def __init__(self, policy=None):
+        if policy is None:
+            policy = DefaultCookiePolicy()
+        self._policy = policy
+
+        self._cookies_lock = _threading.RLock()
+        self._cookies = {}
+
+    def set_policy(self, policy):
+        self._policy = policy
+
+    def _cookies_for_domain(self, domain, request):
+        cookies = []
+        if not self._policy.domain_return_ok(domain, request):
+            return []
+        _debug("Checking %s for cookies to return", domain)
+        cookies_by_path = self._cookies[domain]
+        for path in cookies_by_path.keys():
+            if not self._policy.path_return_ok(path, request):
+                continue
+            cookies_by_name = cookies_by_path[path]
+            for cookie in cookies_by_name.values():
+                if not self._policy.return_ok(cookie, request):
+                    _debug("   not returning cookie")
+                    continue
+                _debug("   it's a match")
+                cookies.append(cookie)
+        return cookies
+
+    def _cookies_for_request(self, request):
+        """Return a list of cookies to be returned to server."""
+        cookies = []
+        for domain in self._cookies.keys():
+            cookies.extend(self._cookies_for_domain(domain, request))
+        return cookies
+
+    def _cookie_attrs(self, cookies):
+        """Return a list of cookie-attributes to be returned to server.
+
+        like ['foo="bar"; $Path="/"', ...]
+
+        The $Version attribute is also added when appropriate (currently only
+        once per request).
+
+        """
+        # add cookies in order of most specific (i.e. longest) path first
+        cookies.sort(key=lambda a: len(a.path), reverse=True)
+
+        version_set = False
+
+        attrs = []
+        for cookie in cookies:
+            # set version of Cookie header
+            # XXX
+            # What should it be if multiple matching Set-Cookie headers have
+            #  different versions themselves?
+            # Answer: there is no answer; was supposed to be settled by
+            #  RFC 2965 errata, but that may never appear...
+            version = cookie.version
+            if not version_set:
+                version_set = True
+                if version > 0:
+                    attrs.append("$Version=%s" % version)
+
+            # quote cookie value if necessary
+            # (not for Netscape protocol, which already has any quotes
+            #  intact, due to the poorly-specified Netscape Cookie: syntax)
+            if ((cookie.value is not None) and
+                self.non_word_re.search(cookie.value) and version > 0):
+                value = self.quote_re.sub(r"\\\1", cookie.value)
+            else:
+                value = cookie.value
+
+            # add cookie-attributes to be returned in Cookie header
+            if cookie.value is None:
+                attrs.append(cookie.name)
+            else:
+                attrs.append("%s=%s" % (cookie.name, value))
+            if version > 0:
+                if cookie.path_specified:
+                    attrs.append('$Path="%s"' % cookie.path)
+                if cookie.domain.startswith("."):
+                    domain = cookie.domain
+                    if (not cookie.domain_initial_dot and
+                        domain.startswith(".")):
+                        domain = domain[1:]
+                    attrs.append('$Domain="%s"' % domain)
+                if cookie.port is not None:
+                    p = "$Port"
+                    if cookie.port_specified:
+                        p = p + ('="%s"' % cookie.port)
+                    attrs.append(p)
+
+        return attrs
+
+    def add_cookie_header(self, request):
+        """Add correct Cookie: header to request (urllib.request.Request object).
+
+        The Cookie2 header is also added unless policy.hide_cookie2 is true.
+
+        """
+        _debug("add_cookie_header")
+        self._cookies_lock.acquire()
+        try:
+
+            self._policy._now = self._now = int(time.time())
+
+            cookies = self._cookies_for_request(request)
+
+            attrs = self._cookie_attrs(cookies)
+            if attrs:
+                if not request.has_header("Cookie"):
+                    request.add_unredirected_header(
+                        "Cookie", "; ".join(attrs))
+
+            # if necessary, advertise that we know RFC 2965
+            if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
+                not request.has_header("Cookie2")):
+                for cookie in cookies:
+                    if cookie.version != 1:
+                        request.add_unredirected_header("Cookie2", '$Version="1"')
+                        break
+
+        finally:
+            self._cookies_lock.release()
+
+        self.clear_expired_cookies()
+
+    def _normalized_cookie_tuples(self, attrs_set):
+        """Return list of tuples containing normalised cookie information.
+
+        attrs_set is the list of lists of key,value pairs extracted from
+        the Set-Cookie or Set-Cookie2 headers.
+
+        Tuples are name, value, standard, rest, where name and value are the
+        cookie name and value, standard is a dictionary containing the standard
+        cookie-attributes (discard, secure, version, expires or max-age,
+        domain, path and port) and rest is a dictionary containing the rest of
+        the cookie-attributes.
+
+        """
+        cookie_tuples = []
+
+        boolean_attrs = "discard", "secure"
+        value_attrs = ("version",
+                       "expires", "max-age",
+                       "domain", "path", "port",
+                       "comment", "commenturl")
+
+        for cookie_attrs in attrs_set:
+            name, value = cookie_attrs[0]
+
+            # Build dictionary of standard cookie-attributes (standard) and
+            # dictionary of other cookie-attributes (rest).
+
+            # Note: expiry time is normalised to seconds since epoch.  V0
+            # cookies should have the Expires cookie-attribute, and V1 cookies
+            # should have Max-Age, but since V1 includes RFC 2109 cookies (and
+            # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
+            # accept either (but prefer Max-Age).
+            max_age_set = False
+
+            bad_cookie = False
+
+            standard = {}
+            rest = {}
+            for k, v in cookie_attrs[1:]:
+                lc = k.lower()
+                # don't lose case distinction for unknown fields
+                if lc in value_attrs or lc in boolean_attrs:
+                    k = lc
+                if k in boolean_attrs and v is None:
+                    # boolean cookie-attribute is present, but has no value
+                    # (like "discard", rather than "port=80")
+                    v = True
+                if k in standard:
+                    # only first value is significant
+                    continue
+                if k == "domain":
+                    if v is None:
+                        _debug("   missing value for domain attribute")
+                        bad_cookie = True
+                        break
+                    # RFC 2965 section 3.3.3
+                    v = v.lower()
+                if k == "expires":
+                    if max_age_set:
+                        # Prefer max-age to expires (like Mozilla)
+                        continue
+                    if v is None:
+                        _debug("   missing or invalid value for expires "
+                              "attribute: treating as session cookie")
+                        continue
+                if k == "max-age":
+                    max_age_set = True
+                    try:
+                        v = int(v)
+                    except ValueError:
+                        _debug("   missing or invalid (non-numeric) value for "
+                              "max-age attribute")
+                        bad_cookie = True
+                        break
+                    # convert RFC 2965 Max-Age to seconds since epoch
+                    # XXX Strictly you're supposed to follow RFC 2616
+                    #   age-calculation rules.  Remember that zero Max-Age
+                    #   is a request to discard (old and new) cookie, though.
+                    k = "expires"
+                    v = self._now + v
+                if (k in value_attrs) or (k in boolean_attrs):
+                    if (v is None and
+                        k not in ("port", "comment", "commenturl")):
+                        _debug("   missing value for %s attribute" % k)
+                        bad_cookie = True
+                        break
+                    standard[k] = v
+                else:
+                    rest[k] = v
+
+            if bad_cookie:
+                continue
+
+            cookie_tuples.append((name, value, standard, rest))
+
+        return cookie_tuples
+
+    def _cookie_from_cookie_tuple(self, tup, request):
+        # standard is dict of standard cookie-attributes, rest is dict of the
+        # rest of them
+        name, value, standard, rest = tup
+
+        domain = standard.get("domain", Absent)
+        path = standard.get("path", Absent)
+        port = standard.get("port", Absent)
+        expires = standard.get("expires", Absent)
+
+        # set the easy defaults
+        version = standard.get("version", None)
+        if version is not None:
+            try:
+                version = int(version)
+            except ValueError:
+                return None  # invalid version, ignore cookie
+        secure = standard.get("secure", False)
+        # (discard is also set if expires is Absent)
+        discard = standard.get("discard", False)
+        comment = standard.get("comment", None)
+        comment_url = standard.get("commenturl", None)
+
+        # set default path
+        if path is not Absent and path != "":
+            path_specified = True
+            path = escape_path(path)
+        else:
+            path_specified = False
+            path = request_path(request)
+            i = path.rfind("/")
+            if i != -1:
+                if version == 0:
+                    # Netscape spec parts company from reality here
+                    path = path[:i]
+                else:
+                    path = path[:i+1]
+            if len(path) == 0: path = "/"
+
+        # set default domain
+        domain_specified = domain is not Absent
+        # but first we have to remember whether it starts with a dot
+        domain_initial_dot = False
+        if domain_specified:
+            domain_initial_dot = bool(domain.startswith("."))
+        if domain is Absent:
+            req_host, erhn = eff_request_host(request)
+            domain = erhn
+        elif not domain.startswith("."):
+            domain = "."+domain
+
+        # set default port
+        port_specified = False
+        if port is not Absent:
+            if port is None:
+                # Port attr present, but has no value: default to request port.
+                # Cookie should then only be sent back on that port.
+                port = request_port(request)
+            else:
+                port_specified = True
+                port = re.sub(r"\s+", "", port)
+        else:
+            # No port attr present.  Cookie can be sent back on any port.
+            port = None
+
+        # set default expires and discard
+        if expires is Absent:
+            expires = None
+            discard = True
+        elif expires <= self._now:
+            # Expiry date in past is request to delete cookie.  This can't be
+            # in DefaultCookiePolicy, because can't delete cookies there.
+            try:
+                self.clear(domain, path, name)
+            except KeyError:
+                pass
+            _debug("Expiring cookie, domain='%s', path='%s', name='%s'",
+                   domain, path, name)
+            return None
+
+        return Cookie(version,
+                      name, value,
+                      port, port_specified,
+                      domain, domain_specified, domain_initial_dot,
+                      path, path_specified,
+                      secure,
+                      expires,
+                      discard,
+                      comment,
+                      comment_url,
+                      rest)
+
+    def _cookies_from_attrs_set(self, attrs_set, request):
+        cookie_tuples = self._normalized_cookie_tuples(attrs_set)
+
+        cookies = []
+        for tup in cookie_tuples:
+            cookie = self._cookie_from_cookie_tuple(tup, request)
+            if cookie: cookies.append(cookie)
+        return cookies
+
+    def _process_rfc2109_cookies(self, cookies):
+        rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
+        if rfc2109_as_ns is None:
+            rfc2109_as_ns = not self._policy.rfc2965
+        for cookie in cookies:
+            if cookie.version == 1:
+                cookie.rfc2109 = True
+                if rfc2109_as_ns:
+                    # treat 2109 cookies as Netscape cookies rather than
+                    # as RFC2965 cookies
+                    cookie.version = 0
+
+    def make_cookies(self, response, request):
+        """Return sequence of Cookie objects extracted from response object."""
+        # get cookie-attributes for RFC 2965 and Netscape protocols
+        headers = response.info()
+        rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
+        ns_hdrs = headers.get_all("Set-Cookie", [])
+
+        rfc2965 = self._policy.rfc2965
+        netscape = self._policy.netscape
+
+        if ((not rfc2965_hdrs and not ns_hdrs) or
+            (not ns_hdrs and not rfc2965) or
+            (not rfc2965_hdrs and not netscape) or
+            (not netscape and not rfc2965)):
+            return []  # no relevant cookie headers: quick exit
+
+        try:
+            cookies = self._cookies_from_attrs_set(
+                split_header_words(rfc2965_hdrs), request)
+        except Exception:
+            _warn_unhandled_exception()
+            cookies = []
+
+        if ns_hdrs and netscape:
+            try:
+                # RFC 2109 and Netscape cookies
+                ns_cookies = self._cookies_from_attrs_set(
+                    parse_ns_headers(ns_hdrs), request)
+            except Exception:
+                _warn_unhandled_exception()
+                ns_cookies = []
+            self._process_rfc2109_cookies(ns_cookies)
+
+            # Look for Netscape cookies (from Set-Cookie headers) that match
+            # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
+            # For each match, keep the RFC 2965 cookie and ignore the Netscape
+            # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
+            # bundled in with the Netscape cookies for this purpose, which is
+            # reasonable behaviour.
+            if rfc2965:
+                lookup = {}
+                for cookie in cookies:
+                    lookup[(cookie.domain, cookie.path, cookie.name)] = None
+
+                def no_matching_rfc2965(ns_cookie, lookup=lookup):
+                    key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
+                    return key not in lookup
+                ns_cookies = filter(no_matching_rfc2965, ns_cookies)
+
+            if ns_cookies:
+                cookies.extend(ns_cookies)
+
+        return cookies
+
+    def set_cookie_if_ok(self, cookie, request):
+        """Set a cookie if policy says it's OK to do so."""
+        self._cookies_lock.acquire()
+        try:
+            self._policy._now = self._now = int(time.time())
+
+            if self._policy.set_ok(cookie, request):
+                self.set_cookie(cookie)
+
+        finally:
+            self._cookies_lock.release()
+
+    def set_cookie(self, cookie):
+        """Set a cookie, without checking whether or not it should be set."""
+        c = self._cookies
+        self._cookies_lock.acquire()
+        try:
+            if cookie.domain not in c: c[cookie.domain] = {}
+            c2 = c[cookie.domain]
+            if cookie.path not in c2: c2[cookie.path] = {}
+            c3 = c2[cookie.path]
+            c3[cookie.name] = cookie
+        finally:
+            self._cookies_lock.release()
+
+    def extract_cookies(self, response, request):
+        """Extract cookies from response, where allowable given the request."""
+        _debug("extract_cookies: %s", response.info())
+        self._cookies_lock.acquire()
+        try:
+            self._policy._now = self._now = int(time.time())
+
+            for cookie in self.make_cookies(response, request):
+                if self._policy.set_ok(cookie, request):
+                    _debug(" setting cookie: %s", cookie)
+                    self.set_cookie(cookie)
+        finally:
+            self._cookies_lock.release()
+
+    def clear(self, domain=None, path=None, name=None):
+        """Clear some cookies.
+
+        Invoking this method without arguments will clear all cookies.  If
+        given a single argument, only cookies belonging to that domain will be
+        removed.  If given two arguments, cookies belonging to the specified
+        path within that domain are removed.  If given three arguments, then
+        the cookie with the specified name, path and domain is removed.
+
+        Raises KeyError if no matching cookie exists.
+
+        """
+        if name is not None:
+            if (domain is None) or (path is None):
+                raise ValueError(
+                    "domain and path must be given to remove a cookie by name")
+            del self._cookies[domain][path][name]
+        elif path is not None:
+            if domain is None:
+                raise ValueError(
+                    "domain must be given to remove cookies by path")
+            del self._cookies[domain][path]
+        elif domain is not None:
+            del self._cookies[domain]
+        else:
+            self._cookies = {}
+
+    def clear_session_cookies(self):
+        """Discard all session cookies.
+
+        Note that the .save() method won't save session cookies anyway, unless
+        you ask otherwise by passing a true ignore_discard argument.
+
+        """
+        self._cookies_lock.acquire()
+        try:
+            for cookie in self:
+                if cookie.discard:
+                    self.clear(cookie.domain, cookie.path, cookie.name)
+        finally:
+            self._cookies_lock.release()
+
+    def clear_expired_cookies(self):
+        """Discard all expired cookies.
+
+        You probably don't need to call this method: expired cookies are never
+        sent back to the server (provided you're using DefaultCookiePolicy),
+        this method is called by CookieJar itself every so often, and the
+        .save() method won't save expired cookies anyway (unless you ask
+        otherwise by passing a true ignore_expires argument).
+
+        """
+        self._cookies_lock.acquire()
+        try:
+            now = time.time()
+            for cookie in self:
+                if cookie.is_expired(now):
+                    self.clear(cookie.domain, cookie.path, cookie.name)
+        finally:
+            self._cookies_lock.release()
+
+    def __iter__(self):
+        return deepvalues(self._cookies)
+
+    def __len__(self):
+        """Return number of contained cookies."""
+        i = 0
+        for cookie in self: i = i + 1
+        return i
+
+    def __repr__(self):
+        r = []
+        for cookie in self: r.append(repr(cookie))
+        return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
+
+    def __str__(self):
+        r = []
+        for cookie in self: r.append(str(cookie))
+        return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
+
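+# A minimal sketch of the urllib wiring the docstring above points at; the
+# URL is illustrative.
+#
+#     import urllib.request
+#     jar = CookieJar()
+#     opener = urllib.request.build_opener(
+#         urllib.request.HTTPCookieProcessor(jar))
+#     opener.open("http://example.com/")  # response cookies land in jar
+#     len(jar)                            # number of cookies extracted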
+
+# derives from OSError for backwards-compatibility with Python 2.4.0
+class LoadError(OSError): pass
+
+class FileCookieJar(CookieJar):
+    """CookieJar that can be loaded from and saved to a file."""
+
+    def __init__(self, filename=None, delayload=False, policy=None):
+        """
+        Cookies are NOT loaded from the named file until either the .load() or
+        .revert() method is called.
+
+        """
+        CookieJar.__init__(self, policy)
+        if filename is not None:
+            try:
+                filename+""
+            except:
+                raise ValueError("filename must be string-like")
+        self.filename = filename
+        self.delayload = bool(delayload)
+
+    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+        """Save cookies to a file."""
+        raise NotImplementedError()
+
+    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
+        """Load cookies from a file."""
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        with open(filename) as f:
+            self._really_load(f, filename, ignore_discard, ignore_expires)
+
+    def revert(self, filename=None,
+               ignore_discard=False, ignore_expires=False):
+        """Clear all cookies and reload cookies from a saved file.
+
+        Raises LoadError (or OSError) if reversion is not successful; the
+        object's state will not be altered if this happens.
+
+        """
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        self._cookies_lock.acquire()
+        try:
+
+            old_state = copy.deepcopy(self._cookies)
+            self._cookies = {}
+            try:
+                self.load(filename, ignore_discard, ignore_expires)
+            except OSError:
+                self._cookies = old_state
+                raise
+
+        finally:
+            self._cookies_lock.release()
+
+
+def lwp_cookie_str(cookie):
+    """Return string representation of Cookie in the LWP cookie file format.
+
+    Actually, the format is extended a bit -- see module docstring.
+
+    """
+    h = [(cookie.name, cookie.value),
+         ("path", cookie.path),
+         ("domain", cookie.domain)]
+    if cookie.port is not None: h.append(("port", cookie.port))
+    if cookie.path_specified: h.append(("path_spec", None))
+    if cookie.port_specified: h.append(("port_spec", None))
+    if cookie.domain_initial_dot: h.append(("domain_dot", None))
+    if cookie.secure: h.append(("secure", None))
+    if cookie.expires: h.append(("expires",
+                               time2isoz(float(cookie.expires))))
+    if cookie.discard: h.append(("discard", None))
+    if cookie.comment: h.append(("comment", cookie.comment))
+    if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
+
+    keys = sorted(cookie._rest.keys())
+    for k in keys:
+        h.append((k, str(cookie._rest[k])))
+
+    h.append(("version", str(cookie.version)))
+
+    return join_header_words([h])
+
+class LWPCookieJar(FileCookieJar):
+    """
+    The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
+    "Set-Cookie3" is the format used by the libwww-perl library, not known
+    to be compatible with any browser, but which is easy to read and
+    doesn't lose information about RFC 2965 cookies.
+
+    Additional methods
+
+    as_lwp_str(ignore_discard=True, ignore_expires=True)
+
+    """
+
+    def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
+        """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
+
+        ignore_discard and ignore_expires: see docstring for FileCookieJar.save
+
+        """
+        now = time.time()
+        r = []
+        for cookie in self:
+            if not ignore_discard and cookie.discard:
+                continue
+            if not ignore_expires and cookie.is_expired(now):
+                continue
+            r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
+        return "\n".join(r+[""])
+
+    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        with open(filename, "w") as f:
+            # There really isn't an LWP Cookies 2.0 format, but this indicates
+            # that there is extra information in here (domain_dot and
+            # port_spec) while still being compatible with libwww-perl, I hope.
+            f.write("#LWP-Cookies-2.0\n")
+            f.write(self.as_lwp_str(ignore_discard, ignore_expires))
+
+    def _really_load(self, f, filename, ignore_discard, ignore_expires):
+        magic = f.readline()
+        if not self.magic_re.search(magic):
+            msg = ("%r does not look like a Set-Cookie3 (LWP) format "
+                   "file" % filename)
+            raise LoadError(msg)
+
+        now = time.time()
+
+        header = "Set-Cookie3:"
+        boolean_attrs = ("port_spec", "path_spec", "domain_dot",
+                         "secure", "discard")
+        value_attrs = ("version",
+                       "port", "path", "domain",
+                       "expires",
+                       "comment", "commenturl")
+
+        try:
+            while 1:
+                line = f.readline()
+                if line == "": break
+                if not line.startswith(header):
+                    continue
+                line = line[len(header):].strip()
+
+                for data in split_header_words([line]):
+                    name, value = data[0]
+                    standard = {}
+                    rest = {}
+                    for k in boolean_attrs:
+                        standard[k] = False
+                    for k, v in data[1:]:
+                        if k is not None:
+                            lc = k.lower()
+                        else:
+                            lc = None
+                        # don't lose case distinction for unknown fields
+                        if (lc in value_attrs) or (lc in boolean_attrs):
+                            k = lc
+                        if k in boolean_attrs:
+                            if v is None: v = True
+                            standard[k] = v
+                        elif k in value_attrs:
+                            standard[k] = v
+                        else:
+                            rest[k] = v
+
+                    h = standard.get
+                    expires = h("expires")
+                    discard = h("discard")
+                    if expires is not None:
+                        expires = iso2time(expires)
+                    if expires is None:
+                        discard = True
+                    domain = h("domain")
+                    domain_specified = domain.startswith(".")
+                    c = Cookie(h("version"), name, value,
+                               h("port"), h("port_spec"),
+                               domain, domain_specified, h("domain_dot"),
+                               h("path"), h("path_spec"),
+                               h("secure"),
+                               expires,
+                               discard,
+                               h("comment"),
+                               h("commenturl"),
+                               rest)
+                    if not ignore_discard and c.discard:
+                        continue
+                    if not ignore_expires and c.is_expired(now):
+                        continue
+                    self.set_cookie(c)
+        except OSError:
+            raise
+        except Exception:
+            _warn_unhandled_exception()
+            raise LoadError("invalid Set-Cookie3 format file %r: %r" %
+                            (filename, line))
+
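+# A minimal sketch of persisting cookies in the Set-Cookie3 format described
+# above; "cookies.lwp" is an illustrative filename.
+#
+#     jar = LWPCookieJar("cookies.lwp")
+#     # ... extract_cookies() fills the jar during requests ...
+#     jar.save(ignore_discard=True)  # write session cookies as well
+#     jar.clear()
+#     jar.load(ignore_discard=True)  # restore them from disk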
+
+class MozillaCookieJar(FileCookieJar):
+    """
+
+    WARNING: you may want to back up your browser's cookies file if you use
+    this class to save cookies.  I *think* it works, but there have been
+    bugs in the past!
+
+    This class differs from CookieJar only in the format it uses to save and
+    load cookies to and from a file.  This class uses the Mozilla/Netscape
+    `cookies.txt' format.  lynx uses this file format, too.
+
+    Don't expect cookies saved while the browser is running to be noticed by
+    the browser (in fact, Mozilla on unix will overwrite your saved cookies if
+    you change them on disk while it's running; on Windows, you probably can't
+    save at all while the browser is running).
+
+    Note that the Mozilla/Netscape format will downgrade RFC 2965 cookies to
+    Netscape cookies on saving.
+
+    In particular, the cookie version and port number information is lost,
+    together with information about whether or not Path, Port and Discard were
+    specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
+    domain as set in the HTTP header started with a dot (yes, I'm aware some
+    domains in Netscape files start with a dot and some don't -- trust me, you
+    really don't want to know any more about this).
+
+    Note that though Mozilla and Netscape use the same format, they use
+    slightly different headers.  The class saves cookies using the Netscape
+    header by default (Mozilla can cope with that).
+
+    """
+    magic_re = re.compile("#( Netscape)? HTTP Cookie File")
+    header = """\
+# Netscape HTTP Cookie File
+# http://curl.haxx.se/rfc/cookie_spec.html
+# This is a generated file!  Do not edit.
+
+"""
+
+    def _really_load(self, f, filename, ignore_discard, ignore_expires):
+        now = time.time()
+
+        magic = f.readline()
+        if not self.magic_re.search(magic):
+            raise LoadError(
+                "%r does not look like a Netscape format cookies file" %
+                filename)
+
+        try:
+            while 1:
+                line = f.readline()
+                if line == "": break
+
+                # last field may be absent, so keep any trailing tab
+                if line.endswith("\n"): line = line[:-1]
+
+                # skip comments and blank lines XXX what is $ for?
+                if (line.strip().startswith(("#", "$")) or
+                    line.strip() == ""):
+                    continue
+
+                domain, domain_specified, path, secure, expires, name, value = \
+                        line.split("\t")
+                secure = (secure == "TRUE")
+                domain_specified = (domain_specified == "TRUE")
+                if name == "":
+                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
+                    # with no name, whereas http.cookiejar regards it as a
+                    # cookie with no value.
+                    name = value
+                    value = None
+
+                initial_dot = domain.startswith(".")
+                assert domain_specified == initial_dot
+
+                discard = False
+                if expires == "":
+                    expires = None
+                    discard = True
+
+                # assume path_specified is false
+                c = Cookie(0, name, value,
+                           None, False,
+                           domain, domain_specified, initial_dot,
+                           path, False,
+                           secure,
+                           expires,
+                           discard,
+                           None,
+                           None,
+                           {})
+                if not ignore_discard and c.discard:
+                    continue
+                if not ignore_expires and c.is_expired(now):
+                    continue
+                self.set_cookie(c)
+
+        except OSError:
+            raise
+        except Exception:
+            _warn_unhandled_exception()
+            raise LoadError("invalid Netscape format cookies file %r: %r" %
+                            (filename, line))
+
+    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+        if filename is None:
+            if self.filename is not None: filename = self.filename
+            else: raise ValueError(MISSING_FILENAME_TEXT)
+
+        with open(filename, "w") as f:
+            f.write(self.header)
+            now = time.time()
+            for cookie in self:
+                if not ignore_discard and cookie.discard:
+                    continue
+                if not ignore_expires and cookie.is_expired(now):
+                    continue
+                if cookie.secure: secure = "TRUE"
+                else: secure = "FALSE"
+                if cookie.domain.startswith("."): initial_dot = "TRUE"
+                else: initial_dot = "FALSE"
+                if cookie.expires is not None:
+                    expires = str(cookie.expires)
+                else:
+                    expires = ""
+                if cookie.value is None:
+                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
+                    # with no name, whereas http.cookiejar regards it as a
+                    # cookie with no value.
+                    name = ""
+                    value = cookie.name
+                else:
+                    name = cookie.name
+                    value = cookie.value
+                f.write(
+                    "\t".join([cookie.domain, initial_dot, cookie.path,
+                               secure, expires, name, value])+
+                    "\n")

+ 691 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/cookies.py

@@ -0,0 +1,691 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee.  This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+####
+# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
+#
+#                All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software
+# and its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Timothy O'Malley  not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
+# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+####
+#
+# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
+#   by Timothy O'Malley <timo@alum.mit.edu>
+#
+#  Cookie.py is a Python module for the handling of HTTP
+#  cookies as a Python dictionary.  See RFC 2109 for more
+#  information on cookies.
+#
+#  The original idea to treat Cookies as a dictionary came from
+#  Dave Mitchell (davem@magnet.com) in 1995, when he released the
+#  first version of nscookie.py.
+#
+####
+
+r"""
+Here's a sample session to show how to use this module.
+At the moment, this is the only documentation.
+
+The Basics
+----------
+
+Importing is easy...
+
+   >>> from http import cookies
+
+Most of the time you start by creating a cookie.
+
+   >>> C = cookies.SimpleCookie()
+
+Once you've created your Cookie, you can add values just as if it were
+a dictionary.
+
+   >>> C = cookies.SimpleCookie()
+   >>> C["fig"] = "newton"
+   >>> C["sugar"] = "wafer"
+   >>> C.output()
+   'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
+
+Notice that the printable representation of a Cookie is the
+appropriate format for a Set-Cookie: header.  This is the
+default behavior.  You can change the header and printed
+attributes by using the .output() function
+
+   >>> C = cookies.SimpleCookie()
+   >>> C["rocky"] = "road"
+   >>> C["rocky"]["path"] = "/cookie"
+   >>> print(C.output(header="Cookie:"))
+   Cookie: rocky=road; Path=/cookie
+   >>> print(C.output(attrs=[], header="Cookie:"))
+   Cookie: rocky=road
+
+The load() method of a Cookie extracts cookies from a string.  In a
+CGI script, you would use this method to extract the cookies from the
+HTTP_COOKIE environment variable.
+
+   >>> C = cookies.SimpleCookie()
+   >>> C.load("chips=ahoy; vienna=finger")
+   >>> C.output()
+   'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
+
+The load() method is darn-tootin smart about identifying cookies
+within a string.  Escaped quotation marks, nested semicolons, and other
+such trickeries do not confuse it.
+
+   >>> C = cookies.SimpleCookie()
+   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
+   >>> print(C)
+   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
+
+Each element of the Cookie also supports all of the RFC 2109
+Cookie attributes.  Here's an example which sets the Path
+attribute.
+
+   >>> C = cookies.SimpleCookie()
+   >>> C["oreo"] = "doublestuff"
+   >>> C["oreo"]["path"] = "/"
+   >>> print(C)
+   Set-Cookie: oreo=doublestuff; Path=/
+
+Each dictionary element has a 'value' attribute, which gives you
+back the value associated with the key.
+
+   >>> C = cookies.SimpleCookie()
+   >>> C["twix"] = "none for you"
+   >>> C["twix"].value
+   'none for you'
+
+The SimpleCookie expects that all values should be standard strings.
+Just to be sure, SimpleCookie invokes the str() builtin to convert
+the value to a string when the values are set dictionary-style.
+
+   >>> C = cookies.SimpleCookie()
+   >>> C["number"] = 7
+   >>> C["string"] = "seven"
+   >>> C["number"].value
+   '7'
+   >>> C["string"].value
+   'seven'
+   >>> C.output()
+   'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
+
+Finis.
+"""
+
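+# Note: this file is the eventlet "green" copy of the stdlib module, so in
+# an eventlet program it would typically be imported as
+#
+#   >>> from eventlet.green.http import cookies
+#
+# instead of "from http import cookies" as shown above; the API is
+# otherwise unchanged.
+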
+#
+# Import our required modules
+#
+import re
+import string
+
+__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
+
+_nulljoin = ''.join
+_semispacejoin = '; '.join
+_spacejoin = ' '.join
+
+def _warn_deprecated_setter(setter):
+    import warnings
+    msg = ('The .%s setter is deprecated. The attribute will be read-only in '
+           'future releases. Please use the set() method instead.' % setter)
+    warnings.warn(msg, DeprecationWarning, stacklevel=3)
+
+#
+# Define an exception visible to External modules
+#
+class CookieError(Exception):
+    pass
+
+
+# These quoting routines conform to the RFC2109 specification, which in
+# turn references the character definitions from RFC2068.  They provide
+# a two-way quoting algorithm.  Any non-text character is translated
+# into a 4 character sequence: a backslash followed by the
+# three-digit octal equivalent of the character.  Any '\' or '"' is
+# quoted with a preceding backslash.
+# Because of the way browsers really handle cookies (as opposed to what
+# the RFC says) we also encode "," and ";".
+#
+# These are taken from RFC2068 and RFC2109.
+#       _LegalChars       is the list of chars which don't require "'s
+#       _Translator       hash-table for fast quoting
+#
+_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
+_UnescapedChars = _LegalChars + ' ()/<=>?@[]{}'
+
+_Translator = {n: '\\%03o' % n
+               for n in set(range(256)) - set(map(ord, _UnescapedChars))}
+_Translator.update({
+    ord('"'): '\\"',
+    ord('\\'): '\\\\',
+})
+
+# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
+_is_legal_key = re.compile(r'[%s]+\Z' % re.escape(_LegalChars)).match
+
+def _quote(str):
+    r"""Quote a string for use in a cookie header.
+
+    If the string does not need to be double-quoted, then just return the
+    string.  Otherwise, surround the string in doublequotes and quote
+    (with a \) special characters.
+    """
+    if str is None or _is_legal_key(str):
+        return str
+    else:
+        return '"' + str.translate(_Translator) + '"'
+
+
+_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
+_QuotePatt = re.compile(r"[\\].")
+
+def _unquote(str):
+    # If there aren't any doublequotes,
+    # then there can't be any special characters.  See RFC 2109.
+    if str is None or len(str) < 2:
+        return str
+    if str[0] != '"' or str[-1] != '"':
+        return str
+
+    # We have to assume that we must decode this string.
+    # Down to work.
+
+    # Remove the "s
+    str = str[1:-1]
+
+    # Check for special sequences.  Examples:
+    #    \012 --> \n
+    #    \"   --> "
+    #
+    i = 0
+    n = len(str)
+    res = []
+    while 0 <= i < n:
+        o_match = _OctalPatt.search(str, i)
+        q_match = _QuotePatt.search(str, i)
+        if not o_match and not q_match:              # Neither matched
+            res.append(str[i:])
+            break
+        # else:
+        j = k = -1
+        if o_match:
+            j = o_match.start(0)
+        if q_match:
+            k = q_match.start(0)
+        if q_match and (not o_match or k < j):     # QuotePatt matched
+            res.append(str[i:k])
+            res.append(str[k+1])
+            i = k + 2
+        else:                                      # OctalPatt matched
+            res.append(str[i:j])
+            res.append(chr(int(str[j+1:j+4], 8)))
+            i = j + 4
+    return _nulljoin(res)
+
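+# Illustrative round trip for the two helpers above (values assume the
+# default _Translator table):
+#
+#   >>> _quote('fig')          # legal characters pass through unquoted
+#   'fig'
+#   >>> _quote('a;b')          # ';' becomes its 3-digit octal escape
+#   '"a\\073b"'
+#   >>> _unquote('"a\\073b"')
+#   'a;b'
+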
+# The _getdate() routine is used to set the expiration time in the cookie's HTTP
+# header.  By default, _getdate() returns the current time in the appropriate
+# "expires" format for a Set-Cookie header.  The one optional argument is an
+# offset from now, in seconds.  For example, an offset of -3600 means "one hour
+# ago".  The offset may be a floating point number.
+#
+
+_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+_monthname = [None,
+              'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
+    from eventlet.green.time import gmtime, time
+    now = time()
+    year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
+    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
+           (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
+
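+# Illustrative only -- the exact value depends on the current time:
+#
+#   >>> _getdate(3600)                         # one hour from now
+#   'Sat, 04 Nov 2017 05:28:49 GMT'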
+
+class Morsel(dict):
+    """A class to hold ONE (key, value) pair.
+
+    In a cookie, each such pair may have several attributes, so this class is
+    used to keep the attributes associated with the appropriate key,value pair.
+    This class also includes a coded_value attribute, which is used to hold
+    the network representation of the value.  This is most useful when Python
+    objects are pickled for network transit.
+    """
+    # RFC 2109 lists these attributes as reserved:
+    #   path       comment         domain
+    #   max-age    secure      version
+    #
+    # For historical reasons, these attributes are also reserved:
+    #   expires
+    #
+    # This is an extension from Microsoft:
+    #   httponly
+    #
+    # This dictionary provides a mapping from the lowercase
+    # variant on the left to the appropriate traditional
+    # formatting on the right.
+    _reserved = {
+        "expires"  : "expires",
+        "path"     : "Path",
+        "comment"  : "Comment",
+        "domain"   : "Domain",
+        "max-age"  : "Max-Age",
+        "secure"   : "Secure",
+        "httponly" : "HttpOnly",
+        "version"  : "Version",
+    }
+
+    _flags = {'secure', 'httponly'}
+
+    def __init__(self):
+        # Set defaults
+        self._key = self._value = self._coded_value = None
+
+        # Set default attributes
+        for key in self._reserved:
+            dict.__setitem__(self, key, "")
+
+    @property
+    def key(self):
+        return self._key
+
+    @key.setter
+    def key(self, key):
+        _warn_deprecated_setter('key')
+        self._key = key
+
+    @property
+    def value(self):
+        return self._value
+
+    @value.setter
+    def value(self, value):
+        _warn_deprecated_setter('value')
+        self._value = value
+
+    @property
+    def coded_value(self):
+        return self._coded_value
+
+    @coded_value.setter
+    def coded_value(self, coded_value):
+        _warn_deprecated_setter('coded_value')
+        self._coded_value = coded_value
+
+    def __setitem__(self, K, V):
+        K = K.lower()
+        if K not in self._reserved:
+            raise CookieError("Invalid attribute %r" % (K,))
+        dict.__setitem__(self, K, V)
+
+    def setdefault(self, key, val=None):
+        key = key.lower()
+        if key not in self._reserved:
+            raise CookieError("Invalid attribute %r" % (key,))
+        return dict.setdefault(self, key, val)
+
+    def __eq__(self, morsel):
+        if not isinstance(morsel, Morsel):
+            return NotImplemented
+        return (dict.__eq__(self, morsel) and
+                self._value == morsel._value and
+                self._key == morsel._key and
+                self._coded_value == morsel._coded_value)
+
+    __ne__ = object.__ne__
+
+    def copy(self):
+        morsel = Morsel()
+        dict.update(morsel, self)
+        morsel.__dict__.update(self.__dict__)
+        return morsel
+
+    def update(self, values):
+        data = {}
+        for key, val in dict(values).items():
+            key = key.lower()
+            if key not in self._reserved:
+                raise CookieError("Invalid attribute %r" % (key,))
+            data[key] = val
+        dict.update(self, data)
+
+    def isReservedKey(self, K):
+        return K.lower() in self._reserved
+
+    def set(self, key, val, coded_val, LegalChars=_LegalChars):
+        if LegalChars != _LegalChars:
+            import warnings
+            warnings.warn(
+                'LegalChars parameter is deprecated, ignored and will '
+                'be removed in future versions.', DeprecationWarning,
+                stacklevel=2)
+
+        if key.lower() in self._reserved:
+            raise CookieError('Attempt to set a reserved key %r' % (key,))
+        if not _is_legal_key(key):
+            raise CookieError('Illegal key %r' % (key,))
+
+        # It's a good key, so save it.
+        self._key = key
+        self._value = val
+        self._coded_value = coded_val
+
+    def __getstate__(self):
+        return {
+            'key': self._key,
+            'value': self._value,
+            'coded_value': self._coded_value,
+        }
+
+    def __setstate__(self, state):
+        self._key = state['key']
+        self._value = state['value']
+        self._coded_value = state['coded_value']
+
+    def output(self, attrs=None, header="Set-Cookie:"):
+        return "%s %s" % (header, self.OutputString(attrs))
+
+    __str__ = output
+
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self.OutputString())
+
+    def js_output(self, attrs=None):
+        # Print javascript
+        return """
+        <script type="text/javascript">
+        <!-- begin hiding
+        document.cookie = \"%s\";
+        // end hiding -->
+        </script>
+        """ % (self.OutputString(attrs).replace('"', r'\"'))
+
+    def OutputString(self, attrs=None):
+        # Build up our result
+        #
+        result = []
+        append = result.append
+
+        # First, the key=value pair
+        append("%s=%s" % (self.key, self.coded_value))
+
+        # Now add any defined attributes
+        if attrs is None:
+            attrs = self._reserved
+        items = sorted(self.items())
+        for key, value in items:
+            if value == "":
+                continue
+            if key not in attrs:
+                continue
+            if key == "expires" and isinstance(value, int):
+                append("%s=%s" % (self._reserved[key], _getdate(value)))
+            elif key == "max-age" and isinstance(value, int):
+                append("%s=%d" % (self._reserved[key], value))
+            elif key in self._flags:
+                if value:
+                    append(str(self._reserved[key]))
+            else:
+                append("%s=%s" % (self._reserved[key], value))
+
+        # Return the result
+        return _semispacejoin(result)
+
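+# Illustrative use of a Morsel on its own (normally BaseCookie creates and
+# fills these for you):
+#
+#   >>> m = Morsel()
+#   >>> m.set('rocky', 'road', 'road')    # key, value, coded_value
+#   >>> m['path'] = '/cookie'
+#   >>> m.OutputString()
+#   'rocky=road; Path=/cookie'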
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications.  I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs.  As a
+# result, the parsing rules here are less strict.
+#
+
+_LegalKeyChars  = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
+_LegalValueChars = _LegalKeyChars + r'\[\]'
+_CookiePattern = re.compile(r"""
+    (?x)                           # This is a verbose pattern
+    \s*                            # Optional whitespace at start of cookie
+    (?P<key>                       # Start of group 'key'
+    [""" + _LegalKeyChars + r"""]+?   # Any word of at least one letter
+    )                              # End of group 'key'
+    (                              # Optional group: there may not be a value.
+    \s*=\s*                          # Equal Sign
+    (?P<val>                         # Start of group 'val'
+    "(?:[^\\"]|\\.)*"                  # Any doublequoted string
+    |                                  # or
+    \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT  # Special case for "expires" attr
+    |                                  # or
+    [""" + _LegalValueChars + r"""]*      # Any word or empty string
+    )                                # End of group 'val'
+    )?                             # End of optional value group
+    \s*                            # Any number of spaces.
+    (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
+    """, re.ASCII)                 # May be removed if safe.
+
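+# Illustrative match -- each successful match yields a 'key' group and an
+# optional 'val' group, and parsing resumes at match.end():
+#
+#   >>> m = _CookiePattern.match('chips=ahoy; vienna=finger')
+#   >>> m.group('key'), m.group('val')
+#   ('chips', 'ahoy')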
+
+# At long last, here is the cookie class.  Using this class is almost just like
+# using a dictionary.  See this module's docstring for example usage.
+#
+class BaseCookie(dict):
+    """A container class for a set of Morsels."""
+
+    def value_decode(self, val):
+        """real_value, coded_value = value_decode(STRING)
+        Called prior to setting a cookie's value from the network
+        representation.  The VALUE is the value read from HTTP
+        header.
+        Override this function to modify the behavior of cookies.
+        """
+        return val, val
+
+    def value_encode(self, val):
+        """real_value, coded_value = value_encode(VALUE)
+        Called prior to setting a cookie's value from the dictionary
+        representation.  The VALUE is the value being assigned.
+        Override this function to modify the behavior of cookies.
+        """
+        strval = str(val)
+        return strval, strval
+
+    def __init__(self, input=None):
+        if input:
+            self.load(input)
+
+    def __set(self, key, real_value, coded_value):
+        """Private method for setting a cookie's value"""
+        M = self.get(key, Morsel())
+        M.set(key, real_value, coded_value)
+        dict.__setitem__(self, key, M)
+
+    def __setitem__(self, key, value):
+        """Dictionary style assignment."""
+        if isinstance(value, Morsel):
+            # allow assignment of constructed Morsels (e.g. for pickling)
+            dict.__setitem__(self, key, value)
+        else:
+            rval, cval = self.value_encode(value)
+            self.__set(key, rval, cval)
+
+    def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
+        """Return a string suitable for HTTP."""
+        result = []
+        items = sorted(self.items())
+        for key, value in items:
+            result.append(value.output(attrs, header))
+        return sep.join(result)
+
+    __str__ = output
+
+    def __repr__(self):
+        l = []
+        items = sorted(self.items())
+        for key, value in items:
+            l.append('%s=%s' % (key, repr(value.value)))
+        return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
+
+    def js_output(self, attrs=None):
+        """Return a string suitable for JavaScript."""
+        result = []
+        items = sorted(self.items())
+        for key, value in items:
+            result.append(value.js_output(attrs))
+        return _nulljoin(result)
+
+    def load(self, rawdata):
+        """Load cookies from a string (presumably HTTP_COOKIE) or
+        from a dictionary.  Loading cookies from a dictionary 'd'
+        is equivalent to calling:
+            map(Cookie.__setitem__, d.keys(), d.values())
+        """
+        if isinstance(rawdata, str):
+            self.__parse_string(rawdata)
+        else:
+            # self.update() wouldn't call our custom __setitem__
+            for key, value in rawdata.items():
+                self[key] = value
+        return
+
+    def __parse_string(self, str, patt=_CookiePattern):
+        i = 0                 # Our starting point
+        n = len(str)          # Length of string
+        parsed_items = []     # Parsed (type, key, value) triples
+        morsel_seen = False   # A key=value pair was previously encountered
+
+        TYPE_ATTRIBUTE = 1
+        TYPE_KEYVALUE = 2
+
+        # We first parse the whole cookie string and reject it if it's
+        # syntactically invalid (this helps avoid some classes of injection
+        # attacks).
+        while 0 <= i < n:
+            # Start looking for a cookie
+            match = patt.match(str, i)
+            if not match:
+                # No more cookies
+                break
+
+            key, value = match.group("key"), match.group("val")
+            i = match.end(0)
+
+            if key[0] == "$":
+                if not morsel_seen:
+                    # We ignore attributes which pertain to the cookie
+                    # mechanism as a whole, such as "$Version".
+                    # See RFC 2965. (Does anyone care?)
+                    continue
+                parsed_items.append((TYPE_ATTRIBUTE, key[1:], value))
+            elif key.lower() in Morsel._reserved:
+                if not morsel_seen:
+                    # Invalid cookie string
+                    return
+                if value is None:
+                    if key.lower() in Morsel._flags:
+                        parsed_items.append((TYPE_ATTRIBUTE, key, True))
+                    else:
+                        # Invalid cookie string
+                        return
+                else:
+                    parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value)))
+            elif value is not None:
+                parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value)))
+                morsel_seen = True
+            else:
+                # Invalid cookie string
+                return
+
+        # The cookie string is valid, apply it.
+        M = None         # current morsel
+        for tp, key, value in parsed_items:
+            if tp == TYPE_ATTRIBUTE:
+                assert M is not None
+                M[key] = value
+            else:
+                assert tp == TYPE_KEYVALUE
+                rval, cval = value
+                self.__set(key, rval, cval)
+                M = self[key]
+
+
+class SimpleCookie(BaseCookie):
+    """
+    SimpleCookie supports strings as cookie values.  When setting
+    the value using the dictionary assignment notation, SimpleCookie
+    calls the builtin str() to convert the value to a string.  Values
+    received from HTTP are kept as strings.
+    """
+    def value_decode(self, val):
+        return _unquote(val), val
+
+    def value_encode(self, val):
+        strval = str(val)
+        return strval, _quote(strval)

+ 1266 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/http/server.py

@@ -0,0 +1,1266 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee.  This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+"""HTTP server classes.
+
+Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see
+SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST,
+and CGIHTTPRequestHandler for CGI scripts.
+
+It does, however, optionally implement HTTP/1.1 persistent connections,
+as of version 0.3.
+
+Notes on CGIHTTPRequestHandler
+------------------------------
+
+This class implements GET and POST requests to cgi-bin scripts.
+
+If the os.fork() function is not present (e.g. on Windows),
+subprocess.Popen() is used as a fallback, with slightly altered semantics.
+
+In all cases, the implementation is intentionally naive -- all
+requests are executed synchronously.
+
+SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
+-- it may execute arbitrary Python code or external programs.
+
+Note that status code 200 is sent prior to execution of a CGI script, so
+scripts cannot send other status codes such as 302 (redirect).
+
+XXX To do:
+
+- log requests even later (to capture byte count)
+- log user-agent header and other interesting goodies
+- send error log to separate file
+"""
+
+
+# See also:
+#
+# HTTP Working Group                                        T. Berners-Lee
+# INTERNET-DRAFT                                            R. T. Fielding
+# <draft-ietf-http-v10-spec-00.txt>                     H. Frystyk Nielsen
+# Expires September 8, 1995                                  March 8, 1995
+#
+# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
+#
+# and
+#
+# Network Working Group                                      R. Fielding
+# Request for Comments: 2616                                       et al
+# Obsoletes: 2068                                              June 1999
+# Category: Standards Track
+#
+# URL: http://www.faqs.org/rfcs/rfc2616.html
+
+# Log files
+# ---------
+#
+# Here's a quote from the NCSA httpd docs about log file format.
+#
+# | The logfile format is as follows. Each line consists of:
+# |
+# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
+# |
+# |        host: Either the DNS name or the IP number of the remote client
+# |        rfc931: Any information returned by identd for this person,
+# |                - otherwise.
+# |        authuser: If user sent a userid for authentication, the user name,
+# |                  - otherwise.
+# |        DD: Day
+# |        Mon: Month (calendar name)
+# |        YYYY: Year
+# |        hh: hour (24-hour format, the machine's timezone)
+# |        mm: minutes
+# |        ss: seconds
+# |        request: The first line of the HTTP request as sent by the client.
+# |        ddd: the status code returned by the server, - if not available.
+# |        bbbb: the total number of bytes sent,
+# |              *not including the HTTP/1.0 header*, - if not available
+# |
+# | You can determine the name of the file accessed through request.
+#
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+__version__ = "0.6"
+
+__all__ = [
+    "HTTPServer", "BaseHTTPRequestHandler",
+    "SimpleHTTPRequestHandler", "CGIHTTPRequestHandler",
+]
+
+import email.utils
+import html
+import io
+import mimetypes
+import posixpath
+import shutil
+import sys
+import urllib.parse
+import copy
+import argparse
+
+from eventlet.green import (
+    os,
+    time,
+    select,
+    socket,
+    SocketServer as socketserver,
+    subprocess,
+)
+from eventlet.green.http import client as http_client, HTTPStatus
+
+
+# Default error message template
+DEFAULT_ERROR_MESSAGE = """\
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+        "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+    <head>
+        <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+        <title>Error response</title>
+    </head>
+    <body>
+        <h1>Error response</h1>
+        <p>Error code: %(code)d</p>
+        <p>Message: %(message)s.</p>
+        <p>Error code explanation: %(code)s - %(explain)s.</p>
+    </body>
+</html>
+"""
+
+DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
+
+class HTTPServer(socketserver.TCPServer):
+
+    allow_reuse_address = 1    # Seems to make sense in testing environment
+
+    def server_bind(self):
+        """Override server_bind to store the server name."""
+        socketserver.TCPServer.server_bind(self)
+        host, port = self.server_address[:2]
+        self.server_name = socket.getfqdn(host)
+        self.server_port = port
+
+
+class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
+
+    """HTTP request handler base class.
+
+    The following explanation of HTTP serves to guide you through the
+    code as well as to expose any misunderstandings I may have about
+    HTTP (so you don't need to read the code to figure out I'm wrong
+    :-).
+
+    HTTP (HyperText Transfer Protocol) is an extensible protocol on
+    top of a reliable stream transport (e.g. TCP/IP).  The protocol
+    recognizes three parts to a request:
+
+    1. One line identifying the request type and path
+    2. An optional set of RFC-822-style headers
+    3. An optional data part
+
+    The headers and data are separated by a blank line.
+
+    The first line of the request has the form
+
+    <command> <path> <version>
+
+    where <command> is a (case-sensitive) keyword such as GET or POST,
+    <path> is a string containing path information for the request,
+    and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
+    <path> is encoded using the URL encoding scheme (using %xx to signify
+    the ASCII character with hex code xx).
+
+    The specification states that lines are separated by CRLF but, for
+    compatibility with the widest range of clients, recommends that
+    servers also handle LF.  Similarly, whitespace in the request line
+    is treated sensibly (allowing multiple spaces between components
+    and allowing trailing whitespace).
+
+    Similarly, for output, lines ought to be separated by CRLF pairs
+    but most clients grok LF characters just fine.
+
+    If the first line of the request has the form
+
+    <command> <path>
+
+    (i.e. <version> is left out) then this is assumed to be an HTTP
+    0.9 request; this form has no optional headers and data part and
+    the reply consists of just the data.
+
+    The reply form of the HTTP 1.x protocol again has three parts:
+
+    1. One line giving the response code
+    2. An optional set of RFC-822-style headers
+    3. The data
+
+    Again, the headers and data are separated by a blank line.
+
+    The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+    human-readable string explaining what the response code means.
+
+    This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>).  Specifically,
+    a request SPAM will be handled by a method do_SPAM().  If no
+    such method exists the server sends an error response to the
+    client.  If it exists, it is called with no arguments:
+
+    do_SPAM()
+
+    Note that the request name is case sensitive (i.e. SPAM and spam
+    are different requests).
+
+    The various request details are stored in instance variables:
+
+    - client_address is the client IP address in the form (host,
+    port);
+
+    - command, path and version are the broken-down request line;
+
+    - headers is an instance of email.message.Message (or a derived
+    class) containing the header information;
+
+    - rfile is a file object open for reading positioned at the
+    start of the optional input data part;
+
+    - wfile is a file object open for writing.
+
+    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+    The first thing to be written must be the response line.  Then
+    follow 0 or more header lines, then a blank line, and then the
+    actual data (if any).  The meaning of the header lines depends on
+    the command executed by the server; in most cases, when data is
+    returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+    e.g. "text/html" or "text/plain".
+
+    """
+
+    # The Python system version, truncated to its first component.
+    sys_version = "Python/" + sys.version.split()[0]
+
+    # The server software version.  You may want to override this.
+    # The format is multiple whitespace-separated strings,
+    # where each string is of the form name[/version].
+    server_version = "BaseHTTP/" + __version__
+
+    error_message_format = DEFAULT_ERROR_MESSAGE
+    error_content_type = DEFAULT_ERROR_CONTENT_TYPE
+
+    # The default request version.  This only affects responses up until
+    # the point where the request line is parsed, so it mainly decides what
+    # the client gets back when sending a malformed request line.
+    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
+    default_request_version = "HTTP/0.9"
+
+    def parse_request(self):
+        """Parse a request (internal).
+
+        The request should be stored in self.raw_requestline; the results
+        are in self.command, self.path, self.request_version and
+        self.headers.
+
+        Return True for success, False for failure; on failure, an
+        error is sent back.
+
+        """
+        self.command = None  # set in case of error on the first line
+        self.request_version = version = self.default_request_version
+        self.close_connection = True
+        requestline = str(self.raw_requestline, 'iso-8859-1')
+        requestline = requestline.rstrip('\r\n')
+        self.requestline = requestline
+        words = requestline.split()
+        if len(words) == 3:
+            command, path, version = words
+            try:
+                if version[:5] != 'HTTP/':
+                    raise ValueError
+                base_version_number = version.split('/', 1)[1]
+                version_number = base_version_number.split(".")
+                # RFC 2145 section 3.1 says there can be only one "." and
+                #   - major and minor numbers MUST be treated as
+                #      separate integers;
+                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
+                #      turn is lower than HTTP/12.3;
+                #   - Leading zeros MUST be ignored by recipients.
+                if len(version_number) != 2:
+                    raise ValueError
+                version_number = int(version_number[0]), int(version_number[1])
+            except (ValueError, IndexError):
+                self.send_error(
+                    HTTPStatus.BAD_REQUEST,
+                    "Bad request version (%r)" % version)
+                return False
+            if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
+                self.close_connection = False
+            if version_number >= (2, 0):
+                self.send_error(
+                    HTTPStatus.HTTP_VERSION_NOT_SUPPORTED,
+                    "Invalid HTTP version (%s)" % base_version_number)
+                return False
+        elif len(words) == 2:
+            command, path = words
+            self.close_connection = True
+            if command != 'GET':
+                self.send_error(
+                    HTTPStatus.BAD_REQUEST,
+                    "Bad HTTP/0.9 request type (%r)" % command)
+                return False
+        elif not words:
+            return False
+        else:
+            self.send_error(
+                HTTPStatus.BAD_REQUEST,
+                "Bad request syntax (%r)" % requestline)
+            return False
+        self.command, self.path, self.request_version = command, path, version
+
+        # Examine the headers and look for a Connection directive.
+        try:
+            self.headers = http_client.parse_headers(self.rfile,
+                                                     _class=self.MessageClass)
+        except http_client.LineTooLong as err:
+            self.send_error(
+                HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
+                "Line too long",
+                str(err))
+            return False
+        except http_client.HTTPException as err:
+            self.send_error(
+                HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
+                "Too many headers",
+                str(err)
+            )
+            return False
+
+        conntype = self.headers.get('Connection', "")
+        if conntype.lower() == 'close':
+            self.close_connection = True
+        elif (conntype.lower() == 'keep-alive' and
+              self.protocol_version >= "HTTP/1.1"):
+            self.close_connection = False
+        # Examine the headers and look for an Expect directive
+        expect = self.headers.get('Expect', "")
+        if (expect.lower() == "100-continue" and
+                self.protocol_version >= "HTTP/1.1" and
+                self.request_version >= "HTTP/1.1"):
+            if not self.handle_expect_100():
+                return False
+        return True
+
+    def handle_expect_100(self):
+        """Decide what to do with an "Expect: 100-continue" header.
+
+        If the client is expecting a 100 Continue response, we must
+        respond with either a 100 Continue or a final response before
+        waiting for the request body. The default is to always respond
+        with a 100 Continue. You can behave differently (for example,
+        reject unauthorized requests) by overriding this method.
+
+        This method should either return True (possibly after sending
+        a 100 Continue response) or send an error response and return
+        False.
+
+        """
+        self.send_response_only(HTTPStatus.CONTINUE)
+        self.end_headers()
+        return True
+
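+    # Illustrative sketch (an assumption, not upstream code): a subclass
+    # could reject unauthorized uploads before the client sends the body:
+    #
+    #   class PickyHandler(BaseHTTPRequestHandler):
+    #       def handle_expect_100(self):
+    #           if self.headers.get('Authorization') is None:
+    #               self.send_error(HTTPStatus.UNAUTHORIZED)
+    #               return False
+    #           return super().handle_expect_100()
+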
+    def handle_one_request(self):
+        """Handle a single HTTP request.
+
+        You normally don't need to override this method; see the class
+        __doc__ string for information on how to handle specific HTTP
+        commands such as GET and POST.
+
+        """
+        try:
+            self.raw_requestline = self.rfile.readline(65537)
+            if len(self.raw_requestline) > 65536:
+                self.requestline = ''
+                self.request_version = ''
+                self.command = ''
+                self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)
+                return
+            if not self.raw_requestline:
+                self.close_connection = True
+                return
+            if not self.parse_request():
+                # An error code has been sent, just exit
+                return
+            mname = 'do_' + self.command
+            if not hasattr(self, mname):
+                self.send_error(
+                    HTTPStatus.NOT_IMPLEMENTED,
+                    "Unsupported method (%r)" % self.command)
+                return
+            method = getattr(self, mname)
+            method()
+            self.wfile.flush()  # actually send the response if not already done
+        except socket.timeout as e:
+            #a read or a write timed out.  Discard this connection
+            self.log_error("Request timed out: %r", e)
+            self.close_connection = True
+            return
+
+    def handle(self):
+        """Handle multiple requests if necessary."""
+        self.close_connection = True
+
+        self.handle_one_request()
+        while not self.close_connection:
+            self.handle_one_request()
+
+    def send_error(self, code, message=None, explain=None):
+        """Send and log an error reply.
+
+        Arguments are
+        * code:    an HTTP error code
+                   3 digits
+        * message: a simple optional 1 line reason phrase.
+                   *( HTAB / SP / VCHAR / %x80-FF )
+                   defaults to short entry matching the response code
+        * explain: a detailed message defaults to the long entry
+                   matching the response code.
+
+        This sends an error response (so it must be called before any
+        output has been generated), logs the error, and finally sends
+        a piece of HTML explaining the error to the user.
+
+        """
+
+        try:
+            shortmsg, longmsg = self.responses[code]
+        except KeyError:
+            shortmsg, longmsg = '???', '???'
+        if message is None:
+            message = shortmsg
+        if explain is None:
+            explain = longmsg
+        self.log_error("code %d, message %s", code, message)
+        self.send_response(code, message)
+        self.send_header('Connection', 'close')
+
+        # Message body is omitted for cases described in:
+        #  - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified)
+        #  - RFC7231: 6.3.6. 205(Reset Content)
+        body = None
+        if (code >= 200 and
+            code not in (HTTPStatus.NO_CONTENT,
+                         HTTPStatus.RESET_CONTENT,
+                         HTTPStatus.NOT_MODIFIED)):
+            # HTML encode to prevent Cross Site Scripting attacks
+            # (see bug #1100201)
+            content = (self.error_message_format % {
+                'code': code,
+                'message': html.escape(message, quote=False),
+                'explain': html.escape(explain, quote=False)
+            })
+            body = content.encode('UTF-8', 'replace')
+            self.send_header("Content-Type", self.error_content_type)
+            self.send_header('Content-Length', int(len(body)))
+        self.end_headers()
+
+        if self.command != 'HEAD' and body:
+            self.wfile.write(body)
+
+    def send_response(self, code, message=None):
+        """Add the response header to the headers buffer and log the
+        response code.
+
+        Also send two standard headers with the server software
+        version and the current date.
+
+        """
+        self.log_request(code)
+        self.send_response_only(code, message)
+        self.send_header('Server', self.version_string())
+        self.send_header('Date', self.date_time_string())
+
+    def send_response_only(self, code, message=None):
+        """Send the response header only."""
+        if self.request_version != 'HTTP/0.9':
+            if message is None:
+                if code in self.responses:
+                    message = self.responses[code][0]
+                else:
+                    message = ''
+            if not hasattr(self, '_headers_buffer'):
+                self._headers_buffer = []
+            self._headers_buffer.append(("%s %d %s\r\n" %
+                    (self.protocol_version, code, message)).encode(
+                        'latin-1', 'strict'))
+
+    def send_header(self, keyword, value):
+        """Send a MIME header to the headers buffer."""
+        if self.request_version != 'HTTP/0.9':
+            if not hasattr(self, '_headers_buffer'):
+                self._headers_buffer = []
+            self._headers_buffer.append(
+                ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict'))
+
+        if keyword.lower() == 'connection':
+            if value.lower() == 'close':
+                self.close_connection = True
+            elif value.lower() == 'keep-alive':
+                self.close_connection = False
+
+    def end_headers(self):
+        """Send the blank line ending the MIME headers."""
+        if self.request_version != 'HTTP/0.9':
+            self._headers_buffer.append(b"\r\n")
+            self.flush_headers()
+
+    def flush_headers(self):
+        if hasattr(self, '_headers_buffer'):
+            self.wfile.write(b"".join(self._headers_buffer))
+            self._headers_buffer = []
+
+    def log_request(self, code='-', size='-'):
+        """Log an accepted request.
+
+        This is called by send_response().
+
+        """
+        if isinstance(code, HTTPStatus):
+            code = code.value
+        self.log_message('"%s" %s %s',
+                         self.requestline, str(code), str(size))
+
+    def log_error(self, format, *args):
+        """Log an error.
+
+        This is called when a request cannot be fulfilled.  By
+        default it passes the message on to log_message().
+
+        Arguments are the same as for log_message().
+
+        XXX This should go to the separate error log.
+
+        """
+
+        self.log_message(format, *args)
+
+    def log_message(self, format, *args):
+        """Log an arbitrary message.
+
+        This is used by all other logging functions.  Override
+        it if you have specific logging wishes.
+
+        The first argument, FORMAT, is a format string for the
+        message to be logged.  If the format string contains
+        any % escapes requiring parameters, they should be
+        specified as subsequent arguments (it's just like
+        printf!).
+
+        The client ip and current date/time are prefixed to
+        every message.
+
+        """
+
+        sys.stderr.write("%s - - [%s] %s\n" %
+                         (self.address_string(),
+                          self.log_date_time_string(),
+                          format%args))
+
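+    # Illustrative sketch (an assumption, not upstream code): log_message can
+    # be overridden to route through the standard logging module instead of
+    # writing to stderr:
+    #
+    #   import logging
+    #
+    #   class LoggingHandler(BaseHTTPRequestHandler):
+    #       def log_message(self, format, *args):
+    #           logging.getLogger('httpd').info(
+    #               '%s %s', self.address_string(), format % args)
+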
+    def version_string(self):
+        """Return the server software version string."""
+        return self.server_version + ' ' + self.sys_version
+
+    def date_time_string(self, timestamp=None):
+        """Return the current date and time formatted for a message header."""
+        if timestamp is None:
+            timestamp = time.time()
+        return email.utils.formatdate(timestamp, usegmt=True)
+
+    def log_date_time_string(self):
+        """Return the current time formatted for logging."""
+        now = time.time()
+        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+                day, self.monthname[month], year, hh, mm, ss)
+        return s
+
+    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+    monthname = [None,
+                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+    def address_string(self):
+        """Return the client address."""
+
+        return self.client_address[0]
+
+    # Essentially static class variables
+
+    # The version of the HTTP protocol we support.
+    # Set this to HTTP/1.1 to enable automatic keepalive
+    protocol_version = "HTTP/1.0"
+
+    # MessageClass used to parse headers
+    MessageClass = http_client.HTTPMessage
+
+    # hack to maintain backwards compatibility
+    responses = {
+        v: (v.phrase, v.description)
+        for v in HTTPStatus.__members__.values()
+    }
+
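+# Illustrative sketch (an assumption, not upstream code): because requests
+# dispatch to do_<COMMAND> methods, a minimal cooperative server needs only
+# a handler subclass and the green HTTPServer defined above:
+#
+#   class HelloHandler(BaseHTTPRequestHandler):
+#       protocol_version = 'HTTP/1.1'     # enable keep-alive
+#
+#       def do_GET(self):
+#           body = b'hello\n'
+#           self.send_response(HTTPStatus.OK)
+#           self.send_header('Content-Type', 'text/plain')
+#           self.send_header('Content-Length', str(len(body)))
+#           self.end_headers()
+#           self.wfile.write(body)
+#
+#   HTTPServer(('127.0.0.1', 8080), HelloHandler).serve_forever()
+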
+
+class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
+
+    """Simple HTTP request handler with GET and HEAD commands.
+
+    This serves files from the current directory and any of its
+    subdirectories.  The MIME type for files is determined by
+    calling the .guess_type() method.
+
+    The GET and HEAD requests are identical except that the HEAD
+    request omits the actual contents of the file.
+
+    """
+
+    server_version = "SimpleHTTP/" + __version__
+
+    def do_GET(self):
+        """Serve a GET request."""
+        f = self.send_head()
+        if f:
+            try:
+                self.copyfile(f, self.wfile)
+            finally:
+                f.close()
+
+    def do_HEAD(self):
+        """Serve a HEAD request."""
+        f = self.send_head()
+        if f:
+            f.close()
+
+    def send_head(self):
+        """Common code for GET and HEAD commands.
+
+        This sends the response code and MIME headers.
+
+        Return value is either a file object (which has to be copied
+        to the outputfile by the caller unless the command was HEAD,
+        and must be closed by the caller under all circumstances), or
+        None, in which case the caller has nothing further to do.
+
+        """
+        path = self.translate_path(self.path)
+        f = None
+        if os.path.isdir(path):
+            parts = urllib.parse.urlsplit(self.path)
+            if not parts.path.endswith('/'):
+                # redirect browser - doing basically what apache does
+                self.send_response(HTTPStatus.MOVED_PERMANENTLY)
+                new_parts = (parts[0], parts[1], parts[2] + '/',
+                             parts[3], parts[4])
+                new_url = urllib.parse.urlunsplit(new_parts)
+                self.send_header("Location", new_url)
+                self.end_headers()
+                return None
+            for index in "index.html", "index.htm":
+                index = os.path.join(path, index)
+                if os.path.exists(index):
+                    path = index
+                    break
+            else:
+                return self.list_directory(path)
+        ctype = self.guess_type(path)
+        try:
+            f = open(path, 'rb')
+        except OSError:
+            self.send_error(HTTPStatus.NOT_FOUND, "File not found")
+            return None
+        try:
+            self.send_response(HTTPStatus.OK)
+            self.send_header("Content-type", ctype)
+            fs = os.fstat(f.fileno())
+            self.send_header("Content-Length", str(fs[6]))
+            self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+            self.end_headers()
+            return f
+        except:
+            f.close()
+            raise
+
+    def list_directory(self, path):
+        """Helper to produce a directory listing (absent index.html).
+
+        Return value is either a file object, or None (indicating an
+        error).  In either case, the headers are sent, making the
+        interface the same as for send_head().
+
+        """
+        try:
+            list = os.listdir(path)
+        except OSError:
+            self.send_error(
+                HTTPStatus.NOT_FOUND,
+                "No permission to list directory")
+            return None
+        list.sort(key=lambda a: a.lower())
+        r = []
+        try:
+            displaypath = urllib.parse.unquote(self.path,
+                                               errors='surrogatepass')
+        except UnicodeDecodeError:
+            displaypath = urllib.parse.unquote(path)
+        displaypath = html.escape(displaypath, quote=False)
+        enc = sys.getfilesystemencoding()
+        title = 'Directory listing for %s' % displaypath
+        r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
+                 '"http://www.w3.org/TR/html4/strict.dtd">')
+        r.append('<html>\n<head>')
+        r.append('<meta http-equiv="Content-Type" '
+                 'content="text/html; charset=%s">' % enc)
+        r.append('<title>%s</title>\n</head>' % title)
+        r.append('<body>\n<h1>%s</h1>' % title)
+        r.append('<hr>\n<ul>')
+        for name in list:
+            fullname = os.path.join(path, name)
+            displayname = linkname = name
+            # Append / for directories or @ for symbolic links
+            if os.path.isdir(fullname):
+                displayname = name + "/"
+                linkname = name + "/"
+            if os.path.islink(fullname):
+                displayname = name + "@"
+                # Note: a link to a directory displays with @ and links with /
+            r.append('<li><a href="%s">%s</a></li>'
+                    % (urllib.parse.quote(linkname,
+                                          errors='surrogatepass'),
+                       html.escape(displayname, quote=False)))
+        r.append('</ul>\n<hr>\n</body>\n</html>\n')
+        encoded = '\n'.join(r).encode(enc, 'surrogateescape')
+        f = io.BytesIO()
+        f.write(encoded)
+        f.seek(0)
+        self.send_response(HTTPStatus.OK)
+        self.send_header("Content-type", "text/html; charset=%s" % enc)
+        self.send_header("Content-Length", str(len(encoded)))
+        self.end_headers()
+        return f
+
+    def translate_path(self, path):
+        """Translate a /-separated PATH to the local filename syntax.
+
+        Components that mean special things to the local file system
+        (e.g. drive or directory names) are ignored.  (XXX They should
+        probably be diagnosed.)
+
+        """
+        # abandon query parameters
+        path = path.split('?',1)[0]
+        path = path.split('#',1)[0]
+        # Don't forget explicit trailing slash when normalizing. Issue17324
+        trailing_slash = path.rstrip().endswith('/')
+        try:
+            path = urllib.parse.unquote(path, errors='surrogatepass')
+        except UnicodeDecodeError:
+            path = urllib.parse.unquote(path)
+        path = posixpath.normpath(path)
+        words = path.split('/')
+        words = filter(None, words)
+        path = os.getcwd()
+        for word in words:
+            if os.path.dirname(word) or word in (os.curdir, os.pardir):
+                # Ignore components that are not a simple file/directory name
+                continue
+            path = os.path.join(path, word)
+        if trailing_slash:
+            path += '/'
+        return path
+
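+    # Illustrative only, given a handler instance and assuming the current
+    # working directory is /srv/www: query strings are dropped and '..' is
+    # normalized away before the path is re-rooted under the cwd:
+    #
+    #   >>> handler.translate_path('/docs/../index.html?x=1')
+    #   '/srv/www/index.html'
+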
+    def copyfile(self, source, outputfile):
+        """Copy all data between two file objects.
+
+        The SOURCE argument is a file object open for reading
+        (or anything with a read() method) and the DESTINATION
+        argument is a file object open for writing (or
+        anything with a write() method).
+
+        The only reason for overriding this would be to change
+        the block size or perhaps to replace newlines by CRLF
+        -- note however that the default server uses this
+        to copy binary data as well.
+
+        """
+        shutil.copyfileobj(source, outputfile)
+
+    def guess_type(self, path):
+        """Guess the type of a file.
+
+        Argument is a PATH (a filename).
+
+        Return value is a string of the form type/subtype,
+        usable for a MIME Content-type header.
+
+        The default implementation looks the file's extension
+        up in the table self.extensions_map, using application/octet-stream
+        as a default; however it would be permissible (if
+        slow) to look inside the data to make a better guess.
+
+        """
+
+        base, ext = posixpath.splitext(path)
+        if ext in self.extensions_map:
+            return self.extensions_map[ext]
+        ext = ext.lower()
+        if ext in self.extensions_map:
+            return self.extensions_map[ext]
+        else:
+            return self.extensions_map['']
+
+    if not mimetypes.inited:
+        mimetypes.init() # try to read system mime.types
+    extensions_map = mimetypes.types_map.copy()
+    extensions_map.update({
+        '': 'application/octet-stream', # Default
+        '.py': 'text/plain',
+        '.c': 'text/plain',
+        '.h': 'text/plain',
+        })
+
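+    # Illustrative sketch (an assumption, not upstream code): a subclass can
+    # extend the shared table without mutating it, e.g. to serve Markdown:
+    #
+    #   class MdHandler(SimpleHTTPRequestHandler):
+    #       extensions_map = SimpleHTTPRequestHandler.extensions_map.copy()
+    #       extensions_map['.md'] = 'text/plain'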
+
+# Utilities for CGIHTTPRequestHandler
+
+def _url_collapse_path(path):
+    """
+    Given a URL path, remove extra '/'s and '.' path elements and collapse
+    any '..' references, returning the collapsed path.
+
+    Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
+    The utility of this function is limited to the is_cgi method and helps
+    prevent some security attacks.
+
+    Returns: The reconstituted URL, which will always start with a '/'.
+
+    Raises: IndexError if too many '..' occur within the path.
+
+    """
+    # Query component should not be involved.
+    path, _, query = path.partition('?')
+    path = urllib.parse.unquote(path)
+
+    # Similar to os.path.split(os.path.normpath(path)) but specific to URL
+    # path semantics rather than local operating system semantics.
+    path_parts = path.split('/')
+    head_parts = []
+    for part in path_parts[:-1]:
+        if part == '..':
+            head_parts.pop() # IndexError if more '..' than prior parts
+        elif part and part != '.':
+            head_parts.append(part)
+    if path_parts:
+        tail_part = path_parts.pop()
+        if tail_part:
+            if tail_part == '..':
+                head_parts.pop()
+                tail_part = ''
+            elif tail_part == '.':
+                tail_part = ''
+    else:
+        tail_part = ''
+
+    if query:
+        tail_part = '?'.join((tail_part, query))
+
+    splitpath = ('/' + '/'.join(head_parts), tail_part)
+    collapsed_path = "/".join(splitpath)
+
+    return collapsed_path
+
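+# Illustrative only:
+#
+#   >>> _url_collapse_path('//a/./b/../c')
+#   '/a/c'
+#   >>> _url_collapse_path('/cgi-bin/script.py?x=1')
+#   '/cgi-bin/script.py?x=1'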
+
+
+nobody = None
+
+def nobody_uid():
+    """Internal routine to get nobody's uid"""
+    global nobody
+    if nobody:
+        return nobody
+    try:
+        import pwd
+    except ImportError:
+        return -1
+    try:
+        nobody = pwd.getpwnam('nobody')[2]
+    except KeyError:
+        nobody = 1 + max(x[2] for x in pwd.getpwall())
+    return nobody
+
+
+def executable(path):
+    """Test for executable file."""
+    return os.access(path, os.X_OK)
+
+
+class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
+
+    """Complete HTTP server with GET, HEAD and POST commands.
+
+    GET and HEAD also support running CGI scripts.
+
+    The POST command is *only* implemented for CGI scripts.
+
+    """
+
+    # Determine platform specifics
+    have_fork = hasattr(os, 'fork')
+
+    # Make rfile unbuffered -- we need to read one line and then pass
+    # the rest to a subprocess, so we can't use buffered input.
+    rbufsize = 0
+
+    def do_POST(self):
+        """Serve a POST request.
+
+        This is only implemented for CGI scripts.
+
+        """
+
+        if self.is_cgi():
+            self.run_cgi()
+        else:
+            self.send_error(
+                HTTPStatus.NOT_IMPLEMENTED,
+                "Can only POST to CGI scripts")
+
+    def send_head(self):
+        """Version of send_head that support CGI scripts"""
+        if self.is_cgi():
+            return self.run_cgi()
+        else:
+            return SimpleHTTPRequestHandler.send_head(self)
+
+    def is_cgi(self):
+        """Test whether self.path corresponds to a CGI script.
+
+        Returns True and updates the cgi_info attribute to the tuple
+        (dir, rest) if self.path requires running a CGI script.
+        Returns False otherwise.
+
+        If any exception is raised, the caller should assume that
+        self.path was rejected as invalid and act accordingly.
+
+        The default implementation tests whether the normalized url
+        path begins with one of the strings in self.cgi_directories
+        (and the next character is a '/' or the end of the string).
+
+        """
+        collapsed_path = _url_collapse_path(self.path)
+        dir_sep = collapsed_path.find('/', 1)
+        head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
+        if head in self.cgi_directories:
+            self.cgi_info = head, tail
+            return True
+        return False
+
+
+    cgi_directories = ['/cgi-bin', '/htbin']
+
+    def is_executable(self, path):
+        """Test whether argument path is an executable file."""
+        return executable(path)
+
+    def is_python(self, path):
+        """Test whether argument path is a Python script."""
+        head, tail = os.path.splitext(path)
+        return tail.lower() in (".py", ".pyw")
+
+    def run_cgi(self):
+        """Execute a CGI script."""
+        dir, rest = self.cgi_info
+        path = dir + '/' + rest
+        i = path.find('/', len(dir)+1)
+        while i >= 0:
+            nextdir = path[:i]
+            nextrest = path[i+1:]
+
+            scriptdir = self.translate_path(nextdir)
+            if os.path.isdir(scriptdir):
+                dir, rest = nextdir, nextrest
+                i = path.find('/', len(dir)+1)
+            else:
+                break
+
+        # find an explicit query string, if present.
+        rest, _, query = rest.partition('?')
+
+        # dissect the part after the directory name into a script name &
+        # a possible additional path, to be stored in PATH_INFO.
+        i = rest.find('/')
+        if i >= 0:
+            script, rest = rest[:i], rest[i:]
+        else:
+            script, rest = rest, ''
+
+        scriptname = dir + '/' + script
+        scriptfile = self.translate_path(scriptname)
+        if not os.path.exists(scriptfile):
+            self.send_error(
+                HTTPStatus.NOT_FOUND,
+                "No such CGI script (%r)" % scriptname)
+            return
+        if not os.path.isfile(scriptfile):
+            self.send_error(
+                HTTPStatus.FORBIDDEN,
+                "CGI script is not a plain file (%r)" % scriptname)
+            return
+        ispy = self.is_python(scriptname)
+        if self.have_fork or not ispy:
+            if not self.is_executable(scriptfile):
+                self.send_error(
+                    HTTPStatus.FORBIDDEN,
+                    "CGI script is not executable (%r)" % scriptname)
+                return
+
+        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+        # XXX Much of the following could be prepared ahead of time!
+        env = copy.deepcopy(os.environ)
+        env['SERVER_SOFTWARE'] = self.version_string()
+        env['SERVER_NAME'] = self.server.server_name
+        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+        env['SERVER_PROTOCOL'] = self.protocol_version
+        env['SERVER_PORT'] = str(self.server.server_port)
+        env['REQUEST_METHOD'] = self.command
+        uqrest = urllib.parse.unquote(rest)
+        env['PATH_INFO'] = uqrest
+        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+        env['SCRIPT_NAME'] = scriptname
+        if query:
+            env['QUERY_STRING'] = query
+        env['REMOTE_ADDR'] = self.client_address[0]
+        authorization = self.headers.get("authorization")
+        if authorization:
+            authorization = authorization.split()
+            if len(authorization) == 2:
+                import base64, binascii
+                env['AUTH_TYPE'] = authorization[0]
+                if authorization[0].lower() == "basic":
+                    try:
+                        authorization = authorization[1].encode('ascii')
+                        authorization = base64.decodebytes(authorization).\
+                                        decode('ascii')
+                    except (binascii.Error, UnicodeError):
+                        pass
+                    else:
+                        authorization = authorization.split(':')
+                        if len(authorization) == 2:
+                            env['REMOTE_USER'] = authorization[0]
+        # XXX REMOTE_IDENT
+        if self.headers.get('content-type') is None:
+            env['CONTENT_TYPE'] = self.headers.get_content_type()
+        else:
+            env['CONTENT_TYPE'] = self.headers['content-type']
+        length = self.headers.get('content-length')
+        if length:
+            env['CONTENT_LENGTH'] = length
+        referer = self.headers.get('referer')
+        if referer:
+            env['HTTP_REFERER'] = referer
+        accept = []
+        for line in self.headers.getallmatchingheaders('accept'):
+            if line[:1] in "\t\n\r ":
+                accept.append(line.strip())
+            else:
+                accept = accept + line[7:].split(',')
+        env['HTTP_ACCEPT'] = ','.join(accept)
+        ua = self.headers.get('user-agent')
+        if ua:
+            env['HTTP_USER_AGENT'] = ua
+        co = filter(None, self.headers.get_all('cookie', []))
+        cookie_str = ', '.join(co)
+        if cookie_str:
+            env['HTTP_COOKIE'] = cookie_str
+        # XXX Other HTTP_* headers
+        # Since we're setting the env in the parent, provide empty
+        # values to override previously set values
+        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
+                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
+            env.setdefault(k, "")
+
+        self.send_response(HTTPStatus.OK, "Script output follows")
+        self.flush_headers()
+
+        decoded_query = query.replace('+', ' ')
+
+        if self.have_fork:
+            # Unix -- fork as we should
+            args = [script]
+            if '=' not in decoded_query:
+                args.append(decoded_query)
+            nobody = nobody_uid()
+            self.wfile.flush() # Always flush before forking
+            pid = os.fork()
+            if pid != 0:
+                # Parent
+                pid, sts = os.waitpid(pid, 0)
+                # throw away additional data [see bug #427345]
+                while select.select([self.rfile], [], [], 0)[0]:
+                    if not self.rfile.read(1):
+                        break
+                if sts:
+                    self.log_error("CGI script exit status %#x", sts)
+                return
+            # Child
+            try:
+                try:
+                    os.setuid(nobody)
+                except OSError:
+                    pass
+                os.dup2(self.rfile.fileno(), 0)
+                os.dup2(self.wfile.fileno(), 1)
+                os.execve(scriptfile, args, env)
+            except:
+                self.server.handle_error(self.request, self.client_address)
+                os._exit(127)
+
+        else:
+            # Non-Unix -- use subprocess
+            cmdline = [scriptfile]
+            if self.is_python(scriptfile):
+                interp = sys.executable
+                if interp.lower().endswith("w.exe"):
+                    # On Windows, use python.exe, not pythonw.exe
+                    interp = interp[:-5] + interp[-4:]
+                cmdline = [interp, '-u'] + cmdline
+            if '=' not in query:
+                cmdline.append(query)
+            self.log_message("command: %s", subprocess.list2cmdline(cmdline))
+            try:
+                nbytes = int(length)
+            except (TypeError, ValueError):
+                nbytes = 0
+            p = subprocess.Popen(cmdline,
+                                 stdin=subprocess.PIPE,
+                                 stdout=subprocess.PIPE,
+                                 stderr=subprocess.PIPE,
+                                 env = env
+                                 )
+            if self.command.lower() == "post" and nbytes > 0:
+                data = self.rfile.read(nbytes)
+            else:
+                data = None
+            # throw away additional data [see bug #427345]
+            while select.select([self.rfile._sock], [], [], 0)[0]:
+                if not self.rfile._sock.recv(1):
+                    break
+            stdout, stderr = p.communicate(data)
+            self.wfile.write(stdout)
+            if stderr:
+                self.log_error('%s', stderr)
+            p.stderr.close()
+            p.stdout.close()
+            status = p.returncode
+            if status:
+                self.log_error("CGI script exit status %#x", status)
+            else:
+                self.log_message("CGI script exited OK")
+
+
+def test(HandlerClass=BaseHTTPRequestHandler,
+         ServerClass=HTTPServer, protocol="HTTP/1.0", port=8000, bind=""):
+    """Test the HTTP request handler class.
+
+    This runs an HTTP server on port 8000 (or the port argument).
+
+    """
+    server_address = (bind, port)
+
+    HandlerClass.protocol_version = protocol
+    with ServerClass(server_address, HandlerClass) as httpd:
+        sa = httpd.socket.getsockname()
+        serve_message = "Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
+        print(serve_message.format(host=sa[0], port=sa[1]))
+        try:
+            httpd.serve_forever()
+        except KeyboardInterrupt:
+            print("\nKeyboard interrupt received, exiting.")
+            sys.exit(0)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cgi', action='store_true',
+                        help='Run as CGI Server')
+    parser.add_argument('--bind', '-b', default='', metavar='ADDRESS',
+                        help='Specify alternate bind address '
+                             '[default: all interfaces]')
+    parser.add_argument('port', action='store',
+                        default=8000, type=int,
+                        nargs='?',
+                        help='Specify alternate port [default: 8000]')
+    args = parser.parse_args()
+    if args.cgi:
+        handler_class = CGIHTTPRequestHandler
+    else:
+        handler_class = SimpleHTTPRequestHandler
+    test(HandlerClass=handler_class, port=args.port, bind=args.bind)
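The `test()` helper and the argparse block above make this file directly runnable. A minimal sketch of driving it programmatically (assuming this file is importable as eventlet.green.http.server, per the surrounding paths):

    # Sketch: serve the current directory on the green HTTP server above.
    from eventlet.green.http import server

    # SimpleHTTPRequestHandler is defined earlier in this same file.
    server.test(HandlerClass=server.SimpleHTTPRequestHandler, port=8080)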

+ 22 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/httplib.py

@@ -0,0 +1,22 @@
+from eventlet import patcher
+from eventlet.green import socket
+from eventlet.support import six
+
+to_patch = [('socket', socket)]
+
+try:
+    from eventlet.green import ssl
+    to_patch.append(('ssl', ssl))
+except ImportError:
+    pass
+
+if six.PY2:
+    patcher.inject('httplib', globals(), *to_patch)
+if six.PY3:
+    from eventlet.green.http import client
+    for name in dir(client):
+        if name not in patcher.__exclude:
+            globals()[name] = getattr(client, name)
+
+if __name__ == '__main__':
+    test()
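On Python 2 this module injects the green socket (and ssl, when available) into a copy of stdlib httplib; on Python 3 it re-exports eventlet.green.http.client. Either way it reads as a drop-in replacement. A sketch (assumes network access to example.com):

    # Sketch: an HTTP GET over the green httplib; blocking calls yield
    # to other greenthreads instead of stalling the process.
    from eventlet.green import httplib

    conn = httplib.HTTPConnection('example.com', 80)
    conn.request('GET', '/')
    resp = conn.getresponse()
    print(resp.status, len(resp.read()))
    conn.close()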

+ 111 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/os.py

@@ -0,0 +1,111 @@
+os_orig = __import__("os")
+import errno
+socket = __import__("socket")
+
+from eventlet import greenio
+from eventlet.support import get_errno
+from eventlet import greenthread
+from eventlet import hubs
+from eventlet.patcher import slurp_properties
+
+__all__ = os_orig.__all__
+__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']
+
+slurp_properties(
+    os_orig,
+    globals(),
+    ignore=__patched__,
+    srckeys=dir(os_orig))
+
+
+def fdopen(fd, *args, **kw):
+    """fdopen(fd [, mode='r' [, bufsize]]) -> file_object
+
+    Return an open file object connected to a file descriptor."""
+    if not isinstance(fd, int):
+        raise TypeError('fd should be int, not %r' % fd)
+    try:
+        return greenio.GreenPipe(fd, *args, **kw)
+    except IOError as e:
+        raise OSError(*e.args)
+
+__original_read__ = os_orig.read
+
+
+def read(fd, n):
+    """read(fd, buffersize) -> string
+
+    Read a file descriptor."""
+    while True:
+        try:
+            return __original_read__(fd, n)
+        except (OSError, IOError) as e:
+            if get_errno(e) != errno.EAGAIN:
+                raise
+        except socket.error as e:
+            if get_errno(e) == errno.EPIPE:
+                return ''
+            raise
+        try:
+            hubs.trampoline(fd, read=True)
+        except hubs.IOClosed:
+            return ''
+
+__original_write__ = os_orig.write
+
+
+def write(fd, st):
+    """write(fd, string) -> byteswritten
+
+    Write a string to a file descriptor.
+    """
+    while True:
+        try:
+            return __original_write__(fd, st)
+        except (OSError, IOError) as e:
+            if get_errno(e) != errno.EAGAIN:
+                raise
+        except socket.error as e:
+            if get_errno(e) != errno.EPIPE:
+                raise
+        hubs.trampoline(fd, write=True)
+
+
+def wait():
+    """wait() -> (pid, status)
+
+    Wait for completion of a child process."""
+    return waitpid(0, 0)
+
+__original_waitpid__ = os_orig.waitpid
+
+
+def waitpid(pid, options):
+    """waitpid(...)
+    waitpid(pid, options) -> (pid, status)
+
+    Wait for completion of a given child process."""
+    if options & os_orig.WNOHANG != 0:
+        return __original_waitpid__(pid, options)
+    else:
+        new_options = options | os_orig.WNOHANG
+        while True:
+            rpid, status = __original_waitpid__(pid, new_options)
+            if rpid and status >= 0:
+                return rpid, status
+            greenthread.sleep(0.01)
+
+__original_open__ = os_orig.open
+
+
+def open(file, flags, mode=0o777, dir_fd=None):
+    """ Wrap os.open
+        This behaves identically, but collaborates with
+        the hub's notify_opened protocol.
+    """
+    if dir_fd is not None:
+        fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
+    else:
+        fd = __original_open__(file, flags, mode)
+    hubs.notify_opened(fd)
+    return fd
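The patched read()/write() above only trampoline when the descriptor reports EAGAIN, so the descriptor must be non-blocking for the cooperative behavior to kick in. A sketch (Unix-only, since it uses fcntl):

    # Sketch: green os.read() yields to the hub instead of blocking.
    import fcntl
    import eventlet
    from eventlet.green import os

    r, w = os.pipe()
    # Make the read end non-blocking so read() hits EAGAIN and trampolines.
    fcntl.fcntl(r, fcntl.F_SETFL, fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK)

    def writer():
        eventlet.sleep(0.1)      # let the reader park on the hub first
        os.write(w, b'hello')

    eventlet.spawn(writer)
    print(os.read(r, 5))         # cooperatively waits for the writer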

+ 257 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/profile.py

@@ -0,0 +1,257 @@
+# Copyright (c) 2010, CCP Games
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of CCP Games nor the
+#       names of its contributors may be used to endorse or promote products
+#       derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This module is API-equivalent to the standard library :mod:`profile` module
+but it is greenthread-aware as well as thread-aware.  Use this module
+to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
+FIXME: No testcases for this module.
+"""
+
+profile_orig = __import__('profile')
+__all__ = profile_orig.__all__
+
+from eventlet.patcher import slurp_properties
+slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))
+
+import sys
+import functools
+
+from eventlet import greenthread
+from eventlet import patcher
+from eventlet.support import six
+
+thread = patcher.original(six.moves._thread.__name__)  # non-monkeypatched module needed
+
+
+# This class provides the start() and stop() functions
+class Profile(profile_orig.Profile):
+    base = profile_orig.Profile
+
+    def __init__(self, timer=None, bias=None):
+        self.current_tasklet = greenthread.getcurrent()
+        self.thread_id = thread.get_ident()
+        self.base.__init__(self, timer, bias)
+        self.sleeping = {}
+
+    def __call__(self, *args):
+        """make callable, allowing an instance to be the profiler"""
+        self.dispatcher(*args)
+
+    def _setup(self):
+        self._has_setup = True
+        self.cur = None
+        self.timings = {}
+        self.current_tasklet = greenthread.getcurrent()
+        self.thread_id = thread.get_ident()
+        self.simulate_call("profiler")
+
+    def start(self, name="start"):
+        if getattr(self, "running", False):
+            return
+        self._setup()
+        self.simulate_call("start")
+        self.running = True
+        sys.setprofile(self.dispatcher)
+
+    def stop(self):
+        sys.setprofile(None)
+        self.running = False
+        self.TallyTimings()
+
+    # special cases for the original run commands, making sure to
+    # clear the timer context.
+    def runctx(self, cmd, globals, locals):
+        if not getattr(self, "_has_setup", False):
+            self._setup()
+        try:
+            return profile_orig.Profile.runctx(self, cmd, globals, locals)
+        finally:
+            self.TallyTimings()
+
+    def runcall(self, func, *args, **kw):
+        if not getattr(self, "_has_setup", False):
+            self._setup()
+        try:
+            return profile_orig.Profile.runcall(self, func, *args, **kw)
+        finally:
+            self.TallyTimings()
+
+    def trace_dispatch_return_extend_back(self, frame, t):
+        """A hack function to override error checking in parent class.  It
+        allows invalid returns (where frames weren't preveiously entered into
+        the profiler) which can happen for all the tasklets that suddenly start
+        to get monitored. This means that the time will eventually be attributed
+        to a call high in the chain, when there is a tasklet switch
+        """
+        if isinstance(self.cur[-2], Profile.fake_frame):
+            return False
+            self.trace_dispatch_call(frame, 0)
+        return self.trace_dispatch_return(frame, t)
+
+    def trace_dispatch_c_return_extend_back(self, frame, t):
+        # same for c return
+        if isinstance(self.cur[-2], Profile.fake_frame):
+            return False  # ignore bogus returns
+            self.trace_dispatch_c_call(frame, 0)
+        return self.trace_dispatch_return(frame, t)
+
+    def SwitchTasklet(self, t0, t1, t):
+        # tally the time spent in the old tasklet
+        pt, it, et, fn, frame, rcur = self.cur
+        cur = (pt, it + t, et, fn, frame, rcur)
+
+        # we are switching to a new tasklet, store the old
+        self.sleeping[t0] = cur, self.timings
+        self.current_tasklet = t1
+
+        # find the new one
+        try:
+            self.cur, self.timings = self.sleeping.pop(t1)
+        except KeyError:
+            self.cur, self.timings = None, {}
+            self.simulate_call("profiler")
+            self.simulate_call("new_tasklet")
+
+    def TallyTimings(self):
+        oldtimings = self.sleeping
+        self.sleeping = {}
+
+        # first, unwind the main "cur"
+        self.cur = self.Unwind(self.cur, self.timings)
+
+        # we must keep the timings dicts separate for each tasklet, since each
+        # contains the 'ns' item, the recursion count of each function in that
+        # tasklet.  This is used in Unwind() below.
+        for tasklet, (cur, timings) in six.iteritems(oldtimings):
+            self.Unwind(cur, timings)
+
+            for k, v in six.iteritems(timings):
+                if k not in self.timings:
+                    self.timings[k] = v
+                else:
+                    # accumulate all to the self.timings
+                    cc, ns, tt, ct, callers = self.timings[k]
+                    # ns should be 0 after unwinding
+                    cc += v[0]
+                    tt += v[2]
+                    ct += v[3]
+                    for k1, v1 in six.iteritems(v[4]):
+                        callers[k1] = callers.get(k1, 0) + v1
+                    self.timings[k] = cc, ns, tt, ct, callers
+
+    def Unwind(self, cur, timings):
+        "A function to unwind a 'cur' frame and tally the results"
+        "see profile.trace_dispatch_return() for details"
+        # also see simulate_cmd_complete()
+        while(cur[-1]):
+            rpt, rit, ret, rfn, frame, rcur = cur
+            frame_total = rit + ret
+
+            if rfn in timings:
+                cc, ns, tt, ct, callers = timings[rfn]
+            else:
+                cc, ns, tt, ct, callers = 0, 0, 0, 0, {}
+
+            if not ns:
+                ct = ct + frame_total
+                cc = cc + 1
+
+            if rcur:
+                ppt, pit, pet, pfn, pframe, pcur = rcur
+            else:
+                pfn = None
+
+            if pfn in callers:
+                callers[pfn] = callers[pfn] + 1  # hack: gather more
+            elif pfn:
+                callers[pfn] = 1
+
+            timings[rfn] = cc, ns - 1, tt + rit, ct, callers
+
+            ppt, pit, pet, pfn, pframe, pcur = rcur
+            rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
+            cur = rcur
+        return cur
+
+
+def ContextWrap(f):
+    @functools.wraps(f)
+    def ContextWrapper(self, arg, t):
+        current = greenthread.getcurrent()
+        if current != self.current_tasklet:
+            self.SwitchTasklet(self.current_tasklet, current, t)
+            t = 0.0  # the time was billed to the previous tasklet
+        return f(self, arg, t)
+    return ContextWrapper
+
+
+# Add "return safety" to the dispatchers
+Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
+    'return': Profile.trace_dispatch_return_extend_back,
+    'c_return': Profile.trace_dispatch_c_return_extend_back,
+})
+# Add automatic tasklet detection to the callbacks.
+Profile.dispatch = dict((k, ContextWrap(v)) for k, v in six.viewitems(Profile.dispatch))
+
+
+# run statements shamelessly stolen from profile.py
+def run(statement, filename=None, sort=-1):
+    """Run statement under profiler optionally saving results in filename
+
+    This function takes a single argument that can be passed to the
+    "exec" statement, and an optional file name.  In all cases this
+    routine attempts to "exec" its first argument and gather profiling
+    statistics from the execution. If no file name is present, then this
+    function automatically prints a simple profiling report, sorted by the
+    standard name string (file/line/function-name) that is presented in
+    each line.
+    """
+    prof = Profile()
+    try:
+        prof = prof.run(statement)
+    except SystemExit:
+        pass
+    if filename is not None:
+        prof.dump_stats(filename)
+    else:
+        return prof.print_stats(sort)
+
+
+def runctx(statement, globals, locals, filename=None):
+    """Run statement under profiler, supplying your own globals and locals,
+    optionally saving results in filename.
+
+    statement and filename have the same semantics as profile.run
+    """
+    prof = Profile()
+    try:
+        prof = prof.runctx(statement, globals, locals)
+    except SystemExit:
+        pass
+
+    if filename is not None:
+        prof.dump_stats(filename)
+    else:
+        return prof.print_stats()
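Since the dispatchers above are wrapped to detect tasklet switches, the profiler keeps separate timings per greenthread and merges them in TallyTimings(). A minimal sketch:

    # Sketch: profile a function that hops between greenthreads.
    import eventlet
    from eventlet.green import profile

    def busy():
        for _ in range(3):
            eventlet.sleep(0)    # each sleep forces a tasklet switch

    p = profile.Profile()
    p.runcall(busy)              # runcall() tallies timings on the way out
    p.print_stats()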

+ 86 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/select.py

@@ -0,0 +1,86 @@
+import eventlet
+from eventlet.hubs import get_hub
+from eventlet.support import six
+__select = eventlet.patcher.original('select')
+error = __select.error
+
+
+__patched__ = ['select']
+__deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']
+
+
+def get_fileno(obj):
+    # The purpose of this function is to exactly replicate
+    # the behavior of the select module when confronted with
+    # abnormal filenos; the details are extensively tested in
+    # the stdlib test/test_select.py.
+    try:
+        f = obj.fileno
+    except AttributeError:
+        if not isinstance(obj, six.integer_types):
+            raise TypeError("Expected int or long, got %s" % type(obj))
+        return obj
+    else:
+        rv = f()
+        if not isinstance(rv, six.integer_types):
+            raise TypeError("Expected int or long, got %s" % type(rv))
+        return rv
+
+
+def select(read_list, write_list, error_list, timeout=None):
+    # error checking like this is required by the stdlib unit tests
+    if timeout is not None:
+        try:
+            timeout = float(timeout)
+        except ValueError:
+            raise TypeError("Expected number for timeout")
+    hub = get_hub()
+    timers = []
+    current = eventlet.getcurrent()
+    assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
+    ds = {}
+    for r in read_list:
+        ds[get_fileno(r)] = {'read': r}
+    for w in write_list:
+        ds.setdefault(get_fileno(w), {})['write'] = w
+    for e in error_list:
+        ds.setdefault(get_fileno(e), {})['error'] = e
+
+    listeners = []
+
+    def on_read(d):
+        original = ds[get_fileno(d)]['read']
+        current.switch(([original], [], []))
+
+    def on_write(d):
+        original = ds[get_fileno(d)]['write']
+        current.switch(([], [original], []))
+
+    def on_timeout2():
+        current.switch(([], [], []))
+
+    def on_timeout():
+        # ensure that BaseHub.run() has a chance to call self.wait()
+        # at least once before timing out.  Otherwise the following code
+        # can time out erroneously.
+        #
+        # s1, s2 = socket.socketpair()
+        # print(select.select([], [s1], [], 0))
+        timers.append(hub.schedule_call_global(0, on_timeout2))
+
+    if timeout is not None:
+        timers.append(hub.schedule_call_global(timeout, on_timeout))
+    try:
+        for k, v in six.iteritems(ds):
+            if v.get('read'):
+                listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None))
+            if v.get('write'):
+                listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None))
+        try:
+            return hub.switch()
+        finally:
+            for l in listeners:
+                hub.remove(l)
+    finally:
+        for t in timers:
+            t.cancel()
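select() above parks the calling greenthread on the hub and switches back with the ready lists, so only that greenthread waits. A sketch using a socket pair:

    # Sketch: green select blocks only the current greenthread.
    import socket
    from eventlet.green import select

    a, b = socket.socketpair()
    b.send(b'x')
    readable, writable, _ = select.select([a], [], [], 1.0)
    print(readable == [a])       # True: data is waiting on `a`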

+ 34 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/selectors.py

@@ -0,0 +1,34 @@
+import sys
+
+from eventlet import patcher
+from eventlet.green import select
+
+__patched__ = [
+    'DefaultSelector',
+    'SelectSelector',
+]
+
+# We only have green select so the options are:
+# * leave it be and have selectors that block
+# * try to pretend the "bad" selectors don't exist
+# * replace all with SelectSelector for the price of possibly different
+#   performance characteristics and a missing fileno() method (if someone
+#   uses it, it'll result in a crash; we may want to implement it in the future)
+#
+# This module used to follow the third approach, but just removing the
+# offending selectors is a less error-prone and less confusing approach.
+__deleted__ = [
+    'PollSelector',
+    'EpollSelector',
+    'DevpollSelector',
+    'KqueueSelector',
+]
+
+patcher.inject('selectors', globals(), ('select', select))
+
+del patcher
+
+if sys.platform != 'win32':
+    SelectSelector._select = staticmethod(select.select)
+
+DefaultSelector = SelectSelector
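With the poll/epoll/kqueue variants deleted, DefaultSelector is always the (green) SelectSelector. A sketch (Python 3.4+, where stdlib selectors exists):

    # Sketch: the green selectors module only ever hands out SelectSelector.
    import socket
    from eventlet.green import selectors

    sel = selectors.DefaultSelector()            # a green SelectSelector
    a, b = socket.socketpair()
    sel.register(a, selectors.EVENT_READ)
    b.send(b'x')
    for key, events in sel.select(timeout=1.0):
        print(key.fileobj is a, events == selectors.EVENT_READ)
    sel.close()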

+ 63 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/socket.py

@@ -0,0 +1,63 @@
+import os
+import sys
+
+__import__('eventlet.green._socket_nodns')
+__socket = sys.modules['eventlet.green._socket_nodns']
+
+__all__ = __socket.__all__
+__patched__ = __socket.__patched__ + [
+    'create_connection',
+    'getaddrinfo',
+    'gethostbyname',
+    'gethostbyname_ex',
+    'getnameinfo',
+]
+
+from eventlet.patcher import slurp_properties
+slurp_properties(__socket, globals(), srckeys=dir(__socket))
+
+
+if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes':
+    from eventlet.support import greendns
+    gethostbyname = greendns.gethostbyname
+    getaddrinfo = greendns.getaddrinfo
+    gethostbyname_ex = greendns.gethostbyname_ex
+    getnameinfo = greendns.getnameinfo
+    del greendns
+
+
+def create_connection(address,
+                      timeout=_GLOBAL_DEFAULT_TIMEOUT,
+                      source_address=None):
+    """Connect to *address* and return the socket object.
+
+    Convenience function.  Connect to *address* (a 2-tuple ``(host,
+    port)``) and return the socket object.  Passing the optional
+    *timeout* parameter will set the timeout on the socket instance
+    before attempting to connect.  If no *timeout* is supplied, the
+    global default timeout setting returned by :func:`getdefaulttimeout`
+    is used.
+    """
+
+    err = "getaddrinfo returns an empty list"
+    host, port = address
+    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
+        af, socktype, proto, canonname, sa = res
+        sock = None
+        try:
+            sock = socket(af, socktype, proto)
+            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
+                sock.settimeout(timeout)
+            if source_address:
+                sock.bind(source_address)
+            sock.connect(sa)
+            return sock
+
+        except error as e:
+            err = e
+            if sock is not None:
+                sock.close()
+
+    if not isinstance(err, error):
+        err = error(err)
+    raise err
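create_connection() above iterates over the green getaddrinfo() results and connects with green sockets, so both name resolution and the connect itself yield to the hub. A sketch (assumes network access):

    # Sketch: a cooperative TCP connect using the green socket module.
    from eventlet.green import socket

    sock = socket.create_connection(('example.com', 80), timeout=5)
    sock.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
    print(sock.recv(64))
    sock.close()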

+ 439 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/ssl.py

@@ -0,0 +1,439 @@
+__ssl = __import__('ssl')
+
+from eventlet.patcher import slurp_properties
+slurp_properties(__ssl, globals(), srckeys=dir(__ssl))
+
+import errno
+import functools
+import sys
+import time
+
+from eventlet import greenio
+from eventlet.greenio import (
+    set_nonblocking, GreenSocket, CONNECT_ERR, CONNECT_SUCCESS,
+)
+from eventlet.hubs import trampoline, IOClosed
+from eventlet.support import get_errno, PY33, six
+orig_socket = __import__('socket')
+socket = orig_socket.socket
+if sys.version_info >= (2, 7):
+    has_ciphers = True
+    timeout_exc = SSLError
+else:
+    has_ciphers = False
+    timeout_exc = orig_socket.timeout
+
+__patched__ = [
+    'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
+    'create_default_context', '_create_default_https_context']
+
+_original_sslsocket = __ssl.SSLSocket
+
+
+class GreenSSLSocket(_original_sslsocket):
+    """ This is a green version of the SSLSocket class from the ssl module added
+    in 2.6.  For documentation on it, please see the Python standard
+    documentation.
+
+    Python nonblocking ssl objects don't give errors when the other end
+    of the socket is closed (they do notice when the other end is shutdown,
+    though).  Any write/read operations will simply hang if the socket is
+    closed from the other end.  There is no obvious fix for this problem;
+    it appears to be a limitation of Python's ssl object implementation.
+    A workaround is to set a reasonable timeout on the socket using
+    settimeout(), and to close/reopen the connection when a timeout
+    occurs at an unexpected juncture in the code.
+    """
+    # we are inheriting from SSLSocket because its constructor calls
+    # do_handshake whose behavior we wish to override
+
+    def __init__(self, sock, keyfile=None, certfile=None,
+                 server_side=False, cert_reqs=CERT_NONE,
+                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
+                 do_handshake_on_connect=True, *args, **kw):
+        if not isinstance(sock, GreenSocket):
+            sock = GreenSocket(sock)
+
+        self.act_non_blocking = sock.act_non_blocking
+
+        if six.PY2:
+            # On Python 2 SSLSocket constructor queries the timeout, it'd break without
+            # this assignment
+            self._timeout = sock.gettimeout()
+
+        # nonblocking socket handshaking on connect got disabled so let's pretend it's disabled
+        # even when it's on
+        super(GreenSSLSocket, self).__init__(
+            sock.fd, keyfile, certfile, server_side, cert_reqs, ssl_version,
+            ca_certs, do_handshake_on_connect and six.PY2, *args, **kw)
+
+        # the superclass initializer trashes the methods so we remove
+        # the local-object versions of them and let the actual class
+        # methods shine through
+        # Note: This is for Python 2
+        try:
+            for fn in orig_socket._delegate_methods:
+                delattr(self, fn)
+        except AttributeError:
+            pass
+
+        if six.PY3:
+            # Python 3 SSLSocket construction process overwrites the timeout so restore it
+            self._timeout = sock.gettimeout()
+
+            # it also sets timeout to None internally apparently (tested with 3.4.2)
+            _original_sslsocket.settimeout(self, 0.0)
+            assert _original_sslsocket.gettimeout(self) == 0.0
+
+            # see note above about handshaking
+            self.do_handshake_on_connect = do_handshake_on_connect
+            if do_handshake_on_connect and self._connected:
+                self.do_handshake()
+
+    def settimeout(self, timeout):
+        self._timeout = timeout
+
+    def gettimeout(self):
+        return self._timeout
+
+    def setblocking(self, flag):
+        if flag:
+            self.act_non_blocking = False
+            self._timeout = None
+        else:
+            self.act_non_blocking = True
+            self._timeout = 0.0
+
+    def _call_trampolining(self, func, *a, **kw):
+        if self.act_non_blocking:
+            return func(*a, **kw)
+        else:
+            while True:
+                try:
+                    return func(*a, **kw)
+                except SSLError as exc:
+                    if get_errno(exc) == SSL_ERROR_WANT_READ:
+                        trampoline(self,
+                                   read=True,
+                                   timeout=self.gettimeout(),
+                                   timeout_exc=timeout_exc('timed out'))
+                    elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
+                        trampoline(self,
+                                   write=True,
+                                   timeout=self.gettimeout(),
+                                   timeout_exc=timeout_exc('timed out'))
+                    else:
+                        raise
+
+    def write(self, data):
+        """Write DATA to the underlying SSL channel.  Returns
+        number of bytes of DATA actually transmitted."""
+        return self._call_trampolining(
+            super(GreenSSLSocket, self).write, data)
+
+    def read(self, *args, **kwargs):
+        """Read up to LEN bytes and return them.
+        Return zero-length string on EOF."""
+        try:
+            return self._call_trampolining(
+                super(GreenSSLSocket, self).read, *args, **kwargs)
+        except IOClosed:
+            return b''
+
+    def send(self, data, flags=0):
+        if self._sslobj:
+            return self._call_trampolining(
+                super(GreenSSLSocket, self).send, data, flags)
+        else:
+            trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+            return socket.send(self, data, flags)
+
+    def sendto(self, data, addr, flags=0):
+        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+        if self._sslobj:
+            raise ValueError("sendto not allowed on instances of %s" %
+                             self.__class__)
+        else:
+            trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+            return socket.sendto(self, data, addr, flags)
+
+    def sendall(self, data, flags=0):
+        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+        if self._sslobj:
+            if flags != 0:
+                raise ValueError(
+                    "non-zero flags not allowed in calls to sendall() on %s" %
+                    self.__class__)
+            amount = len(data)
+            count = 0
+            data_to_send = data
+            while count < amount:
+                v = self.send(data_to_send)
+                count += v
+                if v == 0:
+                    trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+                else:
+                    data_to_send = data[count:]
+            return amount
+        else:
+            while True:
+                try:
+                    return socket.sendall(self, data, flags)
+                except orig_socket.error as e:
+                    if self.act_non_blocking:
+                        raise
+                    erno = get_errno(e)
+                    if erno in greenio.SOCKET_BLOCKING:
+                        trampoline(self, write=True,
+                                   timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+                    elif erno in greenio.SOCKET_CLOSED:
+                        return ''
+                    raise
+
+    def recv(self, buflen=1024, flags=0):
+        return self._base_recv(buflen, flags, into=False)
+
+    def recv_into(self, buffer, nbytes=None, flags=0):
+        # Copied verbatim from CPython
+        if buffer and nbytes is None:
+            nbytes = len(buffer)
+        elif nbytes is None:
+            nbytes = 1024
+        # end of CPython code
+
+        return self._base_recv(nbytes, flags, into=True, buffer_=buffer)
+
+    def _base_recv(self, nbytes, flags, into, buffer_=None):
+        if into:
+            plain_socket_function = socket.recv_into
+        else:
+            plain_socket_function = socket.recv
+
+        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+        if self._sslobj:
+            if flags != 0:
+                raise ValueError(
+                    "non-zero flags not allowed in calls to %s() on %s" %
+                    (plain_socket_function.__name__, self.__class__))
+            if sys.version_info < (2, 7) and into:
+                # Python 2.6 SSLSocket.read() doesn't support reading into
+                # a given buffer so we need to emulate
+                data = self.read(nbytes)
+                buffer_[:len(data)] = data
+                read = len(data)
+            elif into:
+                read = self.read(nbytes, buffer_)
+            else:
+                read = self.read(nbytes)
+            return read
+        else:
+            while True:
+                try:
+                    args = [self, nbytes, flags]
+                    if into:
+                        args.insert(1, buffer_)
+                    return plain_socket_function(*args)
+                except orig_socket.error as e:
+                    if self.act_non_blocking:
+                        raise
+                    erno = get_errno(e)
+                    if erno in greenio.SOCKET_BLOCKING:
+                        try:
+                            trampoline(
+                                self, read=True,
+                                timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+                        except IOClosed:
+                            return b''
+                    elif erno in greenio.SOCKET_CLOSED:
+                        return b''
+                    raise
+
+    def recvfrom(self, addr, buflen=1024, flags=0):
+        if not self.act_non_blocking:
+            trampoline(self, read=True, timeout=self.gettimeout(),
+                       timeout_exc=timeout_exc('timed out'))
+        return super(GreenSSLSocket, self).recvfrom(addr, buflen, flags)
+
+    def recvfrom_into(self, buffer, nbytes=None, flags=0):
+        if not self.act_non_blocking:
+            trampoline(self, read=True, timeout=self.gettimeout(),
+                       timeout_exc=timeout_exc('timed out'))
+        return super(GreenSSLSocket, self).recvfrom_into(buffer, nbytes, flags)
+
+    def unwrap(self):
+        return GreenSocket(self._call_trampolining(
+            super(GreenSSLSocket, self).unwrap))
+
+    def do_handshake(self):
+        """Perform a TLS/SSL handshake."""
+        return self._call_trampolining(
+            super(GreenSSLSocket, self).do_handshake)
+
+    def _socket_connect(self, addr):
+        real_connect = socket.connect
+        if self.act_non_blocking:
+            return real_connect(self, addr)
+        else:
+            # *NOTE: gross, copied code from greenio because it's not factored
+            # well enough to reuse
+            if self.gettimeout() is None:
+                while True:
+                    try:
+                        return real_connect(self, addr)
+                    except orig_socket.error as exc:
+                        if get_errno(exc) in CONNECT_ERR:
+                            trampoline(self, write=True)
+                        elif get_errno(exc) in CONNECT_SUCCESS:
+                            return
+                        else:
+                            raise
+            else:
+                end = time.time() + self.gettimeout()
+                while True:
+                    try:
+                        real_connect(self, addr)
+                    except orig_socket.error as exc:
+                        if get_errno(exc) in CONNECT_ERR:
+                            trampoline(
+                                self, write=True,
+                                timeout=end - time.time(), timeout_exc=timeout_exc('timed out'))
+                        elif get_errno(exc) in CONNECT_SUCCESS:
+                            return
+                        else:
+                            raise
+                    if time.time() >= end:
+                        raise timeout_exc('timed out')
+
+    def connect(self, addr):
+        """Connects to remote ADDR, and then wraps the connection in
+        an SSL channel."""
+        # *NOTE: grrrrr copied this code from ssl.py because of the reference
+        # to socket.connect which we don't want to call directly
+        if self._sslobj:
+            raise ValueError("attempt to connect already-connected SSLSocket!")
+        self._socket_connect(addr)
+        server_side = False
+        try:
+            sslwrap = _ssl.sslwrap
+        except AttributeError:
+            # sslwrap was removed in Python 3.x and, later, in 2.7.9
+            if six.PY2:
+                sslobj = self._context._wrap_socket(self._sock, server_side, ssl_sock=self)
+            else:
+                context = self.context if PY33 else self._context
+                sslobj = context._wrap_socket(self, server_side)
+        else:
+            sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
+                             self.cert_reqs, self.ssl_version,
+                             self.ca_certs, *([self.ciphers] if has_ciphers else []))
+
+        try:
+            # This is added in Python 3.5, http://bugs.python.org/issue21965
+            SSLObject
+        except NameError:
+            self._sslobj = sslobj
+        else:
+            self._sslobj = SSLObject(sslobj, owner=self)
+
+        if self.do_handshake_on_connect:
+            self.do_handshake()
+
+    def accept(self):
+        """Accepts a new connection from a remote client, and returns
+        a tuple containing that new connection wrapped with a server-side
+        SSL channel, and the address of the remote client."""
+        # RDW grr duplication of code from greenio
+        if self.act_non_blocking:
+            newsock, addr = socket.accept(self)
+        else:
+            while True:
+                try:
+                    newsock, addr = socket.accept(self)
+                    set_nonblocking(newsock)
+                    break
+                except orig_socket.error as e:
+                    if get_errno(e) not in greenio.SOCKET_BLOCKING:
+                        raise
+                    trampoline(self, read=True, timeout=self.gettimeout(),
+                               timeout_exc=timeout_exc('timed out'))
+
+        new_ssl = type(self)(
+            newsock,
+            keyfile=self.keyfile,
+            certfile=self.certfile,
+            server_side=True,
+            cert_reqs=self.cert_reqs,
+            ssl_version=self.ssl_version,
+            ca_certs=self.ca_certs,
+            do_handshake_on_connect=False,
+            suppress_ragged_eofs=self.suppress_ragged_eofs)
+        return (new_ssl, addr)
+
+    def dup(self):
+        raise NotImplementedError("Can't dup an ssl object")
+
+SSLSocket = GreenSSLSocket
+
+
+def wrap_socket(sock, *a, **kw):
+    return GreenSSLSocket(sock, *a, **kw)
+
+
+if hasattr(__ssl, 'sslwrap_simple'):
+    def sslwrap_simple(sock, keyfile=None, certfile=None):
+        """A replacement for the old socket.ssl function.  Designed
+        for compatibility with Python 2.5 and earlier.  Will disappear in
+        Python 3.0."""
+        ssl_sock = GreenSSLSocket(sock, keyfile=keyfile, certfile=certfile,
+                                  server_side=False,
+                                  cert_reqs=CERT_NONE,
+                                  ssl_version=PROTOCOL_SSLv23,
+                                  ca_certs=None)
+        return ssl_sock
+
+
+if hasattr(__ssl, 'SSLContext'):
+    _original_sslcontext = __ssl.SSLContext
+
+    class GreenSSLContext(_original_sslcontext):
+        __slots__ = ()
+
+        def wrap_socket(self, sock, *a, **kw):
+            return GreenSSLSocket(sock, *a, _context=self, **kw)
+
+        # https://github.com/eventlet/eventlet/issues/371
+        # Thanks to Gevent developers for sharing patch to this problem.
+        if hasattr(_original_sslcontext.options, 'setter'):
+            # In 3.6, these became properties. They want to access the
+            # property __set__ method in the superclass, and they do so by using
+            # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
+            # patch, which causes infinite recursion.
+            # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
+            @_original_sslcontext.options.setter
+            def options(self, value):
+                super(_original_sslcontext, _original_sslcontext).options.__set__(self, value)
+
+            @_original_sslcontext.verify_flags.setter
+            def verify_flags(self, value):
+                super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value)
+
+            @_original_sslcontext.verify_mode.setter
+            def verify_mode(self, value):
+                super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value)
+
+    SSLContext = GreenSSLContext
+
+    if hasattr(__ssl, 'create_default_context'):
+        _original_create_default_context = __ssl.create_default_context
+
+        def green_create_default_context(*a, **kw):
+            # We can't just monkey-patch on the green version of `wrap_socket`
+            # on to SSLContext instances, but SSLContext.create_default_context
+            # does a bunch of work. Rather than re-implementing it all, just
+            # switch out the __class__ to get our `wrap_socket` implementation
+            context = _original_create_default_context(*a, **kw)
+            context.__class__ = GreenSSLContext
+            return context
+
+        create_default_context = green_create_default_context
+        _create_default_https_context = green_create_default_context
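Because create_default_context() above swaps the context's class to GreenSSLContext, sockets wrapped through it come back as GreenSSLSocket and trampoline on SSL_ERROR_WANT_READ/WRITE. A hedged sketch (assumes network access and the Python versions eventlet 0.21 targets):

    # Sketch: TLS over green sockets; the handshake yields to the hub.
    from eventlet.green import socket, ssl

    ctx = ssl.create_default_context()           # actually a GreenSSLContext
    raw = socket.create_connection(('example.com', 443), timeout=5)
    tls = ctx.wrap_socket(raw, server_hostname='example.com')
    tls.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
    print(tls.recv(64))
    tls.close()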

+ 135 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/subprocess.py

@@ -0,0 +1,135 @@
+import errno
+import sys
+from types import FunctionType
+
+import eventlet
+from eventlet import greenio
+from eventlet import patcher
+from eventlet.green import select, threading, time
+from eventlet.support import six
+
+
+__patched__ = ['call', 'check_call', 'Popen']
+to_patch = [('select', select), ('threading', threading), ('time', time)]
+
+if sys.version_info > (3, 4):
+    from eventlet.green import selectors
+    to_patch.append(('selectors', selectors))
+
+patcher.inject('subprocess', globals(), *to_patch)
+subprocess_orig = patcher.original("subprocess")
+mswindows = sys.platform == "win32"
+
+
+if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
+    # Backported from Python 3.3.
+    # https://bitbucket.org/eventlet/eventlet/issue/89
+    class TimeoutExpired(Exception):
+        """This exception is raised when the timeout expires while waiting for
+        a child process.
+        """
+
+        def __init__(self, cmd, timeout, output=None):
+            self.cmd = cmd
+            self.timeout = timeout
+            self.output = output
+
+        def __str__(self):
+            return ("Command '%s' timed out after %s seconds" %
+                    (self.cmd, self.timeout))
+
+
+# This is the meat of this module, the green version of Popen.
+class Popen(subprocess_orig.Popen):
+    """eventlet-friendly version of subprocess.Popen"""
+    # We do not believe that Windows pipes support non-blocking I/O. At least,
+    # the Python file objects stored on our base-class object have no
+    # setblocking() method, and the Python fcntl module doesn't exist on
+    # Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
+    # this __init__() override is to wrap the pipes for eventlet-friendly
+    # non-blocking I/O, don't even bother overriding it on Windows.
+    if not mswindows:
+        def __init__(self, args, bufsize=0, *argss, **kwds):
+            self.args = args
+            # Forward the call to base-class constructor
+            subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
+            # Now wrap the pipes, if any. This logic is loosely borrowed from
+            # eventlet.processes.Process.run() method.
+            for attr in "stdin", "stdout", "stderr":
+                pipe = getattr(self, attr)
+                if pipe is not None and type(pipe) != greenio.GreenPipe:
+                    # https://github.com/eventlet/eventlet/issues/243
+                    # AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
+                    mode = getattr(pipe, 'mode', '')
+                    if not mode:
+                        if pipe.readable():
+                            mode += 'r'
+                        if pipe.writable():
+                            mode += 'w'
+                        # ValueError: can't have unbuffered text I/O
+                        if bufsize == 0:
+                            bufsize = -1
+                    wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
+                    setattr(self, attr, wrapped_pipe)
+        __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
+
+    def wait(self, timeout=None, check_interval=0.01):
+        # Instead of a blocking OS call, this version of wait() uses logic
+        # borrowed from the eventlet 0.2 processes.Process.wait() method.
+        if timeout is not None:
+            endtime = time.time() + timeout
+        try:
+            while True:
+                status = self.poll()
+                if status is not None:
+                    return status
+                if timeout is not None and time.time() > endtime:
+                    raise TimeoutExpired(self.args, timeout)
+                eventlet.sleep(check_interval)
+        except OSError as e:
+            if e.errno == errno.ECHILD:
+                # no child process, this happens if the child process
+                # already died and has been cleaned up
+                return -1
+            else:
+                raise
+    wait.__doc__ = subprocess_orig.Popen.wait.__doc__
+
+    if not mswindows:
+        # don't want to rewrite the original _communicate() method, we
+        # just want a version that uses eventlet.green.select.select()
+        # instead of select.select().
+        _communicate = FunctionType(
+            six.get_function_code(six.get_unbound_function(
+                subprocess_orig.Popen._communicate)),
+            globals())
+        try:
+            _communicate_with_select = FunctionType(
+                six.get_function_code(six.get_unbound_function(
+                    subprocess_orig.Popen._communicate_with_select)),
+                globals())
+            _communicate_with_poll = FunctionType(
+                six.get_function_code(six.get_unbound_function(
+                    subprocess_orig.Popen._communicate_with_poll)),
+                globals())
+        except AttributeError:
+            pass
+
+
+# Borrow subprocess.call() and check_call(), but patch them so they reference
+# OUR Popen class rather than subprocess.Popen.
+def patched_function(function):
+    new_function = FunctionType(six.get_function_code(function), globals())
+    if six.PY3:
+        new_function.__kwdefaults__ = function.__kwdefaults__
+    new_function.__defaults__ = function.__defaults__
+    return new_function
+
+
+call = patched_function(subprocess_orig.call)
+check_call = patched_function(subprocess_orig.check_call)
+# check_output is Python 2.7+
+if hasattr(subprocess_orig, 'check_output'):
+    __patched__.append('check_output')
+    check_output = patched_function(subprocess_orig.check_output)
+del patched_function
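The green Popen.wait() above polls with eventlet.sleep() rather than blocking in the OS, so other greenthreads keep running while a child executes. A sketch (assumes a Unix `sleep` binary):

    # Sketch: waiting on a child process without stalling the hub.
    import eventlet
    from eventlet.green import subprocess

    p = subprocess.Popen(['sleep', '1'])
    ticker = eventlet.spawn(lambda: [eventlet.sleep(0.2) for _ in range(4)])
    print(p.wait())              # 0; the ticker ran concurrently
    ticker.wait()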

+ 113 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/thread.py

@@ -0,0 +1,113 @@
+"""Implements the standard thread module, using greenthreads."""
+from eventlet.support.six.moves import _thread as __thread
+from eventlet.support import greenlets as greenlet, six
+from eventlet import greenthread
+from eventlet.semaphore import Semaphore as LockType
+import sys
+
+
+__patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock',
+               'allocate', 'exit', 'interrupt_main', 'stack_size', '_local',
+               'LockType', '_count']
+
+error = __thread.error
+__threadcount = 0
+
+
+if six.PY3:
+    def _set_sentinel():
+        # TODO this is dummy code; reimplementing it may be needed:
+        # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
+        return allocate_lock()
+
+    TIMEOUT_MAX = __thread.TIMEOUT_MAX
+
+
+def _count():
+    return __threadcount
+
+
+def get_ident(gr=None):
+    if gr is None:
+        return id(greenlet.getcurrent())
+    else:
+        return id(gr)
+
+
+def __thread_body(func, args, kwargs):
+    global __threadcount
+    __threadcount += 1
+    try:
+        func(*args, **kwargs)
+    finally:
+        __threadcount -= 1
+
+
+def start_new_thread(function, args=(), kwargs=None):
+    if (sys.version_info >= (3, 4)
+            and getattr(function, '__module__', '') == 'threading'
+            and hasattr(function, '__self__')):
+        # Since Python 3.4, threading.Thread uses an internal lock
+        # automatically released when the python thread state is deleted.
+        # With monkey patching, eventlet uses green threads without python
+        # thread state, so the lock is not automatically released.
+        #
+        # Wrap _bootstrap_inner() to release explicitly the thread state lock
+        # when the thread completes.
+        thread = function.__self__
+        bootstrap_inner = thread._bootstrap_inner
+
+        def wrap_bootstrap_inner():
+            try:
+                bootstrap_inner()
+            finally:
+                # The lock can be cleared (ex: by a fork())
+                if thread._tstate_lock is not None:
+                    thread._tstate_lock.release()
+
+        thread._bootstrap_inner = wrap_bootstrap_inner
+
+    kwargs = kwargs or {}
+    g = greenthread.spawn_n(__thread_body, function, args, kwargs)
+    return get_ident(g)
+
+
+start_new = start_new_thread
+
+
+def allocate_lock(*a):
+    return LockType(1)
+
+
+allocate = allocate_lock
+
+
+def exit():
+    raise greenlet.GreenletExit
+
+
+exit_thread = __thread.exit_thread
+
+
+def interrupt_main():
+    curr = greenlet.getcurrent()
+    if curr.parent and not curr.parent.dead:
+        curr.parent.throw(KeyboardInterrupt())
+    else:
+        raise KeyboardInterrupt()
+
+
+if hasattr(__thread, 'stack_size'):
+    __original_stack_size__ = __thread.stack_size
+
+    def stack_size(size=None):
+        if size is None:
+            return __original_stack_size__()
+        if size > __original_stack_size__():
+            return __original_stack_size__(size)
+        else:
+            # not going to decrease stack_size, because otherwise other
+            # greenlets in this thread will suffer
+            pass
+
+from eventlet.corolocal import local as _local
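start_new_thread() above spawns a greenthread and returns its id as the thread ident. A sketch:

    # Sketch: the green thread module runs "threads" as greenthreads.
    import eventlet
    from eventlet.green import thread

    idents = []
    thread.start_new_thread(lambda: idents.append(thread.get_ident()), ())
    eventlet.sleep(0)            # yield so the spawned greenthread runs
    print(idents)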

+ 120 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/threading.py

@@ -0,0 +1,120 @@
+"""Implements the standard threading module, using greenthreads."""
+from eventlet import patcher
+from eventlet.green import thread
+from eventlet.green import time
+from eventlet.support import greenlets as greenlet, six
+
+__patched__ = ['_start_new_thread', '_allocate_lock',
+               '_sleep', 'local', 'stack_size', 'Lock', 'currentThread',
+               'current_thread', '_after_fork', '_shutdown']
+
+if six.PY2:
+    __patched__ += ['_get_ident']
+else:
+    __patched__ += ['get_ident', '_set_sentinel']
+
+__orig_threading = patcher.original('threading')
+__threadlocal = __orig_threading.local()
+
+
+patcher.inject(
+    'threading',
+    globals(),
+    ('thread' if six.PY2 else '_thread', thread),
+    ('time', time))
+
+del patcher
+
+
+_count = 1
+
+
+class _GreenThread(object):
+    """Wrapper for GreenThread objects to provide Thread-like attributes
+    and methods"""
+
+    def __init__(self, g):
+        global _count
+        self._g = g
+        self._name = 'GreenThread-%d' % _count
+        _count += 1
+
+    def __repr__(self):
+        return '<_GreenThread(%s, %r)>' % (self._name, self._g)
+
+    def join(self, timeout=None):
+        return self._g.wait()
+
+    def getName(self):
+        return self._name
+    get_name = getName
+
+    def setName(self, name):
+        self._name = str(name)
+    set_name = setName
+
+    name = property(getName, setName)
+
+    ident = property(lambda self: id(self._g))
+
+    def isAlive(self):
+        return True
+    is_alive = isAlive
+
+    daemon = property(lambda self: True)
+
+    def isDaemon(self):
+        return self.daemon
+    is_daemon = isDaemon
+
+
+__threading = None
+
+
+def _fixup_thread(t):
+    # Some third-party packages (lockfile) will try to patch the
+    # threading.Thread class with a get_name attribute if it doesn't
+    # exist. Since we might return Thread objects from the original
+    # threading package that won't get patched, let's make sure each
+    # individual object gets patched too if our patched threading.Thread
+    # class has been patched. This is why monkey patching can be bad...
+    global __threading
+    if not __threading:
+        __threading = __import__('threading')
+
+    if (hasattr(__threading.Thread, 'get_name') and
+            not hasattr(t, 'get_name')):
+        t.get_name = t.getName
+    return t
+
+
+def current_thread():
+    g = greenlet.getcurrent()
+    if not g:
+        # Not currently in a greenthread, fall back to standard function
+        return _fixup_thread(__orig_threading.current_thread())
+
+    try:
+        active = __threadlocal.active
+    except AttributeError:
+        active = __threadlocal.active = {}
+
+    try:
+        t = active[id(g)]
+    except KeyError:
+        # Add green thread to active if we can clean it up on exit
+        def cleanup(g):
+            del active[id(g)]
+        try:
+            g.link(cleanup)
+        except AttributeError:
+            # Not a GreenThread type, so there's no way to hook into
+            # the green thread exiting. Fall back to the standard
+            # function then.
+            t = _fixup_thread(__orig_threading.currentThread())
+        else:
+            t = active[id(g)] = _GreenThread(g)
+
+    return t
+
+currentThread = current_thread
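
A short sketch of what the wrapper above buys you: called from inside a spawned greenthread, current_thread() returns a _GreenThread with Thread-like attributes (the printed output is illustrative):

    import eventlet
    from eventlet.green import threading

    def report():
        t = threading.current_thread()
        print(t.name, t.is_alive())   # e.g. "GreenThread-1 True"

    eventlet.spawn(report).wait()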

+ 6 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/time.py

@@ -0,0 +1,6 @@
+__time = __import__('time')
+from eventlet.patcher import slurp_properties
+__patched__ = ['sleep']
+slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time))
+from eventlet.greenthread import sleep
+sleep  # silence pyflakes
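
A minimal sketch of the green sleep: it yields to the hub instead of blocking the OS thread, so other greenthreads keep running (assumes eventlet is importable):

    import eventlet
    from eventlet.green import time

    gt = eventlet.spawn(lambda: 'done')
    time.sleep(0)              # cooperative yield; the spawned greenthread runs
    assert gt.wait() == 'done'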

+ 40 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/__init__.py

@@ -0,0 +1,40 @@
+from eventlet import patcher
+from eventlet.green import socket
+from eventlet.green import time
+from eventlet.green import httplib
+from eventlet.green import ftplib
+from eventlet.support import six
+
+if six.PY2:
+    to_patch = [('socket', socket), ('httplib', httplib),
+                ('time', time), ('ftplib', ftplib)]
+    try:
+        from eventlet.green import ssl
+        to_patch.append(('ssl', ssl))
+    except ImportError:
+        pass
+
+    patcher.inject('urllib', globals(), *to_patch)
+    try:
+        URLopener
+    except NameError:
+        patcher.inject('urllib.request', globals(), *to_patch)
+
+
+    # patch a bunch of things that have imports inside the
+    # function body; this is lame and hacky but I don't feel
+    # too bad because urllib is a hacky pile of junk that no
+    # one should be using anyhow
+    URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib))
+    if hasattr(URLopener, 'open_https'):
+        URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib))
+
+    URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib))
+    ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib))
+    ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib))
+
+    del patcher
+
+    # Run test program when run as a script
+    if __name__ == '__main__':
+        main()
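
For illustration, a sketch of what patcher.patch_function (used above) does: it swaps the named modules into sys.modules only for the duration of the call, so imports inside the function body resolve to the green versions (the `snooze` function is hypothetical):

    from eventlet import patcher
    from eventlet.green import time as green_time

    def snooze():
        import time        # inside the wrapper, this resolves to green_time
        time.sleep(0.1)    # cooperative sleep instead of a blocking one

    green_snooze = patcher.patch_function(snooze, ('time', green_time))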

+ 4 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/error.py

@@ -0,0 +1,4 @@
+from eventlet import patcher
+from eventlet.green.urllib import response
+patcher.inject('urllib.error', globals(), ('urllib.response', response))
+del patcher

+ 3 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/parse.py

@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.parse', globals())
+del patcher

+ 50 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/request.py

@@ -0,0 +1,50 @@
+from eventlet import patcher
+from eventlet.green import ftplib, http, os, socket, time
+from eventlet.green.http import client as http_client
+from eventlet.green.urllib import error, parse, response
+
+# TODO should we also have green email version?
+# import email
+
+
+to_patch = [
+    # This (http module) is needed here, otherwise test__greenness hangs
+    # forever on Python 3 because parts of non-green http (including
+    # http.client) leak into our patched urllib.request. There may be a nicer
+    # way to handle this (I didn't dig too deep) but this does the job. Jakub
+    ('http', http),
+
+    ('http.client', http_client),
+    ('os', os),
+    ('socket', socket),
+    ('time', time),
+    ('urllib.error', error),
+    ('urllib.parse', parse),
+    ('urllib.response', response),
+]
+
+try:
+    from eventlet.green import ssl
+except ImportError:
+    pass
+else:
+    to_patch.append(('ssl', ssl))
+
+patcher.inject('urllib.request', globals(), *to_patch)
+del to_patch
+
+to_patch_in_functions = [('ftplib', ftplib)]
+del ftplib
+
+FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
+URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)
+
+ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)
+
+ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
+ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)
+
+del error
+del parse
+del response
+del to_patch_in_functions
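
After the injection above, the familiar urllib.request names in this module are green. A sketch (the URL is arbitrary; assumes Python 3):

    from eventlet.green.urllib import request

    resp = request.urlopen('http://example.com/')  # blocks only this greenthread
    body = resp.read()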

+ 3 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib/response.py

@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.response', globals())
+del patcher

+ 20 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/urllib2.py

@@ -0,0 +1,20 @@
+from eventlet import patcher
+from eventlet.green import ftplib
+from eventlet.green import httplib
+from eventlet.green import socket
+from eventlet.green import ssl
+from eventlet.green import time
+from eventlet.green import urllib
+
+patcher.inject(
+    'urllib2',
+    globals(),
+    ('httplib', httplib),
+    ('socket', socket),
+    ('ssl', ssl),
+    ('time', time),
+    ('urllib', urllib))
+
+FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib))
+
+del patcher

+ 468 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/green/zmq.py

@@ -0,0 +1,468 @@
+# -*- coding: utf-8 -*-
+"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
+found in :mod:`pyzmq <zmq>` to be non-blocking.
+"""
+
+from __future__ import with_statement
+
+__zmq__ = __import__('zmq')
+from eventlet import hubs
+from eventlet.patcher import slurp_properties
+from eventlet.support import greenlets as greenlet
+
+__patched__ = ['Context', 'Socket']
+slurp_properties(__zmq__, globals(), ignore=__patched__)
+
+from collections import deque
+
+try:
+    # alias XREQ/XREP to DEALER/ROUTER if available
+    if not hasattr(__zmq__, 'XREQ'):
+        XREQ = DEALER
+    if not hasattr(__zmq__, 'XREP'):
+        XREP = ROUTER
+except NameError:
+    pass
+
+
+class LockReleaseError(Exception):
+    pass
+
+
+class _QueueLock(object):
+    """A Lock that can be acquired by at most one thread. Any other
+    thread calling acquire will be blocked in a queue. When release
+    is called, the threads are awoken in the order they blocked,
+    one at a time. This lock can be acquired recursively by the same
+    thread."""
+
+    def __init__(self):
+        self._waiters = deque()
+        self._count = 0
+        self._holder = None
+        self._hub = hubs.get_hub()
+
+    def __nonzero__(self):
+        return bool(self._count)
+
+    __bool__ = __nonzero__
+
+    def __enter__(self):
+        self.acquire()
+
+    def __exit__(self, type, value, traceback):
+        self.release()
+
+    def acquire(self):
+        current = greenlet.getcurrent()
+        if (self._waiters or self._count > 0) and self._holder is not current:
+            # block until lock is free
+            self._waiters.append(current)
+            self._hub.switch()
+            w = self._waiters.popleft()
+
+            assert w is current, 'Waiting threads woken out of order'
+            assert self._count == 0, 'After waking a thread, the lock must be unacquired'
+
+        self._holder = current
+        self._count += 1
+
+    def release(self):
+        if self._count <= 0:
+            raise LockReleaseError("Cannot release unacquired lock")
+
+        self._count -= 1
+        if self._count == 0:
+            self._holder = None
+            if self._waiters:
+                # wake next
+                self._hub.schedule_call_global(0, self._waiters[0].switch)
+
+
+class _BlockedThread(object):
+    """Is either empty, or represents a single blocked thread that
+    blocked itself by calling the block() method. The thread can be
+    awoken by calling wake(). Wake() can be called multiple times and
+    all but the first call will have no effect."""
+
+    def __init__(self):
+        self._blocked_thread = None
+        self._wakeupper = None
+        self._hub = hubs.get_hub()
+
+    def __nonzero__(self):
+        return self._blocked_thread is not None
+
+    __bool__ = __nonzero__
+
+    def block(self, deadline=None):
+        if self._blocked_thread is not None:
+            raise Exception("Cannot block more than one thread on one BlockedThread")
+        self._blocked_thread = greenlet.getcurrent()
+
+        if deadline is not None:
+            self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake)
+
+        try:
+            self._hub.switch()
+        finally:
+            self._blocked_thread = None
+            # cleanup the wakeup task
+            if self._wakeupper is not None:
+                # Important to cancel the wakeup task so it doesn't
+                # spuriously wake this greenthread later on.
+                self._wakeupper.cancel()
+                self._wakeupper = None
+
+    def wake(self):
+        """Schedules the blocked thread to be awoken and return
+        True. If wake has already been called or if there is no
+        blocked thread, then this call has no effect and returns
+        False."""
+        if self._blocked_thread is not None and self._wakeupper is None:
+            self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch)
+            return True
+        return False
+
+
+class Context(__zmq__.Context):
+    """Subclass of :class:`zmq.Context`
+    """
+
+    def socket(self, socket_type):
+        """Overridden method to ensure that the green version of socket is used
+
+        Behaves the same as :meth:`zmq.Context.socket`, but ensures
+        that a :class:`Socket` with all of its send and recv methods set to be
+        non-blocking is returned
+        """
+        if self.closed:
+            raise ZMQError(ENOTSUP)
+        return Socket(self, socket_type)
+
+
+def _wraps(source_fn):
+    """A decorator that copies the __name__ and __doc__ from the given
+    function
+    """
+    def wrapper(dest_fn):
+        dest_fn.__name__ = source_fn.__name__
+        dest_fn.__doc__ = source_fn.__doc__
+        return dest_fn
+    return wrapper
+
+# Implementation notes: Each socket in 0mq contains a pipe that the
+# background IO threads use to communicate with the socket. These
+# events are important because they tell the socket when it is able to
+# send and when it has messages waiting to be received. The read end
+# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
+#
+# Events are read from the socket's event pipe only on the thread that
+# the 0mq context is associated with, which is the native thread the
+# greenthreads are running on, and the only operations that cause the
+# events to be read and processed are send(), recv() and
+# getsockopt(zmq.EVENTS). This means that after doing any of these
+# three operations, the ability of the socket to send or receive a
+# message without blocking may have changed, but after the events are
+# read the FD is no longer readable so the hub may not signal our
+# listener.
+#
+# If we understand that after calling send() a message might be ready
+# to be received and that after calling recv() a message might be able
+# to be sent, what should we do next? There are two approaches:
+#
+#  1. Always wake the other thread if there is one waiting. This
+#  wakeup may be spurious because the socket might not actually be
+#  ready for a send() or recv().  However, if a thread is in a
+#  tight-loop successfully calling send() or recv() then the wakeups
+#  are naturally batched and there's very little cost added to each
+#  send/recv call.
+#
+# or
+#
+#  2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
+#  thread should be woken up. This avoids spurious wake-ups but may
+#  add overhead because getsockopt will cause all events to be
+#  processed, whereas send and recv throttle processing
+#  events. Admittedly, all of the events will need to be processed
+#  eventually, but it is likely faster to batch the processing.
+#
+# Which approach is better? I have no idea.
+#
+# TODO:
+# - Support MessageTrackers and make MessageTracker.wait green
+
+_Socket = __zmq__.Socket
+_Socket_recv = _Socket.recv
+_Socket_send = _Socket.send
+_Socket_send_multipart = _Socket.send_multipart
+_Socket_recv_multipart = _Socket.recv_multipart
+_Socket_send_string = _Socket.send_string
+_Socket_recv_string = _Socket.recv_string
+_Socket_send_pyobj = _Socket.send_pyobj
+_Socket_recv_pyobj = _Socket.recv_pyobj
+_Socket_send_json = _Socket.send_json
+_Socket_recv_json = _Socket.recv_json
+_Socket_getsockopt = _Socket.getsockopt
+
+
+class Socket(_Socket):
+    """Green version of :class:`zmq.core.socket.Socket
+
+    The following three methods are always overridden:
+        * send
+        * recv
+        * getsockopt
+    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
+    is deferred to the hub (using :func:`eventlet.hubs.trampoline`) if a
+    ``zmq.EAGAIN`` (retry) error is raised
+
+    For some socket types, the following methods are also overridden:
+        * send_multipart
+        * recv_multipart
+    """
+
+    def __init__(self, context, socket_type):
+        super(Socket, self).__init__(context, socket_type)
+
+        self.__dict__['_eventlet_send_event'] = _BlockedThread()
+        self.__dict__['_eventlet_recv_event'] = _BlockedThread()
+        self.__dict__['_eventlet_send_lock'] = _QueueLock()
+        self.__dict__['_eventlet_recv_lock'] = _QueueLock()
+
+        def event(fd):
+            # Some events arrived at the zmq socket. This may mean
+            # there's a message that can be read or there's space for
+            # a message to be written.
+            send_wake = self._eventlet_send_event.wake()
+            recv_wake = self._eventlet_recv_event.wake()
+            if not send_wake and not recv_wake:
+                # if no waiting send or recv thread was woken up, then
+                # force the zmq socket's events to be processed to
+                # avoid repeated wakeups
+                _Socket_getsockopt(self, EVENTS)
+
+        hub = hubs.get_hub()
+        self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
+                                                      self.getsockopt(FD),
+                                                      event,
+                                                      lambda _: None,
+                                                      lambda: None)
+        self.__dict__['_eventlet_clock'] = hub.clock
+
+    @_wraps(_Socket.close)
+    def close(self, linger=None):
+        super(Socket, self).close(linger)
+        if self._eventlet_listener is not None:
+            hubs.get_hub().remove(self._eventlet_listener)
+            self.__dict__['_eventlet_listener'] = None
+            # wake any blocked threads
+            self._eventlet_send_event.wake()
+            self._eventlet_recv_event.wake()
+
+    @_wraps(_Socket.getsockopt)
+    def getsockopt(self, option):
+        result = _Socket_getsockopt(self, option)
+        if option == EVENTS:
+            # Getting the events causes the zmq socket to process
+            # events which may mean a msg can be sent or received. If
+            # there is a greenthread blocked and waiting for events,
+            # it will miss the edge-triggered read event, so wake it
+            # up.
+            if (result & POLLOUT):
+                self._eventlet_send_event.wake()
+            if (result & POLLIN):
+                self._eventlet_recv_event.wake()
+        return result
+
+    @_wraps(_Socket.send)
+    def send(self, msg, flags=0, copy=True, track=False):
+        """A send method that's safe to use when multiple greenthreads
+        are calling send, send_multipart, recv and recv_multipart on
+        the same socket.
+        """
+        if flags & NOBLOCK:
+            result = _Socket_send(self, msg, flags, copy, track)
+            # Instead of calling both wake methods, could call
+            # self.getsockopt(EVENTS) which would trigger wakeups if
+            # needed.
+            self._eventlet_send_event.wake()
+            self._eventlet_recv_event.wake()
+            return result
+
+        # TODO: pyzmq will copy the message buffer and create Message
+        # objects under some circumstances. We could do that work here
+        # once to avoid doing it every time the send is retried.
+        flags |= NOBLOCK
+        with self._eventlet_send_lock:
+            while True:
+                try:
+                    return _Socket_send(self, msg, flags, copy, track)
+                except ZMQError as e:
+                    if e.errno == EAGAIN:
+                        self._eventlet_send_event.block()
+                    else:
+                        raise
+                finally:
+                    # The call to send processes 0mq events and may
+                    # make the socket ready to recv. Wake the next
+                    # receiver. (Could check EVENTS for POLLIN here)
+                    self._eventlet_recv_event.wake()
+
+    @_wraps(_Socket.send_multipart)
+    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
+        """A send_multipart method that's safe to use when multiple
+        greenthreads are calling send, send_multipart, recv and
+        recv_multipart on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_send_multipart(self, msg_parts, flags, copy, track)
+
+        # acquire lock here so the subsequent calls to send for the
+        # message parts after the first don't block
+        with self._eventlet_send_lock:
+            return _Socket_send_multipart(self, msg_parts, flags, copy, track)
+
+    @_wraps(_Socket.send_string)
+    def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
+        """A send_string method that's safe to use when multiple
+        greenthreads are calling send, send_string, recv and
+        recv_string on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_send_string(self, u, flags, copy, encoding)
+
+        # acquire lock here so the subsequent calls to send for the
+        # message parts after the first don't block
+        with self._eventlet_send_lock:
+            return _Socket_send_string(self, u, flags, copy, encoding)
+
+    @_wraps(_Socket.send_pyobj)
+    def send_pyobj(self, obj, flags=0, protocol=2):
+        """A send_pyobj method that's safe to use when multiple
+        greenthreads are calling send, send_pyobj, recv and
+        recv_pyobj on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_send_pyobj(self, obj, flags, protocol)
+
+        # acquire lock here so the subsequent calls to send for the
+        # message parts after the first don't block
+        with self._eventlet_send_lock:
+            return _Socket_send_pyobj(self, obj, flags, protocol)
+
+    @_wraps(_Socket.send_json)
+    def send_json(self, obj, flags=0, **kwargs):
+        """A send_json method that's safe to use when multiple
+        greenthreads are calling send, send_json, recv and
+        recv_json on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_send_json(self, obj, flags, **kwargs)
+
+        # acquire lock here so the subsequent calls to send for the
+        # message parts after the first don't block
+        with self._eventlet_send_lock:
+            return _Socket_send_json(self, obj, flags, **kwargs)
+
+    @_wraps(_Socket.recv)
+    def recv(self, flags=0, copy=True, track=False):
+        """A recv method that's safe to use when multiple greenthreads
+        are calling send, send_multipart, recv and recv_multipart on
+        the same socket.
+        """
+        if flags & NOBLOCK:
+            msg = _Socket_recv(self, flags, copy, track)
+            # Instead of calling both wake methods, could call
+            # self.getsockopt(EVENTS) which would trigger wakeups if
+            # needed.
+            self._eventlet_send_event.wake()
+            self._eventlet_recv_event.wake()
+            return msg
+
+        deadline = None
+        if hasattr(__zmq__, 'RCVTIMEO'):
+            sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
+            if sock_timeout == -1:
+                pass
+            elif sock_timeout > 0:
+                deadline = self._eventlet_clock() + sock_timeout / 1000.0
+            else:
+                raise ValueError(sock_timeout)
+
+        flags |= NOBLOCK
+        with self._eventlet_recv_lock:
+            while True:
+                try:
+                    return _Socket_recv(self, flags, copy, track)
+                except ZMQError as e:
+                    if e.errno == EAGAIN:
+                        # zmq in its wisdom decided to reuse EAGAIN for timeouts
+                        if deadline is not None and self._eventlet_clock() > deadline:
+                            e.is_timeout = True
+                            raise
+
+                        self._eventlet_recv_event.block(deadline=deadline)
+                    else:
+                        raise
+                finally:
+                    # The call to recv processes 0mq events and may
+                    # make the socket ready to send. Wake the next
+                    # sender. (Could check EVENTS for POLLOUT here)
+                    self._eventlet_send_event.wake()
+
+    @_wraps(_Socket.recv_multipart)
+    def recv_multipart(self, flags=0, copy=True, track=False):
+        """A recv_multipart method that's safe to use when multiple
+        greenthreads are calling send, send_multipart, recv and
+        recv_multipart on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_recv_multipart(self, flags, copy, track)
+
+        # acquire lock here so the subsequent calls to recv for the
+        # message parts after the first don't block
+        with self._eventlet_recv_lock:
+            return _Socket_recv_multipart(self, flags, copy, track)
+
+    @_wraps(_Socket.recv_string)
+    def recv_string(self, flags=0, encoding='utf-8'):
+        """A recv_string method that's safe to use when multiple
+        greenthreads are calling send, send_string, recv and
+        recv_string on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_recv_string(self, flags, encoding)
+
+        # acquire lock here so the subsequent calls to recv for the
+        # message parts after the first don't block
+        with self._eventlet_recv_lock:
+            return _Socket_recv_string(self, flags, encoding)
+
+    @_wraps(_Socket.recv_json)
+    def recv_json(self, flags=0, **kwargs):
+        """A recv_json method that's safe to use when multiple
+        greenthreads are calling send, send_json, recv and
+        recv_json on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_recv_json(self, flags, **kwargs)
+
+        # acquire lock here so the subsequent calls to recv for the
+        # message parts after the first don't block
+        with self._eventlet_recv_lock:
+            return _Socket_recv_json(self, flags, **kwargs)
+
+    @_wraps(_Socket.recv_pyobj)
+    def recv_pyobj(self, flags=0):
+        """A recv_pyobj method that's safe to use when multiple
+        greenthreads are calling send, send_pyobj, recv and
+        recv_pyobj on the same socket.
+        """
+        if flags & NOBLOCK:
+            return _Socket_recv_pyobj(self, flags)
+
+        # acquire lock here so the subsequent calls to recv for the
+        # message parts after the first don't block
+        with self._eventlet_recv_lock:
+            return _Socket_recv_pyobj(self, flags)
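
A sketch of the green Socket in practice: two greenthreads share a REQ/REP pair without blocking the hub (the address and payload are arbitrary; assumes pyzmq is installed):

    import eventlet
    from eventlet.green import zmq

    ctx = zmq.Context()
    rep = ctx.socket(zmq.REP)
    rep.bind('tcp://127.0.0.1:5555')
    req = ctx.socket(zmq.REQ)
    req.connect('tcp://127.0.0.1:5555')

    def responder():
        rep.send(rep.recv())      # blocks only this greenthread

    eventlet.spawn(responder)
    req.send(b'ping')
    assert req.recv() == b'ping'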

+ 8 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/__init__.py

@@ -0,0 +1,8 @@
+from eventlet.support import six
+
+from eventlet.greenio.base import *  # noqa
+
+if six.PY2:
+    from eventlet.greenio.py2 import *  # noqa
+else:
+    from eventlet.greenio.py3 import *  # noqa

+ 494 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/base.py

@@ -0,0 +1,494 @@
+import errno
+import os
+import socket
+import sys
+import time
+import warnings
+
+import eventlet
+from eventlet.hubs import trampoline, notify_opened, IOClosed
+from eventlet.support import get_errno, six
+
+__all__ = [
+    'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
+    'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
+    'shutdown_safe', 'SSL',
+    'socket_timeout',
+]
+
+BUFFER_SIZE = 4096
+CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
+CONNECT_SUCCESS = set((0, errno.EISCONN))
+if sys.platform[:3] == "win":
+    CONNECT_ERR.add(errno.WSAEINVAL)   # Bug 67
+
+if six.PY2:
+    _python2_fileobject = socket._fileobject
+
+_original_socket = eventlet.patcher.original('socket').socket
+
+
+socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)
+
+
+def socket_connect(descriptor, address):
+    """
+    Attempts to connect to the address, returns the descriptor if it succeeds,
+    returns None if it needs to trampoline, and raises any exceptions.
+    """
+    err = descriptor.connect_ex(address)
+    if err in CONNECT_ERR:
+        return None
+    if err not in CONNECT_SUCCESS:
+        raise socket.error(err, errno.errorcode[err])
+    return descriptor
+
+
+def socket_checkerr(descriptor):
+    err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+    if err not in CONNECT_SUCCESS:
+        raise socket.error(err, errno.errorcode[err])
+
+
+def socket_accept(descriptor):
+    """
+    Attempts to accept() on the descriptor, returns a client,address tuple
+    if it succeeds; returns None if it needs to trampoline, and raises
+    any exceptions.
+    """
+    try:
+        return descriptor.accept()
+    except socket.error as e:
+        if get_errno(e) == errno.EWOULDBLOCK:
+            return None
+        raise
+
+
+if sys.platform[:3] == "win":
+    # winsock sometimes throws ENOTCONN
+    SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK,))
+    SOCKET_CLOSED = set((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN))
+else:
+    # oddly, on linux/darwin, an unconnected socket is expected to block,
+    # so we treat ENOTCONN the same as EWOULDBLOCK
+    SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN))
+    SOCKET_CLOSED = set((errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE))
+
+
+def set_nonblocking(fd):
+    """
+    Sets the descriptor to be nonblocking.  Works on many file-like
+    objects as well as sockets.  Only sockets can be nonblocking on
+    Windows, however.
+    """
+    try:
+        setblocking = fd.setblocking
+    except AttributeError:
+        # fd has no setblocking() method. It could be that this version of
+        # Python predates socket.setblocking(). In that case, we can still set
+        # the flag "by hand" on the underlying OS fileno using the fcntl
+        # module.
+        try:
+            import fcntl
+        except ImportError:
+            # Whoops, Windows has no fcntl module. This might not be a socket
+            # at all, but rather a file-like object with no setblocking()
+            # method. In particular, on Windows, pipes don't support
+            # non-blocking I/O and therefore don't have that method. Which
+            # means fcntl wouldn't help even if we could load it.
+            raise NotImplementedError("set_nonblocking() on a file object "
+                                      "with no setblocking() method "
+                                      "(Windows pipes don't support non-blocking I/O)")
+        # We managed to import fcntl.
+        fileno = fd.fileno()
+        orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
+        new_flags = orig_flags | os.O_NONBLOCK
+        if new_flags != orig_flags:
+            fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)
+    else:
+        # socket supports setblocking()
+        setblocking(0)
+
+
+try:
+    from socket import _GLOBAL_DEFAULT_TIMEOUT
+except ImportError:
+    _GLOBAL_DEFAULT_TIMEOUT = object()
+
+
+class GreenSocket(object):
+    """
+    Green version of socket.socket class, that is intended to be 100%
+    API-compatible.
+
+    It also recognizes the keyword parameter 'set_nonblocking' (default
+    True). Pass False to indicate that the socket is already in
+    non-blocking mode, to save syscalls.
+    """
+
+    # This placeholder is to prevent __getattr__ from creating an infinite call loop
+    fd = None
+
+    def __init__(self, family=socket.AF_INET, *args, **kwargs):
+        should_set_nonblocking = kwargs.pop('set_nonblocking', True)
+        if isinstance(family, six.integer_types):
+            fd = _original_socket(family, *args, **kwargs)
+            # Notify the hub that this is a newly-opened socket.
+            notify_opened(fd.fileno())
+        else:
+            fd = family
+
+        # import timeout from other socket, if it was there
+        try:
+            self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
+        except AttributeError:
+            self._timeout = socket.getdefaulttimeout()
+
+        if should_set_nonblocking:
+            set_nonblocking(fd)
+        self.fd = fd
+        # when client calls setblocking(0) or settimeout(0) the socket must
+        # act non-blocking
+        self.act_non_blocking = False
+
+        # Copy some attributes from underlying real socket.
+        # This is the easiest way that I found to fix
+        # https://bitbucket.org/eventlet/eventlet/issue/136
+        # Only `getsockopt` is required to fix that issue; the others
+        # are just premature optimizations to save a __getattr__ call.
+        self.bind = fd.bind
+        self.close = fd.close
+        self.fileno = fd.fileno
+        self.getsockname = fd.getsockname
+        self.getsockopt = fd.getsockopt
+        self.listen = fd.listen
+        self.setsockopt = fd.setsockopt
+        self.shutdown = fd.shutdown
+        self._closed = False
+
+    @property
+    def _sock(self):
+        return self
+
+    if six.PY3:
+        def _get_io_refs(self):
+            return self.fd._io_refs
+
+        def _set_io_refs(self, value):
+            self.fd._io_refs = value
+
+        _io_refs = property(_get_io_refs, _set_io_refs)
+
+    # Forward unknown attributes to fd, caching the value for future use.
+    # I do not see any simple attribute which could change,
+    # so caching everything in self is fine.
+    # If we find such attributes, only attributes having __get__ should be cached.
+    # For now I do not want to complicate it.
+    def __getattr__(self, name):
+        if self.fd is None:
+            raise AttributeError(name)
+        attr = getattr(self.fd, name)
+        setattr(self, name, attr)
+        return attr
+
+    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+        """ We need to trampoline via the event hub.
+            We catch any signal back from the hub indicating that the operation we
+            were waiting on was associated with a filehandle that's since been
+            invalidated.
+        """
+        if self._closed:
+            # If we did any logging, alerting to a second trampoline attempt on a closed
+            # socket here would be useful.
+            raise IOClosed()
+        try:
+            return trampoline(fd, read=read, write=write, timeout=timeout,
+                              timeout_exc=timeout_exc,
+                              mark_as_closed=self._mark_as_closed)
+        except IOClosed:
+            # This socket's been obsoleted. De-fang it.
+            self._mark_as_closed()
+            raise
+
+    def accept(self):
+        if self.act_non_blocking:
+            return self.fd.accept()
+        fd = self.fd
+        _timeout_exc = socket_timeout('timed out')
+        while True:
+            res = socket_accept(fd)
+            if res is not None:
+                client, addr = res
+                set_nonblocking(client)
+                return type(self)(client), addr
+            self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
+
+    def _mark_as_closed(self):
+        """ Mark this socket as being closed """
+        self._closed = True
+
+    def __del__(self):
+        # This is in case self.close is not assigned yet (currently the constructor does it)
+        close = getattr(self, 'close', None)
+        if close is not None:
+            close()
+
+    def connect(self, address):
+        if self.act_non_blocking:
+            return self.fd.connect(address)
+        fd = self.fd
+        _timeout_exc = socket_timeout('timed out')
+        if self.gettimeout() is None:
+            while not socket_connect(fd, address):
+                try:
+                    self._trampoline(fd, write=True)
+                except IOClosed:
+                    raise socket.error(errno.EBADFD)
+                socket_checkerr(fd)
+        else:
+            end = time.time() + self.gettimeout()
+            while True:
+                if socket_connect(fd, address):
+                    return
+                if time.time() >= end:
+                    raise _timeout_exc
+                timeout = end - time.time()
+                try:
+                    self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc)
+                except IOClosed:
+                    # ... we need some workable errno here.
+                    raise socket.error(errno.EBADFD)
+                socket_checkerr(fd)
+
+    def connect_ex(self, address):
+        if self.act_non_blocking:
+            return self.fd.connect_ex(address)
+        fd = self.fd
+        if self.gettimeout() is None:
+            while not socket_connect(fd, address):
+                try:
+                    self._trampoline(fd, write=True)
+                    socket_checkerr(fd)
+                except socket.error as ex:
+                    return get_errno(ex)
+                except IOClosed:
+                    return errno.EBADFD
+        else:
+            end = time.time() + self.gettimeout()
+            timeout_exc = socket.timeout(errno.EAGAIN)
+            while True:
+                try:
+                    if socket_connect(fd, address):
+                        return 0
+                    if time.time() >= end:
+                        raise timeout_exc
+                    self._trampoline(fd, write=True, timeout=end - time.time(),
+                                     timeout_exc=timeout_exc)
+                    socket_checkerr(fd)
+                except socket.error as ex:
+                    return get_errno(ex)
+                except IOClosed:
+                    return errno.EBADFD
+
+    def dup(self, *args, **kw):
+        sock = self.fd.dup(*args, **kw)
+        newsock = type(self)(sock, set_nonblocking=False)
+        newsock.settimeout(self.gettimeout())
+        return newsock
+
+    if six.PY3:
+        def makefile(self, *args, **kwargs):
+            return _original_socket.makefile(self, *args, **kwargs)
+    else:
+        def makefile(self, *args, **kwargs):
+            dupped = self.dup()
+            res = _python2_fileobject(dupped, *args, **kwargs)
+            if hasattr(dupped, "_drop"):
+                dupped._drop()
+                # Set dupped.close to None so that when the garbage collector
+                # kicks in and calls __del__ (which ultimately calls close),
+                # _drop is not called on dupped a second time, since it was
+                # already called explicitly on the previous line
+                dupped.close = None
+            return res
+
+    def makeGreenFile(self, *args, **kw):
+        warnings.warn("makeGreenFile has been deprecated, please use "
+                      "makefile instead", DeprecationWarning, stacklevel=2)
+        return self.makefile(*args, **kw)
+
+    def _read_trampoline(self):
+        self._trampoline(
+            self.fd,
+            read=True,
+            timeout=self.gettimeout(),
+            timeout_exc=socket_timeout('timed out'))
+
+    def _recv_loop(self, recv_meth, empty_val, *args):
+        fd = self.fd
+        if self.act_non_blocking:
+            return recv_meth(*args)
+
+        while True:
+            try:
+                # recv: bufsize=0?
+                # recv_into: buffer is empty?
+                # This is needed because behind the scenes we use sockets in
+                # nonblocking mode and builtin recv* methods. Attempting to read
+                # 0 bytes from a nonblocking socket using a builtin recv* method
+                # does not raise a timeout exception. Since we're simulating
+                # a blocking socket here we need to produce a timeout exception
+                # if needed, hence the call to trampoline.
+                if not args[0]:
+                    self._read_trampoline()
+                return recv_meth(*args)
+            except socket.error as e:
+                if get_errno(e) in SOCKET_BLOCKING:
+                    pass
+                elif get_errno(e) in SOCKET_CLOSED:
+                    return empty_val
+                else:
+                    raise
+
+            try:
+                self._read_trampoline()
+            except IOClosed:
+                # Perhaps we should return '' instead?
+                raise EOFError()
+
+    def recv(self, bufsize, flags=0):
+        return self._recv_loop(self.fd.recv, b'', bufsize, flags)
+
+    def recvfrom(self, bufsize, flags=0):
+        return self._recv_loop(self.fd.recvfrom, b'', bufsize, flags)
+
+    def recv_into(self, buffer, nbytes=0, flags=0):
+        return self._recv_loop(self.fd.recv_into, 0, buffer, nbytes, flags)
+
+    def recvfrom_into(self, buffer, nbytes=0, flags=0):
+        return self._recv_loop(self.fd.recvfrom_into, 0, buffer, nbytes, flags)
+
+    def _send_loop(self, send_method, data, *args):
+        if self.act_non_blocking:
+            return send_method(data, *args)
+
+        _timeout_exc = socket_timeout('timed out')
+        while True:
+            try:
+                return send_method(data, *args)
+            except socket.error as e:
+                eno = get_errno(e)
+                if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
+                    raise
+
+            try:
+                self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
+                                 timeout_exc=_timeout_exc)
+            except IOClosed:
+                raise socket.error(errno.ECONNRESET, 'Connection closed by another thread')
+
+    def send(self, data, flags=0):
+        return self._send_loop(self.fd.send, data, flags)
+
+    def sendto(self, data, *args):
+        return self._send_loop(self.fd.sendto, data, *args)
+
+    def sendall(self, data, flags=0):
+        tail = self.send(data, flags)
+        len_data = len(data)
+        while tail < len_data:
+            tail += self.send(data[tail:], flags)
+
+    def setblocking(self, flag):
+        if flag:
+            self.act_non_blocking = False
+            self._timeout = None
+        else:
+            self.act_non_blocking = True
+            self._timeout = 0.0
+
+    def settimeout(self, howlong):
+        if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
+            self.setblocking(True)
+            return
+        try:
+            f = howlong.__float__
+        except AttributeError:
+            raise TypeError('a float is required')
+        howlong = f()
+        if howlong < 0.0:
+            raise ValueError('Timeout value out of range')
+        if howlong == 0.0:
+            self.act_non_blocking = True
+            self._timeout = 0.0
+        else:
+            self.act_non_blocking = False
+            self._timeout = howlong
+
+    def gettimeout(self):
+        return self._timeout
+
+    if "__pypy__" in sys.builtin_module_names:
+        def _reuse(self):
+            getattr(self.fd, '_sock', self.fd)._reuse()
+
+        def _drop(self):
+            getattr(self.fd, '_sock', self.fd)._drop()
+
+
+def _operation_on_closed_file(*args, **kwargs):
+    raise ValueError("I/O operation on closed file")
+
+
+greenpipe_doc = """
+    GreenPipe is a cooperative replacement for the file class.
+    It will cooperate on pipes. It will block on regular files.
+    Differences from the file class:
+    - mode is an r/w property. Should be r/o
+    - encoding property not implemented
+    - write/writelines will not raise a TypeError exception when non-string
+      data is written; it will write str(data) instead
+    - Universal newlines are not supported and the newlines property is not implemented
+    - file argument can be descriptor, file name or file object.
+    """
+
+# import SSL module here so we can refer to greenio.SSL.exceptionclass
+try:
+    from OpenSSL import SSL
+except ImportError:
+    # pyOpenSSL not installed, define exceptions anyway for convenience
+    class SSL(object):
+        class WantWriteError(Exception):
+            pass
+
+        class WantReadError(Exception):
+            pass
+
+        class ZeroReturnError(Exception):
+            pass
+
+        class SysCallError(Exception):
+            pass
+
+
+def shutdown_safe(sock):
+    """ Shuts down the socket. This is a convenience method for
+    code that wants to gracefully handle regular sockets, SSL.Connection
+    sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.6
+    interchangeably.  Both types of ssl socket require a shutdown() before
+    close, but they have different arity on their shutdown method.
+
+    Regular sockets don't need a shutdown before close, but it doesn't hurt.
+    """
+    try:
+        try:
+            # socket, ssl.SSLSocket
+            return sock.shutdown(socket.SHUT_RDWR)
+        except TypeError:
+            # SSL.Connection
+            return sock.shutdown()
+    except socket.error as e:
+        # we don't care if the socket is already closed;
+        # this will often be the case in an http server context
+        if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK):
+            raise
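
A minimal echo sketch with GreenSocket: accept(), connect() and recv() trampoline to the hub, so only the calling greenthread blocks (the port is chosen by the OS; the payload is arbitrary):

    import socket
    import eventlet
    from eventlet import greenio

    listener = greenio.GreenSocket(socket.AF_INET)
    listener.bind(('127.0.0.1', 0))
    listener.listen(1)

    def serve():
        client, _ = listener.accept()   # trampolines until a peer connects
        client.sendall(client.recv(5))

    eventlet.spawn(serve)
    peer = greenio.GreenSocket(socket.AF_INET)
    peer.connect(listener.getsockname())
    peer.sendall(b'hello')
    assert peer.recv(5) == b'hello'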

+ 226 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/py2.py

@@ -0,0 +1,226 @@
+import errno
+import os
+import sys  # needed by __repr__ below (sys.maxint)
+
+from eventlet.greenio.base import (
+    _operation_on_closed_file,
+    greenpipe_doc,
+    set_nonblocking,
+    socket,
+    SOCKET_BLOCKING,
+)
+from eventlet.hubs import trampoline, notify_close, notify_opened, IOClosed
+from eventlet.support import get_errno, six
+
+__all__ = ['_fileobject', 'GreenPipe']
+
+_fileobject = socket._fileobject
+
+
+class GreenPipe(_fileobject):
+
+    __doc__ = greenpipe_doc
+
+    def __init__(self, f, mode='r', bufsize=-1):
+        if not isinstance(f, six.string_types + (int, file)):
+            raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f)
+
+        if isinstance(f, six.string_types):
+            f = open(f, mode, 0)
+
+        if isinstance(f, int):
+            fileno = f
+            self._name = "<fd:%d>" % fileno
+        else:
+            fileno = os.dup(f.fileno())
+            self._name = f.name
+            if f.mode != mode:
+                raise ValueError('file.mode %r does not match mode parameter %r' % (f.mode, mode))
+            f.close()
+
+        super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode, bufsize)
+        set_nonblocking(self)
+        self.softspace = 0
+
+    @property
+    def name(self):
+        return self._name
+
+    def __repr__(self):
+        return "<%s %s %r, mode %r at 0x%x>" % (
+            self.closed and 'closed' or 'open',
+            self.__class__.__name__,
+            self.name,
+            self.mode,
+            (id(self) < 0) and (sys.maxint + id(self)) or id(self))
+
+    def close(self):
+        super(GreenPipe, self).close()
+        for method in [
+                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
+                'readline', 'readlines', 'seek', 'tell', 'truncate',
+                'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
+            setattr(self, method, _operation_on_closed_file)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def _get_readahead_len(self):
+        return len(self._rbuf.getvalue())
+
+    def _clear_readahead_buf(self):
+        buffered = self._get_readahead_len()
+        if buffered > 0:
+            self.read(buffered)
+
+    def tell(self):
+        self.flush()
+        try:
+            return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len()
+        except OSError as e:
+            raise IOError(*e.args)
+
+    def seek(self, offset, whence=0):
+        self.flush()
+        if whence == 1 and offset == 0:  # tell synonym
+            return self.tell()
+        if whence == 1:  # adjust offset by what is read ahead
+            offset -= self._get_readahead_len()
+        try:
+            rv = os.lseek(self.fileno(), offset, whence)
+        except OSError as e:
+            raise IOError(*e.args)
+        else:
+            self._clear_readahead_buf()
+            return rv
+
+    if getattr(file, "truncate", None):  # not all OSes implement truncate
+        def truncate(self, size=-1):
+            self.flush()
+            if size == -1:
+                size = self.tell()
+            try:
+                rv = os.ftruncate(self.fileno(), size)
+            except OSError as e:
+                raise IOError(*e.args)
+            else:
+                self.seek(size)  # move position&clear buffer
+                return rv
+
+    def isatty(self):
+        try:
+            return os.isatty(self.fileno())
+        except OSError as e:
+            raise IOError(*e.args)
+
+
+class _SocketDuckForFd(object):
+    """Class implementing all socket method used by _fileobject
+    in cooperative manner using low level os I/O calls.
+    """
+    _refcount = 0
+
+    def __init__(self, fileno):
+        self._fileno = fileno
+        notify_opened(fileno)
+        self._closed = False
+
+    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+        if self._closed:
+            # Don't trampoline if we're already closed.
+            raise IOClosed()
+        try:
+            return trampoline(fd, read=read, write=write, timeout=timeout,
+                              timeout_exc=timeout_exc,
+                              mark_as_closed=self._mark_as_closed)
+        except IOClosed:
+            # Our fileno has been obsoleted. Defang ourselves to
+            # prevent spurious closes.
+            self._mark_as_closed()
+            raise
+
+    def _mark_as_closed(self):
+        current = self._closed
+        self._closed = True
+        return current
+
+    @property
+    def _sock(self):
+        return self
+
+    def fileno(self):
+        return self._fileno
+
+    def recv(self, buflen):
+        while True:
+            try:
+                data = os.read(self._fileno, buflen)
+                return data
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+            self._trampoline(self, read=True)
+
+    def recv_into(self, buf, nbytes=0, flags=0):
+        if nbytes == 0:
+            nbytes = len(buf)
+        data = self.recv(nbytes)
+        buf[:nbytes] = data
+        return len(data)
+
+    def send(self, data):
+        while True:
+            try:
+                return os.write(self._fileno, data)
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+                else:
+                    trampoline(self, write=True)
+
+    def sendall(self, data):
+        len_data = len(data)
+        os_write = os.write
+        fileno = self._fileno
+        try:
+            total_sent = os_write(fileno, data)
+        except OSError as e:
+            if get_errno(e) != errno.EAGAIN:
+                raise IOError(*e.args)
+            total_sent = 0
+        while total_sent < len_data:
+            self._trampoline(self, write=True)
+            try:
+                total_sent += os_write(fileno, data[total_sent:])
+            except OSError as e:
+                if get_errno(e) != errno.EAGAIN:
+                    raise IOError(*e.args)
+
+    def __del__(self):
+        self._close()
+
+    def _close(self):
+        was_closed = self._mark_as_closed()
+        if was_closed:
+            return
+        notify_close(self._fileno)
+        try:
+            os.close(self._fileno)
+        except:
+            # os.close may fail if __init__ didn't complete
+            # (i.e. the file descriptor passed to popen was invalid)
+            pass
+
+    def __repr__(self):
+        return "%s:%d" % (self.__class__.__name__, self._fileno)
+
+    def _reuse(self):
+        self._refcount += 1
+
+    def _drop(self):
+        self._refcount -= 1
+        if self._refcount == 0:
+            self._close()
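
A sketch of the Python 2 GreenPipe on an OS pipe; reads and writes cooperate with the hub instead of blocking the process:

    import os
    from eventlet import greenio

    r, w = os.pipe()
    reader = greenio.GreenPipe(r, 'r')
    writer = greenio.GreenPipe(w, 'w')
    writer.write('hello\n')
    writer.flush()
    print(reader.readline())   # blocks only this greenthread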

+ 213 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/greenio/py3.py

@@ -0,0 +1,213 @@
+import _pyio as _original_pyio
+import errno
+import os as _original_os
+import socket as _original_socket
+from io import (
+    BufferedRandom as _OriginalBufferedRandom,
+    BufferedReader as _OriginalBufferedReader,
+    BufferedWriter as _OriginalBufferedWriter,
+    DEFAULT_BUFFER_SIZE,
+    TextIOWrapper as _OriginalTextIOWrapper,
+    IOBase as _OriginalIOBase,
+)
+from types import FunctionType
+
+from eventlet.greenio.base import (
+    _operation_on_closed_file,
+    greenpipe_doc,
+    set_nonblocking,
+    SOCKET_BLOCKING,
+)
+from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
+from eventlet.support import get_errno, six
+
+__all__ = ['_fileobject', 'GreenPipe']
+
+# TODO get rid of this; it only resembles the original _fileobject
+_fileobject = _original_socket.SocketIO
+
+# Large part of the following code is copied from the original
+# eventlet.greenio module
+
+
+class GreenFileIO(_OriginalIOBase):
+    def __init__(self, name, mode='r', closefd=True, opener=None):
+        if isinstance(name, int):
+            fileno = name
+            self._name = "<fd:%d>" % fileno
+        else:
+            assert isinstance(name, six.string_types)
+            with open(name, mode) as fd:
+                self._name = fd.name
+                fileno = _original_os.dup(fd.fileno())
+
+        notify_opened(fileno)
+        self._fileno = fileno
+        self._mode = mode
+        self._closed = False
+        set_nonblocking(self)
+        self._seekable = None
+
+    @property
+    def closed(self):
+        return self._closed
+
+    def seekable(self):
+        if self._seekable is None:
+            try:
+                _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
+            except IOError as e:
+                if get_errno(e) == errno.ESPIPE:
+                    self._seekable = False
+                else:
+                    raise
+            else:
+                self._seekable = True
+
+        return self._seekable
+
+    def readable(self):
+        return 'r' in self._mode or '+' in self._mode
+
+    def writable(self):
+        return 'w' in self._mode or '+' in self._mode
+
+    def fileno(self):
+        return self._fileno
+
+    def read(self, size=-1):
+        if size == -1:
+            return self.readall()
+
+        while True:
+            try:
+                return _original_os.read(self._fileno, size)
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+                self._trampoline(self, read=True)
+
+    def readall(self):
+        buf = []
+        while True:
+            try:
+                chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
+                if chunk == b'':
+                    return b''.join(buf)
+                buf.append(chunk)
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+                self._trampoline(self, read=True)
+
+    def readinto(self, b):
+        up_to = len(b)
+        data = self.read(up_to)
+        bytes_read = len(data)
+        b[:bytes_read] = data
+        return bytes_read
+
+    def isatty(self):
+        try:
+            return _original_os.isatty(self.fileno())
+        except OSError as e:
+            raise IOError(*e.args)
+
+    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+        if self._closed:
+            # Don't trampoline if we're already closed.
+            raise IOClosed()
+        try:
+            return trampoline(fd, read=read, write=write, timeout=timeout,
+                              timeout_exc=timeout_exc,
+                              mark_as_closed=self._mark_as_closed)
+        except IOClosed:
+            # Our fileno has been obsoleted. Defang ourselves to
+            # prevent spurious closes.
+            self._mark_as_closed()
+            raise
+
+    def _mark_as_closed(self):
+        """ Mark this socket as being closed """
+        self._closed = True
+
+    def write(self, data):
+        view = memoryview(data)
+        datalen = len(data)
+        offset = 0
+        while offset < datalen:
+            try:
+                written = _original_os.write(self._fileno, view[offset:])
+            except OSError as e:
+                if get_errno(e) not in SOCKET_BLOCKING:
+                    raise IOError(*e.args)
+                trampoline(self, write=True)
+            else:
+                offset += written
+        return offset
+
+    def close(self):
+        if not self._closed:
+            self._closed = True
+            _original_os.close(self._fileno)
+        notify_close(self._fileno)
+        for method in [
+                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
+                'readline', 'readlines', 'seek', 'tell', 'truncate',
+                'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
+            setattr(self, method, _operation_on_closed_file)
+
+    def truncate(self, size=-1):
+        if size == -1:
+            size = self.tell()
+        try:
+            rv = _original_os.ftruncate(self._fileno, size)
+        except OSError as e:
+            raise IOError(*e.args)
+        else:
+            self.seek(size)  # move position & clear buffer
+            return rv
+
+    def seek(self, offset, whence=_original_os.SEEK_SET):
+        try:
+            return _original_os.lseek(self._fileno, offset, whence)
+        except OSError as e:
+            raise IOError(*e.args)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+
+_open_environment = dict(globals())
+_open_environment.update(dict(
+    BufferedRandom=_OriginalBufferedRandom,
+    BufferedWriter=_OriginalBufferedWriter,
+    BufferedReader=_OriginalBufferedReader,
+    TextIOWrapper=_OriginalTextIOWrapper,
+    FileIO=GreenFileIO,
+    os=_original_os,
+))
+
+_open = FunctionType(
+    six.get_function_code(_original_pyio.open),
+    _open_environment,
+)
+
+
+def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
+              newline=None, closefd=True, opener=None):
+    try:
+        fileno = name.fileno()
+    except AttributeError:
+        pass
+    else:
+        fileno = _original_os.dup(fileno)
+        name.close()
+        name = fileno
+
+    return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)
+
+GreenPipe.__doc__ = greenpipe_doc

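For context between files, here is a minimal sketch of how the GreenPipe
wrapper defined above is typically used; the pipe setup and the produce()
helper are illustrative assumptions, not part of this change::

    import os
    import eventlet
    from eventlet.greenio import GreenPipe

    r, w = os.pipe()
    reader = GreenPipe(r, 'rb', 0)  # buffering=0 yields the raw GreenFileIO
    writer = GreenPipe(w, 'wb', 0)

    def produce():
        writer.write(b'hello')  # cooperative: trampolines instead of blocking
        writer.close()

    eventlet.spawn_n(produce)
    print(reader.read())  # yields to the hub until the writer delivers data
    reader.close()

Because read() and write() route EAGAIN through the hub, other greenthreads
(such as produce() here) keep running while this one waits on the pipe.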
+ 251 - 0
desktop/core/ext-py/eventlet-0.21.0/eventlet/greenpool.py

@@ -0,0 +1,251 @@
+import traceback
+
+import eventlet
+from eventlet import queue
+from eventlet.support import greenlets as greenlet
+from eventlet.support import six
+
+__all__ = ['GreenPool', 'GreenPile']
+
+DEBUG = True
+
+
+class GreenPool(object):
+    """The GreenPool class is a pool of green threads.
+    """
+
+    def __init__(self, size=1000):
+        try:
+            size = int(size)
+        except ValueError as e:
+            msg = 'GreenPool() expects size :: int, actual: {0} {1}'.format(type(size), str(e))
+            raise TypeError(msg)
+        if size < 0:
+            msg = 'GreenPool() expects size >= 0, actual: {0}'.format(repr(size))
+            raise ValueError(msg)
+        self.size = size
+        self.coroutines_running = set()
+        self.sem = eventlet.Semaphore(size)
+        self.no_coros_running = eventlet.Event()
+
+    def resize(self, new_size):
+        """ Change the max number of greenthreads doing work at any given time.
+
+        If resize is called when there are more than *new_size* greenthreads
+        already working on tasks, they will be allowed to complete but no new
+        tasks will be allowed to get launched until enough greenthreads finish
+        their tasks to drop the overall quantity below *new_size*.  Until
+        then, the return value of free() will be negative.
+        """
+        size_delta = new_size - self.size
+        self.sem.counter += size_delta
+        self.size = new_size
+
+    def running(self):
+        """ Returns the number of greenthreads that are currently executing
+        functions in the GreenPool."""
+        return len(self.coroutines_running)
+
+    def free(self):
+        """ Returns the number of greenthreads available for use.
+
+        If zero or less, the next call to :meth:`spawn` or :meth:`spawn_n` will
+        block the calling greenthread until a slot becomes available."""
+        return self.sem.counter
+
+    def spawn(self, function, *args, **kwargs):
+        """Run the *function* with its arguments in its own green thread.
+        Returns the :class:`GreenThread <eventlet.GreenThread>`
+        object that is running the function, which can be used to retrieve the
+        results.
+
+        If the pool is currently at capacity, ``spawn`` will block until one of
+        the running greenthreads completes its task and frees up a slot.
+
+        This function is reentrant; *function* can call ``spawn`` on the same
+        pool without risk of deadlocking the whole thing.
+        """
+        # if reentering an empty pool, don't try to wait on a coroutine freeing
+        # itself -- instead, just execute in the current coroutine
+        current = eventlet.getcurrent()
+        if self.sem.locked() and current in self.coroutines_running:
+            # a bit hacky to use the GT without switching to it
+            gt = eventlet.greenthread.GreenThread(current)
+            gt.main(function, args, kwargs)
+            return gt
+        else:
+            self.sem.acquire()
+            gt = eventlet.spawn(function, *args, **kwargs)
+            if not self.coroutines_running:
+                self.no_coros_running = eventlet.Event()
+            self.coroutines_running.add(gt)
+            gt.link(self._spawn_done)
+        return gt
+
+    def _spawn_n_impl(self, func, args, kwargs, coro):
+        try:
+            try:
+                func(*args, **kwargs)
+            except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit):
+                raise
+            except:
+                if DEBUG:
+                    traceback.print_exc()
+        finally:
+            if coro is None:
+                return
+            else:
+                coro = eventlet.getcurrent()
+                self._spawn_done(coro)
+
+    def spawn_n(self, function, *args, **kwargs):
+        """Create a greenthread to run the *function*, the same as
+        :meth:`spawn`.  The difference is that :meth:`spawn_n` returns
+        None; the results of *function* are not retrievable.
+        """
+        # if reentering an empty pool, don't try to wait on a coroutine freeing
+        # itself -- instead, just execute in the current coroutine
+        current = eventlet.getcurrent()
+        if self.sem.locked() and current in self.coroutines_running:
+            self._spawn_n_impl(function, args, kwargs, None)
+        else:
+            self.sem.acquire()
+            # the trailing True is a flag telling _spawn_n_impl that it runs
+            # in its own greenthread; _spawn_n_impl swaps the flag for
+            # getcurrent() on exit so _spawn_done sees the real coroutine
+            g = eventlet.spawn_n(
+                self._spawn_n_impl,
+                function, args, kwargs, True)
+            if not self.coroutines_running:
+                self.no_coros_running = eventlet.Event()
+            self.coroutines_running.add(g)
+
+    def waitall(self):
+        """Waits until all greenthreads in the pool are finished working."""
+        assert eventlet.getcurrent() not in self.coroutines_running, \
+            "Calling waitall() from within one of the " \
+            "GreenPool's greenthreads will never terminate."
+        if self.running():
+            self.no_coros_running.wait()
+
+    def _spawn_done(self, coro):
+        self.sem.release()
+        if coro is not None:
+            self.coroutines_running.remove(coro)
+        # if done processing (no more work is waiting for processing),
+        # we can finish off any waitall() calls that might be pending
+        if self.sem.balance == self.size:
+            self.no_coros_running.send(None)
+
+    def waiting(self):
+        """Return the number of greenthreads waiting to spawn.
+        """
+        if self.sem.balance < 0:
+            return -self.sem.balance
+        else:
+            return 0
+
+    def _do_map(self, func, it, gi):
+        for args in it:
+            gi.spawn(func, *args)
+        gi.spawn(return_stop_iteration)
+
+    def starmap(self, function, iterable):
+        """This is the same as :func:`itertools.starmap`, except that *func* is
+        executed in a separate green thread for each item, with the concurrency
+        limited by the pool's size. In operation, starmap consumes a constant
+        amount of memory, proportional to the size of the pool, and is thus
+        suited for iterating over extremely long input lists.
+        """
+        if function is None:
+            function = lambda *a: a
+        gi = GreenMap(self.size)
+        eventlet.spawn_n(self._do_map, function, iterable, gi)
+        return gi
+
+    def imap(self, function, *iterables):
+        """This is the same as :func:`itertools.imap`, and has the same
+        concurrency and memory behavior as :meth:`starmap`.
+
+        It's quite convenient for, e.g., farming out jobs from a file::
+
+           def worker(line):
+               return do_something(line)
+           pool = GreenPool()
+           for result in pool.imap(worker, open("filename", 'r')):
+               print(result)
+        """
+        return self.starmap(function, six.moves.zip(*iterables))
+
+
+def return_stop_iteration():
+    return StopIteration()
+
+
+class GreenPile(object):
+    """GreenPile is an abstraction representing a bunch of I/O-related tasks.
+
+    Construct a GreenPile with an existing GreenPool object.  The GreenPile will
+    then use that pool's concurrency as it processes its jobs.  There can be
+    many GreenPiles associated with a single GreenPool.
+
+    A GreenPile can also be constructed standalone, not associated with any
+    GreenPool.  To do this, construct it with an integer size parameter instead
+    of a GreenPool.
+
+    It is not advisable to iterate over a GreenPile in a different greenthread
+    than the one which is calling spawn.  The iterator will exit early in that
+    situation.
+    """
+
+    def __init__(self, size_or_pool=1000):
+        if isinstance(size_or_pool, GreenPool):
+            self.pool = size_or_pool
+        else:
+            self.pool = GreenPool(size_or_pool)
+        self.waiters = queue.LightQueue()
+        self.used = False
+        self.counter = 0
+
+    def spawn(self, func, *args, **kw):
+        """Runs *func* in its own green thread, with the result available by
+        iterating over the GreenPile object."""
+        self.used = True
+        self.counter += 1
+        try:
+            gt = self.pool.spawn(func, *args, **kw)
+            self.waiters.put(gt)
+        except:
+            self.counter -= 1
+            raise
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        """Wait for the next result, suspending the current greenthread until it
+        is available.  Raises StopIteration when there are no more results."""
+        if self.counter == 0 and self.used:
+            raise StopIteration()
+        try:
+            return self.waiters.get().wait()
+        finally:
+            self.counter -= 1
+    __next__ = next
+
+
+# this is identical to GreenPile but it blocks on spawn if the results
+# aren't consumed, and it doesn't generate its own StopIteration exception,
+# instead relying on the spawning process to send one in when it's done
+class GreenMap(GreenPile):
+    def __init__(self, size_or_pool):
+        super(GreenMap, self).__init__(size_or_pool)
+        self.waiters = queue.LightQueue(maxsize=self.pool.size)
+
+    def next(self):
+        try:
+            val = self.waiters.get().wait()
+            if isinstance(val, StopIteration):
+                raise val
+            else:
+                return val
+        finally:
+            self.counter -= 1
+    __next__ = next

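For context, a short sketch of the GreenPool / GreenPile API defined in
greenpool.py above; fetch() and the URL list are placeholder assumptions::

    import eventlet
    from eventlet.greenpool import GreenPool, GreenPile

    def fetch(url):
        eventlet.sleep(0)  # stand-in for real network I/O
        return url, len(url)

    urls = ['http://a.example', 'http://b.example', 'http://c.example']

    pool = GreenPool(size=2)  # at most two greenthreads run at once

    # imap() bounds memory by the pool size and yields results in input order
    for url, n in pool.imap(fetch, urls):
        print(url, n)

    # a GreenPile borrows the pool's concurrency; iterating over it waits for
    # each spawned greenthread and yields results in spawn order
    pile = GreenPile(pool)
    for url in urls:
        pile.spawn(fetch, url)
    print(list(pile))

    pool.waitall()  # block until every greenthread in the pool has finished

Note how starmap()/imap() terminate: the feeding greenthread in _do_map()
enqueues a StopIteration instance through GreenMap once the input iterable
is exhausted, which is what ends the for loop above.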
Some files were not shown because too many files changed in this diff