
[core] Fix some code not compatible with Python 3 (#1244)

* [desktop, apps] Fix code not compatible with Python 3 (#1239)

Most of the changes listed are typical py2->py3 porting issues,
but the ldap_access.py changes are more complex. Recent versions of
python-ldap changed the way that strings are returned after certain
API calls, leading to inconsistencies and errors when trying to log
in via LDAP authentication.

GH issue: #1239
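
For context on the ldap_access.py changes: a minimal sketch of the behavior change, assuming python-ldap 3.x under Python 3, where search results carry attribute values as bytes that must be decoded (the diff uses Django's smart_text for this; the server URI, base DN, and filter below are placeholders):

    import ldap  # python-ldap; under Python 3, attribute values come back as bytes

    conn = ldap.initialize('ldap://ldap.example.com')  # placeholder URI
    conn.simple_bind_s('', '')  # anonymous bind, as in the diff

    results = conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, '(uid=jdoe)')
    for dn, attrs in results:
        raw = attrs['mail'][0]  # bytes on Python 3, str on Python 2
        # Decode defensively so the same code runs on both interpreters;
        # Django's smart_text (used in the diff) does essentially this.
        email = raw.decode('utf-8') if isinstance(raw, bytes) else raw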

* [desktop, apps] Fix style warnings due to issues raised by CI (#1239)

After the introduction of more precise CI linting/validation steps, some
errors related to Python code formatting were raised.
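
The formatting fixes are of the kind visible throughout the diff, e.g. pycodestyle's E231 (missing whitespace after ':') in dict literals; a representative before/after, not tied to any specific hunk:

    d = {'counters':1}   # flagged: E231, missing whitespace after ':'
    d = {'counters': 1}  # fixed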

Co-authored-by: Romain Rigaux <romain.rigaux@gmail.com>
Luca Toscano · 5 years ago
Parent commit: c04d89a277

+ 27 - 19
apps/jobbrowser/src/jobbrowser/views.py

@@ -20,6 +20,7 @@ from future import standard_library
 standard_library.install_aliases()
 from builtins import filter
 from builtins import str
+import functools
 import logging
 import re
 import string
@@ -229,7 +230,8 @@ def massage_task_for_json(task):
     'id': task.taskId,
     'shortId': task.taskId_short,
     'url': task.taskId and reverse('jobbrowser.views.single_task', kwargs={'job': task.jobId, 'taskid': task.taskId}) or '',
-    'logs': task.taskAttemptIds and reverse('single_task_attempt_logs', kwargs={'job': task.jobId, 'taskid': task.taskId, 'attemptid': task.taskAttemptIds[-1]}) or '',
+    'logs': task.taskAttemptIds and reverse('single_task_attempt_logs', kwargs={'job': task.jobId, 'taskid': task.taskId,
+                                                                                'attemptid': task.taskAttemptIds[-1]}) or '',
     'type': task.taskType
   }
   return task
@@ -423,7 +425,10 @@ def job_single_logs(request, job, offset=LOG_OFFSET_BYTES):
   task = None
 
   failed_tasks = job.filter_tasks(task_states=('failed',))
-  failed_tasks.sort(cmp_exec_time)
+  if sys.version_info[0] > 2:
+    failed_tasks.sort(key=functools.cmp_to_key(cmp_exec_time))
+  else:
+    failed_tasks.sort(cmp_exec_time)
   if failed_tasks:
     task = failed_tasks[0]
     if not task.taskAttemptIds and len(failed_tasks) > 1: # In some cases the last task ends up without any attempt
@@ -433,7 +438,10 @@ def job_single_logs(request, job, offset=LOG_OFFSET_BYTES):
     if job.is_mr2:
       task_states.append('scheduled')
     recent_tasks = job.filter_tasks(task_states=task_states, task_types=('map', 'reduce',))
-    recent_tasks.sort(cmp_exec_time, reverse=True)
+    if sys.version_info[0] > 2:
+      recent_tasks.sort(key=functools.cmp_to_key(cmp_exec_time), reverse=True)
+    else:
+      recent_tasks.sort(cmp_exec_time, reverse=True)
     if recent_tasks:
       task = recent_tasks[0]
 
@@ -544,7 +552,7 @@ def single_task_attempt_logs(request, job, taskid, attemptid, offset=LOG_OFFSET_
     elif job_link.is_mr2:
       diagnostic_log = attempt.diagnostics
     else:
-      diagnostic_log =  ", ".join(task.diagnosticMap[attempt.attemptId])
+      diagnostic_log = ", ".join(task.diagnosticMap[attempt.attemptId])
     logs = [diagnostic_log]
     # Add remaining logs
     logs += [section.strip() for section in attempt.get_task_log(offset=offset)]
@@ -591,7 +599,7 @@ def task_attempt_counters(request, job, taskid, attemptid):
   counters = {}
   if attempt:
     counters = attempt.counters
-  return render("counters.html", request, {'counters':counters})
+  return render("counters.html", request, {'counters': counters})
 
 @access_log_level(logging.WARN)
 def kill_task_attempt(request, attemptid):
@@ -608,7 +616,7 @@ def trackers(request):
   """
   trackers = get_tasktrackers(request)
 
-  return render("tasktrackers.mako", request, {'trackers':trackers})
+  return render("tasktrackers.mako", request, {'trackers': trackers})
 
 def single_tracker(request, trackerid):
   jt = get_api(request.user, request.jt)
@@ -617,7 +625,7 @@ def single_tracker(request, trackerid):
     tracker = jt.get_tracker(trackerid)
   except Exception as e:
     raise PopupException(_('The tracker could not be contacted.'), detail=e)
-  return render("tasktracker.mako", request, {'tracker':tracker})
+  return render("tasktracker.mako", request, {'tracker': tracker})
 
 def container(request, node_manager_http_address, containerid):
   jt = get_api(request.user, request.jt)
@@ -627,7 +635,7 @@ def container(request, node_manager_http_address, containerid):
   except Exception as e:
     # TODO: add a redirect of some kind
     raise PopupException(_('The container disappears as soon as the job finishes.'), detail=e)
-  return render("container.mako", request, {'tracker':tracker})
+  return render("container.mako", request, {'tracker': tracker})
 
 
 def clusterstatus(request):
@@ -640,7 +648,7 @@ def queues(request):
   """
   We get here from /queues
   """
-  return render("queues.html", request, { "queuelist" : request.jt.queues()})
+  return render("queues.html", request, {"queuelist": request.jt.queues()})
 
 @check_job_permission
 def set_job_priority(request, job):
@@ -701,7 +709,7 @@ def format_counter_name(s):
   return string.capwords(re.sub('_', ' ', splitCamels(s)).lower())
 
 
-def get_state_link(request, option=None, val='', VALID_OPTIONS = ("state", "user", "text", "taskstate")):
+def get_state_link(request, option=None, val='', VALID_OPTIONS=("state", "user", "text", "taskstate")):
   """
     constructs the query string for the state of the current query for the jobs page.
     pass in the request, and an optional option/value pair; these are used for creating
@@ -719,7 +727,7 @@ def get_state_link(request, option=None, val='', VALID_OPTIONS = ("state", "user
   if option is not None:
     states[option] = val
 
-  return "&".join([ "%s=%s" % (key, quote_plus(value)) for key, value in states.items() ])
+  return "&".join(["%s=%s" % (key, quote_plus(value)) for key, value in states.items()])
 
 
 ## All Unused below
@@ -738,7 +746,7 @@ def get_tasktrackers(request):
   """
   Return a ThriftTaskTrackerStatusList object containing all task trackers
   """
-  return [ Tracker(tracker) for tracker in request.jt.all_task_trackers().trackers]
+  return [Tracker(tracker) for tracker in request.jt.all_task_trackers().trackers]
 
 
 def get_single_job(request, jobid):
@@ -786,11 +794,11 @@ def jobbrowser(request):
   jobqueues = request.jt.queues()
 
   return render("jobbrowser.html", request, {
-      "clusterstatus" : status,
-      "queues" : jobqueues,
-      "alljobs" : alljobs,
-      "runningjobs" : runningjobs,
-      "failedjobs" : failedjobs,
-      "killedjobs" : killedjobs,
-      "completedjobs" : completedjobs
+      "clusterstatus": status,
+      "queues": jobqueues,
+      "alljobs": alljobs,
+      "runningjobs": runningjobs,
+      "failedjobs": failedjobs,
+      "killedjobs": killedjobs,
+      "completedjobs": completedjobs
   })
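
A standalone sketch of the sort migration applied above: Python 3 dropped list.sort()'s cmp argument, so the comparator is wrapped with functools.cmp_to_key (cmp_exec_time here is a stand-in for the real comparator in views.py):

    import functools

    def cmp_exec_time(t1, t2):
        # Stand-in comparator following the py2 cmp protocol: <0, 0, or >0.
        return (t1['start'] > t2['start']) - (t1['start'] < t2['start'])

    tasks = [{'start': 3}, {'start': 1}, {'start': 2}]
    tasks.sort(key=functools.cmp_to_key(cmp_exec_time))  # py3 form of tasks.sort(cmp_exec_time)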

+ 16 - 11
apps/useradmin/src/useradmin/ldap_access.py

@@ -177,7 +177,7 @@ class LdapConnection(object):
     else:
       try:
         # Do anonymous bind
-        self.ldap_handle.simple_bind_s('','')
+        self.ldap_handle.simple_bind_s('', '')
       except Exception as e:
         self.handle_bind_exception(e)
 
@@ -216,8 +216,10 @@ class LdapConnection(object):
   @classmethod
   def _transform_find_user_results(cls, result_data, user_name_attr):
     """
-    :param result_data: List of dictionaries that have ldap attributes and their associated values. Generally the result list from an ldapsearch request.
-    :param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
+    :param result_data: List of dictionaries that have ldap attributes and their associated values.
+                        Generally the result list from an ldapsearch request.
+    :param user_name_attr: The ldap attribute that is returned by the server to map to ``username``
+                           in the return dictionary.
 
     :returns list of dictionaries that take on the following form: {
       'dn': <distinguished name of entry>,
@@ -243,7 +245,7 @@ class LdapConnection(object):
 
           ldap_info = {
             'dn': dn,
-            'username': data[user_name_attr][0]
+            'username': smart_text(data[user_name_attr][0])
           }
 
           if 'givenName' in data:
@@ -257,12 +259,12 @@ class LdapConnection(object):
               LOG.warn('Last name is truncated to 30 characters for [<User: %s>].' % ldap_info['username'])
             ldap_info['last'] = last_name[:30]
           if 'mail' in data:
-            ldap_info['email'] = data['mail'][0]
+            ldap_info['email'] = smart_text(data['mail'][0])
           # memberOf and isMemberOf should be the same if they both exist
           if 'memberOf' in data:
-            ldap_info['groups'] = data['memberOf']
+            ldap_info['groups'] = [smart_text(member) for member in data['memberOf']]
           if 'isMemberOf' in data:
-            ldap_info['groups'] = data['isMemberOf']
+            ldap_info['groups'] = [smart_text(member) for member in data['isMemberOf']]
 
           user_info.append(ldap_info)
     return user_info
@@ -296,13 +298,15 @@ class LdapConnection(object):
           if group_member_attr in data and group_member_attr.lower() != 'memberuid':
             ldap_info['members'] = data[group_member_attr]
           else:
-            LOG.warn('Skipping import of non-posix users from group %s since group_member_attr is memberUid or group did not contain any members' % group_name)
+            LOG.warn('Skipping import of non-posix users from group %s since group_member_attr '
+                     'is memberUid or group did not contain any members' % group_name)
             ldap_info['members'] = []
 
           if 'posixgroup' in (item.lower() for item in data['objectClass']) and 'memberUid' in data:
             ldap_info['posix_members'] = data['memberUid']
           else:
-            LOG.warn('Skipping import of posix users from group %s since posixGroup not an objectClass or no memberUids found' % group_name)
+            LOG.warn('Skipping import of posix users from group %s since posixGroup '
+                     'not an objectClass or no memberUids found' % group_name)
             ldap_info['posix_members'] = []
 
           group_info.append(ldap_info)
@@ -368,11 +372,12 @@ class LdapConnection(object):
       else:
         return []
     except ldap.LDAPError as e:
-       LOG.warn("LDAP Error: %s" % e)
+      LOG.warn("LDAP Error: %s" % e)
 
     return None
 
-  def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, group_member_attr=None, group_filter=None, find_by_dn=False, scope=SCOPE_SUBTREE):
+  def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None,
+                  group_member_attr=None, group_filter=None, find_by_dn=False, scope=SCOPE_SUBTREE):
     """
     LDAP search helper method for finding groups
 

+ 27 - 24
desktop/core/src/desktop/kt_renewer.py

@@ -25,7 +25,7 @@ from desktop.conf import KERBEROS as CONF
 LOG = logging.getLogger(__name__)
 SPEC = DjangoCommandSupervisee("kt_renewer")
 
-NEED_KRB181_WORKAROUND=None
+NEED_KRB181_WORKAROUND = None
 
 def renew_from_kt():
   cmdv = [CONF.KINIT_PATH.get(),
@@ -36,26 +36,23 @@ def renew_from_kt():
   retries = 0
   max_retries = 3
   while retries < max_retries:
-     LOG.info("Reinitting kerberos retry attempt %s from keytab %s" % (retries, " ".join(cmdv)))
+    LOG.info("Reinitting kerberos retry attempt %s from keytab %s" % (retries, " ".join(cmdv)))
 
-     subp = subprocess.Popen(cmdv,
-                          stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE,
-                          close_fds=True,
-                          bufsize=-1)
-     subp.wait()
-     if subp.returncode != 0:
-       retries = retries + 1
-       LOG.error("Couldn't reinit from keytab! `kinit' exited with %s.\n%s\n%s" % (
-              subp.returncode,
-              "\n".join(subp.stdout.readlines()),
-              "\n".join(subp.stderr.readlines())))
-       if retries >= max_retries:
-          LOG.error("FATAL: max_retries of %s reached. Exiting..." % max_retries)
-          sys.exit(subp.returncode)
-       time.sleep(3)
-     else:
-       break
+    subp = subprocess.Popen(cmdv, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE, close_fds=True,
+                            bufsize=-1)
+    subp.wait()
+    if subp.returncode != 0:
+      retries = retries + 1
+      LOG.error("Couldn't reinit from keytab! `kinit' exited with %s.\n%s\n%s" % (
+                subp.returncode,
+                "\n".join(subp.stdout.readlines()), "\n".join(subp.stderr.readlines())))
+      if retries >= max_retries:
+        LOG.error("FATAL: max_retries of %s reached. Exiting..." % max_retries)
+        sys.exit(subp.returncode)
+      time.sleep(3)
+    else:
+      break
 
   global NEED_KRB181_WORKAROUND
   if NEED_KRB181_WORKAROUND is None:
@@ -92,11 +89,17 @@ def detect_conf_var():
   Sun Java Krb5LoginModule in Java6, so we need to take an action to work
   around it.
   """
-  f = file(CONF.CCACHE_PATH.get(), "rb")
-
   try:
-    data = f.read()
-    return "X-CACHECONF:" in data
+    # TODO: the binary check for X-CACHECONF seems fragile, it should be replaced
+    # with something more robust.
+    if sys.version_info[0] > 2:
+      f = open(CONF.CCACHE_PATH.get(), "rb")
+      data = f.read()
+      return b"X-CACHECONF:" in data
+    else:
+      f = file(CONF.CCACHE_PATH.get(), "rb")
+      data = f.read()
+      return "X-CACHECONF:" in data
   finally:
     f.close()
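
A minimal sketch of the binary-read change above: Python 3 removed the file() builtin, and open(..., "rb") returns bytes, so the probe string must be a bytes literal (which also works on Python 2, where bytes is str; the ccache path is a placeholder):

    CCACHE_PATH = "/tmp/krb5cc_1000"  # placeholder path

    with open(CCACHE_PATH, "rb") as f:  # binary mode: read() yields bytes on Python 3
        data = f.read()
    has_conf_entry = b"X-CACHECONF:" in data  # bytes literal matches bytes data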
 

+ 1679 - 1679
desktop/core/src/desktop/lib/wsgiserver.py

@@ -22,38 +22,38 @@ A high-speed, production ready, thread pooled, generic WSGI server.
 Simplest example on how to use this module directly
 (without using CherryPy's application machinery):
 
-    from cherrypy import wsgiserver
+  from cherrypy import wsgiserver
 
-    def my_crazy_app(environ, start_response):
-        status = '200 OK'
-        response_headers = [('Content-type','text/plain')]
-        start_response(status, response_headers)
-        return ['Hello world!\n']
+  def my_crazy_app(environ, start_response):
+    status = '200 OK'
+    response_headers = [('Content-type','text/plain')]
+    start_response(status, response_headers)
+    return ['Hello world!\n']
 
-    server = wsgiserver.CherryPyWSGIServer(
-                ('0.0.0.0', 8070), my_crazy_app,
-                server_name='www.cherrypy.example')
+  server = wsgiserver.CherryPyWSGIServer(
+        ('0.0.0.0', 8070), my_crazy_app,
+        server_name='www.cherrypy.example')
 
 The CherryPy WSGI server can serve as many WSGI applications
 as you want in one instance by using a WSGIPathInfoDispatcher:
 
-    d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
-    server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
+  d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
+  server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
 
 Want SSL support? Just set these attributes:
 
-    server.ssl_certificate = <filename>
-    server.ssl_private_key = <filename>
+  server.ssl_certificate = <filename>
+  server.ssl_private_key = <filename>
 
 Supports also SSL certificate chains with this attribute:
 
-    server.ssl_certificate_chain = <filename>
+  server.ssl_certificate_chain = <filename>
 
-    if __name__ == '__main__':
-        try:
-            server.start()
-        except KeyboardInterrupt:
-            server.stop()
+  if __name__ == '__main__':
+    try:
+      server.start()
+    except KeyboardInterrupt:
+      server.stop()
 
 This won't call the CherryPy engine (application side) at all, only the
 WSGI server, which is independant from the rest of CherryPy. Don't
@@ -64,39 +64,39 @@ For those of you wanting to understand internals of this module, here's the
 basic call flow. The server's listening thread runs a very tight loop,
 sticking incoming connections onto a Queue:
 
-    server = CherryPyWSGIServer(...)
-    server.start()
-    while True:
-        tick()
-        # This blocks until a request comes in:
-        child = socket.accept()
-        conn = HTTPConnection(child, ...)
-        server.requests.put(conn)
+  server = CherryPyWSGIServer(...)
+  server.start()
+  while True:
+    tick()
+    # This blocks until a request comes in:
+    child = socket.accept()
+    conn = HTTPConnection(child, ...)
+    server.requests.put(conn)
 
 Worker threads are kept in a pool and poll the Queue, popping off and then
 handling each connection in turn. Each connection can consist of an arbitrary
 number of requests and their responses, so we run a nested loop:
 
-    while True:
-        conn = server.requests.get()
-        conn.communicate()
-        ->  while True:
-                req = HTTPRequest(...)
-                req.parse_request()
-                ->  # Read the Request-Line, e.g. "GET /page HTTP/1.1"
-                    req.rfile.readline()
-                    req.read_headers()
-                req.respond()
-                ->  response = wsgi_app(...)
-                    try:
-                        for chunk in response:
-                            if chunk:
-                                req.write(chunk)
-                    finally:
-                        if hasattr(response, "close"):
-                            response.close()
-                if req.close_connection:
-                    return
+  while True:
+    conn = server.requests.get()
+    conn.communicate()
+    ->  while True:
+        req = HTTPRequest(...)
+        req.parse_request()
+        ->  # Read the Request-Line, e.g. "GET /page HTTP/1.1"
+          req.rfile.readline()
+          req.read_headers()
+        req.respond()
+        ->  response = wsgi_app(...)
+          try:
+            for chunk in response:
+              if chunk:
+                req.write(chunk)
+          finally:
+            if hasattr(response, "close"):
+              response.close()
+        if req.close_connection:
+          return
 """
 
 
@@ -114,9 +114,9 @@ quoted_slash = re.compile("(?i)%2F")
 from email.utils import formatdate
 import socket
 try:
-    import io as StringIO
+  import io as StringIO
 except ImportError:
-    import io
+  import io
 
 _fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
 
@@ -127,10 +127,10 @@ import traceback
 import warnings
 
 try:
-    from OpenSSL import SSL
-    from OpenSSL import crypto
+  from OpenSSL import SSL
+  from OpenSSL import crypto
 except ImportError:
-    SSL = None
+  SSL = None
 
 import errno
 import logging
@@ -149,1741 +149,1741 @@ LOG = logging.getLogger(__name__)
 
 
 def plat_specific_errors(*errnames):
-    """Return error numbers for all errors in errnames on this platform.
+  """Return error numbers for all errors in errnames on this platform.
 
-    The 'errno' module contains different global constants depending on
-    the specific platform (OS). This function will return the list of
-    numeric values for a given list of potential names.
-    """
-    errno_names = dir(errno)
-    nums = [getattr(errno, k) for k in errnames if k in errno_names]
-    # de-dupe the list
-    return list(dict.fromkeys(nums).keys())
+  The 'errno' module contains different global constants depending on
+  the specific platform (OS). This function will return the list of
+  numeric values for a given list of potential names.
+  """
+  errno_names = dir(errno)
+  nums = [getattr(errno, k) for k in errnames if k in errno_names]
+  # de-dupe the list
+  return list(dict.fromkeys(nums).keys())
 
 socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
 
 socket_errors_to_ignore = plat_specific_errors(
-    "EPIPE",
-    "EBADF", "WSAEBADF",
-    "ENOTSOCK", "WSAENOTSOCK",
-    "ETIMEDOUT", "WSAETIMEDOUT",
-    "ECONNREFUSED", "WSAECONNREFUSED",
-    "ECONNRESET", "WSAECONNRESET",
-    "ECONNABORTED", "WSAECONNABORTED",
-    "ENETRESET", "WSAENETRESET",
-    "EHOSTDOWN", "EHOSTUNREACH",
-    )
+  "EPIPE",
+  "EBADF", "WSAEBADF",
+  "ENOTSOCK", "WSAENOTSOCK",
+  "ETIMEDOUT", "WSAETIMEDOUT",
+  "ECONNREFUSED", "WSAECONNREFUSED",
+  "ECONNRESET", "WSAECONNRESET",
+  "ECONNABORTED", "WSAECONNABORTED",
+  "ENETRESET", "WSAENETRESET",
+  "EHOSTDOWN", "EHOSTUNREACH",
+  )
 socket_errors_to_ignore.append("timed out")
 
 socket_errors_nonblocking = plat_specific_errors(
-    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
+  'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
 
 comma_separated_headers = ['ACCEPT', 'ACCEPT-CHARSET', 'ACCEPT-ENCODING',
-    'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL',
-    'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT',
-    'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE',
-    'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING',
-    'WWW-AUTHENTICATE']
+  'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL',
+  'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT',
+  'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE',
+  'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING',
+  'WWW-AUTHENTICATE']
 
 
 class WSGIPathInfoDispatcher(object):
-    """A WSGI dispatcher for dispatch based on the PATH_INFO.
+  """A WSGI dispatcher for dispatch based on the PATH_INFO.
 
-    apps: a dict or list of (path_prefix, app) pairs.
-    """
+  apps: a dict or list of (path_prefix, app) pairs.
+  """
 
-    def __init__(self, apps):
-        try:
-            apps = list(apps.items())
-        except AttributeError:
-            pass
+  def __init__(self, apps):
+    try:
+      apps = list(apps.items())
+    except AttributeError:
+      pass
 
-        # Sort the apps by len(path), descending
-        apps.sort()
-        apps.reverse()
+    # Sort the apps by len(path), descending
+    apps.sort()
+    apps.reverse()
 
-        # The path_prefix strings must start, but not end, with a slash.
-        # Use "" instead of "/".
-        self.apps = [(p.rstrip("/"), a) for p, a in apps]
+    # The path_prefix strings must start, but not end, with a slash.
+    # Use "" instead of "/".
+    self.apps = [(p.rstrip("/"), a) for p, a in apps]
 
-    def __call__(self, environ, start_response):
-        path = environ["PATH_INFO"] or "/"
-        for p, app in self.apps:
-            # The apps list should be sorted by length, descending.
-            if path.startswith(p + "/") or path == p:
-                environ = environ.copy()
-                environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
-                environ["PATH_INFO"] = path[len(p):]
-                return app(environ, start_response)
+  def __call__(self, environ, start_response):
+    path = environ["PATH_INFO"] or "/"
+    for p, app in self.apps:
+      # The apps list should be sorted by length, descending.
+      if path.startswith(p + "/") or path == p:
+        environ = environ.copy()
+        environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
+        environ["PATH_INFO"] = path[len(p):]
+        return app(environ, start_response)
 
-        start_response('404 Not Found', [('Content-Type', 'text/plain'),
-                                         ('Content-Length', '0')])
-        return ['']
+    start_response('404 Not Found', [('Content-Type', 'text/plain'),
+                     ('Content-Length', '0')])
+    return ['']
 
 
 class MaxSizeExceeded(Exception):
-    pass
+  pass
 
 class SizeCheckWrapper(object):
-    """Wraps a file-like object, raising MaxSizeExceeded if too large."""
-
-    def __init__(self, rfile, maxlen):
-        self.rfile = rfile
-        self.maxlen = maxlen
-        self.bytes_read = 0
-
-    def _check_length(self):
-        if self.maxlen and self.bytes_read > self.maxlen:
-            raise MaxSizeExceeded()
-
-    def read(self, size=None):
-        data = self.rfile.read(size)
-        self.bytes_read += len(data)
-        self._check_length()
-        return data
-
-    def readline(self, size=None):
-        if size is not None:
-            data = self.rfile.readline(size)
-            self.bytes_read += len(data)
-            self._check_length()
-            return data
-
-        # User didn't specify a size ...
-        # We read the line in chunks to make sure it's not a 100MB line !
-        res = []
-        while True:
-            data = self.rfile.readline(256)
-            self.bytes_read += len(data)
-            self._check_length()
-            res.append(data)
-            # See http://www.cherrypy.org/ticket/421
-            if len(data) < 256 or data[-1:] == "\n":
-                return ''.join(res)
-
-    def readlines(self, sizehint=0):
-        # Shamelessly stolen from StringIO
-        total = 0
-        lines = []
-        line = self.readline()
-        while line:
-            lines.append(line)
-            total += len(line)
-            if 0 < sizehint <= total:
-                break
-            line = self.readline()
-        return lines
-
-    def close(self):
-        self.rfile.close()
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        data = next(self.rfile)
-        self.bytes_read += len(data)
-        self._check_length()
-        return data
+  """Wraps a file-like object, raising MaxSizeExceeded if too large."""
+
+  def __init__(self, rfile, maxlen):
+    self.rfile = rfile
+    self.maxlen = maxlen
+    self.bytes_read = 0
+
+  def _check_length(self):
+    if self.maxlen and self.bytes_read > self.maxlen:
+      raise MaxSizeExceeded()
+
+  def read(self, size=None):
+    data = self.rfile.read(size)
+    self.bytes_read += len(data)
+    self._check_length()
+    return data
+
+  def readline(self, size=None):
+    if size is not None:
+      data = self.rfile.readline(size)
+      self.bytes_read += len(data)
+      self._check_length()
+      return data
+
+    # User didn't specify a size ...
+    # We read the line in chunks to make sure it's not a 100MB line !
+    res = []
+    while True:
+      data = self.rfile.readline(256)
+      self.bytes_read += len(data)
+      self._check_length()
+      res.append(data)
+      # See http://www.cherrypy.org/ticket/421
+      if len(data) < 256 or data[-1:] == "\n":
+        return ''.join(res)
+
+  def readlines(self, sizehint=0):
+    # Shamelessly stolen from StringIO
+    total = 0
+    lines = []
+    line = self.readline()
+    while line:
+      lines.append(line)
+      total += len(line)
+      if 0 < sizehint <= total:
+        break
+      line = self.readline()
+    return lines
+
+  def close(self):
+    self.rfile.close()
+
+  def __iter__(self):
+    return self
+
+  def __next__(self):
+    data = next(self.rfile)
+    self.bytes_read += len(data)
+    self._check_length()
+    return data
 
 
 class HTTPRequest(object):
-    """An HTTP Request (and response).
-
-    A single HTTP connection may consist of multiple request/response pairs.
-
-    send: the 'send' method from the connection's socket object.
-    wsgi_app: the WSGI application to call.
-    environ: a partial WSGI environ (server and connection entries).
-        The caller MUST set the following entries:
-        * All wsgi.* entries, including .input
-        * SERVER_NAME and SERVER_PORT
-        * Any SSL_* entries
-        * Any custom entries like REMOTE_ADDR and REMOTE_PORT
-        * SERVER_SOFTWARE: the value to write in the "Server" response header.
-        * ACTUAL_SERVER_PROTOCOL: the value to write in the Status-Line of
-            the response. From RFC 2145: "An HTTP server SHOULD send a
-            response version equal to the highest version for which the
-            server is at least conditionally compliant, and whose major
-            version is less than or equal to the one received in the
-            request.  An HTTP server MUST NOT send a version for which
-            it is not at least conditionally compliant."
-
-    outheaders: a list of header tuples to write in the response.
-    ready: when True, the request has been parsed and is ready to begin
-        generating the response. When False, signals the calling Connection
-        that the response should not be generated and the connection should
-        close.
-    close_connection: signals the calling Connection that the request
-        should close. This does not imply an error! The client and/or
-        server may each request that the connection be closed.
-    chunked_write: if True, output will be encoded with the "chunked"
-        transfer-coding. This value is set automatically inside
-        send_headers.
-    """
-
-    max_request_header_size = 0
-    max_request_body_size = 0
-
-    def __init__(self, wfile, environ, wsgi_app):
-        self.rfile = environ['wsgi.input']
-        self.wfile = wfile
-        self.environ = environ.copy()
-        self.wsgi_app = wsgi_app
+  """An HTTP Request (and response).
+
+  A single HTTP connection may consist of multiple request/response pairs.
+
+  send: the 'send' method from the connection's socket object.
+  wsgi_app: the WSGI application to call.
+  environ: a partial WSGI environ (server and connection entries).
+    The caller MUST set the following entries:
+    * All wsgi.* entries, including .input
+    * SERVER_NAME and SERVER_PORT
+    * Any SSL_* entries
+    * Any custom entries like REMOTE_ADDR and REMOTE_PORT
+    * SERVER_SOFTWARE: the value to write in the "Server" response header.
+    * ACTUAL_SERVER_PROTOCOL: the value to write in the Status-Line of
+      the response. From RFC 2145: "An HTTP server SHOULD send a
+      response version equal to the highest version for which the
+      server is at least conditionally compliant, and whose major
+      version is less than or equal to the one received in the
+      request.  An HTTP server MUST NOT send a version for which
+      it is not at least conditionally compliant."
+
+  outheaders: a list of header tuples to write in the response.
+  ready: when True, the request has been parsed and is ready to begin
+    generating the response. When False, signals the calling Connection
+    that the response should not be generated and the connection should
+    close.
+  close_connection: signals the calling Connection that the request
+    should close. This does not imply an error! The client and/or
+    server may each request that the connection be closed.
+  chunked_write: if True, output will be encoded with the "chunked"
+    transfer-coding. This value is set automatically inside
+    send_headers.
+  """
+
+  max_request_header_size = 0
+  max_request_body_size = 0
+
+  def __init__(self, wfile, environ, wsgi_app):
+    self.rfile = environ['wsgi.input']
+    self.wfile = wfile
+    self.environ = environ.copy()
+    self.wsgi_app = wsgi_app
+
+    self.ready = False
+    self.started_request = False
+    self.started_response = False
+    self.status = ""
+    self.outheaders = []
+    self.sent_headers = False
+    self.close_connection = False
+    self.chunked_write = False
+
+  def parse_request(self):
+    """Parse the next HTTP request start-line and message-headers."""
+    self.rfile.maxlen = self.max_request_header_size
+    self.rfile.bytes_read = 0
 
+    try:
+      self._parse_request()
+    except MaxSizeExceeded:
+      self.simple_response("413 Request Entity Too Large")
+      return
+
+  def _parse_request(self):
+    # HTTP/1.1 connections are persistent by default. If a client
+    # requests a page, then idles (leaves the connection open),
+    # then rfile.readline() will raise socket.error("timed out").
+    # Note that it does this based on the value given to settimeout(),
+    # and doesn't need the client to request or acknowledge the close
+    # (although your TCP stack might suffer for it: cf Apache's history
+    # with FIN_WAIT_2).
+    request_line = self.rfile.readline()
+    # Set started_request to True so communicate() knows to send 408
+    # from here on out.
+    self.started_request = True
+    if not request_line:
+      # Force self.ready = False so the connection will close.
+      self.ready = False
+      return
+
+    if request_line == "\r\n":
+      # RFC 2616 sec 4.1: "...if the server is reading the protocol
+      # stream at the beginning of a message and receives a CRLF
+      # first, it should ignore the CRLF."
+      # But only ignore one leading line! else we enable a DoS.
+      request_line = self.rfile.readline()
+      if not request_line:
         self.ready = False
-        self.started_request = False
-        self.started_response = False
-        self.status = ""
-        self.outheaders = []
-        self.sent_headers = False
-        self.close_connection = False
-        self.chunked_write = False
-
-    def parse_request(self):
-        """Parse the next HTTP request start-line and message-headers."""
-        self.rfile.maxlen = self.max_request_header_size
-        self.rfile.bytes_read = 0
+        return
 
-        try:
-            self._parse_request()
-        except MaxSizeExceeded:
-            self.simple_response("413 Request Entity Too Large")
-            return
-
-    def _parse_request(self):
-        # HTTP/1.1 connections are persistent by default. If a client
-        # requests a page, then idles (leaves the connection open),
-        # then rfile.readline() will raise socket.error("timed out").
-        # Note that it does this based on the value given to settimeout(),
-        # and doesn't need the client to request or acknowledge the close
-        # (although your TCP stack might suffer for it: cf Apache's history
-        # with FIN_WAIT_2).
-        request_line = self.rfile.readline()
-        # Set started_request to True so communicate() knows to send 408
-        # from here on out.
-        self.started_request = True
-        if not request_line:
-            # Force self.ready = False so the connection will close.
-            self.ready = False
-            return
+    environ = self.environ
 
-        if request_line == "\r\n":
-            # RFC 2616 sec 4.1: "...if the server is reading the protocol
-            # stream at the beginning of a message and receives a CRLF
-            # first, it should ignore the CRLF."
-            # But only ignore one leading line! else we enable a DoS.
-            request_line = self.rfile.readline()
-            if not request_line:
-                self.ready = False
-                return
+    try:
+      method, path, req_protocol = request_line.strip().split(" ", 2)
+    except ValueError:
+      self.simple_response(400, "Malformed Request-Line")
+      return
+
+    environ["REQUEST_METHOD"] = method
+
+    # path may be an abs_path (including "http://host.domain.tld");
+    scheme, location, path, params, qs, frag = lib_urlparse(path)
+
+    if frag:
+      self.simple_response("400 Bad Request",
+                 "Illegal #fragment in Request-URI.")
+      return
+
+    if scheme:
+      environ["wsgi.url_scheme"] = scheme
+    if params:
+      path = path + ";" + params
+
+    environ["SCRIPT_NAME"] = ""
+
+    # Unquote the path+params (e.g. "/this%20path" -> "this path").
+    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
+    #
+    # But note that "...a URI must be separated into its components
+    # before the escaped characters within those components can be
+    # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
+    atoms = [urllib_unquote(x) for x in quoted_slash.split(path)]
+    path = "%2F".join(atoms)
+    environ["PATH_INFO"] = path
+
+    # Note that, like wsgiref and most other WSGI servers,
+    # we unquote the path but not the query string.
+    environ["QUERY_STRING"] = qs
+
+    # Compare request and server HTTP protocol versions, in case our
+    # server does not support the requested protocol. Limit our output
+    # to min(req, server). We want the following output:
+    #     request    server     actual written   supported response
+    #     protocol   protocol  response protocol    feature set
+    # a     1.0        1.0           1.0                1.0
+    # b     1.0        1.1           1.1                1.0
+    # c     1.1        1.0           1.0                1.0
+    # d     1.1        1.1           1.1                1.1
+    # Notice that, in (b), the response will be "HTTP/1.1" even though
+    # the client only understands 1.0. RFC 2616 10.5.6 says we should
+    # only return 505 if the _major_ version is different.
+    rp = int(req_protocol[5]), int(req_protocol[7])
+    server_protocol = environ["ACTUAL_SERVER_PROTOCOL"]
+    sp = int(server_protocol[5]), int(server_protocol[7])
+    if sp[0] != rp[0]:
+      self.simple_response("505 HTTP Version Not Supported")
+      return
+    # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
+    environ["SERVER_PROTOCOL"] = req_protocol
+    self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
+
+    # If the Request-URI was an absoluteURI, use its location atom.
+    if location:
+      environ["SERVER_NAME"] = location
+
+    # then all the http headers
+    try:
+      self.read_headers()
+    except ValueError as ex:
+      self.simple_response("400 Bad Request", repr(ex.args))
+      return
+
+    mrbs = self.max_request_body_size
+    if mrbs and int(environ.get("CONTENT_LENGTH", 0)) > mrbs:
+      self.simple_response("413 Request Entity Too Large")
+      return
+
+    # Persistent connection support
+    if self.response_protocol == "HTTP/1.1":
+      # Both server and client are HTTP/1.1
+      if environ.get("HTTP_CONNECTION", "") == "close":
+        self.close_connection = True
+    else:
+      # Either the server or client (or both) are HTTP/1.0
+      if environ.get("HTTP_CONNECTION", "") != "Keep-Alive":
+        self.close_connection = True
+
+    # Transfer-Encoding support
+    te = None
+    if self.response_protocol == "HTTP/1.1":
+      te = environ.get("HTTP_TRANSFER_ENCODING")
+      if te:
+        te = [x.strip().lower() for x in te.split(",") if x.strip()]
+
+    self.chunked_read = False
+
+    if te:
+      for enc in te:
+        if enc == "chunked":
+          self.chunked_read = True
+        else:
+          # Note that, even if we see "chunked", we must reject
+          # if there is an extension we don't recognize.
+          self.simple_response("501 Unimplemented")
+          self.close_connection = True
+          return
+
+    # From PEP 333:
+    # "Servers and gateways that implement HTTP 1.1 must provide
+    # transparent support for HTTP 1.1's "expect/continue" mechanism.
+    # This may be done in any of several ways:
+    #   1. Respond to requests containing an Expect: 100-continue request
+    #      with an immediate "100 Continue" response, and proceed normally.
+    #   2. Proceed with the request normally, but provide the application
+    #      with a wsgi.input stream that will send the "100 Continue"
+    #      response if/when the application first attempts to read from
+    #      the input stream. The read request must then remain blocked
+    #      until the client responds.
+    #   3. Wait until the client decides that the server does not support
+    #      expect/continue, and sends the request body on its own.
+    #      (This is suboptimal, and is not recommended.)
+    #
+    # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
+    # but it seems like it would be a big slowdown for such a rare case.
+    if environ.get("HTTP_EXPECT", "") == "100-continue":
+      self.simple_response(100)
+
+    self.ready = True
+
+  def read_headers(self):
+    """Read header lines from the incoming stream."""
+    environ = self.environ
 
-        environ = self.environ
+    while True:
+      line = self.rfile.readline()
+      if not line:
+        # No more data--illegal end of headers
+        raise ValueError("Illegal end of headers.")
+
+      if line == '\r\n':
+        # Normal end of headers
+        break
+
+      if line[0] in ' \t':
+        # It's a continuation line.
+        v = line.strip()
+      else:
+        k, v = line.split(":", 1)
+        k, v = k.strip().upper(), v.strip()
+        envname = "HTTP_" + k.replace("-", "_")
+
+      if k in comma_separated_headers:
+        existing = environ.get(envname)
+        if existing:
+          v = ", ".join((existing, v))
+      environ[envname] = v
+
+    ct = environ.pop("HTTP_CONTENT_TYPE", None)
+    if ct is not None:
+      environ["CONTENT_TYPE"] = ct
+    cl = environ.pop("HTTP_CONTENT_LENGTH", None)
+    if cl is not None:
+      environ["CONTENT_LENGTH"] = cl
+
+  def decode_chunked(self):
+    """Decode the 'chunked' transfer coding."""
+    cl = 0
+    data = string_io()
+    while True:
+      line = self.rfile.readline().strip().split(";", 1)
+      chunk_size = int(line.pop(0), 16)
+      if chunk_size <= 0:
+        break
+##            if line: chunk_extension = line[0]
+      cl += chunk_size
+      data.write(self.rfile.read(chunk_size))
+      crlf = self.rfile.read(2)
+      if crlf != "\r\n":
+        self.simple_response("400 Bad Request",
+                   "Bad chunked transfer coding "
+                   "(expected '\\r\\n', got %r)" % crlf)
+        return
+
+    # Grab any trailer headers
+    self.read_headers()
+
+    data.seek(0)
+    self.environ["wsgi.input"] = data
+    self.environ["CONTENT_LENGTH"] = str(cl) or ""
+    return True
+
+  def respond(self):
+    """Call the appropriate WSGI app and write its iterable output."""
+    # Set rfile.maxlen to ensure we don't read past Content-Length.
+    # This will also be used to read the entire request body if errors
+    # are raised before the app can read the body.
+    if self.chunked_read:
+      # If chunked, Content-Length will be 0.
+      self.rfile.maxlen = self.max_request_body_size
+    else:
+      cl = int(self.environ.get("CONTENT_LENGTH", 0))
+      if self.max_request_body_size:
+        self.rfile.maxlen = min(cl, self.max_request_body_size)
+      else:
+        self.rfile.maxlen = cl
+    self.rfile.bytes_read = 0
 
-        try:
-            method, path, req_protocol = request_line.strip().split(" ", 2)
-        except ValueError:
-            self.simple_response(400, "Malformed Request-Line")
-            return
+    try:
+      self._respond()
+    except MaxSizeExceeded:
+      if not self.sent_headers:
+        self.simple_response("413 Request Entity Too Large")
+      return
+
+  def _respond(self):
+    if self.chunked_read:
+      if not self.decode_chunked():
+        self.close_connection = True
+        return
+
+    response = self.wsgi_app(self.environ, self.start_response)
+    try:
+      for chunk in response:
+        # "The start_response callable must not actually transmit
+        # the response headers. Instead, it must store them for the
+        # server or gateway to transmit only after the first
+        # iteration of the application return value that yields
+        # a NON-EMPTY string, or upon the application's first
+        # invocation of the write() callable." (PEP 333)
+        if chunk:
+          self.write(chunk)
+    finally:
+      if hasattr(response, "close"):
+        response.close()
+
+    if (self.ready and not self.sent_headers):
+      self.sent_headers = True
+      self.send_headers()
+    if self.chunked_write:
+      self.wfile.sendall("0\r\n\r\n")
+
+  def simple_response(self, status, msg=""):
+    """Write a simple response back to the client."""
+    status = str(status)
+    buf = ["%s %s\r\n" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),
+         "Content-Length: %s\r\n" % len(msg),
+         "Content-Type: text/plain\r\n"]
+
+    if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
+      # Request Entity Too Large
+      self.close_connection = True
+      buf.append("Connection: close\r\n")
+
+    buf.append("\r\n")
+    if msg:
+      buf.append(msg)
 
-        environ["REQUEST_METHOD"] = method
+    try:
+      self.wfile.sendall("".join(buf))
+    except socket.error as x:
+      if x.args[0] not in socket_errors_to_ignore:
+        raise
+
+  def start_response(self, status, headers, exc_info = None):
+    """WSGI callable to begin the HTTP response."""
+    # "The application may call start_response more than once,
+    # if and only if the exc_info argument is provided."
+    if self.started_response and not exc_info:
+      raise AssertionError("WSGI start_response called a second "
+                 "time with no exc_info.")
+
+    # "if exc_info is provided, and the HTTP headers have already been
+    # sent, start_response must raise an error, and should raise the
+    # exc_info tuple."
+    if self.sent_headers:
+      try:
+        raise (exc_info[0], exc_info[1], exc_info[2])
+      finally:
+        exc_info = None
+
+    self.started_response = True
+    self.status = status
+    self.outheaders.extend(headers)
+    return self.write
+
+  def write(self, chunk):
+    """WSGI callable to write unbuffered data to the client.
+
+    This method is also used internally by start_response (to write
+    data from the iterable returned by the WSGI application).
+    """
+    if not self.started_response:
+      raise AssertionError("WSGI write called before start_response.")
 
-        # path may be an abs_path (including "http://host.domain.tld");
-        scheme, location, path, params, qs, frag = lib_urlparse(path)
+    if not self.sent_headers:
+      self.sent_headers = True
+      self.send_headers()
 
-        if frag:
-            self.simple_response("400 Bad Request",
-                                 "Illegal #fragment in Request-URI.")
-            return
+    if self.chunked_write and chunk:
+      buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
+      self.wfile.sendall("".join(buf))
+    else:
+      self.wfile.sendall(chunk)
+
+  def send_headers(self):
+    """Assert, process, and send the HTTP response message-headers."""
+    hkeys = [key.lower() for key, value in self.outheaders]
+    status = int(self.status[:3])
+
+    if status == 413:
+      # Request Entity Too Large. Close conn to avoid garbage.
+      self.close_connection = True
+    elif "content-length" not in hkeys:
+      # "All 1xx (informational), 204 (no content),
+      # and 304 (not modified) responses MUST NOT
+      # include a message-body." So no point chunking.
+      if status < 200 or status in (204, 205, 304):
+        pass
+      else:
+        if (self.response_protocol == 'HTTP/1.1'
+          and self.environ["REQUEST_METHOD"] != 'HEAD'):
+          # Use the chunked transfer-coding
+          self.chunked_write = True
+          self.outheaders.append(("Transfer-Encoding", "chunked"))
+        else:
+          # Closing the conn is the only way to determine len.
+          self.close_connection = True
+
+    if "connection" not in hkeys:
+      if self.response_protocol == 'HTTP/1.1':
+        # Both server and client are HTTP/1.1 or better
+        if self.close_connection:
+          self.outheaders.append(("Connection", "close"))
+      else:
+        # Server and/or client are HTTP/1.0
+        if not self.close_connection:
+          self.outheaders.append(("Connection", "Keep-Alive"))
+
+    if (not self.close_connection) and (not self.chunked_read):
+      # Read any remaining request body data on the socket.
+      # "If an origin server receives a request that does not include an
+      # Expect request-header field with the "100-continue" expectation,
+      # the request includes a request body, and the server responds
+      # with a final status code before reading the entire request body
+      # from the transport connection, then the server SHOULD NOT close
+      # the transport connection until it has read the entire request,
+      # or until the client closes the connection. Otherwise, the client
+      # might not reliably receive the response message. However, this
+      # requirement is not be construed as preventing a server from
+      # defending itself against denial-of-service attacks, or from
+      # badly broken client implementations."
+      size = self.rfile.maxlen - self.rfile.bytes_read
+      if size > 0:
+        self.rfile.read(size)
+
+    if "date" not in hkeys:
+      self.outheaders.append(("Date", formatdate()))
+
+    if "server" not in hkeys:
+      self.outheaders.append(("Server", self.environ['SERVER_SOFTWARE']))
+
+    buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], " ", self.status, "\r\n"]
+    try:
+      buf += [k + ": " + v + "\r\n" for k, v in self.outheaders]
+    except TypeError:
+      if not isinstance(k, str):
+        raise TypeError("WSGI response header key %r is not a string.")
+      if not isinstance(v, str):
+        raise TypeError("WSGI response header value %r is not a string.")
+      else:
+        raise
+    buf.append("\r\n")
+    self.wfile.sendall("".join(buf))
 
-        if scheme:
-            environ["wsgi.url_scheme"] = scheme
-        if params:
-            path = path + ";" + params
-
-        environ["SCRIPT_NAME"] = ""
-
-        # Unquote the path+params (e.g. "/this%20path" -> "this path").
-        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
-        #
-        # But note that "...a URI must be separated into its components
-        # before the escaped characters within those components can be
-        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
-        atoms = [urllib_unquote(x) for x in quoted_slash.split(path)]
-        path = "%2F".join(atoms)
-        environ["PATH_INFO"] = path
-
-        # Note that, like wsgiref and most other WSGI servers,
-        # we unquote the path but not the query string.
-        environ["QUERY_STRING"] = qs
-
-        # Compare request and server HTTP protocol versions, in case our
-        # server does not support the requested protocol. Limit our output
-        # to min(req, server). We want the following output:
-        #     request    server     actual written   supported response
-        #     protocol   protocol  response protocol    feature set
-        # a     1.0        1.0           1.0                1.0
-        # b     1.0        1.1           1.1                1.0
-        # c     1.1        1.0           1.0                1.0
-        # d     1.1        1.1           1.1                1.1
-        # Notice that, in (b), the response will be "HTTP/1.1" even though
-        # the client only understands 1.0. RFC 2616 10.5.6 says we should
-        # only return 505 if the _major_ version is different.
-        rp = int(req_protocol[5]), int(req_protocol[7])
-        server_protocol = environ["ACTUAL_SERVER_PROTOCOL"]
-        sp = int(server_protocol[5]), int(server_protocol[7])
-        if sp[0] != rp[0]:
-            self.simple_response("505 HTTP Version Not Supported")
-            return
-        # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
-        environ["SERVER_PROTOCOL"] = req_protocol
-        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
 
-        # If the Request-URI was an absoluteURI, use its location atom.
-        if location:
-            environ["SERVER_NAME"] = location
+class NoSSLError(Exception):
+  """Exception raised when a client speaks HTTP to an HTTPS socket."""
+  pass
 
-        # then all the http headers
-        try:
-            self.read_headers()
-        except ValueError as ex:
-            self.simple_response("400 Bad Request", repr(ex.args))
-            return
 
-        mrbs = self.max_request_body_size
-        if mrbs and int(environ.get("CONTENT_LENGTH", 0)) > mrbs:
-            self.simple_response("413 Request Entity Too Large")
-            return
+class FatalSSLAlert(Exception):
+  """Exception raised when the SSL implementation signals a fatal alert."""
+  pass
 
-        # Persistent connection support
-        if self.response_protocol == "HTTP/1.1":
-            # Both server and client are HTTP/1.1
-            if environ.get("HTTP_CONNECTION", "") == "close":
-                self.close_connection = True
-        else:
-            # Either the server or client (or both) are HTTP/1.0
-            if environ.get("HTTP_CONNECTION", "") != "Keep-Alive":
-                self.close_connection = True
-
-        # Transfer-Encoding support
-        te = None
-        if self.response_protocol == "HTTP/1.1":
-            te = environ.get("HTTP_TRANSFER_ENCODING")
-            if te:
-                te = [x.strip().lower() for x in te.split(",") if x.strip()]
-
-        self.chunked_read = False
-
-        if te:
-            for enc in te:
-                if enc == "chunked":
-                    self.chunked_read = True
-                else:
-                    # Note that, even if we see "chunked", we must reject
-                    # if there is an extension we don't recognize.
-                    self.simple_response("501 Unimplemented")
-                    self.close_connection = True
-                    return
-
-        # From PEP 333:
-        # "Servers and gateways that implement HTTP 1.1 must provide
-        # transparent support for HTTP 1.1's "expect/continue" mechanism.
-        # This may be done in any of several ways:
-        #   1. Respond to requests containing an Expect: 100-continue request
-        #      with an immediate "100 Continue" response, and proceed normally.
-        #   2. Proceed with the request normally, but provide the application
-        #      with a wsgi.input stream that will send the "100 Continue"
-        #      response if/when the application first attempts to read from
-        #      the input stream. The read request must then remain blocked
-        #      until the client responds.
-        #   3. Wait until the client decides that the server does not support
-        #      expect/continue, and sends the request body on its own.
-        #      (This is suboptimal, and is not recommended.)
-        #
-        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
-        # but it seems like it would be a big slowdown for such a rare case.
-        if environ.get("HTTP_EXPECT", "") == "100-continue":
-            self.simple_response(100)
-
-        self.ready = True
-
-    def read_headers(self):
-        """Read header lines from the incoming stream."""
-        environ = self.environ
 
-        while True:
-            line = self.rfile.readline()
-            if not line:
-                # No more data--illegal end of headers
-                raise ValueError("Illegal end of headers.")
-
-            if line == '\r\n':
-                # Normal end of headers
-                break
-
-            if line[0] in ' \t':
-                # It's a continuation line.
-                v = line.strip()
-            else:
-                k, v = line.split(":", 1)
-                k, v = k.strip().upper(), v.strip()
-                envname = "HTTP_" + k.replace("-", "_")
-
-            if k in comma_separated_headers:
-                existing = environ.get(envname)
-                if existing:
-                    v = ", ".join((existing, v))
-            environ[envname] = v
-
-        ct = environ.pop("HTTP_CONTENT_TYPE", None)
-        if ct is not None:
-            environ["CONTENT_TYPE"] = ct
-        cl = environ.pop("HTTP_CONTENT_LENGTH", None)
-        if cl is not None:
-            environ["CONTENT_LENGTH"] = cl
-
-    def decode_chunked(self):
-        """Decode the 'chunked' transfer coding."""
-        cl = 0
-        data = string_io()
-        while True:
-            line = self.rfile.readline().strip().split(";", 1)
-            chunk_size = int(line.pop(0), 16)
-            if chunk_size <= 0:
-                break
-##            if line: chunk_extension = line[0]
-            cl += chunk_size
-            data.write(self.rfile.read(chunk_size))
-            crlf = self.rfile.read(2)
-            if crlf != "\r\n":
-                self.simple_response("400 Bad Request",
-                                     "Bad chunked transfer coding "
-                                     "(expected '\\r\\n', got %r)" % crlf)
-                return
-
-        # Grab any trailer headers
-        self.read_headers()
-
-        data.seek(0)
-        self.environ["wsgi.input"] = data
-        self.environ["CONTENT_LENGTH"] = str(cl) or ""
-        return True
-
-    def respond(self):
-        """Call the appropriate WSGI app and write its iterable output."""
-        # Set rfile.maxlen to ensure we don't read past Content-Length.
-        # This will also be used to read the entire request body if errors
-        # are raised before the app can read the body.
-        if self.chunked_read:
-            # If chunked, Content-Length will be 0.
-            self.rfile.maxlen = self.max_request_body_size
-        else:
-            cl = int(self.environ.get("CONTENT_LENGTH", 0))
-            if self.max_request_body_size:
-                self.rfile.maxlen = min(cl, self.max_request_body_size)
-            else:
-                self.rfile.maxlen = cl
-        self.rfile.bytes_read = 0
+if not _fileobject_uses_str_type:
+  class CP_fileobject(socket._fileobject):
+    """Faux file object attached to a socket object."""
 
+    def sendall(self, data):
+      """Sendall for non-blocking sockets."""
+      while data:
         try:
-            self._respond()
-        except MaxSizeExceeded:
-            if not self.sent_headers:
-                self.simple_response("413 Request Entity Too Large")
-            return
+          bytes_sent = self.send(data)
+          data = data[bytes_sent:]
+        except socket.error as e:
+          if e.args[0] not in socket_errors_nonblocking:
+            raise
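The `sendall` above retries whenever the non-blocking socket reports EAGAIN/EWOULDBLOCK, but it retries immediately, in a tight loop. A standalone sketch of the same pattern, with a `select()` wait added so the loop yields the CPU (the wait is an addition of mine, not something this class does):

import errno
import select
import socket

def sendall_nonblocking(sock, data, timeout=10.0):
  """Send all of `data` on a non-blocking socket, retrying on EAGAIN."""
  while data:
    try:
      sent = sock.send(data)
      data = data[sent:]
    except socket.error as e:
      if e.args[0] not in (errno.EAGAIN, errno.EWOULDBLOCK):
        raise
      # Block until the socket is writable instead of spinning.
      select.select([], [sock], [], timeout)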
 
-    def _respond(self):
-        if self.chunked_read:
-            if not self.decode_chunked():
-                self.close_connection = True
-                return
+    def send(self, data):
+      return self._sock.send(data)
 
-        response = self.wsgi_app(self.environ, self.start_response)
-        try:
-            for chunk in response:
-                # "The start_response callable must not actually transmit
-                # the response headers. Instead, it must store them for the
-                # server or gateway to transmit only after the first
-                # iteration of the application return value that yields
-                # a NON-EMPTY string, or upon the application's first
-                # invocation of the write() callable." (PEP 333)
-                if chunk:
-                    self.write(chunk)
-        finally:
-            if hasattr(response, "close"):
-                response.close()
-
-        if (self.ready and not self.sent_headers):
-            self.sent_headers = True
-            self.send_headers()
-        if self.chunked_write:
-            self.wfile.sendall("0\r\n\r\n")
-
-    def simple_response(self, status, msg=""):
-        """Write a simple response back to the client."""
-        status = str(status)
-        buf = ["%s %s\r\n" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),
-               "Content-Length: %s\r\n" % len(msg),
-               "Content-Type: text/plain\r\n"]
-
-        if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
-            # Request Entity Too Large
-            self.close_connection = True
-            buf.append("Connection: close\r\n")
-
-        buf.append("\r\n")
-        if msg:
-            buf.append(msg)
+    def flush(self):
+      if self._wbuf:
+        buffer = "".join(self._wbuf)
+        self._wbuf = []
+        self.sendall(buffer)
 
+    def recv(self, size):
+      while True:
         try:
-            self.wfile.sendall("".join(buf))
-        except socket.error as x:
-            if x.args[0] not in socket_errors_to_ignore:
-                raise
-
-    def start_response(self, status, headers, exc_info = None):
-        """WSGI callable to begin the HTTP response."""
-        # "The application may call start_response more than once,
-        # if and only if the exc_info argument is provided."
-        if self.started_response and not exc_info:
-            raise AssertionError("WSGI start_response called a second "
-                                 "time with no exc_info.")
-
-        # "if exc_info is provided, and the HTTP headers have already been
-        # sent, start_response must raise an error, and should raise the
-        # exc_info tuple."
-        if self.sent_headers:
-            try:
-                raise exc_info[0], exc_info[1], exc_info[2]
-            finally:
-                exc_info = None
-
-        self.started_response = True
-        self.status = status
-        self.outheaders.extend(headers)
-        return self.write
-
-    def write(self, chunk):
-        """WSGI callable to write unbuffered data to the client.
-
-        This method is also used internally by start_response (to write
-        data from the iterable returned by the WSGI application).
-        """
-        if not self.started_response:
-            raise AssertionError("WSGI write called before start_response.")
-
-        if not self.sent_headers:
-            self.sent_headers = True
-            self.send_headers()
-
-        if self.chunked_write and chunk:
-            buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
-            self.wfile.sendall("".join(buf))
-        else:
-            self.wfile.sendall(chunk)
-
-    def send_headers(self):
-        """Assert, process, and send the HTTP response message-headers."""
-        hkeys = [key.lower() for key, value in self.outheaders]
-        status = int(self.status[:3])
-
-        if status == 413:
-            # Request Entity Too Large. Close conn to avoid garbage.
-            self.close_connection = True
-        elif "content-length" not in hkeys:
-            # "All 1xx (informational), 204 (no content),
-            # and 304 (not modified) responses MUST NOT
-            # include a message-body." So no point chunking.
-            if status < 200 or status in (204, 205, 304):
-                pass
-            else:
-                if (self.response_protocol == 'HTTP/1.1'
-                    and self.environ["REQUEST_METHOD"] != 'HEAD'):
-                    # Use the chunked transfer-coding
-                    self.chunked_write = True
-                    self.outheaders.append(("Transfer-Encoding", "chunked"))
-                else:
-                    # Closing the conn is the only way to determine len.
-                    self.close_connection = True
-
-        if "connection" not in hkeys:
-            if self.response_protocol == 'HTTP/1.1':
-                # Both server and client are HTTP/1.1 or better
-                if self.close_connection:
-                    self.outheaders.append(("Connection", "close"))
-            else:
-                # Server and/or client are HTTP/1.0
-                if not self.close_connection:
-                    self.outheaders.append(("Connection", "Keep-Alive"))
-
-        if (not self.close_connection) and (not self.chunked_read):
-            # Read any remaining request body data on the socket.
-            # "If an origin server receives a request that does not include an
-            # Expect request-header field with the "100-continue" expectation,
-            # the request includes a request body, and the server responds
-            # with a final status code before reading the entire request body
-            # from the transport connection, then the server SHOULD NOT close
-            # the transport connection until it has read the entire request,
-            # or until the client closes the connection. Otherwise, the client
-            # might not reliably receive the response message. However, this
-            # requirement is not be construed as preventing a server from
-            # defending itself against denial-of-service attacks, or from
-            # badly broken client implementations."
-            size = self.rfile.maxlen - self.rfile.bytes_read
-            if size > 0:
-                self.rfile.read(size)
-
-        if "date" not in hkeys:
-            self.outheaders.append(("Date", formatdate()))
-
-        if "server" not in hkeys:
-            self.outheaders.append(("Server", self.environ['SERVER_SOFTWARE']))
-
-        buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], " ", self.status, "\r\n"]
-        try:
-            buf += [k + ": " + v + "\r\n" for k, v in self.outheaders]
-        except TypeError:
-            if not isinstance(k, str):
-                raise TypeError("WSGI response header key %r is not a string.")
-            if not isinstance(v, str):
-                raise TypeError("WSGI response header value %r is not a string.")
+          return self._sock.recv(size)
+        except socket.error as e:
+          if (e.args[0] not in socket_errors_nonblocking
+              and e.args[0] not in socket_error_eintr):
+            raise
+
+    def read(self, size=-1):
+      # Use max to disallow tiny reads in a loop, as they are very inefficient.
+      # We never leave read() with any leftover data from a new recv() call
+      # in our internal buffer.
+      rbufsize = max(self._rbufsize, self.default_bufsize)
+      # Our use of StringIO rather than lists of string objects returned by
+      # recv() minimizes memory usage and fragmentation that occurs when
+      # rbufsize is large compared to the typical return value of recv().
+      buf = self._rbuf
+      buf.seek(0, 2)  # seek end
+      if size < 0:
+        # Read until EOF
+        self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
+        while True:
+          data = self.recv(rbufsize)
+          if not data:
+            break
+          buf.write(data)
+        return buf.getvalue()
+      else:
+        # Read until size bytes or EOF seen, whichever comes first
+        buf_len = buf.tell()
+        if buf_len >= size:
+          # Already have size bytes in our buffer?  Extract and return.
+          buf.seek(0)
+          rv = buf.read(size)
+          self._rbuf = string_io()
+          self._rbuf.write(buf.read())
+          return rv
+
+        self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
+        while True:
+          left = size - buf_len
+          # recv() will malloc the amount of memory given as its
+          # parameter even though it often returns much less data
+          # than that.  The returned data string is short lived
+          # as we copy it into a StringIO and free it.  This avoids
+          # fragmentation issues on many platforms.
+          data = self.recv(left)
+          if not data:
+            break
+          n = len(data)
+          if n == size and not buf_len:
+            # Shortcut.  Avoid buffer data copies when:
+            # - We have no data in our buffer.
+            # AND
+            # - Our call to recv returned exactly the
+            #   number of bytes we were asked to read.
+            return data
+          if n == left:
+            buf.write(data)
+            del data  # explicit free
+            break
+          # NOTE: (HUE-2893) This was backported from CherryPy PR
+          # #14, which fixes uploading chunked files with SSL.
+          elif n > left:
+            # Could happen with SSL transport. Defer the
+            # extra data read to the next call.
+            buf.write(data[:left])
+            self._rbuf.write(data[left:])
+            del data
+            break
+          buf.write(data)
+          buf_len += n
+          del data  # explicit free
+          #assert buf_len == buf.tell()
+        return buf.getvalue()
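The `elif n > left` branch above (the HUE-2893 backport) exists because an SSL-wrapped `recv()` can hand back more bytes than were requested once a TLS record has been decrypted; the surplus must be parked in `self._rbuf` or it would be lost. A toy model of that defer-to-buffer bookkeeping, with socket I/O replaced by a callable (all names here are mine):

from io import BytesIO

class DeferredReader(object):
  """Reads from recv_fn, which (like an SSL recv) may return more than asked."""
  def __init__(self, recv_fn):
    self.recv = recv_fn
    self._rbuf = b""

  def read(self, size):
    buf = BytesIO()
    buf.write(self._rbuf[:size])
    extra = self._rbuf[size:]
    while buf.tell() < size:
      data = self.recv(size - buf.tell())
      if not data:
        break
      left = size - buf.tell()
      buf.write(data[:left])
      extra += data[left:]  # defer the surplus to the next call
    self._rbuf = extra
    return buf.getvalue()

chunks = iter([b"hello wor", b"ld"])
r = DeferredReader(lambda n: next(chunks, b""))
assert r.read(5) == b"hello"   # the surplus b" wor" is kept, not lost
assert r.read(6) == b" world"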
+
+    def readline(self, size=-1):
+      buf = self._rbuf
+      buf.seek(0, 2)  # seek end
+      if buf.tell() > 0:
+        # check if we already have it in our buffer
+        buf.seek(0)
+        bline = buf.readline(size)
+        if bline.endswith('\n') or len(bline) == size:
+          self._rbuf = string_io()
+          self._rbuf.write(buf.read())
+          return bline
+        del bline
+      if size < 0:
+        # Read until \n or EOF, whichever comes first
+        if self._rbufsize <= 1:
+          # Speed up unbuffered case
+          buf.seek(0)
+          buffers = [buf.read()]
+          self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
+          data = None
+          recv = self.recv
+          while data != "\n":
+            data = recv(1)
+            if not data:
+              break
+            buffers.append(data)
+          return "".join(buffers)
+
+        buf.seek(0, 2)  # seek end
+        self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
+        while True:
+          data = self.recv(self._rbufsize)
+          if not data:
+            break
+          nl = data.find('\n')
+          if nl >= 0:
+            nl += 1
+            buf.write(data[:nl])
+            self._rbuf.write(data[nl:])
+            del data
+            break
+          buf.write(data)
+        return buf.getvalue()
+      else:
+        # Read until size bytes or \n or EOF seen, whichever comes first
+        buf.seek(0, 2)  # seek end
+        buf_len = buf.tell()
+        if buf_len >= size:
+          buf.seek(0)
+          rv = buf.read(size)
+          self._rbuf = string_io()
+          self._rbuf.write(buf.read())
+          return rv
+        self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
+        while True:
+          data = self.recv(self._rbufsize)
+          if not data:
+            break
+          left = size - buf_len
+          # did we just receive a newline?
+          nl = data.find('\n', 0, left)
+          if nl >= 0:
+            nl += 1
+            # save the excess data to _rbuf
+            self._rbuf.write(data[nl:])
+            if buf_len:
+              buf.write(data[:nl])
+              break
             else:
-                raise
-        buf.append("\r\n")
-        self.wfile.sendall("".join(buf))
+              # Shortcut.  Avoid data copy through buf when returning
+              # a substring of our first recv().
+              return data[:nl]
+          n = len(data)
+          if n == size and not buf_len:
+            # Shortcut.  Avoid data copy through buf when
+            # returning exactly all of our first recv().
+            return data
+          if n >= left:
+            buf.write(data[:left])
+            self._rbuf.write(data[left:])
+            break
+          buf.write(data)
+          buf_len += n
+          #assert buf_len == buf.tell()
+        return buf.getvalue()
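The `else:` branch below serves older Pythons: the two CP_fileobject variants exist because `socket._fileobject` changed its internal read buffer `_rbuf` from a plain string (Python <= 2.5) to a StringIO (Python 2.6+). The `_fileobject_uses_str_type` flag is defined earlier in the module; it is presumably derived along these lines (a sketch, since the definition is outside this hunk):

import socket

try:
  # Instantiating with a dummy socket is enough to inspect the buffer type.
  _fileobject_uses_str_type = isinstance(
    socket._fileobject(None)._rbuf, basestring)
except (AttributeError, NameError):
  # Python 3 has neither socket._fileobject nor basestring.
  _fileobject_uses_str_type = False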
 
+else:
+  class CP_fileobject(socket._fileobject):
+    """Faux file object attached to a socket object."""
 
-class NoSSLError(Exception):
-    """Exception raised when a client speaks HTTP to an HTTPS socket."""
-    pass
+    def sendall(self, data):
+      """Sendall for non-blocking sockets."""
+      while data:
+        try:
+          bytes_sent = self.send(data)
+          data = data[bytes_sent:]
+        except socket.error as e:
+          if e.args[0] not in socket_errors_nonblocking:
+            raise
 
+    def send(self, data):
+      return self._sock.send(data)
 
-class FatalSSLAlert(Exception):
-    """Exception raised when the SSL implementation signals a fatal alert."""
-    pass
+    def flush(self):
+      if self._wbuf:
+        buffer = "".join(self._wbuf)
+        self._wbuf = []
+        self.sendall(buffer)
 
+    def recv(self, size):
+      while True:
+        try:
+          return self._sock.recv(size)
+        except socket.error as e:
+          if (e.args[0] not in socket_errors_nonblocking
+              and e.args[0] not in socket_error_eintr):
+            raise
 
-if not _fileobject_uses_str_type:
-    class CP_fileobject(socket._fileobject):
-        """Faux file object attached to a socket object."""
-
-        def sendall(self, data):
-            """Sendall for non-blocking sockets."""
-            while data:
-                try:
-                    bytes_sent = self.send(data)
-                    data = data[bytes_sent:]
-                except socket.error as e:
-                    if e.args[0] not in socket_errors_nonblocking:
-                        raise
-
-        def send(self, data):
-            return self._sock.send(data)
-
-        def flush(self):
-            if self._wbuf:
-                buffer = "".join(self._wbuf)
-                self._wbuf = []
-                self.sendall(buffer)
-
-        def recv(self, size):
-            while True:
-                try:
-                    return self._sock.recv(size)
-                except socket.error as e:
-                    if (e.args[0] not in socket_errors_nonblocking
-                        and e.args[0] not in socket_error_eintr):
-                        raise
-
-        def read(self, size=-1):
-            # Use max, disallow tiny reads in a loop as they are very inefficient.
-            # We never leave read() with any leftover data from a new recv() call
-            # in our internal buffer.
-            rbufsize = max(self._rbufsize, self.default_bufsize)
-            # Our use of StringIO rather than lists of string objects returned by
-            # recv() minimizes memory usage and fragmentation that occurs when
-            # rbufsize is large compared to the typical return value of recv().
-            buf = self._rbuf
-            buf.seek(0, 2)  # seek end
-            if size < 0:
-                # Read until EOF
-                self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
-                while True:
-                    data = self.recv(rbufsize)
-                    if not data:
-                        break
-                    buf.write(data)
-                return buf.getvalue()
-            else:
-                # Read until size bytes or EOF seen, whichever comes first
-                buf_len = buf.tell()
-                if buf_len >= size:
-                    # Already have size bytes in our buffer?  Extract and return.
-                    buf.seek(0)
-                    rv = buf.read(size)
-                    self._rbuf = string_io()
-                    self._rbuf.write(buf.read())
-                    return rv
-
-                self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
-                while True:
-                    left = size - buf_len
-                    # recv() will malloc the amount of memory given as its
-                    # parameter even though it often returns much less data
-                    # than that.  The returned data string is short lived
-                    # as we copy it into a StringIO and free it.  This avoids
-                    # fragmentation issues on many platforms.
-                    data = self.recv(left)
-                    if not data:
-                        break
-                    n = len(data)
-                    if n == size and not buf_len:
-                        # Shortcut.  Avoid buffer data copies when:
-                        # - We have no data in our buffer.
-                        # AND
-                        # - Our call to recv returned exactly the
-                        #   number of bytes we were asked to read.
-                        return data
-                    if n == left:
-                        buf.write(data)
-                        del data  # explicit free
-                        break
-                    # NOTE: (HUE-2893) This was backported from CherryPy PR
-                    # #14, which fixes uploading chunked files with SSL.
-                    elif n > left:
-                        # Could happen with SSL transport. Defer the
-                        # extra data read to the next call.
-                        buf.write(data[:left])
-                        self._rbuf.write(data[left:])
-                        del data
-                        break
-                    buf.write(data)
-                    buf_len += n
-                    del data  # explicit free
-                    #assert buf_len == buf.tell()
-                return buf.getvalue()
-
-        def readline(self, size=-1):
-            buf = self._rbuf
-            buf.seek(0, 2)  # seek end
-            if buf.tell() > 0:
-                # check if we already have it in our buffer
-                buf.seek(0)
-                bline = buf.readline(size)
-                if bline.endswith('\n') or len(bline) == size:
-                    self._rbuf = string_io()
-                    self._rbuf.write(buf.read())
-                    return bline
-                del bline
-            if size < 0:
-                # Read until \n or EOF, whichever comes first
-                if self._rbufsize <= 1:
-                    # Speed up unbuffered case
-                    buf.seek(0)
-                    buffers = [buf.read()]
-                    self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
-                    data = None
-                    recv = self.recv
-                    while data != "\n":
-                        data = recv(1)
-                        if not data:
-                            break
-                        buffers.append(data)
-                    return "".join(buffers)
-
-                buf.seek(0, 2)  # seek end
-                self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
-                while True:
-                    data = self.recv(self._rbufsize)
-                    if not data:
-                        break
-                    nl = data.find('\n')
-                    if nl >= 0:
-                        nl += 1
-                        buf.write(data[:nl])
-                        self._rbuf.write(data[nl:])
-                        del data
-                        break
-                    buf.write(data)
-                return buf.getvalue()
-            else:
-                # Read until size bytes or \n or EOF seen, whichever comes first
-                buf.seek(0, 2)  # seek end
-                buf_len = buf.tell()
-                if buf_len >= size:
-                    buf.seek(0)
-                    rv = buf.read(size)
-                    self._rbuf = string_io()
-                    self._rbuf.write(buf.read())
-                    return rv
-                self._rbuf = string_io()  # reset _rbuf.  we consume it via buf.
-                while True:
-                    data = self.recv(self._rbufsize)
-                    if not data:
-                        break
-                    left = size - buf_len
-                    # did we just receive a newline?
-                    nl = data.find('\n', 0, left)
-                    if nl >= 0:
-                        nl += 1
-                        # save the excess data to _rbuf
-                        self._rbuf.write(data[nl:])
-                        if buf_len:
-                            buf.write(data[:nl])
-                            break
-                        else:
-                            # Shortcut.  Avoid data copy through buf when returning
-                            # a substring of our first recv().
-                            return data[:nl]
-                    n = len(data)
-                    if n == size and not buf_len:
-                        # Shortcut.  Avoid data copy through buf when
-                        # returning exactly all of our first recv().
-                        return data
-                    if n >= left:
-                        buf.write(data[:left])
-                        self._rbuf.write(data[left:])
-                        break
-                    buf.write(data)
-                    buf_len += n
-                    #assert buf_len == buf.tell()
-                return buf.getvalue()
+    def read(self, size=-1):
+      if size < 0:
+        # Read until EOF
+        buffers = [self._rbuf]
+        self._rbuf = ""
+        if self._rbufsize <= 1:
+          recv_size = self.default_bufsize
+        else:
+          recv_size = self._rbufsize
 
-else:
-    class CP_fileobject(socket._fileobject):
-        """Faux file object attached to a socket object."""
-
-        def sendall(self, data):
-            """Sendall for non-blocking sockets."""
-            while data:
-                try:
-                    bytes_sent = self.send(data)
-                    data = data[bytes_sent:]
-                except socket.error as e:
-                    if e.args[0] not in socket_errors_nonblocking:
-                        raise
-
-        def send(self, data):
-            return self._sock.send(data)
-
-        def flush(self):
-            if self._wbuf:
-                buffer = "".join(self._wbuf)
-                self._wbuf = []
-                self.sendall(buffer)
-
-        def recv(self, size):
-            while True:
-                try:
-                    return self._sock.recv(size)
-                except socket.error as e:
-                    if (e.args[0] not in socket_errors_nonblocking
-                        and e.args[0] not in socket_error_eintr):
-                        raise
-
-        def read(self, size=-1):
-            if size < 0:
-                # Read until EOF
-                buffers = [self._rbuf]
-                self._rbuf = ""
-                if self._rbufsize <= 1:
-                    recv_size = self.default_bufsize
-                else:
-                    recv_size = self._rbufsize
-
-                while True:
-                    data = self.recv(recv_size)
-                    if not data:
-                        break
-                    buffers.append(data)
-                return "".join(buffers)
-            else:
-                # Read until size bytes or EOF seen, whichever comes first
-                data = self._rbuf
-                buf_len = len(data)
-                if buf_len >= size:
-                    self._rbuf = data[size:]
-                    return data[:size]
-                buffers = []
-                if data:
-                    buffers.append(data)
-                self._rbuf = ""
-                while True:
-                    left = size - buf_len
-                    recv_size = max(self._rbufsize, left)
-                    data = self.recv(recv_size)
-                    if not data:
-                        break
-                    buffers.append(data)
-                    n = len(data)
-                    if n >= left:
-                        self._rbuf = data[left:]
-                        buffers[-1] = data[:left]
-                        break
-                    buf_len += n
-                return "".join(buffers)
-
-        def readline(self, size=-1):
-            data = self._rbuf
-            if size < 0:
-                # Read until \n or EOF, whichever comes first
-                if self._rbufsize <= 1:
-                    # Speed up unbuffered case
-                    assert data == ""
-                    buffers = []
-                    while data != "\n":
-                        data = self.recv(1)
-                        if not data:
-                            break
-                        buffers.append(data)
-                    return "".join(buffers)
-                nl = data.find('\n')
-                if nl >= 0:
-                    nl += 1
-                    self._rbuf = data[nl:]
-                    return data[:nl]
-                buffers = []
-                if data:
-                    buffers.append(data)
-                self._rbuf = ""
-                while True:
-                    data = self.recv(self._rbufsize)
-                    if not data:
-                        break
-                    buffers.append(data)
-                    nl = data.find('\n')
-                    if nl >= 0:
-                        nl += 1
-                        self._rbuf = data[nl:]
-                        buffers[-1] = data[:nl]
-                        break
-                return "".join(buffers)
-            else:
-                # Read until size bytes or \n or EOF seen, whichever comes first
-                nl = data.find('\n', 0, size)
-                if nl >= 0:
-                    nl += 1
-                    self._rbuf = data[nl:]
-                    return data[:nl]
-                buf_len = len(data)
-                if buf_len >= size:
-                    self._rbuf = data[size:]
-                    return data[:size]
-                buffers = []
-                if data:
-                    buffers.append(data)
-                self._rbuf = ""
-                while True:
-                    data = self.recv(self._rbufsize)
-                    if not data:
-                        break
-                    buffers.append(data)
-                    left = size - buf_len
-                    nl = data.find('\n', 0, left)
-                    if nl >= 0:
-                        nl += 1
-                        self._rbuf = data[nl:]
-                        buffers[-1] = data[:nl]
-                        break
-                    n = len(data)
-                    if n >= left:
-                        self._rbuf = data[left:]
-                        buffers[-1] = data[:left]
-                        break
-                    buf_len += n
-                return "".join(buffers)
+        while True:
+          data = self.recv(recv_size)
+          if not data:
+            break
+          buffers.append(data)
+        return "".join(buffers)
+      else:
+        # Read until size bytes or EOF seen, whichever comes first
+        data = self._rbuf
+        buf_len = len(data)
+        if buf_len >= size:
+          self._rbuf = data[size:]
+          return data[:size]
+        buffers = []
+        if data:
+          buffers.append(data)
+        self._rbuf = ""
+        while True:
+          left = size - buf_len
+          recv_size = max(self._rbufsize, left)
+          data = self.recv(recv_size)
+          if not data:
+            break
+          buffers.append(data)
+          n = len(data)
+          if n >= left:
+            self._rbuf = data[left:]
+            buffers[-1] = data[:left]
+            break
+          buf_len += n
+        return "".join(buffers)
+
+    def readline(self, size=-1):
+      data = self._rbuf
+      if size < 0:
+        # Read until \n or EOF, whichever comes first
+        if self._rbufsize <= 1:
+          # Speed up unbuffered case
+          assert data == ""
+          buffers = []
+          while data != "\n":
+            data = self.recv(1)
+            if not data:
+              break
+            buffers.append(data)
+          return "".join(buffers)
+        nl = data.find('\n')
+        if nl >= 0:
+          nl += 1
+          self._rbuf = data[nl:]
+          return data[:nl]
+        buffers = []
+        if data:
+          buffers.append(data)
+        self._rbuf = ""
+        while True:
+          data = self.recv(self._rbufsize)
+          if not data:
+            break
+          buffers.append(data)
+          nl = data.find('\n')
+          if nl >= 0:
+            nl += 1
+            self._rbuf = data[nl:]
+            buffers[-1] = data[:nl]
+            break
+        return "".join(buffers)
+      else:
+        # Read until size bytes or \n or EOF seen, whichever comes first
+        nl = data.find('\n', 0, size)
+        if nl >= 0:
+          nl += 1
+          self._rbuf = data[nl:]
+          return data[:nl]
+        buf_len = len(data)
+        if buf_len >= size:
+          self._rbuf = data[size:]
+          return data[:size]
+        buffers = []
+        if data:
+          buffers.append(data)
+        self._rbuf = ""
+        while True:
+          data = self.recv(self._rbufsize)
+          if not data:
+            break
+          buffers.append(data)
+          left = size - buf_len
+          nl = data.find('\n', 0, left)
+          if nl >= 0:
+            nl += 1
+            self._rbuf = data[nl:]
+            buffers[-1] = data[:nl]
+            break
+          n = len(data)
+          if n >= left:
+            self._rbuf = data[left:]
+            buffers[-1] = data[:left]
+            break
+          buf_len += n
+        return "".join(buffers)
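Both `readline` variants reduce to the same scan: look for '\n' in each `recv()` chunk, return everything up to and including it, and stash the remainder for the next call. The core loop, compressed into a free function over plain strings (illustrative only; `readline_from` is my name):

def readline_from(recv, rbuf):
  """Return (line, leftover): one '\n'-terminated line from rbuf + recv()."""
  nl = rbuf.find('\n')
  if nl >= 0:
    return rbuf[:nl + 1], rbuf[nl + 1:]
  buffers = [rbuf]
  while True:
    data = recv(8192)
    if not data:
      break
    nl = data.find('\n')
    if nl >= 0:
      buffers.append(data[:nl + 1])
      return "".join(buffers), data[nl + 1:]
    buffers.append(data)
  return "".join(buffers), ""

line, rest = readline_from(lambda n: "GET / HTTP/1.0\r\nHost: x\r\n", "")
assert line == "GET / HTTP/1.0\r\n" and rest == "Host: x\r\n"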
 
 
 class SSL_fileobject(CP_fileobject):
-    """SSL file object attached to a socket object."""
+  """SSL file object attached to a socket object."""
 
-    ssl_timeout = 3
-    ssl_retry = .01
+  ssl_timeout = 3
+  ssl_retry = .01
 
-    def _safe_call(self, is_reader, call, *args, **kwargs):
-        """Wrap the given call with SSL error-trapping.
+  def _safe_call(self, is_reader, call, *args, **kwargs):
+    """Wrap the given call with SSL error-trapping.
 
-        is_reader: if False EOF errors will be raised. If True, EOF errors
-            will return "" (to emulate normal sockets).
-        """
-        start = time.time()
-        while True:
-            try:
-                return call(*args, **kwargs)
-            except SSL.WantReadError:
-                # Sleep and try again. This is dangerous, because it means
-                # the rest of the stack has no way of differentiating
-                # between a "new handshake" error and "client dropped".
-                # Note this isn't an endless loop: there's a timeout below.
-                time.sleep(self.ssl_retry)
-            except SSL.WantWriteError:
-                time.sleep(self.ssl_retry)
-            except SSL.SysCallError as e:
-                if is_reader and e.args == (-1, 'Unexpected EOF'):
-                    return ""
-
-                errnum = e.args[0]
-                if is_reader and errnum in socket_errors_to_ignore:
-                    return ""
-                raise socket.error(errnum)
-            except SSL.Error as e:
-                if is_reader and e.args == (-1, 'Unexpected EOF'):
-                    return ""
-
-                thirdarg = None
-                try:
-                    thirdarg = e.args[0][0][2]
-                except IndexError:
-                    pass
-
-                if thirdarg == 'http request':
-                    # The client is talking HTTP to an HTTPS server.
-                    raise NoSSLError()
-                raise FatalSSLAlert(*e.args)
-
-            if time.time() - start > self.ssl_timeout:
-                raise socket.timeout("timed out")
-
-    def recv(self, *args, **kwargs):
-        buf = []
-        r = super(SSL_fileobject, self).recv
-        while True:
-            data = self._safe_call(True, r, *args, **kwargs)
-            buf.append(data)
-            p = self._sock.pending()
-            if not p:
-                return "".join(buf)
-
-    def sendall(self, *args, **kwargs):
-        return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs)
+    is_reader: if False EOF errors will be raised. If True, EOF errors
+      will return "" (to emulate normal sockets).
+    """
+    start = time.time()
+    while True:
+      try:
+        return call(*args, **kwargs)
+      except SSL.WantReadError:
+        # Sleep and try again. This is dangerous, because it means
+        # the rest of the stack has no way of differentiating
+        # between a "new handshake" error and "client dropped".
+        # Note this isn't an endless loop: there's a timeout below.
+        time.sleep(self.ssl_retry)
+      except SSL.WantWriteError:
+        time.sleep(self.ssl_retry)
+      except SSL.SysCallError as e:
+        if is_reader and e.args == (-1, 'Unexpected EOF'):
+          return ""
+
+        errnum = e.args[0]
+        if is_reader and errnum in socket_errors_to_ignore:
+          return ""
+        raise socket.error(errnum)
+      except SSL.Error as e:
+        if is_reader and e.args == (-1, 'Unexpected EOF'):
+          return ""
+
+        thirdarg = None
+        try:
+          thirdarg = e.args[0][0][2]
+        except IndexError:
+          pass
 
-    def send(self, *args, **kwargs):
-        return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs)
+        if thirdarg == 'http request':
+          # The client is talking HTTP to an HTTPS server.
+          raise NoSSLError()
+        raise FatalSSLAlert(*e.args)
 
+      if time.time() - start > self.ssl_timeout:
+        raise socket.timeout("timed out")
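Stripped of the SSL-specific cases, `_safe_call` is "retry on want-read/want-write until a deadline". The same shape as a generic helper (`RetryableError` stands in for SSL.WantReadError/SSL.WantWriteError, and the deadline mirrors `ssl_timeout` above):

import time

class RetryableError(Exception):
  """Stand-in for SSL.WantReadError / SSL.WantWriteError."""

def call_with_deadline(call, timeout=3.0, retry_delay=0.01):
  """Retry `call` on RetryableError, giving up after `timeout` seconds."""
  start = time.time()
  while True:
    try:
      return call()
    except RetryableError:
      time.sleep(retry_delay)
    if time.time() - start > timeout:
      raise RuntimeError("timed out")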
 
-class HTTPConnection(object):
-    """An HTTP connection (active socket).
+  def recv(self, *args, **kwargs):
+    buf = []
+    r = super(SSL_fileobject, self).recv
+    while True:
+      data = self._safe_call(True, r, *args, **kwargs)
+      buf.append(data)
+      p = self._sock.pending()
+      if not p:
+        return "".join(buf)
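This `recv` drains `pending()` because OpenSSL may have decrypted more bytes than one read returned; those bytes sit in OpenSSL's own buffer, invisible to `select()` on the raw socket, so they must be collected now. Assuming a pyOpenSSL `Connection`, the drain in isolation looks like:

def recv_all_pending(conn, bufsize=8192):
  """One socket-level read plus any already-decrypted leftovers (pyOpenSSL)."""
  chunks = [conn.recv(bufsize)]
  while conn.pending():
    chunks.append(conn.recv(conn.pending()))
  return "".join(chunks)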
 
-    socket: the raw socket object (usually TCP) for this connection.
-    wsgi_app: the WSGI application for this server/connection.
-    environ: a WSGI environ template. This will be copied for each request.
+  def sendall(self, *args, **kwargs):
+    return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs)
 
-    rfile: a fileobject for reading from the socket.
-    send: a function for writing (+ flush) to the socket.
-    """
+  def send(self, *args, **kwargs):
+    return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs)
 
-    rbufsize = -1
-    RequestHandlerClass = HTTPRequest
-    environ = {"wsgi.version": (1, 0),
-               "wsgi.url_scheme": "http",
-               "wsgi.multithread": True,
-               "wsgi.multiprocess": False,
-               "wsgi.run_once": False,
-               "wsgi.errors": sys.stderr,
-               }
-
-    def __init__(self, sock, wsgi_app, environ):
-        self.socket = sock
-        self.wsgi_app = wsgi_app
-
-        # Copy the class environ into self.
-        self.environ = self.environ.copy()
-        self.environ.update(environ)
-
-        if SSL and isinstance(sock, SSL.ConnectionType):
-            timeout = sock.gettimeout()
-            self.rfile = SSL_fileobject(sock, "rb", self.rbufsize)
-            self.rfile.ssl_timeout = timeout
-            self.wfile = SSL_fileobject(sock, "wb", -1)
-            self.wfile.ssl_timeout = timeout
-        else:
-            self.rfile = CP_fileobject(sock, "rb", self.rbufsize)
-            self.wfile = CP_fileobject(sock, "wb", -1)
 
-        # Wrap wsgi.input but not HTTPConnection.rfile itself.
-        # We're also not setting maxlen yet; we'll do that separately
-        # for headers and body for each iteration of self.communicate
-        # (if maxlen is 0 the wrapper doesn't check length).
-        self.environ["wsgi.input"] = SizeCheckWrapper(self.rfile, 0)
+class HTTPConnection(object):
+  """An HTTP connection (active socket).
+
+  socket: the raw socket object (usually TCP) for this connection.
+  wsgi_app: the WSGI application for this server/connection.
+  environ: a WSGI environ template. This will be copied for each request.
+
+  rfile: a fileobject for reading from the socket.
+  send: a function for writing (+ flush) to the socket.
+  """
+
+  rbufsize = -1
+  RequestHandlerClass = HTTPRequest
+  environ = {"wsgi.version": (1, 0),
+             "wsgi.url_scheme": "http",
+             "wsgi.multithread": True,
+             "wsgi.multiprocess": False,
+             "wsgi.run_once": False,
+             "wsgi.errors": sys.stderr,
+             }
+
+  def __init__(self, sock, wsgi_app, environ):
+    self.socket = sock
+    self.wsgi_app = wsgi_app
+
+    # Copy the class environ into self.
+    self.environ = self.environ.copy()
+    self.environ.update(environ)
+
+    if SSL and isinstance(sock, SSL.ConnectionType):
+      timeout = sock.gettimeout()
+      self.rfile = SSL_fileobject(sock, "rb", self.rbufsize)
+      self.rfile.ssl_timeout = timeout
+      self.wfile = SSL_fileobject(sock, "wb", -1)
+      self.wfile.ssl_timeout = timeout
+    else:
+      self.rfile = CP_fileobject(sock, "rb", self.rbufsize)
+      self.wfile = CP_fileobject(sock, "wb", -1)
 
-    def communicate(self):
-        """Read each request and respond appropriately."""
-        try:
-            while True:
-                # (re)set req to None so that if something goes wrong in
-                # the RequestHandlerClass constructor, the error doesn't
-                # get written to the previous request.
-                req = None
-                req = self.RequestHandlerClass(self.wfile, self.environ,
-                                               self.wsgi_app)
-
-                # This order of operations should guarantee correct pipelining.
-                req.parse_request()
-                if not req.ready:
-                    # Something went wrong in the parsing (and the server has
-                    # probably already made a simple_response). Return and
-                    # let the conn close.
-                    return
-
-                req.respond()
-                if req.close_connection:
-                    return
+    # Wrap wsgi.input but not HTTPConnection.rfile itself.
+    # We're also not setting maxlen yet; we'll do that separately
+    # for headers and body for each iteration of self.communicate
+    # (if maxlen is 0 the wrapper doesn't check length).
+    self.environ["wsgi.input"] = SizeCheckWrapper(self.rfile, 0)
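`SizeCheckWrapper` is defined earlier in this module. An abridged sketch of its contract — count bytes as they are read and raise `MaxSizeExceeded` past `maxlen`, with 0 meaning unlimited — looks roughly like this (the real class also wraps `readlines` and iteration):

class MaxSizeExceeded(Exception):
  pass

class SizeCheckWrapper(object):
  """Abridged sketch: wrap rfile and enforce a byte ceiling (0 = unlimited)."""
  def __init__(self, rfile, maxlen):
    self.rfile = rfile
    self.maxlen = maxlen
    self.bytes_read = 0

  def _check_length(self):
    if self.maxlen and self.bytes_read > self.maxlen:
      raise MaxSizeExceeded()

  def read(self, size=-1):
    data = self.rfile.read(size)
    self.bytes_read += len(data)
    self._check_length()
    return data

  def readline(self, size=-1):
    data = self.rfile.readline(size)
    self.bytes_read += len(data)
    self._check_length()
    return data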
 
-        except socket.error as e:
-            errnum = e.args[0]
-            if errnum == 'timed out':
-                # Don't send a 408 if there is no outstanding request; only
-                # if we're in the middle of a request.
-                # See http://www.cherrypy.org/ticket/853
-                if req and req.started_request and not req.sent_headers:
-                    req.simple_response("408 Request Timeout")
-            elif errnum not in socket_errors_to_ignore:
-                if req and not req.sent_headers:
-                    req.simple_response("500 Internal Server Error",
-                                        format_exc())
-            return
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except FatalSSLAlert as e:
-            # Close the connection.
-            return
-        except NoSSLError:
-            if req and not req.sent_headers:
-                # Unwrap our wfile
-                req.wfile = CP_fileobject(self.socket._sock, "wb", -1)
-                req.simple_response("400 Bad Request",
-                    "The client sent a plain HTTP request, but "
-                    "this server only speaks HTTPS on this port.")
-                self.linger = True
-        except Exception as e:
-            if req and not req.sent_headers:
-                req.simple_response("500 Internal Server Error", format_exc())
-
-    linger = False
-
-    def close(self):
-        """Close the socket underlying this connection."""
-        self.rfile.close()
-
-        if not self.linger:
-            # Python's socket module does NOT call close on the kernel socket
-            # when you call socket.close(). We do so manually here because we
-            # want this server to send a FIN TCP segment immediately. Note this
-            # must be called *before* calling socket.close(), because the latter
-            # drops its reference to the kernel socket.
-            self.socket._sock.close()
-            self.socket.close()
-        else:
-            # On the other hand, sometimes we want to hang around for a bit
-            # to make sure the client has a chance to read our entire
-            # response. Skipping the close() calls here delays the FIN
-            # packet until the socket object is garbage-collected later.
-            # Someday, perhaps, we'll do the full lingering_close that
-            # Apache does, but not today.
-            pass
+  def communicate(self):
+    """Read each request and respond appropriately."""
+    try:
+      while True:
+        # (re)set req to None so that if something goes wrong in
+        # the RequestHandlerClass constructor, the error doesn't
+        # get written to the previous request.
+        req = None
+        req = self.RequestHandlerClass(self.wfile, self.environ,
+                                       self.wsgi_app)
+
+        # This order of operations should guarantee correct pipelining.
+        req.parse_request()
+        if not req.ready:
+          # Something went wrong in the parsing (and the server has
+          # probably already made a simple_response). Return and
+          # let the conn close.
+          return
+
+        req.respond()
+        if req.close_connection:
+          return
+
+    except socket.error as e:
+      errnum = e.args[0]
+      if errnum == 'timed out':
+        # Don't send a 408 if there is no outstanding request; only
+        # if we're in the middle of a request.
+        # See http://www.cherrypy.org/ticket/853
+        if req and req.started_request and not req.sent_headers:
+          req.simple_response("408 Request Timeout")
+      elif errnum not in socket_errors_to_ignore:
+        if req and not req.sent_headers:
+          req.simple_response("500 Internal Server Error",
+                              format_exc())
+      return
+    except (KeyboardInterrupt, SystemExit):
+      raise
+    except FatalSSLAlert as e:
+      # Close the connection.
+      return
+    except NoSSLError:
+      if req and not req.sent_headers:
+        # Unwrap our wfile
+        req.wfile = CP_fileobject(self.socket._sock, "wb", -1)
+        req.simple_response("400 Bad Request",
+                            "The client sent a plain HTTP request, but "
+                            "this server only speaks HTTPS on this port.")
+        self.linger = True
+    except Exception as e:
+      if req and not req.sent_headers:
+        req.simple_response("500 Internal Server Error", format_exc())
+
+  linger = False
+
+  def close(self):
+    """Close the socket underlying this connection."""
+    self.rfile.close()
+
+    if not self.linger:
+      # Python's socket module does NOT call close on the kernel socket
+      # when you call socket.close(). We do so manually here because we
+      # want this server to send a FIN TCP segment immediately. Note this
+      # must be called *before* calling socket.close(), because the latter
+      # drops its reference to the kernel socket.
+      self.socket._sock.close()
+      self.socket.close()
+    else:
+      # On the other hand, sometimes we want to hang around for a bit
+      # to make sure the client has a chance to read our entire
+      # response. Skipping the close() calls here delays the FIN
+      # packet until the socket object is garbage-collected later.
+      # Someday, perhaps, we'll do the full lingering_close that
+      # Apache does, but not today.
+      pass
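The reach into `self.socket._sock` is CPython 2-specific: `socket.socket` there is a wrapper around a kernel-level `_socket.socket`, and only closing the inner object sends the FIN immediately. A hedged helper version of the same close sequence (the function name is mine):

import socket

def close_with_immediate_fin(pysock):
  """Close a Python 2 socket so the FIN goes out now, not at GC time."""
  # On Python 3, sockets own the fd directly and no inner object exists.
  kernel_sock = getattr(pysock, "_sock", None)
  if kernel_sock is not None:
    kernel_sock.close()
  pysock.close()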
 
 
 def format_exc(limit=None):
-    """Like print_exc() but return a string. Backport for Python 2.3."""
-    try:
-        etype, value, tb = sys.exc_info()
-        return ''.join(traceback.format_exception(etype, value, tb, limit))
-    finally:
-        etype = value = tb = None
+  """Like print_exc() but return a string. Backport for Python 2.3."""
+  try:
+    etype, value, tb = sys.exc_info()
+    return ''.join(traceback.format_exception(etype, value, tb, limit))
+  finally:
+    etype = value = tb = None
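Clearing `etype`/`value`/`tb` in the `finally` matters on old Pythons: a live traceback object keeps every frame (and its locals) alive, so dropping the references promptly avoids reference cycles. Usage is simply:

try:
  1 / 0
except ZeroDivisionError:
  print(format_exc())  # the full traceback as a string, e.g. for a 500 body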
 
 
 _SHUTDOWNREQUEST = None
 
 class WorkerThread(threading.Thread):
-    """Thread which continuously polls a Queue for Connection objects.
+  """Thread which continuously polls a Queue for Connection objects.
 
-    server: the HTTP Server which spawned this thread, and which owns the
-        Queue and is placing active connections into it.
-    ready: a simple flag for the calling server to know when this thread
-        has begun polling the Queue.
+  server: the HTTP Server which spawned this thread, and which owns the
+    Queue and is placing active connections into it.
+  ready: a simple flag for the calling server to know when this thread
+    has begun polling the Queue.
 
-    Due to the timing issues of polling a Queue, a WorkerThread does not
-    check its own 'ready' flag after it has started. To stop the thread,
-    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
-    (one for each running WorkerThread).
-    """
+  Due to the timing issues of polling a Queue, a WorkerThread does not
+  check its own 'ready' flag after it has started. To stop the thread,
+  it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
+  (one for each running WorkerThread).
+  """
 
-    conn = None
+  conn = None
 
-    def __init__(self, server):
-        self.ready = False
-        self.server = server
-        threading.Thread.__init__(self)
+  def __init__(self, server):
+    self.ready = False
+    self.server = server
+    threading.Thread.__init__(self)
 
-    def run(self):
+  def run(self):
+    try:
+      self.ready = True
+      while True:
         try:
-            self.ready = True
-            while True:
-                try:
-                    conn = self.server.requests.get()
-                    if conn is _SHUTDOWNREQUEST:
-                        return
-
-                    self.conn = conn
-                    try:
-                        conn.communicate()
-                    finally:
-                        conn.close()
-                        self.conn = None
-                except Exception as ex:
-                    LOG.exception('WSGI (%s) error: %s' % (self, ex))
-        except (KeyboardInterrupt, SystemExit) as exc:
-            self.server.interrupt = exc
+          conn = self.server.requests.get()
+          if conn is _SHUTDOWNREQUEST:
             return
 
+          self.conn = conn
+          try:
+            conn.communicate()
+          finally:
+            conn.close()
+            self.conn = None
+        except Exception as ex:
+          LOG.exception('WSGI (%s) error: %s' % (self, ex))
+    except (KeyboardInterrupt, SystemExit) as exc:
+      self.server.interrupt = exc
+      return
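The worker's shutdown protocol is the classic queue-sentinel pattern: each thread blocks on `get()`, and one `_SHUTDOWNREQUEST` per thread makes the pool drain cleanly. A self-contained miniature of the same pattern:

import threading
try:
  import queue            # Python 3
except ImportError:
  import Queue as queue   # Python 2

SHUTDOWN = None  # sentinel: enqueue one per worker to stop the pool

def worker(q):
  while True:
    item = q.get()
    if item is SHUTDOWN:
      return
    item()  # a unit of work; in the server, conn.communicate()

q = queue.Queue()
threads = [threading.Thread(target=worker, args=(q,)) for _ in range(4)]
for t in threads:
  t.start()
q.put(lambda: None)      # enqueue work
for _ in threads:
  q.put(SHUTDOWN)        # one sentinel per worker
for t in threads:
  t.join()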
 
-class ThreadPool(object):
-    """A Request Queue for the CherryPyWSGIServer which pools threads.
-
-    ThreadPool objects must provide min, get(), put(obj), start()
-    and stop(timeout) attributes.
-    """
 
-    def __init__(self, server, min=10, max=-1):
-        self.server = server
-        self.min = min
-        self.max = max
-        self._threads = []
-        self._queue = queue.Queue()
-        self.get = self._queue.get
-
-    def start(self):
-        """Start the pool of threads."""
-        for i in range(self.min):
-            self._threads.append(WorkerThread(self.server))
-        for worker in self._threads:
-            worker.setName("CP WSGIServer " + worker.getName())
-            worker.start()
-        for worker in self._threads:
-            while not worker.ready:
-                time.sleep(.1)
-
-    def _get_idle(self):
-        """Number of worker threads which are idle. Read-only."""
-        return len([t for t in self._threads if t.conn is None])
-    idle = property(_get_idle, doc=_get_idle.__doc__)
-
-    def put(self, obj):
-        self._queue.put(obj)
-        if obj is _SHUTDOWNREQUEST:
-            return
-
-    def grow(self, amount):
-        """Spawn new worker threads (not above self.max)."""
-        for i in range(amount):
-            if self.max > 0 and len(self._threads) >= self.max:
-                break
-            worker = WorkerThread(self.server)
-            worker.setName("CP WSGIServer " + worker.getName())
-            self._threads.append(worker)
-            worker.start()
-
-    def shrink(self, amount):
-        """Kill off worker threads (not below self.min)."""
-        # Grow/shrink the pool if necessary.
-        # Remove any dead threads from our list
-        for t in self._threads:
-            if not t.isAlive():
-                self._threads.remove(t)
-                amount -= 1
-
-        if amount > 0:
-            for i in range(min(amount, len(self._threads) - self.min)):
-                # Put a number of shutdown requests on the queue equal
-                # to 'amount'. Once each of those is processed by a worker,
-                # that worker will terminate and be culled from our list
-                # in self.put.
-                self._queue.put(_SHUTDOWNREQUEST)
-
-    def stop(self, timeout=5):
-        # Must shut down threads here so the code that calls
-        # this method can know when all threads are stopped.
-        for worker in self._threads:
-            self._queue.put(_SHUTDOWNREQUEST)
-
-        # Don't join currentThread (when stop is called inside a request).
-        current = threading.currentThread()
-        while self._threads:
-            worker = self._threads.pop()
-            if worker is not current and worker.isAlive():
-                try:
-                    if timeout is None or timeout < 0:
-                        worker.join()
-                    else:
-                        worker.join(timeout)
-                        if worker.isAlive():
-                            # We exhausted the timeout.
-                            # Forcibly shut down the socket.
-                            c = worker.conn
-                            if c and not c.rfile.closed:
-                                if SSL and isinstance(c.socket, SSL.ConnectionType):
-                                    # pyOpenSSL.socket.shutdown takes no args
-                                    c.socket.shutdown()
-                                else:
-                                    c.socket.shutdown(socket.SHUT_RD)
-                            worker.join()
-                except (AssertionError,
-                        # Ignore repeated Ctrl-C.
-                        # See http://www.cherrypy.org/ticket/691.
-                        KeyboardInterrupt) as exc1:
-                    pass
+class ThreadPool(object):
+  """A Request Queue for the CherryPyWSGIServer which pools threads.
+
+  ThreadPool objects must provide min, get(), put(obj), start()
+  and stop(timeout) attributes.
+  """
+
+  def __init__(self, server, min=10, max=-1):
+    self.server = server
+    self.min = min
+    self.max = max
+    self._threads = []
+    self._queue = queue.Queue()
+    self.get = self._queue.get
+
+  def start(self):
+    """Start the pool of threads."""
+    for i in range(self.min):
+      self._threads.append(WorkerThread(self.server))
+    for worker in self._threads:
+      worker.setName("CP WSGIServer " + worker.getName())
+      worker.start()
+    for worker in self._threads:
+      while not worker.ready:
+        time.sleep(.1)
+
+  def _get_idle(self):
+    """Number of worker threads which are idle. Read-only."""
+    return len([t for t in self._threads if t.conn is None])
+  idle = property(_get_idle, doc=_get_idle.__doc__)
+
+  def put(self, obj):
+    self._queue.put(obj)
+    if obj is _SHUTDOWNREQUEST:
+      return
+
+  def grow(self, amount):
+    """Spawn new worker threads (not above self.max)."""
+    for i in range(amount):
+      if self.max > 0 and len(self._threads) >= self.max:
+        break
+      worker = WorkerThread(self.server)
+      worker.setName("CP WSGIServer " + worker.getName())
+      self._threads.append(worker)
+      worker.start()
+
+  def shrink(self, amount):
+    """Kill off worker threads (not below self.min)."""
+    # First remove any dead threads from our list; each one counts
+    # toward the requested shrink amount.
+    for t in self._threads:
+      if not t.isAlive():
+        self._threads.remove(t)
+        amount -= 1
+
+    if amount > 0:
+      for i in range(min(amount, len(self._threads) - self.min)):
+        # Put one shutdown request on the queue per worker to remove
+        # (but never dropping below self.min). The worker that picks
+        # one up terminates itself.
+        self._queue.put(_SHUTDOWNREQUEST)
+
+  def stop(self, timeout=5):
+    # Must shut down threads here so the code that calls
+    # this method can know when all threads are stopped.
+    for worker in self._threads:
+      self._queue.put(_SHUTDOWNREQUEST)
+
+    # Don't join currentThread (when stop is called inside a request).
+    current = threading.currentThread()
+    while self._threads:
+      worker = self._threads.pop()
+      if worker is not current and worker.isAlive():
+        try:
+          if timeout is None or timeout < 0:
+            worker.join()
+          else:
+            worker.join(timeout)
+            if worker.isAlive():
+              # We exhausted the timeout.
+              # Forcibly shut down the socket.
+              c = worker.conn
+              if c and not c.rfile.closed:
+                if SSL and isinstance(c.socket, SSL.ConnectionType):
+                  # pyOpenSSL.socket.shutdown takes no args
+                  c.socket.shutdown()
+                else:
+                  c.socket.shutdown(socket.SHUT_RD)
+              worker.join()
+        except (AssertionError,
+                # Ignore repeated Ctrl-C.
+                # See http://www.cherrypy.org/ticket/691.
+                KeyboardInterrupt) as exc1:
+          pass
 
 
 
 class SSLConnection(object):
-    """A thread-safe wrapper for an SSL.Connection.
-
-    *args: the arguments to create the wrapped SSL.Connection(*args).
-    """
-
-    def __init__(self, *args):
-        self._ssl_conn = SSL.Connection(*args)
-        self._lock = threading.RLock()
-
-    for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
-              'renegotiate', 'bind', 'listen', 'connect', 'accept',
-              'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list',
-              'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
-              'makefile', 'get_app_data', 'set_app_data', 'state_string',
-              'sock_shutdown', 'get_peer_certificate', 'want_read',
-              'want_write', 'set_connect_state', 'set_accept_state',
-              'connect_ex', 'sendall', 'settimeout'):
-        exec("""def %s(self, *args):
-        self._lock.acquire()
-        try:
-            return self._ssl_conn.%s(*args)
-        finally:
-            self._lock.release()
+  """A thread-safe wrapper for an SSL.Connection.
+
+  *args: the arguments to create the wrapped SSL.Connection(*args).
+  """
+
+  def __init__(self, *args):
+    self._ssl_conn = SSL.Connection(*args)
+    self._lock = threading.RLock()
+
+  for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
+        'renegotiate', 'bind', 'listen', 'connect', 'accept',
+        'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list',
+        'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
+        'makefile', 'get_app_data', 'set_app_data', 'state_string',
+        'sock_shutdown', 'get_peer_certificate', 'want_read',
+        'want_write', 'set_connect_state', 'set_accept_state',
+        'connect_ex', 'sendall', 'settimeout'):
+    exec("""def %s(self, *args):
+    self._lock.acquire()
+    try:
+      return self._ssl_conn.%s(*args)
+    finally:
+      self._lock.release()
 """ % (f, f))
 
 
 try:
-    import fcntl
+  import fcntl
 except ImportError:
-    try:
-        from ctypes import windll, WinError
-    except ImportError:
-        def prevent_socket_inheritance(sock):
-            """Dummy function, since neither fcntl nor ctypes are available."""
-            pass
-    else:
-        def prevent_socket_inheritance(sock):
-            """Mark the given socket fd as non-inheritable (Windows)."""
-            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
-                raise WinError()
-else:
+  try:
+    from ctypes import windll, WinError
+  except ImportError:
+    def prevent_socket_inheritance(sock):
+      """Dummy function, since neither fcntl nor ctypes are available."""
+      pass
+  else:
     def prevent_socket_inheritance(sock):
-        """Mark the given socket fd as non-inheritable (POSIX)."""
-        fd = sock.fileno()
-        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
-        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
+      """Mark the given socket fd as non-inheritable (Windows)."""
+      if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
+        raise WinError()
+else:
+  def prevent_socket_inheritance(sock):
+    """Mark the given socket fd as non-inheritable (POSIX)."""
+    fd = sock.fileno()
+    old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+    fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
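
The POSIX branch just sets FD_CLOEXEC on the socket's file descriptor. A
quick standalone check of that flag (POSIX only; note that since PEP 446,
Python 3.4+ creates sockets non-inheritable by default, so this mainly
matters on Python 2):

    import fcntl
    import socket

    sock = socket.socket()
    fd = sock.fileno()
    old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
    assert fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC
    sock.close()
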
 
 
 class CherryPyWSGIServer(object):
-    """An HTTP server for WSGI.
-
-    bind_addr: The interface on which to listen for connections.
-        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
-        or IPv6 address, or any valid hostname. The string 'localhost' is a
-        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
-        The string '0.0.0.0' is a special IPv4 entry meaning "any active
-        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
-        IPv6. The empty string or None are not allowed.
-
-        For UNIX sockets, supply the filename as a string.
-    wsgi_app: the WSGI 'application callable'; multiple WSGI applications
-        may be passed as (path_prefix, app) pairs.
-    numthreads: the number of worker threads to create (default 10).
-    server_name: the string to set for WSGI's SERVER_NAME environ entry.
-        Defaults to socket.gethostname().
-    max: the maximum number of queued requests (defaults to -1 = no limit).
-    request_queue_size: the 'backlog' argument to socket.listen();
-        specifies the maximum number of queued connections (default 5).
-    timeout: the timeout in seconds for accepted connections (default 10).
-
-    nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
-        option.
-
-    protocol: the version string to write in the Status-Line of all
-        HTTP responses. For example, "HTTP/1.1" (the default). This
-        also limits the supported features used in the response.
-
-
-    SSL/HTTPS
-    ---------
-    The OpenSSL module must be importable for SSL functionality.
-    You can obtain it from http://pyopenssl.sourceforge.net/
-
-    ssl_certificate: the filename of the server SSL certificate.
-    ssl_privatekey: the filename of the server's private key file.
-
-    If either of these is None (both are None by default), this server
-    will not use SSL. If both are given and are valid, they will be read
-    on server start and used in the SSL context for the listening socket.
-    """
-
-    protocol = "HTTP/1.1"
-    _bind_addr = "127.0.0.1"
-    version = "CherryPy/3.1.2"
-    ready = False
-    _interrupt = None
-
-    nodelay = True
-
-    ConnectionClass = HTTPConnection
-    environ = {
-        "SERVER_SOFTWARE": os.getenv('SERVER_SOFTWARE')
-    }
-
-    # Paths to certificate and private key files
-    ssl_certificate = None
-    ssl_private_key = None
-    ssl_certificate_chain = None
-    ssl_cipher_list = "DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2"
-    ssl_password_cb = None
-    ssl_no_renegotiation = False
-
-    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
-                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
-        self.requests = ThreadPool(self, min=numthreads or 1, max=max)
-
-        if callable(wsgi_app):
-            # We've been handed a single wsgi_app, in CP-2.1 style.
-            # Assume it's mounted at "".
-            self.wsgi_app = wsgi_app
-        else:
-            # We've been handed a list of (path_prefix, wsgi_app) tuples,
-            # so that the server can call different wsgi_apps, and also
-            # correctly set SCRIPT_NAME.
-            warnings.warn("The ability to pass multiple apps is deprecated "
-                          "and will be removed in 3.2. You should explicitly "
-                          "include a WSGIPathInfoDispatcher instead.",
-                          DeprecationWarning)
-            self.wsgi_app = WSGIPathInfoDispatcher(wsgi_app)
-
-        self.bind_addr = bind_addr
-        if not server_name:
-            server_name = socket.gethostname()
-        self.server_name = server_name
-        self.request_queue_size = request_queue_size
-
-        self.timeout = timeout
-        self.shutdown_timeout = shutdown_timeout
-
-    def _get_numthreads(self):
-        return self.requests.min
-    def _set_numthreads(self, value):
-        self.requests.min = value
-    numthreads = property(_get_numthreads, _set_numthreads)
-
-    def __str__(self):
-        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
-                              self.bind_addr)
-
-    def _get_bind_addr(self):
-        return self._bind_addr
-    def _set_bind_addr(self, value):
-        if isinstance(value, tuple) and value[0] in ('', None):
-            # Despite the socket module docs, using '' does not
-            # allow AI_PASSIVE to work. Passing None instead
-            # returns '0.0.0.0' like we want. In other words:
-            #     host    AI_PASSIVE     result
-            #      ''         Y         192.168.x.y
-            #      ''         N         192.168.x.y
-            #     None        Y         0.0.0.0
-            #     None        N         127.0.0.1
-            # But since you can get the same effect with an explicit
-            # '0.0.0.0', we deny both the empty string and None as values.
-            raise ValueError("Host values of '' or None are not allowed. "
-                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
-                             "to listen on all active interfaces.")
-        self._bind_addr = value
-    bind_addr = property(_get_bind_addr, _set_bind_addr,
-        doc="""The interface on which to listen for connections.
-
-        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
-        or IPv6 address, or any valid hostname. The string 'localhost' is a
-        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
-        The string '0.0.0.0' is a special IPv4 entry meaning "any active
-        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
-        IPv6. The empty string or None are not allowed.
-
-        For UNIX sockets, supply the filename as a string.""")
-
-    def start(self):
-        """Run the server forever."""
-        self.bind_server()
-        self.listen_and_loop()
-
-
-    def bind_server(self):
-        # We don't have to trap KeyboardInterrupt or SystemExit here,
-        # because cherrpy.server already does so, calling self.stop() for us.
-        # If you're using this server with another framework, you should
-        # trap those exceptions in whatever code block calls start().
-        self._interrupt = None
-
-        # Select the appropriate socket
-        if isinstance(self.bind_addr, basestring):
-            # AF_UNIX socket
-
-            # So we can reuse the socket...
-            try:
-              os.unlink(self.bind_addr)
-            except IOError:
-              pass
+  """An HTTP server for WSGI.
+
+  bind_addr: The interface on which to listen for connections.
+    For TCP sockets, a (host, port) tuple. Host values may be any IPv4
+    or IPv6 address, or any valid hostname. The string 'localhost' is a
+    synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
+    The string '0.0.0.0' is a special IPv4 entry meaning "any active
+    interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
+    IPv6. The empty string or None are not allowed.
+
+    For UNIX sockets, supply the filename as a string.
+  wsgi_app: the WSGI 'application callable'; multiple WSGI applications
+    may be passed as (path_prefix, app) pairs.
+  numthreads: the number of worker threads to create (default 10).
+  server_name: the string to set for WSGI's SERVER_NAME environ entry.
+    Defaults to socket.gethostname().
+  max: the maximum number of queued requests (defaults to -1 = no limit).
+  request_queue_size: the 'backlog' argument to socket.listen();
+    specifies the maximum number of queued connections (default 5).
+  timeout: the timeout in seconds for accepted connections (default 10).
+
+  nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
+    option.
+
+  protocol: the version string to write in the Status-Line of all
+    HTTP responses. For example, "HTTP/1.1" (the default). This
+    also limits the supported features used in the response.
+
+
+  SSL/HTTPS
+  ---------
+  The OpenSSL module must be importable for SSL functionality.
+  You can obtain it from http://pyopenssl.sourceforge.net/
+
+  ssl_certificate: the filename of the server SSL certificate.
+  ssl_private_key: the filename of the server's private key file.
+
+  If either of these is None (both are None by default), this server
+  will not use SSL. If both are given and are valid, they will be read
+  on server start and used in the SSL context for the listening socket.
+  """
+
+  protocol = "HTTP/1.1"
+  _bind_addr = "127.0.0.1"
+  version = "CherryPy/3.1.2"
+  ready = False
+  _interrupt = None
+
+  nodelay = True
+
+  ConnectionClass = HTTPConnection
+  environ = {
+    "SERVER_SOFTWARE": os.getenv('SERVER_SOFTWARE')
+  }
+
+  # Paths to certificate and private key files
+  ssl_certificate = None
+  ssl_private_key = None
+  ssl_certificate_chain = None
+  ssl_cipher_list = "DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2"
+  ssl_password_cb = None
+  ssl_no_renegotiation = False
+
+  def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
+         max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
+    self.requests = ThreadPool(self, min=numthreads or 1, max=max)
+
+    if callable(wsgi_app):
+      # We've been handed a single wsgi_app, in CP-2.1 style.
+      # Assume it's mounted at "".
+      self.wsgi_app = wsgi_app
+    else:
+      # We've been handed a list of (path_prefix, wsgi_app) tuples,
+      # so that the server can call different wsgi_apps, and also
+      # correctly set SCRIPT_NAME.
+      warnings.warn("The ability to pass multiple apps is deprecated "
+              "and will be removed in 3.2. You should explicitly "
+              "include a WSGIPathInfoDispatcher instead.",
+              DeprecationWarning)
+      self.wsgi_app = WSGIPathInfoDispatcher(wsgi_app)
+
+    self.bind_addr = bind_addr
+    if not server_name:
+      server_name = socket.gethostname()
+    self.server_name = server_name
+    self.request_queue_size = request_queue_size
+
+    self.timeout = timeout
+    self.shutdown_timeout = shutdown_timeout
+
+  def _get_numthreads(self):
+    return self.requests.min
+  def _set_numthreads(self, value):
+    self.requests.min = value
+  numthreads = property(_get_numthreads, _set_numthreads)
+
+  def __str__(self):
+    return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
+                self.bind_addr)
+
+  def _get_bind_addr(self):
+    return self._bind_addr
+  def _set_bind_addr(self, value):
+    if isinstance(value, tuple) and value[0] in ('', None):
+      # Despite the socket module docs, using '' does not
+      # allow AI_PASSIVE to work. Passing None instead
+      # returns '0.0.0.0' like we want. In other words:
+      #     host    AI_PASSIVE     result
+      #      ''         Y         192.168.x.y
+      #      ''         N         192.168.x.y
+      #     None        Y         0.0.0.0
+      #     None        N         127.0.0.1
+      # But since you can get the same effect with an explicit
+      # '0.0.0.0', we deny both the empty string and None as values.
+      raise ValueError("Host values of '' or None are not allowed. "
+               "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
+               "to listen on all active interfaces.")
+    self._bind_addr = value
+  bind_addr = property(_get_bind_addr, _set_bind_addr,
+    doc="""The interface on which to listen for connections.
+
+    For TCP sockets, a (host, port) tuple. Host values may be any IPv4
+    or IPv6 address, or any valid hostname. The string 'localhost' is a
+    synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
+    The string '0.0.0.0' is a special IPv4 entry meaning "any active
+    interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
+    IPv6. The empty string or None are not allowed.
+
+    For UNIX sockets, supply the filename as a string.""")
+
+  def start(self):
+    """Run the server forever."""
+    self.bind_server()
+    self.listen_and_loop()
+
+
+  def bind_server(self):
+    # We don't have to trap KeyboardInterrupt or SystemExit here,
+    # because cherrypy.server already does so, calling self.stop() for us.
+    # If you're using this server with another framework, you should
+    # trap those exceptions in whatever code block calls start().
+    self._interrupt = None
+
+    # Select the appropriate socket
+    if isinstance(self.bind_addr, basestring):
+      # AF_UNIX socket
+
+      # So we can reuse the socket...
+      try:
+        os.unlink(self.bind_addr)
+      except IOError:
+        pass
+
+      # So everyone can access the socket...
+      try:
+        os.chmod(self.bind_addr, 0o777)
+      except IOError:
+        pass
+
+      info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
+    else:
+      # AF_INET or AF_INET6 socket
+      # Get the correct address family for our host (allows IPv6 addresses)
+      host, port = self.bind_addr
+      try:
+        info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+                      socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
+      except socket.gaierror:
+        # Probably a DNS issue. Assume IPv4.
+        info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)]
+
+    self.socket = None
+    msg = "No socket could be created"
+    for res in info:
+      af, socktype, proto, canonname, sa = res
+      try:
+        self._bind(af, socktype, proto)
+      except socket.error as msg:
+        if self.socket:
+          self.socket.close()
+        self.socket = None
+        continue
+      break
+    if not self.socket:
+      raise socket.error(msg)
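
bind_server() follows the usual getaddrinfo idiom: try each returned
address family in order and keep the first socket that can be created.
Reduced to a standalone helper (first_bindable is an illustrative name):

    import socket

    def first_bindable(host, port):
      """Bind a socket for the first usable getaddrinfo result."""
      last_err = socket.error("no socket could be created")
      for af, socktype, proto, _canon, sa in socket.getaddrinfo(
          host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
          socket.AI_PASSIVE):
        s = None
        try:
          s = socket.socket(af, socktype, proto)
          s.bind(sa)
          return s
        except socket.error as err:
          last_err = err
          if s:
            s.close()
      raise last_err

    sock = first_bindable(None, 0)  # None + AI_PASSIVE -> wildcard address
    print(sock.getsockname())
    sock.close()
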
 
-            # So everyone can access the socket...
-            try:
-              os.chmod(self.bind_addr, 0o777)
-            except IOError:
-              pass
+  def listen_and_loop(self):
+    """
+    Listen on the socket, and then loop forever accepting and handling
+    connections.
+    """
+    # Timeout so KeyboardInterrupt can be caught on Win32
+    self.socket.settimeout(1)
+    self.socket.listen(self.request_queue_size)
+
+    # Create worker threads
+    self.requests.start()
+
+    self.ready = True
+    while self.ready:
+      self.tick()
+      if self.interrupt:
+        while self.interrupt is True:
+          # Wait for self.stop() to complete. See _set_interrupt.
+          time.sleep(0.1)
+        if self.interrupt:
+          raise self.interrupt
+
+  def _bind(self, family, type, proto=0):
+    """Create (or recreate) the actual socket object."""
+    self.socket = socket.socket(family, type, proto)
+    prevent_socket_inheritance(self.socket)
+    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    if self.nodelay:
+      self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+    if self.ssl_certificate and self.ssl_private_key:
+      if SSL is None:
+        raise ImportError("You must install pyOpenSSL to use HTTPS.")
+
+      # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
+      ctx = SSL.Context(SSL.SSLv23_METHOD)
+
+      if self.ssl_password_cb is not None:
+        ctx.set_passwd_cb(self.ssl_password_cb)
+
+      ctx.set_cipher_list(self.ssl_cipher_list)
+      try:
+        ctx.use_privatekey_file(self.ssl_private_key)
+        ctx.use_certificate_file(self.ssl_certificate)
+        if self.ssl_certificate_chain:
+          ctx.use_certificate_chain_file(self.ssl_certificate_chain)
+      except Exception as ex:
+        logging.exception('SSL key and certificate could not be found or have a problem')
+        raise ex
+      options = SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3
+      if self.ssl_no_renegotiation:
+        options |= SSL.OP_NO_RENEGOTIATION
+      ctx.set_options(options)
+      self.socket = SSLConnection(ctx, self.socket)
+      self.populate_ssl_environ()
+
+      # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
+      # activate dual-stack. See http://www.cherrypy.org/ticket/871.
+      if (not isinstance(self.bind_addr, basestring)
+        and self.bind_addr[0] == '::' and family == socket.AF_INET6):
+        try:
+          self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
+        except (AttributeError, socket.error):
+          # Apparently, the socket option is not available in
+          # this machine's TCP stack
+          pass
 
-            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
-        else:
-            # AF_INET or AF_INET6 socket
-            # Get the correct address family for our host (allows IPv6 addresses)
-            host, port = self.bind_addr
-            try:
-                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
-                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
-            except socket.gaierror:
-                # Probably a DNS issue. Assume IPv4.
-                info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)]
+    self.socket.bind(self.bind_addr)
 
-        self.socket = None
-        msg = "No socket could be created"
-        for res in info:
-            af, socktype, proto, canonname, sa = res
-            try:
-                self._bind(af, socktype, proto)
-            except socket.error as msg:
-                if self.socket:
-                    self.socket.close()
-                self.socket = None
-                continue
-            break
-        if not self.socket:
-            raise socket.error(msg)
-
-    def listen_and_loop(self):
-        """
-        Listen on the socket, and then loop forever accepting and handling
-        connections.
-        """
-        # Timeout so KeyboardInterrupt can be caught on Win32
-        self.socket.settimeout(1)
-        self.socket.listen(self.request_queue_size)
-
-        # Create worker threads
-        self.requests.start()
-
-        self.ready = True
-        while self.ready:
-            self.tick()
-            if self.interrupt:
-                while self.interrupt is True:
-                    # Wait for self.stop() to complete. See _set_interrupt.
-                    time.sleep(0.1)
-                if self.interrupt:
-                    raise self.interrupt
-
-    def _bind(self, family, type, proto=0):
-        """Create (or recreate) the actual socket object."""
-        self.socket = socket.socket(family, type, proto)
-        prevent_socket_inheritance(self.socket)
-        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        if self.nodelay:
-            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-        if self.ssl_certificate and self.ssl_private_key:
-            if SSL is None:
-                raise ImportError("You must install pyOpenSSL to use HTTPS.")
-
-            # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
-            ctx = SSL.Context(SSL.SSLv23_METHOD)
-
-            if self.ssl_password_cb is not None:
-              ctx.set_passwd_cb(self.ssl_password_cb)
-
-            ctx.set_cipher_list(self.ssl_cipher_list)
-            try:
-              ctx.use_privatekey_file(self.ssl_private_key)
-              ctx.use_certificate_file(self.ssl_certificate)
-              if self.ssl_certificate_chain:
-                ctx.use_certificate_chain_file(self.ssl_certificate_chain)
-            except Exception as ex:
-              logging.exception('SSL key and certificate could not be found or have a problem')
-              raise ex
-            options = SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3
-            if self.ssl_no_renegotiation:
-              options |= SSL.OP_NO_RENEGOTIATION
-            ctx.set_options(options)
-            self.socket = SSLConnection(ctx, self.socket)
-            self.populate_ssl_environ()
-
-            # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
-            # activate dual-stack. See http://www.cherrypy.org/ticket/871.
-            if (not isinstance(self.bind_addr, basestring)
-                and self.bind_addr[0] == '::' and family == socket.AF_INET6):
-                try:
-                    self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
-                except (AttributeError, socket.error):
-                    # Apparently, the socket option is not available in
-                    # this machine's TCP stack
-                    pass
-
-        self.socket.bind(self.bind_addr)
-
-    def tick(self):
-        """Accept a new connection and put it on the Queue."""
+  def tick(self):
+    """Accept a new connection and put it on the Queue."""
+    try:
+      s, addr = self.socket.accept()
+      prevent_socket_inheritance(s)
+      if not self.ready:
+        return
+      if hasattr(s, 'settimeout'):
+        s.settimeout(self.timeout)
+
+      environ = self.environ.copy()
+      # SERVER_SOFTWARE is common for IIS. It's also helpful for
+      # us to pass a default value for the "Server" response header.
+      if environ.get("SERVER_SOFTWARE") is None:
+        environ["SERVER_SOFTWARE"] = "%s WSGI Server" % self.version
+      # set a non-standard environ entry so the WSGI app can know what
+      # the *real* server protocol is (and what features to support).
+      # See http://www.faqs.org/rfcs/rfc2145.html.
+      environ["ACTUAL_SERVER_PROTOCOL"] = self.protocol
+      environ["SERVER_NAME"] = self.server_name
+
+      if isinstance(self.bind_addr, basestring):
+        # AF_UNIX. This isn't really allowed by WSGI, which doesn't
+        # address unix domain sockets. But it's better than nothing.
+        environ["SERVER_PORT"] = ""
+      else:
+        environ["SERVER_PORT"] = str(self.bind_addr[1])
+        # optional values
+        # Until we do DNS lookups, omit REMOTE_HOST
+        environ["REMOTE_ADDR"] = addr[0]
+        environ["REMOTE_PORT"] = str(addr[1])
+
+      conn = self.ConnectionClass(s, self.wsgi_app, environ)
+      self.requests.put(conn)
+    except socket.timeout:
+      # The only reason for the timeout in start() is so we can
+      # notice keyboard interrupts on Win32, which don't interrupt
+      # accept() by default
+      return
+    except socket.error as x:
+      if x.args[0] in socket_error_eintr:
+        # I *think* this is right. EINTR should occur when a signal
+        # is received during the accept() call; all docs say retry
+        # the call, and I *think* I'm reading it right that Python
+        # will then go ahead and poll for and handle the signal
+        # elsewhere. See http://www.cherrypy.org/ticket/707.
+        return
+      if x.args[0] in socket_errors_nonblocking:
+        # Just try again. See http://www.cherrypy.org/ticket/479.
+        return
+      if x.args[0] in socket_errors_to_ignore:
+        # Our socket was closed.
+        # See http://www.cherrypy.org/ticket/686.
+        return
+      raise
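
tick() triages socket.error by errno: EINTR and the nonblocking errnos mean
"retry", the ignorable set means "our socket was closed", and anything else
propagates. A compact sketch of that triage (accept_once is an illustrative
name; note that PEP 475 makes Python 3.5+ retry EINTR inside accept()
automatically):

    import errno
    import socket

    def accept_once(listener):
      """Accept one connection, swallowing only transient errors."""
      try:
        return listener.accept()
      except socket.timeout:
        return None      # settimeout() poll; lets signals be noticed
      except socket.error as exc:
        if exc.args[0] in (errno.EINTR, errno.EAGAIN, errno.EWOULDBLOCK):
          return None    # transient: the caller simply retries
        raise            # anything else is a real failure
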
+
+  def _get_interrupt(self):
+    return self._interrupt
+  def _set_interrupt(self, interrupt):
+    self._interrupt = True
+    self.stop()
+    self._interrupt = interrupt
+  interrupt = property(_get_interrupt, _set_interrupt,
+             doc="Set this to an Exception instance to "
+               "interrupt the server.")
+
+  def stop(self):
+    """Gracefully shutdown a server that is serving forever."""
+    self.ready = False
+
+    sock = getattr(self, "socket", None)
+    if sock:
+      if not isinstance(self.bind_addr, basestring):
+        # Touch our own socket to make accept() return immediately.
         try:
-            s, addr = self.socket.accept()
-            prevent_socket_inheritance(s)
-            if not self.ready:
-                return
-            if hasattr(s, 'settimeout'):
-                s.settimeout(self.timeout)
-
-            environ = self.environ.copy()
-            # SERVER_SOFTWARE is common for IIS. It's also helpful for
-            # us to pass a default value for the "Server" response header.
-            if environ.get("SERVER_SOFTWARE") is None:
-                environ["SERVER_SOFTWARE"] = "%s WSGI Server" % self.version
-            # set a non-standard environ entry so the WSGI app can know what
-            # the *real* server protocol is (and what features to support).
-            # See http://www.faqs.org/rfcs/rfc2145.html.
-            environ["ACTUAL_SERVER_PROTOCOL"] = self.protocol
-            environ["SERVER_NAME"] = self.server_name
-
-            if isinstance(self.bind_addr, basestring):
-                # AF_UNIX. This isn't really allowed by WSGI, which doesn't
-                # address unix domain sockets. But it's better than nothing.
-                environ["SERVER_PORT"] = ""
-            else:
-                environ["SERVER_PORT"] = str(self.bind_addr[1])
-                # optional values
-                # Until we do DNS lookups, omit REMOTE_HOST
-                environ["REMOTE_ADDR"] = addr[0]
-                environ["REMOTE_PORT"] = str(addr[1])
-
-            conn = self.ConnectionClass(s, self.wsgi_app, environ)
-            self.requests.put(conn)
-        except socket.timeout:
-            # The only reason for the timeout in start() is so we can
-            # notice keyboard interrupts on Win32, which don't interrupt
-            # accept() by default
-            return
+          host, port = sock.getsockname()[:2]
         except socket.error as x:
-            if x.args[0] in socket_error_eintr:
-                # I *think* this is right. EINTR should occur when a signal
-                # is received during the accept() call; all docs say retry
-                # the call, and I *think* I'm reading it right that Python
-                # will then go ahead and poll for and handle the signal
-                # elsewhere. See http://www.cherrypy.org/ticket/707.
-                return
-            if x.args[0] in socket_errors_nonblocking:
-                # Just try again. See http://www.cherrypy.org/ticket/479.
-                return
-            if x.args[0] in socket_errors_to_ignore:
-                # Our socket was closed.
-                # See http://www.cherrypy.org/ticket/686.
-                return
+          if x.args[0] not in socket_errors_to_ignore:
             raise
-
-    def _get_interrupt(self):
-        return self._interrupt
-    def _set_interrupt(self, interrupt):
-        self._interrupt = True
-        self.stop()
-        self._interrupt = interrupt
-    interrupt = property(_get_interrupt, _set_interrupt,
-                         doc="Set this to an Exception instance to "
-                             "interrupt the server.")
-
-    def stop(self):
-        """Gracefully shutdown a server that is serving forever."""
-        self.ready = False
-
-        sock = getattr(self, "socket", None)
-        if sock:
-            if not isinstance(self.bind_addr, basestring):
-                # Touch our own socket to make accept() return immediately.
-                try:
-                    host, port = sock.getsockname()[:2]
-                except socket.error as x:
-                    if x.args[0] not in socket_errors_to_ignore:
-                        raise
-                else:
-                    # Note that we're explicitly NOT using AI_PASSIVE,
-                    # here, because we want an actual IP to touch.
-                    # localhost won't work if we've bound to a public IP,
-                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
-                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
-                                                  socket.SOCK_STREAM):
-                        af, socktype, proto, canonname, sa = res
-                        s = None
-                        try:
-                            s = socket.socket(af, socktype, proto)
-                            # See http://groups.google.com/group/cherrypy-users/
-                            #        browse_frm/thread/bbfe5eb39c904fe0
-                            s.settimeout(1.0)
-                            s.connect((host, port))
-                            s.close()
-                        except socket.error:
-                            if s:
-                                s.close()
-            if hasattr(sock, "close"):
-                sock.close()
-            self.socket = None
-
-        self.requests.stop(self.shutdown_timeout)
-
-    def populate_ssl_environ(self):
-        """Create WSGI environ entries to be merged into each request."""
-        cert = open(self.ssl_certificate, 'rb').read()
-        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
-        ssl_environ = {
-            "wsgi.url_scheme": "https",
-            "HTTPS": "on",
-            # pyOpenSSL doesn't provide access to any of these AFAICT
+        else:
+          # Note that we're explicitly NOT using AI_PASSIVE,
+          # here, because we want an actual IP to touch.
+          # localhost won't work if we've bound to a public IP,
+          # but it will if we bound to '0.0.0.0' (INADDR_ANY).
+          for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+                          socket.SOCK_STREAM):
+            af, socktype, proto, canonname, sa = res
+            s = None
+            try:
+              s = socket.socket(af, socktype, proto)
+              # See http://groups.google.com/group/cherrypy-users/
+              #        browse_frm/thread/bbfe5eb39c904fe0
+              s.settimeout(1.0)
+              s.connect((host, port))
+              s.close()
+            except socket.error:
+              if s:
+                s.close()
+      if hasattr(sock, "close"):
+        sock.close()
+      self.socket = None
+
+    self.requests.stop(self.shutdown_timeout)
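
The trick in stop() is that a blocking accept() can be unblocked by
connecting to the listening socket from within the same process. The
essence of that self-connect, as a standalone sketch (names are
illustrative):

    import socket
    import threading

    listener = socket.socket()
    listener.bind(('127.0.0.1', 0))
    listener.listen(1)
    host, port = listener.getsockname()

    def stopper():
      s = socket.socket()
      s.settimeout(1.0)
      s.connect((host, port))  # wakes the accept() below
      s.close()

    threading.Thread(target=stopper).start()
    listener.accept()          # returns once stopper() connects
    listener.close()
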
+
+  def populate_ssl_environ(self):
+    """Create WSGI environ entries to be merged into each request."""
+    cert = open(self.ssl_certificate, 'rb').read()
+    cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
+    ssl_environ = {
+      "wsgi.url_scheme": "https",
+      "HTTPS": "on",
+      # pyOpenSSL doesn't provide access to any of these AFAICT
 ##            'SSL_PROTOCOL': 'SSLv2',
-##            SSL_CIPHER 	string 	The cipher specification name
-##            SSL_VERSION_INTERFACE 	string 	The mod_ssl program version
-##            SSL_VERSION_LIBRARY 	string 	The OpenSSL program version
-            }
-
-        # Server certificate attributes
-        ssl_environ.update({
-            'SSL_SERVER_M_VERSION': cert.get_version(),
-            'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
+##            SSL_CIPHER  string  The cipher specification name
+##            SSL_VERSION_INTERFACE   string  The mod_ssl program version
+##            SSL_VERSION_LIBRARY   string  The OpenSSL program version
+      }
+
+    # Server certificate attributes
+    ssl_environ.update({
+      'SSL_SERVER_M_VERSION': cert.get_version(),
+      'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
 ##            'SSL_SERVER_V_START': Validity of server's certificate (start time),
 ##            'SSL_SERVER_V_END': Validity of server's certificate (end time),
-            })
-
-        for prefix, dn in [("I", cert.get_issuer()),
-                           ("S", cert.get_subject())]:
-            # X509Name objects don't seem to have a way to get the
-            # complete DN string. Use str() and slice it instead,
-            # because str(dn) == "<X509Name object '/C=US/ST=...'>"
-            dnstr = str(dn)[18:-2]
-
-            wsgikey = 'SSL_SERVER_%s_DN' % prefix
-            ssl_environ[wsgikey] = dnstr
-
-            # The DN should be of the form: /k1=v1/k2=v2, but we must allow
-            # for any value to contain slashes itself (in a URL).
-            while dnstr:
-                pos = dnstr.rfind("=")
-                dnstr, value = dnstr[:pos], dnstr[pos + 1:]
-                pos = dnstr.rfind("/")
-                dnstr, key = dnstr[:pos], dnstr[pos + 1:]
-                if key and value:
-                    wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
-                    ssl_environ[wsgikey] = value
-
-        self.environ.update(ssl_environ)
+      })
+
+    for prefix, dn in [("I", cert.get_issuer()),
+               ("S", cert.get_subject())]:
+      # X509Name objects don't seem to have a way to get the
+      # complete DN string. Use str() and slice it instead,
+      # because str(dn) == "<X509Name object '/C=US/ST=...'>"
+      dnstr = str(dn)[18:-2]
+
+      wsgikey = 'SSL_SERVER_%s_DN' % prefix
+      ssl_environ[wsgikey] = dnstr
+
+      # The DN should be of the form: /k1=v1/k2=v2, but we must allow
+      # for any value to contain slashes itself (in a URL).
+      while dnstr:
+        pos = dnstr.rfind("=")
+        dnstr, value = dnstr[:pos], dnstr[pos + 1:]
+        pos = dnstr.rfind("/")
+        dnstr, key = dnstr[:pos], dnstr[pos + 1:]
+        if key and value:
+          wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
+          ssl_environ[wsgikey] = value
+
+    self.environ.update(ssl_environ)
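
The right-to-left rfind() walk above splits a DN of the form /k1=v1/k2=v2
while tolerating slashes inside values (e.g. in URLs). Extracted as a
standalone helper (parse_dn is an illustrative name):

    def parse_dn(dnstr):
      """Parse '/C=US/CN=a/b' into {'C': 'US', 'CN': 'a/b'}."""
      pairs = {}
      while dnstr:
        pos = dnstr.rfind("=")
        dnstr, value = dnstr[:pos], dnstr[pos + 1:]
        pos = dnstr.rfind("/")
        dnstr, key = dnstr[:pos], dnstr[pos + 1:]
        if key and value:
          pairs[key] = value
      return pairs

    assert parse_dn("/C=US/CN=example/with/slashes") == {
        "C": "US", "CN": "example/with/slashes"}
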

+ 8 - 2
desktop/core/src/desktop/supervisor.py

@@ -333,7 +333,10 @@ def main():
     pidfile_context.break_lock()
 
   if options.daemonize:
-    outfile = open_file(os.path.join(log_dir, 'supervisor.out'), 'a+', 0)
+    if sys.version_info[0] > 2:
+      outfile = open_file(os.path.join(log_dir, 'supervisor.out'), 'ba+', 0)
+    else:
+      outfile = open_file(os.path.join(log_dir, 'supervisor.out'), 'a+', 0)
     context = daemon.DaemonContext(
         working_directory=root,
         pidfile=pidfile_context,
@@ -365,7 +368,10 @@ def main():
         preexec_fn = None
 
       if options.daemonize:
-        log_stdout = open_file(os.path.join(log_dir, name + '.out'), 'a+', 0)
+        if sys.version_info[0] > 2:
+          log_stdout = open_file(os.path.join(log_dir, name + '.out'), 'ba+', 0)
+        else:
+          log_stdout = open_file(os.path.join(log_dir, name + '.out'), 'a+', 0)
         log_stderr = log_stdout
       else:
         # Passing None to subprocess.Popen later makes the subprocess inherit the
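
The mode change above works around a Python 3 restriction: buffering=0
(unbuffered) is only allowed for binary-mode files, so the text mode 'a+'
raises ValueError and must become a binary append mode. CPython accepts the
mode characters in any order, so the patch's 'ba+' is equivalent to 'ab+'.
A minimal demonstration (the path is hypothetical, for illustration only):

    import sys

    path = '/tmp/supervisor-demo.out'  # hypothetical path
    if sys.version_info[0] > 2:
      out = open(path, 'ab+', 0)       # unbuffered requires binary mode
      out.write(b'started\n')
    else:
      out = open(path, 'a+', 0)        # Python 2 allowed unbuffered text
      out.write('started\n')
    out.close()
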