#
# Module providing the `Pool` class for managing a process pool
#
# processing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#

__all__ = ['Pool']

#
# Imports
#

import processing
import threading
import Queue
import itertools
import collections
import time

from processing import Process
from processing.logger import debug
from processing.finalize import Finalize
from processing.queue import SimpleQueue

#
# Constants representing the state of a pool
#

RUN = 0
CLOSE = 1
TERMINATE = 2

#
# Miscellaneous
#

newJobId = itertools.count().next

def mapstar(args):
    return map(*args)
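
# For example, mapstar((f, (1, 2))) == [f(1), f(2)] for a function `f`;
# it unpacks the (func, batch) pairs produced by Pool._getTasks() below.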

#
# Code run by worker processes
#

def worker(inqueue, outqueue, initializer=None, initargs=()):
    put = outqueue.put
    if initializer is not None:
        initializer(*initargs)

    for job, i, func, args, kwds in iter(inqueue.get, None):
        try:
            result = (True, func(*args, **kwds))
        except Exception, e:
            result = (False, e)
        put((job, i, result))

    debug('worker got sentinel -- exiting')
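
#
# Note on the message formats used above: each task read from `inqueue` is a
# tuple `(job, i, func, args, kwds)` and each result put on `outqueue` is a
# tuple `(job, i, (success, value))`; a `None` sentinel makes a worker exit.
#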

#
# Class representing a process pool
#

class Pool(object):
    '''
    Class which supports an async version of the `apply()` builtin
    '''
    def __init__(self, processes=None, initializer=None, initargs=()):
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._taskqueue = Queue.Queue()
        self._cache = {}
        self._state = RUN

        if processes is None:
            try:
                processes = processing.cpuCount()
            except NotImplementedError:
                processes = 1

        self._pool = [
            Process(target=worker, args=(self._inqueue, self._outqueue,
                                         initializer, initargs))
            for i in range(processes)
            ]

        for i, w in enumerate(self._pool):
            w.setName('PoolWorker-' + ':'.join(map(str, w._identity)))
            w.start()

        self._task_handler = threading.Thread(
            target=Pool._handleTasks,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool)
            )
        self._task_handler.setDaemon(True)
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handleResults,
            args=(self._outqueue, self._cache)
            )
        self._result_handler.setDaemon(True)
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = Finalize(
            self, Pool._terminatePool,
            args=(self._taskqueue, self._inqueue, self._outqueue,
                  self._cache, self._pool, self._task_handler,
                  self._result_handler),
            exitpriority=5
            )

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `apply()` builtin
        '''
        assert self._state == RUN
        return self.applyAsync(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Equivalent of `map()` builtin
        '''
        assert self._state == RUN
        return self.mapAsync(func, iterable, chunksize).get()

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `itertools.imap()` -- can be MUCH slower
        than `Pool.map()`
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)),
                                 result._setLength))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._getTasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)),
                                 result._setLength))
            return (item for chunk in result for item in chunk)

    def imapUnordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)),
                                 result._setLength))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._getTasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)),
                                 result._setLength))
            return (item for chunk in result for item in chunk)

    def applyAsync(self, func, args=(), kwds={}, callback=None):
        '''
        Asynchronous equivalent of `apply()` builtin
        '''
        assert self._state == RUN
        result = ApplyResult(self._cache, callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result

    def mapAsync(self, func, iterable, chunksize=None, callback=None):
        '''
        Asynchronous equivalent of `map()` builtin
        '''
        assert self._state == RUN
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            # default to roughly four chunks per worker process
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1

        task_batches = Pool._getTasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback)
        self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result

    @staticmethod
    def _handleTasks(taskqueue, inqueue, outqueue, pool):
        thread = threading.currentThread()
        put = inqueue._writer.send

        for taskseq, setLength in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                put(task)
            else:
                # the whole task sequence was sent without interruption
                if setLength:
                    debug('doing setLength()')
                    setLength(i+1)
                continue
            # inner loop was broken out of -- stop handling tasks
            break
        else:
            debug('task handler got sentinel')

        # tell result handler to finish when cache is empty
        outqueue.put(None)

        # tell workers there is no more work
        debug('task handler sending sentinel to workers')
        for p in pool:
            put(None)

        debug('task handler exiting')

    @staticmethod
    def _handleResults(outqueue, cache):
        thread = threading.currentThread()
        get = outqueue._reader.recv

        for job, i, obj in iter(get, None):
            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                return
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
        else:
            debug('result handler got sentinel')

        # drain remaining results until every pending job is accounted for
        while cache and thread._state != TERMINATE:
            item = get()
            if item is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = item
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _getTasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)
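
    # For example, for a function `f`, list(Pool._getTasks(f, range(5), 2))
    # gives [(f, (0, 1)), (f, (2, 3)), (f, (4,))] -- batches which mapstar()
    # later unpacks in a worker process.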

    def __reduce__(self):
        raise NotImplementedError, \
              'pool objects cannot be passed between processes or pickled'

    def close(self):
        debug('closing pool')
        self._state = CLOSE
        self._taskqueue.put(None)

    def terminate(self):
        debug('terminating pool')
        self._state = TERMINATE
        self._terminate()

    def join(self):
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _terminatePool(taskqueue, inqueue, outqueue, cache, pool,
                       task_handler, result_handler):
        debug('finalizing pool')

        if not result_handler.isAlive():
            debug('result handler already finished -- no need to terminate')
            return

        cache = {}
        task_handler._state = TERMINATE
        result_handler._state = TERMINATE

        debug('sending sentinels')
        taskqueue.put(None)
        outqueue.put(None)

        debug('getting read lock on inqueue')
        inqueue._rlock.acquire()

        debug('terminating workers')
        for p in pool:
            p.terminate()

        if task_handler.isAlive():
            debug('removing tasks from inqueue until task handler finished')
            while task_handler.isAlive() and inqueue._reader.poll():
                inqueue._reader.recv()
                time.sleep(0)

        debug('joining result handler')
        result_handler.join()

        debug('joining task handler')
        task_handler.join()

        debug('joining pool workers')
        for p in pool:
            p.join()

        debug('closing connections')
        inqueue._reader.close()
        outqueue._reader.close()
        inqueue._writer.close()
        outqueue._writer.close()

    # deprecated
    apply_async = applyAsync
    map_async = mapAsync
    imap_unordered = imapUnordered

#
# Class whose instances are returned by `Pool.applyAsync()`
#

class ApplyResult(object):

    def __init__(self, cache, callback):
        self._cond = threading.Condition(threading.Lock())
        self._job = newJobId()
        self._cache = cache
        self._ready = False
        self._callback = callback
        cache[self._job] = self

    def ready(self):
        return self._ready

    def successful(self):
        assert self._ready
        return self._success

    def wait(self, timeout=None):
        self._cond.acquire()
        try:
            if not self._ready:
                self._cond.wait(timeout)
        finally:
            self._cond.release()

    def get(self, timeout=None):
        self.wait(timeout)
        if not self._ready:
            raise processing.TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        self._cond.acquire()
        try:
            self._ready = True
            self._cond.notify()
        finally:
            self._cond.release()
        del self._cache[self._job]
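
# A sketch of typical use: `res = pool.applyAsync(func, args)` returns
# immediately; `res.get(timeout)` blocks and raises `processing.TimeoutError`
# if the result has not arrived in time, or re-raises the exception the job
# raised in the worker process.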

#
# Class whose instances are returned by `Pool.mapAsync()`
#

class MapResult(ApplyResult):

    def __init__(self, cache, chunksize, length, callback):
        ApplyResult.__init__(self, cache, callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            self._number_left = 0
            self._ready = True
        else:
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, (success, result)):
        if success:
            # a successful chunk fills its own slice of the result list
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._cond.acquire()
                try:
                    self._ready = True
                    self._cond.notify()
                finally:
                    self._cond.release()
        else:
            self._success = False
            self._value = result
            del self._cache[self._job]
            self._cond.acquire()
            try:
                self._ready = True
                self._cond.notify()
            finally:
                self._cond.release()

#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = newJobId()
        self._cache = cache
        self._items = collections.deque()
        self._index = 0
        self._length = None
        self._unsorted = {}
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                # try again -- a result may have arrived while waiting
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise processing.TimeoutError
        finally:
            self._cond.release()

        success, value = item
        if success:
            return value
        raise value

    def _set(self, i, obj):
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                # flush any buffered results which are now in sequence
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                # result arrived out of order -- buffer it until its turn
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _setLength(self, length):
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()

#
# Class whose instances are returned by `Pool.imapUnordered()`
#

class IMapUnorderedIterator(IMapIterator):

    def _set(self, i, obj):
        self._cond.acquire()
        try:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()
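
#
# The block below is not part of the original module: a minimal usage
# sketch, assuming a fork-based platform where `square` (defined under the
# `__main__` guard) is inherited by the worker processes.
#

if __name__ == '__main__':

    def square(x):
        return x*x

    pool = Pool(processes=2)

    # synchronous interfaces
    print pool.apply(square, (7,))              # 49
    print pool.map(square, range(5))            # [0, 1, 4, 9, 16]

    # asynchronous interface with a callback run by the result handler
    results = []
    res = pool.applyAsync(square, (10,), callback=results.append)
    print res.get(timeout=10)                   # 100
    print results                               # [100]

    # ordered lazy iteration
    print list(pool.imap(square, range(5)))     # [0, 1, 4, 9, 16]

    pool.close()
    pool.join()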