context.py

from __future__ import absolute_import

import os
import sys
import threading
import warnings

from . import process

__all__ = []  # things are copied from here to __init__.py

W_NO_EXECV = """\
force_execv is not supported as the billiard C extension \
is not installed\
"""

#
# Exceptions
#

from .exceptions import (  # noqa
    ProcessError,
    BufferTooShort,
    TimeoutError,
    AuthenticationError,
    TimeLimitExceeded,
    SoftTimeLimitExceeded,
    WorkerLostError,
)

#
# Base type for contexts
#


class BaseContext(object):

    ProcessError = ProcessError
    BufferTooShort = BufferTooShort
    TimeoutError = TimeoutError
    AuthenticationError = AuthenticationError
    TimeLimitExceeded = TimeLimitExceeded
    SoftTimeLimitExceeded = SoftTimeLimitExceeded
    WorkerLostError = WorkerLostError

    current_process = staticmethod(process.current_process)
    active_children = staticmethod(process.active_children)

    if hasattr(os, 'cpu_count'):
        def cpu_count(self):
            '''Returns the number of CPUs in the system'''
            num = os.cpu_count()
            if num is None:
                raise NotImplementedError('cannot determine number of cpus')
            else:
                return num
    else:
        def cpu_count(self):  # noqa
            if sys.platform == 'win32':
                try:
                    num = int(os.environ['NUMBER_OF_PROCESSORS'])
                except (ValueError, KeyError):
                    num = 0
            elif 'bsd' in sys.platform or sys.platform == 'darwin':
                comm = '/sbin/sysctl -n hw.ncpu'
                if sys.platform == 'darwin':
                    comm = '/usr' + comm
                try:
                    with os.popen(comm) as p:
                        num = int(p.read())
                except ValueError:
                    num = 0
            else:
                try:
                    num = os.sysconf('SC_NPROCESSORS_ONLN')
                except (ValueError, OSError, AttributeError):
                    num = 0
            if num >= 1:
                return num
            else:
                raise NotImplementedError('cannot determine number of cpus')

    def Manager(self):
        '''Returns a manager associated with a running server process

        The manager's methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        '''
        from .managers import SyncManager
        m = SyncManager(ctx=self.get_context())
        m.start()
        return m

    def Pipe(self, duplex=True, rnonblock=False, wnonblock=False):
        '''Returns two connection objects connected by a pipe'''
        from .connection import Pipe
        return Pipe(duplex, rnonblock, wnonblock)

    def Lock(self):
        '''Returns a non-recursive lock object'''
        from .synchronize import Lock
        return Lock(ctx=self.get_context())

    def RLock(self):
        '''Returns a recursive lock object'''
        from .synchronize import RLock
        return RLock(ctx=self.get_context())

    def Condition(self, lock=None):
        '''Returns a condition object'''
        from .synchronize import Condition
        return Condition(lock, ctx=self.get_context())

    def Semaphore(self, value=1):
        '''Returns a semaphore object'''
        from .synchronize import Semaphore
        return Semaphore(value, ctx=self.get_context())

    def BoundedSemaphore(self, value=1):
        '''Returns a bounded semaphore object'''
        from .synchronize import BoundedSemaphore
        return BoundedSemaphore(value, ctx=self.get_context())

    def Event(self):
        '''Returns an event object'''
        from .synchronize import Event
        return Event(ctx=self.get_context())

    def Barrier(self, parties, action=None, timeout=None):
        '''Returns a barrier object'''
        from .synchronize import Barrier
        return Barrier(parties, action, timeout, ctx=self.get_context())

    def Queue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, ctx=self.get_context())

    def JoinableQueue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import JoinableQueue
        return JoinableQueue(maxsize, ctx=self.get_context())

    def SimpleQueue(self):
        '''Returns a queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(ctx=self.get_context())

    def Pool(self, processes=None, initializer=None, initargs=(),
             maxtasksperchild=None, timeout=None, soft_timeout=None,
             lost_worker_timeout=None, max_restarts=None,
             max_restart_freq=1, on_process_up=None, on_process_down=None,
             on_timeout_set=None, on_timeout_cancel=None, threads=True,
             semaphore=None, putlocks=False, allow_restart=False):
        '''Returns a process pool object'''
        from .pool import Pool
        return Pool(processes, initializer, initargs, maxtasksperchild,
                    timeout, soft_timeout, lost_worker_timeout,
                    max_restarts, max_restart_freq, on_process_up,
                    on_process_down, on_timeout_set, on_timeout_cancel,
                    threads, semaphore, putlocks, allow_restart,
                    context=self.get_context())

    def RawValue(self, typecode_or_type, *args):
        '''Returns a shared object'''
        from .sharedctypes import RawValue
        return RawValue(typecode_or_type, *args)

    def RawArray(self, typecode_or_type, size_or_initializer):
        '''Returns a shared array'''
        from .sharedctypes import RawArray
        return RawArray(typecode_or_type, size_or_initializer)

    def Value(self, typecode_or_type, *args, **kwargs):
        '''Returns a synchronized shared object'''
        from .sharedctypes import Value
        lock = kwargs.get('lock', True)
        return Value(typecode_or_type, *args, lock=lock,
                     ctx=self.get_context())

    def Array(self, typecode_or_type, size_or_initializer, *args, **kwargs):
        '''Returns a synchronized shared array'''
        from .sharedctypes import Array
        lock = kwargs.get('lock', True)
        return Array(typecode_or_type, size_or_initializer, lock=lock,
                     ctx=self.get_context())

    def freeze_support(self):
        '''Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        '''
        if sys.platform == 'win32' and getattr(sys, 'frozen', False):
            from .spawn import freeze_support
            freeze_support()

    def get_logger(self):
        '''Return package logger -- if it does not already exist then
        it is created.
        '''
        from .util import get_logger
        return get_logger()

    def log_to_stderr(self, level=None):
        '''Turn on logging and add a handler which prints to stderr'''
        from .util import log_to_stderr
        return log_to_stderr(level)

    def allow_connection_pickling(self):
        '''Install support for sending connections and sockets
        between processes
        '''
        # This is undocumented.  In previous versions of multiprocessing
        # its only effect was to make socket objects inheritable on Windows.
        from . import connection  # noqa

    def set_executable(self, executable):
        '''Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method.  Useful for people embedding Python.
        '''
        from .spawn import set_executable
        set_executable(executable)

    def set_forkserver_preload(self, module_names):
        '''Set list of module names to try to load in forkserver process.
        This is really just a hint.
        '''
        from .forkserver import set_forkserver_preload
        set_forkserver_preload(module_names)

    def get_context(self, method=None):
        if method is None:
            return self
        try:
            ctx = _concrete_contexts[method]
        except KeyError:
            raise ValueError('cannot find context for %r' % method)
        ctx._check_available()
        return ctx

    def get_start_method(self, allow_none=False):
        return self._name

    def set_start_method(self, method=None):
        raise ValueError('cannot set start method of concrete context')

    def forking_is_enabled(self):
        # XXX for compatibility with billiard <3.4
        return (self.get_start_method() or 'fork') == 'fork'

    def forking_enable(self, value):
        # XXX for compatibility with billiard <3.4
        if not value:
            from ._ext import supports_exec
            if supports_exec:
                self.set_start_method('spawn', force=True)
            else:
                warnings.warn(RuntimeWarning(W_NO_EXECV))

    def _check_available(self):
        pass
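
# Illustrative usage sketch: a BaseContext subclass is a namespace of factory
# methods, so a caller holding one of the concrete context instances created
# further down in this module (assumed here to be bound to the name ``ctx``)
# would use it roughly like this:
#
#     q = ctx.Queue()                  # queue bound to this context
#     pool = ctx.Pool(processes=2)     # pool bound to this context
#     pool.map(sum, [(1, 2), (3, 4)])  # hypothetical workload -> [3, 7]
#     pool.close()
#     pool.join()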

#
# Type of default context -- underlying context can be set at most once
#


class Process(process.BaseProcess):
    _start_method = None

    @staticmethod
    def _Popen(process_obj):
        return _default_context.get_context().Process._Popen(process_obj)


class DefaultContext(BaseContext):
    Process = Process

    def __init__(self, context):
        self._default_context = context
        self._actual_context = None

    def get_context(self, method=None):
        if method is None:
            if self._actual_context is None:
                self._actual_context = self._default_context
            return self._actual_context
        else:
            return super(DefaultContext, self).get_context(method)

    def set_start_method(self, method, force=False):
        if self._actual_context is not None and not force:
            raise RuntimeError('context has already been set')
        if method is None and force:
            self._actual_context = None
            return
        self._actual_context = self.get_context(method)

    def get_start_method(self, allow_none=False):
        if self._actual_context is None:
            if allow_none:
                return None
            self._actual_context = self._default_context
        return self._actual_context._name

    def get_all_start_methods(self):
        if sys.platform == 'win32':
            return ['spawn']
        else:
            from . import reduction
            if reduction.HAVE_SEND_HANDLE:
                return ['fork', 'spawn', 'forkserver']
            else:
                return ['fork', 'spawn']

DefaultContext.__all__ = list(x for x in dir(DefaultContext) if x[0] != '_')
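
# Illustrative sketch of the pin-once behaviour implemented above (the calls
# shown are the methods defined in DefaultContext; the sequence itself is
# only an assumed example):
#
#     _default_context.set_start_method('spawn')   # choice is pinned here
#     _default_context.get_start_method()          # -> 'spawn'
#     _default_context.set_start_method('fork')    # raises RuntimeError,
#                                                  # unless force=True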

#
# Context types for fixed start method
#

if sys.platform != 'win32':

    class ForkProcess(process.BaseProcess):
        _start_method = 'fork'

        @staticmethod
        def _Popen(process_obj):
            from .popen_fork import Popen
            return Popen(process_obj)

    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'

        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_posix import Popen
            return Popen(process_obj)

    class ForkServerProcess(process.BaseProcess):
        _start_method = 'forkserver'

        @staticmethod
        def _Popen(process_obj):
            from .popen_forkserver import Popen
            return Popen(process_obj)

    class ForkContext(BaseContext):
        _name = 'fork'
        Process = ForkProcess

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    class ForkServerContext(BaseContext):
        _name = 'forkserver'
        Process = ForkServerProcess

        def _check_available(self):
            from . import reduction
            if not reduction.HAVE_SEND_HANDLE:
                raise ValueError('forkserver start method not available')

    _concrete_contexts = {
        'fork': ForkContext(),
        'spawn': SpawnContext(),
        'forkserver': ForkServerContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['fork'])

else:

    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'

        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_win32 import Popen
            return Popen(process_obj)

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    _concrete_contexts = {
        'spawn': SpawnContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['spawn'])

#
# Force the start method
#


def _force_start_method(method):
    _default_context._actual_context = _concrete_contexts[method]

#
# Check that the current thread is spawning a child process
#

_tls = threading.local()


def get_spawning_popen():
    return getattr(_tls, 'spawning_popen', None)


def set_spawning_popen(popen):
    _tls.spawning_popen = popen


def assert_spawning(obj):
    if get_spawning_popen() is None:
        raise RuntimeError(
            '%s objects should only be shared between processes'
            ' through inheritance' % type(obj).__name__
        )
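
# Illustrative sketch of how these helpers fit together: a Popen
# implementation is assumed to bracket pickling of the process object with
# set_spawning_popen(), so that __reduce__ hooks calling assert_spawning()
# succeed only while a child is actually being spawned:
#
#     set_spawning_popen(popen)
#     try:
#         ...  # pickle the process object; assert_spawning() passes here
#     finally:
#         set_spawning_popen(None)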