def __init__(self, taskmaster, num, stack_size):
    """Initialize a parallel job driven by *taskmaster*.

    The taskmaster hands out work through next_task() (None signals
    that no work remains) and is told the outcome of each task via
    executed() on success or failed() when execute() raised.

    Calls into the taskmaster are serialized; execute() calls on
    distinct tasks are not -- running them concurrently is the whole
    point of a parallel job.
    """
    self.taskmaster = taskmaster
    self.maxjobs = num
    # The shared interrupt flag is handed to every worker thread.
    self.interrupted = InterruptState()
    self.tp = ThreadPool(num, stack_size, self.interrupted)

def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# this test is not applicable to jython since
# 1. Lock is equiv to RLock, so this weird sync behavior won't be seen
# 2. We use a weak hash map to map these threads
# 3. This behavior doesn't make sense for Jython since any foreign
# Java threads can use the same underlying locks, etc

def __init__(self, num, taskmaster):
    """
    Create 'num' jobs driven by the given taskmaster.

    A parallel job with 'num' worker threads is used when 'num' is
    greater than one; otherwise (or when a parallel job cannot be
    built) a serial job is used.

    After construction, the 'num_jobs' attribute holds the number of
    jobs actually allocated: it is reset to 1 whenever more than one
    job was requested but the Parallel class could not provide it, so
    wrapping interfaces that care should inspect it.
    """
    self.job = None
    if num > 1:
        # Honor an explicitly configured stack size, else the default.
        if explicit_stack_size is None:
            size = default_stack_size
        else:
            size = explicit_stack_size
        try:
            self.job = Parallel(taskmaster, num, size)
            self.num_jobs = num
        except NameError:
            # Parallel (or its threading support) is unavailable;
            # fall through to the serial path below.
            pass
    if self.job is None:
        self.job = Serial(taskmaster)
        self.num_jobs = 1

def bigstack(*args, **kwargs):
    '''Decorator that increases the stack size of a function and the recursion
    limit. The function runs in a separate thread with a stack size specified
    by the 'stacksize' parameter (default: 128MiB). Also the recursion limit
    can be modified by the 'recursionlimit' parameter (default: 1M), but be
    aware that this is a variable shared by the whole python environment, so a
    subsequent invocation of a decorated function may change it.

    Usable bare (@bigstack) or with keyword arguments
    (@bigstack(stacksize=..., recursionlimit=...)); any other positional
    argument raises ValueError.'''
    stacksize = kwargs.get('stacksize', 128 * _M)
    recursionlimit = kwargs.get('recursionlimit', _M)
    def _decorator(fn):
        '''This is the bigstack decorator itself.'''
        @_functools.wraps(fn)
        def _fn(*args, **kwargs):
            # Hold the lock across pool creation: only threads spawned
            # while the enlarged stack size is in effect receive it, so a
            # concurrent decorated call must not reset stack_size before
            # this call's worker thread exists.  (The original code
            # created the pool outside the lock, racing exactly that way.)
            with _lock:
                _threading.stack_size(stacksize)
                _sys.setrecursionlimit(recursionlimit)
                # only new threads get the redefined stack size
                pool = _mpool.ThreadPool(processes=1)
            try:
                return pool.apply_async(fn, args, kwargs).get()
            finally:
                # Tear down the worker thread; the original code leaked
                # one pool (and its thread) per invocation.
                pool.close()
                pool.join()
        return _fn
    if not args:
        # Called with keyword arguments only: hand back the parametrized
        # decorator for the subsequent @bigstack(...) application.
        return _decorator
    # Bare usage: the single positional argument must be the function.
    if not isinstance(args[0], _types.FunctionType):
        raise ValueError('use keyword argument as bigstack parameters')
    return _decorator(args[0])