def run(method, *args, **kwargs):
    """
    Run *method* in a background thread with a fresh database connection.

    Keyword-only flag (consumed from **kwargs):
        run_async (legacy spelling 'async'): when False, block until the
        worker thread finishes before returning. Defaults to True.

    Returns the started ``threading.Thread`` so callers can join() it or
    inspect its state.
    """
    # 'async' became a reserved word in Python 3.7, so the legacy flag can
    # only arrive inside **kwargs; accept both spellings for compatibility.
    run_async = kwargs.pop('run_async', kwargs.pop('async', True))
    # close_connection() wraps the callable so the worker thread does not
    # share (and later close/corrupt) the caller's database connection.
    thread = threading.Thread(
        target=close_connection(method), args=args, kwargs=kwargs)
    thread.start()
    if not run_async:
        thread.join()
    return thread
def apply_async(fn, name=None, method='thread'):
    """
    Replaces celery apply_async.

    Decorates *fn* with ``.apply_async`` and ``.delay`` attributes that run
    it on a background Thread or Process, mimicking the Celery task API.
    """
    def launcher(task_fn, task_name, concurrency, *args, **kwargs):
        # Tag the call so downstream state-keeping can identify the task.
        task_id = get_id()
        kwargs['_name'] = task_name
        kwargs['_task_id'] = task_id
        worker = concurrency(target=task_fn, args=args, kwargs=kwargs)
        worker.start()
        # Celery API compat
        worker.request = AttrDict(id=task_id)
        return worker

    if name is None:
        name = get_name(fn)
    if method == 'thread':
        method = Thread
    elif method == 'process':
        method = Process
    else:
        raise NotImplementedError("%s concurrency method is not supported." % method)
    fn.apply_async = partial(launcher, close_connection(keep_state(fn)), name, method)
    fn.delay = fn.apply_async
    return fn
def execute(scripts, serialize=False, run_async=None):
    """
    Executes the operations on the servers.

    serialize: execute one backend at a time
    run_async: do not join threads (overrides route.run_async)

    Returns the list of BackendLog objects created, one per backend/route.
    """
    if settings.ORCHESTRATION_DISABLE_EXECUTION:
        logger.info(
            'Orchestration execution is disabled by ORCHESTRATION_DISABLE_EXECUTION.'
        )
        return []
    # Execute scripts on each server
    threads_to_join = []
    logs = []
    for key, value in scripts.items():
        route, __, async_action = key
        backend, operations = value
        args = (route.host, )
        # run_async=None defers to the route's own setting; an async action
        # forces async either way, and serialize disables threading entirely.
        if run_async is None:
            is_async = not serialize and (route.run_async or async_action)
        else:
            is_async = not serialize and (run_async or async_action)
        kwargs = {
            'run_async': is_async,
        }
        # we clone the connection just in case we are isolated inside a transaction
        with db.clone(model=BackendLog) as handle:
            log = backend.create_log(*args, using=handle.target)
            log._state.db = handle.origin
        kwargs['log'] = log
        task = keep_log(backend.execute, log, operations)
        logger.debug('%s is going to be executed on %s.' % (backend, route.host))
        if serialize:
            # Execute one backend at a time, no need for threads
            task(*args, **kwargs)
        else:
            task = db.close_connection(task)
            thread = threading.Thread(target=task, args=args, kwargs=kwargs)
            thread.start()
            if not is_async:
                threads_to_join.append(thread)
        logs.append(log)
    # Plain loop instead of a side-effect-only list comprehension.
    for thread in threads_to_join:
        thread.join()
    return logs
is_async = not serialize and (async or async_action) kwargs = { 'async': is_async, } # we clone the connection just in case we are isolated inside a transaction with db.clone(model=BackendLog) as handle: log = backend.create_log(*args, using=handle.target) log._state.db = handle.origin kwargs['log'] = log task = keep_log(backend.execute, log, operations) logger.debug('%s is going to be executed on %s.' % (backend, route.host)) if serialize: # Execute one backend at a time, no need for threads task(*args, **kwargs) else: task = db.close_connection(task) thread = threading.Thread(target=task, args=args, kwargs=kwargs) thread.start() if not is_async: threads_to_join.append(thread) logs.append(log) [ thread.join() for thread in threads_to_join ] return logs def collect(instance, action, **kwargs): """ collect operations """ operations = kwargs.get('operations', OrderedSet()) route_cache = kwargs.get('route_cache', {}) for backend_cls in ServiceBackend.get_backends(): # Check if there exists a related instance to be executed for this backend and action