def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_('{} ready for work at {}').format(name, current_process().pid))
    task_count = 0
    # Start reading the task queue
    for task in iter(task_queue.get, 'STOP'):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_('{} processing [{}]').format(name, task['name']))
        f = task['func']
        # if it's not an instance try to get it from the string
        if not callable(task['func']):
            try:
                module, func = f.rsplit('.', 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
                if error_reporter:
                    error_reporter.report()
                if rollbar:
                    rollbar.report_exc_info()
        # We're still going
        if not result:
            db.close_old_connections()
            timer_value = task['kwargs'].pop('timeout', timeout or 0)
            # signal execution
            pre_execute.send(sender="django_q", func=f, task=task)
            # execute the payload
            timer.value = timer_value  # Busy
            try:
                res = f(*task['args'], **task['kwargs'])
                result = (res, True)
            except Exception as e:
                result = ('{}'.format(e), False)
                if error_reporter:
                    error_reporter.report()
                if rollbar:
                    rollbar.report_exc_info()
        # Process result
        task['result'] = result[0]
        task['success'] = result[1]
        task['stopped'] = timezone.now()
        result_queue.put(task)
        timer.value = -1  # Idle
        # Recycle
        if task_count == Conf.RECYCLE:
            timer.value = -2  # Recycled
            break
    logger.info(_('{} stopped doing work').format(name))
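
# --- Illustration only: dotted-path resolution -----------------------------
# The worker above accepts task['func'] either as a callable or as a dotted
# module path string; this standalone helper repeats that resolution logic
# outside the worker so it can be tried in isolation. resolve_callable() is
# a hypothetical name, not part of django_q.
import importlib


def resolve_callable(path):
    """Turn a string like 'math.sqrt' into the callable it names."""
    module, func = path.rsplit('.', 1)
    return getattr(importlib.import_module(module), func)


# resolve_callable('math.sqrt')(16.0) -> 4.0
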
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_(f"{name} ready for work at {current_process().pid}"))
    task_count = 0
    if timeout is None:
        timeout = -1
    # Start reading the task queue
    for task in iter(task_queue.get, "STOP"):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_(f'{name} processing [{task["name"]}]'))
        f = task["func"]
        # if it's not an instance try to get it from the string
        if not callable(task["func"]):
            try:
                module, func = f.rsplit(".", 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
                if error_reporter:
                    error_reporter.report()
        # We're still going
        if not result:
            close_old_django_connections()
            timer_value = task.pop("timeout", timeout)
            # signal execution
            pre_execute.send(sender="django_q", func=f, task=task)
            # execute the payload
            timer.value = timer_value  # Busy
            try:
                res = f(*task["args"], **task["kwargs"])
                result = (res, True)
            except Exception as e:
                result = (f"{e} : {traceback.format_exc()}", False)
                if error_reporter:
                    error_reporter.report()
                if task.get("sync", False):
                    raise
        with timer.get_lock():
            # Process result
            task["result"] = result[0]
            task["success"] = result[1]
            task["stopped"] = timezone.now()
            result_queue.put(task)
            timer.value = -1  # Idle
            # Recycle
            if task_count == Conf.RECYCLE:
                timer.value = -2  # Recycled
                break
    logger.info(_(f"{name} stopped doing work"))
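
# --- Illustration only: wiring a worker process ----------------------------
# A minimal sketch of how a worker like the one above could be spawned and
# fed work, based on its signature, the task-dict keys it reads ('name',
# 'func', 'args', 'kwargs') and the 'STOP' sentinel. In django_q the cluster
# sentinel does this wiring; demo_add() and this script are hypothetical and
# assume the worker's module-level dependencies (Django settings, Conf,
# logger, signals) are already configured.
from multiprocessing import Process, Queue, Value


def demo_add(a, b):
    return a + b


if __name__ == "__main__":
    task_queue = Queue()
    result_queue = Queue()
    timer = Value("f", -1)  # shared busy/idle timer watched by the sentinel

    task_queue.put({"name": "demo", "func": demo_add, "args": (2, 3), "kwargs": {}})
    task_queue.put("STOP")  # poison pill that ends the iter() loop

    p = Process(target=worker, args=(task_queue, result_queue, timer))
    p.start()
    done = result_queue.get()  # drain the result before joining
    p.join()
    print(done["result"], done["success"])  # 5 True
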
def worker(
    id: str,
    cluster_id: str,
    task_queue: Queue,
    result_queue: Queue,
    timer: Value,
    timeout: int = Conf.TIMEOUT,
):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :param timeout: number of seconds to wait for a worker to finish.
    :type id: str
    :type cluster_id: str
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_(f"{name} ready for work at {current_process().pid}"))
    # Create Worker model
    model = WorkerModel.objects.create(id=id, cluster_id=cluster_id, pid=current_process().pid, task=None)
    task_count = 0
    if timeout is None:
        timeout = -1
    # Start reading the task queue
    for task in iter(task_queue.get, "STOP"):  # Task should be provided as task ID which is then retrieved
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_(f'{name} processing [{task["name"]}]'))
        f = task["func"]
        # if it's not an instance try to get it from the string
        if not callable(task["func"]):
            try:
                module, func = f.rsplit(".", 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
                if error_reporter:
                    error_reporter.report()
        # We're still going
        if not result:
            close_old_django_connections()
            # Set worker task details
            # model.task = get_task_representation(task)
            # model.save(update_fields=['task'])
            timer_value = task.pop("timeout", timeout)
            # signal execution
            pre_execute.send(sender="django_q", func=f, task=task)
            # execute the payload
            timer.value = timer_value  # Busy
            try:
                res = f(*task["args"], **task["kwargs"])
                result = (res, True)
            except Exception as e:
                result = (f"{e} : {traceback.format_exc()}", False)
                if error_reporter:
                    error_reporter.report()
                if task.get("sync", False):
                    raise
        # Clear task details
        # model.task = None
        # model.save(update_fields=['task'])
        with timer.get_lock():
            # Process result
            task["result"] = result[0]
            task["success"] = result[1]
            task["stopped"] = timezone.now()
            result_queue.put(task)
            timer.value = -1  # Idle
            # Recycle
            if task_count == Conf.RECYCLE or rss_check():
                timer.value = -2  # Recycled
                break
    logger.info(_(f"{name} stopped doing work"))
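
# --- Illustration only: a memory-based recycle guard -----------------------
# The recycle condition above also breaks the loop when rss_check() reports
# that the worker's resident set size has grown past a limit. The helper
# below sketches that idea with the standard resource module (Unix only);
# the project's actual rss_check() and the unit of its limit are not shown
# here, so treat the kilobyte assumption as illustrative.
import resource


def rss_over_limit(max_rss_kb: int) -> bool:
    # ru_maxrss is reported in kilobytes on Linux
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss >= max_rss_kb
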
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_('{} ready for work at {}').format(name, current_process().pid))
    task_count = 0
    # Start reading the task queue
    for task in iter(task_queue.get, 'STOP'):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # record the current worker's PID, so we can kill it later
        task['worker_process_pid'] = os.getpid()
        logger.info("Got job for worker {}".format(os.getpid()))
        Task.objects.filter(id=task['id']).update(worker_process_pid=task['worker_process_pid'])
        # mark the task as being run now.
        Task.objects.filter(id=task['id']).update(task_status=Task.INPROGRESS)
        # Get the function from the task
        logger.info(_('{} processing [{}]').format(name, task['name']))
        f = task['func']
        # if it's not an instance try to get it from the string
        if not callable(task['func']):
            try:
                module, func = f.rsplit('.', 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False, Task.FAILED)
                if rollbar:
                    rollbar.report_exc_info()
        # We're still going
        if not result:
            db.close_old_connections()
            timer_value = task['kwargs'].pop('timeout', timeout or 0)
            # signal execution
            pre_execute.send(sender="django_q", func=f, task=task)
            if task['is_progress_updating']:
                task['kwargs']['update_state'] = partial(update_task_progress, task)
            # execute the payload
            timer.value = timer_value  # Busy
            try:
                res = f(*task['args'], **task['kwargs'])
                result = (res, True, Task.SUCCESS)
            except Exception as e:
                e.traceback = traceback.format_exc()
                result = (e, False, Task.FAILED)
                if rollbar:
                    rollbar.report_exc_info()
        # make sure to remove the update_state func before shuffling across
        # process boundaries (through the result_queue), since its globals()
        # contains multiprocessing.Queue objects, which are unpickleable
        task['kwargs'].pop('update_state', None)
        # Process result
        task['result'] = result[0]
        task['success'] = result[1]
        task['task_status'] = result[2]
        task['stopped'] = timezone.now()
        result_queue.put(task)
        timer.value = -1  # Idle
        # Recycle
        if task_count == Conf.RECYCLE:
            timer.value = -2  # Recycled
            break
    logger.info(_('{} stopped doing work').format(name))
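
# --- Illustration only: a payload that reports progress --------------------
# The variant above injects an update_state callable (a partial of
# update_task_progress bound to the task) into the task's kwargs whenever
# is_progress_updating is set, so the payload can push progress while it
# runs. The single fractional argument passed below is an assumption about
# update_task_progress's remaining parameters, made for illustration only.
def long_running_export(rows, update_state=None):
    processed = []
    for i, row in enumerate(rows, start=1):
        processed.append(row)
        if update_state:
            update_state(i / len(rows))  # hypothetical progress payload
    return processed
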