def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
        loglevel=None, logfile=None, pidfile=None, statedb=None,
        **kwargs):
    """Launch a worker instance and return its exit code."""
    maybe_drop_privileges(uid=uid, gid=gid)
    # Pools like eventlet/gevent need to patch libraries as early as
    # possible, so resolve the concrete pool implementation first.
    pool_cls = (concurrency.get_implementation(pool_cls) or
                self.app.conf.worker_pool)
    if self.app.IS_WINDOWS and kwargs.get('beat'):
        self.die('-B option does not work on Windows. '
                 'Please run celery beat as a separate service.')
    hostname = self.host_format(default_nodename(hostname))
    if loglevel:
        try:
            loglevel = mlevel(loglevel)
        except KeyError:  # pragma: no cover
            self.die('Unknown level {0!r}. Please use one of {1}.'.format(
                loglevel,
                '|'.join(l for l in LOG_LEVELS if isinstance(l, string_t))))
    worker = self.app.Worker(
        hostname=hostname,
        pool_cls=pool_cls,
        loglevel=loglevel,
        logfile=logfile,  # node format handled by celery.app.log.setup
        pidfile=self.node_format(pidfile, hostname),
        statedb=self.node_format(statedb, hostname),
        **kwargs)
    worker.start()
    return worker.exitcode
def detach(path, argv, logfile=None, pidfile=None, uid=None, gid=None,
           umask=None, working_directory=None, fake=False, app=None,
           executable=None, hostname=None):
    """Double-fork into the background, then exec *path* with *argv*."""
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    if C_FAKEFORK:
        fake = 1
    with detached(logfile, pidfile, uid, gid, umask, working_directory,
                  fake, after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem("ERROR", logfile,
                                            hostname=hostname)
            logger.critical("Can't exec %r", " ".join([path] + argv),
                            exc_info=True)
        # Reached only when exec failed: report failure to the caller.
        return EX_FAILURE
def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
        loglevel=None, logfile=None, pidfile=None, statedb=None, **kwargs):
    """Configure and start a worker instance; return its exit code."""
    maybe_drop_privileges(uid=uid, gid=gid)
    # Resolve the pool implementation immediately: eventlet/gevent pools
    # must monkey-patch the standard library as early as possible.
    pool_impl = concurrency.get_implementation(pool_cls)
    pool_cls = pool_impl or self.app.conf.worker_pool
    if self.app.IS_WINDOWS and kwargs.get('beat'):
        self.die('-B option does not work on Windows. '
                 'Please run celery beat as a separate service.')
    hostname = self.host_format(default_nodename(hostname))
    if loglevel:
        try:
            loglevel = mlevel(loglevel)
        except KeyError:  # pragma: no cover
            valid = '|'.join(
                l for l in LOG_LEVELS if isinstance(l, string_t))
            self.die('Unknown level {0!r}. Please use one of {1}.'.format(
                loglevel, valid))
    worker = self.app.Worker(
        hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
        logfile=logfile,  # node format handled by celery.app.log.setup
        pidfile=self.node_format(pidfile, hostname),
        statedb=self.node_format(statedb, hostname),
        **kwargs)
    worker.start()
    return worker.exitcode
def __init__(self, app=None, hostname=None, **kwargs):
    """Bind the app, resolve the node name and run the setup hooks.

    The hook order is significant: loader init first, then the
    before/defaults/after sequence, and finally instance setup with
    the prepared arguments.
    """
    self.app = app or self.app
    self.hostname = default_nodename(hostname)
    self.app.loader.init_worker()
    self.on_before_init(**kwargs)
    self.setup_defaults(**kwargs)
    self.on_after_init(**kwargs)
    self.setup_instance(**self.prepare_args(**kwargs))
def worker_terminate_command(name='dagster', config_yaml=None, all_=False):
    """Broadcast a shutdown to one named worker, or to every worker."""
    app = get_app(config_yaml)
    if all_:
        app.control.broadcast('shutdown')
        return
    # Target only the single worker whose node name matches *name*.
    destination = host_format(default_nodename(get_worker_name(name)))
    app.control.broadcast('shutdown', destination=[destination])
def __init__(self, app=None, hostname=None, **kwargs):
    """Bind the app, run the setup hooks and register shutdown finalizers."""
    self.app = app or self.app
    self.hostname = default_nodename(hostname)
    self.app.loader.init_worker()
    self.on_before_init(**kwargs)
    self.setup_defaults(**kwargs)
    self.on_after_init(**kwargs)
    self.setup_instance(**self.prepare_args(**kwargs))
    # Ensure the worker-shutdown signal also fires on interpreter exit.
    self._finalize = [
        Finalize(self, self._send_worker_shutdown, exitpriority=10),
    ]
def detach(path, argv, logfile=None, pidfile=None, uid=None, gid=None,
           umask=None, workdir=None, fake=False, app=None,
           executable=None, hostname=None):
    """Detach program by argv'."""
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    fake = 1 if C_FAKEFORK else fake
    with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
                  after_forkers=False):
        try:
            # An explicitly requested interpreter overrides *path*.
            path = path if executable is None else executable
            os.execv(path, [path] + argv)
        except Exception:  # pylint: disable=broad-except
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem("ERROR", logfile,
                                            hostname=hostname)
            logger.critical("Can't exec %r", " ".join([path] + argv),
                            exc_info=True)
        # Only reached when exec failed.
        return EX_FAILURE
def detach(path, argv, logfile=None, pidfile=None, uid=None, gid=None,
           umask=None, working_directory=None, fake=False, app=None,
           executable=None, hostname=None):
    """Fork off a daemonized child that exec()s *path* with *argv*."""
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    if C_FAKEFORK:
        fake = 1
    with detached(logfile, pidfile, uid, gid, umask, working_directory,
                  fake, after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem('ERROR', logfile,
                                            hostname=hostname)
            logger.critical("Can't exec %r", ' '.join([path] + argv),
                            exc_info=True)
        # exec never returns on success; getting here means it failed.
        return EX_FAILURE
def detach(path, argv, logfile=None, pidfile=None, uid=None, gid=None,
           umask=None, workdir=None, fake=False, app=None,
           executable=None, hostname=None):
    """Detach program by argv'."""
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    if C_FAKEFORK:
        fake = 1
    with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
                  after_forkers=False):
        try:
            path = path if executable is None else executable
            os.execv(path, [path] + argv)
        except Exception:  # pylint: disable=broad-except
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem(
                'ERROR', logfile, hostname=hostname)
            logger.critical("Can't exec %r", ' '.join([path] + argv),
                            exc_info=True)
        # Reached only when the exec failed.
        return EX_FAILURE
def convert(self, value, param, ctx):
    """Expand *value* into a fully qualified node name."""
    nodename = default_nodename(value)
    return host_format(nodename)
from celery import current_app app = current_app app.log.setup_logging_subsystem('ERROR', logfile, hostname=hostname) logger.critical("Can't exec %r", ' '.join([path] + argv), exc_info=True) return EX_FAILURE @click.command(cls=CeleryDaemonCommand, context_settings={'allow_extra_args': True}) @click.option('-n', '--hostname', default=host_format(default_nodename(None)), cls=CeleryOption, type=HOSTNAME, help_group="Worker Options", help="Set custom hostname (e.g., 'w1@%%h'). " "Expands: %%h (hostname), %%n (name) and %%d, (domain).") @click.option('-D', '--detach', cls=CeleryOption, is_flag=True, default=False, help_group="Worker Options", help="Start worker as a background process.") @click.option( '-S', '--statedb',
def _report_result(submission_id, verdict, time_cost=None, memory_cost=None,
                   extra=None):
    """Publish one judging status/result message onto the 'result' queue.

    *extra* (optional dict) is merged into the info payload; the reporting
    node name is always attached under the 'node' key.
    """
    info = dict(extra or {})
    info['node'] = default_nodename(None)
    result_submission_task.apply_async(
        args=[submission_id, verdict, time_cost, memory_cost, info],
        queue='result')


def run_submission_task(submission_id, problem_id, manifest, code, language,
                        time_limit, memory_limit):
    """Judge one submission end to end.

    Validates the manifest (syncing test cases if enabled), builds the
    runner, compiles and runs the code, and reports every intermediate
    state and the final verdict via ``result_submission_task``.
    """
    # BUG FIX: logger.debug(submission_id, problem_id, ...) misused the
    # logging API (first positional arg is the format string) — use lazy
    # %-style placeholders instead.
    logger.debug(
        'submission=%s problem=%s manifest=%s code=%s language=%s '
        'time_limit=%s memory_limit=%s',
        submission_id, problem_id, manifest, code, language,
        time_limit, memory_limit)
    _report_result(submission_id, Verdict.RUNNING)

    if language not in ACCEPT_SUBMISSION_LANGUAGES:
        logger.warning('request language not valid')
        _report_result(submission_id, Verdict.SYSTEM_ERROR,
                       extra={'error': f'language {language} not support'})
        # BUG FIX: previously fell through and judged the unsupported
        # language anyway.
        return

    # Validate the manifest; optionally sync missing/broken test cases.
    try:
        validate_manifest(manifest)
    except TestCaseError as test_case_error:
        logger.debug(test_case_error)
        if OJ_ENABLE_SYNC:
            _report_result(submission_id, Verdict.SYNC_TEST_CASES)
            logger.debug("sync test cases")
            try:
                sync_test_cases(manifest['hash'], problem_id)
                validate_manifest(manifest)
            except ManifestError as e:
                traceback.print_exc()
                _report_result(submission_id, Verdict.SYSTEM_ERROR,
                               extra={'error': str(e)})
                return
        else:
            # Sync disabled: report system error.
            _report_result(submission_id, Verdict.SYSTEM_ERROR,
                           extra={'error': str(test_case_error)})
            # BUG FIX: missing return — previously continued judging with
            # invalid test cases.
            return
    except ManifestError as e:
        logger.debug(e)
        _report_result(submission_id, Verdict.SYSTEM_ERROR,
                       extra={'error': str(e)})
        return

    # Initialize the sandboxed runner.
    try:
        runner = JudgeRunner(PROBLEM_TEST_CASES_DIR, manifest, time_limit,
                             memory_limit, code,
                             load_submission_config(language),
                             load_spj_config('c'))
    except Exception as e:
        _report_result(submission_id, Verdict.SYSTEM_ERROR,
                       extra={'error': str(e)})
        return

    # Compile the submitted code.
    try:
        res_compile = runner.compile()
        logger.debug('compile result:' + str(res_compile))
    except Exception as e:
        logger.debug('compile error:' + str(e))
        _report_result(submission_id, Verdict.COMPILE_ERROR,
                       extra={'error': str(e)})
        return

    # Run against all test cases and report the verdict.
    try:
        result = runner.run()
        verdict = Verdict.ACCEPTED
        # Reported time/memory spend are the per-case maxima.
        time_cost = 0
        memory_cost = 0
        for item in result:
            time_cost = max(time_cost, item['cpu_time'])
            memory_cost = max(memory_cost, item['memory'])
            if item['result'] != 0:
                # First failing case decides the verdict.
                verdict = Verdict.VERDICT_MAPPING[item['result']]
                break
        _report_result(submission_id, verdict, time_cost, memory_cost,
                       extra={'result': result})
    except Exception as e:
        traceback.print_exc()
        _report_result(submission_id, Verdict.SYSTEM_ERROR,
                       extra={'error': str(e)})
        return

    # Best-effort cleanup of the working directory.
    try:
        runner.clean()
    except OSError:
        pass