def run(self, *args, **kwargs):
    scheduler = scheduler_manager.SchedulerManager.get_instance()
    datastore = scheduler.get_datastore()
    res = []
    for job_no, job_info in enumerate(args):
        job = utils.import_from_path(job_info["class"])
        execution_id = utils.generate_uuid()
        job_id = "{}-{}".format(self.job_id, job_no)
        datastore.add_execution(
            execution_id, job_id,
            constants.EXECUTION_STATUS_SCHEDULED,
            description="Subtask {}: {}".format(
                job_no, job.get_scheduled_description()),
        )
        job.run_job(self.job_id, execution_id, *job_info.get("args", []))
        exec_state = datastore.get_execution(execution_id)
        if exec_state["state"] == constants.EXECUTION_STATUS_SCHEDULED_ERROR:
            raise SerialExecutionException(
                "Job #{} failed with result {}".format(
                    job_no, exec_state["result"]))
        res.append(json.loads(exec_state["result"]))
    return res
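# A minimal usage sketch (not from the source): the serial run() above treats each
# positional argument as a dict with a "class" dotted path and optional "args".
# The job class paths and argument values below are hypothetical placeholders.
serial_job_args = [
    {"class": "myscheduler.jobs.fetch_job.FetchJob", "args": ["https://example.com"]},
    {"class": "myscheduler.jobs.clean_job.CleanJob"},
]
# Assuming the enclosing class is scheduled like any other job, these dicts are
# passed through as *args; run() executes them one by one and raises
# SerialExecutionException at the first sub-job whose state is SCHEDULED_ERROR.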
def run_job(cls, job_class_path, job_id, *args, **kwargs):
    # An execution ID is created for each run so its state can be tracked.
    execution_id = utils.generate_uuid()
    datastore = utils.get_datastore_instance()
    datastore.add_execution(
        execution_id, job_id,
        constants.EXECUTION_STATUS_SCHEDULED,
        description=job.JobBase.get_scheduled_description())
    try:
        job_class = utils.import_from_path(job_class_path)
        datastore.update_execution(
            execution_id,
            state=constants.EXECUTION_STATUS_SCHEDULED,
            description=job_class.get_scheduled_description())
        cls.run_scheduler_job(job_class, job_id, execution_id, *args, **kwargs)
    except Exception as e:
        logger.exception(e)
        datastore.update_execution(
            execution_id,
            state=constants.EXECUTION_STATUS_SCHEDULED_ERROR,
            description=job.JobBase.get_scheduled_error_description())
        return None
    return execution_id
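# A plausible sketch (an assumption, not the source) of the run_scheduler_job hook
# called above; it would sit in the same class as run_job. Subclasses could override
# it, e.g. to push work onto a queue, but the simplest version just delegates to the
# imported job class, which is assumed to expose a run_job classmethod of its own.
def run_scheduler_job(cls, job_class, job_id, execution_id, *args, **kwargs):
    job_class.run_job(job_id, execution_id, *args, **kwargs)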
def add_scheduler_job(self, job_class_string, name, pub_args=None,
                      month=None, day_of_week=None, day=None,
                      hour=None, minute=None, **kwargs):
    """Add a job. Job information will be persisted in postgres.

    This is a NON-BLOCKING operation: internally, apscheduler's wakeup()
    call is asynchronous.

    :param str job_class_string: String for job class,
        e.g., myscheduler.jobs.a_job.NiceJob
    :param str name: String for job name, e.g., Check Melissa job.
    :param list pub_args: List of arguments passed to the publish method
        of the job.
    :param str month: String for month cron string, e.g., */10
    :param str day_of_week: String for day of week cron string, e.g., 1-6
    :param str day: String for day cron string, e.g., */1
    :param str hour: String for hour cron string, e.g., */2
    :param str minute: String for minute cron string, e.g., */3
    :param dict kwargs: Other keyword arguments passed to the run_job function.
    :return: String of job id, e.g., 6bca19736d374ef2b3df23eb278b512e
    :rtype: str
    """
    if not pub_args:
        pub_args = []

    job_id = utils.generate_uuid()

    arguments = [job_class_string, job_id]
    arguments.extend(pub_args)

    scheduler_class = utils.import_from_path(settings.SCHEDULER_CLASS)
    self.add_job(scheduler_class.run_job,
                 'cron', month=month, day=day, day_of_week=day_of_week,
                 hour=hour, minute=minute,
                 args=arguments, kwargs=kwargs,
                 name=name, id=job_id)
    return job_id
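# Hypothetical usage sketch: scheduling a job to fire at minute 0 of every second
# hour. `sched` stands for whatever instance exposes add_scheduler_job (it wraps an
# apscheduler scheduler, since it calls self.add_job); the job class path, job name,
# and pub_args value are placeholders taken from the docstring examples above.
job_id = sched.add_scheduler_job(
    job_class_string='myscheduler.jobs.a_job.NiceJob',
    name='Check Melissa job',
    pub_args=['some-argument'],
    hour='*/2',
    minute='0')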
def __init__(self):
    JOB_STORES = {
        'default': utils.get_datastore_instance()
    }
    JOB_DEFAULT = {
        'coalesce': settings.JOB_COALESCE,
        'misfire_grace_time': settings.JOB_MISFIRE_GRACE_SEC,
        'max_instances': settings.JOB_MAX_INSTANCES
    }
    EXECUTORS = {
        'default': pool.ThreadPoolExecutor(settings.THREAD_POOL_SIZE)
    }

    scheduler_class = utils.import_from_path(settings.SCHEDULER_CLASS)
    self.sched = scheduler_class(
        jobstores=JOB_STORES,
        executors=EXECUTORS,
        job_defaults=JOB_DEFAULT,
        logger=logger,
        timezone=settings.TIMEZONE)
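# Illustrative settings-module sketch (assumed values, not the project's defaults)
# showing what the __init__ above reads; the SCHEDULER_CLASS path is hypothetical.
JOB_COALESCE = True            # collapse a backlog of missed runs into a single run
JOB_MISFIRE_GRACE_SEC = 3600   # how many seconds late a run may still start
JOB_MAX_INSTANCES = 3          # concurrent executions allowed per job
THREAD_POOL_SIZE = 4           # workers in the default ThreadPoolExecutor
TIMEZONE = 'UTC'
SCHEDULER_CLASS = 'mypackage.scheduler.MyScheduler'  # placeholder dotted path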
def test_class_import_from_path(self):
    path = 'ndscheduler.default_settings_test'
    module = utils.import_from_path(path)
    self.assertEqual(module.DEBUG, True)
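# A plausible sketch (an assumption, not the library's code) of what
# utils.import_from_path does in the test above: a module path such as
# 'ndscheduler.default_settings_test' imports the module itself, while a class
# path would additionally resolve the final attribute on the parent module.
import importlib

def import_from_path(path):
    try:
        return importlib.import_module(path)  # e.g. a settings module
    except ImportError:
        module_path, _, name = path.rpartition('.')
        return getattr(importlib.import_module(module_path), name)  # e.g. a class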