Example No. 1
    def _reconstitute_job(self, job_state):
        schedule = job_state['schedule']
        schedule.pop('coalesce', None)
        next_run_time = job_state['next_run_time']

        if next_run_time == 1:
            # This is hacky: we subtract more than the misfire_grace_time value so
            # that, on the first load after a fresh install, the job is treated as
            # missed instead of being executed.
            next_run_time = int(time.time() - 1200)

        job = Job(
            id=job_state['id'],
            func="__main__:job",
            trigger=CronTrigger(**schedule),
            name=job_state['name'],
            args=[job_state['task']] + job_state['args'],
            scheduler=self._scheduler,
            executor='default',
            next_run_time=utc_timestamp_to_datetime(next_run_time),
            kwargs={
                'id': job_state['id'],
                'name': job_state['name'],
                'hidden': job_state.get('hidden', False),
                'protected': job_state.get('protected', False)
            }
        )

        job.coalesce = True
        job.max_instances = 1
        job.misfire_grace_time = 600
        job._jobstore_alias = self._alias
        return job
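The next_run_time == 1 sentinel above leans on APScheduler's UTC timestamp helpers. A minimal sketch of how those helpers round-trip, assuming APScheduler 3.x is installed; the 1200-second offset mirrors the example and comfortably exceeds the 600-second misfire_grace_time it sets:

import time
from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime

# A timestamp far enough in the past to fall outside misfire_grace_time,
# so the scheduler reports the job as missed when it is first loaded.
stale_ts = int(time.time() - 1200)
stale_dt = utc_timestamp_to_datetime(stale_ts)   # timezone-aware UTC datetime
assert datetime_to_utc_timestamp(stale_dt) == stale_ts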
Example No. 2
    def _reconstitute_job(self, job_state):
        schedule = job_state['schedule']
        schedule.pop('coalesce', None)
        job = Job(
            id=job_state['id'],
            func="__main__:job",
            trigger=CronTrigger(**schedule),
            name=job_state['name'],
            args=[job_state['task']] + job_state['args'],
            scheduler=self._scheduler,
            executor='default',
            next_run_time=utc_timestamp_to_datetime(job_state['next_run_time']),
            kwargs={
                'id': job_state['id'],
                'name': job_state['name'],
                'hidden': job_state.get('hidden', False),
                'protected': job_state.get('protected', False)
            }
        )

        job.coalesce = True
        job.max_instances = 1
        job.misfire_grace_time = 0
        job._jobstore_alias = self._alias
        return job
Example No. 3
def _update_scheduler_status(scheduler, config):
    now = datetime.now()
    working_hours = parse_working_hours(config)
    jobs = scheduler.get_jobs()
    work = False
    for start, end in working_hours:
        if start <= (now.hour * 60 + now.minute) <= end:
            work = True
    if not work:
        for j in jobs:
            if j.name == 'partial' and \
               j.func.func.__name__ in excluded_job_names:
                continue
            j.pause()
    else:
        # slack post message limit
        for job_id, times_ in g.items():
            if times_ > config.max_alert - 1:
                job = Job(scheduler, job_id)
                job.pause()
                stoped[job_id] = (job, now)
                g[job_id] = 0

        for job_id in list(stoped):
            job, time_ = stoped[job_id]
            if time_ + timedelta(minutes=config.pause_time) <= now:
                job.resume()
                del stoped[job_id]
        for j in jobs:
            if j.name == 'partial' and \
               j.func.func.__name__ in excluded_job_names:
                continue
            if j.id not in stoped:
                j.resume()
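The working-hours check above implies that parse_working_hours returns (start, end) pairs expressed as minutes since midnight. A hedged sketch of such a helper, assuming the config stores ranges as 'HH:MM-HH:MM' strings; the attribute name working_hours and the string format are guesses for illustration, not taken from the original code:

from typing import List, Tuple

def _to_minutes(hhmm: str) -> int:
    hours, minutes = hhmm.split(':')
    return int(hours) * 60 + int(minutes)

def parse_working_hours(config) -> List[Tuple[int, int]]:
    """Turn e.g. ['09:00-12:30', '13:30-18:00'] into [(540, 750), (810, 1080)]."""
    ranges = []
    for spec in getattr(config, 'working_hours', []):  # attribute name assumed
        start_str, end_str = spec.split('-')
        ranges.append((_to_minutes(start_str), _to_minutes(end_str)))
    return ranges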
Example No. 4
    async def _remove_and_silence_alarm(self, alarm_job: Job, reminder_text: str):
        # We found a reminder with an alarm. Remove it from the dict of current
        # alarms
        ALARMS.pop((self.room.room_id, reminder_text.upper()), None)

        if SCHEDULER.get_job(alarm_job.id):
            # Silence the alarm job
            alarm_job.remove()
Example No. 5
 def __init__(self, *args, **kwargs):
     if len(args) > 1 and hasattr(args[1], '__call__') or 'func' in kwargs:
         models.Model.__init__(self)
         Job.__init__(self, *args, **kwargs)
     else:
         models.Model.__init__(self, *args, **kwargs)
         self.instances = 0
         self._lock = Lock()
         self.func = ref_to_obj(self.func_ref)
Example No. 6
 def __init__(self, *args, **kwargs):
     if len(args) > 1 and hasattr(args[1], '__call__') or 'func' in kwargs:
         models.Model.__init__(self)
         Job.__init__(self, *args, **kwargs)
     else:
         models.Model.__init__(self, *args, **kwargs)
         self.instances = 0
         self._lock = Lock()
         self.func = ref_to_obj(self.func_ref)
Example No. 7
 def add_job(self, key, jobname, trigger, func, args, kwargs, jobstore='default',
             **options): 
     job = Job(trigger, func, args or [], kwargs or {}, 
                 options.pop('misfire_grace_time', self.misfire_grace_time),
                 options.pop('coalesce', self.coalesce), name=jobname,  **options)
     job.key = key
     
     if not self.running:
         self._pending_jobs.append((job, jobstore))
     else:
         self._real_add_job(job, jobstore, True)
     return job
Example No. 8
def construct_job(job: Job, scheduler, alias="beer_garden"):
    """Convert a Beergarden job to an APScheduler one."""
    if job is None:
        return None

    trigger = construct_trigger(job.trigger_type, job.trigger)
    next_run_time = utc.localize(job.next_run_time) if job.next_run_time else None

    ap_job = APJob.__new__(APJob)
    ap_job._scheduler = scheduler
    ap_job._jobstore_alias = alias
    ap_job.__setstate__(
        {
            "id": job.id,
            "func": "beer_garden.scheduler:run_job",
            "trigger": trigger,
            "executor": "default",
            "args": (),
            "kwargs": {"request_template": job.request_template, "job_id": job.id},
            "name": job.name,
            "misfire_grace_time": job.misfire_grace_time,
            "coalesce": job.coalesce,
            "max_instances": job.max_instances,
            "next_run_time": next_run_time,
        }
    )

    return ap_job
Example No. 9
    def add_job(self,
                trigger,
                func,
                args,
                kwargs,
                jobstore='default',
                **options):
        """
        Adds the given job to the job list and notifies the scheduler thread.
        Any extra keyword arguments are passed along to the constructor of the
        :class:`~apscheduler.job.Job` class (see :ref:`job_options`).

        :param trigger: trigger that determines when ``func`` is called
        :param func: callable to run at the given time
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param jobstore: alias of the job store to store the job in
        :rtype: :class:`~apscheduler.job.Job`
        """
        job = Job(trigger, func, args or [], kwargs or {},
                  options.pop('misfire_grace_time', self.misfire_grace_time),
                  options.pop('coalesce', self.coalesce), **options)
        if not self.running:
            self._pending_jobs.append((job, jobstore))
            logger.info('Adding job tentatively -- it will be properly '
                        'scheduled when the scheduler starts')
        else:
            self._real_add_job(job, jobstore, True)
        return job
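A hedged usage sketch for the 2.x-style signature documented above, assuming the stock APScheduler 2.x Scheduler class behaves like the method shown; send_report and its argument are made up for illustration:

from datetime import datetime, timedelta
from apscheduler.scheduler import Scheduler      # APScheduler 2.x module layout (assumed)
from apscheduler.triggers import SimpleTrigger

def send_report(recipient):
    print('reporting to %s' % recipient)

sched = Scheduler()
sched.start()
# Positional args/kwargs are forwarded to the job; extra keyword options such as
# misfire_grace_time go to the Job constructor, as the docstring above describes.
job = sched.add_job(SimpleTrigger(datetime.now() + timedelta(minutes=5)),
                    send_report, ['ops@example.com'], {}, misfire_grace_time=30)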
Example No. 10
 def _reconstitute_job(self, job_state):
     job_state = pickle.loads(job_state)
     job = Job.__new__(Job)
     job.__setstate__(job_state)
     job._scheduler = self._scheduler
     job._jobstore_alias = self._alias
     return job
Example No. 11
def _modify_job(background_job: Job,
                job_modification: Dict[Text, Union[bool, Dict, Set]]) -> None:
    changes = {}
    job_id = background_job.id
    run_immediately = job_modification.pop("run_immediately", False)

    if run_immediately:
        changes["next_run_time"] = datetime.now()
        logger.debug(f"Running job with id '{job_id}' immediately.")

    # Set keyword arguments to call scheduled job function with
    changes["kwargs"] = _get_merged_job_kwargs(background_job,
                                               job_modification)

    background_job.modify(**changes)
    logger.debug(f"Modifying job with id '{background_job.id}'.")
Example No. 12
 def _reconstitute_job(self, job_state):
     job_state = pickle.loads(job_state)
     job = Job.__new__(Job)
     job.__setstate__(job_state)
     job._scheduler = self._scheduler
     job._jobstore_alias = self._alias
     return job
Example No. 13
    def add(self):

        job_id = self._get_job_id()
        print(job_id)

        pid = self.pid
        if pid is not None:
            # if scheduler is running, stop it.
            self.stop()

        SCHEDULER.add_job(
            Job(
                get_forecast,
                # trigger=None,
                # args=None,
                # kwargs=None,
                id=job_id,
                # name=None,
                # misfire_grace_time=undefined,
                # coalesce=undefined,
                # max_instances=undefined,
                # next_run_time=undefined,
                # jobstore='default',
                # executor='default',
                # replace_existing=False,
                # **trigger_args
            ))

        if pid is not None:
            self.start()  # fork and exit parent
Example No. 14
def db_to_scheduler(document, scheduler, alias="beer_garden"):
    """Convert a database job to a scheduler's job."""
    job = APJob.__new__(APJob)
    if document.next_run_time:
        next_run_time = utc.localize(document.next_run_time)
    else:
        next_run_time = None
    state = {
        "id": document.id,
        "func": "brew_view.scheduler:run_job",
        "trigger": construct_trigger(document.trigger_type, document.trigger),
        "executor": "default",
        "args": (),
        "kwargs": {
            "request_template": document.request_template,
            "job_id": str(document.id),
        },
        "name": document.name,
        "misfire_grace_time": document.misfire_grace_time,
        "coalesce": document.coalesce,
        "max_instances": document.max_instances,
        "next_run_time": next_run_time,
    }
    job.__setstate__(state)
    job._scheduler = scheduler
    job._jobstore_alias = alias
    return job
Example No. 15
    def add_job(self,
                trigger,
                func,
                args,
                kwargs,
                jobstore='default',
                **options):
        """
        Adds the given job to the job list and notifies the scheduler thread.

        The ``func`` argument can be given either as a callable object or a textual reference in the
        ``package.module:some.object`` format, where the first half (separated by ``:``) is an importable module and the
        second half is a reference to the callable object, relative to the module.

        Any extra keyword arguments are passed along to the constructor of the :class:`~apscheduler.job.Job` class
        (see :ref:`job_options`).

        :param trigger: trigger that determines when ``func`` is called
        :param func: callable (or a textual reference to one) to run at the given time
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param jobstore: alias of the job store to store the job in
        :rtype: :class:`~apscheduler.job.Job`
        """
        job = Job(trigger, func, args or [], kwargs or {},
                  options.pop('misfire_grace_time', self.misfire_grace_time),
                  options.pop('coalesce', self.coalesce), **options)
        if not self.running:
            self._pending_jobs.append((job, jobstore))
            logger.info(
                'Adding job tentatively -- it will be properly scheduled when the scheduler starts'
            )
        else:
            self._real_add_job(job, jobstore, True)
        return job
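The docstring above also allows func to be a textual reference in the package.module:some.object format. APScheduler resolves such references with ref_to_obj from apscheduler.util; a short sketch of the format, using standard-library callables so the references actually resolve:

from apscheduler.util import ref_to_obj

# 'gc:collect' -> importable module 'gc', callable 'collect' relative to it.
collect = ref_to_obj('gc:collect')
assert callable(collect)

# The module part may itself be dotted, e.g. a submodule such as os.path.
getsize = ref_to_obj('os.path:getsize')
assert callable(getsize)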
Example No. 16
def db_to_scheduler(document, scheduler, alias='beer_garden'):
    """Convert a database job to a scheduler's job."""
    job = APJob.__new__(APJob)
    if document.next_run_time:
        next_run_time = utc.localize(document.next_run_time)
    else:
        next_run_time = None
    state = {
        'id': document.id,
        'func': 'brew_view.scheduler.runner:run_job',
        'trigger': construct_trigger(document.trigger_type, document.trigger),
        'executor': 'default',
        'args': (),
        'kwargs': {
            'request_template': document.request_template,
            'job_id': str(document.id),
        },
        'name': document.name,
        'misfire_grace_time': document.misfire_grace_time,
        'coalesce': document.coalesce,
        'max_instances': 3,
        'next_run_time': next_run_time,
    }
    job.__setstate__(state)
    job._scheduler = scheduler
    job._jobstore_alias = alias
    return job
Example No. 17
    def _encode_job(self, job: Job):
        job_state = job.__getstate__()
        job_state["kwargs"]["config"] = None
        job_state["kwargs"]["bot"] = None
        # new_kwargs = job_state["kwargs"]
        # new_kwargs["config"] = None
        # new_kwargs["bot"] = None
        # job_state["kwargs"] = new_kwargs
        encoded = base64.b64encode(
            pickle.dumps(job_state, self.pickle_protocol))
        out = {
            "_id": job.id,
            "next_run_time": datetime_to_utc_timestamp(job.next_run_time),
            "job_state": encoded.decode("ascii"),
        }
        job_state["kwargs"]["config"] = self.config
        job_state["kwargs"]["bot"] = self.bot
        # new_kwargs = job_state["kwargs"]
        # new_kwargs["config"] = self.config
        # new_kwargs["bot"] = self.bot
        # job_state["kwargs"] = new_kwargs
        # log.debug(f"Encoding job id: {job.id}\n"
        #           f"Encoded as: {out}")

        return out
Example No. 18
    async def _decode_job(self, in_job):
        if in_job is None:
            return None
        job_state = in_job["job_state"]
        job_state = pickle.loads(base64.b64decode(job_state))
        if job_state["args"]:  # Backwards compatibility on args to kwargs
            job_state["kwargs"] = {**job_state["args"][0]}
            job_state["args"] = []
        job_state["kwargs"]["config"] = self.config
        job_state["kwargs"]["bot"] = self.bot
        # new_kwargs = job_state["kwargs"]
        # new_kwargs["config"] = self.config
        # new_kwargs["bot"] = self.bot
        # job_state["kwargs"] = new_kwargs
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        # task_name, guild_id = _disassemble_job_id(job.id)
        # task = Task(task_name, guild_id, self.config)
        # await task.load_from_config()
        # save_task_objects.append(task)
        #
        # job.func = task.execute

        # log.debug(f"Decoded job id: {job.id}\n"
        #           f"Decoded as {job_state}")

        return job
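The two methods above are mirror images: _encode_job pickles the job state and base64-encodes it for storage, while _decode_job reverses the process and patches the unpicklable config and bot objects back in. A minimal standalone sketch of the round trip, using a plain dict in place of a real job state:

import base64
import pickle

state = {'id': 'demo', 'kwargs': {'config': None, 'bot': None}}
encoded = base64.b64encode(pickle.dumps(state, protocol=pickle.HIGHEST_PROTOCOL))
stored = encoded.decode('ascii')                  # what gets written to the document
restored = pickle.loads(base64.b64decode(stored))
assert restored == state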
Example No. 19
    def test_adding_a_jobstore_adds_all_jobs_in_it(self, mock_notify_jobstore_event, mock_notify_job_event, _):
        watcher = SchedulerWatcher(self.scheduler)

        jobstore = MemoryJobStore()

        jobstore.add_job(Job(scheduler=self.scheduler, id='job_1', next_run_time=datetime.now() + timedelta(days=1)))
        jobstore.add_job(Job(scheduler=self.scheduler, id='job_2', next_run_time=datetime.now() + timedelta(days=2)))

        self.assertEqual(0, len(watcher.jobs))

        self.scheduler.add_jobstore(jobstore, alias='in_memory_2')

        self.assertIn('in_memory_2', watcher.jobstores, 'Watcher should have the new jobstore tracked')
        self.assertEqual(2, len(watcher.jobs), 'Watcher should add all jobs in the newly added jobstore')
        self.assertTrue(all([job_id in watcher.jobs for job_id in ['job_1', 'job_2']]))
        self.assertEqual(2, mock_notify_job_event.call_count)
        mock_notify_jobstore_event.assert_called_once()
Example No. 20
 def create(**kwargs):
     kwargs.setdefault("scheduler", Mock(BaseScheduler, timezone=timezone))
     job_kwargs = job_defaults.copy()
     job_kwargs.update(kwargs)
     job_kwargs["trigger"] = BlockingScheduler()._create_trigger(
         job_kwargs.pop("trigger"), job_kwargs.pop("trigger_args"))
     job_kwargs.setdefault("next_run_time", None)
     return Job(**job_kwargs)
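The fixture above depends on job_defaults and timezone defined elsewhere in the test module. One set of values that would satisfy it is sketched below; every value is illustrative and not taken from the original suite:

from datetime import datetime
from pytz import utc as timezone

def dummyfunc():
    pass

job_defaults = {
    'trigger': 'date',
    'trigger_args': {'run_date': datetime(2030, 1, 1), 'timezone': timezone},
    'executor': 'default',
    'func': dummyfunc,
    'args': (),
    'kwargs': {},
    'id': 'testid',
    'name': 'dummyfunc',
    'misfire_grace_time': 1,
    'coalesce': False,
    'max_instances': 1,
}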
Example No. 21
    def test_one_job_fails_to_load(self):
        global dummy_job2, dummy_job_temp
        job1 = Job(self.trigger, dummy_job, [], {}, 1, False)
        job2 = Job(self.trigger, dummy_job2, [], {}, 1, False)
        job3 = Job(self.trigger, dummy_job3, [], {}, 1, False)
        for job in job1, job2, job3:
            job.next_run_time = self.trigger_date
            self.jobstore.add_job(job)

        dummy_job_temp = dummy_job2
        del dummy_job2
        try:
            self.jobstore.load_jobs()
            eq_(len(self.jobstore.jobs), 2)
        finally:
            dummy_job2 = dummy_job_temp
            del dummy_job_temp
Example No. 22
 def _reconstitute_job(self, job_state):
     from apscheduler.job import Job
     job_state['jobstore'] = self
     job = Job.__new__(Job)
     job.__setstate__(job_state)
     job._scheduler = self._scheduler
     job._jobstore_alias = self._alias
     return job
Example No. 23
 def create(**kwargs):
     kwargs.setdefault('scheduler', Mock(BaseScheduler, timezone=timezone))
     job_kwargs = job_defaults.copy()
     job_kwargs.update(kwargs)
     job_kwargs['trigger'] = BlockingScheduler()._create_trigger(job_kwargs.pop('trigger'),
                                                                 job_kwargs.pop('trigger_args'))
     job_kwargs.setdefault('next_run_time', None)
     return Job(**job_kwargs)
Example No. 24
 def _reconstitute_job(self, job_state):
     job_state = pickle.loads(job_state)
     job_state['jobstore'] = self
     job = Job.__new__(Job)
     job.__setstate__(job_state)
     job._scheduler = self._scheduler  # pylint: disable=protected-access
     job._jobstore_alias = self._alias  # pylint: disable=protected-access
     return job
Example No. 25
    def init(self):
        tz = self.store.timezone
        trigger = IntervalTrigger({}, minutes=5, timezone=tz)
        url     = 'http://t.cn'
        now = datetime.now(tz)

        self.job1 = Job(1, trigger, url)
        self.job1.compute_next_run_time(now)
        self.job2 = Job(2, trigger, url)
        self.job2.compute_next_run_time(now)

        self.store.add_job(self.job1)
        self.store.add_job(self.job2)


        self.now = now
        self.url = url
        self.trigger = trigger
Example No. 26
    def test_jobs_equal(self):
        assert self.job == self.job

        job2 = Job(SimpleTrigger(self.RUNTIME), lambda: None, [], {}, 1, False)
        assert self.job != job2

        job2.id = self.job.id = 123
        eq_(self.job, job2)

        assert self.job != 'bleh'
Example No. 27
 def add_job(self, job: AppSchedulerJob):
     with transaction.atomic():
         try:
             return DjangoJob.objects.create(
                 id=job.id,
                 next_run_time=get_django_internal_datetime(job.next_run_time),
                 job_state=pickle.dumps(job.__getstate__(), self.pickle_protocol),
             )
         except IntegrityError:
             raise ConflictingIdError(job.id)
Example No. 28
    def _reconstitute_job(self, row):
        '''
            Code generated by the shell command: cat a | awk -F '=' '{print $1}' | cut -c5- | awk '{ print "job."$1" = row."$1}'
            where file "a" contains the wm_jobs_t CREATE statement found in this source file.
        '''

        conf = JobConf()
        conf.id = row.id
        conf.cmd = row.cmd
        conf.cron_str = row.cron_str
        conf.name = row.name
        conf.desc = row.desc
        conf.mails = row.mails
        conf.phones = row.phones
        conf.team = row.team
        conf.owner = row.owner
        conf.hosts = row.hosts
        conf.host_strategy = row.host_strategy
        conf.restore_strategy = row.restore_strategy
        conf.retry_strategy = row.retry_strategy
        conf.error_strategy = row.error_strategy
        conf.exist_strategy = row.exist_strategy
        conf.running_timeout_s = row.running_timeout_s
        conf.status = row.status
        conf.modify_time = row.modify_time
        conf.modify_user = row.modify_user
        conf.create_time = row.create_time
        conf.create_user = row.create_user
        conf.start_date = row.start_date
        conf.end_date = row.end_date
        conf.oupput_match_reg = row.oupput_match_reg
        conf.next_run_time = row.next_run_time

        job = Job.__new__(Job)
        job.conf = conf
        job.id = job.conf.id
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        job.trigger = self._create_trigger_by_conf(job)
        t = apscheduler.util.local_timestamp_to_datetime(
            conf.next_run_time) if conf.next_run_time > 0 else None
        t = apscheduler.util.convert_to_ware_datetime(t, get_localzone(),
                                                      'conf.next_run_time')
        state = {
            'version': 1,
            'conf': conf,
            'id': conf.id,
            'name': conf.name,
            'next_run_time': t,
        }

        job.__setstate__(state)

        return job
Example No. 29
 def _prepare_job(job: APSJob) -> APSJob:
     """
     Erase all unpickable data from telegram.ext.Job
     Args:
         job (:obj:`apscheduler.job`): The job to be processed.
     """
     # Make a new job that is a copy of the actual job, because modifying the
     # actual job also modifies the jobs in the thread pool executor that are
     # currently running (or about to run), and we'd get an incorrect argument
     # instead of a CallbackContext.
     prepped_job = APSJob.__new__(APSJob)
     prepped_job.__setstate__(job.__getstate__())
     # Remove CallbackContext from the job args, since it includes references
     # to the dispatcher, which is unpicklable. We recreate the CallbackContext
     # in the _reconstitute_job method.
     if isinstance(job.args[0], CallbackContext):
         tg_job = job.args[0].job
         # APScheduler stores args as tuple.
         prepped_job.args = (tg_job.name, tg_job.context)
     return prepped_job
Example No. 30
 def load_jobs(self):
     jobs = []
     for row in self.engine.execute(select([self.jobs_t])):
         try:
             job = Job.__new__(Job)
             job_dict = dict(row.items())
             job.__setstate__(job_dict)
             jobs.append(job)
         except Exception:
             job_name = job_dict.get("name", "(unknown)")
             logger.exception('Unable to restore job "%s"', job_name)
     self.jobs = jobs
Example No. 31
    def load_jobs(self):
        jobs = []
        for job_dict in itervalues(self.store):
            try:
                job = Job.__new__(Job)
                job.__setstate__(job_dict)
                jobs.append(job)
            except Exception:
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)

        self.jobs = jobs
Example No. 32
    def load_jobs(self):
        jobs = []
        for job_dict in itervalues(self.store):
            try:
                job = Job.__new__(Job)
                job.__setstate__(job_dict)
                jobs.append(job)
            except Exception:
                job_name = job_dict.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)

        self.jobs = jobs
Example No. 33
def test_constructor(job_id):
    with patch('apscheduler.job.Job._modify') as _modify:
        scheduler_mock = MagicMock(BaseScheduler)
        job = Job(scheduler_mock, id=job_id)
        assert job._scheduler is scheduler_mock
        assert job._jobstore_alias is None

        modify_kwargs = _modify.call_args[1]
        if job_id is None:
            assert len(modify_kwargs['id']) == 32
        else:
            assert modify_kwargs['id'] == job_id
Example No. 34
 def load_jobs(self):
     jobs = []
     for row in self.engine.execute(select([self.jobs_t])):
         try:
             job = Job.__new__(Job)
             job_dict = dict(row.items())
             job.__setstate__(job_dict)
             jobs.append(job)
         except Exception:
             job_name = job_dict.get('name', '(unknown)')
             logger.exception('Unable to restore job "%s"', job_name)
     self.jobs = jobs
Example No. 35
 def _reconstitute_job(self, job_state):
     try:
         job_state = pickle.loads(job_state)
     except UnicodeDecodeError:
         # Unpickle py2 objects
         job_state = pickle.loads(job_state, encoding='latin1')
     job_state['jobstore'] = self
     job = Job.__new__(Job)
     job.__setstate__(job_state)
     job._scheduler = self._scheduler
     job._jobstore_alias = self._alias
     return job
Example No. 36
    def _reconstitute_job(self, job_state):
        schedule = job_state['schedule']
        schedule.pop('coalesce', None)
        next_run_time = job_state['next_run_time']

        if next_run_time == 1:
            # This is hacky: we subtract more than the misfire_grace_time value so
            # that, on the first load after a fresh install, the job is treated as
            # missed instead of being executed.
            next_run_time = int(time.time() - 1200)

        job = Job(id=job_state['id'],
                  func="__main__:job",
                  trigger=CronTrigger(**schedule),
                  name=job_state['name'],
                  args=[job_state['task']] + job_state['args'],
                  scheduler=self._scheduler,
                  executor='default',
                  next_run_time=utc_timestamp_to_datetime(next_run_time),
                  kwargs={
                      'id': job_state['id'],
                      'name': job_state['name'],
                      'hidden': job_state.get('hidden', False),
                      'protected': job_state.get('protected', False)
                  })

        job.coalesce = True
        job.max_instances = 1
        job.misfire_grace_time = 600
        job._jobstore_alias = self._alias
        return job
Example No. 37
 def _reconstitute_job(self, row):
     '''
         Code generated by the shell command: cat a | awk -F '=' '{print $1}' | cut -c5- | awk '{ print "job."$1" = row."$1}'
         where file "a" contains the wm_jobs_t CREATE statement found in this source file.
     '''
             
     conf = JobConf()
     conf.id = row.id
     conf.cmd = row.cmd
     conf.cron_str = row.cron_str
     conf.name = row.name
     conf.desc = row.desc
     conf.mails = row.mails
     conf.phones = row.phones
     conf.team = row.team
     conf.owner = row.owner
     conf.hosts = row.hosts
     conf.host_strategy = row.host_strategy
     conf.restore_strategy = row.restore_strategy
     conf.retry_strategy = row.retry_strategy
     conf.error_strategy = row.error_strategy
     conf.exist_strategy = row.exist_strategy
     conf.running_timeout_s = row.running_timeout_s
     conf.status = row.status
     conf.modify_time = row.modify_time
     conf.modify_user = row.modify_user
     conf.create_time = row.create_time
     conf.create_user = row.create_user
     conf.start_date = row.start_date
     conf.end_date = row.end_date
     conf.oupput_match_reg = row.oupput_match_reg 
     conf.next_run_time = row.next_run_time 
     
     job = Job.__new__(Job)
     job.conf = conf
     job.id = job.conf.id
     job._scheduler = self._scheduler
     job._jobstore_alias = self._alias
     job.trigger = self._create_trigger_by_conf(job) 
     t = apscheduler.util.local_timestamp_to_datetime(conf.next_run_time) if conf.next_run_time > 0 else None
     t = apscheduler.util.convert_to_ware_datetime(t, get_localzone(), 'conf.next_run_time' )
     state = {
          'version': 1,
          'conf': conf, 
          'id': conf.id, 
          'name': conf.name, 
          'next_run_time': t, 
     }
     
     job.__setstate__(state)
     
     return job
Example No. 38
    def test_real_add_job(self, scheduler, job_exists, replace_existing,
                          wakeup):
        job = Job(scheduler,
                  id='foo',
                  func=lambda: None,
                  args=(),
                  kwargs={},
                  next_run_time=None)
        jobstore = MagicMock(
            BaseJobStore,
            _alias='bar',
            add_job=MagicMock(
                side_effect=ConflictingIdError('foo') if job_exists else None))
        scheduler.wakeup = MagicMock()
        scheduler._job_defaults = {
            'misfire_grace_time': 3,
            'coalesce': False,
            'max_instances': 6
        }
        scheduler._dispatch_event = MagicMock()
        scheduler._jobstores = {'bar': jobstore}

        # Expect an exception if the job already exists and we're not trying to replace it
        if job_exists and not replace_existing:
            pytest.raises(ConflictingIdError, scheduler._real_add_job, job,
                          'bar', replace_existing, wakeup)
            return

        scheduler._real_add_job(job, 'bar', replace_existing, wakeup)

        # Check that the undefined values were replaced with scheduler defaults
        assert job.misfire_grace_time == 3
        assert job.coalesce is False
        assert job.max_instances == 6
        assert job.next_run_time is None

        if job_exists:
            jobstore.update_job.assert_called_once_with(job)
        else:
            assert not jobstore.update_job.called

        if wakeup:
            scheduler.wakeup.assert_called_once_with()
        else:
            assert not scheduler.wakeup.called

        assert job._jobstore_alias == 'bar'

        assert scheduler._dispatch_event.call_count == 1
        event = scheduler._dispatch_event.call_args[0][0]
        assert event.code == EVENT_JOB_ADDED
        assert event.job_id == 'foo'
Example No. 39
 def _db_to_job(self, row):
     if row['trigger_type'] == 'date':
         trigger = DateTrigger(run_date=row['run_date'])
     if row['trigger_type'] == 'cron':
         keys = row['crontab'].split(',')[1]
         values = row['crontab'].split(',')[0].split(' ')
         cronMapRev = {v: k for k, v in cronMap.items()}
         crontab = {cronMapRev[k]: values[i] for i, k in enumerate(keys)}
         trigger = CronTrigger(**crontab)
     if row['trigger_type'] == 'interval':
         trigger = IntervalTrigger(seconds=row['interval'])
     job = Job.__new__(Job)
     job.__setstate__({
         'id': row['id'],
         'name': row['name'],
         'func': row['func'],
         'args': json.loads(row['args']) if row['args'] else [],
         'kwargs': json.loads(row['kwargs']) if row['kwargs'] else {},
         'version': 1,
         'trigger': trigger,
         'executor': row['executor'],
         'start_date': row['start_date'],
         'end_date': row['end_date'],
         'next_run_time': utc_timestamp_to_datetime(row['next_run_time'].timestamp()),
         'coalesce': row['coalesce'],
         'misfire_grace_time': row['misfire_grace_time'],
         'max_instances': row['max_instances'],
         'jobstore': self,
     })
     job._scheduler = self._scheduler
     job._jobstore_alias = self._alias
     print(job._scheduler)
     print(job._jobstore_alias)
     return job
Example No. 40
 def load_jobs(self):
     jobs = []
     for job_dict in self.collection.find():
         try:
             job = Job.__new__(Job)
             job_dict['id'] = job_dict.pop('_id')
             job_dict['trigger'] = pickle.loads(job_dict['trigger'])
             job_dict['args'] = pickle.loads(job_dict['args'])
             job_dict['kwargs'] = pickle.loads(job_dict['kwargs'])
             job.__setstate__(job_dict)
             jobs.append(job)
         except Exception:
             job_name = job_dict.get('name', '(unknown)')
             logger.exception('Unable to restore job "%s"', job_name)
     self.jobs = jobs
Example No. 41
 def load_jobs(self):
     jobs = []
     for row in self.engine.execute(self.jobs_t.select()):
         try:
             job = Job.__new__(Job)
             job_dict = dict(row.items())
             job.__setstate__(job_dict)
             jobs.append(job)
         except Exception as e:
             print e
             traceback.print_exc()
             job_name = job_dict.get('name', '(unknown)')
             logger.exception('Unable to restore job "%s"', job_name)
     self.jobs = jobs
     return jobs
Example No. 42
    def get_job(self, id):
        select = self.jobs_t.select().where(self.jobs_t.c.id == id)
        try:
            row = self.engine.execute(select).fetchone()
        except Exception as e:
            # TODO: handle database errors more specifically
            logger.exception(e)
            return None

        if row:
            try:
                job = Job.__new__(Job)
                job_dict = dict(row.items())
                job.__setstate__(job_dict)
                return job
            except Exception:
                job_name = job_dict.get('name', 'unknown')
                logger.exception("Unable to restore job '%s'", job_name)

        return None
Example No. 43
 def pop(self, now):
     item = self.redis.rpop(self.key)
     if item is None:
         return 0, '', None
     try:
         job_id, change_type, job_str = item.split('||')
         job_id = int(job_id)
         if job_str == 'None':
             job = None
         else:
             job_state = pickle.loads(job_str)
             job = Job.__new__(Job)
             job.__setstate__(job_state)
             job.compute_next_run_time(now)
         return job_id, change_type, job
     except Exception:
         logger = logging.getLogger('cron.backend')
         logger.exception('sync item invalid')
         return 0, '', None
Example No. 44
	def load_jobs(self):
		#continue standart execution
		jobs = []
		for job_dict in self.collection.find({'crecord_type': 'schedule'}):
			try:
				job = Job.__new__(Job)
				
				if job_dict['aaa_owner'] != 'root':
					if job_dict['kwargs']['task'] != 'task_reporting':
						raise ValueError("User %s isn\'t allow to run task %s" % (job_dict['aaa_owner'],job_dict['kwargs']['task']))
				
				#keep memory of id
				job_dict_id = job_dict['_id']
				
				job_dict['id'] = job_dict.pop('_id')
				
				if job_dict.has_key('runs'):
					job_dict['runs'] = job_dict['runs']
				else:
					job_dict['runs'] = 0
				
				job_dict['coalesce'] = False
				
				#try to get interval
				try:
					if job_dict['interval'] != None:
						job_dict['trigger'] = IntervalTrigger(timedelta(**job_dict['interval']))
				except Exception, err:
					pass
				
				#try to get simple
				try:
					if job_dict['date'] != None:
						job_dict['trigger'] = SimpleTrigger( datetime(*job_dict['date']))
				except Exception, err:
					pass
				
				#try to get crontab
				try:
					if job_dict['cron'] != None:
						job_dict['trigger'] = CronTrigger(**job_dict['cron'])
				except Exception, err:
					pass
Example No. 45
    def load_jobs(self):
        jobs = []
        keys = self.redis.keys(self.key_prefix + "*")
        pipeline = self.redis.pipeline()
        for key in keys:
            pipeline.hgetall(key)
        results = pipeline.execute()

        for job_dict in results:
            job_state = {}
            try:
                job = Job.__new__(Job)
                job_state = pickle.loads(job_dict["job_state".encode()])
                job_state["runs"] = long(job_dict["runs".encode()])
                dateval = job_dict["next_run_time".encode()].decode()
                job_state["next_run_time"] = datetime.strptime(dateval, "%Y-%m-%dT%H:%M:%S")
                job.__setstate__(job_state)
                jobs.append(job)
            except Exception:
                job_name = job_state.get("name", "(unknown)")
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs
Example No. 46
    def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined,
                coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default',
                executor='default', replace_existing=False, conf=None, **trigger_args):
        """
        add_job(func, trigger=None, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, \
            coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', \
            executor='default', replace_existing=False, **trigger_args)

        Adds the given job to the job list and wakes up the scheduler if it's already running.

        Any option that defaults to ``undefined`` will be replaced with the corresponding default value when the job is
        scheduled (which happens when the scheduler is started, or immediately if the scheduler is already running).

        The ``func`` argument can be given either as a callable object or a textual reference in the
        ``package.module:some.object`` format, where the first half (separated by ``:``) is an importable module and the
        second half is a reference to the callable object, relative to the module.

        The ``trigger`` argument can either be:
          #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case any extra keyword
             arguments to this method are passed on to the trigger's constructor
          #. an instance of a trigger class

        :param func: callable (or a textual reference to one) to run at the given time
        :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when ``func`` is called
        :param list|tuple args: list of positional arguments to call func with
        :param dict kwargs: dict of keyword arguments to call func with
        :param str|unicode id: explicit identifier for the job (for modifying it later)
        :param str|unicode name: textual description of the job
        :param int misfire_grace_time: seconds after the designated run time that the job is still allowed to be run
        :param bool coalesce: run once instead of many times if the scheduler determines that the job should be run more
                              than once in succession
        :param int max_instances: maximum number of concurrently running instances allowed for this job
        :param datetime next_run_time: when to first run the job, regardless of the trigger (pass ``None`` to add the
                                       job as paused)
        :param str|unicode jobstore: alias of the job store to store the job in
        :param str|unicode executor: alias of the executor to run the job with
        :param bool replace_existing: ``True`` to replace an existing job with the same ``id`` (but retain the
                                      number of runs from the existing one)
        :rtype: Job
        """
        job_kwargs = {
            'trigger': trigger,
            'executor': executor,
            'func': func,
            'args': tuple(args) if args is not None else (),
            'kwargs': dict(kwargs) if kwargs is not None else {},
            'id': id,
            'name': name,
            'misfire_grace_time': misfire_grace_time,
            'coalesce': coalesce,
            'max_instances': max_instances,
            'next_run_time': next_run_time
        }
        job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs) if value is not undefined)
        job = Job(self, **job_kwargs)
        job.conf = conf
        
        # Don't really add jobs to job store before the scheduler is up and running
        with self._jobstore_lock:
            if not self.running:
                self._pending_jobs.append((job, jobstore, replace_existing))
                logging.info('Adding job tentatively -- it will be properly scheduled when the scheduler starts')
            else:
                logging.info('Adding job now -- the scheduler is running, so it will be scheduled immediately')
                self._real_add_job(job, jobstore, replace_existing, True)

        return job
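A hedged usage note for this variant: because it forwards trigger to Job unchanged and never uses **trigger_args, you would pass a trigger instance rather than an alias string (assuming the stock 3.x Job class underneath). The sketch below shows the equivalent call against a stock BackgroundScheduler, which shares the rest of the signature; the variant's extra conf= argument is simply omitted, and heartbeat is a made-up function:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger

def heartbeat():
    print('still alive')

scheduler = BackgroundScheduler()   # stands in for the custom scheduler above
scheduler.start()
# Passing a trigger instance works in both the stock and the custom add_job;
# alias strings like 'interval' would not be resolved by the variant above.
job = scheduler.add_job(heartbeat, trigger=IntervalTrigger(seconds=30),
                        id='heartbeat', replace_existing=True)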
Example No. 47
from hotqueue import HotQueue
from apscheduler.job import Job
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore
from dateutil.tz import gettz
from datetime import datetime, timedelta
from apscheduler.triggers import IntervalTrigger, DateTrigger
from apscheduler.scripts import HttpScript, CommandScript


if __name__ == '__main__':

    queue = HotQueue('job_changes')
    script = HttpScript(url='http://baidu.comm')
    store = SQLAlchemyJobStore(url='sqlite:////tmp/task.db', tablename='tasks')
    #script = CommandScript(command='ping -c 3 www.baidu.com')
    local_tz = gettz('Asia/Chongqing')
    defaults = {'timezone': local_tz}
    trigger = IntervalTrigger(defaults, seconds=60)
    #trigger = DateTrigger(defaults, run_date=datetime(2013,12,11, 8, 11))

    job = Job(name=u'BaiduCurlWithWrongUrl', script=script, trigger=trigger)

    #print job.run()
    now = datetime.now(local_tz)
    next_run_time = job.compute_next_run_time(now)
    print job.get_run_times(now+timedelta(seconds=60))
    
    if next_run_time:
        print "add job"
        print job
        store.add_job(job)
        queue.put({'opt_type':'add', 'job_id':job.id})
Example No. 48
from dateutil.tz import gettz

from apscheduler.job import Job
from apscheduler.triggers import IntervalTrigger
from apscheduler.scripts import HttpScript

t = IntervalTrigger({}, minutes=5, timezone=gettz('Asia/Chongqing'))
s = HttpScript(url='http://baidu.com')

job = Job(t, s)
print job.__getstate__()
Example No. 49
	def load_jobs(self):
		#continue standart execution
		jobs = []
		for job_dict in self.collection.find({'crecord_type': 'schedule'}):
			try:
				job = Job.__new__(Job)

				if job_dict['aaa_owner'] != 'account.root':
					if job_dict['kwargs']['task'] != 'task_reporting':
						raise ValueError("User %s isn\'t allow to run task %s" % (job_dict['aaa_owner'],job_dict['kwargs']['task']))

				#keep memory of id
				job_dict_id = job_dict['_id']

				job_dict['id'] = job_dict.pop('_id')

				if job_dict.has_key('runs'):
					job_dict['runs'] = job_dict['runs']
				else:
					job_dict['runs'] = 0

				job_dict['coalesce'] = False

				#try to get interval
				interval = job_dict.get('interval')
				if interval is not None:
					job_dict[TRIGGER] = IntervalTrigger(timedelta(**interval))
				else: #try to get simple
					date = job_dict.get('date')
					if date is not None:
						job_dict[TRIGGER] = SimpleTrigger( datetime(*date))
					else: #try to get crontab
						cron = job_dict.get('cron')
						if cron is not None:
							job_dict[TRIGGER] = CronTrigger(**cron)

				if TRIGGER not in job_dict:
					raise ValueError("No interval, nor date, nor cron is given in task %s".format(job_dict['crecord_name']))

				job_dict['next_run_time'] = job_dict['trigger'].get_next_fire_time(datetime.now())
				job_dict['args'] = job_dict['args']
				job_dict['kwargs'] = job_dict['kwargs']
				job_dict['max_runs'] = None
				job_dict['max_instances'] = 3
				job_dict['name'] = job_dict['crecord_name']
				job_dict['misfire_grace_time'] = 1

				job_dict['func_ref'] = 'apschedulerlibs.aps_to_celery:launch_celery_task'

				job.__setstate__(job_dict)
				jobs.append(job)

				#change flag to true
				self.collection.update({'_id':job_dict_id},{"$set":{'loaded':True, 'next_run_time': job_dict['next_run_time']}},True)

			except Exception:
				job_name = job_dict.get('name', '(unknown)')
				logger.exception('Unable to restore job "%s"', job_name)

		logger.info(' + %s jobs loaded' % len(jobs))
		self.jobs = jobs
Example No. 50
class JobStoreTest(TestCase):
    def setUp(self):
        self.redis = StrictRedis(**settings.REDISES['default'])
        self.store = JobStore(self.redis, 'Asia/Chongqing')
        self.store.clear()
        self.redis.delete('active_jobs_pool')
        self.init()

    def tearDown(self):
        self.redis.delete('active_jobs_pool')
        self.store.clear()

    def init(self):
        tz = self.store.timezone
        trigger = IntervalTrigger({}, minutes=5, timezone=tz)
        url     = 'http://t.cn'
        now = datetime.now(tz)

        self.job1 = Job(1, trigger, url)
        self.job1.compute_next_run_time(now)
        self.job2 = Job(2, trigger, url)
        self.job2.compute_next_run_time(now)

        self.store.add_job(self.job1)
        self.store.add_job(self.job2)


        self.now = now
        self.url = url
        self.trigger = trigger

    def testLoadJob(self):
        Task.objects.all().delete()
        task = self.createTask()
        task.save()
        job = self.store.load_job(task.id, self.now)
        self.assertEqual(self.url, job.func)

    def testLoadJobs(self):
        Task.objects.all().delete()
        self.store.clear()

        for i in range(3):
            task = self.createTask()
            task.save()
            self.redis.sadd('active_jobs_pool', task.id)

        self.assertEqual(3, self.store.count())
        self.assertEqual(0, len(self.store.jobs))
        self.store.load_jobs()
        self.assertEqual(3, len(self.store.jobs))


    def testAddJob(self):
        self.assertEqual(2, self.store.count())
        job = Job(3, self.trigger, self.url)
        job.compute_next_run_time(self.now)
        self.store.add_job(job)
        self.assertEqual(3, self.store.count())

    def testRemoveJob(self):
        self.assertEqual(2, self.store.count())
        self.store.remove_job(id=1)
        self.assertEqual(1, self.store.count())
        self.store.remove_job(id=3)
        self.assertEqual(1, self.store.count())

    def createTask(self):
        import json
        task = Task()
        task.run_time = json.dumps({'minutes':10})
        task.name = 'name'
        task.run_entry = self.url
        return task

    def testUpdateJob(self):
        task = self.createTask()
        task.name = 'name1'
        task.save()
        self.store.clear()
        self.redis.sadd(self.store.active_jobs_key, task.id)
        self.store.load_jobs()
        job = self.store.find_job(task.id)
        self.assertEqual(task.name, job.name)

        task.name = 'name2'
        task.save()
        self.store.update_job(id=task.id)
        job = self.store.find_job(task.id)
        self.assertEqual(task.name, job.name)



    def testFindJob(self):
        self.assertEqual(2, self.store.count())
        job = self.store.find_job(1)
        self.assertEqual(1, job.id)
        self.assertIsNone(self.store.find_job(3))

    def testHasJob(self):
        self.assertTrue(self.store.has_job(1))
        self.assertFalse(self.store.has_job(3))
Example No. 51
 def testAddJob(self):
     self.assertEqual(2, self.store.count())
     job = Job(3, self.trigger, self.url)
     job.compute_next_run_time(self.now)
     self.store.add_job(job)
     self.assertEqual(3, self.store.count())
Example No. 52
 def setup(self):
     self.trigger = SimpleTrigger(self.RUNTIME)
     self.job = Job(self.trigger, dummyfunc, [], {}, 1, False)
Example No. 53
class TestJob(object):
    RUNTIME = datetime(2010, 12, 13, 0, 8, 0)

    def setup(self):
        self.trigger = SimpleTrigger(self.RUNTIME)
        self.job = Job(self.trigger, dummyfunc, [], {}, 1, False)

    def test_compute_next_run_time(self):
        self.job.compute_next_run_time(self.RUNTIME - timedelta(microseconds=1))
        eq_(self.job.next_run_time, self.RUNTIME)

        self.job.compute_next_run_time(self.RUNTIME)
        eq_(self.job.next_run_time, self.RUNTIME)

        self.job.compute_next_run_time(self.RUNTIME + timedelta(microseconds=1))
        eq_(self.job.next_run_time, None)

    def test_compute_run_times(self):
        expected_times = [self.RUNTIME + timedelta(seconds=1),
                          self.RUNTIME + timedelta(seconds=2)]
        self.job.trigger = IntervalTrigger(timedelta(seconds=1), self.RUNTIME)
        self.job.compute_next_run_time(expected_times[0])
        eq_(self.job.next_run_time, expected_times[0])

        run_times = self.job.get_run_times(self.RUNTIME)
        eq_(run_times, [])

        run_times = self.job.get_run_times(expected_times[0])
        eq_(run_times, [expected_times[0]])

        run_times = self.job.get_run_times(expected_times[1])
        eq_(run_times, expected_times)

    def test_max_runs(self):
        self.job.max_runs = 1
        self.job.runs += 1
        self.job.compute_next_run_time(self.RUNTIME)
        eq_(self.job.next_run_time, None)

    def test_eq_num(self):
        # Just increasing coverage here
        assert not self.job == 'dummyfunc'

    def test_getstate(self):
        state = self.job.__getstate__()
        eq_(state, dict(trigger=self.trigger,
                        func_ref='testjob:dummyfunc',
                        name='dummyfunc', args=[],
                        kwargs={}, misfire_grace_time=1,
                        coalesce=False, max_runs=None,
                        max_instances=1, runs=0))

    def test_setstate(self):
        trigger = SimpleTrigger('2010-12-14 13:05:00')
        state = dict(trigger=trigger, name='testjob.dummyfunc',
                     func_ref='testjob:dummyfunc',
                     args=[], kwargs={}, misfire_grace_time=2, max_runs=2,
                     coalesce=True, max_instances=2, runs=1)
        self.job.__setstate__(state)
        eq_(self.job.trigger, trigger)
        eq_(self.job.func, dummyfunc)
        eq_(self.job.max_runs, 2)
        eq_(self.job.coalesce, True)
        eq_(self.job.max_instances, 2)
        eq_(self.job.runs, 1)
        assert not hasattr(self.job, 'func_ref')
        assert isinstance(self.job._lock, lock_type)

    def test_jobs_equal(self):
        assert self.job == self.job

        job2 = Job(SimpleTrigger(self.RUNTIME), lambda: None, [], {}, 1, False)
        assert self.job != job2

        job2.id = self.job.id = 123
        eq_(self.job, job2)

        assert self.job != 'bleh'

    def test_instances(self):
        self.job.max_instances = 2
        eq_(self.job.instances, 0)

        self.job.add_instance()
        eq_(self.job.instances, 1)

        self.job.add_instance()
        eq_(self.job.instances, 2)

        assert_raises(MaxInstancesReachedError, self.job.add_instance)

        self.job.remove_instance()
        eq_(self.job.instances, 1)

        self.job.remove_instance()
        eq_(self.job.instances, 0)

        assert_raises(AssertionError, self.job.remove_instance)

    def test_repr(self):
        self.job.compute_next_run_time(self.RUNTIME)
        eq_(repr(self.job),
            "<Job (name=dummyfunc, "
            "trigger=<SimpleTrigger (run_date=datetime.datetime(2010, 12, 13, 0, 8))>)>")
        eq_(str(self.job),
            "dummyfunc (trigger: date[2010-12-13 00:08:00], "
            "next run at: 2010-12-13 00:08:00)")
Example No. 54
from hotqueue import HotQueue
from dateutil.tz import gettz

from apscheduler.job import Job
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore
from apscheduler.triggers import IntervalTrigger
from apscheduler.scripts import HttpScript


if __name__ == '__main__':

    script = HttpScript(url='http://baidu.com')
    local_tz = gettz('Asia/Chongqing')
    defaults = {'timezone': local_tz}
    trigger = IntervalTrigger(defaults, seconds=3)

    store = SQLAlchemyJobStore(url='sqlite:////tmp/task.db', tablename='tasks')
    job   = store.get_job(3)
    if not job:
        job = Job(id=3, name='BaiduCheck', script=script, trigger=trigger)
        store.add_job(job)

    print job

    job.trigger = IntervalTrigger(defaults, seconds=5)
    store.update_job(job)

    queue = HotQueue('job_changes')
    queue.put({'job_id':job.id, 'opt_type':'update'})