Example #1
    def add_job(self, job):
        if isinstance(job, Job):
            if job.id:
                job_exist = list(self.table.get_all(job.id).run(self.conn))
                if job_exist:
                    job_exist = job_exist[0]
            else:
                job_exist = None
        else:
            job_exist = None

        if not job_exist:
            job_dict = {}
            job_dict['id'] = job.id
            job_dict['job_state'] = (pickle.dumps(
                job.__getstate__(),
                self.pickle_protocol).encode("zip").encode("base64").strip())
            job_dict['next_run_time'] = (datetime_to_utc_timestamp(
                job.next_run_time))

            results = self.table.insert(job_dict).run(self.conn)
            if results['errors'] > 0:
                raise ConflictingIdError(job.id)
        else:
            raise ConflictingIdError(job.id)
Example #2
 def add_job(self, job):
     insert = self.wm_jobs_t.insert().values(
         **{
             'id': job.conf.id,
             'cmd': job.conf.cmd,
             'cron_str': job.conf.cron_str,
             'name': job.conf.name,
             'desc': job.conf.desc,
             'mails': job.conf.mails,
             'phones': job.conf.phones,
             'team': job.conf.team,
             'owner': job.conf.owner,
             'hosts': job.conf.hosts,
             'host_strategy': job.conf.host_strategy,
             'restore_strategy': job.conf.restore_strategy,
             'retry_strategy': job.conf.retry_strategy,
             'error_strategy': job.conf.error_strategy,
             'exist_strategy': job.conf.exist_strategy,
             'running_timeout_s': job.conf.running_timeout_s,
             'status': job.conf.status,
             'modify_time': job.conf.modify_time,
             'modify_user': job.conf.modify_user,
             'create_time': job.conf.create_time,
             'create_user': job.conf.create_user,
             'start_date': job.conf.start_date,
             'end_date': job.conf.end_date,
             'oupput_match_reg': job.conf.oupput_match_reg,
             'next_run_time': job.conf.next_run_time,
         })
     try:
         self.engine.execute(insert)
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #3
 def add_job(self, job):
     insertData = self._job_to_db(job)
     insert = self.jobs_t.insert().values(**insertData)
     try:
         self.engine.execute(insert)
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #4
 def add_job(self, job):
     try:
         DjangoJob.objects.create(job_id=job.id,
                                  next_run_time=job.next_run_time,
                                  job_state=job.__getstate__())
     except Exception:  # any failure to create the row is treated as a duplicate job id
         raise ConflictingIdError(job.id)
Example #5
 def add_job_submission(self, job, now):
     self._ensure_paths()
     utc_now = now.astimezone(utc)
     job_submission_id = job.id + "_" + \
         str((utc_now - utc.localize(datetime(1970, 1, 1))).total_seconds() * 1000)
     node_path = os.path.join(self.job_submission_path, job_submission_id)
     self._logger.info(node_path)
     value = {
         'state': 'submitted',
         # TODO: Pickle the 'job.func' so we can recover from 2 diff sessions
         'func': job.func if isinstance(job.func, six.string_types) else job.func.__name__,
         'submitted_at': now,
         'apscheduler_job_id': job.id,
     }
     data = pickle.dumps(value, self.pickle_protocol)
     try:
         self.client.create(node_path, value=data)
     except NodeExistsError:
         raise ConflictingIdError(job_submission_id)
     return job_submission_id
Example #6
 def add_job(self, job):
     e = Scheduler.create(self.sess, job.id, self.user_id)
     e.next_time = datetime_to_utc_timestamp(job.next_run_time)
     e.job_state = pickle.dumps(job.__getstate__(), self.pickle_protocol)
     try:
         self.sess.flush()
     except sa.exc.IntegrityError:
         raise ConflictingIdError(job.id)
Example #7
    def add_job(self, job):
        if job.id in self._jobs_index:
            raise ConflictingIdError(job.id)

        timestamp = datetime_to_utc_timestamp(job.next_run_time)
        index = self._get_job_index(timestamp, job.id)
        self._jobs.insert(index, (job, timestamp))
        self._jobs_index[job.id] = (job, timestamp)
Example #8
    def add_job(self, job):
        if DjangoJob.objects.filter(name=job.id).exists():
            raise ConflictingIdError(job.id)

        DjangoJob.objects.create(name=job.id,
                                 next_run_time=serialize_dt(job.next_run_time),
                                 job_state=pickle.dumps(
                                     job.__getstate__(), self.pickle_protocol))
Example #9
 def add_job(self, job):
     try:
         self.collection.insert({
             '_id': job.id,
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
         })
     except DuplicateKeyError:
         raise ConflictingIdError(job.id)
Example #10
 def add_job(self, job):
     job_dict = {
         'id': job.id,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
     }
     results = self.table.insert(job_dict).run(self.conn)
     if results['errors'] > 0:
         raise ConflictingIdError(job.id)
Example #11
 def add_job(self, job: Job):
     if job.id in self._jobs_index:
         raise ConflictingIdError(job.id)
     # log.debug(f"Check job args: {job.args=}")
     timestamp = datetime_to_utc_timestamp(job.next_run_time)
     index = self._get_job_index(timestamp, job.id)  # This is fine
     self._jobs.insert(index, (job, timestamp))
     self._jobs_index[job.id] = (job, timestamp)
     asyncio.create_task(self._async_add_job(job, index, timestamp))
Example #12
 def add_job(self, job):
     try:
         DjangoJob.objects.create(id=job.id,
                                  next_run_time=job.next_run_time,
                                  job_state=pickle.dumps(
                                      job.__getstate__(),
                                      self.pickle_protocol))
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #13
    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()
Example #14
 def add_job(self, job: AppSchedulerJob):
     with transaction.atomic():
         try:
             return DjangoJob.objects.create(
                 id=job.id,
                 next_run_time=get_django_internal_datetime(job.next_run_time),
                 job_state=pickle.dumps(job.__getstate__(), self.pickle_protocol),
             )
         except IntegrityError:
             raise ConflictingIdError(job.id)
Example #15
 def add_job(self, job):
     insert = self.jobs_t.insert().values(**{
         'id': job.id,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
     })
     try:
         self.engine.execute(insert)
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #16
    def add_job(self, job):
        """
        Adds the given job to this store.

        :param Job job: the job to add
        :raises ConflictingIdError: if there is another job in this store with the same ID
        """
        if self.lookup_job(job.id) is not None:
            raise ConflictingIdError(job.id)
        else:
            self._put_job(job)
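
A minimal usage sketch of the contract documented above; store and the make_job helper are hypothetical names used only for illustration, and update_job is shown as one way a caller might recover from the conflict.

from apscheduler.jobstores.base import ConflictingIdError

job = make_job(id='nightly-report')    # hypothetical helper that builds a Job
store.add_job(job)                     # first insert succeeds
try:
    store.add_job(job)                 # same id again -> duplicate
except ConflictingIdError:
    store.update_job(job)              # replace the stored job state instead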
Example #17
    def add_job(self, job):
        job_id = bytes(job.id, 'utf-8')
        if job_id in self.jobs:
            raise ConflictingIdError(job.id)

        self.jobs.put(job_id,
                      pickle.dumps(job.__getstate__(), self.pickle_protocol))
        if job.next_run_time:
            self.run_times.put(
                str(int(datetime_to_utc_timestamp(job.next_run_time))).encode('utf-8'),
                job_id)
Example #18
 def add_job(self, job):
     self._ensure_paths()
     node_path = self.path + "/" + str(job.id)
     value = {
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': job.__getstate__()
     }
     data = pickle.dumps(value, self.pickle_protocol)
     try:
         self.client.create(node_path, value=data)
     except NodeExistsError:
         raise ConflictingIdError(job.id)
Example #19
 def add_job(self, job):
     q = self.job_model.insert(
         **{
             'id': job.id,
             'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
             'job_state': pickle.dumps(job.__getstate__(),
                                       self.pickle_protocol)
         })
     try:
         q.execute()
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #20
    def test_real_add_job(self, scheduler, job_exists, replace_existing,
                          wakeup):
        job = Job(scheduler,
                  id='foo',
                  func=lambda: None,
                  args=(),
                  kwargs={},
                  next_run_time=None)
        jobstore = MagicMock(
            BaseJobStore,
            _alias='bar',
            add_job=MagicMock(
                side_effect=ConflictingIdError('foo') if job_exists else None))
        scheduler.wakeup = MagicMock()
        scheduler._job_defaults = {
            'misfire_grace_time': 3,
            'coalesce': False,
            'max_instances': 6
        }
        scheduler._dispatch_event = MagicMock()
        scheduler._jobstores = {'bar': jobstore}

        # Expect an exception if the job already exists and we're not trying to replace it
        if job_exists and not replace_existing:
            pytest.raises(ConflictingIdError, scheduler._real_add_job, job,
                          'bar', replace_existing, wakeup)
            return

        scheduler._real_add_job(job, 'bar', replace_existing, wakeup)

        # Check that the undefined values were replaced with scheduler defaults
        assert job.misfire_grace_time == 3
        assert job.coalesce is False
        assert job.max_instances == 6
        assert job.next_run_time is None

        if job_exists:
            jobstore.update_job.assert_called_once_with(job)
        else:
            assert not jobstore.update_job.called

        if wakeup:
            scheduler.wakeup.assert_called_once_with()
        else:
            assert not scheduler.wakeup.called

        assert job._jobstore_alias == 'bar'

        assert scheduler._dispatch_event.call_count == 1
        event = scheduler._dispatch_event.call_args[0][0]
        assert event.code == EVENT_JOB_ADDED
        assert event.job_id == 'foo'
Example #21
 def add_job(self, job):
     #print(json.dumps(job.__getstate__(), cls=JobEncoder))
     insert = self.jobs_t.insert().values(**{
         'id': job.id,
         'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
         'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol),
         'job_json': json.dumps(job.__getstate__(), cls=JobEncoder, ensure_ascii=False)
     })
     try:
         self.engine.execute(insert)
     except IntegrityError:
         raise ConflictingIdError(job.id)
Example #22
    def add_job(self, job):
        insert = self.jobs_t.insert().values(**{
            'id': job.id,
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
        })
        try:
            self.engine.execute(insert)
        except IntegrityError:
            raise ConflictingIdError(job.id)
        except SQLAlchemyError as e:
            if isinstance(e.orig, InvalidRequestError):
                self.session.rollback()
            elif not isinstance(e, OperationalError):
                raise

            del self.engine
            self.engine = create_engine(self.url)
            self.engine.execute(insert)
Example #23
    def test_service_schedule_duplicate_job(self, mock_start_job):
        job = Mock()
        job.utctime = 'now'
        self.service.jobs['1'] = job

        scheduler = Mock()
        scheduler.add_job.side_effect = ConflictingIdError('Conflicting jobs.')
        self.service.scheduler = scheduler

        self.service._schedule_job('1')
        self.service.log.warning.assert_called_once_with(
            'Job already running. Received multiple '
            'listener messages.',
            extra={'job_id': '1'})
        scheduler.add_job.assert_called_once_with(self.service._start_job,
                                                  args=('1', ),
                                                  id='1',
                                                  max_instances=1,
                                                  misfire_grace_time=None,
                                                  coalesce=True)
Example #24
    def add_job(self,
                func,
                trigger=None,
                args=None,
                kwargs=None,
                id=None,
                name=None,
                misfire_grace_time=undefined,
                coalesce=undefined,
                max_instances=undefined,
                next_run_time=undefined,
                jobstore='default',
                executor='default',
                replace_existing=False,
                **trigger_args):
        """
        add_job(func, trigger=None, args=None, kwargs=None, id=None, \
            name=None, misfire_grace_time=undefined, coalesce=undefined, \
            max_instances=undefined, next_run_time=undefined, \
            jobstore='default', executor='default', \
            replace_existing=False, **trigger_args)

        Adds the given job to the job list and wakes up the scheduler if it's already running.

        Any option that defaults to ``undefined`` will be replaced with the corresponding default
        value when the job is scheduled (which happens when the scheduler is started, or
        immediately if the scheduler is already running).

        The ``func`` argument can be given either as a callable object or a textual reference in
        the ``package.module:some.object`` format, where the first half (separated by ``:``) is an
        importable module and the second half is a reference to the callable object, relative to
        the module.

        The ``trigger`` argument can either be:
          #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case
            any extra keyword arguments to this method are passed on to the trigger's constructor
          #. an instance of a trigger class

        :param func: callable (or a textual reference to one) to run at the given time
        :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when
            ``func`` is called
        :param list|tuple args: list of positional arguments to call func with
        :param dict kwargs: dict of keyword arguments to call func with
        :param str|unicode id: explicit identifier for the job (for modifying it later)
        :param str|unicode name: textual description of the job
        :param int misfire_grace_time: seconds after the designated runtime that the job is still
            allowed to be run
        :param bool coalesce: run once instead of many times if the scheduler determines that the
            job should be run more than once in succession
        :param int max_instances: maximum number of concurrently running instances allowed for this
            job
        :param datetime next_run_time: when to first run the job, regardless of the trigger (pass
            ``None`` to add the job as paused)
        :param str|unicode jobstore: alias of the job store to store the job in
        :param str|unicode executor: alias of the executor to run the job with
        :param bool replace_existing: ``True`` to replace an existing job with the same ``id``
            (but retain the number of runs from the existing one)
        :rtype: Job

        """

        # Check if job with `id` already exists if `id` is provided.
        if id is not None and not replace_existing:
            # `get_job` should return None if there is no job with this id
            # else raise `ConflictingIdError`
            if self.get_job(job_id=id, jobstore=jobstore) is not None:
                raise ConflictingIdError(job_id=id)

        job_kwargs = {
            'trigger': self._create_trigger(trigger, trigger_args),
            'executor': executor,
            'func': func,
            'args': tuple(args) if args is not None else (),
            'kwargs': dict(kwargs) if kwargs is not None else {},
            'id': id,
            'name': name,
            'misfire_grace_time': misfire_grace_time,
            'coalesce': coalesce,
            'max_instances': max_instances,
            'next_run_time': next_run_time
        }
        job_kwargs = dict((key, value)
                          for key, value in six.iteritems(job_kwargs)
                          if value is not undefined)
        job = Job(self, **job_kwargs)

        # Don't really add jobs to job stores before the scheduler is up and running
        with self._jobstores_lock:
            if self.state == STATE_STOPPED:
                self._pending_jobs.append((job, jobstore, replace_existing))
                self._logger.info(
                    'Adding job tentatively -- it will be properly scheduled when '
                    'the scheduler starts')
            else:
                self._real_add_job(job, jobstore, replace_existing)

        return job
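
A minimal usage sketch of the add_job API documented above, using BackgroundScheduler; the tick function and the 'tick' job id are assumptions made for this example, and replace_existing=True is the documented way to avoid ConflictingIdError when registering the same id again.

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.base import ConflictingIdError

def tick():
    # hypothetical job function used only for this sketch
    print('tick')

scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(tick, trigger='interval', seconds=5, id='tick')
try:
    # a second add with the same id and replace_existing=False (the default) conflicts
    scheduler.add_job(tick, trigger='interval', seconds=5, id='tick')
except ConflictingIdError:
    scheduler.add_job(tick, trigger='interval', seconds=5, id='tick',
                      replace_existing=True)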
Example #25
 def add_job(self, job):
     try:
         self.ds.insert('calendar_tasks', self._serialize_job(job))
     except DuplicateKeyException:
         raise ConflictingIdError(job.id)
Example #26
 def add_job(self, job: Job) -> None:
     if self.storage.exist(key=job.id):
         raise ConflictingIdError(job.id)
     item = StorableJob(job=job)
     self.storage.save(item=item, update=True)
Example #27
class SchedulerTest(unittest.TestCase):
    @patch("dragonchain.scheduler.scheduler.worker")
    @patch("dragonchain.scheduler.scheduler.redis.delete_sync",
           return_value="OK")
    @patch("dragonchain.scheduler.scheduler.redis.lpush_sync",
           return_value="OK")
    @patch("dragonchain.scheduler.scheduler.redis.brpop_sync",
           return_value=[
               "whatever",
               '{"action":"create","contract_id":"apples","seconds":60}'
           ])
    def test_subscribe1(self, brpop, lpush, delete, mock_worker):
        scheduler.subscribe("mq:scheduler")
        mock_worker.assert_called_with({
            "action": "create",
            "contract_id": "apples",
            "seconds": 60
        })

    @patch("dragonchain.scheduler.scheduler.worker",
           side_effect=exceptions.TimingEventSchedulerError("boom"))
    @patch("dragonchain.scheduler.scheduler.redis.delete_sync",
           return_value="OK")
    @patch("dragonchain.scheduler.scheduler.redis.lpush_sync",
           return_value="OK")
    @patch("dragonchain.scheduler.scheduler.redis.brpop_sync",
           return_value=[
               "whatever",
               '{"action":"create","contract_id":"apples","seconds":60}'
           ])
    def test_subscribe2(self, brpop, lpush, delete, mock_worker):
        self.assertRaises(exceptions.TimingEventSchedulerError,
                          scheduler.subscribe, "mq:scheduler")
        lpush.assert_called_with("mq:scheduler:errors", ANY)

    @patch("dragonchain.scheduler.scheduler.redis.lpush_sync",
           return_value="OK")
    @patch("dragonchain.scheduler.scheduler.redis.hgetall_sync",
           return_value={"banana": '{"contract_id":"banana","seconds":54}'})
    def test_revive_dead_workers(self, hgetall, lpush):
        scheduler.revive_dead_workers()
        hgetall.assert_called_with("scheduler:params", decode=False)
        lpush.assert_called_with(
            "mq:scheduler",
            '{"contract_id":"banana","seconds":54,"action":"create"}')

    def test_parse_json_or_fail(self):
        try:
            scheduler.parse_json_or_fail("{{{{{}{}")
        except Exception as e:
            self.assertEqual(str(e), "MALFORMED_JSON")
            return
        self.fail()  # Force a failure if no exception thrown

    @patch("dragonchain.scheduler.scheduler.redis.lpush_sync")
    def test_schedule_contract_invocation(self, lpush):
        sc_model = MagicMock()
        sc_model.id = "my_name"
        sc_model.cron = "* * * * *"
        sc_model.seconds = None
        sc_model.txn_type = "banana"
        sc_model.execution_order = "serial"
        scheduler.schedule_contract_invocation(sc_model)
        lpush.assert_called_with(
            "mq:scheduler",
            '{"action":"create","contract_id":"my_name","txn_type":"banana","execution_order":"serial","cron":"* * * * *","seconds":null}',
        )

    @patch("dragonchain.scheduler.scheduler.redis.lpush_sync")
    def test_schedule_contract_invocation_raises(self, lpush):
        sc_model = FakeScModel("my_name", None, None)
        try:
            scheduler.schedule_contract_invocation(sc_model)
            self.fail("no error raised")
        except Exception as e:
            self.assertEqual(
                str(e), "You must provide cron or seconds to schedule a job")
            return
        self.fail()  # Force a failure if no exception thrown

    # CREATE NON EXISTENT JOB
    @patch("dragonchain.scheduler.timing_event.redis.hexists_sync",
           return_value=False)
    @patch("dragonchain.scheduler.timing_event.redis.hset_sync",
           return_value="1")
    def test_create_new_job(self, hset, hexists):
        change_request = {
            "action": "create",
            "contract_id": "goo",
            "txn_type": "banana",
            "execution_order": "serial",
            "cron": "* * * * *"
        }
        scheduler.worker(change_request)
        hset.assert_called_with(
            "scheduler:params", "goo",
            '{"cron":"* * * * *","seconds":null,"contract_id":"goo","execution_order":"serial","txn_type":"banana"}'
        )

    # CREATE EXISTING JOB
    @patch("dragonchain.scheduler.timing_event.redis.hset_sync")
    @patch("dragonchain.scheduler.timing_event.redis.hexists_sync",
           return_value=True)
    @patch("apscheduler.schedulers.background.BackgroundScheduler.add_job",
           side_effect=ConflictingIdError("goo"))
    def test_create_existing_job(self, hexists, mock_hexists, mock_hset):
        self.assertRaises(
            exceptions.TimingEventSchedulerError,
            scheduler.worker,
            {
                "action": "create",
                "contract_id": "goo",
                "txn_type": "banana",
                "execution_order": "serial",
                "cron": "* * * * *"
            },
        )

    # DELETE EXISTING JOB
    @patch("dragonchain.scheduler.timing_event.redis.hexists_sync")
    @patch("dragonchain.scheduler.timing_event.redis.hget_sync",
           return_value='{"contract_id":"goo","action":"delete","seconds":60}')
    @patch("dragonchain.scheduler.timing_event.redis.hdel_sync")
    @patch("apscheduler.schedulers.background.BackgroundScheduler.remove_job")
    def test_delete_job(self, remove_job, hdel, hget, hexists):
        change_request = {
            "action": "delete",
            "contract_id": "banana",
            "txn_type": "banana",
            "execution_order": "serial"
        }
        scheduler.worker(change_request)
        remove_job.assert_called_once()
        hdel.assert_called_once()

    # DELETE NON EXISTENT JOB
    @patch("dragonchain.scheduler.scheduler.timing_event.exists",
           return_value=False)
    @patch("dragonchain.scheduler.timing_event.redis.hget_sync",
           return_value='{"contract_id":"goo","action":"delete","seconds":60}')
    @patch("dragonchain.scheduler.timing_event.redis.hdel_sync")
    @patch("apscheduler.schedulers.background.BackgroundScheduler.remove_job")
    def test_delete_non_existent_job(self, remove_job, hdel, hget, exists):
        change_request = {
            "action": "delete",
            "contract_id": "banana",
            "txn_type": "banana",
            "execution_order": "serial"
        }
        scheduler.worker(change_request)
        remove_job.assert_not_called()
        hdel.assert_not_called()
        hget.assert_not_called()

    # UPDATE
    @patch("dragonchain.scheduler.scheduler.timing_event.exists",
           return_value=True)
    @patch(
        "apscheduler.schedulers.background.BackgroundScheduler.reschedule_job")
    @patch("dragonchain.scheduler.timing_event.redis.hget_sync",
           return_value='{"contract_id":"whatever"}')
    @patch("dragonchain.scheduler.timing_event.redis.hset_sync")
    def test_update_job(self, mock_hset, mock_hget, reschedule_job, exists):
        change_request = {
            "action": "update",
            "contract_id": "banana",
            "execution_order": "serial",
            "txn_type": "banana",
            "seconds": 61
        }
        scheduler.worker(change_request)
        reschedule_job.assert_called_with("whatever", trigger=ANY)

    # UPDATE NON EXISTENT JOB
    @patch("dragonchain.scheduler.scheduler.timing_event.exists",
           return_value=False)
    @patch(
        "dragonchain.scheduler.scheduler.redis.hgetall_sync",
        return_value={
            "a":
            '{"action":"update","contract_id":"goo","execution_order":"serial","txn_type":"banana",seconds":60}'
        },
    )
    def test_update_non_existent_job(self, hgetall, hexists):
        change_request = {
            "action": "update",
            "contract_id": "banana",
            "execution_order": "serial",
            "txn_type": "banana",
            "seconds": 61
        }
        self.assertRaises(exceptions.TimingEventSchedulerError,
                          scheduler.worker, change_request)
Example #28
def mock_unique_id_error(*args, **kwargs):
    raise ConflictingIdError(MOCK_JOB_ID)