def is_due(self, last_run_at): """Returns tuple of two items `(is_due, next_time_to_run)`, where next time to run is in seconds. e.g. * `(True, 20)`, means the task should be run now, and the next time to run is in 20 seconds. * `(False, 12)`, means the task should be run in 12 seconds. You can override this to decide the interval at runtime, but keep in mind the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, which decides the maximum number of seconds celerybeat can sleep between re-checking the periodic task intervals. So if you dynamically change the next run at value, and the max interval is set to 5 minutes, it will take 5 minutes for the change to take effect, so you may consider lowering the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` if responsiveness is of importance to you. """ rem_delta = self.remaining_estimate(last_run_at) rem = timedelta_seconds(rem_delta) if rem == 0: return True, timedelta_seconds(self.run_every) return False, rem
def is_due(self, last_run_at): """Returns tuple of two items ``(is_due, next_time_to_run)``, where next time to run is in seconds. e.g. * ``(True, 20)``, means the task should be run now, and the next time to run is in 20 seconds. * ``(False, 12)``, means the task should be run in 12 seconds. You can override this to decide the interval at runtime, but keep in mind the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, which decides the maximum number of seconds celerybeat can sleep between re-checking the periodic task intervals. So if you dynamically change the next run at value, and the max interval is set to 5 minutes, it will take 5 minutes for the change to take effect, so you may consider lowering the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` if responsiveness is of importance to you. """ rem_delta = self.remaining_estimate(last_run_at) rem = timedelta_seconds(rem_delta) if rem == 0: return True, timedelta_seconds(self.run_every) return False, rem
def is_due(self, last_run_at): """Returns tuple of two items ``(is_due, next_time_to_run)``, where next time to run is in seconds. See :meth:`celery.task.base.PeriodicTask.is_due` for more information. """ rem_delta = self.remaining_estimate(last_run_at) rem = timedelta_seconds(rem_delta) if rem == 0: return True, timedelta_seconds(self.run_every) return False, rem
def is_due(self, last_run_at): """Returns tuple of two items `(is_due, next_time_to_run)`, where next time to run is in seconds. See :meth:`celery.schedules.schedule.is_due` for more information. """ rem_delta = self.remaining_estimate(last_run_at) rem = timedelta_seconds(rem_delta) due = rem == 0 if due: rem_delta = self.remaining_estimate(last_run_at=self.nowfun()) rem = timedelta_seconds(rem_delta) return due, rem
def is_due(self, last_run_at): """Returns tuple of two items ``(is_due, next_time_to_run)``, where next time to run is in seconds. See :meth:`celery.schedules.schedule.is_due` for more information. """ rem_delta = self.remaining_estimate(last_run_at) rem = timedelta_seconds(rem_delta) due = rem == 0 if due: rem_delta = self.remaining_estimate(last_run_at=self.nowfun()) rem = timedelta_seconds(rem_delta) return due, rem
def _do_store(): cf = self._get_column_family() date_done = self.app.now() meta = {"status": status, "date_done": date_done.strftime('%Y-%m-%dT%H:%M:%SZ'), "traceback": self.encode(traceback)} if self.detailed_mode: meta["result"] = result cf.insert(task_id, {date_done: self.encode(meta)}, ttl=timedelta_seconds(self.expires)) else: meta["result"] = self.encode(result) cf.insert(task_id, meta, ttl=timedelta_seconds(self.expires))
def _do_store():
    cf = self._get_column_family()
    date_done = self.app.now()
    meta = {'status': status,
            'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'),
            'traceback': self.encode(traceback),
            'children': self.encode(self.current_task_children())}
    if self.detailed_mode:
        meta['result'] = result
        cf.insert(task_id, {date_done: self.encode(meta)},
                  ttl=self.expires and timedelta_seconds(self.expires))
    else:
        meta['result'] = self.encode(result)
        cf.insert(task_id, meta,
                  ttl=self.expires and timedelta_seconds(self.expires))
def __init__(self, connection=None, exchange=None, exchange_type=None,
             persistent=None, serializer=None, auto_delete=True,
             expires=None, connection_max=None, **kwargs):
    super(AMQPBackend, self).__init__(**kwargs)
    conf = self.app.conf
    self._connection = connection
    self.queue_arguments = {}
    if persistent is None:
        persistent = conf.CELERY_RESULT_PERSISTENT
    self.persistent = persistent
    delivery_mode = persistent and "persistent" or "transient"
    exchange = exchange or conf.CELERY_RESULT_EXCHANGE
    exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
    self.exchange = self.Exchange(name=exchange,
                                  type=exchange_type,
                                  delivery_mode=delivery_mode,
                                  durable=self.persistent,
                                  auto_delete=auto_delete)
    self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
    self.auto_delete = auto_delete
    self.expires = expires
    if self.expires is None:
        self.expires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    if self.expires is not None:
        self.expires = int(self.expires)
        # requires RabbitMQ 2.1.0 or higher.
        self.queue_arguments["x-expires"] = int(self.expires * 1000.0)
    self.connection_max = (connection_max or
                           conf.CELERY_AMQP_TASK_RESULT_CONNECTION_MAX)
    self.mutex = threading.Lock()
def __init__(self, redis_host=None, redis_port=None, redis_db=None,
             redis_password=None, expires=None, **kwargs):
    super(RedisBackend, self).__init__(**kwargs)
    if self.redis is None:
        raise ImproperlyConfigured(
            "You need to install the redis library in order to use "
            + "Redis result store backend.")
    self.redis_host = (redis_host or
                       self.app.conf.get("REDIS_HOST") or
                       self.redis_host)
    self.redis_port = (redis_port or
                       self.app.conf.get("REDIS_PORT") or
                       self.redis_port)
    self.redis_db = (redis_db or
                     self.app.conf.get("REDIS_DB") or
                     self.redis_db)
    self.redis_password = (redis_password or
                           self.app.conf.get("REDIS_PASSWORD") or
                           self.redis_password)
    self.expires = expires
    if self.expires is None:
        self.expires = self.app.conf.CELERY_TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    if self.expires is not None:
        self.expires = int(self.expires)
    self.redis_port = int(self.redis_port)
def _calculate_times(self):
    """
    Calculates and returns several time-related values that tend to be
    needed at the same time.

    :return: tuple of numbers described below...

             now_s: current time as seconds since the epoch

             first_run_s: time of the first run as seconds since the epoch,
                          calculated based on self.first_run

             since_first_s: how many seconds have elapsed since the first run

             run_every_s: how many seconds should elapse between runs of
                          this schedule

             last_scheduled_run_s: the most recent time at which this
                                   schedule should have run based on its
                                   schedule, as seconds since the epoch

             expected_runs: number of runs that should have happened based
                            on the first_run time and the interval
    :rtype:  tuple
    """
    now_s = time.time()
    first_run_dt = dateutils.to_utc_datetime(
        dateutils.parse_iso8601_datetime(self.first_run))
    first_run_s = calendar.timegm(first_run_dt.utctimetuple())
    since_first_s = now_s - first_run_s
    run_every_s = timedelta_seconds(
        self.as_schedule_entry().schedule.run_every)

    # don't want this to be negative
    expected_runs = max(int(since_first_s / run_every_s), 0)
    last_scheduled_run_s = first_run_s + expected_runs * run_every_s

    return now_s, first_run_s, since_first_s, run_every_s, \
        last_scheduled_run_s, expected_runs
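# Worked example (hypothetical numbers) of the fixed-interval arithmetic
# above: with first_run_s = 1000, now_s = 1130 and a 60-second interval,
# two full intervals have elapsed, so two runs were expected and the most
# recent one should have started at t = 1120.
since_first_s = 1130 - 1000                                # 130
run_every_s = 60
expected_runs = max(int(since_first_s / run_every_s), 0)   # 2
last_scheduled_run_s = 1000 + expected_runs * run_every_s  # 1120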
def __init__(self, *args, **kwargs): super(CacheBackend, self).__init__(self, *args, **kwargs) expires = kwargs.get("expires", default_app.conf.CELERY_TASK_RESULT_EXPIRES) if isinstance(expires, timedelta): expires = int(timedelta_seconds(expires)) self.expires = expires
def __init__(self, connection=None, exchange=None, exchange_type=None,
             persistent=None, serializer=None, auto_delete=None,
             expires=None, **kwargs):
    self._connection = connection
    self.queue_arguments = {}
    self.exchange = exchange
    self.exchange_type = exchange_type
    self.persistent = persistent
    self.serializer = serializer
    self.auto_delete = auto_delete
    self.expires = expires
    if self.expires is None:
        self.expires = conf.AMQP_TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    if self.expires is not None:
        self.expires = int(self.expires)
        # WARNING: Requires RabbitMQ 2.1.0 or higher.
        # x-expires must be a signed-int, or long describing the
        # expiry time in milliseconds.
        self.queue_arguments["x-expires"] = int(self.expires * 1000.0)
    super(AMQPBackend, self).__init__(**kwargs)
def test_is_due(self):
    p = my_periodic
    due, remaining = p.run_every.is_due(now() - p.run_every.run_every)
    self.assertTrue(due)
    self.assertEqual(remaining, timedelta_seconds(p.run_every.run_every))
def timedelta_seconds(self, delta): """Convert :class:`~datetime.timedelta` to seconds. Doesn't account for negative timedeltas. """ return timeutils.timedelta_seconds(delta)
def timedelta_seconds(self, delta): """Convert :class:`~datetime.timedelta` to seconds. Doesn't account for negative timedeltas. """ return timedelta_seconds(delta)
def is_due(self, last_run_at):
    # Not threadsafe, but I don't think this needs to be.
    # If I'm wrong, throw a mutex around this.
    if self.__first_run:
        self.__first_run = False
        return True, timedelta_seconds(self.run_every)
    return super(tick_schedule, self).is_due(last_run_at)
def test_timedelta_seconds(self):
    deltamap = ((timedelta(seconds=1), 1),
                (timedelta(seconds=27), 27),
                (timedelta(minutes=3), 3 * 60),
                (timedelta(hours=4), 4 * 60 * 60),
                (timedelta(days=3), 3 * 86400))
    for delta, seconds in deltamap:
        self.assertEqual(timedelta_seconds(delta), seconds)
def __init__(self, *args, **kwargs):
    super(CacheBackend, self).__init__(*args, **kwargs)
    expires = kwargs.get('expires',
                         current_app.conf.CELERY_TASK_RESULT_EXPIRES)
    if isinstance(expires, timedelta):
        expires = int(timedelta_seconds(expires))
    self.expires = expires
def test_is_due(self):
    p = my_periodic
    due, remaining = p.run_every.is_due(
        datetime.utcnow() - p.run_every.run_every)
    self.assertTrue(due)
    self.assertEqual(remaining, timedelta_seconds(p.run_every.run_every))
def __init__(self, connection=None, exchange=None, exchange_type=None,
             persistent=None, serializer=None, auto_delete=True,
             expires=None, **kwargs):
    super(AMQPBackend, self).__init__(**kwargs)
    conf = self.app.conf
    self._connection = connection
    self.queue_arguments = {}
    exchange = exchange or conf.CELERY_RESULT_EXCHANGE
    exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
    if persistent is None:
        persistent = conf.CELERY_RESULT_PERSISTENT
    self.persistent = persistent
    delivery_mode = persistent and "persistent" or "transient"
    self.exchange = Exchange(name=exchange,
                             type=exchange_type,
                             delivery_mode=delivery_mode,
                             durable=self.persistent,
                             auto_delete=auto_delete)
    self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
    self.auto_delete = auto_delete
    self.expires = expires
    if self.expires is None:
        self.expires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    if self.expires is not None:
        self.expires = int(self.expires)
        # WARNING: Requires RabbitMQ 2.1.0 or higher.
        # x-expires must be a signed-int, or long describing
        # the expiry time in milliseconds.
        self.queue_arguments["x-expires"] = int(self.expires * 1000.0)
def prepare_expires(self, value, type=None):
    if value is None:
        value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
    if isinstance(value, timedelta):
        value = timeutils.timedelta_seconds(value)
    if value is not None and type:
        return type(value)
    return value
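# Sketch of how prepare_expires normalizes the three accepted forms of an
# expiry setting (``backend`` is a hypothetical instance exposing the
# method above):
backend.prepare_expires(timedelta(minutes=5), type=int)  # -> 300
backend.prepare_expires(120.5, type=int)                 # -> 120
backend.prepare_expires(None)  # falls back to CELERY_TASK_RESULT_EXPIRES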
def _do_store(): cf = self._get_column_family() date_done = datetime.utcnow() meta = {"status": status, "result": pickle.dumps(result), "date_done": date_done.strftime('%Y-%m-%dT%H:%M:%SZ'), "traceback": pickle.dumps(traceback)} cf.insert(task_id, meta, ttl=timedelta_seconds(self.result_expires))
def __init__(self, expires=conf.TASK_RESULT_EXPIRES,
             backend=conf.CACHE_BACKEND, options={}, **kwargs):
    # The original passed ``self`` as an extra positional argument to
    # the super call; that duplicate is dropped here.
    super(CacheBackend, self).__init__(**kwargs)
    if isinstance(expires, timedelta):
        expires = timeutils.timedelta_seconds(expires)
    self.expires = expires
    self.options = dict(conf.CACHE_BACKEND_OPTIONS, **options)
    self.backend, _, servers = partition(backend, "://")
    self.servers = servers.split(";")
def from_schedule(cls, schedule, period='seconds'):
    every = timedelta_seconds(schedule.run_every)
    try:
        return cls.objects.get(every=every, period=period)
    except cls.DoesNotExist:
        return cls(every=every, period=period)
    except MultipleObjectsReturned:
        cls.objects.filter(every=every, period=period).delete()
        return cls(every=every, period=period)
def test_PeriodicTask_schedule_property(self): p1 = create_model_interval(schedule(timedelta(seconds=10))) s1 = p1.schedule self.assertEqual(timedelta_seconds(s1.run_every), 10) p2 = create_model_crontab(crontab(hour="4, 5", minute="10,20,30")) s2 = p2.schedule self.assertSetEqual(s2.hour, set([4, 5])) self.assertSetEqual(s2.minute, set([10, 20, 30])) self.assertSetEqual(s2.day_of_week, set([0, 1, 2, 3, 4, 5, 6]))
def _do_store():
    cf = self._get_column_family()
    date_done = datetime.utcnow()
    meta = {"status": status,
            "result": pickle.dumps(result),
            "date_done": date_done.strftime('%Y-%m-%dT%H:%M:%SZ'),
            "traceback": pickle.dumps(traceback)}
    cf.insert(task_id, meta, ttl=timedelta_seconds(self.expires))
def to_model_schedule(schedule):
    if schedule.__class__ == schedules.schedule:
        row = get_or_make_unique(db.celery_intervalschedule,
                                 every=timedelta_seconds(schedule.run_every),
                                 period='seconds')
        return row, 'interval'
    elif schedule.__class__ == schedules.crontab:
        row = get_or_make_unique(db.celery_crontabschedule,
                                 minute=schedule._orig_minute,
                                 hour=schedule._orig_hour,
                                 day_of_week=schedule._orig_day_of_week)
        return row, 'crontab'
    else:
        raise ValueError("Can't convert schedule type %r to model" % schedule)
def _do_store(): cf = self._get_column_family() date_done = self.app.now() meta = { "status": status, "date_done": date_done.strftime("%Y-%m-%dT%H:%M:%SZ"), "traceback": self.encode(traceback), "result": self.encode(result), "children": self.encode(self.current_task_children(request)), } if self.detailed_mode: cf.insert(task_id, {date_done: self.encode(meta)}, ttl=self.expires and timedelta_seconds(self.expires)) else: cf.insert(task_id, meta, ttl=self.expires and timedelta_seconds(self.expires))
def test_PeriodicTask_schedule_property(self):
    p1 = create_model_interval(schedule(timedelta(seconds=10)))
    s1 = p1.schedule
    self.assertEqual(timedelta_seconds(s1.run_every), 10)

    p2 = create_model_crontab(crontab(hour='4, 5',
                                      minute='10,20,30',
                                      day_of_month='1-7',
                                      month_of_year='*/3'))
    s2 = p2.schedule
    self.assertSetEqual(s2.hour, set([4, 5]))
    self.assertSetEqual(s2.minute, set([10, 20, 30]))
    self.assertSetEqual(s2.day_of_week, set([0, 1, 2, 3, 4, 5, 6]))
    self.assertSetEqual(s2.day_of_month, set([1, 2, 3, 4, 5, 6, 7]))
    self.assertSetEqual(s2.month_of_year, set([1, 4, 7, 10]))
def __init__(self, connection=None, exchange=None, exchange_type=None,
             persistent=None, serializer=None, auto_delete=None,
             expires=None, **kwargs):
    self._connection = connection
    self.exchange = exchange
    self.exchange_type = exchange_type
    self.persistent = persistent
    self.serializer = serializer
    self.auto_delete = auto_delete
    self.expires = expires
    if self.expires is None:
        self.expires = conf.TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    if self.expires is not None:
        self.expires = int(self.expires)
    super(AMQPBackend, self).__init__(**kwargs)
def __init__(self, expires=conf.TASK_RESULT_EXPIRES,
             backend=conf.CACHE_BACKEND, options={}, **kwargs):
    # The original passed ``self`` as an extra positional argument to
    # the super call; that duplicate is dropped here.
    super(CacheBackend, self).__init__(**kwargs)
    if isinstance(expires, timedelta):
        expires = timeutils.timedelta_seconds(expires)
    self.expires = int(expires)
    self.options = dict(conf.CACHE_BACKEND_OPTIONS, **options)
    self.backend, _, servers = partition(backend, "://")
    self.servers = servers.split(";")
    try:
        self.Client = backends[self.backend]
    except KeyError:
        raise ImproperlyConfigured(
            "Unknown cache backend: %s. Please use one of the "
            "following backends: %s" % (self.backend,
                                        ", ".join(backends.keys())))
def __init__(self, redis_host=None, redis_port=None, redis_db=None,
             redis_timeout=None, redis_password=None,
             redis_connect_retry=None, redis_connect_timeout=None,
             expires=None):
    if redis is None:
        raise ImproperlyConfigured(
            "You need to install the redis library in order to use "
            + "Redis result store backend.")
    settings = load_settings()
    self.redis_host = redis_host or \
        getattr(settings, "REDIS_HOST", self.redis_host)
    self.redis_port = redis_port or \
        getattr(settings, "REDIS_PORT", self.redis_port)
    self.redis_db = redis_db or \
        getattr(settings, "REDIS_DB", self.redis_db)
    self.redis_password = redis_password or \
        getattr(settings, "REDIS_PASSWORD", self.redis_password)
    self.expires = expires
    if self.expires is None:
        self.expires = conf.TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    if self.expires is not None:
        self.expires = int(self.expires)
    for setting_name in self.deprecated_settings:
        if getattr(settings, setting_name, None) is not None:
            warnings.warn(
                "The setting '%s' is no longer supported by the "
                "python Redis client!" % setting_name.upper(),
                DeprecationWarning)
    if self.redis_port:
        self.redis_port = int(self.redis_port)
    if not self.redis_host or not self.redis_port:
        raise ImproperlyConfigured(
            "In order to use the Redis result store backend, you have to "
            "set the REDIS_HOST and REDIS_PORT settings")
    super(RedisBackend, self).__init__()
    self._connection = None
def __init__(self, redis_host=None, redis_port=None, redis_db=None,
             redis_timeout=None, redis_password=None,
             redis_connect_retry=None, redis_connect_timeout=None,
             expires=None, **kwargs):
    super(RedisBackend, self).__init__(**kwargs)
    if redis is None:
        raise ImproperlyConfigured(
            "You need to install the redis library in order to use "
            + "Redis result store backend.")
    self.redis_host = (redis_host or
                       self.app.conf.get("REDIS_HOST") or
                       self.redis_host)
    self.redis_port = (redis_port or
                       self.app.conf.get("REDIS_PORT") or
                       self.redis_port)
    self.redis_db = (redis_db or
                     self.app.conf.get("REDIS_DB") or
                     self.redis_db)
    self.redis_password = (redis_password or
                           self.app.conf.get("REDIS_PASSWORD") or
                           self.redis_password)
    self.expires = expires
    if self.expires is None:
        self.expires = self.app.conf.CELERY_TASK_RESULT_EXPIRES
    if isinstance(self.expires, timedelta):
        self.expires = timeutils.timedelta_seconds(self.expires)
    if self.expires is not None:
        self.expires = int(self.expires)
    for setting_name in self.deprecated_settings:
        if self.app.conf.get(setting_name) is not None:
            warnings.warn(
                "The setting '%s' is no longer supported by the "
                "python Redis client!" % setting_name.upper(),
                DeprecationWarning)
    if self.redis_port:
        self.redis_port = int(self.redis_port)
    if not self.redis_host or not self.redis_port:
        raise ImproperlyConfigured(
            "In order to use the Redis result store backend, you have to "
            "set the REDIS_HOST and REDIS_PORT settings")
    self._connection = None
def __init__(self, *args, **kwargs): super(CacheBackend, self).__init__(self, *args, **kwargs) expires = kwargs.get("expires", conf.TASK_RESULT_EXPIRES) if isinstance(expires, timedelta): expires = int(timedelta_seconds(expires)) self.expires = expires
def test_expires_is_None(self):
    b = self.Backend(expires=None)
    self.assertEqual(
        b.expires,
        timedelta_seconds(current_app.conf.CELERY_TASK_RESULT_EXPIRES))
def test_timedelta_seconds_returns_0_on_negative_time(self):
    delta = timedelta(days=-2)
    self.assertEqual(timeutils.timedelta_seconds(delta), 0)
def from_schedule(cls, schedule):
    return cls(every=timedelta_seconds(schedule.run_every),
               period="seconds")
def test_expires_is_None(self):
    b = self.Backend(expires=None, app=self.app, new_join=True)
    self.assertEqual(
        b.expires,
        timedelta_seconds(self.app.conf.CELERY_TASK_RESULT_EXPIRES))
def __repr__(self):
    return "<freq: %s>" % humanize_seconds(
        timedelta_seconds(self.run_every))
def _calculate_times(self):
    """
    Calculates and returns several time-related values that tend to be
    needed at the same time.

    :return: tuple of numbers described below...

             now_s: current time as seconds since the epoch

             first_run_s: time of the first run as seconds since the epoch,
                          calculated based on self.first_run

             since_first_s: how many seconds have elapsed since the first run

             run_every_s: how many seconds should elapse between runs of
                          this schedule

             last_scheduled_run_s: the most recent time at which this
                                   schedule should have run based on its
                                   schedule, as seconds since the epoch

             expected_runs: number of runs that should have happened based
                            on the first_run time and the interval
    :rtype:  tuple
    """
    now_s = time.time()
    first_run_dt = dateutils.to_utc_datetime(
        dateutils.parse_iso8601_datetime(self.first_run))
    first_run_s = calendar.timegm(first_run_dt.utctimetuple())
    since_first_s = now_s - first_run_s

    # An interval could be an isodate.Duration or a datetime.timedelta
    interval = self.as_schedule_entry().schedule.run_every
    if isinstance(interval, isodate.Duration):
        # Determine how long (in seconds) to wait between the last run
        # and the next one. This changes depending on the current time
        # because a duration can be a month or a year.
        if self.last_run_at is not None:
            last_run_dt = dateutils.to_utc_datetime(
                dateutils.parse_iso8601_datetime(str(self.last_run_at)))
            run_every_s = timedelta_seconds(
                interval.totimedelta(start=last_run_dt))
        else:
            run_every_s = timedelta_seconds(
                interval.totimedelta(start=first_run_dt))

        # This discovers how many runs should have occurred based on
        # the schedule
        expected_runs = 0
        current_run = first_run_dt
        last_scheduled_run_s = first_run_s
        duration = self.as_schedule_entry().schedule.run_every
        while True:
            # The interval is determined by the date of the previous run
            current_interval = duration.totimedelta(start=current_run)
            current_run += current_interval
            # If time of this run is less than the current time, keep going
            current_run_s = calendar.timegm(current_run.utctimetuple())
            if current_run_s < now_s:
                expected_runs += 1
                last_scheduled_run_s += timedelta_seconds(current_interval)
            else:
                break
    else:
        run_every_s = timedelta_seconds(interval)
        # don't want this to be negative
        expected_runs = max(int(since_first_s / run_every_s), 0)
        last_scheduled_run_s = first_run_s + expected_runs * run_every_s

    return now_s, first_run_s, since_first_s, run_every_s, \
        last_scheduled_run_s, expected_runs
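# Sketch of why the Duration branch above recomputes the interval per
# run: a calendar duration covers a different number of seconds depending
# on when it starts (assumes the isodate library already used above).
import isodate
from datetime import datetime

month = isodate.Duration(months=1)
month.totimedelta(start=datetime(2014, 1, 1))  # 31 days in January
month.totimedelta(start=datetime(2014, 2, 1))  # 28 days in February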
def to_timestamp(d):
    if isinstance(d, datetime):
        if d.tzinfo is None:
            d = d.replace(tzinfo=timezone.utc)
        return timedelta_seconds(d - EPOCH)
    return d
def test_expires_is_None(self):
    b = self.Backend(expires=None, app=self.app)
    self.assertEqual(b.expires, timedelta_seconds(
        self.app.conf.CELERY_TASK_RESULT_EXPIRES))
def test_timedelta_seconds_returns_0_on_negative_time(self):
    delta = timedelta(days=-2)
    self.assertEqual(timedelta_seconds(delta), 0)
def to_timestamp(d, default_timezone=timezone.utc):
    if isinstance(d, datetime):
        if d.tzinfo is None:
            d = d.replace(tzinfo=default_timezone)
        return timedelta_seconds(d - EPOCH)
    return d
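# Usage sketch for the helper above: naive datetimes are assumed to be in
# default_timezone (UTC here), and non-datetime values pass through
# unchanged, so already-converted timestamps are safe to feed back in.
from datetime import datetime, timezone

to_timestamp(datetime(1970, 1, 2, tzinfo=timezone.utc))  # -> 86400.0
to_timestamp(86400.0)                                    # -> 86400.0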