from datetime import date, datetime, time, timedelta

from google.appengine.api import taskqueue
from google.appengine.api.taskqueue import TaskRetryOptions


def schedule_update(schedule_date=None, schedule_time=None, force=False):
    # Internal access for adding to the task queue
    if not schedule_date:
        schedule_date = date.today() + timedelta(days=1)
    if not schedule_time:
        schedule_time = '4:30'
    hour, minute = map(int, schedule_time.split(':'))

    # Converts from EST to UTC by default.
    hour += 5
    # Account for drifting into the next UTC day (time() rejects hour 24,
    # so roll over at 24 or above).
    if hour >= 24:
        hour -= 24
        schedule_date += timedelta(days=1)
    schedule_time = time(hour, minute)

    # This leaves plenty of error room for not screwing up dates.
    schedule_datetime = datetime.combine(schedule_date, schedule_time)

    args = {'url': '/update', 'eta': schedule_datetime, 'method': 'GET'}
    if force:
        # Unnamed, in the default queue, to force a new update to run at
        # the given time.
        args['retry_options'] = TaskRetryOptions(task_retry_limit=0)
    else:
        # Named after the date and restricted to 'updatequeue' to enforce
        # one push per day.
        args['name'] = str(schedule_date)
        args['queue_name'] = 'updatequeue'
    taskqueue.add(**args)
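# A hedged aside on the fixed "+5" offset above: it assumes EST year-round and
# ignores daylight saving time. A minimal sketch of a DST-aware alternative,
# assuming pytz is available (the helper name is illustrative, not from the
# original source):
from datetime import datetime

import pytz


def eastern_to_utc_eta(schedule_date, schedule_time):
    """Return a UTC datetime for the given US/Eastern wall-clock time."""
    eastern = pytz.timezone('US/Eastern')
    local_dt = eastern.localize(datetime.combine(schedule_date, schedule_time))
    return local_dt.astimezone(pytz.utc)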
def get(self):
    taskqueue.add(
        url='/fail',
        method='POST',
        retry_options=TaskRetryOptions(task_retry_limit=0),
    )
    self.response.headers['Content-Type'] = 'text/plain'
    self.response.write('enqueued task')
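# Hypothetical counterpart for the '/fail' route enqueued above (the handler
# itself is not in the source): any non-2xx response marks the task attempt as
# failed, and with task_retry_limit=0 a failed attempt is dropped rather than
# retried.
import webapp2


class FailHandler(webapp2.RequestHandler):
    def post(self):
        # Returning 500 fails the task attempt; no retry will follow.
        self.response.set_status(500)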
import json
from datetime import datetime, timedelta

from google.appengine.api import taskqueue
from google.appengine.api.taskqueue import TaskRetryOptions
from pytz import timezone


def add_notification_to_task_queue(notification):
    if notification.is_in_task_queue:
        return
    notification.is_in_task_queue = True
    # Also done to get the key to use as the name of the task.
    # https://cloud.google.com/appengine/docs/python/refdocs/google.appengine.api.taskqueue#google.appengine.api.taskqueue.add
    notification_key = notification.put()
    payload = {"urlsafe_entity_key": notification_key.urlsafe()}

    # Schedule for today if the notification time is still ahead of the
    # current Eastern time, otherwise for tomorrow.
    now = datetime.now(timezone('US/Eastern'))
    if (now.hour, now.minute) < (notification.time.hour, notification.time.minute):
        eta_day = now
    else:
        eta_day = now + timedelta(days=1)
    eta = now.replace(second=0,
                      hour=notification.time.hour,
                      minute=notification.time.minute,
                      day=eta_day.day, month=eta_day.month, year=eta_day.year)

    notification.time = notification.time.replace(day=eta.day,
                                                  month=eta.month,
                                                  year=eta.year)
    notification.put()

    taskqueue.add(url='/queue/send-notification',
                  name=notification.get_task_name() + eta.strftime("%m%d%Y%I%M"),
                  payload=json.dumps(payload),
                  eta=eta,
                  retry_options=TaskRetryOptions(task_retry_limit=1))
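# Because the task above is named, a second add() with the same name raises.
# A minimal guard sketch using the exceptions the taskqueue API defines
# (the helper name is illustrative):
from google.appengine.api import taskqueue


def add_named_task_once(**task_args):
    try:
        taskqueue.add(**task_args)
    except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
        # The name is already in use, or was used recently and is tombstoned;
        # treat the duplicate as a no-op.
        pass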
def to_task(self):
    """Return a task object representing this async job."""
    import copy
    import json

    from google.appengine.api.taskqueue import Task
    from google.appengine.api.taskqueue import TaskRetryOptions

    self._increment_recursion_level()
    self.check_recursion_depth()

    url = "%s/%s" % (ASYNC_ENDPOINT, self._function_path)

    kwargs = {
        'url': url,
        'headers': self.get_headers().copy(),
        'payload': json.dumps(self.to_dict())
    }
    kwargs.update(copy.deepcopy(self.get_task_args()))

    # Merge per-call retry options over the defaults to set task_retry_limit.
    retry_options = copy.deepcopy(DEFAULT_RETRY_OPTIONS)
    retry_options.update(kwargs.pop('retry_options', {}))
    kwargs['retry_options'] = TaskRetryOptions(**retry_options)

    return Task(**kwargs)
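# DEFAULT_RETRY_OPTIONS is deep-copied, update()d, and then splatted into
# TaskRetryOptions, so it is presumably a plain dict of keyword arguments;
# the value below is illustrative only:
DEFAULT_RETRY_OPTIONS = {'task_retry_limit': 5}

# Sketch of enqueueing the returned Task (queue name and job object assumed):
# from google.appengine.api.taskqueue import Queue
# Queue('default').add(job.to_task())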
def _FindAndRestartJobs():
    jobs = _FindFrozenJobs()
    opts = TaskRetryOptions(task_retry_limit=1)
    for j in jobs:
        deferred.defer(_ProcessFrozenJob, j.job_id, _retry_options=opts)
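# deferred.defer passes underscore-prefixed options (here _retry_options)
# through to the underlying Task. Sketch of a worker shape; the real
# _ProcessFrozenJob is not shown in the source, so this body and the Job
# model are hypothetical:
from google.appengine.ext import deferred


def _ProcessFrozenJob(job_id):
    job = Job.get_by_id(job_id)  # hypothetical ndb model lookup
    if job is None:
        # PermanentTaskFailure aborts retries regardless of the retry limit.
        raise deferred.PermanentTaskFailure('unknown job: %s' % job_id)
    job.restart()  # hypothetical restart method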
from google.appengine.api.taskqueue import TaskRetryOptions

from . import app
from .deltabot import config
from .deltabot.bot import CommentsConsumer, MessagesConsumer
from .deltabot.utils import defer_reddit

cron_retry_options = TaskRetryOptions(task_retry_limit=0)


@app.route('/')
def index():
    return 'Hello, World!'


@app.route('/crons/consumecomments')
def consume_comments():
    comments_consumer = CommentsConsumer()
    defer_reddit(comments_consumer.run, _retry_options=cron_retry_options)
    return 'Task enqueued'


@app.route('/crons/consumemessages')
def consume_messages():
    messages_consumer = MessagesConsumer()
    countdown = 0 if config.IS_DEV else 600
    defer_reddit(messages_consumer.run, _countdown=countdown,
                 _retry_options=cron_retry_options)
    return 'Task enqueued'
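# App Engine's Cron Service marks its requests with the X-Appengine-Cron
# header, which is stripped from external traffic. A hedged sketch of guarding
# a cron route (Flask assumed from the @app.route usage; the route below is
# illustrative, not from the source):
from flask import abort, request


@app.route('/crons/example')
def guarded_cron_example():
    if request.headers.get('X-Appengine-Cron') != 'true':
        abort(403)
    return 'Task enqueued'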
logger.info('Trying to post to an inactive channel: %s; '
            'shutting this channel down for this feed: %s',
            feed.channel_id, feed.key.urlsafe())
if not feed.publish_to_stream:
    logger.info("Feed wasn't set to publish publicly; deleting the channel "
                "altogether %s %s %s",
                feed.channel_id, feed.key.urlsafe(), feed.feed_url)
    yield feed.key.delete_async()
else:
    feed.channel_id = None
    yield feed.put_async()


@ndb.synctasklet
def publish_to_api(entry_key, feed_key, path, post, access_token):
    api_publisher = ApiPublisher(entry_key, feed_key)
    yield api_publisher.send_to_api(path, post, access_token)
    logger.info('publishing to the api')


api_publish_opts = TaskRetryOptions(task_retry_limit=3)


class EntryPublisher(object):

    def __init__(self, entry, feed, user, ignore_publish_state=False):
        self.entry = entry
        self.feed = feed
        self.user = user
        self.ignore_publish_state = ignore_publish_state

    @classmethod
    @ndb.tasklet
    def from_data(cls, entry, feed, ignore_publish_state=False):
        user = yield feed.key.parent().get_async()
        raise ndb.Return(cls(entry, feed, user, ignore_publish_state))
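# api_publish_opts is defined above but its call site is not shown; presumably
# it is threaded through deferred.defer so the publish task gets up to three
# retries. Sketch (the wrapper name and argument values are assumptions):
from google.appengine.ext import deferred


def enqueue_api_publish(entry, feed, path, post, access_token):
    deferred.defer(publish_to_api, entry.key, feed.key, path, post,
                   access_token, _retry_options=api_publish_opts)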