def get(self):
    """Fan out a '/work' push-queue task for every enabled Task entity."""
    from google.appengine.api import taskqueue
    for task in Task.all().fetch(1000):
        if not task.enabled:
            continue
        taskqueue.add(url='/work',
                      method="GET",
                      params={"key": task.key()})
    self.response.out.write('Queued all tasks complete.')
def get_all_direct_subtasks(domain_identifier,
                            root_task=None,
                            limit=100,
                            user_identifier=None):
    """
    Returns all direct subtasks of a |root_task| in the given domain.

    If no |root_task| is specified, then all root tasks of the domain
    will be returned. This function returns at most |limit| tasks.

    Args:
        domain_identifier: The domain identifier string
        root_task: An instance of the Task model
        limit: The maximum number of tasks that will be returned
        user_identifier: Optional user identifier. If provided, the
            tasks will be sorted on their active state for that user.

    Returns:
        A list of at most |limit| task instances of the domain, who
        are all direct descendants of |root_task|, or are all root
        task if no specific |root_task| is specified. The tasks are
        ordered on completion state, and if a |user_identifier| is
        provided, also on active state.
    """
    domain_key = Domain.key_from_name(domain_identifier)
    query = Task.all().ancestor(domain_key)
    query.filter('parent_task = ', root_task)
    subtasks = query.fetch(limit)
    _sort_tasks(subtasks, user_identifier=user_identifier)
    return subtasks
def get_all_direct_subtasks(domain_identifier,
                            root_task=None,
                            limit=100,
                            user_identifier=None):
    """
    Returns all direct subtasks of a |root_task| in the given domain.

    If no |root_task| is specified, then all root tasks of the domain
    will be returned. This function returns at most |limit| tasks.

    Args:
        domain_identifier: The domain identifier string
        root_task: An instance of the Task model
        limit: The maximum number of tasks that will be returned
        user_identifier: Optional user identifier. If provided, the
            tasks will be sorted on their active state for that user.

    Returns:
        A list of at most |limit| task instances of the domain, who
        are all direct descendants of |root_task|, or are all root
        task if no specific |root_task| is specified. The tasks are
        ordered on completion state, and if a |user_identifier| is
        provided, also on active state.
    """
    query = Task.all()
    query.ancestor(Domain.key_from_name(domain_identifier))
    query.filter('parent_task = ', root_task)
    tasks = query.fetch(limit)
    _sort_tasks(tasks, user_identifier=user_identifier)
    return tasks
def txn():
    # Recomputes the derived (cached) completion statistics of a single
    # task and its TaskIndex inside a datastore transaction, then enqueues
    # the same update for the parent so the change bubbles up the
    # hierarchy. Closes over domain_identifier, task_identifier and
    # domain_key from the enclosing scope.
    task = api.get_task(domain_identifier, task_identifier)
    if not task:
        logging.error("Task '%s/%s' does not exist",
                      domain_identifier, task_identifier)
        return
    index = TaskIndex.get_by_key_name(task_identifier, parent=task)
    if not index:
        # First time this task is indexed; create its index entity.
        index = TaskIndex(parent=task, key_name=task_identifier)
    # Get all subtasks. The ancestor queries are strongly
    # consistent, so when propagating upwards through the
    # hierarchy the changes are reflected.
    subtasks = list(Task.all().ancestor(domain_key).filter("parent_task =", task.key()))
    if not subtasks:  # atomic task
        # A leaf task: its derived values describe only itself.
        task.derived_completed = task.completed
        task.derived_size = 1
        task.derived_atomic_task_count = 1
        task.derived_has_open_tasks = task.open()
        assignee_identifier = task.assignee_identifier()
        if assignee_identifier:
            index.assignees = [assignee_identifier]
            if not DEV_SERVER:
                # Uses a multi entity group transaction to get the name
                # of the assignee. This is cached in the record for
                # quick descriptions.
                assignee = api.get_user(assignee_identifier)
                name = assignee.name if assignee else "<Missing>"
            else:
                name = "temp"
            task.derived_assignees[task.assignee_identifier()] = {
                "id": task.assignee_identifier(),
                "name": name,
                "completed": int(task.is_completed()),
                "all": 1,
            }
    else:  # composite task
        # Aggregate the already-computed derived values of the children.
        task.derived_completed = all(t.is_completed() for t in subtasks)
        task.derived_size = 1 + sum(t.derived_size for t in subtasks)
        task.derived_atomic_task_count = sum(t.atomic_task_count()
                                             for t in subtasks)
        task.derived_has_open_tasks = any(t.has_open_tasks()
                                          for t in subtasks)
        # Compute derived assignees, and sum the total of all
        # their assigned and completed subtasks.
        assignees = {}
        for subtask in subtasks:
            subtask_assignees = subtask.derived_assignees
            for id, record in subtask_assignees.iteritems():
                if not id in assignees:
                    assignees[id] = {"id": id,
                                     "name": record["name"],
                                     "completed": 0,
                                     "all": 0}
                assignees[id]["completed"] += record["completed"]
                assignees[id]["all"] += record["all"]
        task.derived_assignees = assignees
        index.assignees = list(assignees.iterkeys())
    task.put()
    # Mirror the freshly derived state into the index for queries.
    index.completed = task.is_completed()
    index.has_open_tasks = task.has_open_tasks()
    index.atomic = task.atomic()
    index.put()
    # Propagate further upwards
    if task.parent_task_identifier():
        UpdateTaskCompletion.enqueue(domain_identifier,
                                     task.parent_task_identifier(),
                                     transactional=True)
def post(self):
    """Recompute the hierarchy index of a task and fan out to its subtasks.

    Reads 'domain', 'task' and 'force_update' from the request. The
    index is rebuilt inside a transaction; when it actually changed,
    an UpdateTaskIndex task is queued for every direct subtask so the
    change propagates down the tree.
    """
    domain_identifier = self.request.get('domain')
    task_identifier = self.request.get('task')
    force_update = self.request.get('force_update')

    def txn():
        # Returns (task, changed) tuple, where changed is set if
        # the task index was updated.
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None, False
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        new_index = False
        if not index:
            index = TaskIndex(parent=task,
                              key_name=task_identifier,
                              hierarchy=[],
                              level=0)
            new_index = True
        parent_identifier = task.parent_task_identifier()
        parent_hierarchy = []
        if parent_identifier:
            parent_key = task.parent_task_key()
            parent_index = TaskIndex.get_by_key_name(parent_identifier,
                                                     parent=parent_key)
            if not parent_index:
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_identifier)
                self.error(500)  # Retry
                return None, False
            parent_hierarchy = parent_index.hierarchy
        # BUG FIX: copy before appending. The original aliased
        # parent_index.hierarchy and appended to it in place, mutating
        # the parent index's in-memory list.
        hierarchy = list(parent_hierarchy)
        if parent_identifier:
            hierarchy.append(parent_identifier)
        if (force_update or new_index
                or set(index.hierarchy) ^ set(hierarchy)):
            index.hierarchy = hierarchy
            index.level = len(hierarchy)
            index.put()
            return task, True
        return task, False

    task, changed = db.run_in_transaction(txn)
    if not changed:
        logging.info("Task '%s/%s' index is unchanged",
                     domain_identifier, task_identifier)
        return
    # Fan out to the direct subtasks outside the transaction.
    query = Task.all(keys_only=True).\
        ancestor(Domain.key_from_name(domain_identifier)).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskIndex.queue_task(domain_identifier,
                                   subtask_identifier,
                                   force_update)
def get(self):
    """Render the index page listing all tasks and URLs."""
    now = datetime.now(pytz.timezone(settings.TIMEZONE))
    context = {
        'tasks': Task.all(),
        'urls': URL.all(),
        'settings': settings,
        'datetime_now': now.strftime(settings.TIMEFORMAT),
    }
    template = JINJA_ENVIRONMENT.get_template('templates/index.html')
    self.response.write(template.render(context))
def txn():
    # Fetch the 50 most recent open, unassigned atomic tasks
    # (no subtasks) of the domain and group them with their hierarchy.
    query = Task.all().ancestor(Domain.key_from_name(domain))
    query.filter('number_of_subtasks =', 0)
    query.filter('completed =', False)
    query.filter('assignee =', None)
    query.order('-time')
    return _group_tasks(query.fetch(50),
                        complete_hierarchy=True,
                        domain=domain)
def get(self):
    """Render the task list page with up to 1000 tasks."""
    tasks = Task.all().fetch(1000)
    # BUG FIX: removed the unused local
    # `timedelta = datetime.timedelta(hours = 8)` - it was computed
    # and never read.
    for task in tasks:
        # NOTE(review): the reassignment does not update `tasks`;
        # presumably mod_task mutates the entity in place -- confirm.
        task = mod_task(task)
    template_values = {
        "tasks": tasks,
    }
    self.response.out.write(template.render("template/list.tpl.html",
                                            template_values))
def txn():
    # Newest-first query for open, unassigned leaf tasks; at most 50.
    domain_key = Domain.key_from_name(domain)
    open_leaves = Task.all().\
        ancestor(domain_key).\
        filter('number_of_subtasks =', 0).\
        filter('completed =', False).\
        filter('assignee =', None).\
        order('-time').\
        fetch(50)
    return _group_tasks(open_leaves, complete_hierarchy=True, domain=domain)
def txn():
    # 'atomic' is a computed property, so probe the datastore for a
    # child task to decide whether this task is actually atomic.
    query = Task.all().ancestor(domain_key)
    query.filter('parent_task =', task_key)
    subtask = query.get()
    if subtask is None:
        # No children found: the task is atomic; refresh its
        # derived completion state.
        workers.UpdateTaskCompletion.enqueue(task.domain_identifier(),
                                             task.identifier())
def txn():
    # Whether a task is atomic is a computed property, so we must ask
    # the datastore whether any task names this one as its parent.
    first_child = Task.all().\
        ancestor(domain_key).\
        filter('parent_task =', task_key).\
        get()
    if not first_child:
        # atomic: no subtasks exist, so recompute completion data
        workers.UpdateTaskCompletion.enqueue(task.domain_identifier(),
                                             task.identifier())
def get(self): for task in Task.all(): for sendDateTime in list(task.sendDateTimeList): try: if self.tz.localize(sendDateTime) < datetime.now(self.tz) and self._sendSMS(task): task.sendDateTimeList.remove(sendDateTime) except Exception, e: logging.error(e) if task.sendDateTimeList: task.put() else: task.delete()
def post(self):
    """Recompute a task's hierarchy list and level, then fan out to
    its direct subtasks so the update propagates downwards.

    Reads 'domain' and 'task' from the request.
    """
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None
        # BUG FIX: bind parent_identifier before use. It was previously
        # referenced in the error log below without ever being defined,
        # raising NameError on the missing-index path.
        parent_identifier = task.parent_task_identifier()
        parent_task = api.get_task(domain_identifier, parent_identifier)
        if parent_task:
            parent_index = TaskIndex.get_by_key_name(
                parent_task.identifier(), parent=parent_task.key())
            if not parent_index:
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_identifier)
                self.error(400)  # Retry later
                return None
            hierarchy = list(parent_index.hierarchy)
            hierarchy.append(parent_task.identifier())
            level = parent_task.derived_level + 1
        else:
            # root task
            hierarchy = []
            level = 0
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        index.hierarchy = hierarchy
        index.put()
        task.derived_level = level
        task.put()
        return task

    task = db.run_in_transaction(txn)
    if not task:
        return
    # Spawn new tasks to propagate downwards. This is done outside
    # the transaction, as only 5 transactional tasks can be
    # queued. It is not a problem if the tasks will fail after the
    # transaction, as this task is then retried, so the
    # propagation will always proceeed.
    query = Task.all(keys_only=True).\
        ancestor(Domain.key_from_name(domain_identifier)).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskHierarchy.enqueue(domain_identifier, subtask_identifier)
def post(self):
    """Rebuild the hierarchy index (path and level) of a single task
    and enqueue the same update for each of its direct subtasks.

    Reads 'domain' and 'task' from the request.
    """
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None
        # BUG FIX: 'parent_identifier' was used in the error log below
        # without being defined anywhere, so the missing-index branch
        # raised NameError instead of logging. Define it up front.
        parent_identifier = task.parent_task_identifier()
        parent_task = api.get_task(domain_identifier, parent_identifier)
        if parent_task:
            parent_index = TaskIndex.get_by_key_name(
                parent_task.identifier(), parent=parent_task.key())
            if not parent_index:
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_identifier)
                self.error(400)  # Retry later
                return None
            hierarchy = list(parent_index.hierarchy)
            hierarchy.append(parent_task.identifier())
            level = parent_task.derived_level + 1
        else:
            # root task
            hierarchy = []
            level = 0
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        index.hierarchy = hierarchy
        index.put()
        task.derived_level = level
        task.put()
        return task

    task = db.run_in_transaction(txn)
    if not task:
        return
    # Spawn new tasks to propagate downwards. This is done outside
    # the transaction, as only 5 transactional tasks can be
    # queued. It is not a problem if the tasks will fail after the
    # transaction, as this task is then retried, so the
    # propagation will always proceeed.
    query = Task.all(keys_only=True).\
        ancestor(Domain.key_from_name(domain_identifier)).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskHierarchy.enqueue(domain_identifier, subtask_identifier)
def post(self):
    """Recompute the hierarchy index of a task and fan out to its subtasks.

    Reads 'domain', 'task' and 'force_update' from the request. The
    index is rebuilt inside a transaction; when it actually changed,
    an UpdateTaskIndex task is queued for every direct subtask so the
    change propagates down the tree.
    """
    domain_identifier = self.request.get('domain')
    task_identifier = self.request.get('task')
    force_update = self.request.get('force_update')

    def txn():
        # Returns (task, changed) tuple, where changed is set if
        # the task index was updated.
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None, False
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        new_index = False
        if not index:
            index = TaskIndex(parent=task,
                              key_name=task_identifier,
                              hierarchy=[],
                              level=0)
            new_index = True
        parent_identifier = task.parent_task_identifier()
        parent_hierarchy = []
        if parent_identifier:
            parent_key = task.parent_task_key()
            parent_index = TaskIndex.get_by_key_name(parent_identifier,
                                                     parent=parent_key)
            if not parent_index:
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_identifier)
                self.error(500)  # Retry
                return None, False
            parent_hierarchy = parent_index.hierarchy
        # BUG FIX: copy before appending. The original aliased
        # parent_index.hierarchy and appended to it in place, mutating
        # the parent index's in-memory list.
        hierarchy = list(parent_hierarchy)
        if parent_identifier:
            hierarchy.append(parent_identifier)
        if (force_update or new_index
                or set(index.hierarchy) ^ set(hierarchy)):
            index.hierarchy = hierarchy
            index.level = len(hierarchy)
            index.put()
            return task, True
        return task, False

    task, changed = db.run_in_transaction(txn)
    if not changed:
        logging.info("Task '%s/%s' index is unchanged",
                     domain_identifier, task_identifier)
        return
    # Fan out to the direct subtasks outside the transaction.
    query = Task.all(keys_only=True).\
        ancestor(Domain.key_from_name(domain_identifier)).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskIndex.queue_task(domain_identifier,
                                   subtask_identifier,
                                   force_update)
def txn():
    # Grab the 50 most recent tasks of the domain (newest first) and
    # group them together with their complete hierarchy.
    domain_key = Domain.key_from_name(domain)
    recent = Task.all().ancestor(domain_key).order('-time').fetch(50)
    return _group_tasks(recent, complete_hierarchy=True, domain=domain)
def txn():
    # Latest 50 tasks of the domain, ordered newest-first, grouped
    # with their full hierarchy for display.
    query = Task.all()
    query.ancestor(Domain.key_from_name(domain))
    query.order('-time')
    return _group_tasks(query.fetch(50),
                        complete_hierarchy=True,
                        domain=domain)
def txn():
    # Recomputes the derived (cached) completion statistics of a single
    # task and its TaskIndex inside a datastore transaction, then enqueues
    # the same update for the parent so the change bubbles up the
    # hierarchy. Closes over domain_identifier, task_identifier and
    # domain_key from the enclosing scope.
    task = api.get_task(domain_identifier, task_identifier)
    if not task:
        logging.error("Task '%s/%s' does not exist",
                      domain_identifier, task_identifier)
        return
    index = TaskIndex.get_by_key_name(task_identifier, parent=task)
    if not index:
        # First time this task is indexed; create its index entity.
        index = TaskIndex(parent=task, key_name=task_identifier)
    # Get all subtasks. The ancestor queries are strongly
    # consistent, so when propagating upwards through the
    # hierarchy the changes are reflected.
    subtasks = list(Task.all().ancestor(domain_key).filter(
        'parent_task =', task.key()))
    if not subtasks:  # atomic task
        # A leaf task: its derived values describe only itself.
        task.derived_completed = task.completed
        task.derived_size = 1
        task.derived_atomic_task_count = 1
        task.derived_has_open_tasks = task.open()
        assignee_identifier = task.assignee_identifier()
        if assignee_identifier:
            index.assignees = [assignee_identifier]
            if not DEV_SERVER:
                # Uses a multi entity group transaction to get the name
                # of the assignee. This is cached in the record for
                # quick descriptions.
                assignee = api.get_user(assignee_identifier)
                name = assignee.name if assignee else '<Missing>'
            else:
                name = 'temp'
            task.derived_assignees[task.assignee_identifier()] = {
                'id': task.assignee_identifier(),
                'name': name,
                'completed': int(task.is_completed()),
                'all': 1
            }
    else:  # composite task
        # Aggregate the already-computed derived values of the children.
        task.derived_completed = all(t.is_completed() for t in subtasks)
        task.derived_size = 1 + sum(t.derived_size for t in subtasks)
        task.derived_atomic_task_count = sum(t.atomic_task_count()
                                             for t in subtasks)
        task.derived_has_open_tasks = any(t.has_open_tasks()
                                          for t in subtasks)
        # Compute derived assignees, and sum the total of all
        # their assigned and completed subtasks.
        assignees = {}
        for subtask in subtasks:
            subtask_assignees = subtask.derived_assignees
            for id, record in subtask_assignees.iteritems():
                if not id in assignees:
                    assignees[id] = {
                        'id': id,
                        'name': record['name'],
                        'completed': 0,
                        'all': 0
                    }
                assignees[id]['completed'] += record['completed']
                assignees[id]['all'] += record['all']
        task.derived_assignees = assignees
        index.assignees = list(assignees.iterkeys())
    task.put()
    # Mirror the freshly derived state into the index for queries.
    index.completed = task.is_completed()
    index.has_open_tasks = task.has_open_tasks()
    index.atomic = task.atomic()
    index.put()
    # Propagate further upwards
    if task.parent_task_identifier():
        UpdateTaskCompletion.enqueue(domain_identifier,
                                     task.parent_task_identifier(),
                                     transactional=True)