def post(self):
    """Worker: recompute the hierarchy index (ancestor identifier list
    and derived level) of a single task, then enqueue the same update
    for each direct subtask so the change propagates downwards.

    Expects 'domain' and 'task' identifiers in the POST payload.
    """
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None
        parent_task = api.get_task(domain_identifier,
                                   task.parent_task_identifier())
        if parent_task:
            parent_index = TaskIndex.get_by_key_name(
                parent_task.identifier(), parent=parent_task.key())
            if not parent_index:
                # Bug fix: this log line referenced an undefined name
                # 'parent_identifier', raising a NameError instead of
                # logging. Use the parent task's identifier instead.
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_task.identifier())
                self.error(400)  # Non-2xx status, so the queue retries later
                return None
            hierarchy = list(parent_index.hierarchy)
            hierarchy.append(parent_task.identifier())
            level = parent_task.derived_level + 1
        else:
            # Root task: empty ancestor list, level zero.
            hierarchy = []
            level = 0
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        index.hierarchy = hierarchy
        index.put()
        task.derived_level = level
        task.put()
        return task

    task = db.run_in_transaction(txn)
    if not task:
        return
    # Spawn new tasks to propagate downwards. This is done outside
    # the transaction, as only 5 transactional tasks can be
    # queued. It is not a problem if the tasks will fail after the
    # transaction, as this task is then retried, so the
    # propagation will always proceeed.
    query = Task.all(keys_only=True).\
        ancestor(Domain.key_from_name(domain_identifier)).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskHierarchy.enqueue(domain_identifier, subtask_identifier)
def txn():
    """Fetch the user's 50 most recent assigned tasks in this domain,
    incomplete tasks first, grouped with their complete hierarchy."""
    domain_key = Domain.key_from_name(domain)
    assigned = (user.assigned_tasks.
                ancestor(domain_key).
                order('completed').
                order('-time'))
    return _group_tasks(assigned.fetch(50),
                        complete_hierarchy=True,
                        domain=domain)
def rebuild_hierarchy(task):
    """
    Rebuilds all derived properties and hierarchies. This includes the
    TaskIndexes. This operation will only create tasks, which will do
    the actual work.

    Args:
        task: An instance of the Task model whose derived data must be
            rebuilt.
    """
    if task.root():
        workers.UpdateTaskHierarchy.enqueue(task.domain_identifier(),
                                            task.identifier())
    domain_key = Domain.key_from_name(task.domain_identifier())
    task_key = task.key()
    # Fix: pass lazy %-args to logging instead of eagerly formatting
    # the message, matching the logging style used elsewhere here.
    logging.info("Domain_key %s", domain_key)

    def txn():
        # Actual test in the datastore to see if the task is atomic,
        # as it is a computed property.
        query = Task.all().\
            ancestor(domain_key).\
            filter('parent_task =', task_key)
        subtask = query.get()
        if not subtask:
            # No subtasks: the task is atomic, so only its completion
            # state needs recomputing.
            workers.UpdateTaskCompletion.enqueue(task.domain_identifier(),
                                                 task.identifier())

    db.run_in_transaction(txn)
def get_all_direct_subtasks(domain_identifier,
                            root_task=None,
                            limit=100,
                            user_identifier=None):
    """ Returns all direct subtasks of a |root_task| in the given domain.

    If no |root_task| is specified, all root tasks of the domain are
    returned instead. At most |limit| tasks are returned.

    Args:
        domain_identifier: The domain identifier string
        root_task: An instance of the Task model, or None
        limit: The maximum number of tasks that will be returned
        user_identifier: Optional user identifier. If provided, the
            tasks will be sorted on their active state for that user.

    Returns:
        A list of at most |limit| task instances of the domain, all
        direct descendants of |root_task| (or all root tasks when no
        |root_task| is given), ordered on completion state and, when a
        |user_identifier| is provided, on active state.
    """
    domain_key = Domain.key_from_name(domain_identifier)
    subtask_query = (Task.all().
                     ancestor(domain_key).
                     filter('parent_task = ', root_task))
    result = subtask_query.fetch(limit)
    _sort_tasks(result, user_identifier=user_identifier)
    return result
def post(self):
    # Worker: (re)build the TaskIndex record (ancestor list + level)
    # of one task and, when it changed, fan the update out to all
    # direct subtasks. Expects 'domain', 'task' and an optional
    # 'force_update' flag in the POST payload.
    domain_identifier = self.request.get('domain')
    task_identifier = self.request.get('task')
    force_update = self.request.get('force_update')

    def txn():
        # Returns (task, changed) tuple, where changed is set if
        # the task index was updated.
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None, False
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        new_index = False
        if not index:
            index = TaskIndex(parent=task,
                              key_name=task_identifier,
                              hierarchy=[],
                              level=0)
            new_index = True
        parent_identifier = task.parent_task_identifier()
        parent_hierarchy = []
        if parent_identifier:
            parent_key = task.parent_task_key()
            parent_index = TaskIndex.get_by_key_name(parent_identifier,
                                                     parent=parent_key)
            if not parent_index:
                # Parent's index has not been computed yet; fail with a
                # non-2xx status so the task queue retries this worker.
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_identifier)
                self.error(500)  # Retry
                return None, False
            parent_hierarchy = parent_index.hierarchy
        hierarchy = parent_hierarchy
        # NOTE(review): 'hierarchy' aliases parent_index.hierarchy, so
        # the append below also mutates the in-memory parent index.
        # Looks harmless because parent_index is never put() in this
        # transaction — confirm.
        if parent_identifier:
            hierarchy.append(parent_identifier)
        # Symmetric set difference: rewrite the index when membership
        # of the ancestor list changed; ordering and duplicates are
        # deliberately ignored by this comparison.
        if (force_update or new_index or
                set(index.hierarchy) ^ set(hierarchy)):
            index.hierarchy = hierarchy
            index.level = len(hierarchy)
            index.put()
            return task, True
        return task, False

    task, changed = db.run_in_transaction(txn)
    if not changed:
        logging.info("Task '%s/%s' index is unchanged",
                     domain_identifier, task_identifier)
        return
    # Index changed: queue an index update for every direct subtask,
    # outside the transaction.
    query = Task.all(keys_only=True).\
        ancestor(Domain.key_from_name(domain_identifier)).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskIndex.queue_task(domain_identifier,
                                   subtask_identifier,
                                   force_update)
def txn():
    """Return up to 50 open, unassigned atomic tasks of the domain,
    newest first, grouped with their complete hierarchy."""
    open_atomic = (Task.all().
                   ancestor(Domain.key_from_name(domain)).
                   filter('number_of_subtasks =', 0).
                   filter('completed =', False).
                   filter('assignee =', None).
                   order('-time'))
    return _group_tasks(open_atomic.fetch(50),
                        complete_hierarchy=True,
                        domain=domain)
def txn():
    """Fetch tasks assigned to |user| exactly one hierarchy level
    below |root_task| (or at the root level when no root is given)."""
    if root_task:
        target_level = root_task.hierarchy_level() + 1
    else:
        target_level = 0
    index_query = (TaskIndex.all(keys_only=True).
                   ancestor(Domain.key_from_name(domain_identifier)).
                   filter('assignees =', user.identifier()).
                   filter('level =', target_level))
    if root_task:
        index_query.filter('hierarchy =', root_task.identifier())
    index_keys = index_query.fetch(limit)
    # Each TaskIndex is a child entity of its task, so the parent key
    # of every index key is the task key itself.
    return Task.get([index_key.parent() for index_key in index_keys])
def get_all_subtasks(domain, task, limit=50, depth_limit=None):
    """ Returns a list of all subtasks of the given task, ordered as a
    pre-order traversal through the task hierarchy.

    One query is performed per level of the subtask hierarchy.

    Args:
        domain: The domain identifier string.
        task: An instance of the Task model.
        limit: The maximum number of subtasks to return.
        depth_limit: The maximum depth of subtasks in the task hierarchy.

    Returns:
        A list with all subtasks of the given task.

    Raises:
        ValueError: The depth_limit or limit are not positive integers
    """
    if not depth_limit:
        # ListProperties cannot contain more than 5000 elements anyway
        depth_limit = 5000
    if depth_limit < 0 or limit < 0:
        raise ValueError("Invalid limits")
    base_level = task.level
    collected = []
    for depth in range(depth_limit):
        index_query = (TaskIndex.all(keys_only=True).
                       ancestor(Domain.key_from_name(domain)).
                       filter('level = ', base_level + depth + 1).
                       filter('hierarchy = ', task.identifier()))
        index_keys = index_query.fetch(limit)
        collected.extend(Task.get([key.parent() for key in index_keys]))
        limit -= len(index_keys)
        if not index_keys or limit < 1:
            break               # stop
    # Order on completion status, then newest first; the datastore
    # query cannot express this combined ordering.
    def task_cmp(lhs, rhs):
        if lhs.completed != rhs.completed:
            return cmp(lhs.completed, rhs.completed)
        return -cmp(lhs.time, rhs.time)
    collected.sort(cmp=task_cmp)
    return _group_tasks(collected)
def get_task(domain, task):
    """Gets a task in a domain.

    Args:
        domain: The domain identifier
        task: The task key id or name. Can either be an int or a
            string, or None, in which case None is returned.

    Returns:
        A task instance, or None if no task exists or |task| is None.
    """
    # Consistency/robustness fix: mirror the None-tolerant variant of
    # this helper; without this guard int(None) raises a TypeError.
    if not task:
        return None
    domain_key = Domain.key_from_name(domain)
    try:
        task_id = int(task)
        return Task.get_by_id(task_id, parent=domain_key)
    except ValueError:
        # Not numeric, so treat the identifier as a key name.
        return Task.get_by_key_name(task, parent=domain_key)
def txn():
    """Transactionally create a new task, optionally as a child of
    |parent_task_identifier|, and queue the derived-property workers.

    Raises:
        ValueError: A parent identifier was supplied but no such task
            exists in the domain.
    """
    task = Task(parent=Domain.key_from_name(domain_identifier),
                description=description,
                user=user,
                context=user.default_context_key())
    # TODO(tijmen): This get is redundant, the key can
    # be derived from the identifier and the domain.
    parent_task = get_task(domain_identifier, parent_task_identifier)
    # Robustness fix: fail loudly when a parent identifier was given
    # but does not resolve, instead of silently creating a root task
    # (matches the validating variant of this transaction).
    if parent_task_identifier and not parent_task:
        raise ValueError("Parent task '%s' does not exist"
                         % parent_task_identifier)
    task.parent_task = parent_task
    task.put()
    workers.UpdateTaskCompletion.enqueue(domain_identifier,
                                         task.identifier(),
                                         transactional=True)
    workers.UpdateTaskHierarchy.enqueue(domain_identifier,
                                        task.identifier(),
                                        transactional=True)
    return task
def txn():
    """Create and persist a new task inside the transaction, then
    queue the workers that recompute its derived properties."""
    new_task = Task(parent=Domain.key_from_name(domain_identifier),
                    description=description,
                    user=user,
                    context=user.default_context_key())
    parent_task = get_task(domain_identifier, parent_task_identifier)
    if parent_task_identifier and not parent_task:
        raise ValueError("Parent task '%s' does not exist"
                         % parent_task_identifier)
    new_task.parent_task = parent_task
    new_task.put()
    workers.UpdateTaskCompletion.enqueue(domain_identifier,
                                         new_task.identifier(),
                                         transactional=True)
    workers.UpdateTaskHierarchy.enqueue(domain_identifier,
                                        new_task.identifier(),
                                        transactional=True)
    return new_task
def admin_of_domain(domain_identifier, user):
    """Returns true iff the user is a member and admin of the domain.

    Args:
        domain_identifier: The domain identifier
        user: Instance of the user model

    Returns:
        True if the user is a member and an admin of the domain.
    """
    if not member_of_domain(domain_identifier, user):
        return False
    # Key-only existence check: match the domain entity itself while
    # requiring the user in its admins list.
    admin_query = (Domain.all(keys_only=True).
                   filter('__key__ =', Domain.key_from_name(domain_identifier)).
                   filter('admins =', user.identifier()))
    return bool(admin_query.fetch(1))
def txn():
    """Create a task, optionally nested under |parent_task|, and bump
    the parent's subtask counters inside the same transaction."""
    if parent_task:
        super_task = get_task(domain, parent_task)
        if not super_task:
            raise ValueError("Parent task does not exist")
    else:
        super_task = None
    new_level = super_task.level + 1 if super_task else 0
    task = Task(parent=Domain.key_from_name(domain),
                description=description,
                user=user,
                context=user.default_context_key(),
                parent_task=super_task,
                level=new_level)
    if super_task:
        super_task.number_of_subtasks += 1
        super_task.increment_incomplete_subtasks()
        super_task.put()
    if assignee:
        # Cache the assignee's name on the task for cheap display.
        task.baked_assignee_description = assignee.name
    task.put()
    return task
def get_task(domain_identifier, task_identifier):
    """Look up a task in a domain by its identifier.

    Args:
        domain_identifier: The domain identifier
        task_identifier: The task identifier, as an int or string.
            May also be None, in which case None is returned.

    Returns:
        A task instance, or None if no task exists. If
        |task_identifier| was set to None, None will always be
        returned.
    """
    if not task_identifier:
        return None
    parent_key = Domain.key_from_name(domain_identifier)
    try:
        numeric_id = int(task_identifier)
    except ValueError:
        # Not numeric, so the identifier must be a key name.
        return Task.get_by_key_name(task_identifier, parent=parent_key)
    return Task.get_by_id(numeric_id, parent=parent_key)
def txn():
    """Return the domain's 50 most recent tasks, newest first,
    grouped with their complete hierarchy."""
    recent = (Task.all().
              ancestor(Domain.key_from_name(domain)).
              order('-time'))
    return _group_tasks(recent.fetch(50),
                        complete_hierarchy=True,
                        domain=domain)
def post(self):
    # Worker: recompute all derived completion data of one task from
    # its direct subtasks, update the accompanying TaskIndex, then
    # propagate the recomputation to the parent task. Expects
    # 'domain' and 'task' in the POST payload.
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        # Get all subtasks. The ancestor queries are strongly
        # consistent, so when propagating upwards through the
        # hierarchy the changes are reflected.
        subtasks = list(Task.all().ancestor(domain_key).filter(
            'parent_task =', task.key()))
        if not subtasks:        # atomic task
            task.derived_completed = task.completed
            task.derived_size = 1
            task.derived_atomic_task_count = 1
            task.derived_has_open_tasks = task.open()
            assignee_identifier = task.assignee_identifier()
            if assignee_identifier:
                index.assignees = [assignee_identifier]
                if not DEV_SERVER:
                    # Uses a multi entity group transaction to get the name
                    # of the assignee. This is cached in the record for
                    # quick descriptions.
                    assignee = api.get_user(assignee_identifier)
                    name = assignee.name if assignee else '<Missing>'
                else:
                    name = 'temp'
                # NOTE(review): derived_assignees is updated, not
                # reset, so an entry for a previous assignee may
                # linger — confirm intended.
                task.derived_assignees[task.assignee_identifier()] = {
                    'id': task.assignee_identifier(),
                    'name': name,
                    'completed': int(task.is_completed()),
                    'all': 1
                }
        else:                   # composite task
            task.derived_completed = all(t.is_completed()
                                         for t in subtasks)
            task.derived_size = 1 + sum(t.derived_size
                                        for t in subtasks)
            task.derived_atomic_task_count = sum(t.atomic_task_count()
                                                 for t in subtasks)
            task.derived_has_open_tasks = any(t.has_open_tasks()
                                              for t in subtasks)
            # Compute derived assignees, and sum the total of all
            # their assigned and completed subtasks.
            assignees = {}
            for subtask in subtasks:
                subtask_assignees = subtask.derived_assignees
                for id, record in subtask_assignees.iteritems():
                    if not id in assignees:
                        assignees[id] = {
                            'id': id,
                            'name': record['name'],
                            'completed': 0,
                            'all': 0
                        }
                    assignees[id]['completed'] += record['completed']
                    assignees[id]['all'] += record['all']
            task.derived_assignees = assignees
            index.assignees = list(assignees.iterkeys())
        task.put()
        # Mirror the freshly derived state into the index record used
        # by the hierarchy queries.
        index.completed = task.is_completed()
        index.has_open_tasks = task.has_open_tasks()
        index.atomic = task.atomic()
        index.put()
        # Propagate further upwards
        if task.parent_task_identifier():
            UpdateTaskCompletion.enqueue(domain_identifier,
                                         task.parent_task_identifier(),
                                         transactional=True)

    # Use a multi entity group transaction, if available
    if DEV_SERVER:
        db.run_in_transaction(txn)
    else:
        options = datastore_rpc.TransactionOptions(
            allow_multiple_entity_groups=True)
        datastore.RunInTransactionOptions(options, txn)
def post(self):
    # Worker: recompute all derived completion data of one task from
    # its direct subtasks, update the accompanying TaskIndex, then
    # propagate the recomputation to the parent task. Expects
    # 'domain' and 'task' in the POST payload.
    domain_identifier = self.request.get("domain")
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get("task")

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        # Get all subtasks. The ancestor queries are strongly
        # consistent, so when propagating upwards through the
        # hierarchy the changes are reflected.
        subtasks = list(Task.all().ancestor(domain_key).filter(
            "parent_task =", task.key()))
        if not subtasks:        # atomic task
            task.derived_completed = task.completed
            task.derived_size = 1
            task.derived_atomic_task_count = 1
            task.derived_has_open_tasks = task.open()
            assignee_identifier = task.assignee_identifier()
            if assignee_identifier:
                index.assignees = [assignee_identifier]
                if not DEV_SERVER:
                    # Uses a multi entity group transaction to get the name
                    # of the assignee. This is cached in the record for
                    # quick descriptions.
                    assignee = api.get_user(assignee_identifier)
                    name = assignee.name if assignee else "<Missing>"
                else:
                    name = "temp"
                # NOTE(review): derived_assignees is updated, not
                # reset, so an entry for a previous assignee may
                # linger — confirm intended.
                task.derived_assignees[task.assignee_identifier()] = {
                    "id": task.assignee_identifier(),
                    "name": name,
                    "completed": int(task.is_completed()),
                    "all": 1,
                }
        else:                   # composite task
            task.derived_completed = all(t.is_completed()
                                         for t in subtasks)
            task.derived_size = 1 + sum(t.derived_size
                                        for t in subtasks)
            task.derived_atomic_task_count = sum(t.atomic_task_count()
                                                 for t in subtasks)
            task.derived_has_open_tasks = any(t.has_open_tasks()
                                              for t in subtasks)
            # Compute derived assignees, and sum the total of all
            # their assigned and completed subtasks.
            assignees = {}
            for subtask in subtasks:
                subtask_assignees = subtask.derived_assignees
                for id, record in subtask_assignees.iteritems():
                    if not id in assignees:
                        assignees[id] = {"id": id,
                                         "name": record["name"],
                                         "completed": 0,
                                         "all": 0}
                    assignees[id]["completed"] += record["completed"]
                    assignees[id]["all"] += record["all"]
            task.derived_assignees = assignees
            index.assignees = list(assignees.iterkeys())
        task.put()
        # Mirror the freshly derived state into the index record used
        # by the hierarchy queries.
        index.completed = task.is_completed()
        index.has_open_tasks = task.has_open_tasks()
        index.atomic = task.atomic()
        index.put()
        # Propagate further upwards
        if task.parent_task_identifier():
            UpdateTaskCompletion.enqueue(domain_identifier,
                                         task.parent_task_identifier(),
                                         transactional=True)

    # Use a multi entity group transaction, if available
    if DEV_SERVER:
        db.run_in_transaction(txn)
    else:
        # Cross-group (XG) transaction: task, index and parent may
        # live in different entity groups.
        xg_on = db.create_transaction_options(xg=True)
        db.run_in_transaction_options(xg_on, txn)
def post(self):
    # Worker: recompute all derived completion data of one task from
    # its direct subtasks, update the accompanying TaskIndex, then
    # propagate the recomputation to the parent task. Expects
    # 'domain' and 'task' in the POST payload.
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        # Get all subtasks. The ancestor queries are strongly
        # consistent, so when propagating upwards through the
        # hierarchy the changes are reflected.
        subtasks = list(Task.all().
                        ancestor(domain_key).
                        filter('parent_task =', task.key()))
        if not subtasks:        # atomic task
            task.derived_completed = task.completed
            task.derived_size = 1
            task.derived_atomic_task_count = 1
            task.derived_has_open_tasks = task.open()
            assignee_identifier = task.assignee_identifier()
            if assignee_identifier:
                index.assignees = [assignee_identifier]
                if not DEV_SERVER:
                    # Uses a multi entity group transaction to get the name
                    # of the assignee. This is cached in the record for
                    # quick descriptions.
                    assignee = api.get_user(assignee_identifier)
                    name = assignee.name if assignee else '<Missing>'
                else:
                    name = 'temp'
                # NOTE(review): derived_assignees is updated, not
                # reset, so an entry for a previous assignee may
                # linger — confirm intended.
                task.derived_assignees[task.assignee_identifier()] = {
                    'id': task.assignee_identifier(),
                    'name': name,
                    'completed': int(task.is_completed()),
                    'all': 1
                }
        else:                   # composite task
            task.derived_completed = all(t.is_completed()
                                         for t in subtasks)
            task.derived_size = 1 + sum(t.derived_size
                                        for t in subtasks)
            task.derived_atomic_task_count = sum(t.atomic_task_count()
                                                 for t in subtasks)
            task.derived_has_open_tasks = any(t.has_open_tasks()
                                              for t in subtasks)
            # Compute derived assignees, and sum the total of all
            # their assigned and completed subtasks.
            assignees = {}
            for subtask in subtasks:
                subtask_assignees = subtask.derived_assignees
                for id, record in subtask_assignees.iteritems():
                    if not id in assignees:
                        assignees[id] = {
                            'id': id,
                            'name': record['name'],
                            'completed': 0,
                            'all': 0
                        }
                    assignees[id]['completed'] += record['completed']
                    assignees[id]['all'] += record['all']
            task.derived_assignees = assignees
            index.assignees = list(assignees.iterkeys())
        task.put()
        # Mirror the freshly derived state into the index record used
        # by the hierarchy queries.
        index.completed = task.is_completed()
        index.has_open_tasks = task.has_open_tasks()
        index.atomic = task.atomic()
        index.put()
        # Propagate further upwards
        if task.parent_task_identifier():
            UpdateTaskCompletion.enqueue(domain_identifier,
                                         task.parent_task_identifier(),
                                         transactional=True)

    # Use a multi entity group transaction, if available
    if DEV_SERVER:
        db.run_in_transaction(txn)
    else:
        options = datastore_rpc.TransactionOptions(
            allow_multiple_entity_groups=True)
        datastore.RunInTransactionOptions(options, txn)