def post(self):
    """Worker: rebuild a task's TaskIndex hierarchy from its parent's
    index, then queue the same update for every direct subtask.

    Request parameters:
        domain: the domain identifier string
        task: the task identifier within the domain
    """
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None
        parent_task = api.get_task(domain_identifier,
                                   task.parent_task_identifier())
        if parent_task:
            parent_index = TaskIndex.get_by_key_name(
                parent_task.identifier(), parent=parent_task.key())
            if not parent_index:
                # BUG FIX: the original logged an undefined name
                # 'parent_identifier' here, which raised a NameError
                # instead of reporting the missing index.
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_task.identifier())
                self.error(400)  # Retry later
                return None
            hierarchy = list(parent_index.hierarchy)
            hierarchy.append(parent_task.identifier())
            level = parent_task.derived_level + 1
        else:  # root task
            hierarchy = []
            level = 0
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        index.hierarchy = hierarchy
        index.put()
        task.derived_level = level
        task.put()
        return task

    task = db.run_in_transaction(txn)
    if not task:
        return
    # Spawn new tasks to propagate downwards. This is done outside
    # the transaction, as only 5 transactional tasks can be
    # queued. It is not a problem if the tasks will fail after the
    # transaction, as this task is then retried, so the
    # propagation will always proceeed.
    query = Task.all(keys_only=True).\
        ancestor(domain_key).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskHierarchy.enqueue(domain_identifier, subtask_identifier)
def createDomain(self, coinbase, resigning):
    """Return a domain for |coinbase|, reusing a free one when possible.

    If a relayer with this coinbase exists, its current domain's 'used'
    flag is set to (not resigning). Then the oldest unused domain is
    claimed (only when the relayer was missing); when no free domain
    exists, a brand new one is created and returned.

    Args:
        coinbase: The relayer coinbase address.
        resigning: True when the relayer is resigning its domain.

    Returns:
        The domain string assigned to the coinbase.
    """
    used = not resigning
    relayer_missing = False
    # BUG FIX: the original used a bare `except: pass` around this
    # whole section; when the relayer was missing, `db_relayer` was
    # undefined and the resulting NameError was silently swallowed.
    # Handle only the expected lookup miss explicitly.
    try:
        db_relayer = Relayer.select().where(
            Relayer.coinbase == coinbase).get()
    except Relayer.DoesNotExist:
        relayer_missing = True
    else:
        db_relayer = model_to_dict(db_relayer)
        Domain.update(used=used, coinbase=coinbase).where(
            Domain.domain == db_relayer['domain']).execute()
    try:
        db_domain = Domain.select().where(Domain.used == False).order_by(
            Domain.id.asc()).get()
    except Domain.DoesNotExist:
        # No free domain: mint a new one named after the count of used
        # domains, e.g. https://003.<suffix>.
        countDomain = Domain.select().where(Domain.used == True).count()
        domain = 'https://' + format(
            countDomain, '03d') + '.' + settings['domain_suffix']
        Domain.insert(
            domain=domain, used=True,
            coinbase=coinbase).on_conflict_ignore(True).execute()
        return domain
    db_domain = model_to_dict(db_domain)
    if relayer_missing:
        # Claim the free domain for this new relayer.
        Domain.update(used=True, coinbase=coinbase).where(
            Domain.domain == db_domain['domain']).execute()
    return db_domain['domain']
def get_all_direct_subtasks(domain_identifier, root_task=None, limit=100,
                            user_identifier=None):
    """Fetch the direct subtasks of |root_task| in a domain.

    When |root_task| is None, the domain's root tasks are returned
    instead. At most |limit| tasks are fetched.

    Args:
        domain_identifier: The domain identifier string
        root_task: An instance of the Task model, or None for roots
        limit: The maximum number of tasks that will be returned
        user_identifier: Optional user identifier; when given, the
            result is additionally ordered on that user's active state.

    Returns:
        A list of at most |limit| Task instances, all direct children
        of |root_task| (or all root tasks), ordered on completion
        state and, when a user is given, on active state.
    """
    domain_ancestor = Domain.key_from_name(domain_identifier)
    query = Task.all()
    query.ancestor(domain_ancestor)
    query.filter('parent_task = ', root_task)
    result = query.fetch(limit)
    _sort_tasks(result, user_identifier=user_identifier)
    return result
def txn():
    # The user's assigned tasks: incomplete first, then newest first.
    domain_key = Domain.key_from_name(domain)
    query = user.assigned_tasks.ancestor(domain_key)
    query.order('completed')
    query.order('-time')
    fetched = query.fetch(50)
    return _group_tasks(fetched, complete_hierarchy=True, domain=domain)
def rebuild_hierarchy(task):
    """Rebuild all derived properties and hierarchies of |task|,
    including its TaskIndexes.

    This operation only enqueues worker tasks; the workers do the
    actual recomputation.
    """
    if task.root():
        workers.UpdateTaskHierarchy.enqueue(task.domain_identifier(),
                                            task.identifier())
    domain_key = Domain.key_from_name(task.domain_identifier())
    task_key = task.key()
    logging.info("Domain_key %s" % domain_key)

    def txn():
        # Actual test in the datastore to see if the task is atomic,
        # as it is a computed property.
        first_child = Task.all().ancestor(domain_key).filter(
            'parent_task =', task_key).get()
        if first_child is None:  # atomic
            workers.UpdateTaskCompletion.enqueue(task.domain_identifier(),
                                                 task.identifier())

    db.run_in_transaction(txn)
def post(self):
    """Worker: update a task's materialized hierarchy (TaskIndex) and
    queue the same update for all of its direct subtasks.

    Request parameters:
        domain: the domain identifier string
        task: the task identifier
        force_update: when set, rewrite the index even if unchanged
    """
    domain_identifier = self.request.get('domain')
    task_identifier = self.request.get('task')
    force_update = self.request.get('force_update')

    def txn():
        # Returns (task, changed) tuple, where changed is set if
        # the task index was updated.
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return None, False
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        new_index = False
        if not index:
            index = TaskIndex(parent=task,
                              key_name=task_identifier,
                              hierarchy=[],
                              level=0)
            new_index = True
        parent_identifier = task.parent_task_identifier()
        parent_hierarchy = []
        if parent_identifier:
            parent_key = task.parent_task_key()
            parent_index = TaskIndex.get_by_key_name(parent_identifier,
                                                     parent=parent_key)
            if not parent_index:
                logging.error("Missing index for parent task '%s/%s'",
                              domain_identifier, parent_identifier)
                self.error(500)  # Retry
                return None, False
            parent_hierarchy = parent_index.hierarchy
        # BUG FIX: copy before appending. The original aliased the
        # parent index's hierarchy list and appended to it in place.
        hierarchy = list(parent_hierarchy)
        if parent_identifier:
            hierarchy.append(parent_identifier)
        # BUG FIX: compare as ordered lists. The original used
        # set(a) ^ set(b), which misses reorderings and duplicates.
        if force_update or new_index or index.hierarchy != hierarchy:
            index.hierarchy = hierarchy
            index.level = len(hierarchy)
            index.put()
            return task, True
        return task, False

    task, changed = db.run_in_transaction(txn)
    if not changed:
        logging.info("Task '%s/%s' index is unchanged",
                     domain_identifier, task_identifier)
        return
    # Propagate downwards, outside the transaction.
    query = Task.all(keys_only=True).\
        ancestor(Domain.key_from_name(domain_identifier)).\
        filter('parent_task =', task.key())
    for subtask_key in query:
        subtask_identifier = subtask_key.id_or_name()
        # TODO(tijmen): Batch queue tasks
        UpdateTaskIndex.queue_task(domain_identifier, subtask_identifier,
                                   force_update)
def admin_of_domain(domain_identifier, user):
    """Returns true iff the user is a member and admin of the domain.

    Args:
        domain_identifier: The domain identifier string
        user: Instance of the user model

    Returns:
        True if the user is a member and an admin of the domain.
    """
    if not member_of_domain(domain_identifier, user):
        return False
    domain_key = Domain.key_from_name(domain_identifier)
    query = Domain.all(keys_only=True)
    query.filter('__key__ =', domain_key)
    query.filter('admins =', user.identifier())
    return bool(query.fetch(1))
def txn():
    # Open work: atomic (no subtasks), incomplete, unassigned tasks,
    # newest first.
    query = Task.all().ancestor(Domain.key_from_name(domain))
    query.filter('number_of_subtasks =', 0)
    query.filter('completed =', False)
    query.filter('assignee =', None)
    query.order('-time')
    fetched = query.fetch(50)
    return _group_tasks(fetched, complete_hierarchy=True, domain=domain)
def txn():
    # One level below the root task (or the top level when no root).
    depth = root_task.hierarchy_level() + 1 if root_task else 0
    index_query = TaskIndex.all(keys_only=True)
    index_query.ancestor(Domain.key_from_name(domain_identifier))
    index_query.filter('assignees =', user.identifier())
    index_query.filter('level =', depth)
    if root_task:
        index_query.filter('hierarchy =', root_task.identifier())
    index_keys = index_query.fetch(limit)
    # Each TaskIndex is a child entity of its Task, so the parent keys
    # identify the tasks themselves.
    return Task.get([key.parent() for key in index_keys])
def create_domain(domain, domain_title, user):
    """Creates a new domain, if none already exists with that identifier.

    The user will become an admin on the newly created domain, and the
    domain will be added to the list of domains of the user. The
    updates will be stored in the datastore.

    Args:
        domain: The domain identifier of the new domain. Must be a
            lowercase alphanumeric string of length less than 100. The
            identifier must match the VALID_DOMAIN_IDENTIFIER regexp.
        domain_title: The string title of the new domain. The string
            must be non-empty.
        user: Instance of the User model that creates the domain.

    Returns:
        The newly created Domain instance. |user| will be set as admin
        of the new domain. Returns None if a domain already exists
        with that identifier, the identifier is not valid or the
        domain_title is empty.
    """
    # TODO(tijmen): Use multiple entity group transaction here
    # BUG FIX: ''.splitlines() is [], so the original raised IndexError
    # on an empty title instead of returning None as documented.
    title_lines = domain_title.splitlines() if domain_title else []
    if not title_lines:
        return None
    domain_title = title_lines[0].strip()
    if (not re.match(VALID_DOMAIN_IDENTIFIER, domain)
        or not domain_title):
        return None
    existing = Domain.get_by_key_name(domain)
    if existing:
        return None
    new_domain = Domain(key_name=domain,
                        name=domain_title,
                        admins=[user.key().name()])
    new_domain.put()

    def txn(user_key):
        # Transactionally record the membership on the user.
        txn_user = User.get(user_key)
        if not domain in txn_user.domains:
            txn_user.domains.append(domain)
            txn_user.put()

    db.run_in_transaction(txn, user.key())
    return new_domain
async def get(self):
    """Respond with a JSON snapshot of all relayers, deployed
    contracts, tokens and domains."""
    relayer_dicts = [model_to_dict(r or {}) for r in Relayer.select()]
    token_dicts = [model_to_dict(t or {}) for t in Token.select()]
    domain_dicts = [model_to_dict(d or {}) for d in Domain.select()]
    self.json_response({
        'Relayers': relayer_dicts,
        'Contracts': Blockchain.contracts,
        'Tokens': token_dicts,
        'Domains': domain_dicts
    })
def get_domain(domain_identifier):
    """Look up the Domain model instance for an identifier.

    Args:
        domain_identifier: The domain identifier string

    Returns:
        The matching Domain instance, or None when no domain exists
        with the given identifier.
    """
    domain = Domain.get_by_key_name(domain_identifier)
    return domain
def get_all_domains_for_user(user):
    """Return the Domain instances the given user is a member of.

    Args:
        user: An instance of the User model

    Returns:
        A list of Domain model instances.
    """
    keys = []
    for domain in user.domains:
        keys.append(db.Key.from_path('Domain', domain))
    return Domain.get(keys)
def get_all_subtasks(domain, task, limit=50, depth_limit=None):
    """Return all subtasks of |task|, in pre-order traversal order of
    the task hierarchy.

    Performs one query per level of the subtask hierarchy.

    Args:
        domain: The domain identifier string.
        task: An instance of the Task model.
        limit: The maximum number of subtasks to return.
        depth_limit: The maximum depth of subtasks in the hierarchy.

    Returns:
        A list with all subtasks of the given task.

    Raises:
        ValueError: The depth_limit or limit are not positive integers
    """
    if not depth_limit:
        # ListProperties cannot contain more than 5000 elements anyway
        depth_limit = 5000
    if depth_limit < 0 or limit < 0:
        raise ValueError("Invalid limits")
    base_level = task.level
    collected = []
    remaining = limit
    for depth in range(depth_limit):
        index_keys = TaskIndex.all(keys_only=True).\
            ancestor(Domain.key_from_name(domain)).\
            filter('level = ', base_level + depth + 1).\
            filter('hierarchy = ', task.identifier()).\
            fetch(remaining)
        collected.extend(Task.get([key.parent() for key in index_keys]))
        remaining = remaining - len(index_keys)
        if not index_keys or remaining < 1:
            break  # stop

    # Sort the tasks on completion status and then on time, as this is
    # not possible in the query.
    def task_cmp(t1, t2):
        if t1.completed != t2.completed:
            return cmp(t1.completed, t2.completed)
        return -cmp(t1.time, t2.time)

    collected.sort(cmp=task_cmp)
    return _group_tasks(collected)
def get_task(domain, task):
    """Gets a task in a domain.

    Args:
        domain: The domain identifier
        task: The task key id or name. Can either be an int or a
            string. May also be None, in which case None is returned.

    Returns:
        A task instance or None if no task exists.
    """
    # Consistent with the identifier-based variant of this helper: a
    # missing task argument yields None instead of the TypeError that
    # int(None) would raise.
    if task is None:
        return None
    domain_key = Domain.key_from_name(domain)
    try:
        task_id = int(task)
        return Task.get_by_id(task_id, parent=domain_key)
    except ValueError:
        return Task.get_by_key_name(task, parent=domain_key)
def txn():
    # Create the task inside the domain's entity group.
    new_task = Task(parent=Domain.key_from_name(domain_identifier),
                    description=description,
                    user=user,
                    context=user.default_context_key())
    # TODO(tijmen): This get is redundant, the key can
    # be derived from the identifier and the domain.
    new_task.parent_task = get_task(domain_identifier,
                                    parent_task_identifier)
    new_task.put()
    # Kick off derived-property recomputation within the transaction.
    workers.UpdateTaskCompletion.enqueue(domain_identifier,
                                         new_task.identifier(),
                                         transactional=True)
    workers.UpdateTaskHierarchy.enqueue(domain_identifier,
                                        new_task.identifier(),
                                        transactional=True)
    return new_task
def txn():
    # Create the task inside the domain's entity group.
    new_task = Task(parent=Domain.key_from_name(domain_identifier),
                    description=description,
                    user=user,
                    context=user.default_context_key())
    parent_task = get_task(domain_identifier, parent_task_identifier)
    # A parent was named but could not be found: refuse to create an
    # orphaned child.
    if parent_task_identifier and not parent_task:
        raise ValueError("Parent task '%s' does not exist"
                         % parent_task_identifier)
    new_task.parent_task = parent_task
    new_task.put()
    # Kick off derived-property recomputation within the transaction.
    workers.UpdateTaskCompletion.enqueue(domain_identifier,
                                         new_task.identifier(),
                                         transactional=True)
    workers.UpdateTaskHierarchy.enqueue(domain_identifier,
                                        new_task.identifier(),
                                        transactional=True)
    return new_task
def txn():
    # Resolve and validate the parent task, when one was specified.
    super_task = get_task(domain, parent_task) if parent_task else None
    if parent_task and not super_task:
        raise ValueError("Parent task does not exist")
    new_task = Task(parent=Domain.key_from_name(domain),
                    description=description,
                    user=user,
                    context=user.default_context_key(),
                    parent_task=super_task,
                    level=super_task.level + 1 if super_task else 0)
    if super_task:
        # Keep the parent's subtask counters in sync.
        super_task.number_of_subtasks = super_task.number_of_subtasks + 1
        super_task.increment_incomplete_subtasks()
        super_task.put()
    if assignee:
        new_task.baked_assignee_description = assignee.name
    new_task.put()
    return new_task
def push():
    """Register a submitted domain name for monitoring.

    Strips leading wildcard labels, validates the name, scores it
    against the configured suspicious keywords, stores it, and queues
    a screenshot job when it matched a keyword (or came from the UI).
    """
    dn = Domain()
    dn.uid = str(uuid.uuid4())
    dn.name = request.form['domain']
    dn.status = "legitimate"
    # BUG FIX: strip wildcard prefixes BEFORE validating. The original
    # validated first, so every '*.example.com' submission was
    # rejected ('*' never passes the character check) and this strip
    # loop was dead code.
    while dn.name.startswith('*.'):
        dn.name = dn.name.split('.', 1)[1]
    # BUG FIX: the original class r'[a-zA-Z0-9-_\\.]' also admitted a
    # literal backslash; only alphanumerics, '-', '_' and '.' are valid.
    if not re.match(r'^[a-zA-Z0-9_.-]+$', dn.name):
        raise BadRequest('Invalid domain name')
    keywords = app.config['SUSPICIOUS_KEYWORDS']
    matches = []
    for k in keywords:
        matches.extend([{
            "keyword": k,
            "matched": word,
            "score": score
        } for word, score in fuzzy_process.extractBests(
            k, (dn.name, ), score_cutoff=70)])
    if matches or request.form.get('from-ui'):
        dn.status = "check-queued"
    try:
        db.session.add(dn)
        db.session.commit()
    except IntegrityError:
        # Already registered: report instead of failing.
        db.session.rollback()
        if request.form.get('from-ui'):
            return redirect('/')
        return jsonify({"status": "already-exists", "matches": matches})
    else:
        if matches or request.form.get('from-ui'):
            rs.rpush('screen-jobs', json.dumps({"uid": dn.uid}))
            if request.form.get('from-ui'):
                return redirect('/')
    return jsonify({"status": dn.status, "matches": matches})
def get_task(domain_identifier, task_identifier):
    """Gets a task in a domain.

    Args:
        domain_identifier: The domain identifier
        task_identifier: The task identifier, as an int or string.
            May be None, in which case None will be returned.

    Returns:
        A task instance, or None if no task exists. If
        |task_identifier| was None, None is always returned.
    """
    if not task_identifier:
        return None
    parent_key = Domain.key_from_name(domain_identifier)
    try:
        numeric_id = int(task_identifier)
    except ValueError:
        # Not numeric: the identifier is a key name.
        return Task.get_by_key_name(task_identifier, parent=parent_key)
    return Task.get_by_id(numeric_id, parent=parent_key)
def _get_task(domain_id):
    """Select an unassigned, incomplete task that fits the domain's
    capacity, assign it to the domain and return it.

    Aborts with 404 when the domain is unknown and 503 when no
    suitable task is available.
    """
    try:
        domain = Domain.get_by_id(domain_id)
        candidates = Task.select().where(
            ~Task.completed,
            Task.domain.is_null(),
            Task.cpu_intensity <= domain.mflops,
            Task.com_intensity <= domain.mpi_bandwidth,
            Task.mem_intensity <= domain.memory)
        if GREEDY_TASKS:
            # Best fit: smallest distance between the task's demands
            # and the domain's capacity.
            def fit(t):
                return _distance(
                    (t.cpu_intensity, t.com_intensity, t.mem_intensity),
                    (domain.mflops, domain.mpi_bandwidth, domain.memory))
            task = sorted(candidates, key=fit)[0]
        else:
            task = candidates.get()
        task.domain = domain
        task.assign_date = datetime.now()
        task.save()
        return task
    except Domain.DoesNotExist:
        abort(Response('Domain not found', 404))
    except (Task.DoesNotExist, IndexError):
        abort(Response('No tasks available', 503))
def post(self):
    """Worker: recompute a task's derived completion state from its
    direct subtasks, mirror it into the TaskIndex, and propagate the
    update upwards to the parent task.

    Expects 'domain' and 'task' identifiers as request parameters.
    """
    domain_identifier = self.request.get("domain")
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get("task")

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        # Get all subtasks. The ancestor queries are strongly
        # consistent, so when propagating upwards through the
        # hierarchy the changes are reflected.
        subtasks = list(Task.all().ancestor(domain_key).filter("parent_task =", task.key()))
        if not subtasks:
            # atomic task: derived values describe only this task
            task.derived_completed = task.completed
            task.derived_size = 1
            task.derived_atomic_task_count = 1
            task.derived_has_open_tasks = task.open()
            assignee_identifier = task.assignee_identifier()
            if assignee_identifier:
                index.assignees = [assignee_identifier]
                if not DEV_SERVER:
                    # Uses a multi entity group transaction to get the name
                    # of the assignee. This is cached in the record for
                    # quick descriptions.
                    assignee = api.get_user(assignee_identifier)
                    name = assignee.name if assignee else "<Missing>"
                else:
                    name = "temp"
                task.derived_assignees[task.assignee_identifier()] = {
                    "id": task.assignee_identifier(),
                    "name": name,
                    "completed": int(task.is_completed()),
                    "all": 1,
                }
        else:
            # composite task: aggregate the subtasks' derived values
            task.derived_completed = all(t.is_completed()
                                         for t in subtasks)
            task.derived_size = 1 + sum(t.derived_size for t in subtasks)
            task.derived_atomic_task_count = sum(t.atomic_task_count()
                                                 for t in subtasks)
            task.derived_has_open_tasks = any(t.has_open_tasks()
                                              for t in subtasks)
            # Compute derived assignees, and sum the total of all
            # their assigned and completed subtasks.
            assignees = {}
            for subtask in subtasks:
                subtask_assignees = subtask.derived_assignees
                for id, record in subtask_assignees.iteritems():
                    if not id in assignees:
                        assignees[id] = {"id": id,
                                         "name": record["name"],
                                         "completed": 0,
                                         "all": 0}
                    assignees[id]["completed"] += record["completed"]
                    assignees[id]["all"] += record["all"]
            task.derived_assignees = assignees
            index.assignees = list(assignees.iterkeys())
        task.put()
        # Mirror the derived state into the index, which is what
        # queries filter on.
        index.completed = task.is_completed()
        index.has_open_tasks = task.has_open_tasks()
        index.atomic = task.atomic()
        index.put()
        # Propagate further upwards
        if task.parent_task_identifier():
            UpdateTaskCompletion.enqueue(domain_identifier,
                                         task.parent_task_identifier(),
                                         transactional=True)

    # Use a multi entity group transaction, if available
    if DEV_SERVER:
        db.run_in_transaction(txn)
    else:
        xg_on = db.create_transaction_options(xg=True)
        db.run_in_transaction_options(xg_on, txn)
def txn():
    # Every task in the domain, newest first.
    recent = Task.all().ancestor(Domain.key_from_name(domain))
    recent.order('-time')
    return _group_tasks(recent.fetch(50), complete_hierarchy=True,
                        domain=domain)
def register(domain):
    """Register (or refresh) a compute domain from per-node info.

    Args:
        domain: A list of node dicts, each carrying 'cpuinfo' and
            'meminfo' file contents plus 'mpi_bandwidth' and 'mflops'
            measurements.

    Returns:
        {"id": <domain id>} for the created or updated Domain row,
        keyed by the caller's IP address.
    """
    ip_addr = request.remote_addr
    nodes = len(domain)
    cpus = 0
    total_memory = 0
    total_bandwidth = 0
    total_mflops = 0
    for node in domain:
        # Parse cpuinfo file
        processors = _parse_file(node['cpuinfo'], CPUinfoParser,
                                 CPUinfoLexer, cpuinfo.Evaluator)
        # Parse meminfo file
        memory_stats = _parse_file(node['meminfo'], MeminfoParser,
                                   MeminfoLexer, meminfo.Evaluator)
        # Update domain global information
        node_cpus = len(processors)
        cpus += node_cpus
        total_memory += int(memory_stats['MemTotal'].split()[0])
        total_bandwidth += float(node.get('mpi_bandwidth'))
        # BUG FIX: scale this node's mflops by its own cpu count. The
        # original multiplied by the running total of cpus across all
        # nodes seen so far, inflating the figure (and making it
        # depend on node order).
        total_mflops += float(node.get('mflops')) * node_cpus
    try:
        domain = Domain.get(ip=ip_addr)
    except Domain.DoesNotExist:
        domain = Domain(ip=ip_addr)
    domain.nodes = nodes
    domain.cpus = cpus
    domain.mflops = total_mflops
    domain.mpi_bandwidth = total_bandwidth / nodes
    domain.memory = total_memory
    domain.save()
    return {"id": domain.get_id()}
def post(self):
    """Worker: recompute a task's derived completion state from its
    direct subtasks, mirror it into the TaskIndex, and propagate the
    update upwards to the parent task.

    Expects 'domain' and 'task' identifiers as request parameters.
    """
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        # Get all subtasks. The ancestor queries are strongly
        # consistent, so when propagating upwards through the
        # hierarchy the changes are reflected.
        subtasks = list(Task.all().ancestor(domain_key).filter(
            'parent_task =', task.key()))
        if not subtasks:
            # atomic task: derived values describe only this task
            task.derived_completed = task.completed
            task.derived_size = 1
            task.derived_atomic_task_count = 1
            task.derived_has_open_tasks = task.open()
            assignee_identifier = task.assignee_identifier()
            if assignee_identifier:
                index.assignees = [assignee_identifier]
                if not DEV_SERVER:
                    # Uses a multi entity group transaction to get the name
                    # of the assignee. This is cached in the record for
                    # quick descriptions.
                    assignee = api.get_user(assignee_identifier)
                    name = assignee.name if assignee else '<Missing>'
                else:
                    name = 'temp'
                task.derived_assignees[task.assignee_identifier()] = {
                    'id': task.assignee_identifier(),
                    'name': name,
                    'completed': int(task.is_completed()),
                    'all': 1
                }
        else:
            # composite task: aggregate the subtasks' derived values
            task.derived_completed = all(t.is_completed()
                                         for t in subtasks)
            task.derived_size = 1 + sum(t.derived_size for t in subtasks)
            task.derived_atomic_task_count = sum(t.atomic_task_count()
                                                 for t in subtasks)
            task.derived_has_open_tasks = any(t.has_open_tasks()
                                              for t in subtasks)
            # Compute derived assignees, and sum the total of all
            # their assigned and completed subtasks.
            assignees = {}
            for subtask in subtasks:
                subtask_assignees = subtask.derived_assignees
                for id, record in subtask_assignees.iteritems():
                    if not id in assignees:
                        assignees[id] = {
                            'id': id,
                            'name': record['name'],
                            'completed': 0,
                            'all': 0
                        }
                    assignees[id]['completed'] += record['completed']
                    assignees[id]['all'] += record['all']
            task.derived_assignees = assignees
            index.assignees = list(assignees.iterkeys())
        task.put()
        # Mirror the derived state into the index, which is what
        # queries filter on.
        index.completed = task.is_completed()
        index.has_open_tasks = task.has_open_tasks()
        index.atomic = task.atomic()
        index.put()
        # Propagate further upwards
        if task.parent_task_identifier():
            UpdateTaskCompletion.enqueue(domain_identifier,
                                         task.parent_task_identifier(),
                                         transactional=True)

    # Use a multi entity group transaction, if available
    if DEV_SERVER:
        db.run_in_transaction(txn)
    else:
        options = datastore_rpc.TransactionOptions(
            allow_multiple_entity_groups=True)
        datastore.RunInTransactionOptions(options, txn)
def post(self):
    """Worker: recompute a task's derived completion state from its
    direct subtasks, mirror it into the TaskIndex, and propagate the
    update upwards to the parent task.

    Expects 'domain' and 'task' identifiers as request parameters.
    """
    domain_identifier = self.request.get('domain')
    domain_key = Domain.key_from_name(domain_identifier)
    task_identifier = self.request.get('task')

    def txn():
        task = api.get_task(domain_identifier, task_identifier)
        if not task:
            logging.error("Task '%s/%s' does not exist",
                          domain_identifier, task_identifier)
            return
        index = TaskIndex.get_by_key_name(task_identifier, parent=task)
        if not index:
            index = TaskIndex(parent=task, key_name=task_identifier)
        # Get all subtasks. The ancestor queries are strongly
        # consistent, so when propagating upwards through the
        # hierarchy the changes are reflected.
        subtasks = list(Task.all().
                        ancestor(domain_key).
                        filter('parent_task =', task.key()))
        if not subtasks:
            # atomic task: derived values describe only this task
            task.derived_completed = task.completed
            task.derived_size = 1
            task.derived_atomic_task_count = 1
            task.derived_has_open_tasks = task.open()
            assignee_identifier = task.assignee_identifier()
            if assignee_identifier:
                index.assignees = [assignee_identifier]
                if not DEV_SERVER:
                    # Uses a multi entity group transaction to get the name
                    # of the assignee. This is cached in the record for
                    # quick descriptions.
                    assignee = api.get_user(assignee_identifier)
                    name = assignee.name if assignee else '<Missing>'
                else:
                    name = 'temp'
                task.derived_assignees[task.assignee_identifier()] = {
                    'id': task.assignee_identifier(),
                    'name': name,
                    'completed': int(task.is_completed()),
                    'all': 1
                }
        else:
            # composite task: aggregate the subtasks' derived values
            task.derived_completed = all(t.is_completed()
                                         for t in subtasks)
            task.derived_size = 1 + sum(t.derived_size for t in subtasks)
            task.derived_atomic_task_count = sum(t.atomic_task_count()
                                                 for t in subtasks)
            task.derived_has_open_tasks = any(t.has_open_tasks()
                                              for t in subtasks)
            # Compute derived assignees, and sum the total of all
            # their assigned and completed subtasks.
            assignees = {}
            for subtask in subtasks:
                subtask_assignees = subtask.derived_assignees
                for id, record in subtask_assignees.iteritems():
                    if not id in assignees:
                        assignees[id] = {
                            'id': id,
                            'name': record['name'],
                            'completed': 0,
                            'all': 0
                        }
                    assignees[id]['completed'] += record['completed']
                    assignees[id]['all'] += record['all']
            task.derived_assignees = assignees
            index.assignees = list(assignees.iterkeys())
        task.put()
        # Mirror the derived state into the index, which is what
        # queries filter on.
        index.completed = task.is_completed()
        index.has_open_tasks = task.has_open_tasks()
        index.atomic = task.atomic()
        index.put()
        # Propagate further upwards
        if task.parent_task_identifier():
            UpdateTaskCompletion.enqueue(domain_identifier,
                                         task.parent_task_identifier(),
                                         transactional=True)

    # Use a multi entity group transaction, if available
    if DEV_SERVER:
        db.run_in_transaction(txn)
    else:
        options = datastore_rpc.TransactionOptions(
            allow_multiple_entity_groups=True)
        datastore.RunInTransactionOptions(options, txn)