def GetTaskList(self, request):
    limit = 10
    if request.limit is not None:
        limit = request.limit
    curs = None
    if request.cursor is not None:
        try:
            curs = Cursor(urlsafe=request.cursor)
        except BadValueError:
            raise endpoints.BadRequestException('Invalid cursor %s.' % request.cursor)
    if request.timestamp is not None:
        hQuery = Task.query(Task.last_updated <= request.timestamp,
                            Task.user == self.GetUserId())
    else:
        hQuery = Task.query(Task.user == self.GetUserId())
    if curs is not None:
        tasks, next_curs, more = hQuery.fetch_page(limit, start_cursor=curs)
    else:
        tasks, next_curs, more = hQuery.fetch_page(limit)
    items = [entity.ConvertToResponse() for entity in tasks]
    if more:
        return TaskListResponse(task_list=items, cursor=next_curs.urlsafe(), has_more=more)
    return TaskListResponse(task_list=items, has_more=more)

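# A minimal paging sketch, not part of the service above: it assumes a
# hypothetical `api` client stub exposing GetTaskList and a hypothetical
# `handle(item)` callback; field names mirror TaskListResponse above.
cursor = None
while True:
    response = api.GetTaskList(limit=10, cursor=cursor)
    for item in response.task_list:
        handle(item)  # hypothetical per-item processing
    if not response.has_more:
        break
    cursor = response.cursor  # urlsafe cursor returned with the previous page
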
def _convert_user_params_to_tasks(user_params, job):
    """
    :param model.UserParameters.UserParameters user_params: user-supplied simulation parameters
    :param uuid.UUID job: id of the job the tasks belong to
    :return: model.Task.Task[]
    """
    tasks = []
    compute_parameters = ComputeParameters(user_params.num_samples, user_params.viscosity,
                                           user_params.speed, user_params.time, server_ip())
    # numpy.arange excludes the stop value, so max_angle is appended explicitly below.
    angles = numpy.arange(user_params.min_angle, user_params.max_angle, user_params.step)
    for angle in angles:
        model_parameters = ModelParameters(user_params.naca4, job, angle,
                                           user_params.num_nodes, user_params.refinement_level)
        task = Task(None, None, model_parameters, compute_parameters, None)
        tasks.append(task)
    model_parameters = ModelParameters(user_params.naca4, job, user_params.max_angle,
                                       user_params.num_nodes, user_params.refinement_level)
    task = Task(None, None, model_parameters, compute_parameters, None)
    tasks.append(task)
    return tasks

def CreateTask(self, request):
    task = Task(status=request.status,
                category=ndb.Key('Category', request.category),
                title=request.title,
                description=request.description,
                user=self.GetUserId())
    task.put()
    return task.ConvertToResponse()

def create(self, task_number):
    self.name = "RW%s" % task_number
    entry_task = Task(0, [], [])
    exit_task = Task(task_number - 1, [], [])
    self.task_list.append(entry_task)
    surplus_task_num = task_number - 2
    last_layer = [entry_task.task_id]
    max_id = 0
    while surplus_task_num != 0:
        current_layer_task_num = random.randint(constant.MIN_TASK_NUM, constant.MAX_TASK_NUM)
        # Build the task list for the current layer.
        current_layer_task_id_list = []
        if current_layer_task_num > surplus_task_num:
            current_layer_task_num = surplus_task_num
        for i in range(0, current_layer_task_num):
            task = Task(max_id + 1, [], [])
            self.task_list.append(task)
            current_layer_task_id_list.append(task.task_id)
            max_id += 1
        # Randomly attach a successor from this layer to every task of the previous layer.
        for task_id in last_layer:
            task = self.get_task_by_id(task_id)
            random_num = random.randint(0, current_layer_task_num - 1)
            task_id_temp = current_layer_task_id_list[random_num]
            task.suc_task_id_list.append(task_id_temp)
            task_temp = self.get_task_by_id(task_id_temp)
            task_temp.pre_task_id_list.append(task_id)
        # Make sure every task in the current layer has a predecessor; if not, pick one at random.
        for task_id in current_layer_task_id_list:
            task = self.get_task_by_id(task_id)
            if task.pre_task_id_list is None or len(task.pre_task_id_list) == 0:
                random_num = random.randint(0, len(last_layer) - 1)
                task_id_temp = last_layer[random_num]
                task_temp = self.get_task_by_id(task_id_temp)
                task_temp.suc_task_id_list.append(task.task_id)
                task.pre_task_id_list.append(task_id_temp)
        surplus_task_num -= current_layer_task_num
        last_layer = current_layer_task_id_list
    # Connect the final layer to the exit task.
    for task_id in last_layer:
        task = self.get_task_by_id(task_id)
        task.suc_task_id_list.append(exit_task.task_id)
        exit_task.pre_task_id_list.append(task_id)
    self.task_list.append(exit_task)

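# A small sanity-check sketch for the generated DAG, assuming a workflow
# instance `wf` on which create(task_number) has already been called; it only
# uses the task_id / pre_task_id_list / suc_task_id_list fields seen above.
def check_random_workflow(wf, task_number):
    assert len(wf.task_list) == task_number
    for task in wf.task_list:
        if task.task_id != 0:                     # every non-entry task has a predecessor
            assert len(task.pre_task_id_list) > 0
        if task.task_id != task_number - 1:       # every non-exit task has a successor
            assert len(task.suc_task_id_list) > 0
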
def CreateTask(self, request):
    CategoryKey = ndb.Key('Category', request.category)
    # ndb.Key() never returns None, so check that the referenced entity actually exists.
    if CategoryKey.get() is None:
        raise endpoints.NotFoundException('No category entity with the id "%s" exists.' % request.category)
    task = Task(status=request.status,
                category=CategoryKey,
                title=request.title,
                description=request.description,
                user=self.GetUserId())
    task.put()
    return task.ConvertToResponse()

def create_classic_wf(self, file_path):
    task_dict = dict()
    with open(file_path, "r") as file:
        line = file.readline()
        while line:
            # Strip the trailing newline first.
            line = line.strip('\n')
            # Split the line on spaces; every item is a task_id.
            line_split = line.split(" ")
            # If a task is not yet in task_dict, create it.
            for task_id in line_split:
                if task_id not in task_dict.keys():
                    task = Task(task_id, [], [])
                    task_dict[task_id] = task
            # The first task on the line is the predecessor.
            task = task_dict[line_split[0]]
            # Remove it; the remaining task_ids on the line are its successors.
            line_split.pop(0)
            for task_id in line_split:
                task_temp = task_dict[task_id]
                task_temp.pre_task_id_list.append(task.task_id)
                task.suc_task_id_list.append(task_id)
            line = file.readline()
    # Finally, sort the dict by its keys interpreted as ints.
    task_dict = WorkFlow.sort_dict_by_key(task_dict)
    self.task_list = task_dict.values()

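# An illustrative input file for create_classic_wf (path and ids are made up):
# each line starts with a task_id followed by the ids of its direct successors,
# separated by single spaces; a task with no successors may appear alone.
#
#   1 2 3
#   2 4
#   3 4
#   4
#
# wf.create_classic_wf("classic_wf.txt")  # `wf` is an already-constructed workflow object
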
def GetTaskById(self, request):
    if request.id is None:
        raise endpoints.BadRequestException('id field is required.')
    task = Task.get_by_id(request.id)
    if task is None or task.user != self.GetUserId():
        raise endpoints.NotFoundException('No task entity with the id "%s" exists.' % request.id)
    return task.ConvertToResponse()

def put_task_in_queue():
    """
    Route for enqueuing a new task.

    :return: id of the new task, as a string
    """
    new_idx = next(idx_gen)
    task = Task(new_idx)
    r_new_tasks.set(new_idx, pickle.dumps(task))
    return str(new_idx)

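# A complementary worker sketch, not part of the route above: it assumes the
# same `r_new_tasks` Redis client, and that the task id is obtained elsewhere
# (e.g. from the string returned by put_task_in_queue).
def get_task_from_queue(idx):
    raw = r_new_tasks.get(idx)
    if raw is None:
        return None           # nothing stored under this id
    return pickle.loads(raw)  # reverse of the pickle.dumps in put_task_in_queue
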
def UpdateTask(self, request):
    result = TaskResponse()
    task = Task.get_by_id(request.id)
    if task is not None and task.user == self.GetUserId():
        if task.last_updated <= request.client_copy_timestamp:
            task.MergeFromMessage(request)
            task.put()
            result = task.ConvertToResponse()
        else:
            raise endpoints.NotFoundException("The item was updated on the outside")
    else:
        raise endpoints.NotFoundException('No task entity with the id "%s" exists.' % request.id)
    return result

def CreateTask(self, request):
    if request.category is None and request.status is None:
        raise endpoints.BadRequestException('The request must contain at least one of category or status.')
    if request.category is not None:
        CategoryKey = ndb.Key('Category', request.category)
        # ndb.Key() never returns None, so check that the referenced entity actually exists.
        if CategoryKey.get() is None:
            raise endpoints.NotFoundException('No category entity with the id "%s" exists.' % request.category)
    else:
        CategoryKey = None
    task = Task(status=request.status,
                category=CategoryKey,
                title=request.title,
                description=request.description,
                user=self.GetUserId())
    if not task.isTaskUnique():
        raise endpoints.BadRequestException("BUSINESS RULE: Duplicate Task already exists")
    task.put()
    return task.ConvertToResponse()

def get(self, gname):
    mod = self.request.get('modstatus')
    grp, usr = group_verify(self, gname)
    if usr and grp:
        if mod != "" and ((grp.members[usr.name]['restore_own_task'] and usr == Calendar().get_person(grp.key, mod))
                          or grp.members[usr.name]['restore_member_tasks']):
            Calendar().reactivate(grp.key, mod)
            self.redirect('/group/' + gname + '/history')
            return
        tasks = {}
        for task in Task.all(grp.key):
            tasks[task['name']] = task['description']
        calendar = Calendar().get(grp.key)
        self.render('group_history.html', calendar=calendar, user=usr, group=grp, tasks=tasks)

def DeleteUser(self, request):
    '''API method to delete the current user and all of their data'''
    categories = Category.query(Category.user == self.GetUserId()).fetch()
    for category in categories:
        category.key.delete()
    tasks = Task.query(Task.user == self.GetUserId()).fetch()
    for task in tasks:
        task.key.delete()
    user = User.query(User.username == endpoints.get_current_user().email()).get()
    user.key.delete()
    return message_types.VoidMessage()

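# A possible batched variant (a sketch, not the method above): keys_only
# queries plus ndb.delete_multi avoid loading full entities and issue fewer
# datastore calls. It assumes the same Category/Task models and GetUserId().
def _delete_user_data_batched(self):
    category_keys = Category.query(Category.user == self.GetUserId()).fetch(keys_only=True)
    task_keys = Task.query(Task.user == self.GetUserId()).fetch(keys_only=True)
    ndb.delete_multi(category_keys + task_keys)
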
def DeleteCategory(self, request):
    result = CategoryResponse()
    category = Category.get_by_id(request.id)
    if category is not None:
        if Task.query(Task.category == category.key).get() is None:
            if category.last_updated <= request.client_copy_timestamp and category.user == self.GetUserId():
                category.key.delete()
                result = category.ConvertToResponse()
            else:
                raise endpoints.NotFoundException("The item was updated on the outside")
        else:
            raise endpoints.NotFoundException("This item has child elements")
    else:
        raise endpoints.NotFoundException('No category entity with the id "%s" exists.' % request.id)
    return result

def get(self, gname):
    logging.error(gname)
    mod = self.request.get('modstatus')
    grp, usr = group_verify(self, gname)
    if usr and grp:
        if mod != "" and ((grp.members[usr.name]['finish_own_task'] and usr.name == Calendar().get_person(grp.key, mod))
                          or grp.members[usr.name]['finish_member_tasks']):
            t = Task(grp.key, mod[:-8])
            intervall = t.intervall
            memberlist = [x for x in grp.members]
            Calendar().make_next(grp.key, mod, memberlist, intervall)
            calendar = Calendar().get(grp.key, True, status='Active')
            self.redirect('/group/' + gname)
            return
        tasks = {}
        for task in Task.all(grp.key):
            tasks[task['name']] = task['description']
        calendar = Calendar().get(grp.key, status='Active')
        comments = Comment().get(grp.key, order="-datetime")
        logging.error(calendar)
        self.render('group_root.html', calendar=calendar, user=usr, group=grp, tasks=tasks, comments=comments)
        return
    self.redirect('/')

def create_task_for_params(connection, name, state_id, user_id):
    state = connection.query(State).get(state_id)
    user = connection.query(User).get(user_id)
    task = Task(task_name=name, active_state=state, user=user)
    connection.add(task)
    connection.commit()

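# A minimal usage sketch, assuming `connection` is a SQLAlchemy Session; the
# engine URL, model classes, and the ids used here are illustrative only.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine("sqlite:///tasks.db")  # hypothetical database URL
Session = sessionmaker(bind=engine)
session = Session()
create_task_for_params(session, name="demo task", state_id=1, user_id=1)
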
def get(self, gname):
    grp, usr, taskid = task_verify(self, gname)
    if grp and (usr.name in grp.members):
        tasks = Task.all(grp.key)
        self.render('task.html', tasks=tasks, user=usr, group=grp)

# If no options were specified, print usage and exit.
if len(sys.argv) == 1:
    print_usage(True)
    exit(0)

# If the help flag was specified, print help and exit.
if is_arg_passed('h'):
    print_help()
    exit(0)

# Trusted mode or not?
trusted = is_arg_passed('t')

# Load the task from file.
task = Task.load(sys.argv[-1])

# For each file the task specifies.
for file in task.files:
    print('Now working on file:', file)
    # For each policy the task specifies.
    for policy in task.policies:
        print('Reselecting for policy:', policy)
        # For each mode the task specifies.
        for mode in task.modes:
            print('In mode', mode, 'reselecting...')
            # Run the policy filtration/distribution renormalization script.
            out_path = compute_out_path(task.out, file, policy, mode)
            retries = 0
            success = False
            while not success and retries <= FILT_RUN_RETRIES:  # We might need to retry this several times.