def define_task_simulation_type():
    from django.db.models import Q
    from taskengine.models import ProductionTask, StepExecution
    from deftcore.log import get_exception_string

    print "*define_task_simulation_type: running"

    tasks = ProductionTask.objects.filter(
        ~Q(project='user'),
        id__gte=4000000,
        request__request_type='MC',
        request__id__gte=800).order_by('id')

    for task in tasks.iterator():
        print "Processing task %d" % int(task.id)
        try:
            # evgen tasks carry no simulation flavour
            if task.step.step_template.step.lower() == 'evgen':
                continue
            if '(Fullsim)' in str(task.step.slice.comment):
                task.simulation_type = 'full'
            elif '(Atlfast)' in str(task.step.slice.comment):
                task.simulation_type = 'fast'
            else:
                # no explicit marker in the slice comment, look for a fast-sim step
                fast_steps_in_slice = StepExecution.objects.filter(
                    slice=task.step.slice,
                    step_template__step__icontains='fast')
                if fast_steps_in_slice.exists():
                    task.simulation_type = 'fast'
                else:
                    task.simulation_type = 'full'
            task.save()
        except Exception:
            print "Task ID = %d, exception occurred: %s" % (int(task.id), get_exception_string())

    print "*define_task_simulation_type: finished"
def create_task_input():
    from django.db.models import Q
    from taskengine.models import ProductionTask, ProductionTaskInput
    from taskengine.taskdef import TaskDefinition  # assumed import path for TaskDefinition
    from deftcore.log import get_exception_string
    from taskengine.atlas.datamgmt import DDMWrapper

    task_def = TaskDefinition(debug_mode=True)
    ddm_wrapper = DDMWrapper()

    tasks = ProductionTask.objects.filter(~Q(project='user'), id__gte=4000000).order_by('id')

    for task in tasks.iterator():
        try:
            number_of_events = 0
            number_of_files = 0

            input_dataset = task.input_dataset
            events_per_file = task.events_per_file
            if not events_per_file and input_dataset:
                events_per_file = task_def.get_events_per_file(input_dataset)

            if task.total_req_events:
                number_of_events = task.total_req_events
                if events_per_file:
                    number_of_files = number_of_events / events_per_file
            elif task.number_of_events:
                number_of_events = task.number_of_events
                if events_per_file:
                    number_of_files = number_of_events / events_per_file
            elif task.number_of_files:
                number_of_files = task.number_of_files
                if events_per_file:
                    number_of_events = number_of_files * events_per_file
            else:
                if input_dataset:
                    try:
                        number_of_files = ddm_wrapper.ddm_get_number_files(input_dataset)
                    except Exception:
                        pass
                if events_per_file:
                    number_of_events = number_of_files * events_per_file

            task_input = ProductionTaskInput(task=task,
                                             ctag=task.step.step_template.ctag,
                                             output_formats=task.step.step_template.output_formats,
                                             input_name=task_def.get_step_input_data_name(task.step),
                                             events=number_of_events,
                                             files=number_of_files,
                                             events_per_job=task.events_per_job,
                                             events_per_file=events_per_file,
                                             input_dataset=input_dataset)
            task_input.save()
        except Exception:
            print "Task ID = %d, exception occurred: %s" % (int(task.id), get_exception_string())
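# A minimal standalone sketch of the event/file fallback arithmetic used in
# create_task_input() above, for illustration only; the helper name and its
# keyword arguments are hypothetical and not part of deftcore.
def _estimate_input_size(total_req_events=None, number_of_events=None,
                         number_of_files=None, events_per_file=None):
    # Same priority order as create_task_input: requested events first,
    # then recorded events, then a plain file count.
    events = 0
    files = 0
    if total_req_events:
        events = total_req_events
        if events_per_file:
            files = events // events_per_file
    elif number_of_events:
        events = number_of_events
        if events_per_file:
            files = events // events_per_file
    elif number_of_files:
        files = number_of_files
        if events_per_file:
            events = files * events_per_file
    return events, files

# Example: 10000 requested events at 500 events per file -> (10000, 20)
# _estimate_input_size(total_req_events=10000, events_per_file=500)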
def search_task_duplicates():
    from django.db.models import Q
    from taskengine.models import ProductionTask
    from deftcore.log import get_exception_string

    print "*search_task_duplicates: running"

    tasks = ProductionTask.objects.filter(
        ~Q(project='user'),
        ~Q(status__in=['failed', 'broken', 'aborted', 'obsolete']),
        id__gte=4000000,
        request__id__gte=800).order_by('-id')

    skip_task_list = list()

    for task in tasks.iterator():
        try:
            task_id = int(task.id)
            if task_id in skip_task_list:
                continue
            input_data_name = task.inputdataset
            task_list = ProductionTask.objects.filter(
                ~Q(id=task_id),
                ~Q(status__in=['failed', 'broken', 'aborted', 'obsolete']),
                project=task.step.request.project,
                step__step_template__ctag=task.step.step_template.ctag).filter(
                Q(inputdataset=input_data_name) |
                Q(inputdataset__endswith=input_data_name.split(':')[-1]) |
                Q(step__slice__input_dataset=input_data_name) |
                Q(step__slice__input_dataset__endswith=input_data_name.split(':')[-1]) |
                Q(step__slice__input_data=input_data_name) |
                Q(step__slice__input_data__endswith=input_data_name.split(':')[-1])
            )
            task_duplicates = list()
            for prod_task_existing in task_list:
                requested_output_types = task.step.step_template.output_formats.split('.')
                previous_output_types = prod_task_existing.step.step_template.output_formats.split('.')
                processed_output_types = [e for e in requested_output_types if e in previous_output_types]
                if not processed_output_types:
                    continue
                skip_task_list.append(int(prod_task_existing.id))
                task_duplicates.append(int(prod_task_existing.id))
            if task_duplicates:
                with open('duplicates.txt', 'a') as fp:
                    fp.write("%d: %s\n" % (task_id, str(task_duplicates)))
        except Exception:
            print "Task ID = %d, exception occurred: %s" % (int(task.id), get_exception_string())

    print "*search_task_duplicates: finished"
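# Illustrative helper (not part of the original module): read back the
# "task_id: [dup_id, ...]" lines that search_task_duplicates() appends to
# duplicates.txt; the format follows the fp.write() call above.
import ast

def load_task_duplicates(path='duplicates.txt'):
    duplicates = {}
    with open(path) as fp:
        for line in fp:
            if ':' not in line:
                continue
            task_id, dup_list = line.split(':', 1)
            duplicates[int(task_id)] = ast.literal_eval(dup_list.strip())
    return duplicates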
def add_task_comment(task_id, comment_body):
    if not task_id:
        return

    try:
        task = ProductionTask.objects.get(id=int(task_id))
    except ProductionTask.DoesNotExist:
        logger.info('The task {0} is not found'.format(int(task_id)))
        return

    try:
        if task.reference:
            client = JIRAClient()
            client.authorize()
            client.add_issue_comment(task.reference, comment_body)
    except Exception:
        logger.info('add_task_comment, exception occurred: {0}'.format(get_exception_string()))
def add_task_comment(self, task_id, comment_body, user=None):
    try:
        task = ProductionTask.objects.get(id=task_id)
    except ProductionTask.DoesNotExist:
        logger.info("The task %d is not found" % task_id)
        return

    self._task_action_log(comment_body, user)

    try:
        if task.reference:
            its = ITS()
            its.authorize()
            its.add_issue_comment(task.reference, comment_body)
    except Exception:
        from deftcore.log import get_exception_string
        logger.info("Exception occurred: %s" % get_exception_string())
def t_request_proxy_post_init(sender, **kwargs):
    self = kwargs['instance']

    self.is_error = bool(self.exception)

    self.creation_time = None
    self.approval_time = None

    if self.id:
        result = TRequestStatus.objects.filter(request__id=self.id).order_by('timestamp')
        if len(result) > 0:
            self.creation_time = result[0].timestamp
        result = TRequestStatus.objects.filter(request__id=self.id, status='approved').order_by('-timestamp')
        if len(result) > 0:
            self.approval_time = result[0].timestamp

    try:
        self.evgen_steps = list()
        input_slices = InputRequestList.objects.filter(request__id=self.id).order_by('slice')
        for input_slice in input_slices:
            try:
                if input_slice.input_data and not input_slice.hided:
                    if '/' not in input_slice.input_data:
                        dsid = int(input_slice.input_data.split('.')[1])
                        brief = input_slice.input_data.split('.')[2]
                    else:
                        dsid = int(input_slice.input_data.split('/')[0])
                        brief = input_slice.input_data.split('/')[1].split('.')[1]
                    evgen_steps = StepExecution.objects.filter(request__id=self.id,
                                                               step_template__step__iexact='evgen',
                                                               slice__slice=input_slice.slice)
                    if evgen_steps:
                        for evgen_step in evgen_steps:
                            self.evgen_steps.append({'dsid': dsid,
                                                     'brief': brief,
                                                     'input_events': evgen_step.input_events,
                                                     'jo': evgen_step.slice.input_data,
                                                     'ctag': evgen_step.step_template.ctag,
                                                     'slice': int(evgen_step.slice.slice)})
                    else:
                        self.evgen_steps.append({'dsid': dsid,
                                                 'brief': brief,
                                                 'jo': input_slice.input_data,
                                                 'slice': int(input_slice.slice)})
            except Exception as ex:
                logger.exception('Exception occurred: {0}'.format(ex))
    except Exception:
        logger.exception('Exception occurred: {0}'.format(get_exception_string()))
        self.evgen_steps = None
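# Sketch of how a handler like t_request_proxy_post_init() is typically wired
# up with Django's post_init signal; the sender model (TRequest) and its import
# path are assumptions based on the handler's name.
from django.db.models.signals import post_init
from taskengine.models import TRequest  # assumed location of the request model

post_init.connect(t_request_proxy_post_init, sender=TRequest)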
def _process_api_request(self, request):
    try:
        handler = TaskActionHandler()

        if request.action == request.ACTION_TEST:
            status = {'result': "test"}
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=status)
        elif request.action == request.ACTION_CLONE_TASK:
            raise NotImplementedError()
        elif request.action == request.ACTION_ABORT_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.abort_task(task_id)
            try:
                jedi_info = handler_status['jedi_info']
                if jedi_info['status_code'] == 0 and jedi_info['return_code'] == 0:
                    task = ProductionTask.objects.get(id=task_id)
                    task.status = Protocol().TASK_STATUS[TaskStatus.TOABORT]
                    task.save()
            except Exception:
                logger.exception("Exception occurred: %s" % get_exception_string())
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_FINISH_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            soft = bool(body.get('soft'))
            handler_status = handler.finish_task(task_id, soft)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_OBSOLETE_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            task = ProductionTask.objects.get(id=task_id)
            task.status = Protocol().TASK_STATUS[TaskStatus.OBSOLETE]
            task.timestamp = timezone.now()
            task.save()
            request.set_status(request.STATUS_RESULT_SUCCESS)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_REASSIGN_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            site = body.get('site', None)
            cloud = body.get('cloud', None)
            nucleus = body.get('nucleus', None)
            mode = body.get('mode', None)
            handler_status = handler.reassign_task(task_id, site, cloud, nucleus, mode=mode)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_REASSIGN_JOBS:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            for_pending = bool(body.get('for_pending', None))
            first_submission = bool(body.get('first_submission', None))
            handler_status = handler.reassign_jobs(task_id, for_pending, first_submission)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_PRIORITY:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            priority = int(body['priority'])
            handler_status = handler.change_task_priority(task_id, priority)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_RAM_COUNT:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            ram_count = int(body['ram_count'])
            handler_status = handler.change_task_ram_count(task_id, ram_count)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_WALL_TIME:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            wall_time = int(body['wall_time'])
            handler_status = handler.change_task_wall_time(task_id, wall_time)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_CPU_TIME:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            cpu_time = int(body['cpu_time'])
            handler_status = handler.change_task_cpu_time(task_id, cpu_time)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_SPLIT_RULE:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            rule_name = body['rule_name']
            rule_value = body['rule_value']
            handler_status = handler.change_task_split_rule(task_id, rule_name, rule_value)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_ATTRIBUTE:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            attr_name = body['attr_name']
            attr_value = body['attr_value']
            handler_status = handler.change_task_attribute(task_id, attr_name, attr_value)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_RETRY_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.retry_task(task_id)
            try:
                jedi_info = handler_status['jedi_info']
                if jedi_info['status_code'] == 0 and jedi_info['return_code'] == 0:
                    task = ProductionTask.objects.get(id=task_id)
                    task.status = Protocol().TASK_STATUS[TaskStatus.TORETRY]
                    task.save()
            except Exception:
                logger.exception("Exception occurred: %s" % get_exception_string())
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_PAUSE_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.pause_task(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_RESUME_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.resume_task(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_TRIGGER_TASK_BROKERAGE:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.trigger_task_brokerage(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_AVALANCHE_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.avalanche_task(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_INCREASE_ATTEMPT_NUMBER:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            increment = int(body['increment'])
            handler_status = handler.increase_attempt_number(task_id, increment)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_ABORT_UNFINISHED_JOBS:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            code = int(body.get('code', 9))
            handler_status = handler.abort_unfinished_jobs(task_id, code)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_ADD_TASK_COMMENT:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            comment_body = body['comment_body']
            handler_status = handler.add_task_comment(task_id, comment_body)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
        elif request.action == request.ACTION_CREATE_SLICE_TIER0:
            body = json.loads(request.body)
            slice_dict = body['slice_dict']
            steps_list = body['steps_list']
            handler_status = handler.create_slice_tier0(slice_dict, steps_list)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
        elif request.action == request.ACTION_CLEAN_TASK_CARRIAGES:
            body = json.loads(request.body)
            task_id = body['task_id']
            output_formats = body['output_formats']
            handler_status = handler.clean_task_carriages(task_id, output_formats)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_KILL_JOB:
            body = json.loads(request.body)
            task_id = body['task_id']
            job_id = body['job_id']
            code = body.get('code', 9)
            handler_status = handler.kill_job(job_id, code=code)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            status_code = handler_status['jedi_info']['status_code']
            body.update({'status_code': status_code})
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_SET_JOB_DEBUG_MODE:
            body = json.loads(request.body)
            task_id = body['task_id']
            job_id = body['job_id']
            debug_mode = body['debug_mode']
            handler_status = handler.set_job_debug_mode(job_id, debug_mode)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            status_code = handler_status['jedi_info']['status_code']
            body.update({'status_code': status_code})
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        else:
            raise Exception("Invalid action: %s" % request.action)
    except Exception:
        logger.exception("Exception occurred: %s" % get_exception_string())
        if request:
            request.set_status(request.STATUS_RESULT_EXCEPTION, exception=get_exception_string())
def _process_api_request(self, request):
    try:
        from taskengine.models import ProductionTask
        from taskengine.handlers import TaskActionHandler

        handler = TaskActionHandler()

        if request.action == request.ACTION_TEST:
            status = {'result': "test"}
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=status)
        elif request.action == request.ACTION_CLONE_TASK:
            raise NotImplementedError()
        elif request.action == request.ACTION_ABORT_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.abort_task(task_id)
            try:
                jedi_info = handler_status['jedi_info']
                if jedi_info['status_code'] == 0 and jedi_info['return_code'] == 0:
                    task = ProductionTask.objects.get(id=task_id)
                    task.status = Protocol().TASK_STATUS[TaskStatus.TOABORT]
                    task.save()
            except Exception:
                logger.exception("Exception occurred: %s" % get_exception_string())
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_FINISH_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            soft = bool(body.get('soft'))
            handler_status = handler.finish_task(task_id, soft)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_OBSOLETE_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            task = ProductionTask.objects.get(id=task_id)
            task.status = Protocol().TASK_STATUS[TaskStatus.OBSOLETE]
            task.timestamp = timezone.now()
            task.save()
            request.set_status(request.STATUS_RESULT_SUCCESS)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_OBSOLETE_ENTITY:
            body = json.loads(request.body)
            task_id_list = [int(e) for e in str(body['tasks']).split(',')]
            is_force = bool(body.get('force', None))
            tasks = ProductionTask.objects.filter(id__in=task_id_list)
            is_chain = len(tasks) > 1
            for task in tasks:
                task.status = Protocol().TASK_STATUS[TaskStatus.OBSOLETE]
                task.timestamp = timezone.now()
                if is_chain:
                    task.pp_flag = 2
                    if is_force:
                        task.pp_grace_period = 0
                    else:
                        task.pp_grace_period = 48
                else:
                    if is_force:
                        task.pp_flag = 1
                        task.pp_grace_period = 0
                    else:
                        task.pp_flag = 0
                        task.pp_grace_period = 48
                task.save()
                request.set_status(request.STATUS_RESULT_SUCCESS)
                handler.add_task_comment(task.id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_REASSIGN_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            site = body.get('site', None)
            cloud = body.get('cloud', None)
            nucleus = body.get('nucleus', None)
            mode = body.get('mode', None)
            handler_status = handler.reassign_task(task_id, site, cloud, nucleus, mode=mode)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_REASSIGN_JOBS:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            for_pending = bool(body.get('for_pending', None))
            first_submission = bool(body.get('first_submission', None))
            handler_status = handler.reassign_jobs(task_id, for_pending, first_submission)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_PRIORITY:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            priority = int(body['priority'])
            handler_status = handler.change_task_priority(task_id, priority)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_RAM_COUNT:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            ram_count = int(body['ram_count'])
            handler_status = handler.change_task_ram_count(task_id, ram_count)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_WALL_TIME:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            wall_time = int(body['wall_time'])
            handler_status = handler.change_task_wall_time(task_id, wall_time)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_CPU_TIME:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            cpu_time = int(body['cpu_time'])
            handler_status = handler.change_task_cpu_time(task_id, cpu_time)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_SPLIT_RULE:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            rule_name = body['rule_name']
            rule_value = body['rule_value']
            handler_status = handler.change_task_split_rule(task_id, rule_name, rule_value)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_CHANGE_TASK_ATTRIBUTE:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            attr_name = body['attr_name']
            attr_value = body['attr_value']
            handler_status = handler.change_task_attribute(task_id, attr_name, attr_value)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_RETRY_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            discard_events = bool(body.get('discard_events', False))
            disable_staging_mode = bool(body.get('disable_staging_mode', False))
            handler_status = handler.retry_task(task_id, discard_events, disable_staging_mode)
            try:
                jedi_info = handler_status['jedi_info']
                if jedi_info['status_code'] == 0 and jedi_info['return_code'] == 0:
                    task = ProductionTask.objects.get(id=task_id)
                    task.status = Protocol().TASK_STATUS[TaskStatus.TORETRY]
                    task.save()
            except Exception:
                logger.exception("Exception occurred: %s" % get_exception_string())
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_PAUSE_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.pause_task(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_RESUME_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.resume_task(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_REASSIGN_TASK_TO_SHARE:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            share = body.get('share', '')
            reassign_running = bool(body.get('reassign_running', None))
            handler_status = handler.reassign_task_to_share(task_id, share, reassign_running=reassign_running)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_TRIGGER_TASK_BROKERAGE:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.trigger_task_brokerage(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_AVALANCHE_TASK:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            handler_status = handler.avalanche_task(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_INCREASE_ATTEMPT_NUMBER:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            increment = int(body['increment'])
            handler_status = handler.increase_attempt_number(task_id, increment)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_ABORT_UNFINISHED_JOBS:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            code = body.get('code', TaskDefConstants.DEFAULT_KILL_JOB_CODE)
            handler_status = handler.abort_unfinished_jobs(task_id, code)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_ADD_TASK_COMMENT:
            body = json.loads(request.body)
            task_id = int(body['task_id'])
            comment_body = body['comment_body']
            handler_status = handler.add_task_comment(task_id, comment_body)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
        elif request.action == request.ACTION_CREATE_SLICE_TIER0:
            body = json.loads(request.body)
            slice_dict = body['slice_dict']
            steps_list = body['steps_list']
            handler_status = handler.create_slice_tier0(slice_dict, steps_list)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
        elif request.action == request.ACTION_CLEAN_TASK_CARRIAGES:
            body = json.loads(request.body)
            task_id = body['task_id']
            output_formats = body['output_formats']
            handler_status = handler.clean_task_carriages(task_id, output_formats)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_KILL_JOB:
            body = json.loads(request.body)
            task_id = body['task_id']
            job_id = body['job_id']
            code = body.get('code', TaskDefConstants.DEFAULT_KILL_JOB_CODE)
            keep_unmerged = bool(body.get('keep_unmerged', False))
            handler_status = handler.kill_job(job_id, code=code, keep_unmerged=keep_unmerged)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            status_code = handler_status['jedi_info']['status_code']
            body.update({'status_code': status_code})
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_KILL_JOBS:
            body = json.loads(request.body)
            task_id = body['task_id']
            jobs = [int(e) for e in str(body['jobs']).split(',')]
            code = body.get('code', TaskDefConstants.DEFAULT_KILL_JOB_CODE)
            keep_unmerged = bool(body.get('keep_unmerged', False))
            handler_status = handler.kill_jobs(jobs, code=code, keep_unmerged=keep_unmerged)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            status_code = handler_status['jedi_info']['status_code']
            body.update({'status_code': status_code})
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_SET_JOB_DEBUG_MODE:
            body = json.loads(request.body)
            task_id = body['task_id']
            job_id = body['job_id']
            debug_mode = body['debug_mode']
            handler_status = handler.set_job_debug_mode(job_id, debug_mode)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            status_code = handler_status['jedi_info']['status_code']
            body.update({'status_code': status_code})
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        elif request.action == request.ACTION_SET_TTCR:
            body = json.loads(request.body)
            ttcr_dict = body['ttcr_dict']
            handler_status = handler.set_ttcr(ttcr_dict)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
        elif request.action == request.ACTION_SET_TTCJ:
            body = json.loads(request.body)
            ttcj_dict = body['ttcj_dict']
            handler_status = handler.set_ttcj(ttcj_dict)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
        elif request.action == request.ACTION_RELOAD_INPUT:
            body = json.loads(request.body)
            task_id = body['task_id']
            handler_status = handler.reload_input(task_id)
            request.set_status(request.STATUS_RESULT_SUCCESS, data_dict=handler_status)
            handler.add_task_comment(task_id, request.create_default_task_comment(body))
        else:
            raise Exception("Invalid action: %s" % request.action)
    except Exception:
        logger.exception("Exception occurred: %s" % get_exception_string())
        if request:
            request.set_status(request.STATUS_RESULT_EXCEPTION, exception=get_exception_string())
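# Illustrative only: a minimal stand-in for the API request object consumed by
# _process_api_request(), limited to the attributes it actually touches
# (action, body, set_status, create_default_task_comment plus ACTION_*/STATUS_*
# constants). Every name and constant value below is hypothetical test
# scaffolding, not the real deftcore request model; only the ACTION_TEST path
# is exercised here.
import json


class FakeApiRequest(object):
    ACTION_TEST = 'test'
    STATUS_RESULT_SUCCESS = 'success'
    STATUS_RESULT_EXCEPTION = 'exception'

    def __init__(self, action, body_dict=None):
        self.action = action
        self.body = json.dumps(body_dict or {})
        self.status = None
        self.data = None

    def set_status(self, status, data_dict=None, exception=None):
        self.status = status
        self.data = data_dict if data_dict is not None else {'exception': exception}

    def create_default_task_comment(self, body):
        return 'API action %s: %s' % (self.action, json.dumps(body))

# Example (processor is whatever object provides _process_api_request):
# processor._process_api_request(FakeApiRequest(FakeApiRequest.ACTION_TEST))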