def _create_task_log_info(self, task_id, fail_mode):
    """Create the dictionary that will be included in the monitoring log.

    Args:
        task_id: key into ``self.tasks`` identifying the task to log.
        fail_mode: label describing how the failure (if any) is being handled;
            stored verbatim under ``task_fail_mode``.

    Returns:
        dict of monitoring fields describing the task's current state.
    """
    # Hoist the repeated self.tasks[task_id] lookups into one local,
    # matching the style of the newer task_record-based variant.
    task_record = self.tasks[task_id]

    info_to_monitor = ['func_name', 'fn_hash', 'memoize', 'fail_count',
                       'status', 'id', 'time_submitted', 'time_returned',
                       'executor']

    task_log_info = {"task_" + k: task_record[k] for k in info_to_monitor}
    task_log_info['run_id'] = self.run_id
    task_log_info['timestamp'] = datetime.datetime.now()
    task_log_info['task_status_name'] = task_record['status'].name
    task_log_info['tasks_failed_count'] = self.tasks_failed_count
    task_log_info['tasks_completed_count'] = self.tasks_completed_count
    task_log_info['task_inputs'] = str(task_record['kwargs'].get('inputs', None))
    task_log_info['task_outputs'] = str(task_record['kwargs'].get('outputs', None))
    task_log_info['task_stdin'] = task_record['kwargs'].get('stdin', None)

    stdout_spec = task_record['kwargs'].get('stdout', None)
    stderr_spec = task_record['kwargs'].get('stderr', None)

    try:
        stdout_name, stdout_mode = get_std_fname_mode('stdout', stdout_spec)
    except Exception as e:
        # A malformed spec is logged, and the error text stands in for the
        # filename so the problem is visible in the monitoring record.
        logger.warning("Incorrect stdout format %s for Task %s", stdout_spec, task_id)
        stdout_name, stdout_mode = str(e), None
    try:
        stderr_name, stderr_mode = get_std_fname_mode('stderr', stderr_spec)
    except Exception as e:
        logger.warning("Incorrect stderr format %s for Task %s", stderr_spec, task_id)
        stderr_name, stderr_mode = str(e), None

    # Serialize (name, mode) as "name;mode"; a bare name when mode is absent.
    stdout_spec = ";".join((stdout_name, stdout_mode)) if stdout_mode else stdout_name
    stderr_spec = ";".join((stderr_name, stderr_mode)) if stderr_mode else stderr_name
    task_log_info['task_stdout'] = stdout_spec
    task_log_info['task_stderr'] = stderr_spec

    task_log_info['task_fail_history'] = None
    if task_record['fail_history'] is not None:
        task_log_info['task_fail_history'] = ",".join(task_record['fail_history'])

    task_log_info['task_depends'] = None
    if task_record['depends'] is not None:
        # Only real future dependencies carry a task id worth recording.
        task_log_info['task_depends'] = ",".join(
            [str(t.tid) for t in task_record['depends']
             if isinstance(t, (AppFuture, DataFuture))])

    task_log_info['task_elapsed_time'] = None
    if task_record['time_returned'] is not None:
        task_log_info['task_elapsed_time'] = (
            task_record['time_returned'] - task_record['time_submitted']
        ).total_seconds()

    task_log_info['task_fail_mode'] = fail_mode
    return task_log_info
def _create_task_log_info(self, task_record):
    """Create the dictionary that will be included in the monitoring log.

    Args:
        task_record: the task's record dict (status, timing, kwargs, etc.).

    Returns:
        dict of monitoring fields describing the task's current state.
    """
    info_to_monitor = ['func_name', 'fn_hash', 'memoize', 'hashsum',
                       'fail_count', 'status', 'id', 'time_invoked',
                       'try_time_launched', 'time_returned',
                       'try_time_returned', 'executor']

    task_log_info = {"task_" + k: task_record[k] for k in info_to_monitor}
    task_log_info['run_id'] = self.run_id
    task_log_info['try_id'] = task_record['try_id']
    task_log_info['timestamp'] = datetime.datetime.now()
    task_log_info['task_status_name'] = task_record['status'].name
    task_log_info['tasks_failed_count'] = self.tasks_failed_count
    task_log_info['tasks_completed_count'] = self.tasks_completed_count
    task_log_info['tasks_memo_completed_count'] = self.tasks_memo_completed_count
    task_log_info['from_memo'] = task_record['from_memo']

    # Hoist the repeated kwargs lookup.
    kwargs = task_record['kwargs']
    task_log_info['task_inputs'] = str(kwargs.get('inputs', None))
    task_log_info['task_outputs'] = str(kwargs.get('outputs', None))
    task_log_info['task_stdin'] = kwargs.get('stdin', None)

    stdout_spec = kwargs.get('stdout', None)
    stderr_spec = kwargs.get('stderr', None)

    try:
        stdout_name, _ = get_std_fname_mode('stdout', stdout_spec)
    except Exception as e:
        # A malformed spec is logged, and the error text stands in for the
        # filename so the problem is visible in the monitoring record.
        logger.warning("Incorrect stdout format %s for Task %s", stdout_spec, task_record['id'])
        stdout_name = str(e)
    try:
        stderr_name, _ = get_std_fname_mode('stderr', stderr_spec)
    except Exception as e:
        logger.warning("Incorrect stderr format %s for Task %s", stderr_spec, task_record['id'])
        stderr_name = str(e)

    task_log_info['task_stdout'] = stdout_name
    task_log_info['task_stderr'] = stderr_name
    task_log_info['task_fail_history'] = ",".join(task_record['fail_history'])

    task_log_info['task_depends'] = None
    if task_record['depends'] is not None:
        # Only real future dependencies carry a task id worth recording.
        task_log_info['task_depends'] = ",".join(
            [str(t.tid) for t in task_record['depends']
             if isinstance(t, (AppFuture, DataFuture))])
    return task_log_info
def open_std_fd(fdname):
    """Open the redirect target for ``'stdout'`` or ``'stderr'``.

    The spec is taken from the enclosing scope's ``kwargs``: either a
    plain filename string or a ``(name, mode)`` tuple. Returns an open
    file object, or None when no redirection was requested. Raises
    ``pe.BadStdStreamFile`` when the file cannot be created or opened.
    """
    stdfspec = kwargs.get(fdname)
    if stdfspec is None:
        return None
    fname, mode = get_std_fname_mode(fdname, stdfspec)
    try:
        # Make sure the target directory exists before opening.
        parent = os.path.dirname(fname)
        if parent:
            os.makedirs(parent, exist_ok=True)
        return open(fname, mode)
    except Exception as e:
        raise pe.BadStdStreamFile(fname, e)
def remap_all_files(mapping, fn_args, fn_kwargs):
    """Remap file locations in an app invocation's arguments using *mapping*.

    Args:
        mapping: dict from original file path to remapped path.
        fn_args: positional arguments; any File-like entries are remapped
            in place.
        fn_kwargs: keyword arguments; mutated in place.
    """
    # Remap any positional argument that looks like a File.
    remap_list_of_files(mapping, fn_args)

    # Keyword arguments need special cases: "inputs"/"outputs" are lists
    # of Files, while "stdout"/"stderr" are filename specs, not Files.
    # The elif chain keeps the three cases mutually exclusive, so a list
    # value is never also handed to remap_location.
    for kwarg, maybe_file in fn_kwargs.items():
        if kwarg in ("inputs", "outputs"):
            remap_list_of_files(mapping, maybe_file)
        elif kwarg in ("stdout", "stderr"):
            (fname, mode) = get_std_fname_mode(kwarg, maybe_file)
            if fname in mapping:
                fn_kwargs[kwarg] = (mapping[fname], mode)
        else:
            # Treat anything else as a possible File to be remapped.
            remap_location(mapping, maybe_file)
def _std_output_to_wq(self, fdname, stdfspec):
    """Resolve the file backing stdout or stderr into a ParslFileToWq.

    These redirect files are never cached. Absolute paths are assumed to
    be reachable by the worker, so only relative paths are staged.
    """
    fname, _mode = putils.get_std_fname_mode(fdname, stdfspec)
    stage_it = not os.path.isabs(fname)
    return ParslFileToWq(fname, stage=stage_it, cache=False)