def _get_section_header_string_for_element(self, job_base_name, path_of_file,
                                           title, last_path):
    index_after_which_to_use = path_of_file.index(job_base_name) + 1
    shortened_path = path_of_file[index_after_which_to_use:]
    if not shortened_path:
        level = '\\subsubsection'
    elif len(shortened_path) == 1:
        level = '\\myparagraph'
    elif len(shortened_path) == 2:
        level = '\\mysubparagraph'
    else:
        # Depth of three or more: default to the deepest level so that
        # 'level' is always defined, even if the loop below emits no header
        # (e.g. when the path matches last_path in its first three elements).
        level = '\\mysubsubparagraph'
    output = ''
    for idx, path_element in enumerate(shortened_path):
        if len(last_path) <= idx or path_element != last_path[idx]:
            if idx == 0:
                output += '\n\\subsubsection{%s}\n' % sanitize(path_element)
                level = '\\myparagraph'
            elif idx == 1:
                output += '\n\\myparagraph{%s}\n' % sanitize(path_element)
                level = '\\mysubparagraph'
            elif idx == 2:
                output += '\n\\mysubparagraph{%s}\n' % sanitize(path_element)
                level = '\\mysubsubparagraph'
    last_path = shortened_path
    if len(shortened_path) > 3:
        path_to_print = os.path.join('/'.join(shortened_path[3:]), title)
    else:
        path_to_print = title
    output += '\n%s{%s}\n' % (level, sanitize(path_to_print))
    return output, last_path
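# Sketch of the nesting behavior above (hypothetical paths): for
#   path_of_file = ['results', 'job_a', 'plots'], job_base_name = 'job_a',
#   title = 'errors.pdf', last_path = []
# shortened_path is ['plots'], so '\subsubsection{plots}' is emitted for the
# first unseen path element and the file title is printed one level deeper as
# '\myparagraph{errors.pdf}'. On the next call for the same directory, the
# returned last_path suppresses the duplicate '\subsubsection' header.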
def _write_table_header(self, evaluation_runs):
    output = ''
    for run in evaluation_runs:
        output += '& \\rotatebox{90}{\\textbf{%s}}' % sanitize(
            format_date(run.timestamp))
    output += '\\\\\n'
    return output
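# Produces one rotated, bold column header per evaluation run, e.g.
# (hypothetical timestamp; the exact text depends on format_date()):
#   & \rotatebox{90}{\textbf{12.03.2018 14:30:05}} \\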
def _experiment_name_to_title(self, name):
    name_split = name.split('_')
    if len(name_split) <= 2:
        return name
    if len(name_split[0]) != 8 or len(name_split[1]) != 6:
        # Not a correct 'YYYYMMDD_HHMMSS' timestamp. ('or' here: if either
        # field has the wrong length, the slicing below would produce
        # garbage, so bail out with the raw name.)
        return name
    title = '\\texttt{' + sanitize('_'.join(name_split[2:])) + '} ('
    title += (sanitize(name_split[0][6:8]) + '.' + sanitize(
        name_split[0][4:6]) + '.' + sanitize(name_split[0][0:4]))
    title += (', ' + sanitize(name_split[1][0:2]) + ':' + sanitize(
        name_split[1][2:4]) + ':' + sanitize(name_split[1][4:6])) + ')'
    return title
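# Example for the function above (hypothetical experiment name following the
# 'YYYYMMDD_HHMMSS_<name>' convention it assumes):
#   _experiment_name_to_title(self, '20180312_143005_loop_closure')
#   -> '\texttt{loop\_closure} (12.03.2018, 14:30:05)'
# assuming sanitize() escapes underscores for LaTeX.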
def format_command(command, highlight_first_n_words=0):
    command_words = command.split()
    for idx, word in enumerate(command_words):
        word = sanitize(word)
        if idx < highlight_first_n_words:
            word = '{\\color{MidnightBlue}%s}' % word
        elif word[0] == '-':
            # Color flags up to the '=' sign.
            word_segments = word.split('=')
            word = '{\\color{PineGreen}%s}' % word_segments[0]
            if len(word_segments) > 1:
                word += '=' + '='.join(word_segments[1:])
        command_words[idx] = word
    return ' '.join(command_words)
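# Usage sketch for format_command above (hypothetical command):
#   format_command('rosrun my_pkg my_node --alpha=0.1',
#                  highlight_first_n_words=2)
# colors 'rosrun' and 'my_pkg' in MidnightBlue and '--alpha' in PineGreen,
# leaving 'my_node' and the '=0.1' value uncolored.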
def _write_base_structure(self):
    template_file_path = os.path.join(
        catkinFindSrc('maplab_evaluation'), 'latex', 'template.tex')
    with open(template_file_path, 'r') as template_file:
        template = template_file.read()
    content_summary = '\\input{%s}\n' % os.path.realpath(
        'long_term_summary.tex')
    last_job_prefix = None
    content_all_jobs = ''
    for job_name in self.all_individual_experiments:
        job_prefix = os.path.dirname(job_name)
        if job_prefix != last_job_prefix:
            last_job_prefix = job_prefix
            content_all_jobs += '\\pagebreak\n'
            content_all_jobs += '\n\\section{%s}\n' % sanitize(job_prefix)
        content_all_jobs += self._write_output_for_job(job_name)
    with open(self.report_file, 'w+') as output_file:
        output_file.write(
            template.replace('SUMMARYCONTENT', content_summary).replace(
                'PERJOBCONTENT', content_all_jobs))
def _job_name_to_title(self, name):
    name_split = name.split('__')
    if len(name_split) != 2:
        return name
    return ('Dataset \\texttt{' + sanitize(name_split[0]) +
            '}, parameters \\texttt{' + sanitize(name_split[1]) + '}')
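# Example for the function above (hypothetical job name following the
# '<dataset>__<parameters>' convention it assumes):
#   _job_name_to_title(self, 'euroc_mh1__default')
#   -> 'Dataset \texttt{euroc\_mh1}, parameters \texttt{default}'
# assuming sanitize() escapes underscores for LaTeX.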
def write_job_yaml(yaml_file):
    job_dict = yaml.load(open(yaml_file))
    output = '\\begin{description}'
    output += ' \\item[Job folder:] %s\n' % sanitize(
        os.path.realpath(os.path.dirname(yaml_file)))
    for name, key in zip(
        [  # pylint: disable=bad-continuation
            'Package name', 'Executable', 'Experiment name',
            'Experiment description', 'Experiment root folder',
            'Parameter file'
        ],
        [  # pylint: disable=bad-continuation
            'app_package_name', 'app_executable', 'experiment_filename',
            # 'description' must stay in this list so that the two zipped
            # lists remain aligned; omitting it shifts every later label.
            'description', 'experiment_root_folder', 'parameter_file'
        ]):
        if key in job_dict:
            output += '\\item[%s:] %s\n' % (sanitize(name),
                                            sanitize(str(job_dict[key])))
    job_summary_file = os.path.join(
        os.path.dirname(yaml_file), 'job_summary.yaml')
    output += ' \\item[Revision:]'
    if os.path.isfile(job_summary_file):
        job_summary = yaml.load(open(job_summary_file))
        revision = str(job_summary['executable']['rev'])
        output += ' %s\n' % sanitize(revision)
        if 'app_package_name' in job_dict:
            output += ' \\\\%s\n' % sanitize(
                get_commit_info(job_dict['app_package_name'], revision))
    else:
        output += 'Unknown (no job\\_summary.yaml available)\n'
    output += '\\item[Datasets:] {\\color{white}.}\\\\\n'
    output += ' \\begin{itemize}\n'
    for dataset_dict in job_dict['datasets']:
        output += '\\item Dataset \\inltt{%s}\\\\\n' % sanitize(
            dataset_dict['name'])
        command = 'rosrun %s %s<NEWLINE>' % (sanitize(
            job_dict['app_package_name']), sanitize(
                job_dict['app_executable']))
        for name, value in dataset_dict['parameters'].iteritems():
            command += ' --%s=%s<NEWLINE>' % (name, value)
        command = format_command(command)
        command = command.replace('<NEWLINE>', '\\\\ \\hspace*{0.5cm}')
        output += (' Command:\n\\begin{ttblock}%s\\end{ttblock}\n\n' %
                   command)
        output += ' Additional dataset parameters (used for placeholder ' \
                  'replacement and evaluation scripts):\n\n'
        output += ' \\begin{longtabu} to \\textwidth {X[1,l]X[2,l]}\n'
        output += ' \\toprule\n'
        output += ' \\textbf{Name} & \\textbf{Value} \\\\\n'
        for name, value in dataset_dict['additional_parameters'].iteritems():
            output += ' \\midrule\n'
            output += ' \\inltt{%s} & \\inltt{%s} \\\\\n' % (
                sanitize(name), sanitize(str(value)))
        output += ' \\bottomrule\n'
        output += ' \\end{longtabu}\n'
    output += ' \\end{itemize}\n'
    if 'evaluation_scripts' in job_dict:
        output += ' \\item[Evaluation scripts:] These are run after the ' \
                  'estimator is done and the console has run.\n'
        output += ' \\begin{itemize}\n'
        for evaluation_dict in job_dict['evaluation_scripts']:
            output += ' \\item\\begin{description}\n'
            output += ' \\item[Name:] \\inltt{%s}\n' % sanitize(
                evaluation_dict['name'])
            if 'arguments' in evaluation_dict:
                output += ' \\item[Arguments:] \\inltt{%s}\n' % sanitize(
                    str(evaluation_dict['arguments']))
            output += ' \\end{description}\n'
        output += ' \\end{itemize}\n'
    output += '\\end{description}'
    return output
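# Sketch of the job.yaml structure write_job_yaml consumes (field names taken
# from the accesses above; all values are hypothetical):
#   app_package_name: my_package
#   app_executable: my_node
#   experiment_filename: my_experiment
#   datasets:
#     - name: dataset_a
#       parameters: {alpha: 0.1}
#       additional_parameters: {bag_file: dataset_a.bag}
#   evaluation_scripts:
#     - name: compute_errors
#       arguments: --plot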
def _write_report_for_job(self, job, last_job_prefix):
    job_path = os.path.join(self.results_folder, job)
    job_prefix = os.path.dirname(job_path)
    job_report_path = os.path.realpath(
        os.path.join(job_path, 'job_report.tex'))
    with open(job_report_path, 'w+') as job_report:
        job_name = os.path.basename(job)
        if last_job_prefix == job_prefix:
            job_report.write('\\pagebreak\n')
        job_report.write(
            '\\subsection{%s}\n' % self._job_name_to_title(job_name))
        job_report.write(
            '\\label{%s}\n' % sanitize(self.job_path_to_id[job]))
        job_report.write('\\begin{tabular}{lcr}\n'
                         ' \\toprule\n'
                         ' \\textbf{Name} & \\textbf{Status} & '
                         '\\textbf{Exit code} \\\\\n'
                         ' \\midrule\n')
        job_results = self.evaluation_results[job]
        for evaluation_name, evaluation_result in job_results.iteritems():
            job_report.write(' %s & %s & %i\\\\\n' %
                             (sanitize(evaluation_name),
                              '{\\color{NavyBlue}Success}'
                              if evaluation_result == 0 else
                              '{\\color{red}Failure}', evaluation_result))
        job_report.write(' \\bottomrule\n'
                         '\\end{tabular}\n')

        # Get all yamls and pdfs generated for this job.
        all_yamls_in_job = self._get_all_files_filtered_in_job(
            job_path, '*.yaml')
        all_pdfs_in_job = self._get_all_files_filtered_in_job(
            job_path, '*.pdf')
        all_files_in_job = all_yamls_in_job + all_pdfs_in_job
        # Move job.yaml to the front and console_commands.yaml to the second
        # position; the rest is sorted alphabetically below.
        if len(all_files_in_job) > 1:
            for idx, path in enumerate(all_files_in_job):
                file_name = os.path.basename(path)
                if file_name == 'job.yaml':
                    all_files_in_job[idx] = all_files_in_job[0]
                    all_files_in_job[0] = path
                elif file_name == 'console_commands.yaml':
                    all_files_in_job[idx] = all_files_in_job[1]
                    all_files_in_job[1] = path
        # Filter out some files from the list.
        all_files_in_job = [
            file_name for file_name in all_files_in_job
            if os.path.basename(file_name) not in
            ['sensors.yaml', 'job_summary.yaml']
        ]
        if len(all_files_in_job) > 2:
            all_files_in_job[2:] = sorted(all_files_in_job[2:])
        last_path = []
        for gen_file in all_files_in_job:
            file_name = os.path.basename(gen_file)
            path = os.path.dirname(gen_file).split('/')
            title_output, last_path = \
                self._get_section_header_string_for_element(
                    job_name, path, file_name, last_path)
            job_report.write(title_output)
            if file_name == 'console_commands.yaml':
                job_report.write(
                    yaml_outputter.write_console_commands_yaml(gen_file))
            elif file_name == 'job.yaml':
                job_report.write(yaml_outputter.write_job_yaml(gen_file))
            elif file_name == 'errors.yaml':
                job_report.write(yaml_outputter.write_errors_yaml(gen_file))
            elif file_name.split('.')[-1] == 'yaml':
                job_report.write(
                    yaml_outputter.write_generic_yaml_file(gen_file))
            elif file_name.split('.')[-1] == 'pdf':
                real_pdf_path = os.path.realpath(gen_file)
                job_report.write(
                    '\\includegraphics[width=0.9\\textwidth]{%s}\n' %
                    real_pdf_path)
    # Return the job prefix so that the outer loop can pass this in when
    # writing the next job. This is necessary to avoid a pagebreak just
    # after the section (= experiment) title.
    return job_prefix
def _job_overview_table_print_errors_yaml(self, evaluation_results, job_name,
                                          yaml_file, file_name_to_label_map):
    output = '\\midrule\n'
    output += (' \\multicolumn{%i}{l}{\\textbf{'
               '\\hyperref[%s]{%s}}}\\\\\n' %
               (len(evaluation_results), file_name_to_label_map[yaml_file],
                sanitize(yaml_file)))
    at_least_one_value_printed_for_job = False
    for label, outer_tag, inner_tag in zip(
        [
            'Position mean [m]', 'Position RMSE [m]',
            'Max position error [m]', 'Orientation mean [rad]',
            'Orientation RMSE [rad]', 'Max orientation error [rad]'
        ],
        [
            value for value in ['position_errors', 'orientation_errors']
            for _ in range(3)
        ],
        ['mean', 'rmse', 'max'] * 2):
        at_least_one_value_printed_in_row = False
        output_of_row = ' %s' % sanitize(label)
        for idx, run_2 in enumerate(evaluation_results):
            value_printed = False
            if idx == 0:
                # Ignore first value as there is no previous value available
                # in the list to compare to.
                continue
            run_1 = evaluation_results[idx - 1]
            file_path_1 = ''
            file_path_2 = ''
            if job_name in run_1.jobs:
                file_path_1 = os.path.join(run_1.path, run_1.jobs[job_name],
                                           yaml_file)
            if job_name in run_2.jobs:
                file_path_2 = os.path.join(run_2.path, run_2.jobs[job_name],
                                           yaml_file)
            if os.path.isfile(file_path_1) and os.path.isfile(file_path_2):
                errors_1 = yaml.load(open(file_path_1))
                errors_2 = yaml.load(open(file_path_2))
                if (outer_tag in errors_1
                        and inner_tag in errors_1[outer_tag]
                        and outer_tag in errors_2
                        and inner_tag in errors_2[outer_tag]):
                    if errors_1[outer_tag][inner_tag] != 0:
                        ratio_change = (
                            (errors_2[outer_tag][inner_tag] -
                             errors_1[outer_tag][inner_tag]) /
                            errors_1[outer_tag][inner_tag])
                        color = 'black'
                        if ratio_change > 0:
                            color = 'red'
                            if (idx == len(evaluation_results) - 1
                                    and ratio_change > 0.05):
                                # Last entry: print warning for jenkins
                                # parser that result got worse.
                                print('Evaluation job progression: '
                                      'performance of job "%s" in "%s" for '
                                      'label "%s" decreased by %.2f%% '
                                      'between the last two runs.' %
                                      (job_name, yaml_file, label,
                                       ratio_change * 100))
                        elif ratio_change < 0:
                            color = 'ForestGreen'
                        output_of_row += (
                            ' & {\\leavevmode'
                            '\\color{%s}%+.2f\\%%}') % (color,
                                                        ratio_change * 100)
                        value_printed = True
                        at_least_one_value_printed_in_row = True
                        at_least_one_value_printed_for_job = True
            if not value_printed:
                output_of_row += ' & --'
        if at_least_one_value_printed_in_row:
            output += output_of_row
            output += '\\\\\n'
    if not at_least_one_value_printed_for_job:
        output += (' \\multicolumn{%i}{l}{'
                   '(Not enough runs to show progression. '
                   'Need at least two consecutive runs to show any data.)}'
                   '\\\\\n') % len(evaluation_results)
    return output
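# The progression metric above is the relative change between two
# consecutive runs:
#   ratio_change = (err_new - err_old) / err_old
# e.g. (hypothetical values) err_old = 0.20 m and err_new = 0.21 m give
# +5.00%, printed in red; improvements are printed in ForestGreen.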
def _write_output_for_job(self, job_name):
    job_base_name = os.path.basename(job_name)
    output = '\\subsection{%s}\n' % sanitize(job_base_name)
    output += '\\label{%s}\n' % self.job_path_to_id[job_name]
    # Collect the yaml files of this job across all evaluation runs.
    all_yaml_files_set = set()
    for evaluation_run in self.all_evaluation_runs:
        if job_name in evaluation_run.jobs:
            job_path = os.path.join(evaluation_run.path,
                                    evaluation_run.jobs[job_name])
            all_yamls_in_job = self._get_all_files_filtered_in_job(
                job_path, '*.yaml')
            for yaml_file in all_yamls_in_job:
                all_yaml_files_set.add(os.path.relpath(yaml_file, job_path))
    all_yaml_files = list(all_yaml_files_set)
    # Move job_summary.yaml to the front and sort the remaining files.
    if all_yaml_files:
        for idx, yaml_file in enumerate(all_yaml_files):
            file_name = os.path.basename(yaml_file)
            if file_name == 'job_summary.yaml':
                all_yaml_files[idx] = all_yaml_files[0]
                all_yaml_files[0] = 'job_summary.yaml'
        if len(all_yaml_files) > 1:
            all_yaml_files[1:] = sorted(all_yaml_files[1:])
    file_name_to_label_map = {}
    if len(self.all_evaluation_runs) >= 2:
        # Show at most the last five runs in the overview table.
        if len(self.all_evaluation_runs) > 5:
            evaluation_runs_in_overview = self.all_evaluation_runs[-5:]
        else:
            evaluation_runs_in_overview = self.all_evaluation_runs
        output += '\\subsubsection{Overview}\n'
        output += '\\begin{longtabu} to \\textwidth{l'
        output += 'X[r]' * (len(evaluation_runs_in_overview) - 1)
        output += '}\n'
        output += '\\toprule\n'
        for idx, evaluation_run in enumerate(evaluation_runs_in_overview):
            if idx == 0:
                continue
            output += (
                '& \\rotatebox{90}{\\parbox{4cm}{'
                '{\\color{white}to }\\textbf{%s}\\\\to \\textbf{%s}}}') % (
                    sanitize(
                        format_date(
                            evaluation_runs_in_overview[idx - 1].timestamp,
                            new_line_between_date_and_time=False)),
                    sanitize(
                        format_date(
                            evaluation_run.timestamp,
                            new_line_between_date_and_time=False)))
        output += '\\\\\n'
        for yaml_file in all_yaml_files:
            if yaml_file not in file_name_to_label_map:
                file_name_to_label_map.update(
                    {yaml_file: generate_random_string(length=10)})
            if os.path.basename(yaml_file) == 'errors.yaml':
                output += self._job_overview_table_print_errors_yaml(
                    evaluation_runs_in_overview, job_name, yaml_file,
                    file_name_to_label_map)
        output += '\\bottomrule\n'
        output += '\\end{longtabu}\n'
    last_path = []
    for yaml_file in all_yaml_files:
        generated_output = None
        title = None
        if os.path.basename(yaml_file) == 'job_summary.yaml':
            title = 'Revision info'
            generated_output = '\\begin{longtabu} to \\textwidth {lX}'
            generated_output += '\\toprule\n'
            generated_output += '\\textbf{Run} & \\textbf{Revision} \\\\\n'
            generated_output += '\\midrule\n'
            for run in self.all_evaluation_runs:
                generated_output += ' %s & ' % sanitize(
                    format_date(run.timestamp))
                if job_name not in run.jobs:
                    generated_output += '(Not run)\\\\\n'
                    continue
                relative_job_path = run.jobs[job_name]
                job_yaml_path = os.path.join(run.path, relative_job_path,
                                             'job.yaml')
                summary_path = os.path.join(run.path, relative_job_path,
                                            'job_summary.yaml')
                if os.path.isfile(job_yaml_path) and os.path.isfile(
                        summary_path):
                    job_dict = yaml.load(open(job_yaml_path))
                    job_summary = yaml.load(open(summary_path))
                    revision = str(job_summary['executable']['rev'])
                    generated_output += 'Revision: %s\n\n' % sanitize(
                        revision)
                    if 'app_package_name' in job_dict:
                        generated_output += sanitize(
                            get_commit_info(job_dict['app_package_name'],
                                            revision))
                else:
                    generated_output += '(No revision info available)'
                generated_output += '\\\\\n'
            generated_output += '\\bottomrule\n'
            generated_output += '\\end{longtabu}\n'
        elif os.path.basename(yaml_file) == 'errors.yaml':
            generated_plots = error_history_plot(self.all_evaluation_runs,
                                                 job_name, yaml_file)
            generated_output = ''
            for plot in generated_plots:
                generated_output += (
                    '\\includegraphics[width=0.9\\textwidth]{%s}\n\n' %
                    os.path.realpath(plot))
        if generated_output is None:
            continue
        path = os.path.dirname(os.path.join(job_path, yaml_file)).split('/')
        title_output, last_path = (
            self._get_section_header_string_for_element(
                job_base_name, path,
                os.path.basename(yaml_file) if title is None else title,
                last_path))
        output += title_output
        if yaml_file in file_name_to_label_map:
            output += '\\label{%s}\n' % file_name_to_label_map[yaml_file]
        output += generated_output
    return output