def file_merged_test_results(file_spec, input_data):
    """Generate the file(s) with algorithm: file_merged_test_results
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """

    file_name = "{0}{1}".format(file_spec["output-file"],
                                file_spec["output-file-ext"])
    rst_header = file_spec["file-header"]

    logging.info(" Generating the file {0} ...".format(file_name))

    table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
    if len(table_lst) == 0:
        logging.error(" No tables to include in '{0}'. Skipping.".format(
            file_spec["dir-tables"]))
        return None

    logging.info(" Writing file '{0}'".format(file_name))

    # Build the merged data sets for tests and suites.
    logging.info(" Creating the data set for the {0} '{1}'.".format(
        file_spec.get("type", ""), file_spec.get("title", "")))
    tests = input_data.filter_data(file_spec)
    tests = input_data.merge_data(tests)

    logging.info(" Creating the data set for the {0} '{1}'.".format(
        file_spec.get("type", ""), file_spec.get("title", "")))
    suites = input_data.filter_data(file_spec, data_set="suites")
    suites = input_data.merge_data(suites)
    suites.sort_index(inplace=True)

    # Write the RST header and one section per suite, including the generated
    # tables that belong to that suite.
    with open(file_name, "w") as file_handler:
        file_handler.write(rst_header)
        for suite_longname, suite in suites.iteritems():
            # Skip the check suites and suites above the configured
            # start level.
            if "ndrchk" in suite_longname or "pdrchk" in suite_longname:
                continue
            if len(suite_longname.split(".")) <= file_spec["data-start-level"]:
                continue
            suite_name = suite["name"]
            file_handler.write("\n{0}\n{1}\n".format(
                suite_name,
                get_rst_title_char(
                    suite["level"] - file_spec["data-start-level"] - 1) *
                len(suite_name)))
            file_handler.write("\n{0}\n".format(suite["doc"].replace(
                '|br|', '\n\n -')))
            if _tests_in_suite(suite_name, tests):
                for tbl_file in table_lst:
                    if suite_name in tbl_file:
                        file_handler.write(
                            RST_INCLUDE_TABLE.format(
                                file_latex=tbl_file,
                                file_html=tbl_file.split("/")[-1]))

    logging.info(" Done.")
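
# A minimal sketch (assumption, not this module's real definition) of what the
# RST_INCLUDE_TABLE template used above could look like. Whatever its exact
# content, it must expose the 'file_latex' and 'file_html' placeholders that
# the .format() calls above fill in: a CSV table for the HTML build and the
# full table path for the LaTeX build. The name below is hypothetical so it
# does not shadow the real constant defined elsewhere in the package.
_RST_INCLUDE_TABLE_SKETCH = (
    u"\n.. only:: html\n\n"
    u"    .. csv-table::\n"
    u"        :header-rows: 1\n"
    u"        :widths: auto\n"
    u"        :align: center\n"
    u"        :file: {file_html}\n"
    u"\n.. only:: latex\n\n"
    u"    .. raw:: latex\n\n"
    u"        \\csvautolongtable{{{file_latex}}}\n\n"
)
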
def file_test_results(file_spec, input_data):
    """Generate the file(s) with algorithm: file_test_results specified in
    the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """

    file_name = "{0}{1}".format(file_spec["output-file"],
                                file_spec["output-file-ext"])
    rst_header = file_spec["file-header"]

    logging.info(" Generating the file {0} ...".format(file_name))

    table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
    if len(table_lst) == 0:
        logging.error(" No tables to include in '{0}'. Skipping.".format(
            file_spec["dir-tables"]))
        return None

    # Use the first job/build pair from the specification as the data source.
    job = file_spec["data"].keys()[0]
    build = str(file_spec["data"][job][0])

    logging.info(" Writing file '{0}'".format(file_name))

    suites = input_data.suites(job, build)[file_spec["data-start-level"]:]
    suites.sort_index(inplace=True)

    # Write the RST header and one section per suite, including the generated
    # tables that belong to that suite.
    with open(file_name, "w") as file_handler:
        file_handler.write(rst_header)
        for suite_longname, suite in suites.iteritems():
            suite_name = suite["name"]
            file_handler.write("\n{0}\n{1}\n".format(
                suite_name,
                get_rst_title_char(
                    suite["level"] - file_spec["data-start-level"] - 1) *
                len(suite_name)))
            file_handler.write("\n{0}\n".format(suite["doc"].replace(
                '|br|', '\n\n -')))
            if _tests_in_suite(suite_name, input_data.tests(job, build)):
                for tbl_file in table_lst:
                    if suite_name in tbl_file:
                        file_handler.write(
                            RST_INCLUDE_TABLE.format(
                                file_latex=tbl_file,
                                file_html=tbl_file.split("/")[-1]))

    logging.info(" Done.")
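
# Illustrative sketches only (assumptions, not this package's real helpers):
# roughly how the _tests_in_suite() check and the get_rst_title_char() lookup
# used above might behave. The 'parent' key and the underline-character order
# are assumptions; the hypothetical names avoid clashing with the real ones.
def _tests_in_suite_sketch(suite_name, tests):
    """Return True if at least one test in 'tests' belongs to 'suite_name'."""
    for key in tests.keys():
        if suite_name == tests[key]["parent"]:
            return True
    return False


def _rst_title_char_sketch(level):
    """Return the character used to underline an RST title at 'level'."""
    chars = ("=", "-", "`", "'", ".", "~", "*", "+", "^", "#")
    return chars[level] if 0 <= level < len(chars) else chars[-1]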