# Regression driver: rebuild the Boost-wide test reports for one tag from the
# local "results" tree, with every network-dependent step stubbed out.
import os
import boost_wide_report
import common
import utils
import shutil
import time

tag = "CVS-HEAD"

# Start from a clean slate: remove any previously merged results so the
# execute_tasks call below reprocesses everything.
# Consistency fix: derive the path from `tag` instead of repeating the
# "CVS-HEAD" literal, so changing the tag updates the cleanup path too.
merged_dir = "results/incoming/%s/processed/merged" % tag
if os.path.exists( merged_dir ):
    shutil.rmtree( merged_dir )

# Stub out the FTP download and archive-unpacking tasks -- this regression
# run works entirely from files already present on disk.
boost_wide_report.ftp_task = lambda ftp_site, site_path, incoming_dir: 1
boost_wide_report.unzip_archives_task = lambda incoming_dir, processed_dir, unzip: 1

boost_wide_report.execute_tasks(
      tag = tag
    , user = None
    , run_date = common.format_timestamp( time.gmtime() )
    , comment_file = os.path.abspath( "comment.html" )
    , results_dir = os.path.abspath( "results" )
    , output_dir = os.path.abspath( "output" )
    , reports = [ "x", "ds", "dd", "dsr", "ddr", "us", "ud", "usr", "udr" ]
    , warnings = [ 'Warning text 1', 'Warning text 2' ]
    , extended_test_results = os.path.abspath( "output/extended_test_results.xml" )
    , dont_collect_logs = 1
    , expected_results_file = os.path.abspath( "expected_results.xml" )
    , failures_markup_file = os.path.abspath( "explicit-failures-markup.xml" )
    )
def make_test_results():
    """Generate one synthetic ``runner_NN.xml`` results file per test runner.

    Creates ``results_directory`` if missing, then writes a ``test-run``
    document for each runner containing an HTML comment, one library-build
    log per (library, toolset) pair, and a grid of per-test logs whose
    pass/fail pattern is a deterministic function of the runner, toolset and
    test indices.  Relies on module-level configuration
    (``results_directory``, ``num_of_runners``, ``num_of_libs``,
    ``num_of_toolsets``, ``num_of_tests``) and helpers
    (``test_run_source``, ``test_run_type``, ``library_build_failed``,
    ``common.make_test_log``).
    """
    if not os.path.exists(results_directory):
        os.makedirs(results_directory)

    for i_runner in range(num_of_runners):
        runner_id = "runner_%02d" % i_runner
        last_runner = (num_of_runners - 1 == i_runner)

        # Fix: close (and flush) the output file deterministically -- the
        # original leaked the handle returned by open().
        with open(os.path.join(results_directory, runner_id + ".xml"), "w") as out:
            g = xml.sax.saxutils.XMLGenerator(out)

            platform = "Win32" if i_runner % 2 else "Unix"

            g.startElement(
                "test-run",
                {
                    "platform": platform,
                    "runner": runner_id,
                    # Each successive runner's results are one day older.
                    "timestamp": common.format_timestamp(
                        time.gmtime(time.time() - i_runner * 24 * 60 * 60)),
                    "source": test_run_source(i_runner),
                    "run-type": test_run_type(i_runner)
                })

            g.startElement("comment", {})
            g.characters("<b>Runner</b> is who <i>running</i> does.")
            g.endElement("comment")

            # One library-build log per (library, toolset) pair.
            for i_lib in range(num_of_libs):
                for i_toolset in range(num_of_toolsets):
                    build_result = "fail" if library_build_failed(i_lib) else "success"
                    common.make_test_log(xml_generator=g,
                                         library_idx=i_lib,
                                         toolset_idx=i_toolset,
                                         test_name="",
                                         test_type="lib",
                                         test_result=build_result,
                                         show_run_output="false",
                                         variant=None)

            # Per-test logs.  The last runner deliberately skips every other
            # library, toolset and test to simulate incomplete results.
            for i_lib in range(num_of_libs):
                if last_runner and i_lib % 2:
                    continue
                for i_toolset in range(num_of_toolsets):
                    if last_runner and i_toolset % 2:
                        continue
                    for i_test in range(num_of_tests):
                        if last_runner and i_test % 2:
                            continue

                        test_name = "test_%02d_%02d" % (i_lib, i_test)
                        test_type = "run"

                        # Odd runners report success, even runners failure;
                        # runner 1 / toolset 2 additionally fails every 6th test.
                        if i_runner % 2:
                            test_result = "success"
                        else:
                            test_result = "fail"
                        if i_runner == 1 and i_toolset == 2 and i_test % 6 == 0:
                            test_result = "fail"

                        # Expose run output for every 5th successful test.
                        show_run_output = "false"
                        if test_result == "success" and (0 == i_test % 5):
                            show_run_output = "true"

                        # Toolset 2 exercises both library-linkage variants.
                        if i_toolset == 2:
                            variants = ["static-lib", "shared-lib"]
                        else:
                            variants = [None]

                        for variant in variants:
                            common.make_test_log(xml_generator=g,
                                                 library_idx=i_lib,
                                                 toolset_idx=i_toolset,
                                                 test_name=test_name,
                                                 test_type=test_type,
                                                 test_result=test_result,
                                                 show_run_output=show_run_output,
                                                 variant=variant)

            g.endElement("test-run")
# Regression driver: rebuild the Boost-wide test reports (including the "i"
# report) for one tag from the local "results" tree, with network steps
# stubbed out.
import os
import boost_wide_report
import common
import utils
import shutil
import time

tag = "CVS-HEAD"

# Start from a clean slate: remove any previously merged results so the
# execute_tasks call below reprocesses everything.
# Consistency fix: derive the path from `tag` instead of repeating the
# "CVS-HEAD" literal, so changing the tag updates the cleanup path too.
merged_dir = "results/incoming/%s/processed/merged" % tag
if os.path.exists( merged_dir ):
    shutil.rmtree( merged_dir )

# Stub out the FTP download and archive-unpacking tasks -- this regression
# run works entirely from files already present on disk.
boost_wide_report.ftp_task = lambda ftp_site, site_path, incoming_dir: 1
boost_wide_report.unzip_archives_task = lambda incoming_dir, processed_dir, unzip: 1

boost_wide_report.execute_tasks(
      tag = tag
    , user = None
    , run_date = common.format_timestamp( time.gmtime() )
    , comment_file = os.path.abspath( "comment.html" )
    , results_dir = os.path.abspath( "results" )
    , output_dir = os.path.abspath( "output" )
    , reports = [ "i", "x", "ds", "dd", "dsr", "ddr", "us", "ud", "usr", "udr" ]
    , warnings = [ 'Warning text 1', 'Warning text 2' ]
    , extended_test_results = os.path.abspath( "output/extended_test_results.xml" )
    , dont_collect_logs = 1
    , expected_results_file = os.path.abspath( "expected_results.xml" )
    , failures_markup_file = os.path.abspath( "explicit-failures-markup.xml" )
    )
def make_test_results():
    """Generate one synthetic ``runner_NN.xml`` results file per test runner.

    Creates ``results_directory`` if missing, then writes a ``test-run``
    document for each runner containing an HTML comment, one library-build
    log per (library, toolset) pair, and a grid of per-test logs whose
    pass/fail pattern is a deterministic function of the runner, toolset and
    test indices.  Relies on module-level configuration
    (``results_directory``, ``num_of_runners``, ``num_of_libs``,
    ``num_of_toolsets``, ``num_of_tests``) and helpers
    (``test_run_source``, ``test_run_type``, ``test_type``,
    ``library_build_failed``, ``common.make_test_log``).
    """
    if not os.path.exists(results_directory):
        os.makedirs(results_directory)

    for i_runner in range(num_of_runners):
        runner_id = "runner_%02d" % i_runner
        last_runner = (num_of_runners - 1 == i_runner)

        # Fix: close (and flush) the output file deterministically -- the
        # original leaked the handle returned by open().
        with open(os.path.join(results_directory, runner_id + ".xml"), "w") as out:
            g = xml.sax.saxutils.XMLGenerator(out)

            platform = "Win32" if i_runner % 2 else "Unix"

            g.startElement(
                "test-run",
                {
                    "platform": platform,
                    "runner": runner_id,
                    # Each successive runner's results are one day older.
                    "timestamp": common.format_timestamp(
                        time.gmtime(time.time() - i_runner * 24 * 60 * 60)),
                    "source": test_run_source(i_runner),
                    "run-type": test_run_type(i_runner),
                },
            )

            g.startElement("comment", {})
            g.characters("<b>Runner</b> is who <i>running</i> does.")
            g.endElement("comment")

            # One library-build log per (library, toolset) pair.
            for i_lib in range(num_of_libs):
                for i_toolset in range(num_of_toolsets):
                    build_result = "fail" if library_build_failed(i_lib) else "success"
                    common.make_test_log(
                        xml_generator=g,
                        library_idx=i_lib,
                        toolset_idx=i_toolset,
                        test_name="",
                        test_type="lib",
                        test_result=build_result,
                        show_run_output="false",
                        variant=None,
                    )

            # Per-test logs.  The last runner deliberately skips every other
            # library, toolset and test to simulate incomplete results.
            for i_lib in range(num_of_libs):
                if last_runner and i_lib % 2:
                    continue
                for i_toolset in range(num_of_toolsets):
                    if last_runner and i_toolset % 2:
                        continue
                    for i_test in range(num_of_tests):
                        if last_runner and i_test % 2:
                            continue

                        test_name = "test_%02d_%02d" % (i_lib, i_test)

                        # Odd runners report success, even runners failure;
                        # runner 1 / toolset 2 additionally fails every 6th test.
                        if i_runner % 2:
                            test_result = "success"
                        else:
                            test_result = "fail"
                        if i_runner == 1 and i_toolset == 2 and i_test % 6 == 0:
                            test_result = "fail"

                        # Expose run output for every 5th successful test.
                        show_run_output = "false"
                        if test_result == "success" and (0 == i_test % 5):
                            show_run_output = "true"

                        # Toolset 2 exercises both library-linkage variants.
                        if i_toolset == 2:
                            variants = ["static-lib", "shared-lib"]
                        else:
                            variants = [None]

                        for variant in variants:
                            common.make_test_log(
                                xml_generator=g,
                                library_idx=i_lib,
                                toolset_idx=i_toolset,
                                test_name=test_name,
                                # NOTE(review): `test_type` here is presumably a
                                # module-level helper mapping test index to a
                                # type string -- confirm it exists in this module.
                                test_type=test_type(i_test),
                                test_result=test_result,
                                show_run_output=show_run_output,
                                variant=variant,
                            )

            g.endElement("test-run")