def update_based_on_file_argument(template_file, output_file, pattern, value):
    """Copy template_file to output_file, replacing every occurrence of
    `pattern` with str(value).

    Calls error_msg() if the template contained no instance of the pattern.
    Both paths must already exist; output_file is truncated and rewritten.
    """
    assert os.path.exists(template_file)
    # NOTE(review): output_file is opened with "w" (truncated), so requiring it
    # to pre-exist looks deliberate (e.g. a pre-copied template) -- confirm.
    assert os.path.exists(output_file)
    modified = False
    # Context managers guarantee both handles are closed even if an error
    # occurs mid-copy; the original leaked them on failure.  Iterating the
    # file directly also avoids materializing it with readlines().
    with open(template_file, "r") as tf, open(output_file, "w") as of:
        for line in tf:
            newline = line.replace(pattern, str(value))
            if newline != line:
                modified = True
            # When no replacement happened, newline == line, so a single
            # write path replaces the original's redundant if/else writes.
            of.write(newline)
    if not modified:
        error_msg('Template file "' + template_file +
                  '" did not contain instance of pattern "' + pattern + '".')
def check_ecd(suite):
    """Sanity-check the ECD module `suite` against the module-level `rules`.

    Each rule is a (variable name, expected type, cannot-be-empty) triple.
    Violations are collected as warnings; if any were raised, error_msg is
    invoked and the warning state is reset afterwards.
    """
    debug.debug_msg(3, "Sanity checking the provided ECD...")
    present = suite.__dict__.keys()
    for name, expected_type, must_be_nonempty in rules:
        if name not in present:
            debug.warning_msg("'" + name + "' variable must be present!")
            continue
        value = suite.__dict__[name]
        if type(value) is not expected_type:
            debug.warning_msg("'" + name + "' must be of type: " + str(expected_type))
        if must_be_nonempty and len(value) == 0:
            debug.warning_msg("'" + name + "' cannot be empty!")
    if debug.seen_warnings():
        debug.error_msg("Execution Configuration Description was invalid!")
    debug.reset_warnings()
def main():
    """Entry point: parse CLI args, run (or load) the benchmark suite,
    aggregate statistics, and print/save/email the results.

    NOTE(review): this module defines main() twice; the later definition
    shadows this one -- confirm which is current and remove the other.
    """
    parser = argparse.ArgumentParser(
        description="marky - a benchmark execution and statistics gathering framework")
    parser.add_argument('file', type=str, nargs=1, metavar='FILE',
                        help='A file containing an execution configuration. (or ECD) (Minus the .py)')
    parser.add_argument('--disable-aggregation', '-a', dest='disable_agg', action='store_true',
                        help='Turn off aggregation calculation for this session.')
    parser.add_argument('--warmup', '-w', dest='should_warmup', action='store_true',
                        help='Perform a warmup run of each benchmark that is not recorded.')
    parser.add_argument('--time', '-t', dest='should_time', action='store_true',
                        help='Use marky to measure the runtime of any experiments.')
    parser.add_argument('--explain', '-e', dest='should_explain', action='store_true',
                        help='Explain the experiments that will be run by the provided ECD.')
    parser.add_argument('--print', '-p', dest='should_print', action='store_true',
                        help='"Pretty-print" the results.')
    parser.add_argument('--print-format', '-pf', dest='printfmt', nargs=1, choices=formats,
                        help='Choose which format to print the data in. (default: json)')
    parser.add_argument('--load', '-l', dest='should_load', nargs=1, metavar='FILE',
                        help='Load previous results from a file. (supports JSON only)')
    parser.add_argument('--load-raw', '-lr', dest='should_loadraw', nargs=1, metavar='DIR',
                        help='Load the raw output from each run from the given directory.')
    parser.add_argument('--save', '-s', dest='should_save', nargs=1, metavar='FILE',
                        help='Output the results into a file.')
    parser.add_argument('--save-format', '-sf', dest='savefmt', nargs=1, choices=formats,
                        help='Choose which format to save the data in. (default: json)')
    # Fix: the original help literal contained a raw line break inside a
    # single-quoted string (a syntax error); rejoined into one line.
    parser.add_argument('--email', '-m', dest='should_email', nargs=1, metavar='ADDRESS',
                        help='Send an email to the address once complete. (Uses localhost unless --mailserver is given.)')
    parser.add_argument('--email-format', '-mf', dest='emailfmt', nargs=1, choices=formats,
                        help='Choose which format to email the data in. (default: json)')
    parser.add_argument('--mailserver', '-ms', dest='mailserver', nargs=1, metavar='HOST',
                        help='Use the provided host as a mailserver.')
    parser.add_argument('--save-raw', '-r', dest='should_saveraw', nargs=1, metavar='DIR',
                        help='Save the raw output from each run into the given directory.')
    parser.add_argument('--debug', '-d', dest='debuglevel', nargs=1, type=int, metavar='LEVEL',
                        help='Set debug info level. 1 = Announce each benchmark invocation. 2 = Include time taken. 3 = Everything else. (Default = 1) (Set to 0 for quiet, or use --quiet.)')
    parser.add_argument('--quiet', '-q', dest='quiet', action='store_true',
                        help='Hide all output (apart from errors.)')
    parser.add_argument('--speedups', '-cs', dest='should_calculate_speedups', action='store_true',
                        help='Assuming only two experiments will be run, calculate the speedups.')
    args = parser.parse_args()

    # Debug verbosity: default 2, overridden by --debug, silenced by --quiet.
    config["debuglevel"] = 2
    if args.debuglevel:
        config["debuglevel"] = args.debuglevel[0]
    if args.quiet:
        config["debuglevel"] = 0

    suite = None
    if args.file:
        # (ecd = Execution Configuration Description)
        ecd_name = args.file[0]
        # Fix: strip only a trailing ".py" suffix; the original
        # replace(".py", "") would also mangle names like "my.pyconf".
        if ecd_name.endswith(".py"):
            ecd_name = ecd_name[:-3]
        suite = __import__(ecd_name)
        ecd.check_ecd(suite)

    if args.should_explain:
        ecd.explain_ecd(suite)
        exit(0)

    config["original_dir"] = os.getcwd()

    config["saveraw"] = False
    if args.should_saveraw:
        config["saveraw"] = True
        config["saveraw_dir"] = config["original_dir"] + "/" + args.should_saveraw[0]

    config["loadraw"] = False
    if args.should_loadraw:
        config["loadraw"] = True
        config["loadraw_dir"] = config["original_dir"] + "/" + args.should_loadraw[0]
        if not os.path.exists(config["loadraw_dir"]):
            error_msg("Raw results directory required for loading doesn't exist!")
        debug_msg(1, "Will load output from " + config["loadraw_dir"])

    # store_true flags are already booleans.
    config["should_warmup"] = bool(args.should_warmup)
    config["should_time"] = bool(args.should_time)

    results = None
    if args.should_load:
        debug_msg(1, "Loading previous results table from " + args.should_load[0])
        # Fix: context manager closes the handle even if json.load raises.
        with open(args.should_load[0], "r") as json_file:
            results = json.load(json_file)
    else:
        debug_msg(1, "Running experiment to obtain results!")
        os.chdir(suite.benchmark_root)
        results = run(suite)
        os.chdir(config["original_dir"])

    results["description"] = ecd.convert_ecd_to_description(suite)

    if not args.disable_agg:
        stats.perform_aggregation(suite, results)

    #if args.should_calculate_speedups:
    #    speedup.calculate(results)

    if args.should_print:
        formatter_name = default_print_format
        if args.printfmt:
            formatter_name = args.printfmt[0]
        print_results(results, formatter=formatters[formatter_name])

    if args.should_save:
        formatter_name = default_save_format
        if args.savefmt:
            formatter_name = args.savefmt[0]
        save_results(args.should_save[0], results, formatter=formatters[formatter_name])

    if args.should_email:
        mailserver = 'localhost'
        if args.mailserver:
            mailserver = args.mailserver[0]
        formatter_name = default_email_format
        if args.emailfmt:
            formatter_name = args.emailfmt[0]
        email_results(args.should_email[0], results,
                      mailserver=mailserver, formatter=formatters[formatter_name])
def exception_hook(self, exctype, value, tb):
    """Custom exception hook, handling errors.

    Formats the traceback, reports it through debug.error_msg, and shows
    it to the user via the instance's exception box.
    """
    formatted_lines = traceback.format_exception(exctype, value, tb)
    report = "".join(formatted_lines)
    debug.error_msg(report)
    self.show_exception_box(report)
def main():
    """Entry point: parse CLI args, run (or load) the benchmark suite,
    aggregate statistics, and print/save/email the results.

    NOTE(review): this module defines main() twice (this is the second,
    shadowing definition) -- confirm which is current and remove the other.
    """
    parser = argparse.ArgumentParser(
        description="marky - a benchmark execution and statistics gathering framework")
    parser.add_argument('file', type=str, nargs=1, metavar='FILE',
                        help='A file containing an execution configuration. (or ECD) (Minus the .py)')
    parser.add_argument('--disable-aggregation', '-a', dest='disable_agg', action='store_true',
                        help='Turn off aggregation calculation for this session.')
    parser.add_argument('--warmup', '-w', dest='should_warmup', action='store_true',
                        help='Perform a warmup run of each benchmark that is not recorded.')
    parser.add_argument('--time', '-t', dest='should_time', action='store_true',
                        help='Use marky to measure the runtime of any experiments.')
    parser.add_argument('--explain', '-e', dest='should_explain', action='store_true',
                        help='Explain the experiments that will be run by the provided ECD.')
    parser.add_argument('--print', '-p', dest='should_print', action='store_true',
                        help='"Pretty-print" the results.')
    parser.add_argument('--print-format', '-pf', dest='printfmt', nargs=1, choices=formats,
                        help='Choose which format to print the data in. (default: json)')
    parser.add_argument('--load', '-l', dest='should_load', nargs=1, metavar='FILE',
                        help='Load previous results from a file. (supports JSON only)')
    parser.add_argument('--load-raw', '-lr', dest='should_loadraw', nargs=1, metavar='DIR',
                        help='Load the raw output from each run from the given directory.')
    parser.add_argument('--save', '-s', dest='should_save', nargs=1, metavar='FILE',
                        help='Output the results into a file.')
    parser.add_argument('--save-format', '-sf', dest='savefmt', nargs=1, choices=formats,
                        help='Choose which format to save the data in. (default: json)')
    # Fix: the original help literal contained a raw line break inside a
    # single-quoted string (a syntax error); rejoined into one line.
    parser.add_argument('--email', '-m', dest='should_email', nargs=1, metavar='ADDRESS',
                        help='Send an email to the address once complete. (Uses localhost unless --mailserver is given.)')
    parser.add_argument('--email-format', '-mf', dest='emailfmt', nargs=1, choices=formats,
                        help='Choose which format to email the data in. (default: json)')
    parser.add_argument('--mailserver', '-ms', dest='mailserver', nargs=1, metavar='HOST',
                        help='Use the provided host as a mailserver.')
    parser.add_argument('--save-raw', '-r', dest='should_saveraw', nargs=1, metavar='DIR',
                        help='Save the raw output from each run into the given directory.')
    parser.add_argument('--debug', '-d', dest='debuglevel', nargs=1, type=int, metavar='LEVEL',
                        help='Set debug info level. 1 = Announce each benchmark invocation. 2 = Include time taken. 3 = Everything else. (Default = 1) (Set to 0 for quiet, or use --quiet.)')
    parser.add_argument('--quiet', '-q', dest='quiet', action='store_true',
                        help='Hide all output (apart from errors.)')
    parser.add_argument('--speedups', '-cs', dest='should_calculate_speedups', action='store_true',
                        help='Assuming only two experiments will be run, calculate the speedups.')
    args = parser.parse_args()

    # Debug verbosity: default 2, overridden by --debug, silenced by --quiet.
    config["debuglevel"] = 2
    if args.debuglevel:
        config["debuglevel"] = args.debuglevel[0]
    if args.quiet:
        config["debuglevel"] = 0

    suite = None
    if args.file:
        # (ecd = Execution Configuration Description)
        ecd_name = args.file[0]
        # Fix: strip only a trailing ".py" suffix; the original
        # replace(".py", "") would also mangle names like "my.pyconf".
        if ecd_name.endswith(".py"):
            ecd_name = ecd_name[:-3]
        suite = __import__(ecd_name)
        ecd.check_ecd(suite)

    if args.should_explain:
        ecd.explain_ecd(suite)
        exit(0)

    config["original_dir"] = os.getcwd()

    config["saveraw"] = False
    if args.should_saveraw:
        config["saveraw"] = True
        config["saveraw_dir"] = config["original_dir"] + "/" + args.should_saveraw[0]

    config["loadraw"] = False
    if args.should_loadraw:
        config["loadraw"] = True
        config["loadraw_dir"] = config["original_dir"] + "/" + args.should_loadraw[0]
        if not os.path.exists(config["loadraw_dir"]):
            error_msg("Raw results directory required for loading doesn't exist!")
        debug_msg(1, "Will load output from " + config["loadraw_dir"])

    # store_true flags are already booleans.
    config["should_warmup"] = bool(args.should_warmup)
    config["should_time"] = bool(args.should_time)

    results = None
    if args.should_load:
        debug_msg(1, "Loading previous results table from " + args.should_load[0])
        # Fix: context manager closes the handle even if json.load raises.
        with open(args.should_load[0], "r") as json_file:
            results = json.load(json_file)
    else:
        debug_msg(1, "Running experiment to obtain results!")
        os.chdir(suite.benchmark_root)
        results = run(suite)
        os.chdir(config["original_dir"])

    results["description"] = ecd.convert_ecd_to_description(suite)

    if not args.disable_agg:
        stats.perform_aggregation(suite, results)

    #if args.should_calculate_speedups:
    #    speedup.calculate(results)

    if args.should_print:
        formatter_name = default_print_format
        if args.printfmt:
            formatter_name = args.printfmt[0]
        print_results(results, formatter=formatters[formatter_name])

    if args.should_save:
        formatter_name = default_save_format
        if args.savefmt:
            formatter_name = args.savefmt[0]
        save_results(args.should_save[0], results, formatter=formatters[formatter_name])

    if args.should_email:
        mailserver = 'localhost'
        if args.mailserver:
            mailserver = args.mailserver[0]
        formatter_name = default_email_format
        if args.emailfmt:
            formatter_name = args.emailfmt[0]
        email_results(args.should_email[0], results,
                      mailserver=mailserver, formatter=formatters[formatter_name])