def _trace_with_std_per_time(pkl_list, name_list, maxvalue, save="", cut=sys.maxint, log=False, aggregation="mean"):
    """Plot the optimization trace (aggregated over runs) against wallclock time.

    pkl_list : list of lists of pickle file paths, one inner list per experiment.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    maxvalue : value used by the plotter to cap/replace failed runs.
    save : file to save the plot to; "" shows it interactively.
    cut : unused here; kept so all plotting helpers share one signature.
    log : if True, use a logarithmic y-axis.
    aggregation : how to aggregate across runs ("mean" or "median").

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # plotTrace_perTime lives next to this file, so import it from there.
        os.chdir(plotting_dir)
        import plotTrace_perTime
        plotTrace_perTime.main(pkl_list, name_list, autofill=True,
                               aggregation=aggregation, optimum=0,
                               maxvalue=maxvalue, save=save, logy=log)
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: restore the caller's working directory even when plotting
        # fails (the original only chdir'ed back on the success path).
        os.chdir(cur_dir)
def _statistics(pkl_list, name_list, save="", cut=sys.maxint, log=False):
    """Run statistics.py over the experiment pickles and print/save its output.

    pkl_list : list of lists of pickle file paths, one inner list per experiment.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    save : file to redirect the child's output to; "" prints it to stdout.
    cut : only consider the first `cut` function evaluations.
    log : unused here; kept so all plotting helpers share one signature.

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # statistics.py lives next to this file, so run it from there.
        os.chdir(plotting_dir)
        cmd = ["python statistics.py", "--cut %d" % cut]
        for i in range(len(name_list)):
            cmd.append(name_list[i][0])
            for pkl in pkl_list[i]:
                cmd.append(pkl)
        # BUGFIX: was `save is not ""` -- an identity check on a string
        # literal, which is not a reliable equality test.
        if save != "":
            # BUGFIX: `with` guarantees the handle is closed even if
            # check_call raises (the original leaked it on failure).
            # NOTE(review): the output file is also passed as stdin, as in
            # the original call -- confirm statistics.py ignores stdin.
            with open(save, "w") as fh:
                subprocess.check_call(" ".join(cmd), shell=True,
                                      stdin=fh, stdout=fh, stderr=fh)
        else:
            proc = subprocess.Popen(" ".join(cmd), shell=True,
                                    stdout=subprocess.PIPE)
            out = proc.communicate()[0]
            # print the output of the child process to stdout
            print(out)
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: restore the caller's working directory even on failure.
        os.chdir(cur_dir)
def _optimizer_overhead(pkl_list, name_list, save="", cut=sys.maxint, log=False):
    """Plot the time the optimizer spends between function evaluations.

    pkl_list : list of lists of pickle file paths, one inner list per experiment.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    save : file to save the plot to; "" shows it interactively.
    cut : only consider the first `cut` function evaluations.
    log : if True, use a logarithmic y-axis.

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # plotOptimizerOverhead lives next to this file.
        os.chdir(plotting_dir)
        import plotOptimizerOverhead
        # NOTE: "print_lenght_trial_list" is the keyword name the plotting
        # module expects (typo included), so it must stay spelled this way.
        plotOptimizerOverhead.main(pkl_list=pkl_list, name_list=name_list,
                                   autofill=True, title="", log=log,
                                   save=save, cut=cut,
                                   ylabel="Time [sec]",
                                   xlabel="#Function evaluations",
                                   aggregation="mean", properties=None,
                                   print_lenght_trial_list=True)
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: restore the caller's working directory even on failure.
        os.chdir(cur_dir)
def _statistics(pkl_list, name_list, save="", cut=sys.maxint, log=False):
    """Run statistics.py over the experiment pickles and print/save its output.

    pkl_list : list of lists of pickle file paths, one inner list per experiment.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    save : file to redirect the child's output to; "" prints it to stdout.
    cut : only consider the first `cut` function evaluations.
    log : unused here; kept so all plotting helpers share one signature.

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # statistics.py lives next to this file, so run it from there.
        os.chdir(plotting_dir)
        cmd = ["python statistics.py", "--cut %d" % cut]
        for i in range(len(name_list)):
            cmd.append(name_list[i][0])
            for pkl in pkl_list[i]:
                cmd.append(pkl)
        # BUGFIX: was `save is not ""` -- an identity check on a string
        # literal, which is not a reliable equality test.
        if save != "":
            # BUGFIX: `with` guarantees the handle is closed even if
            # check_call raises (the original leaked it on failure).
            # NOTE(review): the output file is also passed as stdin, as in
            # the original call -- confirm statistics.py ignores stdin.
            with open(save, "w") as fh:
                subprocess.check_call(" ".join(cmd), shell=True,
                                      stdin=fh, stdout=fh, stderr=fh)
        else:
            proc = subprocess.Popen(" ".join(cmd), shell=True,
                                    stdout=subprocess.PIPE)
            out = proc.communicate()[0]
            # print the output of the child process to stdout
            print(out)
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: restore the caller's working directory even on failure.
        os.chdir(cur_dir)
def _generate_tex_table(pkl_list, name_list, save="", cut=sys.maxint, log=False):
    """Write a LaTeX results table for the given experiment pickles.

    pkl_list : list of lists of pickle file paths, one inner list per experiment.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    save : file to write the table to; "" lets generateTexTable decide.
    cut : only consider the first `cut` function evaluations.
    log : unused here; kept so all plotting helpers share one signature.

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # generateTexTable lives next to this file, so import it from there.
        os.chdir(plotting_dir)
        import generateTexTable
        generateTexTable.main(pkl_list, name_list, save, cut)
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: restore the caller's working directory even on failure.
        os.chdir(cur_dir)
def _optimizer_overhead(pkl_list, name_list, save="", cut=sys.maxint, log=False):
    """Plot the time the optimizer spends between function evaluations.

    pkl_list : list of lists of pickle file paths, one inner list per experiment.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    save : file to save the plot to; "" shows it interactively.
    cut : only consider the first `cut` function evaluations.
    log : if True, use a logarithmic y-axis.

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # plotOptimizerOverhead lives next to this file.
        os.chdir(plotting_dir)
        import plotOptimizerOverhead
        plotOptimizerOverhead.main(pkl_list, name_list, autofill=True,
                                   log=log, save=save, cut=cut)
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: restore the caller's working directory even on failure.
        os.chdir(cur_dir)
def _plot_trace(pkl_list, name_list, save="", cut=sys.maxint, log=False):
    """Plot one optimization trace per experiment pickle.

    pkl_list : list of lists of pickle file paths; we have one pkl per
        experiment here.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    save : file to save the plot to; "" shows it interactively.
    cut : only consider the first `cut` function evaluations.
    log : if True, use a logarithmic y-axis.

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # plotTrace lives next to this file, so import it from there.
        os.chdir(plotting_dir)
        import plotTrace
        plotTrace.main(pkl_list, name_list, save=save, log=log, cut=cut)
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: restore the caller's working directory even on failure.
        os.chdir(cur_dir)
def _statistics(pkl_list, name_list, save="", cut=sys.maxint, log=False):
    """Collect statistics for the experiment pickles and print/save them.

    pkl_list : list of lists of pickle file paths, one inner list per experiment.
    name_list : list of (name, num_runs) pairs labelling each experiment.
    save : file to write the statistics text to; "" prints it to stdout.
    cut : only consider the first `cut` function evaluations.
    log : unused here; kept so all plotting helpers share one signature.

    Exceptions are caught and reported on stderr; nothing is raised.
    """
    plotting_dir = os.path.dirname(os.path.realpath(__file__))
    cur_dir = os.getcwd()
    # noinspection PyBroadException
    try:
        # The local statistics module lives next to this file.
        os.chdir(plotting_dir)
        import statistics
        stringIO = statistics.get_statistics_as_text(pkl_list, name_list, cut)
        # BUGFIX: was `save is not ""` -- an identity check on a string
        # literal, which is not a reliable equality test.
        if save != "":
            with open(save, "w") as fh:
                fh.write(stringIO.getvalue())
        else:
            print(stringIO.getvalue())
        sys.stdout.write("passed\n")
    except Exception as e:
        sys.stderr.write(format_traceback(sys.exc_info()))
        sys.stderr.write("failed: %s %s" % (sys.exc_info()[0], e))
    finally:
        # BUGFIX: the original did os.chdir(plotting_dir) a second time,
        # leaving the caller stranded in the plotting directory; restore
        # the original working directory instead, even on failure.
        os.chdir(cur_dir)
def do_cv(params, folds=10):
    """Cross-validate *params* by running runsolver_wrapper once per fold.

    Each fold's "Result for ParamILS: SAT ..." line is parsed from the
    wrapper's stdout and the mean over all folds is returned.  Any failure
    (or too many crashed folds) yields the configured
    HPOLIB:result_on_terminate value, never NaN, so spearmint/tpe keep
    working.
    """
    logger.info("Starting Cross validation")
    sys.stdout.flush()
    optimizer = get_optimizer()
    cfg = load_experiment_config_file()
    # Per-fold results, handed back to tpe and spearmint as their mean.
    fold_results = []
    try:
        logger.info("%s", params)
        param_string = " ".join(
            "-" + str(name) + " " + str(params[name]) for name in params)
        for fold in range(folds):
            # runsolver_wrapper usage:
            #   <instancename> <instancespecificinformation> <cutofftime>
            #   <cutofflength> <seed> <param>...
            # cutofftime, cutofflength and seed are re-read inside
            # runsolver_wrapper, so zeros are safe placeholders here.
            wrapper = "python " + \
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "runsolver_wrapper.py")
            cmd = "%s %d %s %d %d %d %s" % \
                (wrapper, fold, optimizer, 0, 0, 0, param_string)
            logger.info("Calling command:\n%s", cmd)
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, shell=True,
                                    executable="/bin/bash")
            logger.info("--------------RUNNING RUNSOLVER_WRAPPER--------------")
            out, err = proc.communicate()
            if out:
                logger.info(out)
            if err:
                logger.error(err)

            # Scan the wrapper's output for the ParamILS result line.
            result_line = None
            for line in out.split("\n"):
                pos = line.find("Result for ParamILS: SAT")
                if pos != -1:
                    result_line = line[pos:]
                    tokens = result_line.split()
                    fold_results.append(float(tokens[6].strip(",")))
                    break
            if result_line is None:
                raise NotImplementedError("No result string available or result string doesn't contain SAT")

            # If a specified number of runs crashed, quit the whole cross
            # validation in order to save time.
            worst_possible = cfg.getfloat("HPOLIB", "result_on_terminate")
            crashed = np.nansum(
                [0 if res != worst_possible else 1 for res in fold_results])
            if crashed >= cfg.getint("HPOLIB", "max_crash_per_cv"):
                logger.warning("Aborting CV because the number of crashes "
                               "exceeds the configured max_crash_per_cv value")
                return worst_possible

        # TODO: Error Handling
        assert (len(fold_results) == folds)
        mean = np.mean(fold_results)
    except Exception as e:
        logger.error(format_traceback(sys.exc_info()))
        logger.error("CV failed %s %s", sys.exc_info()[0], e)
        # status = "CRASHED"
        # status = "SAT"
        mean = np.NaN
    # Do not return any kind of nan because this would break spearmint
    if not np.isfinite(mean):
        mean = float(cfg.get("HPOLIB", "result_on_terminate"))
    logger.info("Finished CV")
    return mean
def do_cv(params, folds=10):
    """Cross-validate *params* via one runsolver_wrapper call per fold.

    Parses each fold's "Result for ParamILS: SAT ..." output line and
    returns the mean result.  Failures and excessive crashes map to the
    configured HPOLIB:result_on_terminate value instead of NaN so the
    calling optimizers (tpe/spearmint) are not broken.
    """
    logger.info("Starting Cross validation")
    sys.stdout.flush()
    optimizer = get_optimizer()
    cfg = load_experiment_config_file()
    # Store the results to hand them back to tpe and spearmint
    collected = []
    try:
        logger.info("%s", params)
        flags = ["-" + str(key) + " " + str(params[key]) for key in params]
        joined_flags = " ".join(flags)
        for fold_idx in range(folds):
            # runsolver_wrapper expects: <instancename>
            # <instancespecificinformation> <cutofftime> <cutofflength>
            # <seed> <param>... -- the cutoffs and seed are read again by
            # runsolver_wrapper itself, so zeros suffice here.
            script = "python " + \
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "runsolver_wrapper.py")
            command = "%s %d %s %d %d %d %s" % \
                (script, fold_idx, optimizer, 0, 0, 0, joined_flags)
            logger.info("Calling command:\n%s", command)
            child = subprocess.Popen(command, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE, shell=True,
                                     executable="/bin/bash")
            logger.info("--------------RUNNING RUNSOLVER_WRAPPER--------------")
            child_out, child_err = child.communicate()
            if child_out:
                logger.info(child_out)
            if child_err:
                logger.error(child_err)

            # Extract the ParamILS result line from the wrapper output.
            sat_line = None
            for raw_line in child_out.split("\n"):
                pos = raw_line.find("Result for ParamILS: SAT")
                if pos != -1:
                    sat_line = raw_line[pos:]
                    collected.append(float(sat_line.split()[6].strip(",")))
                    break
            if sat_line is None:
                raise NotImplementedError("No result string available or result string doesn't contain SAT")

            # If a specified number of runs crashed, quit the whole cross
            # validation in order to save time.
            terminate_value = cfg.getfloat("HPOLIB", "result_on_terminate")
            num_crashed = np.nansum(
                [0 if res != terminate_value else 1 for res in collected])
            if num_crashed >= cfg.getint("HPOLIB", "max_crash_per_cv"):
                logger.warning("Aborting CV because the number of crashes "
                               "exceeds the configured max_crash_per_cv value")
                return terminate_value

        # TODO: Error Handling
        assert (len(collected) == folds)
        mean = np.mean(collected)
    except Exception as e:
        logger.error(format_traceback(sys.exc_info()))
        logger.error("CV failed %s %s", sys.exc_info()[0], e)
        # status = "CRASHED"
        # status = "SAT"
        mean = np.NaN
    # Do not return any kind of nan because this would break spearmint
    if not np.isfinite(mean):
        mean = float(cfg.get("HPOLIB", "result_on_terminate"))
    logger.info("Finished CV")
    return mean
def dispatch(cfg, fold, params, test=False):
    """Import the configured python target function and evaluate it once.

    cfg : experiment configuration; HPOLIB options select the module and
        function names and the number of CV folds.
    fold : the CV fold index to evaluate (ignored when test=True).
    params : parameter dict; leading "-" on keys is stripped before the call.
    test : if True, call the configured test function with fold=0, folds=1.

    Returns (additional_data, result, status, wallclock_time).  The target
    function may return a plain float or a dict with "result" and optional
    "duration"/"additional_data" keys; anything else yields status "UNSAT".
    """
    starttime = time.time()
    wallclock_time = None
    result = float("NaN")
    # BUGFIX: initialize so the exception / unsupported-return-type paths
    # below cannot hit a NameError at the final return.
    additional_data = None
    fn_module = cfg.get("HPOLIB", "python_module")
    if test:
        fn_name = cfg.get("HPOLIB", "python_test_function")
    else:
        fn_name = cfg.get("HPOLIB", "python_function")
    folds = cfg.getint("HPOLIB", "number_cv_folds")

    if not cfg.getboolean("HPOLIB", "use_HPOlib_time_measurement"):
        logger.warn(
            "The configuration HPOLIB:use_HPOlib_time_measurment False "
            "has no effect for the python function dispatcher.")

    try:
        # BUGFIX: import_module's second argument is `package` (only used
        # for relative imports), not a fromlist; the old call passed a
        # list there.  fn_module is an absolute dotted path, so plain
        # import_module is the correct and equivalent call.
        module = importlib.import_module(fn_module)
        fn = getattr(module, fn_name)
    except Exception as e:
        logger.error(wrapping_util.format_traceback(sys.exc_info()))
        logger.error("Could not import function %s due to exception %s",
                     fn_name, str(e))
        return "", result, "UNSAT", time.time() - starttime

    try:
        # TODO: remove this hackines
        # Optimizers may prefix parameter names with "-"; strip it.
        fixed_params = dict()
        for param in params:
            if param[0] == "-":
                fixed_params[param[1:]] = params[param]
            else:
                fixed_params[param] = params[param]

        if test:
            retval = fn(fixed_params, fold=0, folds=1)
        else:
            retval = fn(fixed_params, fold=fold, folds=folds)
        status = "SAT"

        if isinstance(retval, float):
            result = retval
            additional_data = None
        elif isinstance(retval, dict):
            result = retval["result"]
            # Optional keys: the target function may report its own
            # duration and arbitrary extra data.
            wallclock_time = retval.get("duration")
            additional_data = retval.get("additional_data")
        else:
            status = "UNSAT"
            logger.error(
                "Return type %s of target function %s is not "
                "supported", str(type(retval)), str(fn_name))
    except Exception as e:
        status = "UNSAT"
        logger.error("Target function evaluation raised exception %s.",
                     str(e))
        logger.error(wrapping_util.format_traceback(sys.exc_info()))

    # Fall back to our own wallclock measurement when the target function
    # did not report a duration.
    if wallclock_time is None:
        wallclock_time = time.time() - starttime

    return additional_data, result, status, wallclock_time