def main(expt_dir, repeat=-1):
    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment_name"]
    if repeat > 0:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])
    chooser = chooser_module.init(input_space, options)

    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')
    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

    for task_name, task in tasks.iteritems():
        # print 'Printing results for task %s' % task_name
        for i in xrange(len(task.values)):
            print 'Iteration %d' % (i + 1)
            input_space.paramify_and_print(task.inputs[i], left_indent=0)
            print '%s: %s' % (task_name, task.values[i])
            print ''
        print ''
def cleanup(path, repeat=-1):
    if not os.path.isdir(path):
        raise Exception("%s is not a valid directory" % path)

    cfg = parse_config_file(path, 'config.json', verbose=False)
    db_address = cfg['database']['address']
    # client = pymongo.MongoClient(db_address)
    db = MongoDB(database_address=db_address)

    experiment_name = cfg["experiment_name"]
    if repeat >= 0:  # only for advanced use
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    print 'Cleaning up experiment %s in database at %s' % (experiment_name, db_address)

    # db.remove_experiment(experiment_name)  # does not work
    db.remove_collection(experiment_name, 'jobs')
    db.remove_collection(experiment_name, 'hypers')
    db.remove_collection(experiment_name, 'recommendations')
    db.remove_collection(experiment_name, 'start-time')

    # remove output files
    output_directory = repeat_output_dir(path, repeat) if repeat >= 0 else os.path.join(path, 'output')
    if os.path.isdir(output_directory):
        shutil.rmtree(output_directory)
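# The helpers repeat_experiment_name and repeat_output_dir are used throughout
# these scripts but are defined elsewhere. A minimal sketch of what they might
# look like, assuming a "name-0", "name-1", ... convention for repeated
# experiments and a per-repeat subdirectory under output/ (both the naming and
# the directory layout are assumptions, not the verified implementation):

import os


def repeat_experiment_name(name, i):
    # Append the repeat index to the base experiment name, e.g. "branin-3".
    return '%s-%d' % (name, int(i))


def repeat_output_dir(expt_dir, i):
    # Keep each repeat's output files in their own subdirectory.
    return os.path.join(expt_dir, 'output', 'repeat-%d' % int(i))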
def main(expt_dir, repeat=-1):
    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment_name"]
    if repeat > 0:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])
    chooser = chooser_module.init(input_space, options)

    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')
    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

    for job in jobs:
        if job['status'] == 'complete':
            print 'Job %d' % job['id']
            input_space.print_params(job['params'], left_indent=0)
            for task, val in job['values'].iteritems():
                print '%s: %s' % (task, val)
            print ''
def cleanup(path, repeat=-1):
    if not os.path.isdir(path):
        raise Exception("%s is not a valid directory" % path)

    cfg = parse_config_file(path, 'config.json', verbose=False)
    db_address = cfg['database']['address']
    client = pymongo.MongoClient(db_address)

    experiment_name = cfg["experiment_name"]
    if repeat >= 0:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    print 'Cleaning up experiment %s in database at %s' % (experiment_name, db_address)

    db = client.spearmint[experiment_name]
    db['jobs'].drop()
    db['hypers'].drop()
    db['recommendations'].drop()

    # remove output files
    output_directory = repeat_output_dir(path, repeat) if repeat >= 0 else os.path.join(path, 'output')
    if os.path.isdir(output_directory):
        shutil.rmtree(output_directory)

    # remove plots
    plots_directory = os.path.join(path, 'plots')
    if os.path.isdir(plots_directory):
        shutil.rmtree(plots_directory)
def main(expt_dir, n_repeat):
    n_repeat = int(n_repeat)
    options = parse_config_file(expt_dir, 'config.json')
    tasks = options['tasks'].keys()

    jobs = dict()
    start_times = dict()
    for j in xrange(n_repeat):
        experiment_name = repeat_experiment_name(options["experiment_name"], j)
        db = MongoDB(database_address=options['database']['address'])
        jobs[j] = load_jobs(db, experiment_name)
        start_times[j] = db.load(experiment_name, 'start-time')['start-time']

    time_in_evals = defaultdict(lambda: np.zeros(n_repeat))
    time_in_fast_updates = np.zeros(n_repeat)
    time_in_slow_updates = np.zeros(n_repeat)
    for j in xrange(n_repeat):
        last_job_end_time = start_times[j]
        for job in jobs[j]:
            if job['status'] == 'complete':
                time_in_evals[job['tasks'][0]][j] += (job['end time'] - job['start time']) / 60.0
                if job['fast update']:
                    time_in_fast_updates[j] += (job['start time'] - last_job_end_time) / 60.0
                else:
                    time_in_slow_updates[j] += (job['start time'] - last_job_end_time) / 60.0
                last_job_end_time = job['end time']

    for task in tasks:
        print 'Average time on task %s over %d repeats: %f +/- %f minutes (mean +/- std)' % (
            task, n_repeat, np.mean(time_in_evals[task]), np.std(time_in_evals[task]))

    total_time_in_evals = sum(time_in_evals.values())
    print 'Average time in JOBS over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(total_time_in_evals), np.std(total_time_in_evals))
    print 'Average time in FAST over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(time_in_fast_updates), np.std(time_in_fast_updates))
    print 'Average time in SLOW over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(time_in_slow_updates), np.std(time_in_slow_updates))

    total_optimizer_time = time_in_fast_updates + time_in_slow_updates
    print 'Average time in OPTIMIZER over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(total_optimizer_time), np.std(total_optimizer_time))

    print 'Total average time spent: %f' % np.sum([
        np.mean(total_time_in_evals),
        np.mean(time_in_fast_updates),
        np.mean(time_in_slow_updates)])
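# A minimal command-line entry point for the timing script above, assuming it
# is invoked as "python <script>.py <experiment_dir> <n_repeat>" (the
# invocation convention is an assumption; main() converts n_repeat to int
# itself, so passing the raw argv string is fine):
if __name__ == '__main__':
    import sys
    main(sys.argv[1], sys.argv[2])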
def main(expt_dir, n_repeat):
    n_repeat = int(n_repeat)
    options = parse_config_file(expt_dir, 'config.json')
    tasks = options['tasks'].keys()

    jobs = dict()
    for j in xrange(n_repeat):
        experiment_name = repeat_experiment_name(options["experiment_name"], j)
        db = MongoDB(database_address=options['database']['address'])
        jobs[j] = load_jobs(db, experiment_name)

    n_iter_each = map(len, jobs.values())
    print 'Found %s iterations' % n_iter_each
    n_iter = min(n_iter_each)

    cum_evals = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for j in xrange(n_repeat):
        for i in xrange(n_iter):
            for task in tasks:
                if task in jobs[j][i]['tasks']:
                    cum_evals[j][task][i] = cum_evals[j][task][i - 1] + 1
                else:
                    cum_evals[j][task][i] = cum_evals[j][task][i - 1]

    # average over the j repeats
    for i in xrange(n_iter):
        for task in tasks:
            cum_evals["avg"][task][i] = np.mean([cum_evals[j][task][i] for j in xrange(n_repeat)])
            cum_evals["err"][task][i] = np.std([cum_evals[j][task][i] for j in xrange(n_repeat)])

    plt.figure()
    iters = range(n_iter)
    for task in tasks:
        plt.errorbar(iters,
                     [cum_evals["avg"][task][i] for i in xrange(n_iter)],
                     yerr=[cum_evals["err"][task][i] for i in xrange(n_iter)],
                     linewidth=2)
    plt.legend(tasks, loc='upper left')
    plt.xlabel('Iteration number', size=25)
    plt.ylabel('Cumulative evaluations', size=25)

    # Make the directory for the plots
    plots_dir = os.path.join(expt_dir, 'plots')
    if not os.path.isdir(plots_dir):
        os.mkdir(plots_dir)

    figname = os.path.join(plots_dir, 'cumulative_evals.pdf')
    print 'Saving figure at %s' % figname
    plt.savefig(figname)
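# Why the i-1 lookup above is safe at i == 0: the innermost defaultdict(int)
# returns 0 for the missing key -1, so the cumulative counts start from zero
# without a special case. A self-contained demonstration of the pattern:
from collections import defaultdict

cum = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
print cum[0]['taskA'][-1]                      # 0 -- missing keys default to int() == 0
cum[0]['taskA'][0] = cum[0]['taskA'][-1] + 1   # first iteration increments from 0
print cum[0]['taskA'][0]                       # 1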
def cleanup(repeat=-1):
    parser = optparse.OptionParser(usage="usage: %prog [options] directory")
    parser.add_option("--config", dest="config_file",
                      help="Configuration file name.",
                      type="string", default="config.json")
    (commandline_kwargs, args) = parser.parse_args()

    path = os.path.realpath(args[0])
    if not os.path.isdir(path):
        raise Exception("%s is not a valid directory" % path)

    cfg = parse_config_file(path, commandline_kwargs.config_file, verbose=False)
    db_address = cfg['database']['address']
    client = pymongo.MongoClient(db_address)

    experiment_name = cfg["experiment_name"]
    if repeat >= 0:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    print 'Cleaning up experiment %s in database at %s' % (experiment_name, db_address)

    db = client.spearmint[experiment_name]
    db['jobs'].drop()
    db['hypers'].drop()
    db['recommendations'].drop()

    # remove output files
    output_directory = repeat_output_dir(path, repeat) if repeat >= 0 else os.path.join(path, 'output')
    if os.path.isdir(output_directory):
        shutil.rmtree(output_directory)

    # remove plots
    plots_directory = os.path.join(path, 'plots')
    if os.path.isdir(plots_directory):
        shutil.rmtree(plots_directory)
def main():
    parser = optparse.OptionParser(usage="usage: %prog [options] directory")
    parser.add_option("--config", dest="config_file",
                      help="Configuration file name.",
                      type="string", default="config.json")
    parser.add_option("--no-output", action="store_true",
                      help="Do not create output files.")
    parser.add_option("--repeat", dest="repeat",
                      help="Used for repeating the same experiment many times.",
                      type="int", default=-1)
    (commandline_kwargs, args) = parser.parse_args()

    # Read in the config file
    # expt_dir = os.path.realpath('examples/cifar10')
    expt_dir = os.path.realpath(args[0])
    if not os.path.isdir(expt_dir):
        raise Exception("Cannot find directory %s" % expt_dir)

    options = parse_config_file(expt_dir, commandline_kwargs.config_file)
    experiment_name = options["experiment_name"]

    # Special advanced feature for repeating the same experiment many times
    if commandline_kwargs.repeat >= 0:
        experiment_name = repeat_experiment_name(experiment_name, commandline_kwargs.repeat)

    if not commandline_kwargs.no_output:  # if we want output
        if commandline_kwargs.repeat >= 0:
            output_directory = repeat_output_dir(expt_dir, commandline_kwargs.repeat)
        else:
            output_directory = os.path.join(expt_dir, 'output', options["experiment_name"])
        if not os.path.isdir(output_directory):
            os.mkdir(output_directory)

        if commandline_kwargs.repeat < 0:
            rootLogger = logging.getLogger()
            fileHandler = logging.FileHandler(os.path.join(output_directory, 'main.log'))
            fileHandler.setFormatter(logFormatter)
            fileHandler.setLevel(logLevel)
            rootLogger.addHandler(fileHandler)
            # consoleHandler = logging.StreamHandler()
            # consoleHandler.setFormatter(logFormatter)
            # consoleHandler.setLevel(logLevel)
            # rootLogger.addHandler(consoleHandler)
    else:
        output_directory = None

    input_space = InputSpace(options["variables"])
    resources = parse_resources_from_config(options)

    # Load up the chooser.
    chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])
    chooser = chooser_module.init(input_space, options)

    # Connect to the database
    db_address = options['database']['address']
    db = MongoDB(database_address=db_address)

    if os.getenv('SPEARMINT_MAX_ITERATIONS') is None and 'max_iterations' not in options:
        maxiterations = DEFAULT_MAX_ITERATIONS
    elif os.getenv('SPEARMINT_MAX_ITERATIONS') is not None:
        maxiterations = int(os.getenv('SPEARMINT_MAX_ITERATIONS'))
    else:
        maxiterations = options['max_iterations']

    # Set random seed
    if 'random_seed' in options:
        np.random.seed(int(options['random_seed']))
        seed(int(options['random_seed']))

    waiting_for_results = False  # for printing purposes only
    while True:
        for resource_name, resource in resources.iteritems():
            jobs = load_jobs(db, experiment_name)
            # resource.printStatus(jobs)

            # If the resource is currently accepting more jobs
            # TODO: here cost will eventually also be considered: even if the
            # resource is not full, we might wait because of cost incurred
            # Note: I could choose to fill up one resource and then move on to the next ("if")
            # You could also do it the other way, by changing "if" to "while" here

            # Remove any broken jobs from pending
            # note: make sure to do this before the acceptingJobs() condition is checked
            remove_broken_jobs(db, jobs, experiment_name, resources)

            if resource.acceptingJobs(jobs):
                if waiting_for_results:
                    logging.info('\n')
                waiting_for_results = False

                optim_start_time = time.time()

                # Load jobs from DB
                # (move out of one or both loops? would need to pass into load_tasks)
                jobs = load_jobs(db, experiment_name)

                # Print out a list of broken jobs
                print_broken_jobs(jobs)

                # Get a suggestion for the next job
                tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

                # Special case when coupled and there is a NaN task -- what to do with NaN task when decoupled??
                if 'NaN' in tasks and 'NaN' not in resource.tasks:
                    resource.tasks.append('NaN')

                # Load the model hypers from the database.
                hypers = db.load(experiment_name, 'hypers')

                # "Fit" the chooser - give the chooser data and let it fit the model(s).
                # NOTE: even if we are only suggesting for 1 task, we need to fit all of them
                # because the acquisition function for one task depends on all the tasks
                hypers = chooser.fit(tasks, hypers)

                if hypers:
                    logging.debug('GP covariance hyperparameters:')
                    print_hypers(hypers)

                # Save the hyperparameters to the database.
                if hypers:
                    db.save(hypers, experiment_name, 'hypers')

                # Compute the best value so far, a.k.a. the "recommendation"
                recommendation = chooser.best()

                # Save the recommendation in the DB
                numComplete_by_task = {task_name: task.numComplete(jobs)
                                       for task_name, task in tasks.iteritems()}
                db.save({'num_complete'       : resource.numComplete(jobs),
                         'num_complete_tasks' : numComplete_by_task,
                         'params'    : input_space.paramify(recommendation['model_model_input']),
                         'objective' : recommendation['model_model_value'],
                         'params_o'  : None if recommendation['obser_obser_input'] is None
                                       else input_space.paramify(recommendation['obser_obser_input']),
                         'obj_o'     : recommendation['obser_obser_value'],
                         'params_om' : None if recommendation['obser_model_input'] is None
                                       else input_space.paramify(recommendation['obser_model_input']),
                         'obj_om'    : recommendation['obser_model_value']},
                        experiment_name, 'recommendations', {'id': len(jobs)})

                # Get the decoupling groups
                task_couplings = {task_name: tasks[task_name].options["group"]
                                  for task_name in resource.tasks}

                logging.info('\nGetting suggestion for %s...\n' % (', '.join(task_couplings.keys())))

                # Get the next suggested experiment from the chooser.
                suggested_input, suggested_tasks = chooser.suggest(task_couplings, optim_start_time)
                suggested_task = suggested_tasks[0]  # hack, deal with later

                suggested_job = {
                    'id'          : len(jobs) + 1,
                    'params'      : input_space.paramify(suggested_input),
                    'expt_dir'    : options['main_file_path'],
                    'tasks'       : suggested_tasks,
                    'resource'    : resource_name,
                    'main-file'   : resource.main_file,
                    'language'    : options['tasks'][suggested_task]['language'],
                    'status'      : 'new',
                    'submit time' : time.time(),
                    'start time'  : None,
                    'end time'    : None
                }

                save_job(suggested_job, db, experiment_name)

                # Submit the job to the appropriate resource
                process_id = resource.attemptDispatch(experiment_name, suggested_job,
                                                      db_address, expt_dir, output_directory)

                # Print the current time
                logging.info('Current time: %s'
                             % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

                # Set the status of the job appropriately (successfully submitted or not)
                if process_id is None:
                    suggested_job['status'] = 'broken'
                    logging.info('Job %s failed -- check output file for details.'
                                 % suggested_job['id'])
                    save_job(suggested_job, db, experiment_name)
                else:
                    suggested_job['status'] = 'pending'
                    suggested_job['proc_id'] = process_id
                    save_job(suggested_job, db, experiment_name)

                jobs = load_jobs(db, experiment_name)

                # Print out the status of the resources
                # resource.printStatus(jobs)
                print_resources_status(resources.values(), jobs)

                if len(set(task_couplings.values())) > 1:  # if decoupled
                    print_tasks_status(tasks.values(), jobs)

                # For debug - print pending jobs
                print_pending_jobs(jobs)

        # Terminate the optimization if all resources are finished (run max number of jobs)
        # or ANY task is finished (just my weird convention)
        if reduce(lambda x, y: x and y,
                  map(lambda x: x.maxCompleteReached(jobs), resources.values()), True) or \
           reduce(lambda x, y: x or y,
                  map(lambda x: x.maxCompleteReached(jobs), tasks.values()), False):
            # Do all this extra work just to save the final recommendation --
            # would be ok to delete everything in here and just "return"
            sys.stdout.write('\n')
            jobs = load_jobs(db, experiment_name)
            tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)
            hypers = db.load(experiment_name, 'hypers')
            hypers = chooser.fit(tasks, hypers)
            if hypers:
                db.save(hypers, experiment_name, 'hypers')
            # logging.info('\n**All resources have run the maximum number of jobs.**\nFinal recommendation:')
            recommendation = chooser.best()

            # numComplete_per_task
            numComplete_by_task = {task_name: task.numComplete(jobs)
                                   for task_name, task in tasks.iteritems()}
            db.save({'num_complete'       : resource.numComplete(jobs),
                     'num_complete_tasks' : numComplete_by_task,
                     'params'    : input_space.paramify(recommendation['model_model_input']),
                     'objective' : recommendation['model_model_value'],
                     'params_o'  : None if recommendation['obser_obser_input'] is None
                                   else input_space.paramify(recommendation['obser_obser_input']),
                     'obj_o'     : recommendation['obser_obser_value'],
                     'params_om' : None if recommendation['obser_model_input'] is None
                                   else input_space.paramify(recommendation['obser_model_input']),
                     'obj_om'    : recommendation['obser_model_value']},
                    experiment_name, 'recommendations', {'id': len(jobs)})

            logging.info('Maximum number of jobs completed. Have a nice day.')
            return

        # If no resources are accepting jobs, sleep
        if no_free_resources(db, experiment_name, resources):
            # Don't use logging here because it's too much effort to use logging without a newline at the end
            sys.stdout.write('Waiting for results...' if not waiting_for_results else '.')
            sys.stdout.flush()
            # sys.stderr.flush()
            waiting_for_results = True
            time.sleep(options['polling_time'])
        else:
            sys.stdout.write('\n')
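# no_free_resources is called in the main loop above but not defined in this
# file. A sketch of plausible behavior, inferred from how it is used (the body
# is an assumption): every resource is full, so there is nothing to do but wait.
def no_free_resources(db, experiment_name, resources):
    jobs = load_jobs(db, experiment_name)
    for resource in resources.values():
        if resource.acceptingJobs(jobs):
            return False
    return True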
def main(dirs,
         n_repeat=-1,
         n_iter_spec=None,
         rec_type="model",
         average="mean",
         log_scale=False,
         violation_value=1.,
         constraint_tol=0.,
         make_dist_plot=False,
         mainfile=None,
         stretch_x=False,
         task_comp_x=None,
         plot_wall_time=False,
         bin_size=1.0,
         plot_separate=False,
         labels=None,
         y_axis_label=None,
         x_axis_label=None):

    # Create the figure that plots utility gap
    fig = dict()
    ax = dict()

    # averaging function
    if average == "mean":
        avg = np.mean
    elif average == "median":
        avg = np.median
    else:
        raise Exception("Unknown average %s" % average)

    fig['err'] = plt.figure()
    ax['err'] = fig['err'].add_subplot(1, 1, 1)
    if plot_wall_time:
        ax['err'].set_xlabel("wall time (min)", size=25)
    elif x_axis_label:
        ax['err'].set_xlabel(x_axis_label, size=25)
    else:
        ax['err'].set_xlabel('Number of function evaluations', size=25)
    ax['err'].tick_params(axis='both', which='major', labelsize=20)

    # Create the figure that plots L2 distance from solution
    fig['dist'] = plt.figure()
    ax['dist'] = fig['dist'].add_subplot(1, 1, 1)
    if x_axis_label:
        ax['dist'].set_xlabel(x_axis_label, size=25)
    else:
        ax['dist'].set_xlabel('Number of function evaluations', size=25)
    if y_axis_label:
        ax['dist'].set_ylabel(y_axis_label, size=25)
    elif log_scale:
        ax['dist'].set_ylabel(r'$\log_{10}\, \ell_2$-distance', size=25)
    else:
        ax['dist'].set_ylabel(r'$\ell_2$-distance', size=25)
    ax['dist'].tick_params(axis='both', which='major', labelsize=20)

    db_document_name = 'recommendations'

    acq_names = list()
    for expt_dir in dirs:
        options = parse_config_file(expt_dir, 'config.json')
        experiment_name = options["experiment_name"]
        input_space = InputSpace(options["variables"])
        chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])
        chooser = chooser_module.init(input_space, options)
        db = MongoDB(database_address=options['database']['address'])
        jobs = load_jobs(db, experiment_name)
        hypers = db.load(experiment_name, 'hypers')
        tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

        if rec_type == "model":
            if mainfile is None:
                main_file = options['main_file']
            else:
                main_file = mainfile

            sys.path.append(options['main_file_path'])  # TODO: make this nicer with proper importing
            if main_file.endswith('.py'):
                module = importlib.import_module(main_file[:-3])
            else:
                module = importlib.import_module(main_file)
            sys.path.remove(options['main_file_path'])

        obj, con = get_objectives_and_constraints(options)  # get the names
        obj = obj[0]  # only one objective
        print 'Found %d constraints' % len(con)

        plot_utility_gap = rec_type == "model" and hasattr(module, 'true_val')
        if plot_utility_gap:
            print 'PLOTTING UTILITY GAP'
            if y_axis_label:
                ax['err'].set_ylabel(y_axis_label, size=25)
            elif log_scale:
                ax['err'].set_ylabel(r'$\log_{10}$ utility gap', size=25)
            else:
                ax['err'].set_ylabel('utility gap', size=25)
        else:
            if y_axis_label:
                ax['err'].set_ylabel(y_axis_label, size=25)
            elif log_scale:
                ax['err'].set_ylabel(r'$\log_{10}$ objective value', size=25)
            else:
                ax['err'].set_ylabel('objective value', size=25)

        # Make the directory for the plots
        plots_dir = os.path.join(expt_dir, 'plots')
        if not os.path.isdir(plots_dir):
            os.mkdir(plots_dir)

        # if the module provides the location of the true solution,
        # plot the distance to this solution vs iterations
        if make_dist_plot and not hasattr(module, 'true_sol'):
            raise Exception("make_dist_plot turned on but cannot find true_sol in the main file")

        # If n_repeat >= 0, then we are averaging a number of experiments.
        # We assume the experiments are stored with the original name plus a hyphen plus the number.
        n_repeat = int(n_repeat)
        if n_repeat < 0:
            recs = db.load(experiment_name, db_document_name)
            if recs is None:
                raise Exception("Could not find experiment %s in database at %s"
                                % (experiment_name, options['database']['address']))

            # The x axis represents the number of evals of the particular task given by task_comp_x,
            # so we only take the data where this number was incremented, i.e. when this task was evaluated
            if task_comp_x:
                # only include recommendations when you finish a particular task
                new_recs = list()
                last_complete = 0
                for rec in recs:
                    cur_complete = rec['num_complete_tasks'][task_comp_x]
                    if cur_complete > last_complete:
                        last_complete = cur_complete
                        new_recs.append(rec)
                recs = new_recs

            n_iter = len(recs) if n_iter_spec is None else n_iter_spec
            iters = range(n_iter)

            if plot_wall_time:
                if task_comp_x:
                    raise Exception("Do not use plot_wall_time with task_comp_x")
                iters = [rec['total_elapsed_time'] / 60.0 for rec in recs]
                iters = iters[:n_iter]
                iters = np.array(iters, dtype=float)

            print 'Found %d iterations' % len(recs)

            if rec_type == "model":
                values = [true_func(rec, module, violation_value, constraint_tol, obj, con)
                          for rec in recs]
                if log_scale:
                    ax['err'].plot(iters, map(np.log10, values))
                else:
                    ax['err'].plot(iters, values)
            else:
                if rec_type == "observations":
                    observations = [x['obj_o'] for x in recs]
                elif rec_type == "mixed":
                    observations = [x['obj_om'] for x in recs]
                else:
                    raise Exception("unknown rec type")
                for i in xrange(len(observations)):
                    if observations[i] is None or np.isnan(observations[i]):
                        observations[i] = violation_value
                # print observations
                # print len(observations)
                if log_scale:
                    ax['err'].plot(iters, np.log10(observations))
                else:
                    ax['err'].plot(iters, observations)

            if make_dist_plot:
                distances = [params_norm(rec['params'], module.true_sol()) for rec in recs]
                if log_scale:
                    ax['dist'].plot(iters, np.log10(distances))
                else:
                    ax['dist'].plot(iters, distances)

        else:  # MULTIPLE REPEATS
            repeat_recs = [db.load(repeat_experiment_name(experiment_name, j), db_document_name)
                           for j in xrange(n_repeat)]

            if None in repeat_recs:
                for i, repeat_rec in enumerate(repeat_recs):
                    if repeat_rec is None:
                        print 'Could not load experiment %s repeat %d' % (experiment_name, i)
                print 'Exiting...'
                return

            if task_comp_x:
                # only include recommendations when you finish a particular task
                new_repeat_recs = list()
                for recs in repeat_recs:
                    recs = sorted(recs, key=lambda k: k['id'])  # sort by id
                    new_recs = list()
                    last_complete = 0
                    for rec in recs:
                        cur_complete = rec['num_complete_tasks'][task_comp_x]
                        if cur_complete == last_complete + 1:
                            last_complete = cur_complete
                            new_recs.append(rec)
                        elif cur_complete == last_complete:
                            pass
                        else:
                            print ('WARNING: cur_complete=%d, last_complete=%d'
                                   % (cur_complete, last_complete))
                            break
                    new_repeat_recs.append(new_recs)
                repeat_recs = new_repeat_recs

            n_iter_each = map(len, repeat_recs)

            if plot_wall_time:
                """
                Do everything separately from here if plotting wall time.
                Here is what we do: we can't have a square array, because we don't
                want to take the minimum number of iterations -- we want to take
                ALL iterations for each repeat, and this number may be different
                for different repeats. So we store all times/values in a list of
                arrays, then we chop things up into bins.
                """
                if rec_type != "model":
                    values = list()
                    wall_times = list()
                    for j in xrange(n_repeat):  # loop over repeated experiments
                        wall_times.append(np.array([repeat_recs[j][i]['total_elapsed_time'] / 60.0
                                                    for i in xrange(n_iter_each[j])]))
                        if rec_type == "observations":
                            values.append([repeat_recs[j][i]['obj_o']
                                           for i in xrange(n_iter_each[j])])
                        elif rec_type == "mixed":
                            values.append([repeat_recs[j][i]['obj_om']
                                           for i in xrange(n_iter_each[j])])
                        else:
                            raise Exception("unknown rec type")
                        for i in xrange(n_iter_each[j]):
                            if values[-1][i] is None or np.isnan(values[-1][i]):
                                values[-1][i] = violation_value
                        values[-1] = np.array(values[-1])
                    # print values
                else:  # plotting wall time, but using model values
                    values = list()
                    wall_times = list()
                    for j in xrange(n_repeat):  # loop over repeated experiments
                        # for this repeat, get all wall times
                        wall_times.append(np.array([repeat_recs[j][i]['total_elapsed_time'] / 60.0
                                                    for i in xrange(n_iter_each[j])]))
                        values_j = np.zeros(n_iter_each[j])
                        for i in xrange(n_iter_each[j]):  # loop over iterations
                            val = true_func(repeat_recs[j][i], module, None, constraint_tol, obj, con)
                            if val is None or np.isnan(val):
                                # set to violation value here so we can print out this info...
                                values_j[i] = violation_value
                                print 'Violation with params %s at repeat %d iter %d' % (
                                    paramify_no_types(repeat_recs[j][i]['params']), j, i)
                            else:
                                values_j[i] = val
                        values.append(values_j)

                # change the data structure to be time bins and include everything in
                # those time bins across repeats
                end_times = map(max, wall_times)
                for j in xrange(n_repeat):
                    print 'end time for repeat %d: %f' % (j, end_times[j])
                iters = np.arange(0.0, np.round(max(end_times)), bin_size)
                new_values = list()
                for i, timestep in enumerate(iters):
                    # print 'Creating wall time bin from %f to %f. (%d/%d)' % (timestep, timestep + bin_size, i, len(iters))
                    new_value = list()
                    for j in xrange(n_repeat):
                        new_value = np.append(new_value,
                                              values[j][np.logical_and(
                                                  wall_times[j] >= timestep,
                                                  wall_times[j] < timestep + bin_size)].flatten())
                    # if a time bin is empty across all repeats:
                    if len(new_value) == 0:
                        if i == 0:
                            new_value = [violation_value]
                        else:
                            new_value = new_values[-1]
                    new_values.append(new_value)
                values = new_values

                # make the first value equal to the violation value (optional)
                iters = np.append(iters, max(iters) + bin_size)
                values.insert(0, np.array([violation_value]))

                # Average over the repeated experiments
                average_values = map(avg, values)
                errorbars = bootstrap_errorbars(values, log=log_scale, avg=avg)
                # plt.yscale('log', nonposy='clip')

                if log_scale:
                    ax['err'].errorbar(iters, np.log10(average_values), yerr=errorbars)
                else:
                    ax['err'].errorbar(iters, average_values, yerr=errorbars)

            else:  # NOT WALL TIME
                n_iter = reduce(min, n_iter_each, np.inf)
                if n_iter_spec is None:
                    print 'Found %d repeats with at least %d iterations' % (n_repeat, n_iter)
                    print {i: n_iter_each[i] for i in xrange(n_repeat)}
                elif n_iter < n_iter_spec:
                    print ('You specified %d iterations but there are only %d available... so plotting %d'
                           % (n_iter_spec, n_iter, n_iter))
                else:
                    n_iter = n_iter_spec
                    print 'Plotting %d iterations' % n_iter
                iters = range(n_iter)

                if rec_type != "model":
                    values = np.zeros((n_iter, n_repeat))
                    for j in xrange(n_repeat):  # loop over repeated experiments
                        for i in iters:  # loop over iterations
                            if rec_type == "observations":
                                val = repeat_recs[j][i]['obj_o']
                            elif rec_type == "mixed":
                                val = repeat_recs[j][i]['obj_om']
                            else:
                                raise Exception("unknown rec type")
                            if val is None or np.isnan(val):
                                val = violation_value
                            values[i, j] = val
                    print values
                else:
                    values = np.zeros((n_iter, n_repeat))
                    distances = np.zeros((n_iter, n_repeat))
                    for j in xrange(n_repeat):  # loop over repeated experiments
                        for i in iters:  # loop over iterations
                            val = true_func(repeat_recs[j][i], module, None, constraint_tol, obj, con)
                            if val is None:
                                # set to violation value here so we can print out this info...
                                values[i, j] = violation_value
                                print 'Violation with params %s at repeat %d iter %d' % (
                                    paramify_no_types(repeat_recs[j][i]['params']), j, i)
                            else:
                                values[i, j] = val
                            if make_dist_plot:
                                distances[i, j] = params_norm(repeat_recs[j][i]['params'],
                                                              module.true_sol())

                if plot_separate:
                    if log_scale:
                        ax['err'].plot(iters, np.log10(values))
                    else:
                        ax['err'].plot(iters, values)
                else:
                    # Average over the repeated experiments
                    average_values = map(avg, values)
                    errorbars = bootstrap_errorbars(values, log=log_scale, avg=avg)
                    # plt.yscale('log', nonposy='clip')

                    if stretch_x:
                        fctr = float(n_iter_spec) / float(n_iter)
                        iters = np.array(iters) * fctr
                        print 'Stretching x axis by a factor of %f' % fctr

                    if log_scale:
                        ax['err'].errorbar(iters, np.log10(average_values), yerr=errorbars)
                    else:
                        ax['err'].errorbar(iters, average_values, yerr=errorbars)

                    if make_dist_plot:
                        average_dist = map(avg, distances)
                        errorbars_dist = bootstrap_errorbars(distances, log=log_scale, avg=avg)
                        if log_scale:
                            ax['dist'].errorbar(iters, np.log10(average_dist), yerr=errorbars_dist)
                        else:
                            ax['dist'].errorbar(iters, average_dist, yerr=errorbars_dist)

        acq_names.append(options["tasks"].values()[0]["acquisition"])
        if acq_names[-1] == 'PES':
            acq_names[-1] = 'PESC'
        if acq_names[-1] == 'ExpectedImprovement':
            acq_names[-1] = 'EIC'

    if labels:
        ax['err'].legend(labels.split(';'), fontsize=16, loc='lower left')
        ax['dist'].legend(labels.split(';'), fontsize=20)
    elif len(acq_names) > 1:
        ax['err'].legend(acq_names, fontsize=20)
        ax['dist'].legend(acq_names, fontsize=20)

    # save it in the last directory... (if there are multiple directories)
    if not plot_wall_time:
        if n_repeat >= 0:
            print 'Made a plot with %d repeats and %d iterations' % (n_repeat, n_iter)
        else:
            print 'Made a plot with %d iterations' % n_iter
    else:
        if n_repeat >= 0:
            print 'Made a plot with %d repeats and %f minutes' % (n_repeat, max(iters))
        else:
            print 'Made a plot with %f minutes' % max(iters)

    file_prefix = '%s_' % average if n_repeat > 0 else ''
    file_postfix = '_wall_time' if plot_wall_time else ''

    fig['err'].tight_layout()
    figname = os.path.join(plots_dir, '%serror%s' % (file_prefix, file_postfix))
    fig['err'].savefig(figname + '.pdf')
    fig['err'].savefig(figname + '.svg')
    print 'Saved to %s' % figname

    if make_dist_plot:
        fig['dist'].tight_layout()
        figname_dist = os.path.join(plots_dir, '%sl2_distance%s.pdf' % (file_prefix, file_postfix))
        fig['dist'].savefig(figname_dist)
        print 'Saved to %s' % figname_dist
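# params_norm is used above to measure distance from the true solution but is
# not defined in this file. A minimal sketch, assuming params is a
# Spearmint-style dict mapping variable names to {'values': [...]} and
# true_sol() returns a plain dict over the same variables (both layouts are
# assumptions about the data format, not the verified implementation):
import numpy as np


def params_norm(params, true_sol):
    # Sum squared differences over all variables, then take the square root.
    sq = 0.0
    for name, true_val in true_sol.iteritems():
        vals = np.asarray(params[name]['values'], dtype=float)
        sq += np.sum((vals - np.asarray(true_val, dtype=float)) ** 2)
    return np.sqrt(sq)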
def main(expt_dir, repeat=None):
    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment_name"]
    if repeat is not None:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])
    chooser = chooser_module.init(input_space, options)

    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')

    if input_space.num_dims != 2:
        raise Exception("This plotting script is only for 2D optimizations. "
                        "This problem has %d dimensions." % input_space.num_dims)

    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)
    hypers = chooser.fit(tasks, hypers)

    print '\nHypers:'
    print_hypers(hypers)

    recommendation = chooser.best()
    current_best_value = recommendation['model_model_value']
    current_best_location = recommendation['model_model_input']

    plots_dir = os.path.join(expt_dir, 'plots')
    if not os.path.isdir(plots_dir):
        os.mkdir(plots_dir)
    if len(chooser.models) > 1:
        for task_name in chooser.models:
            plots_subdir = os.path.join(plots_dir, task_name)
            if not os.path.isdir(plots_subdir):
                os.mkdir(plots_subdir)

    print 'Plotting...'

    # Plot objective model
    # plot_2d_mean_and_var(chooser.objective_model, plots_dir,
    #                      chooser.objective.name,
    #                      input_space, current_best_location)
    # plot_hypers(chooser.objective_model, plots_dir, 'objective_function')

    for task_name, model in chooser.models.iteritems():
        plots_subdir = os.path.join(plots_dir, task_name) if len(chooser.models) > 1 else plots_dir
        plot_hypers(model, plots_subdir, task_name)
        plot_2d_mean_and_var(model, plots_subdir, task_name, input_space, current_best_location)

    if chooser.numConstraints() > 0:
        plot_2d_constraints(chooser, plots_dir, input_space, current_best_location)

    plot_acquisition_function(chooser, plots_dir, input_space,
                              current_best_location, current_best_value)

    print 'Done plotting.'
def main(expt_dir, config_file="config.json", no_output=False, repeat=-1): if not os.path.isdir(expt_dir): raise Exception("Cannot find directory %s" % expt_dir) options = parse_config_file(expt_dir, config_file) experiment_name = options["experiment_name"] # Special advanced feature for repeating the same experiment many times if repeat >= 0: experiment_name = repeat_experiment_name(experiment_name, repeat) if not no_output: # if we want output if repeat >= 0: output_directory = repeat_output_dir(expt_dir, repeat) else: output_directory = os.path.join(expt_dir, 'output') if not os.path.isdir(output_directory): os.mkdir(output_directory) if repeat < 0: rootLogger = logging.getLogger() fileHandler = logging.FileHandler( os.path.join(output_directory, 'main.log')) fileHandler.setFormatter(logFormatter) fileHandler.setLevel(logLevel) rootLogger.addHandler(fileHandler) # consoleHandler = logging.StreamHandler() # consoleHandler.setFormatter(logFormatter) # consoleHandler.setLevel(logLevel) # rootLogger.addHandler(consoleHandler) else: output_directory = None input_space = InputSpace(options["variables"]) resources = parse_resources_from_config(options) # Load up the chooser. chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser']) chooser = chooser_module.init(input_space, options) # Connect to the database db_address = options['database']['address'] db = MongoDB(database_address=db_address) overall_start_time = time.time() db.save({'start-time': overall_start_time}, experiment_name, 'start-time') waiting_for_results = False # for printing purposes only while True: for resource_name, resource in resources.iteritems(): jobs = load_jobs(db, experiment_name) # resource.printStatus(jobs) # If the resource is currently accepting more jobs # TODO: here cost will eventually also be considered: even if the # resource is not full, we might wait because of cost incurred # Note: I could chose to fill up one resource and them move on to the next ("if") # You could also do it the other way, by changing "if" to "while" here # Remove any broken jobs from pending # note: make sure to do this before the acceptingJobs() condition is checked remove_broken_jobs(db, jobs, experiment_name, resources) if resource.acceptingJobs(jobs): if waiting_for_results: logging.info('\n') waiting_for_results = False # Load jobs from DB # (move out of one or both loops?) would need to pass into load_tasks jobs = load_jobs(db, experiment_name) # Print out a list of broken jobs print_broken_jobs(jobs) # Get a suggestion for the next job tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space) # Special case when coupled and there is a NaN task-- what to do with NaN task when decoupled?? if 'NaN' in tasks and 'NaN' not in resource.tasks: resource.tasks.append('NaN') # Load the model hypers from the database. hypers = db.load(experiment_name, 'hypers') # "Fit" the chooser - give the chooser data and let it fit the model(s). # NOTE: even if we are only suggesting for 1 task, we need to fit all of them # because the acquisition function for one task depends on all the tasks hypers = chooser.fit(tasks, hypers) if hypers: logging.debug('GP covariance hyperparameters:') print_hypers(hypers, input_space, options) # if 'duration hypers' in hypers: # logging.debug('Duration GP covariance hyperparameters:') # print_hypers(hypers['duration hypers'], input_space, options) # Save the hyperparameters to the database. 
if hypers: db.save(hypers, experiment_name, 'hypers') if options['recommendations'] == "during": # Compute the best value so far, a.k.a. the "recommendation" recommendation = chooser.best() # Save the recommendation in the DB if there are more complete jobs than last time store_recommendation(recommendation, db, experiment_name, tasks, jobs, input_space, time.time() - overall_start_time) # Get the decoupling groups task_couplings = { task_name: tasks[task_name].options["group"] for task_name in resource.tasks } logging.info('\nGetting suggestion for %s...\n' % (', '.join(task_couplings.keys()))) # Get the next suggested experiment from the chooser. suggested_input, suggested_tasks = chooser.suggest( task_couplings) suggested_task = suggested_tasks[0] # hack, deal with later suggested_job = { 'id': len(jobs) + 1, 'params': input_space.paramify(suggested_input), 'expt_dir': options['main_file_path'], 'tasks': suggested_tasks, 'resource': resource_name, 'main-file': options['tasks'][suggested_task]['main_file'], 'language': options['tasks'][suggested_task]['language'], 'status': 'new', 'submit time': time.time(), 'start time': None, 'end time': None, 'fast update': chooser.fast_update # just for plotting - not important } save_job(suggested_job, db, experiment_name) # Submit the job to the appropriate resource process_id = resource.attemptDispatch(experiment_name, suggested_job, db_address, expt_dir, output_directory) # Print the current time logging.info( 'Current time: %s' % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) # Set the status of the job appropriately (successfully submitted or not) if process_id is None: suggested_job['status'] = 'broken' logging.info( 'Job %s failed -- check output file for details.' % job['id']) save_job(suggested_job, db, experiment_name) else: suggested_job['status'] = 'pending' suggested_job['proc_id'] = process_id save_job(suggested_job, db, experiment_name) jobs = load_jobs(db, experiment_name) # Print out the status of the resources # resource.printStatus(jobs) print_resources_status(resources.values(), jobs) if len(set(task_couplings.values())) > 1: # if decoupled print_tasks_status(tasks.values(), jobs) # For debug - print pending jobs print_pending_jobs(jobs) # Terminate the optimization if all resources are finished (run max number of jobs) # or ANY task is finished (just my weird convention) jobs = load_jobs(db, experiment_name) tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space) terminate_resources = reduce( lambda x, y: x and y, map(lambda x: x.maxCompleteReached(jobs), resources.values()), True) terminate_tasks = reduce( lambda x, y: x or y, map(lambda x: x.maxCompleteReached(jobs), tasks.values()), False) terminate_maxtime = (time.time() - overall_start_time) >= ( options['max_time_mins'] * 60.0) if terminate_resources or terminate_tasks or terminate_maxtime: if terminate_resources: logging.info( 'Maximum number of jobs completed on all resources.') if terminate_tasks: logging.info( 'Maximum number of jobs reached for at least one task.') if terminate_maxtime: logging.info( 'Maximum total experiment time of %f minutes reached.' 
% options['max_time_mins']) # save rec in DB if options['recommendations'] in ("during", "end-one"): logging.info('Making final recommendation:') recommendation = chooser.best() store_recommendation(recommendation, db, experiment_name, tasks, jobs, input_space, time.time() - overall_start_time, final=True) elif options['recommendations'] == "end-all": logging.info('Making recommendations...') all_jobs = jobs for i in xrange(len(all_jobs)): logging.info('') logging.info( '-------------------------------------------------') logging.info( ' Getting recommendations for iter %d/%d ' % (i, len(all_jobs))) logging.info( '-------------------------------------------------') logging.info('') jobs = all_jobs[:i + 1] tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space) hypers = chooser.fit(tasks, hypers) print_hypers(hypers, input_space, options) # get the biggest end time of the jobs end_time = max([job['end time'] for job in jobs]) elapsed_time = end_time - overall_start_time recommendation = chooser.best() store_recommendation(recommendation, db, experiment_name, tasks, jobs, input_space, elapsed_time) logging.info('Have a nice day.') return # If no resources are accepting jobs, sleep if no_free_resources(db, experiment_name, resources): # Don't use logging here because it's too much effort to use logging without a newline at the end sys.stdout.write( 'Waiting for results...' if not waiting_for_results else '.') sys.stdout.flush() # sys.stderr.flush() waiting_for_results = True time.sleep(options['polling_time']) else: sys.stdout.write('\n')
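# A minimal launcher for the kwargs-based main() above, mirroring the optparse
# handling of the earlier script in this collection (treating this as the
# actual entry point is an assumption; flag names follow that earlier script):
if __name__ == '__main__':
    import optparse
    import os

    parser = optparse.OptionParser(usage="usage: %prog [options] directory")
    parser.add_option("--config", dest="config_file", type="string",
                      default="config.json", help="Configuration file name.")
    parser.add_option("--no-output", action="store_true", default=False,
                      help="Do not create output files.")
    parser.add_option("--repeat", dest="repeat", type="int", default=-1,
                      help="Used for repeating the same experiment many times.")
    (kwargs, args) = parser.parse_args()

    main(os.path.realpath(args[0]), kwargs.config_file, kwargs.no_output, kwargs.repeat)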