Example #1
def main(expt_dir, repeat=-1):

    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment_name"]

    if repeat > 0:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(input_space, options)
    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')
    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

    for job in jobs:

        if job['status'] == 'complete':
            print 'Job %d' % job['id']

            input_space.print_params(job['params'], left_indent=0)
            for task, val in job['values'].iteritems():
                print '%s: %s' % (task, val)

            print ''
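These snippets are meant to be invoked from the command line. A minimal driver for the function above, assuming it lives in a stand-alone script (the script name and argument handling are an illustrative sketch, not part of the original example):

if __name__ == '__main__':
    import sys
    # Usage: python print_results.py <experiment_dir> [repeat]
    expt_dir = sys.argv[1]
    repeat = int(sys.argv[2]) if len(sys.argv) > 2 else -1
    main(expt_dir, repeat=repeat)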
Example #2
def main(expt_dir, repeat=-1):

    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment-name"]

    if repeat > 0:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(input_space, options)
    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')
    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

    for task_name, task in tasks.iteritems():

        # print 'Printing results for task %s' % task_name

        for i in xrange(len(task.values)):

            print 'Iteration %d' % (i + 1)
            input_space.paramify_and_print(task.inputs[i], left_indent=0)

            print '%s: %s' % (task_name, task.values[i])

            print ''
            print ''
Example #3
def main():

    options = parse_config_file('.', 'config.json')
    experiment_name = options["experiment-name"]
    input_space = InputSpace(options["variables"])
    db = MongoDB(database_address=options['database']['address'])

    i = 0
    params_last = None
    recommendation = db.load(experiment_name, 'recommendations', {'id': i + 1})
    while recommendation is not None:
        params_last = input_space.vectorify(recommendation['params'])
        # advance the id before reloading; reloading first fetches id 1 twice
        i += 1
        recommendation = db.load(experiment_name, 'recommendations',
                                 {'id': i + 1})

    if params_last is not None:
        np.savetxt('pareto_front.txt', params_last, fmt='%e')
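The id-walking pattern above recurs in several later examples. A small helper that collects every recommendation into a list, under the same assumptions about the 'recommendations' collection (this helper is not in the spearmint source):

def load_all_recommendations(db, experiment_name):
    recs = []
    rec = db.load(experiment_name, 'recommendations', {'id': 1})
    while rec is not None:
        recs.append(rec)
        rec = db.load(experiment_name, 'recommendations', {'id': len(recs) + 1})
    return recs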
Example #4
def main(expt_dir, n_repeat):
    n_repeat = int(n_repeat)
    options = parse_config_file(expt_dir, 'config.json')
    tasks = options['tasks'].keys()

    jobs = dict()
    start_times = dict()
    for j in xrange(n_repeat):
        experiment_name = repeat_experiment_name(options["experiment_name"], j)
        db = MongoDB(database_address=options['database']['address'])
        jobs[j] = load_jobs(db, experiment_name)
        start_times[j] = db.load(experiment_name, 'start-time')['start-time']

    time_in_evals = defaultdict(lambda: np.zeros(n_repeat))
    time_in_fast_updates = np.zeros(n_repeat)
    time_in_slow_updates = np.zeros(n_repeat)

    for j in xrange(n_repeat):

        last_job_end_time = start_times[j]

        for job in jobs[j]:
            if job['status'] == 'complete':
                time_in_evals[job['tasks'][0]][j] += (job['end time'] -
                                                      job['start time']) / 60.0

                if job['fast update']:
                    time_in_fast_updates[j] += (job['start time'] -
                                                last_job_end_time) / 60.0
                else:
                    time_in_slow_updates[j] += (job['start time'] -
                                                last_job_end_time) / 60.0
                last_job_end_time = job['end time']

    for task in tasks:
        print 'Average time on task %s over %d repeats: %f +/- %f minutes (mean +/- std)' % (
            task, n_repeat, np.mean(
                time_in_evals[task]), np.std(time_in_evals[task]))
    total_time_in_evals = sum(time_in_evals.values())
    print 'Average time in JOBS over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(total_time_in_evals), np.std(total_time_in_evals))
    print 'Average time in FAST over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(time_in_fast_updates), np.std(time_in_fast_updates))
    print 'Average time in SLOW over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(time_in_slow_updates), np.std(time_in_slow_updates))
    total_optimizer_time = time_in_fast_updates + time_in_slow_updates
    print 'Average time in OPTIMIZER over %d repeats: %f +/- %f minutes (mean +/- std)' % (
        n_repeat, np.mean(total_optimizer_time), np.std(total_optimizer_time))
    print 'Total average time spent: %f' % np.sum([
        np.mean(total_time_in_evals),
        np.mean(time_in_fast_updates),
        np.mean(time_in_slow_updates)
    ])
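Example #4 locates each repeat's records via repeat_experiment_name. Going by the naming convention noted in Example #10 below (experiments stored under the original name plus a hyphen plus the number), a plausible one-liner would be:

def repeat_experiment_name(experiment_name, n):
    # e.g. ('my-expt', 3) -> 'my-expt-3'; inferred from the convention
    # described in Example #10, not taken from the spearmint source
    return '%s-%d' % (experiment_name, n)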
Example #5
def returnBest(config_directory):
    os.chdir("/home/carrknight/code/oxfish/runs/optimization/spearmint")
    options = get_options(config_directory, config_file="config.json")
    experiment_name = str(options['experiment-name'])
    db = MongoDB()
    resources = parse_resources_from_config(options)
    resource = resources.itervalues().next()

    # load hyper parameters
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(options)
    print "chooser", chooser

    hypers = db.load(experiment_name, "hypers")
    print "loaded hypers", hypers  # from GP.to_dict()

    jobs = load_jobs(db, experiment_name)
    remove_broken_jobs(db, jobs, experiment_name, resources)
    task_options = {task: options["tasks"][task] for task in resource.tasks}
    task_group = spearmint.main.load_task_group(db, options, resource.tasks)

    hypers = spearmint.main.load_hypers(db, experiment_name)
    print "loaded hypers", hypers  # from GP.to_dict()

    hypers = chooser.fit(task_group, hypers, task_options)
    print "\nfitted hypers:"
    print(hypers)

    lp, x = chooser.best()
    x = x.flatten()
    print "best", lp, x
    bestp = task_group.paramify(task_group.from_unit(x))
    print "expected best position", bestp

    return bestp
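A usage sketch for returnBest, with a placeholder directory (a real call would pass the directory holding config.json):

if __name__ == '__main__':
    best = returnBest('/path/to/config/directory')  # hypothetical path
    print 'best parameters:', best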
Example #6
def launch(db_address, experiment_name, job_id):
    """
    Launches a job for a given id.
    """

    db  = MongoDB(database_address=db_address)
    job = db.load(experiment_name, 'jobs', {'id' : job_id})

    start_time        = time.time()
    job['start time'] = start_time
    db.save(job, experiment_name, 'jobs', {'id' : job_id})

    sys.stderr.write("Job launching after %0.2f seconds in submission.\n" 
                     % (start_time-job['submit time']))

    success = False

    try:
        if job['language'].lower() == 'matlab':
            result = matlab_launcher(job)

        elif job['language'].lower() == 'python':
            result = python_launcher(job)

        elif job['language'].lower() == 'shell':
            result = shell_launcher(job)

        elif job['language'].lower() == 'mcr':
            result = mcr_launcher(job)

        else:
            raise Exception("That language has not been implemented.")

        if not isinstance(result, dict):
            # Returning a bare NaN means NaN on all tasks
            if np.isnan(result):
                # The dict comprehension
                #   result = {task_name: np.nan for task_name in job['tasks']}
                # reportedly fails for some users, so build the dict with zip:
                result = dict(list(zip(job['tasks'], [np.nan]*len(job['tasks']))))
            elif len(job['tasks']) == 1: # Only one named job
                result = {job['tasks'][0] : result}
            else:
                result = {'main' : result}
        
        if set(result.keys()) != set(job['tasks']):
            raise Exception("Result task names %s did not match job task names %s." % (list(result.keys()), job['tasks']))

        success = True
    except:
        import traceback
        traceback.print_exc()
        sys.stderr.write("Problem executing the function\n")
        print(sys.exc_info())
        
    end_time = time.time()

    if success:
        sys.stderr.write("Completed successfully in %0.2f seconds. [%s]\n" 
                         % (end_time-start_time, result))
        
        job['values']   = result
        job['status']   = 'complete'
        job['end time'] = end_time

    else:
        sys.stderr.write("Job failed in %0.2f seconds.\n" % (end_time-start_time))
    
        # Update metadata.
        job['status']   = 'broken'
        job['end time'] = end_time

    db.save(job, experiment_name, 'jobs', {'id' : job_id})
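For context, launch() accepts three shapes of result from the launched function: a dict keyed by task names, a bare NaN (meaning NaN on all tasks), or a bare number for a single-task job. A minimal Python job compatible with that contract might look as follows; the main(job_id, params) signature follows the usual spearmint convention, and the variable name 'x' is a hypothetical entry from config.json:

import numpy as np

def main(job_id, params):
    x = params['x']  # hypothetical variable defined in config.json
    value = float(np.sum(np.square(x)))
    if not np.isfinite(value):
        return np.nan  # NaN on all tasks, as handled by launch() above
    return {'main': value}  # or just `value` for a single-task job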
Example #7
def launch(db_address, experiment_name, job_id):
    """
    Launches a job for a given id.
    """

    db = MongoDB(database_address=db_address)
    job = db.load(experiment_name, 'jobs', {'id': job_id})

    start_time = time.time()
    job['start time'] = start_time
    db.save(job, experiment_name, 'jobs', {'id': job_id})

    sys.stderr.write("Job launching after %0.2f seconds in submission.\n" %
                     (start_time - job['submit time']))

    success = False

    try:
        if job['language'].lower() == 'matlab':
            result = matlab_launcher(job)

        elif job['language'].lower() == 'python':
            result = python_launcher(job)
            # sys.stderr.write('RESULT EQUALS %s' % result)
        elif job['language'].lower() == 'shell':
            result = shell_launcher(job)
        else:
            raise Exception("That language has not been implemented.")

        if not isinstance(result, dict):
            # Returning a bare NaN means NaN on all tasks
            if np.isnan(result):
                result = {task_name: np.nan for task_name in job['tasks']}
            elif len(job['tasks']) == 1:
                # Only one named task and result is not a dict: wrap it in one
                result = {job['tasks'][0]: result}
            else:
                result = {DEFAULT_TASK_NAME: result}
        else:
            if "objective" in result and "constraints" in result:
                result_new = dict()
                result_new[DEFAULT_TASK_NAME] = result["objective"]
                for i in xrange(len(result["constraints"])):
                    result_new['%s%d' % (DEFAULT_CONSTRAINT_NAME,
                                         i)] = result["constraints"][i]
                result = result_new

        # It is fine for the result dict to contain extra entries; we only
        # require that every job task name is present (the special 'NaN'
        # key is also tolerated).
        if not set(job['tasks']).issubset(set(result.keys()).union(['NaN'])):
            raise Exception(
                "Result task names %s did not match job task names %s." %
                (result.keys(), job['tasks']))

        success = True
    except:
        sys.stderr.flush()
        sys.stdout.flush()
        import traceback
        traceback.print_exc()
        sys.stderr.write("Problem executing the function\n")
        print sys.exc_info()

    end_time = time.time()

    if success:
        sys.stderr.write("Completed successfully in %0.2f seconds. [%s]\n" %
                         (end_time - start_time, result))

        job['values'] = result
        job['status'] = 'complete'
        job['end time'] = end_time

    else:
        sys.stderr.write("Job failed in %0.2f seconds.\n" %
                         (end_time - start_time))

        # Update metadata.
        job['status'] = 'broken'
        job['end time'] = end_time

    db.save(job, experiment_name, 'jobs', {'id': job_id})
Example #8
def main(expt_dir):

    os.chdir(expt_dir)
    sys.path.append(expt_dir)

    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment-name"]

    #	main_file = options['main_file']
    main_file = 'OSY_no_noisy'
    if main_file[-3:] == '.py':
        main_file = main_file[:-3]
    module = __import__(main_file)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(input_space, options)
    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')
    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

    if len(tasks) < 2:
        print 'Not a multi-objective problem!'
        return -1

    if options['language'] != "PYTHON":
        print 'Only python programs supported!'
        return -1

    objectives = dict()
    constraints = dict()

    for task in tasks:
        if tasks[task].type == 'objective':
            objectives[task] = tasks[task]
        else:
            constraints[task] = tasks[task]

    assert len(objectives) >= 2 and len(constraints) >= 1

    def create_fun(task):
        def fun(params, gradient=False):

            if len(params.shape) > 1 and params.shape[1] > 1:

                values = np.zeros(params.shape[0])
                params_orig = params

                for i in range(params_orig.shape[0]):
                    param = params[i, :]
                    param = param.flatten()
                    param = input_space.from_unit(np.array([param])).flatten()

                    values[i] = module.main(
                        0,
                        paramify_no_types(input_space.paramify(param)))[task]

            else:
                return module.main(
                    0, paramify_no_types(input_space.paramify(params)))[task]

            return values

        return fun

    funs_o = [create_fun(task) for task in objectives]
    funs_c = [create_fun(task) for task in constraints]

    moop = MOOP_basis_functions(funs_o,
                                input_space.num_dims,
                                constraints=funs_c)

    grid = sobol_grid.generate(input_space.num_dims,
                               grid_size=1000 * input_space.num_dims)

    # We only retain the feasible points

    moop.solve_using_grid(grid)

    reference = np.ones(len(objectives)) * 1e3

    hyper_volume_solution = moop.get_hypervolume(reference.tolist())

    result = moop.compute_pareto_front_and_set()

    front = result['frontier']
    pareto_set = result['pareto_set']

    with open('hypervolume_solution.txt', 'a') as f:
        print >> f, "%lf" % (hyper_volume_solution)

    # We iterate through each recommendation made

    i = 0
    more_recommendations = True
    while more_recommendations:

        recommendation = db.load(experiment_name, 'recommendations',
                                 {'id': i + 1})

        if recommendation is None:
            more_recommendations = False
        else:

            solution = input_space.to_unit(
                input_space.vectorify(recommendation['params']))

            if len(solution.shape) == 1:
                solution = solution.reshape((1, len(solution)))

            # We compute the objective values associated to this recommendation

            values_solution = np.zeros((solution.shape[0], len(objectives)))

            for j in range(values_solution.shape[0]):
                for k in range(values_solution.shape[1]):
                    values_solution[j, k] = funs_o[k](solution[j:(j + 1), :])

            moop = MOOP_basis_functions(funs_o, input_space.num_dims)

            moop.set_population(solution)

            hyper_volume = moop.get_hypervolume(reference.tolist())

            # We make sure that there are no infeasible points recommended
            # If there are infeasible recommendations we return 0 as the hypervolume

            all_feasible = True

            for k in range(len(funs_c)):
                all_feasible = all_feasible and not np.any(
                    funs_c[k](solution) < 0)

            if not all_feasible:
                hyper_volume = 0.0

            with open('hypervolumes.txt', 'a') as f:
                print >> f, "%lf" % (hyper_volume)

            with open('evaluations.txt', 'a') as f_handle:
                np.savetxt(
                    f_handle,
                    np.array([recommendation['num_complete_tasks'].values()]),
                    delimiter=' ',
                    newline='\n')

        i += 1
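Example #8 delegates the hypervolume computation to MOOP_basis_functions.get_hypervolume. For intuition, here is a self-contained sketch of the quantity in the two-objective minimization case (a minimal stand-in, not the spearmint implementation):

import numpy as np

def hypervolume_2d(front, reference):
    # Area dominated by a 2-D Pareto front (minimization), bounded by a
    # reference point that every counted front point must improve on.
    front = np.asarray(front, dtype=float)
    front = front[np.all(front < reference, axis=1)]  # drop points beyond the reference
    if front.size == 0:
        return 0.0
    front = front[np.argsort(front[:, 0])]  # sort by the first objective
    hv, prev_f2 = 0.0, reference[1]
    for f1, f2 in front:
        if f2 < prev_f2:  # skip dominated points
            hv += (reference[0] - f1) * (prev_f2 - f2)
            prev_f2 = f2
    return hv

print hypervolume_2d([[1.0, 5.0], [2.0, 3.0]], [10.0, 10.0])  # 61.0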
Example #9
def main():

    parser = optparse.OptionParser(usage="usage: %prog [options] directory")

    parser.add_option("--config", dest="config_file",
                      help="Configuration file name.",
                      type="string", default="config.json")
    parser.add_option("--no-output", action="store_true",
                      help="Do not create output files.")
    parser.add_option("--repeat", dest="repeat",
                      help="Used for repeating the same experiment many times.",
                      type="int", default="-1")

    (commandline_kwargs, args) = parser.parse_args()

    # Read in the config file
    #expt_dir = os.path.realpath('examples/cifar10')
    expt_dir  = os.path.realpath(args[0])
    if not os.path.isdir(expt_dir):
        raise Exception("Cannot find directory %s" % expt_dir)

    options = parse_config_file(expt_dir, commandline_kwargs.config_file)
    experiment_name = options["experiment-name"]

    # Special advanced feature for repeating the same experiment many times
    if commandline_kwargs.repeat >= 0:
        experiment_name = repeat_experiment_name(experiment_name, commandline_kwargs.repeat)

    if not commandline_kwargs.no_output: # if we want output
        if commandline_kwargs.repeat >= 0:
            output_directory = repeat_output_dir(expt_dir, commandline_kwargs.repeat)
        else:
            output_directory = os.path.join(expt_dir, 'output', options["experiment-name"])
        if not os.path.isdir(output_directory):
            os.mkdir(output_directory)

        if commandline_kwargs.repeat < 0:
            rootLogger = logging.getLogger()
            fileHandler = logging.FileHandler(os.path.join(output_directory, 'main.log'))
            fileHandler.setFormatter(logFormatter)
            fileHandler.setLevel(logLevel)
            rootLogger.addHandler(fileHandler)
        # consoleHandler = logging.StreamHandler()
        # consoleHandler.setFormatter(logFormatter)
        # consoleHandler.setLevel(logLevel)
        # rootLogger.addHandler(consoleHandler)
    else:
        output_directory = None

    input_space = InputSpace(options["variables"])

    resources = parse_resources_from_config(options)

    # Load up the chooser.
    chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])

    chooser = chooser_module.init(input_space, options)

    # Connect to the database

    db_address = options['database']['address']
    db         = MongoDB(database_address=db_address)

    if os.getenv('SPEARMINT_MAX_ITERATIONS') is None and 'max_iterations' not in set(options.keys()):
        maxiterations = DEFAULT_MAX_ITERATIONS
    elif os.getenv('SPEARMINT_MAX_ITERATIONS') is not None:
        maxiterations = int(os.getenv('SPEARMINT_MAX_ITERATIONS'))
    else:
        maxiterations = options['max_iterations']

    # Set random seed

    if 'random_seed' in options.keys():
        np.random.seed(int(options['random_seed']))
        seed(int(options['random_seed']))

    waiting_for_results = False  # for printing purposes only
    while True:

        for resource_name, resource in resources.iteritems():

            jobs = load_jobs(db, experiment_name)
            # resource.printStatus(jobs)

            # If the resource is currently accepting more jobs
            # TODO: here cost will eventually also be considered: even if the
            #       resource is not full, we might wait because of cost incurred
            # Note: I could choose to fill up one resource and then move on to the next ("if")
            # You could also do it the other way, by changing "if" to "while" here

            # Remove any broken jobs from pending
            # note: make sure to do this before the acceptingJobs() condition is checked
            remove_broken_jobs(db, jobs, experiment_name, resources)

            if resource.acceptingJobs(jobs):

                if waiting_for_results:
                    logging.info('\n')
                waiting_for_results = False

                optim_start_time = time.time()

                # Load jobs from DB
                # (move out of one or both loops?) would need to pass into load_tasks
                jobs = load_jobs(db, experiment_name)

                # Print out a list of broken jobs
                print_broken_jobs(jobs)

                # Get a suggestion for the next job
                tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

                # Special case when coupled and there is a NaN task-- what to do with NaN task when decoupled??
                if 'NaN' in tasks and 'NaN' not in resource.tasks:
                    resource.tasks.append('NaN')

                # Load the model hypers from the database.
                hypers = db.load(experiment_name, 'hypers')

                # "Fit" the chooser - give the chooser data and let it fit the model(s).
                # NOTE: even if we are only suggesting for 1 task, we need to fit all of them
                # because the acquisition function for one task depends on all the tasks

                hypers = chooser.fit(tasks, hypers)

                if hypers:
                    logging.debug('GP covariance hyperparameters:')
                print_hypers(hypers)

                # Save the hyperparameters to the database.
                if hypers:
                    db.save(hypers, experiment_name, 'hypers')

                # Compute the best value so far, a.k.a. the "recommendation"

                recommendation = chooser.best()

                # Save the recommendation in the DB

                numComplete_by_task = {task_name : task.numComplete(jobs) for task_name, task in tasks.iteritems()}

                db.save({'num_complete' : resource.numComplete(jobs),
                     'num_complete_tasks' : numComplete_by_task,
                     'params'   : input_space.paramify(recommendation['model_model_input']),
                     'objective': recommendation['model_model_value'],
                     'params_o' : None if recommendation['obser_obser_input'] is None else input_space.paramify(recommendation['obser_obser_input']),
                     'obj_o'    : recommendation['obser_obser_value'],
                     'params_om': None if recommendation['obser_model_input'] is None else input_space.paramify(recommendation['obser_model_input']),
                     'obj_om'   : recommendation['obser_model_value']},
                experiment_name, 'recommendations', {'id' : len(jobs)})

                # Get the decoupling groups
                task_couplings = {task_name : tasks[task_name].options["group"] for task_name in resource.tasks}

                logging.info('\nGetting suggestion for %s...\n' % (', '.join(task_couplings.keys())))

                # Get the next suggested experiment from the chooser.

                suggested_input, suggested_tasks = chooser.suggest(task_couplings, optim_start_time)
                suggested_task = suggested_tasks[0] # hack, deal with later

                suggested_job = {
                    'id'          : len(jobs) + 1,
                    'params'      : input_space.paramify(suggested_input),
                    'expt_dir'    : options['main_file_path'],
                    'tasks'       : suggested_tasks,
                    'resource'    : resource_name,
                    'main-file'   : resource.main_file,
                    'language'    : options['tasks'][suggested_task]['language'],
                    'status'      : 'new',
                    'submit time' : time.time(),
                    'start time'  : None,
                    'end time'    : None
                }

                save_job(suggested_job, db, experiment_name)

                # Submit the job to the appropriate resource
                process_id = resource.attemptDispatch(experiment_name, suggested_job, db_address,
                                                      expt_dir, output_directory)

                # Print the current time
                logging.info('Current time: %s' % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

                # Set the status of the job appropriately (successfully submitted or not)
                if process_id is None:
                    suggested_job['status'] = 'broken'
                    logging.info('Job %s failed -- check output file for details.' % suggested_job['id'])
                    save_job(suggested_job, db, experiment_name)
                else:
                    suggested_job['status'] = 'pending'
                    suggested_job['proc_id'] = process_id
                    save_job(suggested_job, db, experiment_name)

                jobs = load_jobs(db, experiment_name)

                # Print out the status of the resources
                # resource.printStatus(jobs)
                print_resources_status(resources.values(), jobs)

                if len(set(task_couplings.values())) > 1: # if decoupled
                    print_tasks_status(tasks.values(), jobs)

                # For debug - print pending jobs
                print_pending_jobs(jobs)


        # Terminate the optimization if all resources are finished (run max number of jobs)
        # or ANY task is finished (just my weird convention)
        if reduce(lambda x,y: x and y, map(lambda x: x.maxCompleteReached(jobs), resources.values()), True) or \
           reduce(lambda x,y: x or y,  map(lambda x: x.maxCompleteReached(jobs), tasks.values()),     False):
            # Do all this extra work just to save the final recommendation -- would be ok to delete everything
            # in here and just "return"
            sys.stdout.write('\n')
            jobs = load_jobs(db, experiment_name)
            tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)
            hypers = db.load(experiment_name, 'hypers')
            hypers = chooser.fit(tasks, hypers)
            if hypers:
                db.save(hypers, experiment_name, 'hypers')
            # logging.info('\n**All resources have run the maximum number of jobs.**\nFinal recommendation:')
            recommendation = chooser.best()

            # numComplete_per_task
            numComplete_by_task = {task_name : task.numComplete(jobs) for task_name, task in tasks.iteritems()}
            db.save({'num_complete'       : resource.numComplete(jobs),
                     'num_complete_tasks' : numComplete_by_task,
                     'params'   : input_space.paramify(recommendation['model_model_input']),
                     'objective': recommendation['model_model_value'],
                     'params_o' : None if recommendation['obser_obser_input'] is None else input_space.paramify(recommendation['obser_obser_input']),
                     'obj_o'    : recommendation['obser_obser_value'],
                     'params_om': None if recommendation['obser_model_input'] is None else input_space.paramify(recommendation['obser_model_input']),
                     'obj_om'   : recommendation['obser_model_value']},
                experiment_name, 'recommendations', {'id'       : len(jobs)})
            logging.info('Maximum number of jobs completed. Have a nice day.')
            return

        # If no resources are accepting jobs, sleep
        if no_free_resources(db, experiment_name, resources):
            # Don't use logging here because it's too much effort to use logging without a newline at the end
            sys.stdout.write('Waiting for results...' if not waiting_for_results else '.')
            sys.stdout.flush()
            # sys.stderr.flush()
            waiting_for_results = True
            time.sleep(options['polling_time'])
        else:
            sys.stdout.write('\n')
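The loop above pulls everything from config.json. The keys this example actually reads, shown as the equivalent Python dict with illustrative values (the variable and task entries are assumptions about a typical setup, not a schema taken from the source):

options = {
    "experiment-name": "my-experiment",
    "chooser": "default_chooser",
    "database": {"address": "localhost"},
    "polling_time": 20,
    "max_iterations": 100,  # optional; SPEARMINT_MAX_ITERATIONS overrides it
    "random_seed": 1,       # optional
    "main_file_path": ".",
    "variables": {
        "x": {"type": "FLOAT", "size": 1, "min": -5, "max": 5},
    },
    "tasks": {
        "main": {"type": "OBJECTIVE", "language": "PYTHON", "group": 0},
    },
}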
Example #10
def main(dirs,
         n_repeat=-1,
         n_iter_spec=None,
         rec_type="model",
         average="mean",
         log_scale=False,
         violation_value=1.,
         constraint_tol=0.,
         make_dist_plot=False,
         mainfile=None,
         stretch_x=False,
         task_comp_x=None,
         plot_wall_time=False,
         bin_size=1.0,
         plot_separate=False,
         labels=None,
         y_axis_label=None,
         x_axis_label=None):

    # Create the figure that plots utility gap
    fig = dict()
    ax = dict()
    # averaging function
    if average == "mean":
        avg = np.mean
    elif average == "median":
        avg = np.median
    else:
        raise Exception("Unknown average %s" % average)

    fig['err'] = plt.figure()
    ax['err'] = fig['err'].add_subplot(1, 1, 1)
    if plot_wall_time:
        ax['err'].set_xlabel("wall time (min)", size=25)
    elif x_axis_label:
        ax['err'].set_xlabel(x_axis_label, size=25)
    else:
        ax['err'].set_xlabel('Number of function evaluations', size=25)
    ax['err'].tick_params(axis='both', which='major', labelsize=20)

    # Create the figure that plots L2 distance from solution
    fig['dist'] = plt.figure()
    ax['dist'] = fig['dist'].add_subplot(1, 1, 1)
    if x_axis_label:
        ax['dist'].set_xlabel(x_axis_label, size=25)
    else:
        ax['dist'].set_xlabel('Number of function evaluations', size=25)
    if y_axis_label:
        ax['dist'].set_ylabel(y_axis_label, size=25)
    elif log_scale:
        ax['dist'].set_ylabel(r'$\log_{10}\, \ell_2$-distance', size=25)
    else:
        ax['dist'].set_ylabel(r'$\ell_2$-distance', size=25)
    ax['dist'].tick_params(axis='both', which='major', labelsize=20)

    db_document_name = 'recommendations'

    acq_names = list()
    for expt_dir in dirs:
        options = parse_config_file(expt_dir, 'config.json')
        experiment_name = options["experiment_name"]
        input_space = InputSpace(options["variables"])
        chooser_module = importlib.import_module('spearmint.choosers.' +
                                                 options['chooser'])
        chooser = chooser_module.init(input_space, options)
        db = MongoDB(database_address=options['database']['address'])
        jobs = load_jobs(db, experiment_name)
        hypers = db.load(experiment_name, 'hypers')
        tasks = parse_tasks_from_jobs(jobs, experiment_name, options,
                                      input_space)

        if rec_type == "model":
            if mainfile is None:
                main_file = options['main_file']
            else:
                main_file = mainfile

            sys.path.append(options['main_file_path'])  # TODO: make this nicer with proper importing
            if main_file.endswith('.py'):
                module = importlib.import_module(main_file[:-3])
            else:
                module = importlib.import_module(main_file)
            sys.path.remove(options['main_file_path'])

        obj, con = get_objectives_and_constraints(options)  # get the names
        obj = obj[0]  # only one objective
        print 'Found %d constraints' % len(con)
        plot_utility_gap = rec_type == "model" and hasattr(module, 'true_val')

        if plot_utility_gap:
            print 'PLOTTING UTILITY GAP'
            if y_axis_label:
                ax['err'].set_ylabel(y_axis_label, size=25)
            elif log_scale:
                ax['err'].set_ylabel(r'$\log_{10}$ utility gap', size=25)
            else:
                ax['err'].set_ylabel('utility gap', size=25)
        else:
            if y_axis_label:
                ax['err'].set_ylabel(y_axis_label, size=25)
            elif log_scale:
                ax['err'].set_ylabel(r'$\log_{10}$ objective value', size=25)
            else:
                ax['err'].set_ylabel('objective value', size=25)

        # Make the directory for the plots
        plots_dir = os.path.join(expt_dir, 'plots')
        if not os.path.isdir(plots_dir):
            os.mkdir(plots_dir)

        # if the module provides the location of the true solution, plot the distance to this solution vs iterations
        if make_dist_plot and not hasattr(module, 'true_sol'):
            raise Exception(
                "make_dist_plot turned on but cannot find true sol in the main_file"
            )

        # If repeat >= 0, then we are averaging a number of experiments
        # We assume the experiments are stored with the original name plus a hyphen plus the number
        n_repeat = int(n_repeat)
        if n_repeat < 0:
            recs = db.load(experiment_name, db_document_name)
            if recs is None:
                raise Exception(
                    "Could not find experiment %s in database at %s" %
                    (experiment_name, options['database']['address']))

            # the x axis represents the number of evals of a particular task given by task_comp_x,
            # so we only take the data where this number was incremented, i.e. when this task was evaluated
            if task_comp_x:
                # only include recommendations when you finish a particular task
                new_recs = list()
                last_complete = 0
                for rec in recs:
                    cur_complete = rec['num_complete_tasks'][task_comp_x]
                    if cur_complete > last_complete:
                        last_complete = cur_complete
                        new_recs.append(rec)
                recs = new_recs

            n_iter = len(recs) if n_iter_spec is None else n_iter_spec

            iters = range(n_iter)

            if plot_wall_time:
                if task_comp_x:
                    raise Exception("Do not use plot wall_time with task_x")
                iters = [rec['total_elapsed_time'] / 60.0 for rec in recs]
                iters = iters[:n_iter]
                iters = np.array(iters, dtype=float)

            print 'Found %d iterations' % len(recs)

            if rec_type == "model":
                values = [
                    true_func(rec, module, violation_value, constraint_tol,
                              obj, con) for rec in recs
                ]

                if log_scale:
                    ax['err'].plot(iters, map(np.log10, values))
                else:
                    ax['err'].plot(iters, values)
            else:
                if rec_type == "observations":
                    observations = [x['obj_o'] for x in recs]
                elif rec_type == "mixed":
                    observations = [x['obj_om'] for x in recs]
                else:
                    raise Exception("unknown rec type")

                for i in xrange(len(observations)):
                    if observations[i] is None or np.isnan(observations[i]):
                        observations[i] = violation_value
                # print observations
                # print len(observations)

                if log_scale:
                    ax['err'].plot(iters, np.log10(observations))
                else:
                    ax['err'].plot(iters, observations)

            if make_dist_plot:
                distances = [
                    params_norm(rec['params'], module.true_sol())
                    for rec in recs
                ]
                if log_scale:
                    ax['dist'].plot(iters, np.log10(distances))
                else:
                    ax['dist'].plot(iters, distances)
        else:
            # MULTIPLE REPEATS
            repeat_recs = [
                db.load(repeat_experiment_name(experiment_name, j),
                        db_document_name) for j in xrange(n_repeat)
            ]
            if None in repeat_recs:
                for i, repeat_rec in enumerate(repeat_recs):
                    if repeat_rec is None:
                        print 'Could not load experiment %s repeat %d' % (
                            experiment_name, i)
                print 'Exiting...'
                return

            if task_comp_x:
                # only include recommendations when you finish a particular task
                new_repeat_recs = list()
                for recs in repeat_recs:
                    recs = sorted(recs, key=lambda k: k['id'])  # sort by id
                    new_recs = list()
                    last_complete = 0
                    for rec in recs:
                        cur_complete = rec['num_complete_tasks'][task_comp_x]
                        if cur_complete == last_complete + 1:
                            last_complete = cur_complete
                            new_recs.append(rec)
                        elif cur_complete == last_complete:
                            pass
                        else:
                            print(
                                'WARNING: cur complete=%d, last_complete=%d' %
                                (cur_complete, last_complete))
                            break
                    new_repeat_recs.append(new_recs)

                repeat_recs = new_repeat_recs

            n_iter_each = map(len, repeat_recs)
            if plot_wall_time:
                """ do everything separately from here if plotting wall time
                here is what we do... we can't have a square array because 
                we don't want to take the minimum number of iterations... 
                we want to take ALL iterations for each repeat, and this number
                may be different for different repeats.
                so we store all times/values in a list of arrays
                then we chop things up into bins
                """
                if rec_type != "model":
                    values = list()
                    wall_times = list()
                    for j in xrange(
                            n_repeat):  # loop over repeated experiments

                        wall_times.append(
                            np.array([
                                repeat_recs[j][i]['total_elapsed_time'] / 60.0
                                for i in xrange(n_iter_each[j])
                            ]))

                        if rec_type == "observations":
                            values.append([
                                repeat_recs[j][i]['obj_o']
                                for i in xrange(n_iter_each[j])
                            ])
                        elif rec_type == "mixed":
                            values.append([
                                repeat_recs[j][i]['obj_om']
                                for i in xrange(n_iter_each[j])
                            ])
                        else:
                            raise Exception("unknown rec type")

                        for i in xrange(n_iter_each[j]):
                            if values[-1][i] is None or np.isnan(
                                    values[-1][i]):
                                values[-1][i] = violation_value

                        values[-1] = np.array(values[-1])

                    # print values

                else:  # plotting wall time but using the model
                    values = list()
                    wall_times = list()
                    for j in xrange(
                            n_repeat):  # loop over repeated experiments

                        # for this repeat, get all wall times
                        wall_times.append(
                            np.array([
                                repeat_recs[j][i]['total_elapsed_time'] / 60.0
                                for i in xrange(n_iter_each[j])
                            ]))

                        values_j = np.zeros(n_iter_each[j])
                        for i in xrange(
                                n_iter_each[j]):  # loop over iterations
                            val = true_func(repeat_recs[j][i], module, None,
                                            constraint_tol, obj, con)
                            if val is None or np.isnan(
                                    val
                            ):  #set to violation value here so we can print out this info...
                                values_j[i] = violation_value
                                print 'Violation with params %s at repeat %d iter %d' % (
                                    paramify_no_types(
                                        repeat_recs[j][i]['params']), j, i)
                            else:
                                values_j[i] = val
                        values.append(values_j)

                # change the data structure to be time bins and include everything in
                # those time bins across repeats
                end_times = map(max, wall_times)
                for j in xrange(n_repeat):
                    print 'end time for repeat %d: %f' % (j, end_times[j])
                iters = np.arange(0.0, np.round(max(end_times)), bin_size)
                new_values = list()
                for i, timestep in enumerate(iters):
                    # print 'Creating wall time bin from %f to %f. (%d/%d)' % (i, i+bin_size, i, len(iters))
                    new_value = list()
                    for j in xrange(n_repeat):
                        new_value = np.append(
                            new_value, values[j][np.logical_and(
                                wall_times[j] >= timestep, wall_times[j] <
                                timestep + bin_size)].flatten())
                    # if a time bin is empty across all repeats:
                    if len(new_value) == 0:
                        if i == 0:
                            new_value = [violation_value]
                        else:
                            new_value = new_values[-1]
                    new_values.append(new_value)
                values = new_values

                # make the first value equal to the violation value (optional)
                iters = np.append(iters, max(iters) + bin_size)
                values.insert(0, np.array([violation_value]))

                # Average over the repeated experiments
                average_values = map(avg, values)
                errorbars = bootstrap_errorbars(values, log=log_scale, avg=avg)
                # plt.yscale('log', nonposy='clip')

                if log_scale:
                    ax['err'].errorbar(iters,
                                       np.log10(average_values),
                                       yerr=errorbars)
                else:
                    ax['err'].errorbar(iters, average_values, yerr=errorbars)

            else:
                # NOT WALL TIME

                n_iter = reduce(min, n_iter_each, np.inf)
                if n_iter_spec is None:
                    print 'Found %d repeats with at least %d iterations' % (
                        n_repeat, n_iter)
                    print {i: n_iter_each[i] for i in xrange(n_repeat)}
                elif n_iter < n_iter_spec:
                    print 'You specified %d iterations but there are only %d available... so plotting %d' % (
                        n_iter_spec, n_iter, n_iter)
                else:
                    n_iter = n_iter_spec
                    print 'Plotting %d iterations' % n_iter

                iters = range(n_iter)

                if rec_type != "model":
                    values = np.zeros((n_iter, n_repeat))
                    for j in xrange(n_repeat):  # loop over repeated experiments
                        for i in iters:  # loop over iterations
                            if rec_type == "observations":
                                val = repeat_recs[j][i]['obj_o']
                            elif rec_type == "mixed":
                                val = repeat_recs[j][i]['obj_om']
                            else:
                                raise Exception("unknown rec type")
                            # check before assigning into the float array;
                            # None cannot be stored in it directly
                            if val is None or np.isnan(val):
                                val = violation_value
                            values[i, j] = val

                    print values

                else:
                    values = np.zeros((n_iter, n_repeat))
                    distances = np.zeros((n_iter, n_repeat))
                    for j in xrange(
                            n_repeat):  # loop over repeated experiments
                        for i in iters:  # loop over iterations
                            val = true_func(repeat_recs[j][i], module, None,
                                            constraint_tol, obj, con)
                            if val is None:  #set to violation value here so we can print out this info...
                                values[i, j] = violation_value
                                print 'Violation with params %s at repeat %d iter %d' % (
                                    paramify_no_types(
                                        repeat_recs[j][i]['params']), j, i)
                            else:
                                values[i, j] = val

                            if make_dist_plot:
                                distances[i, j] = params_norm(
                                    repeat_recs[j][i]['params'],
                                    module.true_sol())

                if plot_separate:
                    if log_scale:
                        ax['err'].plot(iters, np.log10(values))
                    else:
                        ax['err'].plot(iters, values)

                else:
                    # Average over the repeated experiments
                    average_values = map(avg, values)
                    errorbars = bootstrap_errorbars(values,
                                                    log=log_scale,
                                                    avg=avg)
                    # plt.yscale('log', nonposy='clip')

                    if stretch_x:
                        fctr = float(n_iter_spec) / float(n_iter)
                        iters = np.array(iters) * fctr
                        print 'Stretching x axis by a factor of %f' % fctr

                    if log_scale:
                        ax['err'].errorbar(iters,
                                           np.log10(average_values),
                                           yerr=errorbars)
                    else:
                        ax['err'].errorbar(iters,
                                           average_values,
                                           yerr=errorbars)

                    if make_dist_plot:
                        average_dist = map(avg, distances)
                        errorbars_dist = bootstrap_errorbars(distances,
                                                             log=log_scale,
                                                             avg=avg)
                        if log_scale:
                            ax['dist'].errorbar(iters,
                                                np.log10(average_dist),
                                                yerr=errorbars_dist)
                        else:
                            ax['dist'].errorbar(iters,
                                                average_dist,
                                                yerr=errorbars_dist)

        acq_names.append(options["tasks"].values()[0]["acquisition"])
        if acq_names[-1] == 'PES':
            acq_names[-1] = 'PESC'
        if acq_names[-1] == 'ExpectedImprovement':
            acq_names[-1] = 'EIC'

    if labels:
        ax['err'].legend(labels.split(';'), fontsize=16, loc='lower left')
        ax['dist'].legend(labels.split(';'), fontsize=20)
    elif len(acq_names) > 1:
        ax['err'].legend(acq_names, fontsize=20)
        ax['dist'].legend(acq_names, fontsize=20)

    # save it in the last directory... (if there are multiple directories)
    if not plot_wall_time:
        if n_repeat >= 0:
            print 'Made a plot with %d repeats and %d iterations' % (n_repeat,
                                                                     n_iter)
        else:
            print 'Made a plot with %d iterations' % (n_iter)
    else:
        if n_repeat >= 0:
            print 'Made a plot with %d repeats and %f minutes' % (n_repeat,
                                                                  max(iters))
        else:
            print 'Made a plot with %f minutes' % (max(iters))

    file_prefix = '%s_' % average if n_repeat > 0 else ''
    file_postfix = '_wall_time' if plot_wall_time else ''
    fig['err'].tight_layout()
    figname = os.path.join(plots_dir,
                           '%serror%s' % (file_prefix, file_postfix))
    fig['err'].savefig(figname + '.pdf')
    fig['err'].savefig(figname + '.svg')
    print 'Saved to %s' % figname
    if make_dist_plot:
        fig['dist'].tight_layout()
        figname_dist = os.path.join(
            plots_dir, '%sl2_distance%s.pdf' % (file_prefix, file_postfix))
        fig['dist'].savefig(figname_dist)
        print 'Saved to %s' % figname_dist
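An invocation sketch for the plotting function above, averaging several repeats of one experiment (the directory and counts are placeholders):

if __name__ == '__main__':
    main(['/path/to/experiment'],  # hypothetical experiment directory
         n_repeat=10,
         rec_type="model",
         average="mean",
         log_scale=True)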
Example #11
def main(expt_dir):

    os.chdir(expt_dir)
    sys.path.append(expt_dir)

    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment-name"]

    #	main_file = options['main_file']
    main_file = 'wrapper'
    if main_file[-3:] == '.py':
        main_file = main_file[:-3]
    module = __import__(main_file)

    input_space = InputSpace(options["variables"])
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(input_space, options)
    db = MongoDB(database_address=options['database']['address'])
    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')
    objective = parse_tasks_from_jobs(jobs, experiment_name, options,
                                      input_space).values()[0]

    def create_fun(task):
        def fun(params, gradient=False):

            if len(params.shape) > 1 and params.shape[1] > 1:

                values = np.zeros(params.shape[0])
                params_orig = params

                for i in range(params_orig.shape[0]):
                    param = params[i, :]
                    param = param.flatten()
                    param = input_space.from_unit(np.array([param])).flatten()

                    values[i] = module.main(
                        0, paramify_no_types(input_space.paramify(param)))

            else:
                return module.main(
                    0, paramify_no_types(input_space.paramify(params)))

            return values

        return fun

    fun = create_fun(objective)

    # We iterate through each recommendation made

    i = 0
    more_recommendations = True
    while more_recommendations:

        recommendation = db.load(experiment_name, 'recommendations',
                                 {'id': i + 1})

        if recommendation is None:
            more_recommendations = False
        else:

            solution_om = input_space.vectorify(recommendation['params_om'])

            M = 1
            vsom_acum = 0.0
            for j in range(M):
                vsom_acum += fun(solution_om, gradient=False)['score']
            values_solution_om = -vsom_acum / float(M)

            with open('value_solution_om.txt', 'a') as f:
                print >> f, "%lf" % (values_solution_om)

            with open('params_om.txt', 'a') as f_handle:
                np.savetxt(f_handle,
                           np.array([solution_om]),
                           delimiter=' ',
                           newline='\n')

        i += 1
Example #12
def launch(db_address, experiment_name, job_id):
    """
    Launches a job for a given id.
    """

    db = MongoDB(database_address=db_address)
    job = db.load(experiment_name, 'jobs', {'id': job_id})

    start_time = time.time()
    job['start time'] = start_time
    db.save(job, experiment_name, 'jobs', {'id': job_id})

    sys.stderr.write("Job launching after %0.2f seconds in submission.\n"
                     % (start_time - job['submit time']))

    success = False

    try:
        if job['language'].lower() == 'matlab':
            result = matlab_launcher(job)

        elif job['language'].lower() == 'python':
            result = python_launcher(job)

        elif job['language'].lower() == 'shell':
            result = shell_launcher(job)

        elif job['language'].lower() == 'mcr':
            result = mcr_launcher(job)

        else:
            raise Exception("That language has not been implemented.")

        if not isinstance(result, dict):
            # Returning a bare NaN means NaN on all tasks
            if np.isnan(result):
                # The dict comprehension
                #   result = {task_name: np.nan for task_name in job['tasks']}
                # reportedly fails for some users, so build the dict with zip:
                result = dict(zip(job['tasks'], [np.nan] * len(job['tasks'])))
            elif len(job['tasks']) == 1:  # Only one named job
                result = {job['tasks'][0]: result}
            else:
                result = {'main': result}

        if set(result.keys()) != set(job['tasks']):
            raise Exception("Result task names %s did not match job task names %s." % (result.keys(), job['tasks']))

        success = True
    except:
        import traceback
        traceback.print_exc()
        sys.stderr.write("Problem executing the function\n")
        print(sys.exc_info())

    end_time = time.time()

    if success:
        sys.stderr.write("Completed successfully in %0.2f seconds. [%s]\n"
                         % (end_time - start_time, result))

        job['values'] = result
        job['status'] = 'complete'
        job['end time'] = end_time

    else:
        sys.stderr.write("Job failed in %0.2f seconds.\n" % (end_time - start_time))

        # Update metadata.
        job['status'] = 'broken'
        job['end time'] = end_time

    db.save(job, experiment_name, 'jobs', {'id': job_id})
Example #13
def plot(
    config_directory="/home/carrknight/code/oxfish/runs/optimization/spearmint"
):
    os.chdir("/home/carrknight/code/oxfish/runs/optimization/spearmint")
    options = get_options(config_directory, config_file="config.json")
    experiment_name = str(options['experiment-name'])
    db = MongoDB()
    resources = parse_resources_from_config(options)
    resource = resources.itervalues().next()

    # load hyper parameters
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(options)
    print "chooser", chooser

    hypers = db.load(experiment_name, "hypers")
    print "loaded hypers", hypers  # from GP.to_dict()

    jobs = load_jobs(db, experiment_name)
    remove_broken_jobs(db, jobs, experiment_name, resources)
    task_options = {task: options["tasks"][task] for task in resource.tasks}
    task_group = spearmint.main.load_task_group(db, options, resource.tasks)

    hypers = spearmint.main.load_hypers(db, experiment_name)
    print "loaded hypers", hypers  # from GP.to_dict()

    hypers = chooser.fit(task_group, hypers, task_options)
    print "\nfitted hypers:"
    print(hypers)

    lp, x = chooser.best()
    x = x.flatten()
    print "best", lp, x
    bestp = task_group.paramify(task_group.from_unit(x))
    print "expected best position", bestp

    print "chooser models:", chooser.models
    obj_model = chooser.models[chooser.objective['name']]
    grid = chooser.grid
    obj_mean, obj_var = obj_model.function_over_hypers(obj_model.predict, grid)

    import numpy as np

    bounds = dict()

    for task_name, task in task_group.tasks.iteritems():
        # make a grid, feed it to the predictor:

        dimensions = ()
        for key in options['variables'].keys():
            var_type = str(options['variables'][key]["type"]).strip().lower()
            keyname = key.encode('utf-8')
            bounds[keyname] = dict()
            bounds[keyname]["type"] = var_type
            if var_type == "float":
                dimension = np.linspace(0, 1, num=SPACING)
                dimensions = dimensions + (dimension, )
                bounds[keyname]["min"] = options['variables'][key]["min"]
                bounds[keyname]["max"] = options['variables'][key]["max"]

            elif var_type == "int":
                lower = int(options['variables'][key]["min"])
                upper = int(options['variables'][key]["max"])
                dimension = np.linspace(0, 1, num=upper - lower)
                bounds[keyname]["min"] = lower
                bounds[keyname]["max"] = upper
                #  dimension = np.array([x + lower for x in range(upper - lower + 1)])
                dimensions = dimensions + (dimension, )
            else:
                assert var_type == "enum"
                bounds[keyname]["options"] = options['variables'][key][
                    "options"]
                # One (0, 1) axis per enum option (one-hot style grid).
                dimension = tuple([
                    (0, 1)
                    for i in range(len(options['variables'][key]["options"]))
                ])
                for t in dimension:
                    dimensions = dimensions + (t, )

        data = cartesian(np.array(dimensions))

        mean, variance = chooser.models[task_name].predict(data)
        mean = [
            task.unstandardize_mean(task.unstandardize_variance(v))
            for v in mean
        ]
        variance = [task.unstandardize_variance(np.sqrt(v)) for v in variance]

        os.chdir(config_directory)

        # unzip the data
        new_data = zip(*data.transpose().tolist())
        datum = zip(new_data, mean, variance)

        header = ",".join(options['variables'].keys()) + ",mean,variance"
        with open(experiment_name + ".csv", 'w') as fileout:
            fileout.write(header + "\n")
            for row, m, v in datum:
                fields = list(row) + [m, v]
                fileout.write(",".join(str(x) for x in fields) + "\n")

        with open(experiment_name + "_bounds.yaml", 'w') as outfile:
            outfile.write(yaml.dump(bounds, default_flow_style=False))

        # grid = cartesian(dimensions)
        # mean, variance = obj_model.function_over_hypers(obj_model.predict, grid)
        #
        # mean = [obj_task.unstandardize_mean(obj_task.unstandardize_variance(v)) for v in mean]
        # variance = [obj_task.unstandardize_variance(np.sqrt(v)) for v in variance]
        #
        # # xymv = [([x for x in xy], m, v) for xy, m, v in izip(new_grid, obj_mean, obj_std)]  # if .2 < xy[0] < .25]
        # with open(experiment_name + ".csv", 'w') as fileout:
        #     for i in range(len(mean)):
        #         fileout.write(str(([x for x in grid[i]], mean[i], variance[i])).replace("(", "").replace(")", "").
        #                       replace("[", "").replace("]", "") + "\n")
        xy = np.array(task.inputs)
        # function values:
        vals = np.array(task.values)
        np.savetxt(experiment_name + "_" + task_name + "_runs.csv",
                   xy,
                   delimiter=",",
                   fmt='%.3e')
        np.savetxt(experiment_name + "_" + task_name + "_runs_values.csv",
                   vals,
                   delimiter=",",
                   fmt='%.3e')
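
The prediction grid fed to each model above is built by cartesian, which expands the per-dimension arrays into one row per combination. The helper is imported from elsewhere in the project; a minimal sketch of what it is assumed to do (the real implementation may differ):

import numpy as np

def cartesian(arrays):
    # Sketch: cartesian product of 1-D arrays, one combination per row.
    arrays = [np.asarray(a) for a in arrays]
    grids = np.meshgrid(*arrays, indexing='ij')
    return np.array([g.ravel() for g in grids]).T

# e.g. cartesian([np.linspace(0, 1, 3), np.linspace(0, 1, 2)])
# -> shape (6, 2): every (x, y) pair from the two axes.
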
Example #14
def main(expt_dir, repeat=None):

    options = parse_config_file(expt_dir, 'config.json')
    experiment_name = options["experiment-name"]

    if repeat is not None:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    input_space = InputSpace(options["variables"])

    chooser_module = importlib.import_module('spearmint.choosers.' + options['chooser'])
    chooser = chooser_module.init(input_space, options)

    db = MongoDB(database_address=options['database']['address'])

    jobs = load_jobs(db, experiment_name)
    hypers = db.load(experiment_name, 'hypers')

    if input_space.num_dims != 2:
        raise Exception("This plotting script is only for 2D optimizations. This problem has %d dimensions." % input_space.num_dims)

    tasks = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

    hypers = chooser.fit(tasks, hypers)

    print '\nHypers:'
    print_hypers(hypers)

    recommendation = chooser.best()
    current_best_value = recommendation['model_model_value']
    current_best_location = recommendation['model_model_input']

    plots_dir = os.path.join(expt_dir, 'plots')
    if not os.path.isdir(plots_dir):
        os.mkdir(plots_dir)
    if len(chooser.models) > 1:
        for task_name in chooser.models:
            plots_subdir = os.path.join(plots_dir, task_name)
            if not os.path.isdir(plots_subdir):
                os.mkdir(plots_subdir)

    print 'Plotting...'

    # Plot objective model
    # plot_2d_mean_and_var(chooser.objective_model, plots_dir, 
    #     chooser.objective.name, 
    #     input_space, current_best_location)
    
    # plot_hypers(chooser.objective_model, plots_dir, 'objective_function')
    for task_name, model in chooser.models.iteritems():

        plots_subdir = os.path.join(plots_dir, task_name) if len(chooser.models) > 1 else plots_dir

        plot_hypers(model, plots_subdir, task_name)

        plot_2d_mean_and_var(model, plots_subdir, task_name, input_space, current_best_location)


    if chooser.numConstraints() > 0:
        plot_2d_constraints(chooser, plots_dir, input_space, current_best_location)

    plot_acquisition_function(chooser, plots_dir, input_space, current_best_location, current_best_value)

    print 'Done plotting.'
Example #15
def main(expt_dir):

	os.chdir(expt_dir)
	sys.path.append(expt_dir)

	options         = parse_config_file(expt_dir, 'config.json')
	experiment_name = options["experiment-name"]
	options['main_file'] = 'prog_no_noisy'

	main_file = options['main_file']
	if main_file[-3:] == '.py':
		main_file = main_file[:-3]
	module  = __import__(main_file)

	input_space     = InputSpace(options["variables"])
	chooser_module  = importlib.import_module('spearmint.choosers.' + options['chooser'])
	chooser         = chooser_module.init(input_space, options)
	db              = MongoDB(database_address=options['database']['address'])
	jobs            = load_jobs(db, experiment_name)
	hypers          = db.load(experiment_name, 'hypers')
	tasks           = parse_tasks_from_jobs(jobs, experiment_name, options, input_space)

	if len(tasks) < 2:
		print 'Not a multi-objective problem!'
		return -1

	if options['language'] != "PYTHON":
		print 'Only python programs supported!'
		return -1

	for task in tasks:
		if tasks[ task ].type != 'objective':
			print 'Not a multi-objective problem!'
			return -1

	def create_fun(task):
		def fun(params, gradient = False):

			if len(params.shape) > 1 and params.shape[ 1 ] > 1:
				params = params.flatten()

			params = input_space.from_unit(np.array([ params ])).flatten()

			return module.main(0, paramify_no_types(input_space.paramify(params)))[ task ]

		return fun

	funs = [ create_fun(task) for task in tasks ]

	moop = MOOP_basis_functions(funs, input_space.num_dims)

#	moop.evolve(1, 8)

	grid = sobol_grid.generate(input_space.num_dims, grid_size = 1000 * input_space.num_dims)

	moop.solve_using_grid(grid)

#	reference = find_reference_point_using_direct(tasks, module, input_space)

#	reference = reference + np.abs(reference) * 0.1

	reference = np.ones(len(tasks)) * 7

	hyper_volume_solution = moop.get_hypervolume(reference.tolist())

	result = moop.compute_pareto_front_and_set()
	front = result['frontier']
	pareto_set = result['pareto_set']

#	os.remove('hypervolume_solution.txt')

	with open('hypervolume_solution.txt', 'a') as f:
		print >> f, "%lf" % (hyper_volume_solution)

#	os.remove('hypervolumes.txt')

	# We iterate through each recommendation made

	i = 0
	more_recommendations = True
	while more_recommendations:

		recommendation = db.load(experiment_name, 'recommendations', {'id': i + 1})

		if recommendation is None:
			more_recommendations = False
		else:

			solution = input_space.to_unit(input_space.vectorify(recommendation[ 'params' ]))

			if len(solution.shape) == 1:
				solution = solution.reshape((1, len(solution)))
			
			# We compute the objective values associated to this recommendation
	
			values_solution = np.zeros((solution.shape[ 0 ], len(tasks)))
		
			for j in range(values_solution.shape[ 0 ]):
				for k in range(values_solution.shape[ 1 ]):
					values_solution[ j, k ] = funs[ k ](solution[ j : (j + 1), : ])

			moop = MOOP_basis_functions(funs, input_space.num_dims)

			moop.set_population(solution)

			hyper_volume = moop.get_hypervolume(reference.tolist())

			with open('hypervolumes.txt', 'a') as f:
				print >> f, "%lf" % (hyper_volume)

			with open('mean_min_distance_to_frontier.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(values_solution, front))

			with open('mean_min_distance_from_frontier.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(front, values_solution))

			with open('mean_min_distance_to_pareto_set.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(input_space.from_unit(solution), \
				input_space.from_unit(pareto_set)))

			with open('mean_min_distance_from_pareto_set.txt', 'a') as f: 
				print >> f, "%lf" % (average_min_distance(input_space.from_unit(pareto_set), \
				input_space.from_unit(solution)))

			with open('evaluations.txt','a') as f_handle: 
				np.savetxt(f_handle, np.array([recommendation['num_complete_tasks'].values()]), delimiter = ' ', newline = '\n')

		i += 1
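
The distance files written above rely on average_min_distance(A, B), imported from elsewhere. A plausible minimal sketch, assuming it averages, over the rows of A, the Euclidean distance to the nearest row of B (note the asymmetry, which is why both the "to" and "from" variants are logged separately):

import numpy as np

def average_min_distance(A, B):
    # For each point (row) of A, the Euclidean distance to its nearest
    # neighbour in B, averaged over all rows of A.
    A = np.atleast_2d(A)
    B = np.atleast_2d(B)
    diffs = A[:, None, :] - B[None, :, :]       # shape (len(A), len(B), dims)
    dists = np.sqrt((diffs ** 2).sum(axis=-1))  # pairwise Euclidean distances
    return dists.min(axis=1).mean()
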
Example #16
def main(expt_dir, config_file="config.json", no_output=False, repeat=-1):
    if not os.path.isdir(expt_dir):
        raise Exception("Cannot find directory %s" % expt_dir)

    options = parse_config_file(expt_dir, config_file)
    experiment_name = options["experiment_name"]

    # Special advanced feature for repeating the same experiment many times
    if repeat >= 0:
        experiment_name = repeat_experiment_name(experiment_name, repeat)

    if not no_output:  # if we want output
        if repeat >= 0:
            output_directory = repeat_output_dir(expt_dir, repeat)
        else:
            output_directory = os.path.join(expt_dir, 'output')
        if not os.path.isdir(output_directory):
            os.mkdir(output_directory)

        if repeat < 0:
            rootLogger = logging.getLogger()
            fileHandler = logging.FileHandler(
                os.path.join(output_directory, 'main.log'))
            fileHandler.setFormatter(logFormatter)
            fileHandler.setLevel(logLevel)
            rootLogger.addHandler(fileHandler)
        # consoleHandler = logging.StreamHandler()
        # consoleHandler.setFormatter(logFormatter)
        # consoleHandler.setLevel(logLevel)
        # rootLogger.addHandler(consoleHandler)
    else:
        output_directory = None

    input_space = InputSpace(options["variables"])

    resources = parse_resources_from_config(options)

    # Load up the chooser.
    chooser_module = importlib.import_module('spearmint.choosers.' +
                                             options['chooser'])
    chooser = chooser_module.init(input_space, options)

    # Connect to the database
    db_address = options['database']['address']
    db = MongoDB(database_address=db_address)

    overall_start_time = time.time()
    db.save({'start-time': overall_start_time}, experiment_name, 'start-time')

    waiting_for_results = False  # for printing purposes only
    while True:

        for resource_name, resource in resources.iteritems():

            jobs = load_jobs(db, experiment_name)
            # resource.printStatus(jobs)

            # If the resource is currently accepting more jobs
            # TODO: here cost will eventually also be considered: even if the
            #       resource is not full, we might wait because of cost incurred
            # Note: I could choose to fill up one resource and then move on to the next ("if")
            # You could also do it the other way, by changing "if" to "while" here

            # Remove any broken jobs from pending
            # note: make sure to do this before the acceptingJobs() condition is checked
            remove_broken_jobs(db, jobs, experiment_name, resources)

            if resource.acceptingJobs(jobs):

                if waiting_for_results:
                    logging.info('\n')
                waiting_for_results = False

                # Load jobs from DB (could be moved out of one or both loops,
                # but would then need to be passed into load_tasks)
                jobs = load_jobs(db, experiment_name)

                # Print out a list of broken jobs
                print_broken_jobs(jobs)

                # Get a suggestion for the next job
                tasks = parse_tasks_from_jobs(jobs, experiment_name, options,
                                              input_space)

                # Special case when coupled and there is a NaN task
                # (what to do with the NaN task when decoupled is an open question)
                if 'NaN' in tasks and 'NaN' not in resource.tasks:
                    resource.tasks.append('NaN')

                # Load the model hypers from the database.
                hypers = db.load(experiment_name, 'hypers')

                # "Fit" the chooser - give the chooser data and let it fit the model(s).
                # NOTE: even if we are only suggesting for 1 task, we need to fit all of them
                # because the acquisition function for one task depends on all the tasks
                hypers = chooser.fit(tasks, hypers)

                if hypers:
                    logging.debug('GP covariance hyperparameters:')
                print_hypers(hypers, input_space, options)
                # if 'duration hypers' in hypers:
                # logging.debug('Duration GP covariance hyperparameters:')
                # print_hypers(hypers['duration hypers'], input_space, options)

                # Save the hyperparameters to the database.
                if hypers:
                    db.save(hypers, experiment_name, 'hypers')

                if options['recommendations'] == "during":
                    # Compute the best value so far, a.k.a. the "recommendation"
                    recommendation = chooser.best()

                    # Save the recommendation in the DB if there are more complete jobs than last time
                    store_recommendation(recommendation, db, experiment_name,
                                         tasks, jobs, input_space,
                                         time.time() - overall_start_time)

                # Get the decoupling groups
                task_couplings = {
                    task_name: tasks[task_name].options["group"]
                    for task_name in resource.tasks
                }

                logging.info('\nGetting suggestion for %s...\n' %
                             (', '.join(task_couplings.keys())))

                # Get the next suggested experiment from the chooser.
                suggested_input, suggested_tasks = chooser.suggest(
                    task_couplings)
                suggested_task = suggested_tasks[0]  # hack, deal with later

                suggested_job = {
                    'id': len(jobs) + 1,
                    'params': input_space.paramify(suggested_input),
                    'expt_dir': options['main_file_path'],
                    'tasks': suggested_tasks,
                    'resource': resource_name,
                    'main-file': options['tasks'][suggested_task]['main_file'],
                    'language': options['tasks'][suggested_task]['language'],
                    'status': 'new',
                    'submit time': time.time(),
                    'start time': None,
                    'end time': None,
                    'fast update':
                    chooser.fast_update  # just for plotting - not important
                }

                save_job(suggested_job, db, experiment_name)

                # Submit the job to the appropriate resource
                process_id = resource.attemptDispatch(experiment_name,
                                                      suggested_job,
                                                      db_address, expt_dir,
                                                      output_directory)

                # Print the current time
                logging.info(
                    'Current time: %s' %
                    datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

                # Set the status of the job appropriately (successfully submitted or not)
                if process_id is None:
                    suggested_job['status'] = 'broken'
                    logging.info(
                        'Job %s failed -- check output file for details.' %
                        suggested_job['id'])
                    save_job(suggested_job, db, experiment_name)
                else:
                    suggested_job['status'] = 'pending'
                    suggested_job['proc_id'] = process_id
                    save_job(suggested_job, db, experiment_name)

                jobs = load_jobs(db, experiment_name)

                # Print out the status of the resources
                # resource.printStatus(jobs)
                print_resources_status(resources.values(), jobs)

                if len(set(task_couplings.values())) > 1:  # if decoupled
                    print_tasks_status(tasks.values(), jobs)

                # For debug - print pending jobs
                print_pending_jobs(jobs)

        # Terminate the optimization if all resources are finished (run max number of jobs)
        # or ANY task is finished (just my weird convention)
        jobs = load_jobs(db, experiment_name)
        tasks = parse_tasks_from_jobs(jobs, experiment_name, options,
                                      input_space)
        terminate_resources = all(
            resource.maxCompleteReached(jobs) for resource in resources.values())
        terminate_tasks = any(
            task.maxCompleteReached(jobs) for task in tasks.values())
        terminate_maxtime = (time.time() - overall_start_time) >= (
            options['max_time_mins'] * 60.0)

        if terminate_resources or terminate_tasks or terminate_maxtime:

            if terminate_resources:
                logging.info(
                    'Maximum number of jobs completed on all resources.')
            if terminate_tasks:
                logging.info(
                    'Maximum number of jobs reached for at least one task.')
            if terminate_maxtime:
                logging.info(
                    'Maximum total experiment time of %f minutes reached.' %
                    options['max_time_mins'])

            # save rec in DB
            if options['recommendations'] in ("during", "end-one"):
                logging.info('Making final recommendation:')
                recommendation = chooser.best()
                store_recommendation(recommendation,
                                     db,
                                     experiment_name,
                                     tasks,
                                     jobs,
                                     input_space,
                                     time.time() - overall_start_time,
                                     final=True)
            elif options['recommendations'] == "end-all":
                logging.info('Making recommendations...')
                all_jobs = jobs
                for i in xrange(len(all_jobs)):
                    logging.info('')
                    logging.info(
                        '-------------------------------------------------')
                    logging.info(
                        '     Getting recommendations for iter %d/%d      ' %
                        (i + 1, len(all_jobs)))
                    logging.info(
                        '-------------------------------------------------')
                    logging.info('')

                    jobs = all_jobs[:i + 1]
                    tasks = parse_tasks_from_jobs(jobs, experiment_name,
                                                  options, input_space)
                    hypers = chooser.fit(tasks, hypers)
                    print_hypers(hypers, input_space, options)
                    # get the biggest end time of the jobs
                    end_time = max([job['end time'] for job in jobs])
                    elapsed_time = end_time - overall_start_time

                    recommendation = chooser.best()
                    store_recommendation(recommendation, db, experiment_name,
                                         tasks, jobs, input_space,
                                         elapsed_time)

            logging.info('Have a nice day.')
            return

        # If no resources are accepting jobs, sleep
        if no_free_resources(db, experiment_name, resources):
            # Write to stdout directly rather than via logging, which always appends a newline
            sys.stdout.write(
                'Waiting for results...' if not waiting_for_results else '.')
            sys.stdout.flush()
            # sys.stderr.flush()
            waiting_for_results = True
            time.sleep(options['polling_time'])
        else:
            sys.stdout.write('\n')