Example #1
def main():
    """ Main function. """
    compute_objectives, num_objectives, config_file = _CHOOSER_DICT[PROBLEM]
    config = load_config_file(config_file)
    moo_objectives = (compute_objectives, num_objectives)

    # Specify optimisation method --------------------------------------------------------
    #   opt_method = 'bo'
    opt_method = 'rand'

    # Specify options
    options = Namespace(
        build_new_model_every=5,  # update the model every 5 iterations
        report_results_every=4,  # report progress every 4 iterations
        report_model_on_each_build=True,  # report the model when you build it.
    )

    # Specifying GP priors -------------------------------------------------------------
    # Dragonfly allows specifying a mean for the GP prior - if there is prior knowledge
    # on the rough behaviour of the function to be optimised, this is one way that
    # information can be incorporated into the model.
    if USE_CONDUCTIVITY_PRIOR_MEAN:
        if PROBLEM in ['3d', '3d_euc']:
            options.gps_prior_means = (conductivity_prior_mean_3d, None)
        elif PROBLEM == '5d':
            options.gps_prior_means = (conductivity_prior_mean_5d, None)
        # The _unproc suffix indicates that the mean function is "unprocessed". Dragonfly
        # converts the domain specified in the configuration to an internal order, which
        # may reorder the variables; the _unproc suffix tells Dragonfly that the function
        # should be called with arguments in the original (configuration) order.
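        # (Illustrative sketch, not part of the original example.) A prior mean is just
        # a callable mapping a point in the (unprocessed) domain to a scalar guess of
        # the objective; conductivity_prior_mean_3d above is assumed to look roughly like:
        #   def conductivity_prior_mean_3d(x):
        #       """ Hypothetical rough prior guess of the first objective at x. """
        #       return 0.05 * sum(x)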

    # Saving and loading data ----------------------------------------------------------
    # You can save and load progress in Dragonfly. This allows you to resume an
    # optimisation routine from where it left off if it crashes.
    # Other related options include:
    #   - progress_load_from: loads progress from this file but does not save it.
    #   - progress_save_to: saves progress to this file but does not load from it.
    #   - progress_report_on_each_save: reports that the progress was saved (default True)
    if SAVE_AND_LOAD_PROGRESS:
        options.progress_load_from_and_save_to = 'moo_progress.p'
        options.progress_save_every = 5
        # progress_load_from and progress_load_from_and_save_to can be a list of file names
        # in which case we will load from all the files.
        # e.g. options.progress_load_from_and_save_to = ['progress1.p', 'progress2.p']
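        # Alternatively (an illustrative use of the options listed above), loading and
        # saving can go through separate files:
        #   options.progress_load_from = 'old_moo_progress.p'
        #   options.progress_save_to = 'new_moo_progress.p'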

    # Optimise
    max_num_evals = 60
    pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
        moo_objectives,
        config.domain,
        max_num_evals,
        config=config,
        options=options,
        opt_method=opt_method)
    print(pareto_opt_pts)
    print(pareto_opt_vals)
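This example relies on scaffolding that is not shown: the PROBLEM and feature flags, the _CHOOSER_DICT mapping, the objective and prior-mean functions, and the Dragonfly imports. A minimal, hypothetical sketch of that setup (the argparse and dragonfly imports are real; every other name here is a placeholder assumption):

from argparse import Namespace
from dragonfly import load_config_file, multiobjective_maximise_functions

PROBLEM = '3d'                        # e.g. '3d', '3d_euc' or '5d'
USE_CONDUCTIVITY_PRIOR_MEAN = False   # the real example also defines the prior-mean functions
SAVE_AND_LOAD_PROGRESS = False

def compute_objectives_3d(x):
    """ Placeholder stand-in for the real list-valued objectives. """
    return [sum(x), -sum(x)]
num_objectives_3d = 2

# Maps a problem name to (compute_objectives, num_objectives, config_file).
_CHOOSER_DICT = {
    '3d': (compute_objectives_3d, num_objectives_3d, 'config_3d.json'),
}

if __name__ == '__main__':
    main()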
Example #2
def main():
    """ Main function. """
    # First, specify the domain via a JSON configuration file.
    # See examples/synthetic/multiobjective_branin_currinexp/in_code_demo.py for specifying
    # the domain directly in code without a file (a sketch appears after this example).
    config = load_config_file('config.json')

    # Specify objectives -- either of the following options could work. Uncomment
    # appropriately from imports and multiobjective_hartmann.py
    # 1. compute_objectives returns a list of objective values, num_objectives is the number
    # of objectives. This has to be a 2-tuple.
    # moo_objectives = (compute_objectives, num_objectives)
    # 2. Specify each function separately. This has to be a list.
    moo_objectives = [hartmann3_by_2_1, hartmann6, hartmann3_by_2_2]

    # Optimise
    max_num_evals = 100  # Optimisation budget (max number of evaluations)
    pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
        moo_objectives, config.domain, max_num_evals, config=config)
    print(pareto_opt_pts)
    print(pareto_opt_vals)
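As the comment above notes, the domain can also be specified directly in code instead of via config.json, following examples/synthetic/multiobjective_branin_currinexp/in_code_demo.py. A rough sketch, using a placeholder domain rather than the actual Hartmann bounds:

from dragonfly import load_config, multiobjective_maximise_functions
from multiobjective_hartmann import hartmann3_by_2_1, hartmann6, hartmann3_by_2_2

# Placeholder 6-dimensional continuous domain (the real bounds live in config.json).
domain_vars = [{'type': 'float', 'min': 0, 'max': 1, 'dim': 6}]
config = load_config({'domain': domain_vars})

moo_objectives = [hartmann3_by_2_1, hartmann6, hartmann3_by_2_2]
pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
    moo_objectives, config.domain, 100, config=config)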
Example #3
def main():
    """ Main function. """
    # Load configuration file
    objective, config_file, mf_cost = _CHOOSER_DICT[PROBLEM]
    config = load_config_file(config_file)

    # Specify optimisation method -----------------------------------------------------
    opt_method = 'bo'
    # opt_method = 'ga'
    # opt_method = 'rand'

    # Optimise
    max_capital = 60
    domain, domain_orderings = config.domain, config.domain_orderings
    if PROBLEM in ['3d', '5d']:
        # Create function caller.
        # Note there is no function passed in to the Function Caller object.
        func_caller = CPFunctionCaller(None,
                                       domain,
                                       domain_orderings=domain_orderings)

        if opt_method == 'bo':
            opt = gp_bandit.CPGPBandit(func_caller, ask_tell_mode=True)
        elif opt_method == 'ga':
            opt = cp_ga_optimiser.CPGAOptimiser(func_caller,
                                                ask_tell_mode=True)
        elif opt_method == 'rand':
            opt = random_optimiser.CPRandomOptimiser(func_caller,
                                                     ask_tell_mode=True)
        opt.initialise()

        # Optimise using the ask-tell interface.
        # The user repeatedly asks the optimiser for the next point to evaluate, then
        # tells it the result so it can propose the next point.
        best_x, best_y = None, float('-inf')
        for _ in range(max_capital):
            x = opt.ask()
            y = objective(x)
            opt.tell([(x, y)])
            print('x: %s, y: %s' % (x, y))
            if y > best_y:
                best_x, best_y = x, y
        print("Optimal Value: %s, Optimal Point: %s" % (best_y, best_x))

        # Compare results with the maximise_function API
        print("-------------")
        print("Compare with maximise_function API:")
        opt_val, opt_pt, history = maximise_function(objective,
                                                     config.domain,
                                                     max_capital,
                                                     opt_method=opt_method,
                                                     config=config)

    elif PROBLEM == '3d_euc':
        # Create function caller.
        # Note there is no function passed in to the Function Caller object.
        domain = domain.list_of_domains[0]
        func_caller = EuclideanFunctionCaller(None, domain)

        if opt_method == 'bo':
            opt = gp_bandit.EuclideanGPBandit(func_caller, ask_tell_mode=True)
        else:
            # Only 'bo' is handled in this branch; anything else would otherwise leave
            # `opt` undefined, so raise explicitly.
            raise ValueError("Invalid opt_method %s" % (opt_method))
        opt.initialise()

        # Optimise using the ask-tell interface.
        # The user repeatedly asks the optimiser for the next point to evaluate, then
        # tells it the result so it can propose the next point.
        best_x, best_y = None, float('-inf')
        for _ in range(max_capital):
            # Optionally, you can add an integer argument `n_points` to ask to have it return
            # `n_points` number of points. These points will be returned as a list.
            # No argument for `n_points` returns a single point from ask.
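            #   An illustrative (hypothetical) batched variant of the loop body below:
            #     xs = opt.ask(3)   # i.e. n_points=3; returns a list of 3 points
            #     for xi in xs:
            #         opt.tell([(xi, objective(xi))])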
            x = opt.ask()
            y = objective(x)
            opt.tell([(x, y)])
            print('x: %s, y: %s' % (x, y))
            if y > best_y:
                best_x, best_y = x, y
        print("Optimal Value: %s, Optimal Point: %s" % (best_y, best_x))

        # Compare results with the maximise_function API
        print("-------------")
        print("Compare with maximise_function API:")
        opt_val, opt_pt, history = maximise_function(objective,
                                                     config.domain,
                                                     max_capital,
                                                     opt_method=opt_method,
                                                     config=config)

    else:
        # Create function caller.
        # Note there is no function passed in to the Function Caller object.
        (ask_tell_fidel_space, ask_tell_domain, _, ask_tell_mf_cost, ask_tell_fidel_to_opt, ask_tell_config, _) = \
          preprocess_multifidelity_arguments(config.fidel_space, domain, [objective],
                                             mf_cost, config.fidel_to_opt, config)
        func_caller = CPFunctionCaller(
            None,
            ask_tell_domain,
            domain_orderings=domain_orderings,
            fidel_space=ask_tell_fidel_space,
            fidel_cost_func=ask_tell_mf_cost,
            fidel_to_opt=ask_tell_fidel_to_opt,
            fidel_space_orderings=config.fidel_space_orderings,
            config=ask_tell_config)
        if opt_method == 'bo':
            opt = gp_bandit.CPGPBandit(func_caller,
                                       is_mf=True,
                                       ask_tell_mode=True)
        else:
            raise ValueError("Invalid opt_method %s" % (opt_method))
        opt.initialise()

        # Optimise using the ask-tell interface.
        # The user repeatedly asks the optimiser for the next point to evaluate, then
        # tells it the result so it can propose the next point.
        best_z, best_x, best_y = None, None, float('-inf')
        for _ in range(max_capital):
            point = opt.ask()
            z, x = point[0], point[1]
            y = objective(z, x)
            opt.tell([(z, x, y)])
            print('z: %s, x: %s, y: %s' % (z, x, y))
            if y > best_y:
                best_z, best_x, best_y = z, x, y
        print("Optimal Value: %s, Optimal Point: %s" % (best_y, best_x))

        # Compare results with the maximise_multifidelity_function API
        print("-------------")
        print("Compare with maximise_multifidelity_function API:")
        opt_val, opt_pt, history = maximise_multifidelity_function(
            objective,
            config.fidel_space,
            config.domain,
            config.fidel_to_opt,
            mf_cost,
            max_capital,
            opt_method=opt_method,
            config=config)

    print('opt_pt: %s' % (str(opt_pt)))
    print('opt_val: %s' % (str(opt_val)))
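For completeness, the imports this third example assumes would look roughly like the following. The dragonfly top-level names and the experiment_caller, gp_bandit and random_optimiser modules are standard; the remaining paths are assumptions and may differ between Dragonfly versions:

from dragonfly import (load_config_file, maximise_function,
                       maximise_multifidelity_function)
from dragonfly.exd.experiment_caller import CPFunctionCaller, EuclideanFunctionCaller
from dragonfly.opt import gp_bandit, random_optimiser
from dragonfly.opt import cp_ga_optimiser                                 # assumed module path
from dragonfly.apis.api_utils import preprocess_multifidelity_arguments   # assumed module path

# PROBLEM, _CHOOSER_DICT and the (objective, config_file, mf_cost) entries it maps to
# are defined elsewhere in the example.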