Esempio n. 1
0
def main():
    """ Main function. """
    # Define the optimisation domain directly in code.
    # See examples/synthetic/multiobjective_hartmann/in_code_demo.py for specifying
    # the domain via a JSON file instead.
    domain_vars = [
        {'type': 'float', 'min': -5, 'max': 10, 'dim': 1},
        {'type': 'float', 'min': 0, 'max': 15, 'dim': 1},
    ]
    config = load_config({'domain': domain_vars})

    # Objectives can be given in either of two forms:
    # 1. A 2-tuple (compute_objectives, num_objectives), where compute_objectives
    #    returns a list of objective values and num_objectives is their count.
    moo_objectives = (compute_objectives, num_objectives)
    # 2. A list with each objective function given separately, e.g.:
    #   moo_objectives = [branin, currin_exp]

    # Run the optimiser.
    max_num_evals = 100  # Optimisation budget (max number of evaluations)
    pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
        moo_objectives, config.domain, max_num_evals, config=config)
    print(pareto_opt_pts)
    print(pareto_opt_vals)
Esempio n. 2
0
def main():
    """ Main function. """
    compute_objectives, num_objectives, config_file = _CHOOSER_DICT[PROBLEM]
    config = load_config_file(config_file)
    moo_objectives = (compute_objectives, num_objectives)

    # Specify optimisation method --------------------------------------------------------
    #   opt_method = 'bo'
    opt_method = 'rand'

    # Specify options
    options = Namespace(
        build_new_model_every=5,  # update the model every 5 iterations
        report_results_every=4,  # report progress every 4 iterations
        report_model_on_each_build=True,  # report the model when you build it.
    )

    # Specifying GP priors -------------------------------------------------------------
    # Dragonfly allows specifying a mean for the GP prior - if there is prior knowledge
    # on the rough behaviour of the function to be optimised, this is one way that
    # information can be incorporated into the model.
    if USE_CONDUCTIVITY_PRIOR_MEAN:
        if PROBLEM in ['3d', '3d_euc']:
            options.gps_prior_means = (conductivity_prior_mean_3d, None)
        elif PROBLEM == '5d':
            options.gps_prior_means = (conductivity_prior_mean_5d, None)
        # The _unproc indicates that the mean function is "unprocessed". Dragonfly converts
        # the domain specified given in the configuration to an internal order which may
        # have reordered the variables. The _unproc tells that the function
        # should be called in the original format.

    # Saving and loading data ----------------------------------------------------------
    # You can save and load progress in Dragonfly. This allows you to resume an
    # optimisation routine if it crashes from where we left off.
    # Other related options include:
    #   - progress_load_from: loads progress from this file but does not save it.
    #   - progress_save_to: saves progress to this file but does not load from it.
    #   - progress_report_on_each_save: reports that the progress was saved (default True)
    if SAVE_AND_LOAD_PROGRESS:
        options.progress_load_from_and_save_to = 'moo_progress.p'
        options.progress_save_every = 5
        # progress_load_from and progress_load_from_and_save_to can be a list of file names
        # in which case we will load from all the files.
        # e.g options.progress_load_from_and_save_to = ['progress1.p', 'progress2.p']

    # Optimise
    max_num_evals = 60
    pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
        moo_objectives,
        config.domain,
        max_num_evals,
        config=config,
        options=options,
        opt_method=opt_method)
    print(pareto_opt_pts)
    print(pareto_opt_vals)
Esempio n. 3
0
def main():
    """ Main function. """
    # Load the domain from a JSON configuration file.
    # See examples/synthetic/multiobjective_branin_currinexp/in_code_demo.py for
    # specifying the domain directly in code without a file.
    config = load_config_file('config.json')

    # Objectives may be specified in either of two ways. Uncomment appropriately
    # from imports and multiobjective_hartmann.py.
    # 1. A 2-tuple (compute_objectives, num_objectives), where compute_objectives
    #    returns a list of objective values and num_objectives is their count.
    # moo_objectives = (compute_objectives, num_objectives)
    # 2. A list with each objective function given separately.
    moo_objectives = [hartmann3_by_2_1, hartmann6, hartmann3_by_2_2]

    # Run the optimiser.
    max_num_evals = 100  # Optimisation budget (max number of evaluations)
    pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
        moo_objectives, config.domain, max_num_evals, config=config)
    print(pareto_opt_pts)
    print(pareto_opt_vals)
Esempio n. 4
0
                {'type': 'discrete', 'items': place_pruned_graph_list},
                {'type': 'discrete', 'items': enable_bfloat16_sendrecv_list},
                {'type': 'discrete', 'items': do_common_subexpression_elimination_list},
                {'type': 'discrete_numeric', 'items': max_folded_constant_list},
                {'type': 'discrete', 'items': do_function_inlining_list},
                {'type': 'discrete_numeric', 'items': global_jit_level_list},
		{'type': 'discrete', 'items': optimizer_list}
                ]

# Dragonfly option specs. NOTE(review): the original descriptions were all a
# copy-paste of "Path to the json or pb config file." from an unrelated option;
# corrected to describe what each option actually controls.
dragonfly_args = [
    get_option_specs('report_results_every', False, 2,
                     'Report optimisation progress every this many iterations.'),
    get_option_specs('init_capital', False, None,
                     'Capital to be used for initialisation.'),
    get_option_specs('init_capital_frac', False, 0.07,
                     'Fraction of total capital to be used for initialisation.'),
    get_option_specs('num_init_evals', False, 2,
                     'Number of evaluations for initialisation.'),
]

options = load_options(dragonfly_args)
config_params = {'domain': domain_vars}
config = load_config(config_params)
max_num_evals = 60 * 60 * 12  # realtime capital: 12 hours, in seconds
moo_objectives = [runtime_eval, acc_eval]
pareto_opt_vals, pareto_opt_pts, history = multiobjective_maximise_functions(
    moo_objectives, config.domain, max_num_evals,
    capital_type='realtime', config=config, options=options)

# Write results; a context manager guarantees the file is flushed and closed
# (the original left the handle open).
with open("./output.log", "w+") as f:
    print(pareto_opt_pts, file=f)
    print("\n", file=f)
    print(pareto_opt_vals, file=f)
    print("\n", file=f)
    print(history, file=f)



Esempio n. 5
0
    def get_config(self, budget):
        """Function to sample a new configuration

        This function is called inside BOHB to query a new configuration.
        Previously observed (config, loss, runtime) tuples are handed to
        dragonfly as prior multi-objective evaluations, and the single point
        dragonfly proposes is returned as the next configuration.

        Parameters:
        -----------
        budget: float
            the budget for which this configuration is scheduled

        Returns
        -------
        config
            return a valid configuration with parameters and budget
        """
        if not self.is_moo:
            return self.get_config_old(budget)

        logger.debug('start sampling a new configuration.')
        if not self.configs:
            # No observations yet -- fall back to a random configuration.
            print(
                f"[vincent] self.configs is empty! Use a random config instead."
            )
            sample = self.configspace.sample_configuration()
            sample = ConfigSpace.util.deactivate_inactive_hyperparameters(
                configuration_space=self.configspace,
                configuration=sample.get_dictionary()).get_dictionary()
            sample['TRIAL_BUDGET'] = budget
            return sample

        # Build the dragonfly domain: one discrete variable per search-space key.
        domain_vars = list()
        for name in self.search_space.keys():
            if isinstance(self.search_space[name][0], (float, int)):
                var_type = 'discrete_numeric'
            else:
                var_type = 'discrete'
            domain_var = {'type': var_type, 'items': self.search_space[name]}
            domain_vars.append(domain_var)

        points = list()
        vals = list()
        true_vals = list()

        print(f"[vincent] self.configs:{self.configs} budget:{budget}")
        print(f"{list(self.search_space.keys())}")
        # Convert each observed config (stored as per-variable item indices)
        # into dragonfly's [non-numeric items, numeric items] point format.
        space_keys = list(self.search_space.keys())
        for conf_array in self.configs[0]:
            first, second = [], []
            for i in range(len(conf_array)):
                item = self.search_space[space_keys[i]][int(conf_array[i])]
                if isinstance(item, (float, int)):
                    second.append(item)
                else:
                    first.append(item)
            points.append([first, second])
        # Dragonfly maximises, while loss and runtime are minimised -> negate.
        for idx in range(len(self.losses[0])):
            vals.append([-self.losses[0][idx], -self.runtime[0][idx]])
            true_vals.append([-self.losses[0][idx], -self.runtime[0][idx]])

        print(f"[vincent] len of points:{len(points)}")
        if len(points) > 10:
            # Keep at most 5 previous evaluations: Pareto-optimal points
            # first, topped up with the best non-Pareto points according to
            # the user's stated preference.
            vals_array = np.array(vals)
            pareto_index = is_pareto_efficient_simple(vals_array)
            p_idx = []
            np_idx = []
            np_items = []
            for j in range(len(pareto_index)):
                if pareto_index[j]:
                    p_idx.append(j)
                else:
                    np_idx.append(j)
                    np_items.append(vals[j])
            print(f"[vincent] pareto_index:{p_idx}")
            print(f"[vincent] not pareto_index:{np_idx}")

            if len(p_idx) >= 5:
                tmp_idx = p_idx[:5]
                points = [points[i] for i in tmp_idx]
                vals = [vals[i] for i in tmp_idx]
                true_vals = [true_vals[i] for i in tmp_idx]
            else:
                num_diff = 5 - len(p_idx)
                print(f"[vincent] diff num:{num_diff}")
                print(f"[vincent] search space:{self.search_space}")
                preference = self.search_space['PREFERENCE'][0]
                if preference in ("accuracy", "runtime"):
                    # vals are negated, so negate again to sort best-first.
                    obj_pos = 0 if preference == "accuracy" else 1
                    sort_keys = [-item[obj_pos] for item in np_items]
                    sort_n_idx = np.argsort(sort_keys)
                    for i in range(num_diff):
                        # BUGFIX: sort_n_idx holds positions *within*
                        # np_items, not original indices into points/vals;
                        # map back through np_idx before using them.
                        p_idx.append(np_idx[sort_n_idx[i]])
                    print(f"[vincent] final pareto_index:{p_idx}")
                    points = [points[i] for i in p_idx]
                    vals = [vals[i] for i in p_idx]
                    true_vals = [true_vals[i] for i in p_idx]

        ## vals = [[acc,-spent time],[acc,-spent time]]
        ## load from memory
        # Hand the retained evaluations to dragonfly as prior observations.
        previous_eval = {'qinfos': []}
        for i in range(len(points)):
            tmp = Namespace(point=points[i],
                            val=vals[i],
                            true_val=true_vals[i])
            previous_eval['qinfos'].append(tmp)
        p = Namespace(**previous_eval)
        load_args = [
            get_option_specs('init_capital', False, 1,
                             'Capital to be used for initialisation.'),
            get_option_specs(
                'init_capital_frac', False, None,
                'The fraction of the total capital to be used for initialisation.'
            ),
            get_option_specs(
                'num_init_evals', False, 1,
                'The number of evaluations for initialisation. If <0, will use default.'
            ),
            get_option_specs('prev_evaluations', False, p,
                             'Data for any previous evaluations.')
        ]
        options = load_options(load_args)
        config_params = {'domain': domain_vars}
        config = load_config(config_params)
        max_num_evals = 1
        self.dragonfly_config = None

        def fake_func(x):
            # Capture the first point dragonfly proposes; the objective
            # values themselves are irrelevant for this single query.
            if not self.dragonfly_config:
                self.dragonfly_config = x
                print(
                    f"[vincent] x is assigned to self.dragonfly_config:{self.dragonfly_config}"
                )
            return 0

        moo_objectives = [fake_func, fake_func]
        _, _, _ = multiobjective_maximise_functions(moo_objectives,
                                                    config.domain,
                                                    max_num_evals,
                                                    capital_type='num_evals',
                                                    config=config,
                                                    options=options)
        print(
            f"[vincent] self.dragonfly_config after dragonfly:{self.dragonfly_config}"
        )

        if not self.dragonfly_config:
            print(
                f"[vincent] Get empty config from dragonfly! Use a random config instead."
            )
            sample = self.configspace.sample_configuration()
        else:
            # Map dragonfly's flat point back onto the named search-space keys
            # (dragonfly preserves the order the domain variables were given in).
            sample = dict()
            for df_idx, name in enumerate(self.search_space.keys()):
                sample[name] = self.dragonfly_config[df_idx]

        logger.debug('done sampling a new configuration.')
        sample['TRIAL_BUDGET'] = budget

        print(f'[vincent] sample from get_config:{sample}')

        return sample
Esempio n. 6
0
        dfp.parse_args(o, opt)
    if a.use_prior:
        opt.moo_gpb_prior_means = co2df.prior_means(op, cfg)
    for k in opt.__dict__:
        if opt.__dict__.get(k) == "None":
            opt.__dict__[k] = None

    obj = co2df.objective(op, cfg, url, auth=auth)
    n_objectives = obj[1]
    obj_directions = [sn for sn, expr in op.obj.values()]
    arg_names = list(op.dv)
    obj_names = list(op.obj)

    pareto_values, pareto_points, history = \
        dragonfly.multiobjective_maximise_functions(
            obj, None, a.timelimit,
            worker_manager='multiprocessing', num_workers=a.parallel,
            capital_type='realtime', config=cfg, options=opt)

    print("\nPareto values:")
    print(pareto_values)

    print("\nPareto points:")
    print(pareto_points)

    with open('debug.pickle', 'wb') as f:
        pickle.dump((pareto_points, pareto_values, history),
                    f, pickle.HIGHEST_PROTOCOL)

    no_result = [numpy.nan] * n_objectives
    query_values = [val if val != EVAL_ERROR_CODE else no_result
                    for val in history.query_vals]