Example #1
from datetime import datetime

import cma
import numpy as np
# PrintLog is a project-local logging helper (import path not shown here).


def evolution_search(f, para_b):
    begin_time = datetime.now()
    Timestamps_list = []
    Target_list = []
    Parameters_list = []
    keys = list(para_b.keys())
    bounds = np.array(list(para_b.values()), dtype=float)
    dim = len(keys)
    plog = PrintLog(keys)
    para_value = np.empty((1, dim))
    plog.print_header(initialization=True)
    for col, (lower, upper) in enumerate(bounds):
        para_value.T[col] = np.random.RandomState().uniform(lower, upper)
    para_value = para_value.ravel().tolist()
    plog.print_header(initialization=False)

    es = cma.CMAEvolutionStrategy(para_value, 0.2,
                                  {'maxiter': 60, 'popsize': 50})
    while not es.stop():
        # Sample a population, evaluate it, and feed the results back.
        solutions = es.ask()
        es.tell(solutions, [f(x) for x in solutions])
        res = es.result
        # res[0] is the best solution so far, res[1] its objective value;
        # the objective is apparently of the form 1 - score, hence 1 - res[1].
        Parameters_list.append(res[0].tolist())
        Target_list.append(1 - res[1])
        elapse_time = (datetime.now() - begin_time).total_seconds()
        Timestamps_list.append(elapse_time)
        plog.print_step(res[0], 1 - res[1])

    return Timestamps_list, Target_list, Parameters_list
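A minimal usage sketch for the function above, assuming a toy loss of the form 1 - score (all names below are illustrative, not from the source):

# Hypothetical usage: minimize a 2-D toy loss with evolution_search.
def toy_loss(x):
    return (x[0] - 0.5) ** 2 + (x[1] + 0.2) ** 2

timestamps, targets, params = evolution_search(
    toy_loss, {'x_1': (-1, 1), 'x_2': (-1, 1)})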
Example #2
    def __init__(self, func, para):
        # "para" is the diction of parameters which needs to be optimized.
        # "para" --> {'x_1': (0,10), 'x_2': (-1, 1),..., 'x_n':(Lower_bound, Upper_bound)}

        self.f = func
        self.begin_time = datetime.now()
        self.timestamps_list = []
        self.target_list = []
        self.parameters_list = []
        self.pop_list = []
        self.keys = list(para.keys())
        self.bounds = np.array(list(para.values()), dtype=float)
        self.dim = len(self.keys)
        self.plog = PrintLog(self.keys)
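For reference, a small illustrative "para" dictionary matching the format described in the constructor comment (the parameter names here are hypothetical):

# Hypothetical "para": each key maps to a (lower_bound, upper_bound) tuple.
para = {'learning_rate': (0.01, 0.3), 'subsample': (0.5, 1.0)}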
Example #3
from datetime import datetime

import numpy as np
# PrintLog is a project-local logging helper (import path not shown here).


def random_search(f, para_b, num):
    begin_time = datetime.now()
    Timestamps_list = []
    Target_list = []
    keys = list(para_b.keys())
    bounds = np.array(list(para_b.values()), dtype=float)
    dim = len(keys)
    plog = PrintLog(keys)
    parameter_list = np.empty((num, dim))
    plog.print_header(initialization=True)
    for col, (lower, upper) in enumerate(bounds):
        parameter_list.T[col] = np.random.RandomState().uniform(lower,
                                                                upper,
                                                                size=num)
    plog.print_header(initialization=False)

    # Evaluate each random sample; f takes keyword arguments matching `keys`.
    for i in range(num):
        params_dic = dict(zip(keys, parameter_list[i]))
        metric = f(**params_dic)
        Target_list.append(metric)
        elapse_time = (datetime.now() - begin_time).total_seconds()
        Timestamps_list.append(elapse_time)
        plog.print_step(parameter_list[i], metric)

    return Timestamps_list, Target_list, parameter_list.tolist()
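A minimal usage sketch; note that random_search calls f(**params_dic), so the objective's keyword arguments must match the keys of para_b (names below are illustrative):

# Hypothetical usage: 20 random evaluations of a 2-D objective.
def toy_score(x_1, x_2):
    return 1.0 - (x_1 - 0.5) ** 2 - (x_2 + 0.2) ** 2

timestamps, targets, params = random_search(
    toy_score, {'x_1': (-1, 1), 'x_2': (-1, 1)}, num=20)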
Example #4
from datetime import datetime

import numpy as np
import pyswarms.backend as P
from pyswarms.backend.topology import Star
# PrintLog is a project-local logging helper (import path not shown here).


def evolution_search(f, para_b):
    begin_time = datetime.now()
    Timestamps_list = []
    Target_list = []
    Parameters_list = []
    keys = list(para_b.keys())
    dim = len(keys)
    plog = PrintLog(keys)

    lower_b = np.ones(dim)
    upper_b = np.ones(dim)
    value_list = list(para_b.values())
    for i_v in range(dim):
        lower_b[i_v] = value_list[i_v][0]
        upper_b[i_v] = value_list[i_v][1]
    bounds = (lower_b, upper_b)
    plog.print_header(initialization=True)

    my_topology = Star()
    my_options = {'c1': 0.6, 'c2': 0.3, 'w': 0.4}
    my_swarm = P.create_swarm(n_particles=20, dimensions=dim,
                              options=my_options, bounds=bounds)  # the Swarm object

    iterations = 30  # number of PSO iterations
    for i in range(iterations):
        # Part 1: Update personal best

        # Evaluate the current positions and the personal-best positions.
        my_swarm.current_cost = np.array(list(map(evaluate, my_swarm.position)))
        my_swarm.pbest_cost = np.array(list(map(evaluate, my_swarm.pbest_pos)))
        my_swarm.pbest_pos, my_swarm.pbest_cost = P.compute_pbest(my_swarm)  # Update and store

        # Part 2: Update global best
        # Note that gbest computation is dependent on your topology
        if np.min(my_swarm.pbest_cost) < my_swarm.best_cost:
            my_swarm.best_pos, my_swarm.best_cost = my_topology.compute_gbest(my_swarm)

        # Part 3: Update position and velocity matrices
        # Note that position and velocity updates are dependent on your topology
        my_swarm.velocity = my_topology.compute_velocity(my_swarm)
        my_swarm.position = my_topology.compute_position(my_swarm)

        Parameters_list.append(my_swarm.best_pos.tolist())
        Target_list.append(1 - my_swarm.best_cost)
        elapse_time = (datetime.now() - begin_time).total_seconds()
        Timestamps_list.append(elapse_time)
        plog.print_step(my_swarm.best_pos, 1 - my_swarm.best_cost)
        if i == 0:
            plog.print_header(initialization=False)

    return Timestamps_list, Target_list, Parameters_list
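The loop above calls a module-level `evaluate` that is not shown in this example. A minimal sketch of what it might look like, assuming one particle position per call and a lower-is-better cost:

# Hypothetical per-particle cost consumed by map(evaluate, ...) above:
# a simple sphere function, where a smaller cost is better.
import numpy as np

def evaluate(position):
    return float(np.sum(np.asarray(position) ** 2))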
Example #5
    def __init__(self, func, para):
        # "para" is the diction of parameters which needs to be optimized.
        # "para" --> {'x_1': (0,10), 'x_2': (-1, 1),..., 'x_n':(Lower_bound, Upper_bound)}

        self.f = func
        self.begin_time = datetime.now()
        self.timestamps_list = []
        self.target_list = []
        self.parameters_list = []
        self.pop_list = []
        self.keys = list(para.keys())
        self.bounds = np.array(list(para.values()), dtype=float)
        self.dim = len(self.keys)
        self.plog = PrintLog(self.keys)
        self.para_value = np.empty((1, self.dim))
        self.plog.print_header(initialization=True)
        for col, (lower, upper) in enumerate(self.bounds):
            self.para_value.T[col] = np.random.RandomState().uniform(
                lower, upper)
        self.para_value = self.para_value.ravel().tolist()
Example #6
        SMOTE_feature_train_list = r['S_F_tr_l']
        SMOTE_label_train_list = r['S_L_tr_l']
        SMOTE_feature_valid_list = r['S_F_va_l']
        SMOTE_label_valid_list = r['S_L_va_l']
        # num_classifiers_in_pool = len(SMOTE_feature_valid_list)     # 100 in all
        num_classifiers_in_pool = 3
        output_para_list = {}

        for k in range(num_classifiers_in_pool):
            print("Data Set Folder: ", file, ", SMOTE folder id: ", str(k))
            data_dic = {'F_tr_l': SMOTE_feature_train_list[k],
                        'L_tr_l': SMOTE_label_train_list[k],
                        'F_va_l': SMOTE_feature_valid_list[k],
                        'L_va_l': SMOTE_label_valid_list[k]}

            plog = PrintLog(para_keys)
            max_params, max_val, res_records = Genetic_Algorithm()
            print('XGBoost:')
            print("best_values", max_val[0])
            print("best_parameters", max_params)

            Output_Parameters = dict(zip(para_keys, max_params))
            output_para_list[k] = Output_Parameters

            time_list = res_records['timestamp']
            target_list = res_records['values']
            para_list = res_records['params']
            list_file = list_path + '/' + sub_name + '_smote_' + str(k) + '_GA_List.json'
            Output_line = {}
            for i in range(len(target_list)):
                Output_line[time_list[i]] = {target_list[i]: para_list[i]}
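The fragment ends before Output_line is written out. A plausible continuation (inside the k loop) using the standard json module; this write step is an assumption, as it is not shown in the source:

# Hypothetical continuation: persist the per-iteration records to list_file.
import json

with open(list_file, 'w') as fp:
    json.dump(Output_line, fp, indent=2)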
Example #7
from datetime import datetime

import cma
import numpy as np
# PrintLog is a project-local logging helper (import path not shown here).


class CmaEs:
    def __init__(self, func, para):
        # "para" is the diction of parameters which needs to be optimized.
        # "para" --> {'x_1': (0,10), 'x_2': (-1, 1),..., 'x_n':(Lower_bound, Upper_bound)}

        self.f = func
        self.begin_time = datetime.now()
        self.timestamps_list = []
        self.target_list = []
        self.parameters_list = []
        self.pop_list = []
        self.keys = list(para.keys())
        self.bounds = np.array(list(para.values()), dtype=float)
        self.dim = len(self.keys)
        self.plog = PrintLog(self.keys)
        self.para_value = np.empty((1, self.dim))
        self.plog.print_header(initialization=True)
        for col, (lower, upper) in enumerate(self.bounds):
            self.para_value.T[col] = np.random.RandomState().uniform(
                lower, upper)
        self.para_value = self.para_value.ravel().tolist()

    def evaluate(self, x):
        # CMA-ES minimizes, so negate the objective being maximized.
        # Unpack the candidate vector into f's positional parameters.
        result = self.f(*x)
        return -result

    def run(self, max_iter=20, pop_size=10, sigma=0.5):

        # "sigma0" is the initial standard deviation.
        # The problem variables should have been scaled, such that a single standard deviation
        # on all variables is useful and the optimum is expected to lie within about `x0` +- ``3*sigma0``.
        # See also options 'scaling_of_variables'. Often one wants to check for solutions close to the initial point.
        # This allows, for example, for an easier check of consistency of the
        # objective function and its interfacing with the optimizer.
        # In this case, a much "smaller" 'sigma0' is advisable.
        sigma_0 = sigma

        # "conf_para" is used to configure the parameters in CMA-ES algorithm
        # "conf_para" --> {'maxiter': 20, 'popsize': 20}
        conf_para = {'maxiter': max_iter, 'popsize': pop_size}

        es = cma.CMAEvolutionStrategy(self.para_value, sigma_0, conf_para)

        self.plog.print_header(initialization=False)

        while not es.stop():
            # Sample a population, evaluate it, and feed the results back.
            solutions = es.ask()
            self.pop_list.append(solutions)
            es.tell(solutions, [self.evaluate(x) for x in solutions])
            res = es.result
            # res[0] is the best solution so far, res[1] its (negated) value.
            self.parameters_list.append(res[0].tolist())
            self.target_list.append(-res[1])
            elapse_time = (datetime.now() - self.begin_time).total_seconds()
            self.timestamps_list.append(elapse_time)
            self.plog.print_step(res[0], -res[1])

        return self.timestamps_list, self.target_list, self.parameters_list, self.pop_list
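A minimal usage sketch, assuming a toy two-parameter objective to maximize (all names below are illustrative):

# Hypothetical usage: maximize a smooth 2-D objective with CmaEs.
def toy_f(x_1, x_2):
    return -(x_1 - 1.0) ** 2 - (x_2 + 0.5) ** 2

opt = CmaEs(toy_f, {'x_1': (-5, 5), 'x_2': (-5, 5)})
timestamps, targets, params, pops = opt.run(max_iter=10, pop_size=8, sigma=0.3)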
Example #8
from datetime import datetime

import numpy as np
import tensorflow as tf  # TF1-style API (placeholders/sessions); use tf.compat.v1 on TF2.
# PrintLog and NeuralProcess are project-local helpers (imports not shown here).


class CNPs_Optimization:
    def __init__(self, func, para):
        # "para" is the diction of parameters which needs to be optimized.
        # "para" --> {'x_1': (0,10), 'x_2': (-1, 1),..., 'x_n':(Lower_bound, Upper_bound)}

        self.f = func
        self.begin_time = datetime.now()
        self.timestamps_list = []
        self.target_list = []
        self.parameters_list = []
        self.pop_list = []
        self.keys = list(para.keys())
        self.bounds = np.array(list(para.values()), dtype=float)
        self.dim = len(self.keys)
        self.plog = PrintLog(self.keys)

    def initialization(self, pop_size):
        para_value = np.empty((pop_size, self.dim))
        self.plog.print_header(initialization=True)
        for col, (lower, upper) in enumerate(self.bounds):
            para_value[:, col] = np.random.RandomState().uniform(
                lower, upper, pop_size)
        target_value = np.empty((pop_size, 1))
        for i in range(pop_size):
            target_value[i] = self.f(para_value[i])
            self.plog.print_step(para_value[i], target_value[i][0])

        return para_value, target_value

    def model_build(self):
        dim_r = 4
        dim_h_hidden = 128
        dim_g_hidden = 128

        x_context = tf.placeholder(tf.float32, shape=(None, self.dim))
        y_context = tf.placeholder(tf.float32, shape=(None, 1))
        x_target = tf.placeholder(tf.float32, shape=(None, self.dim))
        y_target = tf.placeholder(tf.float32, shape=(None, 1))
        neural_process = NeuralProcess(x_context, y_context, x_target,
                                       y_target, dim_r, dim_h_hidden,
                                       dim_g_hidden)

        return neural_process

    def model_run(self,
                  neural_process,
                  train_op,
                  sess,
                  x_init,
                  y_init,
                  n_iter_cnp=5001):
        num_init = len(y_init)
        plot_freq = 1000
        for it in range(n_iter_cnp):
            # Sample how many of the observed points form the context set.
            N_context = np.random.randint(1, num_init, 1)
            # Create feed_dict containing context and target sets.
            feed_dict = neural_process.helper_context_and_target(
                x_init, y_init, N_context)
            # Optimisation step; train_op is apparently a (train_step, loss) pair.
            a = sess.run(train_op, feed_dict=feed_dict)
            if it % plot_freq == 0:
                print(a[1])  # current loss

    def maximize(self, num_iter=3, pop_size=10, uncertain_rate=0.2):
        pop_para, pop_target = self.initialization(pop_size)
        self.pop_list.append(pop_para)
        pop_max = np.max(pop_target)
        self.target_list.append(pop_max)
        idx_max = np.where(pop_target == pop_max)[0]
        max_para = np.squeeze(pop_para[idx_max])
        self.parameters_list.append(max_para.tolist())
        elapse_time = (datetime.now() - self.begin_time).total_seconds()
        self.timestamps_list.append(elapse_time)

        self.plog.print_header(initialization=False)

        tf.reset_default_graph()
        cnp_model = self.model_build()
        sess = tf.Session()
        train_op_and_loss = cnp_model.init_NP(learning_rate=0.001)
        init = tf.global_variables_initializer()
        sess.run(init)

        n_op_iter = num_iter
        for iter_op in range(n_op_iter):
            self.model_run(cnp_model,
                           train_op_and_loss,
                           sess,
                           x_init=pop_para,
                           y_init=pop_target)

            num_candidate = 1000
            x_candidate = np.empty((num_candidate, self.dim))
            for col, (lower, upper) in enumerate(self.bounds):
                x_candidate[:, col] = np.random.RandomState().uniform(
                    lower, upper, num_candidate)
            predict_candidate = cnp_model.posterior_predict(
                pop_para, pop_target, x_candidate)
            _, y_candidate_mu, y_candidate_sigma = sess.run(predict_candidate)

            num_uncertain = int(pop_size * uncertain_rate)
            if num_uncertain < 1:
                num_uncertain = 1
            num_select = pop_size - num_uncertain
            y_candidate_mu = np.squeeze(y_candidate_mu)
            y_candidate_sigma = np.squeeze(y_candidate_sigma)
            ind_mu = np.argpartition(y_candidate_mu, -num_select)[-num_select:]
            x_mu_select = x_candidate[ind_mu]
            ind_sigma = np.argpartition(y_candidate_sigma,
                                        -num_uncertain)[-num_uncertain:]
            x_sigma_select = x_candidate[ind_sigma]

            x_select = np.unique(np.concatenate((x_mu_select, x_sigma_select),
                                                axis=0),
                                 axis=0)
            _, idx_d = np.unique(np.concatenate((pop_para, x_select), axis=0),
                                 axis=0,
                                 return_index=True)
            # Drop candidates already present in pop_para: first-occurrence
            # indices inside pop_para (idx < pop_para.shape[0]) mark duplicates.
            idx_d = idx_d - pop_para.shape[0]
            idx_d = np.delete(idx_d, np.where(idx_d < 0))
            x_select = x_select[idx_d]
            # Final number of candidates actually selected.
            n_selected = x_select.shape[0]
            y_select = np.empty((n_selected, 1))
            for i in range(n_selected):
                y_select[i] = self.f(x_select[i])
                self.plog.print_step(x_select[i], y_select[i][0])

            self.pop_list.append(x_select)
            pop_max = np.max(y_select)
            self.target_list.append(pop_max)
            idx_max = np.where(y_select == pop_max)[0]
            max_para = np.squeeze(x_select[idx_max])
            self.parameters_list.append(max_para.tolist())
            elapse_time = (datetime.now() - self.begin_time).total_seconds()
            self.timestamps_list.append(elapse_time)

            pop_para = np.concatenate((pop_para, x_select), axis=0)
            pop_target = np.concatenate((pop_target, y_select), axis=0)
            print("The %d-th iteration is completed!" % iter_op)
Example #9
def test_2():
    # Shadow the built-in print with the project's PrintLog logger for this test.
    print = PrintLog("test1.log").to_log
    print("test_2 not to log")