示例#1
0
def evolution_search(f, para_b):
    """Minimize ``f`` over the box ``para_b`` with CMA-ES and record progress.

    Args:
        f: objective taking a parameter vector (sequence of floats) and
           returning a scalar cost to be minimized.
        para_b: dict mapping parameter name -> (lower_bound, upper_bound).

    Returns:
        (timestamps, targets, parameters): per-generation elapsed seconds,
        the best score reported as ``1 - cost``, and the best parameter
        vectors found so far.
    """
    begin_time = datetime.now()
    timestamps_list = []
    target_list = []
    parameters_list = []
    keys = list(para_b.keys())
    # np.float was removed from NumPy (1.24+); the builtin float is equivalent.
    bounds = np.array(list(para_b.values()), dtype=float)
    plog = PrintLog(keys)
    plog.print_header(initialization=True)
    # Draw one uniform random starting point inside the bounds, using a
    # single RNG instead of a fresh RandomState per dimension.
    rng = np.random.RandomState()
    para_value = [rng.uniform(lower, upper) for lower, upper in bounds]
    plog.print_header(initialization=False)

    es = cma.CMAEvolutionStrategy(para_value, 0.2, {'maxiter': 60, 'popsize': 50})
    while not es.stop():
        solutions = es.ask()
        es.tell(solutions, [f(x) for x in solutions])
        res = es.result
        parameters_list.append(res[0].tolist())
        # res[1] is the best cost; the search logs 1 - cost as the target.
        target_list.append(1 - res[1])
        elapse_time = (datetime.now() - begin_time).total_seconds()
        timestamps_list.append(elapse_time)
        plog.print_step(res[0], 1 - res[1])

    return timestamps_list, target_list, parameters_list
示例#2
0
def random_search(f, para_b, num):
    """Evaluate ``f`` at ``num`` points sampled uniformly from the box ``para_b``.

    Args:
        f: objective taking the parameters as keyword arguments and
           returning a scalar metric.
        para_b: dict mapping parameter name -> (lower_bound, upper_bound).
        num: number of random samples to evaluate.

    Returns:
        (timestamps, targets, parameters): elapsed seconds per evaluation,
        the metric values, and the sampled parameter vectors as lists.
    """
    begin_time = datetime.now()
    timestamps_list = []
    target_list = []
    keys = list(para_b.keys())
    # np.float was removed from NumPy (1.24+); the builtin float is equivalent.
    bounds = np.array(list(para_b.values()), dtype=float)
    dim = len(keys)
    plog = PrintLog(keys)
    plog.print_header(initialization=True)
    # Sample every point up front with a single RNG (one column per parameter)
    # instead of creating a fresh RandomState per dimension.
    rng = np.random.RandomState()
    parameter_list = np.empty((num, dim))
    for col, (lower, upper) in enumerate(bounds):
        parameter_list[:, col] = rng.uniform(lower, upper, size=num)
    plog.print_header(initialization=False)

    for row in parameter_list:
        metric = f(**dict(zip(keys, row)))
        target_list.append(metric)
        elapse_time = (datetime.now() - begin_time).total_seconds()
        timestamps_list.append(elapse_time)
        plog.print_step(row, metric)

    return timestamps_list, target_list, parameter_list.tolist()
def evolution_search(f, para_b):
    """Minimize ``f`` over the box ``para_b`` with particle-swarm optimization.

    Uses a PySwarms Star topology with a 20-particle swarm for a fixed
    30 iterations, logging the global best after every iteration.

    Args:
        f: objective taking a position vector and returning a scalar cost
           to be minimized.
        para_b: dict mapping parameter name -> (lower_bound, upper_bound).

    Returns:
        (timestamps, targets, parameters): per-iteration elapsed seconds,
        the best score reported as ``1 - cost``, and the global-best
        position vectors.
    """
    begin_time = datetime.now()
    timestamps_list = []
    target_list = []
    parameters_list = []
    keys = list(para_b.keys())
    dim = len(keys)
    plog = PrintLog(keys)

    # Build the (lower, upper) bound arrays from the parameter dict.
    # BUG FIX: the original read a global `parameters`; the bounds come from
    # the `para_b` argument (it was otherwise unused). Also avoid shadowing
    # the builtins `min`/`max`.
    lower_bound = np.empty(dim)
    upper_bound = np.empty(dim)
    for i_v, (lo, hi) in enumerate(para_b.values()):
        lower_bound[i_v] = lo
        upper_bound[i_v] = hi
    bounds = (lower_bound, upper_bound)
    plog.print_header(initialization=True)

    my_topology = Star()
    my_options = {'c1': 0.6, 'c2': 0.3, 'w': 0.4}
    my_swarm = P.create_swarm(n_particles=20, dimensions=dim,
                              options=my_options, bounds=bounds)

    iterations = 30
    for i in range(iterations):
        # Part 1: update personal bests.
        # BUG FIX: the original mapped a global `evaluate`; the objective is
        # the `f` argument (it was otherwise unused).
        my_swarm.current_cost = np.array(list(map(f, my_swarm.position)))
        my_swarm.pbest_cost = np.array(list(map(f, my_swarm.pbest_pos)))
        my_swarm.pbest_pos, my_swarm.pbest_cost = P.compute_pbest(my_swarm)

        # Part 2: update the global best.
        # Note that gbest computation is dependent on the topology.
        if np.min(my_swarm.pbest_cost) < my_swarm.best_cost:
            my_swarm.best_pos, my_swarm.best_cost = my_topology.compute_gbest(my_swarm)

        # Part 3: update position and velocity matrices (topology-dependent).
        my_swarm.velocity = my_topology.compute_velocity(my_swarm)
        my_swarm.position = my_topology.compute_position(my_swarm)

        parameters_list.append(my_swarm.best_pos.tolist())
        # best_cost is a cost to minimize; the search logs 1 - cost.
        target_list.append(1 - my_swarm.best_cost)
        elapse_time = (datetime.now() - begin_time).total_seconds()
        timestamps_list.append(elapse_time)
        plog.print_step(my_swarm.best_pos, 1 - my_swarm.best_cost)
        if i == 0:
            plog.print_header(initialization=False)

    return timestamps_list, target_list, parameters_list
示例#4
0
    def __init__(self, func, para):
        """Store the objective and search space and initialize bookkeeping.

        Args:
            func: objective function to optimize.
            para: dict of parameters to optimize, mapping name ->
                  (lower_bound, upper_bound), e.g.
                  {'x_1': (0, 10), 'x_2': (-1, 1), ..., 'x_n': (lo, hi)}.
        """
        self.f = func
        self.begin_time = datetime.now()
        # Per-iteration records (elapsed seconds, scores, parameter vectors,
        # populations) to be filled in by the optimization loop.
        self.timestamps_list = []
        self.target_list = []
        self.parameters_list = []
        self.pop_list = []
        self.keys = list(para.keys())
        # np.float was removed from NumPy (1.24+); the builtin float is equivalent.
        self.bounds = np.array(list(para.values()), dtype=float)
        self.dim = len(self.keys)
        self.plog = PrintLog(self.keys)
示例#5
0
    def __init__(self, func, para):
        """Set up search state, draw a random starting point, then run a GA
        over each SMOTE resampling of the data and record its results.

        Args:
            func: objective function to optimize.
            para: dict of parameters to optimize, mapping name ->
                  (lower_bound, upper_bound), e.g.
                  {'x_1': (0, 10), 'x_2': (-1, 1), ..., 'x_n': (lo, hi)}.

        NOTE(review): from the SMOTE section onward this method reads names
        not defined in this scope (`r`, `file`, `para_keys`,
        `Genetic_Algorithm`, `list_path`, `sub_name`) — presumably
        module-level globals; verify against the surrounding file.
        """
        self.f = func
        self.begin_time = datetime.now()
        # Per-iteration records to be filled in by the optimization loop.
        self.timestamps_list = []
        self.target_list = []
        self.parameters_list = []
        self.pop_list = []
        self.keys = list(para.keys())
        # np.float was removed from NumPy (1.24+); the builtin float is equivalent.
        self.bounds = np.array(list(para.values()), dtype=float)
        self.dim = len(self.keys)
        self.plog = PrintLog(self.keys)
        # Draw one uniform random starting point inside the bounds.
        self.para_value = np.empty((1, self.dim))
        self.plog.print_header(initialization=True)
        for col, (lower, upper) in enumerate(self.bounds):
            self.para_value.T[col] = np.random.RandomState().uniform(
                lower, upper)
        self.para_value = self.para_value.ravel().tolist()
        # Pull the SMOTE-resampled train/validation splits out of `r`
        # (keys look like feature/label train/valid lists — TODO confirm).
        SMOTE_feature_train_list = r['S_F_tr_l']
        SMOTE_label_train_list = r['S_L_tr_l']
        SMOTE_feature_valid_list = r['S_F_va_l']
        SMOTE_label_valid_list = r['S_L_va_l']
        # num_classifiers_in_pool = len(SMOTE_feature_valid_list)     # 100 in all
        num_classifiers_in_pool = 3
        output_para_list = {}

        # Run one GA search per SMOTE fold and collect its best parameters.
        for k in range(num_classifiers_in_pool):
            print("Data Set Folder: ", file, ", SMOTE folder id: ", str(k))
            data_dic = {'F_tr_l': SMOTE_feature_train_list[k],
                        'L_tr_l': SMOTE_label_train_list[k],
                        'F_va_l': SMOTE_feature_valid_list[k],
                        'L_va_l': SMOTE_label_valid_list[k]}

            plog = PrintLog(para_keys)
            max_params, max_val, res_records = Genetic_Algorithm()
            print('XGBoost:')
            print("best_values", max_val[0])
            print("best_parameters", max_params)

            Output_Parameters = dict(zip(para_keys, max_params))
            output_para_list[k] = Output_Parameters

            # Re-key the GA history as {timestamp: {value: params}} for the
            # per-fold JSON record.
            time_list = res_records['timestamp']
            target_list = res_records['values']
            para_list = res_records['params']
            list_file = list_path + '/' + sub_name + '_smote_' + str(k) + '_GA_List.json'
            Output_line = {}
            for i in range(len(target_list)):
                Output_line[time_list[i]] = {target_list[i]: para_list[i]}
示例#7
0
def test_2():
    """Route one message through PrintLog's to_log writer."""
    # Fix: the original rebound the builtin `print` to the log writer,
    # shadowing it for the rest of the function; use a plain local name.
    to_log = PrintLog("test1.log").to_log
    to_log("test_2 not to log")