Example #1
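An ask/tell training loop for an evolution-strategies solver. Each iteration draws a population of candidate parameter vectors with solver.ask(), scores them in parallel with pool.map(fit_func, ...), reports the scores back with solver.tell(), and periodically prints progress, plots the fitness history, and dumps the best parameters to JSON. Once the best fitness clears the stopping threshold, the GA variant is restarted from its re-scored elites instead of terminating. Two variants of the loop follow: the first evaluates each generation on a random seed drawn from a growing seed_width, the second evaluates on training data train_X and double-checks the elites before reporting their scores.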
import json

import numpy as np
import matplotlib.pyplot as plt

# The loop relies on names defined elsewhere in the surrounding script:
# `model`, `fit_func`, `pool` (a multiprocessing.Pool), `args`, `SimpleGA`,
# `NPARAMS`, `NPOPULATION`, `train_X`, and `evluate_func_test`.

def test_solver(solver):
    history = []
    j = 0
    seed_width = 20
    while True:
        solutions = solver.ask()
        # Score every candidate in parallel, all on the same random seed.
        seed = np.random.randint(seed_width)
        fitness_list = pool.map(fit_func, [(model, solutions[i], seed, 5)
                                           for i in range(solver.popsize)])

        solver.tell(fitness_list)
        # first element is the best solution, second element is the best fitness
        result = solver.result()
        history.append(result[1])
        if (j + 1) % 10 == 0:
            print("fitness at iteration", (j + 1), result[1])
            if args.algo == 'ga':
                print("Best:", solver.elite_rewards[0])
            print('Seed width', seed_width)

        if (j + 1) % 100 == 0:
            evluate_func_test(model, result[0])
            plt.plot(history)
            plt.savefig(args.log_dir + '/loss_plot.png')
            plt.close()

        if (j + 1) % 1000 == 0:
            # save the best result
            filename = args.log_dir + '/models/model_parameters_' + str(j + 1)
            with open(filename, 'wt') as out:
                json.dump([np.array(result[0]).round(4).tolist()],
                          out,
                          sort_keys=True,
                          indent=2,
                          separators=(',', ': '))


#         if  (j+1) % 40== 0 and args.algo == 'ga':
#             #print('----------------------RESET ELITES')
#             #new_elite_rewards = []
#             new_elite_params = solver.elite_params
#             new_elite_rewards = pool.map(fit_func, [(model,solver.elite_params[kk], seed, 5) for kk in range(len(solver.elite_rewards))])

#             solver = SimpleGA(NPARAMS,                # number of model parameters
#                    sigma_init=0.5,        # initial standard deviation
#                    popsize=NPOPULATION,   # population size
#                    elite_ratio=0.05,       # percentage of the elites
#                    forget_best=False,     # forget the historical best elites
#                    weight_decay=0.00,     # weight decay coefficient
#                   )

#             solver.elite_params = new_elite_params
#             solver.elite_rewards = new_elite_rewards
#             #print('----------------------RESET ELITES')

        # Best fitness has reached at least -0.001: save the parameters,
        # widen the seed range, and restart the GA from its re-scored elites
        # instead of stopping.
        if -result[1] <= 0.001:
            print("local optimum discovered by solver:\n", result[0])
            print("fitness score at this local optimum:", result[1])
            # save the best result
            filename = args.log_dir + '/models/model_parameters_' + str(j + 1)
            with open(filename, 'wt') as out:
                json.dump([np.array(result[0]).round(4).tolist()],
                          out,
                          sort_keys=True,
                          indent=2,
                          separators=(',', ': '))

            seed_width += 5
            if args.algo == 'ga':
                # Re-score the current elites and carry them into a fresh GA
                # instance so the search continues from them rather than
                # from scratch.
                new_elite_params = solver.elite_params
                new_elite_rewards = pool.map(
                    fit_func, [(model, solver.elite_params[kk], seed, 5)
                               for kk in range(len(solver.elite_rewards))])

                solver = SimpleGA(
                    NPARAMS,  # number of model parameters
                    sigma_init=0.5,  # initial standard deviation
                    popsize=NPOPULATION,  # population size
                    elite_ratio=0.05,  # percentage of the elites
                    forget_best=False,  # forget the historical best elites
                    weight_decay=0.00,  # weight decay coefficient
                )

                solver.elite_params = new_elite_params
                solver.elite_rewards = new_elite_rewards
            #return history, result
        j += 1
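

# Variant of the same loop: candidates are scored on the training data
# train_X rather than on a random seed, and under the GA the provisional
# elites are re-evaluated before the fitness list is handed to the solver.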
def test_solver(solver):
    history = []
    j = 0
    seed_width = 100
    while True:
        solutions = solver.ask()
        # Score every candidate in parallel on the training data.
        seed = 1  # only referenced by the commented-out elite-reset block below
        fitness_list = pool.map(fit_func, [(model, solutions[i], train_X, 10)
                                           for i in range(solver.popsize)])

        if args.algo == 'ga':
            # Re-evaluate the provisional elites once more so a lucky first
            # score does not promote a weak candidate,
            elite_idxs = np.argsort(fitness_list)[::-1][0:solver.elite_popsize]
            fitness_list_2 = pool.map(
                fit_func, [(model, solutions[elite_idx], train_X, 10)
                           for elite_idx in elite_idxs])
            for kk, elite_idx_ in enumerate(elite_idxs):
                fitness_list[elite_idx_] = fitness_list_2[kk]

            # then mask out every other candidate so only the re-checked
            # elites can be selected this generation.
            for kk in range(solver.popsize):
                if kk not in elite_idxs:
                    fitness_list[kk] = -np.inf

#         # check how many new unchecked elites sneak in
#         new_elites_idxs = np.argsort(fitness_list)[::-1][0:solver.elite_popsize]
#         for new_elites_idx in new_elites_idxs:
#             if new_elites_idx not in elite_idxs:
#                 print(new_elites_idx)

        solver.tell(fitness_list)
        # first element is the best solution, second element is the best fitness
        result = solver.result()
        history.append(result[1])
        if (j + 1) % 10 == 0:
            print("fitness at iteration", (j + 1), result[1])
            if args.algo == 'ga':
                print("Best:", solver.elite_rewards[0])

            if args.algo == 'oes':
                print('Best:', solver.curr_best_reward)
            print('Seed width', seed_width)

        if (j + 1) % 100 == 0:
            #evluate_func_test(model, result[0])
            plt.plot(history)
            plt.savefig(args.log_dir + '/loss_plot.png')
            plt.close()

        if (j + 1) % 1000 == 0:
            # save the best result
            filename = args.log_dir + '/models/model_parameters_' + str(j + 1)
            with open(filename, 'wt') as out:
                json.dump([np.array(result[0]).round(4).tolist()],
                          out,
                          sort_keys=True,
                          indent=2,
                          separators=(',', ': '))


#         if  (j+1) % 40== 0 and args.algo == 'ga':
#             #print('----------------------RESET ELITES')
#             #new_elite_rewards = []
#             new_elite_params = solver.elite_params
#             new_elite_rewards = pool.map(fit_func, [(model,solver.elite_params[kk], seed, 5) for kk in range(len(solver.elite_rewards))])

#             solver = SimpleGA(NPARAMS,                # number of model parameters
#                    sigma_init=0.5,        # initial standard deviation
#                    popsize=NPOPULATION,   # population size
#                    elite_ratio=0.05,       # percentage of the elites
#                    forget_best=False,     # forget the historical best elites
#                    weight_decay=0.00,     # weight decay coefficient
#                   )

#             solver.elite_params = new_elite_params
#             solver.elite_rewards = new_elite_rewards
#             #print('----------------------RESET ELITES')

        # Best fitness has reached at least -0.001: save the parameters,
        # widen the seed range, and (for the GA) restart the solver from its
        # re-scored elites instead of stopping.
        if -result[1] <= 0.001:
            print("local optimum discovered by solver:\n", result[0])
            print("fitness score at this local optimum:", result[1])
            # save the best result
            filename = args.log_dir + '/models/model_parameters_' + str(j + 1)
            with open(filename, 'wt') as out:
                json.dump([np.array(result[0]).round(4).tolist()],
                          out,
                          sort_keys=True,
                          indent=2,
                          separators=(',', ': '))

            seed_width += 5
            if args.algo == 'ga':
                # Re-score the current elites and carry them into a fresh GA
                # instance so the search continues from them.
                new_elite_params = solver.elite_params
                new_elite_rewards = pool.map(
                    fit_func, [(model, solver.elite_params[kk], train_X, 5)
                               for kk in range(len(solver.elite_rewards))])

                solver = SimpleGA(
                    NPARAMS,  # number of model parameters
                    sigma_init=0.5,  # initial standard deviation
                    popsize=NPOPULATION,  # population size
                    elite_ratio=0.05,  # percentage of the elites
                    forget_best=False,  # forget the historical best elites
                    weight_decay=0.00,  # weight decay coefficient
                )

                solver.elite_params = new_elite_params
                solver.elite_rewards = new_elite_rewards
            #return history, result

        j += 1
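

How the loop is driven is not shown above; the sketch below is one plausible setup, not the author's actual script. It assumes the estool-style SimpleGA used above, that NPARAMS, NPOPULATION, model and args are defined by the surrounding script, and a hypothetical fit_func whose body (set_model_params, evaluate) is invented for illustration; only the layout of the task tuple (model, params, seed_or_data, n_evals) is taken from the calls inside the loop.

import multiprocessing as mp


def fit_func(task):
    # Hypothetical evaluator: unpack the tuple built inside test_solver,
    # load the candidate parameters into the model, average the score over
    # n_evals rollouts, and return one scalar fitness (higher is better).
    model, params, seed_or_data, n_evals = task
    model.set_model_params(params)  # assumes the model exposes such a setter
    scores = [model.evaluate(seed_or_data) for _ in range(n_evals)]
    return float(np.mean(scores))


if __name__ == '__main__':
    pool = mp.Pool(mp.cpu_count())  # test_solver reads this global

    solver = SimpleGA(
        NPARAMS,               # number of model parameters
        sigma_init=0.5,        # initial standard deviation
        popsize=NPOPULATION,   # population size
        elite_ratio=0.05,      # percentage of the elites
        forget_best=False,     # forget the historical best elites
        weight_decay=0.00,     # weight decay coefficient
    )
    test_solver(solver)  # runs until interrupted; checkpoints land in args.log_dir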