Example #1
def optim_result(param):
    pop_size = int(param[0, 0])
    select_rate = param[0, 1]
    select_size = int(pop_size * select_rate)
    lr = param[0, 2]
    mu_init = param[0, 3]
    mu_rate = param[0, 4]
    mode = int(param[0, 5])
    sphere_norm = int(param[0, 6])
    n_gen = 3000 // pop_size  # 3000 is the total evaluation budget
    optimizer = ZOHA_Sphere_lr_euclid(120,
                                      population_size=pop_size,
                                      select_size=select_size,
                                      lr=lr,
                                      maximize=True,
                                      rankweight=True,
                                      rankbasis=True,
                                      sphere_norm=sphere_norm)
    optimizer.lr_schedule(
        n_gen, lim=(mu_init, mu_rate * mu_init), mode=mode_dict[mode]
    )  # the BigBiGAN space is much tighter than the fc6 space, so the step
    # size should be tuned accordingly
    Exp = ExperimentEvolve(("alexnet", "fc8", 1),
                           max_step=n_gen,
                           backend="torch",
                           optimizer=optimizer,
                           GAN="BigBiGAN",
                           verbose=False)
    # note: the torch backend with batched evaluation makes evolution much faster (~27 s per 100 generations)
    Exp.run()
    return np.percentile(
        Exp.scores_all[Exp.generations > Exp.generations.max() - 5], 99.5)
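# Usage sketch (not part of the original snippet): the param[0, k] indexing
# suggests this objective is evaluated on a single-row hyperparameter array,
# e.g. by a hyperparameter-search loop. The values below are illustrative only,
# and mode_dict plus the classes used inside optim_result are assumed defined.
# Columns: [pop_size, select_rate, lr, mu_init, mu_rate, mode index, sphere_norm]
param = np.array([[40, 0.4, 1.5, 50.0, 0.4, 0, 9]])
score = optim_result(param)
print("99.5th-percentile score over the final generations: %.2f" % score)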
Example #2
def optim_result(param):
    pop_size = int(param[0,0])
    select_rate = param[0,1]
    select_size = int(pop_size * select_rate)
    lr = param[0,2]
    mu_init = param[0,3]
    mu_rate = param[0,4]
    mode = int(param[0,5])
    n_gen = 4000 // pop_size # 4000 is the total evaluation budget
    optimizer = ZOHA_Sphere_lr_euclid(4096, population_size=pop_size, select_size=select_size, lr=lr)
    optimizer.lr_schedule(n_gen=n_gen, lim=(mu_init, mu_rate*mu_init), mode=mode_dict[mode])
    Exp = ExperimentEvolve(("alexnet", "fc8", 1), max_step=n_gen, backend="torch", optimizer=optimizer, GAN="fc6")
    # note: the torch backend with batched evaluation makes evolution much faster (~27 s per 100 generations)
    # Exp = ExperimentEvolve(("caffe-net", "fc8", 1), max_step=50, backend="caffe", optimizer=optimizer, GAN="fc6")
    Exp.run()
    return np.percentile(Exp.scores_all, 99.5)
Example #3
from time import time
import os
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
# Genetic, CholeskyCMAES, ExperimentEvolve and the search hyperparameters
# (population_size, mutation_rate, code_length, recorddir, ...) are assumed
# to be defined earlier in the script.

for unit in unit_arr[:]:
    savedir = join(recorddir, "optim_cmp", "%s_%s_%d" % (unit[0], unit[1], unit[2]))
    os.makedirs(savedir, exist_ok=True)
    for Optim_str in Optim_arr:
        for triali in range(10):
            t0 = time()
            if Optim_str == "Genetic":
                optim = Genetic(population_size, mutation_rate, mutation_size, kT_multiplier, recorddir,
                            parental_skew=0.5, n_conserve=0)
            elif Optim_str == "CholCMA":
                optim = CholeskyCMAES(recorddir=recorddir, space_dimen=code_length, init_sigma=init_sigma,
                            Aupdate_freq=Aupdate_freq, init_code=initcode) # np.zeros([1, code_length])
            experiment = ExperimentEvolve(unit, max_step=100, optimizer=optim)
            experiment.run()
            fig0 = experiment.visualize_best(show=False)
            fig0.savefig(join(savedir, "%s_BestImgTrial%03d.png" % (Optim_str, triali,)))
            fig = experiment.visualize_trajectory(show=False)
            fig.savefig(join(savedir, "%s_ScoreTrajTrial%03d.png" % (Optim_str, triali,)))
            # fig2 = experiment.visualize_exp(show=False)
            # fig2.savefig(join(savedir, "EvolveTrial%03d.png" % (triali)))
            plt.close('all')
            np.savez(join(savedir, "%s_scores_trial%03d.npz" % (Optim_str, triali)),
                     generations=experiment.generations,
                     scores_all=experiment.scores_all)
            print("Optimization with %s took %.1f s" % (Optim_str, time() - t0))
            # lastgen_max = [experiment.scores_all[experiment.generations == geni].max() for geni in
            #         range(experiment.generations.max() - 10, experiment.generations.max() + 1)]
            # best_scores_col.append(lastgen_max)
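    # Analysis sketch (not part of the original snippet): reload the per-trial
    # .npz files saved above and compare the two optimizers for this unit.
    # The file-name pattern and array keys come from the np.savez call above;
    # the rest is illustrative.
    for Optim_str in Optim_arr:
        final_maxima = []
        for triali in range(10):
            data = np.load(join(savedir, "%s_scores_trial%03d.npz" % (Optim_str, triali)))
            gens, scores = data["generations"], data["scores_all"]
            final_maxima.append(scores[gens == gens.max()].max())
        print("%s: mean best score in the final generation %.2f" % (Optim_str, np.mean(final_maxima)))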
Example #4
    eigvect_avg = data["eigvect_avg"]

savedir = os.path.join(recorddir, "%s_%s_eig_subspac%d" % (netname, layer, subspace_d))
os.makedirs(savedir, exist_ok=True)
pos_dict = {"conv5": (7, 7), "conv4": (7, 7), "conv3": (7, 7), "conv2": (14, 14), "conv1": (28, 28)}
best_scores_col = []
for chi in range(100):
    if "fc" in layer:
        unit = (netname, layer, chi)
    else:
        unit = (netname, layer, chi, *pos_dict[layer])
    for triali in range(10):
        optimizer = ZOHA_Sphere_lr_euclid_ReducDim(4096, subspace_d, population_size=40, select_size=20)
        optimizer.lr_schedule(n_gen=n_gen, mode="inv")
        optimizer.get_basis("rand")
        experiment = ExperimentEvolve(unit, max_step=n_gen, backend="torch", optimizer=optimizer, GAN="fc6")
        experiment.run()
        fig0 = experiment.visualize_best(show=False)
        fig0.savefig(join(savedir, "Subspc%dBestImgChan%02dtr%01d.png" % (subspace_d, chi, triali)))
        fig = experiment.visualize_trajectory(show=False)
        fig.savefig(join(savedir, "Subspc%dScoreTrajChan%02dtr%01d.png" % (subspace_d, chi, triali)))
        fig2 = experiment.visualize_exp(show=False)
        fig2.savefig(join(savedir, "Subspc%dEvolveChan%02dtr%01d.png" % (subspace_d, chi, triali)))
        plt.close("all")
        np.savez(join(savedir, "scores_subspc%dChan%02dtr%01d.npz" % (subspace_d, chi, triali)),
                 generations=experiment.generations,
                 scores_all=experiment.scores_all,
                 codes_fin=experiment.codes_all[experiment.generations == experiment.max_steps - 1, :])
        lastgen_max = [experiment.scores_all[experiment.generations == geni].max()
                       for geni in range(experiment.generations.max() - 10,
                                         experiment.generations.max() + 1)]
        best_scores_col.append(lastgen_max)
    #     break
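# Summary sketch (not part of the original snippet): stack the per-trial
# last-generation maxima collected above and save them for later analysis.
# The output file name is hypothetical.
best_scores_arr = np.array(best_scores_col)  # shape: (n_channels * n_trials, 11)
np.savez(join(savedir, "best_scores_summary.npz"), best_scores=best_scores_arr)
print("Mean last-generation best score: %.2f" % best_scores_arr.mean())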
Example #5
imgs = BigBiGAN_render(SLERP(Vec1, Vec2, ticks, (0, 1)), 255.0)
mtg = build_montages(imgs, (128, 128), (ticks, 1))[0]
Img = Image.fromarray(np.uint8(mtg))
Img.show()
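# For reference, a minimal spherical-interpolation helper along the lines of the
# SLERP call above might look like the sketch below. The name, signature, and the
# meaning of the fraction-range argument are assumptions, not the library's API.
def slerp_sketch(v1, v2, n_steps, frac_range=(0, 1)):
    """Return n_steps vectors spherically interpolated between v1 and v2."""
    v1, v2 = v1.ravel(), v2.ravel()
    mean_norm = (np.linalg.norm(v1) + np.linalg.norm(v2)) / 2
    u1, u2 = v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2)
    omega = np.arccos(np.clip(u1 @ u2, -1.0, 1.0))  # angle between the endpoints
    # assumes v1 and v2 are not (anti)parallel, so sin(omega) != 0
    fracs = np.linspace(frac_range[0], frac_range[1], n_steps)
    arc = np.stack([(np.sin((1 - f) * omega) * u1 + np.sin(f * omega) * u2) / np.sin(omega)
                    for f in fracs])
    return mean_norm * arc  # rescale back to the endpoints' average norm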
#%%
optimizer = CholeskyCMAES(120,
                          population_size=None,
                          init_sigma=0.25,
                          init_code=0.8 * np.random.randn(1, 120),
                          Aupdate_freq=10,
                          maximize=True,
                          random_seed=None,
                          optim_params={})
experiment = ExperimentEvolve(("alexnet", "fc8", 1),
                              max_step=100,
                              backend="torch",
                              optimizer=optimizer,
                              GAN="BigBiGAN",
                              verbose=True)
experiment.run()
#%%
optimizerZH = ZOHA_Sphere_lr_euclid(120,
                                    population_size=40,
                                    select_size=15,
                                    lr=1.0,
                                    maximize=True,
                                    rankweight=True,
                                    rankbasis=True,
                                    sphere_norm=9)
optimizerZH.lr_schedule(
    100, lim=(10, 6),
    mode="lin")  # the space is much tighter than the fc6 space, so step size
Example #6

#%%
# optimizer = ZOHA_Sphere_lr_euclid(4096, population_size=40, select_size=20)
# optimizer.lr_schedule(n_gen=100)
# codes = np.random.randn(40, 4096)
# scores = np.random.randn(40)
# optimizer.step_simple(scores, codes)
#%%
if __name__ == "__main__":
    from insilico_Exp import ExperimentEvolve
    optimizer = ZOHA_Sphere_lr_euclid(4096, population_size=40, select_size=20)
    optimizer.lr_schedule(n_gen=100, mode="exp")
    Exp = ExperimentEvolve(("alexnet", "fc8", 1),
                           max_step=100,
                           backend="torch",
                           optimizer=optimizer,
                           GAN="fc6")
    # note: the torch backend with batched evaluation makes evolution much faster (~27 s per 100 generations)
    # Exp = ExperimentEvolve(("caffe-net", "fc8", 1), max_step=50, backend="caffe", optimizer=optimizer, GAN="fc6")
    Exp.run()
    #%%
    optimizer = ZOHA_Sphere_lr_euclid_ReducDim(4096,
                                               50,
                                               population_size=40,
                                               select_size=20)
    optimizer.lr_schedule(n_gen=50, mode="exp")
    optimizer.get_basis("rand")
    Exp = ExperimentEvolve(("alexnet", "fc8", 1),
                           max_step=100,
                           backend="torch",