Exemple #1
0
# Evolution sweep: first 100 channels ("chi") of the target layer, 10 repeated
# evolution trials each.  Assumes `netname`, `layer`, `pos_dict`, `subspace_d`,
# `n_gen`, `savedir`, and the list `best_scores_col` are defined earlier in the
# file -- TODO confirm (this chunk starts mid-script).
for chi in range(100):
    # fc layers are addressed by (net, layer, channel); other layers also need
    # a spatial position, looked up per-layer in pos_dict.
    if "fc" in layer:
        unit = (netname, layer, chi)
    else:
        unit = (netname, layer, chi, *pos_dict[layer])
    for triali in range(10):
        # Fresh optimizer per trial: sphere-constrained ZOHA restricted to a
        # random `subspace_d`-dimensional subspace of the 4096-d code space.
        optimizer = ZOHA_Sphere_lr_euclid_ReducDim(4096, subspace_d, population_size=40, select_size=20)
        optimizer.lr_schedule(n_gen=n_gen, mode="inv")  # "inv" decay schedule over n_gen generations
        optimizer.get_basis("rand")  # draw a random subspace basis
        experiment = ExperimentEvolve(unit, max_step=n_gen, backend="torch", optimizer=optimizer, GAN="fc6")
        experiment.run()
        # Save the three diagnostic figures for this (channel, trial) pair.
        fig0 = experiment.visualize_best(show=False)
        fig0.savefig(join(savedir, "Subspc%dBestImgChan%02dtr%01d.png" % (subspace_d, chi, triali)))
        fig = experiment.visualize_trajectory(show=False)
        fig.savefig(join(savedir, "Subspc%dScoreTrajChan%02dtr%01d.png" % (subspace_d, chi, triali)))
        fig2 = experiment.visualize_exp(show=False)
        fig2.savefig(join(savedir, "Subspc%dEvolveChan%02dtr%01d.png" % (subspace_d, chi, triali)))
        plt.close("all")  # free figure memory before the next trial
        # Persist per-generation scores plus the codes of the final generation.
        np.savez(join(savedir, "scores_subspc%dChan%02dtr%01d.npz" % (subspace_d, chi, triali)),
                 generations=experiment.generations,
                 scores_all=experiment.scores_all, codes_fin=experiment.codes_all[experiment.generations==experiment.max_steps-1,:])
        # Max score within each of the last 11 generations (max-10 .. max, inclusive).
        lastgen_max = [experiment.scores_all[experiment.generations == geni].max() for geni in
         range(experiment.generations.max() - 10, experiment.generations.max() + 1)]
        best_scores_col.append(lastgen_max)
    #     break
    # break
# One 11-element row per (channel, trial); collect into an array and save.
best_scores_col = np.array(best_scores_col)
np.save(join(savedir, "best_scores.npy"), best_scores_col)
#%%  certain subspace along the eigen spectrum
# unit = ("alexnet", 'fc8')
# netname, layer = unit
Exemple #2
0
                                    rankweight=True,  # NOTE(review): these keyword args close an
                                    rankbasis=True,   # optimizer constructor call that begins above this excerpt
                                    sphere_norm=9)    # presumably the code-vector norm constraint -- confirm
# Linear schedule over 100 generations; lim=(10, 6) is presumably the
# (start, end) step-size range -- confirm against lr_schedule's signature.
optimizerZH.lr_schedule(
    100, lim=(10, 6),
    mode="lin")  # the space is much tighter than the fc6 space, so step size
# should be tuned for that
# Single evolution run: drive alexnet fc8 unit 1 through the BigBiGAN generator.
experiment = ExperimentEvolve(("alexnet", "fc8", 1),
                              max_step=100,
                              backend="torch",
                              optimizer=optimizerZH,
                              GAN="BigBiGAN",
                              verbose=True)
experiment.run()
# Positional True presumably maps to the `show` flag (show=False is used
# with these same methods elsewhere in the file) -- display interactively.
experiment.visualize_trajectory(True)
experiment.visualize_exp(True)
experiment.visualize_best(True)
#%%
# NOTE(review): despite the name, this is a list, not a dict -- presumably
# indexed by the integer `mode` code decoded in optim_result below; "inv"
# appears to have been dropped from the candidate schedules (see the
# commented-out entry).
mode_dict = ["lin", "exp"]  #"inv",

def optim_result(param):
    """Objective wrapper over the evolution hyper-parameters.

    Decodes one row of a 2-D parameter array (``param[0, :7]`` is read, so a
    shape like (1, 7) is assumed -- TODO confirm) into the optimizer
    configuration: population size, selection rate, step size, mu settings,
    schedule-mode index, and sphere norm.
    """
    pop_size = int(param[0, 0])   # population size per generation
    select_rate = param[0, 1]     # fraction of the population kept
    select_size = int(pop_size * select_rate)
    lr = param[0, 2]              # presumably the step size / learning rate -- confirm
    mu_init = param[0, 3]         # presumably initial mutation scale -- confirm
    mu_rate = param[0, 4]         # presumably mutation-scale decay rate -- confirm
    mode = int(param[0, 5])       # presumably an index into mode_dict ("lin"/"exp") -- confirm
    sphere_norm = int(param[0, 6])
    n_gen = 3000 // pop_size  # NOTE(review): original comment said "4000 is the
    # total evaluation budget" but the code divides 3000 -- confirm which budget is intended