# Imports used by the functions below. DCGAN, test_function, explain,
# generate_png, begin_training, models_and_options, test_for_G, n_samples,
# no_samples and FLAGS are expected to be defined elsewhere in the project.
import contextlib
import os
from multiprocessing import Pool

import numpy as np
import tensorflow as tf
from tqdm import tqdm


def evaluate():
    current_dir = os.getcwd()
    print("Current_dir = ", current_dir)
    model_dir = "./saved_models"
    save_files = os.listdir(model_dir)
    # Keep only checkpoints whose generator depth equals test_for_G
    # (the discriminator depth may differ, i.e. asymmetric models):
    save_files = [
        x for x in save_files if int(x.split("_")[1]) == test_for_G
    ]
    # Sort the checkpoints by discriminator depth (third filename field):
    indexes = [int(x.split("_")[2]) for x in save_files]
    save_files = [x for _, x in sorted(zip(indexes, save_files))]
    indexes = sorted(indexes)
    print("Save files found: ", save_files)
    # TODO: Filter save files to only load symmetrically trained ones, i.e. both depth indexes must be equal.
    print("Depths parsed: ", indexes)
    l = len(save_files)
    # NOTE: assumes l * n_samples == 256 total samples of 64x64 RGB images.
    image_batch = np.zeros([256, 64, 64, 3])
    DCG = DCGAN()
    num_pool_workers = 1
    i = 0
    noise = tf.random_normal([n_samples, 128])
    # print("Noise: ", noise)
    for j, gen in enumerate(save_files):
        DCG.set_G_dim(test_for_G)
        print("G_dim set to ", test_for_G)
        param_tuple = (DCG, gen)
        with contextlib.closing(Pool(num_pool_workers)) as po:
            pool_results = po.map_async(test_function, (param_tuple, ))
            results_list = pool_results.get()
            image_batch[i * n_samples:n_samples * (i + 1)] = results_list[0]
            i += 1
    # print(image_batch)
    generate_png(image_batch)
    print("Output saved")
def evaluate():
    """
    Runs the explanation routine over every symmetrically trained model. Steps:
    1. List all models available in the save dir.
    2. For each discriminator checkpoint, with and without mean normalization,
       run `explain`, reusing its output images across calls.
    """
    current_dir = os.getcwd()
    print("Current_dir = ", current_dir)
    model_dir = "./saved_models"
    save_files = os.listdir(model_dir)
    # Filter only symmetric versions:
    save_files = [x for x in save_files if (
        x.split("_")[1] == x.split("_")[2])]
    indexes = [int(x.split("_")[1]) for x in save_files]
    save_files = [x for _, x in sorted(zip(indexes, save_files))]
    indexes = sorted(indexes)
    print("Save files found: ", save_files)
    print("Depths parsed: ", indexes)
    DCG = DCGAN()
    images_to_explain = []
    for normalize_by_mean in [True, False]:
        for k, disc in enumerate(tqdm(save_files)):
            DCG.set_D_dim(indexes[k])
            # explain() returns the images it was handed (presumably generating
            # them on the first call), so every model is explained on the same inputs.
            param_tuple = (DCG, disc, images_to_explain, k, normalize_by_mean)
            with contextlib.closing(Pool(1)) as po:
                pool_results = po.map_async(
                    explain, (param_tuple,))
                images_to_explain = pool_results.get()[0]
    print("Evaluation finished")
def evaluate():
    """
    Evaluates every symmetrically trained discriminator. Steps:
    1. List all models available in the save dir.
    2. For each discriminator checkpoint, run `test_function` and collect its
       outputs over no_samples batches.
    3. Save the stacked results to ./real_evaluation_stats.npz.
    """
    current_dir = os.getcwd()
    print("Current_dir = ", current_dir)
    model_dir = "./saved_models"
    save_files = os.listdir(model_dir)
    # Filter only symmetric versions (equal generator and discriminator depth):
    save_files = [
        x for x in save_files if (x.split("_")[1] == x.split("_")[2])
    ]
    indexes = [int(x.split("_")[1]) for x in save_files]
    save_files = [x for _, x in sorted(zip(indexes, save_files))]
    indexes = sorted(indexes)
    print("Save files found: ", save_files)
    print("Depths parsed: ", indexes)
    l = len(save_files)
    # One row of discriminator outputs per checkpoint:
    results_tensor = np.empty([l, no_samples, FLAGS.batch_size])
    DCG = DCGAN()
    num_pool_workers = 1
    i = 0
    for k, disc in enumerate(tqdm(save_files)):
        DCG.set_D_dim(indexes[k])
        print("D_dim set to ", indexes[k])
        param_tuple = (DCG, disc)
        with contextlib.closing(Pool(num_pool_workers)) as po:
            pool_results = po.map_async(test_function, (param_tuple,))
            results_list = pool_results.get()
            results_tensor[i] = results_list[0]
            i += 1
    print("Evaluation finished")
    output_path = './real_evaluation_stats.npz'
    np.savez_compressed(output_path,
                        results_tensor=results_tensor,
                        save_files=save_files)
    print("Output saved")
def launch_managed_training():
    """
    Function which trains many models in series
    """
    epochs = 10
    DCG = DCGAN()
    num_pool_workers = 1  # can be bigger than 1, to enable parallel execution
    for entry in models_and_options:
        print("Starting training of model: \n", entry)
        DCG.set_G_dim(entry[1])
        DCG.set_D_dim(entry[2])
        if entry[3] == 1:
            Generator = DCG.DCGANG_1
        else:
            Generator = DCG.DCGANG_2
        if entry[4] == 1:
            Discriminator = DCG.DCGAND_1
        else:
            Discriminator = DCG.DCGAND_2
        param_tuple = (entry[0], Generator, Discriminator, epochs, entry[6])
        # contextlib.closing makes sure the worker processes get closed once
        # they are done, so each training run starts in a fresh process.
        with contextlib.closing(Pool(num_pool_workers)) as po:
            pool_results = po.map_async(begin_training, (param_tuple,))
            results_list = pool_results.get()
    return 0
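

# A minimal, hypothetical example of the `models_and_options` table, inferred
# only from the indices launch_managed_training() reads (entry[5] is unused
# there); the real table is defined elsewhere in the project:
example_models_and_options = [
    # (name, G_dim, D_dim, G_variant, D_variant, unused, extra_option)
    ("dcgan_64_64", 64, 64, 1, 1, None, True),
    ("dcgan_32_32", 32, 32, 2, 2, None, False),
]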
def evaluate():
    """
    Should be able to mix and match various versions of GANs with this function. Steps:
    1. List all models available in the save dir.
    2. Double for loop:
     2.1 For each generator,
     2.2 load each discriminator, and
    3. run 100k samples and see the discriminator output.
    """
    current_dir = os.getcwd()
    print("Current_dir = ", current_dir)
    model_dir = "./saved_models"
    save_files = os.listdir(model_dir)
    # Filter only symmetric versions:
    save_files = [x for x in save_files if (
        x.split("_")[1] == x.split("_")[2])]
    indexes = [int(x.split("_")[1]) for x in save_files]
    save_files = [x for _, x in sorted(zip(indexes, save_files))]
    indexes = sorted(indexes)
    print("Save files found: ", save_files)
    print("Depths parsed: ", indexes)
    l = len(save_files)
    # Six summary values per (generator, discriminator) pair, i.e. for each of
    # the l * l architecture combinations, prediction statistics for real,
    # fake and mixed batches:
    results_tensor = np.empty([l * l, 6])
    DCG = DCGAN()
    num_pool_workers = 1
    i = 0
    for j, gen in enumerate(tqdm(save_files)):
        for k, disc in enumerate(tqdm(save_files)):
            DCG.set_G_dim(indexes[j])
            DCG.set_D_dim(indexes[k])
            param_tuple = (DCG, gen, disc)
            with contextlib.closing(Pool(num_pool_workers)) as po:
                pool_results = po.map_async(test_function, (param_tuple,))
                results_list = pool_results.get()
                results_tensor[i] = results_list[0]
                i += 1
    print("Evaluation finished")
    output_path = './real_fake_half.npz'
    # reshape() returns a new array, so the result must be reassigned:
    results_tensor = results_tensor.reshape([l, l, 6])
    np.savez_compressed(
        output_path, results_tensor=results_tensor, save_files=save_files)
    print("Output saved")