Example #1
def generate_interpolated_images(model_load_path,
                                 batch_size=16,
                                 n_latent=512,
                                 use_bn=False,
                                 hyper_sphere=True,
                                 last_act='tanh',
                                 use_wscale=True,
                                 use_he_backward=False,
                                 resolution_list=[4, 8, 16, 32, 64, 128],
                                 channel_list=[512, 512, 256, 128, 64, 32]):
    # Load the pretrained generator
    gen = load_gen(model_load_path,
                   use_bn=use_bn,
                   last_act=last_act,
                   use_wscale=use_wscale,
                   use_he_backward=use_he_backward)
    z_data0 = np.random.randn(1, n_latent, 1, 1)
    z_data1 = np.random.randn(1, n_latent, 1, 1)
    imgs = []
    for i in range(batch_size):
        alpha = 1. * i / (batch_size - 1)
        z_data = (1 - alpha) * z_data0 + alpha * z_data1
        z = nn.Variable.from_numpy_array(z_data)
        # Normalize the latent onto the unit hypersphere if requested.
        z = pixel_wise_feature_vector_normalization(z) if hyper_sphere else z
        # Assumes nn.set_auto_forward(True) is enabled by the caller;
        # otherwise call y.forward() before reading y.d.
        y = gen(z, test=True)
        imgs.append(y.d)
    imgs = np.concatenate(imgs, axis=0)
    return imgs
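
These snippets come from a larger PGGAN example and rely on a few names that are not defined here (np, nn, load_gen, pixel_wise_feature_vector_normalization). A minimal sketch of the setup they assume is shown below; the extension context and model path are illustrative placeholders, and the two project-local helpers are assumed to come from the example's own helper module.

# Sketch only: setup assumed by generate_interpolated_images above.
# The model path and extension context are hypothetical placeholders.
import numpy as np
import nnabla as nn
from nnabla.ext_utils import get_extension_context
# Project-local helpers used by the snippets (not part of nnabla itself):
# from helpers import load_gen, pixel_wise_feature_vector_normalization

nn.set_default_context(get_extension_context('cudnn', device_id='0'))
nn.set_auto_forward(True)  # so y.d is filled without an explicit y.forward()

imgs = generate_interpolated_images('path/to/generator_param.h5',  # hypothetical
                                    batch_size=16)
print(imgs.shape)  # (16, 3, H, W), with H and W set by the trained resolution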
Example #2
def generate_images(model_load_path,
                    batch_size=16, n_latent=512, use_bn=False,
                    hyper_sphere=True, last_act='tanh',
                    use_wscale=True, use_he_backward=False,
                    resolution_list=[4, 8, 16, 32, 64, 128],
                    channel_list=[512, 512, 256, 128, 64, 32]):
    # Load the pretrained generator
    gen = load_gen(model_load_path, use_bn=use_bn, last_act=last_act,
                   use_wscale=use_wscale, use_he_backward=use_he_backward)
    z_data = np.random.randn(batch_size, n_latent, 1, 1)
    z = nn.Variable.from_numpy_array(z_data)
    z = pixel_wise_feature_vector_normalization(z) if hyper_sphere else z
    # Assumes nn.set_auto_forward(True) is enabled by the caller;
    # otherwise call y.forward() before reading y.d.
    y = gen(z, test=True)
    return y.d
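
Since last_act='tanh', the returned batch is NCHW float data roughly in [-1, 1]. Below is a hedged sketch of one way to turn such a batch into 8-bit images and write them out with Pillow; save_batch, the output directory, and the model path are illustrative names, not part of the original example.

# Sketch only: convert a (N, 3, H, W) tanh-scaled batch to uint8 PNG files.
import os
import numpy as np
from PIL import Image  # assumes Pillow is installed

def save_batch(imgs, out_dir='generated'):  # hypothetical helper
    os.makedirs(out_dir, exist_ok=True)
    imgs = ((imgs + 1.0) * 127.5).clip(0, 255).astype(np.uint8)  # [-1, 1] -> [0, 255]
    imgs = imgs.transpose(0, 2, 3, 1)  # NCHW -> NHWC for PIL
    for i, img in enumerate(imgs):
        Image.fromarray(img).save(os.path.join(out_dir, 'img_{:03d}.png'.format(i)))

save_batch(generate_images('path/to/generator_param.h5'))  # hypothetical model path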
Example #3
    ctx = get_extension_context(opt['context'],
                                device_id=opt['device_id'],
                                type_config=opt['type_config'])
    nn.set_default_context(ctx)
    nn.set_auto_forward(True)

    # Generator config
    model_load_path = opt['generator_model']
    use_bn = False
    last_act = 'tanh'
    use_wscale = True
    use_he_backward = False
    # Load generator
    gen = load_gen(model_load_path,
                   use_bn=use_bn,
                   last_act=last_act,
                   use_wscale=use_wscale,
                   use_he_backward=use_he_backward)
    if experiment == 'orig':
        save_dir = "{}/AllGenImages".format(opt["fake_data_dir"])
        latent_vector = r"{}/latent_vectors.pkl".format(
            opt['record_latent_vector'])
        generate_images(gen,
                        num_images,
                        save_dir=save_dir,
                        latent_vector=latent_vector)

    if experiment == 'flip':
        save_dir = "{}/{}/".format(opt["fake_data_dir"],
                                   attr_list[opt['attribute']])
        latent = pickle.load(
Example #4
def main():
    # Args
    args = get_args()

    # Context
    ctx = get_extension_context(args.context,
                                device_id=args.device_id,
                                type_config=args.type_config)
    logger.info(ctx)
    nn.set_default_context(ctx)
    nn.set_auto_forward(True)

    # Monitor
    monitor = Monitor(args.monitor_path)

    # Validation
    logger.info("Start validation")

    num_images = args.valid_samples
    num_batches = num_images // args.batch_size

    # DataIterator
    di = data_iterator(args.img_path,
                       args.batch_size,
                       imsize=(args.imsize, args.imsize),
                       num_samples=args.valid_samples,
                       dataset_name=args.dataset_name)
    # generator
    gen = load_gen(args.model_load_path,
                   use_bn=args.use_bn,
                   last_act=args.last_act,
                   use_wscale=args.not_use_wscale,
                   use_he_backward=args.use_he_backward)

    # compute metric
    if args.validation_metric == "ms-ssim":
        logger.info("Multi Scale SSIM")
        monitor_time = MonitorTimeElapsed("MS-SSIM-ValidationTime",
                                          monitor,
                                          interval=1)
        monitor_metric = MonitorSeries("MS-SSIM", monitor, interval=1)
        from ms_ssim import compute_metric
        score = compute_metric(gen, args.batch_size, num_images, args.latent,
                               args.hyper_sphere)
        monitor_time.add(0)
        monitor_metric.add(0, score)
    elif args.validation_metric == "swd":
        logger.info("Sliced Wasserstein Distance")
        monitor_time = MonitorTimeElapsed("SWD-ValidationTime",
                                          monitor,
                                          interval=1)
        monitor_metric = MonitorSeries("SWD", monitor, interval=1)
        nhoods_per_image = 128
        nhood_size = 7
        level_list = [128, 64, 32, 16]  # TODO: use argument
        dir_repeats = 4
        dirs_per_repeat = 128
        from sliced_wasserstein import compute_metric
        score = compute_metric(di, gen, args.latent, num_batches,
                               nhoods_per_image, nhood_size, level_list,
                               dir_repeats, dirs_per_repeat, args.hyper_sphere)
        monitor_time.add(0)
        monitor_metric.add(0, score)  # averaged in the log
    else:
        raise ValueError(
            "Set `validation-metric` to either `ms-ssim` or `swd`.")
    logger.info(score)
    logger.info("End validation")