Example #1
def generate_image(
    frame, true_dist
):  # generates a batch of samples next to each other in one image!
    # test: generate some samples
    samples = session.run(fixed_noise_samples,
                          feed_dict={real_data: fixed_data
                                     })  # [-1,1] ### for MNIST # (50, 784)
    samples_255 = ((samples + 1.) * (255. / 2)).astype('int32')  # [0,255]
    samples_01 = ((samples + 1.) / 2.).astype('float32')  # [0,1]
    imsaver.save_images(samples_255.reshape((BATCH_SIZE, IM_DIM, IM_DIM)),
                        'samples_{}.png'.format(frame))  ### for MNIST
    print("Iteration %d : \n" % frame)
    # compare generated to real ones
    real = tf.reshape(fixed_data,
                      [BATCH_SIZE, IM_DIM, IM_DIM, 1])  ### for MNIST
    # real_gray = tf.image.rgb_to_grayscale(real) # tensor batch in&out returns original dtype = float [0,1] ### for MNIST
    pred = tf.reshape(samples_01,
                      [BATCH_SIZE, IM_DIM, IM_DIM, 1])  ### for MNIST
    # pred_gray = tf.image.rgb_to_grayscale(pred) ### for MNIST
    ssimval = tf.image.ssim(
        real, pred, max_val=1.0
    )  # in tensor batch, out tensor ssimvals (64,)  ### for MNIST
    mseval_per_entry = tf.keras.metrics.mse(
        real, pred)  # mse on grayscale, on [0,1] ### for MNIST
    mseval = tf.reduce_mean(mseval_per_entry, [1, 2])  ### for MNIST
    # ssim vals ~0.2 to 0.75; mse vals ~1e-3 to 9e-1
    ssimval_list = ssimval.eval()  # to numpy array # (64,)  # (50,)
    mseval_list = mseval.eval()  # (64,)   # (50,)
    # print(ssimval_list)
    # print(mseval_list)
    for i in range(0, 3):
        plotter.plot('SSIM for sample %d' % (i + 1), ssimval_list[i])
        plotter.plot('MSE for sample %d' % (i + 1), mseval_list[i])
        print("sample %d \t MSE: %.5f \t SSIM: %.5f \r\n" %
              (i, mseval_list[i], ssimval_list[i]))
Example #2
def generate_image(
    frame, true_dist
):  # test: generates a batch of samples next to each other in one image!
    samples = session.run(fixed_noise_samples)  # [-1,1]
    samples_255 = ((samples + 1.) * (255. / 2)).astype('int32')  # [0,255]
    imsaver.save_images(samples_255.reshape((BATCH_SIZE, IM_DIM, IM_DIM)),
                        'samples_{}.png'.format(frame))
Example #3
def generate_image(iteration):
    samples = session.run(all_fixed_noise_samples)
    samples = binarize(samples)
    #samples = ((samples+1.)*(255.99/2)).astype('int32')
    save_images.save_images(
        samples.reshape((BATCH_SIZE, NUM_NEURONS, NUM_BINS)),
        FOLDER + 'samples_{}.png'.format(iteration))
Example #4
def generate_image(frame, true_dist):   # generates a batch of samples next to each other in one image!
    samples = session.run(fixed_noise_samples, feed_dict={real_data: fixed_real_data, condition_data: fixed_labels}) # [-1,1]
    #for im in samples: # add a image for the samples
    #    tf.summary.image("{}-image".format(grad.name.replace(":","_")), im)
    samples_255 = ((samples+1.)*(255./2)).astype('int32') # [0,255] 
    samples_01 = ((samples+1.)/2.).astype('float32') # [0,1]
    for i in range(0, BATCH_SIZE):
        samples_255 = np.insert(samples_255, i*2, fixed_real_data_255[i,:], axis=0) # show cond digit next to generated sample
    imsaver.save_images(samples_255.reshape((2*BATCH_SIZE, 1, IM_DIM, IM_DIM)), 'samples_{}.jpg'.format(frame))
    print("Iteration %d : \n" % frame)
    # compare generated to real one
    real = tf.reshape(fixed_real_data, [BATCH_SIZE,IM_DIM,IM_DIM,1])
    pred = tf.reshape(samples_01, [BATCH_SIZE,IM_DIM,IM_DIM,1])
    ssimval = tf.image.ssim(real, pred, max_val=1.0) # tensor batch in, out tensor of ssimvals (64,)
    mseval_per_entry = tf.keras.metrics.mse(real, pred)  # mse on grayscale, on [0,1]
    mseval = tf.reduce_mean(mseval_per_entry, [1,2])
    tf.summary.tensor_summary("SSIM values", ssimval)
    tf.summary.tensor_summary("MSE values", mseval)
    ssimval_list = ssimval.eval()  # to numpy array # (50,)
    mseval_list = mseval.eval() # (50,)
    # print(ssimval_list)
    # print(mseval_list)
    for i in range (0,3):
        plotter.plot('SSIM for sample %d' % (i+1), ssimval_list[i])
        plotter.plot('MSE for sample %d' % (i+1), mseval_list[i])
        print("sample %d \t MSE: %.5f \t SSIM: %.5f \r\n" % (i, mseval_list[i], ssimval_list[i]))
Example #5
def generate_image(
        frame, final
):  # generates a batch of samples next to each other in one image!
    samples = session.run(fixed_noise_samples,
                          feed_dict={
                              condition_data: fixed_cond_data,
                              time_data: fixed_time_data
                          })  # [0,1]
    samples_255 = ((samples) * 255.99).astype('uint8')  # [0,1] -> [0,255]
    #print(samples.min()) #print(samples.max())

    # add ground truth
    for i in range(0, BATCH_SIZE):
        samples_255 = np.insert(samples_255,
                                i * 4,
                                fixed_cond_2_data_255[i, :],
                                axis=0)  # cond_time left of sample
        samples_255 = np.insert(samples_255,
                                i * 4 + 1,
                                fixed_cond_data_255[i, :],
                                axis=0)  # cond left of sample
        samples_255 = np.insert(samples_255,
                                i * 4 + 3,
                                fixed_real_data_255[i, :],
                                axis=0)  # real right of sample
    imsaver.save_images(samples_255.reshape((4 * BATCH_SIZE, IM_DIM, IM_DIM)),
                        'samples_{}.jpg'.format(frame),
                        alternate_viz=True,
                        conds=True,
                        gt=True,
                        time=True)

    print("Iteration %d :" % frame)
    # compare generated to real one
    real = tf.reshape(fixed_cond_data, [BATCH_SIZE, IM_DIM, IM_DIM, 1])
    pred = tf.reshape(samples, [BATCH_SIZE, IM_DIM, IM_DIM, 1])
    ssim_vals = tf.image.ssim(real, pred, max_val=1.0)  # on batch
    mse_vals = tf.reduce_mean(tf.keras.metrics.mse(real, pred),
                              [1, 2])  # mse on grayscale, on [0,1]
    psnr_vals = tf.image.psnr(real, pred,
                              max_val=1.0)  # on batch, out tf.float32
    ssim_avg = (tf.reduce_mean(ssim_vals)).eval()
    mse_avg = (tf.reduce_mean(mse_vals)).eval()
    psnr_avg = (tf.reduce_mean(psnr_vals)).eval()
    plotter.plot(
        'SSIM avg',
        ssim_avg)  # show average of ssim and mse vals over whole batch
    plotter.plot('MSE avg', mse_avg)
    plotter.plot('PSNR avg', psnr_avg)
    if (final):
        print('final iteration %d SSIM avg: %.2f MSE avg: %.2f' %
              (frame, ssim_avg, mse_avg))
        ssim_vals_list = ssim_vals.eval()
        mse_vals_list = mse_vals.eval()
        psnr_vals_list = psnr_vals.eval()
        print(ssim_vals_list)  # save it in nohup.out
        print(mse_vals_list)
        print(psnr_vals_list)
Example #6
def make_batch(imarr, save_fn, n=128):
    ims = imarr[:n]
    print(ims.shape)
    ims = ims.reshape((n, 96, 96, -1))
    #ims = ims.transpose(0, 3, 1, 2)
    ims = np.array([normalize(im) for im in ims])
    print(ims.shape)
    #ims = np.flip(ims, 3)
    saver.save_images(ims, save_fn, luptonize=False, unnormalize=False)
Example #7
def generate_image(
    frame, true_dist
):  # generates a batch of samples next to each other in one image!
    # test: generate some samples
    samples = session.run(fixed_noise_samples,
                          feed_dict={real_data_int:
                                     fixed_real_data_int})  # [-1,1]
    samples_255 = ((samples + 1.) * (255. / 2)).astype('int32')  # [0,255]
    imsaver.save_images(samples_255.reshape((BATCH_SIZE, 3, IM_DIM, IM_DIM)),
                        'samples_{}.png'.format(frame))  ### for MNIST
Example #8
def generate_image(frame, netG):
    fixed_noise_128 = torch.randn(128, 128)
    if use_cuda:
        fixed_noise_128 = fixed_noise_128.cuda(gpu)
    noisev = autograd.Variable(fixed_noise_128, volatile=True)
    samples = netG(noisev)
    samples = samples.view(-1, 3, 32, 32)
    samples = samples.mul(0.5).add(0.5)
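    # rescale generator output from [-1, 1] to [0, 1] for saving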
    samples = samples.cpu().data.numpy()

    save_images.save_images(samples,
                            './tmp/cifar10/samples_{}.jpg'.format(frame))
Example #9
def generate_image(i, save, save_dir, random=False):
    if random:
        random_noise_z = tf.constant(
            np.random.normal(size=(args.z, args.z)).astype('float32'))
        random_noise_samples_z = G(args.z,
                                   DIM,
                                   OUTPUT_DIM,
                                   args.z,
                                   noise=random_noise_z)
        samples = session.run(random_noise_samples_z)
    else:
        samples = session.run(fixed_noise_samples_z)
    samples = ((samples + 1.) * (255. / 2)).astype('int32')
    save_images(samples.reshape((args.z, 3, 32, 32)), 'samples_{}'.format(i),
                args.save_dir)
Example #10
def generate_image(frame, netG):
    noise = torch.randn(BATCH_SIZE, LATENT_DIM)
    if use_cuda:
        noise = noise.cuda(gpu)
    noisev = autograd.Variable(noise, volatile=True)

    ones = torch.ones(BATCH_SIZE, 28 * 28, 1)
    if use_cuda:
        ones = ones.cuda()

    seed = torch.bmm(ones, noisev.unsqueeze(1))
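    # (BATCH_SIZE, 28*28, 1) x (BATCH_SIZE, 1, LATENT_DIM) -> (BATCH_SIZE, 28*28, LATENT_DIM):
    # tile the same latent vector to every pixel position; x, y, r are
    # presumably global coordinate tensors for the CPPN-style generator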

    samples = netG(x, y, r, seed)
    samples = samples.view(BATCH_SIZE, 28, 28)
    # print samples.size()

    samples = samples.cpu().data.numpy()

    save_images.save_images(samples, 'tmp/mnist/samples_{}.png'.format(frame))
Example #11
def generate_image(frame, final): # generates a batch of samples next to each other in one image!
    inference_start_time = time.time()  # inference time analysis
    if(MODE == 'cond' or MODE == 'enc'):
        samples = session.run(fixed_noise_samples, feed_dict={condition_data: fixed_cond_data}) # [0,1]
    elif(MODE == 'plain'):
        samples = session.run(fixed_noise_samples) # [0,1]
    elif(MODE == 'vae'):
        samples = session.run(fixed_noise_samples, feed_dict={real_data: fixed_cond_data}) # [0,1]
        samples_noise = session.run(_noise_samples, feed_dict={real_data: fixed_cond_data}) # [0,1]
        noise_samples_255 = ((samples_noise)*255.).astype('uint8') # [0, 1] -> [0,255] 
    inference_end_time = time.time()  # inference time analysis
    inference_time = (inference_end_time - inference_start_time)
    print("The architecture took ", inference_time, "sec for the generation of ", BATCH_SIZE, "images")

    samples_255 = ((samples)*255.).astype('uint8') # [0, 1] -> [0,255] 
    # print('samples 255') # print(samples_255.min()) # print(samples_255.max())

    if(MODE == 'plain'):
        imsaver.save_images(samples_255.reshape((BATCH_SIZE, IM_DIM, IM_DIM)), 'samples_{}.jpg'.format(frame), alternate_viz=True)  
    elif(MODE == 'vae'):
        for i in range(0, BATCH_SIZE):
            samples_255 = np.insert(samples_255, i*2, fixed_cond_data_255[i,:], axis=0) # real (cond digit) next to sample
        imsaver.save_images(samples_255.reshape((2*BATCH_SIZE, IM_DIM, IM_DIM)), 'samples_{}.jpg'.format(frame), alternate_viz=True, conds=True)  
        imsaver.save_images(noise_samples_255.reshape((BATCH_SIZE, IM_DIM, IM_DIM)), 'noise_samples_{}.jpg'.format(frame), alternate_viz=True)  
    else: # if(MODE == 'enc' or MODE == 'cond') add ground truth
        for i in range(0, BATCH_SIZE):
            samples_255 = np.insert(samples_255, i*3, fixed_cond_data_255[i,:], axis=0) # cond left of sample
            samples_255 = np.insert(samples_255, i*3+2, fixed_real_data_255[i,:], axis=0) # real right of sample
        imsaver.save_images(samples_255.reshape((3*BATCH_SIZE, IM_DIM, IM_DIM)), 'samples_{}.jpg'.format(frame), alternate_viz=True, conds=True, gt=True)  

        print("Iteration %d :" % frame)
        # compare generated to real one
        real = tf.reshape(fixed_cond_data, [BATCH_SIZE,IM_DIM,IM_DIM,1])
        pred = tf.reshape(samples, [BATCH_SIZE,IM_DIM,IM_DIM,1])
        ssim_vals = tf.image.ssim(real, pred, max_val=1.0) # on batch
        mse_vals = tf.reduce_mean(tf.keras.metrics.mse(real, pred), [1,2]) # mse on grayscale, on [0,1]
        psnr_vals = tf.image.psnr(real, pred, max_val=1.0) # on batch, out tf.float32
        ssim_avg = (tf.reduce_mean(ssim_vals)).eval()
        mse_avg = (tf.reduce_mean(mse_vals)).eval()
        psnr_avg = (tf.reduce_mean(psnr_vals)).eval()
        plotter.plot('SSIM avg', ssim_avg) # show average of ssim and mse vals over whole batch
        plotter.plot('MSE avg', mse_avg)
        plotter.plot('PSNR avg', psnr_avg)
        if(final):
            print('final iteration %d SSIM avg: %.2f MSE avg: %.2f' % (frame, ssim_avg, mse_avg))
            ssim_vals_list = ssim_vals.eval()  
            mse_vals_list = mse_vals.eval()  
            psnr_vals_list = psnr_vals.eval()   
            print(ssim_vals_list) # save it in nohup.out
            print(mse_vals_list)
            print(psnr_vals_list)         
Example #12
def generate_image(
    frame, true_dist
):  # generates a batch of samples next to each other in one image!
    samples = session.run(fixed_noise_samples,
                          feed_dict={
                              real_data_int: fixed_real_data_int,
                              cond_data_int: fixed_cond_data_int
                          })  # [-1,1]
    samples_255 = ((samples + 1.) * (255. / 2)).astype('int32')  # [0,255]
    samples_01 = ((samples + 1.) / 2.).astype('float32')  # [0,1]
    for i in range(0, BATCH_SIZE):
        samples_255 = np.insert(
            samples_255, i * 2, fixed_cond_data_int[i],
            axis=0)  # show last frame next to generated sample
    imsaver.save_images(
        samples_255.reshape((2 * BATCH_SIZE, 3, IM_DIM, IM_DIM)),
        'samples_{}.jpg'.format(frame))
    print("Iteration %d : \n" % frame)
    # compare generated to real one
    real = tf.reshape(fixed_real_data_norm01, [BATCH_SIZE, IM_DIM, IM_DIM, 3])
    real_gray = tf.image.rgb_to_grayscale(
        real)  # tensor batch in&out; returns original dtype = float [0,1]
    pred = tf.reshape(samples_01, [BATCH_SIZE, IM_DIM, IM_DIM, 3])
    pred_gray = tf.image.rgb_to_grayscale(pred)
    ssimval = tf.image.ssim(
        real_gray, pred_gray,
        max_val=1.0)  # tensor batch in, out tensor of ssimvals (64,)
    mseval_per_entry = tf.keras.metrics.mse(
        real_gray, pred_gray)  # mse on grayscale, on [0,1]
    mseval = tf.reduce_mean(mseval_per_entry, [1, 2])
    # ssim vals ~0.2 to 0.75; mse vals ~1e-3 to 9e-1
    ssimval_list = ssimval.eval()  # to numpy array # (64,)
    mseval_list = mseval.eval()  # (64,)
    #print(ssimval_list)
    # print(mseval_list)
    for i in range(0, 3):
        plotter.plot('SSIM for sample %d' % (i + 1), ssimval_list[i])
        plotter.plot('MSE for sample %d' % (i + 1), mseval_list[i])
        print("sample %d \t MSE: %.5f \t SSIM: %.5f \r\n" %
              (i, mseval_list[i], ssimval_list[i]))
Example #13
def evaluate_inference_via_optimization(n_samples):
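    # Inference via optimization: for each target image, search for a latent
    # code z (L-BFGS-B, several random restarts) whose generated sample
    # minimizes the L2 distance to the target; the resulting per-image MSE
    # measures how well the generator can reconstruct held-out data.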
    np.random.seed(1)
    tf.set_random_seed(1)
    samples = Generator(n_samples)
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(session, MAIN_DIR + 'models/' + MODE + '_final.ckpt')

    gen = inf_train_gen()
    data = gen.next()
    data = 2 * ((data / 255.) - .5)

    sample_z_dim = 128
    z_var = tf.get_variable("z_var",
                            shape=[n_samples, sample_z_dim],
                            dtype=tf.float32,
                            trainable=False)
    z_var_pl = tf.placeholder(dtype=tf.float32,
                              shape=[n_samples, sample_z_dim],
                              name="z_var_placeholder")
    z_var_assign = tf.assign(z_var, z_var_pl, name="z_var_assign")
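    # the placeholder + assign op lets each random restart overwrite z_var
    # with a fresh Gaussian sample before running the optimizer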

    targets = data[np.random.randint(data.shape[0], size=n_samples)]

    g_loss = tf.nn.l2_loss(samples - targets)
    a = (samples + 1.) / 2.
    b = (targets + 1.) / 2.
    mse = ((a - b)**2)
    mse_2d = tf.reshape(mse, [n_samples, 32 * 32 * 3])
    mse = tf.reduce_mean(mse_2d, axis=1, keep_dims=True)
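    # per-image MSE over all 32*32*3 values; keep_dims gives shape (n_samples, 1)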

    optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss=g_loss,
                                                       var_list=[z_var],
                                                       method='L-BFGS-B',
                                                       options={
                                                           'maxiter': 5,
                                                           'disp': False
                                                       })

    # run the optimization from 3 different initializations
    results_images = []
    results_errors = []
    num_of_random_restarts = 3

    for i in xrange(num_of_random_restarts):
        z_sample = np.random.normal(0, 1, size=(n_samples, sample_z_dim))
        session.run(z_var_assign, {z_var_pl: z_sample})
        optimizer.minimize(session)

        generated_samples = session.run(samples)
        generated_samples_mse = session.run(mse)
        results_images.append(generated_samples)
        results_errors.append(generated_samples_mse)

    # select the best out of all random restarts
    best_images = np.zeros_like(results_images[0])
    best_images_errors = np.zeros_like(results_errors[0])
    for image_index in xrange(n_samples):
        best_img = results_images[0][image_index]
        best_img_error = results_errors[0][image_index][0]
        for indep_run_index in xrange(1, num_of_random_restarts):
            if best_img_error > results_errors[indep_run_index][image_index][0]:
                best_img_error = results_errors[indep_run_index][image_index][
                    0]
                best_img = results_images[indep_run_index][image_index]
        best_images[image_index] = best_img
        best_images_errors[image_index][0] = best_img_error

    best_images_errors_i = np.array(best_images_errors)
    best_images_errors = np.zeros((0, 1), dtype=np.float32)
    for i in xrange(1):
        best_images_errors = np.vstack(
            (best_images_errors, best_images_errors_i))

    if not os.path.exists('/home/elfeki/Desktop/samples'):
        os.makedirs('/home/elfeki/Desktop/samples')

    sampled = (np.array(best_images[:BATCH_SIZE]).reshape(-1, 3, 32, 32) +
               1) / 2.
    targeted = (np.array(targets[:BATCH_SIZE]).reshape(-1, 3, 32, 32) + 1) / 2.
    print sampled.shape, targeted.shape
    save_images(sampled, '/home/elfeki/Desktop/samples/sampled.jpg')
    save_images(targeted, '/home/elfeki/Desktop/samples/targeted.jpg')
    print('Evaluation Inference Via Optimization: {:.5f} +/- {:.5f}'.format(
        np.mean(best_images_errors), np.std(best_images_errors)))
Example #14
def generate_image(session, frame, fixed_noise_samples_128):
    samples = session.run(fixed_noise_samples_128)
    samples = ((samples + 1.) * (255. / 2)).astype('int32')
    save_images(
        samples.reshape((128, 3, 32, 32))[:], MAIN_DIR + '/generated_images/' +
        MODE + '/samples_{}.jpg'.format(frame))
Example #15
fixed_real_data = (fixed_data[:, 5, :]).reshape(  # frame index 5 assumed from the slices below
    (BATCH_SIZE, output_dim))  # current frame # (50, 1024)
fixed_cond_data = (fixed_data[:, 4, :]).reshape(
    (BATCH_SIZE, output_dim))  # only 1 last frame for now # (50, 1024)
fixed_cond_2_data = (fixed_data[:, 3, :]).reshape(
    (BATCH_SIZE, output_dim))  # only 1 last frame for now # (50, 1024)
fixed_time_data = (
    fixed_data[:, (5 - TIMESTEPS):5, :]
)  #.reshape((BATCH_SIZE, TIMESTEPS, output_dim)) # current frame # (50, 5, 1024)
fixed_real_data_255 = ((fixed_real_data) * 255.).astype('uint8')  # [0,255]
fixed_cond_data_255 = ((fixed_cond_data) * 255.).astype('uint8')  # [0,255]
fixed_cond_2_data_255 = ((fixed_cond_2_data) * 255.).astype('uint8')  # [0,255]
fixed_real_data_gt = np.copy(fixed_real_data_255)
fixed_cond_data_gt = np.copy(fixed_cond_data_255)
fixed_cond_2_data_gt = np.copy(fixed_cond_2_data_255)
imsaver.save_images(fixed_real_data_gt.reshape((BATCH_SIZE, IM_DIM, IM_DIM)),
                    'groundtruth_reals_grey.jpg',
                    alternate_viz=True)
imsaver.save_images(fixed_cond_data_gt.reshape((BATCH_SIZE, IM_DIM, IM_DIM)),
                    'groundtruth_conds_grey.jpg',
                    alternate_viz=True)
imsaver.save_images(fixed_cond_2_data_gt.reshape((BATCH_SIZE, IM_DIM, IM_DIM)),
                    'groundtruth_conds_2_grey.jpg',
                    alternate_viz=True)

if (START_ITER > 0):  # get noise from saved model: variable, implicit float
    fixed_noise = tf.get_variable("noise", shape=[BATCH_SIZE, NOISE_DIM])
else:
    fixed_noise = tf.Variable(tf.random_normal(shape=[BATCH_SIZE, NOISE_DIM]),
                              name='noise')

if (TIMESTEPS == 2):
Example #16
def generate_image(
        frame, final
):  # generates a batch of samples next to each other in one image!
    inference_start_time = time.time()
    if (MODE == 'cond_ordered'):
        samples = session.run(fixed_noise_samples,
                              feed_dict={condition_data:
                                         fixed_labels_array})  # [0,1]
    elif (MODE == 'cond'):
        samples = session.run(fixed_noise_samples,
                              feed_dict={condition_data:
                                         sorted_labels})  # [0,1]
    elif (MODE == 'plain'):
        samples = session.run(fixed_noise_samples)  # [0,1]
    else:
        samples = session.run(fixed_noise_samples,
                              feed_dict={real_data: sorted_data})  # [0,1]
    inference_end_time = time.time()  # inference time analysis
    inference_time = (inference_end_time - inference_start_time)
    print("The architecture took ", inference_time,
          "sec for the generation of ", BATCH_SIZE, "images")

    samples_255 = ((samples) * (255.)).astype('uint8')  # [0,255]

    if (MODE == 'enc' or MODE == 'vae' or MODE == 'cond'):
        for i in range(0, BATCH_SIZE):
            samples_255 = np.insert(samples_255,
                                    i * 2,
                                    fixed_data_255[i, :],
                                    axis=0)  # real (cond digit) next to sample
        imsaver.save_images(samples_255.reshape(
            (2 * BATCH_SIZE, IM_DIM, IM_DIM)),
                            'samples_{}.jpg'.format(frame),
                            alternate_viz=True,
                            conds=True)
        print("Iteration %d :" % frame)
        # compare generated to real one
        real = tf.reshape(sorted_data, [BATCH_SIZE, IM_DIM, IM_DIM, 1])
        pred = tf.reshape(samples, [BATCH_SIZE, IM_DIM, IM_DIM, 1])
        ssim_vals = tf.image.ssim(real, pred, max_val=1.0)  # on batch
        mse_vals = tf.reduce_mean(tf.keras.metrics.mse(real, pred),
                                  [1, 2])  # mse on grayscale, on [0,1]
        psnr_vals = tf.image.psnr(real, pred,
                                  max_val=1.0)  # on batch, out tf.float32
        ssim_avg = (tf.reduce_mean(ssim_vals)).eval()
        mse_avg = (tf.reduce_mean(mse_vals)).eval()
        psnr_avg = (tf.reduce_mean(psnr_vals)).eval()
        plotter.plot(
            'SSIM avg',
            ssim_avg)  # show average of ssim and mse vals over whole batch
        plotter.plot('MSE avg', mse_avg)
        plotter.plot('PSNR avg', psnr_avg)
        if (final):
            print('final iteration %d SSIM avg: %.2f MSE avg: %.2f' %
                  (frame, ssim_avg, mse_avg))
            ssim_vals_list = ssim_vals.eval()
            mse_vals_list = mse_vals.eval()
            psnr_vals_list = psnr_vals.eval()
            print(ssim_vals_list)  # save it in nohup.out
            print(mse_vals_list)
            print(psnr_vals_list)
    else:
        imsaver.save_images(samples_255.reshape(
            (BATCH_SIZE, IM_DIM,
             IM_DIM)), 'samples_{}.jpg'.format(frame))  # , alternate_viz=True)
    if (MODE == 'vae'):
        noise_samples = session.run(more_noise_samples)  # [0,1]
        noise_samples_255 = ((noise_samples) * (255.)).astype(
            'uint8')  # [0,255]
        imsaver.save_images(
            noise_samples_255.reshape((BATCH_SIZE, IM_DIM, IM_DIM)),
            'noise_samples_{}.jpg'.format(frame))  # , alternate_viz=True)
    if (final):  # calculate accuracy of samples (mnist classificator)
        if (MODE == 'cond_ordered'):
            _fixed_labels = np.zeros((fixed_labels_array.size, N_LABELS))
            _fixed_labels[np.arange(fixed_labels_array.size),
                          fixed_labels_array] = 1  # np to one-hot
        else:
            _fixed_labels = np.zeros((sorted_labels.size, N_LABELS))
            _fixed_labels[np.arange(sorted_labels.size),
                          sorted_labels] = 1  # np to one-hot
        accu = session.run(accuracy, feed_dict={x: samples, y: _fixed_labels})
        print('Accuracy at step %d: %s' % (frame, accu))
Example #17
BATCH_SIZE = 10  # Batch size
TARGET_SIZE = 300
OUT_DIM_FLOW = TARGET_SIZE * TARGET_SIZE * 2

gen = sintel.load_train_gen(BATCH_SIZE, (TARGET_SIZE, TARGET_SIZE, 3),
                            (TARGET_SIZE, TARGET_SIZE, 2))
_data, _flow = next(gen)
# images: (n, 6144) -- 3072 + 3072 + 3072 = three images for 32
# flows: (n, 4096) -- 2048 + 2048 = two flows for 32

outpath = "/home/linkermann/Desktop/MA/opticalFlow/opticalFlowGAN/data/gentest/"

flows = _flow[0]  # first from batch
flow1 = flows[0:OUT_DIM_FLOW]  # (2048,) for 32
flow2 = flows[OUT_DIM_FLOW:]  # (2048,) for 32
flow1 = flow1.reshape(TARGET_SIZE, TARGET_SIZE, 2)
flow2 = flow2.reshape(TARGET_SIZE, TARGET_SIZE, 2)

# save flow               # write_flo_file(flow, filename)
# filename must be string and end in .flo. Flow must be in (w,h,2) format
# fh.write_flo_file(flow1, outpath+'sample_sintel_flow.flo')
# load flow from file     # read_flo_file(filename)
# flowfile = fh.read_flo_file(outpath+"sample_sintel_flow.flo")

# show flow               # computeImg(flow)
flowimg = fh.computeFlowImg(flow1)  # (TARGET_SIZE, TARGET_SIZE, 3) color image
flowimage_T = np.transpose(flowimg, [2, 0, 1])  # (3, TARGET_SIZE, TARGET_SIZE)
save_images(flowimage_T.reshape((1, 3, TARGET_SIZE, TARGET_SIZE)),
            outpath + "flowsintel10.jpg")
Example #18
def interpolate(state_dict,
                generator,
                preview=True,
                interpolate=False,
                large_sample=False,
                img_size=28,
                img_channel=1,
                large_dim=1024,
                samples=[
                    random.randint(0, args.bsize - 1),
                    random.randint(0, args.bsize - 1)
                ]):
    """
    Args:
        state_dict: saved copy of trained params
        generator: generator model
        preview: show preview of images in grid form in original size (to pick which to blow up)
        interpolate: create interpolation gif
        large_sample: create a large sample of an individual picture
        img_size: size of your input samples, e.g. 28 for MNIST
        img_channel: number of color channels, 3 for cifar
        large_dim: dimension to blow up samples to for interpolation
        samples: indices of the samples you want to interpolate
    """

    x_d = img_size
    y_d = img_size
    c_d = img_channel
    #position = 2
    x, y, r = mnist.get_coordinates(x_d, y_d, batch_size=args.bsize)
    x_large = large_dim
    y_large = large_dim

    generator_int = generator
    generator_int.load_state_dict(
        torch.load(state_dict, map_location=lambda storage, loc: storage))

    noise = torch.randn(args.bsize, args.latdim)

    if preview:

        noise = noise.to(device)
        noisev = autograd.Variable(noise, volatile=True)

        ones = torch.ones(args.bsize, x_d * y_d, c_d)
        ones = ones.to(device)

        seed = torch.bmm(ones, noisev.unsqueeze(1))
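        # (bsize, x_d*y_d, 1) x (bsize, 1, latdim) -> (bsize, x_d*y_d, latdim):
        # one copy of the latent vector per pixel coordinate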

        gen_imgs = generator_int(x, y, r, seed)

        gen_imgs = gen_imgs.cpu().data.numpy()

        save_images.save_images(gen_imgs, 'generated_img/samples.png')

    if large_sample:

        assert args.sample < args.bsize, "Sample position is out of bounds"

        noise = noise.to(device)
        noisev = autograd.Variable(noise[args.sample], volatile=True)
        ones = torch.ones(1, x_large * y_large, 1).to(device)
        seed = torch.bmm(ones, noisev.unsqueeze(0).unsqueeze(0))
        x, y, r = mnist.get_coordinates(x_large, y_large, batch_size=1)

        gen_imgs = generator_int(x, y, r, seed)
        gen_imgs = gen_imgs.cpu().data.numpy()

        save_images.save_images(gen_imgs, 'generated_img/large_sample.png')
    if interpolate:

        nbSteps = args.frames
        alphaValues = np.linspace(0, 1, nbSteps)
        images = []

        noise = noise.to(device)
        noisev = autograd.Variable(noise[samples[0]], volatile=True)
        ones = torch.ones(1, x_large * y_large, 1).to(device)
        seed = torch.bmm(ones, noisev.unsqueeze(0).unsqueeze(0))
        x, y, r = mnist.get_coordinates(x_large, y_large, batch_size=1)

        samples.append(samples[0])

        for i in range(len(samples) - 1):
            for alpha in alphaValues:
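            # linear interpolation between consecutive latent codes;
            # alpha sweeps 0 -> 1 over nbSteps frames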
                vector = noise[samples[i]].unsqueeze(0) * (
                    1 - alpha) + noise[samples[i + 1]].unsqueeze(0) * alpha
                gen_imgs = generator_int(x, y, r, vector)

                if c_d == 3:
                    gen_img_np = np.transpose(gen_imgs.data[0].numpy())
                elif c_d == 1:
                    gen_img_np = np.transpose(gen_imgs.data.numpy()).reshape(
                        x_large, y_large, -1)

                images.append(gen_img_np)

        imageio.mimsave('generated_img/movie.gif', images)
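
A minimal usage sketch (hypothetical checkpoint path, generator instance, and sample indices; assumes args and device are set up as above):

# hypothetical call: render an interpolation gif between two fixed samples
interpolate('trained_generator.pt', netG, preview=False, interpolate=True,
            samples=[3, 17])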
Example #19
import numpy as np
import sys
sys.path.append('/home/linkermann/Desktop/MA/opticalFlow/opticalFlowGAN')
import tflib as lib
from tflib.save_images import save_images
#import tflib.SINTELdata as sintel
import tflib.flow_handler as fh

BATCH_SIZE = 10 # Batch size
TARGET_SIZE = 300
OUT_DIM_FLOW = TARGET_SIZE*TARGET_SIZE*2
            
outpath = "/home/linkermann/Desktop/MA/opticalFlow/opticalFlowGAN/data/gentest/"

flow = np.zeros((TARGET_SIZE,TARGET_SIZE,2))
flow[:,:,0] = ...
flow[:,:,1] = ...
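# flow channels: [:, :, 0] = horizontal (u), [:, :, 1] = vertical (v)
# displacement (Middlebury .flo convention)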

# show flow               # computeImg(flow)
flowimg = fh.computeFlowImg(flow)    # (TARGET_SIZE, TARGET_SIZE, 3) # now color img!! :)
flowimage_T = np.transpose(flowimg, [2,0,1])  #  (3, TARGET_SIZE, TARGET_SIZE)
save_images(flowimage_T.reshape((1,3,TARGET_SIZE,TARGET_SIZE)), outpath+"flowfieldviz.jpg")
Example #20
    # Dataset iterator
    train_gen, dev_gen = sim_pop_activity.load(num_samples=NUM_SAMPLES, batch_size=BATCH_SIZE, dim=NUM_BINS,\
    num_neurons=NUM_NEURONS, corr=CORR, group_size=GROUP_SIZE, refr_per=REFR_PER)

    def inf_train_gen():
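        # cycle through the finite epoch generator forever so the train loop
        # below can simply call next() on it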
        while True:
            for (images, ) in train_gen():
                yield images

    # Save a batch of ground-truth samples
    _x = next(inf_train_gen())
    _x_r = session.run(real_data, feed_dict={real_data: _x})
    _x_r = binarize(_x_r)
    #_x_r = ((_x_r+1.)*(255.99/2)).astype('int32')
    save_images.save_images(_x_r.reshape((BATCH_SIZE, NUM_NEURONS, NUM_BINS)),
                            FOLDER + 'samples_groundtruth.png')

    #try to load trained parameters
    load()

    # Train loop
    session.run(tf.global_variables_initializer())
    gen = inf_train_gen()
    start_time = time.time()
    for iteration in range(ITERS):
        # Train generator
        if iteration > 0:
            _ = session.run(gen_train_op)

        # Train critic
        disc_iters = CRITIC_ITERS