Example #1
def sample_model(sess, idx, test_X, test_Y, testG, testF, testG_back,
                 testF_back):
    # RUN THEM THROUGH THE MODEL
    x_val, y_val, y_samp, x_samp, y_cycle_samp, x_cycle_samp = sess.run(
        [test_X, test_Y, testG, testF, testG_back, testF_back])

    # GRAB THE RETURNED RESULTS AND COLOR CORRECT / MERGE DOWN TO SINGLE IMAGE FILE EACH
    pretty_x = merge(utils.inverse_transform(x_samp), [1, 1])
    pretty_y = merge(utils.inverse_transform(y_samp), [1, 1])
    pretty_x_cycle = merge(utils.inverse_transform(x_cycle_samp), [1, 1])
    pretty_y_cycle = merge(utils.inverse_transform(y_cycle_samp), [1, 1])

    if not os.path.isdir(SAMPLE_DIR):
        os.makedirs(SAMPLE_DIR)

    scipy.misc.imsave(os.path.join(SAMPLE_DIR, '{}_X.jpg'.format(idx)),
                      x_val[0])
    scipy.misc.imsave(os.path.join(SAMPLE_DIR, '{}_Y.jpg'.format(idx)),
                      y_val[0])

    scipy.misc.imsave(os.path.join(SAMPLE_DIR, '{}_X_2Y.jpg'.format(idx)),
                      y_samp[0])
    scipy.misc.imsave(os.path.join(SAMPLE_DIR, '{}_Y_2X.jpg'.format(idx)),
                      x_samp[0])

    scipy.misc.imsave(os.path.join(SAMPLE_DIR, '{}_X_2Y_2X.jpg'.format(idx)),
                      x_cycle_samp[0])
    scipy.misc.imsave(os.path.join(SAMPLE_DIR, '{}_Y_2X_2Y.jpg'.format(idx)),
                      y_cycle_samp[0])
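
All of these snippets assume a utils.inverse_transform that maps generator output from the tanh range [-1., 1.] back to [0., 1.], and (in this example) a merge helper that tiles a batch into one grid image. A minimal sketch of both under those assumptions; the actual helpers in each repository may differ:

import numpy as np

def inverse_transform(images):
    # map tanh-range output [-1., 1.] to [0., 1.]
    return (images + 1.) / 2.

def merge(images, size):
    # tile a batch (N, H, W, C) into a single (size[0]*H, size[1]*W, C) image
    n, h, w, c = images.shape
    canvas = np.zeros((size[0] * h, size[1] * w, c), dtype=images.dtype)
    for idx in range(min(n, size[0] * size[1])):
        row, col = idx // size[1], idx % size[1]
        canvas[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = images[idx]
    return canvas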
Example #2
def calculate_rmse(n_series, n_features, n_lags, X, y, scaler, model):
    """
    Computes the root mean squared error (RMSE). It is only used by these algorithms:
    * Random forest
    * AdaBoost
    * SVM (support vector machines)
    The other algorithms have their own way of computing this error.

    Parameters:
    - n_series -- int, number of time steps to predict into the future; for these methods it is always 1 for now, until multi-step prediction is implemented
    - n_features -- int, number of features the model is trained with, also known as exogenous variables
    - n_lags -- int, number of lags used for training; intuitively, a sliding window indicating how many past time steps are taken into account to make the prediction
    - X -- numpy array, data used to make the predictions whose error is computed
    - y -- numpy array, observations against which the predictions are compared to compute the error
    - scaler -- instance of sklearn's MinMaxScaler, used to scale the data and later return it to the original scale; here it is used to revert the scaling and recover the real values
    - model -- trained sklearn model used to make the predictions

    Returns:
    - inv_y -- numpy array, observations in the original scale
    - inv_yhat -- numpy array, predictions in the original scale
    - rmse -- float, root mean squared error between the observations and the predictions
    """
    import utils

    yhat = model.predict(X)
    inv_yhat = utils.inverse_transform(yhat, scaler, n_features)
    inv_y = utils.inverse_transform(y, scaler, n_features)

    # calculate RMSE
    rmse = math.sqrt(mean_squared_error(inv_y, inv_yhat))
    return inv_y, inv_yhat, rmse
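
The repository's utils.inverse_transform is not shown for this tabular case; a hypothetical sketch of what it presumably does with a MinMaxScaler fitted on [features..., target], so predictions can be mapped back to the original scale (the function name and column layout are assumptions):

import numpy as np

def inverse_transform_sketch(values, scaler, n_features):
    # hypothetical: pad the 1-D values with zero feature columns so the scaler
    # (fitted on n_features + 1 columns) can invert them, then keep only the
    # target column
    values = np.asarray(values).reshape(-1, 1)
    padded = np.concatenate([np.zeros((values.shape[0], n_features)), values], axis=1)
    return scaler.inverse_transform(padded)[:, -1]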
Example #3
def main(_):
    # Using the Winograd non-fused algorithms provides a small performance boost.
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_index

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
    run_config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    sess = tf.Session(config=run_config)

    data_path = '../../Data/BMP_320x280/tfrecord/train.tfrecords'
    data_reader = reader.Reader(data_path,
                                name='data',
                                image_size=(320, 280, 3),
                                batch_size=1)
    x_imgs_, y_imgs_, x_imgs_ori_, y_imgs_ori_, img_name = data_reader.feed()
    sess.run(tf.global_variables_initializer())

    # threads for tfrecord
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    iter_time = 0
    try:
        while iter_time < FLAGS.iters:
            x_imgs, y_imgs, x_imgs_ori, y_imgs_ori = sess.run(
                [x_imgs_, y_imgs_, x_imgs_ori_, y_imgs_ori_])
            _, h, w, c = x_imgs.shape
            canvas = np.zeros((h, 4 * w, c), dtype=np.uint8)

            x_imgs = utils.inverse_transform(x_imgs)
            y_imgs = utils.inverse_transform(y_imgs)
            x_imgs_ori = utils.inverse_transform(x_imgs_ori)
            y_imgs_ori = utils.inverse_transform(y_imgs_ori)

            y_imgs = utils.draw_minutiae(x_imgs, y_imgs)

            canvas[:, 0:w, :] = x_imgs[0]
            canvas[:, w:2 * w, :] = y_imgs
            canvas[:, 2 * w:3 * w, :] = x_imgs_ori[0]
            canvas[:, 3 * w:, :] = y_imgs_ori[0]

            iter_time += 1

            cv2.imshow('show', canvas[:, :, ::-1])
            if cv2.waitKey(0) == 27:
                sys.exit('Esc clicked!')

    except KeyboardInterrupt:
        coord.request_stop()
    except Exception as e:
        coord.request_stop(e)
    finally:
        # when done, ask the threads to stop
        coord.request_stop()
        coord.join(threads)
Example #4
    def plots(self, imgs_, iter_time, save_file):
        # reshape image from vector to (N, H, W, C)
        imgs_fake = np.reshape(imgs_[0], (self.flags.sample_batch, 64, 64, 3))

        imgs = []
        for img in imgs_fake:
            imgs.append(img)

        # parameters for plot size
        scale, margin = 0.04, 0.01
        n_cols, n_rows = int(np.sqrt(len(imgs))), int(np.sqrt(len(imgs)))

        imgs = [utils.inverse_transform(imgs[idx]) for idx in range(len(imgs))]

        output = (imgs[0]).reshape(self.image_size[0], self.image_size[1], self.image_size[2])
        for row_index in range(n_rows - 1):
            output = cv.vconcat([output, (imgs[(row_index + 1) * n_cols]).reshape(self.image_size[0],
                                                                                  self.image_size[1],
                                                                                  self.image_size[2])])

        for col_index in range(n_cols - 1):
            out = (imgs[col_index + 1]).reshape(self.image_size[0], self.image_size[1], self.image_size[2])
            for row_index in range(n_rows - 1):
                out = cv.vconcat([out, (imgs[(row_index + 1) * n_cols + col_index + 1]).reshape(self.image_size[0],
                                                                                                self.image_size[1],
                                                                                                self.image_size[2])])
            output = cv.hconcat([output, out])

        output = np.uint8(output*255.0)
        print(np.min(output), np.max(output))

        cv.imwrite(save_file + '/sample_{}.png'.format(str(iter_time)), output)
Example #5
    def plots(imgs, iter_time, image_size, save_file):

        scale, margin = 0.02, 0.02
        n_cols, n_rows = len(imgs), imgs[0].shape[0]
        cell_size_h, cell_size_w = imgs[0].shape[1] * scale, imgs[0].shape[
            2] * scale

        fig = plt.figure(figsize=(cell_size_w * n_cols,
                                  cell_size_h * n_rows))  # (column, row)
        gs = gridspec.GridSpec(n_rows, n_cols)  # (row, column)
        gs.update(wspace=margin, hspace=margin)

        imgs = [inverse_transform(imgs[idx]) for idx in range(len(imgs))]

        # save a larger composite image
        for col_index in range(n_cols):
            for row_index in range(n_rows):
                ax = plt.subplot(gs[row_index * n_cols + col_index])
                plt.axis('off')
                ax.set_xticklabels([])
                ax.set_yticklabels([])
                ax.set_aspect('equal')
                plt.imshow((imgs[col_index][row_index]).reshape(
                    image_size[0], image_size[1]),
                           cmap='Greys_r')

        plt.savefig(save_file + f'/sample_{str(iter_time).zfill(5)}.png',
                    bbox_inches='tight')

        plt.close(fig)
Example #6
 def slerp_interpolation(self, batch_image, batch_label, epoch, interval):
     # spherical interpolation (slerp) test
     # for _ in xrange(3):  # test three times
     # print 'slerp test!!'
     index_select = np.random.randint(0, batch_image.shape[0], self.test_slerp_count*2*2)
     encode_512 = self.sess.run(self.encode_slerp,
                          feed_dict={self.batch_data: batch_image[index_select],
                                     self.input_label: batch_label[0][index_select]})
     encode_50=np.random.uniform(high=1, low=-1, size=[self.test_slerp_count*2,self.noise_z])
     encode_562=np.concatenate([encode_512,encode_50],axis=1)
     encode_sub = np.split(encode_562,2,axis=0)
     decodes = []
     for idx, ratio in enumerate(np.linspace(0, 1, 10)):
         z = np.stack([utils.slerp(ratio, r1, r2) for r1, r2 in
                       zip(encode_sub[0], encode_sub[1])])
         z = np.reshape(z, [-1,562])
         z_decode = self.sess.run(self.image_syn_slerp,
                             feed_dict={self.encode_slerp_z: z})
         decodes.append(z_decode)
     decodes = np.stack(decodes).transpose([1, 0, 2, 3, 4])
     index_sub=np.split(index_select,2,axis=0)
     index_sub=np.split(index_sub[0],2,axis=0)
     for idx, img in enumerate(decodes):
         img = np.concatenate([[batch_image[index_sub[0][idx]]], img,
                               [batch_image[index_sub[1][idx]]]], 0)
         img = utils.inverse_transform(img)[:, :, :, ::-1]
         utils.save_image(img, os.path.join('./{}'.format(self.result_path),
                                            'test{:08}_interp_G_{:01}.png'.format(epoch,interval )), nrow=10 + 2)
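
The slerp examples above and below rely on utils.slerp; a minimal sketch under the usual spherical-linear-interpolation definition (the repository's helper may handle edge cases differently):

import numpy as np

def slerp(ratio, v0, v1, eps=1e-7):
    # interpolate between latent vectors v0 and v1 along the great circle
    v0_n = v0 / (np.linalg.norm(v0) + eps)
    v1_n = v1 / (np.linalg.norm(v1) + eps)
    omega = np.arccos(np.clip(np.dot(v0_n, v1_n), -1.0, 1.0))
    if np.sin(omega) < eps:  # nearly parallel vectors: fall back to linear interpolation
        return (1.0 - ratio) * v0 + ratio * v1
    return (np.sin((1.0 - ratio) * omega) * v0 + np.sin(ratio * omega) * v1) / np.sin(omega)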
Example #7
def save_image_to_memory(image):
    image = inverse_transform(image)
    image = merge(image, (1, 1))
    image = cv2.cvtColor(image.astype('uint8'), cv2.COLOR_RGB2BGR)

    is_success, buffer = cv2.imencode(".jpg", image)
    io_buf = io.BytesIO(buffer)
    return io_buf
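
A hypothetical usage of the function above: the returned io.BytesIO holds an encoded JPEG that can be written to disk or streamed over HTTP (the file name is an assumption):

buf = save_image_to_memory(image)  # image assumed to be in [-1., 1.]
with open('preview.jpg', 'wb') as f:
    f.write(buf.getvalue())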
Example #8
    def slerp_interpolation(self, batch_image, batch_label, epoch, interval):
        # spherical interpolation (slerp) test
        # for _ in xrange(3):  # test three times
        # print 'slerp test!!'
        index_select = np.random.randint(0, batch_image.shape[0],
                                         self.test_slerp_count * 2)
        encode_512 = self.sess.run(self.encode_slerp,
                                   feed_dict={
                                       self.batch_data:
                                       batch_image[index_select],
                                       self.input_label:
                                       batch_label[0][index_select],
                                       self.input_pose:
                                       batch_label[1][index_select],
                                       self.input_light:
                                       batch_label[2][index_select]
                                   })
        pose = np.asarray(batch_label[1][index_select], np.int32)
        light = np.asarray(batch_label[2][index_select], np.int32)

        b_1 = np.zeros((pose.size, self.pose_c), dtype=np.float32)
        b_1[np.arange(pose.size), pose] = 1.

        noise = np.zeros((light.size, self.light_c), dtype=np.float32)
        noise[np.arange(light.size), light] = 1.

        encode_519 = np.concatenate([np.reshape(encode_512, [-1, 512]), b_1],
                                    axis=1)
        encode_539 = np.concatenate([encode_519, noise], 1)
        encode_sub = np.split(encode_539, 2, axis=0)
        decodes = []
        for idx, ratio in enumerate(np.linspace(0, 1, 10)):
            z = np.stack([
                utils.slerp(ratio, r1, r2)
                for r1, r2 in zip(encode_sub[0], encode_sub[1])
            ])
            z = np.reshape(z, [-1, 1, 1, 512 + self.light_c + self.pose_c])
            z_decode = self.sess.run(self.image_syn_slerp,
                                     feed_dict={
                                         self.slerp_code: z,
                                         self.batch_data: batch_image,
                                         self.input_label: batch_label[0],
                                         self.input_pose: batch_label[1],
                                         self.input_light: batch_label[2]
                                     })
            decodes.append(z_decode)
        decodes = np.stack(decodes).transpose([1, 0, 2, 3, 4])
        index_sub = np.split(index_select, 2, axis=0)
        for idx, img in enumerate(decodes):
            img = np.concatenate([[batch_image[index_sub[0][idx]]], img,
                                  [batch_image[index_sub[1][idx]]]], 0)
            img = utils.inverse_transform(img)[:, :, :, ::-1]
            utils.save_image(img,
                             os.path.join(
                                 './{}'.format(self.result_path),
                                 'test{:08}_interp_G_{:01}.png'.format(
                                     epoch, interval)),
                             nrow=10 + 2)
Example #9
 def sample_and_save_imgs(self, iter_time=0, istrain=True):
     # during training, only sample every `sample_freq` iterations
     if istrain and np.mod(iter_time, self.flags.sample_freq) != 0:
         return

     out_dir = self.sample_out_dir if istrain else self.test_out_dir
     imgs = self.model.sample_imgs(sample_size=self.flags.sample_batch)
     imgs = [utils.inverse_transform(imgs[idx]) for idx in range(len(imgs))]  # [(batch_size, height, width, 3)]
     for i in range(imgs[0].shape[0]):
         imgs[0][i] = imgs[0][i][:, :, [2, 1, 0]]  # RGB to BGR for cv2.imwrite
         file_path = out_dir + '/sample_{}_{}.jpg'.format(str(iter_time), str(i + 1))
         cv2.imwrite(file_path, imgs[0][i])
Example #10
    def predict_img(self, img_path):
        img_np = self.gan.read_img(img_path)
        print('[Info] img_np shape: {}'.format(img_np.shape))

        img_fake = self.gan.predict_img(img_np, self.sess)
        img_fake = np.squeeze(img_fake, axis=0)
        print('[Info] img_fake shape: {}'.format(img_fake.shape))

        img_fake = inverse_transform(img_fake)
        img_fake = img_fake.astype(np.uint8)
        # show_img_rgb(img_fake)
        return img_fake
Example #11
    def plots_test(self, imgs, img_name, save_file):
        num_imgs = len(imgs)

        # grayscale canvas: images are placed side by side along the width
        canvas = np.zeros((self.img_size[0], num_imgs * self.img_size[1]),
                          np.uint8)
        for idx in range(num_imgs):
            canvas[:, idx * self.img_size[1]: (idx + 1) * self.img_size[1]] = \
                np.squeeze(255. * utils.inverse_transform(imgs[idx]))

        img_name_ = img_name.astype('U26')[0]
        # save imgs on test folder
        cv2.imwrite(os.path.join(save_file, img_name_), canvas)
Example #12
    def calculate(self, preds, gts):
        # from list to array
        arr_preds = np.asarray(preds).astype(np.float32)
        arr_gts = np.asarray(gts).astype(np.float32)

        # reshape to (N, image_size, image_size)
        arr_preds = np.reshape(
            arr_preds,
            (arr_preds.shape[0], self.image_size[0], self.image_size[1]))
        arr_gts = np.reshape(
            arr_gts,
            (arr_gts.shape[0], self.image_size[0], self.image_size[1]))

        # convert from [-1., 1.] to [0., 255.]
        arr_preds_ = utils.inverse_transform(arr_preds) * 255.
        arr_gts_ = utils.inverse_transform(arr_gts) * 255.

        mae = self.mean_absoulute_error(arr_preds_, arr_gts_)
        psnr = self.peak_signal_to_noise_ratio(arr_preds_, arr_gts_)

        return mae, psnr
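
The two metric helpers called above are not shown here; minimal standalone sketches under the standard definitions (the class's actual mean_absoulute_error and peak_signal_to_noise_ratio methods may differ):

import numpy as np

def mean_absolute_error(preds, gts):
    # average absolute pixel difference, inputs already scaled to [0., 255.]
    return np.mean(np.abs(preds - gts))

def peak_signal_to_noise_ratio(preds, gts, max_val=255.):
    # PSNR = 20*log10(MAX) - 10*log10(MSE)
    mse = np.mean((preds - gts) ** 2)
    return 20. * np.log10(max_val) - 10. * np.log10(mse)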
Example #13
    def plots(self, imgs, iter_time, save_file, names=None):
        num_canvas = len(imgs)

        # transform [-1., 1.] to [0., 1.]
        imgs = [utils.inverse_transform(imgs[idx]) for idx in range(len(imgs))]

        # save a larger composite image for each canvas
        for canvas_idx in range(num_canvas):
            utils.plots(imgs[canvas_idx],
                        iter_time,
                        save_file,
                        self.grid_cols,
                        self.grid_rows,
                        self.flags.sample_batch,
                        name=names[canvas_idx])
Example #14
def generate_images(dcgan, sess, predefined_mean_z=None, z_sigma=0.05):
    print(FLAGS.generate_name_postfix)
    strLog = ""
    generation_data = []
    dir = FLAGS.generate_dir
    if not os.path.exists(dir):
        os.makedirs(dir)

    imgInd = 0
    while (imgInd < FLAGS.generate_num):
        if predefined_mean_z is None:
            z_sample = np.random.uniform(-1, 1, size=(FLAGS.batch_size, FLAGS.z_dim))
        else:
            z_sample = [[np.random.normal(mu, z_sigma) for mu in predefined_mean_z] for _ in range(FLAGS.batch_size)]
            z_sample = np.array(z_sample)

        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})

        ps = sess.run(dcgan.D, feed_dict={dcgan.inputs: samples})
        probs = ps[:, 0]

        images = inverse_transform(samples)
        for im, z, d_prob in zip(images, z_sample, probs):
            if imgInd >= FLAGS.generate_num:
                break

            if FLAGS.generate_min_D_prob is None or d_prob >= FLAGS.generate_min_D_prob:
                filename = 'gen_%d_dPr_%.2f.png' % (imgInd, d_prob)
                scipy.misc.imsave(os.path.join(dir, filename), np.squeeze(im))
                strLog += filename + ',' + FLAGS.generate_name_postfix + ',' + strftime("%Y%m%d%H%M%S", gmtime()) + '\n'
                generation_data.append((filename, str(d_prob),','.join([str(_) for _ in z])))
                imgInd += 1

            if imgInd % 50 == 0:
                print(imgInd)

    f = open(dir + "/generated_list.txt", "a")
    f.write(strLog)
    f.close()
    with open(dir + "/generated_info.csv", 'w') as ff:
        ff.write('file,D_prob,' + ','.join(['z' + str(_) for _ in range(0, FLAGS.z_dim)]) + '\n')
        for g_d in generation_data:
            ff.write(','.join(g_d) + '\n')
    # plt.hist(probs)
    # plt.show()
    return [os.path.join(dir, gd[0]) for gd in generation_data]
Example #15
 def slerp_interpolation(self,
                         input_data,
                         batch_image,
                         sess,
                         savepath,
                         test_slerp_count=1):
     # slerp interpolation cannot be used when the patch-based approach is applied
     # spherical interpolation (slerp) test
     index_select = np.random.randint(0, batch_image.shape[0],
                                      test_slerp_count * 2)
     encode_512 = sess.run(
         self.encode_slerp,
         feed_dict={input_data: batch_image[index_select]})
     encode_50 = np.random.uniform(
         high=1, low=-1, size=[test_slerp_count * 2, self.noise_z])
     encode_562 = np.concatenate([encode_512, encode_50], axis=1)
     encode_sub = np.split(encode_562, 2, axis=0)
     decodes = []
     for idx, ratio in enumerate(np.linspace(0, 1, 10)):
         z = np.stack([
             utils.slerp(ratio, r1, r2)
             for r1, r2 in zip(encode_sub[0], encode_sub[1])
         ])
         z = np.reshape(z, [-1, 562])
         z_decode = sess.run(self.image_syn_slerp,
                             feed_dict={
                                 input_data: batch_image[index_select],
                                 self.encode_slerp_z: z
                             })
         decodes.append(z_decode)
     decodes = np.stack(decodes).transpose([1, 0, 2, 3, 4])
     index_sub = np.split(index_select, 2, axis=0)
     index_sub = np.split(index_sub[0], 2, axis=0)
     for idx, img in enumerate(decodes):
         img = np.concatenate([[batch_image[index_sub[0][idx]]], img,
                               [batch_image[index_sub[1][idx]]]], 0)
         img = utils.inverse_transform(img)[:, :, :, ::-1]
         utils.save_image(
             img,
             os.path.join('./{}'.format(savepath),
                          'test{:08}_interp_G.png'.format(self.model_id)),
             nrow=10 + 2)
Example #16
    def plots(self, imgs_, iter_time, save_file):
        imgs_fake = np.reshape(imgs_[0],
                               (self.flags.sample_batch, *self.image_size))

        imgs = []
        for img in imgs_fake:
            imgs.append(img)

        scale, margin = 0.04, 0.01
        n_cols, n_rows = int(np.sqrt(len(imgs))), int(np.sqrt(len(imgs)))
        cell_size_h, cell_size_w = imgs[0].shape[0] * scale, imgs[0].shape[
            1] * scale

        fig = plt.figure(figsize=(cell_size_w * n_cols, cell_size_h * n_rows))
        gs = gridspec.GridSpec(n_rows, n_cols)
        gs.update(wspace=margin, hspace=margin)

        imgs = [utils.inverse_transform(imgs[idx]) for idx in range(len(imgs))]

        for col_index in range(n_cols):
            for row_index in range(n_rows):
                ax = plt.subplot(gs[row_index * n_cols + col_index])
                plt.axis('off')
                ax.set_xticklabels([])
                ax.set_yticklabels([])
                ax.set_aspect('equal')
                if self.image_size[2] == 3:
                    plt.imshow((imgs[row_index * n_cols + col_index]).reshape(
                        self.image_size[0], self.image_size[1],
                        self.image_size[2]),
                               cmap='Greys_r')
                elif self.image_size[2] == 1:
                    plt.imshow((imgs[row_index * n_cols + col_index]).reshape(
                        self.image_size[0], self.image_size[1]),
                               cmap='Greys_r')
                else:
                    raise NotImplementedError

        # save and close once the full grid has been drawn
        plt.savefig(save_file + '/sample_{}.png'.format(str(iter_time)),
                    bbox_inches='tight')
        plt.close(fig)
Example #17
def eval_mnist_stacked_generate_images(sess, dcgan, config):
    # generate N images
    n = config.eval_mnist_stacked_examples

    generated = np.zeros((n, 3, 28, 28), dtype=np.float32)

    n_generated = 0
    while True:
        to_be_added_count = config.batch_size
        if n_generated + config.batch_size > n:
            to_be_added_count = n - n_generated

        if config.z_uniform:
            sample_z = np.random.uniform(-1, 1, size=(config.batch_size, dcgan.z_dim))
        else:
            sample_z = np.random.normal(0, 1, size=(config.batch_size, dcgan.z_dim))

        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: sample_z})
        samples = inverse_transform(samples)
        samples = np.transpose(samples, (0, 3, 1, 2))

        generated[n_generated:n_generated + to_be_added_count] = samples[0:to_be_added_count]

        n_generated += to_be_added_count

        print(n_generated)

        if n_generated >= n:
            break

    """
    # save to an nparray of shape [N, 3, 28, 28]
    dir_str = './' + config.main_output_dir + '/eval_stacked_mnist/'
    if not os.path.exists(dir_str):
        os.makedirs(dir_str)
    np.save(dir_str + "eval_mnist_stacked_gen_dataset.npy", generated)"""
    return generated
Example #18

def list_files(directory, extension):
    return sorted((f for f in os.listdir(directory) if f.endswith('.' + extension)))


if __name__ == '__main__':
    crop_size = 148
    resize_size = (64, 64)
    BATCH_SIZE = 32

    dataset = KinectLeap(model='cvae')

    for i in range(len(dataset)):
        X, y, g, y_, y__, g_, g__ = dataset[i]
        X = inverse_transform(X)
        print(y)
        print(g)
        print(y_)
        print(g_)
        print(y__)
        print(g__)
        plt.figure()
        plt.imshow(X)
        plt.axis('off')
        plt.show()

    # Data
    # data transform
    data_transform = transforms.Compose([
                     transforms.CenterCrop((crop_size, crop_size)),
Example #19
 def predictF(image_data):
     img = (image_data / 127.5) - 1.
     res = sess.run(genF, feed_dict={real_Y: [img]})
     res = utils.inverse_transform(res[0])
     # scipy.misc.imsave('resultF.jpg', res)
     return res
Example #20
        out = layer(img)
        loss = -(out[0]**2).mean()
        loss.backward()

        g = params.grad.data
        g = g / g.abs().mean()
        params = params - learning_rate * g

        img = torch.irfft(params, signal_ndim=2)

        # Normalize image
        # from https://github.com/eriklindernoren/PyTorch-Deep-Dream
        for c in range(3):
            m, s = mean[c], std[c]
            img[0][c] = torch.clamp(img[0][c], -m / s, (1 - m) / s)

        # undo jitter
        img = torch.roll(img, shifts=(-y_jitter, -x_jitter), dims=(-2, -1))

# print(print_probs(F.softmax(net(img), dim=1)[0]))

# for proc in psutil.process_iter():
#     if proc.name() == "display":
#         proc.kill()

img = img[0].cpu()
img = inverse_transform(img)
img.show()

time.sleep(5)
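
Example #20 is the body of a gradient-ascent (deep-dream style) loop; the inverse_transform it calls at the end presumably undoes the ImageNet normalization and converts the tensor back to a PIL image. A hypothetical sketch with torchvision (the mean/std values are the usual ImageNet statistics, assumed here):

from torchvision import transforms

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

inverse_transform_sketch = transforms.Compose([
    # undo (x - mean) / std channel-wise, then convert the tensor to PIL
    transforms.Normalize(mean=[-m / s for m, s in zip(mean, std)],
                         std=[1.0 / s for s in std]),
    transforms.ToPILImage(),
])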