Example #1
def generate_extra_data(X=None,
                        Y=None,
                        n=3000,
                        seed=None,
                        folder=io.DATA_FOLDER + 'train',
                        file_name=None,
                        rotate=True,
                        noise=True):
    print "Creating {} new images".format(n)

    if X is None and Y is None:
        # automatically resizes images to (720, 1028)
        X, _, _ = io.create_test_train_split(samples=-1,
                                             auto_resize=True,
                                             folder=folder)
        X, Y = io.dataset_dict_to_array(X)

    if seed is not None:
        np.random.seed(seed)

    # sample indices uniformly over the full dataset (randint's upper bound is exclusive)
    positions = np.random.randint(0, X.shape[0], n)
    transformed = X[positions, :]
    targets = Y[positions, :]
    final = np.zeros_like(transformed)
    final_resized_190_320 = np.zeros((n, 190, 320, 3))
    final_resized_244_244 = np.zeros((n, 244, 244, 3))
    final_resized_48_48 = np.zeros((n, 48, 48, 3))

    # slight perturbations in the angle of the image
    angles = np.random.uniform(-20, +20, n)
    for i in range(n):
        temp = transformed[i, :]

        if rotate:
            temp = imrotate(temp, angles[i], reshape=False)
        if noise:
            rand_noise = np.random.randint(0, 255, temp.shape)
            keepers = np.random.binomial(1, 0.95, size=temp.shape)
            temp = temp * keepers + rand_noise * (1 - keepers)

        final[i, :] = temp
        final_resized_190_320[i, :] = imresize(temp, size=(190, 320))
        final_resized_244_244[i, :] = imresize(temp, size=(244, 244))
        final_resized_48_48[i, :] = imresize(temp, size=(48, 48))

        if i % 1000 == 0:
            print "Created {} images.".format(i)

    if file_name:
        if not file_name.startswith("./data/"):
            file_name = folder + file_name
        np.save(file_name + '.npy', final)
        np.save(file_name + '_190_320.npy', final_resized_190_320)
        np.save(file_name + '_244_244.npy', final_resized_244_244)
        np.save(file_name + '_48_48.npy', final_resized_48_48)
        np.save(file_name + "_targets.npy", targets)
    return final, targets
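A minimal usage sketch for `generate_extra_data`, assuming the surrounding module's imports (`numpy as np`, the project's `io` helper, and the `imrotate`/`imresize` functions) are in place; the sample count and file name below are illustrative, not from the source:

# Hypothetical call: create 500 augmented images with rotation and
# salt-and-pepper noise, and save the resized copies to disk.
augmented, augmented_targets = generate_extra_data(n=500,
                                                   seed=42,
                                                   file_name='augmented_train',
                                                   rotate=True,
                                                   noise=True)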
Example #2
def load_imgs_from_paths(paths, auto_resize=True):

    # shuffle paths so that when we do test/train splits
    # we get shuffled distributions
    paths = sampler(paths, len(paths))

    if isinstance(auto_resize, tuple) and len(auto_resize) == 2:
        # resize to specified value
        imgs = [imresize(read_image(path), size=auto_resize) for path in paths]
    elif isinstance(auto_resize, bool) and auto_resize:
        # automatically resizes to image_utils.AUTO_RESIZE_DEFAULT
        imgs = [imresize(read_image(path)) for path in paths]
    else:
        # no resize
        imgs = [read_image(path) for path in paths]
    return imgs
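A short usage sketch for `load_imgs_from_paths`; the glob pattern is illustrative and it assumes the module's `read_image`, `imresize`, and `sampler` helpers are importable:

from glob import glob

# Hypothetical image folder; resize every image to 190x320 while loading.
train_paths = glob('./data/train/*.jpg')
train_imgs = load_imgs_from_paths(train_paths, auto_resize=(190, 320))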
Example #3
assert direction == 'a2b' or direction == 'b2a', 'Direction should be a2b or b2a!'
""" run """
frames = []
a_real_ipt_ori = gif_frames(Image.open(gif_path))
size_ori = a_real_ipt_ori.shape[0:3]
with tf.Session() as sess:
    a_real = tf.placeholder(tf.float32,
                            shape=[None, crop_size, crop_size, crop_size, 3])
    a2b = models.generator(a_real, direction)

    # restore
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset, sess, saver)
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    else:
        print('Copy variables from %s' % ckpt_path)
    a_real_ipt = np.zeros(
        (1, len(a_real_ipt_ori), crop_size, crop_size, crop_size, 3))
    a_real_ipt[0, ...] = im.imresize(a_real_ipt_ori,
                                     [crop_size, crop_size, crop_size])
    a2b_opt = sess.run(a2b, feed_dict={a_real: a_real_ipt})

    a2b_opt_ori = im.imresize(a2b_opt[0, ..., 0].squeeze(), size_ori)
    img_opt_ori = np.array([a_real_ipt_ori, a2b_opt_ori])
    img_opt_ori = im.im2uint(im.immerge(img_opt_ori, 1, 1, 2))

writeGif(save_path, img_opt_ori, duration)
print('save in %s' % save_path)
Example #4
        shape = variable.get_shape()
        print(shape)
        print(len(shape))
        variable_parameters = 1
        for dim in shape:
            print(dim)
            variable_parameters *= dim.value
        print(variable_parameters)
        total_parameters += variable_parameters
    print("\nTotal parameters:\n", total_parameters)

    # start = time.time()
    # Inference
    for i in range(len(a_list)):
        # Define shapes for images fed to the graph
        a_feed = im.imresize(im.imread(a_list[i]), [crop_size, crop_size])
        a_feed.shape = 1, crop_size, crop_size, 3

        # Feed in images to the graph
        a2b_result = sess.run(a2b, feed_dict={a_input: a_feed})

        # Create and save the output image
        a_img_opt = np.concatenate((a_feed, a2b_result), axis=0)
        img_name = os.path.basename(a_list[i])
        im.imwrite(im.immerge(a_img_opt, 1, 2), a_save_dir + '/' + img_name)
        print('Save %s' % (a_save_dir + '/' + img_name))

    #     if i == 100:
    #         end = time.time()
    # end2 = time.time()
    # print("Time to process first 100 images:", end - start)
Example #5
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset, sess, saver)
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    else:
        print('Copy variables from %s' % ckpt_path)

    # test
    a_list = glob('./datasets/' + dataset + '/testA/*.jpg')
    b_list = glob('./datasets/' + dataset + '/testB/*.jpg')

    a_save_dir = './test_predictions/' + dataset + '/testA/'
    b_save_dir = './test_predictions/' + dataset + '/testB/'
    utils.mkdir([a_save_dir, b_save_dir])
    for i in range(len(a_list)):
        a_real_ipt = im.imresize(im.imread(a_list[i]), [crop_size, crop_size])
        a_real_ipt.shape = 1, crop_size, crop_size, 3
        a2b_opt, a2b2a_opt = sess.run([a2b, a2b2a],
                                      feed_dict={a_real: a_real_ipt})
        a_img_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt), axis=0)

        img_name = os.path.basename(a_list[i])
        im.imwrite(im.immerge(a_img_opt, 1, 3), a_save_dir + img_name)
        print('Save %s' % (a_save_dir + img_name))

    for i in range(len(b_list)):
        b_real_ipt = im.imresize(im.imread(b_list[i]), [crop_size, crop_size])
        b_real_ipt.shape = 1, crop_size, crop_size, 3
        b2a_opt, b2a2b_opt = sess.run([b2a, b2a2b],
                                      feed_dict={b_real: b_real_ipt})
        b_img_opt = np.concatenate((b_real_ipt, b2a_opt, b2a2b_opt), axis=0)
Example #6
assert direction == 'a2b' or direction == 'b2a', 'Direction should be a2b or b2a!'
""" run """
frames = []
a_reals_ipt_ori = gif_frames(Image.open(gif_path))
size_ori = a_reals_ipt_ori[0].shape[0:2]
with tf.Session() as sess:
    a_real = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])
    a2b = models.generator(a_real, direction)

    # restore
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset, sess, saver)
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    else:
        print('Copy variables from %s' % ckpt_path)

    for a_real_ipt_ori in a_reals_ipt_ori:
        a_real_ipt = im.imresize(a_real_ipt_ori, [crop_size, crop_size])
        a_real_ipt.shape = 1, crop_size, crop_size, 3
        a2b_opt = sess.run(a2b, feed_dict={a_real: a_real_ipt})

        a2b_opt_ori = im.imresize(a2b_opt.squeeze(), size_ori)
        img_opt_ori = np.array([a_real_ipt_ori, a2b_opt_ori])
        img_opt_ori = im.im2uint(im.immerge(img_opt_ori, 1, 2))
        frames.append(img_opt_ori)

writeGif(save_path, frames, duration)
print('save in %s' % save_path)
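For context, a hedged sketch of how the free variables this script expects might be configured; every value below is illustrative, not taken from the source:

gif_path = './pics/input.gif'    # hypothetical input animation
save_path = './pics/output.gif'  # hypothetical output path
dataset = 'horse2zebra'          # hypothetical checkpoint/dataset name
direction = 'a2b'                # must be 'a2b' or 'b2a'
crop_size = 256                  # generator input resolution
duration = 0.1                   # seconds per frame passed to writeGif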