Example #1
def train_net(x,y,w,h,c,H=200,d=2,niter=200000,bz=256,traj_iter=1000):
    bw = (y.shape[-1] == 1)
    net = ReLUNet(H=H,d=d,bw=bw).cuda()
    net.apply(weight_init)

    x = Variable(torch.from_numpy(x)).cuda()
    y = Variable(torch.from_numpy(y)).cuda()

    optim = torch.optim.Adam(net.parameters(), lr=1e-3) #lr=1e-4)
    mse = nn.MSELoss().cuda()
    T = []
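    # training loop: sample a random minibatch each iteration and take an Adam step on the MSE loss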
    for itr in range(niter):
        optim.zero_grad()
        b = np.random.randint(0,x.shape[0],bz)
        y_ = net(x[b])
        loss = mse(y_,y[b])
        #loss = torch.sum((y_-y)**2)
        loss.backward(retain_graph=True)
        optim.step()
        if (itr%traj_iter == 0):
            out_np = net(x).detach().cpu().numpy().reshape((w, h, c))
            if bw:
                out_np = out_np[:,:,0]
            utils.imwrite(os.path.join(output_dir,'itr'+str(itr)+'.png'),out_np)
            T.append(out_np)
            print('Iteration '+str(itr)+': '+str(loss.data))
            del out_np
    
    return T
Example #2
def make_denoise_small_ds(source_dir, target_dir):
    factor = 0.25
    interp = cv2.INTER_AREA  # for downsampling
    if not os.path.exists(target_dir):
        os.system('mkdir ' + target_dir)
    for img_dir in os.listdir(source_dir):
        img_name = os.path.split(img_dir)[-1]
        if len(img_name) == 0:
            img_name = os.path.split(img_dir)[-2]
        ext = '.png'
        clean_img = os.path.join(source_dir, img_dir, img_name + ext)
        img = utils.imread(clean_img)
        size = (int(img.shape[1] * factor), int(img.shape[0] * factor))
        img = cv2.resize(img, size, interpolation=interp)
        target_img_dir = os.path.join(target_dir, img_name)
        if not os.path.exists(target_img_dir):
            os.system('mkdir ' + target_img_dir)
        target_clean = os.path.join(target_img_dir, img_name + ext)
        utils.imwrite(target_clean, img)
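        # add noise at several sigma levels (sigma given on a 0-255 scale, rescaled to [0, 1] below)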
        for sigma in [5, 10, 20, 25, 30, 35, 40, 50, 60, 70, 75, 80, 90, 100]:
            output_file = os.path.join(target_dir, img_dir,
                                       img_name + '_s' + str(sigma) + ext)
            sigma = sigma / 255.
            noisy_img = utils.get_noisy_image(img, sigma)
            utils.imwrite(output_file, noisy_img)
Example #3
def renderframe(modeltest, outname, sess, upsample_method):
    # TODO finish this
    print("Model: " + modeltest + ' saved test file ' + outname)
    # load test image
    input_img_path = '/home/kth/deepstuff/frames/bk01.jpg'
    testimg = utils.imread2(input_img_path)
    testimg = utils.imresize(testimg, 1)
    #testimg = utils.imresize_xy(testimg,256,256)
    testimg_4d = testimg[np.newaxis, :]  #  .astype(np.float32)

    #    tf.reset_default_graph()
    with tf.variable_scope('img_t_net_test', reuse=tf.AUTO_REUSE):
        Xtest = tf.placeholder(tf.float32,
                               shape=testimg_4d.shape,
                               name='input')
        Ytest = create_net(Xtest, upsample_method)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    print("Evaluating test image...")

    with tf.Session() as sesstest:
        sesstest.run(init_op)
        img_out = sesstest.run(Ytest, feed_dict={Xtest: testimg_4d})
    img_out = np.squeeze(img_out)
    utils.imwrite(outname, img_out)
Example #4
def main():
    """Main function."""
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tf_config = {'rnd.np_random_seed': 1}
    tflib.init_tf(tf_config)
    assert os.path.exists(args.restore_path)
    _, _, _, Gs, _ = load_pkl(args.restore_path)
    latent_dim = Gs.components.synthesis.input_shape[2]
    print(f'Latent dimension shape: {latent_dim}')

    # Building graph
    Z = tf.placeholder('float32', [None, latent_dim], name='Gaussian')
    print(f'Z in tensorflow graph: {Z.shape}')
    sampling_from_z = Gs.get_output_for(Z, None, randomize_noise=True)
    sess = tf.get_default_session()

    save_dir = args.output_dir or './outputs/sampling'
    os.makedirs(save_dir, exist_ok=True)

    print('Sampling...')
    for it in tqdm(range(args.total_nums)):
        samples = sess.run(
            sampling_from_z,
            {Z: np.random.randn(args.batch_size * 2, latent_dim)})
        samples = samples.transpose(0, 2, 3, 1)
        print(f'shape of output: {samples.shape}')
        imwrite(immerge(samples, 2, args.batch_size),
                '%s/sampling_%06d_newseed.png' % (save_dir, it))
Example #5
def main():
    """Main function."""
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tf_config = {'rnd.np_random_seed': 1000}
    tflib.init_tf(tf_config)
    assert os.path.exists(args.restore_path)
    _, _, _, Gs, _ = load_pkl(args.restore_path)
    num_layers = Gs.components.synthesis.input_shape[1]

    batch_codes = np.load(args.data_dir_encode)
    print(batch_codes.shape)
    latent_dim = batch_codes.shape[1]
    print(f'Latent dimension shape: {latent_dim}')

    # Building graph
    w_vec = tf.placeholder('float32', [None, latent_dim], name='w_codes')
    print(f'W in tensorflow graph: {w_vec.shape}')
    encoder_w_tile = tf.tile(w_vec[:, np.newaxis], [1, num_layers, 1])
    print(f'encoder_w_tile size: {encoder_w_tile.shape}')
    reconstructor = Gs.components.synthesis.get_output_for(
        encoder_w_tile, randomize_noise=False)
    sess = tf.get_default_session()

    save_dir = args.output_dir or './outputs/rebuild_encodings'
    os.makedirs(save_dir, exist_ok=True)

    print('Creating Images...')
    samples = sess.run(reconstructor, {w_vec: batch_codes})
    samples = samples.transpose(0, 2, 3, 1)
    print(f'shape of output: {samples.shape}')
    imwrite(immerge(samples, 4, args.batch_size),
            '%s/decode_00000_new1.png' % (save_dir))
Example #6
def main():
    """Main function."""
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tf_config = {'rnd.np_random_seed': 1000}
    tflib.init_tf(tf_config)
    assert os.path.exists(args.restore_path)
    E, _, _, Gs, _ = load_pkl(args.restore_path)
    num_layers = Gs.components.synthesis.input_shape[1]

    # Building graph
    real = tf.placeholder('float32', [None, 3, args.image_size, args.image_size], name='real_image')
    encoder_w = E.get_output_for(real, phase=False)
    encoder_w_tile = tf.tile(encoder_w[:, np.newaxis], [1, num_layers, 1])
    reconstructor = Gs.components.synthesis.get_output_for(encoder_w_tile, randomize_noise=False)
    sess = tf.get_default_session()

    # Preparing data
    input_images, _ = preparing_data(im_path=args.data_dir_test, img_type=args.img_type)

    save_dir = args.output_dir or './outputs/reconstruction'
    os.makedirs(save_dir, exist_ok=True)

    print('Reconstructing...')
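    # push each batch of test images through the encoder + synthesis graph and save originals next to reconstructions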
    for it, image_id in tqdm(enumerate(range(0, input_images.shape[0], args.batch_size))):
        batch_images = input_images[image_id:image_id+args.batch_size]
        rec = sess.run(reconstructor, feed_dict={real: batch_images})
        orin_recon = np.concatenate([batch_images, rec], axis=0)
        orin_recon = orin_recon.transpose(0, 2, 3, 1)
        imwrite(immerge(orin_recon, 2, batch_images.shape[0]),
                '%s/reconstruction_%06d.png' % (save_dir, it))
Example #7
    def on_epoch_end(self, val_loss=None, logs=None):
        # save model
        global num_epoch
        global dataset_name
        global img_rows
        global img_cols
        global mbllen
        num_epoch += 1
        modelname = './models/' + str(num_epoch) + '_' + dataset_name + '_base.h5'
        mbllen.save_weights(modelname)

        # test val data
        path = glob('../dataset/test/*.jpg')
        number = 0
        psnr_ave = 0

        for i in range(len(path)):
            if i>15:
                break

            img_B_path = path[i]
            img_B = utils.imread_color(img_B_path)

            path_mid = os.path.split(img_B_path)
            path_A_1 = path_mid[0] + '_' + dataset_name
            path_A = os.path.join(path_A_1, path_mid[1])
            img_A = utils.imread_color(path_A)

            nw = random.randint(0, img_A.shape[0] - img_rows)
            nh = random.randint(0, img_A.shape[1] - img_cols)

            crop_img_A = img_A[nw:nw + img_rows, nh:nh + img_cols, :]
            crop_img_B = img_B[nw:nw + img_rows, nh:nh + img_cols, :]

            crop_img_A = crop_img_A[np.newaxis, :]
            crop_img_B = crop_img_B[np.newaxis, :]

            fake_B = mbllen.predict(crop_img_A)
            identy_B = mbllen.predict(crop_img_B)

            out_img = np.concatenate([crop_img_A, fake_B, crop_img_B, identy_B], axis=2)
            out_img = out_img[0, :, :, :]

            fake_B = fake_B[0, :, :, :]
            img_B = crop_img_B[0, :, :, :]

            clean_psnr = utils.psnr_cau(fake_B, img_B)
            L_psnr = ("%.4f" % clean_psnr)

            number += 1
            psnr_ave += clean_psnr

            filename = os.path.basename(path[i])
            img_name = './val_images/' + str(num_epoch) + '_' + L_psnr + '_' + filename
            utils.imwrite(img_name, out_img)
        psnr_ave /= number
        print('------------------------------------------------')
        print("[Epoch %d]  [PSNR_AVE :%f]" % (num_epoch,  psnr_ave))
        print('------------------------------------------------')
Example #8
    def _inject_summary(self, tag, feed_dict, step):
        summaries = self.sess.run(self.summary_ops[tag], feed_dict)
        self.summary_writer.add_summary(summaries['summary'], step)

        path = os.path.join(
            self.config.sample_model_dir, "{}.png".format(step))
        imwrite(path, img_tile(summaries['output'],
                               tile_shape=self.config.sample_image_grid)[:, :, 0])
Example #9
def main():
    """Main function."""
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tf_config = {'rnd.np_random_seed': 1000}
    tflib.init_tf(tf_config)
    assert os.path.exists(args.restore_path)
    assert os.path.exists(args.boundary)
    E, _, _, Gs, _ = load_pkl(args.restore_path)
    num_layers, latent_dim = Gs.components.synthesis.input_shape[1:3]

    # Building graph
    real = tf.placeholder('float32',
                          [None, 3, args.image_size, args.image_size],
                          name='real_image')
    W = tf.placeholder('float32', [None, num_layers, latent_dim],
                       name='Gaussian')
    encoder_w = E.get_output_for(real, phase=False)
    reconstruction_from_w = Gs.components.synthesis.get_output_for(
        W, randomize_noise=False)
    sess = tf.get_default_session()

    # Preparing data
    input_images, images_name = preparing_data(im_path=args.data_dir_test,
                                               img_type=args.img_type)

    boundary = np.load(args.boundary)
    boundary_name = args.boundary.split('/')[-1].split('_')[0]

    save_dir = args.output_dir or './outputs/manipulation'
    os.makedirs(save_dir, exist_ok=True)

    print('manipulation in w space on %s' % (boundary_name))
    for i in tqdm(range(input_images.shape[0])):
        input_image = input_images[i:i + 1]
        im_name = images_name[i]
        latent_code = sess.run(encoder_w, feed_dict={real: input_image})
        codes = manipulate(latent_code,
                           boundary,
                           num_layers=num_layers,
                           step=args.step,
                           start_distance=args.start_distance,
                           end_distance=args.end_distance)
        inputs = np.zeros((args.batch_size, num_layers, latent_dim),
                          np.float32)
        output_images = []
        for idx in range(0, args.step, args.batch_size):
            batch = codes[idx:idx + args.batch_size]
            inputs[0:len(batch)] = batch
            images = sess.run(reconstruction_from_w, feed_dict={W: inputs})
            output_images.append(images[0:len(batch)])
        output_images = np.concatenate(output_images, axis=0)
        output_images = np.concatenate([input_image, output_images], axis=0)
        output_images = output_images.transpose(0, 2, 3, 1)
        imwrite(immerge(output_images, 1, args.step + 1),
                '%s/%s_%s.png' % (save_dir, im_name, boundary_name))
Example #10
def make_denoise_ds(data_dir):
    for img_dir in os.listdir(data_dir):
        img_name = os.path.split(img_dir)[-1]
        if len(img_name) == 0:
            img_name = os.path.split(img_dir)[-2]
        ext = '.png'
        clean_img = os.path.join(data_dir, img_dir, img_name + ext)
        img = utils.imread(clean_img)
        for sigma in [5, 10, 20, 25, 30, 35, 40, 50, 60, 70, 75, 80, 90, 100]:
            output_file = os.path.join(data_dir, img_dir,
                                       img_name + '_s' + str(sigma) + ext)
            sigma = sigma / 255.
            noisy_img = utils.get_noisy_image(img, sigma)
            utils.imwrite(output_file, noisy_img)
Example #11
def main():
    """Main function."""
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tf_config = {'rnd.np_random_seed': 1000}
    tflib.init_tf(tf_config)
    assert os.path.exists(args.restore_path)
    _, _, _, Gs, _ = load_pkl(args.restore_path)
    num_layers = Gs.components.synthesis.input_shape[1]

    
    w_codes = np.load(args.data_dir_encode)
    boundary = np.load(args.boundary_path)
    print(w_codes.shape)
    latent_dim = w_codes.shape[1]
    total_num = w_codes.shape[0]
    
    # Building graph
    w_vec = tf.placeholder('float32', [None, latent_dim], name='w_codes')
    print(f'W in tensorflow graph: {w_vec.shape}')
    encoder_w_tile = tf.tile(w_vec[:, np.newaxis], [1, num_layers, 1])
    print(f'encoder_w_tile size: {encoder_w_tile.shape}')
    reconstructor = Gs.components.synthesis.get_output_for(encoder_w_tile, randomize_noise=False)
    sess = tf.get_default_session()

    save_dir = args.output_dir or './outputs/edited_images'
    os.makedirs(save_dir, exist_ok=True)
    
    for sample_id in tqdm(range(total_num), leave=True):
        #get edited codes for one image
        interpolations = linear_interpolate(w_codes[sample_id:sample_id+1],
                                            boundary,
                                            start_distance=args.start_distance,
                                            end_distance=args.end_distance,
                                            steps=args.steps)
        print(f'Interpolations= {interpolations.shape}')
        if np.array_equal(interpolations[0], interpolations[1]):
            print('STOP')
            sys.exit(1)
        interpolation_id = 0
        samples = sess.run(reconstructor, {w_vec: interpolations})
        samples = samples.transpose(0, 2, 3, 1)
        print(f'Samples= {samples.shape}')
        for image in samples:
            save_path = os.path.join(save_dir, f'{sample_id:03d}_{interpolation_id:03d}.jpg')
            imwrite(image, save_path)
            interpolation_id += 1
        print(f'interpolation_id: {interpolation_id}, steps: {args.steps}')
        assert interpolation_id == args.steps
Example #12
def reconstruction(sess, images, real, reconstructor, save_dir):
    it = 0
    for image_id in tqdm(range(0, len(images), args.batch_size)):
        images_name = images[image_id:image_id+args.batch_size]
        batch_images = []
        for im_name in images_name:
            batch_images.append(cv2.imread(im_name)[:, :, ::-1])
        batch_images = np.asarray(batch_images)
        batch_images = adjust_dynamic_range(batch_images.astype(np.float32), [0, 255], [-1., 1.])
        rec = sess.run(reconstructor, feed_dict={real: batch_images.transpose(0, 3, 1, 2)})
        rec = rec.transpose(0, 2, 3, 1)
        rec = np.clip(rec, -1., 1.)
        orin_recon = np.concatenate([batch_images, rec], axis=0)
        imwrite(immerge(orin_recon, 2, len(images_name)), '%s/iter_%06d.png' % (save_dir, it))
        it += 1
Example #13
    def test(self):
        batch_size = self.data_loader.batch_size
        num_epoch = len(self.data_loader.synthetic_data_paths) // batch_size

        for idx in trange(num_epoch, desc="Refine all synthetic images"):
            feed_dict = {
                self.model.synthetic_batch_size: batch_size,
            }
            res = self.model.test_refiner(self.sess,
                                          feed_dict,
                                          None,
                                          with_output=True)

            for image, filename in zip(res['output'], res['filename']):
                basename = os.path.basename(filename).replace(
                    "_cropped", "_refined")
                path = os.path.join(self.config.output_model_dir, basename)
                imwrite(path, image[:, :, 0])
Example #14
def manipulation(sess, images, real, Z, get_w_from_img, get_img_from_w, boundaries, save_dir, steps=11, start_distance=-5., end_distance=5.):
    linspace = np.linspace(start_distance, end_distance, steps)
    linspace = linspace.reshape(-1, 1).astype(np.float32)
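    # for every boundary file, scale the direction by the linspace offsets and shift each image's W code along it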
    for boundary in boundaries:
        attr = boundary.split('/')[-1].split('_')[1]
        print('manipulating on %s' % (attr))
        boundary_ = np.load(boundary)
        boundary_ = boundary_ * linspace
        for image in tqdm(images):
            img_1 = cv2.imread(image)[:, :, ::-1][np.newaxis]
            img_1_name = image.split('/')[-1].split('.')[0]
            img_1 = adjust_dynamic_range(img_1.astype(np.float32), [0, 255], [-1., 1.])
            latent_w = sess.run(get_w_from_img, feed_dict={real: img_1.transpose(0, 3, 1, 2)})
            inter = latent_w + boundary_
            mid_res = sess.run(get_img_from_w, feed_dict={Z: inter})
            mid_res = mid_res.transpose(0, 2, 3, 1)
            mid_res = np.concatenate([img_1, mid_res], axis=0)
            imwrite(immerge(mid_res, 1, steps + 1), '%s/%s_attr_%s.png' % (save_dir, img_1_name, attr))
Example #15
    def test(self):
        batch_size = self.data_loader.batch_size
        num_epoch = len(self.data_loader.synthetic_data_paths) // batch_size
        path = "tested-images"
        os.mkdir(path)
        for idx in trange(num_epoch, desc="Refine all synthetic images"):
            feed_dict = {
                self.model.synthetic_batch_size: batch_size,
            }
            res = self.model.test_refiner(
                self.sess, feed_dict, None, with_output=True)

            counter = 0
            for image, filename in zip(res['output'], res['filename']):
                # basename = os.path.basename(filename).replace(".jpg", "_refined.jpg")
                # path = os.path.join(self.config.output_model_dir, basename)
                write_path = os.path.join(path, "%d.jpg" % counter)
                imwrite(write_path, image[:, :, 0])
                counter += 1
Example #16
def load(config, data_path, sample_path, rng):
    if not os.path.exists(data_path):
        print('creating folder', data_path)
        os.makedirs(data_path)

    maybe_download_and_extract(config, data_path)
    synthetic_image_path = maybe_preprocess(config, data_path, sample_path)

    gaze_data = np.load(os.path.join(data_path, DATA_FNAME))
    real_data = gaze_data['real']

    if not os.path.exists(sample_path):
        os.makedirs(sample_path)

    print("[*] Save samples images in {}".format(data_path))
    random_idxs = rng.choice(len(real_data), 100)
    for idx, random_idx in enumerate(random_idxs):
        image_path = os.path.join(sample_path, "real_{}.png".format(idx))
        imwrite(image_path, real_data[random_idx])

    return real_data, synthetic_image_path
Example #17
def interpolation_on_w(sess, images, real, Z, get_w_from_img, get_img_from_w, step, save_dir):
    for image1 in tqdm(images):
        img_1 = cv2.imread(image1)[:, :, ::-1][np.newaxis]
        img_1 = adjust_dynamic_range(img_1.astype(np.float32), [0, 255], [-1., 1.])
        img_1_name = image1.split('/')[-1].split('.')[0]
        for image2 in images:
            img_2 = cv2.imread(image2)[:, :, ::-1][np.newaxis]
            img_2 = adjust_dynamic_range(img_2.astype(np.float32), [0, 255], [-1., 1.])
            img_2_name = image2.split('/')[-1].split('.')[0]

            latent_1 = sess.run(get_w_from_img, feed_dict={real: img_1.transpose(0, 3, 1, 2)})
            latent_2 = sess.run(get_w_from_img, feed_dict={real: img_2.transpose(0, 3, 1, 2)})

            linspace = np.linspace(0.0, 1.0, step)[:, np.newaxis].astype(np.float32)
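            # linearly interpolate between the two W codes and decode every intermediate step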
            mid_res = latent_1 + linspace * (latent_2 - latent_1)

            mid_res = sess.run(get_img_from_w, feed_dict={Z: mid_res})
            mid_res = mid_res.transpose(0, 2, 3, 1)
            mid_res = np.clip(mid_res, -1., 1.)
            mid_res = np.concatenate([img_1, mid_res, img_2], axis=0)

            imwrite(immerge(mid_res, 1, step + 2), '%s/%s_%s.png' % (save_dir, img_1_name, img_2_name))
Example #18
def generate_to_art(param):
    print('generate_to_art')

    input_img_path = param['input_img_path']
    output_img_path = param['output_img_path']
    model_path = param['model_path']
    upsample_method = param['upsample_method']
    content_target_resize = param['content_target_resize']

    # Read + preprocess input image.
    img = utils.imread(input_img_path)
    img = utils.imresize(img, content_target_resize)
    img_4d = img[np.newaxis, :]

    tf.reset_default_graph()

    # Create the graph.
    with tf.variable_scope('img_t_net'):
        X = tf.placeholder(tf.float32, shape=img_4d.shape, name='input')
        Y = create_net(X, upsample_method)

    # Filter the input image.
    with tf.Session() as sess:
        print('Loading up model...')
        # Saver used to restore the model to the session.
        saver = tf.train.Saver()

        saver.restore(sess, model_path)
        print('Evaluating...')
        img_out = sess.run(Y, feed_dict={X: img_4d})
        sess.close()

    # Postprocess + save the output image.
    print('Saving image.')
    img_out = np.squeeze(img_out)
    utils.imwrite(output_img_path, img_out)

    print('Done.')
Example #19
def make_inpaint_ds(data_dir):
    for img_dir in os.listdir(data_dir):
        img_name = os.path.split(img_dir)[-1]
        print(img_name)
        if len(img_name) == 0:
            img_name = os.path.split(img_dir)[-2]
        ext = '.png'
        img = utils.imread(os.path.join(data_dir, img_dir, img_name + ext))

        # read mask if available; otherwise generate a mask dropping 50% of the pixels
        mask_path = os.path.join(data_dir, img_dir, img_name + '_mask' + ext)
        if os.path.exists(mask_path):
            mask = utils.imread(mask_path)
        else:
            mask = np.ones((img.shape[0], img.shape[1]))
            mask = utils.add_mask_noise(mask)
            utils.imwrite(mask_path, mask)

        masked_img = utils.mask_img(img, mask)

        output_file = os.path.join(data_dir, img_dir,
                                   img_name + '_noisy' + ext)
        utils.imwrite(output_file, masked_img)
Example #20
def load(config, data_path, sample_path, rng):
    if not os.path.exists(data_path):
        print('creating folder', data_path)
        os.makedirs(data_path)

    synthetic_image_path = maybe_preprocess(config, data_path, sample_path)
    supervised_image_path = glob(
        os.path.join(data_path, config.synthetic_image_sup_dir, '*.png'))

    nuclei_data = np.load(os.path.join(data_path, DATA_FNAME))
    real_data = nuclei_data['real']
    ref_real_data = nuclei_data['ref_real']

    if not os.path.exists(sample_path):
        os.makedirs(sample_path)

    print("[*] Save samples images in {}".format(data_path))
    random_idxs = rng.choice(len(real_data), 100)
    for idx, random_idx in enumerate(random_idxs):
        image_path = os.path.join(sample_path, "real_{}.png".format(idx))
        imwrite(image_path, real_data[random_idx])

    return real_data, synthetic_image_path, ref_real_data, supervised_image_path
Example #21
def imwrite_fork(dert_, param, Ave, fork_history):
    """Output fork's gradient image."""
    # Select dert_ to draw:
    if param == 'i':
        o = dert_[0]
    elif param == 'g':
        o = dert_[1]
    elif param == 'm':
        assert len(dert_) in (5, 12)
        o = dert_[2]
    elif param == 'ga':
        o = dert_[7] if len(dert_) == 12 else dert_[6]
    else:
        o = None

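    # write the selected map thresholded to binary, min-max normalized to [0, 255], or unmodified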
    if OUTPUT_BIN:
        if fork_history[-1] == "a":
            Ave = ANGLE_AVE
        imwrite(OUTPUT_PATH + fork_history, (o > Ave) * 255)
    elif OUTPUT_NORMALIZE:
        imwrite(OUTPUT_PATH + fork_history,
                255 * (o - o.min()) / (o.max() - o.min()))
    else:
        imwrite(OUTPUT_PATH + fork_history, o)
Example #22
    model_path = args.model_path
    upsample_method = args.upsample_method
    content_target_resize = args.content_target_resize

    # Read + preprocess input image.
    img = utils.imread(input_img_path)
    img = utils.imresize(img, content_target_resize)
    img_4d = img[np.newaxis, :]

    # Create the graph.
    with tf.variable_scope('img_t_net'):
        X = tf.placeholder(tf.float32, shape=img_4d.shape, name='input')
        Y = create_net(X, upsample_method)

    # Saver used to restore the model to the session.
    saver = tf.train.Saver()

    # Filter the input image.
    with tf.Session() as sess:
        print('Loading up model...')
        saver.restore(sess, model_path)
        print('Evaluating...')
        img_out = sess.run(Y, feed_dict={X: img_4d})

    # Postprocess + save the output image.
    print('Saving image.')
    img_out = np.squeeze(img_out)
    utils.imwrite(output_img_path, img_out)

    print('Done.')
Example #23
        g_summary_opt, _ = sess.run([g_summary, g_step], feed_dict={z: z_ipt})
        summary_writer.add_summary(g_summary_opt, it)

        # display
        if it % 1 == 0:
            print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch))

        # save
        if (it + 1) % 1000 == 0:
            save_path = saver.save(
                sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                (ckpt_dir, epoch, it_epoch, batch_epoch))
            print('Model saved in file: % s' % save_path)

        # sample
        if (it + 1) % 100 == 0:
            f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample})

            save_dir = './sample_images_while_training/celeba_wgan'
            utils.mkdir(save_dir + '/')
            utils.imwrite(
                utils.immerge(f_sample_opt, 10,
                              10), '%s/Epoch_(%d)_(%dof%d).jpg' %
                (save_dir, epoch, it_epoch, batch_epoch))

except Exception as e:
    traceback.print_exc()
finally:
    print(" [*] Close main session!")
    sess.close()
Example #24
def saveSampleImgs(imgs, full_path, row, column):
    utils.imwrite(utils.immerge(imgs, row, column), full_path)
Example #25
def train():
    alpha_span = 800000
    batch_size = 32
    ckpt_dir = './checkpoints/wgp'
    n_gen = 1
    n_critic = 1
    it_start = 0
    #epoch = 20*(alpha_span * 2 // (2*4936)) # 4936 is number of images
    
    def preprocess_fn(img):
        img = tf.image.resize_images(img, [target_size, target_size], method=tf.image.ResizeMethod.AREA) / 127.5 -1
        return img

    def preprocess_fn_dummy(img):
        img = tf.image.resize_images(img, [final_size, final_size], method=tf.image.ResizeMethod.AREA) / 127.5 -1
        return img
    
    # dataset
    img_paths = glob.glob('./imgs/faces/*.png')
    data_pool = utils.DiskImageData(5, img_paths, batch_size//2, shape=[640, 640, 3], preprocess_fn=preprocess_fn)
    data_pool_dummy = utils.DiskImageData(7, img_paths, 1, shape=[640, 640, 3], preprocess_fn=preprocess_fn_dummy)    
    batch_epoch = len(data_pool) // (batch_size * 1)#n_critic

    # build graph
    print('Building a graph ...')
    nodes = build(batch_size)
    # session
    sess = utils.session()
    saver = tf.train.Saver()
    # summary
    summary_writer = tf.summary.FileWriter('./summaries/wgp/', sess.graph)
    utils.mkdir(ckpt_dir + '/')

    print('Initializing all variables ...')
    sess.run(tf.global_variables_initializer())
    
    # run final size session for storing all variables to be used into the optimizer
    print('Running final size dummy session ...')
    #if target_size == initial_size and len(sys.argv) <= 3:
    #    _ = sess.run([nodes['dummy']['d']], feed_dict=get_ipt(2, final_size, 1.0, data_pool_dummy ,z_dim, nodes['dummy']['input'] ))
    #    _ = sess.run([nodes['dummy']['g']], feed_dict=get_ipt(2, final_size, 1.0, data_pool_dummy ,z_dim, nodes['dummy']['input'] ))
        
    # load checkpoint
    if len(sys.argv)>3 and sys.argv[2]=='resume':
        print ('Loading the checkpoint ...')
        saver.restore(sess, ckpt_dir+'/model.ckpt')
        it_start = 1 + int(sys.argv[3])
    last_saved_iter = it_start - 1

    ''' train '''
    for it in range(it_start, 9999999999):
        # fade alpha
        alpha_ipt = it / (alpha_span / batch_size)
        if alpha_ipt > 1 or target_size == initial_size:
            alpha_ipt = 1.0
        print('Alpha : %f' % alpha_ipt)
        alpha_ipt = 1.0
        
        # train D
        for i in range(n_critic):
            d_summary_opt, _ = sess.run([nodes['summaries']['d'], nodes['product']['d']],\
                feed_dict=get_ipt(batch_size, target_size, alpha_ipt, data_pool, z_dim, nodes['product']['input']))
        summary_writer.add_summary(d_summary_opt, it)

        # train G
        for i in range(n_gen):
            g_summary_opt, _ = sess.run([nodes['summaries']['g'], nodes['product']['g']],\
                feed_dict=get_ipt(batch_size, target_size, alpha_ipt, data_pool, z_dim, nodes['product']['input']))
        summary_writer.add_summary(g_summary_opt, it)
        
        # display
        epoch = it // batch_epoch
        it_epoch = it % batch_epoch + 1
        if it % 1 == 0:
            print("iter : %8d, epoch : (%3d) (%5d/%5d) _ resume point : %d" % (it, epoch, it_epoch, batch_epoch,last_saved_iter))

        # sample
        if (it + 1) % batch_epoch == 0:
            f_sample_opt = sess.run(nodes['sample'], feed_dict=get_ipt_for_sample(batch_size, z_dim, nodes['product']['input']))
            f_sample_opt = np.clip(f_sample_opt, -1, 1)
            save_dir = './sample_images_while_training/wgp/'
            utils.mkdir(save_dir + '/')
            osz = int(math.sqrt(batch_size))+1
            utils.imwrite(utils.immerge(f_sample_opt, osz, osz), '%s/iter_(%d).png' % (save_dir, it))
            
        # save
        if (it + 1) % batch_epoch == 0:
            last_saved_iter = it
            save_path = saver.save(sess, '%s/model.ckpt' % (ckpt_dir))
            print('Model saved in file: %s' % save_path)
Example #26
def main():

    with tf.name_scope('input'):
        real = tf.placeholder(
            'float32', [args.batch_size, 3, args.image_size, args.image_size],
            name='real_image')
        z = tf.placeholder('float32', [args.batch_size, args.z_dim],
                           name='Gaussian')
        lr_g = tf.placeholder(tf.float32, None, name='learning_rate_g')
        lr_d = tf.placeholder(tf.float32, None, name='learning_rate_d')

    G_x = generator_x(input_z=z, reuse=False)
    G_z = generator_z(input_x=real, reuse=False)

    dis_fake, dis_fake_logit = discriminator(input_x=G_x,
                                             input_z=z,
                                             reuse=False)
    dis_real, dis_real_logit = discriminator(input_x=real,
                                             input_z=G_z,
                                             reuse=True)

    Reconstruction = generator_x(generator_z(real, reuse=True), reuse=True)

    with tf.variable_scope('generator_loss'):
        G_loss_img = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_fake_logit, labels=tf.ones_like(dis_fake_logit)))
        G_loss_z = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_real_logit, labels=tf.zeros_like(dis_real_logit)))
        G_loss = G_loss_img + G_loss_z

    with tf.variable_scope('discriminator_loss'):
        D_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_real_logit, labels=tf.ones_like(dis_real_logit)))
        D_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_fake_logit, labels=tf.zeros_like(dis_fake_logit)))
        D_loss = D_loss_real + D_loss_fake

    Genx_vars = [
        v for v in tf.global_variables() if v.name.startswith("generator_x")
    ]
    Genz_vars = [
        v for v in tf.global_variables() if v.name.startswith("generator_z")
    ]
    Dis_vars = [
        v for v in tf.global_variables() if v.name.startswith("discriminator")
    ]

    G_solver = tf.train.AdamOptimizer(lr_g, args.beta1, args.beta2).minimize(
        G_loss, var_list=Genx_vars + Genz_vars)
    D_solver = tf.train.AdamOptimizer(lr_d, args.beta1,
                                      args.beta2).minimize(D_loss,
                                                           var_list=Dis_vars)

    saver = tf.train.Saver(max_to_keep=5)
    sess = tensorflow_session()

    if args.restore_path != '':
        print('restore weights from {}'.format(args.restore_path))
        saver.restore(sess, tf.train.latest_checkpoint(args.restore_path))
        print('Load weights finished!!!')
    else:
        sess.run(tf.global_variables_initializer())

    print('Getting training HQ data...')
    image_batch_train = get_train_data(sess,
                                       data_dir=args.data_dir_train,
                                       batch_size=args.batch_size)
    image_batch_test = get_train_data(sess,
                                      data_dir=args.data_dir_test,
                                      batch_size=args.batch_size)

    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)

    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

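    # adversarial training loop: one discriminator step and one generator step per iteration, with periodic sampling, reconstruction, and checkpointing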
    for it in range(120000):

        latent_z = np.random.randn(args.batch_size,
                                   args.z_dim).astype(np.float32)
        batch_images_train = sess.run(image_batch_train)
        batch_images_train = adjust_dynamic_range(
            batch_images_train.astype(np.float32), [0, 255], [0., 1.])

        feed_dict_1 = {
            real: batch_images_train,
            z: latent_z,
            lr_g: args.learning_rate * 10,
            lr_d: args.learning_rate * 0.1
        }
        _, d_loss_real, d_loss_fake = sess.run(
            [D_solver, D_loss_real, D_loss_fake], feed_dict=feed_dict_1)
        _, g_loss_img, g_loss_z = sess.run([G_solver, G_loss_img, G_loss_z],
                                           feed_dict=feed_dict_1)

        if it % 50 == 0:
            print(
                'Iter: {}  g_loss_img: {} g_loss_z: {} d_r_loss: {} d_f_loss_: {}'
                .format(it, g_loss_img, g_loss_z, d_loss_real, d_loss_fake))

            if it % 1000 == 0:
                samples1 = sess.run(G_x, feed_dict={z: latent_z})
                samples1 = adjust_dynamic_range(samples1.transpose(0, 2, 3, 1),
                                                drange_in=[0, 1],
                                                drange_out=[-1, 1])
                imwrite(immerge(samples1[:36, :, :, :], 6, 6),
                        '%s/epoch_%d_sampling.png' % (args.log_dir, it))

                batch_images_test = sess.run(image_batch_test)
                batch_images_test = adjust_dynamic_range(
                    batch_images_test.astype(np.float32), [0, 255], [0., 1.])

                recon = sess.run(Reconstruction,
                                 feed_dict={real: batch_images_test})
                recon = adjust_dynamic_range(recon.transpose(0, 2, 3, 1),
                                             drange_in=[0, 1],
                                             drange_out=[-1, 1])
                imwrite(immerge(recon[:36, :, :, :], 6, 6),
                        '%s/epoch_%d_recon.png' % (args.log_dir, it))

                batch_images = adjust_dynamic_range(
                    batch_images_test.transpose(0, 2, 3, 1),
                    drange_in=[0, 1],
                    drange_out=[-1, 1])
                imwrite(immerge(batch_images[:36, :, :, :], 6, 6),
                        '%s/epoch_%d_orin.png' % (args.log_dir, it))

        if np.mod(it, 10000) == 0 and it > 50000:
            saver.save(sess, args.checkpoint_dir, global_step=it)
Example #27
def train():

    graph = tf.Graph()

    with graph.as_default():

        z = tf.placeholder(tf.float32, shape=[64, 100], name='z')

        img_batch = read_records.read_and_decode(
            'tf_records/cartoon.tfrecords', batch_size=batch_size)
        #generator
        # fake=models.generator(z, stddev=0.02, alpha=alpha, name='generator', reuse=False)
        #
        # #discriminator
        # dis_real=models.discriminator(img_batch , alpha=alpha, batch_size=batch_size)
        # dis_fake=models.discriminator(fake,  alpha=alpha, reuse=True)

        #generator
        fake = models.generator(z, reuse=False)  #, is_training=True

        #discriminator
        dis_real = models.discriminator(img_batch,
                                        reuse=False)  #is_training=True
        dis_fake = models.discriminator(fake, reuse=True)  #,  is_training=True

        # #losses
        # gene_loss = tf.reduce_mean(tf.squared_difference(dis_fake, 0.9))
        # dis_loss = (tf.reduce_mean(tf.squared_difference(dis_real, 0.9))
        #             + tf.reduce_mean(tf.square(dis_fake))) / 2

        gene_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(dis_fake) * 0.9, logits=dis_fake))
        d_f_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(dis_fake), logits=dis_fake))
        d_r_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(dis_real) * 0.9, logits=dis_real))
        dis_loss = d_f_loss + d_r_loss

        gen_loss_sum = tf.summary.scalar("gen_loss", gene_loss)
        dis_loss_sum = tf.summary.scalar("dis_loss", dis_loss)
        merge_sum_gen = tf.summary.merge([gen_loss_sum])
        merge_sum_dis = tf.summary.merge([dis_loss_sum])

        #variables
        gene_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope='generator')
        dis_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                    scope='discriminator')

        gene_opt = tf.train.AdamOptimizer(
            learning_rate=0.0002, beta1=0.3).minimize(gene_loss,
                                                      var_list=gene_var)
        dis_opt = tf.train.AdamOptimizer(learning_rate=0.0002,
                                         beta1=0.3).minimize(dis_loss,
                                                             var_list=dis_var)

        test_sample = models.generator(z, reuse=True)  #,  is_training=False
        test_out = tf.add(test_sample, 0, 'test_out')

        init = tf.global_variables_initializer()
    print('t')

    with tf.Session(graph=graph) as sess:
        sess.run(init)  # initialize global variables

        z_ipt_sample = np.random.normal(size=[batch_size, 100])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        writer = tf.summary.FileWriter('./tensorboard', sess.graph)
        saver = tf.train.Saver()
        try:
            for i in range(run_nums):
                z_ipt = np.random.normal(size=[batch_size, 100])
                #train D
                #_, dis_loss1 = sess.run([dis_opt,dis_loss],feed_dict={real:img_batch,z:z_ipt})
                sum_dis, _, dis_loss1 = sess.run(
                    [merge_sum_dis, dis_opt, dis_loss], feed_dict={z: z_ipt})
                #train G
                sum_gen, _, gen_loss1 = sess.run(
                    [merge_sum_gen, gene_opt, gene_loss], feed_dict={z: z_ipt})

                if i % 400 == 0:
                    print(i)
                    test_sample_opt = sess.run(test_sample,
                                               feed_dict={z: z_ipt_sample})
                    #print(type(test_sample_opt),test_sample_opt.shape)
                    utils.mkdir('out_cartoon')
                    utils.imwrite(utils.immerge(test_sample_opt, 10, 10),
                                  'out_cartoon/' + str(i) + '.jpg')
                # writer.add_summary(sum_dis, i)
                #writer.add_summary(sum_gen, i)
            print("train end!!!")

        except tf.errors.OutOfRangeError:
            print('out of range')
        finally:
            coord.request_stop()

        coord.request_stop()
        coord.join(threads)
        writer.close()
        saver.save(sess, "./checkpoints/DCGAN")
Example #28
 all_luts = []
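 # run the detector on every input image and collect a per-file lookup table of boxes (plus optional response maps and JSON dumps)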
 for image_file in input_files:
     res_map = safe_decoder(text_detector, image_file, res_type)
     if (res_map is not None):
         file_bname = os.path.basename(image_file).rsplit('.')[0]
         lut = {'input_file': image_file}
         blut = from_res_map_to_bbox(res_map,
                                     th_size=th_size,
                                     th_prob=th_prob,
                                     border_perc=.16)
         lut.update(blut)
         # write response map if necessary
         if (not no_map):
             output_res_file = os.path.join(output_dir, file_bname + '.png')
             lut['response_map'] = output_res_file
             imwrite(
                 np.round(res_map * 255).astype('uint8'), output_res_file)
             verbose_print(['INFO: dump response map to', output_res_file],
                           verbose, 1)
         # write output detection json file if necessary
         if (not no_box):
             output_box_json = os.path.join(output_dir,
                                            file_bname + '.json')
             json.dump(lut, open(output_box_json, 'w'), indent=4)
             verbose_print(['INFO: dump text bbox to', output_box_json],
                           verbose, 1)
         all_luts.append(lut)
 if (no_box):
     output_mega_json = os.path.join(output_dir, 'decoded_bbox.json')
     json.dump(all_luts, open(output_mega_json, 'w'), indent=4)
     verbose_print(['INFO: dump mega text bbox to', output_mega_json],
                   verbose, 1)
Example #29
                                                     root_fcr, fig)
        blob['Dert']['M'] = ave - blob['Dert']['M']

        blob['dert__'], fga, root_fcr, fig = layer_2(blob['dert__'],
                                                     blob['Dert']['G'],
                                                     blob['Dert']['M'], fga,
                                                     root_fcr, fig)
        blob['Dert']['M'] = ave - blob['Dert']['M']

        blob['dert__'], fga, root_fcr, fig = layer_3(blob['dert__'],
                                                     blob['Dert']['G'],
                                                     blob['Dert']['M'], fga,
                                                     root_fcr, fig)

    frame = map_frame_binary(frame, sign_map={1: 'white', 0: 'black'})
    imwrite("./images/test_intra_comp.bmp", frame)
    print('Done!')
    '''
    # Recursive comps:
    recursive_comp(dert_=dert_,
                   rng=1,
                   fca=1,
                   nI=2,
                   Ave=ave,
                   fork_history="i",
                   depth=MAX_DEPTH)
    print('Done!')
    print('Terminating...')'''


def imwrite_fork(dert_, param, Ave, fork_history):
Example #30
            # save img and fft trajectories
            fft_dir = os.path.join(output_dir, 'fft' + hyp_str)
            if not os.path.exists(fft_dir):
                os.system('mkdir ' + fft_dir)

            traj_dir = os.path.join(output_dir, 'traj' + hyp_str)
            if not os.path.exists(traj_dir):
                os.system('mkdir ' + traj_dir)

            fft_var = [fft(t[:, :]) for t in traj1]
            for i in range(0, min(50, len(fft_var) * traj_iter), 2):
                cv2.imwrite(os.path.join(fft_dir,
                                         str(i) + '_fft.png'), 20 * fft_var[i])
                utils.imwrite(
                    os.path.join(traj_dir, str(i)) + '_img.png',
                    traj1[i, :, :])
                plt.cla()
                plt.clf()
                plt.close()

            plt.plot([np.sum(f) for f in fft_var])
            plt.savefig(os.path.join(output_dir, 'fft_var.png'),
                        bbox_inches='tight')
            plt.cla()
            plt.clf()
            plt.close()

        traj_iter = cfg.TRAJ_ITER
        for err_type in ['err_traj', 'err_true_t1', 'err_true_t2']:
            # compare across varying num channels