Example #1
def train(args, model, sess, saver):
    
    if args.fine_tuning :
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for fine-tuning!")
        print("model path is %s"%(args.pre_trained_model))
        
    num_imgs = len(os.listdir(args.train_Sharp_path))
    
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./logs',sess.graph)
#    if args.test_with_train:
#        f = open("valid_logs.txt", 'w')
    
    epoch = 1
    step = num_imgs // args.batch_size
    
    
    blur_imgs = util.image_loader(args.train_Blur_path, args.load_X, args.load_Y)
    sharp_imgs = util.image_loader(args.train_Sharp_path, args.load_X, args.load_Y)
    
    while epoch <= args.max_epoch:
        random_index = np.random.permutation(len(blur_imgs))
        for k in range(step):
            s_time = time.time()
            blur_batch, sharp_batch = util.batch_gen(blur_imgs, sharp_imgs, args.patch_size, args.batch_size, random_index, k, args.augmentation)
            Knoise = np.random.randn(args.batch_size,64)
            for t in range(args.critic_updates):
                _, D_loss = sess.run([model.D_train, model.D_loss], feed_dict = {model.blur : blur_batch, model.sharp : sharp_batch,model.Knoise:Knoise, model.epoch : epoch})
            
            if (k+1) % args.log_freq == 0:
                _, G_loss, gene_K, gene_img, reg_loss, D_loss, gp_loss = sess.run(
                    [model.G_train, model.G_loss, model.gene_K, model.gene_img,
                     model.reg_loss, model.D_loss, model.gp_loss],
                    feed_dict = {model.blur : blur_batch, model.sharp : sharp_batch, model.Knoise : Knoise, model.epoch : epoch})
                gene_K=util.normalized(gene_K)
                gene_img=util.normalized(gene_img)
                util.imshow(gene_K[0,:,:,0],cmap='gray')
                toshow = np.hstack((sharp_batch[0]/255.0,blur_batch[0]/255.0,gene_img[0]))
                util.imshow(toshow)
                print("training with %d epoch %d/%d batch, D_loss: %0.2f, gp_loss: %0.2f, G_loss: %0.2f, reg_loss: %0.2f "%(epoch,k+1,step,D_loss,gp_loss,G_loss,reg_loss))
            else:
                _, G_loss = sess.run([model.G_train, model.G_loss], feed_dict = {model.blur : blur_batch, model.sharp : sharp_batch,model.Knoise:Knoise, model.epoch : epoch})
            
            e_time = time.time()
        
#        if epoch % args.log_freq == 0:
        summary = sess.run(merged, feed_dict = {model.blur : blur_batch, model.sharp: sharp_batch,model.Knoise:Knoise, model.epoch : epoch})
        train_writer.add_summary(summary, epoch)
#            if args.test_with_train:
#                test(args, model, sess, saver, f, epoch, loading = False)
        print("%d training epoch completed" %(epoch))
        print("D_loss : %0.4f, \t G_loss : %0.4f"%(D_loss, G_loss))
        print("Elpased time : %0.4f"%(e_time - s_time))
        saver.save(sess, './model/DeblurrGAN', global_step = epoch, write_meta_graph = False)
        
        epoch += 1

    saver.save(sess, './model/DeblurrGAN_last', write_meta_graph = False)
Example #2
def test(args, model, sess, saver, step=-1, loading=False):

    if loading:
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for test!")
        print("model path is %s" % args.pre_trained_model)

    rad_img_name = sorted(os.listdir(args.test_radiance_path))
    inp_img_name = sorted(os.listdir(args.test_inp_path))
    focus_img_name = sorted(os.listdir(args.test_focus_path))

    input_imgs = util.image_loader_new(args.test_inp_path,
                                       args.test_focus_path, args.load_X,
                                       args.load_Y)
    rad_imgs = util.image_loader(args.test_radiance_path,
                                 args.load_X,
                                 args.load_Y,
                                 is_train=False)

    index = 1
    rad = np.expand_dims(rad_imgs[index], axis=0)
    inp = np.expand_dims(input_imgs[index], axis=0)
    output = sess.run(model.output,
                      feed_dict={
                          model.inp: inp,
                          model.radiance: rad
                      })
    if args.save_test_result:
        output = Image.fromarray(output[0])
        split_name = inp_img_name[index].split('.')
        output.save(
            os.path.join(
                args.result_path, 'epoch%d_%s_radiance.png' %
                (step, ''.join(map(str, split_name[:-1])))))
Example #3
def test_only(args, model, sess, saver):
    
    saver.restore(sess,args.pre_trained_model)
    print("saved model is loaded for test only!")
    print("model path is %s"%args.pre_trained_model)
    
    blur_img_name = sorted(os.listdir(args.test_Blur_path))

    if args.in_memory :
        
        blur_imgs = util.image_loader(args.test_Blur_path, args.load_X, args.load_Y, is_train = False)
        
        for i, ele in enumerate(blur_imgs):
            blur = np.expand_dims(ele, axis = 0)
            
            if args.chop_forward:
                output = util.recursive_forwarding(blur, args.chop_size, sess, model, args.chop_shave)
                output = Image.fromarray(output[0])
            
            else:
                output = sess.run(model.output, feed_dict = {model.blur : blur})
                output = Image.fromarray(output[0])
            
            split_name = blur_img_name[i].split('.')
            output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))

    else:
        
        sess.run(model.data_loader.init_op['te_init'])

        for i in range(len(blur_img_name)):
            output = sess.run(model.output)
            output = Image.fromarray(output[0])
            split_name = blur_img_name[i].split('.')
            output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))    
Example #4
def test_only(args, model, sess):

    loader = tf.train.Saver(max_to_keep=None)
    loader.restore(sess, args.pre_trained_model)

    print("saved model is loaded for test only!")
    print("model path is %s" % args.pre_trained_model)

    val_LR = sorted(os.listdir(args.test_LR_path))
    val_LR_imgs = util.image_loader(args.test_LR_path)

    if args.in_memory:

        for i, img_LR in enumerate(val_LR_imgs):

            batch_img_LR = np.expand_dims(img_LR, axis=0)

            if args.self_ensemble:
                output = util.self_ensemble(args,
                                            model,
                                            sess,
                                            batch_img_LR,
                                            is_recursive=args.chop_forward)

            else:
                if args.chop_forward:
                    output = util.recursive_forwarding(batch_img_LR,
                                                       args.scale,
                                                       args.chop_size, sess,
                                                       model, args.chop_shave)
                    output = output[0]
                else:
                    output = sess.run(model.output,
                                      feed_dict={model.LR: batch_img_LR})
                    output = output[0]

            im = Image.fromarray(output)
            split_name = val_LR[i].split(".")
            im.save(
                os.path.join(
                    args.result_path,
                    "%sX%d.%s" % (''.join(map(
                        str, split_name[:-1])), args.scale, split_name[-1])))

    else:

        sess.run(model.data_loader.init_op['val_init'])

        for i in range(len(val_LR)):
            output = sess.run([model.output])
            output = output[0]

            im = Image.fromarray(output)
            split_name = val_LR[i].split(".")
            im.save(
                os.path.join(
                    args.result_path,
                    "%sX%d.%s" % (''.join(map(
                        str, split_name[:-1])), args.scale, split_name[-1])))
Example #5
def test(args, model, sess, saver, file, step = -1, loading = False):
        
    if loading:
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for test!")
        print("model path is %s"%args.pre_trained_model)
        
    blur_img_name = sorted(os.listdir(args.test_Blur_path))
    sharp_img_name = sorted(os.listdir(args.test_Sharp_path))
    
    PSNR_list = []
    ssim_list = []
    

    
    blur_imgs = util.image_loader(args.test_Blur_path, args.load_X, args.load_Y, is_train = False)
    sharp_imgs = util.image_loader(args.test_Sharp_path, args.load_X, args.load_Y, is_train = False)
    
    for i, ele in enumerate(blur_imgs):
        blur = np.expand_dims(ele, axis = 0)
        sharp = np.expand_dims(sharp_imgs[i], axis = 0)
        output, psnr, ssim = sess.run([model.output, model.PSNR, model.ssim], feed_dict = {model.blur : blur, model.sharp : sharp})
        if args.save_test_result:
            output = Image.fromarray(output[0])
            split_name = blur_img_name[i].split('.')
            output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))

        PSNR_list.append(psnr)
        ssim_list.append(ssim)

            
    length = len(PSNR_list)
    
    mean_PSNR = sum(PSNR_list) / length
    mean_ssim = sum(ssim_list) / length
    
    if step == -1:
        file.write('PSNR : %0.4f SSIM : %0.4f'%(mean_PSNR, mean_ssim))
        file.close()
        
    else :
        file.write("%d-epoch step PSNR : %0.4f SSIM : %0.4f \n"%(step, mean_PSNR, mean_ssim))
Example #6
def test(args, model, sess, saver, step=-1, loading=False):

    if loading:
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for test!")
        print("model path is %s" % args.pre_trained_model)

    out_img_name = sorted(os.listdir(args.test_out_path))
    inp_img_name = sorted(os.listdir(args.test_inp_path))
    radiance_img_name = sorted(os.listdir(args.test_radiance_path))

    input_imgs = util.image_loader_new(args.test_inp_path,
                                       args.test_radiance_path, args.load_X,
                                       args.load_Y)
    out_imgs = util.image_loader(args.test_out_path,
                                 args.load_X,
                                 args.load_Y,
                                 is_train=False)

    # refocus parameter delta - a one-hot vector of size 2 (see the sketch after this example)
    delta = np.zeros(args.z_dim)
    delta = np.expand_dims(delta, axis=0)

    index = 1
    out = np.expand_dims(out_imgs[index], axis=0)
    inp = np.expand_dims(input_imgs[index], axis=0)

    output = sess.run(model.output,
                      feed_dict={
                          model.inp: inp,
                          model.out: out,
                          model.delta: delta
                      })
    if args.save_test_result:
        output = Image.fromarray(output[0])
        split_name = inp_img_name[index].split('.')
        output.save(
            os.path.join(
                args.result_path, 'epoch%d_%s_out.png' %
                (step, ''.join(map(str, split_name[:-1])))))
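Note: the comment in this example describes delta as a one-hot refocus selector, but the snippet feeds an all-zeros vector. A minimal sketch of building an actual one-hot delta of length args.z_dim, placed where delta is constructed inside test, could look like the following; the focus_index name is illustrative and not from the repository.

# hypothetical one-hot construction for the refocus parameter delta
focus_index = 0                             # illustrative: which focal plane to select
delta = np.eye(args.z_dim)[focus_index]     # one-hot vector of length z_dim
delta = np.expand_dims(delta, axis=0)       # add the batch dimension, as in the example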
Example #7
def train(args, model, sess, saver):

    if args.fine_tuning:
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for fine-tuning!")
        print("model path is %s" % (args.pre_trained_model))

    num_imgs = len(os.listdir(args.train_inp_path))

    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./logs', sess.graph)
    if args.test_with_train:
        f = open("valid_logs.txt", 'w')

    epoch = 0
    step = num_imgs // args.batch_size
    """
    input_imgs - Dimensions(BATCH_SIZE*256*256*4) --> concatenated RGB and FocusMeasure images along channel axis
    rad_imgs  - Dimensions(BATCH_SIZE*256*256*3) --> Label images
    """
    input_imgs = util.image_loader_new(args.train_inp_path,
                                       args.train_focus_path, args.load_X,
                                       args.load_Y)
    rad_imgs = util.image_loader(args.train_radiance_path, args.load_X,
                                 args.load_Y)  #output

    while epoch < args.max_epoch:
        random_index = np.random.permutation(len(input_imgs))
        s_time = time.time()
        for k in range(step):
            """
                utils.batch_gen function generates a batch of size args.batch_size that are fed to the network.
            """
            rad_batch, inp_batch = util.batch_gen(rad_imgs, input_imgs,
                                                  args.patch_size,
                                                  args.batch_size,
                                                  random_index, k)

            for t in range(args.critic_updates):
                _, D_loss = sess.run(
                    [model.D_train, model.D_loss],
                    feed_dict={
                        model.inp: inp_batch,
                        model.radiance: rad_batch,
                        model.epoch: epoch
                    })

            _, G_loss = sess.run(
                [model.G_train, model.G_loss],
                feed_dict={
                    model.inp: inp_batch,
                    model.radiance: rad_batch,
                    model.epoch: epoch
                })

        e_time = time.time()

        if epoch % args.log_freq == 0:
            summary = sess.run(merged,
                               feed_dict={
                                   model.inp: inp_batch,
                                   model.radiance: rad_batch
                               })
            train_writer.add_summary(summary, epoch)
            """
                Testing the model while training for knowing how the model is working after each iteration
            """
            if args.test_with_train:
                test(args, model, sess, saver, f, epoch, loading=False)
            print("%d training epoch completed" % epoch)
            print("D_loss : %0.4f, \t G_loss : %0.4f" % (D_loss, G_loss))
            print("Elpased time : %0.4f" % (e_time - s_time))
        """
            Saving the model every args.model_save_freq epochs into model directory
        """
        if ((epoch) % args.model_save_freq == 0):
            saver.save(sess, './model/DeblurrGAN', global_step=epoch)

        epoch += 1
    """
        Saving the model obtained after the last iteration
    """
    saver.save(sess, './model/DeblurrGAN_last')
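The docstrings in Example #7 describe util.batch_gen only briefly. Below is a minimal sketch of what such a helper might do, under stated assumptions: the image lists hold H x W x C arrays at least patch_size on each side, random_index is a per-epoch permutation, and the k-th call returns the k-th slice of batch_size aligned random patches. The batch_gen_sketch name and internals are illustrative, not the repository implementation.

import numpy as np

def batch_gen_sketch(label_imgs, input_imgs, patch_size, batch_size, random_index, k):
    # take the k-th slice of the shuffled indices for this epoch
    label_batch, input_batch = [], []
    for idx in random_index[k * batch_size:(k + 1) * batch_size]:
        label, inp = label_imgs[idx], input_imgs[idx]
        h, w = label.shape[:2]
        # crop the same random patch location from the label and the input image
        y = np.random.randint(0, h - patch_size + 1)
        x = np.random.randint(0, w - patch_size + 1)
        label_batch.append(label[y:y + patch_size, x:x + patch_size])
        input_batch.append(inp[y:y + patch_size, x:x + patch_size])
    return np.array(label_batch), np.array(input_batch)

# usage mirroring Example #7:
# rad_batch, inp_batch = batch_gen_sketch(rad_imgs, input_imgs, args.patch_size,
#                                         args.batch_size, random_index, k)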
Example #8
def train(args, model, sess, saver):
    if args.fine_tuning:
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for fine-tuning!")
        print("model path is %s" % (args.pre_trained_model))

    num_imgs = len(os.listdir(args.train_Sharp_path))

    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./logs', sess.graph)
    if args.test_with_train:
        f = open("valid_logs.txt", 'w')

    epoch = 0
    step = num_imgs // args.batch_size

    if args.in_memory:
        blur_imgs = util.image_loader(args.train_Blur_path, args.load_X,
                                      args.load_Y)
        sharp_imgs = util.image_loader(args.train_Sharp_path, args.load_X,
                                       args.load_Y)
        # lr_hr_ds, n_data = data.load_train_dataset(args.train_Sharp_path, args.train_Blur_path, args.ext, args.batch_size)
        while epoch < args.max_epoch:
            random_index = np.random.permutation(len(blur_imgs))
            #  next_element = lr_hr_ds.get_next()
            for k in range(step):
                s_time = time.time()
                # blur_batch, sharp_batch = sess.run(next_element)
                blur_batch, sharp_batch = util.batch_gen(
                    blur_imgs, sharp_imgs, args.patch_size, args.batch_size,
                    random_index, k, args.augmentation)

                for t in range(args.critic_updates):
                    _, D_loss = sess.run(
                        [model.D_train, model.D_loss],
                        feed_dict={
                            model.blur: blur_batch,
                            model.sharp: sharp_batch,
                            model.epoch: epoch
                        })

                _, G_loss = sess.run(
                    [model.G_train, model.G_loss],
                    feed_dict={
                        model.blur: blur_batch,
                        model.sharp: sharp_batch,
                        model.epoch: epoch
                    })

                e_time = time.time()

            if epoch % args.log_freq == 0:
                summary = sess.run(merged,
                                   feed_dict={
                                       model.blur: blur_batch,
                                       model.sharp: sharp_batch
                                   })
                train_writer.add_summary(summary, epoch)
                if args.test_with_train:
                    test(args, model, sess, saver, f, epoch, loading=False)
                print("%d training epoch completed" % epoch)
                print("D_loss : %0.4f, \t G_loss : %0.4f" % (D_loss, G_loss))
                print("Elpased time : %0.4f" % (e_time - s_time))
            if ((epoch) % args.model_save_freq == 0):
                saver.save(sess,
                           './model/DeblurrGAN',
                           global_step=epoch,
                           write_meta_graph=False)

            epoch += 1

        saver.save(sess, './model/DeblurrGAN_last', write_meta_graph=False)

    else:
        while epoch < args.max_epoch:

            sess.run(model.data_loader.init_op['tr_init'])

            for k in range(step):
                s_time = time.time()

                for t in range(args.critic_updates):
                    _, D_loss = sess.run([model.D_train, model.D_loss],
                                         feed_dict={model.epoch: epoch})

                _, G_loss = sess.run([model.G_train, model.G_loss],
                                     feed_dict={model.epoch: epoch})

                e_time = time.time()

            if epoch % args.log_freq == 0:
                summary = sess.run(merged)
                train_writer.add_summary(summary, epoch)
                if args.test_with_train:
                    test(args, model, sess, saver, f, epoch, loading=False)
                print("%d training epoch completed" % epoch)
                print("D_loss : %0.4f, \t G_loss : %0.4f" % (D_loss, G_loss))
                print("Elpased time : %0.4f" % (e_time - s_time))
            if ((epoch) % args.model_save_freq == 0):
                saver.save(sess,
                           './model/DeblurrGAN',
                           global_step=epoch,
                           write_meta_graph=False)

            epoch += 1

        saver.save(sess,
                   './model/DeblurrGAN_last',
                   global_step=epoch,
                   write_meta_graph=False)

    if args.test_with_train:
        f.close()
Example #9
def test(args, model, sess, saver, step = -1, loading = False):
        
    if loading:
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for test!")
        print("model path is %s"%args.pre_trained_model)
        
    blur_img_name = sorted(os.listdir(args.test_Blur_path))
    sharp_img_name = sorted(os.listdir(args.test_Sharp_path))
    focus_img_name = sorted(os.listdir(args.test_focus_path))
    
    if args.in_memory:

        input_imgs = util.image_loader_new(args.test_Sharp_path, args.test_focus_path, args.load_X, args.load_Y)
        blur_imgs = util.image_loader(args.test_Blur_path, args.load_X, args.load_Y, is_train = False)

        index = 0
        input_z = np.zeros(args.z_dim)
        input_z[0] = 3
        input_z = np.expand_dims(input_z, axis = 0)

        blur = np.expand_dims(blur_imgs[index], axis = 0)
        inp = np.expand_dims(input_imgs[index], axis = 0)

        output = sess.run(model.output, feed_dict = {model.inp : inp, model.blur : blur, model.input_z : input_z})
        if args.save_test_result:
            output = Image.fromarray(output[0])
            split_name = blur_img_name[index].split('.')
            output.save(os.path.join(args.result_path, 'epoch%d_%s_blur.png'%(step,''.join(map(str, split_name[:-1])))))


"""
    test_only function is used to test the model on test data after training
    Parameters:
        args  - arguments defined in the main
        model - model defined in the build_graph
    saver restores the saved model from args.pre_trained_model directory
"""            
def test_only(args, model, sess, saver):
    
    saver.restore(sess,args.pre_trained_model)
    graph = sess.graph
    print("saved model is loaded for test only!")
    print("model path is %s"%args.pre_trained_model)
    
    blur_img_name = sorted(os.listdir(args.test_Blur_path))

        
    input_imgs = util.image_loader_new(args.test_Sharp_path, args.test_focus_path, args.load_X, args.load_Y)
    input_z = np.zeros(args.z_dim)
    input_z[0] = 3
    input_z = np.expand_dims(input_z, axis = 0)
    for i, ele in enumerate(input_imgs):
        inp = np.expand_dims(ele, axis = 0)
        
        if args.chop_forward:
            output = util.recursive_forwarding(inp, args.chop_size, sess, model, args.chop_shave)
            output = Image.fromarray(output[0])
        
        else:
            output = sess.run(model.output, feed_dict = {model.inp : inp, model.input_z : input_z})
            output = Image.fromarray(output[0])
        
        split_name = blur_img_name[i].split('.')
        output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))
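Several of these examples (#3, #4, #9, #10) call util.recursive_forwarding with chop_size and chop_shave so large images can be processed without exhausting GPU memory. The repository's helper works recursively; the sketch below only illustrates the underlying idea with flat overlapping tiles, assuming a model whose output has the same spatial size as its input. tile_forward and run_model are hypothetical names, not the repository API.

import numpy as np

def tile_forward(img, chop_size, shave, run_model):
    # img: (1, H, W, C) array; run_model: callable mapping a batch to a same-sized batch
    _, h, w, c = img.shape
    out = np.zeros_like(img)
    step = chop_size - 2 * shave            # assumes chop_size > 2 * shave
    for y in range(0, h, step):
        for x in range(0, w, step):
            # forward an overlapping tile, then keep only its non-shaved centre
            y0, x0 = max(y - shave, 0), max(x - shave, 0)
            y1, x1 = min(y + step + shave, h), min(x + step + shave, w)
            patch_out = run_model(img[:, y0:y1, x0:x1, :])
            out[:, y:min(y + step, h), x:min(x + step, w), :] = \
                patch_out[:, y - y0:y - y0 + min(step, h - y),
                          x - x0:x - x0 + min(step, w - x), :]
    return out

# usage mirroring Example #9's test_only:
# output = tile_forward(inp, args.chop_size, args.chop_shave,
#                       lambda t: sess.run(model.output,
#                                          feed_dict={model.inp: t, model.input_z: input_z}))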
Example #10
def test(args, model, sess):

    loader = tf.train.Saver(max_to_keep=None)

    loader.restore(sess, args.pre_trained_model)
    print("saved model is loaded for test!")
    print("model path is %s" % args.pre_trained_model)

    val_LR = sorted(os.listdir(args.test_LR_path))
    val_HR = sorted(os.listdir(args.test_GT_path))

    val_LR_imgs = util.image_loader(args.test_LR_path)
    val_GT_imgs = util.image_loader(args.test_GT_path)

    Y_PSNR_list = []
    Y_SSIM_list = []

    file = open('./ARDN_X%d_%s_result.txt' % (args.scale, args.test_set), 'w')

    if args.in_memory:

        for i, img_LR in enumerate(val_LR_imgs):

            batch_img_LR = np.expand_dims(img_LR, axis=0)
            img_HR = val_GT_imgs[i]

            if args.self_ensemble:
                output = util.self_ensemble(args,
                                            model,
                                            sess,
                                            batch_img_LR,
                                            is_recursive=args.chop_forward)

            else:
                if args.chop_forward:
                    output = util.recursive_forwarding(batch_img_LR,
                                                       args.scale,
                                                       args.chop_size, sess,
                                                       model, args.chop_shave)
                    output = output[0]
                else:
                    output = sess.run(model.output,
                                      feed_dict={model.LR: batch_img_LR})
                    output = output[0]

            h, w, c = output.shape
            val_gt = img_HR[:h, :w]

            y_psnr, y_ssim = util.compare_measure(val_gt, output, args)

            Y_PSNR_list.append(y_psnr)
            Y_SSIM_list.append(y_ssim)
            file.write('file name : %s PSNR : %0.4f SSIM : %0.4f \n' %
                       (val_LR[i], y_psnr, y_ssim))

            if args.save_test_result:
                im = Image.fromarray(output)
                split_name = val_LR[i].split(".")
                im.save(
                    os.path.join(
                        args.result_path,
                        "%sX%d.%s" % (''.join(map(str, split_name[:-1])),
                                      args.scale, split_name[-1])))

    else:

        sess.run(model.data_loader.init_op['val_init'])

        for i in range(len(val_LR)):

            output, val_gt = sess.run([model.output, model.label])
            output = output[0]
            val_gt = val_gt[0]
            h, w, c = output.shape
            val_gt = val_gt[:h, :w]
            val_gt = val_gt.astype(np.uint8)

            y_psnr, y_ssim = util.compare_measure(val_gt, output, args)

            Y_PSNR_list.append(y_psnr)
            Y_SSIM_list.append(y_ssim)
            file.write('file name : %s PSNR : %0.4f SSIM : %0.4f \n' %
                       (val_LR[i], y_psnr, y_ssim))

            if args.save_test_result:
                im = Image.fromarray(output)
                split_name = val_LR[i].split(".")
                im.save(
                    os.path.join(
                        args.result_path,
                        "%sX%d.%s" % (''.join(map(str, split_name[:-1])),
                                      args.scale, split_name[-1])))

    length = len(val_LR)
    mean_Y_PSNR = sum(Y_PSNR_list) / length
    mean_SSIM = sum(Y_SSIM_list) / length

    file.write("Y_PSNR : %0.4f SSIM : %0.4f \n" % (mean_Y_PSNR, mean_SSIM))
    file.close()
    print('############Test completed!############')
Example #11
def train(args, model, sess):
    '''
    If you want to fine-tune from a pre-trained model,
    set the --fine_tuning option to True and point --pre_trained_model to the pre-trained model path.
    '''

    if args.fine_tuning:  # load pre-trained model

        if args.load_tail_part:
            variables_to_restore = [var for var in tf.global_variables()]
        else:
            variables_to_restore = [
                var for var in tf.global_variables()
                if 'up_sample' not in var.name and 'conv_rec' not in var.name
                and 'learning_step' not in var.name
            ]

        loader = tf.train.Saver(variables_to_restore)
        loader.restore(sess, args.pre_trained_model)

        print("saved model is loaded for fine-tuning!")
        if not args.load_tail_part:
            print("Tail part is not loaded!")
        print("model path is %s" % (args.pre_trained_model))

    num_imgs = len(os.listdir(args.train_GT_path))  # total number of training images

    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./logs', sess.graph)
    if args.test_with_train:
        f = open("ARDN_X%d_train_log.txt" % (args.scale), 'w')
        model_config = 'scale : %d \t n_feats : %d \t n_ARDG : %d \t n_ARDB : %d \n' % (
            args.scale, args.n_feats, args.n_ARDG, args.n_ARDB)
        f.write(model_config)

    count = sess.run(model.global_step)
    step = num_imgs // args.batch_size
    saver = tf.train.Saver(max_to_keep=None)
    '''
    If your training data is small enough to fit in main memory,
    it is better to set the --in_memory option to True
    (a sketch of the alternative tf.data pipeline appears after this example).
    '''
    if args.in_memory:

        lr_imgs = util.image_loader(args.train_LR_path)
        gt_imgs = util.image_loader(args.train_GT_path)

        if args.test_with_train:
            val_lr_imgs = util.image_loader(args.test_LR_path)
            val_gt_imgs = util.image_loader(args.test_GT_path)

        while count < args.max_step:
            random_index = np.random.permutation(len(lr_imgs))
            for k in range(step):
                s_time = time.time()
                lr_batch, gt_batch = util.batch_gen(lr_imgs, gt_imgs,
                                                    args.patch_size,
                                                    args.scale,
                                                    args.batch_size,
                                                    random_index, k)
                _, losses = sess.run([model.train, model.loss],
                                     feed_dict={
                                         model.LR: lr_batch,
                                         model.GT: gt_batch
                                     })
                count = sess.run(model.global_step)

                e_time = time.time()

                print(
                    "%d training step completed, Loss : %0.4f,  Elpased time : %0.4f。"
                    % (count, losses, (e_time - s_time)))

                if count % args.log_freq == 0:
                    summary = sess.run(merged,
                                       feed_dict={
                                           model.LR: lr_batch,
                                           model.GT: gt_batch
                                       })
                    train_writer.add_summary(summary, count)

                    if args.test_with_train:
                        util.train_with_test(args, model, sess, saver, f,
                                             count, val_lr_imgs, val_gt_imgs)
                        f.close()
                        f = open("ARDN_X%d_train_log.txt" % (args.scale), 'a')

                    # print("%d training step completed" % count)
                    # print("Loss : %0.4f"%losses)
                    # print("Elpased time : %0.4f"%(e_time - s_time))

                if ((count) % args.model_save_freq == 0):
                    saver.save(sess,
                               os.path.join(
                                   args.model_path, 'ARDN_X%d_%d_%d_%d' %
                                   (args.scale, args.n_feats, args.n_ARDG,
                                    args.n_ARDB)),
                               global_step=count,
                               write_meta_graph=False)

        saver.save(sess,
                   os.path.join(
                       args.model_path, 'ARDN_X%d_%d_%d_%d_last' %
                       (args.scale, args.n_feats, args.n_ARDG, args.n_ARDB)),
                   global_step=count,
                   write_meta_graph=False)

    else:

        while count < args.max_step:
            sess.run(model.data_loader.init_op['tr_init'])

            for k in range(step):
                s_time = time.time()
                _ = sess.run([model.train],
                             feed_dict={model.global_step: count})
                e_time = time.time()
                count += 1
                if count % args.log_freq == 0:
                    summary, loss = sess.run([merged, model.loss])
                    train_writer.add_summary(summary, count)

                    if args.test_with_train:
                        util.train_with_test(args, model, sess, saver, f,
                                             count, None, None)
                        f.close()
                        f = open("ARDN_X%d_train_log.txt" % (args.scale), 'a')
                        sess.run(model.data_loader.init_op['tr_init'])

                    print("%d training step completed" % count)
                    print("Loss : %0.4f" % loss)
                    print("Elpased time : %0.4f" % (e_time - s_time))

                if ((count) % args.model_save_freq == 0):
                    saver.save(sess,
                               os.path.join(
                                   args.model_path, 'ARDN_X%d_%d_%d_%d' %
                                   (args.scale, args.n_feats, args.n_ARDG,
                                    args.n_ARDB)),
                               global_step=count,
                               write_meta_graph=False)

        saver.save(sess,
                   os.path.join(
                       args.model_path, 'ARDN_X%d_%d_%d_%d_last' %
                       (args.scale, args.n_feats, args.n_ARDG, args.n_ARDB)),
                   global_step=count,
                   write_meta_graph=False)

    if args.test_with_train:
        f.close()
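When args.in_memory is False, Examples #8 and #11 only run sess.run(model.data_loader.init_op['tr_init']) (or 'val_init') and then fetch the train or output ops without a feed_dict, which implies the graph reads its inputs from a tf.data iterator built elsewhere. A minimal TF1.x sketch of such a loader follows; the class name, PNG decoding, and the fixed-size-image assumption are mine, not the repository's.

import tensorflow as tf

class DataLoaderSketch(object):
    def __init__(self, lr_paths, gt_paths, batch_size):
        # decode one (input, label) pair of images from file paths
        def _load(lr_path, gt_path):
            lr = tf.image.decode_png(tf.read_file(lr_path), channels=3)
            gt = tf.image.decode_png(tf.read_file(gt_path), channels=3)
            return lr, gt

        # assumes images of identical size (or prior cropping) so batching works
        tr_ds = (tf.data.Dataset.from_tensor_slices((lr_paths, gt_paths))
                 .shuffle(len(lr_paths))
                 .map(_load)
                 .batch(batch_size)
                 .repeat())

        iterator = tf.data.Iterator.from_structure(tr_ds.output_types,
                                                   tr_ds.output_shapes)
        self.next_batch = iterator.get_next()                   # tensors the model consumes
        self.init_op = {'tr_init': iterator.make_initializer(tr_ds)}

# The model graph would then be built on data_loader.next_batch, and training would start
# with sess.run(model.data_loader.init_op['tr_init']) as in the examples above.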
Example #12
def test(args, model, sess, saver, file, step=-1, loading=False):
    if loading:

        import re
        print(" [*] Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(args.pre_trained_model)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(args.pre_trained_model,
                                             ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")

    blur_img_name = sorted(os.listdir(args.test_Blur_path))
    sharp_img_name = sorted(os.listdir(args.test_Sharp_path))

    PSNR_list = []
    ssim_list = []

    blur_imgs = util.image_loader(args.test_Blur_path,
                                  args.load_X,
                                  args.load_Y,
                                  is_train=False)
    sharp_imgs = util.image_loader(args.test_Sharp_path,
                                   args.load_X,
                                   args.load_Y,
                                   is_train=False)

    if not os.path.exists('./result/'):
        os.makedirs('./result/')

    for i, ele in enumerate(blur_imgs):
        blur = np.expand_dims(ele, axis=0)
        sharp = np.expand_dims(sharp_imgs[i], axis=0)
        output, psnr, ssim = sess.run([model.output, model.PSNR, model.ssim],
                                      feed_dict={
                                          model.blur: blur,
                                          model.sharp: sharp
                                      })
        if args.save_test_result:
            output = Image.fromarray(output[0])
            split_name = blur_img_name[i].split('.')
            output.save(
                os.path.join(
                    args.result_path,
                    '%s_sharp.png' % (''.join(map(str, split_name[:-1])))))

        PSNR_list.append(psnr)
        ssim_list.append(ssim)

    length = len(PSNR_list)

    mean_PSNR = sum(PSNR_list) / length
    mean_ssim = sum(ssim_list) / length

    if step == -1:
        file.write('PSNR : {} SSIM : {}'.format(mean_PSNR, mean_ssim))
        file.close()

    else:
        file.write("{}d-epoch step PSNR : {} SSIM : {} \n".format(
            step, mean_PSNR, mean_ssim))
Example #13
style_layers = [
    'conv_3', 'conv_4', 'conv_5', 'conv_6', 'conv_7', 'conv_8', 'conv_9',
    'conv_10', 'conv_11', 'conv_12'
]
content_layers = ['conv_5']

if not os.path.isdir(out_path):
    os.mkdir(out_path)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# image size; without a GPU it has to be small so it does not take forever
image_size = 512 if torch.cuda.is_available() else 128

content_image = image_loader(content_path, image_size).to(device, torch.float)
style_image = image_loader(style_path, image_size).to(device, torch.float)

input_image = content_image.clone()


def run_style_transfer(content_image,
                       style_image,
                       input_image,
                       num_steps=300,
                       style_weight=1000000,
                       content_weight=1,
                       path=""):

    vgg_model, style_losses, content_losses = get_style_model_and_losses(
        style_image,