Example #1
0
    # NOTE(review): fragment of a larger training script — the enclosing
    # function is outside this view; `scale`, `depth`, and `weights_dir`
    # are presumably defined there (TODO confirm).
    # Dataset wrappers over the same RGB image/cache directories, split by subset.
    catesr_train = CATESR(subset='train',
                          images_dir='/home/ec2-user/gans/data/images_rgb',
                          caches_dir='/home/ec2-user/gans/data/caches_rgb')
    catesr_valid = CATESR(subset='valid',
                          images_dir='/home/ec2-user/gans/data/images_rgb',
                          caches_dir='/home/ec2-user/gans/data/caches_rgb')

    # Training pipeline: batched, with random augmentation and a shuffle buffer.
    train_ds = catesr_train.dataset(batch_size=16,
                                    random_transform=True,
                                    shuffle_buffer_size=500)
    # Validation pipeline: one image per batch, no augmentation, single pass.
    valid_ds = catesr_valid.dataset(batch_size=1,
                                    random_transform=False,
                                    repeat_count=1)

    # WDSR-B generator, warm-started from fine-tuned pretrained weights.
    generator_model = wdsr_b(scale=scale, num_res_blocks=depth)
    generator_model.load_weights(
        os.path.join(weights_dir,
                     'pretrained_weights-wdsr-b-32-x4-fine-tuned.h5'))

    trainer = WdsrTrainer(model=generator_model,
                          checkpoint_dir=f'.ckpt/wdsr-b-{depth}-x{scale}')

    # Train WDSR B model for 300,000 steps. Save a checkpoint only if evaluation PSNR has improved.
    # Validation is limited to 100 samples per evaluation to keep it cheap.
    trainer.train(train_ds,
                  valid_ds.take(100),
                  steps=300000,
                  evaluate_every=1000,
                  save_best_only=True)

    # Restore from checkpoint with highest PSNR
Example #2
0
def Use_WDSR(image_path=None, weights_path='weights/wdsr-b-32-x4/weights.h5'):
    """Super-resolve a single image with a pretrained WDSR-B x4 model.

    Args:
        image_path: Path of the low-resolution input image. Defaults to the
            module-level ``filename`` global, preserving the original behavior.
        weights_path: Path of the pretrained ``.h5`` weights file.

    Side effects:
        Writes the super-resolved image via ``Save_Image``.
    """
    model = wdsr_b(scale=4, num_res_blocks=32)
    model.load_weights(weights_path)
    # Fall back to the module-level `filename` global, as the original code did.
    lr = Load_Image(filename if image_path is None else image_path)
    sr = resolve_single(model, lr)
    Save_Image(sr)
Example #3
0
# Standard library
import os
import os.path as osp

# Third-party: TensorFlow / Keras. `tf` and the Keras backend `K` are used
# below by the freezing code (tf.identity, K.get_session), and `osp`/`os`
# were referenced without ever being imported in the original snippet.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K

# Project-local
from model.wdsr import wdsr_b

input_path = './'
weight_file = 'weights-wdsr-b-32-x4-fine-tuned.h5'
weight_file_path = osp.join(input_path, weight_file)
# Strip the '.h5' suffix and emit a frozen-graph filename.
output_graph_name = weight_file[:-3] + '.pb'

def h5_to_pb(h5_model, output_dir, model_name, out_prefix="output_", log_tensorboard=True):
    """Freeze a loaded Keras model into a TensorFlow ``.pb`` GraphDef (TF1-style API).

    Args:
        h5_model: Keras model whose weights are already loaded.
        output_dir: Directory the frozen graph is written to (created if absent).
        model_name: Filename of the resulting ``.pb`` file.
        out_prefix: Prefix for the graph's renamed output identity nodes
            (``output_1``, ``output_2``, ...).
        log_tensorboard: If True, also import the ``.pb`` into TensorBoard
            event files under ``output_dir``.
    """
    # makedirs(exist_ok=True) avoids the check-then-create race of the original
    # `if not exists: mkdir` and also creates any missing parent directories.
    os.makedirs(output_dir, exist_ok=True)

    # Give every model output a stable, predictable node name so the frozen
    # graph's outputs can be located by name later.
    out_nodes = []
    for i, out_tensor in enumerate(h5_model.outputs, start=1):
        node_name = out_prefix + str(i)
        out_nodes.append(node_name)
        tf.identity(out_tensor, node_name)

    sess = K.get_session()
    from tensorflow.python.framework import graph_util, graph_io
    # Convert all variables to constants so the graph is self-contained.
    init_graph = sess.graph.as_graph_def()
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)

    if log_tensorboard:
        from tensorflow.python.tools import import_pb_to_tensorboard
        import_pb_to_tensorboard.import_to_tensorboard(osp.join(output_dir, model_name), output_dir)

# Directory where the frozen .pb graph is written.
output_dir = osp.join(os.getcwd(),"trans_model")

# Rebuild the WDSR-B x4 architecture in code and load only the fine-tuned
# weights into it (the .h5 stores weights, not the architecture).
h5_model = wdsr_b(scale=4, num_res_blocks=32)
h5_model.load_weights("./weights-wdsr-b-32-x4-fine-tuned.h5")
#h5_model = load_model(weight_file_path)
h5_to_pb(h5_model,output_dir = output_dir,model_name = output_graph_name)
print('model saved')
Example #4
0
def func(fn_img, fn_model, psf=None, 
         fn_img_hr=None, suptitle=None, 
         fnfigout='test.pdf', vm=75, nbit=8):
    """Load a low-res image, optionally convolve a high-res image with a PSF,
    super-resolve with a WDSR-B x4 model, and plot LR/SR/HR side by side.

    Args:
        fn_img: Low-res input, '.npy' or '.png'.
        fn_model: Path to WDSR-B weights loaded into the model.
        psf: None, 'gaussian'/'Gaussian', or a '.npy' kernel file.
        fn_img_hr: Optional high-res reference image ('.npy' or '.png').
        suptitle, fnfigout, vm: Passed through to `plotter`.
        nbit: Bit depth; selects resolve_single (8-bit) vs resolve_single16.

    NOTE(review): lines after the `plotter` call reference `buf`,
    `tb_file_writer`, and `title`, none of which are defined in this
    function — this tail appears to belong to a different function
    (likely a TensorBoard plotting helper) and was fused in by the
    source extraction. Left untouched here; verify against the original repo.
    """

    if fn_img.endswith('npy'):
        datalr = np.load(fn_img)[:, :]
    elif fn_img.endswith('png'):
      # Bare except: any failure silently falls back to the bundled demo crop.
      try:
          datalr = load_image(fn_img)
      except:
          datalr = load_image('demo/0851x4-crop.png')
    else:
      print('Do not recognize input image file type, exiting')
      exit()

#    datalr = hr2lr.normalize_data(datalr, nbit=nbit)

    # NOTE(review): if fn_img_hr is given with an unrecognized extension,
    # `datahr` stays unbound and the psf branch below will raise.
    if fn_img_hr!=None:
        if fn_img_hr.endswith('.npy'):
            datahr = np.load(fn_img_hr)
        elif fn_img_hr.endswith('png'):
            datahr = load_image(fn_img_hr)
    else:
        datahr = None

    if psf is not None:
        # NOTE(review): this guard is a no-op (`pass`); if datahr is None the
        # convolvehr call below presumably fails — TODO confirm intent.
        if datahr is None:
          pass
        print("Convolving data")
        if psf in ('gaussian','Gaussian'):
          # 8x8 separable Gaussian kernel built as an outer product.
          kernel1D = signal.gaussian(8, std=1).reshape(8, 1)
          kernel = np.outer(kernel1D, kernel1D)
        elif psf.endswith('.npy'):
          kernel = np.load(psf)
          nkern = len(kernel)
          # Take the central 512x512 window of the kernel.
          kernel = kernel[nkern//2-256:nkern//2+256, nkern//2-256:nkern//2+256]
        else:
          print("Can't interpret kernel")
          exit()
        # Show HR vs convolved-LR side by side for visual inspection.
        plt.figure()
        plt.subplot(121)
        plt.imshow(datahr, vmax=25, vmin=5)
        datalr = hr2lr.convolvehr(datahr, kernel, rebin=1)
        datahr = hr2lr.normalize_data(datahr, nbit=nbit)
        plt.subplot(122)
        print(datalr.shape)
        datalr = hr2lr.normalize_data(datalr, nbit=nbit)
        plt.imshow(datalr, vmax=50, vmin=20)
        plt.show()
    else:
        print("Assuming data is already convolved")

    model = wdsr_b(scale=4, num_res_blocks=32)
    model.load_weights(fn_model)
    # Add a trailing channel axis: (H, W) -> (H, W, 1).
    datalr = datalr[:,:,None]
    if nbit==8:
      datasr = resolve_single(model, datalr)
    else:
      datasr = resolve_single16(model, datalr)
    plotter(datalr, datasr, datahr=datahr, 
            suptitle=suptitle, fnfigout=fnfigout, vm=vm, 
            nbit=nbit)
    # ----- NOTE(review): fused fragment begins here; see docstring. -----
    # Convert PNG buffer to TF image
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    # Add the batch dimension
    image = tf.expand_dims(image, 0)

    with tb_file_writer.as_default():
        tf.summary.image(title, image, step=0)


# Resume from a previously saved model when allowed; otherwise build and
# compile a fresh WDSR-B generator.
# Check the cheap boolean flag before touching the filesystem (short-circuit);
# behavior is identical to the original exists-then-flag order.
if LOAD_SAVED_MODEL and os.path.exists('saved_model'):
    print("Loaded previously saved model")
    # psnr must be supplied as a custom object so Keras can deserialize the
    # metric saved with the model.
    our_model = tf.keras.models.load_model('saved_model',
                                           custom_objects={'psnr': psnr})

else:
    # NOTE(review): `scale`, `depth`, and `get_optimizer` are presumably
    # defined earlier in the file — TODO confirm.
    our_model = wdsr.wdsr_b(scale=scale, num_res_blocks=depth)
    our_model.compile(
        optimizer=get_optimizer(),
        loss='mae',
        metrics=[psnr],
    )

# Log a baseline super-resolution of the demo crops to TensorBoard before
# training starts, tagged "Start".
resolve_and_tensorboard_plot(
    our_model,
    ['demo/0869x4-crop.png', 'demo/0829x4-crop.png', 'demo/0851x4-crop.png'],
    "Start")

# Silence TF deprecation/info chatter.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

# Derive the resume epoch from the optimizer's global step counter so a
# restored model continues where it left off instead of restarting at 0.
initial_epoch = our_model.optimizer.iterations.numpy() // STEPS_PER_EPOCH
print("Starting on initial epoch: {0}".format(initial_epoch))