Example #1
import data
#import model
#import utils
#import predict
import os
import json
import numpy as np
#import tensorflow as tf


# Load the hyperparameters from params.json, located next to this script.
with open(os.path.join(os.path.dirname(__file__), 'params.json')) as f:
    params = json.load(f)

# Build the training set (batched patches) and the validation set from the
# dataset names given in the configuration.
train_set = data.TrainSet(params['train_set'], params['batch_size'], params['patch_size'])
validation_set = data.TestSet(params['validation_set'])
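For context, this snippet reads only four keys from params.json. A minimal, purely illustrative configuration could be written out as follows; the dataset identifiers and numeric values below are assumptions for illustration, not values from the original project:

# Writes an example params.json containing only the keys the snippet above reads.
# All names and values here are illustrative placeholders.
import json

example_params = {
    'train_set': 'T91',        # hypothetical training-set identifier
    'validation_set': 'Set5',  # hypothetical validation-set identifier
    'batch_size': 64,
    'patch_size': 41,
}

with open('params.json', 'w') as f:
    json.dump(example_params, f, indent=4)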
Example #2
import data
import predict
import numpy as np
import tensorflow as tf

# Restore the trained model inside a TensorFlow session (TF1-style graph API).
with tf.Session() as session:
    network = predict.load_model(session)

    # Evaluate on the standard super-resolution benchmarks at scaling factors 2-4
    # and report the mean PSNR over each dataset.
    for set_name in ['Set5', 'Set14', 'B100', 'Urban100']:
        for scaling_factor in [2, 3, 4]:
            dataset = data.TestSet(set_name, scaling_factors=[scaling_factor])
            predictions, psnr = predict.predict(dataset.images,
                                                session,
                                                network,
                                                targets=dataset.targets,
                                                border=scaling_factor)

            print('Dataset "%s", scaling factor = %d. Mean PSNR = %.2f.' %
                  (set_name, scaling_factor, np.mean(psnr)))
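Note that this example relies on the TensorFlow 1.x graph-and-session API (tf.Session). If the project were run under TensorFlow 2.x, which is an assumption rather than something these examples state, the usual shim is the v1 compatibility module:

# Only needed under TensorFlow 2.x; under TF 1.x the plain "import tensorflow as tf" above suffices.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()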
Example #3
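This snippet starts partway through a training script: the parser object, the imports, and several other hyperparameter flags (for example batch_size, patch_size, region_size, n_layers, kernel_size and n_filters, which are read from params further down) are defined earlier and not shown here. A rough sketch of that assumed preamble, inferred only from how the names are used below:

# Assumed preamble (not shown on this page); module and flag names are inferred from usage below.
import argparse

import numpy as np
import tensorflow as tf

import data   # project module providing TrainingSet / TestSet
import model  # project module providing Model

parser = argparse.ArgumentParser()
# ... the remaining hyperparameter flags (-batch_size, -patch_size, -region_size,
# -n_layers, -kernel_size, -n_filters, ...) are added here ...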
parser.add_argument('-stride', type=int, default=50)
parser.add_argument('-n_channels', type=int, default=3)
parser.add_argument('-learning_rate', type=float, default=0.01)
parser.add_argument('-learning_rate_decay', type=float, default=0.1)
parser.add_argument('-learning_rate_decay_step', type=int, default=15)
parser.add_argument('-momentum', type=float, default=0.9)
parser.add_argument('-inner_activation', type=str, default='tanh')
parser.add_argument('-outer_activation', type=str, default='sigmoid')
parser.add_argument('-tone_mapping', type=bool, default=True)
parser.add_argument('-discard_well_exposed', type=bool, default=True)
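# Note: argparse's type=bool does not behave the way it looks: any non-empty string
# passed on the command line (including "False" or "0") is converted to True, so the
# two flags above can effectively only be disabled by changing their defaults. A common
# workaround (not part of the original script) is action='store_true'/'store_false'
# or a small string-to-bool converter passed as type=.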

params = vars(parser.parse_args())

# Build the training and validation sets and derive the number of batches per epoch.
train_set = data.TrainingSet(params['batch_size'], params['patch_size'], params['region_size'], params['stride'],
                             params['n_channels'], discard_well_exposed=params['discard_well_exposed'])
validation_set = data.TestSet('ALL', params['n_channels'])

batches_per_epoch = int(np.ceil(train_set.length / train_set.batch_size))

# Placeholders for the network inputs, the ground-truth images, and two scalar PSNR values.
inputs = tf.placeholder(tf.float32)
ground_truth = tf.placeholder(tf.float32)
psnr_t = tf.placeholder(tf.float32, shape=[])
psnr_l = tf.placeholder(tf.float32, shape=[])
global_step = tf.Variable(0, trainable=False, name='global_step')

# Build the network from the parsed hyperparameters.
network = model.Model(inputs, params['n_layers'], params['kernel_size'], params['n_filters'], params['n_channels'],
                      params['inner_activation'], params['outer_activation'])

# Mean-squared-error loss, computed in the tone-mapped domain when tone mapping is enabled.
if params['tone_mapping']:
    base_loss = tf.losses.mean_squared_error(tone_mapping_tf(network.outputs), tone_mapping_tf(ground_truth))
else: