Example #1
def build(self):
    H = tf.placeholder(tf.float32,
                       shape=[None, 1, None, None, 3],
                       name='H_truth')
    L = tf.placeholder(tf.float32,
                       shape=[None, self.num_frames, None, None, 3],
                       name='L_input')
    is_train = tf.placeholder(tf.bool, shape=[])  # training-phase flag, scalar
    SR = self.forward(L, is_train)
    loss = Huber(SR, H, 0.01)  # alt. Charbonnier: tf.reduce_mean(tf.sqrt((SR - H)**2 + 1e-6))
    # per-frame MSE; optionally keep only the center frame with
    # [:, self.num_frames//2:self.num_frames//2+1]
    eval_mse = tf.reduce_mean((SR - H)**2, axis=[2, 3, 4])
    self.loss, self.eval_mse = loss, eval_mse
    self.L, self.H, self.SR, self.is_train = L, H, SR, is_train
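
All five examples call a Huber helper that these excerpts never define. A minimal sketch with the signature the calls imply (an assumption, not necessarily this repository's exact code):

import tensorflow as tf

def Huber(y_true, y_pred, delta):
    # quadratic for |error| <= delta, linear beyond, reduced to a scalar mean
    abs_error = tf.abs(y_pred - y_true)
    quadratic = tf.minimum(abs_error, delta)
    linear = abs_error - quadratic
    return tf.reduce_mean(0.5 * tf.square(quadratic) + delta * linear)

The loss is symmetric in its first two arguments, which is why Example #1 can pass them positionally as Huber(SR, H, 0.01).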
Example #2
    x_c += [t]  # end of the per-channel loop (shown in full in Example #5)
x = tf.concat(
    x_c, axis=3
)  # [B,H*R,W*R,3] Tensor("concat_9:0", shape=(?, ?, ?, 3), dtype=float32)

x = tf.expand_dims(
    x,
    axis=1)  # Tensor("ExpandDims_3:0", shape=(?, 1, ?, ?, 3), dtype=float32)
Rx = depth_to_space_3D(
    Rx, R
)  # [B,1,H*R,W*R,3] Tensor("Reshape_6:0", shape=(?, ?, ?, ?, ?), dtype=float32)
x += Rx  # Tensor("add_18:0", shape=(?, ?, ?, ?, 3), dtype=float32)

out_H = tf.clip_by_value(x, 0, 1, name='out_H')

cost = Huber(y_true=H_out_true, y_pred=out_H, delta=0.01)

learning_rate = 0.001
learning_rate = tf.Variable(float(learning_rate),
                            trainable=False,
                            dtype=tf.float32,
                            name='learning_rate')
learning_rate_decay_op = learning_rate.assign(learning_rate * 0.9)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# total train epochs
num_epochs = 100

# Session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
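
The snippet stops before the training loop; a minimal, self-contained sketch of how the decay op is typically driven (TF 1.x assumed, the epoch count is illustrative):

import tensorflow as tf

lr = tf.Variable(0.001, trainable=False, dtype=tf.float32)
decay_op = lr.assign(lr * 0.9)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(3):
        # ... run the optimizer over the training batches here ...
        sess.run(decay_op)   # shrink the rate by 10% once per epoch
        print(sess.run(lr))  # 0.0009, 0.00081, 0.000729

Because the rate is a tf.Variable rather than a Python float, AdamOptimizer reads the decayed value on the next minimize step.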
Example #3
                node.op = 'Switch'
            elif node.op == 'AssignSub':
                # rewrite training-only assignment ops so the frozen
                # inference graph contains no stateful updates
                node.op = 'Sub'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']
            elif node.op == 'AssignAdd':
                node.op = 'Add'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']
        _ = tf.import_graph_def(output_graph_def, name="")

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        in_tensor = sess.graph.get_tensor_by_name("L_in:0")  # 'input' would shadow the built-in
        output = sess.graph.get_tensor_by_name("out_H:0")
        is_train = sess.graph.get_tensor_by_name('is_train:0')

        total_loss = 0
        for j in range(x_data.shape[0]):
            in_L = x_data_padded[j:j + T_in]  # select a window of T_in frames
            in_L = in_L[np.newaxis, :, :, :, :]  # add the batch dimension
            y_out = sess.run(output, feed_dict={in_tensor: in_L, is_train: False})
            img = np.around(y_out[0, 0] * 255).astype(np.uint8)
            Image.fromarray(img).save('./result_test/{:05d}.png'.format(j))

            # note: calling Huber here appends new ops to the graph on every
            # iteration; see the placeholder-based sketch after this example
            cost = Huber(y_true=y_data[j], y_pred=y_out, delta=0.01)
            loss = sess.run(cost)
            total_loss += loss
            print('per-frame test cost: {:.7f}'.format(loss))

        avg_test_loss = total_loss / x_data.shape[0]
        print("avg test cost: {:.7f}".format(avg_test_loss))
Example #4
    batch_L_Matlab = DownSample2DMatlab(batch_H, 1 / float(UPSCALE))  # MATLAB-imresize-style downsampling
    batch_L_Matlab = torch.clamp(batch_L_Matlab, 0, 1)

    # crop UPSCALE*2 border pixels from HR and the matching 2-pixel border from LR
    batch_H = batch_H[:, :, UPSCALE * 2:-UPSCALE * 2, UPSCALE * 2:-UPSCALE * 2]
    batch_L_Matlab = batch_L_Matlab[:, :, 2:-2, 2:-2]
    dT += time.time() - st

    st = time.time()
    opt_G.zero_grad()
    opt_D.zero_grad()

    batch_S = model_G(batch_L_Matlab)

    # Pixel loss
    loss_Pixel = Huber(batch_S, batch_H)
    loss_G = loss_Pixel

    if i > NB_ITER_MSE:
        # LPIPS loss
        loss_LPIPS, _ = model_LPIPS.forward_pair(batch_H * 2 - 1,
                                                 batch_S * 2 - 1)  # rescale [0,1] -> [-1,1]
        loss_LPIPS = torch.mean(loss_LPIPS) * L_LPIPS

        # FM and GAN losses
        # discriminator returns final outputs and per-layer feature lists
        e_S, d_S, e_Ss, d_Ss = model_D(batch_S)
        _, _, e_Hs, d_Hs = model_D(batch_H)

        # FM loss
        loss_FMs = []
        for f in range(6):
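
The excerpt is cut off inside the feature-matching loop. For orientation only, a common FM formulation compares matching discriminator feature maps with an L1 distance; this generic sketch is an assumption, not this repository's exact code:

import torch
import torch.nn.functional as F

def feature_matching_loss(feats_fake, feats_real, weight=1.0):
    # average L1 distance between corresponding feature maps;
    # detach the real features so only the generator receives gradients
    loss = 0.0
    for f_fake, f_real in zip(feats_fake, feats_real):
        loss = loss + F.l1_loss(f_fake, f_real.detach())
    return weight * loss / len(feats_fake)

With the names above, feats_fake would presumably be the e_Ss/d_Ss lists and feats_real the e_Hs/d_Hs lists.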
Example #5
def build_BUF(H_out_true, is_train, L, learning_rate):
    # build model
    stp = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]  # pad T, H and W by 1
    sp = [[0, 0], [0, 0], [1, 1], [1, 1], [0, 0]]  # pad H and W only
    # [1, 3, 3, 3, 64] [filter_depth, filter_height, filter_width, in_channels,out_channels]
    x = Conv3D(tf.pad(L, sp, mode='CONSTANT'), [1, 3, 3, 3, 64],
               [1, 1, 1, 1, 1],
               'VALID',
               name='conv1')

    F = 64  # current channel count
    G = 32  # dense-block growth rate
    # first three dense blocks: temporal padding (stp) keeps T unchanged
    for r in range(3):
        t = BatchNorm(x, is_train, name='Rbn' + str(r + 1) + 'a')
        t = tf.nn.relu(t)
        t = Conv3D(t, [1, 1, 1, F, F], [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'a')

        t = BatchNorm(t, is_train, name='Rbn' + str(r + 1) + 'b')
        t = tf.nn.relu(t)
        t = Conv3D(tf.pad(t, stp, mode='CONSTANT'), [3, 3, 3, F, G],
                   [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'b')

        x = tf.concat([x, t], 4)
        F += G
    # last three dense blocks: spatial-only padding (sp), so T shrinks by 2 each
    for r in range(3, 6):
        t = BatchNorm(x, is_train, name='Rbn' + str(r + 1) + 'a')
        t = tf.nn.relu(t)
        t = Conv3D(t, [1, 1, 1, F, F], [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'a')

        t = BatchNorm(t, is_train, name='Rbn' + str(r + 1) + 'b')
        t = tf.nn.relu(t)
        t = Conv3D(tf.pad(t, sp, mode='CONSTANT'), [3, 3, 3, F, G],
                   [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'b')

        x = tf.concat([x[:, 1:-1], t], 4)  # crop x temporally to match t
        F += G

    # shared section
    x = BatchNorm(x, is_train, name='fbn1')
    x = tf.nn.relu(x)
    x = Conv3D(tf.pad(x, sp, mode='CONSTANT'), [1, 3, 3, 256, 256],
               [1, 1, 1, 1, 1],
               'VALID',
               name='conv2')
    x = tf.nn.relu(x)

    # R: residual branch
    r = Conv3D(x, [1, 1, 1, 256, 256], [1, 1, 1, 1, 1], 'VALID', name='rconv1')
    r = tf.nn.relu(r)
    r = Conv3D(r, [1, 1, 1, 256, 3 * 16], [1, 1, 1, 1, 1],
               'VALID',
               name='rconv2')

    # F: dynamic upsampling-filter branch
    f = Conv3D(x, [1, 1, 1, 256, 512], [1, 1, 1, 1, 1], 'VALID', name='fconv1')
    f = tf.nn.relu(f)
    f = Conv3D(f, [1, 1, 1, 512, 1 * 5 * 5 * 16], [1, 1, 1, 1, 1],
               'VALID',
               name='fconv2')

    ds_f = tf.shape(f)
    f = tf.reshape(f, [ds_f[0], ds_f[1], ds_f[2], ds_f[3], 25, 16])
    f = tf.nn.softmax(f, axis=4)  # normalize each 5x5 filter (axis= replaces the deprecated dim=)

    Fx = f  # dynamic upsampling filters
    Rx = r  # residual image

    x = L
    x_c = []
    for c in range(3):
        t = DynFilter3D(x[:, T_in // 2:T_in // 2 + 1, :, :, c],
                        Fx[:, 0, :, :, :, :], [1, 5, 5])  # [B,H,W,R*R]
        t = tf.depth_to_space(t, R)  # [B,H*R,W*R,1]
        x_c += [t]
    x = tf.concat(
        x_c, axis=3
    )  # [B,H*R,W*R,3] Tensor("concat_9:0", shape=(?, ?, ?, 3), dtype=float32)

    x = tf.expand_dims(
        x, axis=1
    )  # Tensor("ExpandDims_3:0", shape=(?, 1, ?, ?, 3), dtype=float32)
    Rx = depth_to_space_3D(
        Rx, R
    )  # [B,1,H*R,W*R,3] Tensor("Reshape_6:0", shape=(?, ?, ?, ?, ?), dtype=float32)
    x += Rx  # Tensor("add_18:0", shape=(?, ?, ?, ?, 3), dtype=float32)
    x = tf.squeeze(x, axis=1)  # drop the singleton frame axis -> [B, H*R, W*R, 3]
    print(x.get_shape())
    out_H = tf.clip_by_value(x, 0, 1, name='out_H')
    cost = Huber(y_true=H_out_true, y_pred=out_H, delta=0.01)
    learning_rate = tf.Variable(float(learning_rate),
                                trainable=False,
                                dtype=tf.float32,
                                name='learning_rate')
    learning_rate_decay_op = learning_rate.assign(learning_rate * 0.9)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    return cost, learning_rate_decay_op, optimizer
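
A hypothetical driver for build_BUF, with the helpers (Conv3D, BatchNorm, DynFilter3D, depth_to_space_3D, Huber) and the globals T_in and R assumed to be in scope; the tensor names mirror the frozen-graph lookups in Example #3, and the constant values are guesses:

import tensorflow as tf

T_in, R = 7, 4  # assumed temporal window and upscale factor

L_in = tf.placeholder(tf.float32, [None, T_in, None, None, 3], name='L_in')
H_true = tf.placeholder(tf.float32, [None, None, None, 3])  # matches the squeezed out_H
is_train = tf.placeholder(tf.bool, shape=[], name='is_train')

cost, lr_decay_op, train_op = build_BUF(H_true, is_train, L_in, learning_rate=0.001)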