def __init__(self, filters, config):
    """Build the DTN sub-modules: a stem conv, 7 CRUs, 7 TRUs and 8 SFLs.

    Args:
        filters: channel count forwarded to every sub-module constructor.
        config: experiment configuration; only ``config.TRU_PARAMETERS``
            (keys ``'alpha'`` and ``'beta'``) is read here, and the object
            itself is kept on ``self.config``.
    """
    super(DTN, self).__init__()
    self.config = config
    # NOTE(review): the original also built an unused local
    # ``layer = [1, 2, 4, 8, 16]``; removed as dead code.
    self.conv1 = Conv(filters, 5, apply_batchnorm=False)  # stem convolution

    # The repeated cruN / truN / sflN assignments are generated in loops.
    # setattr goes through the module base class's __setattr__, so the
    # sub-modules are registered exactly as direct assignment would
    # (true for torch.nn.Module and tf.keras tracking — confirm base class).

    # CRU units: attributes cru0 .. cru6.
    for i in range(7):
        setattr(self, 'cru%d' % i, CRU(filters))

    # TRU units: attributes tru0 .. tru6, each tagged with an id '1'..'7'.
    alpha = config.TRU_PARAMETERS['alpha']
    beta = config.TRU_PARAMETERS['beta']
    for i in range(7):
        setattr(self, 'tru%d' % i, TRU(filters, str(i + 1), alpha, beta))

    # SFL units: attributes sfl0 .. sfl7.
    for i in range(8):
        setattr(self, 'sfl%d' % i, SFL(filters))
def Disc(x, training_nn, scope):
    """Discriminator: three Downsample stages and two linear 1-channel heads.

    Args:
        x: input image batch, assumed RGB in the last axis; its YUV
           conversion is concatenated channel-wise, so downstream layers
           see twice the input channels.
        training_nn: training-mode flag forwarded to Conv/Downsample.
        scope: variable-scope prefix used to name every layer.

    Returns:
        Tuple ``(x4l, x4s)`` of two 1-channel maps produced without
        activation or normalization (raw logits).
    """
    nlayers = [16, 32, 64, 96]
    x = tf.concat([x, tf.image.rgb_to_yuv(x)], axis=3)
    # Three strided blocks. The plain Conv layers that used to sit before
    # each Downsample ('/conv1', '/conv3', '/conv5') were commented-out
    # dead code and have been removed; live layer names are unchanged.
    x1 = Downsample(x, nlayers[1], scope+'/conv2', training_nn)
    x2 = Downsample(x1, nlayers[2], scope+'/conv4', training_nn)
    x3 = Downsample(x2, nlayers[3], scope+'/conv6', training_nn)
    # Head: one shared Conv, then two parallel 1-channel projections.
    x4 = Conv(x3, nlayers[3], scope+'/conv7', training_nn)
    x4l = Conv(x4, 1, scope+'/conv8', training_nn, act=False, norm=False)
    x4s = Conv(x4, 1, scope+'/conv9', training_nn, act=False, norm=False)
    return x4l, x4s
def Gen(x, training_nn, scope):
    """Generator / estimation network.

    Args:
        x: input image batch, assumed RGB; concatenated with its YUV
           conversion before the stem conv.
        training_nn: training-mode flag forwarded to every layer helper.
        scope: variable-scope prefix used to name every layer.

    Returns:
        Tuple ``(x5, s, b, C, T)``:
          x5 -- 1-channel map from the ESR branch (no act/norm),
          s  -- per-image mean of channels 3:6 of the coarsest head,
          b  -- per-image mean of channels 0:3 of the coarsest head,
          C  -- 2x2 average-pooled mid-resolution head,
          T  -- finest-resolution head (tanh output).
    """
    nlayers = [16, 64, 64, 96]
    x = tf.concat([x, tf.image.rgb_to_yuv(x)], axis=3)
    x0 = Conv(x, nlayers[1], scope + '/conv0', training_nn)

    # Encoder: three identical Conv-Conv-Downsample blocks; keep each
    # block's output for the decoder skips and the ESR branch.
    # Layer names reproduce the original '/conv1' .. '/conv9' sequence.
    feats = []
    h = x0
    for blk in range(3):
        base = 3 * blk
        h = Conv(h, nlayers[2], scope + '/conv' + str(base + 1), training_nn)
        h = Conv(h, nlayers[3], scope + '/conv' + str(base + 2), training_nn)
        h = Downsample(h, nlayers[2], scope + '/conv' + str(base + 3),
                       training_nn)
        feats.append(h)
    x1, x2, x3 = feats

    # Decoder with skip connections from the encoder blocks.
    u1 = Upsample(x3, nlayers[1], scope + '/up1', training_nn)
    u2 = Upsample(tf.concat([u1, x2], 3), nlayers[1], scope + '/up2',
                  training_nn)
    u3 = Upsample(tf.concat([u2, x1], 3), nlayers[1], scope + '/up3',
                  training_nn)

    def _head(u, tag, channels):
        # Conv -> linear projection -> tanh; last conv has no act/norm.
        # Scope names: inner layer scope+'/'+tag, outer scope+'/n'+tag,
        # matching the original '/n1'..'/n3' and '/nn1'..'/nn3' pairs.
        inner = Conv(u, nlayers[0], scope + '/' + tag, training_nn)
        proj = Conv(inner, channels, scope + '/n' + tag, training_nn,
                    act=False, norm=False)
        return tf.nn.tanh(proj)

    n1 = _head(u1, 'n1', 6)
    n2 = _head(u2, 'n2', 3)
    n3 = _head(u3, 'n3', 3)

    # Global statistics from the coarsest head.
    s = tf.reduce_mean(n1[:, :, :, 3:6], axis=[1, 2], keepdims=True)
    b = tf.reduce_mean(n1[:, :, :, :3], axis=[1, 2], keepdims=True)
    C = tf.nn.avg_pool(n2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    T = n3

    # ESR branch: resize the three encoder features to 32x32, fuse them,
    # and reduce to a single-channel map.
    resized = [tf.image.resize_images(f, [32, 32]) for f in (x1, x2, x3)]
    maps = tf.concat(resized, 3)
    x4 = Conv(maps, nlayers[2], scope + '/conv10', training_nn,
              apply_dropout=True)
    x4 = Conv(x4, nlayers[1], scope + '/conv11', training_nn,
              apply_dropout=True)
    x5 = Conv(x4, 1, scope + '/conv12', training_nn, act=False, norm=False)
    return x5, s, b, C, T