Example #1
    def build_model(self, n_features=256, n_res_blocks=36, scale=8, max_to_keep=100):
        print("Building CycleSR...")

        norm_input = utils.normalize_color_tf(self.input)
        norm_target = utils.normalize_color_tf(self.target)
        x = tl.InputLayer(norm_input, name='input_layer')

        # One convolution before the residual blocks to convert the input to the required feature depth
        x = tl.Conv2d(x, n_features, (3, 3), name='c')

        # Store the output of the first convolution to add later
        conv_1 = x

        scaling_factor = 0.1

        with tf.variable_scope("res_blocks"):
            for i in range(n_res_blocks):
                x = self._res_block(x, n_features, (3, 3), scale=scaling_factor, layer=i)
            x = tl.Conv2d(x, n_features, (3, 3), name='res_c')
            x = tl.ElementwiseLayer([conv_1, x], tf.add, name='res_add')

        with tf.variable_scope("upscale_module"):
            x = utils.deconv_upsample(x, n_features, kernel=(5, 5), scale=scale)

        output = tl.Conv2d(x, self.n_channels, (1, 1), act=tf.nn.relu, name='bottleneck')
        self.output = tf.clip_by_value(output.outputs, 0.0, 1.0, name='output')
        # Bicubic-resize the denormalized SR output back to 48x48, presumably for the cycle branch implied by the model name
        self.cycle_x = tf.image.resize_bicubic(self.output * 255.0 + 0.5, [48, 48])

        self.calculate_loss(norm_target, self.output)

        conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        self.sess = tf.Session(config=conf)
        self.saver = tf.train.Saver(max_to_keep=max_to_keep)
        print("Done building!")
Example #2
    def build_model(self, n_dense_blocks=8, scale=8, subpixel=False):
        print("Building DenseNet...")

        norm_input = utils.normalize_color_tf(self.input)
        norm_target = utils.normalize_color_tf(self.target)
        x = tl.InputLayer(norm_input, name='input_layer')
        '''
        Extract low-level features.
        In the paper <Densely Connected Convolutional Networks>, the filter size here is 7x7,
        followed by a max-pooling layer:
        upscale_input = tl.Conv2d(x, self.feature_size, [7, 7], act=None, name='conv0')
        upscale_input = tl.MaxPool2d(upscale_input, [3, 3], [2, 2], name='maxpool0')
        '''
        with tf.variable_scope("low_level_features"):
            x = tl.Conv2d(x, 128, [3, 3], act=None, name='conv0')

        conv1 = x
        with tf.variable_scope("dense_blocks"):
            for i in range(n_dense_blocks):
                x = self.dense_block(x, 16, 8, (3, 3), layer=i)
                x = tl.ConcatLayer([conv1, x],
                                   concat_dim=3,
                                   name='dense%d/concat_output' % i)

        with tf.variable_scope("bottleneck_layer"):
            '''
            Bottleneck layer.
            In the paper <Image Super-Resolution Using Dense Skip Connections>,
            the channel count here is 256.
            '''
            x = tl.Conv2d(x, 256, (1, 1), act=None, name='bottleneck')

        with tf.variable_scope("upscale_module"):
            '''
            The paper <Densely Connected Convolutional Networks> uses deconv layers
            to upscale the output; we provide two methods here: deconv and subpixel.
            '''
            if subpixel:
                x = utils.subpixel_upsample(x, 128, scale)
            else:
                x = utils.deconv_upsample(x, 128, (3, 3), scale)

        with tf.variable_scope("reconstruction_layer"):
            output = tl.Conv2d(x,
                               self.n_channels, (3, 3),
                               act=tf.nn.relu,
                               name='reconstruction')

        self.output = tf.clip_by_value(output.outputs, 0.0, 1.0, name="output")
        self.calculate_loss(norm_target, self.output)
        conf = tf.ConfigProto(allow_soft_placement=True,
                              log_device_placement=False)
        self.sess = tf.Session(config=conf)
        self.saver = tf.train.Saver()
        print("Done building!")
Example #3
    def buildModel(self):
        print("Building EDSR...")
        self.norm_input = utils.normalize_color_tf(self.input)
        self.norm_target = utils.normalize_color_tf(self.target)

        #input layer
        x = tl.InputLayer(self.norm_input, name='inputlayer')

        # One convolution before the residual blocks to convert the input to the required feature depth
        # Pruning of the first layer would take effect here: the number of filters is reduced
        #x = tl.Conv2d(x, self.feature_size-self.prunedlist[0], [3, 3], name='c')
        x = tl.Conv2d(x, self.feature_size, [3, 3], name='c')

        # Store the output of the first convolution to add later
        conv_1 = x

        scaling_factor = 0.1
        # Add the residual blocks to the model
        for i in range(self.num_layers):
            x = self.__resBlock(x, self.feature_size, scale=scaling_factor, layer=i)
        # One more convolution, and then we add the output of our first conv layer
        x = tl.Conv2d(x, self.feature_size, [3, 3], act=None, name='m1')
        x = tl.ElementwiseLayer([conv_1, x], tf.add, name='res_add')

        # Each stride-2 transposed convolution doubles the spatial resolution:
        # two deconvs give x4; for scale 8 a third deconv is added below.
        x = TransposedConv2dLayer(x, self.feature_size, [5, 5], [2, 2], name='deconv_1')
        x = tl.Conv2d(x, self.feature_size, [3, 3], act=tf.nn.relu, name='deconv_conv_1')
        x = TransposedConv2dLayer(x, self.feature_size, [5, 5], [2, 2], name='deconv_2')
        if self.scale == 8:
            x = tl.Conv2d(x, self.feature_size, [3, 3], act=tf.nn.relu, name='deconv_conv_2')
            x = TransposedConv2dLayer(x, self.feature_size, [5, 5], [2, 2], name='deconv_3')

        # One final convolution on the upsampling output
        output = tl.Conv2d(x, self.output_channels, [1, 1], act=tf.nn.relu, name='lastLayer')
        # output = tl.Conv2d(x, self.output_channels, [1, 1], act=None, name='lastLayer')
        self.output = tf.clip_by_value(output.outputs, 0.0, 1.0)

        self.cacuLoss()

        # Tensorflow graph setup... session, saver, etc.
        session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        session_conf.gpu_options.allocator_type = 'BFC'
        self.sess = tf.Session(config=session_conf)
        self.saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=100)
        print("Done building!")