Example #1
    def gen_F(self, feature_size, num_layers, scale, y):
        image_input, mean_x = self.preprossessing(y, self.img_size)
        # One convolution before the res blocks, converting to the required
        # feature depth
        # conv: input (64x64) -> output (64x64), feature_size channels
        y = slim.conv2d(image_input, feature_size, [3, 3])

        # Store the output of the first convolution to add later
        conv_1 = y

        scaling_factor = 0.1

        # Add the residual blocks to the model
        for i in range(num_layers):  # 32 blocks: conv_1 ... conv_64
            y = utils.resBlock(y, feature_size, scale=scaling_factor)

        # One more convolution, then add the output of the first conv layer
        # conv_65 (this generator maps HR -> LR)
        y = slim.conv2d(y, feature_size, [3, 3])
        y += conv_1  # add back the residual

        # Downsample output of the convolution
        y = utils.downsample(y, scale, feature_size,
                             None)  # conv_66 conv_67 scale = 2
        y = tf.clip_by_value(y + mean_x, 0.0, 255.0)

        return y
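Note: `utils.resBlock` itself is not shown in any of these examples. A minimal sketch consistent with how it is called here, and with the conv -> ReLU -> conv diagram quoted in Examples #3 and #6 below, might be (an assumption, not the verbatim helper):

import tensorflow as tf
import tensorflow.contrib.slim as slim

def resBlock(x, channels=64, kernel_size=[3, 3], scale=1.0):
    # Hypothetical sketch: conv -> ReLU -> conv, residual scaling,
    # then the identity skip connection (see the ASCII diagram below).
    tmp = slim.conv2d(x, channels, kernel_size, activation_fn=None)
    tmp = tf.nn.relu(tmp)
    tmp = slim.conv2d(tmp, channels, kernel_size, activation_fn=None)
    tmp *= scale
    return x + tmp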
Example #2
def EDSR_v1(self, image_input, num_channels, num_block):

    x = slim.conv2d(image_input, num_channels, [3, 3])

    conv_1 = x

    scaling_factor = 0.1

    # Add the residual blocks to the model
    for i in range(num_block):
        x = utils.resBlock(x, num_channels, scale=scaling_factor)

    # One more convolution, and then we add the output of our first conv layer
    x = slim.conv2d(x, num_channels, [3, 3])
    x += conv_1

    # Upsample output of the convolution
    # x = utils.upsample(x, self.scale, 256, None)

    # TODO: try a new upsampling path
    x = slim.conv2d(x,
                    self.output_channels * self.scale * self.scale, [3, 3],
                    activation_fn=tf.nn.tanh)
    x = tf.depth_to_space(x, self.scale)

    # One final convolution on the upsampling output
    output = x  # slim.conv2d(x,output_channels,[3,3])
    return output
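`tf.depth_to_space` is the sub-pixel (pixel-shuffle) rearrangement: blocks of `scale**2` channels are moved into a `scale x scale` spatial neighbourhood, so the channel count must be divisible by `scale**2`. A quick shape check for `scale = 2`:

import tensorflow as tf

t = tf.zeros([1, 32, 32, 12])   # NHWC input
u = tf.depth_to_space(t, 2)     # channels -> space
print(u.get_shape())            # (1, 64, 64, 3)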
Example #3
    def gen_G(self, feature_size, num_layers, scale, x):
        image_input, mean_x = self.preprossessing(x, int(self.img_size / 2))
        # One convolution before the res blocks, converting to the required
        # feature depth
        # conv: input (32x32) -> output (32x32)
        x = slim.conv2d(image_input, feature_size, [3, 3])

        # Store the output of the first convolution to add later
        conv_1 = x
        """
        This creates `num_layers` number of resBlocks
        a resBlock is defined in the paper as
        (excuse the ugly ASCII graph)
        x
        |\
        | \
        |  conv2d
        |  relu
        |  conv2d
        | /
        |/
        + (addition here)
        |
        result
        """
        """
        Doing scaling here as mentioned in the paper:

        `we found that increasing the number of feature
        maps above a certain level would make the training procedure
        numerically unstable. A similar phenomenon was
        reported by Szegedy et al. We resolve this issue by
        adopting the residual scaling with factor 0.1. In each
        residual block, constant scaling layers are placed after the
        last convolution layers. These modules stabilize the training
        procedure greatly when using a large number of filters.
        In the test phase, this layer can be integrated into the previous
        convolution layer for the computational efficiency.'

        """
        scaling_factor = 0.1

        # Add the residual blocks to the model
        for i in range(num_layers):  # 32 blocks: conv_1 ... conv_64
            x = utils.resBlock(x, feature_size, scale=scaling_factor)

        # One more convolution, then add the output of the first conv layer
        # conv_65 (this generator maps LR -> HR)
        x = slim.conv2d(x, feature_size, [3, 3])
        x += conv_1  # add back the residual

        # Upsample output of the convolution
        x = utils.upsample(x, scale, feature_size, None)  # conv_66 conv_67
        x = tf.clip_by_value(x + mean_x, 0.0, 255.0)

        return x
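`utils.upsample` is also external. A minimal sub-pixel sketch, assuming it follows the conv + `tf.depth_to_space` pattern of Example #2 (the real helper may differ, e.g. chaining two x2 steps for scale 4):

def upsample(x, scale=2, features=64, activation=tf.nn.relu):
    # Hypothetical sketch: widen channels by scale**2, then pixel-shuffle.
    x = slim.conv2d(x, features * scale * scale, [3, 3],
                    activation_fn=activation)
    return tf.depth_to_space(x, scale)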
Example #4
def discriminatorResNet(x, hidden_num, reuse, kern_size=3):
    with tf.variable_scope("D") as vs:
        if reuse:
            vs.reuse_variables()

        output = tcl.conv2d(x, hidden_num // 2, 1)
        for i in range(5):
            output = resBlock(output, hidden_num // 2, hidden_num // 2,
                              kern_size)
        output = resBlock(output,
                          hidden_num // 2,
                          hidden_num,
                          kern_size,
                          resample='down')
        for i in range(6):
            output = resBlock(output, hidden_num, hidden_num, kern_size)
        output = resBlock(output,
                          hidden_num,
                          hidden_num * 2,
                          kern_size,
                          resample='down')
        for i in range(6):
            output = resBlock(output, hidden_num * 2, hidden_num * 2,
                              kern_size)
        output = resBlock(output,
                          hidden_num * 2,
                          hidden_num * 4,
                          kern_size,
                          resample='down')
        for i in range(6):
            output = resBlock(output, hidden_num * 4, hidden_num * 4,
                              kern_size)
        output = resBlock(output,
                          hidden_num * 4,
                          hidden_num * 8,
                          kern_size,
                          resample='down')
        for i in range(6):
            output = resBlock(output, hidden_num * 8, hidden_num * 8,
                              kern_size)

        out_flt = tcl.flatten(output)  # data_format: 'NHWC'
        disc_out = tcl.fully_connected(out_flt, 1, activation_fn=None)

    d_vars = tf.contrib.framework.get_variables(vs)
    return disc_out, d_vars
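The `resBlock(x, in_ch, out_ch, kern_size, resample=...)` used by this discriminator (and by the generator in Example #11) is a different helper from `utils.resBlock` above. A plausible sketch in the WGAN-GP ResNet style, purely an assumption since the original is not shown:

import tensorflow as tf
import tensorflow.contrib.layers as tcl

def resBlock(x, in_ch, out_ch, kern_size, resample=None):
    # Hypothetical sketch: two convs, optional nearest-neighbour up-sampling
    # or mean-pool down-sampling, and a 1x1 projection shortcut when the
    # channel count changes.
    shortcut = x
    if resample == 'up':
        new_size = [tf.shape(x)[1] * 2, tf.shape(x)[2] * 2]
        x = tf.image.resize_nearest_neighbor(x, new_size)
        shortcut = x
    h = tcl.conv2d(x, out_ch, kern_size)
    h = tcl.conv2d(h, out_ch, kern_size, activation_fn=None)
    if resample == 'down':
        h = tf.layers.average_pooling2d(h, 2, 2)
        shortcut = tf.layers.average_pooling2d(shortcut, 2, 2)
    if in_ch != out_ch:
        shortcut = tcl.conv2d(shortcut, out_ch, 1, activation_fn=None)
    return tf.nn.relu(h + shortcut)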
Example #5
	def resnet(self, image_input, feature_size, num_layers, scaling_factor=0.1, reuse=False, scope=''):
		with tf.variable_scope("resnet"+scope, reuse=reuse) as vs:
			x = slim.conv2d(image_input, feature_size, [3,3])
		
			conv_1 = x	
			
			for i in range(num_layers):
				x = utils.resBlock(x, feature_size, scale=scaling_factor)
			x = slim.conv2d(x, feature_size, [3,3])
			x += conv_1
		
		return x
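Because the trunk lives under its own variable scope, a second call with `reuse=True` shares all weights. Illustrative usage (the input tensors `input_a`/`input_b` are hypothetical):

# Two inputs, one shared trunk; input_a / input_b are placeholders here.
feat_a = self.resnet(input_a, feature_size=64, num_layers=16, scope='_sr')
feat_b = self.resnet(input_b, feature_size=64, num_layers=16,
                     reuse=True, scope='_sr')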
Example #6
    def __init__(self,
                 img_size=32,
                 num_layers=32,
                 feature_size=256,
                 scale=2,
                 output_channels=3):
        print("Building EDSR...")
        #Placeholder for image inputs
        self.input = x = tf.placeholder(
            tf.float32, [None, img_size, img_size, output_channels])
        #Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(
            tf.float32,
            [None, img_size * scale, img_size * scale, output_channels])

        #One convolution before res blocks and to convert to required feature depth
        x = slim.conv2d(x, feature_size, [3, 3])
        """
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""
        for i in range(num_layers):
            x = utils.resBlock(x, feature_size)

        #Two more convolutions on the output of the res blocks
        x = slim.conv2d(x, feature_size, [3, 3])
        x = slim.conv2d(x, output_channels, [3, 3])

        #Upsample output of the convolution
        x = utils.upsample(x, scale, feature_size, None)

        #One final convolution on the upsampling output
        self.out = output = slim.conv2d(x, output_channels, [3, 3])

        self.loss = loss = tf.reduce_mean(
            tf.losses.absolute_difference(y, output))

        #Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Example #7
        def model():

            # Feature depth must be 256 throughout so the shapes of the skip
            # connection and the res blocks agree.
            x = tf.contrib.layers.conv2d(images, 256, kernel_size=(3, 3), stride=1, padding='SAME')
            conv1 = x

            for i in range(64):
                x = resBlock(x, 256, 0.1)

            x = tf.contrib.layers.conv2d(x, 256, kernel_size=(3, 3), stride=1, padding='SAME')
            x += conv1
            x = upsample(x, 3, 256, None)
            out = x

            return out
Example #8
    def model(self):

        # Use self.feature_size throughout so the skip connection and the
        # res blocks have matching shapes.
        x = tf.contrib.layers.conv2d(self.images,
                                     self.feature_size,
                                     kernel_size=(3, 3),
                                     stride=1,
                                     padding='SAME')
        conv1 = x

        for i in range(64):
            x = resBlock(x, self.feature_size, scale=self.scaling_factor)

        x = tf.contrib.layers.conv2d(x,
                                     self.feature_size,
                                     kernel_size=(3, 3),
                                     stride=1,
                                     padding='SAME')
        x += conv1
        x = upsample(x, self.scale, self.feature_size, None)
        out = x

        return out
Example #9
	def __init__(self,img_size=32,num_layers=32,feature_size=256,scale=2,output_channels=3):
		print("Building EDSR...")
		#Placeholder for image inputs
		self.input = x = tf.placeholder(tf.float32,[None,img_size,img_size,output_channels])
		#Placeholder for upscaled image ground-truth
		self.target = y = tf.placeholder(tf.float32,[None,img_size*scale,img_size*scale,output_channels])
	
		"""
		Preprocessing as mentioned in the paper, by subtracting the mean
		However, the subtract the mean of the entire dataset they use. As of
		now, I am subtracting the mean of each batch
		"""
		mean_x = tf.reduce_mean(self.input)
		image_input = x - mean_x
		mean_y = tf.reduce_mean(self.target)
		image_target = y - mean_y

		#One convolution before res blocks and to convert to required feature depth
		x = slim.conv2d(image_input,feature_size,[3,3])
	
		#Store the output of the first convolution to add later
		conv_1 = x	

		"""
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""

		"""
		Doing scaling here as mentioned in the paper:

		`we found that increasing the number of feature
		maps above a certain level would make the training procedure
		numerically unstable. A similar phenomenon was
		reported by Szegedy et al. We resolve this issue by
		adopting the residual scaling with factor 0.1. In each
		residual block, constant scaling layers are placed after the
		last convolution layers. These modules stabilize the training
		procedure greatly when using a large number of filters.
		In the test phase, this layer can be integrated into the previous
		convolution layer for the computational efficiency.'

		"""
		scaling_factor = 0.1
		
		#Add the residual blocks to the model
		for i in range(num_layers):
			x = utils.resBlock(x,feature_size,scale=scaling_factor)

		#One more convolution, and then we add the output of our first conv layer
		x = slim.conv2d(x,feature_size,[3,3])
		x += conv_1
		
		#Upsample output of the convolution		
		x = utils.upsample(x,scale,feature_size,None)

		#One final convolution on the upsampling output
		output = x  # slim.conv2d(x,output_channels,[3,3])
		self.out = tf.clip_by_value(output+mean_x,0.0,255.0)

		self.loss = loss = tf.reduce_mean(tf.losses.absolute_difference(image_target,output))
	
		#Calculating Peak Signal-to-noise-ratio
		#Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
		mse = tf.reduce_mean(tf.squared_difference(image_target,output))	
		PSNR = tf.constant(255**2,dtype=tf.float32)/mse
		PSNR = tf.constant(10,dtype=tf.float32)*utils.log10(PSNR)
	
		#Scalar to keep track for loss
		tf.summary.scalar("loss",self.loss)
		tf.summary.scalar("PSNR",PSNR)
		#Image summaries for input, target, and output
		tf.summary.image("input_image",tf.cast(self.input,tf.uint8))
		tf.summary.image("target_image",tf.cast(self.target,tf.uint8))
		tf.summary.image("output_image",tf.cast(self.out,tf.uint8))
		
		#Tensorflow graph setup... session, saver, etc.
		self.sess = tf.Session()
		self.saver = tf.train.Saver()
		print("Done building!")
Example #10
    def __init__(self):
        print("Building MYSR...")
        self.imgsize = config.TRAIN.imgsize
        self.output_channels = config.TRAIN.output_channels
        self.scale = config.TRAIN.scale

        # Placeholder for image inputs
        self.input = x = tf.placeholder(
            tf.float32, [None, None, None, self.output_channels])
        # Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(
            tf.float32, [None, None, None, self.output_channels])

        # Input preprocessing
        # result = result / (255. / 2.)
        # TODO: a ReLU comes later; check whether squashing the input to [-1, 1] is appropriate
        # result = result - 1.
        image_input = x / (255. / 2.)
        image_input = image_input - 1
        image_target = y / (255. / 2.)
        image_target = image_target - 1

        # Even squashed to [-1, 1], the conv layers still seem to adapt fine;
        # just don't use ReLU on the last layer, since its range is [0, +inf)
        # ENCODER
        # Entry point
        x = slim.conv2d(image_input, 64, [5, 5])  # a slightly larger kernel at the entry?
        conv_1 = x

        # ENCODER-resBlock-64
        scaling_factor = 1
        for i in range(3):
            x = utils.resBlock(x, 64, scale=scaling_factor)

        x = slim.conv2d(x, 128, [3, 3])

        # ENCODER-resBlock-128
        scaling_factor = 1
        for i in range(4):
            x = utils.resBlock(x, 128, scale=scaling_factor)

        x = slim.conv2d(x, 256, [3, 3])

        # ENCODER-resBlock-256
        scaling_factor = 1
        for i in range(5):
            x = utils.resBlock(x, 256, scale=scaling_factor)

        # Upsample output of the convolution
        x = utils.upsample(x, self.scale, 128, None)

        # DECODER-resBlock-64
        scaling_factor = 0.1
        for i in range(4):
            x = utils.resBlock(x, 64, scale=scaling_factor)

        # DECODER-resBlock-32
        scaling_factor = 0.1
        for i in range(3):
            x = utils.resBlock(x, 32, scale=scaling_factor)

        # DECODER-resBlock-16
        scaling_factor = 0.1
        for i in range(2):
            x = utils.resBlock(x, 16, scale=scaling_factor)

        # DECODER-resBlock-8
        scaling_factor = 0.1
        for i in range(1):
            x = utils.resBlock(x, 8, scale=scaling_factor)

        # DECODER output
        # TODO: a plain conv here may break the progressive-refinement structure,
        # and the reduced number of feature maps may hurt fine detail
        x = slim.conv2d(x, self.output_channels, [3, 3])

        output = x

        # Result
        self.out = tf.clip_by_value((output + 1) * (255. / 2.), 0.0, 255.0)
        self.loss = loss = tf.reduce_mean(
            tf.losses.absolute_difference(image_target, output))

        # Calculating Peak Signal-to-noise-ratio
        # Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
        mse = tf.reduce_mean(tf.squared_difference(image_target, output))
        PSNR = tf.constant(255**2, dtype=tf.float32) / mse
        PSNR = tf.constant(10, dtype=tf.float32) * utils.log10(PSNR)

        # Scalar to keep track for loss
        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("PSNR", PSNR)
        # Image summaries for input, target, and output
        tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
        tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
        tf.summary.image("output_image", tf.cast(self.out, tf.uint8))

        # Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Example #11
def generatorResNet(z,
                    hidden_num,
                    output_dim,
                    out_channels,
                    reuse,
                    kern_size=3):
    '''
    Default values:
    :param reuse: True
    :param z: 128
    :param hidden_num: 64
    :param output_dim: 64
    :param kern_size: 3
    :param out_channels: 3
    :return:
    '''
    with tf.variable_scope("G") as vs:
        if reuse:
            vs.reuse_variables()

        fc = tcl.fully_connected(z,
                                 hidden_num * 8 * (output_dim // 16) *
                                 (output_dim // 16),
                                 activation_fn=None)
        output = tf.reshape(
            fc, [-1, output_dim // 16, output_dim // 16,
                 hidden_num * 8])  # data_format: 'NHWC'

        for i in range(6):
            output = resBlock(output, hidden_num * 8, hidden_num * 8,
                              kern_size)
        output = resBlock(output,
                          hidden_num * 8,
                          hidden_num * 4,
                          kern_size,
                          resample='up')
        for i in range(6):
            output = resBlock(output, hidden_num * 4, hidden_num * 4,
                              kern_size)
        output = resBlock(output,
                          hidden_num * 4,
                          hidden_num * 2,
                          kern_size,
                          resample='up')
        for i in range(6):
            output = resBlock(output, hidden_num * 2, hidden_num * 2,
                              kern_size)
        output = resBlock(output,
                          hidden_num * 2,
                          hidden_num,
                          kern_size,
                          resample='up')
        for i in range(6):
            output = resBlock(output, hidden_num, hidden_num, kern_size)
        output = resBlock(output,
                          hidden_num,
                          hidden_num // 2,
                          kern_size,
                          resample='up')
        for i in range(5):
            output = resBlock(output, hidden_num // 2, hidden_num // 2,
                              kern_size)

        gen_out = tcl.conv2d(output, out_channels, 1, activation_fn=tf.nn.tanh)

    g_vars = tf.contrib.framework.get_variables(vs)
    return gen_out, g_vars
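Illustrative wiring with the discriminator from Example #4; the batch size is arbitrary and the rest follows the docstring defaults (128-D noise, 64x64 RGB output: the fc reshape starts at 4x4 and the four 'up' blocks give 4 -> 8 -> 16 -> 32 -> 64):

# Hypothetical usage: sample noise, generate, then score the fakes.
z = tf.random_normal([16, 128])
fake, g_vars = generatorResNet(z, hidden_num=64, output_dim=64,
                               out_channels=3, reuse=False)
d_fake, d_vars = discriminatorResNet(fake, hidden_num=64, reuse=False)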
Example #12
    def __init__(self,
                 img_size=32,
                 num_layers=32,
                 feature_size=256,
                 scale=2,
                 output_channels=3):
        print("Building EDSR...")
        #Placeholder for image inputs
        self.input = x = tf.placeholder(
            tf.float32, [None, img_size, img_size, output_channels])
        #Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(
            tf.float32,
            [None, img_size * scale, img_size * scale, output_channels])

        #One convolution before res blocks and to convert to required feature depth
        x = slim.conv2d(x, feature_size, [3, 3])

        #Store the output of the first convolution to add later
        conv_1 = x
        """
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""
        """
		Doing scaling here as mentioned in the paper:

		`we found that increasing the number of feature
		maps above a certain level would make the training procedure
		numerically unstable. A similar phenomenon was
		reported by Szegedy et al. We resolve this issue by
		adopting the residual scaling with factor 0.1. In each
		residual block, constant scaling layers are placed after the
		last convolution layers. These modules stabilize the training
		procedure greatly when using a large number of filters.
		In the test phase, this layer can be integrated into the previous
		convolution layer for the computational efficiency.'

		"""
        scaling_factor = 1 if feature_size <= 64 else 0.1

        #Add the residual blocks to the model
        for i in range(num_layers):
            x = utils.resBlock(x, feature_size, scale=scaling_factor)

        #One more convolution, and then we add the output of our first conv layer
        x = slim.conv2d(x, feature_size, [3, 3])
        x += conv_1

        #Upsample output of the convolution
        x = utils.upsample(x, scale, feature_size, None)

        #One final convolution on the upsampling output
        self.out = output = x  # slim.conv2d(x,output_channels,[3,3])

        self.loss = loss = tf.reduce_mean(
            tf.losses.absolute_difference(y, output))

        #Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Example #13
    def __init__(self):
        print("Building MYSR...")
        self.is_continued = False
        self.imgsize = config.TRAIN.imgsize
        self.output_channels = config.TRAIN.output_channels
        self.scale = config.TRAIN.scale
        self.epoch = config.TRAIN.n_epoch
        self.batch_size = config.TRAIN.batch_size
        self.save_model_dir = config.TRAIN.save_model_dir

        # Placeholder for image inputs
        # self.input = x = tf.placeholder(tf.float32, [None, self.imgsize, self.imgsize, self.output_channels])
        self.input = x = tf.placeholder(
            tf.float32, [None, None, None, self.output_channels])
        # Placeholder for upscaled image ground-truth
        # self.target = y = tf.placeholder(tf.float32, [None, self.imgsize*self.scale, self.imgsize*self.scale, self.output_channels])
        self.target = y = tf.placeholder(
            tf.float32, [None, None, None, self.output_channels])

        # Validation-set placeholders
        self.valid_input = []
        self.valid_target = []

        # Input preprocessing
        # result = result / (255. / 2.)
        # TODO: a ReLU comes later; check whether squashing the input to [-1, 1] is appropriate
        # result = result - 1.
        # image_input = x - (255. / 2.)
        # image_input = image_input - 1
        # image_target = y - (255. / 2.)
        # image_target = image_target - 1

        image_input = x / (255. / 2.)
        image_input = image_input - 1
        image_target = y / (255. / 2.)
        image_target = image_target - 1

        # Even squashed to [-1, 1], the conv layers still seem to adapt fine;
        # just don't use ReLU on the last layer, since its range is [0, +inf)
        # ENCODER
        # Entry point
        # One convolution before res blocks and to convert to required feature depth
        x = slim.conv2d(image_input, 256, [3, 3])

        conv_1 = x

        scaling_factor = 0.1

        # Add the residual blocks to the model
        for i in range(12):
            x = utils.resBlock(x, 256, scale=scaling_factor)

        # One more convolution, and then we add the output of our first conv layer
        x = slim.conv2d(x, 256, [3, 3])
        x += conv_1

        # Upsample output of the convolution
        # x = utils.upsample(x, self.scale, 256, None)

        # TODO: try a new upsampling path
        x = tf.depth_to_space(x, self.scale)
        x = slim.conv2d(x,
                        self.output_channels, [3, 3],
                        activation_fn=tf.nn.tanh)

        # One final convolution on the upsampling output
        output = x  # slim.conv2d(x,output_channels,[3,3])

        # Result; note the values used in preprocessing
        # self.out = tf.clip_by_value(output+(255. / 2.), 0.0, 255.0)
        self.out = tf.clip_by_value((output + 1) * (255. / 2.), 0.0, 255.0)
        self.loss = loss = tf.reduce_mean(
            tf.losses.absolute_difference(image_target, output))

        # Calculating Peak Signal-to-noise-ratio
        # Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
        mse = tf.reduce_mean(
            tf.squared_difference((image_target + 1) * (255. / 2.),
                                  tf.clip_by_value((output + 1) * (255. / 2.),
                                                   0.0, 255.0)))
        PSNR = tf.constant(255**2, dtype=tf.float32) / mse
        PSNR = tf.constant(10, dtype=tf.float32) * utils.log10(PSNR)

        # Scalar to keep track for loss
        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("PSNR", PSNR)
        # Image summaries for input, target, and output
        tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
        tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
        tf.summary.image("output_image", tf.cast(self.out, tf.uint8))

        # Tensorflow graph setup... session, saver, etc.
        config_tf = tf.ConfigProto()
        config_tf.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config_tf)
        self.saver = tf.train.Saver()
        print("Done building!")
Example #14
File: model.py  Project: chengyake/karch
	def __init__(self,img_size=32,num_layers=32,feature_size=128,scale=2,output_channels=1):
		print("Building EDSR...")
		self.img_size = img_size
		self.scale = scale
		self.output_channels = output_channels

		#Placeholder for image inputs
		self.input = x = tf.placeholder(tf.float32,[None,img_size,img_size,output_channels])
		#Placeholder for upscaled image ground-truth
		self.target = y = tf.placeholder(tf.float32,[None,img_size*scale,img_size*scale,output_channels])
	
		"""
		Preprocessing as mentioned in the paper, by subtracting the mean
		However, the subtract the mean of the entire dataset they use. As of
		now, I am subtracting the mean of each batch
		"""
		mean_x = tf.reduce_mean(self.input)
		image_input = x - mean_x
		mean_y = tf.reduce_mean(self.target)
		image_target = y - mean_y

		#One convolution before res blocks and to convert to required feature depth
		x = slim.conv2d(image_input,feature_size,[3,3])
	
		#Store the output of the first convolution to add later
		conv_1 = x	

		"""
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""

		"""
		Doing scaling here as mentioned in the paper:

		`we found that increasing the number of feature
		maps above a certain level would make the training procedure
		numerically unstable. A similar phenomenon was
		reported by Szegedy et al. We resolve this issue by
		adopting the residual scaling with factor 0.1. In each
		residual block, constant scaling layers are placed after the
		last convolution layers. These modules stabilize the training
		procedure greatly when using a large number of filters.
		In the test phase, this layer can be integrated into the previous
		convolution layer for the computational efficiency.'

		"""
		scaling_factor = 0.1
		
		#Add the residual blocks to the model
		for i in range(num_layers):
			x = utils.resBlock(x,feature_size,scale=scaling_factor)

		#One more convolution, and then we add the output of our first conv layer
		x = slim.conv2d(x,feature_size,[3,3])
		x += conv_1
		
		#Upsample output of the convolution		
		x = utils.upsample(x,scale,feature_size,None)

		#One final convolution on the upsampling output
		output = x  # slim.conv2d(x,output_channels,[3,3])
		self.out = tf.clip_by_value(output+mean_x,0.0,255.0)
		self.loss = loss = tf.reduce_mean(tf.losses.absolute_difference(image_target,output))
	
		#Calculating Peak Signal-to-noise-ratio
		#Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
		mse = tf.reduce_mean(tf.squared_difference(image_target,output))	
		PSNR = tf.constant(255**2,dtype=tf.float32)/mse
		PSNR = tf.constant(10,dtype=tf.float32)*utils.log10(PSNR)
	
		#Scalar to keep track for loss
		tf.summary.scalar("loss",self.loss)
		tf.summary.scalar("PSNR",PSNR)
		#Image summaries for input, target, and output
		tf.summary.image("input_image",tf.cast(self.input,tf.uint8))
		tf.summary.image("target_image",tf.cast(self.target,tf.uint8))
		tf.summary.image("output_image",tf.cast(self.out,tf.uint8))
		
		#Tensorflow graph setup... session, saver, etc.
		self.sess = tf.Session()
		self.saver = tf.train.Saver()
		print("Done building!")