def Dense3(inputs, growth_rate, training):
    channels = inputs.get_shape().as_list()[-1]  # input channel count
    bn_relu1 = BN_ReLU(inputs, training)
    conv1 = Conv3D(bn_relu1, growth_rate, kernel_size=3, strides=1)
    concat1 = tf.concat((inputs, conv1), axis=4)
    bn_relu2 = BN_ReLU(concat1, training)
    conv2 = Conv3D(bn_relu2, growth_rate, kernel_size=3, strides=1)
    concat2 = tf.concat((concat1, conv2), axis=4)
    bn_relu3 = BN_ReLU(concat2, training)
    conv3 = Conv3D(bn_relu3, growth_rate, kernel_size=3, strides=1)
    concat3 = tf.concat((concat2, conv3), axis=4)
    bn_relu4 = BN_ReLU(concat3, training)
    conv4 = Conv3D(bn_relu4, channels + 3 * growth_rate, kernel_size=1, strides=1)
    return conv4
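These snippets rely on helper wrappers such as BN_ReLU and Conv3D that are defined elsewhere in the project. A minimal sketch of what they might look like, assuming the TF 1.x tf.layers API with 'same' padding; the actual definitions may differ. (Examples #6 through #9 call Conv3D with a different, lower-level signature; a sketch of that variant follows example #6.)

import tensorflow as tf


def BN_ReLU(inputs, training):
    # Batch normalization over the channel axis, followed by ReLU.
    inputs = tf.layers.batch_normalization(inputs, axis=-1, training=training)
    return tf.nn.relu(inputs)


def Conv3D(inputs, filters, kernel_size, strides, use_bias=False):
    # 3-D convolution matching the keyword-style calls used in the snippets above and below.
    return tf.layers.conv3d(inputs, filters=filters, kernel_size=kernel_size,
                            strides=strides, padding='same', use_bias=use_bias)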
Example #2
	def _build_network(self, inputs, training):
		"""Build the network.
		"""

		inputs = Conv3D(
					inputs=inputs,
					filters=self.num_filters,
					kernel_size=3,
					strides=1)
		inputs = tf.identity(inputs, 'initial_conv')

		skip_inputs = []
		for i, num_blocks in enumerate(self.block_sizes):
			# print(i, num_blocks)
			num_filters = self.num_filters * (2**i)
			inputs = self._encoding_block_layer(
						inputs=inputs, filters=num_filters,
						block_fn=self._residual_block, blocks=num_blocks,
						strides=self.block_strides[i], training=training,
						name='encode_block_layer{}'.format(i+1))
			skip_inputs.append(inputs)
			# print(inputs.shape)
		# print(len(skip_inputs))
		
		inputs = BN_ReLU(inputs, training)
		num_filters = self.num_filters * (2**(len(self.block_sizes)-1))
		# print(num_filters)
		inputs = multihead_attention_3d(
					inputs, num_filters, num_filters, num_filters, 2, training, layer_type='SAME')
		inputs += skip_inputs[-1]

		for i, num_blocks in reversed(list(enumerate(self.block_sizes[1:]))):
			# print(i, num_blocks)
			num_filters = self.num_filters * (2**i)
			if i == len(self.block_sizes) - 2:
				inputs = self._att_decoding_block_layer(
						inputs=inputs, skip_inputs=skip_inputs[i],
						filters=num_filters, block_fn=self._residual_block,
						blocks=1, strides=self.block_strides[i+1],
						training=training,
						name='decode_block_layer{}'.format(len(self.block_sizes)-i-1))
			else:
				inputs = self._decoding_block_layer(
						inputs=inputs, skip_inputs=skip_inputs[i],
						filters=num_filters, block_fn=self._residual_block,
						blocks=1, strides=self.block_strides[i+1],
						training=training,
						name='decode_block_layer{}'.format(len(self.block_sizes)-i-1))
			# print(inputs.shape)

		inputs = self._output_block_layer(inputs=inputs, training=training)
		# print(inputs.shape)

		return inputs
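_build_network reads several hyperparameters off self. A hedged sketch of a constructor that would satisfy it; every value is an illustrative assumption, not the project's configuration:

def __init__(self, num_classes):
    self.num_filters = 16               # width of the first encoder stage (assumed)
    self.block_sizes = [2, 2, 2, 2]     # residual blocks per encoder stage (assumed)
    self.block_strides = [1, 2, 2, 2]   # stride entering each encoder stage (assumed)
    self.num_classes = num_classes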
Example #3
	def _residual_block(self, inputs, filters, training,
							projection_shortcut, strides):
		"""Standard building block for residual networks with BN before convolutions.

		Args:
			inputs: A tensor of size [batch, depth_in, height_in, width_in, channels].
			filters: The number of filters for the convolutions.
			training: A Boolean for whether the model is in training or inference
				mode. Needed for batch normalization.
			projection_shortcut: The function to use for projection shortcuts
				(typically a 1x1 convolution when downsampling the input).
			strides: The block's stride. If greater than 1, this block will ultimately
				downsample the input.

		Returns:
			The output tensor of the block.
		"""

		shortcut = inputs
		inputs = BN_ReLU(inputs, training)

		# The projection shortcut should come after the first batch norm and ReLU
		# since it performs a 1x1 convolution.
		if projection_shortcut is not None:
			shortcut = projection_shortcut(inputs)

		inputs = Conv3D(
					inputs=inputs,
					filters=filters,
					kernel_size=3,
					strides=strides)

		inputs = BN_ReLU(inputs, training)

		inputs = Conv3D(
					inputs=inputs,
					filters=filters,
					kernel_size=3,
					strides=1)

		return inputs + shortcut
Example #4
    def _output_block_layer(self, inputs, training):

        inputs = BN_ReLU(inputs, training)

        inputs = tf.layers.dropout(inputs, rate=0.5, training=training)

        inputs = Conv3D(inputs=inputs,
                        filters=self.num_classes,
                        kernel_size=1,
                        strides=1,
                        use_bias=True)

        return tf.identity(inputs, 'output')
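A hedged sketch of how the 'output' logits might be consumed for voxel-wise classification; the placeholders and shapes below are illustrative assumptions, not part of the project:

num_classes = 3  # assumed
logits = tf.placeholder(tf.float32, [None, None, None, None, num_classes])  # stands in for the 'output' tensor
labels = tf.placeholder(tf.int32, [None, None, None, None])
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))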
def unpool(inputs, training):
    #	[b, w, h, d, c] = inputs.get_shape().as_list()
    # Branch 1: 3x3x3 conv, then two transposed convolutions (stride 1, then stride 2).
    conv31 = Conv3D(inputs, 176, kernel_size=3, strides=1)
    deconv31 = BN_ReLU(conv31, training)
    deconv1_1 = Deconv3D(deconv31,
                         176,
                         kernel_size=3,
                         strides=1,
                         use_bias=False)
    deconv1 = BN_ReLU(deconv1_1, training)
    deconv1_2 = Deconv3D(deconv1, 88, kernel_size=3, strides=2, use_bias=False)
    deconv2 = BN_ReLU(deconv1_2, training)
    # Branch 2: transposed conv, dilated conv, then a stride-2 transposed convolution.
    deconv2_1 = Deconv3D(inputs, 176, kernel_size=3, strides=1, use_bias=False)
    deconv3 = BN_ReLU(deconv2_1, training)
    deconv2_2 = Dilated_Conv3D(deconv3,
                               176,
                               kernel_size=3,
                               dilation_rate=2,
                               use_bias=False)
    deconv4 = BN_ReLU(deconv2_2, training)
    deconv2_3 = Deconv3D(deconv4, 88, kernel_size=3, strides=2, use_bias=False)
    deconv5 = BN_ReLU(deconv2_3, training)
    # Concatenate the two 88-channel branches along the channel axis.
    concat = tf.concat((deconv2, deconv5), axis=4)
    return concat
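A hypothetical usage of unpool, assuming the Deconv3D and Dilated_Conv3D wrappers use 'same'-style padding so that the stride-2 transposed convolutions double each spatial dimension; the shapes are illustrative only:

training = tf.placeholder(tf.bool, shape=[], name='training')
features = tf.placeholder(tf.float32, [None, 8, 16, 16, 176], name='features')
upsampled = unpool(features, training)  # expected: [None, 16, 32, 32, 88 + 88]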
Example #6
# Network
H_out_true = tf.placeholder(tf.float32,
                            shape=(1, 1, None, None, 3),
                            name='H_out_true')

is_train = tf.placeholder(tf.bool, shape=[], name='is_train')  # training phase flag (scalar)

# L_ = DownSample(H_in, h, R)
L = tf.placeholder(tf.float32, shape=[None, T_in, None, None, 3], name='L_in')

# build model
stp = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
sp = [[0, 0], [0, 0], [1, 1], [1, 1], [0, 0]]
# [1, 3, 3, 3, 64] = [filter_depth, filter_height, filter_width, in_channels, out_channels]
x = Conv3D(tf.pad(L, sp, mode='CONSTANT'), [1, 3, 3, 3, 64], [1, 1, 1, 1, 1],
           'VALID',
           name='conv1')

F = 64
G = 32
for r in range(3):
    t = BatchNorm(x, is_train, name='Rbn' + str(r + 1) + 'a')
    t = tf.nn.relu(t)
    t = Conv3D(t, [1, 1, 1, F, F], [1, 1, 1, 1, 1],
               'VALID',
               name='Rconv' + str(r + 1) + 'a')

    t = BatchNorm(t, is_train, name='Rbn' + str(r + 1) + 'b')
    t = tf.nn.relu(t)
    t = Conv3D(tf.pad(t, stp, mode='CONSTANT'), [3, 3, 3, F, G],
               [1, 1, 1, 1, 1],
               'VALID',
               name='Rconv' + str(r + 1) + 'b')

    x = tf.concat([x, t], 4)
    F += G
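Examples #6 through #9 call Conv3D and BatchNorm with a lower-level signature: an explicit [depth, height, width, in_channels, out_channels] filter shape, a stride list, and a padding string. A hedged sketch of such wrappers, assuming tf.nn.conv3d underneath; the VSR-DUF project's actual helpers may differ:

def Conv3D(x, filter_shape, strides, padding, name):
    # filter_shape = [filter_depth, filter_height, filter_width, in_channels, out_channels]
    with tf.variable_scope(name):
        W = tf.get_variable('W', filter_shape,
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b', [filter_shape[-1]],
                            initializer=tf.zeros_initializer())
        return tf.nn.conv3d(x, W, strides, padding) + b


def BatchNorm(x, is_train, name):
    # Batch normalization over the channel axis.
    return tf.layers.batch_normalization(x, training=is_train, name=name)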
Example #7
File: nets.py  Project: zhwzhong/VSR-DUF
def FR_16L(x, is_train, uf=4):
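    # Note: `sp` and `stp` are module-level padding specs; see examples #6 and #9 for their values.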
    x = Conv3D(tf.pad(x, sp, mode='CONSTANT'), [1, 3, 3, 3, 64],
               [1, 1, 1, 1, 1],
               'VALID',
               name='conv1')

    F = 64
    G = 32
    for r in range(3):
        t = BatchNorm(x, is_train, name='Rbn' + str(r + 1) + 'a')
        t = tf.nn.relu(t)
        t = Conv3D(t, [1, 1, 1, F, F], [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'a')

        t = BatchNorm(t, is_train, name='Rbn' + str(r + 1) + 'b')
        t = tf.nn.relu(t)
        t = Conv3D(tf.pad(t, stp, mode='CONSTANT'), [3, 3, 3, F, G],
                   [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'b')

        x = tf.concat([x, t], 4)
        F += G
    for r in range(3, 6):
        t = BatchNorm(x, is_train, name='Rbn' + str(r + 1) + 'a')
        t = tf.nn.relu(t)
        t = Conv3D(t, [1, 1, 1, F, F], [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'a')

        t = BatchNorm(t, is_train, name='Rbn' + str(r + 1) + 'b')
        t = tf.nn.relu(t)
        t = Conv3D(tf.pad(t, sp, mode='CONSTANT'), [3, 3, 3, F, G],
                   [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'b')

        x = tf.concat([x[:, 1:-1], t], 4)
        F += G

    x = BatchNorm(x, is_train, name='fbn1')
    x = tf.nn.relu(x)
    x = Conv3D(tf.pad(x, sp, mode='CONSTANT'), [1, 3, 3, 256, 256],
               [1, 1, 1, 1, 1],
               'VALID',
               name='conv2')
    x = tf.nn.relu(x)

    r = Conv3D(x, [1, 1, 1, 256, 256], [1, 1, 1, 1, 1], 'VALID', name='rconv1')
    r = tf.nn.relu(r)
    r = Conv3D(r, [1, 1, 1, 256, 3 * uf * uf], [1, 1, 1, 1, 1],
               'VALID',
               name='rconv2')

    f = Conv3D(x, [1, 1, 1, 256, 512], [1, 1, 1, 1, 1], 'VALID', name='fconv1')
    f = tf.nn.relu(f)
    f = Conv3D(f, [1, 1, 1, 512, 1 * 5 * 5 * uf * uf], [1, 1, 1, 1, 1],
               'VALID',
               name='fconv2')

    ds_f = tf.shape(f)
    f = tf.reshape(f, [ds_f[0], ds_f[1], ds_f[2], ds_f[3], 25, uf * uf])
    f = tf.nn.softmax(f, dim=4)

    return f, r
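FR_16L returns f, a set of softmax-normalized 5x5 dynamic upsampling filters (one per output sub-pixel position, uf * uf of them), and r, a learned residual in depth-to-space layout; example #9 below shows how they are applied with DynFilter3D and depth_to_space_3D to reconstruct the high-resolution frame.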
Example #8
		def projection_shortcut(inputs):
			return Conv3D(
					inputs=inputs,
					filters=filters,
					kernel_size=1,
					strides=strides)
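The projection shortcut above is typically built inside a block-layer helper and handed only to the first residual block of the layer; later blocks keep an identity shortcut. A hedged sketch of that wiring, using assumed names rather than the project's actual _encoding_block_layer, with block_fn standing in for _residual_block and Conv3D for the keyword-style wrapper:

def block_layer(inputs, filters, block_fn, blocks, strides, training, name):
    def projection_shortcut(x):
        return Conv3D(inputs=x, filters=filters, kernel_size=1, strides=strides)

    # Only the first block changes resolution/width, so only it needs the projection.
    inputs = block_fn(inputs, filters, training, projection_shortcut, strides)
    for _ in range(1, blocks):
        inputs = block_fn(inputs, filters, training, None, 1)
    return tf.identity(inputs, name)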
Example #9
def build_BUF(H_out_true, is_train, L, learning_rate):
    # build model
    stp = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
    sp = [[0, 0], [0, 0], [1, 1], [1, 1], [0, 0]]
    # [1, 3, 3, 3, 64] = [filter_depth, filter_height, filter_width, in_channels, out_channels]
    x = Conv3D(tf.pad(L, sp, mode='CONSTANT'), [1, 3, 3, 3, 64],
               [1, 1, 1, 1, 1],
               'VALID',
               name='conv1')

    F = 64
    G = 32
    for r in range(3):
        t = BatchNorm(x, is_train, name='Rbn' + str(r + 1) + 'a')
        t = tf.nn.relu(t)
        t = Conv3D(t, [1, 1, 1, F, F], [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'a')

        t = BatchNorm(t, is_train, name='Rbn' + str(r + 1) + 'b')
        t = tf.nn.relu(t)
        t = Conv3D(tf.pad(t, stp, mode='CONSTANT'), [3, 3, 3, F, G],
                   [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'b')

        x = tf.concat([x, t], 4)
        F += G
    for r in range(3, 6):
        t = BatchNorm(x, is_train, name='Rbn' + str(r + 1) + 'a')
        t = tf.nn.relu(t)
        t = Conv3D(t, [1, 1, 1, F, F], [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'a')

        t = BatchNorm(t, is_train, name='Rbn' + str(r + 1) + 'b')
        t = tf.nn.relu(t)
        t = Conv3D(tf.pad(t, sp, mode='CONSTANT'), [3, 3, 3, F, G],
                   [1, 1, 1, 1, 1],
                   'VALID',
                   name='Rconv' + str(r + 1) + 'b')

        x = tf.concat([x[:, 1:-1], t], 4)
        F += G

    # shared section
    x = BatchNorm(x, is_train, name='fbn1')
    x = tf.nn.relu(x)
    x = Conv3D(tf.pad(x, sp, mode='CONSTANT'), [1, 3, 3, 256, 256],
               [1, 1, 1, 1, 1],
               'VALID',
               name='conv2')
    x = tf.nn.relu(x)

    # R
    r = Conv3D(x, [1, 1, 1, 256, 256], [1, 1, 1, 1, 1], 'VALID', name='rconv1')
    r = tf.nn.relu(r)
    r = Conv3D(r, [1, 1, 1, 256, 3 * 16], [1, 1, 1, 1, 1],
               'VALID',
               name='rconv2')

    # F
    f = Conv3D(x, [1, 1, 1, 256, 512], [1, 1, 1, 1, 1], 'VALID', name='fconv1')
    f = tf.nn.relu(f)
    f = Conv3D(f, [1, 1, 1, 512, 1 * 5 * 5 * 16], [1, 1, 1, 1, 1],
               'VALID',
               name='fconv2')

    ds_f = tf.shape(f)
    f = tf.reshape(f, [ds_f[0], ds_f[1], ds_f[2], ds_f[3], 25, 16])
    f = tf.nn.softmax(f, dim=4)

    Fx = f
    Rx = r

    x = L
    x_c = []
    for c in range(3):
        t = DynFilter3D(x[:, T_in // 2:T_in // 2 + 1, :, :, c],
                        Fx[:, 0, :, :, :, :], [1, 5, 5])  # [B,H,W,R*R]
        t = tf.depth_to_space(t, R)  # [B,H*R,W*R,1]
        x_c += [t]
    x = tf.concat(
        x_c, axis=3
    )  # [B,H*R,W*R,3] Tensor("concat_9:0", shape=(?, ?, ?, 3), dtype=float32)

    x = tf.expand_dims(
        x, axis=1
    )  # Tensor("ExpandDims_3:0", shape=(?, 1, ?, ?, 3), dtype=float32)
    Rx = depth_to_space_3D(
        Rx, R
    )  # [B,1,H*R,W*R,3] Tensor("Reshape_6:0", shape=(?, ?, ?, ?, ?), dtype=float32)
    x += Rx  # Tensor("add_18:0", shape=(?, ?, ?, ?, 3), dtype=float32)
    x = tf.squeeze(x)
    print(x.get_shape())
    out_H = tf.clip_by_value(x, 0, 1, name='out_H')
    cost = Huber(y_true=H_out_true, y_pred=out_H, delta=0.01)
    learning_rate = tf.Variable(float(learning_rate),
                                trainable=False,
                                dtype=tf.float32,
                                name='learning_rate')
    learning_rate_decay_op = learning_rate.assign(learning_rate * 0.9)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    return cost, learning_rate_decay_op, optimizer
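A hedged usage sketch for build_BUF in TF 1.x session style, assuming the placeholders from example #6 and module-level T_in and R (temporal window size and upscaling factor); the dummy arrays are illustrative only:

import numpy as np

cost, lr_decay_op, optimizer = build_BUF(H_out_true, is_train, L, learning_rate=1e-3)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    lr_frames = np.random.rand(1, T_in, 32, 32, 3).astype(np.float32)       # low-res input window
    hr_frame = np.random.rand(1, 1, 32 * R, 32 * R, 3).astype(np.float32)   # high-res target frame
    _, loss = sess.run([optimizer, cost],
                       feed_dict={L: lr_frames, H_out_true: hr_frame, is_train: True})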
Example #10
 def inference(self, h, scope_name):
     
     def dynamic_shift(inp, pad_size):
         x1 =tf.pad(inp, [[0,0], [pad_size,0], [0,0], [0,0]], mode='CONSTANT')
         x1 = x1[:,:-pad_size,:]
         return x1
         
     with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
         
         F=64
         
         intermediates = []
         
         
         for j in range(4):
             x1=tf.image.rot90(h,k=j,name=None)
             
         
             if j in [0,2]:
                 with tf.variable_scope('nety', reuse=tf.AUTO_REUSE) as scope:
                     sp = [[0,0], [1,0], [0,0], [0,0]]
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,self.channels,F], [1,1,1,1], 'SAME', scope_name='conv_0')
                     x1 = tf.nn.leaky_relu(x1)
                     #Remove last row
                     x1 = x1[:,:-1,:]
                     
                      # 15 layers: Conv + BN + ReLU
                     for i in range(15):
                         x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_{0}'.format(i+1))
                         x1 = tf.layers.batch_normalization(x1, axis=-1,training=self.is_train,name='bn_{0}'.format(i+1))
                         x1 = tf.nn.leaky_relu(x1)
                         x1 = x1[:,:-1,:]
                      
                     # last layer, Conv
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_last')
                     x1 = x1[:,:-1,:] 
                     
                     # Compute the shift to apply to the receptive fields
                     shift = tf.cond(tf.equal(self.shift, 1), 
                                lambda: dynamic_shift(x1,1), 
                                lambda: dynamic_shift(x1,2))
                     # Apply the computed shift only during training; otherwise the canonical shift of 1 is applied
                     x1 = tf.cond(tf.equal(self.is_train, True), 
                                lambda: shift, 
                                lambda: dynamic_shift(x1,1))
                     
                     
                     #Rotating back
                     x1 = tf.image.rot90(x1,k=4-j,name=None)
                     intermediates.append(x1)
             else:
                 with tf.variable_scope('netx', reuse=tf.AUTO_REUSE) as scope:
                     sp = [[0,0], [1,0], [0,0], [0,0]]
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,self.channels,F], [1,1,1,1], 'SAME', scope_name='conv_0')
                     x1 = tf.nn.leaky_relu(x1)
                     #Remove last row
                     x1 = x1[:,:-1,:]
                     
                      # 15 layers: Conv + BN + ReLU
                     for i in range(15):
                         x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_{0}'.format(i+1))
                         x1 = tf.layers.batch_normalization(x1, axis=-1,training=self.is_train,name='bn_{0}'.format(i+1))
                         x1 = tf.nn.leaky_relu(x1)
                         x1 = x1[:,:-1,:]
                      
                     # last layer, Conv
                     x1 = Conv2D(tf.pad(x1, sp, mode='CONSTANT'), [3,3,F,F], [1,1,1,1], 'SAME', scope_name='conv_last')
                     x1 = x1[:,:-1,:] 
                     
                      # Apply the canonical shift for the horizontally extending receptive fields
                     x1 = dynamic_shift(x1,1)
                     
                     #Rotating back
                     x1 = tf.image.rot90(x1,k=4-j,name=None)
                     intermediates.append(x1)
                 
             
         images_to_combine=tf.stack(intermediates,axis=1)
         
         x1 = Conv3D(images_to_combine, [4,1,1,F,F], [1,1,1,1,1], 'VALID', scope_name='conv_comb_0')
         x1 = tf.nn.leaky_relu(x1)
         x1 = tf.squeeze(x1,axis=1)
         x1 = Conv2D(x1, [1,1,F,F], [1,1,1,1], 'SAME', scope_name='conv_comb_1')
         x1 = tf.nn.leaky_relu(x1)
         x1 = Conv2D(x1, [1,1,F,2], [1,1,1,1], 'SAME', scope_name='conv_comb_2')
         x1 = tf.nn.relu(x1)
     
 
     return x1