def layer_op(self, input_tensor, is_training=None, keep_prob=None):
    fc_layer = FCLayer(n_output_chns=self.n_output_chns,
                       with_bias=self.with_bias,
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       b_initializer=self.initializers['b'],
                       b_regularizer=self.regularizers['b'],
                       name='fc_')
    output_tensor = fc_layer(input_tensor)

    if self.with_bn:
        bn_layer = BNLayer(regularizer=self.regularizers['w'],
                           moving_decay=self.moving_decay,
                           eps=self.eps,
                           name='bn_')
        output_tensor = bn_layer(output_tensor, is_training)

    if self.acti_func is not None:
        acti_layer = ActiLayer(func=self.acti_func,
                               regularizer=self.regularizers['w'],
                               name='acti_')
        output_tensor = acti_layer(output_tensor)

    if keep_prob is not None:
        dropout_layer = ActiLayer(func='dropout', name='dropout_')
        output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)
    return output_tensor
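# A hedged usage sketch for the composite layer above, assuming the enclosing
# class follows NiftyNet's callable-layer convention (calling an instance
# invokes layer_op). The class name FullyConnectedLayer and all parameter
# values here are illustrative assumptions, not taken from this file.
import tensorflow as tf
from niftynet.layer.fully_connected import FullyConnectedLayer

x = tf.placeholder(tf.float32, [2, 128])
fc = FullyConnectedLayer(n_output_chns=256, acti_func='relu')
# is_training drives batch norm; keep_prob enables the trailing dropout
y = fc(x, is_training=True, keep_prob=0.9)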
def layer_op(self, input_tensor, is_training=None, keep_prob=None):
    # initialise sub-layers
    deconv_layer = DeconvLayer(n_output_chns=self.n_output_chns,
                               kernel_size=self.kernel_size,
                               stride=self.stride,
                               padding=self.padding,
                               with_bias=self.with_bias,
                               w_initializer=self.initializers['w'],
                               w_regularizer=self.regularizers['w'],
                               b_initializer=self.initializers['b'],
                               b_regularizer=self.regularizers['b'],
                               name='deconv_')
    output_tensor = deconv_layer(input_tensor)

    if self.with_bn:
        if is_training is None:
            raise ValueError('is_training argument should be '
                             'True or False unless with_bn is False')
        bn_layer = BNLayer(regularizer=self.regularizers['w'],
                           moving_decay=self.moving_decay,
                           eps=self.eps,
                           name='bn_')
        output_tensor = bn_layer(output_tensor, is_training)

    if self.acti_func is not None:
        acti_layer = ActiLayer(func=self.acti_func,
                               regularizer=self.regularizers['w'],
                               name='acti_')
        output_tensor = acti_layer(output_tensor)

    if keep_prob is not None:
        dropout_layer = ActiLayer(func='dropout', name='dropout_')
        output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)
    return output_tensor
def layer_op(self, image, is_training=True, **unused_kwargs):
    # ---FC layers
    FC_1 = FCLayer(
        n_output_chns=self.n_fea[0],
        acti_func='leakyrelu',
        with_bias=True,
        with_bn=False,
        w_initializer=self.conv_params.get('w_initializer'),
        w_regularizer=self.conv_params.get('w_regularizer'))(image)
    FC_1_drop = ActiLayer(func='dropout', name='dropout')(FC_1, keep_prob=0.5)
    FC_2 = FCLayer(
        n_output_chns=self.n_fea[1],
        acti_func='leakyrelu',
        with_bias=True,
        with_bn=False,
        w_initializer=self.conv_params.get('w_initializer'),
        w_regularizer=self.conv_params.get('w_regularizer'))(FC_1_drop)
    FC_2_drop = ActiLayer(func='dropout', name='dropout')(FC_2, keep_prob=0.5)
    FC_class = FCLayer(
        n_output_chns=self.n_fea[2],
        acti_func='leakyrelu',
        with_bias=True,
        with_bn=False,
        w_initializer=self.conv_params.get('w_initializer'),
        w_regularizer=self.conv_params.get('w_regularizer'))(FC_2_drop)
    return FC_class
def layer_op(self, input_tensor, is_training=None, keep_prob=None):
    conv_layer = ConvLayer(n_output_chns=self.n_output_chns,
                           kernel_size=self.kernel_size,
                           stride=self.stride,
                           dilation=self.dilation,
                           padding=self.padding,
                           with_bias=self.with_bias,
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           b_initializer=self.initializers['b'],
                           b_regularizer=self.regularizers['b'],
                           padding_constant=self.padding_constant,
                           name='conv_')

    if self.feature_normalization == 'batch':
        if is_training is None:
            raise ValueError(
                'is_training argument should be '
                'True or False unless feature_normalization is False')
        bn_layer = BNLayer(regularizer=self.regularizers['w'],
                           moving_decay=self.moving_decay,
                           eps=self.eps,
                           name='bn_')
    elif self.feature_normalization == 'instance':
        in_layer = InstanceNormLayer(eps=self.eps, name='in_')
    elif self.feature_normalization == 'group':
        gn_layer = GNLayer(regularizer=self.regularizers['w'],
                           group_size=self.group_size,
                           eps=self.eps,
                           name='gn_')
    if self.acti_func is not None:
        acti_layer = ActiLayer(func=self.acti_func,
                               regularizer=self.regularizers['w'],
                               name='acti_')
    if keep_prob is not None:
        dropout_layer = ActiLayer(func='dropout', name='dropout_')

    def activation(output_tensor):
        if self.feature_normalization == 'batch':
            output_tensor = bn_layer(output_tensor, is_training)
        elif self.feature_normalization == 'instance':
            output_tensor = in_layer(output_tensor)
        elif self.feature_normalization == 'group':
            output_tensor = gn_layer(output_tensor)
        if self.acti_func is not None:
            output_tensor = acti_layer(output_tensor)
        if keep_prob is not None:
            output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)
        return output_tensor

    if self.preactivation:
        output_tensor = conv_layer(activation(input_tensor))
    else:
        output_tensor = activation(conv_layer(input_tensor))
    return output_tensor
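# The preactivation flag above only changes the composition order of the same
# sub-layers. A minimal pure-Python sketch of the two orderings (add_one and
# relu are toy stand-ins for the convolution and the norm/acti/dropout chain):
def compose(conv, activation, x, preactivation):
    if preactivation:
        # ResNet-v2 style: normalise/activate first, then convolve
        return conv(activation(x))
    # classic ordering: convolve first, then normalise/activate
    return activation(conv(x))

add_one = lambda v: v + 1   # stand-in for conv_layer
relu = lambda v: max(0, v)  # stand-in for activation()
assert compose(add_one, relu, -3, preactivation=True) == 1   # add_one(relu(-3))
assert compose(add_one, relu, -3, preactivation=False) == 0  # relu(add_one(-3))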
def layer_op(self, main_flow, bypass_flow):
    """
    :param main_flow: tensor, input to the VNet block
    :param bypass_flow: tensor, input from skip connection
    :return: res_flow is tensor before final block operation
        (for residual connections), main_flow is final output tensor
    """
    for i in range(self.n_conv):
        main_flow = ConvLayer(name='conv_{}'.format(i),
                              n_output_chns=self.n_feature_chns,
                              w_initializer=self.initializers['w'],
                              w_regularizer=self.regularizers['w'],
                              kernel_size=5)(main_flow)
        if i < self.n_conv - 1:  # no activation for the last conv layer
            main_flow = ActiLayer(
                func=self.acti_func,
                regularizer=self.regularizers['w'])(main_flow)
    res_flow = ElementwiseLayer('SUM')(main_flow, bypass_flow)

    if self.func == 'DOWNSAMPLE':
        main_flow = ConvLayer(name='downsample',
                              n_output_chns=self.n_output_chns,
                              w_initializer=self.initializers['w'],
                              w_regularizer=self.regularizers['w'],
                              kernel_size=2,
                              stride=2,
                              with_bias=True)(res_flow)
    elif self.func == 'UPSAMPLE':
        main_flow = DeconvLayer(name='upsample',
                                n_output_chns=self.n_output_chns,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                kernel_size=2,
                                stride=2,
                                with_bias=True)(res_flow)
    elif self.func == 'SAME':
        main_flow = ConvLayer(name='conv_1x1x1',
                              n_output_chns=self.n_output_chns,
                              w_initializer=self.initializers['w'],
                              w_regularizer=self.regularizers['w'],
                              b_initializer=self.initializers['b'],
                              b_regularizer=self.regularizers['b'],
                              kernel_size=1,
                              with_bias=True)(res_flow)
    main_flow = ActiLayer(self.acti_func)(main_flow)
    print(self)
    return res_flow, main_flow
def layer_op(self, input_tensor):
    output_tensor_1 = Conv(self.n_chns, **self.conv_params)(input_tensor)
    output_tensor_drop_1 = ActiLayer(func='dropout',
                                     name='dropout')(output_tensor_1,
                                                     keep_prob=0.9)
    output_tensor_2 = Conv(self.n_chns,
                           **self.conv_params)(output_tensor_drop_1)
    output_tensor_drop_2 = ActiLayer(func='dropout',
                                     name='dropout')(output_tensor_2,
                                                     keep_prob=0.9)
    output_tensor_3 = Conv(self.n_chns,
                           **self.conv_params)(output_tensor_drop_2)
    output_tensor_drop_3 = ActiLayer(func='dropout',
                                     name='dropout')(output_tensor_3,
                                                     keep_prob=0.9)
    return output_tensor_drop_3
def layer_op(self, input_tensor, is_training):
    output_tensor = input_tensor
    for i in range(len(self.kernels)):
        # create parameterised layers
        bn_op = BNLayer(regularizer=self.regularizers['w'],  # add regularizer for simplicity
                        name='bn_{}'.format(i))
        acti_op = ActiLayer(func=self.acti_func,
                            regularizer=self.regularizers['w'],
                            name='acti_{}'.format(i))
        conv_op = ConvLayer(n_output_chns=self.n_output_chns,
                            kernel_size=self.kernels[i],
                            stride=self.strides[i],
                            dilation=self.dilation_rates[i],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            name='conv_{}'.format(i))
        # construct the operations first, then connect them:
        # conv -> activation -> batch norm
        output_tensor = conv_op(output_tensor)
        output_tensor = acti_op(output_tensor)
        output_tensor = bn_op(output_tensor, is_training)
    # make residual connections: the input is added directly to the output
    if self.with_res:
        output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor)
    return output_tensor
def layer_op(self, input_tensor):
    output_tensor = input_tensor
    for (i, k) in enumerate(self.kernels):
        # choose the number of channels for this convolution:
        # the encoding path keeps n_output_chns throughout; the decoding
        # path halves the channels (after the first conv when double_n is set)
        if self.encoding:
            nb_channels = self.n_output_chns
        elif self.double_n and i == 0:
            nb_channels = self.n_output_chns
        else:
            nb_channels = int(self.n_output_chns / 2)

        # create parameterised layers
        in_op = InstanceNormLayer(name='in_{}'.format(i))
        acti_op = ActiLayer(func=self.acti_func,
                            regularizer=self.regularizers['w'],
                            name='acti_{}'.format(i))
        conv_op = ConvLayer(n_output_chns=nb_channels,
                            kernel_size=k,
                            stride=self.stride,
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            name='conv_{}'.format(i))
        # connect layers: instance norm -> activation -> convolution
        output_tensor = in_op(output_tensor)
        output_tensor = acti_op(output_tensor)
        output_tensor = conv_op(output_tensor)
    return output_tensor
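# A quick enumeration of the channel schedule implied by the branching above
# (a hedged sketch: the two-conv block and n_output_chns=64 are illustrative
# assumptions, not values from this file):
def channel_schedule(encoding, double_n, n_output_chns=64, n_conv=2):
    schedule = []
    for i in range(n_conv):
        if encoding or (double_n and i == 0):
            schedule.append(n_output_chns)
        else:
            schedule.append(n_output_chns // 2)
    return schedule

assert channel_schedule(encoding=True, double_n=False) == [64, 64]
assert channel_schedule(encoding=False, double_n=True) == [64, 32]
assert channel_schedule(encoding=False, double_n=False) == [32, 32]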
def layer_op_selu(self, input_tensor, is_training):
    output_tensor = input_tensor
    for i in range(len(self.kernels)):
        # create parameterised layers
        # (batch norm is left out: selu is self-normalising)
        # bn_op = BNLayer(regularizer=self.regularizers['w'],
        #                 name='bn_{}'.format(i))
        acti_op = ActiLayer(func=self.acti_func,
                            regularizer=self.regularizers['w'],
                            name='acti_{}'.format(i))
        conv_op = ConvLayer(n_output_chns=self.n_output_chns,
                            kernel_size=self.kernels[i],
                            stride=self.strides[i],
                            dilation=self.dilation_rates[i],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            name='conv_{}'.format(i))
        # connect layers
        # output_tensor = bn_op(output_tensor, is_training)
        output_tensor = conv_op(output_tensor)
        output_tensor = acti_op(output_tensor)
    # make residual connections
    if self.with_res:
        output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor)
    return output_tensor
def layer_op(self, input_tensor, is_training): """ :param input_tensor: tensor, input to the network :param is_training: boolean, True if network is in training mode :return: tensor, output of the residual block """ output_tensor = input_tensor for (i, k) in enumerate(self.kernels): # create parameterised layers bn_op = BNLayer(regularizer=self.regularizers['w'], name='bn_{}'.format(i)) acti_op = ActiLayer(func=self.acti_func, regularizer=self.regularizers['w'], name='acti_{}'.format(i)) conv_op = ConvLayer(n_output_chns=self.n_output_chns, kernel_size=k, stride=1, w_initializer=self.initializers['w'], w_regularizer=self.regularizers['w'], name='conv_{}'.format(i)) # connect layers output_tensor = bn_op(output_tensor, is_training) output_tensor = acti_op(output_tensor) output_tensor = conv_op(output_tensor) # make residual connections if self.with_res: output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor) return output_tensor
def layer_op(self, input_tensor, is_training):
    output_tensor = input_tensor
    for (i, k) in enumerate(self.kernels):
        # create parameterised layers;
        # batch normalisation is removed from the residual blocks
        # bn_op = BNLayer(regularizer=self.regularizers['w'],
        #                 name='bn_{}'.format(i))
        acti_op = ActiLayer(func=self.acti_func,
                            regularizer=self.regularizers['w'],
                            name='acti_{}'.format(i))
        conv_op = ConvLayer(n_output_chns=self.n_output_chns,
                            kernel_size=k,
                            stride=1,
                            padding='SAME',
                            with_bias=True,
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            name='conv_{}'.format(i))
        # connect layers
        # output_tensor = bn_op(output_tensor, is_training)
        output_tensor = acti_op(output_tensor)
        output_tensor = conv_op(output_tensor)
    # make residual connections
    if self.with_res:
        output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor)
    return output_tensor
def layer_op(self, input_tensor, is_training=None, keep_prob=None):
    conv_layer = ConvLayer(n_output_chns=self.n_output_chns,
                           kernel_size=self.kernel_size,
                           stride=self.stride,
                           dilation=self.dilation,
                           padding=self.padding,
                           with_bias=self.with_bias,
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           b_initializer=self.initializers['b'],
                           b_regularizer=self.regularizers['b'],
                           name='conv_')
    output_tensor = conv_layer(input_tensor)

    if self.with_bn:
        if is_training is None:
            raise ValueError('is_training argument should be '
                             'True or False unless with_bn is False')
        bn_layer = BNLayer(regularizer=self.regularizers['w'],
                           moving_decay=self.moving_decay,
                           eps=self.eps,
                           name='bn_')
        output_tensor = bn_layer(output_tensor, is_training)

    if self.acti_func is not None:
        acti_layer = ActiLayer(func=self.acti_func,
                               regularizer=self.regularizers['w'],
                               name='acti_')
        output_tensor = acti_layer(output_tensor)

    # register a stably-named op so the pre-dropout activation can be
    # fetched from the graph by name
    tf.identity(output_tensor, name="MYOUTPUTTENSOR")

    if keep_prob is not None:
        dropout_layer = ActiLayer(func='dropout', name='dropout_')
        output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)
        # likewise for the post-dropout activation
        tf.identity(output_tensor, name="MYOUTPUTTENSOR_DROPOUT")
    return output_tensor
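# A hedged TF1 sketch of why the named tf.identity ops above are useful: a
# tensor registered under a stable name can be fetched back from the graph
# later. Self-contained toy graph; in the real network the fetched name would
# also carry any enclosing variable-scope prefix (e.g. 'scope/MYOUTPUTTENSOR:0').
import tensorflow as tf

x = tf.zeros([2, 4])
tf.identity(x, name='MYOUTPUTTENSOR')
fetched = tf.get_default_graph().get_tensor_by_name('MYOUTPUTTENSOR:0')
with tf.Session() as sess:
    print(sess.run(fetched).shape)  # (2, 4)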
def test_3d_dropout_shape(self):
    x = self.get_3d_input()
    dropout_layer = ActiLayer(func='dropout')
    out_dropout = dropout_layer(x, keep_prob=0.8)
    print(dropout_layer)
    with self.cached_session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(out_dropout)
        self.assertAllClose((2, 16, 16, 16, 8), out.shape)
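# The test above only asserts that dropout preserves shape. A hedged numpy
# sketch of the underlying TF1 inverted-dropout semantics (survivors are
# rescaled by 1/keep_prob so the expected activation is unchanged):
import numpy as np

keep_prob = 0.8
x = np.ones((2, 16, 16, 16, 8), dtype=np.float32)
mask = np.random.rand(*x.shape) < keep_prob
out = np.where(mask, x / keep_prob, 0.0)
assert out.shape == x.shape                       # the unit test's assertion
assert np.all((out == 0) | (out == 1 / keep_prob))  # zeroed or rescaled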
def layer_op(self, image, is_training=True, **unused_kwargs):
    # ---Conv layers
    output_1 = ThreeLayerConv(self.n_fea[0], self.conv_params)(image)
    down_1 = Pooling(func='MAX', **self.pooling_params)(output_1)
    output_2 = ThreeLayerConv(self.n_fea[1], self.conv_params)(down_1)
    down_2 = Pooling(func='MAX', **self.pooling_params)(output_2)
    output_3 = ThreeLayerConv(self.n_fea[2], self.conv_params)(down_2)
    down_3 = Pooling(func='MAX', **self.pooling_params)(output_3)

    # ---FC layers
    FC_1 = FCLayer(
        n_output_chns=self.n_fea[3],
        acti_func='leakyrelu',
        with_bias=True,
        with_bn=False,
        w_initializer=self.conv_params.get('w_initializer'),
        w_regularizer=self.conv_params.get('w_regularizer'))(down_3)
    FC_1_drop = ActiLayer(func='dropout', name='dropout')(FC_1, keep_prob=0.5)
    FC_2 = FCLayer(
        n_output_chns=self.n_fea[4],
        acti_func='leakyrelu',
        with_bias=True,
        with_bn=False,
        w_initializer=self.conv_params.get('w_initializer'),
        w_regularizer=self.conv_params.get('w_regularizer'))(FC_1_drop)
    FC_2_drop = ActiLayer(func='dropout', name='dropout')(FC_2, keep_prob=0.5)
    FC_class = FCLayer(
        n_output_chns=self.n_fea[5],
        acti_func='leakyrelu',
        with_bias=True,
        with_bn=False,
        w_initializer=self.conv_params.get('w_initializer'),
        w_regularizer=self.conv_params.get('w_regularizer'))(FC_2_drop)
    return FC_class
def layer_op(self, main_flow, bypass_flow):
    for i in range(self.n_conv):
        main_flow = ConvLayer(name='conv_{}'.format(i),
                              n_output_chns=self.n_feature_chns,
                              w_initializer=self.initializers['w'],
                              w_regularizer=self.regularizers['w'],
                              kernel_size=5)(main_flow)
        if i < self.n_conv - 1:  # no activation for the last conv layer
            main_flow = ActiLayer(
                func=self.acti_func,
                regularizer=self.regularizers['w'])(main_flow)
    res_flow = ElementwiseLayer('SUM')(main_flow, bypass_flow)

    if self.func == 'DOWNSAMPLE':
        main_flow = ConvLayer(name='downsample',
                              n_output_chns=self.n_output_chns,
                              w_initializer=self.initializers['w'],
                              w_regularizer=self.regularizers['w'],
                              kernel_size=2,
                              stride=2,
                              with_bias=True)(res_flow)
    elif self.func == 'UPSAMPLE':
        main_flow = DeconvLayer(name='upsample',
                                n_output_chns=self.n_output_chns,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                kernel_size=2,
                                stride=2,
                                with_bias=True)(res_flow)
    elif self.func == 'SAME':
        main_flow = ConvLayer(name='conv_1x1x1',
                              n_output_chns=self.n_output_chns,
                              w_initializer=self.initializers['w'],
                              w_regularizer=self.regularizers['w'],
                              b_initializer=self.initializers['b'],
                              b_regularizer=self.regularizers['b'],
                              kernel_size=1,
                              with_bias=True)(res_flow)
    main_flow = ActiLayer(self.acti_func)(main_flow)
    print(self)
    print('VNet is running')
    return res_flow, main_flow
def conv(ch, x, s):
    # nested helper: `self` and `is_training` are captured from the
    # enclosing layer_op
    conv_layer = ConvolutionalLayer(
        n_output_chns=ch,
        kernel_size=3,
        with_bn=True,
        w_initializer=self.initializers['w'])
    acti_layer = ActiLayer(func='selu')
    # combining two flows
    res_flow = conv_layer(x, is_training=is_training) + s
    return acti_layer(res_flow)
def test_3d_prelu_reg_shape(self):
    x = self.get_3d_input()
    prelu_layer = ActiLayer(func='prelu',
                            regularizer=regularizers.l2_regularizer(0.5),
                            name='regularized')
    out_prelu = prelu_layer(x)
    print(prelu_layer)
    with self.cached_session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(out_prelu)
        self.assertAllClose((2, 16, 16, 16, 8), out.shape)
def layer_op(self, input_tensor, is_training=None, keep_prob=None):
    fc_layer = FCLayer(n_output_chns=self.n_output_chns,
                       with_bias=self.with_bias,
                       w_initializer=self.initializers['w'],
                       w_regularizer=self.regularizers['w'],
                       b_initializer=self.initializers['b'],
                       b_regularizer=self.regularizers['b'],
                       name='fc_')
    output_tensor = fc_layer(input_tensor)

    if self.feature_normalization == 'batch':
        if is_training is None:
            raise ValueError(
                'is_training argument should be '
                'True or False unless feature_normalization is False')
        bn_layer = BNLayer(regularizer=self.regularizers['w'],
                           moving_decay=self.moving_decay,
                           eps=self.eps,
                           name='bn_')
        output_tensor = bn_layer(output_tensor, is_training)
    elif self.feature_normalization == 'instance':
        in_layer = InstanceNormLayer(eps=self.eps, name='in_')
        output_tensor = in_layer(output_tensor)
    elif self.feature_normalization == 'group':
        gn_layer = GNLayer(regularizer=self.regularizers['w'],
                           group_size=self.group_size,
                           eps=self.eps,
                           name='gn_')
        output_tensor = gn_layer(output_tensor)

    if self.acti_func is not None:
        acti_layer = ActiLayer(func=self.acti_func,
                               regularizer=self.regularizers['w'],
                               name='acti_')
        output_tensor = acti_layer(output_tensor)

    if keep_prob is not None:
        dropout_layer = ActiLayer(func='dropout', name='dropout_')
        output_tensor = dropout_layer(output_tensor, keep_prob=keep_prob)
    return output_tensor
def run_test(self, is_3d, type_str, expected_shape):
    if is_3d:
        x = self.get_3d_input()
    else:
        x = self.get_2d_input()
    activation_layer = ActiLayer(func=type_str)
    out_acti = activation_layer(x)
    print(activation_layer)
    with self.cached_session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(out_acti)
        self.assertAllClose(out.shape, expected_shape)
def layer_op(self, input_tensor, input_mask=None,
             is_training=None, keep_prob=None):
    conv_layer = ChannelSparseConvLayer(
        n_output_chns=self.n_output_chns,
        kernel_size=self.kernel_size,
        stride=self.stride,
        dilation=self.dilation,
        padding=self.padding,
        with_bias=self.with_bias,
        w_initializer=self.initializers['w'],
        w_regularizer=self.regularizers['w'],
        b_initializer=self.initializers['b'],
        b_regularizer=self.regularizers['b'],
        name='conv_')

    if keep_prob is not None:
        # randomly keep roughly keep_prob * n_output_chns channels: shuffle
        # the channel indices and keep those falling below the cut-off
        output_mask = (
            tf.to_float(tf.random_shuffle(tf.range(self.n_output_chns)))
            < keep_prob * self.n_output_chns)
        n_output_ch = math.ceil(keep_prob * self.n_output_chns)
    else:
        output_mask = tf.ones([self.n_output_chns]) > 0
        n_output_ch = self.n_output_chns

    output_tensor = conv_layer(input_tensor, input_mask, output_mask)
    output_tensor.set_shape(
        output_tensor.shape.as_list()[:-1] + [n_output_ch])

    if self.feature_normalization == 'batch':
        if is_training is None:
            raise ValueError(
                'For batch norm, you must set the `is_training` argument.')
        bn_layer = ChannelSparseBNLayer(
            self.n_output_chns,
            regularizer=self.regularizers['w'],
            moving_decay=self.moving_decay,
            eps=self.eps,
            name='bn_')
        output_tensor = bn_layer(output_tensor, is_training, output_mask)

    if self.acti_func is not None:
        acti_layer = ActiLayer(
            func=self.acti_func,
            regularizer=self.regularizers['w'],
            name='acti_')
        output_tensor = acti_layer(output_tensor)
    return output_tensor, output_mask
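# A hedged numpy sketch of the channel-mask arithmetic above, with numpy
# standing in for tf.random_shuffle/tf.range (n_output_chns=8 and
# keep_prob=0.6 are illustrative values):
import math
import numpy as np

n_output_chns, keep_prob = 8, 0.6
shuffled = np.random.permutation(n_output_chns).astype(np.float32)
output_mask = shuffled < keep_prob * n_output_chns
# the shuffled values are a permutation of 0..7, so exactly the entries
# holding 0..4 fall below 4.8: ceil(keep_prob * n) channels survive
assert output_mask.sum() == math.ceil(keep_prob * n_output_chns)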
def conv(ch, x, s): """ Convolutional layer (stride 1, kernel size 3, batch norm) combining two flows :param ch: int, number of output channels :param x: tensor, input to the convolutional layer :param s: flow to be added after convolution :return: tensor, output of selu activation layer (selu(conv(x) + s)) """ conv_layer = ConvolutionalLayer( n_output_chns=ch, kernel_size=3, feature_normalization='batch', w_initializer=self.initializers['w']) acti_layer = ActiLayer(func='selu') # combining two flows res_flow = conv_layer(x, is_training=is_training) + s return acti_layer(res_flow)
def layer_op(self, input_tensor, is_training): """ :param input_tensor: tensor, input to the network :param is_training: boolean, True if network is in training mode :return: tensor, output of the autofocus block """ output_tensor = input_tensor ######################################################################## # 1: Create first of two autofocus layer of autofocus block. ######################################################################## # A convolution without feature norm and activation. conv_1 = ConvLayer(n_output_chns = self.n_output_chns[0], kernel_size = self.kernel_size[0], padding='SAME', dilation = 1, w_initializer = self.initializers['w'], w_regularizer = self.regularizers['w'], name = 'conv_1') # Create two conv layers for the attention model. The output of the # attention model will be needed for the K parallel conv layers. # First convolutional layer of the attention model (conv l,1). conv_att_11 = ConvLayer(n_output_chns = int(self.n_input_chns[0]/2), kernel_size = self.kernel_size[0], padding = 'SAME', w_initializer = self.initializers['w'], w_regularizer = self.regularizers['w'], name = 'conv_att_11') # Second convolutional layer of the attention model (conv l,2). conv_att_12 = ConvLayer(n_output_chns = self.num_branches, kernel_size = [1, 1, 1], padding = 'SAME', w_initializer = self.initializers['w'], w_regularizer = self.regularizers['w'], name = 'conv_att_12') # Batch norm (BN) layer for each of the K parallel convolutions bn_layer_1 = [] for i in range(self.num_branches): bn_layer_1.append(BNLayer(regularizer = self.regularizers['w'], name = 'bn_layer_1_{}'.format(i))) # Activation function used in the first attention model acti_op_1 = ActiLayer(func = self.acti_func, regularizer = self.regularizers['w'], name = 'acti_op_1') ######################################################################## # 2: Create second of two autofocus layer of autofocus block. ######################################################################## # A convolution without feature norm and activation. conv_2 = ConvLayer(n_output_chns = self.n_output_chns[1], kernel_size = self.kernel_size[1], padding='SAME', dilation = 1, w_initializer = self.initializers['w'], w_regularizer = self.regularizers['w'], name = 'conv_2') # Create two conv layers for the attention model. The output of the # attention model will be needed for the K parallel conv layers. # First convolutional layer of the attention model (conv l,1). conv_att_21 = ConvLayer(n_output_chns = int(self.n_input_chns[1]/2), kernel_size = self.kernel_size[1], padding = 'SAME', w_initializer = self.initializers['w'], w_regularizer = self.regularizers['w'], name = 'conv_att_21') # Second convolutional layer of the attention model (conv l,2). 
conv_att_22 = ConvLayer(n_output_chns = self.num_branches, kernel_size = [1, 1, 1], padding = 'SAME', w_initializer = self.initializers['w'], w_regularizer = self.regularizers['w'], name = 'conv_att_22') # Batch norm (BN) layer for each of the K parallel convolutions bn_layer_2 = [] for i in range(self.num_branches): bn_layer_2.append(BNLayer(regularizer = self.regularizers['w'], name = 'bn_layer_2_{}'.format(i))) # Activation function used in the second attention model acti_op_2 = ActiLayer(func = self.acti_func, regularizer = self.regularizers['w'], name = 'acti_op_2') ######################################################################## # 3: Create other parameterised layers ######################################################################## acti_op = ActiLayer(func = self.acti_func, regularizer = self.regularizers['w'], name = 'acti_op') ######################################################################## # 4: Connect layers ######################################################################## # compute attention weights for the K parallel conv layers in the first # autofocus convolutional layer feature_1 = output_tensor att_1 = acti_op_1(conv_att_11(feature_1)) att_1 = conv_att_12(att_1) att_1 = tf.nn.softmax(att_1, axis=1) # Create K dilated tensors as input to the autofocus layer. This # simulates the K parallel convolutions with different dilation # rates. Doing it this way ensures the required weight sharing. dilated_tensor_1 = [] for i in range(self.num_branches): dilated_1 = output_tensor with DilatedTensor(dilated_1, dilation_factor = self.dilation_list[i]) as dilated: dilated.tensor = conv_1(dilated.tensor) dilated.tensor = bn_layer_1[i](dilated.tensor, is_training) dilated.tensor = dilated.tensor * att_1[:,:,:,:,i:(i+1)] dilated_tensor_1.append(dilated.tensor) output_tensor = tf.add_n(dilated_tensor_1) output_tensor = acti_op(output_tensor) # compute attention weights for the K parallel conv layers in the second # autofocus convolutional layer feature_2 = output_tensor att_2 = acti_op_2(conv_att_21(feature_2)) att_2 = conv_att_22(att_2) att_2 = tf.nn.softmax(att_2, axis=1) # Create K dilated tensors as input to the autofocus layer. This # simulates the K parallel convolutions with different dilation # rates. Doing it this way ensures the required weight sharing. dilated_tensor_2 = [] for i in range(self.num_branches): dilated_2 = output_tensor with DilatedTensor(dilated_2, dilation_factor = self.dilation_list[i]) as dilated: dilated.tensor = conv_2(dilated.tensor) dilated.tensor = bn_layer_2[i](dilated.tensor, is_training) dilated.tensor = dilated.tensor * att_2[:,:,:,:,i:(i+1)] dilated_tensor_2.append(dilated.tensor) output_tensor = tf.add_n(dilated_tensor_2) # make residual connection using ElementwiseLayer with SUM if self.with_res: output_tensor = ElementwiseLayer('SUM')(output_tensor, input_tensor) # apply the last ReLU activation output_tensor = acti_op(output_tensor) print("output_tensor:", output_tensor) return output_tensor
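# A hedged numpy sketch of the attention weighting used twice above: softmax
# scores over the K branches scale the K dilated conv outputs channel-wise
# and the scaled results are summed (shapes are illustrative; the last axis
# holds the branches, matching the att[:, :, :, :, i:(i + 1)] slicing):
import numpy as np

K = 4                                                  # num_branches
branches = [np.random.rand(2, 8, 8, 8, 16) for _ in range(K)]
att = np.random.rand(2, 8, 8, 8, K)                    # attention logits
att = np.exp(att) / np.exp(att).sum(axis=-1, keepdims=True)  # softmax over K
fused = sum(b * att[..., i:(i + 1)] for i, b in enumerate(branches))
assert fused.shape == branches[0].shape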
def layer_op(self, images, is_training=True, layer_id=-1, keep_prob=0.7,
             **unused_kwargs):
    # >>>>>>>>>>>>>>>> learning to downsample
    print('learning downsample ...')
    lds1 = ConvolutionalLayer(32,
                              conv_type='REGULAR',
                              kernel_size=3,
                              stride=2,
                              w_initializer=self.w_initializer,
                              w_regularizer=self.w_regularizer)
    lds2 = ConvolutionalLayer(48,
                              conv_type='SEPARABLE_2D',
                              kernel_size=3,
                              stride=2,
                              w_initializer=self.w_initializer,
                              w_regularizer=self.w_regularizer)
    lds3 = ConvolutionalLayer(64,
                              conv_type='SEPARABLE_2D',
                              kernel_size=3,
                              stride=2)
    flow = lds1(images, is_training=is_training)
    flow = lds2(flow, is_training=is_training)
    flow = lds3(flow, is_training=is_training)
    lds = flow

    # >>>>>>>>>>>>>>>> global feature extraction
    print('global feature extractor ...')
    bottle1 = SCCNBottleneckBlock(64, 3, t=6, stride=2, n=3,
                                  w_initializer=self.w_initializer,
                                  w_regularizer=self.w_regularizer)
    bottle2 = SCCNBottleneckBlock(96, 3, t=6, stride=2, n=3,
                                  w_initializer=self.w_initializer,
                                  w_regularizer=self.w_regularizer)
    bottle3 = SCCNBottleneckBlock(128, 3, t=6, stride=1, n=3,
                                  w_initializer=self.w_initializer,
                                  w_regularizer=self.w_regularizer)
    pyramid = SCNNPyramidBlock([2, 4, 6, 8],
                               w_initializer=self.w_initializer,
                               w_regularizer=self.w_regularizer)
    flow = bottle1(flow)
    flow = bottle2(flow)
    flow = bottle3(flow)
    flow = pyramid(flow)
    gfe = flow

    # >>>>>>>>>>>>>>>> feature fusion
    print('Feature fusion ...')
    conv1 = ConvolutionalLayer(128,
                               conv_type='REGULAR',
                               kernel_size=1,
                               padding='same',
                               stride=1,
                               acti_func=None,
                               w_initializer=self.w_initializer,
                               w_regularizer=self.w_regularizer)
    upsample1 = tf.keras.layers.UpSampling2D((4, 4), interpolation='bilinear')
    dwconv = ConvolutionalLayer(1,
                                conv_type='DEPTHWISE_2D',
                                kernel_size=3,
                                stride=1,
                                padding='same',
                                acti_func=self.acti_func,
                                w_initializer=self.w_initializer,
                                w_regularizer=self.w_regularizer)
    conv2 = ConvLayer(128,
                      conv_type='REGULAR',
                      kernel_size=1,
                      padding='same',
                      stride=1,
                      w_initializer=self.w_initializer,
                      w_regularizer=self.w_regularizer)
    bn = BNLayer()
    acti = ActiLayer(func=self.acti_func,
                     regularizer=self.w_regularizer,
                     name='acti_')
    flow1 = conv1(lds, is_training=is_training)
    flow2 = upsample1(gfe)
    flow2 = dwconv(flow2, is_training=is_training)
    flow2 = conv2(flow2)
    flow = tf.math.add(flow1, flow2)
    flow = bn(flow, is_training=is_training)
    flow = acti(flow)
    # ff = flow

    # >>>>>>>>>>>>>>>> classifier
    sep_conv1 = ConvolutionalLayer(128,
                                   conv_type='SEPARABLE_2D',
                                   kernel_size=3,
                                   padding='same',
                                   stride=1,
                                   name='DSConv1_classifier',
                                   acti_func=self.acti_func,
                                   w_initializer=self.w_initializer,
                                   w_regularizer=self.w_regularizer)
    sep_conv2 = ConvolutionalLayer(128,
                                   conv_type='SEPARABLE_2D',
                                   kernel_size=3,
                                   padding='same',
                                   stride=1,
                                   name='DSConv2_classifier',
                                   acti_func=self.acti_func,
                                   w_initializer=self.w_initializer,
                                   w_regularizer=self.w_regularizer)
    flow = sep_conv1(flow, is_training=is_training)
    flow = sep_conv2(flow, is_training=is_training)

    conv = ConvolutionalLayer(self.num_classes,
                              conv_type='REGULAR',
                              kernel_size=1,
                              padding='same',
                              stride=1,
                              w_initializer=self.w_initializer,
                              w_regularizer=self.w_regularizer)
    dropout = ActiLayer(func='dropout',
                        regularizer=self.w_regularizer,
                        name='dropout_')  # tf.keras.layers.Dropout(0.3)
    upsample = tf.keras.layers.UpSampling2D((8, 8), interpolation='bilinear')
    flow = conv(flow, is_training=is_training)
    flow = dropout(flow, keep_prob=keep_prob)
    flow = upsample(flow)
    flow = tf.nn.softmax(flow)
    return flow
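# A hedged bookkeeping sketch of the spatial strides in the network above:
# three stride-2 convolutions reduce resolution 8x on the learning-to-
# downsample path, the two stride-2 bottlenecks a further 4x (bottle3 has
# stride 1), and the 4x and 8x bilinear upsamplings undo exactly that.
lds_stride = 2 * 2 * 2            # lds1, lds2, lds3
gfe_stride = lds_stride * 2 * 2   # bottle1, bottle2
assert gfe_stride // lds_stride == 4   # matches UpSampling2D((4, 4))
assert lds_stride == 8                 # matches UpSampling2D((8, 8))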