def create_layer(self, input, include_w_input=None, is_training=True, center=False, dp_rate=0.0, **kwargs):
    print("name: {}".format(self.name))

    # Optionally merge a skip connection into the input, either by
    # element-wise addition or by channel-wise concatenation.
    if self.add_to_input:
        input = tf.add(input, include_w_input)
    if self.concat_to_input:
        input = tf.concat([input, include_w_input], axis=-1)

    self.input_shape = get_incoming_shape(input)
    print(self.input_shape)
    number_of_input_channels = self.input_shape[3]
    self.number_of_input_channels = number_of_input_channels

    with tf.variable_scope('conv', reuse=False):
        initializer = None
        if self.weight_init == 'He':
            initializer = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
        elif self.weight_init == 'Xnormal':
            initializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=None)

        W = tf.get_variable('W{}'.format(self.name),
                            shape=(self.kernel_size, self.kernel_size, number_of_input_channels, self.output_channels),
                            initializer=initializer)
        b = tf.Variable(tf.zeros([self.output_channels]))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, W)

        # Dilated (atrous) convolution, followed by dropout.
        output = tf.nn.atrous_conv2d(input, W, rate=self.dilation, padding='SAME')
        output = self.apply_dropout(output, dp_rate, is_training)

        # Batch-normalize (optional), then add the bias.
        if self.batch_norm:
            print("apply batch norm")
            output = tf.contrib.layers.batch_norm(output, is_training=is_training)
        output = tf.add(output, b)

        output = self.get_act_values(output)
        output = self.zero_center_output(output, center)
    return output
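# apply_dropout and zero_center_output are defined elsewhere in the class; a
# minimal sketch of what they might look like, assuming apply_dropout wraps
# tf.nn.dropout and zero_center_output subtracts the spatial mean. Both
# bodies are assumptions for illustration, not the repo's actual code.
def apply_dropout(self, input, dp_rate, is_training):
    # Inverted dropout, active only while training.
    if is_training and dp_rate > 0.0:
        return tf.nn.dropout(input, keep_prob=1.0 - dp_rate)
    return input

def zero_center_output(self, output, center):
    # Subtract the per-map spatial mean so the output is zero-centered.
    if center:
        output = output - tf.reduce_mean(output, axis=[1, 2], keepdims=True)
    return output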
def create_layer(self, input, **kwargs):
    print("name: {}".format(self.name))
    self.input_shape = get_incoming_shape(input)
    print(self.input_shape)
    number_of_input_channels = self.input_shape[3]
    self.number_of_input_channels = number_of_input_channels

    with tf.variable_scope('dfconv', reuse=False):
        # Resolve the initializer into a local so the string-valued
        # self.weight_init attribute is not overwritten.
        initializer = None
        if self.weight_init == 'He':
            initializer = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
        elif self.weight_init == 'Xnormal':
            initializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=None)
        #tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, W)

        # Deformable convolution via TensorLayer's
        # DeformableConv2d(prev_layer, offset_layer, n_filter, filter_size,
        # act, name, W_init).
        output = tl.layers.DeformableConv2d(self.prev_layer, self.offset_layer, self.n_filter,
                                            self.filter_size, self.act_fn, self.name, initializer)
        # TensorLayer returns a Layer object; .outputs holds the tf.Tensor.
        output = self.get_act_values(output.outputs)
    return output
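# TensorLayer layers consume Layer objects rather than raw tf.Tensors; a
# minimal sketch of preparing DeformableConv2d's inputs, assuming the
# TensorLayer 1.x API. The names and the 3x3 kernel are illustrative: the
# offset layer is conventionally a small conv predicting 2*k*k sampling
# offsets for a k x k kernel.
net_in = tl.layers.InputLayer(input, name='df_input')
offsets = tl.layers.Conv2d(net_in, n_filter=2 * 3 * 3, filter_size=(3, 3), name='df_offset')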
def create_layer(self, input):
    self.input_shape = get_incoming_shape(input)
    print(self.input_shape)
    number_of_input_channels = self.input_shape[3]
    self.number_of_input_channels = number_of_input_channels

    with tf.variable_scope('conv', reuse=False):
        W = tf.get_variable('W{}'.format(self.name[-3:]),
                            shape=(self.kernel_size, self.kernel_size, number_of_input_channels, self.output_channels))
        b = tf.Variable(tf.zeros([self.output_channels]))
        #self.encoder_matrix = W
        Conv2d.layer_index += 1

        # Dilated convolution, then batch norm, bias, and leaky-ReLU activation.
        output = tf.nn.atrous_conv2d(input, W, rate=self.dilation, padding='SAME')
        output = lrelu(tf.add(tf.contrib.layers.batch_norm(output), b))
    return output
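# lrelu is the repo's leaky-ReLU helper, defined elsewhere; a minimal sketch
# using the conventional formulation (the 0.2 slope is an assumption):
def lrelu(x, alpha=0.2):
    # Identity for positive x, scaled by alpha for negative x.
    return tf.maximum(alpha * x, x)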
def create_layer(self, input, include_w_input=None, pooling_method="MAX", unpooling_method="nearest_neighbor", center=False, **kwargs):
    print("name: {}".format(self.name))
    self.input_shape = get_incoming_shape(input)
    print(self.input_shape)

    # Optionally merge a skip connection before pooling.
    if self.add_to_input:
        input = tf.add(input, include_w_input)
    if self.concat_to_input:
        input = tf.concat([input, include_w_input], axis=-1)

    output = self.apply_pool(input, self.kernel_size, pooling_method=pooling_method, unpooling_method=unpooling_method)
    output = self.zero_center_output(output, center)
    return output
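# apply_pool is defined elsewhere; a minimal sketch of its "MAX" branch,
# assuming it maps directly onto tf.nn.max_pool with stride equal to the
# kernel size (the body is an assumption for illustration):
def apply_pool(self, input, kernel_size, pooling_method="MAX", unpooling_method="nearest_neighbor"):
    if pooling_method == "MAX":
        return tf.nn.max_pool(input,
                              ksize=[1, kernel_size, kernel_size, 1],
                              strides=[1, kernel_size, kernel_size, 1],
                              padding='SAME')
    raise ValueError("unsupported pooling_method: {}".format(pooling_method))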
def create_layer(self, input):
    self.input_shape = get_incoming_shape(input)
    print(self.input_shape)
    return max_pool_2d(input, self.kernel_size)
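# A minimal usage sketch showing how these create_layer methods chain into a
# graph. The Conv2d and MaxPool2d constructor signatures are assumptions for
# illustration; only the create_layer methods appear above.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
layers = [
    Conv2d(kernel_size=3, dilation=1, output_channels=64, name='conv_1_1'),
    MaxPool2d(kernel_size=2, name='max_1'),
]
net = inputs
for layer in layers:
    net = layer.create_layer(net)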