def build_conv(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num):
    with tf.variable_scope(name):
        w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num],
                        stddev=self.weight_dev, name="conv_W", initializer=self.initializer)
        h = self.conv2d(input_tensor, w, self.cnn_stride, bias=None, activator=None, name=name)

        if self.save_weights:
            util.add_summaries("weight", self.name, w, save_stddev=True, save_mean=True)

        if self.save_images and cnn_size > 1 and input_feature_num == 1:
            weight_transposed = tf.transpose(w, [3, 0, 1, 2])
            with tf.name_scope("image"):
                tf.summary.image(self.name, weight_transposed, max_outputs=self.log_weight_image_num)

    return w, h
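# Usage sketch (hypothetical; `model` stands for an instance of this class and
# `x` for an NHWC input tensor built elsewhere in the repo):
#
#   w, h = model.build_conv("conv1", x, cnn_size=3, input_feature_num=1,
#                           output_feature_num=96)
#
# Returning both the kernel and the activation lets the caller inspect or reuse
# w while chaining h into the next layer.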
def depth_conv2d_layer(self, name, input_tensor, kernel_size1, kernel_size2, input_feature_num,
                       output_feature_num, use_bias=False, activator=None, initializer="he",
                       use_batch_norm=False, dropout_rate=1.0, reuse=False):
    with tf.variable_scope(name, reuse=reuse):
        w = util.weight([kernel_size1, kernel_size2, input_feature_num, 1],
                        stddev=self.weight_dev, name="conv_W", initializer=initializer)
        b = util.bias([output_feature_num], name="conv_B") if use_bias else None
        h = self.depth_conv2d(input_tensor, w, self.cnn_stride, bias=b,
                              use_batch_norm=use_batch_norm, name=name)

        if activator is not None:
            h = self.build_activator(h, output_feature_num, activator, base_name=name)

    return h
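# The depth_conv2d call above presumably wraps TensorFlow's depthwise
# convolution. A minimal sketch of such a wrapper, under that assumption (the
# real implementation lives elsewhere in this class):
#
#   def depth_conv2d(self, input_tensor, w, stride, bias=None, use_batch_norm=False, name=""):
#       h = tf.nn.depthwise_conv2d(input_tensor, w, strides=[1, stride, stride, 1],
#                                  padding="SAME", name=name + "_conv")
#       if bias is not None:
#           h = tf.add(h, bias, name=name + "_add")
#       if use_batch_norm:
#           h = tf.layers.batch_normalization(h, training=self.is_training, name="BN")
#       return h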
def depthwise_separable_conv2d(self, input_tensor, w, stride, channel_multiplier=1, bias=None,
                               use_batch_norm=False, name=""):
    # w format: [filter_height, filter_width, in_channels, out_channels]
    depthwise_filter = util.weight(
        [int(w.shape[0]), int(w.shape[1]), int(w.shape[2]), channel_multiplier],
        stddev=self.weight_dev, name="depthwise_W", initializer=self.initializer)
    pointwise_filter = util.weight(
        [1, 1, channel_multiplier * int(w.shape[2]), int(w.shape[3])],
        stddev=self.weight_dev, name="pointwise_W", initializer=self.initializer)

    output = tf.nn.separable_conv2d(input_tensor, depthwise_filter, pointwise_filter,
                                    strides=[1, stride, stride, 1], padding="SAME",
                                    name=name + "_conv")

    # Per pixel, the depthwise pass costs kH * kW * in_channels * channel_multiplier
    # multiplies and the 1x1 pointwise pass costs
    # (channel_multiplier * in_channels) * out_channels (the pointwise term must
    # include channel_multiplier, since that is its input depth).
    self.complexity += (self.pix_per_input * int(w.shape[0] * w.shape[1] * w.shape[2]) * channel_multiplier +
                        self.pix_per_input * channel_multiplier * int(w.shape[2] * w.shape[3]))

    if bias is not None:
        output = tf.add(output, bias, name=name + "_add")
        self.complexity += self.pix_per_input * int(bias.shape[0])

    if use_batch_norm:
        output = tf.layers.batch_normalization(output, training=self.is_training, name='BN')

    return output
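# Why the separable form is cheaper (illustrative arithmetic with assumed
# sizes): a standard k x k convolution costs k*k*c_in*c_out multiplies per
# pixel, while the depthwise + pointwise pair above costs
# k*k*c_in*m + m*c_in*c_out.
#
#   k, c_in, c_out, m = 3, 64, 64, 1
#   standard = k * k * c_in * c_out                   # 36864
#   separable = k * k * c_in * m + m * c_in * c_out   # 576 + 4096 = 4672
#   # ratio ~= 0.127, i.e. roughly an 8x reduction in multiply count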
def build_conv_and_bias(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num,
                        use_activator=True, use_dropout=True):
    with tf.variable_scope(name):
        w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num],
                        stddev=self.weight_dev, name="conv_W", initializer=self.initializer)
        b = util.bias([output_feature_num], name="conv_B")
        h = self.conv2d(input_tensor, w, self.cnn_stride, bias=b,
                        activator=self.activator if use_activator else None, name=name)

        if use_dropout and self.dropout != 1.0:
            h = tf.nn.dropout(h, self.dropout_input, name="dropout")

        if self.save_weights:
            util.add_summaries("weight", self.name, w, save_stddev=True, save_mean=True)
            util.add_summaries("bias", self.name, b, save_stddev=True, save_mean=True)

        if self.save_images and cnn_size > 1 and input_feature_num == 1:
            weight_transposed = tf.transpose(w, [3, 0, 1, 2])
            with tf.name_scope("image"):
                tf.summary.image(self.name, weight_transposed, max_outputs=self.log_weight_image_num)

    return w, b, h
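# Usage sketch (hypothetical): unlike build_conv, this variant always adds a
# bias and returns all three graph pieces:
#
#   w, b, h = model.build_conv_and_bias("conv2", h, cnn_size=3,
#                                       input_feature_num=96, output_feature_num=76)
#
# self.dropout is read here as a keep probability (1.0 disables the branch);
# the value actually applied at run time presumably comes through the
# self.dropout_input placeholder.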
def build_conv(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num,
               use_bias=False, activator=None, use_batch_norm=False, dropout_rate=1.0):
    with tf.variable_scope(name):
        w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num],
                        stddev=self.weight_dev, name="conv_W", initializer=self.initializer)
        b = util.bias([output_feature_num], name="conv_B") if use_bias else None
        h = self.conv2d(input_tensor, w, self.cnn_stride, bias=b,
                        use_batch_norm=use_batch_norm, name=name)

        if activator is not None:
            h = self.build_activator(h, output_feature_num, activator, base_name=name)

        if dropout_rate < 1.0:
            h = tf.nn.dropout(h, self.dropout, name="dropout")

        self.H.append(h)

        if self.save_weights:
            util.add_summaries("weight", self.name, w, save_stddev=True, save_mean=True)
            util.add_summaries("output", self.name, h, save_stddev=True, save_mean=True)
            if use_bias:
                util.add_summaries("bias", self.name, b, save_stddev=True, save_mean=True)  # TODO: check

        if self.save_images and cnn_size > 1 and input_feature_num == 1:
            weight_transposed = tf.transpose(w, [3, 0, 1, 2])
            with tf.name_scope("image"):
                tf.summary.image(self.name, weight_transposed, max_outputs=self.log_weight_image_num)

    # Track receptive field growth: the first layer contributes cnn_size,
    # and each subsequent stride-1 layer adds (cnn_size - 1).
    if self.receptive_fields == 0:
        self.receptive_fields = cnn_size
    else:
        self.receptive_fields += (cnn_size - 1)
    self.features += "%d " % output_feature_num

    self.Weights.append(w)
    if use_bias:
        self.Biases.append(b)

    return h
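# Usage sketch (hypothetical layer sizes and activator string; `model` is an
# instance of this class and `x` an NHWC input tensor). Stacking three stride-1
# 3x3 layers gives a receptive field of 3 + 2 + 2 = 7, tracked by the code above:
#
#   h = model.build_conv("conv1", x, 3, input_feature_num=1, output_feature_num=96,
#                        use_bias=True, activator="prelu")
#   h = model.build_conv("conv2", h, 3, 96, 76, use_bias=True, activator="prelu")
#   h = model.build_conv("conv3", h, 3, 76, 65, use_bias=True, activator="prelu")
#   # model.receptive_fields == 7; model.Weights and model.H hold the graph pieces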