Example #1
0
    def depth_conv2d_layer(self,
                           name,
                           input_tensor,
                           kernel_size1,
                           kernel_size2,
                           input_feature_num,
                           output_feature_num,
                           use_bias=False,
                           activator=None,
                           initializer="he",
                           use_batch_norm=False,
                           dropout_rate=1.0,
                           reuse=False):
        """Build a depthwise conv2d layer with optional bias, activation and dropout.

        Args:
            name: variable-scope / layer name.
            input_tensor: input tensor fed to the depthwise convolution.
            kernel_size1: kernel height.
            kernel_size2: kernel width.
            input_feature_num: number of input channels.
            output_feature_num: number of output channels (used for bias shape
                and passed to build_activator).
            use_bias: when True, add a "conv_B" bias variable.
            activator: activation identifier handed to build_activator;
                None skips activation.
            initializer: weight initializer name (default "he").
            use_batch_norm: forwarded to depth_conv2d.
            dropout_rate: keep probability; values < 1.0 enable dropout.
            reuse: reuse variables inside the scope when True.

        Returns:
            The output tensor of the layer.
        """
        with tf.variable_scope(name, reuse=reuse):
            w = util.weight([kernel_size1, kernel_size2, input_feature_num, 1],
                            stddev=self.weight_dev,
                            name="conv_W",
                            initializer=initializer)

            b = util.bias([output_feature_num],
                          name="conv_B") if use_bias else None
            h = self.depth_conv2d(input_tensor,
                                  w,
                                  self.cnn_stride,
                                  bias=b,
                                  use_batch_norm=use_batch_norm,
                                  name=name)

            if activator is not None:
                h = self.build_activator(h,
                                         output_feature_num,
                                         activator,
                                         base_name=name)

            # FIX: dropout_rate was accepted but silently ignored. Apply
            # dropout the same way build_conv does so the parameter takes
            # effect. NOTE(review): assumes self.dropout is the keep-prob
            # tensor, as in build_conv — confirm against the class __init__.
            if dropout_rate < 1.0:
                h = tf.nn.dropout(h, self.dropout, name="dropout")
        return h
Example #2
0
    def build_conv_and_bias(self,
                            name,
                            input_tensor,
                            cnn_size,
                            input_feature_num,
                            output_feature_num,
                            use_activator=True,
                            use_dropout=True):
        """Create a square conv layer with a bias term.

        Optionally applies the model's activator and dropout, and emits
        TensorBoard summaries for the weight/bias when self.save_weights is
        set (plus kernel images for single-input-channel layers when
        self.save_images is set).

        Returns:
            A (weight, bias, output_tensor) triple so callers can reuse the
            created variables directly.
        """
        with tf.variable_scope(name):
            kernel_shape = [cnn_size, cnn_size,
                            input_feature_num, output_feature_num]
            conv_weight = util.weight(kernel_shape,
                                      stddev=self.weight_dev,
                                      name="conv_W",
                                      initializer=self.initializer)
            conv_bias = util.bias([output_feature_num], name="conv_B")

            activation = self.activator if use_activator else None
            output = self.conv2d(input_tensor,
                                 conv_weight,
                                 self.cnn_stride,
                                 bias=conv_bias,
                                 activator=activation,
                                 name=name)

            if use_dropout and self.dropout != 1.0:
                output = tf.nn.dropout(output, self.dropout_input,
                                       name="dropout")

            if self.save_weights:
                # Record weight/bias statistics for TensorBoard.
                for tag, tensor in (("weight", conv_weight),
                                    ("bias", conv_bias)):
                    util.add_summaries(tag,
                                       self.name,
                                       tensor,
                                       save_stddev=True,
                                       save_mean=True)

            if self.save_images and cnn_size > 1 and input_feature_num == 1:
                # Only single-input-channel kernels can be rendered as images.
                transposed = tf.transpose(conv_weight, [3, 0, 1, 2])
                with tf.name_scope("image"):
                    tf.summary.image(self.name,
                                     transposed,
                                     max_outputs=self.log_weight_image_num)

        return conv_weight, conv_bias, output
Example #3
0
	def build_conv(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num, use_bias=False,
	               activator=None, use_batch_norm=False, dropout_rate=1.0):
		"""Create a square conv layer and record it in the model's bookkeeping.

		Side effects: appends to self.H and self.Weights (and self.Biases when
		use_bias is set), accumulates self.receptive_fields and self.features,
		and optionally writes TensorBoard summaries/images.

		Returns the output tensor of the layer.
		"""
		with tf.variable_scope(name):
			kernel = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num],
			                     stddev=self.weight_dev, name="conv_W", initializer=self.initializer)

			bias_term = util.bias([output_feature_num], name="conv_B") if use_bias else None
			out = self.conv2d(input_tensor, kernel, self.cnn_stride, bias=bias_term,
			                  use_batch_norm=use_batch_norm, name=name)

			if activator is not None:
				out = self.build_activator(out, output_feature_num, activator, base_name=name)

			if dropout_rate < 1.0:
				out = tf.nn.dropout(out, self.dropout, name="dropout")

			self.H.append(out)

			if self.save_weights:
				util.add_summaries("weight", self.name, kernel, save_stddev=True, save_mean=True)
				util.add_summaries("output", self.name, out, save_stddev=True, save_mean=True)
				if use_bias:
					util.add_summaries("bias", self.name, bias_term, save_stddev=True, save_mean=True)

			# todo check
			if self.save_images and cnn_size > 1 and input_feature_num == 1:
				# Only single-input-channel kernels can be rendered as images.
				with tf.name_scope("image"):
					tf.summary.image(self.name, tf.transpose(kernel, [3, 0, 1, 2]),
					                 max_outputs=self.log_weight_image_num)

		# Track the cumulative receptive field of the stacked conv layers.
		if self.receptive_fields == 0:
			self.receptive_fields = cnn_size
		else:
			self.receptive_fields += (cnn_size - 1)
		self.features += "%d " % output_feature_num

		self.Weights.append(kernel)
		if use_bias:
			self.Biases.append(bias_term)

		return out