def _call(inputs):
  """Runs the block: optional expansion, projection, and residual skip.

  Args:
    inputs: input feature tensor for this block.

  Returns:
    Output tensor after projection (plus the residual when applicable).
  """
  args = self._block_args
  logging.info('Block %s input shape: %s', self.name, inputs.shape)

  # Expansion phase is skipped entirely when the expand ratio is 1.
  if args.expand_ratio != 1:
    net = self._relu_fn(
        self._bn0(self._expand_conv(inputs), training=training))
  else:
    net = inputs
  logging.info('Expand shape: %s', net.shape)

  self.endpoints = {'expansion_output': net}

  # Projection phase.
  net = self._bn1(self._project_conv(net), training=training)
  # Add identity so that quantization-aware training can insert quantization
  # ops correctly.
  net = tf.identity(net)
  if self._clip_projection_output:
    net = tf.clip_by_value(net, -6, 6)

  # Residual connection: only when id_skip is set, all strides are 1 and the
  # input/output channel counts match.
  if (args.id_skip and all(s == 1 for s in args.strides)
      and args.input_filters == args.output_filters):
    if survival_prob:
      # Stochastic depth: randomly drop the block's contribution in training.
      net = utils.drop_connect(net, training, survival_prob)
    net = tf.add(net, inputs)
  logging.info('Project shape: %s', net.shape)
  return net
def _call(image):
  """Applies conv + batch norm + optional activation with a residual add.

  Drop-connect is applied only on repeats after the first (i > 0), mirroring
  stochastic-depth behavior.
  """
  residual = image
  image = conv_op(image)
  image = bn(image, training=training)
  # Activation is optional; skip it when act_type is falsy.
  if self.act_type:
    image = utils.activation_fn(image, act_type)
  # NOTE(review): `i` is a free variable from the enclosing loop — the first
  # repeat never gets drop-connect.
  if i > 0 and self.survival_prob:
    image = utils.drop_connect(image, training, self.survival_prob)
  return image + residual
def class_net(images, level, num_classes, num_anchors, num_filters,
              is_training, act_type, separable_conv=True, repeats=4,
              survival_prob=None, strategy=None,
              data_format='channels_last'):
  """Class prediction network.

  Args:
    images: input feature map for this pyramid level.
    level: pyramid level index (used only for batch-norm variable naming).
    num_classes: number of object classes to predict.
    num_anchors: number of anchors per spatial location.
    num_filters: channel count for the intermediate conv layers.
    is_training: whether batch norm / drop-connect run in training mode.
    act_type: activation type passed to `utils.batch_norm_act`.
    separable_conv: use depthwise-separable convs instead of regular convs.
    repeats: number of conv + bn + activation repeats before the head.
    survival_prob: drop-connect survival probability (None disables it).
    strategy: distribution strategy forwarded to batch norm.
    data_format: 'channels_last' or 'channels_first'.

  Returns:
    Logits tensor with num_classes * num_anchors output channels.
  """
  # Bind the conv builder once; only the initializers differ by conv type.
  if separable_conv:
    conv_op = functools.partial(
        tf.layers.separable_conv2d,
        depth_multiplier=1,
        data_format=data_format,
        pointwise_initializer=tf.initializers.variance_scaling(),
        depthwise_initializer=tf.initializers.variance_scaling())
  else:
    conv_op = functools.partial(
        tf.layers.conv2d,
        data_format=data_format,
        kernel_initializer=tf.random_normal_initializer(stddev=0.01))

  for i in range(repeats):
    skip = images
    images = conv_op(
        images,
        num_filters,
        kernel_size=3,
        bias_initializer=tf.zeros_initializer(),
        activation=None,
        padding='same',
        name='class-%d' % i)
    images = utils.batch_norm_act(
        images,
        is_training,
        act_type=act_type,
        init_zero=False,
        strategy=strategy,
        data_format=data_format,
        name='class-%d-bn-%d' % (i, level))
    # Residual + drop-connect on every repeat after the first.
    if i > 0 and survival_prob:
      images = utils.drop_connect(images, is_training, survival_prob)
      images = images + skip

  # Prediction head; bias prior set so initial foreground prob is ~0.01,
  # as in the focal-loss (RetinaNet) initialization.
  return conv_op(
      images,
      num_classes * num_anchors,
      kernel_size=3,
      bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
      padding='same',
      name='class-predict')
def _call(inputs):
  """Runs the MBConv block forward pass.

  Pipeline: optional super-pixel transform, then either a fused conv or an
  expansion + depthwise conv, optional squeeze-and-excitation, projection,
  and finally a residual skip connection when shapes allow it.

  Args:
    inputs: input feature tensor for this block.

  Returns:
    The block's output tensor.
  """
  args = self._block_args
  logging.info('Block %s input shape: %s', self.name, inputs.shape)
  net = inputs

  # creates conv 2x2 kernel
  if self.super_pixel:
    net = self.super_pixel(net, training)
    logging.info('SuperPixel %s: %s', self.name, net.shape)

  if args.fused_conv:
    # If use fused mbconv, skip expansion and use regular conv.
    net = self._relu_fn(self._bn1(self._fused_conv(net), training=training))
    logging.info('Conv2D shape: %s', net.shape)
  else:
    # Otherwise, first apply expansion and then apply depthwise conv.
    if args.expand_ratio != 1:
      net = self._relu_fn(
          self._bn0(self._expand_conv(net), training=training))
      logging.info('Expand shape: %s', net.shape)
    net = self._relu_fn(
        self._bn1(self._depthwise_conv(net), training=training))
    logging.info('DWConv shape: %s', net.shape)

  # Optional squeeze-and-excitation recalibration.
  if self._se:
    net = self._se(net)

  self.endpoints = {'expansion_output': net}

  # Projection phase.
  net = self._bn2(self._project_conv(net), training=training)
  # Add identity so that quantization-aware training can insert quantization
  # ops correctly.
  net = tf.identity(net)
  if self._clip_projection_output:
    net = tf.clip_by_value(net, -6, 6)

  # Residual connection: only when id_skip is set, all strides are 1 and the
  # input/output channel counts match.
  if (args.id_skip and all(s == 1 for s in args.strides)
      and args.input_filters == args.output_filters):
    if survival_prob:
      # Stochastic depth: randomly drop the block's contribution in training.
      net = utils.drop_connect(net, training, survival_prob)
    net = tf.add(net, inputs)
  logging.info('Project shape: %s', net.shape)
  return net