def call(self, new_node, training):
  if not self.conv_bn_act_pattern:
    new_node = utils.activation_fn(new_node, self.act_type)
  new_node = self.conv_op(new_node)
  new_node = self.bn(new_node, training=training)
  if self.conv_bn_act_pattern:
    new_node = utils.activation_fn(new_node, self.act_type)
  return new_node

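# A minimal standalone sketch (hypothetical names: `act`, `conv`, `bn`) of the
# two orderings the conv_bn_act_pattern flag selects in the call() above.
import tensorflow as tf

act = tf.nn.swish
conv = tf.keras.layers.Conv2D(8, 3, padding='same')
bn = tf.keras.layers.BatchNormalization()

def op_after_combine(x, conv_bn_act_pattern, training=False):
  if not conv_bn_act_pattern:  # pre-activation: act -> conv -> bn
    x = act(x)
  x = conv(x)
  x = bn(x, training=training)
  if conv_bn_act_pattern:  # fused pattern: conv -> bn -> act
    x = act(x)
  return x

print(op_after_combine(tf.zeros([1, 16, 16, 8]), True).shape)  # (1, 16, 16, 8)
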
def test_swish(self):
  features = tf.constant([.5, 10])

  result = utils.activation_fn(features, 'swish')
  expected = features * tf.sigmoid(features)
  self.assertAllClose(result, expected)

  result = utils.activation_fn(features, 'swish_native')
  self.assertAllClose(result, expected)

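# Numeric check of swish(x) = x * sigmoid(x) for the test inputs, in pure
# Python: sigmoid(0.5) ~= 0.622459, so swish(0.5) ~= 0.311230.
import math

for x in (0.5, 10.0):
  print(x / (1.0 + math.exp(-x)))  # ~0.311230, ~9.999546
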
def _call(image):
  original_image = image
  image = conv_op(image)
  image = bn(image, training=training)
  if self.act_type:
    image = utils.activation_fn(image, act_type)
  if i > 0 and self.survival_prob:
    image = utils.drop_connect(image, training, self.survival_prob)
  image = image + original_image
  return image

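# Sketch of the stochastic-depth op behind utils.drop_connect, written the way
# the technique is commonly implemented (assumes batch-first 4-D inputs): each
# example in the batch survives with probability survival_prob, and survivors
# are rescaled so the expected value of the output is unchanged.
import tensorflow as tf

def drop_connect(inputs, is_training, survival_prob):
  if not is_training:
    return inputs
  batch_size = tf.shape(inputs)[0]
  # floor(survival_prob + U[0, 1)) is 1 with probability survival_prob.
  random_tensor = survival_prob + tf.random.uniform(
      [batch_size, 1, 1, 1], dtype=inputs.dtype)
  binary_tensor = tf.floor(random_tensor)
  return inputs / survival_prob * binary_tensor
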
def call(self, feats, training):
  x = feats[-1]
  skips = list(reversed(feats[:-1]))

  for con2d_t, con2d_t_bn, skip in zip(self.con2d_ts, self.con2d_t_bns, skips):
    x = con2d_t(x)
    x = con2d_t_bn(x, training)
    x = utils.activation_fn(x, self.act_type)
    x = tf.concat([x, skip], axis=-1)

  # This is the last layer of the model.
  return self.head_transpose(x)  # 64x64 -> 128x128

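# Quick shape check that a stride-2 transposed conv doubles spatial dims, which
# is what the 64x64 -> 128x128 comment on head_transpose relies on (the layer
# parameters here are illustrative, not the model's actual configuration).
import tensorflow as tf

head = tf.keras.layers.Conv2DTranspose(3, kernel_size=3, strides=2,
                                       padding='same')
print(head(tf.zeros([1, 64, 64, 16])).shape)  # (1, 128, 128, 3)
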
def build_bifpn_layer(feats, feat_sizes, config):
  """Builds a feature pyramid given previous feature pyramid and config."""
  p = config  # use p to denote the network config.
  if p.fpn_config:
    fpn_config = p.fpn_config
  else:
    fpn_config = fpn_configs.get_fpn_config(p.fpn_name, p.min_level,
                                            p.max_level, p.fpn_weight_method)

  num_output_connections = [0 for _ in feats]
  for i, fnode in enumerate(fpn_config.nodes):
    with tf.variable_scope('fnode{}'.format(i)):
      logging.info('fnode %d : %s', i, fnode)
      new_node_height = feat_sizes[fnode['feat_level']]['height']
      new_node_width = feat_sizes[fnode['feat_level']]['width']
      nodes = []
      for idx, input_offset in enumerate(fnode['inputs_offsets']):
        input_node = feats[input_offset]
        num_output_connections[input_offset] += 1
        input_node = resample_feature_map(
            input_node, '{}_{}_{}'.format(idx, input_offset, len(feats)),
            new_node_height, new_node_width, p.fpn_num_filters,
            p.apply_bn_for_resampling, p.is_training_bn,
            p.conv_after_downsample,
            strategy=p.strategy,
            data_format=config.data_format)
        nodes.append(input_node)

      new_node = fuse_features(nodes, fpn_config.weight_method)

      with tf.variable_scope('op_after_combine{}'.format(len(feats))):
        if not p.conv_bn_act_pattern:
          new_node = utils.activation_fn(new_node, p.act_type)

        if p.separable_conv:
          conv_op = functools.partial(
              tf.layers.separable_conv2d, depth_multiplier=1)
        else:
          conv_op = tf.layers.conv2d

        new_node = conv_op(
            new_node,
            filters=p.fpn_num_filters,
            kernel_size=(3, 3),
            padding='same',
            use_bias=not p.conv_bn_act_pattern,
            data_format=config.data_format,
            name='conv')

        new_node = utils.batch_norm_act(
            new_node,
            is_training_bn=p.is_training_bn,
            act_type=None if not p.conv_bn_act_pattern else p.act_type,
            data_format=config.data_format,
            strategy=p.strategy,
            name='bn')

      feats.append(new_node)
      num_output_connections.append(0)

  output_feats = {}
  for l in range(p.min_level, p.max_level + 1):
    for i, fnode in enumerate(reversed(fpn_config.nodes)):
      if fnode['feat_level'] == l:
        output_feats[l] = feats[-1 - i]
        break
  return output_feats

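# Minimal sketch of one weight_method fuse_features can select: the normalized
# fast-attention fusion used by BiFPN, where each input edge gets a learnable
# non-negative scalar weight (names and variable handling are illustrative).
import tensorflow as tf

def fast_attention_fuse(nodes, edge_weights):
  weights = [tf.nn.relu(w) for w in edge_weights]
  total = tf.add_n(weights) + 1e-4  # epsilon guards against division by zero
  return tf.add_n([w / total * node for w, node in zip(weights, nodes)])

nodes = [tf.ones([1, 8, 8, 4]), 3 * tf.ones([1, 8, 8, 4])]
edge_weights = [tf.Variable(1.0), tf.Variable(1.0)]
print(fast_attention_fuse(nodes, edge_weights)[0, 0, 0, 0])  # ~2.0
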
def test_mish(self):
  features = tf.constant([.5, 10])
  result = utils.activation_fn(features, 'mish')
  self.assertAllClose(result, [0.37524524, 10.0])

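# Numeric check of mish(x) = x * tanh(softplus(x)) against the expected
# constants above, where softplus(x) = log(1 + exp(x)).
import math

for x in (0.5, 10.0):
  print(x * math.tanh(math.log(1.0 + math.exp(x))))  # ~0.37524524, ~10.0
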
def test_relu6(self):
  features = tf.constant([.5, 10])
  result = utils.activation_fn(features, 'relu6')
  self.assertAllClose(result, [0.5, 6])

def test_hswish(self):
  features = tf.constant([.5, 10])
  result = utils.activation_fn(features, 'hswish')
  self.assertAllClose(result, [0.29166667, 10.0])

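# Numeric check of hswish(x) = x * relu6(x + 3) / 6, where relu6(y) =
# min(max(y, 0), 6) is the clamp exercised by test_relu6 above:
# 0.5 * 3.5 / 6 = 0.29166667 and 10 * 6 / 6 = 10.
for x in (0.5, 10.0):
  print(x * min(max(x + 3.0, 0.0), 6.0) / 6.0)  # 0.29166667, 10.0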