Example #1
  def test_batch_norm(self, is_training, strategy):
    inputs = tf.random.uniform([8, 40, 40, 3])
    expect_results = utils.batch_norm_act(
        inputs, is_training, None, strategy=strategy)

    # Call the batch norm layer with the is_training flag.
    bn_layer = util_keras.build_batch_norm(is_training, strategy=strategy)
    self.assertAllClose(expect_results, bn_layer(inputs, is_training))
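
For orientation, a minimal usage sketch of the two helpers exercised above (assumptions: TF 1.x-style execution as in the snippets, and that utils / util_keras are the helper modules imported elsewhere; shapes are illustrative):

feat = tf.random.uniform([8, 40, 40, 3])        # NHWC feature map
# Functional form: batch norm plus optional activation (disabled by passing None).
out_fn = utils.batch_norm_act(feat, False, None)
# Keras-layer form: build the layer once, then call it with the training flag.
bn_layer = util_keras.build_batch_norm(False)
out_keras = bn_layer(feat, False)               # same shape as feat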
Example #2
def class_net(images,
              level,
              num_classes,
              num_anchors,
              num_filters,
              is_training,
              act_type,
              separable_conv=True,
              repeats=4,
              survival_prob=None,
              strategy=None,
              data_format='channels_last'):
    """Class prediction network."""
    if separable_conv:
        conv_op = functools.partial(
            tf.layers.separable_conv2d,
            depth_multiplier=1,
            data_format=data_format,
            pointwise_initializer=tf.initializers.variance_scaling(),
            depthwise_initializer=tf.initializers.variance_scaling())
    else:
        conv_op = functools.partial(
            tf.layers.conv2d,
            data_format=data_format,
            kernel_initializer=tf.random_normal_initializer(stddev=0.01))

    for i in range(repeats):
        orig_images = images
        images = conv_op(images,
                         num_filters,
                         kernel_size=3,
                         bias_initializer=tf.zeros_initializer(),
                         activation=None,
                         padding='same',
                         name='class-%d' % i)
        images = utils.batch_norm_act(images,
                                      is_training,
                                      act_type=act_type,
                                      init_zero=False,
                                      strategy=strategy,
                                      data_format=data_format,
                                      name='class-%d-bn-%d' % (i, level))

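        # After the first repeat, add a residual connection with stochastic depth (drop connect).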
        if i > 0 and survival_prob:
            images = utils.drop_connect(images, is_training, survival_prob)
            images = images + orig_images

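    # Final prediction conv; the bias is initialized so the initial foreground
    # probability is ~0.01 (the focal-loss prior).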
    classes = conv_op(
        images,
        num_classes * num_anchors,
        kernel_size=3,
        bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
        padding='same',
        name='class-predict')
    return classes
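
A hedged usage sketch of the class head above (batch size, spatial size, and hyper-parameters are illustrative assumptions, not values from the source):

feat = tf.random.uniform([2, 40, 40, 64])   # one pyramid level, NHWC
class_logits = class_net(
    images=feat,
    level=3,
    num_classes=90,
    num_anchors=9,
    num_filters=64,
    is_training=False,
    act_type='swish')
# class_logits has shape [2, 40, 40, num_classes * num_anchors] = [2, 40, 40, 810].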
Example #3
 def _maybe_apply_1x1(feat):
     """Apply 1x1 conv to change layer width if necessary."""
     if num_channels != target_num_channels:
         feat = tf.layers.conv2d(feat,
                                 filters=target_num_channels,
                                 kernel_size=(1, 1),
                                 padding='same',
                                 data_format=data_format)
         if apply_bn:
             feat = utils.batch_norm_act(feat,
                                         is_training_bn=is_training,
                                         act_type=None,
                                         data_format=data_format,
                                         strategy=strategy,
                                         name='bn')
     return feat
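
Note that _maybe_apply_1x1 is a nested helper: num_channels, target_num_channels, apply_bn, is_training, data_format, and strategy are free variables bound by the enclosing resampling function. A rough sketch of that enclosing scope (the signature below is an assumption for illustration, inferred from the call in Example #4):

def resample_feature_map(feat, name, target_height, target_width,
                         target_num_channels, apply_bn=False,
                         is_training=False, conv_after_downsample=False,
                         strategy=None, data_format='channels_last'):
    """Resamples a feature map to a target size and channel width (sketch)."""
    num_channels = feat.shape[3 if data_format == 'channels_last' else 1]
    # _maybe_apply_1x1 (shown above) plus the pooling/upsampling logic would follow here.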
Example #4
def build_bifpn_layer(feats, feat_sizes, config):
    """Builds a feature pyramid given previous feature pyramid and config."""
    p = config  # use p to denote the network config.
    if p.fpn_config:
        fpn_config = p.fpn_config
    else:
        fpn_config = fpn_configs.get_fpn_config(p.fpn_name, p.min_level,
                                                p.max_level,
                                                p.fpn_weight_method)

    num_output_connections = [0 for _ in feats]
    for i, fnode in enumerate(fpn_config.nodes):
        with tf.variable_scope('fnode{}'.format(i)):
            logging.info('fnode %d : %s', i, fnode)
            new_node_height = feat_sizes[fnode['feat_level']]['height']
            new_node_width = feat_sizes[fnode['feat_level']]['width']
            nodes = []
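            # Resample each input feature to the new node's resolution and filter width before fusing.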
            for idx, input_offset in enumerate(fnode['inputs_offsets']):
                input_node = feats[input_offset]
                num_output_connections[input_offset] += 1
                input_node = resample_feature_map(
                    input_node,
                    '{}_{}_{}'.format(idx, input_offset, len(feats)),
                    new_node_height,
                    new_node_width,
                    p.fpn_num_filters,
                    p.apply_bn_for_resampling,
                    p.is_training_bn,
                    p.conv_after_downsample,
                    strategy=p.strategy,
                    data_format=config.data_format)
                nodes.append(input_node)

            new_node = fuse_features(nodes, fpn_config.weight_method)

            with tf.variable_scope('op_after_combine{}'.format(len(feats))):
                if not p.conv_bn_act_pattern:
                    new_node = utils.activation_fn(new_node, p.act_type)

                if p.separable_conv:
                    conv_op = functools.partial(tf.layers.separable_conv2d,
                                                depth_multiplier=1)
                else:
                    conv_op = tf.layers.conv2d

                new_node = conv_op(new_node,
                                   filters=p.fpn_num_filters,
                                   kernel_size=(3, 3),
                                   padding='same',
                                   use_bias=not p.conv_bn_act_pattern,
                                   data_format=config.data_format,
                                   name='conv')

                new_node = utils.batch_norm_act(
                    new_node,
                    is_training_bn=p.is_training_bn,
                    act_type=None if not p.conv_bn_act_pattern else p.act_type,
                    data_format=config.data_format,
                    strategy=p.strategy,
                    name='bn')

            feats.append(new_node)
            num_output_connections.append(0)

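    # For each output level, return the most recently generated node at that level.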
    output_feats = {}
    for l in range(p.min_level, p.max_level + 1):
        for i, fnode in enumerate(reversed(fpn_config.nodes)):
            if fnode['feat_level'] == l:
                output_feats[l] = feats[-1 - i]
                break
    return output_feats
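
For reference, a hedged illustration of the node specification that build_bifpn_layer iterates over: each entry names the pyramid level the fused node produces ('feat_level') and the offsets, into the running feats list, of the inputs it fuses ('inputs_offsets'). The exact entries come from fpn_configs.get_fpn_config; the two below are invented for illustration:

example_nodes = [
    {'feat_level': 6, 'inputs_offsets': [3, 4]},  # fuse feats[3] and feats[4]
    {'feat_level': 5, 'inputs_offsets': [2, 5]},  # feats[5] is the node appended by the previous step
]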