Example #1
    def test_unknown_size(self):
        '''Instantiates the network with unknown spatial dimensions.'''
        for feature_arch in feature_nets.NAMES:
            sub_test = trySubTest(self, feature_arch=feature_arch)
            with sub_test:
                feature_fn = feature_nets.BY_NAME[feature_arch]
                with tf.Graph().as_default():
                    image = tf.placeholder(tf.float32, (None, None, None, 32), name='image')
                    is_training = tf.placeholder(tf.bool, (), name='is_training')
                    _, _ = feature_fn(image, is_training)
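
Each of these examples wraps its per-architecture loop in a trySubTest helper that is not shown above. A minimal sketch, assuming the helper simply defers to unittest's subTest and degrades to a no-op context manager on runners that lack it (the fallback behaviour is an assumption, not the project's confirmed implementation):

import contextlib

def trySubTest(test_case, **kwargs):
    # Use TestCase.subTest when available so each architecture is reported separately.
    if hasattr(test_case, 'subTest'):
        return test_case.subTest(**kwargs)
    # Otherwise fall back to a context manager that does nothing.
    return contextlib.suppress()
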
Example #2
    def test_no_padding_by_default(self):
        '''Tests that feature functions with default options have zero padding.'''
        for feature_arch in feature_nets.NAMES:
            sub_test = trySubTest(self, feature_arch=feature_arch)
            with sub_test, tf.Graph().as_default():
                feature_fn = feature_nets.BY_NAME[feature_arch]
                image = tf.placeholder(tf.float32, (None, None, None, 3), name='image')
                image = cnn.as_tensor(image, add_to_set=True)
                feat, _ = feature_fn(image, is_training=True)
                # Receptive field of the output feature map with respect to the input image.
                field = feat.fields[image.value]
                self.assertAllEqual(field.padding, [0, 0])
Example #3
    def test_same_variables(self):
        '''Instantiates feature net using same scope as original function.'''
        for feature_arch in SLIM_ARCH_NAMES:
            sub_test = trySubTest(self, feature_arch=feature_arch)
            with sub_test, tf.Graph().as_default():
                original_fn = globals()[feature_arch]
                feature_fn = feature_nets.BY_NAME[feature_arch]

                image = tf.placeholder(tf.float32, (None, None, None, 3), name='image')
                with tf.variable_scope('net', reuse=False):
                    original_fn(image, is_training=True)
                with tf.variable_scope('net', reuse=True):
                    feature_fn(image, is_training=True)
Example #4
    def test_desired_output_size_from_receptive_field(self):
        '''Uses the receptive field to get the input size for desired output size.'''
        for feature_arch in feature_nets.NAMES:
            sub_test = trySubTest(self, feature_arch=feature_arch)
            with sub_test, tf.Graph().as_default():
                feature_fn = feature_nets.BY_NAME[feature_arch]
                field = feature_nets.get_receptive_field(feature_fn)

                desired = np.array([10, 10])
                input_size = receptive_field.input_size(field, desired)
                input_shape = [None] + list(input_size) + [3]
                image = tf.placeholder(tf.float32, input_shape, name='image')
                is_training = tf.placeholder(tf.bool, (), name='is_training')
                feat, _ = feature_fn(image, is_training)
                output_size = feat.value.shape[1:3].as_list()
                self.assertAllEqual(output_size, desired)
                receptive_field.assert_center_alignment(input_size, output_size, field)
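
The receptive_field.input_size call above maps a desired output size back to the input size that produces it. Assuming the field object carries size and stride (alongside the padding checked in Example #2), the underlying arithmetic is presumably the usual one-receptive-field-plus-strides relation; a rough sketch under that assumption, not the library's actual code:

def input_size_sketch(field, desired_output):
    # With zero padding: one full receptive field for the first output position,
    # plus one stride per additional position along each spatial dimension.
    desired_output = np.asarray(desired_output)
    return field.size + (desired_output - 1) * field.stride
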
Example #5
    def test_label_fns(self):
        response_size = 7
        num_scales = 3
        translation_stride = 10
        log_scale_step = np.log(2)
        base_target_size = 30
        scores_shape = (1, num_scales) + n_positive_integers(2, response_size) + (1,)

        gt_translation = [-20, -40]
        gt_size = 60

        label_fns = {
            'hard': dict(
                translation_radius_pos=0.2,
                translation_radius_neg=0.5,
                scale_radius_pos=1.1,
                scale_radius_neg=1.3,
            ),
            'hard_binary': dict(
                translation_radius=0.2,
                scale_radius=1.2,
            ),
        }

        for name, kwargs in label_fns.items():
            with trySubTest(self, label_fn=name):
                with self.test_session():
                    label_fn = regress.LABEL_FNS[name]
                    _, labels, weights = label_fn(
                        response_size, num_scales, translation_stride, log_scale_step,
                        base_target_size,
                        _make_constant_batch(gt_translation),
                        _make_constant_batch(gt_size),
                        **kwargs)

                    # labels: [b, s, h, w, c]
                    self.assertEqual(len(labels.shape), 5)
                    self.assertAllGreaterEqual(weights, 0)
                    sum_positive = tf.reduce_sum(weights * labels, axis=(-4, -3, -2, -1))
                    sum_negative = tf.reduce_sum(weights * (1 - labels), axis=(-4, -3, -2, -1))
                    self.assertAllGreater(sum_positive, 0)
                    self.assertAllGreater(sum_negative, 0)
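
Both this test and test_compute_loss_map (Example #7) feed the ground truth through a _make_constant_batch helper that is not shown. A plausible sketch, assuming it only wraps a Python value as a float constant with a leading batch dimension of one:

def _make_constant_batch(value):
    # Add a batch axis of size 1, e.g. [-20, -40] -> shape [1, 2] and 60 -> shape [1].
    return tf.constant(np.asarray(value, dtype=np.float32)[np.newaxis])
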
Example #6
    def test_instantiate(self):
        '''Instantiates the join functions.'''
        for join_arch in join_nets.SINGLE_JOIN_FNS:
            with trySubTest(self, join_arch=join_arch):
                with tf.Graph().as_default():
                    join_fn = join_nets.BY_NAME[join_arch]
                    if join_arch in join_nets.FULLY_CONNECTED_FNS:
                        template_size = np.array([1, 1])
                    else:
                        template_size = np.array([4, 4])
                    search_size = np.array([10, 10])
                    template_shape = [3] + list(template_size) + [16]
                    search_shape = [3, 2] + list(search_size) + [16]

                    template = tf.placeholder(tf.float32,
                                              template_shape,
                                              name='template')
                    search = tf.placeholder(tf.float32,
                                            search_shape,
                                            name='search')
                    is_training = tf.placeholder(tf.bool, (),
                                                 name='is_training')
                    output = join_fn(template, search, is_training)
                    output = cnn.get_value(output)
                    output_size = output.shape[-3:-1].as_list()
                    self.assertAllEqual(output_size,
                                        search_size - template_size + 1)

                    init_op = tf.global_variables_initializer()
                    with tf.Session() as sess:
                        sess.run(init_op)
                        sess.run(output, feed_dict={
                            template: np.random.normal(size=template_shape),
                            search: np.random.normal(size=search_shape),
                            is_training: False,
                        })
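
The size assertion above is just the VALID cross-correlation rule: there is one output position for every placement of the template inside the search region. A quick numpy check of the arithmetic the test relies on:

search_size = np.array([10, 10])
for template_size in (np.array([4, 4]), np.array([1, 1])):
    # VALID correlation: output = search - template + 1.
    print(search_size - template_size + 1)  # -> [7 7] and [10 10]
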
Example #7
    def test_compute_loss_map(self):
        response_size = 7
        num_scales = 3
        translation_stride = 10
        log_scale_step = np.log(2.0)
        base_target_size = 30
        scores_shape = (1, num_scales) + n_positive_integers(2, response_size) + (1,)

        gt_translation = [-20, -40]
        gt_size = 60
        scores = tf.random.normal(scores_shape, dtype=tf.float32)

        losses = {
            'sigmoid_hard': dict(
                method='sigmoid',
                params=dict(balanced=True,
                            label_method='hard',
                            label_params=dict(translation_radius_pos=0.2,
                                              translation_radius_neg=0.5,
                                              scale_radius_pos=1.1,
                                              scale_radius_neg=1.3))),
            'sigmoid_hard_binary': dict(
                method='sigmoid',
                params=dict(balanced=True,
                            label_method='hard_binary',
                            label_params=dict(translation_radius=0.2,
                                              scale_radius=1.2))),
        }

        for loss_name, loss_kwargs in losses.items():
            with trySubTest(self, loss=loss_name):
                _, loss = regress.compute_loss_discrete(
                    scores, num_scales, translation_stride, log_scale_step, base_target_size,
                    _make_constant_batch(gt_translation),
                    _make_constant_batch(gt_size),
                    **loss_kwargs)
                self.assertEqual(len(loss.shape), 1)
                with self.test_session():
                    self.assertTrue(np.all(np.isfinite(loss.eval())))
Example #8
    def test_output_equal(self):
        '''Compares output to library implementation of networks.'''
        # The desired_size may need to be chosen such that original network structure is valid.
        TestCase = collections.namedtuple('TestCase', ['kwargs', 'desired_size', 'end_point'])
        cases = {
            'slim_alexnet_v2': TestCase(
                kwargs=dict(
                    output_layer='conv5',
                    output_act='relu',
                    conv_padding='SAME',
                    pool_padding='VALID'),
                desired_size=np.array([13, 13]),  # 3 + (6 - 1) * 2
                end_point='alexnet_v2/conv5',
            ),
            'slim_resnet_v1_50': TestCase(
                kwargs=dict(
                    num_blocks=4,
                    conv_padding='SAME',
                    pool_padding='SAME'),
                desired_size=np.array([3, 3]),
                end_point='resnet_v1_50/block4',
            ),
            'slim_vgg_16': TestCase(
                kwargs=dict(
                    output_layer='fc6',
                    output_act='relu',
                    conv_padding='SAME',
                    pool_padding='VALID'),
                desired_size=np.array([1, 1]),
                end_point='vgg_16/fc6',
            ),
        }

        for feature_arch, test_case in cases.items():
            graph = tf.Graph()
            sub_test = trySubTest(self, feature_arch=feature_arch)
            with sub_test, graph.as_default():
                original_fn = globals()[feature_arch]
                feature_fn = functools.partial(feature_nets.BY_NAME[feature_arch],
                                               **test_case.kwargs)
                field = feature_nets.get_receptive_field(feature_fn)
                input_size = receptive_field.input_size(field, test_case.desired_size)
                input_shape = [None] + list(input_size) + [3]

                image = tf.placeholder(tf.float32, input_shape, name='image')
                with tf.variable_scope('net', reuse=False):
                    _, end_points = original_fn(image, is_training=True)
                    try:
                        original = end_points['net/' + test_case.end_point]
                    except KeyError as ex:
                        raise ValueError('key not found ({}) in list: {}'.format(
                            ex, sorted(end_points.keys())))
                init_op = tf.global_variables_initializer()
                with tf.variable_scope('net', reuse=True):
                    ours, _ = feature_fn(image, is_training=True)
                    ours = cnn.get_value(ours)
                # self.assertEqual(original.shape.as_list(), ours.shape.as_list())

                with self.session(graph=graph) as sess:
                    sess.run(init_op)
                    want, got = sess.run((original, ours), feed_dict={
                        image: np.random.uniform(size=[BATCH_LEN] + input_shape[1:]),
                    })
                    self.assertAllClose(want, got)
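
Finally, all of the snippets above assume a shared test-module preamble that is not reproduced here. A rough reconstruction follows; the package name in the commented project-local imports and the BATCH_LEN value are guesses rather than confirmed details, and further names such as n_positive_integers, SLIM_ARCH_NAMES, and the slim network functions looked up via globals() would also come from this preamble:

import collections
import functools

import numpy as np
import tensorflow as tf

# Project-local modules used throughout the examples (package name is hypothetical):
# from mypackage import cnn, feature_nets, join_nets, receptive_field, regress

BATCH_LEN = 2  # arbitrary batch size used in test_output_equal; the real value may differ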