Code example #1
    def test_no_dangling_outputs(self):
        """Verify prediction leaves no unconsumed ops under the BoxPredictor scope.

        Builds a depthwise convolutional box predictor, runs prediction, and
        asserts that every op created under the 'BoxPredictor' name scope is
        either consumed downstream or of a type known to be safe to dangle.
        """
        features = tf.placeholder(dtype=tf.float32,
                                  shape=[4, None, None, 64])
        predictor = box_predictor_builder.build_convolutional_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            dropout_keep_prob=0.8,
            kernel_size=3,
            box_code_size=4,
            use_dropout=True,
            use_depthwise=True)
        predictions = predictor.predict(
            [features],
            num_predictions_per_location=[5],
            scope='BoxPredictor')
        # Attach consumers to the prediction heads so the heads themselves do
        # not register as dangling outputs below.
        tf.concat(predictions[box_predictor.BOX_ENCODINGS], axis=1)
        tf.concat(
            predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
            axis=1)

        types_safe_to_dangle = set(['Assign', 'Mul', 'Const'])
        bad_dangling_ops = [
            op for op in tf.get_default_graph().get_operations()
            if ((not op.outputs or not op.outputs[0].consumers())
                and 'BoxPredictor' in op.name
                and op.type not in types_safe_to_dangle)
        ]

        self.assertEqual(bad_dangling_ops, [])
Code example #2
  def test_no_dangling_outputs(self):
    """Ensure no unexpected dangling ops remain under the BoxPredictor scope."""
    inputs = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    predictor = box_predictor_builder.build_convolutional_box_predictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4,
        use_dropout=True,
        use_depthwise=True)
    outputs = predictor.predict(
        [inputs], num_predictions_per_location=[5], scope='BoxPredictor')
    # Consume both prediction heads so they are not themselves flagged as
    # dangling by the scan below.
    tf.concat(outputs[box_predictor.BOX_ENCODINGS], axis=1)
    tf.concat(
        outputs[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)

    safe_types = set(['Assign', 'Mul', 'Const'])
    dangling = []
    for op in tf.get_default_graph().get_operations():
      # An op dangles when it has no outputs or its first output is unconsumed.
      is_consumed = bool(op.outputs) and bool(op.outputs[0].consumers())
      if is_consumed:
        continue
      if 'BoxPredictor' in op.name and op.type not in safe_types:
        dangling.append(op)

    self.assertEqual(dangling, [])
    def test_use_depthwise_convolution(self):
        """Check output shapes and variable names with use_depthwise=True.

        Feeds a random 32x32 feature map through a depthwise convolutional box
        predictor, then asserts on the shapes of the concatenated prediction
        tensors and the exact set of trainable variables created.
        """
        image_features = tf.compat.v1.placeholder(
            dtype=tf.float32, shape=[4, None, None, 64])
        predictor = box_predictor_builder.build_convolutional_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
            min_depth=0,
            max_depth=32,
            num_layers_before_predictor=1,
            dropout_keep_prob=0.8,
            kernel_size=1,
            box_code_size=4,
            use_dropout=True,
            use_depthwise=True)
        predictions = predictor.predict(
            [image_features],
            num_predictions_per_location=[5],
            scope='BoxPredictor')
        box_encodings = tf.concat(
            predictions[box_predictor.BOX_ENCODINGS], axis=1)
        objectness_predictions = tf.concat(
            predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
            axis=1)
        init_op = tf.compat.v1.global_variables_initializer()

        resolution = 32
        # Five anchors per spatial cell over a resolution x resolution map.
        expected_num_anchors = 5 * resolution * resolution
        with self.test_session() as sess:
            sess.run(init_op)
            feed = {
                image_features:
                    np.random.rand(4, resolution, resolution, 64)
            }
            (box_encodings_shape, objectness_predictions_shape) = sess.run(
                [
                    tf.shape(input=box_encodings),
                    tf.shape(input=objectness_predictions)
                ],
                feed_dict=feed)
            actual_variable_set = set(
                var.op.name for var in tf.compat.v1.trainable_variables())
        self.assertAllEqual(box_encodings_shape,
                            [4, expected_num_anchors, 1, 4])
        self.assertAllEqual(objectness_predictions_shape,
                            [4, expected_num_anchors, 1])
        expected_variable_set = set([
            'BoxPredictor/Conv2d_0_1x1_32/biases',
            'BoxPredictor/Conv2d_0_1x1_32/weights',
            'BoxPredictor/BoxEncodingPredictor_depthwise/biases',
            'BoxPredictor/BoxEncodingPredictor_depthwise/depthwise_weights',
            'BoxPredictor/BoxEncodingPredictor/biases',
            'BoxPredictor/BoxEncodingPredictor/weights',
            'BoxPredictor/ClassPredictor_depthwise/biases',
            'BoxPredictor/ClassPredictor_depthwise/depthwise_weights',
            'BoxPredictor/ClassPredictor/biases',
            'BoxPredictor/ClassPredictor/weights'
        ])
        self.assertEqual(expected_variable_set, actual_variable_set)
Code example #4
  def test_use_depthwise_convolution(self):
    """Check prediction shapes and created variables with use_depthwise=True."""
    image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])
    predictor = box_predictor_builder.build_convolutional_box_predictor(
        is_training=False,
        num_classes=0,
        conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
        min_depth=0,
        max_depth=32,
        num_layers_before_predictor=1,
        dropout_keep_prob=0.8,
        kernel_size=1,
        box_code_size=4,
        use_dropout=True,
        use_depthwise=True)
    outputs = predictor.predict(
        [image_features], num_predictions_per_location=[5],
        scope='BoxPredictor')
    box_encodings = tf.concat(outputs[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions = tf.concat(
        outputs[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    # Five anchors per spatial cell over a resolution x resolution feature map.
    expected_num_anchors = 5 * resolution * resolution
    with self.test_session() as sess:
      sess.run(init_op)
      feed = {image_features: np.random.rand(4, resolution, resolution, 64)}
      box_encodings_shape, objectness_predictions_shape = sess.run(
          [tf.shape(box_encodings), tf.shape(objectness_predictions)],
          feed_dict=feed)
      actual_variable_set = set(
          var.op.name for var in tf.trainable_variables())
    self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])
    self.assertAllEqual(objectness_predictions_shape,
                        [4, expected_num_anchors, 1])
    expected_variable_set = set([
        'BoxPredictor/Conv2d_0_1x1_32/biases',
        'BoxPredictor/Conv2d_0_1x1_32/weights',
        'BoxPredictor/BoxEncodingPredictor_depthwise/biases',
        'BoxPredictor/BoxEncodingPredictor_depthwise/depthwise_weights',
        'BoxPredictor/BoxEncodingPredictor/biases',
        'BoxPredictor/BoxEncodingPredictor/weights',
        'BoxPredictor/ClassPredictor_depthwise/biases',
        'BoxPredictor/ClassPredictor_depthwise/depthwise_weights',
        'BoxPredictor/ClassPredictor/biases',
        'BoxPredictor/ClassPredictor/weights'])
    self.assertEqual(expected_variable_set, actual_variable_set)
Code example #5
 def graph_fn(image_features):
   """Build a conv box predictor and return its concatenated outputs.

   Returns a (box_encodings, objectness_predictions) tensor pair produced
   under the 'BoxPredictor' scope for the given feature map.
   """
   predictor = box_predictor_builder.build_convolutional_box_predictor(
       is_training=False,
       num_classes=0,
       conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
       min_depth=0,
       max_depth=32,
       num_layers_before_predictor=1,
       use_dropout=True,
       dropout_keep_prob=0.8,
       kernel_size=1,
       box_code_size=4)
   predictions = predictor.predict(
       [image_features],
       num_predictions_per_location=[1],
       scope='BoxPredictor')
   encodings = tf.concat(predictions[box_predictor.BOX_ENCODINGS], axis=1)
   objectness = tf.concat(
       predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
   return (encodings, objectness)
 def graph_fn(image_features):
   """Run a convolutional box predictor over one feature map.

   Returns the concatenated (box_encodings, objectness_predictions) tensors.
   """
   conv_predictor = box_predictor_builder.build_convolutional_box_predictor(
       is_training=False,
       num_classes=0,
       conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
       min_depth=0,
       max_depth=32,
       num_layers_before_predictor=1,
       use_dropout=True,
       dropout_keep_prob=0.8,
       kernel_size=1,
       box_code_size=4)
   results = conv_predictor.predict(
       [image_features],
       num_predictions_per_location=[1],
       scope='BoxPredictor')
   box_encodings = tf.concat(results[box_predictor.BOX_ENCODINGS], axis=1)
   objectness_predictions = tf.concat(
       results[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
   return (box_encodings, objectness_predictions)