Code Example #1: mismatched encoder output shapes raise an error
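All four examples reference module-level names defined elsewhere in the test file (transporter, FILTERS, STRIDES, KERNEL_SIZES, IMAGE_H, IMAGE_W, IMAGE_C, BATCH_SIZE, IMAGE_BATCH_SHAPE). Below is a minimal sketch of plausible definitions so the snippets can be read as self-contained; the concrete values are assumptions, chosen only so the shape assertions hold (two stride-2 layers give the 4x downsampling checked in Code Examples #2 and #4, and the last two entries of STRIDES contain exactly one stride-2 layer, which Code Example #1 relies on), not the original configuration.

    # Assumed test-setup sketch; the real test module defines its own values.
    import tensorflow as tf

    import transporter  # assumed import of the module under test

    IMAGE_H = 16
    IMAGE_W = 16
    IMAGE_C = 3
    BATCH_SIZE = 4
    IMAGE_BATCH_SHAPE = (BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_C)

    # Two stride-2 layers -> 4x spatial downsampling, matching the assertions below.
    FILTERS = (16, 16, 32, 32, 64, 64)
    STRIDES = (1, 1, 2, 1, 2, 1)
    KERNEL_SIZES = (7, 3, 3, 3, 3, 3)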
  def testIncorrectEncoderShapes(self):
    """Test that a possible misconfiguration throws an error as expected.

    If the two encoders used produce different spatial sizes for their
    feature maps, this should cause an error when multiplying tensors together.
    """
    decoder_filters = 4
    num_keypoints = 5
    gauss_std = 0.1

    encoder = transporter.Encoder(filters=FILTERS,
                                  strides=STRIDES,
                                  kernel_sizes=KERNEL_SIZES)
    # Use fewer conv layers here, in particular one fewer stride-2 layer, so
    # we get a different spatial output resolution.
    keypoint_encoder = transporter.Encoder(filters=FILTERS[:-2],
                                           strides=STRIDES[:-2],
                                           kernel_sizes=KERNEL_SIZES[:-2])

    keypointer = transporter.KeyPointer(keypoint_encoder=keypoint_encoder,
                                        num_keypoints=num_keypoints,
                                        gauss_std=gauss_std)

    decoder = transporter.Decoder(initial_filters=decoder_filters,
                                  output_size=[IMAGE_H, IMAGE_W],
                                  output_channels=IMAGE_C)
    model = transporter.Transporter(encoder=encoder,
                                    decoder=decoder,
                                    keypointer=keypointer)

    with self.assertRaisesRegex(ValueError, 'Dimensions must be equal'):
      model(tf.random.normal(IMAGE_BATCH_SHAPE),
            tf.random.normal(IMAGE_BATCH_SHAPE),
            is_training=True)
Code Example #2: Encoder output shape
  def test_output_shape(self):
    image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)

    filters = (4, 4, 8, 8, 16, 16)
    encoder = transporter.Encoder(filters=filters,
                                  strides=STRIDES,
                                  kernel_sizes=KERNEL_SIZES)

    features = encoder(image_batch, is_training=True)

    self.assertEqual(features.shape,
                     (BATCH_SIZE, IMAGE_H // 4, IMAGE_W // 4, filters[-1]))
Code Example #3: encoder-decoder reconstruction shape
  def test_encoder_decoder_output_shape(self):
    image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)

    encoder = transporter.Encoder(filters=FILTERS,
                                  strides=STRIDES,
                                  kernel_sizes=KERNEL_SIZES)
    decoder = transporter.Decoder(initial_filters=4,
                                  output_size=[IMAGE_H, IMAGE_W],
                                  output_channels=IMAGE_C)

    features = encoder(image_batch, is_training=True)
    reconstructed_images = decoder(features, is_training=True)

    self.assertEqual(reconstructed_images.shape, IMAGE_BATCH_SHAPE)
Code Example #4: KeyPointer output shapes
  def test_output_shape(self):
    image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)
    num_keypoints = 6
    gauss_std = 0.1

    keypoint_encoder = transporter.Encoder(filters=FILTERS,
                                           strides=STRIDES,
                                           kernel_sizes=KERNEL_SIZES)
    keypointer = transporter.KeyPointer(keypoint_encoder=keypoint_encoder,
                                        num_keypoints=num_keypoints,
                                        gauss_std=gauss_std)

    keypointer_results = keypointer(image_batch, is_training=True)

    self.assertEqual(keypointer_results['centers'].shape,
                     (BATCH_SIZE, num_keypoints, 2))
    self.assertEqual(keypointer_results['heatmaps'].shape,
                     (BATCH_SIZE, IMAGE_H // 4, IMAGE_W // 4, num_keypoints))