Example 1
    def test_layer(self):
        layer_multi_io_test(
            PointRend,
            kwargs={
                'classes': 5, 'units': [2, 3], 'points': (0.165, 0.0005), 'oversample': 3, 'importance': 0.75,
                'fines': 1, 'residual': False, 'align_corners': True},
            input_shapes=[(4, 64, 64, 3), (4, 16, 16, 5), (4, 32, 32, 6)],
            input_dtypes=['float32', 'float32', 'float32'],
            expected_output_shapes=[(None, 64, 64, 5), (None, None, 5), (None, None, 2)],
            expected_output_dtypes=['float32', 'float32', 'float32']
        )

        mixed_precision.set_global_policy('mixed_float16')
        layer_multi_io_test(
            PointRend,
            kwargs={
                'classes': 5, 'units': [4], 'points': (0.165, 0.0005), 'oversample': 4, 'importance': 0.95, 'fines': 2,
                'residual': True, 'align_corners': False},
            input_shapes=[(4, 64, 64, 3), (4, 16, 16, 5), (4, 48, 48, 6), (4, 32, 32, 7)],
            input_dtypes=['uint8', 'float16', 'float16', 'float16'],
            expected_output_shapes=[(None, 64, 64, 5), (None, None, 5), (None, None, 2)],
            expected_output_dtypes=['float32', 'float16', 'float16']
        )
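
A note on context: none of the snippets in this list carry their module headers. A minimal sketch of the imports they appear to rely on follows; only the Keras entry points are certain, and every project-local name is left as a comment because its real import path is unknown.

    import numpy as np
    import tensorflow as tf
    from tensorflow.keras import mixed_precision  # global_policy / set_global_policy

    # layer_multi_io_test, test_utils.layer_test, and the layers under test
    # (PointRend, PyramidPooling, PointHead, ...) are project-local helpers;
    # their import paths vary by project layout and are deliberately omitted.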
Example 2
    def test_layer(self):
        test_utils.layer_test(PyramidPooling,
                              kwargs={
                                  'filters': 2,
                                  'sizes': (1, 2, 3, 6)
                              },
                              input_shape=[2, 18, 18, 3],
                              input_dtype='float32',
                              expected_output_shape=[None, 18, 18, 2],
                              expected_output_dtype='float32')

        mixed_precision.set_global_policy('mixed_float16')
        test_utils.layer_test(PyramidPooling,
                              kwargs={
                                  'filters': 32,
                                  'sizes': (1, 2, 3, 6),
                                  'activation': 'leaky_relu',
                                  'standardized': True
                              },
                              input_shape=[2, 18, 18, 64],
                              input_dtype='float16',
                              expected_output_shape=[None, 18, 18, 32],
                              expected_output_dtype='float16')
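
The sizes=(1, 2, 3, 6) argument matches the classic PSPNet pyramid. As a hedged sketch of what a single pyramid level computes (a generic PSP-style level, not the PyramidPooling implementation under test):

    import tensorflow as tf

    # One PSP-style pyramid level: average-pool the 18x18 input down to
    # s x s, project with a 1x1 conv, then resize back to the input size.
    x = tf.random.uniform([2, 18, 18, 3])
    s = 6
    pooled = tf.keras.layers.AveragePooling2D(pool_size=18 // s)(x)  # -> 6x6
    projected = tf.keras.layers.Conv2D(filters=2, kernel_size=1)(pooled)
    level = tf.image.resize(projected, [18, 18])
    print(level.shape)  # (2, 18, 18, 2)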
Example 3
    def test_layer(self):
        layer_multi_io_test(PointHead,
                            kwargs={
                                'classes': 5,
                                'units': [4, 3, 2],
                                'fines': 1,
                                'residual': False
                            },
                            input_shapes=[(2, 16, 5), (2, 16, 10)],
                            input_dtypes=['float32', 'float32'],
                            expected_output_shapes=[(None, 16, 5)],
                            expected_output_dtypes=['float32'])
        layer_multi_io_test(PointHead,
                            kwargs={
                                'classes': 2,
                                'units': [4, 3, 2],
                                'fines': 2,
                                'residual': True
                            },
                            input_shapes=[(2, 16, 2), (2, 16, 10),
                                          (2, 16, 11)],
                            input_dtypes=['float32', 'float32', 'float32'],
                            expected_output_shapes=[(None, 16, 2)],
                            expected_output_dtypes=['float32'])

        mixed_precision.set_global_policy('mixed_float16')
        layer_multi_io_test(PointHead,
                            kwargs={
                                'classes': 3,
                                'units': [4, 3, 2],
                                'fines': 1,
                                'residual': True
                            },
                            input_shapes=[(2, 16, 3), (2, 16, 10)],
                            input_dtypes=['float16', 'float16'],
                            expected_output_shapes=[(None, 16, 3)],
                            expected_output_dtypes=['float16'])
Example 4
    def test_layer(self):
        # TODO: wait for issue with Sequential model restoring
        #  will be resolved to migrate back on test_utils.layer_test
        layer_multi_io_test(DeepLabV3PlusWithHierarchicalAttention,
                            kwargs={
                                'classes': 4,
                                'bone_arch': 'resnet_50',
                                'bone_init': 'imagenet',
                                'bone_train': False,
                                'aspp_filters': 8,
                                'aspp_stride': 32,
                                'low_filters': 16,
                                'decoder_filters': 5,
                                'scales': ((0.5, ), (0.25, 0.5, 2.0))
                            },
                            input_shapes=[(2, 224, 224, 3)],
                            input_dtypes=['uint8'],
                            expected_output_shapes=[(None, 224, 224, 4)],
                            expected_output_dtypes=['float32'])

        mixed_precision.set_global_policy('mixed_float16')
        layer_multi_io_test(DeepLabV3PlusWithHierarchicalAttention,
                            kwargs={
                                'classes': 1,
                                'bone_arch': 'resnet_50',
                                'bone_init': 'imagenet',
                                'bone_train': False,
                                'aspp_filters': 8,
                                'aspp_stride': 32,
                                'low_filters': 16,
                                'decoder_filters': 4,
                                'scales': ((0.5, ), (0.25, 0.5, 2.0))
                            },
                            input_shapes=[(2, 224, 224, 3)],
                            input_dtypes=['uint8'],
                            expected_output_shapes=[(None, 224, 224, 1)],
                            expected_output_dtypes=['float32'])
Example 5
    def test_layer(self):
        test_utils.layer_test(StandardizedDepthwiseConv2D,
                              kwargs={
                                  'kernel_size': 1,
                                  'strides': 1,
                                  'padding': 'valid'
                              },
                              input_shape=[2, 16, 16, 8],
                              input_dtype='float32',
                              expected_output_shape=[None, 16, 16, 8],
                              expected_output_dtype='float32')

        mixed_precision.set_global_policy('mixed_float16')
        result = test_utils.layer_test(StandardizedDepthwiseConv2D,
                                       kwargs={
                                           'kernel_size': 3,
                                           'strides': 2,
                                           'padding': 'same'
                                       },
                                       input_shape=[2, 16, 16, 8],
                                       input_dtype='float16',
                                       expected_output_shape=[None, 8, 8, 8],
                                       expected_output_dtype='float16')
        self.assertTrue(np.all(np.isfinite(result)))
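
The extra isfinite assertion in the mixed_float16 run is the interesting part: float16 saturates to inf past 65504, so any overflow inside the standardized convolution would surface here. A minimal illustration of what the check catches:

    import numpy as np

    print(np.isfinite(np.float16(65504.0)))  # True: the largest finite float16
    print(np.isfinite(np.float16(70000.0)))  # False: overflows to inf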
Example 6
 def test_global_policy(self):
   if base_layer_utils.v2_dtype_behavior_enabled():
     default_policy = 'float32'
   else:
     default_policy = '_infer'
   self.assertEqual(mp_policy.global_policy().name, default_policy)
   try:
     mp_policy.set_global_policy('mixed_float16')
     self.assertEqual(mp_policy.global_policy().name, 'mixed_float16')
     with tf.Graph().as_default():  # Policies are not associated with a graph
       self.assertEqual(mp_policy.global_policy().name, 'mixed_float16')
     mp_policy.set_global_policy('_infer')
     self.assertEqual(mp_policy.global_policy().name, '_infer')
     policy = mp_policy.Policy('mixed_bfloat16')
     mp_policy.set_global_policy(policy)
     self.assertIs(mp_policy.global_policy(), policy)
   finally:
     mp_policy.set_global_policy(None)
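
The try/finally discipline above generalizes beyond tests: any code that flips the global policy temporarily should restore the previous one on every exit path, which is the same pattern the tearDown examples later in this list apply. A minimal sketch with the public tf.keras API (the work done under the policy is elided):

    import tensorflow as tf

    previous = tf.keras.mixed_precision.global_policy()
    try:
        tf.keras.mixed_precision.set_global_policy('mixed_float16')
        # ... build and run models that should see the mixed policy ...
    finally:
        # Restore whatever was active before, even if the body raised.
        tf.keras.mixed_precision.set_global_policy(previous)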
Example 7
 def tearDown(self):
     super(TestUncertainPointsCoordsOnGrid, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
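
This tearDown, like all the ones that follow, restores a policy recorded before the test ran, but none of the snippets show where self.default_policy is set. A minimal sketch of the setUp they imply (the attribute name is taken from the tearDown bodies; the rest is an assumption):

 def setUp(self):
     super(TestUncertainPointsCoordsOnGrid, self).setUp()
     # Capture the global policy so tearDown can undo any
     # set_global_policy('mixed_float16') call made inside a test.
     self.default_policy = mixed_precision.global_policy()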
Example 8
 def tearDown(self):
     super(TestPointHead, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 9
    def test_layer(self):
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': False,
                                'mode': 'bilinear'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16, 10),
                                np.random.rand(2, 20, 2)
                            ],
                            input_dtypes=['float32', 'float32'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['float32'])
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': True,
                                'mode': 'nearest'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16, 10),
                                np.random.rand(2, 20, 2)
                            ],
                            input_dtypes=['float32', 'float32'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['float32'])
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': False,
                                'mode': 'bilinear'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16, 10),
                                np.random.rand(2, 20, 2)
                            ],
                            input_dtypes=['int32', 'float32'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['int32'])
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': True,
                                'mode': 'nearest'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16, 10),
                                np.random.rand(2, 20, 2)
                            ],
                            input_dtypes=['int32', 'float32'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['int32'])

        mixed_precision.set_global_policy('mixed_float16')
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': False,
                                'mode': 'bilinear'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16,
                                               10).astype(np.float16),
                                np.random.rand(2, 20, 2).astype(np.float16)
                            ],
                            input_dtypes=['float16', 'float16'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['float16'])
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': True,
                                'mode': 'bilinear'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16, 10),
                                np.random.rand(2, 20, 2)
                            ],
                            input_dtypes=['float32', 'float16'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['float32'])
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': False,
                                'mode': 'bilinear'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16, 10),
                                np.random.rand(2, 20, 2)
                            ],
                            input_dtypes=['int32', 'float16'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['int32'])
        layer_multi_io_test(PointSample,
                            kwargs={
                                'align_corners': True,
                                'mode': 'bilinear'
                            },
                            input_datas=[
                                np.random.rand(2, 16, 16, 10),
                                np.random.rand(2, 20, 2)
                            ],
                            input_dtypes=['int32', 'float32'],
                            expected_output_shapes=[(None, 20, 10)],
                            expected_output_dtypes=['int32'])
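
Across all of these calls the shape contract is constant: a (B, H, W, C) feature map sampled at (B, N, 2) normalized point coordinates yields (B, N, C) features, and the output dtype follows the feature input. A hedged reference for the nearest mode (generic gather_nd sampling, not the PointSample implementation; the (x, y) coordinate order is an assumption):

    import tensorflow as tf

    features = tf.random.uniform([2, 16, 16, 10])  # (B, H, W, C)
    points = tf.random.uniform([2, 20, 2])         # (B, N, 2), coords in [0, 1]

    # Map normalized (x, y) to integer pixel indices, align_corners=True style.
    idx = tf.cast(tf.round(points * [15.0, 15.0]), tf.int32)  # * (W - 1, H - 1)
    yx = tf.reverse(idx, axis=[-1])  # gather_nd indexes (row, col) = (y, x)
    sampled = tf.gather_nd(features, yx, batch_dims=1)
    print(sampled.shape)  # (2, 20, 10)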
Example 10
 def tearDown(self):
     super(TestUncertainPointsWithRandomness, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 11
    def test_layer(self):
        outputs = layer_multi_io_test(
            PointLoss,
            kwargs={
                'classes': 5,
                'weighted': False,
                'reduction': Reduction.NONE
            },
            input_datas=[
                np.random.rand(2, 16, 5),
                np.random.rand(2, 16, 2),
                (np.random.rand(2, 8, 8, 1) > 0.5).astype(np.int32)
            ],
            input_dtypes=['float32', 'float32', 'int32'],
            expected_output_shapes=[(None, 16)],
            expected_output_dtypes=['float32'])
        self.assertTrue(np.all(outputs >= 0.))

        outputs = layer_multi_io_test(
            PointLoss,
            kwargs={
                'classes': 5,
                'weighted': True,
                'reduction': Reduction.NONE
            },
            input_datas=[
                np.random.rand(2, 16, 5),
                np.random.rand(2, 16, 2),
                (np.random.rand(2, 8, 8, 1) > 0.5).astype(np.int32),
                np.random.rand(2, 8, 8, 1)
            ],
            input_dtypes=['float32', 'float32', 'int32', 'float32'],
            expected_output_shapes=[(None, 16)],
            expected_output_dtypes=['float32'])
        self.assertTrue(np.all(outputs >= 0.))

        mixed_precision.set_global_policy('mixed_float16')
        outputs = layer_multi_io_test(
            PointLoss,
            kwargs={
                'classes': 5,
                'weighted': True,
                'reduction': Reduction.NONE
            },
            input_datas=[
                np.random.rand(2, 16, 5),
                np.random.rand(2, 16, 2),
                (np.random.rand(2, 8, 8, 1) > 0.5).astype(np.int32),
                np.random.rand(2, 8, 8, 1)
            ],
            input_dtypes=['float32', 'float32', 'int32', 'float32'],
            expected_output_shapes=[(None, 16)],
            expected_output_dtypes=['float32'])
        self.assertTrue(np.all(outputs >= 0.))

        outputs = layer_multi_io_test(
            PointLoss,
            kwargs={
                'classes': 5,
                'weighted': True,
                'reduction': Reduction.NONE
            },
            input_datas=[
                np.random.rand(2, 16, 5).astype(np.float16),
                np.random.rand(2, 16, 2).astype(np.float16),
                (np.random.rand(2, 8, 8, 1) > 0.5).astype(np.int32),
                np.random.rand(2, 8, 8, 1).astype(np.float16)
            ],
            input_dtypes=['float16', 'float16', 'int32', 'float16'],
            expected_output_shapes=[(None, 16)],
            expected_output_dtypes=['float32'])
        self.assertTrue(np.all(outputs >= 0.))
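
All four assertions check the same invariant: a cross-entropy-style per-point loss is non-negative by construction. A standalone illustration with the stock Keras loss (PointLoss itself may differ in detail, but the non-negativity argument is the same):

    import tensorflow as tf

    bce = tf.keras.losses.BinaryCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    per_sample = bce([[0.0], [1.0]], [[0.1], [0.7]])
    print(per_sample.numpy())  # -log(0.9) and -log(0.7): both >= 0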
Example 12
 def tearDown(self):
     super(TestCascadePSP, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 13
 def tearDown(self):
     super(TestUPerNet, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 14
 def tearDown(self):
     super(TestPyramidPooling, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 15
 def tearDown(self):
     super(TestFBAMatting, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 16
 def tearDown(self):
     super(TestDeepLabV3PlusWithHierarchicalAttention, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 17
 def tearDown(self):
     super(TestSameDepthwiseConv, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 18
 def tearDown(self):
     super(TestSameStandardizedConv, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 19
 def tearDown(self):
     super(TestMatteFormer, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 20
 def tearDown(self):
     super(TestDeepLabV3PlusWithPointRend, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 21
 def tearDown(self):
     super(TestResizeByScale, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 22
 def tearDown(self):
     super(TestClassificationHead, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 23
 def tearDown(self):
     super(TestHierarchicalMultiScaleAttention, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 24
 def tearDown(self):
     super(TestGroupNormalization, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)
Example 25
 def tearDown(self):
     super(TestStandardizedDepthwiseConv2D, self).tearDown()
     mixed_precision.set_global_policy(self.default_policy)