def test_delattr(self):
     params = params_dict.ParamsDict()
     params.override(
         {
             'a': 'aa',
             'b': 2,
             'c': None,
             'd': {
                 'd1': 1,
                 'd2': 10
             }
         },
         is_strict=False)
     del params.c
     self.assertEqual(params.a, 'aa')
     self.assertEqual(params.b, 2)
     with self.assertRaises(AttributeError):
         _ = params.c
     del params.d
     with self.assertRaises(AttributeError):
         _ = params.d.d1
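Note that override() wraps nested plain dicts as ParamsDict instances, which is what makes the chained attribute access (params.d.d1) above work. A minimal sketch, assuming the same params_dict module:

from official.modeling.hyperparams import params_dict

params = params_dict.ParamsDict()
params.override({'d': {'d1': 1}}, is_strict=False)
print(type(params.d).__name__)  # ParamsDict, not dict
print(params.d.d1)              # 1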
 def test_override_params_dict_using_yaml_file(self):
     params = params_dict.ParamsDict({
         'a': 1,
         'b': 2.5,
         'c': [3, 4],
         'd': 'hello',
         'e': False
     })
     override_yaml_file = self.write_temp_file(
         'params.yaml', r"""
     b: 5.2
     c: [30, 40]
     """)
     params = params_dict.override_params_dict(params,
                                               override_yaml_file,
                                               is_strict=True)
     self.assertEqual(1, params.a)
     self.assertEqual(5.2, params.b)
     self.assertEqual([30, 40], params.c)
     self.assertEqual('hello', params.d)
     self.assertEqual(False, params.e)
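Here the override source is a YAML file path: override_params_dict merges the file's keys into the existing ParamsDict, and is_strict=True rejects keys that do not already exist. A standalone sketch of the same flow, assuming only the params_dict module shown above:

import tempfile

from official.modeling.hyperparams import params_dict

params = params_dict.ParamsDict({'b': 2.5, 'c': [3, 4]})
# Write the overrides to a temporary YAML file, then merge them in.
with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
    f.write('b: 5.2\nc: [30, 40]\n')
params = params_dict.override_params_dict(params, f.name, is_strict=True)
assert params.b == 5.2 and params.c == [30, 40]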
 def test_override_params_dict_using_csv_string(self):
   params = params_dict.ParamsDict({
       'a': 1,
       'b': {
           'b1': 2,
           'b2': [2, 3],
       },
       'd': {
           'd1': {
               'd2': 'hello'
           }
       },
       'e': False
   })
   override_csv_string = "b.b2=[3,4], d.d1.d2='hi, world', e=gs://test"
   params = params_dict.override_params_dict(
       params, override_csv_string, is_strict=True)
   self.assertEqual(1, params.a)
   self.assertEqual(2, params.b.b1)
   self.assertEqual([3, 4], params.b.b2)
   self.assertEqual('hi, world', params.d.d1.d2)
   self.assertEqual('gs://test', params.e)
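The override string above uses the key=value format: dotted keys address nested fields, bracketed values become lists, quoted values may contain commas, and unquoted tokens that are not valid literals (such as gs://test) fall back to plain strings. A small sketch of the same behavior:

params = params_dict.ParamsDict({'b': {'b2': [2, 3]}, 'e': False})
params = params_dict.override_params_dict(
    params, "b.b2=[3,4], e=gs://test", is_strict=True)
assert params.b.b2 == [3, 4]
assert params.e == 'gs://test'  # unquoted token parsed as a string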
 def test_override_params_dict_using_json_string(self):
   params = params_dict.ParamsDict({
       'a': 1,
       'b': {
           'b1': 2,
           'b2': [2, 3],
       },
       'd': {
           'd1': {
               'd2': 'hello'
           }
       },
       'e': False
   })
   override_json_string = "{ b: { b2: [3, 4] }, d: { d1: { d2: 'hi' } } }"
   params = params_dict.override_params_dict(
       params, override_json_string, is_strict=True)
   self.assertEqual(1, params.a)
   self.assertEqual(2, params.b.b1)
   self.assertEqual([3, 4], params.b.b2)
   self.assertEqual('hi', params.d.d1.d2)
   self.assertEqual(False, params.e)
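The "JSON" string above is not strict JSON (unquoted keys, single quotes); it is a YAML flow mapping, and since YAML is a superset of JSON a YAML loader accepts it. A quick check with PyYAML, assuming it is installed:

import yaml

s = "{ b: { b2: [3, 4] }, d: { d1: { d2: 'hi' } } }"
print(yaml.safe_load(s))  # {'b': {'b2': [3, 4]}, 'd': {'d1': {'d2': 'hi'}}}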
Example #5
  def test_validate(self):
    # Raise error due to the unknown parameter.
    with self.assertRaises(KeyError):
      params = params_dict.ParamsDict({'a': 1, 'b': {'a': 11}}, ['a == c'])
      params.validate()

    # OK to check equality of two nested dicts.
    params = params_dict.ParamsDict({
        'a': 1,
        'b': {
            'a': 10
        },
        'c': {
            'a': 10
        }
    }, ['b == c'])

    # Raise error due to inconsistency between 'a' and 'c.a'.
    with self.assertRaises(KeyError):
      params = params_dict.ParamsDict({'a': 1, 'c': {'a': 10}}, ['a == c.a'])
      params.validate()

    # Valid rule.
    params = params_dict.ParamsDict({'a': 1, 'c': {'a': 1}}, ['a == c.a'])

    # Overriding violates the existing rule; raise error upon validate.
    params.override({'a': 11})
    with self.assertRaises(KeyError):
      params.validate()

    # Valid restrictions with constants.
    params = params_dict.ParamsDict({
        'a': None,
        'c': {
            'a': 1
        }
    }, ['a == None', 'c.a == 1'])
    params.validate()
    with self.assertRaises(KeyError):
      params = params_dict.ParamsDict({
          'a': 4,
          'c': {
              'a': 1
          }
      }, ['a == None', 'c.a == 1'])
      params.validate()
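Restrictions are strings over dotted key paths, compared either against another key or against a constant; validate() raises KeyError when any restriction fails. A short sketch using hypothetical train/eval keys:

# Hypothetical keys, chosen only to illustrate the restriction syntax.
params = params_dict.ParamsDict(
    {'train': {'batch_size': 64}, 'eval': {'batch_size': 64}},
    ['train.batch_size == eval.batch_size'])
params.validate()  # passes
params.override({'eval': {'batch_size': 8}})
# params.validate() would now raise KeyError.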
 def test_cosine_learning_rate_with_linear_warmup(self):
     params = params_dict.ParamsDict({
         'type': 'cosine',
         'init_learning_rate': 0.2,
         'warmup_learning_rate': 0.1,
         'warmup_steps': 100,
         'total_steps': 1100,
     })
     learning_rate_fn = learning_rates.learning_rate_generator(params)
     lr = learning_rate_fn(0).numpy()
     self.assertAlmostEqual(0.1, lr)
     lr = learning_rate_fn(50).numpy()
     self.assertAlmostEqual(0.15, lr)
     lr = learning_rate_fn(100).numpy()
     self.assertAlmostEqual(0.2, lr)
     lr = learning_rate_fn(350).numpy()
     self.assertAlmostEqual(0.17071067811865476, lr)
     lr = learning_rate_fn(600).numpy()
     self.assertAlmostEqual(0.1, lr)
     lr = learning_rate_fn(850).numpy()
     self.assertAlmostEqual(0.029289321881345254, lr)
     lr = learning_rate_fn(1100).numpy()
     self.assertAlmostEqual(0.0, lr)
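The asserted values follow from the standard formulation this test appears to encode (an assumption, but it reproduces every checkpoint): linear warmup from warmup_learning_rate to init_learning_rate over warmup_steps, then cosine decay to zero over the remaining steps. Reference arithmetic:

import math

init_lr, warmup_lr, warmup_steps, total_steps = 0.2, 0.1, 100, 1100

def expected_lr(step):
    if step < warmup_steps:
        # Linear warmup: 0.1 -> 0.2 over the first 100 steps.
        return warmup_lr + (init_lr - warmup_lr) * step / warmup_steps
    # Cosine decay from init_lr to 0 over the remaining 1000 steps.
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return 0.5 * init_lr * (1.0 + math.cos(math.pi * progress))

for step in (0, 50, 100, 350, 600, 850, 1100):
    print(step, expected_lr(step))  # matches the assertAlmostEqual values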
 def test_get(self):
     params = params_dict.ParamsDict()
     params.override({'a': 'aa'}, is_strict=False)
     self.assertEqual(params.get('a'), 'aa')
     self.assertEqual(params.get('b', 2), 2)
     self.assertEqual(params.get('b'), None)
 def test_contains(self):
     params = params_dict.ParamsDict()
     params.override({'a': 'aa'}, is_strict=False)
     self.assertIn('a', params)
     self.assertNotIn('b', params)
 def test_getattr(self):
     params = params_dict.ParamsDict()
     params.override({'a': 'aa', 'b': 2, 'c': None}, is_strict=False)
     self.assertEqual(params.a, 'aa')
     self.assertEqual(params.b, 2)
     self.assertEqual(params.c, None)
 def test_init_from_a_param_dict(self):
     params_init = params_dict.ParamsDict({'a': 'aa', 'b': 2})
     params = params_dict.ParamsDict(params_init)
     self.assertEqual(params.a, 'aa')
     self.assertEqual(params.b, 2)
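Constructing a ParamsDict from another ParamsDict carries its entries over, as the test checks; a round trip through as_dict() (a method the module provides) shows the copied contents:

src = params_dict.ParamsDict({'a': 'aa', 'b': 2})
dst = params_dict.ParamsDict(src)
print(dst.as_dict())  # {'a': 'aa', 'b': 2}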
Example #11
"""Config template to train Object Localization Network (OLN)."""

from official.legacy.detection.configs import base_config
from official.modeling.hyperparams import params_dict


# pylint: disable=line-too-long
OLNMASK_CFG = params_dict.ParamsDict(base_config.BASE_CFG)
OLNMASK_CFG.override(
    {
        'type': 'olnmask',
        'eval': {
            'type': 'oln_xclass_box',
            'use_category': False,
            'seen_class': 'voc',
            'num_images_to_visualize': 0,
        },
        'architecture': {
            'parser': 'olnmask_parser',
            'min_level': 2,
            'max_level': 6,
            'include_rpn_class': False,
            'include_frcnn_class': False,
Example #12
"""Config template to train Mask R-CNN."""

from official.legacy.detection.configs import base_config
from official.modeling.hyperparams import params_dict


# pylint: disable=line-too-long
MASKRCNN_CFG = params_dict.ParamsDict(base_config.BASE_CFG)
MASKRCNN_CFG.override(
    {
        'type': 'mask_rcnn',
        'eval': {
            'type': 'box_and_mask',
            'num_images_to_visualize': 0,
        },
        'architecture': {
            'parser': 'maskrcnn_parser',
            'min_level': 2,
            'max_level': 6,
            'include_mask': True,
            'mask_target_size': 28,
        },
        'maskrcnn_parser': {
Example #13
"""Config template to train Retinanet."""

from official.legacy.detection.configs import base_config
from official.modeling.hyperparams import params_dict


# pylint: disable=line-too-long
RETINANET_CFG = params_dict.ParamsDict(base_config.BASE_CFG)
RETINANET_CFG.override(
    {
        'type': 'retinanet',
        'architecture': {
            'parser': 'retinanet_parser',
        },
        'retinanet_parser': {
            'output_size': [640, 640],
            'num_channels': 3,
            'match_threshold': 0.5,
            'unmatched_threshold': 0.5,
            'aug_rand_hflip': True,
            'aug_scale_min': 1.0,
            'aug_scale_max': 1.0,
            'use_autoaugment': False,
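Although the template above is cut off in this listing, the consumption pattern is visible from the other examples: copy the template into a fresh ParamsDict, apply launch-time overrides, then validate. A hedged sketch (the exact launcher plumbing varies):

cfg = params_dict.ParamsDict(RETINANET_CFG)
# Launch-time tweaks to keys the template defines above.
cfg.override({'retinanet_parser': {'aug_scale_min': 0.8,
                                   'aug_scale_max': 1.2}}, is_strict=True)
cfg.validate()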
Example #14
    def testDetectionsOutputShape(self, use_batched_nms):
        min_level = 4
        max_level = 6
        num_scales = 2
        max_total_size = 100
        aspect_ratios = [
            1.0,
            2.0,
        ]
        anchor_scale = 2.0
        output_size = [64, 64]
        num_classes = 4
        # pre_nms_num_boxes = 5000
        score_threshold = 0.01
        batch_size = 1
        postprocessor_params = params_dict.ParamsDict({
            'use_batched_nms': use_batched_nms,
            'max_total_size': max_total_size,
            'nms_iou_threshold': 0.5,
            'score_threshold': score_threshold,
            'min_level': min_level,
            'max_level': max_level,
            'num_classes': num_classes,
        })

        input_anchor = anchor.Anchor(min_level, max_level, num_scales,
                                     aspect_ratios, anchor_scale, output_size)
        cls_outputs_all = (np.random.rand(84, num_classes) -
                           0.5) * 3  # random 84 x num_classes (here 84x4) scores.
        box_outputs_all = np.random.rand(84, 4)  # 84 boxes; 84 = 8*8 + 4*4 + 2*2.
        class_outputs = {
            4: tf.reshape(
                tf.convert_to_tensor(value=cls_outputs_all[0:64],
                                     dtype=tf.float32),
                [1, 8, 8, num_classes]),
            5: tf.reshape(
                tf.convert_to_tensor(value=cls_outputs_all[64:80],
                                     dtype=tf.float32),
                [1, 4, 4, num_classes]),
            6: tf.reshape(
                tf.convert_to_tensor(value=cls_outputs_all[80:84],
                                     dtype=tf.float32),
                [1, 2, 2, num_classes]),
        }
        box_outputs = {
            4: tf.reshape(
                tf.convert_to_tensor(value=box_outputs_all[0:64],
                                     dtype=tf.float32), [1, 8, 8, 4]),
            5: tf.reshape(
                tf.convert_to_tensor(value=box_outputs_all[64:80],
                                     dtype=tf.float32), [1, 4, 4, 4]),
            6: tf.reshape(
                tf.convert_to_tensor(value=box_outputs_all[80:84],
                                     dtype=tf.float32), [1, 2, 2, 4]),
        }
        image_info = tf.constant(
            [[[1000, 1000], [100, 100], [0.1, 0.1], [0, 0]]], dtype=tf.float32)
        predict_fn = postprocess.GenerateOneStageDetections(
            postprocessor_params)
        boxes, scores, classes, valid_detections = predict_fn(
            inputs=(box_outputs, class_outputs, input_anchor.multilevel_boxes,
                    image_info[:, 1:2, :]))
        (boxes, scores, classes, valid_detections) = [
            boxes.numpy(),
            scores.numpy(),
            classes.numpy(),
            valid_detections.numpy()
        ]
        self.assertEqual(boxes.shape, (batch_size, max_total_size, 4))
        self.assertEqual(scores.shape, (batch_size, max_total_size))
        self.assertEqual(classes.shape, (batch_size, max_total_size))
        self.assertEqual(valid_detections.shape, (batch_size,))
Example #15
 def test_restrictions_do_not_have_typos(self):
     cfg = params_dict.ParamsDict(retinanet_config.RETINANET_CFG,
                                  retinanet_config.RETINANET_RESTRICTIONS)
     cfg.validate()
Example #16
    def testBaseModelTrainAndEval(self):
        params = params_dict.ParamsDict({
            'batch_size': 1,
            'model_dir': self._model_dir,
            'train': {
                'optimizer': {
                    'type': 'momentum',
                    'momentum': 0.9,
                },
                'learning_rate': {
                    'type': 'step',
                    'init_learning_rate': 0.2,
                    'warmup_learning_rate': 0.1,
                    'warmup_steps': 100,
                    'learning_rate_levels': [0.02, 0.002],
                    'learning_rate_steps': [200, 400],
                },
                'checkpoint': {
                    'path': '',
                    'prefix': '',
                    'skip_checkpoint_variables': True,
                },
                'iterations_per_loop': 1,
                'frozen_variable_prefix': 'resnet50_conv2',
            },
            'enable_summary': False,
            'architecture': {
                'use_bfloat16': False,
            },
        })

        def _input_fn(params):
            features = tf.data.Dataset.from_tensor_slices([[1], [2], [3]])
            labels = tf.data.Dataset.from_tensor_slices([[1], [2], [3]])
            data = tf.data.Dataset.zip((features, labels)).repeat()
            dataset = data.batch(params['batch_size'], drop_remainder=True)
            return dataset

        model_factory = DummyModel(params)

        # Use local TPU for testing.
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)

        with tf.device(''):
            with strategy.scope():
                model = model_factory.build_model()
                metrics = [tf.keras.metrics.MeanSquaredError()]
                loss = model_factory.build_loss_fn()
                model.compile(optimizer=model.optimizer,
                              loss=loss,
                              metrics=metrics)
                model.summary()

            training_steps_per_epoch = 3
            tensorboard_cb = tf.keras.callbacks.TensorBoard(
                log_dir=self._model_dir)
            weights_file_path = os.path.join(
                self._model_dir, 'weights.{epoch:02d}-{val_loss:.2f}.tf')
            checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
                weights_file_path)

            training_callbacks = [checkpoint_cb, tensorboard_cb]

            model.fit(_input_fn({'batch_size': params.batch_size}),
                      epochs=2,
                      steps_per_epoch=training_steps_per_epoch,
                      callbacks=training_callbacks,
                      validation_data=_input_fn(
                          {'batch_size': params.batch_size}),
                      validation_steps=1,
                      validation_freq=1)
            model.evaluate(_input_fn({'batch_size': params.batch_size}),
                           steps=3)

        out_files = tf.io.gfile.glob(os.path.join(self._model_dir, '*'))
        logging.info('Model output files: %s', out_files)
        self.assertNotEmpty(out_files)
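The test pins itself to a local TPU; if none is available, the same compile/fit flow runs under the default (no-op) distribution strategy. A hedged CPU/GPU variant of the setup block, reusing the names from the test above:

strategy = tf.distribute.get_strategy()  # default strategy, no TPU required
with strategy.scope():
    model = model_factory.build_model()
    model.compile(optimizer=model.optimizer,
                  loss=model_factory.build_loss_fn(),
                  metrics=[tf.keras.metrics.MeanSquaredError()])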