    def test_evaluate_all(self):
        batch_size = 8
        num_threads = 4
        global_batch_size = num_threads * batch_size

        config = exp_factory.get_exp_config('mobilenet_edgetpu_v2_xs')
        config.task.validation_data.global_batch_size = global_batch_size
        config.task.validation_data.dtype = 'float32'

        task = image_classification.EdgeTPUTask(config.task)
        dataset = task.build_inputs(config.task.validation_data)

        num_batches = 5
        with mock.patch.object(tflite_imagenet_evaluator.AccuracyEvaluator,
                               'evaluate_single_image',
                               return_value=True,
                               autospec=True):
            evaluator = tflite_imagenet_evaluator.AccuracyEvaluator(
                model_content='MockModelContent'.encode('utf-8'),
                dataset=dataset.take(num_batches),
                num_threads=num_threads)
            num_evals, num_corrects = evaluator.evaluate_all()

        expected_evals = num_batches * num_threads * batch_size

        self.assertEqual(num_evals, expected_evals)
        self.assertEqual(num_corrects, expected_evals)
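
A note on the patching pattern above: mock.patch.object(..., autospec=True) replaces the method on the class itself, so every evaluation thread sees the stub, and each call still receives the instance as its first argument. A minimal self-contained sketch of the same idea (the Evaluator class below is hypothetical, not the class under test):

from unittest import mock

class Evaluator:
    def evaluate_single_image(self, image):
        raise NotImplementedError  # stands in for a real TFLite invocation

with mock.patch.object(Evaluator, 'evaluate_single_image',
                       return_value=True, autospec=True) as stub:
    # The autospec'd stub enforces the real signature and records `self`.
    assert Evaluator().evaluate_single_image('img') is True
    stub.assert_called_once()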
Example #2
    def test_task(self, config_name, is_training):
        params = exp_factory.get_exp_config(config_name)

        params.task.train_data.global_batch_size = 16
        params.task.validation_data.global_batch_size = 16
        params.task.model.vocab_sizes = [40, 12, 11, 13, 2, 5]
        params.task.use_synthetic_data = True
        params.task.model.num_dense_features = 5

        ranking_task = task.RankingTask(params.task,
                                        params.trainer.optimizer_config)

        if is_training:
            dataset = data_pipeline.train_input_fn(params.task)
        else:
            dataset = data_pipeline.eval_input_fn(params.task)

        iterator = iter(dataset(ctx=None))
        model = ranking_task.build_model()

        if is_training:
            ranking_task.train_step(next(iterator),
                                    model,
                                    model.optimizer,
                                    metrics=model.metrics)
        else:
            ranking_task.validation_step(next(iterator),
                                         model,
                                         metrics=model.metrics)
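
The dataset(ctx=None) call works because train_input_fn and eval_input_fn return a callable that maps a tf.distribute.InputContext to a tf.data.Dataset; passing ctx=None builds the unsharded pipeline. A minimal sketch of that pattern, assuming nothing about the real ranking pipeline:

import tensorflow as tf

def make_input_fn(global_batch_size):
    def input_fn(ctx=None):
        # Per-replica batch size when running under a distribution strategy.
        batch_size = (ctx.get_per_replica_batch_size(global_batch_size)
                      if ctx else global_batch_size)
        ds = tf.data.Dataset.from_tensor_slices(tf.range(64))
        if ctx and ctx.num_input_pipelines > 1:
            ds = ds.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
        return ds.batch(batch_size)
    return input_fn

batch = next(iter(make_input_fn(16)(ctx=None)))  # first batch, shape (16,)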
Example #3
  def test_export_tflite_detection(self, experiment, quant_type,
                                   input_image_size):
    test_tfrecord_file = os.path.join(self.get_temp_dir(), 'det_test.tfrecord')
    example = tfexample_utils.create_detection_test_example(
        image_height=input_image_size[0],
        image_width=input_image_size[1],
        image_channel=3,
        num_instances=10)
    self._create_test_tfrecord(
        tfrecord_file=test_tfrecord_file, example=example, num_samples=10)
    params = exp_factory.get_exp_config(experiment)
    params.task.validation_data.input_path = test_tfrecord_file
    params.task.train_data.input_path = test_tfrecord_file
    temp_dir = self.get_temp_dir()
    module = detection_serving.DetectionModule(
        params=params,
        batch_size=1,
        input_image_size=input_image_size,
        input_type='tflite')
    self._export_from_module(
        module=module,
        input_type='tflite',
        saved_model_dir=os.path.join(temp_dir, 'saved_model'))

    tflite_model = export_tflite_lib.convert_tflite_model(
        saved_model_dir=os.path.join(temp_dir, 'saved_model'),
        quant_type=quant_type,
        params=params,
        calibration_steps=5)

    self.assertIsInstance(tflite_model, bytes)
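
convert_tflite_model returns the flatbuffer as bytes, so a natural extra smoke check (not part of the original test) is to load it into an interpreter and allocate tensors:

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# The input details reveal the expected image shape and post-quantization dtype.
print(interpreter.get_input_details()[0]['shape'])
print(interpreter.get_output_details()[0]['dtype'])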
Example #4
def main(_):

    params = exp_factory.get_exp_config(_EXPERIMENT.value)
    for config_file in _CONFIG_FILE.value or []:
        params = hyperparams.override_params_dict(params,
                                                  config_file,
                                                  is_strict=True)
    if _PARAMS_OVERRIDE.value:
        params = hyperparams.override_params_dict(params,
                                                  _PARAMS_OVERRIDE.value,
                                                  is_strict=True)

    params.validate()
    params.lock()

    export_saved_model_lib.export_inference_graph(
        input_type=_IMAGE_TYPE.value,
        batch_size=_BATCH_SIZE.value,
        input_image_size=[int(x) for x in _INPUT_IMAGE_SIZE.value.split(',')],
        params=params,
        checkpoint_path=_CHECKPOINT_PATH.value,
        export_dir=_EXPORT_DIR.value,
        export_checkpoint_subdir=_EXPORT_CHECKPOINT_SUBDIR.value,
        export_saved_model_subdir=_EXPORT_SAVED_MODEL_SUBDIR.value,
        log_model_flops_and_params=_LOG_MODEL_FLOPS_AND_PARAMS.value,
        input_name=_INPUT_NAME.value)
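
This main assumes module-level absl flag holders whose definitions are not shown here. A plausible reconstruction (the names match the usage above; defaults and help strings are guesses):

from absl import flags

_EXPERIMENT = flags.DEFINE_string('experiment', None, 'Registered experiment name.')
_CONFIG_FILE = flags.DEFINE_multi_string('config_file', None, 'YAML/JSON override files.')
_PARAMS_OVERRIDE = flags.DEFINE_string('params_override', None, 'Inline params override.')
_IMAGE_TYPE = flags.DEFINE_string('input_type', 'image_tensor', 'Export input signature type.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 1, 'Export batch size.')
_INPUT_IMAGE_SIZE = flags.DEFINE_string('input_image_size', '224,224', 'Comma-separated H,W.')
# ...and likewise for _CHECKPOINT_PATH, _EXPORT_DIR, _EXPORT_CHECKPOINT_SUBDIR,
# _EXPORT_SAVED_MODEL_SUBDIR, _LOG_MODEL_FLOPS_AND_PARAMS and _INPUT_NAME.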
Example #5
    def test_maskrcnn_task_train(self, test_config, is_training):
        """Mask R-CNN task test for training and validation using toy configs."""
        config = exp_factory.get_exp_config(test_config)
        # Apply the mixed-precision policy; constructing it alone is a no-op.
        tf.keras.mixed_precision.experimental.set_policy("mixed_bfloat16")
        # Modify the config to suit local testing.
        config.trainer.steps_per_loop = 1
        config.task.train_data.global_batch_size = 2
        config.task.model.input_size = [384, 384, 3]
        config.train_steps = 2
        config.task.train_data.shuffle_buffer_size = 10
        config.task.train_data.input_path = "/readahead/200M/placer/prod/home/snaggletooth/test/data/coco/train-00000-of-00256.tfrecord"
        config.task.validation_data.global_batch_size = 2
        config.task.validation_data.input_path = "/readahead/200M/placer/prod/home/snaggletooth/test/data/coco/val-00000-of-00032.tfrecord"

        task = maskrcnn.MaskRCNNTask(config.task)
        model = task.build_model()
        metrics = task.build_metrics(training=is_training)

        strategy = tf.distribute.get_strategy()

        data_config = config.task.train_data if is_training else config.task.validation_data
        dataset = orbit.utils.make_distributed_dataset(strategy,
                                                       task.build_inputs,
                                                       data_config)
        iterator = iter(dataset)
        opt_factory = optimization.OptimizerFactory(
            config.trainer.optimizer_config)
        optimizer = opt_factory.build_optimizer(
            opt_factory.build_learning_rate())

        if is_training:
            task.train_step(next(iterator), model, optimizer, metrics=metrics)
        else:
            task.validation_step(next(iterator), model, metrics=metrics)
Example #6
 def test_build_model_fail_with_none_batch_size(self):
     params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
     with self.assertRaisesRegex(
             ValueError, 'batch_size cannot be None for detection models.'):
         detection.DetectionModule(params,
                                   batch_size=None,
                                   input_image_size=[640, 640])
Example #7
def parse_configuration(flags_obj):
    """Parses ExperimentConfig from flags."""

    # 1. Get the default config from the registered experiment.
    params = exp_factory.get_exp_config(flags_obj.experiment)
    params.override({'runtime': {
        'tpu': flags_obj.tpu,
    }})

    # 2. Get the first level of override from `--config_file`.
    #    `--config_file` is typically used as a template that specifies the common
    #    override for a particular experiment.
    for config_file in flags_obj.config_file or []:
        params = hyperparams.override_params_dict(params,
                                                  config_file,
                                                  is_strict=True)

    # 3. Get the second level of override from `--params_override`.
    #    `--params_override` is typically used as a further override over the
    #    template. For example, one may define a particular template for training
    #    ResNet50 on ImageNet in a config file and pass it via `--config_file`,
    #    then define different learning rates and pass it via `--params_override`.
    if flags_obj.params_override:
        params = hyperparams.override_params_dict(params,
                                                  flags_obj.params_override,
                                                  is_strict=True)

    params.validate()
    params.lock()

    pp = pprint.PrettyPrinter()
    logging.info('Final experiment parameters: %s',
                 pp.pformat(params.as_dict()))

    return params
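
Because parse_configuration only reads plain attributes off flags_obj, it can be exercised without absl's global FLAGS, for example in a quick test (the experiment name is just a stand-in for any registered one):

import types

fake_flags = types.SimpleNamespace(
    experiment='resnet_imagenet',
    tpu='',
    config_file=None,
    params_override=None)
params = parse_configuration(fake_flags)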
Example #8
    def test_task(self, config_name):
        config = exp_factory.get_exp_config(config_name)
        config.task.train_data.global_batch_size = 2

        task = yolo.YoloTask(config.task)
        model = task.build_model()
        metrics = task.build_metrics(training=False)
        strategy = tf.distribute.get_strategy()

        dataset = orbit.utils.make_distributed_dataset(strategy,
                                                       task.build_inputs,
                                                       config.task.train_data)

        iterator = iter(dataset)
        opt_factory = optimization.OptimizerFactory(
            config.trainer.optimizer_config)
        optimizer = opt_factory.build_optimizer(
            opt_factory.build_learning_rate())
        logs = task.train_step(next(iterator),
                               model,
                               optimizer,
                               metrics=metrics)
        self.assertIn("loss", logs)
        logs = task.validation_step(next(iterator), model, metrics=metrics)
        self.assertIn("loss", logs)
Example #9
    def test_task(self, config_name):
        config = exp_factory.get_exp_config(config_name)
        config.task.train_data.global_batch_size = 2

        task = img_cls_task.ImageClassificationTask(config.task)
        model = task.build_model()
        metrics = task.build_metrics()
        strategy = tf.distribute.get_strategy()

        dataset = orbit.utils.make_distributed_dataset(strategy,
                                                       task.build_inputs,
                                                       config.task.train_data)

        iterator = iter(dataset)
        opt_factory = optimization.OptimizerFactory(
            config.trainer.optimizer_config)
        optimizer = opt_factory.build_optimizer(
            opt_factory.build_learning_rate())
        logs = task.train_step(next(iterator),
                               model,
                               optimizer,
                               metrics=metrics)
        for metric in metrics:
            logs[metric.name] = metric.result()
        self.assertIn('loss', logs)
        self.assertIn('accuracy', logs)
        self.assertIn('top_5_accuracy', logs)
        logs = task.validation_step(next(iterator), model, metrics=metrics)
        for metric in metrics:
            logs[metric.name] = metric.result()
        self.assertIn('loss', logs)
        self.assertIn('accuracy', logs)
        self.assertIn('top_5_accuracy', logs)
Example #10
def build_experiment_model(experiment_type):
    """Builds model from experiment type configuration."""
    params = exp_factory.get_exp_config(experiment_type)
    params.validate()
    params.lock()
    task = task_factory.get_task(params.task)
    return task.build_model()
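
Typical use of the helper above; validate() and lock() run before the build, so config mistakes surface early and the returned params cannot be mutated afterwards ('resnet_imagenet' is one of the registered names used elsewhere on this page):

model = build_experiment_model('resnet_imagenet')
model.summary()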
Example #11
  def test_export_tflite_detection(self, experiment, quant_type):

    params = exp_factory.get_exp_config(experiment)
    params.task.validation_data.input_path = self.test_tfrecord_file_det
    params.task.train_data.input_path = self.test_tfrecord_file_det
    params.task.model.num_classes = 2
    params.task.model.backbone.spinenet_mobile.model_id = '49XS'
    params.task.model.input_size = [128, 128, 3]
    params.task.model.detection_generator.nms_version = 'v1'
    params.task.train_data.shuffle_buffer_size = 5
    temp_dir = self.get_temp_dir()
    module = detection_serving.DetectionModule(
        params=params,
        batch_size=1,
        input_image_size=[128, 128],
        input_type='tflite')
    self._export_from_module(
        module=module,
        input_type='tflite',
        saved_model_dir=os.path.join(temp_dir, 'saved_model'))

    tflite_model = export_tflite_lib.convert_tflite_model(
        saved_model_dir=os.path.join(temp_dir, 'saved_model'),
        quant_type=quant_type,
        params=params,
        calibration_steps=1)

    self.assertIsInstance(tflite_model, bytes)
Example #12
  def testTaskWithUnstructuredSparsity(self, config_name):
    config = exp_factory.get_exp_config(config_name)
    config.task.train_data.global_batch_size = 2

    task = img_cls_task.ImageClassificationTask(config.task)
    model = task.build_model()

    metrics = task.build_metrics()
    strategy = tf.distribute.get_strategy()

    dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
                                                   config.task.train_data)

    iterator = iter(dataset)
    opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
    optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())

    if isinstance(optimizer, optimization.ExponentialMovingAverage
                 ) and not optimizer.has_shadow_copy:
      optimizer.shadow_copy(model)

    if config.task.pruning:
      # This is an auxiliary initialization required to prune a model; it is
      # originally done in the train library.
      actions.PruningAction(
          export_dir=tempfile.gettempdir(), model=model, optimizer=optimizer)

    # Check all layers and target weights are successfully pruned.
    self._validate_model_pruned(model, config_name)

    logs = task.train_step(next(iterator), model, optimizer, metrics=metrics)
    self._validate_metrics(logs, metrics)

    logs = task.validation_step(next(iterator), model, metrics=metrics)
    self._validate_metrics(logs, metrics)
Example #13
def main(_) -> None:
    params = exp_factory.get_exp_config(FLAGS.experiment)
    if FLAGS.config_file is not None:
        for config_file in FLAGS.config_file:
            params = hyperparams.override_params_dict(params,
                                                      config_file,
                                                      is_strict=True)
    if FLAGS.params_override:
        params = hyperparams.override_params_dict(params,
                                                  FLAGS.params_override,
                                                  is_strict=True)

    params.validate()
    params.lock()

    logging.info('Converting SavedModel from %s to TFLite model...',
                 FLAGS.saved_model_dir)
    tflite_model = export_tflite_lib.convert_tflite_model(
        saved_model_dir=FLAGS.saved_model_dir,
        quant_type=FLAGS.quant_type,
        params=params,
        calibration_steps=FLAGS.calibration_steps)

    with tf.io.gfile.GFile(FLAGS.tflite_path, 'wb') as fw:
        fw.write(tflite_model)

    logging.info('TFLite model converted and saved to %s.', FLAGS.tflite_path)
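
This converter script assumes classic absl flags accessed through FLAGS. The definitions are not shown; judging from the usage they would look roughly like this (defaults are guesses):

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('experiment', None, 'Registered experiment name.')
flags.DEFINE_multi_string('config_file', None, 'YAML override files.')
flags.DEFINE_string('params_override', None, 'Inline params override.')
flags.DEFINE_string('saved_model_dir', None, 'SavedModel to convert.')
flags.DEFINE_string('quant_type', None, 'Quantization mode passed to convert_tflite_model.')
flags.DEFINE_integer('calibration_steps', 500, 'Steps of post-training-quantization calibration.')
flags.DEFINE_string('tflite_path', None, 'Where to write the .tflite flatbuffer.')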
Example #14
    def test_task(self):
        config = exp_factory.get_exp_config('video_ssl_pretrain_kinetics600')
        config.task.train_data.global_batch_size = 2
        config.task.train_data.input_path = self._data_path

        task = pretrain.VideoSSLPretrainTask(config.task)
        model = task.build_model()
        metrics = task.build_metrics()
        strategy = tf.distribute.get_strategy()

        dataset = orbit.utils.make_distributed_dataset(
            strategy, functools.partial(task.build_inputs),
            config.task.train_data)

        iterator = iter(dataset)
        opt_factory = optimization.OptimizerFactory(
            config.trainer.optimizer_config)
        optimizer = opt_factory.build_optimizer(
            opt_factory.build_learning_rate())
        logs = task.train_step(next(iterator),
                               model,
                               optimizer,
                               metrics=metrics)
        self.assertIn('total_loss', logs)
        self.assertIn('reg_loss', logs)
        self.assertIn('contrast_acc', logs)
        self.assertIn('contrast_entropy', logs)
Example #15
 def _get_basnet_module(self):
     params = exp_factory.get_exp_config('basnet_duts')
     # params.task.model.backbone.resnet.model_id = 18
     basnet_module = basnet.BASNetModule(params,
                                         batch_size=1,
                                         input_image_size=[256, 256])
     return basnet_module
Example #16
    def test_task(self, config_name):
        config = exp_factory.get_exp_config(config_name)
        config.task.train_data.global_batch_size = 2

        task = image_classification.EdgeTPUTask(config.task)
        model = task.build_model()
        metrics = task.build_metrics()

        dataset = dummy_imagenet_dataset()

        iterator = iter(dataset)
        opt_factory = optimization.OptimizerFactory(
            config.trainer.optimizer_config)
        optimizer = opt_factory.build_optimizer(
            opt_factory.build_learning_rate())
        if isinstance(optimizer, optimization.ExponentialMovingAverage
                      ) and not optimizer.has_shadow_copy:
            optimizer.shadow_copy(model)

        logs = task.train_step(next(iterator),
                               model,
                               optimizer,
                               metrics=metrics)
        for metric in metrics:
            logs[metric.name] = metric.result()
        self.assertIn('loss', logs)
        self.assertIn('accuracy', logs)
        self.assertIn('top_5_accuracy', logs)
        logs = task.validation_step(next(iterator), model, metrics=metrics)
        for metric in metrics:
            logs[metric.name] = metric.result()
        self.assertIn('loss', logs)
        self.assertIn('accuracy', logs)
        self.assertIn('top_5_accuracy', logs)
Example #17
 def _get_detection_module(self, experiment_name):
   params = exp_factory.get_exp_config(experiment_name)
   params.task.model.backbone.resnet.model_id = 18
   params.task.model.detection_generator.use_batched_nms = True
   detection_module = detection.DetectionModule(
       params, batch_size=1, input_image_size=[640, 640])
   return detection_module
Example #18
  def test_retinanet_task(self, test_config, is_training):
    """RetinaNet task test for training and validation using toy configs."""
    config = exp_factory.get_exp_config(test_config)
    # modify config to suit local testing
    config.task.model.input_size = [128, 128, 3]
    config.trainer.steps_per_loop = 1
    config.task.train_data.global_batch_size = 1
    config.task.validation_data.global_batch_size = 1
    config.task.train_data.shuffle_buffer_size = 2
    config.task.validation_data.shuffle_buffer_size = 2
    config.train_steps = 1

    task = retinanet.RetinaNetTask(config.task)
    model = task.build_model()
    metrics = task.build_metrics(training=is_training)

    strategy = tf.distribute.get_strategy()

    data_config = config.task.train_data if is_training else config.task.validation_data
    dataset = orbit.utils.make_distributed_dataset(strategy, task.build_inputs,
                                                   data_config)
    iterator = iter(dataset)
    opt_factory = optimization.OptimizerFactory(config.trainer.optimizer_config)
    optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())

    if is_training:
      task.train_step(next(iterator), model, optimizer, metrics=metrics)
    else:
      task.validation_step(next(iterator), model, metrics=metrics)
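
Tests like this one normally receive test_config and is_training from absl's parameterized decorator, so a single body covers both the train and the validation paths. A sketch of that wiring (absl.testing.parameterized is standard; the experiment name is one used elsewhere on this page):

from absl.testing import parameterized

class RetinaNetTaskTest(parameterized.TestCase):

  @parameterized.parameters(
      ('retinanet_resnetfpn_coco', True),
      ('retinanet_resnetfpn_coco', False),
  )
  def test_retinanet_task(self, test_config, is_training):
    ...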
Example #19
def main(_):

    params = exp_factory.get_exp_config(FLAGS.experiment)
    for config_file in FLAGS.config_file or []:
        params = hyperparams.override_params_dict(params,
                                                  config_file,
                                                  is_strict=True)
    if FLAGS.params_override:
        params = hyperparams.override_params_dict(params,
                                                  FLAGS.params_override,
                                                  is_strict=True)

    params.validate()
    params.lock()

    export_saved_model_lib.export_inference_graph(
        input_type=FLAGS.input_type,
        batch_size=FLAGS.batch_size,
        input_image_size=[int(x) for x in FLAGS.input_image_size.split(',')],
        params=params,
        checkpoint_path=FLAGS.checkpoint_path,
        export_dir=FLAGS.export_dir,
        export_module=basnet.BASNetModule(
            params=params,
            batch_size=FLAGS.batch_size,
            input_image_size=[
                int(x) for x in FLAGS.input_image_size.split(',')
            ]),
        export_checkpoint_subdir='checkpoint',
        export_saved_model_subdir='saved_model')
Example #20
 def test_build_model_fail_with_batched_nms_false(self):
     params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
     params.task.model.detection_generator.use_batched_nms = False
     with self.assertRaisesRegex(ValueError,
                                 'Only batched_nms is supported.'):
         detection.DetectionModule(params,
                                   batch_size=1,
                                   input_image_size=[640, 640])
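
The failure above is only the guard rail; with batched NMS enabled, as in Example #17, the same module builds cleanly:

params = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
params.task.model.detection_generator.use_batched_nms = True
module = detection.DetectionModule(
    params, batch_size=1, input_image_size=[640, 640])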
Example #21
    def setUp(self):
        super().setUp()
        self._num_channels = 2
        self._input_image_size = [32, 32, 32]
        self._params = exp_factory.get_exp_config('seg_unet3d_test')

        input_shape = self._input_image_size + [self._num_channels]
        self._image_array = np.zeros(shape=input_shape, dtype=np.uint8)
Example #22
 def _get_segmentation_module(self, input_type):
     params = exp_factory.get_exp_config('mnv2_deeplabv3_pascal')
     segmentation_module = semantic_segmentation.SegmentationModule(
         params,
         batch_size=1,
         input_image_size=[112, 112],
         input_type=input_type)
     return segmentation_module
Example #23
 def test_detr_configs(self, config_name):
   config = exp_factory.get_exp_config(config_name)
   self.assertIsInstance(config, cfg.ExperimentConfig)
   self.assertIsInstance(config.task, exp_cfg.DetrTask)
   self.assertIsInstance(config.task.train_data, cfg.DataConfig)
   config.task.train_data.is_training = None
   with self.assertRaises(KeyError):
     config.validate()
Example #24
 def _get_classification_module(self):
   params = exp_factory.get_exp_config('video_classification_ucf101')
   params.task.train_data.feature_shape = (8, 64, 64, 3)
   params.task.validation_data.feature_shape = (8, 64, 64, 3)
   params.task.model.backbone.resnet_3d.model_id = 50
   classification_module = video_classification.VideoClassificationModule(
       params, batch_size=1, input_image_size=[8, 64, 64])
   return classification_module
Example #25
 def _get_classification_module(self, input_type, input_image_size):
     params = exp_factory.get_exp_config('resnet_imagenet')
     params.task.model.backbone.resnet.model_id = 18
     module = export_module_factory.create_classification_export_module(
         params,
         input_type,
         batch_size=1,
         input_image_size=input_image_size)
     return module
Example #26
 def test_video_ssl_linear_eval_configs(self, config_name):
   config = exp_factory.get_exp_config(config_name)
   self.assertIsInstance(config, cfg.ExperimentConfig)
   self.assertIsInstance(config.task, exp_cfg.VideoSSLEvalTask)
   self.assertIsInstance(config.task.model, exp_cfg.VideoSSLModel)
   self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
   config.task.train_data.is_training = None
   with self.assertRaises(KeyError):
     config.validate()
Example #27
 def test_yt8m_configs(self, config_name):
     config = exp_factory.get_exp_config(config_name)
     self.assertIsInstance(config, cfg.ExperimentConfig)
     self.assertIsInstance(config.task, cfg.TaskConfig)
     self.assertIsInstance(config.task.model, hyperparams.Config)
     self.assertIsInstance(config.task.train_data, cfg.DataConfig)
     config.task.train_data.is_training = None
     with self.assertRaises(KeyError):
         config.validate()
Example #28
 def test_video_classification_configs(self, config_name):
     config = exp_factory.get_exp_config(config_name)
     self.assertIsInstance(config, cfg.ExperimentConfig)
     self.assertIsInstance(config.task, exp_cfg.VideoClassificationTask)
     self.assertIsInstance(config.task.model, movinet.MovinetModel)
     self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
     config.task.train_data.is_training = None
     with self.assertRaises(KeyError):
         config.validate()
Example #29
 def _get_classification_module(self, input_type):
     params = exp_factory.get_exp_config('resnet_imagenet')
     params.task.model.backbone.resnet.model_id = 18
     classification_module = image_classification.ClassificationModule(
         params,
         batch_size=1,
         input_image_size=[224, 224],
         input_type=input_type)
     return classification_module
Example #30
def parse_configuration(flags_obj, lock_return=True, print_return=True):
    """Parses ExperimentConfig from flags."""

    if flags_obj.experiment is None:
        raise ValueError('The flag --experiment must be specified.')

    # 1. Get the default config from the registered experiment.
    params = exp_factory.get_exp_config(flags_obj.experiment)

    # 2. Get the first level of override from `--config_file`.
    #    `--config_file` is typically used as a template that specifies the common
    #    override for a particular experiment.
    for config_file in flags_obj.config_file or []:
        params = hyperparams.override_params_dict(params,
                                                  config_file,
                                                  is_strict=True)

    # 3. Override the TPU address and tf.data service address.
    params.override({
        'runtime': {
            'tpu': flags_obj.tpu,
        },
    })
    if ('tf_data_service' in flags_obj and flags_obj.tf_data_service
            and isinstance(params.task, config_definitions.TaskConfig)):
        params.override({
            'task': {
                'train_data': {
                    'tf_data_service_address': flags_obj.tf_data_service,
                },
                'validation_data': {
                    'tf_data_service_address': flags_obj.tf_data_service,
                }
            }
        })

    # 4. Get the second level of override from `--params_override`.
    #    `--params_override` is typically used as a further override over the
    #    template. For example, one may define a particular template for training
    #    ResNet50 on ImageNet in a config file and pass it via `--config_file`,
    #    then define different learning rates and pass it via `--params_override`.
    if flags_obj.params_override:
        params = hyperparams.override_params_dict(params,
                                                  flags_obj.params_override,
                                                  is_strict=True)

    params.validate()
    if lock_return:
        params.lock()

    if print_return:
        pp = pprint.PrettyPrinter()
        logging.info('Final experiment parameters:\n%s',
                     pp.pformat(params.as_dict()))

    return params
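
In the Model Garden binaries this parser is wired into main roughly as sketched below; define_flags is assumed to register --experiment, --config_file, --params_override, --tpu and --tf_data_service before app.run parses argv:

from absl import app
from absl import flags
from official.common import flags as tfm_flags

def main(_):
    params = parse_configuration(flags.FLAGS)
    # ...build the task and run train/eval with params...

if __name__ == '__main__':
    tfm_flags.define_flags()
    app.run(main)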