Example #1
 def testKeyValueOverrideBadKey(self):
     """Tests that overwriting with a bad key causes an exception."""
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     configs = self._create_and_load_test_configs(pipeline_config)
     hparams = tf.contrib.training.HParams(
         **{"train_config.no_such_field": 10})
     with self.assertRaises(ValueError):
         config_util.merge_external_params_with_configs(configs, hparams)
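Examples #1, #2, and #4 call a _create_and_load_test_configs helper that is not shown in these snippets. A minimal sketch of what such a helper might look like, assuming it only writes the proto to a temporary file and reads it back with config_util.get_configs_from_pipeline_file (the method name comes from the calls above; the body is an assumption, not the library's actual test code):

import os

from google.protobuf import text_format
import tensorflow as tf

from object_detection.utils import config_util


class ConfigUtilTest(tf.test.TestCase):

    def _create_and_load_test_configs(self, pipeline_config):
        """Writes `pipeline_config` to a temp file and reloads it as a configs dict."""
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")
        # Serialize the pipeline proto as a text proto so that
        # get_configs_from_pipeline_file can parse it back.
        with tf.gfile.Open(pipeline_config_path, "w") as f:
            f.write(text_format.MessageToString(pipeline_config))
        return config_util.get_configs_from_pipeline_file(pipeline_config_path)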
Example #2
 def testOverwriteBatchSizeWithBadValueType(self):
     """Tests that overwriting with a bad valuye type causes an exception."""
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     pipeline_config.train_config.batch_size = 2
     configs = self._create_and_load_test_configs(pipeline_config)
     # Type should be an integer, but we're passing a string "10".
     hparams = tf.contrib.training.HParams(
         **{"train_config.batch_size": "10"})
     with self.assertRaises(TypeError):
         config_util.merge_external_params_with_configs(configs, hparams)
Example #3
    def testNewFocalLossParameters(self):
        """Tests that the loss weight ratio is updated appropriately."""
        original_alpha = 1.0
        original_gamma = 1.0
        new_alpha = 0.3
        new_gamma = 2.0
        hparams = tf.contrib.training.HParams(focal_loss_alpha=new_alpha,
                                              focal_loss_gamma=new_gamma)
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        classification_loss = pipeline_config.model.ssd.loss.classification_loss
        classification_loss.weighted_sigmoid_focal.alpha = original_alpha
        classification_loss.weighted_sigmoid_focal.gamma = original_gamma
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        classification_loss = configs["model"].ssd.loss.classification_loss
        self.assertAlmostEqual(
            new_alpha, classification_loss.weighted_sigmoid_focal.alpha)
        self.assertAlmostEqual(
            new_gamma, classification_loss.weighted_sigmoid_focal.gamma)
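Most of the remaining examples persist the pipeline proto with a _write_config helper before reloading it through config_util.get_configs_from_pipeline_file. A minimal sketch of such a helper, assuming it simply serializes the message as a text proto (the name comes from the snippets; the body is an assumption):

from google.protobuf import text_format
import tensorflow as tf


def _write_config(config, config_path):
    """Serializes `config` as a text proto and writes it to `config_path`."""
    with tf.gfile.Open(config_path, "w") as f:
        f.write(text_format.MessageToString(config))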
Example #4
 def testOverwriteBatchSizeWithKeyValue(self):
     """Tests that batch size is overwritten based on key/value."""
     pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
     pipeline_config.train_config.batch_size = 2
     configs = self._create_and_load_test_configs(pipeline_config)
     hparams = tf.contrib.training.HParams(
         **{"train_config.batch_size": 10})
     configs = config_util.merge_external_params_with_configs(
         configs, hparams)
     new_batch_size = configs["train_config"].batch_size
     self.assertEqual(10, new_batch_size)
Example #5
def _get_configs_for_model(model_name):
  """Returns configurations for model."""
  filename = get_pipeline_config_path(model_name)
  data_path = _get_data_path()
  label_map_path = _get_labelmap_path()
  configs = config_util.get_configs_from_pipeline_file(filename)
  configs = config_util.merge_external_params_with_configs(
      configs,
      train_input_path=data_path,
      eval_input_path=data_path,
      label_map_path=label_map_path)
  return configs
Example #6
    def testUseMovingAverageForEval(self):
        use_moving_averages_orig = False
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.eval_config.use_moving_averages = use_moving_averages_orig
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, eval_with_moving_averages=True)
        self.assertEqual(True, configs["eval_config"].use_moving_averages)
Example #7
def _get_configs_for_model(model_name):
    """Returns configurations for model."""
    fname = os.path.join(tf.resource_loader.get_data_files_path(),
                         'samples/configs/' + model_name + '.config')
    label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
                                  'data/pet_label_map.pbtxt')
    data_path = os.path.join(tf.resource_loader.get_data_files_path(),
                             'test_data/pets_examples.record')
    configs = config_util.get_configs_from_pipeline_file(fname)
    return config_util.merge_external_params_with_configs(
        configs,
        train_input_path=data_path,
        eval_input_path=data_path,
        label_map_path=label_map_path)
Example #8
    def testTrainShuffle(self):
        """Tests that `train_shuffle` keyword arguments are applied correctly."""
        original_shuffle = True
        desired_shuffle = False

        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_input_reader.shuffle = original_shuffle
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, train_shuffle=desired_shuffle)
        train_shuffle = configs["train_input_config"].shuffle
        self.assertEqual(desired_shuffle, train_shuffle)
Example #9
    def testNewBatchSize(self):
        """Tests that batch size is updated appropriately."""
        original_batch_size = 2
        hparams = tf.contrib.training.HParams(batch_size=16)
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_config.batch_size = original_batch_size
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        new_batch_size = configs["train_config"].batch_size
        self.assertEqual(16, new_batch_size)
Example #10
    def testNewBatchSizeWithClipping(self):
        """Tests that batch size is clipped to 1 from below."""
        original_batch_size = 2
        hparams = tf.contrib.training.HParams(batch_size=0.5)
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_config.batch_size = original_batch_size
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        new_batch_size = configs["train_config"].batch_size
        self.assertEqual(1, new_batch_size)  # Clipped to 1.
Example #11
    def testNewTrainInputPathList(self):
        """Tests that train input path can be overwritten with multiple files."""
        original_train_path = ["path/to/data"]
        new_train_path = ["another/path/to/data", "yet/another/path/to/data"]
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        reader_config = pipeline_config.train_input_reader.tf_record_input_reader
        reader_config.input_path.extend(original_train_path)
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, train_input_path=new_train_path)
        reader_config = configs["train_input_config"].tf_record_input_reader
        final_path = reader_config.input_path
        self.assertEqual(new_train_path, final_path)
Example #12
    def testNewMomentumOptimizerValue(self):
        """Tests that new momentum value is updated appropriately."""
        original_momentum_value = 0.4
        hparams = tf.contrib.training.HParams(momentum_optimizer_value=1.1)
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        optimizer_config = pipeline_config.train_config.optimizer.rms_prop_optimizer
        optimizer_config.momentum_optimizer_value = original_momentum_value
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
        new_momentum_value = optimizer_config.momentum_optimizer_value
        self.assertAlmostEqual(1.0, new_momentum_value)  # Clipped to 1.0.
Example #13
    def testOverWriteRetainOriginalImages(self):
        """Tests that `train_shuffle` keyword arguments are applied correctly."""
        original_retain_original_images = True
        desired_retain_original_images = False

        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.eval_config.retain_original_images = (
            original_retain_original_images)
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs,
            retain_original_images_in_eval=desired_retain_original_images)
        retain_original_images = configs["eval_config"].retain_original_images
        self.assertEqual(desired_retain_original_images,
                         retain_original_images)
Example #14
    def testNewMaskType(self):
        """Tests that mask type can be overwritten in input readers."""
        original_mask_type = input_reader_pb2.NUMERICAL_MASKS
        new_mask_type = input_reader_pb2.PNG_MASKS
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        train_input_reader = pipeline_config.train_input_reader
        train_input_reader.mask_type = original_mask_type
        eval_input_reader = pipeline_config.eval_input_reader
        eval_input_reader.mask_type = original_mask_type
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, mask_type=new_mask_type)
        self.assertEqual(new_mask_type,
                         configs["train_input_config"].mask_type)
        self.assertEqual(new_mask_type, configs["eval_input_config"].mask_type)
Example #15
    def testDontOverwriteEmptyLabelMapPath(self):
        """Tests that label map path will not by overwritten with empty string."""
        original_label_map_path = "path/to/original/label_map"
        new_label_map_path = ""
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        train_input_reader = pipeline_config.train_input_reader
        train_input_reader.label_map_path = original_label_map_path
        eval_input_reader = pipeline_config.eval_input_reader
        eval_input_reader.label_map_path = original_label_map_path
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, label_map_path=new_label_map_path)
        self.assertEqual(original_label_map_path,
                         configs["train_input_config"].label_map_path)
        self.assertEqual(original_label_map_path,
                         configs["eval_input_config"].label_map_path)
Example #16
    def testNewClassificationLocalizationWeightRatio(self):
        """Tests that the loss weight ratio is updated appropriately."""
        original_localization_weight = 0.1
        original_classification_weight = 0.2
        new_weight_ratio = 5.0
        hparams = tf.contrib.training.HParams(
            classification_localization_weight_ratio=new_weight_ratio)
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.model.ssd.loss.localization_weight = (
            original_localization_weight)
        pipeline_config.model.ssd.loss.classification_weight = (
            original_classification_weight)
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        loss = configs["model"].ssd.loss
        self.assertAlmostEqual(1.0, loss.localization_weight)
        self.assertAlmostEqual(new_weight_ratio, loss.classification_weight)
Example #17
    def testMergingKeywordArguments(self):
        """Tests that keyword arguments get merged as do hyperparameters."""
        original_num_train_steps = 100
        original_num_eval_steps = 5
        desired_num_train_steps = 10
        desired_num_eval_steps = 1
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        pipeline_config.train_config.num_steps = original_num_train_steps
        pipeline_config.eval_config.num_examples = original_num_eval_steps
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs,
            train_steps=desired_num_train_steps,
            eval_steps=desired_num_eval_steps)
        train_steps = configs["train_config"].num_steps
        eval_steps = configs["eval_config"].num_examples
        self.assertEqual(desired_num_train_steps, train_steps)
        self.assertEqual(desired_num_eval_steps, eval_steps)
Example #18
    def _assertOptimizerWithNewLearningRate(self, optimizer_name):
        """Asserts successful updating of all learning rate schemes."""
        original_learning_rate = 0.7
        learning_rate_scaling = 0.1
        warmup_learning_rate = 0.07
        hparams = tf.contrib.training.HParams(learning_rate=0.15)
        pipeline_config_path = os.path.join(self.get_temp_dir(),
                                            "pipeline.config")

        # Constant learning rate.
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        optimizer = getattr(pipeline_config.train_config.optimizer,
                            optimizer_name)
        _update_optimizer_with_constant_learning_rate(optimizer,
                                                      original_learning_rate)
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
        constant_lr = optimizer.learning_rate.constant_learning_rate
        self.assertAlmostEqual(hparams.learning_rate,
                               constant_lr.learning_rate)

        # Exponential decay learning rate.
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        optimizer = getattr(pipeline_config.train_config.optimizer,
                            optimizer_name)
        _update_optimizer_with_exponential_decay_learning_rate(
            optimizer, original_learning_rate)
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
        exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate
        self.assertAlmostEqual(hparams.learning_rate,
                               exponential_lr.initial_learning_rate)

        # Manual step learning rate.
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        optimizer = getattr(pipeline_config.train_config.optimizer,
                            optimizer_name)
        _update_optimizer_with_manual_step_learning_rate(
            optimizer, original_learning_rate, learning_rate_scaling)
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
        manual_lr = optimizer.learning_rate.manual_step_learning_rate
        self.assertAlmostEqual(hparams.learning_rate,
                               manual_lr.initial_learning_rate)
        for i, schedule in enumerate(manual_lr.schedule):
            self.assertAlmostEqual(
                hparams.learning_rate * learning_rate_scaling**i,
                schedule.learning_rate)

        # Cosine decay learning rate.
        pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
        optimizer = getattr(pipeline_config.train_config.optimizer,
                            optimizer_name)
        _update_optimizer_with_cosine_decay_learning_rate(
            optimizer, original_learning_rate, warmup_learning_rate)
        _write_config(pipeline_config, pipeline_config_path)

        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        optimizer = getattr(configs["train_config"].optimizer, optimizer_name)
        cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate

        self.assertAlmostEqual(hparams.learning_rate,
                               cosine_lr.learning_rate_base)
        warmup_scale_factor = warmup_learning_rate / original_learning_rate
        self.assertAlmostEqual(hparams.learning_rate * warmup_scale_factor,
                               cosine_lr.warmup_learning_rate)
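Example #18 also relies on a family of _update_optimizer_with_*_learning_rate helpers that populate the optimizer proto before the config is written. Hypothetical sketches of two of them, with field names taken from the assertions in the snippet (the remaining helpers for exponential and cosine decay would fill in their submessages analogously):

def _update_optimizer_with_constant_learning_rate(optimizer, learning_rate):
    """Sets a constant learning rate on the optimizer proto."""
    constant_lr = optimizer.learning_rate.constant_learning_rate
    constant_lr.learning_rate = learning_rate


def _update_optimizer_with_manual_step_learning_rate(
        optimizer, initial_learning_rate, learning_rate_scaling):
    """Adds a manual-step schedule decaying by `learning_rate_scaling` per step."""
    manual_lr = optimizer.learning_rate.manual_step_learning_rate
    manual_lr.initial_learning_rate = initial_learning_rate
    for i in range(3):  # The number of schedule entries is arbitrary for the test.
        step = manual_lr.schedule.add()
        step.learning_rate = initial_learning_rate * learning_rate_scaling**i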