Example #1
 def runner(*args, **kwargs):
     """Creates a temporary context to activate --benchmark_method_flags."""
     if FLAGS.benchmark_method_flags:
         saved_flag_values = flagsaver.save_flag_values()
         for key_value in FLAGS.benchmark_method_flags:
             key, value = key_value.split('=', 1)
             try:
                 numeric_float = float(value)
                 numeric_int = int(numeric_float)
                 # Use an int when the value is integral, otherwise keep the float.
                 if numeric_int == numeric_float:
                     flag_value = numeric_int
                 else:
                     flag_value = numeric_float
             except ValueError:
                 flag_value = value
             logging.info('Setting --%s=%s', key, flag_value)
             setattr(FLAGS, key, flag_value)
     else:
         saved_flag_values = None
     try:
         result = decorated_func(*args, **kwargs)
         return result
     finally:
         if saved_flag_values:
             flagsaver.restore_flag_values(saved_flag_values)
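The `runner` above refers to `decorated_func`, so it only makes sense as the inner wrapper of a decorator. A minimal sketch of what the enclosing decorator could look like, assuming a list flag named `benchmark_method_flags`; the decorator name and the flag definition are assumptions, not part of the original snippet:

import functools

from absl import flags
from absl import logging
from absl.testing import flagsaver

FLAGS = flags.FLAGS
flags.DEFINE_list(
    'benchmark_method_flags', None,
    'Optional "key=value" overrides applied around each benchmark method.')


def with_benchmark_method_flags(decorated_func):
  """Applies --benchmark_method_flags around a single benchmark method."""

  @functools.wraps(decorated_func)
  def runner(*args, **kwargs):
    """Creates a temporary context to activate --benchmark_method_flags."""
    if FLAGS.benchmark_method_flags:
      saved_flag_values = flagsaver.save_flag_values()
      for key_value in FLAGS.benchmark_method_flags:
        key, value = key_value.split('=', 1)
        try:
          # Coerce numeric strings to int or float so typed flags accept them.
          numeric_float = float(value)
          numeric_int = int(numeric_float)
          flag_value = numeric_int if numeric_int == numeric_float else numeric_float
        except ValueError:
          flag_value = value
        logging.info('Setting --%s=%s', key, flag_value)
        setattr(FLAGS, key, flag_value)
    else:
      saved_flag_values = None
    try:
      return decorated_func(*args, **kwargs)
    finally:
      if saved_flag_values:
        flagsaver.restore_flag_values(saved_flag_values)

  return runner

A benchmark method decorated with `@with_benchmark_method_flags` could then be run with, for example, `--benchmark_method_flags=train_steps=10` to override a flag for that run only.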
Example #2
 def setUp(self):
   temp_dir = self.get_temp_dir()
   if TransformerTaskTest.local_flags is None:
     misc.define_transformer_flags()
     # Loads flags; the argv list cannot be empty.
     flags.FLAGS(['foo'])
     TransformerTaskTest.local_flags = flagsaver.save_flag_values()
   else:
     flagsaver.restore_flag_values(TransformerTaskTest.local_flags)
   FLAGS.model_dir = os.path.join(temp_dir, FIXED_TIMESTAMP)
   FLAGS.param_set = 'tiny'
   FLAGS.use_synthetic_data = True
   FLAGS.steps_between_evals = 1
   FLAGS.train_steps = 2
   FLAGS.validation_steps = 1
   FLAGS.batch_size = 8
   FLAGS.max_length = 1
   FLAGS.num_gpus = 1
   FLAGS.distribution_strategy = 'off'
   FLAGS.dtype = 'fp32'
   self.model_dir = FLAGS.model_dir
   self.temp_dir = temp_dir
   self.vocab_file = os.path.join(temp_dir, 'vocab')
   self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)['vocab_size']
   self.bleu_source = os.path.join(temp_dir, 'bleu_source')
   self.bleu_ref = os.path.join(temp_dir, 'bleu_ref')
   self.orig_policy = (
       tf.compat.v2.keras.mixed_precision.experimental.global_policy())
Example #3
 def _setup(self):
   tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
   if EstimatorCifar10BenchmarkTests.local_flags is None:
     cifar_main.define_cifar_flags()
     # Loads flags to get defaults to then override.
     flags.FLAGS(['foo'])
     saved_flag_values = flagsaver.save_flag_values()
     EstimatorCifar10BenchmarkTests.local_flags = saved_flag_values
     return
   flagsaver.restore_flag_values(EstimatorCifar10BenchmarkTests.local_flags)
Example #4
 def _setup(self):
   """Sets up and resets flags before each test."""
   logging.set_verbosity(logging.INFO)
   if PerfZeroBenchmark.local_flags is None:
     # Loads flags to get defaults to then override. List cannot be empty.
     flags.FLAGS(['foo'])
     saved_flag_values = flagsaver.save_flag_values()
     PerfZeroBenchmark.local_flags = saved_flag_values
   else:
     flagsaver.restore_flag_values(PerfZeroBenchmark.local_flags)
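All of the `_setup` variants in these examples key off a class-level `local_flags` attribute: the first call parses flags and snapshots the resulting defaults, and later calls restore that snapshot so every test starts from the same flag state. A minimal sketch of the surrounding class, where the base class and the benchmark method are hypothetical placeholders and only `local_flags`, `_setup`, and the flags/flagsaver calls mirror the example above:

from absl import flags
from absl import logging
from absl.testing import flagsaver

FLAGS = flags.FLAGS


class PerfZeroBenchmark(object):
  # Shared snapshot of parsed flag defaults, taken once per process.
  local_flags = None

  def _setup(self):
    """Sets up and resets flags before each test."""
    logging.set_verbosity(logging.INFO)
    if PerfZeroBenchmark.local_flags is None:
      # Loads flags to get defaults to then override. List cannot be empty.
      flags.FLAGS(['foo'])
      PerfZeroBenchmark.local_flags = flagsaver.save_flag_values()
    else:
      flagsaver.restore_flag_values(PerfZeroBenchmark.local_flags)

  def benchmark_defaults(self):
    # Hypothetical benchmark method: restore a clean flag state, then apply
    # per-benchmark overrides and run the model under test.
    self._setup()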
Example #5
 def _setup(self):
   """Sets up and resets flags before each test."""
   tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
   if KerasNCFBenchmarkBase.local_flags is None:
     # Loads flags to get defaults to then override. List cannot be empty.
     flags.FLAGS(['foo'])
     core.set_defaults(**self.default_flags)
     saved_flag_values = flagsaver.save_flag_values()
     KerasNCFBenchmarkBase.local_flags = saved_flag_values
   else:
     flagsaver.restore_flag_values(KerasNCFBenchmarkBase.local_flags)
Example #6
    def _setup(self):
        """Sets up and resets flags before each test."""
        self.timer_callback = benchmark_utils.BenchmarkTimerCallback()

        if DetectionBenchmarkBase.local_flags is None:
            # Loads flags to get defaults to then override. List cannot be empty.
            flags.FLAGS(['foo'])
            saved_flag_values = flagsaver.save_flag_values()
            DetectionBenchmarkBase.local_flags = saved_flag_values
        else:
            flagsaver.restore_flag_values(DetectionBenchmarkBase.local_flags)
Example #7
    def test_save_flag_value(self):
        # First save the flag values.
        saved_flag_values = flagsaver.save_flag_values()

        # Now mutate the flag's value field and check that it changed.
        FLAGS.flagsaver_test_flag0 = 'new value'
        self.assertEqual('new value', FLAGS.flagsaver_test_flag0)

        # Now restore the flag to its original value.
        flagsaver.restore_flag_values(saved_flag_values)
        self.assertEqual('unchanged0', FLAGS.flagsaver_test_flag0)
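The same save/restore pair is also available through `flagsaver.flagsaver`, which works as a context manager or decorator. A short sketch of the equivalent test, assuming a string flag defined the way absl's own flagsaver tests define `flagsaver_test_flag0`; the test class name is a placeholder:

from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver

flags.DEFINE_string('flagsaver_test_flag0', 'unchanged0', 'Flag used for testing.')
FLAGS = flags.FLAGS


class FlagSaverContextTest(absltest.TestCase):

  def test_context_manager_restores_value(self):
    with flagsaver.flagsaver(flagsaver_test_flag0='new value'):
      self.assertEqual('new value', FLAGS.flagsaver_test_flag0)
    # On exit the original value is restored, just as restore_flag_values() does.
    self.assertEqual('unchanged0', FLAGS.flagsaver_test_flag0)


if __name__ == '__main__':
  absltest.main()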
Example #8
 def _setup(self):
   """Sets up and resets flags before each test."""
   tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
   if KerasNCFBenchmarkBase.local_flags is None:
     # Loads flags to get defaults to then override. List cannot be empty.
     flags.FLAGS(['foo'])
     core.set_defaults(**self.default_flags)
     saved_flag_values = flagsaver.save_flag_values()
     KerasNCFBenchmarkBase.local_flags = saved_flag_values
   else:
     flagsaver.restore_flag_values(KerasNCFBenchmarkBase.local_flags)
Example #9
    def test_save_flag_default(self):
        # First save the flag.
        saved_flag_values = flagsaver.save_flag_values()

        # Now mutate the flag's default field and check that it changed.
        FLAGS.set_default('flagsaver_test_flag0', 'new_default')
        self.assertEqual('new_default', FLAGS['flagsaver_test_flag0'].default)

        # Now restore the flag's default field.
        flagsaver.restore_flag_values(saved_flag_values)
        self.assertEqual('unchanged0', FLAGS['flagsaver_test_flag0'].default)
Example #10
    def _setup(self):
        """Sets up and resets flags before each test."""
        tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
        self.timer_callback = BenchmarkTimerCallback()

        if BertBenchmarkBase.local_flags is None:
            # Loads flags to get defaults to then override. List cannot be empty.
            flags.FLAGS(['foo'])
            saved_flag_values = flagsaver.save_flag_values()
            BertBenchmarkBase.local_flags = saved_flag_values
        else:
            flagsaver.restore_flag_values(BertBenchmarkBase.local_flags)
Example #11
 def _setup(self):
     """Sets up and resets flags before each test."""
     logging.set_verbosity(logging.INFO)
     if NCFKerasBenchmarkBase.local_flags is None:
         ncf_common.define_ncf_flags()
         # Loads flags to get defaults to then override. List cannot be empty.
         flags.FLAGS(['foo'])
         core.set_defaults(**self.default_flags)
         saved_flag_values = flagsaver.save_flag_values()
         NCFKerasBenchmarkBase.local_flags = saved_flag_values
     else:
         flagsaver.restore_flag_values(NCFKerasBenchmarkBase.local_flags)
Example #12
 def _setup(self):
     """Setups up and resets flags before each test."""
     tf.logging.set_verbosity(tf.logging.DEBUG)
     if KerasCifar10BenchmarkTests.local_flags is None:
         keras_common.define_keras_flags()
         cifar_main.define_cifar_flags()
         # Loads flags to get defaults to then override. List cannot be empty.
         flags.FLAGS(['foo'])
         saved_flag_values = flagsaver.save_flag_values()
         KerasCifar10BenchmarkTests.local_flags = saved_flag_values
         return
     flagsaver.restore_flag_values(KerasCifar10BenchmarkTests.local_flags)
Example #13
 def _setup(self):
   """Sets up and resets flags before each test."""
   assert tf.version.VERSION.startswith('2.')
   tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
   if NCFKerasBenchmarkBase.local_flags is None:
     ncf_common.define_ncf_flags()
     # Loads flags to get defaults to then override. List cannot be empty.
     flags.FLAGS(['foo'])
     core.set_defaults(**self.default_flags)
     saved_flag_values = flagsaver.save_flag_values()
     NCFKerasBenchmarkBase.local_flags = saved_flag_values
   else:
     flagsaver.restore_flag_values(NCFKerasBenchmarkBase.local_flags)
Example #14
    def test_run(self):
        saved_flag_values = flagsaver.save_flag_values()
        train_lib.tfm_flags.define_flags()
        FLAGS.mode = 'train'
        FLAGS.model_dir = self._model_dir
        FLAGS.experiment = 'seg_unet3d_test'
        logging.info('Test pipeline correctness.')

        params_override = json.dumps({
            'runtime': {
                'mixed_precision_dtype': 'float32',
            },
            'trainer': {
                'train_steps': 1,
                'validation_steps': 1,
            },
            'task': {
                'model': {
                    'backbone': {
                        'unet_3d': {
                            'model_id': 4,
                        },
                    },
                    'decoder': {
                        'unet_3d_decoder': {
                            'model_id': 4,
                        },
                    },
                },
                'train_data': {
                    'input_path': self._data_path,
                    'file_type': 'tfrecord',
                    'global_batch_size': 2,
                },
                'validation_data': {
                    'input_path': self._data_path,
                    'file_type': 'tfrecord',
                    'global_batch_size': 2,
                }
            }
        })
        FLAGS.params_override = params_override

        train_lib.main('unused_args')

        FLAGS.mode = 'eval'

        with train_lib.gin.unlock_config():
            train_lib.main('unused_args')

        flagsaver.restore_flag_values(saved_flag_values)
Example #15
    def test_run(self):
        saved_flag_values = flagsaver.save_flag_values()
        train_lib.tfm_flags.define_flags()
        FLAGS.mode = 'train'
        FLAGS.model_dir = self._model_dir
        FLAGS.experiment = 'assemblenet50_kinetics600'
        logging.info('Test pipeline correctness.')
        num_frames = 4

        params_override = json.dumps({
            'runtime': {
                'mixed_precision_dtype': 'float32',
            },
            'trainer': {
                'train_steps': 1,
                'validation_steps': 1,
            },
            'task': {
                'model': {
                    'backbone': {
                        'assemblenet': {
                            'model_id': '26',
                            'num_frames': num_frames,
                        },
                    },
                },
                'train_data': {
                    'input_path': self._data_path,
                    'file_type': 'tfrecord',
                    'feature_shape': [num_frames, 32, 32, 3],
                    'global_batch_size': 2,
                },
                'validation_data': {
                    'input_path': self._data_path,
                    'file_type': 'tfrecord',
                    'global_batch_size': 2,
                    'feature_shape': [num_frames * 2, 32, 32, 3],
                }
            }
        })
        FLAGS.params_override = params_override

        train_lib.main('unused_args')

        FLAGS.mode = 'eval'

        with train_lib.gin.unlock_config():
            train_lib.main('unused_args')

        flagsaver.restore_flag_values(saved_flag_values)
Example #16
    def test_train_and_eval(self, use_segment_level_labels):
        saved_flag_values = flagsaver.save_flag_values()
        train_lib.tfm_flags.define_flags()
        FLAGS.mode = 'train'
        FLAGS.model_dir = self._model_dir
        FLAGS.experiment = 'yt8m_experiment'
        FLAGS.tpu = ''

        params_override = json.dumps({
            'runtime': {
                'distribution_strategy': 'mirrored',
                'mixed_precision_dtype': 'float32',
            },
            'trainer': {
                'train_steps': 1,
                'validation_steps': 1,
            },
            'task': {
                'model': {
                    'cluster_size': 16,
                    'hidden_size': 16,
                    'use_context_gate_cluster_layer': True,
                    'agg_model': {
                        'use_input_context_gate': True,
                        'use_output_context_gate': True,
                    },
                },
                'train_data': {
                    'input_path': self._data_path,
                    'global_batch_size': 4,
                },
                'validation_data': {
                    'input_path': self._data_path,
                    'segment_labels': use_segment_level_labels,
                    'global_batch_size': 4,
                }
            }
        })
        FLAGS.params_override = params_override

        with train_lib.train.gin.unlock_config():
            train_lib.train.main('unused_args')

        FLAGS.mode = 'eval'

        with train_lib.train.gin.unlock_config():
            train_lib.train.main('unused_args')

        flagsaver.restore_flag_values(saved_flag_values)
Example #17
    def test_restore_after_parse(self):
        # First save the flag.
        saved_flag_values = flagsaver.save_flag_values()

        # Sanity check (would fail if called with --flagsaver_test_flag0).
        self.assertEqual(0, FLAGS['flagsaver_test_flag0'].present)
        # Now populate the flag and check that it changed.
        FLAGS['flagsaver_test_flag0'].parse('new value')
        self.assertEqual('new value', FLAGS['flagsaver_test_flag0'].value)
        self.assertEqual(1, FLAGS['flagsaver_test_flag0'].present)

        # Now restore the flag to its original value.
        flagsaver.restore_flag_values(saved_flag_values)
        self.assertEqual('unchanged0', FLAGS['flagsaver_test_flag0'].value)
        self.assertEqual(0, FLAGS['flagsaver_test_flag0'].present)
Example #18
 def _setup(self):
   """Sets up and resets flags before each test."""
   tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
   if EstimatorBenchmark.local_flags is None:
     for flag_method in self.flag_methods:
       flag_method()
     # Loads flags to get defaults to then override. List cannot be empty.
     flags.FLAGS(['foo'])
     # Overrides flag values with defaults for the class of tests.
     for k, v in self.default_flags.items():
       setattr(FLAGS, k, v)
     saved_flag_values = flagsaver.save_flag_values()
     EstimatorBenchmark.local_flags = saved_flag_values
   else:
     flagsaver.restore_flag_values(EstimatorBenchmark.local_flags)
Example #19
 def _setup(self):
   """Sets up and resets flags before each test."""
   tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
   if KerasBenchmark.local_flags is None:
     for flag_method in self.flag_methods:
       flag_method()
     # Loads flags to get defaults to then override. List cannot be empty.
     flags.FLAGS(['foo'])
     # Overrides flag values with defaults for the class of tests.
     for k, v in self.default_flags.items():
       setattr(FLAGS, k, v)
     saved_flag_values = flagsaver.save_flag_values()
     KerasBenchmark.local_flags = saved_flag_values
   else:
     flagsaver.restore_flag_values(KerasBenchmark.local_flags)
Example #20
    def test_train_and_evaluation_pipeline_runs(self):
        saved_flag_values = flagsaver.save_flag_values()
        train_lib.tfm_flags.define_flags()
        FLAGS.mode = 'train'
        FLAGS.model_dir = self._model_dir
        FLAGS.experiment = 'movinet_kinetics600'
        logging.info('Test pipeline correctness.')
        num_frames = 4

        # Test model training pipeline runs.
        params_override = json.dumps({
            'runtime': {
                'distribution_strategy': 'mirrored',
                'mixed_precision_dtype': 'float32',
            },
            'trainer': {
                'train_steps': 2,
                'validation_steps': 2,
            },
            'task': {
                'train_data': {
                    'input_path': self._data_path,
                    'file_type': 'tfrecord',
                    'feature_shape': [num_frames, 32, 32, 3],
                    'global_batch_size': 2,
                },
                'validation_data': {
                    'input_path': self._data_path,
                    'file_type': 'tfrecord',
                    'global_batch_size': 2,
                    'feature_shape': [num_frames * 2, 32, 32, 3],
                }
            }
        })
        FLAGS.params_override = params_override
        train_lib.main('unused_args')

        # Test model evaluation pipeline runs on newly produced checkpoint.
        FLAGS.mode = 'eval'
        with train_lib.gin.unlock_config():
            train_lib.main('unused_args')

        flagsaver.restore_flag_values(saved_flag_values)
Example #21
 def tearDown(self):
     super(RegionProcessorTest, self).tearDown()
     flagsaver.restore_flag_values(self._saved_flags)
Example #22
 def tearDown(self):
   super(BeyondcorpTest, self).tearDown()
   flagsaver.restore_flag_values(self.__saved_flags)
Example #23
 def tearDown(self):
     flagsaver.restore_flag_values(self.saved_flag_values)
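Examples #21, #22 and #23 show only the `tearDown` half of the pattern; the snapshot they restore is normally taken in a matching `setUp`. A minimal sketch with a hypothetical test class name:

from absl.testing import absltest
from absl.testing import flagsaver


class FlagRestoringTest(absltest.TestCase):

  def setUp(self):
    super().setUp()
    # Snapshot flag state so each test may mutate flags freely.
    self.saved_flag_values = flagsaver.save_flag_values()

  def tearDown(self):
    # Undo any flag changes the test made.
    flagsaver.restore_flag_values(self.saved_flag_values)
    super().tearDown()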