def _setup(self):
  """Sets up and resets flags before each test."""
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
  if EstimatorCifar10BenchmarkTests.local_flags is None:
    cifar_main.define_cifar_flags()
    # Loads flags to get defaults to then override.
    flags.FLAGS(['foo'])
    saved_flag_values = flagsaver.save_flag_values()
    EstimatorCifar10BenchmarkTests.local_flags = saved_flag_values
    return
  flagsaver.restore_flag_values(EstimatorCifar10BenchmarkTests.local_flags)
def test_save_flag_default(self):
  # First save the flag.
  saved_flag_values = flagsaver.save_flag_values()
  # Now mutate the flag's default field and check that it changed.
  FLAGS.set_default('flagsaver_test_flag0', 'new_default')
  self.assertEqual('new_default', FLAGS['flagsaver_test_flag0'].default)
  # Now restore the flag's default field.
  flagsaver.restore_flag_values(saved_flag_values)
  self.assertEqual('unchanged0', FLAGS['flagsaver_test_flag0'].default)
def _setup(self): """Sets up and resets flags before each test.""" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if KerasNCFBenchmarkBase.local_flags is None: # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) core.set_defaults(**self.default_flags) saved_flag_values = flagsaver.save_flag_values() KerasNCFBenchmarkBase.local_flags = saved_flag_values else: flagsaver.restore_flag_values(KerasNCFBenchmarkBase.local_flags)
def _setup(self): """Sets up and resets flags before each test.""" self.timer_callback = benchmark_utils.BenchmarkTimerCallback() if DetectionBenchmarkBase.local_flags is None: # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) saved_flag_values = flagsaver.save_flag_values() DetectionBenchmarkBase.local_flags = saved_flag_values else: flagsaver.restore_flag_values(DetectionBenchmarkBase.local_flags)
def _setup(self): """Sets up and resets flags before each test.""" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if KerasNCFBenchmarkBase.local_flags is None: # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) core.set_defaults(**self.default_flags) saved_flag_values = flagsaver.save_flag_values() KerasNCFBenchmarkBase.local_flags = saved_flag_values else: flagsaver.restore_flag_values(KerasNCFBenchmarkBase.local_flags)
def test_save_flag_value(self):
  # First save the flag values.
  saved_flag_values = flagsaver.save_flag_values()
  # Now mutate the flag's value field and check that it changed.
  FLAGS.flagsaver_test_flag0 = 'new value'
  self.assertEqual('new value', FLAGS.flagsaver_test_flag0)
  # Now restore the flag to its original value.
  flagsaver.restore_flag_values(saved_flag_values)
  self.assertEqual('unchanged0', FLAGS.flagsaver_test_flag0)
def _setup(self): """Sets up and resets flags before each test.""" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) self.timer_callback = BenchmarkTimerCallback() if BertBenchmarkBase.local_flags is None: # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) saved_flag_values = flagsaver.save_flag_values() BertBenchmarkBase.local_flags = saved_flag_values else: flagsaver.restore_flag_values(BertBenchmarkBase.local_flags)
def _setup(self): """Sets up and resets flags before each test.""" logging.set_verbosity(logging.INFO) if NCFKerasBenchmarkBase.local_flags is None: ncf_common.define_ncf_flags() # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) core.set_defaults(**self.default_flags) saved_flag_values = flagsaver.save_flag_values() NCFKerasBenchmarkBase.local_flags = saved_flag_values else: flagsaver.restore_flag_values(NCFKerasBenchmarkBase.local_flags)
def _setup(self): """Setups up and resets flags before each test.""" tf.logging.set_verbosity(tf.logging.DEBUG) if KerasCifar10BenchmarkTests.local_flags is None: keras_common.define_keras_flags() cifar_main.define_cifar_flags() # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) saved_flag_values = flagsaver.save_flag_values() KerasCifar10BenchmarkTests.local_flags = saved_flag_values return flagsaver.restore_flag_values(KerasCifar10BenchmarkTests.local_flags)
def setUp(self):
  super(BeyondcorpTest, self).setUp()
  self.__saved_flags = flagsaver.save_flag_values()
  mock_wmi = mock.patch.object(
      beyondcorp.wmi_query, 'WMIQuery', autospec=True)
  self.addCleanup(mock_wmi.stop)
  self.mock_wmi = mock_wmi.start()
  self.filesystem = fake_filesystem.FakeFilesystem()
  self.filesystem.create_file(r'C:\seed.json', contents=_TEST_SEED)
  self.filesystem.create_file(_TEST_WIM_PATH, contents=_TEST_WIM)
  beyondcorp.os = fake_filesystem.FakeOsModule(self.filesystem)
  beyondcorp.open = fake_filesystem.FakeFileOpen(self.filesystem)
  self.beyondcorp = beyondcorp.BeyondCorp()
def _setup(self): """Sets up and resets flags before each test.""" assert tf.version.VERSION.startswith('2.') tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if NCFKerasBenchmarkBase.local_flags is None: ncf_common.define_ncf_flags() # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) core.set_defaults(**self.default_flags) saved_flag_values = flagsaver.save_flag_values() NCFKerasBenchmarkBase.local_flags = saved_flag_values else: flagsaver.restore_flag_values(NCFKerasBenchmarkBase.local_flags)
def setUp(self):
  super(PkbCommonTestCase, self).setUp()
  saved_flag_values = flagsaver.save_flag_values()
  self.addCleanup(flagsaver.restore_flag_values, saved_flag_values)
  # Functions that create a benchmark_spec.BenchmarkSpec attach the
  # benchmark spec to the running thread in __init__(). If this isn't
  # cleaned up, it creates problems for tests run using unittest.
  self.addCleanup(context.SetThreadBenchmarkSpec, None)
  p = mock.patch(
      util.__name__ + '.GetDefaultProject', return_value='test_project')
  self.enter_context(p)
def test_run(self):
  saved_flag_values = flagsaver.save_flag_values()
  train_lib.tfm_flags.define_flags()
  FLAGS.mode = 'train'
  FLAGS.model_dir = self._model_dir
  FLAGS.experiment = 'seg_unet3d_test'
  logging.info('Test pipeline correctness.')
  params_override = json.dumps({
      'runtime': {
          'mixed_precision_dtype': 'float32',
      },
      'trainer': {
          'train_steps': 1,
          'validation_steps': 1,
      },
      'task': {
          'model': {
              'backbone': {
                  'unet_3d': {
                      'model_id': 4,
                  },
              },
              'decoder': {
                  'unet_3d_decoder': {
                      'model_id': 4,
                  },
              },
          },
          'train_data': {
              'input_path': self._data_path,
              'file_type': 'tfrecord',
              'global_batch_size': 2,
          },
          'validation_data': {
              'input_path': self._data_path,
              'file_type': 'tfrecord',
              'global_batch_size': 2,
          },
      },
  })
  FLAGS.params_override = params_override
  train_lib.main('unused_args')
  FLAGS.mode = 'eval'
  with train_lib.gin.unlock_config():
    train_lib.main('unused_args')
  flagsaver.restore_flag_values(saved_flag_values)
def setUp(self):
  # Need to initialize flags since unittest doesn't do this for us.
  FLAGS(['aclgen_test.py'])
  self.saved_flag_values = flagsaver.save_flag_values()
  self.test_subdirectory = tempfile.mkdtemp()
  self.def_dir = os.path.join(self.test_subdirectory, 'def')
  self.pol_dir = os.path.join(self.test_subdirectory, 'policies')
  shutil.rmtree(self.test_subdirectory, ignore_errors=True)
  os.mkdir(self.test_subdirectory)
  shutil.copytree('def', self.def_dir)
  shutil.copytree('policies', self.pol_dir)
  FLAGS.base_directory = self.pol_dir
  FLAGS.definitions_directory = self.def_dir
  FLAGS.output_directory = self.test_subdirectory
def test_run(self):
  saved_flag_values = flagsaver.save_flag_values()
  train_lib.tfm_flags.define_flags()
  FLAGS.mode = 'train'
  FLAGS.model_dir = self._model_dir
  FLAGS.experiment = 'assemblenet50_kinetics600'
  logging.info('Test pipeline correctness.')
  num_frames = 4
  params_override = json.dumps({
      'runtime': {
          'mixed_precision_dtype': 'float32',
      },
      'trainer': {
          'train_steps': 1,
          'validation_steps': 1,
      },
      'task': {
          'model': {
              'backbone': {
                  'assemblenet': {
                      'model_id': '26',
                      'num_frames': num_frames,
                  },
              },
          },
          'train_data': {
              'input_path': self._data_path,
              'file_type': 'tfrecord',
              'feature_shape': [num_frames, 32, 32, 3],
              'global_batch_size': 2,
          },
          'validation_data': {
              'input_path': self._data_path,
              'file_type': 'tfrecord',
              'global_batch_size': 2,
              'feature_shape': [num_frames * 2, 32, 32, 3],
          },
      },
  })
  FLAGS.params_override = params_override
  train_lib.main('unused_args')
  FLAGS.mode = 'eval'
  with train_lib.gin.unlock_config():
    train_lib.main('unused_args')
  flagsaver.restore_flag_values(saved_flag_values)
def test_train_and_eval(self, use_segment_level_labels):
  saved_flag_values = flagsaver.save_flag_values()
  train_lib.tfm_flags.define_flags()
  FLAGS.mode = 'train'
  FLAGS.model_dir = self._model_dir
  FLAGS.experiment = 'yt8m_experiment'
  FLAGS.tpu = ''
  params_override = json.dumps({
      'runtime': {
          'distribution_strategy': 'mirrored',
          'mixed_precision_dtype': 'float32',
      },
      'trainer': {
          'train_steps': 1,
          'validation_steps': 1,
      },
      'task': {
          'model': {
              'cluster_size': 16,
              'hidden_size': 16,
              'use_context_gate_cluster_layer': True,
              'agg_model': {
                  'use_input_context_gate': True,
                  'use_output_context_gate': True,
              },
          },
          'train_data': {
              'input_path': self._data_path,
              'global_batch_size': 4,
          },
          'validation_data': {
              'input_path': self._data_path,
              'segment_labels': use_segment_level_labels,
              'global_batch_size': 4,
          },
      },
  })
  FLAGS.params_override = params_override
  with train_lib.train.gin.unlock_config():
    train_lib.train.main('unused_args')
  FLAGS.mode = 'eval'
  with train_lib.train.gin.unlock_config():
    train_lib.train.main('unused_args')
  flagsaver.restore_flag_values(saved_flag_values)
def _setup(self): """Sets up and resets flags before each test.""" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if KerasBenchmark.local_flags is None: for flag_method in self.flag_methods: flag_method() # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) # Overrides flag values with defaults for the class of tests. for k, v in self.default_flags.items(): setattr(FLAGS, k, v) saved_flag_values = flagsaver.save_flag_values() KerasBenchmark.local_flags = saved_flag_values else: flagsaver.restore_flag_values(KerasBenchmark.local_flags)
def _setup(self): """Sets up and resets flags before each test.""" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG) if EstimatorBenchmark.local_flags is None: for flag_method in self.flag_methods: flag_method() # Loads flags to get defaults to then override. List cannot be empty. flags.FLAGS(['foo']) # Overrides flag values with defaults for the class of tests. for k, v in self.default_flags.items(): setattr(FLAGS, k, v) saved_flag_values = flagsaver.save_flag_values() EstimatorBenchmark.local_flags = saved_flag_values else: flagsaver.restore_flag_values(EstimatorBenchmark.local_flags)
def test_restore_after_parse(self):
  # First save the flag.
  saved_flag_values = flagsaver.save_flag_values()
  # Sanity check (would fail if called with --flagsaver_test_flag0).
  self.assertEqual(0, FLAGS['flagsaver_test_flag0'].present)
  # Now populate the flag and check that it changed.
  FLAGS['flagsaver_test_flag0'].parse('new value')
  self.assertEqual('new value', FLAGS['flagsaver_test_flag0'].value)
  self.assertEqual(1, FLAGS['flagsaver_test_flag0'].present)
  # Now restore the flag to its original value.
  flagsaver.restore_flag_values(saved_flag_values)
  self.assertEqual('unchanged0', FLAGS['flagsaver_test_flag0'].value)
  self.assertEqual(0, FLAGS['flagsaver_test_flag0'].present)
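# The flagsaver self-tests above drive save_flag_values() and
# restore_flag_values() by hand. For comparison, absl's flagsaver also works
# as a decorator or as a context manager, which restores flags even when the
# test body raises. A minimal self-contained sketch; the flag name `my_flag`
# and the test class are invented for illustration:
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver

flags.DEFINE_string('my_flag', 'unchanged', 'Example flag.')
FLAGS = flags.FLAGS


class FlagsaverStylesTest(absltest.TestCase):

  @flagsaver.flagsaver(my_flag='decorator override')
  def test_decorator_style(self):
    # The override is active only for the duration of this test method.
    self.assertEqual('decorator override', FLAGS.my_flag)

  def test_context_manager_style(self):
    with flagsaver.flagsaver(my_flag='block override'):
      self.assertEqual('block override', FLAGS.my_flag)
    # Restored automatically when the block exits.
    self.assertEqual('unchanged', FLAGS.my_flag)


if __name__ == '__main__':
  absltest.main()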
def main(argv):
  if len(argv) > 6:
    raise app.UsageError('Too many command-line arguments.')
  logging.info('Starting wildfire ee export job...')
  logging.info('bucket=%s', FLAGS.bucket)
  logging.info('folder=%s', FLAGS.folder)
  logging.info('start_date=%s', FLAGS.start_date)
  logging.info('end_date=%s', FLAGS.end_date)
  logging.info('prefix=%s', FLAGS.prefix)
  logging.info('eval_split_ratio=%f', FLAGS.eval_split_ratio)
  ee.Initialize()
  logging.info('ee authenticated!')
  start_date = ee.Date(FLAGS.start_date)
  end_date = ee.Date(FLAGS.end_date)
  export_ee_data.export_ml_datasets(
      bucket=FLAGS.bucket,
      folder=FLAGS.folder,
      start_date=start_date,
      end_date=end_date,
      prefix=FLAGS.prefix,
      kernel_size=FLAGS.kernel_size,
      sampling_scale=FLAGS.sampling_scale,
      eval_split_ratio=FLAGS.eval_split_ratio,
      num_samples_per_file=FLAGS.num_samples_per_file,
  )
  if FLAGS.config_dir:
    saved_flag_values = flagsaver.save_flag_values()
    # Save the names and values of the flags as a json file in a local folder.
    # Note that this includes more flags than just those defined in this file,
    # since FLAGS includes many other flags, including default flags.
    saved_flag_values = {
        key: flag_dict['_value']
        for key, flag_dict in saved_flag_values.items()
    }
    saved_flag_path = os.path.join(FLAGS.config_dir, 'export_flags.json')
    json_str = json.dumps(saved_flag_values, indent=2) + '\n'
    with f_open(saved_flag_path, 'w') as f:
      f.write(json_str)
  logging.info(
      'Ending wildfire ee export job! '
      'Note that the export job may continue in the background by EE.')
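# The export job above serializes flags by reading the private '_value' key
# out of the dicts returned by save_flag_values(), which is an implementation
# detail of flagsaver. When only name -> value pairs are needed, absl's public
# FLAGS.flag_values_dict() returns them directly. A minimal sketch;
# dump_flags_to_json is a hypothetical helper, and flag values that are not
# JSON-serializable (e.g. enums) would still need special handling:
import json

from absl import flags

FLAGS = flags.FLAGS


def dump_flags_to_json(path):
  """Writes current flag values to `path` without touching flagsaver internals."""
  with open(path, 'w') as f:
    json.dump(FLAGS.flag_values_dict(), f, indent=2)
    f.write('\n')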
def test_train_and_evaluation_pipeline_runs(self):
  saved_flag_values = flagsaver.save_flag_values()
  train_lib.tfm_flags.define_flags()
  FLAGS.mode = 'train'
  FLAGS.model_dir = self._model_dir
  FLAGS.experiment = 'movinet_kinetics600'
  logging.info('Test pipeline correctness.')
  num_frames = 4
  # Test model training pipeline runs.
  params_override = json.dumps({
      'runtime': {
          'distribution_strategy': 'mirrored',
          'mixed_precision_dtype': 'float32',
      },
      'trainer': {
          'train_steps': 2,
          'validation_steps': 2,
      },
      'task': {
          'train_data': {
              'input_path': self._data_path,
              'file_type': 'tfrecord',
              'feature_shape': [num_frames, 32, 32, 3],
              'global_batch_size': 2,
          },
          'validation_data': {
              'input_path': self._data_path,
              'file_type': 'tfrecord',
              'global_batch_size': 2,
              'feature_shape': [num_frames * 2, 32, 32, 3],
          },
      },
  })
  FLAGS.params_override = params_override
  train_lib.main('unused_args')
  # Test model evaluation pipeline runs on newly produced checkpoint.
  FLAGS.mode = 'eval'
  with train_lib.gin.unlock_config():
    train_lib.main('unused_args')
  flagsaver.restore_flag_values(saved_flag_values)
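# The train-and-eval tests above only reach restore_flag_values() on the
# success path; if main() raises, the mutated flags leak into whatever test
# runs next. A hedged sketch of the same shape with a try/finally guard,
# reusing the names (train_lib, FLAGS, flagsaver) from the snippets above;
# test_run_with_guaranteed_restore is a hypothetical method:
def test_run_with_guaranteed_restore(self):
  saved_flag_values = flagsaver.save_flag_values()
  try:
    train_lib.tfm_flags.define_flags()
    FLAGS.mode = 'train'
    # ... set model_dir, experiment, params_override as in the tests above ...
    train_lib.main('unused_args')
  finally:
    # Runs even when main() raises, so later tests see clean flag state.
    flagsaver.restore_flag_values(saved_flag_values)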
def setUp(self):
  super(RegionProcessorTest, self).setUp()
  self._saved_flags = flagsaver.save_flag_values()
  self.region = ranges.parse_literal('chr20:10,000,000-10,000,100')
  FLAGS.reads = ''
  self.options = make_examples.default_options(add_flags=False)
  self.options.reference_filename = testdata.CHR20_FASTA
  main_sample = self.options.sample_options[0]
  if not main_sample.reads_filenames:
    main_sample.reads_filenames.append(testdata.CHR20_BAM)
  main_sample.variant_caller_options.sample_name = 'sample_id'
  main_sample.name = 'sample_id'
  self.options.truth_variants_filename = testdata.TRUTH_VARIANTS_VCF
  self.options.mode = deepvariant_pb2.MakeExamplesOptions.TRAINING
  self.processor = make_examples_core.RegionProcessor(self.options)
  self.ref_reader = fasta.IndexedFastaReader(self.options.reference_filename)
  self.mock_init = self.add_mock('initialize')
  for sample in self.processor.samples:
    sample.in_memory_sam_reader = mock.Mock()
  self.default_shape = [5, 5, 7]
  self.default_format = 'raw'
def setUp(self):
  self.saved_flag_values = flagsaver.save_flag_values()
  self.patches = []
  vm_prefix = linux_virtual_machine.__name__ + '.BaseLinuxMixin'
  self.patches.append(mock.patch(vm_prefix + '.FormatDisk'))
  self.patches.append(mock.patch(vm_prefix + '.MountDisk'))
  self.patches.append(
      mock.patch(util.__name__ + '.GetDefaultProject',
                 return_value='test_project'))
  # Patch subprocess.Popen to make sure we don't issue any commands to spin
  # up resources.
  self.patches.append(mock.patch('subprocess.Popen'))
  self.patches.append(
      mock.patch(vm_util.__name__ + '.GetTempDir', return_value='/tmp/dir'))
  self._PatchCloudSpecific()
  for p in self.patches:
    p.start()
    self.addCleanup(p.stop)
  # We need the disk class mocks to return new mocks each time they are
  # called. Otherwise all "disks" instantiated will be the same object.
  self._GetDiskClass().side_effect = (
      lambda *args, **kwargs: mock.MagicMock(is_striped=False))
  # VM creation depends on there being a BenchmarkSpec.
  config_spec = benchmark_config_spec.BenchmarkConfigSpec(
      _BENCHMARK_NAME, flag_values=FLAGS, vm_groups={})
  self.spec = benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec,
                                           _BENCHMARK_UID)
  self.addCleanup(context.SetThreadBenchmarkSpec, None)
  self.addCleanup(flagsaver.restore_flag_values, self.saved_flag_values)
def setUp(self):
  self.saved_flag_values = flagsaver.save_flag_values()
  self.patches = []
  vm_prefix = linux_virtual_machine.__name__ + '.BaseLinuxMixin'
  self.patches.append(mock.patch(vm_prefix + '.FormatDisk'))
  self.patches.append(mock.patch(vm_prefix + '.MountDisk'))
  self.patches.append(mock.patch(util.__name__ + '.GetDefaultProject'))
  # Patch subprocess.Popen to make sure we don't issue any commands to spin
  # up resources.
  self.patches.append(mock.patch('subprocess.Popen'))
  self.patches.append(
      mock.patch(vm_util.__name__ + '.GetTempDir', return_value='/tmp/dir'))
  self._PatchCloudSpecific()
  for p in self.patches:
    p.start()
    self.addCleanup(p.stop)
  # We need the disk class mocks to return new mocks each time they are
  # called. Otherwise all "disks" instantiated will be the same object.
  self._GetDiskClass().side_effect = (
      lambda *args, **kwargs: mock.MagicMock(is_striped=False))
  # VM creation depends on there being a BenchmarkSpec.
  config_spec = benchmark_config_spec.BenchmarkConfigSpec(
      _BENCHMARK_NAME, flag_values=FLAGS, vm_groups={})
  self.spec = benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec,
                                           _BENCHMARK_UID)
  self.addCleanup(context.SetThreadBenchmarkSpec, None)
  self.addCleanup(flagsaver.restore_flag_values, self.saved_flag_values)
def setUp(self):
  self.saved_flag_values = flagsaver.save_flag_values()
def setUp(self):
  super(PkbCommonTestCase, self).setUp()
  saved_flag_values = flagsaver.save_flag_values()
  self.addCleanup(flagsaver.restore_flag_values, saved_flag_values)
def setUp(self):
  saved_flag_values = flagsaver.save_flag_values()
  self.addCleanup(flagsaver.restore_flag_values, saved_flag_values)
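# The last few setUp snippets show the idiomatic pairing: snapshot flags once
# in setUp and let addCleanup restore them whether the test passes or fails.
# A minimal self-contained sketch of that pattern; the `retries` flag and the
# test class are invented for illustration:
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver

flags.DEFINE_integer('retries', 3, 'Example flag.')
FLAGS = flags.FLAGS


class RetriesTest(absltest.TestCase):

  def setUp(self):
    super().setUp()
    saved_flag_values = flagsaver.save_flag_values()
    self.addCleanup(flagsaver.restore_flag_values, saved_flag_values)

  def test_can_mutate_freely(self):
    FLAGS.retries = 7  # Restored automatically after this test.
    self.assertEqual(7, FLAGS.retries)


if __name__ == '__main__':
  absltest.main()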