def main(unused_argv):
  # Necessary to use the tfds loader.
  tf.enable_v2_behavior()

  if jax.process_count() > 1:
    # TODO(ankugarg): Add support for multihost inference.
    raise NotImplementedError(
        'BLEU eval does not support multihost inference.')

  rng = jax.random.PRNGKey(FLAGS.seed)

  mt_eval_config = json.loads(FLAGS.mt_eval_config)

  if FLAGS.experiment_config_filename:
    with tf.io.gfile.GFile(FLAGS.experiment_config_filename) as f:
      experiment_config = json.load(f)
    if jax.process_index() == 0:
      logging.info('experiment_config: %r', experiment_config)
    dataset_name = experiment_config['dataset']
    model_name = experiment_config['model']
  else:
    assert FLAGS.dataset and FLAGS.model
    dataset_name = FLAGS.dataset
    model_name = FLAGS.model

  if jax.process_index() == 0:
    logging.info('argv:\n%s', ' '.join(sys.argv))
    logging.info('device_count: %d', jax.device_count())
    logging.info('num_hosts : %d', jax.process_count())
    logging.info('host_id : %d', jax.process_index())

  model_class = models.get_model(model_name)
  dataset_builder = datasets.get_dataset(dataset_name)
  dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)

  hparam_overrides = None
  if FLAGS.hparam_overrides:
    if isinstance(FLAGS.hparam_overrides, str):
      hparam_overrides = json.loads(FLAGS.hparam_overrides)

  merged_hps = hyperparameters.build_hparams(
      model_name=model_name,
      initializer_name=experiment_config['initializer'],
      dataset_name=dataset_name,
      hparam_file=FLAGS.trial_hparams_filename,
      hparam_overrides=hparam_overrides)

  if jax.process_index() == 0:
    logging.info('Merged hps are: %s', json.dumps(merged_hps.to_json()))

  evaluator = bleu_evaluator.BLEUEvaluator(FLAGS.checkpoint_dir, merged_hps,
                                           rng, model_class, dataset_builder,
                                           dataset_meta_data, mt_eval_config)
  evaluator.translate_and_calculate_bleu()
def main(unused_argv):
  # Necessary to use the tfds imagenet loader.
  tf.enable_v2_behavior()

  rng = jax.random.PRNGKey(FLAGS.seed)

  if FLAGS.hessian_eval_config:
    hessian_eval_config = json.loads(FLAGS.hessian_eval_config)
  else:
    hessian_eval_config = hessian_eval.DEFAULT_EVAL_CONFIG

  if FLAGS.experiment_config_filename:
    with tf.io.gfile.GFile(FLAGS.experiment_config_filename, 'r') as f:
      experiment_config = json.load(f)
    if jax.process_index() == 0:
      logging.info('experiment_config: %r', experiment_config)
    dataset_name = experiment_config['dataset']
    model_name = experiment_config['model']
  else:
    assert FLAGS.dataset and FLAGS.model
    dataset_name = FLAGS.dataset
    model_name = FLAGS.model

  if jax.process_index() == 0:
    logging.info('argv:\n%s', ' '.join(sys.argv))
    logging.info('device_count: %d', jax.device_count())
    logging.info('num_hosts : %d', jax.process_count())
    logging.info('host_id : %d', jax.process_index())

  model = models.get_model(model_name)
  dataset_builder = datasets.get_dataset(dataset_name)
  dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)

  with tf.io.gfile.GFile(FLAGS.trial_hparams_filename, 'r') as f:
    hps = config_dict.ConfigDict(json.load(f))

  if FLAGS.hparam_overrides:
    if isinstance(FLAGS.hparam_overrides, str):
      hparam_overrides = json.loads(FLAGS.hparam_overrides)
    hps.update_from_flattened_dict(hparam_overrides)

  run_lanczos.eval_checkpoints(
      FLAGS.checkpoint_dir,
      hps,
      rng,
      FLAGS.eval_num_batches,
      model,
      dataset_builder,
      dataset_meta_data,
      hessian_eval_config,
      FLAGS.min_global_step,
      FLAGS.max_global_step)
def _get_dataset(self, hps, rng):
  """Sets up the dataset builder for the callback."""
  hparams_dict = hps.to_dict()
  hparams_dict.update(self.callback_config)
  hparams = config_dict.ConfigDict(hparams_dict)

  dataset_builder = datasets.get_dataset(self.callback_config['dataset_name'])
  dataset = dataset_builder(
      rng,
      hparams.batch_size,
      eval_batch_size=self.callback_config['eval_batch_size'],
      hps=hparams)
  return dataset
def main(unused_argv):
  if jax.process_index() == 0:
    logging.info('argv:\n%s', ' '.join(sys.argv))
    logging.info('device_count: %d', jax.device_count())
    logging.info('num_hosts : %d', jax.process_count())
    logging.info('host_id : %d', jax.process_index())

  if FLAGS.batch_size is None or FLAGS.batch_size <= 0:
    raise ValueError(
        'FLAGS.batch_size is invalid; expected a positive integer.')
  if FLAGS.dataset is None:
    raise ValueError(
        'FLAGS.dataset is invalid; expected a non-empty dataset name.')

  batch_size = FLAGS.batch_size
  num_batches = FLAGS.num_batches
  dataset_name = FLAGS.dataset
  model_name = FLAGS.model
  initializer_name = 'noop'

  hparam_overrides = {
      'batch_size': batch_size,
  }
  hps = hyperparameters.build_hparams(
      model_name=model_name,
      initializer_name=initializer_name,
      dataset_name=dataset_name,
      hparam_file=None,
      hparam_overrides=hparam_overrides)

  rng = jax.random.PRNGKey(0)
  rng, data_rng = jax.random.split(rng)

  dataset = datasets.get_dataset(FLAGS.dataset)(data_rng, batch_size,
                                                batch_size, hps)

  train_iter = dataset.train_iterator_fn()
  for i in range(num_batches):
    batch = next(train_iter)
    logging.info('train batch_num = %d, batch = %r', i, batch)

  for batch in dataset.valid_epoch(num_batches):
    logging.info('validation batch = %r', batch)
def _get_dataset(shuffle_seed, additional_hps=None):
  """Loads the ogbg-molpcba dataset using mock data."""
  with tfds.testing.mock_data(as_dataset_fn=_as_dataset):
    ds = 'ogbg_molpcba'
    dataset_builder = get_dataset(ds)
    hps_dict = get_dataset_hparams(ds).to_dict()
    if additional_hps is not None:
      hps_dict.update(additional_hps)
    hps = config_dict.ConfigDict(hps_dict)

    hps.train_size = 4
    hps.valid_size = 4
    hps.test_size = 4
    hps.max_nodes_multiplier = NODES_SIZE_MULTIPLIER
    hps.max_edges_multiplier = EDGES_SIZE_MULTIPLIER

    batch_size = BATCH_SIZE
    eval_batch_size = BATCH_SIZE

    dataset = dataset_builder(
        shuffle_rng=shuffle_seed,
        batch_size=batch_size,
        eval_batch_size=eval_batch_size,
        hps=hps)
    return dataset
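# A minimal usage sketch for the helper above (assumed, not taken from the
# test file): build the mocked ogbg-molpcba dataset and pull one batch from
# its train iterator. train_iterator_fn() is the same iterator API the other
# dataset tests in this section use; no assumptions are made about the field
# names inside a graph batch.
def _sketch_pull_one_mock_batch():
  dataset = _get_dataset(shuffle_seed=jax.random.PRNGKey(0))
  return next(dataset.train_iterator_fn())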
def _run(train_fn, dataset_name, eval_batch_size, eval_num_batches,
         eval_train_num_batches, eval_frequency, checkpoint_steps,
         num_tf_data_prefetches, num_device_prefetches,
         num_tf_data_map_parallel_calls, early_stopping_target_name,
         early_stopping_target_value, early_stopping_mode, eval_steps,
         hparam_file, hparam_overrides, initializer_name, model_name,
         loss_name, metrics_name, num_train_steps, experiment_dir, worker_id,
         training_metrics_config, callback_configs, external_checkpoint_path):
  """Function that runs a Jax experiment. See flag definitions for args."""
  model_cls = models.get_model(model_name)
  initializer = initializers.get_initializer(initializer_name)
  dataset_builder = datasets.get_dataset(dataset_name)
  dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)
  input_pipeline_hps = config_dict.ConfigDict(dict(
      num_tf_data_prefetches=num_tf_data_prefetches,
      num_device_prefetches=num_device_prefetches,
      num_tf_data_map_parallel_calls=num_tf_data_map_parallel_calls,
  ))

  merged_hps = hyperparameters.build_hparams(
      model_name=model_name,
      initializer_name=initializer_name,
      dataset_name=dataset_name,
      hparam_file=hparam_file,
      hparam_overrides=hparam_overrides,
      input_pipeline_hps=input_pipeline_hps)

  # Note that one should never tune an RNG seed!!! The seed is only included
  # in the hparams for convenience of running hparam trials with multiple
  # seeds per point.
  rng_seed = merged_hps.rng_seed
  if merged_hps.rng_seed < 0:
    rng_seed = _create_synchronized_rng_seed()
  xm_experiment = None
  xm_work_unit = None
  if jax.process_index() == 0:
    logging.info('Running with seed %d', rng_seed)
  rng = jax.random.PRNGKey(rng_seed)

  # Build the loss_fn, metrics_bundle, and flax_module.
  model = model_cls(merged_hps, dataset_meta_data, loss_name, metrics_name)
  trial_dir = os.path.join(experiment_dir, str(worker_id))
  meta_data_path = os.path.join(trial_dir, 'meta_data.json')
  meta_data = {'worker_id': worker_id, 'status': 'incomplete'}

  if jax.process_index() == 0:
    logging.info('rng: %s', rng)
    gfile.makedirs(trial_dir)
    # Set up the metric loggers for host 0.
    metrics_logger, init_logger = utils.set_up_loggers(trial_dir, xm_work_unit)
    hparams_fname = os.path.join(trial_dir, 'hparams.json')
    logging.info('saving hparams to %s', hparams_fname)
    with gfile.GFile(hparams_fname, 'w') as f:
      f.write(merged_hps.to_json())
    _write_trial_meta_data(meta_data_path, meta_data)
  else:
    metrics_logger = None
    init_logger = None

  try:
    epoch_reports = list(
        train_fn(
            trial_dir,
            model,
            dataset_builder,
            initializer,
            num_train_steps,
            merged_hps,
            rng,
            eval_batch_size,
            eval_num_batches,
            eval_train_num_batches,
            eval_frequency,
            checkpoint_steps,
            early_stopping_target_name,
            early_stopping_target_value,
            early_stopping_mode,
            eval_steps,
            metrics_logger,
            init_logger,
            training_metrics_config=training_metrics_config,
            callback_configs=callback_configs,
            external_checkpoint_path=external_checkpoint_path))
    logging.info(epoch_reports)
    meta_data['status'] = 'done'
  except utils.TrainingDivergedError as err:
    meta_data['status'] = 'diverged'
    raise err
  finally:
    if jax.process_index() == 0:
      _write_trial_meta_data(meta_data_path, meta_data)
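# Hedged sketch of a callback_configs entry consistent with the _get_dataset
# callback helper earlier in this section: each config is assumed to name at
# least the dataset to evaluate on and its eval batch size. Any additional
# keys a real callback requires are not assumed here, and the concrete values
# below are illustrative only.
_example_callback_configs = [
    {
        'dataset_name': 'mnist',   # illustrative value
        'eval_batch_size': 16,     # illustrative value
    },
]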
def test_determinism(self, ds):
  """Test that shuffle_rng and epoch correctly determine the order of data."""
  batch_size = 32
  eval_batch_size = 16
  np.random.seed(0)  # Set the seed so the mock data is deterministic.

  # This will override the tfds.load() call to return 128 fake samples.
  with tfds.testing.mock_data(num_examples=128):
    dataset_builder = get_dataset(ds)
    hps = get_dataset_hparams(ds)
    hps.train_size = 80
    hps.valid_size = 48
    hps.test_size = 40
    dataset = dataset_builder(
        shuffle_rng=jax.random.PRNGKey(0),
        batch_size=batch_size,
        eval_batch_size=eval_batch_size,
        hps=hps)
    dataset_copy = dataset_builder(
        shuffle_rng=jax.random.PRNGKey(0),
        batch_size=batch_size,
        eval_batch_size=eval_batch_size,
        hps=hps)

    batch_idx_to_test = 1
    saved_batch = next(
        itertools.islice(dataset.train_iterator_fn(), batch_idx_to_test,
                         batch_idx_to_test + 1))
    saved_batch_same_epoch = next(
        itertools.islice(dataset_copy.train_iterator_fn(), batch_idx_to_test,
                         batch_idx_to_test + 1))
    saved_batch_diff_epoch = next(
        itertools.islice(dataset.train_iterator_fn(), batch_idx_to_test + 3,
                         batch_idx_to_test + 4))
    saved_batch_eval = next(
        itertools.islice(dataset.valid_epoch(), batch_idx_to_test,
                         batch_idx_to_test + 1))
    saved_batch_eval_same_epoch = next(
        itertools.islice(dataset_copy.valid_epoch(), batch_idx_to_test,
                         batch_idx_to_test + 1))

    self.assertTrue(
        jnp.array_equal(saved_batch['inputs'],
                        saved_batch_same_epoch['inputs']))
    self.assertTrue(
        jnp.array_equal(saved_batch['targets'],
                        saved_batch_same_epoch['targets']))
    self.assertFalse(
        jnp.array_equal(saved_batch['inputs'],
                        saved_batch_diff_epoch['inputs']))
    self.assertFalse(
        jnp.array_equal(saved_batch['targets'],
                        saved_batch_diff_epoch['targets']))
    self.assertTrue(
        jnp.array_equal(saved_batch_eval['inputs'],
                        saved_batch_eval_same_epoch['inputs']))

    # Check shapes.
    expected_shape = jnp.array([
        batch_size, hps.input_shape[0], hps.input_shape[1], hps.input_shape[2]
    ])
    expected_shape_eval = jnp.array([
        eval_batch_size,
        hps.input_shape[0],
        hps.input_shape[1],
        hps.input_shape[2],
    ])
    self.assertTrue(
        jnp.array_equal(saved_batch['inputs'].shape, expected_shape))
    self.assertTrue(
        jnp.array_equal(saved_batch_eval['inputs'].shape,
                        expected_shape_eval))

    expected_target_shape = jnp.array(
        [batch_size, get_dataset_hparams(ds)['output_shape'][-1]])
    self.assertTrue(
        jnp.array_equal(saved_batch['targets'].shape, expected_target_shape))

    # Check that the training generator drops the last partial batch.
    drop_partial_batches = list(
        itertools.islice(dataset.train_iterator_fn(), 0, 2))
    # Check that the validation set correctly pads the final partial batch.
    no_drop_partial_batches = list(dataset.test_epoch(num_batches=3))
    self.assertLen(drop_partial_batches, 2)
    self.assertLen(no_drop_partial_batches, 3)
    expected_shape = jnp.array([
        80 % batch_size,
        hps.input_shape[0],
        hps.input_shape[1],
        hps.input_shape[2],
    ])
    self.assertTrue(
        jnp.array_equal(no_drop_partial_batches[2]['inputs'].shape,
                        expected_shape))
    # We expect the partial batch to have 40 % 16 = 8 non-padded inputs.
    self.assertEqual(no_drop_partial_batches[2]['weights'].sum(), 8)

    # Test the number of batches.
    num_batches = 1
    num_generated = len(
        list(itertools.islice(dataset.train_iterator_fn(), 0, num_batches)))
    self.assertEqual(num_batches, num_generated)
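# Worked numbers behind the padding assertions above (an illustrative sketch,
# not part of the test): with test_size=40 and eval_batch_size=16, test_epoch
# yields (40 + 16 - 1) // 16 = 3 batches, and the final one carries
# 40 % 16 = 8 real examples padded out to 16, so its 'weights' sum to 8. With
# train_size=80 and batch_size=32, the train iterator instead drops the
# trailing 80 % 32 = 16 examples.
def _sketch_partial_batch_counts(test_size=40, eval_batch_size=16):
  num_eval_batches = (test_size + eval_batch_size - 1) // eval_batch_size
  num_real_in_last = test_size % eval_batch_size or eval_batch_size
  return num_eval_batches, num_real_in_last  # (3, 8) for the test's sizes.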
def test_early_stopping(self):
  """Test training early stopping on MNIST with a small model."""
  rng = jax.random.PRNGKey(0)

  # Set the numpy seed to make the fake data deterministic. mocking.mock_data
  # ultimately calls numpy.random.
  np.random.seed(0)

  model_name = 'fully_connected'
  loss_name = 'cross_entropy'
  metrics_name = 'classification_metrics'
  initializer_name = 'noop'
  dataset_name = 'mnist'
  model_cls = models.get_model(model_name)
  initializer = initializers.get_initializer(initializer_name)
  dataset_builder = datasets.get_dataset(dataset_name)
  hparam_overrides = {
      'lr_hparams': {
          'base_lr': 0.1,
          'schedule': 'cosine'
      },
      'batch_size': 8,
      'train_size': 160,
      'valid_size': 96,
      'test_size': 80,
  }
  input_pipeline_hps = config_dict.ConfigDict(dict(
      num_tf_data_prefetches=-1,
      num_device_prefetches=0,
      num_tf_data_map_parallel_calls=-1,
  ))
  hps = hyperparameters.build_hparams(
      model_name,
      initializer_name,
      dataset_name,
      hparam_file=None,
      hparam_overrides=hparam_overrides,
      input_pipeline_hps=input_pipeline_hps)

  eval_batch_size = 16
  num_examples = 256

  def as_dataset(self, *args, **kwargs):
    del args
    del kwargs

    # pylint: disable=g-long-lambda,g-complex-comprehension
    return tf.data.Dataset.from_generator(
        lambda: ({
            'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),
            'label': 9,
        } for i in range(num_examples)),
        output_types=self.info.features.dtype,
        output_shapes=self.info.features.shape,
    )

  # This will override the tfds.load(mnist) call to return 256 fake samples.
  with tfds.testing.mock_data(as_dataset_fn=as_dataset,
                              num_examples=num_examples):
    dataset = dataset_builder(
        shuffle_rng=jax.random.PRNGKey(0),
        batch_size=hps.batch_size,
        eval_batch_size=eval_batch_size,
        hps=hps)

    model = model_cls(hps, datasets.get_dataset_meta_data(dataset_name),
                      loss_name, metrics_name)

    num_train_steps = 40
    early_stopping_target_name = 'test/ce_loss'
    early_stopping_target_value = 0.005
    early_stopping_mode = 'less'
    eval_num_batches = 5
    eval_every = 10
    checkpoint_steps = [1, 3, 15]
    metrics_logger, init_logger = utils.set_up_loggers(self.test_dir)
    epoch_reports = list(
        trainer.train(
            train_dir=self.test_dir,
            model=model,
            dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
            initializer=initializer,
            num_train_steps=num_train_steps,
            hps=hps,
            rng=rng,
            eval_batch_size=eval_batch_size,
            eval_num_batches=eval_num_batches,
            eval_train_num_batches=eval_num_batches,
            eval_frequency=eval_every,
            checkpoint_steps=checkpoint_steps,
            early_stopping_target_name=early_stopping_target_name,
            early_stopping_target_value=early_stopping_target_value,
            early_stopping_mode=early_stopping_mode,
            metrics_logger=metrics_logger,
            init_logger=init_logger))

    self.assertLen(epoch_reports, 3)
    self.assertGreater(epoch_reports[-2][early_stopping_target_name],
                       early_stopping_target_value)
    self.assertLess(epoch_reports[-1][early_stopping_target_name],
                    early_stopping_target_value)
def test_trainer(self):
  """Test training for two epochs on MNIST with a small model."""
  rng = jax.random.PRNGKey(0)

  # Set the numpy seed to make the fake data deterministic. mocking.mock_data
  # ultimately calls numpy.random.
  np.random.seed(0)

  model_name = 'fully_connected'
  loss_name = 'cross_entropy'
  metrics_name = 'classification_metrics'
  initializer_name = 'noop'
  dataset_name = 'mnist'
  model_cls = models.get_model(model_name)
  initializer = initializers.get_initializer(initializer_name)
  dataset_builder = datasets.get_dataset(dataset_name)
  hparam_overrides = {
      'lr_hparams': {
          'base_lr': 0.1,
          'schedule': 'cosine'
      },
      'batch_size': 8,
      'train_size': 160,
      'valid_size': 96,
      'test_size': 80,
  }
  input_pipeline_hps = config_dict.ConfigDict(dict(
      num_tf_data_prefetches=-1,
      num_device_prefetches=0,
      num_tf_data_map_parallel_calls=-1,
  ))
  hps = hyperparameters.build_hparams(
      model_name,
      initializer_name,
      dataset_name,
      hparam_file=None,
      hparam_overrides=hparam_overrides,
      input_pipeline_hps=input_pipeline_hps)

  eval_batch_size = 16
  num_examples = 256

  def as_dataset(self, *args, **kwargs):
    del args
    del kwargs

    # pylint: disable=g-long-lambda,g-complex-comprehension
    return tf.data.Dataset.from_generator(
        lambda: ({
            'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),
            'label': 9,
        } for i in range(num_examples)),
        output_types=self.info.features.dtype,
        output_shapes=self.info.features.shape,
    )

  # This will override the tfds.load(mnist) call to return 256 fake samples.
  with tfds.testing.mock_data(as_dataset_fn=as_dataset,
                              num_examples=num_examples):
    dataset = dataset_builder(
        shuffle_rng=jax.random.PRNGKey(0),
        batch_size=hps.batch_size,
        eval_batch_size=eval_batch_size,
        hps=hps)

    model = model_cls(hps, datasets.get_dataset_meta_data(dataset_name),
                      loss_name, metrics_name)

    num_train_steps = 40
    eval_num_batches = 5
    eval_every = 10
    checkpoint_steps = [1, 3, 15]
    metrics_logger, init_logger = utils.set_up_loggers(self.test_dir)
    epoch_reports = list(
        trainer.train(
            train_dir=self.test_dir,
            model=model,
            dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
            initializer=initializer,
            num_train_steps=num_train_steps,
            hps=hps,
            rng=rng,
            eval_batch_size=eval_batch_size,
            eval_num_batches=eval_num_batches,
            eval_train_num_batches=eval_num_batches,
            eval_frequency=eval_every,
            checkpoint_steps=checkpoint_steps,
            metrics_logger=metrics_logger,
            init_logger=init_logger))

    # Check that the additional checkpoints are saved.
    checkpoint_dir = os.path.join(self.test_dir, 'checkpoints')
    saved_steps = []
    for f in tf.io.gfile.listdir(checkpoint_dir):
      if f[:5] == 'ckpt_':
        saved_steps.append(int(f[5:]))
    self.assertEqual(set(saved_steps), set(checkpoint_steps))

    self.assertLen(epoch_reports, num_train_steps / eval_every)
    with tf.io.gfile.GFile(os.path.join(self.test_dir,
                                        'measurements.csv')) as f:
      df = pandas.read_csv(f)
      train_err = df['train/error_rate'].values[-1]
      self.assertEqual(df['preemption_count'].values[-1], 0)
      self.assertLess(train_err, 0.9)
      self.assertEqual(set(df.columns.values), set(get_column_names()))

    model = model_cls(hps, {'apply_one_hot_in_loss': False}, loss_name,
                      metrics_name)

    # Test reload from the checkpoint by increasing num_train_steps.
    num_train_steps_reload = 100
    epoch_reports = list(
        trainer.train(
            train_dir=self.test_dir,
            model=model,
            dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
            initializer=initializer,
            num_train_steps=num_train_steps_reload,
            hps=hps,
            rng=rng,
            eval_batch_size=eval_batch_size,
            eval_num_batches=eval_num_batches,
            eval_train_num_batches=eval_num_batches,
            eval_frequency=eval_every,
            checkpoint_steps=checkpoint_steps,
            metrics_logger=metrics_logger,
            init_logger=init_logger))
    self.assertLen(epoch_reports,
                   (num_train_steps_reload - num_train_steps) / eval_every)
    with tf.io.gfile.GFile(os.path.join(self.test_dir,
                                        'measurements.csv')) as f:
      df = pandas.read_csv(f)
      train_err = df['train/error_rate'].values[-1]
      train_loss = df['train/ce_loss'].values[-1]
      self.assertLess(train_err, 0.35)
      self.assertLess(train_loss, 0.1)

      self.assertEqual(df['valid/num_examples'].values[-1],
                       eval_num_batches * eval_batch_size)
      self.assertEqual(df['preemption_count'].values[-1], 1)

      # Check that the correct learning rate was saved in the measurements
      # file.
      final_learning_rate = df['learning_rate'].values[-1]
      final_step = df['global_step'].values[-1]
      self.assertEqual(num_train_steps_reload, final_step)

      # final_step will be one larger than the last step used to calculate the
      # lr decay, hence we plug (final_step - 1) into the decay formula. Note
      # that there is a small numerical difference here between np and jnp.
      decay_factor = (1 + np.cos(
          (final_step - 1) / num_train_steps_reload * np.pi)) * 0.5
      self.assertEqual(float(final_learning_rate),
                       hps.lr_hparams['base_lr'] * decay_factor)

      self.assertEqual(set(df.columns.values), set(get_column_names()))
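# Standalone check of the cosine decay factor asserted above (a sketch, not
# part of the test): lr(t) = base_lr * 0.5 * (1 + cos((t - 1) / T * pi)), with
# the test's base_lr=0.1 and T=num_train_steps_reload=100.
def _sketch_final_cosine_lr(base_lr=0.1, final_step=100, total_steps=100):
  decay_factor = (1 + np.cos((final_step - 1) / total_steps * np.pi)) * 0.5
  return base_lr * decay_factor  # ~2.47e-05 at the final recorded step.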
def test_run_lanczos(self):
  """Test training for two epochs on MNIST with a small model."""
  rng = jax.random.PRNGKey(0)

  # Set the numpy seed to make the fake data deterministic. mocking.mock_data
  # ultimately calls numpy.random.
  np.random.seed(0)

  model_name = 'fully_connected'
  loss_name = 'cross_entropy'
  metrics_name = 'classification_metrics'
  initializer_name = 'noop'
  dataset_name = 'mnist'
  model_cls = models.get_model(model_name)
  initializer = initializers.get_initializer(initializer_name)
  dataset_builder = datasets.get_dataset(dataset_name)
  hparam_overrides = {
      'lr_hparams': {
          'base_lr': 0.1,
          'schedule': 'cosine'
      },
      'batch_size': 8,
      'train_size': 160,
      'valid_size': 96,
      'test_size': 80,
  }
  input_pipeline_hps = config_dict.ConfigDict(dict(
      num_tf_data_prefetches=-1,
      num_device_prefetches=0,
      num_tf_data_map_parallel_calls=-1,
  ))
  hps = hyperparameters.build_hparams(
      model_name,
      initializer_name,
      dataset_name,
      hparam_file=None,
      hparam_overrides=hparam_overrides,
      input_pipeline_hps=input_pipeline_hps)

  model = model_cls(hps, datasets.get_dataset_meta_data(dataset_name),
                    loss_name, metrics_name)

  eval_batch_size = 16
  num_examples = 256

  def as_dataset(self, *args, **kwargs):
    del args
    del kwargs

    # pylint: disable=g-long-lambda,g-complex-comprehension
    return tf.data.Dataset.from_generator(
        lambda: ({
            'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),
            'label': 9,
        } for i in range(num_examples)),
        output_types=self.info.features.dtype,
        output_shapes=self.info.features.shape,
    )

  # This will override the tfds.load(mnist) call to return 256 fake samples.
  with tfds.testing.mock_data(
      as_dataset_fn=as_dataset, num_examples=num_examples):
    dataset = dataset_builder(
        shuffle_rng=jax.random.PRNGKey(0),
        batch_size=hps.batch_size,
        eval_batch_size=eval_batch_size,
        hps=hps)

    num_train_steps = 41
    eval_num_batches = 5
    eval_every = 10
    checkpoint_steps = [10, 30, 40]
    metrics_logger, init_logger = None, None
    _ = list(
        trainer.train(
            train_dir=self.test_dir,
            model=model,
            dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
            initializer=initializer,
            num_train_steps=num_train_steps,
            hps=hps,
            rng=rng,
            eval_batch_size=eval_batch_size,
            eval_num_batches=eval_num_batches,
            eval_train_num_batches=eval_num_batches,
            eval_frequency=eval_every,
            checkpoint_steps=checkpoint_steps,
            metrics_logger=metrics_logger,
            init_logger=init_logger))

    checkpoint_dir = os.path.join(self.test_dir, 'checkpoints')
    rng = jax.random.PRNGKey(0)

    run_lanczos.eval_checkpoints(
        checkpoint_dir,
        hps,
        rng,
        eval_num_batches,
        model_cls=model_cls,
        dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
        dataset_meta_data=datasets.get_dataset_meta_data(dataset_name),
        hessian_eval_config=hessian_eval.DEFAULT_EVAL_CONFIG,
    )

    # Load the saved file.
    hessian_dir = os.path.join(checkpoint_dir, 'hessian', 'training_metrics')
    pytree_list = checkpoint.load_pytree(hessian_dir)

    # Convert back to a regular list (the checkpointer will have converted the
    # saved list to a dict with keys '0', '1', ...).
    pytree_list = [pytree_list[str(i)] for i in range(len(pytree_list))]

    # Test that the logged steps are correct.
    saved_steps = [row['step'] for row in pytree_list]
    self.assertEqual(saved_steps, checkpoint_steps)
def test_accumulation(self):
  """Test simple gradient accumulation."""
  num_steps = 3
  per_step_batch_size = 16
  total_batch_size = 48
  virtual_batch_size = 8
  model_str = 'wide_resnet'  # Pick a model with batch norm.
  model_cls = models.get_model(model_str)
  model_hps = models.get_model_hparams(model_str)
  dataset_name = 'cifar10'
  dataset_builder = datasets.get_dataset(dataset_name)
  hps = copy.copy(model_hps)
  hps.update(datasets.get_dataset_hparams(dataset_name))

  # Compute updates using gradient accumulation.
  hps.update({
      'batch_size': per_step_batch_size,
      'virtual_batch_size': virtual_batch_size,
      'normalizer': 'virtual_batch_norm',
      'total_accumulated_batch_size': total_batch_size,
  })
  grad_acc_params, grad_acc_batch_stats, grad_acc_training_cost = _init_model(
      model_cls, hps)
  total_dataset = dataset_builder(
      shuffle_rng=jax.random.PRNGKey(1),
      batch_size=total_batch_size,
      eval_batch_size=10,
      hps=hps)
  # Ensure we see the same exact batches.
  train_iter = total_dataset.train_iterator_fn()
  train_iter = itertools.islice(train_iter, 0, num_steps)
  train_iter = itertools.cycle(train_iter)

  def grad_acc_train_iter():
    for _ in range(num_steps):
      total_batch = next(train_iter)
      # Split each total batch into sub-batches.
      num_sub_batches = total_batch_size // per_step_batch_size
      start_index = 0
      end_index = int(total_batch_size / num_sub_batches)
      for bi in range(num_sub_batches):
        yield jax.tree_map(lambda x: x[start_index:end_index], total_batch)  # pylint: disable=cell-var-from-loop
        start_index = end_index
        end_index = int(total_batch_size * (bi + 2) / num_sub_batches)

  lrs = jnp.array([1.0, 0.1, 1e-2])
  sgd_opt_init, sgd_opt_update = optax.sgd(
      learning_rate=lambda t: lrs.at[t].get())
  opt_init, opt_update = gradient_accumulator.accumulate_gradients(
      per_step_batch_size=per_step_batch_size,
      total_batch_size=total_batch_size,
      virtual_batch_size=virtual_batch_size,
      base_opt_init_fn=sgd_opt_init,
      base_opt_update_fn=sgd_opt_update)
  grad_acc_params, grad_acc_batch_stats = _optimize(
      # Run for 3x the number of steps to see the same number of examples.
      num_steps=3 * num_steps,
      params=grad_acc_params,
      batch_stats=grad_acc_batch_stats,
      training_cost=grad_acc_training_cost,
      train_iter=grad_acc_train_iter(),
      opt_init=opt_init,
      opt_update=opt_update)

  # Compute the same updates, but without gradient accumulation.
  hps.update({
      'batch_size': total_batch_size,
      'total_accumulated_batch_size': None,
  })
  params, batch_stats, training_cost = _init_model(model_cls, hps)
  params, batch_stats = _optimize(
      num_steps=num_steps,
      params=params,
      batch_stats=batch_stats,
      training_cost=training_cost,
      train_iter=train_iter,
      opt_init=sgd_opt_init,
      opt_update=sgd_opt_update)

  diffs_params = jax.tree_multimap(lambda a, b: jnp.mean(jnp.abs(a - b)),
                                   grad_acc_params, params)

  def batch_stats_reduce(a, b):
    if len(a.shape) > 0:  # pylint: disable=g-explicit-length-test
      return jnp.mean(jnp.abs(jnp.mean(a, axis=0) - jnp.mean(b, axis=0)))
    # The gradient accumulator counters are scalars.
    return a - b

  diffs_batch_stats = jax.tree_multimap(batch_stats_reduce,
                                        grad_acc_batch_stats, batch_stats)

  # We sometimes get small floating point errors in the gradients, so we
  # cannot test for the values being exactly the same.
  acceptable_params_diff = 1e-4
  acceptable_batch_stats_diff = 5e-3

  def check_closeness(root_name, d, max_diff):
    not_close_dict = {}
    for name, dd in d.items():
      new_name = root_name + '/' + name if root_name else name
      if isinstance(dd, (dict, core.FrozenDict)):
        not_close_dict.update(check_closeness(new_name, dd, max_diff))
      else:
        if dd > max_diff:
          not_close_dict[new_name] = dd
    return not_close_dict

  not_close_params = check_closeness('', diffs_params, acceptable_params_diff)
  self.assertEmpty(not_close_params)

  not_close_batch_stats = check_closeness('', diffs_batch_stats,
                                          acceptable_batch_stats_diff)
  # Note that the variance variables in the batch stats collection can
  # sometimes start to diverge slightly over time (with a higher number of
  # training steps), likely due to numerical issues.
  self.assertEmpty(not_close_batch_stats)
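# Worked example of the sub-batch slicing done by grad_acc_train_iter above
# (an illustrative sketch, not test code): with total_batch_size=48 and
# per_step_batch_size=16, each large batch is cut into the 3 slices [0:16),
# [16:32), and [32:48), so three accumulation steps together see exactly the
# examples of one unaccumulated step.
def _sketch_sub_batch_bounds(total_batch_size=48, per_step_batch_size=16):
  num_sub_batches = total_batch_size // per_step_batch_size
  return [(i * per_step_batch_size, (i + 1) * per_step_batch_size)
          for i in range(num_sub_batches)]  # [(0, 16), (16, 32), (32, 48)]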
def test_shampoo_wrn(self):
  """Test distributed shampoo on fake dataset."""
  model_name = 'simple_cnn'
  model_cls = models.get_model(model_name)
  hparam_overrides = {
      'optimizer': 'distributed_shampoo',
      'batch_size': 1,
      'train_size': 10,
      'valid_size': 10,
      'input_shape': (32, 32, 3),
      'output_shape': (10,),
      'opt_hparams': {
          'block_size': 32,
          'beta1': 0.9,
          'beta2': 0.999,
          'diagonal_epsilon': 1e-10,
          'matrix_epsilon': 1e-6,
          'weight_decay': 0.0,
          'start_preconditioning_step': 5,
          'preconditioning_compute_steps': 1,
          'statistics_compute_steps': 1,
          'best_effort_shape_interpretation': True,
          'graft_type': distributed_shampoo.GraftingType.SGD,
          'nesterov': True,
          'exponent_override': 0,
          'batch_axis_name': 'batch',
          'num_devices_for_pjit': None,
          'shard_optimizer_states': False,
          'inverse_failure_threshold': 0.1,
          'clip_by_scaled_gradient_norm': None,
          'precision': lax.Precision.HIGHEST,
          'moving_average_for_momentum': False,
          'skip_preconditioning_dim_size_gt': 4096,
          'best_effort_memory_usage_reduction': False,
      },
  }
  input_pipeline_hps = config_dict.ConfigDict(dict(
      num_tf_data_prefetches=-1,
      num_device_prefetches=0,
      num_tf_data_map_parallel_calls=-1,
  ))
  hps = hyperparameters.build_hparams(
      model_name,
      initializer_name='noop',
      dataset_name='fake',
      hparam_file=None,
      hparam_overrides=hparam_overrides,
      input_pipeline_hps=input_pipeline_hps)

  initializer = initializers.get_initializer('noop')
  dataset_builder = datasets.get_dataset('fake')
  dataset = dataset_builder(
      shuffle_rng=jax.random.PRNGKey(0),
      batch_size=hps.batch_size,
      eval_batch_size=hps.batch_size,
      hps=hps)

  loss_name = 'cross_entropy'
  metrics_name = 'classification_metrics'
  dataset_meta_data = datasets.get_dataset_meta_data('fake')
  model = model_cls(hps, dataset_meta_data, loss_name, metrics_name)

  metrics_logger, init_logger = utils.set_up_loggers(self.test_dir)
  _ = list(
      trainer.train(
          train_dir=self.test_dir,
          model=model,
          dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
          initializer=initializer,
          num_train_steps=1,
          hps=hps,
          rng=jax.random.PRNGKey(42),
          eval_batch_size=hps.batch_size,
          eval_num_batches=None,
          eval_train_num_batches=None,
          eval_frequency=10,
          checkpoint_steps=[],
          metrics_logger=metrics_logger,
          init_logger=init_logger))

  with tf.io.gfile.GFile(os.path.join(self.test_dir, 'measurements.csv')) as f:
    df = pandas.read_csv(f)
    valid_ce_loss = df['valid/ce_loss'].values[-1]
    self.assertLess(valid_ce_loss, 1e-3)