def setUp(self):
  """Builds a small CPU-only HParams instance so the test runs fast."""
  super(TrainExperimentTest, self).setUp()
  self.hparams = train_experiment.HParams(
      # Run on CPU.
      use_tpu=False,
      eval_on_tpu=False,
      # Keep batches tiny.
      z_dim=4,
      train_batch_size=4,
      eval_batch_size=16,
      predict_batch_size=1,
      fake_data=True,
      fake_nets=True,
      # Keep the networks tiny.
      gf_dim=2,
      df_dim=4,
      # Run as few steps as possible.
      max_number_of_steps=1,
      num_eval_steps=1,
      continuous_eval_timeout_secs=1,
      tpu_iterations_per_loop=1,
      model_dir=self.create_tempdir().full_path,
      use_tpu_estimator=False,
      train_steps_per_eval=1,
      generator_lr=1.0,
      discriminator_lr=1.0,
      beta1=1.0,
      shuffle_buffer_size=1,
      num_classes=10,
  )
def main(_):
  """Builds HParams from command-line flags and runs the requested mode.

  Args:
    _: Unused positional args from absl app invocation.

  Raises:
    ValueError: If `FLAGS.mode` is not one of the recognized modes.
  """
  hparams = train_experiment.HParams(
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size,
      use_tpu=FLAGS.use_tpu,
      eval_on_tpu=FLAGS.eval_on_tpu,
      generator_lr=FLAGS.generator_lr,
      discriminator_lr=FLAGS.discriminator_lr,
      beta1=FLAGS.beta1,
      gf_dim=FLAGS.gf_dim,
      df_dim=FLAGS.df_dim,
      num_classes=1000,
      shuffle_buffer_size=10000,
      z_dim=FLAGS.z_dim,
      model_dir=FLAGS.model_dir,
      continuous_eval_timeout_secs=FLAGS.continuous_eval_timeout_secs,
      use_tpu_estimator=FLAGS.use_tpu_estimator,
      max_number_of_steps=FLAGS.max_number_of_steps,
      train_steps_per_eval=FLAGS.train_steps_per_eval,
      num_eval_steps=FLAGS.num_eval_steps,
      fake_nets=FLAGS.fake_nets,
      tpu_iterations_per_loop=FLAGS.tpu_iterations_per_loop,
  )
  if FLAGS.mode == 'train':
    train_experiment.run_train(hparams)
  elif FLAGS.mode == 'continuous_eval':
    train_experiment.run_continuous_eval(hparams)
  elif FLAGS.mode == 'train_and_eval' or FLAGS.mode is None:
    # `train_and_eval` is the default when no mode is given.
    train_experiment.run_train_and_eval(hparams)
  else:
    # Fix: format the mode into the message instead of passing it as a
    # second exception argument (which rendered the error as a tuple).
    raise ValueError('Mode not recognized: %s' % FLAGS.mode)
def setUp(self):
  """Creates tiny HParams (with debug and TPU sub-params) for fast tests."""
  super(EstimatorLibTest, self).setUp()
  debug_params = train_experiment.DebugParams(
      use_tpu=False,
      eval_on_tpu=False,
      fake_nets=None,
      fake_data=None,
      continuous_eval_timeout_secs=1,
  )
  tpu_params = train_experiment.TPUParams(
      use_tpu_estimator=False,
      tpu_location='local',
      gcp_project=None,
      tpu_zone=None,
      tpu_iterations_per_loop=1,
  )
  self.hparams = train_experiment.HParams(
      train_batch_size=1,
      eval_batch_size=32,
      predict_batch_size=1,
      generator_lr=1.0,
      discriminator_lr=1.0,
      beta1=1.0,
      gf_dim=2,
      df_dim=2,
      num_classes=10,
      shuffle_buffer_size=1,
      z_dim=8,
      model_dir=None,
      max_number_of_steps=None,
      train_steps_per_eval=1,
      num_eval_steps=1,
      debug_params=debug_params,
      tpu_params=tpu_params,
  )
def test_get_metrics_syntax(self, mock_fid, mock_iscore, use_tpu):
  """Smoke-tests that `get_metrics` accepts well-formed inputs."""
  if tf.executing_eagerly():
    # tf.metrics.mean is not supported when eager execution is enabled.
    return
  batch_size = 40
  hparams = train_experiment.HParams(
      train_batch_size=1,
      eval_batch_size=batch_size,
      predict_batch_size=1,
      generator_lr=1.0,
      discriminator_lr=1.0,
      beta1=1.0,
      gf_dim=2,
      df_dim=2,
      num_classes=10,
      shuffle_buffer_size=1,
      z_dim=8,
      model_dir=None,
      max_number_of_steps=None,
      train_steps_per_eval=1,
      num_eval_steps=1,
      debug_params=train_experiment.DebugParams(
          use_tpu=use_tpu,
          eval_on_tpu=use_tpu,
          fake_nets=True,
          fake_data=True,
          continuous_eval_timeout_secs=1,
      ),
      tpu_params=None,
  )
  # Fake arguments to pass to `get_metrics`.
  noise = tf.zeros([batch_size, 128])
  images = tf.zeros([batch_size, 128, 128, 3])
  labels = tf.zeros([batch_size])
  logits = tf.ones([batch_size, 1008])
  data_dict = {'images': images, 'labels': labels}
  # Mock Inception-inference computations.
  mock_iscore.return_value = 1.0
  mock_fid.return_value = 0.0
  estimator_lib.get_metrics(
      generator_inputs=noise,
      generated_data=data_dict,
      real_data=data_dict,
      discriminator_real_outputs=(logits, ()),
      discriminator_gen_outputs=(logits, ()),
      hparams=hparams)
def main(_):
  """Configures file logging, builds HParams from flags, and runs a mode.

  Args:
    _: Unused positional args from absl app invocation.

  Raises:
    ValueError: If `FLAGS.mode` is not one of the recognized modes.
    AssertionError: If `FLAGS.use_tpu` is set and `FLAGS.tpu` names more
      than one TPU (comma-separated).
  """
  from tensorflow_gan.examples.self_attention_estimator import train_experiment  # pylint:disable=g-import-not-at-top

  # Get the TF logger and mirror INFO-level messages to a file.
  log = logging.getLogger('tensorflow')
  log.setLevel(logging.INFO)
  formatter = logging.Formatter(
      '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  # On TPU the model dir may be a GCS path, so log locally under $HOME.
  logging_dir = os.environ['HOME'] if FLAGS.use_tpu else FLAGS.model_dir
  # Fix: exist_ok avoids the isdir/makedirs race on concurrent starts.
  os.makedirs(logging_dir, exist_ok=True)
  fh = logging.FileHandler(os.path.join(logging_dir, 'tensorflow.log'))
  fh.setLevel(logging.INFO)
  fh.setFormatter(formatter)
  log.addHandler(fh)

  tpu_location = FLAGS.tpu
  if FLAGS.use_tpu:
    assert ',' not in tpu_location, 'Only using 1 TPU is supported'

  hparams = train_experiment.HParams(
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size,
      generator_lr=FLAGS.generator_lr,
      discriminator_lr=FLAGS.discriminator_lr,
      beta1=FLAGS.beta1,
      gf_dim=FLAGS.gf_dim,
      df_dim=FLAGS.df_dim,
      num_classes=FLAGS.num_classes,
      shuffle_buffer_size=10000,
      z_dim=FLAGS.z_dim,
      model_dir=FLAGS.model_dir,
      max_number_of_steps=FLAGS.max_number_of_steps,
      train_steps_per_eval=FLAGS.train_steps_per_eval,
      num_eval_steps=FLAGS.num_eval_steps,
      debug_params=train_experiment.DebugParams(
          use_tpu=FLAGS.use_tpu,
          eval_on_tpu=FLAGS.eval_on_tpu,
          fake_nets=False,
          fake_data=False,
          continuous_eval_timeout_secs=FLAGS.continuous_eval_timeout_secs,
      ),
      tpu_params=train_experiment.TPUParams(
          use_tpu_estimator=FLAGS.use_tpu_estimator,
          tpu_location=tpu_location,
          gcp_project=FLAGS.gcp_project,
          tpu_zone=FLAGS.tpu_zone,
          tpu_iterations_per_loop=FLAGS.tpu_iterations_per_loop,
      ),
  )
  if FLAGS.mode == 'train':
    train_experiment.run_train(hparams)
  elif FLAGS.mode == 'continuous_eval':
    train_experiment.run_continuous_eval(hparams)
  elif FLAGS.mode == 'intra_fid_eval':
    train_experiment.run_intra_fid_eval(hparams)
  elif FLAGS.mode == 'train_and_eval' or FLAGS.mode is None:
    # `train_and_eval` is the default when no mode is given.
    train_experiment.run_train_and_eval(hparams)
  elif FLAGS.mode == 'gen_images':
    train_experiment.gen_images(hparams)
  elif FLAGS.mode == 'gen_matrices':
    train_experiment.gen_matrices(hparams)
  else:
    # Fix: format the mode into the message instead of passing it as a
    # second exception argument (which rendered the error as a tuple).
    raise ValueError('Mode not recognized: %s' % FLAGS.mode)