# Evaluation script: flag definitions, flag restoration and checkpoint discovery.
import os
import sys

import tensorflow as tf

# NOTE: `flags` and the helpers below come from the repo's experiment tools;
# the import path used here is an assumption, not the repo's exact layout.
from experiment_tools import (flags, parse_flags, assert_all_flags_parsed,
                              _load_flags, _restore_flags, json_load, FLAG_FILE)

flags.DEFINE_boolean('resume', False, 'Tries to resume if True. Throws an error if '
                     'False and any of the log files exist, unless F.overwrite is True.')
flags.DEFINE_boolean('overwrite', False, 'Overwrites existing log files if True.')
flags.DEFINE_string('gpu', '0', 'Id of the gpu to allocate.')
flags.DEFINE_boolean('debug', False, 'Adds a lot of summaries if True.')

F = flags.FLAGS
os.environ['CUDA_VISIBLE_DEVICES'] = F.gpu

if __name__ == '__main__':
    # Load model- and data-specific flags and parse the command line.
    _load_flags(F.model_config, F.data_config)
    flags = parse_flags()
    assert_all_flags_parsed()

    # Restore the flag values the checkpointed model was trained with.
    flag_path = os.path.join(F.checkpoint_dir, FLAG_FILE)
    restored_flags = json_load(flag_path)
    flags.update(restored_flags)
    _restore_flags(flags)

    print('Processing:', F.checkpoint_dir)
    checkpoint_state = tf.train.get_checkpoint_state(F.checkpoint_dir)
    if checkpoint_state is None:
        print('No checkpoints found in {}'.format(F.checkpoint_dir))
        sys.exit(1)  # nothing to evaluate

    checkpoint_paths = checkpoint_state.all_model_checkpoint_paths
    if F.from_itr > 0:
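        # A minimal, hypothetical sketch of this branch: keep only checkpoints
        # at or past iteration F.from_itr, assuming every path ends in
        # '-<global_step>' as written by tf.train.Saver. The repo's actual
        # filtering may differ.
        checkpoint_paths = [p for p in checkpoint_paths
                            if int(p.split('-')[-1]) >= F.from_itr]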

# Training script: flag definitions and test-run configuration.
flags.DEFINE_string('schedule', '4,6,10',
                    'Learning-rate schedule given as comma-separated proportions. '
                    'Schedule = \'4,6,10\' means that F.train_itr will be split in '
                    'proportions 4/s, 6/s, 10/s, where s = sum(schedule).')
flags.DEFINE_boolean('test_run', False, 'Only a small run if True.')
flags.DEFINE_string('gpu', '0', 'Id of the gpu to use for this job.')
flags.DEFINE_boolean('debug', False, 'Adds a lot of tensorboard summaries if True.')

F = flags.FLAGS
os.environ['CUDA_VISIBLE_DEVICES'] = F.gpu

# Parse flags.
parse_flags()
F = flags.FLAGS

if F.test_run:
    # Alternative small-run configuration (Duke data):
    # F.run_name = 'duke_test'
    # F.data_config = 'configs/duke_data.py'
    # F.data_path = 'pruned_merged225.pickle'
    # F.model_config = 'configs/duke_model.py'
    # F.glimpse_size = '32x12'

    F.run_name = 'mnist_test'
    F.data_config = 'configs/small_new_seq_mnist_data.py'
    F.model_config = 'configs/mlp_mnist_model.py'
    F.seq_len = 2
    F.eval_on_train = False
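
# A minimal sketch of how the 'schedule' flag could be consumed; this is an
# assumption for illustration, not the repo's own parsing. The comma-separated
# proportions are normalised and turned into cumulative iteration boundaries,
# e.g. for use with tf.train.piecewise_constant.
def schedule_to_boundaries(schedule_str, train_itr):
    """Maps a schedule like '4,6,10' to boundaries: [20, 50] for train_itr=100."""
    parts = [float(p) for p in schedule_str.split(',')]
    total = sum(parts)
    boundaries, acc = [], 0.
    for p in parts[:-1]:  # the last phase runs until train_itr
        acc += p / total
        boundaries.append(int(round(acc * train_itr)))
    return boundaries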