# NOTE(review): fragment — the matching `if` branch (and the call this `)` closes)
# is above this chunk; the trailing `kwargs` dict is continued below it.
)
else:
    # include_classes arrives as an underscore-joined string; split into a list.
    hparams.include_classes = hparams.include_classes.split('_')
#####################################################################################
# Instantiate model
# Deterministic cuDNN for reproducible runs (may reduce throughput).
torch.backends.cudnn.deterministic = True
print("==> creating model FUSION '{}' '{}'".format(hparams.arch, hparams.rnn_model))
model = FusionModel(hparams)
#####################################################################################
print('Logging to: % s' % hparams.logging_dir)
# Experiment name encodes dataset dir, backbone arch, trainable flag, and RNN type.
logger = TensorBoardLogger(hparams.logging_dir, name='%s/%s_%s_%s' % (hparams.datadir.split('/')[-1], hparams.arch, hparams.trainable_base, hparams.rnn_model))
logger.log_hyperparams(hparams)  # Log the hyperparameters
# Set default device
# torch.cuda.set_device(hparams.gpu)
# Keep the 3 best checkpoints by validation accuracy under the logger's run dir.
checkpoint_callback = ModelCheckpoint(filepath=os.path.join(
    logger.log_dir, 'checkpoints'), save_top_k=3, verbose=True, monitor='val_acc', mode='max', prefix='')
# Trainer keyword arguments — dict literal continues past this chunk.
kwargs = {
    'gpus': [hparams.gpu],
    'logger': logger,
    'check_val_every_n_epoch': 1,
def test_tensorboard_log_hyperparams(tmpdir):
    """Smoke-test that TensorBoardLogger accepts a plain dict of primitive hparams.

    Covers one value of each supported primitive type (float, int, str, bool);
    passes if ``log_hyperparams`` raises nothing.
    """
    params = {
        "float": 0.3,
        "int": 1,
        "string": "abc",
        "bool": True,
    }
    tb_logger = TensorBoardLogger(tmpdir)
    tb_logger.log_hyperparams(params)
# Build a unique identifier and output directory for this experiment run.
run_ID = generate_run_ID(options)
print('Run:', run_ID)
options.run_ID = run_ID
run_directory = "./experiments/"+run_ID+'/'
# Dispatch table: configured RNN type name -> model class; instantiate the choice.
models = {'RNN': VanillaRNN, 'LSTM': LSTM, 'IRNN': IRNN, 'FixedIRNN': FixedIRNN, 'SIREN':SirenModel}
model = models[options.RNN_type](options)
# we set version to 0 to keep adding to the same experiment log
logger = TensorBoardLogger('./logs/', name=run_ID, version=0)
# hparams == options
logger.log_hyperparams(options)
# Save checkpoints named by epoch and val_loss; keep the lowest-val_loss one.
checkpoint_callback = ModelCheckpoint(
    filepath= run_directory+'{epoch}-{val_loss:.2f}',
    verbose=True,
    monitor='val_loss',
    mode='min'
)
# Trainer config
# NOTE(review): these bare names are presumably consumed by a Trainer(...) call
# below this chunk — confirm before renaming or removing any of them.
gpus = 1
num_nodes=1
nb_sanity_val_steps=1
track_grad_norm=2
log_gpu_memory=True