test_name = test_name + "_data-" + dataset_name + "_nFold-" + str(n_folds) + "_lr-" + \ str(lr) +"_drop_prob-"+str(drop_prob)+"_weight-decay-"+ str(weight_decay)+ \ "_batchSize-" + str(batch_size) + "_nHidden-" + str(n_units) + \ "_output-" + str(output) + "_maxK-" + str(max_k) training_log_dir = os.path.join("./test_log/", test_name) if not os.path.exists(training_log_dir): os.makedirs(training_log_dir) printParOnFile(test_name=test_name, log_dir=training_log_dir, par_list={"dataset_name": dataset_name, "n_fold": n_folds, "learning_rate": lr, "drop_prob": drop_prob, "weight_decay": weight_decay, "batch_size": batch_size, "n_hidden": n_units, "test_epoch": test_epoch, "output": output, "max_k": max_k}) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') criterion = torch.nn.NLLLoss() dataset_cv_splits = getcross_validation_split(dataset_path, dataset_name, n_folds, batch_size) for split_id, split in enumerate(dataset_cv_splits): loader_train = split[0] loader_test = split[1] loader_valid = split[2]
"_som_grid-" + str(som_grids_dim[0]) + "_" + str(som_grids_dim[1]) + \ "_som_lr-" + str(som_lr) training_log_dir = os.path.join("./test_log/", test_name) if not os.path.exists(training_log_dir): os.makedirs(training_log_dir) printParOnFile(test_name=test_name, log_dir=training_log_dir, par_list={ "dataset_name": dataset_name, "n_fold": n_folds, "learning_rate_conv": lr_conv, "learning_rate_som": som_lr, "learning_rate_read_out": lr_readout, "learning_rate_fine_tuning": lr_fine_tuning, "drop_prob": drop_prob, "weight_decay": weight_decay, "batch_size": batch_size, "n_hidden": n_units, "som_grid_dims": som_grids_dim, "som_lr": som_lr, "test_epoch": test_epoch }) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') criterion = torch.nn.NLLLoss() dataset_cv_splits = getcross_validation_split(dataset_path, dataset_name, n_folds, batch_size)