import shutil
import tempfile

import catalyst.dl
import optuna
import torch
from optuna.integration import CatalystPruningCallback


def objective(trial):
    # type: (optuna.trial.Trial) -> float

    model = torch.nn.Linear(4, 1)
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters())

    # Train into a throwaway log directory so repeated trials do not collide.
    dirpath = tempfile.mkdtemp()
    runner = catalyst.dl.SupervisedRunner()
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        loaders=loaders,  # `loaders` is assumed to be defined in the enclosing scope
        logdir=dirpath,
        num_epochs=2,
        verbose=True,
        callbacks=[
            CatalystPruningCallback(trial, metric="loss"),
        ],
    )
    shutil.rmtree(dirpath)
    return 1.0
import torch
from catalyst.dl import AccuracyCallback, SupervisedRunner
from optuna.integration import CatalystPruningCallback


def objective(trial):
    logdir = "./logdir"
    num_epochs = 10

    # `Net` and `loaders` are assumed to be defined elsewhere in the script.
    model = Net(trial)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
    criterion = torch.nn.CrossEntropyLoss()

    # model training
    runner = SupervisedRunner()
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        loaders=loaders,
        logdir=logdir,
        num_epochs=num_epochs,
        verbose=True,
        callbacks=[
            AccuracyCallback(),
            # top-1 accuracy as the metric for pruning
            CatalystPruningCallback(trial, metric="accuracy01"),
        ],
    )
    return runner.state.valid_metrics["accuracy01"]
import torch
from catalyst.dl import AccuracyCallback, SupervisedRunner
from optuna.integration import CatalystPruningCallback


def objective(trial):
    logdir = "./logdir"
    num_epochs = 10

    # `define_model` and `loaders` are assumed to be defined elsewhere in the script.
    model = define_model(trial)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
    criterion = torch.nn.CrossEntropyLoss()

    # model training
    runner = SupervisedRunner()
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        loaders=loaders,
        logdir=logdir,
        num_epochs=num_epochs,
        verbose=True,
        callbacks={
            # top-1 accuracy on the validation loader as the metric for pruning
            "optuna": CatalystPruningCallback(
                loader_key="valid",
                metric_key="accuracy01",
                minimize=False,
                trial=trial,
            ),
            "accuracy": AccuracyCallback(
                input_key="logits",
                target_key="targets",
                num_classes=10,
            ),
        },
    )
    return runner.callbacks["optuna"].best_score
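None of the objectives above show how a study is actually launched. Below is a minimal driver sketch for an objective like the one above that returns top-1 accuracy; the MedianPruner and the trial count are illustrative assumptions, not part of the original examples.

import optuna

# Pruning only takes effect when the study is created with a pruner:
# CatalystPruningCallback reports intermediate values to the trial, and the
# pruner decides when to stop an unpromising run early.
study = optuna.create_study(
    direction="maximize",  # the objective returns an accuracy to maximize
    pruner=optuna.pruners.MedianPruner(),
)
study.optimize(objective, n_trials=100)
print(study.best_trial.params)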
import optuna
import pytest
from optuna.integration import CatalystPruningCallback


def test_catalyst_pruning_callback_experimental_warning() -> None:
    with pytest.warns(optuna.exceptions.ExperimentalWarning):
        CatalystPruningCallback(None)  # type: ignore
from collections import OrderedDict

import numpy as np
import pandas as pd
import torch
from catalyst.dl import AccuracyCallback, SupervisedRunner
from optuna.integration import CatalystPruningCallback
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import DataLoader, TensorDataset

# `device`, `SEED`, `AttentionModel`, and `numpy_to_tensor` are assumed to be
# defined elsewhere in the original script.


def objective(trial):
    logdir = "/clusterdata/uqyzha77/Log/vic/"
    num_epochs = 100
    INPUT_DIM = 1
    OUTPUT_DIM = 5
    BATCH_SIZE = 64  # change here for multi-GPU training (16 * 4 = 64)
    num_classes = 5
    num_gpu = 1
    lr = trial.suggest_loguniform("lr", 1e-3, 1e-1)

    # Generate the dataloaders: the "VI_values" column stores a 276-value
    # series as a bracketed string, which is parsed into float columns.
    data_path = "/afm02/Q2/Q2067/MoDS/Dabang_Sheng/Data/VIC_ready2use150000_yz_filtered80210.csv"
    df_all = pd.read_csv(data_path)
    labels = df_all.iloc[:, 4].copy()
    columns_name = list(range(0, 276))
    df2 = pd.DataFrame(
        df_all["VI_values"].str.slice(1, -1).str.split().values.tolist(),
        columns=columns_name,
        dtype=float,
    )
    X = df2
    y = labels

    # Encode the string labels as integer class indices.
    le = LabelEncoder()
    le.fit(y)
    print(le.classes_)
    class_names = le.classes_
    y = le.transform(y)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=SEED, stratify=y
    )
    X_train_resampled, y_train_resampled = X_train, y_train

    # Inverse-frequency class weights; class index 2 is down-weighted further
    # by hand. Note that `class_weights` is built but never passed to the loss.
    unique_elements, counts_elements = np.unique(y_train, return_counts=True)
    weights = [1 / i for i in counts_elements]
    weights[2] = weights[2] / 15
    print(np.asarray((unique_elements, counts_elements)))
    print(weights)
    samples_weight = np.array([weights[t] for t in y_train])
    samples_weights = torch.FloatTensor(samples_weight).to(device)
    class_weights = torch.FloatTensor(weights).to(device)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(
        samples_weights, len(X_train_resampled), replacement=True
    )

    # Prepare the PyTorch datasets.
    X_train_tensor = numpy_to_tensor(X_train_resampled.to_numpy(), torch.FloatTensor)
    y_train_tensor = numpy_to_tensor(y_train_resampled, torch.long)
    X_test_tensor = numpy_to_tensor(X_test.to_numpy(), torch.FloatTensor)
    y_test_tensor = numpy_to_tensor(y_test, torch.long)
    X_train_tensor = torch.unsqueeze(X_train_tensor, 2)
    X_test_tensor = torch.unsqueeze(X_test_tensor, 2)
    train_ds = TensorDataset(X_train_tensor, y_train_tensor)
    valid_ds = TensorDataset(X_test_tensor, y_test_tensor)
    train_dl = DataLoader(
        train_ds, batch_size=BATCH_SIZE, sampler=sampler, drop_last=True, num_workers=0
    )
    valid_dl = DataLoader(
        valid_ds, batch_size=BATCH_SIZE, shuffle=False, drop_last=True, num_workers=0
    )

    # Catalyst loaders
    loaders = OrderedDict()
    loaders["train"] = train_dl
    loaders["valid"] = valid_dl

    # model
    model = AttentionModel(trial, BATCH_SIZE // num_gpu, INPUT_DIM, OUTPUT_DIM).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [20, 40, 60])
    criterion = torch.nn.CrossEntropyLoss()

    # model training
    runner = SupervisedRunner()
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        logdir=logdir,
        num_epochs=num_epochs,
        verbose=True,
        callbacks=[
            AccuracyCallback(num_classes=num_classes),
            # top-1 accuracy as the metric for pruning
            CatalystPruningCallback(trial, metric="accuracy01"),
        ],
    )
    return runner.state.valid_metrics["accuracy01"]
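The example above depends on a `numpy_to_tensor` helper whose definition is not included in the snippet. Here is a minimal sketch of what it plausibly does, reconstructed from its call sites; this is an assumption, not the original definition.

import numpy as np
import torch

# Hypothetical reconstruction of the helper used above: convert a NumPy array
# to a torch tensor and cast it to the requested type. `.type()` accepts both
# a tensor class such as torch.FloatTensor and a dtype such as torch.long,
# which matches how the helper is called in the example.
def numpy_to_tensor(array, tensor_type):
    return torch.as_tensor(np.asarray(array)).type(tensor_type)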