def create_model(input_len, n_neurons1, n_neurons2, n_neurons3):
    """Build the two-level bag model used by the cross-validation grid search.

    The model is a stack of two ``mil.BagModel`` layers: the first maps each
    instance of ``input_len`` features through a 2-layer ReLU MLP and mean-pools
    it into a bag embedding; the second maps that embedding through one more
    ReLU layer, mean-pools again, and projects to a single Tanh-squashed score.

    NOTE(review): relies on the module-level ``device`` global and the project
    ``mil`` module being in scope — confirm both are defined before calling.

    Args:
        input_len: Number of features per instance.
        n_neurons1: Width of the first hidden layer (pre-aggregation).
        n_neurons2: Width of the second hidden layer / bag-embedding size.
        n_neurons3: Width of the hidden layer after the first aggregation.

    Returns:
        A ``torch.nn.Sequential`` model in double precision. The loss function
        and optimizer are deliberately NOT created here — the caller constructs
        them (it needs a fresh optimizer bound to this model's parameters for
        each grid-search configuration).
    """
    # Instance-level network: applied to each instance before the first
    # mean-pooling aggregation.
    prepNN1 = torch.nn.Sequential(
        torch.nn.Linear(input_len, n_neurons1, bias=True),
        torch.nn.ReLU(),
        torch.nn.Linear(n_neurons1, n_neurons2, bias=True),
        torch.nn.ReLU(),
    )
    # No post-aggregation transform at the first level.
    afterNN1 = torch.nn.Sequential(torch.nn.Identity())

    # Bag-level network: applied before the second aggregation.
    prepNN2 = torch.nn.Sequential(
        torch.nn.Linear(n_neurons2, n_neurons3, bias=True),
        torch.nn.ReLU(),
    )
    # Final projection to a single score in (-1, 1).
    afterNN2 = torch.nn.Sequential(
        torch.nn.Linear(n_neurons3, 1),
        torch.nn.Tanh(),
    )

    model = torch.nn.Sequential(
        mil.BagModel(prepNN1, afterNN1, torch.mean, device),
        mil.BagModel(prepNN2, afterNN2, torch.mean, device),
    ).double()
    return model
for n_neurons3 in n_neurons3_grid: for learning_rate in learning_rate_grid: for weight_decay in weight_decay_grid: config['n_neurons1'] = n_neurons1 config['n_neurons2'] = n_neurons2 config['n_neurons3'] = n_neurons3 config['learning_rate'] = learning_rate config['weight_decay'] = weight_decay print('INFO: Running cross validation with config:\n{}'. format(config)) # --- MODEL --- model = create_model(len(dataset.data[0]), n_neurons1, n_neurons2, n_neurons3) criterion = mil.MyHingeLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay) # Move model to gpu if available model = model.to(device) avg_loss = train_utils.k_fold_cv( model=model, fit_fn=train_utils.train_model, criterion=criterion, optimizer=optimizer, dataset=dataset, train_indices=train_indices, epochs=epochs,