import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST

# `Model`, `autoencoder`, `ConfigManger`, `MNISTTrainer`, and `DATA_PATH` are
# project-level names assumed to be in scope; their import paths are not
# shown in this fragment.


class gradient_difference_loss(nn.Module):
    # MSE plus a gradient-difference (GDL) penalty, weighted by `gdl_weight`.
    # Only the final `return` of `forward` is given by the fragment; the
    # finite-difference form of `gdl_loss` below is one standard GDL
    # formulation and is an assumption, the project may define it differently.
    def __init__(self, gdl_weight):
        super().__init__()
        self.gdl_weight = gdl_weight
        self.mse = nn.MSELoss()

    def forward(self, pred, gt):
        # horizontal and vertical image gradients (inputs assumed NCHW)
        dx_pred, dx_gt = pred[..., :, 1:] - pred[..., :, :-1], gt[..., :, 1:] - gt[..., :, :-1]
        dy_pred, dy_gt = pred[..., 1:, :] - pred[..., :-1, :], gt[..., 1:, :] - gt[..., :-1, :]
        gdl_loss = (dx_pred - dx_gt).abs().mean() + (dy_pred - dy_gt).abs().mean()
        return self.mse(pred, gt) + self.gdl_weight * gdl_loss


img_transform = transforms.Compose(
    [
        transforms.ToTensor(),
        # transforms.Normalize((0.5), (0.5))
    ]
)
dataset = MNIST(DATA_PATH, transform=img_transform)
dataloader = DataLoader(dataset, batch_size=128, shuffle=True)

model = Model()
model.torchnet = autoencoder()
model.optimizer = torch.optim.Adam(
    model.torchnet.parameters(), lr=1e-3, weight_decay=1e-5
)

config = ConfigManger().parsed_args
if config["loss"] == "mse":
    criterion = nn.MSELoss()
elif config["loss"] == "gdl":
    criterion = gradient_difference_loss(config["weight"])
else:
    raise ValueError(f"unknown loss: {config['loss']}")

trainer = MNISTTrainer(
    model=model,
    train_loader=dataloader,
    val_loader=dataloader,
    criterion=criterion,  # use the loss selected from the config
    device="cuda",
    **config["Trainer"],
)
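# The fragment below calls `get_prior_from_dataset` without defining it. A
# minimal sketch of what such a helper could look like, assuming the dataset
# yields (input, integer-label) pairs; the project's real implementation may
# differ (e.g. it may read a cached `targets` attribute instead).
import torch


def get_prior_from_dataset(dataset):
    # count label occurrences and normalise into an empirical class prior
    labels = torch.as_tensor([int(label) for _, label in dataset])
    counts = torch.bincount(labels).float()
    return counts / counts.sum()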
prior = get_prior_from_dataset(unlabeled_loader.dataset)
print("prior for unlabeled dataset", prior)

# network part
import warnings

fix_all_seed(int(config.get("Seed", 0)))

with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    net = SimpleNet(1, len(unlabeled_class_sample_nums))

optim = RAdam(net.parameters(), lr=1e-4, weight_decay=1e-4)
scheduler = MultiStepLR(optim, milestones=[50, 80], gamma=0.2)

model = Model()
model.torchnet = net
model.optimizer = optim
model.scheduler = scheduler

# trainer part
Trainer = {
    "SemiTrainer": SemiTrainer,
    "SemiEntropyTrainer": SemiEntropyTrainer,
    "SemiPrimalDualTrainer": SemiPrimalDualTrainer,
    "SemiWeightedIICTrainer": SemiWeightedIICTrainer,
    "SemiUDATrainer": SemiUDATrainer,
}.get(config["Trainer"]["name"])
assert Trainer
trainer = Trainer(
    model,
    labeled_loader,
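# A sketch of what `fix_all_seed` (called above) is assumed to do: seed every
# RNG the run touches so results are reproducible. The project's helper may
# additionally toggle cudnn determinism flags.
import random

import numpy as np
import torch


def fix_all_seed(seed: int) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)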