def __init__(self, dataset, conf, unlabeled_indices=None):
    super().__init__(dataset, conf, unlabeled_indices)
    # Auxiliary loss-prediction network attached to the backbone's
    # intermediate feature maps.
    self.lossnet = lossnet.LossNet(
        feature_sizes=conf["lossnet"]["feature_sizes"],
        num_channels=conf["lossnet"]["num_channels"])
    # Build the LossNet optimizer from the config by name.
    self.lossnet_optimizer = getattr(
        optimUtils, self.conf["lossnet"]["optimizer"]["name"])(
            self.lossnet.parameters(), conf["lossnet"]["optimizer"])
    self.lossnet.to(self.device)
    # Sampler that queries the unlabeled pool by predicted loss.
    self.sampler = LossPredictionSampler(self.budget, self.model,
                                         self.lossnet, self.device)
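For context, here is a minimal sketch of what the query step of such a sampler can look like: score every unlabeled sample with the LossNet and keep the `budget` samples with the highest predicted loss. The helper name `select_by_predicted_loss` and the assumption that the backbone returns `(logits, features)` are illustrative only, not this repo's actual `LossPredictionSampler` API.

import torch

def select_by_predicted_loss(model, lossnet, pool_loader, budget, device):
    """Rank the unlabeled pool by LossNet's predicted loss and return the
    indices (into the pool) of the `budget` hardest-looking samples."""
    model.eval()
    lossnet.eval()
    scores = []
    with torch.no_grad():
        for inputs, _ in pool_loader:
            inputs = inputs.to(device)
            # Assumption: the backbone returns (logits, intermediate features)
            # and LossNet maps those features to one scalar loss estimate each.
            _, features = model(inputs)
            scores.append(lossnet(features).view(-1).cpu())
    scores = torch.cat(scores)
    # Higher predicted loss == more informative under the loss-prediction criterion.
    return torch.argsort(scores, descending=True)[:budget]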
labeled_set = indices[:ADDENDUM]    # ADDENDUM == 1000: initial labeled pool size;
                                    # each cycle labels ADDENDUM more samples
unlabeled_set = indices[ADDENDUM:]

train_loader = DataLoader(
    cifar10_train,
    batch_size=BATCH,  # BATCH == 128; worth tuning
    sampler=SubsetRandomSampler(labeled_set),  # train only on the currently labeled indices
    pin_memory=True)  # pinned host memory speeds up CPU-to-GPU transfers
test_loader = DataLoader(cifar10_test, batch_size=BATCH)
dataloaders = {'train': train_loader, 'test': test_loader}

# Model
resnet18 = resnet.ResNet18(num_classes=10).cuda()  # ResNet-18 backbone; a deeper ResNet could be substituted
loss_module = lossnet.LossNet().cuda()
models = {'backbone': resnet18, 'module': loss_module}
torch.backends.cudnn.benchmark = False  # if True, cuDNN benchmarks multiple convolution
                                        # algorithms and selects the fastest

# Active learning cycles
for cycle in range(CYCLES):  # CYCLES == 10
    # Loss, criterion and scheduler (re)initialization
    criterion = nn.CrossEntropyLoss(reduction='none')  # per-sample losses, used as loss-prediction targets
    optim_backbone = optim.SGD(
        models['backbone'].parameters(),
        lr=LR,  # LR == 0.1; Adam could be tried as an alternative
        momentum=MOMENTUM,
        weight_decay=WDECAY)
    optim_module = optim.SGD(
        models['module'].parameters(),
        lr=LR,
        momentum=MOMENTUM,
        weight_decay=WDECAY)
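Because `criterion` uses `reduction='none'`, it yields one cross-entropy value per sample, and those per-sample losses are what the loss module learns to predict. In the original "Learning Loss for Active Learning" paper (Yoo & Kweon, 2019) the module is trained with a pairwise margin ranking loss rather than direct regression; the sketch below follows the paper's formulation (margin 1.0), though the exact implementation in this repo may differ.

import torch

def loss_pred_loss(pred_loss, target_loss, margin=1.0):
    """Pairwise ranking loss: penalize the module when it mis-orders which
    sample of a pair (i, i + B/2) has the larger true loss."""
    batch = pred_loss.size(0)
    assert batch % 2 == 0, "batch size must be even to form pairs"
    target_loss = target_loss.detach()  # targets are constants, no gradient
    pred_diff = pred_loss[:batch // 2] - pred_loss[batch // 2:]
    sign = torch.sign(target_loss[:batch // 2] - target_loss[batch // 2:])
    # Hinge on the ordering: max(0, margin - sign * (l_hat_i - l_hat_j)).
    return torch.clamp(margin - sign * pred_diff, min=0).mean()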