class ActiveLearning(torch.nn.Module):
    def __init__(self, exp_dict):
        super().__init__()
        self.backbone = models.vgg16(
            pretrained=exp_dict["imagenet_pretraining"], progress=True)
        # Replace the classifier head to match the number of classes.
        num_ftrs = self.backbone.classifier[-1].in_features
        self.backbone.classifier[-1] = torch.nn.Linear(
            num_ftrs, exp_dict["num_classes"])
        # Convert Dropout layers to MC-Dropout so they stay active at inference.
        self.backbone = patch_module(self.backbone)
        # Keep the initial weights so each active-learning step retrains from scratch.
        self.initial_weights = deepcopy(self.backbone.state_dict())
        self.backbone.cuda()

        self.batch_size = exp_dict['batch_size']
        self.calibrate = exp_dict.get('calibrate', False)
        self.learning_epoch = exp_dict['learning_epoch']
        self.optimizer = torch.optim.SGD(self.backbone.parameters(),
                                         lr=exp_dict['lr'],
                                         weight_decay=5e-4,
                                         momentum=0.9,
                                         nesterov=True)
        self.criterion = CrossEntropyLoss()

        shuffle_prop = exp_dict.get('shuffle_prop', 0.0)
        max_sample = -1
        self.heuristic = get_heuristic(exp_dict['heuristic'],
                                       shuffle_prop=shuffle_prop)
        self.wrapper = ModelWrapper(self.backbone, criterion=self.criterion)
        self.wrapper.add_metric(
            'cls_report', lambda: ClassificationReport(exp_dict["num_classes"]))
        self.wrapper.add_metric('accuracy', lambda: Accuracy())
        # The dataset is attached lazily in train_on_loader, hence `None` here.
        self.loop = ActiveLearningLoop(None,
                                       self.wrapper.predict_on_dataset,
                                       heuristic=self.heuristic,
                                       ndata_to_label=exp_dict['query_size'],
                                       batch_size=self.batch_size,
                                       iterations=exp_dict['iterations'],
                                       use_cuda=True,
                                       max_sample=max_sample)

        self.calib_set = get_dataset('calib', exp_dict['dataset'])
        self.valid_set = get_dataset('val', exp_dict['dataset'])
        self.calibrator = DirichletCalibrator(self.wrapper,
                                              exp_dict["num_classes"],
                                              lr=0.001,
                                              reg_factor=exp_dict['reg_factor'],
                                              mu=exp_dict['mu'])

        self.active_dataset = None
        self.active_dataset_settings = None

    def train_on_loader(self, loader: DataLoader):
        # Reset the weights before retraining on the enlarged labelled set.
        self.wrapper.load_state_dict(self.initial_weights)
        if self.active_dataset is None:
            # On the first call, grab the ActiveLearningDataset from the loader
            # and restore any previously saved labelling state.
            self.active_dataset = loader.dataset
            if self.active_dataset_settings is not None:
                self.active_dataset.load_state_dict(self.active_dataset_settings)
            self.loop.dataset = self.active_dataset
        self.criterion.train()
        self.wrapper.train_on_dataset(self.active_dataset,
                                      self.optimizer,
                                      self.batch_size,
                                      epoch=self.learning_epoch,
                                      use_cuda=True)
        metrics = self.wrapper.metrics
        return self._format_metrics(metrics, 'train')

    def val_on_loader(self, loader, savedir=None):
        val_data = loader.dataset
        # Label the next batch of samples according to the heuristic.
        self.loop.step()
        self.criterion.eval()
        # Evaluate with 20 MC-Dropout samples averaged per prediction.
        self.wrapper.test_on_dataset(val_data,
                                     batch_size=self.batch_size,
                                     use_cuda=True,
                                     average_predictions=20)
        metrics = self.wrapper.metrics
        mets = self._format_metrics(metrics, 'test')
        mets.update({'num_samples': len(self.active_dataset)})
        return mets

    def on_train_end(self, savedir, epoch):
        # Record which samples were labelled at this epoch for later analysis.
        h5_path = pjoin(savedir, 'ckpt.h5')
        labelled = self.active_dataset.state_dict()['labelled']
        with h5py.File(h5_path, 'a') as f:
            if f'epoch_{epoch}' not in f:
                g = f.create_group(f'epoch_{epoch}')
                g.create_dataset('labelled', data=labelled.astype(bool))

    def _format_metrics(self, metrics, step):
        mets = {k: v.value for k, v in metrics.items() if step in k}
        mets_unpacked = {}
        for k, v in mets.items():
            if isinstance(v, float):
                mets_unpacked[k] = v
            elif isinstance(v, np.ndarray):
                mets_unpacked[k] = v.mean()
            else:
                mets_unpacked.update(
                    {f"{k}_{ki}": np.mean(vi) for ki, vi in v.items()})
        return mets_unpacked

    def get_state_dict(self):
        state = {}
        state["model"] = self.backbone.state_dict()
        state["optimizer"] = self.optimizer.state_dict()
        if self.active_dataset is None:
            state['dataset'] = None
        else:
            state["dataset"] = self.active_dataset.state_dict()
        return state

    def set_state_dict(self, state_dict):
        self.backbone.load_state_dict(state_dict["model"])
        self.optimizer.load_state_dict(state_dict["optimizer"])
        self.active_dataset_settings = state_dict["dataset"]
        if self.active_dataset is not None:
            self.active_dataset.load_state_dict(self.active_dataset_settings)
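# Usage sketch (illustrative, not part of the original file): a minimal,
# hypothetical `exp_dict` listing the configuration keys the class reads above.
# The values are assumptions chosen only to show the shape of the config,
# not the settings used in the actual experiments.
example_exp_dict = {
    "imagenet_pretraining": True,
    "num_classes": 10,
    "batch_size": 32,
    "calibrate": False,
    "learning_epoch": 10,
    "lr": 0.001,
    "shuffle_prop": 0.0,
    "heuristic": "bald",
    "query_size": 100,
    "iterations": 20,        # MC-Dropout samples per prediction
    "dataset": "cifar10",    # assumed name understood by get_dataset
    "reg_factor": 0.01,
    "mu": 0.01,
}
# model = ActiveLearning(example_exp_dict)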
    # KWARGS for predict_on_dataset
    iterations=20,  # 20 MC-Dropout samples per prediction
    batch_size=32,
    use_cuda=use_cuda,
    verbose=False,
)

# Following Gal 2016, we reset the weights at the beginning of each step.
initial_weights = deepcopy(model.state_dict())

for step in range(100):
    model.load_state_dict(initial_weights)
    train_loss = wrapper.train_on_dataset(al_dataset,
                                          optimizer=optimizer,
                                          batch_size=32,
                                          epoch=10,
                                          use_cuda=use_cuda)
    test_loss = wrapper.test_on_dataset(test_ds,
                                        batch_size=32,
                                        use_cuda=use_cuda)
    pprint({
        "dataset_size": len(al_dataset),
        "train_loss": wrapper.metrics["train_loss"].value,
        "test_loss": wrapper.metrics["test_loss"].value,
    })
    flag = al_loop.step()
    if not flag:
        # We are done labelling! Stop the loop.
        break
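# Context sketch (assumed, not shown in the excerpt above): the keyword
# arguments listed before the loop are forwarded to `wrapper.predict_on_dataset`
# by an ActiveLearningLoop. A minimal reconstruction of the missing setup,
# assuming the same baal API used throughout this document, might look like:
import torch
from copy import deepcopy
from pprint import pprint
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.models import vgg16

from baal.active import ActiveLearningDataset
from baal.active.active_loop import ActiveLearningLoop
from baal.active.heuristics import BALD
from baal.bayesian.dropout import patch_module
from baal.modelwrapper import ModelWrapper

use_cuda = torch.cuda.is_available()
transform = transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor()])
train_ds = CIFAR10(".", train=True, download=True, transform=transform)
test_ds = CIFAR10(".", train=False, download=True, transform=transform)

al_dataset = ActiveLearningDataset(train_ds)
al_dataset.label_randomly(100)                 # seed the initial labelled pool

model = patch_module(vgg16(num_classes=10))    # Dropout -> MC-Dropout
if use_cuda:
    model = model.cuda()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
wrapper = ModelWrapper(model, criterion)

al_loop = ActiveLearningLoop(
    al_dataset,
    wrapper.predict_on_dataset,
    heuristic=BALD(),
    query_size=50,       # called `ndata_to_label` in older baal releases
    # KWARGS for predict_on_dataset
    iterations=20,
    batch_size=32,
    use_cuda=use_cuda,
    verbose=False,
)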
def main():
    args = parse_args()
    use_cuda = torch.cuda.is_available()
    torch.backends.cudnn.benchmark = True
    random.seed(1337)
    torch.manual_seed(1337)
    if not use_cuda:
        print("Warning: running this experiment on CPU will be extremely slow.")

    hyperparams = vars(args)
    active_set, test_set = get_datasets(hyperparams["initial_pool"])

    heuristic = get_heuristic(hyperparams["heuristic"],
                              hyperparams["shuffle_prop"])
    criterion = CrossEntropyLoss()

    # Load ImageNet weights, dropping the final classifier layer whose shape
    # does not match the 10-class head.
    model = vgg16(pretrained=False, num_classes=10)
    weights = load_state_dict_from_url(
        "https://download.pytorch.org/models/vgg16-397923af.pth")
    weights = {k: v for k, v in weights.items() if "classifier.6" not in k}
    model.load_state_dict(weights, strict=False)

    # Change Dropout layers to MCDropout.
    model = patch_module(model)
    if use_cuda:
        model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=hyperparams["lr"], momentum=0.9)

    # Wrap the model into a usable API.
    model = ModelWrapper(model, criterion)

    logs = {}
    logs["epoch"] = 0

    # For prediction we use a smaller batch size since MC-Dropout predictions
    # are slower.
    active_loop = ActiveLearningLoop(
        active_set,
        model.predict_on_dataset,
        heuristic,
        hyperparams.get("query_size", 1),
        batch_size=10,
        iterations=hyperparams["iterations"],
        use_cuda=use_cuda,
    )

    # We will reset the weights at each active learning step.
    init_weights = deepcopy(model.state_dict())

    for epoch in tqdm(range(args.epoch)):
        # Load the initial weights.
        model.load_state_dict(init_weights)
        model.train_on_dataset(
            active_set,
            optimizer,
            hyperparams["batch_size"],
            hyperparams["learning_epoch"],
            use_cuda,
        )

        # Validation!
        model.test_on_dataset(test_set, hyperparams["batch_size"], use_cuda)
        metrics = model.metrics

        should_continue = active_loop.step()
        if not should_continue:
            break

        val_loss = metrics["test_loss"].value
        logs = {
            "val": val_loss,
            "epoch": epoch,
            "train": metrics["train_loss"].value,
            "labeled_data": active_set.labelled,
            "Next Training set size": len(active_set),
        }
        print(logs)
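# Sketch of the `get_datasets` helper called in main() (assumed; the original
# implementation is not shown in this excerpt). main() expects it to return an
# ActiveLearningDataset with `initial_pool` samples already labelled, plus a
# held-out test set. One plausible CIFAR-10 version using baal's API:
from torchvision import transforms
from torchvision.datasets import CIFAR10

from baal.active import ActiveLearningDataset


def get_datasets(initial_pool):
    transform = transforms.Compose([
        transforms.Resize((224, 224)),   # VGG16 expects 224x224 inputs
        transforms.ToTensor(),
        transforms.Normalize(3 * [0.5], 3 * [0.5]),
    ])
    train_ds = CIFAR10(".", train=True, download=True, transform=transform)
    test_ds = CIFAR10(".", train=False, download=True, transform=transform)

    # Wrap the training set so baal can track which indices are labelled
    # and expose the rest as the unlabelled pool.
    active_set = ActiveLearningDataset(train_ds)
    active_set.label_randomly(initial_pool)
    return active_set, test_ds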
    use_cuda=use_cuda,
    verbose=False,
    workers=0,
)

# Following Gal 2016, we reset the weights at the beginning of each step.
initial_weights = deepcopy(model.state_dict())

for step in range(1000):
    model.load_state_dict(initial_weights)
    train_loss = wrapper.train_on_dataset(al_dataset,
                                          optimizer=optimizer,
                                          batch_size=16,
                                          epoch=1000,
                                          use_cuda=use_cuda,
                                          workers=0)
    test_loss = wrapper.test_on_dataset(test_ds,
                                        batch_size=16,
                                        use_cuda=use_cuda,
                                        workers=0)
    pprint({
        "dataset_size": len(al_dataset),
        "train_loss": wrapper.metrics["train_loss"].value,
        "test_loss": wrapper.metrics["test_loss"].value,
    })
    flag = al_loop.step()
    if not flag:
        # We are done labelling! Stop the loop.
        break