def __init__(self, exp_dict):
    super().__init__()
    self.backbone = models.vgg16(
        pretrained=exp_dict["imagenet_pretraining"], progress=True)
    num_ftrs = self.backbone.classifier[-1].in_features
    self.backbone.classifier[-1] = torch.nn.Linear(num_ftrs,
                                                   exp_dict["num_classes"])
    # Swap Dropout layers for MC-Dropout so inference stays stochastic.
    self.backbone = patch_module(self.backbone)
    self.initial_weights = deepcopy(self.backbone.state_dict())
    self.backbone.cuda()

    self.batch_size = exp_dict['batch_size']
    self.calibrate = exp_dict.get('calibrate', False)
    self.learning_epoch = exp_dict['learning_epoch']
    self.optimizer = torch.optim.SGD(self.backbone.parameters(),
                                     lr=exp_dict['lr'],
                                     weight_decay=5e-4,
                                     momentum=0.9,
                                     nesterov=True)
    self.criterion = CrossEntropyLoss()

    shuffle_prop = exp_dict.get('shuffle_prop', 0.0)
    max_sample = -1
    self.heuristic = get_heuristic(exp_dict['heuristic'],
                                   shuffle_prop=shuffle_prop)

    self.wrapper = ModelWrapper(self.backbone, criterion=self.criterion)
    self.wrapper.add_metric(
        'cls_report', lambda: ClassificationReport(exp_dict["num_classes"]))
    self.wrapper.add_metric('accuracy', lambda: Accuracy())

    # The loop is built without a dataset; the active dataset is attached later.
    self.loop = ActiveLearningLoop(None,
                                   self.wrapper.predict_on_dataset,
                                   heuristic=self.heuristic,
                                   ndata_to_label=exp_dict['query_size'],
                                   batch_size=self.batch_size,
                                   iterations=exp_dict['iterations'],
                                   use_cuda=True,
                                   max_sample=max_sample)

    self.calib_set = get_dataset('calib', exp_dict['dataset'])
    self.valid_set = get_dataset('val', exp_dict['dataset'])
    self.calibrator = DirichletCalibrator(
        self.wrapper, exp_dict["num_classes"], lr=0.001,
        reg_factor=exp_dict['reg_factor'], mu=exp_dict['mu'])
    self.active_dataset = None
    self.active_dataset_settings = None
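# A minimal sketch of how these pieces could be driven once `self.active_dataset`
# is set. The method name `run_active_learning` is illustrative (not part of the
# original class), and we assume ActiveLearningLoop exposes its pool through a
# `.dataset` attribute.
def run_active_learning(self):
    while True:
        # Retrain from scratch on the current labelled pool.
        self.backbone.load_state_dict(self.initial_weights)
        self.wrapper.train_on_dataset(self.active_dataset, self.optimizer,
                                      self.batch_size, self.learning_epoch,
                                      use_cuda=True)
        self.wrapper.test_on_dataset(self.valid_set, self.batch_size,
                                     use_cuda=True)
        # Label the `query_size` most uncertain examples; stop once the
        # unlabelled pool is exhausted.
        self.loop.dataset = self.active_dataset
        if not self.loop.step():
            break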
def _build_model(self):
    # We use `patch_module` to swap Dropout modules in the model
    # for our implementation, which enables MC-Dropout.
    self.vgg16 = patch_module(vgg16(num_classes=self.hparams.num_classes))
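# What `patch_module` buys us, in isolation: it swaps nn.Dropout for a variant
# that stays active in eval mode, so repeated forward passes sample different
# masks. A minimal sketch (the toy network here is illustrative):
import torch
from torch import nn
from baal.bayesian.dropout import patch_module

net = patch_module(nn.Sequential(nn.Linear(128, 128), nn.Dropout(p=0.5)))
net.eval()
x = torch.randn(1, 128)
# Two eval-mode passes disagree because the dropout mask is resampled.
assert not torch.equal(net(x), net(x))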
def main():
    args = parse_args()
    use_cuda = torch.cuda.is_available()
    torch.backends.cudnn.benchmark = True
    random.seed(1337)
    torch.manual_seed(1337)
    if not use_cuda:
        print("warning, the experiments would take ages to run on cpu")

    hyperparams = vars(args)

    active_set, test_set = get_datasets(hyperparams["initial_pool"])

    heuristic = get_heuristic(hyperparams["heuristic"],
                              hyperparams["shuffle_prop"])
    criterion = CrossEntropyLoss()
    model = vgg16(pretrained=False, num_classes=10)
    # Load ImageNet weights, dropping the final classifier layer whose shape
    # does not match our 10-class output.
    weights = load_state_dict_from_url(
        "https://download.pytorch.org/models/vgg16-397923af.pth")
    weights = {k: v for k, v in weights.items() if "classifier.6" not in k}
    model.load_state_dict(weights, strict=False)

    # change dropout layer to MCDropout
    model = patch_module(model)

    if use_cuda:
        model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=hyperparams["lr"],
                          momentum=0.9)

    # Wraps the model into a usable API.
    model = ModelWrapper(model, criterion)

    logs = {}
    logs["epoch"] = 0

    # For prediction we use a smaller batch size since MC sampling is slower.
    active_loop = ActiveLearningLoop(
        active_set,
        model.predict_on_dataset,
        heuristic,
        hyperparams.get("query_size", 1),
        batch_size=10,
        iterations=hyperparams["iterations"],
        use_cuda=use_cuda,
    )
    # We will reset the weights at each active learning step.
    init_weights = deepcopy(model.state_dict())

    for epoch in tqdm(range(args.epoch)):
        # Load the initial weights.
        model.load_state_dict(init_weights)
        model.train_on_dataset(
            active_set,
            optimizer,
            hyperparams["batch_size"],
            hyperparams["learning_epoch"],
            use_cuda,
        )

        # Validation!
        model.test_on_dataset(test_set, hyperparams["batch_size"], use_cuda)
        metrics = model.metrics

        should_continue = active_loop.step()
        if not should_continue:
            break

        val_loss = metrics["test_loss"].value
        logs = {
            "val": val_loss,
            "epoch": epoch,
            "train": metrics["train_loss"].value,
            "labeled_data": active_set.labelled,
            "Next Training set size": len(active_set),
        }
        print(logs)
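# `parse_args` is not shown above; a plausible sketch covering the
# hyperparameter keys the script reads (all defaults here are assumptions):
import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--epoch", default=100, type=int)
    parser.add_argument("--batch_size", default=32, type=int)
    parser.add_argument("--initial_pool", default=1000, type=int)
    parser.add_argument("--query_size", default=100, type=int)
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--heuristic", default="bald", type=str)
    parser.add_argument("--iterations", default=20, type=int)
    parser.add_argument("--shuffle_prop", default=0.0, type=float)
    parser.add_argument("--learning_epoch", default=20, type=int)
    return parser.parse_args()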
def main():
    args = parse_args()
    use_cuda = torch.cuda.is_available()
    torch.backends.cudnn.benchmark = True
    random.seed(1337)
    torch.manual_seed(1337)
    if not use_cuda:
        print("warning, the experiments would take ages to run on cpu")

    hyperparams = vars(args)

    heuristic = get_heuristic(hyperparams['heuristic'],
                              hyperparams['shuffle_prop'])

    model = BertForSequenceClassification.from_pretrained(
        pretrained_model_name_or_path=hyperparams["model"])
    tokenizer = BertTokenizer.from_pretrained(
        pretrained_model_name_or_path=hyperparams["model"])

    # In this example we tokenize once at the beginning since it makes the
    # whole process faster. It is also possible to pass the tokenizer to
    # the trainer instead.
    active_set, test_set = get_datasets(hyperparams['initial_pool'], tokenizer)

    # change dropout layer to MCDropout
    model = patch_module(model)

    if use_cuda:
        model.cuda()

    init_weights = deepcopy(model.state_dict())

    training_args = TrainingArguments(
        output_dir='/app/baal/results',  # output directory
        num_train_epochs=hyperparams['learning_epoch'],  # training epochs per step
        per_device_train_batch_size=16,  # batch size per device during training
        per_device_eval_batch_size=64,   # batch size for evaluation
        weight_decay=0.01,               # strength of weight decay
        logging_dir='/app/baal/logs',    # directory for storing logs
    )

    # We wrap the HuggingFace Trainer to create an active learning trainer.
    model = BaalTransformersTrainer(model=model,
                                    args=training_args,
                                    train_dataset=active_set,
                                    eval_dataset=test_set,
                                    tokenizer=None)

    logs = {}
    logs['epoch'] = 0

    # In this case, NLP data is fast to process and we do not need a smaller
    # batch size for prediction.
    active_loop = ActiveLearningLoop(active_set,
                                     model.predict_on_dataset,
                                     heuristic,
                                     hyperparams.get('n_data_to_label', 1),
                                     iterations=hyperparams['iterations'])

    for epoch in tqdm(range(args.epoch)):
        # We use the default HuggingFace training setup (e.g. epoch=1).
        # The setup is adjustable when BaalTransformersTrainer is defined.
        model.train()

        # Validation!
        eval_metrics = model.evaluate()

        # We reorder the unlabelled pool at the frequency of learning_epoch.
        # This helps with speed while not changing the quality of the
        # uncertainty estimation.
        should_continue = active_loop.step()

        # We reset the model weights to relearn from the new training set.
        model.load_state_dict(init_weights)
        model.lr_scheduler = None
        if not should_continue:
            break

        active_logs = {"epoch": epoch,
                       "labeled_data": active_set._labelled,
                       "Next Training set size": len(active_set)}
        logs = {**eval_metrics, **active_logs}
        print(logs)
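# `get_heuristic` maps a name to one of baal's uncertainty heuristics; a
# minimal equivalent sketch (the exact factory shipped with baal may differ):
from baal.active import heuristics

def get_heuristic(name, shuffle_prop=0.0):
    return {
        "bald": heuristics.BALD,
        "entropy": heuristics.Entropy,
        "random": heuristics.Random,
    }[name.lower()](shuffle_prop=shuffle_prop)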
def __init__(self, active_dataset: ActiveLearningDataset, hparams: Namespace,
             network: nn.Module):
    super().__init__(active_dataset, hparams, network)
    # Replace the network's Dropout layers with MC-Dropout variants so that
    # inference remains stochastic.
    self.network = patch_module(self.network)
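# Why patching matters here: MC-Dropout estimates uncertainty by averaging
# several stochastic forward passes. A hedged sketch (function and argument
# names are illustrative, not part of the original class):
import torch

@torch.no_grad()
def mc_predict(network, x, iterations=20):
    network.eval()  # Dropout stays active because the network was patched.
    probs = torch.stack([network(x).softmax(-1) for _ in range(iterations)])
    return probs.mean(0)  # Averaged predictive distribution.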
# Uses an ActiveLearningDataset to help us split labelled and unlabelled examples.
al_dataset = ActiveLearningDataset(
    train_ds, pool_specifics={"transform": test_transform})
al_dataset.label_randomly(200)  # Start with 200 items labelled.

# Creates an MLP to classify MNIST
model = nn.Sequential(
    nn.Flatten(),
    nn.Linear(784, 512),
    nn.Dropout(),
    nn.Linear(512, 512),
    nn.Dropout(),
    nn.Linear(512, 10),
)
model = patch_module(model)  # Set dropout layers for MC-Dropout.
if use_cuda:
    model = model.cuda()
wrapper = ModelWrapper(model=model, criterion=nn.CrossEntropyLoss())
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9,
                      weight_decay=5e-4)

# We will use BALD as our heuristic as it is a great trade-off between
# performance and efficiency.
bald = BALD()

# Setup our active learning loop for our experiments. The query size, MC
# iteration count, and prediction batch size below are assumed example values.
al_loop = ActiveLearningLoop(
    dataset=al_dataset,
    get_probabilities=wrapper.predict_on_dataset,
    heuristic=bald,
    query_size=100,   # assumption: items labelled per step
    iterations=20,    # assumption: MC-Dropout samples per prediction
    batch_size=32,    # assumption: prediction batch size
    use_cuda=use_cuda,
)
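# A hedged sketch of driving the loop above (step count, batch size, and epoch
# count are assumed values): retrain on the labelled pool, then let the loop
# label the next queries.
from copy import deepcopy

initial_weights = deepcopy(model.state_dict())
for step in range(100):
    model.load_state_dict(initial_weights)  # Reset weights between steps.
    wrapper.train_on_dataset(al_dataset, optimizer,
                             batch_size=32, epoch=10, use_cuda=use_cuda)
    # `step` labels the most uncertain examples and returns False once the
    # unlabelled pool is empty.
    if not al_loop.step():
        break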