Example #1
 def setUp(self):
     self.model = DummyModel()
     self.criterion = nn.CrossEntropyLoss()
     self.wrapper = ModelWrapper(self.model, self.criterion)
     self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
     self.dataset = DummyDataset()
     self.calibrator = DirichletCalibrator(self.wrapper, 2, lr=0.001, reg_factor=0.001)
Example #2
 def setUp(self):
     # self.model = nn.Sequential(
     #     nn.Linear(10, 8), nn.ReLU(), nn.Dropout(), nn.Linear(8, 1), nn.Sigmoid()
     # )
     self.model = DummyModel()
     self.criterion = nn.BCEWithLogitsLoss()
     self.wrapper = ModelWrapper(self.model, self.criterion)
     self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
     self.dataset = DummyDataset()
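The DummyModel and DummyDataset fixtures referenced throughout these examples are defined in the test files and not shown here (each test file has its own variant). Below is a minimal sketch of what the ModelWrapper-test fixtures could look like, inferred from the shapes used later on this page (inputs of torch.randn([2, 3, 10, 10]), outputs of shape (batch, 1), 20 items, a Conv2d first child and a Linear fourth child); treat every name and shape as an assumption rather than the library's actual fixtures.

import torch
from torch import nn
from torch.utils.data import Dataset


class DummyModel(nn.Module):
    # Tiny conv + linear network with Dropout so the MC-Dropout tests have an effect.
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=10)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        self.linear = nn.Linear(8, 1)

    def forward(self, x):
        x = self.dropout(self.relu(self.conv(x)))
        return self.linear(x.flatten(start_dim=1))


class DummyDataset(Dataset):
    # 20 fixed random samples so repeated indexing returns the same tensors.
    def __init__(self, n_in=1):
        self.n_in = n_in
        self.items = [
            ([torch.randn(3, 10, 10) for _ in range(n_in)], torch.randn(1))
            for _ in range(20)
        ]

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        xs, y = self.items[idx]
        return (xs[0] if self.n_in == 1 else xs), y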
Example #3
 def pred_with_dropout(replicate_in_memory):
     # Nested helper from a test method; `self` and `input` come from the enclosing scope (see Example #16).
     self.wrapper = ModelWrapper(
         self.model,
         self.criterion,
         replicate_in_memory=replicate_in_memory)
     self.wrapper.train()
     # Dropout makes the predictions change between runs
     preds = torch.stack([
         self.wrapper.predict_on_batch(input, iterations=1, cuda=False)
         for _ in range(10)
     ]).view(10, -1)
     assert not torch.allclose(torch.mean(preds, 0), preds[0])
Example #4
 def pred_without_dropout(replicate_in_memory):
     # Nested helper; as above, `self` and `input` come from the enclosing test method (see Example #16).
     self.wrapper = ModelWrapper(
         self.model,
         self.criterion,
         replicate_in_memory=replicate_in_memory)
     # Dropout is not active in eval
     self.wrapper.eval()
     preds = torch.stack([
         self.wrapper.predict_on_batch(input, iterations=1, cuda=False)
         for _ in range(10)
     ]).view(10, -1)
     assert torch.allclose(torch.mean(preds, 0), preds[0])
Example #5
    def setUp(self):
        class MultiOutModel(nn.Module):
            def __init__(self):
                super().__init__()
                self.model = DummyModel()

            def forward(self, x):
                return [self.model(x)] * 2

        self._crit = nn.MSELoss()
        self.criterion = lambda x, y: self._crit(x[0], y) + self._crit(x[1], y)
        self.model = MultiOutModel()
        self.wrapper = ModelWrapper(self.model, self.criterion)
        self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
        self.dataset = DummyDataset()
Example #6
class CalibrationTest(unittest.TestCase):
    def setUp(self):
        self.model = DummyModel()
        self.criterion = nn.CrossEntropyLoss()
        self.wrapper = ModelWrapper(self.model, self.criterion)
        self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
        self.dataset = DummyDataset()
        self.calibrator = DirichletCalibrator(self.wrapper,
                                              2,
                                              lr=0.001,
                                              reg_factor=0.001)

    def test_calibrated_model(self):
        assert len(list(self.calibrator.init_model.modules())) < len(
            list(self.calibrator.calibrated_model.modules()))

    def test_calibration(self):
        before_calib_param_init = list(
            map(lambda x: x.clone(), self.calibrator.init_model.parameters()))
        before_calib_param = list(
            map(lambda x: x.clone(),
                self.calibrator.calibrated_model.parameters()))

        self.calibrator.calibrate(self.dataset,
                                  self.dataset,
                                  batch_size=10,
                                  epoch=5,
                                  use_cuda=False,
                                  double_fit=False,
                                  workers=0)
        after_calib_param_init = list(
            map(lambda x: x.clone(), self.calibrator.init_model.parameters()))
        after_calib_param = list(
            map(lambda x: x.clone(),
                self.calibrator.calibrated_model.parameters()))

        assert all([
            np.allclose(i.detach(), j.detach())
            for i, j in zip(before_calib_param_init, after_calib_param_init)
        ])

        assert not all([
            np.allclose(i.detach(), j.detach())
            for i, j in zip(before_calib_param, after_calib_param)
        ])

    def test_reg_l2_called(self):
        self.calibrator.l2_reg = Mock(return_value=torch.Tensor([0]))
        self.calibrator.calibrate(self.dataset,
                                  self.dataset,
                                  batch_size=10,
                                  epoch=5,
                                  use_cuda=False,
                                  double_fit=False,
                                  workers=0)
        self.calibrator.l2_reg.assert_called()
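As a usage note: once calibrate() has run, the calibrated model can be wrapped and queried like any other model. The method below is a hypothetical addition (not part of the original suite), sketched under the assumption that the DummyModel used in this test file outputs num_classes=2 logits, which is what DirichletCalibrator(self.wrapper, 2, ...) expects; it only relies on attributes that already appear above.

    def test_predict_with_calibrated_model(self):
        # Hypothetical follow-up: wrap the calibrated model and predict with MC iterations.
        self.calibrator.calibrate(self.dataset, self.dataset, batch_size=10, epoch=1,
                                  use_cuda=False, double_fit=False, workers=0)
        calibrated_wrapper = ModelWrapper(self.calibrator.calibrated_model, self.criterion)
        calibrated_wrapper.eval()
        batch = torch.stack((self.dataset[0][0], self.dataset[1][0]))
        pred = calibrated_wrapper.predict_on_batch(batch, iterations=10, cuda=False)
        # MC iterations are stacked along the last axis: (batch, num_classes, iterations).
        assert pred.shape[0] == 2 and pred.shape[-1] == 10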
Example #7
def test_multi_input_model():
    class MultiInModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.model = DummyModel()

        def forward(self, x):
            # We get two inputs
            x1, x2 = x
            # We merge those inputs
            return self.model(x1) + self.model(x2)

    model = MultiInModel()
    wrapper = ModelWrapper(model, None)
    dataset = DummyDataset(n_in=2)
    assert len(dataset[0]) == 2
    b = next(iter(DataLoader(dataset, 15, False)))[0]
    l = wrapper.predict_on_batch(b, iterations=10, cuda=False)
    assert l.shape[0] == 15 and l.shape[-1] == 10
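For this test to pass, the default DataLoader collation has to turn items shaped like ([x1, x2], y) into a pair of batched tensors that MultiInModel can unpack in forward. A small self-contained sketch of that assumption (TwoInputDataset is an illustrative stand-in, not the actual DummyDataset(n_in=2)):

import torch
from torch.utils.data import DataLoader, Dataset


class TwoInputDataset(Dataset):
    # Illustrative stand-in: each item is ([x1, x2], target).
    def __len__(self):
        return 20

    def __getitem__(self, idx):
        return [torch.randn(3, 10, 10), torch.randn(3, 10, 10)], torch.randn(1)


batch, _ = next(iter(DataLoader(TwoInputDataset(), batch_size=15, shuffle=False)))
# The default collate function batches each element of the inner list separately,
# so `batch` is a list of two (15, 3, 10, 10) tensors.
assert len(batch) == 2 and batch[0].shape == (15, 3, 10, 10)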
Example #8
    def test_predict_on_batch(self):
        self.wrapper.eval()
        input = torch.stack((self.dataset[0][0], self.dataset[1][0]))

        # iteration == 1
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred[0].size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred[0].size() == (2, 1, 10)

        # iteration == 1
        self.wrapper = ModelWrapper(self.model,
                                    self.criterion,
                                    replicate_in_memory=False)
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred[0].size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred[0].size() == (2, 1, 10)
Example #9
    def test_predict_on_batch(self):
        self.wrapper.eval()
        input = torch.randn([2, 3, 10, 10])

        # iteration == 1
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred.size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred.size() == (2, 1, 10)

        # iteration == 1
        self.wrapper = ModelWrapper(self.model,
                                    self.criterion,
                                    replicate_in_memory=False)
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred.size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred.size() == (2, 1, 10)
Example #10
def test_calibration_integration():
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    # we don't create a separate train set for calibration since the goal is not
    # to calibrate
    al_dataset = ActiveLearningDataset(
        cifar10_train, pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)
    use_cuda = False
    model = vgg.vgg16(pretrained=False, num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=0.0005)

    wrapper = ModelWrapper(model, criterion)
    calibrator = DirichletCalibrator(wrapper=wrapper,
                                     num_classes=10,
                                     lr=0.001,
                                     reg_factor=0.01)

    for step in range(2):
        wrapper.train_on_dataset(al_dataset,
                                 optimizer=optimizer,
                                 batch_size=10,
                                 epoch=1,
                                 use_cuda=use_cuda,
                                 workers=0)

        wrapper.test_on_dataset(cifar10_test,
                                batch_size=10,
                                use_cuda=use_cuda,
                                workers=0)

        before_calib_param = list(
            map(lambda x: x.clone(), wrapper.model.parameters()))

        calibrator.calibrate(al_dataset,
                             cifar10_test,
                             batch_size=10,
                             epoch=5,
                             use_cuda=use_cuda,
                             double_fit=False,
                             workers=0)

        after_calib_param = list(map(lambda x: x.clone(), model.parameters()))

        assert all([
            np.allclose(i.detach(), j.detach())
            for i, j in zip(before_calib_param, after_calib_param)
        ])

        assert len(list(wrapper.model.modules())) < len(
            list(calibrator.calibrated_model.modules()))
Example #11
def test_integration():
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    al_dataset = ActiveLearningDataset(
        cifar10_train, pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)

    use_cuda = False
    model = vgg.vgg16(pretrained=False, num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=0.0005)

    # We can now use BaaL to create the active learning loop.

    model = ModelWrapper(model, criterion)
    # We create an ActiveLearningLoop that will automatically label the most uncertain samples.
    # In this case, we use the widely used BALD heuristic.

    active_loop = ActiveLearningLoop(al_dataset,
                                     model.predict_on_dataset,
                                     heuristic=heuristics.BALD(),
                                     ndata_to_label=10,
                                     batch_size=10,
                                     iterations=10,
                                     use_cuda=use_cuda,
                                     workers=4)

    # We're all set!
    num_steps = 10
    for step in range(num_steps):
        old_param = list(map(lambda x: x.clone(), model.model.parameters()))
        model.train_on_dataset(al_dataset,
                               optimizer=optimizer,
                               batch_size=10,
                               epoch=5,
                               use_cuda=use_cuda,
                               workers=2)
        model.test_on_dataset(cifar10_test,
                              batch_size=10,
                              use_cuda=use_cuda,
                              workers=2)

        if not active_loop.step():
            break
        new_param = list(map(lambda x: x.clone(), model.model.parameters()))
        assert any([
            not np.allclose(i.detach(), j.detach())
            for i, j in zip(old_param, new_param)
        ])
    assert step == 4  # 10 + (4 * 10) = 50, so it stops at step 4
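The integration examples on this page omit their imports. The preamble below is a plausible reconstruction based on the names they use; the exact module paths may differ between baal and torchvision versions, so treat it as an assumption rather than a verbatim copy of the original test files.

import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision.models import vgg
from torchvision.transforms import Compose, Resize, ToTensor

from baal.active import ActiveLearningDataset, ActiveLearningLoop, heuristics
from baal.calibration import DirichletCalibrator
from baal.modelwrapper import ModelWrapper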
Example #12
class CalibrationTest(unittest.TestCase):
    def setUp(self):
        self.model = DummyModel()
        self.criterion = nn.CrossEntropyLoss()
        self.wrapper = ModelWrapper(self.model, self.criterion)
        self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
        self.dataset = DummyDataset()
        self.calibrator = DirichletCalibrator(self.wrapper, 2, lr=0.001, reg_factor=0.001)

    def test_calibrated_model(self):
        # Check that a layer was added.
        assert len(list(self.wrapper.model.modules())) < len(
            list(self.calibrator.calibrated_model.modules()))

    def test_calibration(self):
        before_calib_param_init = list(
            map(lambda x: x.clone(), _get_first_module(self.calibrator.wrapper.model).parameters()))
        before_calib_param = list(
            map(lambda x: x.clone(), self.calibrator.calibrated_model.parameters()))

        self.calibrator.calibrate(self.dataset, self.dataset,
                                  batch_size=10, epoch=5,
                                  use_cuda=False,
                                  double_fit=False, workers=0)
        after_calib_param_init = list(
            map(lambda x: x.clone(), _get_first_module(self.calibrator.wrapper.model).parameters()))
        after_calib_param = list(
            map(lambda x: x.clone(), self.calibrator.calibrated_model.parameters()))

        assert all([np.allclose(i.detach(), j.detach())
                    for i, j in zip(before_calib_param_init, after_calib_param_init)])

        assert not all([np.allclose(i.detach(), j.detach())
                        for i, j in zip(before_calib_param, after_calib_param)])

    def test_reg_l2_called(self):
        self.calibrator.l2_reg = Mock(return_value=torch.Tensor([0]))
        self.calibrator.calibrate(self.dataset, self.dataset,
                                  batch_size=10, epoch=5,
                                  use_cuda=False,
                                  double_fit=False, workers=0)
        self.calibrator.l2_reg.assert_called()

    def test_weight_assignment(self):
        params = list(self.wrapper.model.parameters())
        self.wrapper.train_on_dataset(self.dataset, self.optim, 32, 1, False)
        assert all([k is v for k, v in zip(params, self.optim.param_groups[0]['params'])])

        self.calibrator.calibrate(self.dataset, self.dataset, 32, 1, False, True)
        assert all(
            [k is v for k, v in
             zip(self.wrapper.model.parameters(), self.optim.param_groups[0]['params'])])

        # Check that we can train the original model
        before_params = list(
            map(lambda x: x.clone(), self.wrapper.model.parameters()))
        self.wrapper.train_on_dataset(self.dataset, self.optim, 10, 2, False)
        after_params = list(
            map(lambda x: x.clone(), self.wrapper.model.parameters()))
        assert not all([np.allclose(i.detach(), j.detach())
                        for i, j in zip(before_params, after_params)])

        # Check that the parameters are still tied.
        calib_params = list(
            map(lambda x: x.clone(), _get_first_module(self.calibrator.wrapper.model).parameters()))
        assert all([np.allclose(i.detach(), j.detach())
                    for i, j in zip(calib_params, after_params)])
Example #13
def main():
    args = parse_args()
    batch_size = args.batch_size
    use_cuda = torch.cuda.is_available()
    hyperparams = vars(args)
    pprint(hyperparams)

    active_set, test_set = get_datasets(hyperparams['initial_pool'],
                                        hyperparams['data_path'])

    # We will use the FocalLoss
    criterion = FocalLoss(gamma=2, alpha=0.25)

    # Our model is a simple Unet
    model = smp.Unet(encoder_name='resnext50_32x4d',
                     encoder_depth=5,
                     encoder_weights='imagenet',
                     decoder_use_batchnorm=False,
                     classes=len(pascal_voc_ids))
    # Add a Dropout layer to use MC-Dropout
    add_dropout(model, classes=len(pascal_voc_ids), activation=None)

    # This will enable Dropout at test time.
    model = MCDropoutModule(model)

    # Put everything on GPU.
    if use_cuda:
        model.cuda()

    # Make an optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=hyperparams["lr"],
                          momentum=0.9,
                          weight_decay=5e-4)
    # Keep a copy of the original weights
    initial_weights = deepcopy(model.state_dict())

    # Add metrics
    model = ModelWrapper(model, criterion)
    model.add_metric('cls_report',
                     lambda: ClassificationReport(len(pascal_voc_ids)))

    # Which heuristic do you want to use?
    # We will use our custom reduction function.
    heuristic = get_heuristic(hyperparams['heuristic'], reduction=mean_regions)

    # The ActiveLearningLoop is in charge of predicting the uncertainty and of labelling the most uncertain samples.
    loop = ActiveLearningLoop(
        active_set,
        model.predict_on_dataset_generator,
        heuristic=heuristic,
        ndata_to_label=hyperparams['n_data_to_label'],
        # Instead of predicting on the entire pool, only a subset is used
        max_sample=1000,
        batch_size=batch_size,
        iterations=hyperparams["iterations"],
        use_cuda=use_cuda)
    acc = []
    for epoch in tqdm(range(args.al_step)):
        # Following Gal et al. 2016, we reset the weights.
        model.load_state_dict(initial_weights)
        # Train 50 epochs before sampling.
        model.train_on_dataset(active_set, optimizer, batch_size,
                               hyperparams['learning_epoch'], use_cuda)

        # Validation!
        model.test_on_dataset(test_set, batch_size, use_cuda)
        should_continue = loop.step()
        metrics = model.metrics

        val_loss = metrics['test_loss'].value
        logs = {
            "val": val_loss,
            "epoch": epoch,
            "train": metrics['train_loss'].value,
            "labeled_data": active_set._labelled,
            "Next Training set size": len(active_set),
            'cls_report': metrics['test_cls_report'].value,
        }
        pprint(logs)
        acc.append(logs)
        if not should_continue:
            break
Example #14
class ModelWrapperMultiOutMultiInTest(unittest.TestCase):
    def setUp(self):
        class MultiOutModel(nn.Module):
            def __init__(self):
                super().__init__()
                self.model = DummyModel()

            def forward(self, x):
                return [self.model(x)] * 2

        self._crit = nn.MSELoss()
        self.criterion = lambda x, y: self._crit(x[0], y) + self._crit(x[1], y)
        self.model = MultiOutModel()
        self.wrapper = ModelWrapper(self.model, self.criterion)
        self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
        self.dataset = DummyDataset()

    def test_train_on_batch(self):
        self.wrapper.train()
        old_param = list(map(lambda x: x.clone(), self.model.parameters()))
        input, target = [
            torch.stack(v) for v in zip(*(self.dataset[0], self.dataset[1]))
        ]
        self.wrapper.train_on_batch(input, target, self.optim)
        new_param = list(map(lambda x: x.clone(), self.model.parameters()))
        assert any(
            [not torch.allclose(i, j) for i, j in zip(old_param, new_param)])

    def test_test_on_batch(self):
        self.wrapper.eval()
        input, target = [
            torch.stack(v) for v in zip(*(self.dataset[0], self.dataset[1]))
        ]
        preds = torch.stack([
            self.wrapper.test_on_batch(input, target, cuda=False)
            for _ in range(10)
        ]).view(10, -1)

        # Same loss
        assert torch.allclose(torch.mean(preds, 0), preds[0])

        preds = torch.stack([
            self.wrapper.test_on_batch(input,
                                       target,
                                       cuda=False,
                                       average_predictions=10)
            for _ in range(10)
        ]).view(10, -1)
        assert torch.allclose(torch.mean(preds, 0), preds[0])

    def test_predict_on_batch(self):
        self.wrapper.eval()
        input = torch.stack((self.dataset[0][0], self.dataset[1][0]))

        # iteration == 1
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred[0].size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred[0].size() == (2, 1, 10)

        # iteration == 1
        self.wrapper = ModelWrapper(self.model,
                                    self.criterion,
                                    replicate_in_memory=False)
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred[0].size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred[0].size() == (2, 1, 10)

    def test_out_of_mem_raises_error(self):
        self.wrapper.eval()
        input = torch.stack((self.dataset[0][0], self.dataset[1][0]))
        with pytest.raises(RuntimeError) as e_info:
            self.wrapper.predict_on_batch(input, 0, False)
        assert 'CUDA ran out of memory while BaaL tried to replicate data' in str(
            e_info.value)

    def test_raising_type_errors(self):
        iterations = math.inf
        self.wrapper.eval()
        input = torch.stack((self.dataset[0][0], self.dataset[1][0]))
        with pytest.raises(TypeError):
            self.wrapper.predict_on_batch(input, iterations, False)

    def test_using_cuda_raises_error_while_testing(self):
        '''CUDA is not available in the test environment'''
        self.wrapper.eval()
        input = torch.stack((self.dataset[0][0], self.dataset[1][0]))
        with pytest.raises(Exception):
            self.wrapper.predict_on_batch(input, 1, True)

    def test_train(self):
        history = self.wrapper.train_on_dataset(self.dataset,
                                                self.optim,
                                                10,
                                                2,
                                                use_cuda=False,
                                                workers=0)
        assert len(history) == 2

    def test_test(self):
        l = self.wrapper.test_on_dataset(self.dataset,
                                         10,
                                         use_cuda=False,
                                         workers=0)
        assert np.isfinite(l)
        l = self.wrapper.test_on_dataset(self.dataset,
                                         10,
                                         use_cuda=False,
                                         workers=0,
                                         average_predictions=10)
        assert np.isfinite(l)

    def test_predict(self):
        l = self.wrapper.predict_on_dataset(self.dataset,
                                            10,
                                            20,
                                            use_cuda=False,
                                            workers=0)
        self.wrapper.eval()
        assert np.allclose(
            self.wrapper.predict_on_batch(self.dataset[0][0].unsqueeze(0),
                                          20)[0].detach().numpy(), l[0][0])
        assert np.allclose(
            self.wrapper.predict_on_batch(self.dataset[19][0].unsqueeze(0),
                                          20)[0][0].detach().numpy(), l[0][19])
        assert l[0].shape == (len(self.dataset), 1, 20)

        # Test generators
        l_gen = self.wrapper.predict_on_dataset_generator(self.dataset,
                                                          10,
                                                          20,
                                                          use_cuda=False,
                                                          workers=0)
        assert np.allclose(next(l_gen)[0][0], l[0][0])
        for last in l_gen:
            pass  # Get last item
        assert np.allclose(last[0][-1], l[0][-1])

        # Test half precision
        l_gen = self.wrapper.predict_on_dataset_generator(self.dataset,
                                                          10,
                                                          20,
                                                          use_cuda=False,
                                                          workers=0,
                                                          half=True)
        l = self.wrapper.predict_on_dataset(self.dataset,
                                            10,
                                            20,
                                            use_cuda=False,
                                            workers=0,
                                            half=True)
        assert next(l_gen)[0].dtype == np.float16
        assert l[0].dtype == np.float16

        data_s = []
        l_gen = self.wrapper.predict_on_dataset_generator(data_s,
                                                          10,
                                                          20,
                                                          use_cuda=False,
                                                          workers=0,
                                                          half=True)

        assert len(list(l_gen)) == 0
Example #15
def main():
    args = parse_args()
    use_cuda = torch.cuda.is_available()
    torch.backends.cudnn.benchmark = True
    random.seed(1337)
    torch.manual_seed(1337)
    if not use_cuda:
        print("warning, the experiments would take ages to run on cpu")

    hyperparams = vars(args)

    active_set, test_set = get_datasets(hyperparams['initial_pool'])

    heuristic = get_heuristic(hyperparams['heuristic'],
                              hyperparams['shuffle_prop'])
    criterion = CrossEntropyLoss()
    model = vgg16(pretrained=False, num_classes=10)
    weights = load_state_dict_from_url(
        'https://download.pytorch.org/models/vgg16-397923af.pth')
    weights = {k: v for k, v in weights.items() if 'classifier.6' not in k}
    model.load_state_dict(weights, strict=False)

    # change dropout layer to MCDropout
    model = patch_module(model)

    if use_cuda:
        model.cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=hyperparams["lr"],
                          momentum=0.9)

    # Wraps the model into a usable API.
    model = ModelWrapper(model, criterion)

    logs = {}
    logs['epoch'] = 0

    # For prediction we use a smaller batch size since it is slower
    active_loop = ActiveLearningLoop(active_set,
                                     model.predict_on_dataset,
                                     heuristic,
                                     hyperparams.get('n_data_to_label', 1),
                                     batch_size=10,
                                     iterations=hyperparams['iterations'],
                                     use_cuda=use_cuda)

    for epoch in tqdm(range(args.epoch)):
        model.train_on_dataset(active_set, optimizer,
                               hyperparams["batch_size"], 1, use_cuda)

        # Validation!
        model.test_on_dataset(test_set, hyperparams["batch_size"], use_cuda)
        metrics = model.metrics

        if epoch % hyperparams['learning_epoch'] == 0:
            should_continue = active_loop.step()
            model.reset_fcs()
            if not should_continue:
                break
        val_loss = metrics['test_loss'].value
        logs = {
            "val": val_loss,
            "epoch": epoch,
            "train": metrics['train_loss'].value,
            "labeled_data": active_set._labelled,
            "Next Training set size": len(active_set)
        }
        print(logs)
Example #16
class ModelWrapperTest(unittest.TestCase):
    def setUp(self):
        # self.model = nn.Sequential(
        #     nn.Linear(10, 8), nn.ReLU(), nn.Dropout(), nn.Linear(8, 1), nn.Sigmoid()
        # )
        self.model = DummyModel()
        self.criterion = nn.BCEWithLogitsLoss()
        self.wrapper = ModelWrapper(self.model, self.criterion)
        self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
        self.dataset = DummyDataset()

    def test_train_on_batch(self):
        self.wrapper.train()
        old_param = list(map(lambda x: x.clone(), self.model.parameters()))
        input, target = torch.randn([1, 3, 10, 10]), torch.randn(1, 1)
        self.wrapper.train_on_batch(input, target, self.optim)
        new_param = list(map(lambda x: x.clone(), self.model.parameters()))

        assert any(
            [not torch.allclose(i, j) for i, j in zip(old_param, new_param)])

        # test reset weights properties
        linear_weights = list(
            self.wrapper.model.named_children())[3][1].weight.clone()
        conv_weights = list(
            self.wrapper.model.named_children())[0][1].weight.clone()
        self.wrapper.reset_fcs()
        linear_new_weights = list(
            self.wrapper.model.named_children())[3][1].weight.clone()
        conv_new_weights = list(
            self.wrapper.model.named_children())[0][1].weight.clone()
        assert all([
            not torch.allclose(i, j)
            for i, j in zip(linear_new_weights, linear_weights)
        ])
        assert all([
            torch.allclose(i, j)
            for i, j in zip(conv_new_weights, conv_weights)
        ])

        self.wrapper.reset_all()
        conv_next_new_weights = list(
            self.wrapper.model.named_children())[0][1].weight.clone()
        assert all([
            not torch.allclose(i, j)
            for i, j in zip(conv_new_weights, conv_next_new_weights)
        ])

    def test_test_on_batch(self):
        self.wrapper.eval()
        input, target = torch.randn([1, 3, 10, 10]), torch.randn(1, 1)
        preds = torch.stack([
            self.wrapper.test_on_batch(input, target, cuda=False)
            for _ in range(10)
        ]).view(10, -1)

        # Same loss
        assert torch.allclose(torch.mean(preds, 0), preds[0])

        preds = torch.stack([
            self.wrapper.test_on_batch(input,
                                       target,
                                       cuda=False,
                                       average_predictions=10)
            for _ in range(10)
        ]).view(10, -1)
        assert torch.allclose(torch.mean(preds, 0), preds[0])

    def test_predict_on_batch(self):
        self.wrapper.eval()
        input = torch.randn([2, 3, 10, 10])

        # iteration == 1
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred.size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred.size() == (2, 1, 10)

        # iteration == 1
        self.wrapper = ModelWrapper(self.model,
                                    self.criterion,
                                    replicate_in_memory=False)
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred.size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred.size() == (2, 1, 10)

    def test_train(self):
        history = self.wrapper.train_on_dataset(self.dataset,
                                                self.optim,
                                                10,
                                                2,
                                                use_cuda=False,
                                                workers=0)
        assert len(history) == 2

    def test_test(self):
        l = self.wrapper.test_on_dataset(self.dataset,
                                         10,
                                         use_cuda=False,
                                         workers=0)
        assert np.isfinite(l)
        l = self.wrapper.test_on_dataset(self.dataset,
                                         10,
                                         use_cuda=False,
                                         workers=0,
                                         average_predictions=10)
        assert np.isfinite(l)

    def test_predict(self):
        l = self.wrapper.predict_on_dataset(self.dataset,
                                            10,
                                            20,
                                            use_cuda=False,
                                            workers=0)
        self.wrapper.eval()
        assert np.allclose(
            self.wrapper.predict_on_batch(self.dataset[0][0].unsqueeze(0),
                                          20)[0].detach().numpy(), l[0])
        assert np.allclose(
            self.wrapper.predict_on_batch(self.dataset[19][0].unsqueeze(0),
                                          20)[0].detach().numpy(), l[19])
        assert l.shape == (len(self.dataset), 1, 20)

        # Test generators
        l_gen = self.wrapper.predict_on_dataset_generator(self.dataset,
                                                          10,
                                                          20,
                                                          use_cuda=False,
                                                          workers=0)
        assert np.allclose(next(l_gen)[0], l[0])
        for last in l_gen:
            pass  # Get last item
        assert np.allclose(last[-1], l[-1])

        # Test half precision
        l_gen = self.wrapper.predict_on_dataset_generator(self.dataset,
                                                          10,
                                                          20,
                                                          use_cuda=False,
                                                          workers=0,
                                                          half=True)
        l = self.wrapper.predict_on_dataset(self.dataset,
                                            10,
                                            20,
                                            use_cuda=False,
                                            workers=0,
                                            half=True)
        assert next(l_gen).dtype == np.float16
        assert l.dtype == np.float16

    def test_states(self):
        input = torch.randn([1, 3, 10, 10])

        def pred_with_dropout(replicate_in_memory):
            self.wrapper = ModelWrapper(
                self.model,
                self.criterion,
                replicate_in_memory=replicate_in_memory)
            self.wrapper.train()
            # Dropout makes the predictions change between runs
            preds = torch.stack([
                self.wrapper.predict_on_batch(input, iterations=1, cuda=False)
                for _ in range(10)
            ]).view(10, -1)
            assert not torch.allclose(torch.mean(preds, 0), preds[0])

        pred_with_dropout(replicate_in_memory=True)
        pred_with_dropout(replicate_in_memory=False)

        def pred_without_dropout(replicate_in_memory):
            self.wrapper = ModelWrapper(
                self.model,
                self.criterion,
                replicate_in_memory=replicate_in_memory)
            # Dropout is not active in eval
            self.wrapper.eval()
            preds = torch.stack([
                self.wrapper.predict_on_batch(input, iterations=1, cuda=False)
                for _ in range(10)
            ]).view(10, -1)
            assert torch.allclose(torch.mean(preds, 0), preds[0])

        pred_without_dropout(replicate_in_memory=True)
        pred_without_dropout(replicate_in_memory=False)

    def test_add_metric(self):
        self.wrapper.add_metric('cls_report', lambda: ClassificationReport(2))
        assert 'test_cls_report' in self.wrapper.metrics
        assert 'train_cls_report' in self.wrapper.metrics
        self.wrapper.train_on_dataset(self.dataset, self.optim, 32, 2, False)
        self.wrapper.test_on_dataset(self.dataset, 32, False)
        assert (self.wrapper.metrics['train_cls_report'].value['accuracy'] !=
                0).any()
        assert (self.wrapper.metrics['test_cls_report'].value['accuracy'] !=
                0).any()

    def test_train_and_test(self):
        res = self.wrapper.train_and_test_on_datasets(
            self.dataset,
            self.dataset,
            self.optim,
            32,
            5,
            False,
            return_best_weights=False)
        assert len(res) == 5
        res = self.wrapper.train_and_test_on_datasets(self.dataset,
                                                      self.dataset,
                                                      self.optim,
                                                      32,
                                                      5,
                                                      False,
                                                      return_best_weights=True)
        assert len(res) == 2
        assert len(res[0]) == 5
        assert isinstance(res[1], dict)
        mock = Mock()
        mock.side_effect = (((np.linspace(0, 50) - 10) / 10)**2).tolist()
        self.wrapper.test_on_dataset = mock
        res = self.wrapper.train_and_test_on_datasets(self.dataset,
                                                      self.dataset,
                                                      self.optim,
                                                      32,
                                                      50,
                                                      False,
                                                      return_best_weights=True,
                                                      patience=1)

        assert len(res) == 2
        assert len(res[0]) < 50

        mock = Mock()
        mock.side_effect = (((np.linspace(0, 50) - 10) / 10)**2).tolist()
        self.wrapper.test_on_dataset = mock
        res = self.wrapper.train_and_test_on_datasets(self.dataset,
                                                      self.dataset,
                                                      self.optim,
                                                      32,
                                                      50,
                                                      False,
                                                      return_best_weights=True,
                                                      patience=1,
                                                      min_epoch_for_es=20)
        assert len(res) == 2
        assert len(res[0]) < 50 and len(res[0]) > 20
Example #17
class ModelWrapperMultiOutTest(unittest.TestCase):
    def setUp(self):
        class MultiOutModel(nn.Module):
            def __init__(self):
                super().__init__()
                self.model = DummyModel()

            def forward(self, x):
                return [self.model(x)] * 2

        self._crit = nn.MSELoss()
        self.criterion = lambda x, y: self._crit(x[0], y) + self._crit(x[1], y)
        self.model = MultiOutModel()
        self.wrapper = ModelWrapper(self.model, self.criterion)
        self.optim = torch.optim.SGD(self.wrapper.get_params(), 0.01)
        self.dataset = DummyDataset()

    def test_train_on_batch(self):
        self.wrapper.train()
        old_param = list(map(lambda x: x.clone(), self.model.parameters()))
        input, target = [torch.stack(v) for v in zip(*(self.dataset[0], self.dataset[1]))]
        self.wrapper.train_on_batch(input, target, self.optim)
        new_param = list(map(lambda x: x.clone(), self.model.parameters()))
        assert any([not torch.allclose(i, j) for i, j in zip(old_param, new_param)])

    def test_test_on_batch(self):
        self.wrapper.eval()
        input, target = [torch.stack(v) for v in zip(*(self.dataset[0], self.dataset[1]))]
        preds = torch.stack(
            [self.wrapper.test_on_batch(input, target, cuda=False) for _ in range(10)]
        ).view(10, -1)

        # Same loss
        assert torch.allclose(torch.mean(preds, 0), preds[0])

        preds = torch.stack(
            [
                self.wrapper.test_on_batch(
                    input, target, cuda=False, average_predictions=10
                )
                for _ in range(10)
            ]
        ).view(10, -1)
        assert torch.allclose(torch.mean(preds, 0), preds[0])

    def test_predict_on_batch(self):
        self.wrapper.eval()
        input = torch.stack((self.dataset[0][0], self.dataset[1][0]))

        # iteration == 1
        pred = self.wrapper.predict_on_batch(input, 1, False)
        assert pred[0].size() == (2, 1, 1)

        # iterations > 1
        pred = self.wrapper.predict_on_batch(input, 10, False)
        assert pred[0].size() == (2, 1, 10)

    def test_train(self):
        history = self.wrapper.train_on_dataset(self.dataset, self.optim, 10, 2, use_cuda=False,
                                                workers=0)
        assert len(history) == 2

    def test_test(self):
        l = self.wrapper.test_on_dataset(self.dataset, 10, use_cuda=False, workers=0)
        assert np.isfinite(l)
        l = self.wrapper.test_on_dataset(
            self.dataset, 10, use_cuda=False, workers=0, average_predictions=10
        )
        assert np.isfinite(l)

    def test_predict(self):
        l = self.wrapper.predict_on_dataset(self.dataset, 10, 20, use_cuda=False, workers=0)
        self.wrapper.eval()
        assert np.allclose(
            self.wrapper.predict_on_batch(self.dataset[0][0].unsqueeze(0), 20)[0].detach().numpy(),
            l[0][0])
        assert np.allclose(
            self.wrapper.predict_on_batch(self.dataset[19][0].unsqueeze(0), 20)[0][
                0].detach().numpy(),
            l[0][19])
        assert l[0].shape == (len(self.dataset), 1, 20)

        # Test generators
        l_gen = self.wrapper.predict_on_dataset_generator(self.dataset, 10, 20, use_cuda=False,
                                                          workers=0)
        assert np.allclose(next(l_gen)[0][0], l[0][0])
        for last in l_gen:
            pass  # Get last item
        assert np.allclose(last[0][-1], l[0][-1])

        # Test half precision
        l_gen = self.wrapper.predict_on_dataset_generator(self.dataset, 10, 20, use_cuda=False,
                                                          workers=0, half=True)
        l = self.wrapper.predict_on_dataset(self.dataset, 10, 20, use_cuda=False, workers=0,
                                            half=True)
        assert next(l_gen)[0].dtype == np.float16
        assert l[0].dtype == np.float16