Example no. 1
    def test_main_loop_stop_training(self):
        class stop_training_test_callback(Callback):
            def on_sample(self, state):
                super().on_sample(state)
                state[torchbearer.STOP_TRAINING] = True

        metric = Metric('test')

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        generator = DataLoader(data)
        train_steps = None

        epochs = 1

        callback = stop_training_test_callback()

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        loss = Mock()
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearerstate = torchbearermodel.fit_generator(generator, train_steps, epochs, 0, [callback], initial_epoch=0, pass_state=True)
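        # The callback sets STOP_TRAINING on the first sample, so only one batch should reach the model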
        self.assertTrue(torchbearerstate[torchbearer.MODEL].call_count == 1)
Example no. 2
    def test_main_loop_metrics(self):
        metric = Metric('test')
        metric.process = Mock(return_value={'test': 0})
        metric.process_final = Mock(return_value={'test': 0})
        metric.reset = Mock(return_value=None)

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        generator = DataLoader(data)
        train_steps = len(data)

        epochs = 1

        callback = MagicMock()

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        loss = torch.tensor([2], requires_grad=True)
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearerstate = torchbearermodel.fit_generator(generator, train_steps, epochs, 0, [callback], initial_epoch=0, pass_state=False)

        torchbearerstate[torchbearer.METRIC_LIST].metric_list[0].reset.assert_called_once()
        self.assertTrue(torchbearerstate[torchbearer.METRIC_LIST].metric_list[0].process.call_count == len(data))
        torchbearerstate[torchbearer.METRIC_LIST].metric_list[0].process_final.assert_called_once()
        self.assertTrue(torchbearerstate[torchbearer.METRICS]['test'] == 0)
Example no. 3
    def test_main_loop_verbose(self):
        metric = Metric('test')

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        generator = DataLoader(data)
        train_steps = len(data)

        epochs = 1

        callback = MagicMock()

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        loss = torch.tensor([2], requires_grad=True)
        criterion = Mock(return_value=loss)

        import sys
        from io import StringIO
        saved_std_err = sys.stderr
        out = StringIO()
        sys.stderr = out

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearerstate = torchbearermodel.fit_generator(generator, train_steps, epochs, 1, [callback], initial_epoch=0, pass_state=False)

        output = out.getvalue().strip()
        self.assertTrue(output != '')
        sys.stderr = saved_std_err
Example no. 4
    def test_main_loop_callback_calls(self):
        metric = Metric('test')

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        generator = DataLoader(data)
        train_steps = 2

        epochs = 1

        callback = MagicMock()

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        loss = Mock()
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearerstate = torchbearermodel.fit_generator(generator, train_steps, epochs, 0, [callback], initial_epoch=0, pass_state=True)
        callback.on_start.assert_called_once()
        callback.on_start_epoch.assert_called_once()
        callback.on_start_training.assert_called_once()
        self.assertTrue(callback.on_sample.call_count == train_steps*epochs)
        self.assertTrue(callback.on_forward.call_count == train_steps*epochs)
        self.assertTrue(callback.on_criterion.call_count == train_steps*epochs)
        self.assertTrue(callback.on_backward.call_count == train_steps*epochs)
        self.assertTrue(callback.on_step_training.call_count == train_steps*epochs)
        callback.on_end_training.assert_called_once()
        callback.on_end_epoch.assert_called_once()
Example no. 5
    def test_fit_valid_sets_args(self, gtvs):
        x = torch.rand(1,5)
        y = torch.rand(1,5)
        val_data = (1,2)
        val_split = 0.2
        shuffle = False

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()
        metric = Metric('test')

        loss = torch.tensor([2], requires_grad=True)
        criterion = Mock(return_value=loss)

        gtvs.return_value = (1, 2)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearermodel.fit_generator = Mock()
        torchbearermodel.fit(x, y, 1, validation_data=val_data, validation_split=val_split, shuffle=shuffle)

        gtvs.assert_called_once()
        self.assertTrue(list(gtvs.call_args[0][0].numpy()[0]) == list(x.numpy()[0]))
        self.assertTrue(list(gtvs.call_args[0][1].numpy()[0]) == list(y.numpy()[0]))
        self.assertTrue(gtvs.call_args[0][2] == val_data)
        self.assertTrue(gtvs.call_args[0][3] == val_split)
        self.assertTrue(gtvs.call_args[1]['shuffle'] == shuffle)
Example no. 6
    def test_test_loop_stop_training(self):
        metric = Metric('test')
        metric_list = MetricList([metric])

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])),
                (torch.Tensor([3]), torch.Tensor([3]))]
        validation_generator = DataLoader(data)
        validation_steps = len(data)

        callback = MagicMock()
        callback_List = torchbearer.CallbackList([callback])

        torchmodel = Mock(return_value=1)
        optimizer = MagicMock()

        criterion = Mock(return_value=2)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])

        state = torchbearermodel.main_state.copy()
        state.update({torchbearer.METRIC_LIST: metric_list, torchbearer.VALIDATION_GENERATOR: validation_generator,
                      torchbearer.CallbackList: callback_List, torchbearer.VALIDATION_STEPS: validation_steps,
                      torchbearer.CRITERION: criterion, torchbearer.STOP_TRAINING: True, torchbearer.METRICS: {}})

        torchbearerstate = torchbearermodel._test_loop(state, callback_List, False, Model._load_batch_standard, num_steps=None)

        self.assertTrue(torchbearerstate[torchbearer.MODEL].call_count == 1)
Example no. 7
    def test_test_loop_metrics(self):
        metric = Metric('test')
        metric.process = Mock(return_value={'test': 0})
        metric.process_final = Mock(return_value={'test': 0})
        metric.reset = Mock(return_value=None)
        metric_list = MetricList([metric])

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        validation_generator = DataLoader(data)
        validation_steps = len(data)

        callback = MagicMock()
        callback_List = torchbearer.CallbackList([callback])

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        criterion = Mock(return_value=2)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])

        state = torchbearermodel.main_state.copy()
        state.update({torchbearer.METRIC_LIST: metric_list, torchbearer.VALIDATION_GENERATOR: validation_generator,
                 torchbearer.CallbackList: callback_List, torchbearer.MODEL: torchmodel, torchbearer.VALIDATION_STEPS: validation_steps,
                 torchbearer.CRITERION: criterion, torchbearer.STOP_TRAINING: False, torchbearer.METRICS: {}})

        torchbearerstate = torchbearermodel._test_loop(state, callback_List, False, Model._load_batch_standard, num_steps=None)

        torchbearerstate[torchbearer.METRIC_LIST].metric_list[0].reset.assert_called_once()
        self.assertTrue(torchbearerstate[torchbearer.METRIC_LIST].metric_list[0].process.call_count == len(data))
        torchbearerstate[torchbearer.METRIC_LIST].metric_list[0].process_final.assert_called_once()
        self.assertTrue(torchbearerstate[torchbearer.METRICS]['test'] == 0)
Example no. 8
    def test_main_loop_validation_setup(self):
        metric = Metric('test')

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        generator = DataLoader(data)
        valgenerator = DataLoader(data)
        train_steps = 2

        epochs = 1

        callback = MagicMock()

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        loss = torch.tensor([2], requires_grad=True)
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearermodel._test_loop = Mock()
        torchbearerstate = torchbearermodel.fit_generator(generator, train_steps, epochs, 0, [callback],
                                                          validation_generator=valgenerator, initial_epoch=0,
                                                          pass_state=False)

        self.assertTrue(torchbearerstate[torchbearer.VALIDATION_STEPS] == len(valgenerator))
        self.assertTrue(torchbearerstate[torchbearer.VALIDATION_GENERATOR] == valgenerator)
Example no. 9
    def test_deep_to_tensor(self):
        tensor = MagicMock()
        new_dtype = torch.float16
        new_device = 'cuda:1'

        Model._deep_to(tensor, new_device, new_dtype)
        self.assertTrue(tensor.to.call_args[0][0] == new_device)
        self.assertTrue(tensor.to.call_args[0][1] == new_dtype)
Example no. 10
    def test_load_batch_predict_list(self):
        items = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2]))]
        iterator = iter(items)
        state = {'training_iterator': iterator, 'device': 'cpu', 'dtype': torch.int}

        Model._load_batch_predict('training', state)
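        # A (data, target) batch should load both the input and the target from the first item into state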
        self.assertTrue(state['x'].item() == items[0][0].item())
        self.assertTrue(state['y_true'].item() == items[0][1].item())
Example no. 11
    def test_deep_to_tensor_int_dtype(self):
        tensor = MagicMock()
        tensor.dtype = torch.uint8
        new_device = 'cuda:1'
        new_dtype = torch.uint8

        Model._deep_to(tensor, new_device, new_dtype)
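        # With an integer dtype, only the device should be forwarded to .to(); the dtype argument is dropped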
        self.assertTrue(tensor.to.call_args[0][0] == new_device)
        self.assertTrue(len(tensor.to.call_args[0]) == 1)
Example no. 12
    def test_eval(self):
        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        optimizer = MagicMock()
        metric_list = MagicMock()

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.main_state = {torchbearer.MODEL: torchmodel, torchbearer.METRIC_LIST: metric_list}
        torchbearermodel.eval()
        self.assertTrue(torchbearermodel.main_state[torchbearer.MODEL].training == False)
        torchbearermodel.main_state[torchbearer.METRIC_LIST].eval.assert_called_once()
Example no. 13
    def test_state_dict_kwargs(self):
        keywords = {'destination': None, 'prefix': '', 'keep_vars': False}
        torchmodel = MagicMock()
        optimizer = MagicMock()

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.state_dict(**keywords)

        self.assertTrue(torchmodel.state_dict.call_args[1] == keywords)
        self.assertTrue(optimizer.state_dict.call_args[1] == {})
Example no. 14
    def test_deep_to_list(self):
        tensor_1 = MagicMock()
        tensor_2 = MagicMock()
        tensors = [tensor_1, tensor_2]
        new_dtype = torch.float16
        new_device = 'cuda:1'

        Model._deep_to(tensors, new_device, new_dtype)
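        # _deep_to should recurse into the list and call .to on every element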
        for tensor in tensors:
            self.assertTrue(tensor.to.call_args[0][0] == new_device)
            self.assertTrue(tensor.to.call_args[0][1] == new_dtype)
Example no. 15
    def test_cpu(self):
        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        torchmodel.load_state_dict = Mock()

        optimizer = torch.optim.SGD(torchmodel.parameters(), 0.1)
        optimizer.load_state_dict = Mock()

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.to = Mock()
        torchbearermodel.cpu()

        self.assertTrue(torchbearermodel.to.call_args[0][0] == 'cpu')
Example no. 16
    def test_state_dict(self):
        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        torchmodel_state = torchmodel.state_dict()

        optimizer = torch.optim.SGD(torchmodel.parameters(), 0.1)
        optimizer_state = optimizer.state_dict()

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearer_state = torchbearermodel.state_dict()

        self.assertTrue(torchbearer_state[torchbearer.MODEL] == torchmodel_state)
        self.assertTrue(torchbearer_state[torchbearer.OPTIMIZER] == optimizer_state)
Example no. 17
    def test_evaluate_generator_steps(self):
        torchmodel = MagicMock()
        optimizer = MagicMock()
        generator = MagicMock()

        pass_state = False
        steps = 100

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.main_state[torchbearer.METRICS] = 1
        torchbearermodel._test_loop = Mock()

        torchbearermodel.evaluate_generator(generator, 0, steps, pass_state)
        self.assertTrue(torchbearermodel._test_loop.call_args[0][4] == steps)
Example no. 18
    def test_predict_generator_pass_state(self):
        torchmodel = MagicMock()
        optimizer = MagicMock()
        generator = MagicMock()

        pass_state = False
        steps = 100

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.main_state[torchbearer.FINAL_PREDICTIONS] = 1
        torchbearermodel._test_loop = Mock()

        torchbearermodel.predict_generator(generator, 0, steps, pass_state)
        self.assertTrue(torchbearermodel._test_loop.call_args[0][2] == pass_state)
Example no. 19
    def test_cuda_no_device(self, device_mock):
        device_mock.return_value = 111

        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        torchmodel.load_state_dict = Mock()

        optimizer = torch.optim.SGD(torchmodel.parameters(), 0.1)
        optimizer.load_state_dict = Mock()

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.to = Mock()
        torchbearermodel.cuda()

        self.assertTrue(torchbearermodel.to.call_args[0][0] == 'cuda:' + str(111))
Example no. 20
    def test_to_only_dtype(self):
        dtype = torch.float16

        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        torchmodel.to = Mock()
        optimizer = torch.optim.Adam(torchmodel.parameters(), 0.1)
        state_tensor = torch.Tensor([1])
        state_tensor.to = Mock()
        optimizer.state = {'test': {'test': state_tensor}}

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.to(dtype)

        self.assertTrue(torchmodel.to.call_args[0][0] == dtype)
        self.assertTrue(state_tensor.to.call_args[0][0] == dtype)
Example no. 21
    def test_evaluate_generator_verbose(self):
        from torchbearer.callbacks import Tqdm

        torchmodel = MagicMock()
        optimizer = MagicMock()
        generator = MagicMock()

        pass_state = False
        steps = None

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.main_state[torchbearer.METRICS] = 1
        torchbearermodel._test_loop = Mock()

        torchbearermodel.evaluate_generator(generator, 1, steps, pass_state)
        self.assertIsInstance(torchbearermodel._test_loop.call_args[0][1].callback_list[0], Tqdm)
Example no. 22
    def test_predict_generator_verbose(self):
        from torchbearer.callbacks import Tqdm

        torchmodel = MagicMock()
        optimizer = MagicMock()
        generator = MagicMock()

        pass_state = False
        steps = None

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearermodel.main_state[torchbearer.FINAL_PREDICTIONS] = 1
        torchbearermodel._test_loop = Mock()

        torchbearermodel.predict_generator(generator, 1, steps, pass_state)
        self.assertIsInstance(torchbearermodel._test_loop.call_args[0][1].callback_list[1], Tqdm)
        self.assertTrue(torchbearermodel._test_loop.call_args[0][2] == pass_state)
        self.assertTrue(torchbearermodel._test_loop.call_args[0][4] == steps)
Example no. 23
    def test_fit_no_valid(self):
        x = torch.rand(1, 5)
        y = torch.rand(1, 5)

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()
        metric = Metric('test')

        loss = torch.tensor([2], requires_grad=True)
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearermodel.fit_generator = Mock()
        fit = torchbearermodel.fit_generator
        torchbearermodel.fit(x, y, 1, validation_split=None)

        self.assertTrue(fit.call_args[1]['validation_generator'] is None)
Example no. 24
    def test_update_device_and_dtype_from_args_only_arg(self):
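        # dtype and device passed positionally should be stored under DATA_TYPE and DEVICE in state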
        main_state = {}
        dtype = torch.float16
        dev = 'cuda:1'
        args = (dtype, dev)

        main_state = Model._update_device_and_dtype_from_args(main_state, *args)

        self.assertTrue(main_state[torchbearer.DATA_TYPE] == dtype)
        self.assertTrue(main_state[torchbearer.DEVICE] == dev)
Example no. 25
    def test_load_state_dict(self):
        key_words = {'strict': True}

        torchmodel = torch.nn.Sequential(torch.nn.Linear(1,1))
        torchmodel.load_state_dict = Mock()
        torch_state = torchmodel.state_dict()

        optimizer = torch.optim.SGD(torchmodel.parameters(), 0.1)
        optimizer.load_state_dict = Mock()
        optimizer_state = optimizer.state_dict()

        torchbearermodel = Model(torchmodel, optimizer, torch.nn.L1Loss(), [])
        torchbearer_state = torchbearermodel.state_dict()

        torchbearermodel.load_state_dict(torchbearer_state, **key_words)

        self.assertTrue(torchmodel.load_state_dict.call_args[0][0] == torch_state)
        self.assertTrue(optimizer.load_state_dict.call_args[0][0] == optimizer_state)
        self.assertTrue(torchmodel.load_state_dict.call_args[1] == key_words)
Example no. 26
    def test_predict(self):
        x = torch.rand(1,5)
        pass_state = False
        verbose = 0

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()
        metric = Metric('test')

        loss = torch.tensor([2], requires_grad=True)
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearermodel.predict_generator = Mock()
        pred = torchbearermodel.predict_generator
        torchbearermodel.predict(x, verbose=verbose, pass_state=pass_state)

        pred.assert_called_once()
        self.assertTrue(pred.call_args[0][1] == verbose)
        self.assertTrue(pred.call_args[1]['pass_state'] == pass_state)
Example no. 27
    def test_main_loop_backward(self):
        metric = Metric('test')

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        generator = DataLoader(data)
        train_steps = None

        epochs = 1

        callback = MagicMock()

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        loss = Mock()
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearerstate = torchbearermodel.fit_generator(generator, train_steps, epochs, 0, [callback], initial_epoch=0, pass_state=True)
        self.assertTrue(torchbearerstate[torchbearer.LOSS].backward.call_count == epochs*len(data))
Example no. 28
    def test_main_loop_pass_state(self):
        metric = Metric('test')

        data = [(torch.Tensor([1]), torch.Tensor([1])), (torch.Tensor([2]), torch.Tensor([2])), (torch.Tensor([3]), torch.Tensor([3]))]
        generator = DataLoader(data)
        train_steps = None

        epochs = 1

        callback = MagicMock()

        torchmodel = MagicMock()
        torchmodel.forward = Mock(return_value=1)
        optimizer = MagicMock()

        loss = torch.tensor([2], requires_grad=True)
        criterion = Mock(return_value=loss)

        torchbearermodel = Model(torchmodel, optimizer, criterion, [metric])
        torchbearerstate = torchbearermodel.fit_generator(generator, train_steps, epochs, 0, [callback], initial_epoch=0, pass_state=True)

        self.assertTrue(len(torchbearerstate[torchbearer.MODEL].call_args) == 2)
Example no. 29
    def test_save_checkpoint_save_filename(self, mock_save):
        torchmodel = Mock()
        optim = Mock()
        state = {
            torchbearer.SELF: Model(torchmodel, optim, None, []),
            torchbearer.METRICS: {}
        }

        file_format = 'test_file.pt'
        check = _Checkpointer(file_format)
        check.save_checkpoint(state)
        self.assertEqual(mock_save.call_count, 1)

        self.assertTrue(mock_save.call_args[0][1] == 'test_file.pt')
Example no. 30
    def test_save_checkpoint_formatting(self, mock_save):
        torchmodel = Mock()
        optim = Mock()
        state = {
            torchbearer.SELF: Model(torchmodel, optim, None, []),
            torchbearer.METRICS: {},
            torchbearer.EPOCH: 2
        }

        file_format = 'test_file_{epoch}.pt'
        check = _Checkpointer(file_format)
        check.save_checkpoint(state)
        mock_save.assert_called_once()

        self.assertTrue(mock_save.call_args[0][1] == 'test_file_2.pt')
Example no. 31
    def test_save_checkpoint_subformatting(self, mock_save):
        torchmodel = Mock()
        optim = Mock()
        state = {
            torchbearer.SELF: Model(torchmodel, optim, None, []),
            torchbearer.METRICS: {
                'test_metric': 0.001
            },
            torchbearer.EPOCH: 2
        }

        file_format = 'test_file_{test_metric:.01f}.pt'
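        # Metric values from state are substituted into the filename, so 0.001 formatted with '.01f' becomes '0.0'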
        check = _Checkpointer(file_format)
        check.save_checkpoint(state)
        self.assertEqual(mock_save.call_count, 1)

        self.assertTrue(mock_save.call_args[0][1] == 'test_file_0.0.pt')
Example no. 32
    def test_save_checkpoint_overwrite_recent(self, _, __):
        torchmodel = Mock()
        optim = Mock()
        state = {
            torchbearer.SELF: Model(torchmodel, optim, None, []),
            torchbearer.EPOCH: 0,
            torchbearer.METRICS: {}
        }

        file_format = 'test_file_{epoch}.pt'
        check = _Checkpointer(file_format)
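        # With the overwrite argument set, most_recent should track the latest file written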
        check.save_checkpoint(state, True)
        self.assertTrue(check.most_recent == 'test_file_0.pt')

        state[torchbearer.EPOCH] = 1
        check.save_checkpoint(state, True)
        self.assertTrue(check.most_recent == 'test_file_1.pt')
Example no. 33
    def test_save_checkpoint_wrong_format(self, _):
        torchmodel = Mock()
        optim = Mock()
        state = {
            torchbearer.SELF: Model(torchmodel, optim, None, []),
            torchbearer.METRICS: {
                'test_metric': 0.001
            },
            torchbearer.EPOCH: 2
        }

        file_format = 'test_file_{test_metric:d}.pt'
        check = _Checkpointer(file_format)
        try:
            check.save_checkpoint(state)
        except Exception:
            return

        self.fail(
            'No error was thrown when the wrong format was chosen for the save file format'
        )
Example no. 34
                                            cmap=plt.cm.jet,
                                            alpha=0.5)
        else:
            state['contour'] = plt.contourf(x,
                                            y,
                                            z,
                                            cmap=plt.cm.jet,
                                            alpha=0.5)
            plt.tight_layout()
            plt.show()

        mypause(0.001)


svm = LinearSVM()
model = Model(svm, optim.SGD(svm.parameters(), 0.1), hinge_loss,
              ['loss']).to('cuda')

model.fit(X,
          Y,
          batch_size=32,
          epochs=50,
          verbose=1,
          callbacks=[
              scatter, draw_margin,
              ExponentialLR(0.999, step_on_batch=True),
              L2WeightDecay(0.01, params=[svm.w])
          ])

plt.ioff()
plt.show()
Example no. 35
        self.convs = nn.Sequential(nn.Conv2d(3, 16, stride=2, kernel_size=3),
                                   nn.BatchNorm2d(16), nn.ReLU(),
                                   nn.Conv2d(16, 32, stride=2, kernel_size=3),
                                   nn.BatchNorm2d(32), nn.ReLU(),
                                   nn.Conv2d(32, 64, stride=2, kernel_size=3),
                                   nn.BatchNorm2d(64), nn.ReLU())

        self.classifier = nn.Linear(576, 10)

    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, 576)
        return self.classifier(x)


model = SimpleModel()

optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=0.001)
loss = nn.CrossEntropyLoss()

from torchbearer import Model

torchbearer_model = Model(model, optimizer, loss, metrics=['acc',
                                                           'loss']).to('cuda')
torchbearer_model.fit_generator(traingen,
                                epochs=10,
                                validation_generator=valgen)

torchbearer_model.evaluate_generator(testgen)
Example no. 36
def add_kld_loss_callback(state):
    KLD = kld_Loss(state['mu'], state['logvar'])
    return KLD


def save_reconstruction_callback(num_images=8, folder='results/'):
    import os
    os.makedirs(os.path.dirname(folder), exist_ok=True)

    @torchbearer.callbacks.on_step_validation
    def saver(state):
        if state[torchbearer.BATCH] == 0:
            data = state[torchbearer.X]
            recon_batch = state[torchbearer.Y_PRED]
            comparison = torch.cat([data[:num_images],
                                    recon_batch.view(128, 1, 28, 28)[:num_images]])
            save_image(comparison.cpu(),
                       str(folder) + 'reconstruction_' + str(state[torchbearer.EPOCH]) + '.png', nrow=num_images)
    return saver


model = VAE()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
loss = bce_loss

from torchbearer import Model

torchbearer_model = Model(model, optimizer, loss, metrics=['loss']).to('cuda')
torchbearer_model.fit_generator(traingen, epochs=10, validation_generator=testgen,
                                callbacks=[add_kld_loss_callback, save_reconstruction_callback()], pass_state=True)
Example no. 37
        if state[torchbearer.BATCH] == 0:
            data = state[torchbearer.X]
            recon_batch = state[torchbearer.Y_PRED]
            comparison = torch.cat([
                data[:num_images],
                recon_batch.view(128, 1, 28, 28)[:num_images]
            ])
            save_image(comparison.cpu(),
                       str(folder) + 'reconstruction_' +
                       str(state[torchbearer.EPOCH]) + '.png',
                       nrow=num_images)

    return saver


model = VAE()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=0.001)
loss = bce_loss

from torchbearer import Model

torchbearer_model = Model(model, optimizer, loss, metrics=['loss']).to('cuda')
torchbearer_model.fit_generator(
    traingen,
    epochs=10,
    validation_generator=testgen,
    callbacks=[add_kld_loss_callback,
               save_reconstruction_callback()],
    pass_state=True)
Example no. 38
        super(SimpleModel, self).__init__()
        self.convs = nn.Sequential(
            nn.Conv2d(3, 16, stride=2, kernel_size=3),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, 32, stride=2, kernel_size=3),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 64, stride=2, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )

        self.classifier = nn.Linear(576, 10)

    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, 576)
        return self.classifier(x)


model = SimpleModel()

optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
loss = nn.CrossEntropyLoss()

from torchbearer import Model

torchbearer_model = Model(model, optimizer, loss, metrics=['acc', 'loss']).to('cuda')
torchbearer_model.fit_generator(traingen, epochs=10, validation_generator=testgen)
Example no. 39
def kld_Loss(mu, logvar):
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return KLD


# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(y_pred, y_true):
    recon_x, mu, logvar = y_pred
    x = y_true

    BCE = bce_loss(recon_x, x)

    KLD = kld_Loss(mu, logvar)

    return BCE + KLD


model = VAE()

optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=0.001)
loss = loss_function

from torchbearer import Model

torchbearer_model = Model(model, optimizer, loss, metrics=['loss']).to('cuda')
torchbearer_model.fit_generator(traingen,
                                epochs=10,
                                validation_generator=testgen,
                                pass_state=False)
Example no. 40
        self.classifier = nn.Linear(576, 10)

    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, 576)
        return self.classifier(x)


model = SimpleModel()

optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
loss = nn.CrossEntropyLoss()

from torchbearer import Model
from torchbearer.callbacks import TensorBoard

torchbearer_model = Model(model, optimizer, loss, metrics=['acc', 'loss']).to('cuda')

torchbearer_model.fit_generator(traingen, epochs=5, validation_generator=valgen,
                                callbacks=[TensorBoard(visdom=True, write_graph=False, write_batch_metrics=True, batch_step_size=10, write_epoch_metrics=False)])

torchbearer_model.fit_generator(traingen, epochs=5, validation_generator=valgen,
                                callbacks=[TensorBoard(visdom=True, write_graph=False, write_batch_metrics=False, write_epoch_metrics=True)])

import torchbearer.callbacks.tensor_board as tensorboard

tensorboard.VisdomParams.ENV = 'Test'
torchbearer_model.fit_generator(traingen, epochs=5, validation_generator=valgen,
                                callbacks=[TensorBoard(visdom=True, write_graph=False, write_batch_metrics=True, batch_step_size=10, write_epoch_metrics=False)])