예제 #1
0
    def test_log_dir(self, mock_board):
        """The writer should be created once, rooted at ./logs/<model name>_torchbearer."""
        model = nn.Sequential(nn.Conv2d(3, 3, 3))

        callback = TensorBoard(write_epoch_metrics=False)
        callback.on_start({torchbearer.MODEL: model})

        expected_dir = os.path.join('./logs', 'Sequential_torchbearer')
        mock_board.assert_called_once_with(log_dir=expected_dir)
예제 #2
0
    def test_log_dir(self, mock_board):
        """on_start should open exactly one writer under ./logs/<model name>_torchbearer."""
        net = nn.Sequential(nn.Conv2d(3, 3, 3))
        callback = TensorBoard(write_epoch_metrics=False)

        callback.on_start({torchbearer.MODEL: net})

        target = os.path.join('./logs', 'Sequential_torchbearer')
        mock_board.assert_called_once_with(log_dir=target)
예제 #3
0
    def test_batch_log_dir(self, mock_board):
        """Per-epoch batch writers should live in an 'epoch-<n>' subdirectory."""
        state = {
            torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
            torchbearer.EPOCH: 0,
        }
        callback = TensorBoard(write_batch_metrics=True, write_graph=False, write_epoch_metrics=False)

        callback.on_start(state)
        callback.on_start_epoch(state)

        expected = os.path.join('./logs', 'Sequential_torchbearer', 'epoch-0')
        mock_board.assert_called_with(log_dir=expected)
예제 #4
0
    def test_batch_writer_closed_on_end_epoch(self, mock_board):
        """The batch writer opened on_start_epoch must be closed by on_end_epoch."""
        writer = Mock()
        writer.close = Mock()
        mock_board.return_value = writer

        callback = TensorBoard(write_batch_metrics=True, write_epoch_metrics=False)
        callback.on_start_epoch({torchbearer.EPOCH: 0})
        callback.on_end_epoch({})

        writer.close.assert_called_once()
예제 #5
0
    def test_writer_closed_on_end(self, mock_board):
        """on_end must close the writer that on_start opened."""
        writer = Mock()
        writer.close = Mock()
        mock_board.return_value = writer

        callback = TensorBoard(write_epoch_metrics=False)
        callback.on_start({torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))})
        callback.on_end({})

        writer.close.assert_called_once()
예제 #6
0
    def test_batch_log_dir(self, mock_board, _):
        """Across a full epoch lifecycle, batch writers should target 'epoch-<n>'."""
        state = {
            torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
            torchbearer.EPOCH: 0,
        }
        callback = TensorBoard(write_batch_metrics=True, write_graph=False, write_epoch_metrics=False)

        # Drive the callback through the whole start/epoch/end lifecycle.
        for hook in (callback.on_start, callback.on_start_epoch, callback.on_end_epoch, callback.on_end):
            hook(state)

        mock_board.assert_called_with(log_dir=os.path.join('./logs', 'Sequential_torchbearer', 'epoch-0'))
예제 #7
0
    def test_epoch_metrics(self, mock_board):
        """Epoch metrics should be written as scalars tagged 'epoch/<name>'."""
        writer = Mock()
        writer.add_scalar = Mock()
        mock_board.return_value = writer

        state = {
            torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
            torchbearer.EPOCH: 0,
            torchbearer.METRICS: {'test': 1},
        }
        callback = TensorBoard(write_batch_metrics=False, write_epoch_metrics=True)
        callback.on_start(state)
        callback.on_end_epoch(state)

        writer.add_scalar.assert_called_once_with('epoch/test', 1, 0)
예제 #8
0
    def test_batch_log_dir_visdom(self, mock_visdom, mock_writer, _):
        """In visdom mode the batch logger should log to <log dir>/epoch/log.log."""
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)),
                 torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0}
        callback = TensorBoard(visdom=True, write_batch_metrics=True, write_graph=False, write_epoch_metrics=False)

        # Drive the callback through the whole lifecycle.
        for hook in (callback.on_start, callback.on_start_epoch, callback.on_end_epoch, callback.on_end):
            hook(state)

        expected = os.path.join('./logs', 'Sequential_torchbearer', 'epoch', 'log.log')
        self.assertTrue(mock_visdom.call_args[1]['log_to_filename'] == expected)
예제 #9
0
    def test_batch_writer_closed_on_end_epoch_visdom(self, mock_visdom, mock_writer, _):
        """Both visdom writers (batch and main) should be closed by the end of the run."""
        writer = Mock()
        writer.close = Mock()
        mock_writer.return_value = writer

        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0}
        callback = TensorBoard(visdom=True, write_batch_metrics=True, write_epoch_metrics=False)
        callback.on_start(state)
        callback.on_start_epoch(state)
        callback.on_end_epoch({})
        callback.on_end(state)

        # One close for the per-epoch writer, one for the main writer.
        self.assertTrue(writer.close.call_count == 2)
예제 #10
0
    def test_epoch_metrics_visdom(self, mock_visdom, mock_writer, _):
        """In visdom mode epoch metrics use main_tag='epoch' rather than a path prefix."""
        writer = Mock()
        writer.add_scalar = Mock()
        mock_writer.return_value = writer

        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0,
                 torchbearer.METRICS: {'test': 1}}
        callback = TensorBoard(visdom=True, write_batch_metrics=False, write_epoch_metrics=True)
        callback.on_start(state)
        callback.on_start_epoch(state)
        callback.on_end_epoch(state)
        writer.add_scalar.assert_called_once_with('test', 1, 0, main_tag='epoch')
        callback.on_end(state)
예제 #11
0
    def test_write_graph(self, mock_rand, mock_board):
        """on_sample should trace a copy of the model (not the live model) into the graph."""
        writer = Mock()
        writer.add_graph = Mock()
        mock_board.return_value = writer
        mock_rand.return_value = 1

        dummy_input = torch.zeros(1, 1, 9, 9)
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.X: dummy_input}

        callback = TensorBoard(write_epoch_metrics=False)
        callback.on_start(state)
        callback.on_sample(state)

        # The dummy tracing input mirrors the sample's shape, gradient-free.
        mock_rand.assert_called_once_with(dummy_input.size(), requires_grad=False)
        writer.add_graph.assert_called_once()
        graphed_model = writer.add_graph.call_args_list[0][0][0]
        # Same structure, but a distinct object from the model in state.
        self.assertEqual(str(state[torchbearer.MODEL]), str(graphed_model))
        self.assertNotEqual(state[torchbearer.MODEL], graphed_model)
예제 #12
0
    def test_batch_writer_closed_on_end_epoch(self, mock_board):
        """The per-epoch batch writer must be closed as soon as the epoch ends."""
        writer = Mock()
        writer.close = Mock()
        mock_board.return_value = writer

        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.EPOCH: 0}

        callback = TensorBoard(write_batch_metrics=True, write_epoch_metrics=False)
        callback.on_start(state)
        callback.on_start_epoch(state)
        callback.on_end_epoch({})
        # Closed before on_end runs, i.e. at epoch boundary.
        writer.close.assert_called_once()
        callback.on_end(state)
예제 #13
0
    def test_visdom_error(self, mock_visdom, mock_writer, _, mock_warn):
        """A missing visdom package should produce a single warning, not a crash."""
        import unittest.mock

        orig_import = __import__

        # builtins.__import__ may be invoked with keyword arguments
        # (globals=, locals=, fromlist=, level=); the original shim only
        # forwarded positionals, which would break such imports while patched.
        def import_mock(name, *args, **kwargs):
            if name == 'visdom':
                raise ImportError('Test error')
            return orig_import(name, *args, **kwargs)

        with unittest.mock.patch('builtins.__import__', side_effect=import_mock):
            state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
            tboard = TensorBoard(visdom=True, write_epoch_metrics=False)
            tboard.on_start(state)

            self.assertEqual(mock_warn.call_count, 1)
예제 #14
0
    def test_state_dict(self):
        """state_dict should collect each callback's state and type, in list order."""
        self.callback_1.state_dict = Mock(return_value='test_1')
        self.callback_2.state_dict = Mock(return_value='test_2')

        state = self.list.state_dict()

        # Each callback is asked for its state exactly once.
        self.assertEqual(self.callback_1.state_dict.call_count, 1)
        self.assertEqual(self.callback_2.state_dict.call_count, 1)
        # States and types are stored positionally, matching the callback list.
        self.assertEqual(state[CallbackList.CALLBACK_STATES][0], 'test_1')
        self.assertEqual(state[CallbackList.CALLBACK_STATES][1], 'test_2')
        self.assertEqual(state[CallbackList.CALLBACK_TYPES][0], Tqdm().__class__)
        self.assertEqual(state[CallbackList.CALLBACK_TYPES][1], TensorBoard().__class__)
예제 #15
0
    def test_write_graph(self, mock_rand, mock_board, _):
        """The graph should be traced exactly once, from a copy of the model."""
        writer = Mock()
        writer.add_graph = Mock()
        mock_board.return_value = writer
        mock_rand.return_value = 1

        sample = torch.zeros(1, 1, 9, 9)
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3)), torchbearer.X: sample}

        callback = TensorBoard(write_epoch_metrics=False)
        callback.on_start(state)
        callback.on_sample(state)
        callback.on_end(state)

        mock_rand.assert_called_once_with(sample.size(), requires_grad=False)
        self.assertEqual(writer.add_graph.call_count, 1)
        traced = writer.add_graph.call_args_list[0][0][0]
        # Structurally identical, but not the same object as the live model.
        self.assertEqual(str(state[torchbearer.MODEL]), str(traced))
        self.assertNotEqual(state[torchbearer.MODEL], traced)
예제 #16
0
    def test_batch_metrics(self, mock_board):
        """Batch metrics should be logged as 'batch/<name>' on training and validation steps."""
        writer = Mock()
        writer.add_scalar = Mock()
        mock_board.return_value = writer

        state = {torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0}
        callback = TensorBoard(write_batch_metrics=True, write_epoch_metrics=False)
        callback.on_start_epoch(state)

        callback.on_step_training(state)
        writer.add_scalar.assert_called_once_with('batch/test', 1, 0)

        writer.add_scalar.reset_mock()
        callback.on_step_validation(state)
        writer.add_scalar.assert_called_once_with('batch/test', 1, 0)
예제 #17
0
    def test_writer_closed_on_end(self, mock_board, _):
        """on_end should close the writer exactly once."""
        writer = Mock()
        writer.close = Mock()
        mock_board.return_value = writer

        callback = TensorBoard(write_epoch_metrics=False)
        callback.on_start({torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))})
        callback.on_end({})

        self.assertEqual(writer.close.call_count, 1)
예제 #18
0
    def test_writer_closed_on_end_visdom_visdom(self, mock_visdom, mock_writer, _):
        """With default settings plus visdom, on_end should close the writer once."""
        writer = Mock()
        writer.close = Mock()
        mock_writer.return_value = writer

        callback = TensorBoard(visdom=True)
        callback.on_start({torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))})
        callback.on_end({})

        self.assertEqual(writer.close.call_count, 1)
예제 #19
0
    def test_writer_closed_on_end_visdom(self, mock_visdom, mock_writer, _):
        """In visdom mode on_end must close the writer."""
        writer = Mock()
        writer.close = Mock()
        mock_writer.return_value = writer

        callback = TensorBoard(visdom=True, write_epoch_metrics=False)
        callback.on_start({torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))})
        callback.on_end({})

        writer.close.assert_called_once()
예제 #20
0
    def test_log_dir_visdom(self, mock_visdom, mock_writer, _):
        """Visdom should be constructed once, logging to <log dir>/log.log."""
        state = {torchbearer.MODEL: nn.Sequential(nn.Conv2d(3, 3, 3))}
        mock_writer.__delete__ = Mock()

        callback = TensorBoard(visdom=True, write_epoch_metrics=False)

        callback.on_start(state)
        callback.on_end(state)

        expected = os.path.join('./logs', 'Sequential_torchbearer', 'log.log')
        self.assertEqual(mock_visdom.call_count, 1)
        self.assertTrue(mock_visdom.call_args[1]['log_to_filename'] == expected)
예제 #21
0
    def test_batch_metrics(self, mock_board):
        """Training and validation steps should both emit 'batch/<name>' scalars."""
        writer = Mock()
        writer.add_scalar = Mock()
        mock_board.return_value = writer

        state = {torchbearer.EPOCH: 0, torchbearer.METRICS: {'test': 1}, torchbearer.BATCH: 0}

        callback = TensorBoard(write_batch_metrics=True, write_epoch_metrics=False)
        callback.on_start_epoch(state)

        callback.on_step_training(state)
        writer.add_scalar.assert_called_once_with('batch/test', 1, 0)

        writer.add_scalar.reset_mock()
        callback.on_step_validation(state)
        writer.add_scalar.assert_called_once_with('batch/test', 1, 0)
예제 #22
0
    elif state[tb.MODEL].x < -1:
        state[tb.MODEL].x.data.fill_(-1)


# Number of optimisation steps to run for the configuration below.
training_steps = 6000000

# Train the online model with plain Adam, logging per-batch metrics to
# TensorBoard under the 'adam' comment so runs can be compared later.
model = Online()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99])
tbtrial = tb.Trial(model,
                   optim,
                   loss, [est()],
                   pass_state=True,
                   callbacks=[
                       greedy_update,
                       TensorBoard(comment='adam',
                                   write_graph=False,
                                   write_batch_metrics=True,
                                   write_epoch_metrics=False)
                   ])
tbtrial.for_train_steps(training_steps).run()

model = Online()
optim = torch.optim.Adam(model.parameters(),
                         lr=0.001,
                         betas=[0.9, 0.99],
                         amsgrad=True)
tbtrial = tb.Trial(model,
                   optim,
                   loss, [est()],
                   pass_state=True,
                   callbacks=[
                       greedy_update,
예제 #23
0
            nn.ReLU(),
            nn.Conv2d(16, 32, stride=2, kernel_size=3),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 64, stride=2, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )

        self.classifier = nn.Linear(576, 10)

    def forward(self, x):
        """Run the conv stack, flatten to 576 features, and classify to 10 logits."""
        features = self.convs(x)
        return self.classifier(features.view(-1, 576))


# Build the model and a standard cross-entropy objective.
model = SimpleModel()

# Only optimise parameters that require gradients.
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
loss = nn.CrossEntropyLoss()

import torchbearer
from torchbearer import Trial
from torchbearer.callbacks import TensorBoard

# Train for 10 epochs on the GPU with per-batch TensorBoard logging,
# then evaluate on the held-out test generator.
torchbearer_trial = Trial(model, optimizer, loss, metrics=['acc', 'loss'], callbacks=[TensorBoard(write_batch_metrics=True)]).to('cuda')
torchbearer_trial.with_generators(train_generator=traingen, val_generator=valgen, test_generator=testgen)
torchbearer_trial.run(epochs=10)
torchbearer_trial.evaluate(data_key=torchbearer.TEST_DATA)
# test_set = CIFAR10('./data', train=False, download=True, transform=transforms.Compose([transform_test]))

# Test loader: no shuffling so evaluation order is deterministic.
test_gen = torch.utils.data.DataLoader(test_set,
                                       pin_memory=True,
                                       batch_size=128,
                                       shuffle=False,
                                       num_workers=4)

optimizer = optim.SGD(net.parameters(),
                      lr=0.1,
                      momentum=0.9,
                      weight_decay=1e-4)

from torchbearer.callbacks import MultiStepLR, TensorBoard
from torchbearer import Trial

from datetime import datetime
# Timestamp the TensorBoard comment so repeated runs do not collide.
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
trial = Trial(net,
              optimizer,
              nn.CrossEntropyLoss(),
              metrics=['acc', 'loss'],
              callbacks=[
                  UnpackState(),
                  TensorBoard(write_graph=False, comment=current_time),
                  MultiStepLR([100, 150])  # LR schedule steps at epochs 100 and 150
              ])
trial.with_generators(train_generator=train_gen,
                      val_generator=test_gen).to('cuda')
trial.run(200, verbose=1)
예제 #25
0
# Wrap datasets in DataLoaders. NLP datasets are passed through unchanged
# (presumably already iterable/batched — verify against the dataset code).
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) if (trainset is not None) and (args.dataset not in nlp_data) else trainset
valloader = torch.utils.data.DataLoader(valset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) if (valset is not None) and (args.dataset not in nlp_data) else valset
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) if (args.dataset not in nlp_data) else testset

print('==> Building model..')
net = get_model(args, classes, nc)
net = nn.DataParallel(net) if args.parallel else net
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)

# NLP and modelnet experiments replace SGD with Adam.
if (args.dataset in nlp_data) or ('modelnet' in args.dataset):
    optimizer = optim.Adam(net.parameters(), lr=args.lr)


print('==> Setting up callbacks..')
# Tag each run with a timestamp and the run id so log directories are unique.
current_time = datetime.now().strftime('%b%d_%H-%M-%S') + "-run-" + str(args.run_id)
tboard = TensorBoard(write_graph=False, comment=current_time, log_dir=args.log_dir)
tboardtext = TensorBoardText(write_epoch_metrics=False, comment=current_time, log_dir=args.log_dir)

@torchbearer.callbacks.on_start
def write_params(_):
    """On trial start, dump the parsed CLI arguments as an HTML table into TensorBoard text."""
    # NOTE(review): vars(args) aliases args.__dict__, so stringifying
    # 'schedule' below also mutates args itself — confirm this is intended.
    params = vars(args)
    params['schedule'] = str(params['schedule'])
    df = pd.DataFrame(params, index=[0]).transpose()
    tboardtext.get_writer(tboardtext.log_dir).add_text('params', df.to_html(), 1)


modes = {
    'fmix': FMix(decay_power=args.f_decay, alpha=args.alpha, size=size, max_soft=0, reformulate=args.reformulate),
    'mixup': RMixup(args.alpha, reformulate=args.reformulate),
    'cutmix': CutMix(args.alpha, classes, True),
예제 #26
0
# Only optimise trainable parameters.
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=0.001)
loss = nn.CrossEntropyLoss()

from torchbearer import Trial
from torchbearer.callbacks import TensorBoard

# Log the graph plus batch-level (batch_step_size=10) and epoch-level
# metrics to visdom instead of a TensorBoard file writer.
torchbearer_trial = Trial(model,
                          optimizer,
                          loss,
                          metrics=['acc', 'loss'],
                          callbacks=[
                              TensorBoard(visdom=True,
                                          write_graph=True,
                                          write_batch_metrics=True,
                                          batch_step_size=10,
                                          write_epoch_metrics=True)
                          ]).to('cuda')
torchbearer_trial.with_generators(train_generator=traingen,
                                  val_generator=valgen)
torchbearer_trial.run(epochs=5)

import torchbearer.callbacks.tensor_board as tensorboard

# Redirect subsequent visdom logging into the 'Test' environment.
tensorboard.VisdomParams.ENV = 'Test'
torchbearer_trial = Trial(model,
                          optimizer,
                          loss,
                          metrics=['acc', 'loss'],
                          callbacks=[
예제 #27
0
        return state[tb.MODEL].x.data


@tb.callbacks.on_step_training
def greedy_update(state):
    """After each training step, clamp the model's scalar weight into [-1, 1]."""
    weight = state[tb.MODEL].x
    if weight > 1:
        weight.data.fill_(1)
    elif weight < -1:
        weight.data.fill_(-1)


# Number of optimisation steps per configuration.
training_steps = 6000000

# Compare Adam vs AMSGrad on both the Online and Stochastic models, logging
# per-batch metrics to TensorBoard under distinguishing comments.
model = Online()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99])
tbtrial = tb.Trial(model, optim, loss, [est()], pass_state=True, callbacks=[greedy_update, TensorBoard(comment='adam', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()

model = Online()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99], amsgrad=True)
tbtrial = tb.Trial(model, optim, loss, [est()], pass_state=True, callbacks=[greedy_update, TensorBoard(comment='amsgrad', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()

model = Stochastic()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99])
tbtrial = tb.Trial(model, optim, loss, [est()], callbacks=[greedy_update, TensorBoard(comment='adam', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()

model = Stochastic()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99], amsgrad=True)
tbtrial = tb.Trial(model, optim, loss, [est()], callbacks=[greedy_update, TensorBoard(comment='amsgrad', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])