Example 1
    def train(self, num_epochs):
        '''
        The main train loop
        '''
        self.model.train()
        for epoch_idx in range(num_epochs):
            for batch_idx, batch in enumerate(self.train_loader):
                if self.cuda:
                    input_data, target_data = Variable(
                        batch[0]).cuda(), Variable(batch[1]).cuda()
                else:
                    input_data, target_data = Variable(batch[0]), Variable(
                        batch[1])

                output_data = self.model(input_data)
                loss = compute_loss(self.model, output_data, target_data)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            # Record the loss of the last batch as this epoch's training loss.
            self.train_loss_history.append(float(loss))
            self.model.eval()
            self.eval_on_test()
            self.validation_accuracy_history.append(
                self.get_accuracy(split='test'))
            self.train_accuracy_history.append(
                self.get_accuracy(split='train'))
            self.model.train()

            # Log and checkpoint every epoch (change the modulus to log less often).
            if epoch_idx % 1 == 0:
                print('Epoch:{}, Loss:{:.4f}'.format(epoch_idx + 1,
                                                     float(loss)))
                self.save_model()
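
save_model() is referenced above but never shown in these examples. A minimal sketch of such a checkpointing helper, built on torch.save (the file name and the choice to also store the optimizer state are assumptions, not part of the original code), could look like this:

    def save_model(self, path='checkpoint.pth'):
        '''
        Hypothetical sketch: persist the model and optimizer state.
        '''
        # The saved keys and the default path are illustrative assumptions.
        torch.save(
            {
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
            }, path)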
Example 2
    def eval_on_test(self):
        '''
        Get loss on test set
        '''
        test_loss = 0.0

        num_examples = 0
        for batch_idx, batch in enumerate(self.test_loader):
            if self.cuda:
                input_data, target_data = Variable(batch[0]).cuda(), Variable(
                    batch[1]).cuda()
            else:
                input_data, target_data = Variable(batch[0]), Variable(
                    batch[1])

            num_examples += input_data.shape[0]
            output_data = self.model(input_data)
            # Accumulate the per-batch loss without normalization; it is
            # averaged over the number of examples below.
            loss = compute_loss(self.model,
                                output_data,
                                target_data,
                                is_normalize=False)

            test_loss += float(loss)

        self.validation_loss_history.append(test_loss / num_examples)

        return self.validation_loss_history[-1]
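
compute_loss() itself is not among these examples. Judging from how it is called (an is_normalize flag, with the un-normalized result summed and then divided by the number of examples), one plausible sketch is a cross-entropy loss whose reduction switches between mean and sum. This is an assumption about the helper, not the project's actual implementation:

import torch.nn.functional as F

def compute_loss(model, output_data, target_data, is_normalize=True):
    '''
    Hypothetical sketch: cross-entropy over the model outputs.
    With is_normalize=True the loss is averaged over the batch; with
    False it is summed, matching the division by num_examples above.
    '''
    # The model argument only mirrors the call sites; a real implementation
    # might use it, e.g. to add a regularization term.
    reduction = 'mean' if is_normalize else 'sum'
    return F.cross_entropy(output_data, target_data, reduction=reduction)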
Example 3
    def evaluate(self, split="test"):
        """
        Get the loss and accuracy on the test/train dataset
        """
        self.model.eval()

        num_examples = 0
        num_correct = 0
        loss = 0

        for _, batch in enumerate(
                self.test_loader if split == "test" else self.train_loader):
            if self.cuda:
                input_data, target_data = Variable(batch[0]).cuda(), Variable(
                    batch[1]).cuda()
            else:
                input_data, target_data = Variable(batch[0]), Variable(
                    batch[1])

            output_data = self.model(input_data)

            num_examples += input_data.shape[0]
            loss += float(
                compute_loss(self.model,
                             output_data,
                             target_data,
                             is_normalize=False))
            predicted_labels = predict_labels(output_data)
            num_correct += torch.sum(
                predicted_labels == target_data).cpu().item()

        self.model.train()

        return loss / float(num_examples), float(num_correct) / float(
            num_examples)
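
predict_labels() is likewise only referenced. Since its output is compared element-wise against target_data, a natural sketch is an argmax over the class dimension (again an assumption about the helper, not its actual definition):

def predict_labels(output_data):
    '''
    Hypothetical sketch: return the index of the highest-scoring class
    for each example, comparable directly with target_data.
    '''
    return torch.argmax(output_data, dim=1)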
Example 4
def test_compute_loss():
    '''
    Test the loss computation on dummy data
    '''

    test_net = TestModel()

    x = torch.FloatTensor([+1.4, -1.4, -0.7, 2.3, 0.3]).reshape(1, -1)

    assert torch.allclose(compute_loss(test_net, test_net(x),
                                       torch.LongTensor([4])),
                          torch.FloatTensor([7.486063259420916e-05]),
                          atol=5e-7)
    assert torch.allclose(compute_loss(test_net, test_net(x),
                                       torch.LongTensor([3])),
                          torch.FloatTensor([9.500075340270996]),
                          atol=1e-3)
Example 5
    def train(self, num_epochs):
        """
        The main train loop
        """
        self.model.train()

        # Record baseline metrics before any training (epoch 0).
        train_loss, train_acc = self.evaluate(split="train")
        val_loss, val_acc = self.evaluate(split="test")

        self.train_loss_history.append(train_loss)
        self.train_accuracy_history.append(train_acc)
        self.validation_loss_history.append(val_loss)
        self.validation_accuracy_history.append(val_acc)

        print("Epoch:{}, Training Loss:{:.4f}, Validation Loss:{:.4f}".format(
            0, self.train_loss_history[-1], self.validation_loss_history[-1]))

        for epoch_idx in range(num_epochs):
            self.model.train()
            for _, batch in enumerate(self.train_loader):
                if self.cuda:
                    input_data, target_data = Variable(
                        batch[0]).cuda(), Variable(batch[1]).cuda()
                else:
                    input_data, target_data = Variable(batch[0]), Variable(
                        batch[1])

                output_data = self.model(input_data)
                loss = compute_loss(self.model, output_data, target_data)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            train_loss, train_acc = self.evaluate(split="train")
            val_loss, val_acc = self.evaluate(split="test")

            self.train_loss_history.append(train_loss)
            self.train_accuracy_history.append(train_acc)
            self.validation_loss_history.append(val_loss)
            self.validation_accuracy_history.append(val_acc)

            print("Epoch:{}, Training Loss:{:.4f}, Validation Loss:{:.4f}".
                  format(epoch_idx + 1, self.train_loss_history[-1],
                         self.validation_loss_history[-1]))

        self.save_model()
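
None of the examples show the surrounding class being driven. A hedged usage sketch, assuming a solver object of that class has already been constructed with a model, an optimizer and the data loaders (the constructor is not shown above), might be:

# Hypothetical usage; `solver` is assumed to be an instance of the class
# the methods above belong to, built elsewhere with its model, optimizer
# and train/test loaders.
solver.train(num_epochs=10)

# The train loop fills the history lists, one entry per epoch
# (plus an epoch-0 baseline in Example 5), ready for inspection or plotting.
print(solver.train_loss_history[-1], solver.validation_loss_history[-1])
print(solver.train_accuracy_history[-1], solver.validation_accuracy_history[-1])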