Example #1
    def test_no_class(self):
        # Edge case: with no classes at all, the micro metrics must fall back to 0.
        result = Result(self.cardinality)
        result.number_of_classes = 0
        result.update(torch.Tensor([[0.4, 0.3], [0.3, 0.4], [0.4, 0.3],
                                    [0.3, 0.4]]),
                      labels=[0, 1, 1, 0])
        result.computing_result()

        self.assertEqual(result.micro_precision, 0)
        self.assertEqual(result.micro_recall, 0)
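The `Result` internals never appear in these snippets, so the following is only a hedged sketch of the zero-class guard that test_no_class relies on; everything except the `number_of_classes`, `micro_precision`, and `micro_recall` names is an assumption:

    # Hedged sketch, not the project's actual code: computing_result()
    # presumably short-circuits when number_of_classes == 0 so the micro
    # metrics keep their 0 default instead of dividing by zero.
    class ResultSketch:
        def __init__(self):
            self.number_of_classes = 0
            self.micro_precision = 0
            self.micro_recall = 0

        def computing_result(self):
            if self.number_of_classes == 0:
                return  # nothing to aggregate, leave the metrics at 0
            # ... regular metric computation would follow here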
Example #2
    def test_print(self):
        # Compute the results once with an explicit condition name...
        result = Result(self.cardinality, "Test")
        result.number_of_classes = 0
        result.update(torch.Tensor([[0.4, 0.3], [0.3, 0.4], [0.4, 0.3],
                                    [0.3, 0.4]]),
                      labels=[0, 1, 1, 0])
        result.computing_result()

        # ... and once without one.
        result = Result(self.cardinality)
        result.number_of_classes = 0
        result.update(torch.Tensor([[0.4, 0.3], [0.3, 0.4], [0.4, 0.3],
                                    [0.3, 0.4]]),
                      labels=[0, 1, 1, 0])
        result.computing_result()
Example #3
    def test_compute(self):
        result = Result(self.cardinality)
        # Row-wise argmax gives predictions [0, 1, 0, 1] against labels
        # [0, 1, 1, 0]: samples 0 and 1 are correct, samples 2 and 3 are not.
        result.update(torch.Tensor([[0.4, 0.3], [0.3, 0.4], [0.4, 0.3],
                                    [0.3, 0.4]]),
                      labels=[0, 1, 1, 0])

        result.computing_result()
        self.assertEqual(result.global_TP, 2)
        self.assertEqual(result.global_FP, 2)
        self.assertEqual(result.global_FN, 2)

        self.assertEqual(result.macro_precision, 0.50)
        self.assertEqual(result.macro_recall, 0.50)
        self.assertTrue(math.isclose(result.micro_recall, 1 / 3))
        self.assertTrue(math.isclose(result.micro_precision, 1 / 3))
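A quick arithmetic check of the asserted values: with TP = FP = FN = 2 globally, each class has one TP, one FP, and one FN, so the per-class precision and recall are both 1/2, which gives the asserted macro values. The textbook micro precision TP / (TP + FP) would be 0.5, while the asserted 1/3 matches TP / (TP + FP + FN); this suggests, though the snippets do not confirm it, that this `Result` divides by all three counts:

    # Arithmetic behind the assertions (TP = FP = FN = 2):
    tp, fp, fn = 2, 2, 2
    print(tp / (tp + fp))       # 0.5      -> textbook micro precision
    print(tp / (tp + fp + fn))  # 0.333... -> the 1/3 the test asserts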
Example #4
    def test(self,
             validation_split=0.6,
             subsample=False,
             subsample_split=0.01):
        """Test the model

        Args:
            validation_split (float, optional): ratio between testing and learning set. Defaults to 0.6.
            subsample (bool, optional): if False, use all the available data, if True, use only a ratio of the data (subsample_split*data). Defaults to False.
            subsample_split (float, optional): ratio of the data to use. Defaults to 0.01.
        """
        dataloader_test = self.create_dataloader(
            validation_split=validation_split,
            condition="Test",
            subsample=subsample,
            subsample_split=subsample_split)
        result = Result(self.dataset, condition="Test", subsample=subsample)
        if self.model == -1:
            # self.model == -1 marks a model that has not been loaded yet.
            self.load_model()
        self.model.eval()
        self.conf_matrix = torch.zeros(self.dataset.number_of_classes,
                                       self.dataset.number_of_classes)
        for index_batch, batch in enumerate(dataloader_test):
            label = batch['output']
            input_data = batch['input'].to(self.device)
            prediction = self.model(input_data)
            result.update(prediction, label)
            if index_batch % self.batch_result == 0:
                # Periodically report intermediate results without resetting the counters.
                result.computing_result(
                    reinit=False,
                    progress=index_batch / len(dataloader_test))
        self.model.train()
        self.saver.save(model=self.model, result=result, condition="Test")
        result.computing_result(reinit=True, progress=1)
        # Feed the latest micro-F1 to the stopping condition.
        self.stopping_condition.update(result.microf1)
        self.stopping_condition.stop()
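Typical calls, assuming `trainer` is an instance of this class (the name is a placeholder): train() in Example #5 uses the subsampled variant for its intermediate checks and the full variant after each epoch.

    # Quick check on 10% of the test data, as train() does between batches:
    trainer.test(subsample=True, subsample_split=0.1)

    # Full evaluation on the whole test split, as train() does after each epoch:
    trainer.test()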
Example #5
    def train(self, validation_split=0.6, resuming=False):
        """Train the model

        Args:
            validation_split (float, optional): ratio between testing and learning set. Defaults to 0.6.
            resuming (bool, optional): resume the learning from a previous step. Not implemented yet. Defaults to False.
        """
        # Create the dataloader
        dataloader_train = self.create_dataloader(
            validation_split=validation_split, condition="train")
        if resuming:
            self.load_model()
        else:
            self.model = LSTMLayer(num_classes=self.dataset.number_of_classes,
                                   batch_size=self.batch_size).to(self.device)
        # Create the results
        result = Result(self.dataset, condition="Train")
        optimizer = optim.Adam(self.model.parameters())
        loss_fn = nn.CrossEntropyLoss()
        self.model.train()
        logger.info("Cardinality: " + str(self.cardinality) +
                    " Starting the learning step")
        # Start the learning
        while not self.stopping_condition.stop():
            for index_batch, batch in enumerate(dataloader_train):
                optimizer.zero_grad()
                label = batch['output']
                input_data = batch['input'].to(self.device)
                prediction = self.model(input_data)
                loss = loss_fn(prediction, label.to(self.device))
                loss.backward()
                optimizer.step()
                result.update(prediction, label)
                # Compute intermediate results every `batch_result` batches
                # (skipping batch 0).
                if index_batch % self.batch_result == 0 and index_batch != 0:
                    result.computing_result(
                        progress=index_batch / len(dataloader_train))
                    self.saver.save(model=self.model,
                                    result=result,
                                    condition="temp")
                    if not self.exlude_test:
                        # Test only on a subsample
                        self.test(subsample=True, subsample_split=0.1)
                    print(self.stopping_condition)
                # For the "timer" stopping method, check the condition at every batch.
                if (self.stopping_condition.method == "timer"
                        and self.stopping_condition.stop()):
                    logger.debug("[Stopping] Cardinality: " +
                                 str(self.cardinality) + " " +
                                 str(self.stopping_condition) +
                                 " stopping learning step.")
                    break
            # At the end of each epoch, evaluate on the full testing set and
            # update the stopping condition.
            if not self.exlude_test:
                self.test()
            result.computing_result(reinit=True, progress=1)
            if self.stopping_condition.stop():
                logger.debug("[Stopping] Cardinality: " +
                             str(self.cardinality) + " " +
                             str(self.stopping_condition) +
                             " stopping learning step.")
            self.saver.save(model=self.model, result=result, condition="Train")
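The `stopping_condition` object is never defined in these snippets; train() and test() only rely on a small interface (a `method` attribute, `update()`, `stop()`, and a printable form). Below is a hedged sketch of a timer-based implementation matching that interface; the class name, the time budget, and all internals are assumptions:

    import time

    # Hypothetical timer-based stopping condition; only the interface
    # (method, update, stop, __str__) is taken from the snippets above.
    class TimerStoppingCondition:
        def __init__(self, max_seconds=3600):
            self.method = "timer"
            self.start = time.time()
            self.max_seconds = max_seconds
            self.last_score = None

        def update(self, score):
            # test() passes the latest micro-F1; a pure timer only records it.
            self.last_score = score

        def stop(self):
            # Stop once the time budget is exhausted.
            return time.time() - self.start >= self.max_seconds

        def __str__(self):
            return "StoppingCondition(method=timer, elapsed=%.0fs, last micro-F1=%s)" % (
                time.time() - self.start, self.last_score)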