Example #1
0
    def test_epoch(self, epoch):
        """Run one validation pass and return the aggregated statistics dict.

        Iterates `self.validdataloader` under `torch.no_grad()`, accumulates
        scalar metrics and a confusion matrix via `ClassMetric`, and finally
        attaches last-batch artefacts (inputs, targets, probas, ...) to the
        returned stats dict.
        """
        # evaluation mode: dropout (and similar train-only layers) disabled
        self.model.eval()

        # running confusion matrix + scalar-metric accumulator
        metric = ClassMetric(num_classes=self.nclasses)

        with torch.no_grad():
            for batch_index, batch in enumerate(self.validdataloader):

                inputs, targets = batch

                if torch.cuda.is_available():
                    inputs, targets = inputs.cuda(), targets.cuda()

                # model expects (batch, features, time): swap the last two axes
                logprobabilities, deltas, pts, budget = self.model.forward(
                    inputs.transpose(1, 2))
                loss, stats = self.loss_criterion(logprobabilities,
                                                  pts,
                                                  targets,
                                                  self.earliness_factor,
                                                  self.entropy_factor,
                                                  ptsepsilon=self.ptsepsilon)
                prediction, t_stop = self.model.predict(
                    logprobabilities, deltas)

                stats = metric.add(stats)

                # majority-voted target label per sequence vs. hard prediction
                accuracy_metrics = metric.update_confmat(
                    targets.mode(1)[0].detach().cpu().numpy(),
                    prediction.detach().cpu().numpy())

                stats["accuracy"] = accuracy_metrics["overall_accuracy"]
                for key in ("accuracy", "recall", "precision", "f1"):
                    stats["mean_" + key] = accuracy_metrics[key].mean()
                stats["kappa"] = accuracy_metrics["kappa"]
                if t_stop is not None:
                    # fraction of the sequence consumed before stopping
                    earliness = (t_stop.float() / (inputs.shape[1] - 1)).mean()
                    stats["earliness"] = metric.update_earliness(
                        earliness.cpu().detach().numpy())

            # NOTE: tensors below come from the *last* batch only
            stats["confusion_matrix"] = metric.hist
            stats["targets"] = targets.cpu().numpy()
            stats["inputs"] = inputs.cpu().numpy()
            if deltas is not None:
                stats["deltas"] = deltas.detach().cpu().numpy()
            if pts is not None:
                stats["pts"] = pts.detach().cpu().numpy()
            if budget is not None:
                stats["budget"] = budget.detach().cpu().numpy()

            # log-probabilities -> probabilities, time axis first
            stats["probas"] = (logprobabilities.exp()
                               .transpose(0, 1)
                               .detach().cpu().numpy())

        return stats
Example #2
0
    def train_epoch(self, epoch):
        """Run one optimization pass over `self.traindataloader`.

        Performs forward, loss, backward and optimizer step per batch,
        accumulating scalar metrics and a confusion matrix via `ClassMetric`.
        Returns the stats dict of the last batch (with running metrics).
        """
        # training mode: dropout active
        self.model.train()

        # running confusion matrix + scalar-metric accumulator
        metric = ClassMetric(num_classes=self.nclasses)

        for batch_index, batch in enumerate(self.traindataloader):
            self.optimizer.zero_grad()

            inputs, targets, meta = batch

            if torch.cuda.is_available():
                inputs, targets = inputs.cuda(), targets.cuda()

            # model expects (batch, features, time): swap the last two axes
            logprobabilities, deltas, pts, budget = self.model.forward(
                inputs.transpose(1, 2))

            loss, stats = self.loss_criterion(logprobabilities, pts, targets,
                                              self.earliness_factor,
                                              self.entropy_factor,
                                              self.ptsepsilon,
                                              self.earliness_reward_power)
            loss.backward()

            # ScheduledOptim wraps a base optimizer and owns the lr schedule,
            # so the current lr must be read from the wrapped optimizer
            if isinstance(self.optimizer, ScheduledOptim):
                self.optimizer.step_and_update_lr()
                optimizer_state = self.optimizer._optimizer.state_dict()
            else:
                self.optimizer.step()
                optimizer_state = self.optimizer.state_dict()
            lr = optimizer_state["param_groups"][0]["lr"]

            prediction, t_stop = self.model.predict(logprobabilities, deltas)

            stats = metric.add(stats)
            stats["lr"] = lr

            # majority-voted target label per sequence vs. hard prediction
            accuracy_metrics = metric.update_confmat(
                targets.mode(1)[0].detach().cpu().numpy(),
                prediction.detach().cpu().numpy())
            stats["accuracy"] = accuracy_metrics["overall_accuracy"]
            for key in ("accuracy", "recall", "precision", "f1"):
                stats["mean_" + key] = accuracy_metrics[key].mean()
            stats["kappa"] = accuracy_metrics["kappa"]
            if t_stop is not None:
                # fraction of the sequence consumed before stopping
                earliness = (t_stop.float() / (inputs.shape[1] - 1)).mean()
                stats["earliness"] = metric.update_earliness(
                    earliness.cpu().detach().numpy())

        return stats
Example #3
0
    def test_epoch(self, dataloader, epoch=None):
        """Evaluate the model on `dataloader` and return a statistics dict.

        Collects per-batch predictions, labels, probabilities and sample ids
        for the whole epoch; scalar metrics and the confusion matrix are
        accumulated through `ClassMetric`.
        """
        # evaluation mode: dropout (and similar train-only layers) disabled
        self.model.eval()

        # running confusion matrix + scalar-metric accumulator
        metric = ClassMetric(num_classes=self.nclasses)

        # per-batch numpy results gathered over the whole epoch
        tstops = []
        predictions = []
        probas = []
        ids_list = []
        labels = []

        with torch.no_grad():
            for batch_index, batch in enumerate(dataloader):

                inputs, targets, ids = batch

                if torch.cuda.is_available():
                    inputs, targets = inputs.cuda(), targets.cuda()

                # model expects (batch, features, time): swap the last two axes
                logprobabilities, deltas, pts, budget = self.model.forward(
                    inputs.transpose(1, 2))

                loss = F.nll_loss(logprobabilities, targets[:, 0])

                stats = {"loss": loss}

                prediction = self.model.predict(logprobabilities)
                # this variant's predict() yields no stopping time, so every
                # t_stop-dependent branch below is skipped
                t_stop = None

                # move everything to numpy from here on
                prediction = prediction.detach().cpu().numpy()
                # majority-voted target label per sequence
                label = targets.mode(1)[0].detach().cpu().numpy()
                if t_stop is not None:
                    t_stop = t_stop.cpu().detach().numpy()
                if pts is not None:
                    pts = pts.detach().cpu().numpy()
                if deltas is not None:
                    deltas = deltas.detach().cpu().numpy()
                if budget is not None:
                    budget = budget.detach().cpu().numpy()

                if t_stop is not None:
                    tstops.append(t_stop)
                predictions.append(prediction)
                labels.append(label)
                probas.append(logprobabilities.exp().detach().cpu().numpy())
                ids_list.append(ids.detach().cpu().numpy())

                stats = metric.add(stats)

                accuracy_metrics = metric.update_confmat(label, prediction)

                stats["accuracy"] = accuracy_metrics["overall_accuracy"]
                for key in ("accuracy", "recall", "precision", "f1"):
                    stats["mean_" + key] = accuracy_metrics[key].mean()
                stats["kappa"] = accuracy_metrics["kappa"]
                if t_stop is not None:
                    # fraction of the sequence consumed before stopping
                    earliness = (t_stop.astype(float) /
                                 (inputs.shape[1] - 1)).mean()
                    stats["earliness"] = metric.update_earliness(earliness)

            # NOTE: targets/inputs/deltas/pts/budget come from the last batch
            stats["confusion_matrix"] = copy.copy(metric.hist)
            stats["targets"] = targets.cpu().numpy()
            stats["inputs"] = inputs.cpu().numpy()
            if deltas is not None:
                stats["deltas"] = deltas
            if pts is not None:
                stats["pts"] = pts
            if budget is not None:
                stats["budget"] = budget

        # concatenate the per-batch collections for the whole epoch
        if t_stop is not None:
            stats["t_stops"] = np.hstack(tstops)
        stats["predictions"] = np.hstack(predictions)  # N
        stats["labels"] = np.hstack(labels)  # N
        stats["probas"] = np.vstack(probas)  # NxC
        stats["ids"] = np.hstack(ids_list)

        return stats