Example #1
    def __get_metrics_map(self):
        from fastai.metrics import rmse, mse, mae, accuracy, FBeta, RocAucBinary, Precision, Recall, R2Score
        from .fastai_helpers import medae
        from .quantile_helpers import HuberPinballLoss
        metrics_map = {
            # Regression
            'root_mean_squared_error': rmse,
            'mean_squared_error': mse,
            'mean_absolute_error': mae,
            'r2': R2Score(),
            'median_absolute_error': medae,

            # Classification
            'accuracy': accuracy,
            'f1': FBeta(beta=1),
            'f1_macro': FBeta(beta=1, average='macro'),
            'f1_micro': FBeta(beta=1, average='micro'),
            'f1_weighted': FBeta(beta=1, average='weighted'),  # this one has some issues
            'roc_auc': RocAucBinary(),
            'precision': Precision(),
            'precision_macro': Precision(average='macro'),
            'precision_micro': Precision(average='micro'),
            'precision_weighted': Precision(average='weighted'),
            'recall': Recall(),
            'recall_macro': Recall(average='macro'),
            'recall_micro': Recall(average='micro'),
            'recall_weighted': Recall(average='weighted'),
            'log_loss': None,
            'pinball_loss': HuberPinballLoss(quantile_levels=self.quantile_levels),
            # Not supported: pac_score
        }
        return metrics_map
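
A minimal sketch (not part of the original source; the tensors are made up) showing that the entries of such a map can be evaluated directly on a batch of predictions and targets, here for a few of the regression metrics:

import torch
from fastai.metrics import rmse, mae, R2Score

preds = torch.tensor([2.5, 0.0, 2.1, 7.8])
targs = torch.tensor([3.0, -0.5, 2.0, 7.0])

# Each metric accepts a batch of predictions and targets directly.
print(float(rmse(preds, targs)))
print(float(mae(preds, targs)))
print(float(R2Score()(preds, targs)))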
Example #2
    def __get_objective_func_name(self):
        from fastai.metrics import root_mean_squared_error, mean_squared_error, mean_absolute_error, accuracy, FBeta, AUROC, Precision, Recall, r2_score

        metrics_map = {
            # Regression
            'root_mean_squared_error': root_mean_squared_error,
            'mean_squared_error': mean_squared_error,
            'mean_absolute_error': mean_absolute_error,
            'r2': r2_score,
            # Not supported: median_absolute_error

            # Classification
            'accuracy': accuracy,

            'f1': FBeta(beta=1),
            'f1_macro': FBeta(beta=1, average='macro'),
            'f1_micro': FBeta(beta=1, average='micro'),
            'f1_weighted': FBeta(beta=1, average='weighted'),  # this one has some issues

            'roc_auc': AUROC(),

            'precision': Precision(),
            'precision_macro': Precision(average='macro'),
            'precision_micro': Precision(average='micro'),
            'precision_weighted': Precision(average='weighted'),

            'recall': Recall(),
            'recall_macro': Recall(average='macro'),
            'recall_micro': Recall(average='micro'),
            'recall_weighted': Recall(average='weighted'),
            'log_loss': None,
            # Not supported: pac_score
        }

        # Unsupported metrics will be replaced by defaults for a given problem type
        objective_func_name = self.stopping_metric.name
        if objective_func_name not in metrics_map.keys():
            if self.problem_type == REGRESSION:
                objective_func_name = 'mean_squared_error'
            else:
                objective_func_name = 'log_loss'
            logger.warning(f'Metric {self.stopping_metric.name} is not supported by this model - using {objective_func_name} instead')

        nn_metric = metrics_map.get(objective_func_name)
        return nn_metric, objective_func_name
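
The interesting part of this snippet is the fallback: an unsupported stopping metric is replaced by a per-problem-type default before the lookup. A minimal standalone sketch of that pattern (the function and argument names here are illustrative, and the plain string 'regression' stands in for the REGRESSION constant used above):

def resolve_objective(metric_name, metrics_map, problem_type):
    """Return (fastai_metric_or_None, resolved_name), falling back to a default metric."""
    if metric_name not in metrics_map:
        # Mirror the snippet: regression falls back to MSE, everything else to log loss.
        metric_name = 'mean_squared_error' if problem_type == 'regression' else 'log_loss'
    return metrics_map.get(metric_name), metric_name

# An unsupported metric such as 'pac_score' resolves to the log-loss default:
print(resolve_objective('pac_score', {'log_loss': None}, 'binary'))  # (None, 'log_loss')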
Example #3
import torch
from fastai.metrics import FBeta, FBetaMulti, accuracy_multi

# NOTE: TAGS and DEFORESTATION_TAGS are module-level constants defined elsewhere
# in the source file (the full tag list and the subset of tags indicating deforestation).
def get_performance_metrics(model, imgs, labels, dataset_name):
    # pred_logits = model.model(imgs)  # This is the PyTorch way to do it, which is faster but
    # doesn't apply all the preprocessing exactly like in FastAI
    pred_logits = list()
    for i in range(imgs.shape[0]):
        img_pred = model.predict(imgs[i])[2]
        pred_logits.append(img_pred)
    pred_logits = torch.stack(pred_logits)
    pred_proba = torch.sigmoid(pred_logits)
    pred = (pred_proba > 0.6).int()  # 0.6 seems to be the threshold used by FastAI
    if dataset_name == "Oil palm":
        # Extract a deforestation label based on relevant tags
        deforestation_tags_idx = [
            TAGS.index(deforestation_tag)
            for deforestation_tag in DEFORESTATION_TAGS
        ]
        pred_logits = pred_logits[:, deforestation_tags_idx]
        pred_logits = torch.sum(pred_logits, dim=1)
        pred = pred[:, deforestation_tags_idx]
        pred = (torch.sum(pred, dim=1) > 0).int()
        acc = float(torch.mean((pred == labels).float()))
        fbeta = FBeta(beta=2)(preds=pred, targs=labels)
    else:
        acc = float(accuracy_multi(inp=pred_logits, targ=labels, thresh=0.6))
        fbeta = FBetaMulti(beta=2, average="samples", thresh=0.6)(
            preds=pred, targs=labels)
    return pred, acc, fbeta
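
A standalone sketch of the two multi-label metrics the function relies on, evaluated on synthetic logits and targets (2 images, 3 tags). fastai's FBetaMulti wraps scikit-learn's fbeta_score, so the F2 score is computed with sklearn directly here; the 0.6 threshold mirrors the snippet:

import torch
from sklearn.metrics import fbeta_score
from fastai.metrics import accuracy_multi

logits = torch.tensor([[2.0, -1.0, 0.5],
                       [-0.5, 1.5, -2.0]])
targets = torch.tensor([[1, 0, 1],
                        [0, 1, 0]])

pred = (torch.sigmoid(logits) > 0.6).int()                          # threshold per tag
acc = float(accuracy_multi(inp=logits, targ=targets, thresh=0.6))   # sigmoid applied internally
f2 = fbeta_score(targets.numpy(), pred.numpy(), beta=2, average="samples")
print(pred, acc, f2)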
Example #4
    def createmodel(self, quantize=True):
        """Creates the model and attaches with the dataloader.

        By default it sets up the model for quantization aware training.

        Parameters
        ----------
        quantize : bool, optional
            To quantize or not, by default True
        """
        print("Creating model..")

        vision.learner.create_body = self.create_custom_body

        self.learn = cnn_learner(
            self.data,
            models.mobilenet_v2,
            pretrained=True,
            metrics=[error_rate, FBeta(beta=1), Precision(), Recall(), AUROC()],
            split_on=custom_split,
            model_dir=self.model_dir,
        )

        if quantize:
            self.learn.model[0].qconfig = torch.quantization.default_qat_qconfig
            self.learn.model = torch.quantization.prepare_qat(
                self.learn.model, inplace=True
            )
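
For reference, a minimal standalone sketch of the same quantization-aware-training preparation on a plain torchvision MobileNetV2, without the fastai Learner (illustrative only; training and conversion are only indicated as comments):

import torch
import torchvision

model = torchvision.models.mobilenet_v2()  # randomly initialised backbone for the sketch
model.train()                              # prepare_qat expects a model in training mode

# Attach a default QAT config and insert fake-quantization modules in place.
model.qconfig = torch.quantization.default_qat_qconfig
torch.quantization.prepare_qat(model, inplace=True)

# ...train as usual, then convert for quantized inference:
# model.eval()
# quantized_model = torch.quantization.convert(model)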
Example #5
from fastai.metrics import FBeta, accuracy

# NOTE: `conf` is an external configuration dict defined elsewhere in the source file.
def get_metrics():
    if conf['classificiation_type'] == 'binary':
        f1 = FBeta(average='macro', beta=1)
        return [accuracy, f1]