Example #1
import pytest

from torchmetrics import MeanAbsoluteError, MeanSquaredError, MetricCollection, MetricTracker


def test_raises_error_on_wrong_input():
    """Make sure that input type errors are raised on the wrong input."""
    with pytest.raises(TypeError, match="Metric arg need to be an instance of a .*"):
        MetricTracker([1, 2, 3])

    with pytest.raises(ValueError, match="Argument `maximize` should either be a single bool or list of bool"):
        MetricTracker(MeanAbsoluteError(), maximize=2)

    with pytest.raises(
        ValueError, match="The len of argument `maximize` should match the length of the metric collection"
    ):
        MetricTracker(MetricCollection([MeanAbsoluteError(), MeanSquaredError()]), maximize=[False, False, False])
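As a point of reference, here is a minimal sketch of the happy path for MetricTracker (not taken from the examples above; it assumes the pre-v0.11 torchmetrics API used throughout these snippets, where Accuracy still accepts num_classes directly):

import torch
from torchmetrics import Accuracy, MetricTracker

tracker = MetricTracker(Accuracy(num_classes=10), maximize=True)
for epoch in range(3):
    tracker.increment()  # start bookkeeping for a new epoch
    for _ in range(5):
        preds = torch.randint(10, (50,))
        target = torch.randint(10, (50,))
        tracker.update(preds, target)
best_value, best_step = tracker.best_metric(return_step=True)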
Example #2
    def __init__(
        self,
        n_channel: int = 1,
        learning_rate: float = 1e-4,
        backbone: Union[str, nn.Module] = "simple-cnn",
        backbone_output_size: int = 0,
        n_hidden: int = 512,
        dropout: float = 0.2,
        loss_fn: str = "mse",
        lr_scheduler: bool = False,
        lr_scheduler_warmup_steps: int = 100,
        lr_scheduler_total_steps: int = 0,
        **kwargs,
    ):
        super().__init__()

        self.save_hyperparameters()

        if isinstance(backbone, str):
            self.backbone, backbone_output_size = get_backbone(
                backbone, channels=n_channel, dropout=dropout, **kwargs)
        else:
            # a ready-built nn.Module was passed in; use it as-is together
            # with the caller-supplied backbone_output_size
            self.backbone = backbone

        self.regressor = Classifier(backbone_output_size, 1, n_hidden, dropout)

        if loss_fn == "mse":
            self.loss_fn = nn.MSELoss()
        elif loss_fn == "mae":
            self.loss_fn = nn.L1Loss()  # MAE
        else:
            raise RuntimeError(f"Undefined loss function: {loss_fn}")

        self.test_metrics = MetricCollection([
            MeanAbsoluteError(),
            MeanSquaredError(),
        ])
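For completeness, a test_step that would exercise self.test_metrics might look like the sketch below; the forward pass through self.backbone and self.regressor and the batch layout are assumptions, not part of the original example:

    def test_step(self, batch, batch_idx):
        x, y = batch
        preds = self.regressor(self.backbone(x))
        loss = self.loss_fn(preds, y)
        # calling the collection returns a dict keyed by metric class name
        self.log_dict(self.test_metrics(preds, y))
        self.log("test_loss", loss)
        return loss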
Example #3
def test_result_collection_no_batch_size_extraction():
    results = _ResultCollection(training=True, device="cpu")
    results.batch = torch.randn(1, 4)
    fx_name = "training_step"
    batch_size = 10
    log_val = torch.tensor(7.0)

    train_mae = MeanAbsoluteError()
    train_mae(torch.randn(4, 5), torch.randn(4, 5))
    train_mse = MeanSquaredError()
    train_mse(torch.randn(4, 5), torch.randn(4, 5))
    results.log(fx_name, "step_log_val", log_val, on_step=True, on_epoch=False)
    results.log(fx_name, "epoch_log_val", log_val, on_step=False, on_epoch=True, batch_size=batch_size)
    results.log(fx_name, "epoch_sum_log_val", log_val, on_step=True, on_epoch=True, reduce_fx="sum")
    results.log(fx_name, "train_mae", train_mae, on_step=True, on_epoch=False)
    results.log(fx_name, "train_mse", {"mse": train_mse}, on_step=True, on_epoch=False)

    assert results.batch_size is None
    assert isinstance(results["training_step.train_mse"]["mse"].value, MeanSquaredError)
    assert isinstance(results["training_step.train_mae"].value, MeanAbsoluteError)
    assert results["training_step.step_log_val"].value == log_val
    assert results["training_step.step_log_val"].cumulated_batch_size == 0
    assert results["training_step.epoch_log_val"].value == log_val * batch_size
    assert results["training_step.epoch_log_val"].cumulated_batch_size == batch_size
    assert results["training_step.epoch_sum_log_val"].value == log_val
Example #4
def get_metrics_collections_base(
    prefix,
    is_regressor: bool = True
    # device="cuda" if torch.cuda.is_available() else "cpu",
):
    if is_regressor:
        metrics = MetricCollection(
            {
                "MeanAbsoluteError": MeanAbsoluteError(),
                "MeanSquaredError": MeanSquaredError(),
                "SpearmanCorrcoef": SpearmanCorrcoef(),
                "PearsonCorrcoef": PearsonCorrcoef()
            },
            prefix=prefix)
    else:
        metrics = MetricCollection(
            {
                "Accuracy": Accuracy(),
                "Top_3": Accuracy(top_k=3),
                # "Top_5" :Accuracy(top_k=5),
                # "Precision_micro":Precision(num_classes=NUM_CLASS,average="micro"),
                # "Precision_macro":Precision(num_classes=NUM_CLASS,average="macro"),
                # "Recall_micro":Recall(num_classes=NUM_CLASS,average="micro"),
                # "Recall_macro":Recall(num_classes=NUM_CLASS,average="macro"),
                # "F1_micro":torchmetrics.F1(NUM_CLASS,average="micro"),
                # "F1_macro":torchmetrics.F1(NUM_CLASS,average="micro"),
            },
            prefix=prefix)
    return metrics
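A plausible way to consume this factory (a sketch; the prefixes are illustrative, and MetricCollection.clone is used to derive a second collection with a different prefix):

import torch

train_metrics = get_metrics_collections_base("train_", is_regressor=True)
val_metrics = train_metrics.clone(prefix="val_")

preds, target = torch.randn(16), torch.randn(16)
train_metrics.update(preds, target)
print(train_metrics.compute())  # {"train_MeanAbsoluteError": ..., "train_MeanSquaredError": ...}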
Example #5
        def test_metrics(self):
            metric = MeanAbsolutePercentageError()
            metric_collection = MetricCollection(
                [MeanAbsolutePercentageError(),
                 MeanAbsoluteError()])

            # test single metric
            model = RNNModel(12,
                             "RNN",
                             10,
                             10,
                             n_epochs=1,
                             torch_metrics=metric)
            model.fit(self.series)

            # test metric collection
            model = RNNModel(12,
                             "RNN",
                             10,
                             10,
                             n_epochs=1,
                             torch_metrics=metric_collection)
            model.fit(self.series)

            # test multivariate series
            model = RNNModel(12,
                             "RNN",
                             10,
                             10,
                             n_epochs=1,
                             torch_metrics=metric)
            model.fit(self.multivariate_series)
Example #6
    def __init__(self, hyp_params, target_names, early_stopping):
        super().__init__()
        self.model = MULTModel(hyp_params)
        self.save_hyperparameters(hyp_params)
        self.learning_rate = hyp_params.lr
        self.weight_decay = hyp_params.weight_decay
        self.target_names = target_names

        self.mae_1 = 1 - MeanAbsoluteError()  # compositional metric: evaluates to 1 - MAE
        self.acc2 = Accuracy()
        self.acc7 = Accuracy(multiclass=True)
        self.f1 = F1()
        self.loss = loss_dict[hyp_params.loss_fnc]
        self.opt = opt_dict[hyp_params.optim]

        self.early_stopping = early_stopping
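The 1 - MeanAbsoluteError() line is not a typo: torchmetrics overloads arithmetic operators on metrics, returning a CompositionalMetric that applies the expression when compute() is called. A minimal sketch:

import torch
from torchmetrics import MeanAbsoluteError

score = 1 - MeanAbsoluteError()  # CompositionalMetric evaluating 1 - MAE
score.update(torch.tensor([2.5, 0.0]), torch.tensor([3.0, 0.0]))
print(score.compute())  # tensor(0.7500): MAE is 0.25, so 1 - 0.25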
Example #7
    def __init__(
        self,
        fp_size: int = defaults.FP_SIZE,
        lstm_size: int = defaults.LSTM_SIZE,
        dropout_prob: float = defaults.DROPOUT_PROB,
        learning_rate: float = defaults.LEARNING_RATE,
        weight_decay: float = defaults.WEIGHT_DECAY,
    ) -> None:
        super().__init__()
        self.save_hyperparameters()
        self._tree_lstm = _TreeLstmWithPreCompression(fp_size, lstm_size, dropout_prob)
        self._pdist = torch.nn.PairwiseDistance(p=2)
        self._loss_func = torch.nn.MSELoss()
        self._mae = MeanAbsoluteError()
        self._r2 = R2Score()
        self._lr = learning_rate
        self._weight_decay = weight_decay
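A matching training_step could look like the sketch below; the batch layout (two trees plus a target distance) is an assumption inferred from the pairwise-distance setup, not code from the original project:

    def training_step(self, batch, batch_idx):
        tree_a, tree_b, target = batch
        dist = self._pdist(self._tree_lstm(tree_a), self._tree_lstm(tree_b))
        loss = self._loss_func(dist, target)
        self._mae(dist, target)
        self.log("train_mae", self._mae, on_step=False, on_epoch=True)
        return loss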
Example #8
    def __init__(self, num_users, num_movies, user_training_tensor,
                 movie_training_tensor, label_training_tensor, batch_size):
        super().__init__()
        self.users_tensor = user_training_tensor
        self.movies_tensor = movie_training_tensor
        self.labels_tensor = label_training_tensor

        self.user_embedding = torch.nn.Embedding(num_embeddings=num_users,
                                                 embedding_dim=32)
        self.movie_embedding = torch.nn.Embedding(num_embeddings=num_movies,
                                                  embedding_dim=32)
        self.output = torch.nn.Linear(64, 1)
        self.batch_size = batch_size

        # define metric attributes
        self.MSE = MeanSquaredError()
        self.MAE = MeanAbsoluteError()
        self.epoch_loss = 0
        loss_history.clear()  # reset the global loss-history list
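The excerpt omits forward; given the two 32-dimensional embeddings feeding torch.nn.Linear(64, 1), it presumably concatenates them, roughly as in this sketch (not the original code):

    def forward(self, users, movies):
        user_vec = self.user_embedding(users)      # (batch, 32)
        movie_vec = self.movie_embedding(movies)   # (batch, 32)
        return self.output(torch.cat([user_vec, movie_vec], dim=1))  # (batch, 1)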
Example #9
    tracker = MetricTracker(Accuracy(num_classes=10))
    with pytest.raises(ValueError, match=f"`{method}` cannot be called before .*"):
        if method_input is not None:
            getattr(tracker, method)(*method_input)
        else:
            getattr(tracker, method)()


@pytest.mark.parametrize(
    "base_metric, metric_input, maximize",
    [
        (Accuracy(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (Precision(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (Recall(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (MeanSquaredError(), (torch.randn(50), torch.randn(50)), False),
        (MeanAbsoluteError(), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([Accuracy(num_classes=10), Precision(num_classes=10), Recall(num_classes=10)]),
            (torch.randint(10, (50,)), torch.randint(10, (50,))),
            True,
        ),
        (
            MetricCollection([Accuracy(num_classes=10), Precision(num_classes=10), Recall(num_classes=10)]),
            (torch.randint(10, (50,)), torch.randint(10, (50,))),
            [True, True, True],
        ),
        (MetricCollection([MeanSquaredError(), MeanAbsoluteError()]), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([MeanSquaredError(), MeanAbsoluteError()]),
            (torch.randn(50), torch.randn(50)),
            [False, False],
Example #10
            getattr(tracker, method)(*method_input)
        else:
            getattr(tracker, method)()


@pytest.mark.parametrize(
    "base_metric, metric_input, maximize",
    [
        (Accuracy(num_classes=10),
         (torch.randint(10, (50, )), torch.randint(10, (50, ))), True),
        (Precision(num_classes=10),
         (torch.randint(10, (50, )), torch.randint(10, (50, ))), True),
        (Recall(num_classes=10),
         (torch.randint(10, (50, )), torch.randint(10, (50, ))), True),
        (MeanSquaredError(), (torch.randn(50), torch.randn(50)), False),
        (MeanAbsoluteError(), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([
                Accuracy(num_classes=10),
                Precision(num_classes=10),
                Recall(num_classes=10)
            ]),
            (torch.randint(10, (50, )), torch.randint(10, (50, ))),
            True,
        ),
        (
            MetricCollection([
                Accuracy(num_classes=10),
                Precision(num_classes=10),
                Recall(num_classes=10)
            ]),
Example #11
    def __init__(self, device="cuda:0"):
        super(MAE, self).__init__()
        self.mae = MeanAbsoluteError().to(device)
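Usage of such a wrapper might look like the sketch below (assuming the surrounding MAE class subclasses torch.nn.Module; that context is not shown in the excerpt):

import torch

mae = MAE(device="cpu")
preds = torch.tensor([1.0, 2.0])
target = torch.tensor([1.5, 2.0])
print(mae.mae(preds, target))  # tensor(0.2500)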