def __init__(self):
    super().__init__()
    setattr(self, "layer_0", nn.Linear(16, 64))
    setattr(self, "layer_0a", torch.nn.ReLU())
    for i in range(1, 3):
        setattr(self, f"layer_{i}", nn.Linear(64, 64))
        setattr(self, f"layer_{i}a", torch.nn.ReLU())
    setattr(self, "layer_end", nn.Linear(64, 1))
    self.train_mse = MeanSquaredError()
    self.valid_mse = MeanSquaredError()
    self.test_mse = MeanSquaredError()
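# Hypothetical companion to the __init__ above (a forward pass is not shown in the source):
# walk the dynamically named layers created with setattr. The layer names and the
# range(1, 3) bound mirror the constructor; everything else here is an assumption.
def forward(self, x):
    x = self.layer_0(x)
    x = self.layer_0a(x)
    for i in range(1, 3):
        x = getattr(self, f"layer_{i}")(x)
        x = getattr(self, f"layer_{i}a")(x)
    return self.layer_end(x)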
def test_result_collection_batch_size_extraction():
    fx_name = "training_step"
    log_val = torch.tensor(7.0)

    results = _ResultCollection(training=True, device="cpu")
    results.batch = torch.randn(1, 4)
    train_mse = MeanSquaredError()
    train_mse(torch.randn(4, 5), torch.randn(4, 5))
    results.log(fx_name, "train_logs", {"mse": train_mse, "log_val": log_val}, on_step=False, on_epoch=True)
    assert results.batch_size == 1
    assert isinstance(results["training_step.train_logs"]["mse"].value, MeanSquaredError)
    assert results["training_step.train_logs"]["log_val"].value == log_val

    results = _ResultCollection(training=True, device="cpu")
    results.batch = torch.randn(1, 4)
    results.log(fx_name, "train_log", log_val, on_step=False, on_epoch=True)
    assert results.batch_size == 1
    assert results["training_step.train_log"].value == log_val
    assert results["training_step.train_log"].cumulated_batch_size == 1
def __init__(
    self,
    n_channel: int = 1,
    learning_rate: float = 1e-4,
    backbone: Union[str, nn.Module] = "simple-cnn",
    backbone_output_size: int = 0,
    n_hidden: int = 512,
    dropout: float = 0.2,
    loss_fn: str = "mse",
    lr_scheduler: bool = False,
    lr_scheduler_warmup_steps: int = 100,
    lr_scheduler_total_steps: int = 0,
    **kwargs,
):
    super().__init__()
    self.save_hyperparameters()
    if isinstance(backbone, str):
        self.backbone, backbone_output_size = get_backbone(backbone, channels=n_channel, dropout=dropout, **kwargs)
    self.regressor = Classifier(backbone_output_size, 1, n_hidden, dropout)
    if loss_fn == "mse":
        self.loss_fn = nn.MSELoss()
    elif loss_fn == "mae":
        self.loss_fn = nn.L1Loss()  # MAE
    else:
        raise RuntimeError("Undefined loss function")
    self.test_metrics = MetricCollection([
        MeanAbsoluteError(),
        MeanSquaredError(),
    ])
def test_result_collection_no_batch_size_extraction():
    results = _ResultCollection(training=True, device="cpu")
    results.batch = torch.randn(1, 4)
    fx_name = "training_step"
    batch_size = 10
    log_val = torch.tensor(7.0)

    train_mae = MeanAbsoluteError()
    train_mae(torch.randn(4, 5), torch.randn(4, 5))
    train_mse = MeanSquaredError()
    train_mse(torch.randn(4, 5), torch.randn(4, 5))

    results.log(fx_name, "step_log_val", log_val, on_step=True, on_epoch=False)
    results.log(fx_name, "epoch_log_val", log_val, on_step=False, on_epoch=True, batch_size=batch_size)
    results.log(fx_name, "epoch_sum_log_val", log_val, on_step=True, on_epoch=True, reduce_fx="sum")
    results.log(fx_name, "train_mae", train_mae, on_step=True, on_epoch=False)
    results.log(fx_name, "train_mse", {"mse": train_mse}, on_step=True, on_epoch=False)

    assert results.batch_size is None
    assert isinstance(results["training_step.train_mse"]["mse"].value, MeanSquaredError)
    assert isinstance(results["training_step.train_mae"].value, MeanAbsoluteError)
    assert results["training_step.step_log_val"].value == log_val
    assert results["training_step.step_log_val"].cumulated_batch_size == 0
    assert results["training_step.epoch_log_val"].value == log_val * batch_size
    assert results["training_step.epoch_log_val"].cumulated_batch_size == batch_size
    assert results["training_step.epoch_sum_log_val"].value == log_val
def get_metrics_collections_base(
    prefix,
    is_regressor: bool = True
    # device="cuda" if torch.cuda.is_available() else "cpu",
):
    if is_regressor:
        metrics = MetricCollection(
            {
                "MeanAbsoluteError": MeanAbsoluteError(),
                "MeanSquaredError": MeanSquaredError(),
                "SpearmanCorrcoef": SpearmanCorrcoef(),
                "PearsonCorrcoef": PearsonCorrcoef(),
            },
            prefix=prefix)
    else:
        metrics = MetricCollection(
            {
                "Accuracy": Accuracy(),
                "Top_3": Accuracy(top_k=3),
                # "Top_5": Accuracy(top_k=5),
                # "Precision_micro": Precision(num_classes=NUM_CLASS, average="micro"),
                # "Precision_macro": Precision(num_classes=NUM_CLASS, average="macro"),
                # "Recall_micro": Recall(num_classes=NUM_CLASS, average="micro"),
                # "Recall_macro": Recall(num_classes=NUM_CLASS, average="macro"),
                # "F1_micro": torchmetrics.F1(NUM_CLASS, average="micro"),
                # "F1_macro": torchmetrics.F1(NUM_CLASS, average="micro"),
            },
            prefix=prefix)
    return metrics
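# Minimal usage sketch (illustrative only, not from the source): build the regression
# collection with a hypothetical "train_" prefix and feed it dummy tensors. Assumes the
# same torch / torchmetrics imports the function above relies on.
metrics = get_metrics_collections_base(prefix="train_", is_regressor=True)
preds = torch.randn(8)
target = torch.randn(8)
metrics.update(preds, target)
print(metrics.compute())  # dict keyed by "train_MeanAbsoluteError", "train_MeanSquaredError", ...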
class LitAutoEncoder(pl.LightningModule):
    def __init__(self, num_epochs: int = 5, lr: float = 1e-4):
        super().__init__()
        self.save_hyperparameters()
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU(), nn.Linear(64, 3))
        self.decoder = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 28 * 28))
        self.mse = MeanSquaredError()

    def forward(self, x):
        # in lightning, forward defines the prediction/inference actions
        embedding = self.encoder(x)
        return embedding

    def training_step(self, batch, batch_idx):
        x, y = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = self.mse(x_hat, x)
        self.log('train_mse', loss, sync_dist=True)
        return loss

    def training_epoch_end(self, outputs):
        self.mse.reset()

    # ---------------------
    # training setup
    # ---------------------
    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
        scheduler = CosineAnnealingWarmRestarts(optimizer, self.hparams.num_epochs, eta_min=1e-4)
        metric_to_track = 'train_mse'
        return {
            'optimizer': optimizer,
            'lr_scheduler': scheduler,
            'monitor': metric_to_track
        }
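# Hedged training sketch (not part of the source): wire LitAutoEncoder to a Trainer
# using random 28x28 tensors in place of a real image dataset. The dataset, sizes and
# max_epochs are illustrative; assumes pytorch_lightning is imported as pl.
from torch.utils.data import DataLoader, TensorDataset

fake_images = torch.rand(256, 1, 28, 28)
fake_labels = torch.zeros(256, dtype=torch.long)  # ignored by the autoencoder's training_step
train_loader = DataLoader(TensorDataset(fake_images, fake_labels), batch_size=32)

model = LitAutoEncoder(num_epochs=5, lr=1e-4)
trainer = pl.Trainer(max_epochs=5)
trainer.fit(model, train_loader)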
def test_raises_error_on_wrong_input():
    """Make sure that input type errors are raised on the wrong input."""
    with pytest.raises(TypeError, match="Metric arg need to be an instance of a .*"):
        MetricTracker([1, 2, 3])

    with pytest.raises(ValueError, match="Argument `maximize` should either be a single bool or list of bool"):
        MetricTracker(MeanAbsoluteError(), maximize=2)

    with pytest.raises(
        ValueError, match="The len of argument `maximize` should match the length of the metric collection"
    ):
        MetricTracker(MetricCollection([MeanAbsoluteError(), MeanSquaredError()]), maximize=[False, False, False])
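# For contrast with the error cases above, a hedged sketch (illustrative tensors, not
# from the source) of constructing and driving MetricTracker the supported way:
tracker = MetricTracker(MeanAbsoluteError(), maximize=False)
tracker.increment()  # start tracking a new "version" of the wrapped metric
tracker.update(torch.randn(50), torch.randn(50))
best_value, best_step = tracker.best_metric(return_step=True)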
def __init__(self, num_users, num_movies, user_training_tensor, movie_training_tensor, label_training_tensor, batch_size):
    super().__init__()
    self.users_tensor = user_training_tensor
    self.movies_tensor = movie_training_tensor
    self.labels_tensor = label_training_tensor
    self.user_embedding = torch.nn.Embedding(num_embeddings=num_users, embedding_dim=32)
    self.movie_embedding = torch.nn.Embedding(num_embeddings=num_movies, embedding_dim=32)
    self.output = torch.nn.Linear(64, 1)
    self.batch_size = batch_size
    # defining some metrics attributes
    self.MSE = MeanSquaredError()
    self.MAE = MeanAbsoluteError()
    self.epoch_loss = 0
    loss_history.clear()  # initiate the loss history global variable
    v_min = v_min if v_min < val else val
    v_max = v_max if v_max > val else val
    raw = base_fn(preds, target)
    return [raw.cpu().numpy(), v_min, v_max]


@pytest.mark.parametrize(
    "preds, target, base_metric",
    [
        (
            torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES).softmax(dim=-1),
            torch.randint(NUM_CLASSES, (NUM_BATCHES, BATCH_SIZE)),
            Accuracy(num_classes=NUM_CLASSES),
        ),
        (torch.randn(NUM_BATCHES, BATCH_SIZE), torch.randn(NUM_BATCHES, BATCH_SIZE), MeanSquaredError()),
    ],
)
class TestMinMaxWrapper(MetricTester):
    """Test the MinMaxMetric wrapper works as expected."""

    atol = 1e-6

    # TODO: fix ddp=True case, difference in how compare function works and wrapper metric
    @pytest.mark.parametrize("ddp", [False])
    def test_minmax_wrapper(self, preds, target, base_metric, ddp):
        self.run_class_metric_test(
            ddp,
            preds,
            target,
            TestingMinMaxMetric,
def test_raises_error_if_increment_not_called(method, method_input):
    tracker = MetricTracker(Accuracy(num_classes=10))
    with pytest.raises(ValueError, match=f"`{method}` cannot be called before .*"):
        if method_input is not None:
            getattr(tracker, method)(*method_input)
        else:
            getattr(tracker, method)()


@pytest.mark.parametrize(
    "base_metric, metric_input, maximize",
    [
        (Accuracy(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (Precision(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (Recall(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (MeanSquaredError(), (torch.randn(50), torch.randn(50)), False),
        (MeanAbsoluteError(), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([Accuracy(num_classes=10), Precision(num_classes=10), Recall(num_classes=10)]),
            (torch.randint(10, (50,)), torch.randint(10, (50,))),
            True,
        ),
        (
            MetricCollection([Accuracy(num_classes=10), Precision(num_classes=10), Recall(num_classes=10)]),
            (torch.randint(10, (50,)), torch.randint(10, (50,))),
            [True, True, True],
        ),
        (MetricCollection([MeanSquaredError(), MeanAbsoluteError()]), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([MeanSquaredError(), MeanAbsoluteError()]),
            (torch.randn(50), torch.randn(50)),
def __init__(self, device='cuda:0'):
    super(iRMSE, self).__init__()
    self.rmse = MeanSquaredError(squared=False).to(device)
"kwargs", [ {}, dict(train_only=True), dict( loss=MultiLoss([QuantileLoss(), MAE()]), data_loader_kwargs=dict( time_varying_unknown_reals=["volume", "discount"], target=["volume", "discount"], ), ), dict( loss=CrossEntropy(), data_loader_kwargs=dict(target="agency", ), ), dict(loss=MeanSquaredError()), dict( loss=MeanSquaredError(), data_loader_kwargs=dict(min_prediction_length=1, min_encoder_length=1), ), ], ) def test_integration(data_with_covariates, tmp_path, gpus, kwargs): _integration(data_with_covariates.assign(target=lambda x: x.volume), tmp_path, gpus, **kwargs) @pytest.fixture def model(dataloaders_with_covariates): dataset = dataloaders_with_covariates["train"].dataset
    found_zero = _sample_checker(old_samples, new_samples, operator.ne, 0)
    assert found_zero, "resampling did not work because all samples were sampled at least once"


@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
@pytest.mark.parametrize(
    "metric, sk_metric",
    [
        [Precision(average="micro"), partial(precision_score, average="micro")],
        [Recall(average="micro"), partial(recall_score, average="micro")],
        [MeanSquaredError(), mean_squared_error],
    ],
)
def test_bootstrap(device, sampling_strategy, metric, sk_metric):
    """Test that the different bootstraps get updated as expected and that the compute method works."""
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("Test with device='cuda' requires gpu")

    _kwargs = {"base_metric": metric, "mean": True, "std": True, "raw": True, "sampling_strategy": sampling_strategy}

    if _TORCH_GREATER_EQUAL_1_7: