Example #1
 def prediction_metrics(X_bin, X_cts, mean_cts, proba_bin):
     mse = 0.
     auc = 0.
     if X_cts is not None:
         # Keep only the observed (non-NaN) continuous entries before scoring.
         which_cts = ~X_cts.isnan()
         mse = mean_squared_error(mean_cts[which_cts],
                                  X_cts[which_cts]).item()
     if X_bin is not None:
         which_bin = ~X_bin.isnan()
         auc = auroc(proba_bin[which_bin], X_bin[which_bin]).item()
     return auc, mse
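The masking pattern above can be reproduced on toy tensors. A minimal sketch, assuming torchmetrics.functional.mean_squared_error; the data below is made up for illustration:

import torch
from torchmetrics.functional import mean_squared_error

# A continuous vector with a missing (NaN) entry and its predicted means.
X_cts = torch.tensor([1.0, float("nan"), 2.0, 4.0])
mean_cts = torch.tensor([0.9, 0.0, 2.1, 3.5])

which_cts = ~X_cts.isnan()  # mask of observed entries
mse = mean_squared_error(mean_cts[which_cts], X_cts[which_cts]).item()
print(mse)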
Example #2
    def validation_step(self, batch, batch_idx):
        X, fx = batch
        X_compressed = self.conv(self.X_fit)
        km = self.cal_km(self.params, X_compressed, X)
        # Min-max scale the absolute coefficients to [0, 1] before weighting by the labels.
        alpha_i = torch.abs(self.untreated_coef)
        constrained_alpha_i = (alpha_i - torch.min(alpha_i)) / (torch.max(alpha_i) - torch.min(alpha_i))
        coef = constrained_alpha_i * self.label
        fx_hat = torch.sum(coef * km.t(), axis=1)
        loss = F.smooth_l1_loss(fx_hat, fx)
        var = FM.explained_variance(fx_hat, fx)
        mae = FM.mean_absolute_error(fx_hat, fx)
        mse = FM.mean_squared_error(fx_hat, fx)

        val_metrics = {'val_var': var, 'val_mae': mae, 'val_mse': mse, 'val_loss': loss}
        self.log_dict(val_metrics)
        return val_metrics
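The metric calls at the end of this step can be tried in isolation; a minimal sketch on toy tensors, assuming the FM alias refers to torchmetrics.functional (the values are illustrative):

import torch
import torchmetrics.functional as FM

fx_hat = torch.tensor([2.4, 0.1, 1.9, 7.6])
fx = torch.tensor([3.0, -0.5, 2.0, 7.0])

val_metrics = {
    'val_var': FM.explained_variance(fx_hat, fx),
    'val_mae': FM.mean_absolute_error(fx_hat, fx),
    'val_mse': FM.mean_squared_error(fx_hat, fx),
}
# Inside a LightningModule these values would be passed to self.log_dict(val_metrics).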
Example #3
 def prediction_metrics(self, X_bin, X_cts, mean_cts, proba_bin):
     n_sample = self.model.n_samples
     mse = 0.
     auc = 0.
     if X_cts is not None:
         X_cts = X_cts.unsqueeze(1)
         which_cts = ~X_cts.isnan()
         for i in range(n_sample):
             mean_cts_tmp = mean_cts[:, [i], :]
             mse += mean_squared_error(mean_cts_tmp[which_cts], X_cts[which_cts]).item()
         mse = mse / n_sample
     if X_bin is not None:
         X_bin = X_bin.unsqueeze(1)
         which_bin = ~X_bin.isnan()
         for i in range(n_sample):
             proba_bin_tmp = proba_bin[:, [i], :]
             auc += auroc(proba_bin_tmp[which_bin], X_bin[which_bin]).item()
         auc = auc / n_sample
     return auc, mse
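Because the NaN mask from the unsqueezed target broadcasts over the sample dimension, the per-sample loop can also be written with broadcasting. A small sketch under that assumption (shapes and names below are illustrative, not from the original class):

import torch

n_sample = 3
X_cts = torch.tensor([[1.0, float("nan")], [0.5, 2.0]]).unsqueeze(1)  # (N, 1, P)
mean_cts = torch.randn(2, n_sample, 2)                                # (N, n_sample, P)

which_cts = ~X_cts.isnan()                                            # broadcasts over samples
sq_err = torch.where(which_cts, (mean_cts - X_cts) ** 2, torch.zeros_like(mean_cts))
mse = sq_err.sum() / (which_cts.sum() * n_sample)                     # mean of the per-sample MSEs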
Example #4
def test_v1_5_metric_regress():
    ExplainedVariance.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        ExplainedVariance()

    MeanAbsoluteError.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        MeanAbsoluteError()

    MeanSquaredError.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        MeanSquaredError()

    MeanSquaredLogError.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        MeanSquaredLogError()

    target = torch.tensor([3, -0.5, 2, 7])
    preds = torch.tensor([2.5, 0.0, 2, 8])
    explained_variance._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        res = explained_variance(preds, target)
    assert torch.allclose(res, torch.tensor(0.9572), atol=1e-4)

    x = torch.tensor([0., 1, 2, 3])
    y = torch.tensor([0., 1, 2, 2])
    mean_absolute_error._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert mean_absolute_error(x, y) == 0.25

    mean_relative_error._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert mean_relative_error(x, y) == 0.125

    mean_squared_error._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert mean_squared_error(x, y) == 0.25

    mean_squared_log_error._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        res = mean_squared_log_error(x, y)
    assert torch.allclose(res, torch.tensor(0.0207), atol=1e-4)

    PSNR.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        PSNR()

    R2Score.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        R2Score()

    SSIM.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        SSIM()

    preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
    target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
    psnr._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        res = psnr(preds, target)
    assert torch.allclose(res, torch.tensor(2.5527), atol=1e-4)

    target = torch.tensor([3, -0.5, 2, 7])
    preds = torch.tensor([2.5, 0.0, 2, 8])
    r2score._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        res = r2score(preds, target)
    assert torch.allclose(res, torch.tensor(0.9486), atol=1e-4)

    preds = torch.rand([16, 1, 16, 16])
    target = preds * 0.75
    ssim._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        res = ssim(preds, target)
    assert torch.allclose(res, torch.tensor(0.9219), atol=1e-4)
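The test above only checks that the deprecated pytorch_lightning.metrics aliases still work while warning about their removal in v1.5.0; the functionality itself lives on in torchmetrics. A minimal sketch of the functional replacements (assuming a torchmetrics release that exports these names):

import torch
from torchmetrics.functional import (
    explained_variance,
    mean_absolute_error,
    mean_squared_error,
    mean_squared_log_error,
)

x = torch.tensor([0., 1, 2, 3])
y = torch.tensor([0., 1, 2, 2])
assert mean_absolute_error(x, y) == 0.25  # mean of |0, 0, 0, 1|
assert mean_squared_error(x, y) == 0.25   # mean of (0, 0, 0, 1)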
Example #5
    def training_step(self, sgrams, batch_idx, optimizer_idx):
        batch_size = sgrams.shape[0]
        assert sgrams.shape[1:] == self.spectrogram_shape  # (N, 1, H, W)
        """
        Four classes with product of terms objective.
        See Dandi et al., "Generalized Adversarially Learned Inference"
        https://arxiv.org/pdf/2006.08089.pdf
        0: x, E(x)
        1: z, G(z)
        2: x, E(G(E(x)))
        3: G(E(G(z))), z
        """
        log_probs = [None] * 4
        # Class 0:
        true_latent_params = self.encoder(sgrams).view(
            -1, 2, self.hparams.latent_size)
        true_mu = true_latent_params[:, 0, :]
        true_std = true_latent_params[:, 1, :]
        true_latent_sample = self.sample(true_mu, true_std)
        logits = self.discriminator((sgrams, true_latent_sample))
        log_probs[0] = F.log_softmax(logits, dim=1)
        if optimizer_idx == 0:
            pt_loss = self.product_of_terms_loss(log_probs[0], 0)

        # Class 1:
        fake_latent = torch.randn(batch_size,
                                  self.hparams.latent_size,
                                  device=self.device)
        fake_sgrams = self.generator(fake_latent)
        logits = self.discriminator((fake_sgrams, fake_latent))
        log_probs[1] = F.log_softmax(logits, dim=1)
        if optimizer_idx == 0:
            pt_loss += self.product_of_terms_loss(log_probs[1], 1)

        # Class 2:
        true_reencoded_params = self.encoder(
            self.generator(true_latent_sample)).view(-1, 2,
                                                     self.hparams.latent_size)
        true_reencoded_mu = true_reencoded_params[:, 0, :]
        true_reencoded_std = true_reencoded_params[:, 1, :]
        true_reencoded_latent_sample = self.sample(true_reencoded_mu,
                                                   true_reencoded_std)
        logits = self.discriminator((sgrams, true_reencoded_latent_sample))
        log_probs[2] = F.log_softmax(logits, dim=1)
        if optimizer_idx == 0:
            pt_loss += self.product_of_terms_loss(log_probs[2], 2)

        # Class 3:
        fake_reencoded_params = self.encoder(fake_sgrams).view(
            -1, 2, self.hparams.latent_size)
        fake_reencoded_mu = fake_reencoded_params[:, 0, :]
        fake_reencoded_std = fake_reencoded_params[:, 1, :]
        fake_reencoded_latent_sample = self.sample(fake_reencoded_mu,
                                                   fake_reencoded_std)
        fake_regenerated_sgrams = self.generator(fake_reencoded_latent_sample)
        self.log("reconstruction-mse",
                 mean_squared_error(fake_regenerated_sgrams, sgrams))
        self.log("reconstruction-ssim", ssim(fake_regenerated_sgrams, sgrams))
        logits = self.discriminator((fake_regenerated_sgrams, fake_latent))
        log_probs[3] = F.log_softmax(logits, dim=1)
        # Encoder-Generator loss

        if optimizer_idx == 0:
            pt_loss += self.product_of_terms_loss(log_probs[3], 3)

        last_update = "D" if optimizer_idx == 0 else "EG"
        step_type = "t" if self.training else "v"
        self.log(f"x,E(x)|{step_type}|{last_update}",
                 torch.exp(log_probs[0][:, 0]).mean())
        self.log(f"z,G(z)|{step_type}|{last_update}",
                 torch.exp(log_probs[1][:, 1]).mean())
        self.log(f"x,E(G(E(x)))|{step_type}|{last_update}",
                 torch.exp(log_probs[2][:, 2]).mean())
        self.log(f"G(E(G(z))),z|{step_type}|{last_update}",
                 torch.exp(log_probs[3][:, 3]).mean())

        if optimizer_idx == 0:
            # Weight the objective containing m=4 log terms by 2/m=0.5, corresponding to a weight
            # of 1 for ALI's generator and discriminator objective, keeping the objectives
            # in a similar range for both optimizers
            pt_loss = pt_loss.mean() / 2
            self.log("eg_loss", pt_loss)
            return pt_loss.mul(torch.tensor(-1, device=self.device))

        # Discriminator loss

        if optimizer_idx == 1:
            d_loss = torch.zeros((batch_size, ),
                                 dtype=log_probs[0].dtype,
                                 device=self.device)
            for true_idx in range(4):
                target = torch.full((batch_size, ),
                                    true_idx,
                                    dtype=torch.long,
                                    device=self.device)
                # Maybe randomly swap labels instead
                d_loss += self.smoothing_cross_entropy_loss(
                    log_probs[true_idx], target)
                # d_loss += F.cross_entropy(logits[true_idx], target, reduction="none")

            d_loss = d_loss.mean()  # reduce across the batch dimension
            self.log("d_loss", d_loss)
            return d_loss
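The two reconstruction metrics logged for class 3 can be exercised on a toy batch. A minimal sketch, assuming torchmetrics' functional API (structural_similarity_index_measure is the current name of the ssim helper used above; the batch below is random data for illustration):

import torch
from torchmetrics.functional import mean_squared_error, structural_similarity_index_measure

# Toy spectrogram batch of shape (N, 1, H, W) and a slightly degraded reconstruction of it.
sgrams = torch.rand(4, 1, 16, 16)
fake_regenerated_sgrams = sgrams * 0.9 + 0.05

print(mean_squared_error(fake_regenerated_sgrams, sgrams))
print(structural_similarity_index_measure(fake_regenerated_sgrams, sgrams))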