Example 1
    def compute_excursion_prob(self, lower, upper=None):
        """ Compute the excursion probability on the whole grid, given the
        currently available data.

        Parameters
        ----------
        lower: (p) Tensor
            List of lower thresholds, one per response. The excursion set is
            the set where each response is above its specified lower threshold.
            Note that np.inf is supported.
        upper: (p) Tensor
            List of upper thresholds, one per response. If provided, the
            excursion set is the set where each response lies between its lower
            and upper threshold. If not provided, defaults to +infinity.

        Returns
        -------
        excursion_proba: (self.grid.n_points) Tensor
            Excursion probability at each point.

        """
        # Extract covariance matrix at every point.
        pointwise_cov = self.grf.pointwise_cov

        excursion_proba = coverage_fct_fixed_location(
            self.grf.mean_vec.isotopic, pointwise_cov, lower, upper=upper)
        return excursion_proba
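A hypothetical usage sketch of the method above: `sensor` stands for an instance of the class defining it (its construction is not shown in these snippets), and the threshold values are made up for illustration.

import torch

# One lower threshold per response (here p = 2); no upper threshold.
lower = torch.tensor([2.0, 1.5])

# Excursion probability at every grid point, shape (sensor.grid.n_points,).
excursion_proba = sensor.compute_excursion_prob(lower)

# Indices of points that are very likely inside the excursion set.
likely_inds = (excursion_proba > 0.9).nonzero()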
Example 2
    def _eibv_part_2(self, S_y_inds, L_y, lower, upper=None, noise_std=None):
        """ Helper computing the second term of the expected Bernoulli variance
        (see the ebv method).

        """
        h = 2  # In case we want to generalize later (see Proposition 2).

        id_h = torch.eye(h, dtype=torch.float32)
        full_h = torch.full((h, h), 1.0, dtype=torch.float32)

        # Extract the covariance reduction at every point. This is pointwise,
        # i.e. ignore correlations between different spatial locations.
        pw_cov_reduction = torch.diagonal(
                self.compute_cov_reduction(S_y_inds, L_y, noise_std).isotopic,
                dim1=0, dim2=1).T

        # Build the concatenated covariance matrix and mean vector
        # WARNING: At each point, we want to multiply the dimension of the
        # response by h. This means that we have to be careful to expand along
        # the response dimension (the last one) and not along the batch
        # dimension (the spatial one).
        pw_covariance_cat = (
                kronecker(id_h, self.pointwise_cov)
                + kronecker(full_h - id_h, pw_cov_reduction))

        mean_cat = torch.cat(h * [self.mean_vec.isotopic], dim=1)

        # Now concatenate the thresholds.
        lower_cat = torch.cat(h * [lower])
        if upper is not None:
            upper_cat = torch.cat(h * [upper])
        else:
            upper_cat = None

        part2 = coverage_fct_fixed_location(
                    mean_cat, pw_covariance_cat, lower_cat, upper_cat)
        return part2
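To make the block structure built above explicit, here is a minimal, self-contained illustration for a single spatial point with p = 2 responses, using torch.kron in place of the project's batched kronecker helper; the matrices K and R are hypothetical stand-ins for the pointwise covariance and the pointwise covariance reduction.

import torch

K = torch.tensor([[1.0, 0.3], [0.3, 2.0]])  # pointwise covariance (p x p), made up
R = torch.tensor([[0.4, 0.1], [0.1, 0.5]])  # pointwise covariance reduction (p x p), made up

h = 2
id_h = torch.eye(h)
full_h = torch.full((h, h), 1.0)

# Same recipe as in the method above: K on the diagonal blocks, R off the
# diagonal, i.e. cov_cat = [[K, R], [R, K]] of shape (h*p, h*p).
cov_cat = torch.kron(id_h, K) + torch.kron(full_h - id_h, R)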
Example 3
    def ebv(self, S_y_inds, L_y, lower, upper=None, noise_std=None):
        """ Computes the expected Bernoulli Variance (i.e. not integrated)
        if we were to make observations at the
        generalized locations (S_y, L_y).
        Since we are on a grid, we do not directly specify the spatial
        locations S_y, but the corresponding grid indices S_y_inds instead.

        Parameters
        ----------
        S_y_inds: (M) Tensor
            Indices (in the grid) of the spatial locations of the measurements.
        L_y: (M) Tensor
            Response indices of the measurements.
        lower: (p) Tensor
            List of lower thresholds, one per response.
        upper: (p) Tensor
            List of upper thresholds, one per response.
            If not provided, defaults to +infinity.
        noise_std: float
            Noise standard deviation. Uniform across all measurements.
            Defaults to 0.

        Returns
        -------
        ebv: (n_points) Tensor
            Expected Bernoulli Variance at each point of the grid, conditional
            on observing at the new data point.

        """
        part1 = coverage_fct_fixed_location(
                    self.mean_vec.isotopic, self.pointwise_cov, lower, upper)
        part2 = self._eibv_part_2(S_y_inds, L_y, lower, upper, noise_std)

        return part1 - part2
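A hypothetical usage sketch: `sensor` again stands for an instance of the class defining ebv, and the candidate observation (one grid index, one response index) and the thresholds are made up. Averaging the returned field over the grid gives a simple proxy for the integrated criterion, assuming uniform weights over grid cells.

import torch

S_y_inds = torch.tensor([1042])   # hypothetical grid index of the candidate observation
L_y = torch.tensor([0])           # observe response 0 there
lower = torch.tensor([2.0, 1.5])

ebv_field = sensor.ebv(S_y_inds, L_y, lower, noise_std=0.05)

# Smaller values mean less expected residual classification uncertainty.
score = ebv_field.mean()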
Example 4
    def compute_excursion_prob(self, points, lower, upper=None):
        """ Compute the excursion probability at a set of points given the
        currently available data.

        Note that this is a helper function that takes point coordinates
        (rather than grid indices) as input.

        Parameters
        ----------
        points: (N, d) Tensor
            List of points (coordinates) at which to compute the excursion probability.
        lower: (p) Tensor
            List of lower thresholds, one per response. The excursion set is
            the set where each response is above its specified lower threshold.
            Note that np.inf is supported.
        upper: (p) Tensor
            List of upper thresholds, one per response. If provided, the
            excursion set is the set where each response lies between its lower
            and upper threshold. If not provided, defaults to +infinity.

        Returns
        -------
        excursion_proba: (N) Tensor
            Excursion probability at each point.

        """
        # First step: compute kriging predictors at locations of interest
        # based on the data acquired up to now.
        # Compute the prediction for all responses (isotopic).
        mu_cond_list, mu_cond_iso, K_cond_list, K_cond_iso = self.grf.krig_isotopic(
            points,
            self.S_y_tot,
            self.L_y_tot,
            self.y_tot,
            noise_std=self.noise_std,
            compute_post_cov=True)

        # Extract the variances only.
        # TODO: Since we always compute the full covariance matrix at the
        # moment, this extraction should be delegated somewhere, or better,
        # have a (conditional) distribution object that has methods to extract
        # the diagonal.
        K_cond_diag = torch.diagonal(K_cond_iso, dim1=0, dim2=1).T

        excursion_proba = coverage_fct_fixed_location(mu_cond_iso,
                                                      K_cond_diag,
                                                      lower,
                                                      upper=upper)
        return excursion_proba
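A hypothetical usage sketch of this points-based version: `sensor` is again an instance of the enclosing class, the domain is assumed 2-dimensional with p = 2 responses, and the probing line and thresholds are made up.

import torch

# Probe the excursion probability along a horizontal line of 50 points.
points = torch.stack(
    (torch.linspace(0.0, 1.0, 50), torch.full((50,), 0.5)), dim=1)  # (50, 2)
lower = torch.tensor([2.0, 1.5])

proba = sensor.compute_excursion_prob(points, lower)  # shape (50,)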
# Standalone usage example: condition the GRF on observations y at the
# generalized locations (S_y, L_y) and compute the coverage function.
# Assumes `myGRF`, `my_grid`, `S_y`, `L_y`, `y` and `n_out` are defined
# earlier, and that torch and coverage_fct_fixed_location are already imported.
mu_cond_grid, mu_cond_list, mu_cond_iso, K_cond_list, K_cond_iso = myGRF.krig_grid(
    my_grid, S_y, L_y, y, noise_std=0.05, compute_post_cov=True)

# Plot.
from meslas.plotting import plot_2d_slice, plot_krig_slice
plot_krig_slice(mu_cond_grid, S_y, L_y)

# Sample from the posterior.
from torch.distributions.multivariate_normal import MultivariateNormal
distr = MultivariateNormal(loc=mu_cond_list, covariance_matrix=K_cond_list)
sample = distr.sample()

# Reshape to a regular grid.
grid_sample = my_grid.isotopic_vector_to_grid(sample, n_out)
plot_krig_slice(grid_sample, S_y, L_y)

# Now compute and plot coverage function.
# Need only cross-covariances at fixed locations.
K_cond_diag = torch.diagonal(K_cond_iso, dim1=0, dim2=1).T
lower = torch.tensor([-1.0, -1.0]).double()

coverage = coverage_fct_fixed_location(mu_cond_iso,
                                       K_cond_diag,
                                       lower,
                                       upper=None)
plot_2d_slice(coverage.reshape(my_grid.shape),
              title="Excursion Probability",
              cmin=0,
              cmax=1.0)
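
As a follow-up sketch, the same quantities can be fed back into coverage_fct_fixed_location with the upper argument documented above to get a two-sided (band) excursion probability; the upper threshold values here are made up.

# Two-sided excursion: probability that each response lies between its lower
# and upper threshold at every grid point.
upper = torch.tensor([1.0, 1.0]).double()
coverage_band = coverage_fct_fixed_location(mu_cond_iso,
                                            K_cond_diag,
                                            lower,
                                            upper=upper)
plot_2d_slice(coverage_band.reshape(my_grid.shape),
              title="Band Excursion Probability",
              cmin=0,
              cmax=1.0)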