Code example #1
File: vars.py Project: gergely-flamich/varz
    def positive(self, init=None, shape=(), dtype=None, name=None):
        def generate_init(shape, dtype):
            return B.rand(dtype, *shape)

        return self._get_var(transform=lambda x: B.exp(x),
                             inverse_transform=lambda x: B.log(x),
                             init=init,
                             generate_init=generate_init,
                             shape=shape,
                             dtype=dtype,
                             name=name)
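A minimal usage sketch (not part of the source; it assumes varz's Vars container with a NumPy backend): the variable is stored as its log, so reading it back always yields a positive value.

import numpy as np
from varz import Vars

vs = Vars(np.float64)
# Stored internally as log(1.0); vs["noise"] always returns a positive value.
noise = vs.positive(init=1.0, name="noise")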
Code example #2
File: util.py Project: wesselb/oilmm
    def normalise_logdet(self, y):
        """Compute the log-determinant of the Jacobian of the normalisation.

        Accepts multiple arguments.

        Args:
            y (matrix): Data that was transformed.

        Returns:
            scalar: Log-determinant.
        """
        return -B.shape(y)[0] * B.sum(B.log(self.scale))
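If an (n, p) matrix is normalised by dividing each column by a per-output scale, as the surrounding normaliser appears to do, the Jacobian is diagonal with n copies of 1 / scale, so its log-determinant is -n * sum(log(scale)). A quick NumPy check of that identity (illustrative, not part of the source):

import numpy as np

n, scale = 4, np.array([0.5, 2.0, 3.0])
jacobian = np.kron(np.eye(n), np.diag(1 / scale))  # Jacobian of y -> y / scale
assert np.isclose(np.linalg.slogdet(jacobian)[1], -n * np.sum(np.log(scale)))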
Code example #3
def normal1d_logpdf(x, var, mean=0):
    """Broadcast the one-dimensional normal logpdf.

    Args:
        x (tensor): Point to evaluate at.
        var (tensor): Variances.
        mean (tensor): Means.

    Returns:
        tensor: Logpdf.
    """
    return -(B.log_2_pi + B.log(var) + (x - mean)**2 / var) / 2
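Because the expression broadcasts, a column of points can be evaluated against a row of variances in a single call. A usage sketch (assumes the function above with lab imported as B; the shapes are illustrative):

import numpy as np

x = np.linspace(-2, 2, 5)[:, None]        # (5, 1) points
var = np.array([0.5, 1.0, 2.0])[None, :]  # (1, 3) variances
logpdfs = normal1d_logpdf(x, var)         # broadcasts to (5, 3) log-densities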
Code example #4
    def _project_pattern(self, x, y, pattern):
        # Check whether all data is available.
        no_missing = all(pattern)

        if no_missing:
            # All data is available. Nothing to be done.
            u = self.u
        else:
            # Data is missing. Pick the available entries.
            y = B.take(y, pattern, axis=1)
            # Ensure that `u` remains a structured matrix.
            u = Dense(B.take(self.u, pattern))

        # Get number of data points and outputs in this part of the data.
        n = B.shape(x)[0]
        p = sum(pattern)

        # Perform projection.
        proj_y_partial = B.matmul(y, B.pinv(u), tr_b=True)
        proj_y = B.matmul(proj_y_partial, B.inv(self.s_sqrt), tr_b=True)

        # Compute projected noise.
        u_square = B.matmul(u, u, tr_a=True)
        proj_noise = (
            self.noise_obs / B.diag(self.s_sqrt) ** 2 * B.diag(B.pd_inv(u_square))
        )

        # Convert projected noise to weights.
        noises = self.model.noises
        weights = noises / (noises + proj_noise)
        proj_w = B.ones(B.dtype(weights), n, self.m) * weights[None, :]

        # Compute Frobenius norm.
        frob = B.sum(y ** 2)
        frob = frob - B.sum(proj_y_partial * B.matmul(proj_y_partial, u_square))

        # Compute regularising term.
        reg = 0.5 * (
            n * (p - self.m) * B.log(2 * B.pi * self.noise_obs)
            + frob / self.noise_obs
            + n * B.logdet(B.matmul(u, u, tr_a=True))
            + n * 2 * B.logdet(self.s_sqrt)
        )

        return x, proj_y, proj_w, reg
Code example #5
    def positive(self, init=None, shape=None, dtype=None, name=None):
        # If nothing is specified, generate a scalar.
        if init is None and shape is None:
            shape = ()

        def generate_init(shape, dtype):
            return B.rand(dtype, *shape)

        return self._get_var(
            transform=lambda x: B.exp(x),
            inverse_transform=lambda x: B.log(x),
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape,
            dtype=dtype,
            name=name,
        )
Code example #6
File: priors.py Project: wesselb/gpcm
    def apply_to_psd(f):
        # `psds` comes from the enclosing scope and is in decibels: convert to
        # the linear scale, apply `f`, and convert the result back to decibels.
        raw = 10 ** (psds / 10)
        return 10 * B.log(f(raw)) / B.log(10)
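A minimal usage sketch (assuming psds holds power spectral densities in decibels, which is what the 10 ** (psds / 10) conversion suggests): apply an operation on the linear scale and receive the result back in dB.

mean_psd_db = apply_to_psd(lambda raw: B.mean(raw, axis=0))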
Code example #7
    pred_k = wd.load("pred_k.pickle")

# Unpack the prediction for the PSD and cut off frequencies above 0.5.
freqs, mean, lower, upper, samps = pred_psd
upper_freq = 0.5
samps = samps[freqs <= upper_freq, :]
mean = mean[freqs <= upper_freq]
lower = lower[freqs <= upper_freq]
upper = upper[freqs <= upper_freq]
freqs = freqs[freqs <= upper_freq]

# Compute the spectrum of the excitation process.
instance = model()
spec_x = (2 * instance.lam) / (instance.lam**2 + (2 * B.pi * freqs)**2)
spec_x *= instance.alpha_t**2 / (2 * instance.alpha)
spec_x = 10 * B.log(spec_x) / B.log(10)

plt.figure(figsize=(12, 3.5))

# Plot the prediction for the PSD.
plt.subplot(1, 2, 1)
plt.title("PSD")
plt.plot(
    freqs,
    mean,
    label="$|\\mathcal{F}h(f)|^2\\mathcal{F}k_{x}(f)$",
    style="pred",
    zorder=1,
)
plt.fill_between(freqs, lower, upper, style="pred", zorder=1)
plt.plot(freqs, lower, style="pred", lw=1, zorder=1)
Code example #8
        posterior = model.condition(t_train, y_train)
        pred_f = (t_pred, ) + normaliser.untransform(posterior.predict(t_pred))
        pred_f_test = (t_test, ) + normaliser.untransform(
            posterior.predict(t_test))
        pred_k = posterior.predict_kernel()
        # Carefully untransform kernel prediction.
        pred_k = (
            pred_k.x,
            pred_k.mean * normaliser._scale,
            pred_k.var * normaliser._scale**2,
        )
        pred_psd = posterior.predict_psd()
        # Carefully untransform PSD prediction.
        pred_psd = (
            pred_psd.x,
            pred_psd.mean + 20 * B.log(normaliser._scale),
            pred_psd.err_95_lower + 20 * B.log(normaliser._scale),
            pred_psd.err_95_upper + 20 * B.log(normaliser._scale),
        )
        # Save predictions.
        wd.save(pred_f, model.name.lower(), "pred_f.pickle")
        wd.save(pred_f_test, model.name.lower(), "pred_f_test.pickle")
        wd.save(pred_k, model.name.lower(), "pred_k.pickle")
        wd.save(pred_psd, model.name.lower(), "pred_psd.pickle")

# Load predictions.
preds_f = {}
preds_f_test = {}
preds_k = {}
preds_psd = {}
for model in models:
Code example #9
File: logdet.py Project: wesselb/matrix
def logdet(a: Union[Diagonal, LowerTriangular, UpperTriangular]):
    return B.sum(B.log(B.diag(a)))
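The determinant of a diagonal or triangular matrix is the product of its diagonal, so the log-determinant is the sum of the logs of the diagonal entries (assuming they are positive). A quick NumPy check (illustrative, not part of the source):

import numpy as np

a = np.diag([1.0, 2.0, 4.0])
assert np.isclose(np.sum(np.log(np.diag(a))), np.linalg.slogdet(a)[1])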
Code example #10
File: vars.py Project: gergely-flamich/varz
    def inverse_transform(x):
        # Map a value in (lower, upper) back to the unconstrained latent space.
        return B.log(upper - x) - B.log(x - lower)
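This looks like the logit-style inverse of the squashing transform for a bounded variable, x = lower + (upper - lower) / (1 + exp(y)); that forward transform is an assumption based on this snippet, not shown in it. A round-trip check in plain NumPy:

import numpy as np

lower, upper, x = 0.0, 2.0, 0.7
y = np.log(upper - x) - np.log(x - lower)
assert np.isclose(lower + (upper - lower) / (1 + np.exp(y)), x)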
Code example #11
    def inverse_transform(x):
        # Take the Cholesky factor of the (regularised) positive-definite
        # matrix and unconstrain it: the log of its diagonal followed by its
        # strictly lower-triangular entries.
        chol = B.cholesky(B.reg(x))
        return B.concat(B.log(B.diag(chol)), B.tril_to_vec(chol, offset=-1))
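A round-trip sketch in plain NumPy of the parametrisation this inverse suggests; the forward step (exponentiate the first n entries back into the diagonal and refill the strictly lower triangle) is an assumption, not the source's code:

import numpy as np

n = 3
a = np.random.randn(n, n)
pd = a @ a.T + n * np.eye(n)  # a positive-definite test matrix
chol = np.linalg.cholesky(pd)
vec = np.concatenate([np.log(np.diag(chol)), chol[np.tril_indices(n, -1)]])
# Rebuild the Cholesky factor from the unconstrained vector and recover pd.
rebuilt = np.zeros((n, n))
rebuilt[np.tril_indices(n, -1)] = vec[n:]
rebuilt += np.diag(np.exp(vec[:n]))
assert np.allclose(rebuilt @ rebuilt.T, pd)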