Example #1
    def fisher_information_matrix(self, params):
        """
        Computes the Fisher Information Matrix.

        Returns
        -------
        fisher : ndarray
            Fisher Information Matrix
        """
        n_params = len(np.atleast_1d(params))
        fisher = np.zeros(shape=(n_params, n_params))

        if not hasattr(self.mean, 'gradient'):
            _grad = lambda mean, argnum, params: jacobian(mean, argnum=argnum)(*params)
        else:
            _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]

        grad_mean = [_grad(self.mean, i, params) for i in range(n_params)]

        for i in range(n_params):
            for j in range(i, n_params):
                fisher[i, j] = np.nansum(grad_mean[i] * grad_mean[j])
                fisher[j, i] = fisher[i, j]

        return fisher / self.var
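A minimal, self-contained sketch of the same computation for a toy linear mean model mu(t) = a*t + b with known noise variance: the analytic gradients [t, 1] stand in for the autograd jacobian used above, and toy_fisher, t, and var are illustrative names rather than part of the class.

import numpy as np

def toy_fisher(t, var):
    # mean model mu(t) = a*t + b, so dmu/da = t and dmu/db = 1
    grad_mean = [t, np.ones_like(t)]
    fisher = np.zeros((2, 2))
    for i in range(2):
        for j in range(i, 2):
            # np.nansum drops samples with missing t, as in the method above
            fisher[i, j] = np.nansum(grad_mean[i] * grad_mean[j])
            fisher[j, i] = fisher[i, j]
    return fisher / var

t = np.array([0., 1., 2., np.nan, 4.])
print(toy_fisher(t, var=0.5))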
Example #2
    def update_sigma2(y, weights, mu, Sigma):
        """
        y = [N, G, T] data
        weights = [N, G]
        mu = [T]
        Sigma = [T, T] posterior covariance of mu
        """
        diffs = y - mu
        total_weight = np.sum(
            weights[:, :, np.newaxis] * np.logical_not(np.isnan(y)))

        sigma2 = np.nansum(weights[:, :, np.newaxis] * (diffs ** 2))
        total_weight = np.maximum(1e-10, total_weight)
        sigma2 = (sigma2 / total_weight) + np.trace(Sigma)
        return sigma2, total_weight
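A hedged usage sketch with tiny shapes (N=2, G=3, T=4), assuming update_sigma2 is callable as a plain function: the NaN observation is dropped by np.nansum and excluded from total_weight through the isnan mask. All values are illustrative.

import numpy as np

N, G, T = 2, 3, 4
rng = np.random.default_rng(0)
y = rng.normal(size=(N, G, T))
y[0, 0, 1] = np.nan                # missing observation
weights = np.full((N, G), 0.5)     # responsibilities in [0, 1]
mu = np.zeros(T)                   # current mean estimate
Sigma = 0.01 * np.eye(T)           # posterior covariance of mu

sigma2, total_weight = update_sigma2(y, weights, mu, Sigma)
print(sigma2, total_weight)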
Example #3
 def gradient(self, params):
     # Use the analytic gradient if the mean model provides one;
     # otherwise fall back to autograd's jacobian.
     if not hasattr(self.mean, 'gradient'):
         _grad = lambda mean, argnum, params: jacobian(mean, argnum)(*params)
     else:
         _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]
     n_params = len(np.atleast_1d(params))
     grad_likelihood = np.array([])
     for i in range(n_params):
         grad = _grad(self.mean, i, params)
         grad_likelihood = np.append(grad_likelihood,
                                     np.nansum(grad * (1 - self.data / self.mean(*params))))
     return grad_likelihood
Example #4
    def update_mean(y, sigma2, weights, kernel_params):
        """
        y = [N, G, T] data
        sigma2 = global noise variance
        weights = [N, G]
        kernel_params = hyperparameters passed to cov_func
        returns mu [T] and Sigma [T, T]
        """
        prior = cov_func(kernel_params, inputs, inputs) + np.eye(T) * 1e-6
        weights = weights[:, :, np.newaxis] * np.logical_not(np.isnan(y))
        B = np.diag(weights.reshape(-1, T).sum(axis=0) / sigma2)

        yB = np.nansum((y * weights / sigma2).reshape(-1, T), axis=0)
        Sigma = np.linalg.inv(B + np.linalg.inv(prior))
        mu = np.dot(Sigma, yB)
        return mu, Sigma
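The function depends on module-level cov_func, inputs, and T. A self-contained sketch of the same posterior update, substituting a toy squared-exponential kernel for cov_func; rbf_kernel and the shapes below are assumptions for illustration, not the original module.

import numpy as np

def rbf_kernel(a, b, lengthscale=1.0, variance=1.0):
    # squared-exponential covariance between two sets of 1-D inputs
    d = a[:, None] - b[None, :]
    return variance * np.exp(-0.5 * (d / lengthscale) ** 2)

N, G, T = 3, 2, 5
rng = np.random.default_rng(1)
inputs = np.linspace(0.0, 1.0, T)
y = rng.normal(size=(N, G, T))
weights = np.ones((N, G))
sigma2 = 0.1

prior = rbf_kernel(inputs, inputs) + np.eye(T) * 1e-6
w = weights[:, :, np.newaxis] * np.logical_not(np.isnan(y))
B = np.diag(w.reshape(-1, T).sum(axis=0) / sigma2)
yB = np.nansum((y * w / sigma2).reshape(-1, T), axis=0)
Sigma = np.linalg.inv(B + np.linalg.inv(prior))
mu = np.dot(Sigma, yB)
print(mu.shape, Sigma.shape)   # (5,), (5, 5)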
Example #5
    def fisher_information_matrix(self, params):
        n_params = len(np.atleast_1d(params))
        fisher = np.zeros(shape=(n_params, n_params))

        if not hasattr(self.mean, 'gradient'):
            _grad = lambda mean, argnum, params: jacobian(mean, argnum=argnum)(*params)
        else:
            _grad = lambda mean, argnum, params: mean.gradient(*params)[argnum]

        grad_mean = [_grad(self.mean, i, params) for i in range(n_params)]

        for i in range(n_params):
            for j in range(i, n_params):
                fisher[i, j] = np.nansum(grad_mean[i] * self._cov_inv * grad_mean[j])
                fisher[j, i] = fisher[i, j]

        return fisher
Example #6
def compress_observations(y, weights):
    """
    y [N, G, T]
    weights [N, G]

    takes convex combination of observations by weights
    returns mean and relative precision for each dimension of T
    """
    t = y.shape[-1]
    compressed_weights = (weights[:, :, np.newaxis] * np.logical_not(
        np.isnan(y))).sum(axis=0).sum(axis=0)

    compressed_y = np.nansum((y * weights[:, :, np.newaxis] / compressed_weights).reshape(
        -1, t), axis=0)

    if np.any(np.isnan(compressed_y)):
        pdb.set_trace()
    return compressed_y, compressed_weights
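Assuming compress_observations is importable as written (the debug branch also needs import pdb), a toy call looks like the following; each output entry is the weighted mean over the values observed at that time point, with NaNs skipped.

import numpy as np

y = np.array([[[1.0, 2.0, np.nan],
               [2.0, 2.0, 2.0]],
              [[3.0, np.nan, 1.0],
               [1.0, 4.0, 3.0]]])        # N=2, G=2, T=3
weights = np.full((2, 2), 0.25)          # equal convex weights

compressed_y, compressed_weights = compress_observations(y, weights)
print(compressed_y)        # weighted mean per time point
print(compressed_weights)  # total weight observed per time point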
Example #7
    def update_lambda(y, phi, psi, sigma2s, mus, Sigmas):
        """
        y = [N, G, T]
        phi = [N, K]
        psi = [L] mixing weights
        sigma2s = [K, L] noise variances
        mus = [K, L, T]
        Sigmas = [K, L, T, T]
        """
        lam = np.zeros((G, L))
        for l in range(L):
            for k in range(K):
                ll = np.nansum(norm.logpdf(
                    y, mus[k, l], np.sqrt(sigma2s[k, l])), axis=-1)  # N, G
                ll = (ll - 0.5 * np.trace(Sigmas[k, l] / (sigma2s[k, l])))
                ll = ll * phi[:, k][:, np.newaxis]
                lam[:, l] = lam[:, l] + ll.sum(axis=0)

        lam = lam + np.log(psi)
        lam = np.exp(lam - logsumexp(lam)[:, np.newaxis])
        lam = surgery(lam)
        return lam
Example #8
    def update_phi(y, lam, pi, sigma2s, mus, Sigmas):
        """
        y = [N, G, T]
        lam = [G, L]
        pi = [K]
        sigma2s = [K, L] noise variances
        mus = [K, L, T]
        Sigmas = [K, L, T, T]
        """
        phi = np.zeros((N, K))
        for k in range(K):
            for l in range(L):
                ll = np.nansum(norm.logpdf(
                    y, mus[k, l], np.sqrt(sigma2s[k, l])), axis=-1)
                ll = (ll - (0.5 * np.trace(Sigmas[k, l]) / (sigma2s[k, l])))
                ll = ll * lam[:, l]
                phi[:, k] = phi[:, k] + ll.sum(axis=1)

        phi = phi + np.log(pi)
        phi = np.exp(phi - logsumexp(phi)[:, np.newaxis])
        phi = surgery(phi)
        return phi
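Both updates exponentiate log-responsibilities after subtracting a log-sum-exp. Assuming a row-wise logsumexp such as scipy.special.logsumexp with axis=1 (the snippets above may use their own helper), the normalization is a numerically stable softmax:

import numpy as np
from scipy.special import logsumexp

log_resp = np.array([[-1000.0, -1001.0],
                     [-3.0, -1.0]])
# subtracting the row-wise log-sum-exp before exponentiating avoids underflow
resp = np.exp(log_resp - logsumexp(log_resp, axis=1)[:, np.newaxis])
print(resp.sum(axis=1))    # each row sums to 1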
Example #9
    def evaluate(self, params):
        """
        Computes the negative of the log likelihood function.

        Parameters
        ----------
        params : ndarray
            parameter vector of the mean model and covariance matrix
        """
        if callable(self.cov):
            theta = params[:self.dim] # mean model parameters
            alpha = params[self.dim:] # kernel parameters (hyperparameters)
            mean = self.mean(*theta)
            cov = self.cov(*alpha)
            self._cov_inv = np.linalg.inv(cov)
        else:
            mean = self.mean(*params)
            cov = self.cov

        residual = self.data - mean

        return (np.linalg.slogdet(cov)[1]
                + np.nansum(residual * (self._cov_inv * residual.T)))
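For reference, the two terms returned above, log|C| plus the quadratic form, can be checked against a generic dense-covariance sketch (toy residual and covariance, not the class itself):

import numpy as np

residual = np.array([0.2, -0.1, 0.4])
cov = np.array([[1.0, 0.3, 0.0],
                [0.3, 1.0, 0.2],
                [0.0, 0.2, 1.0]])
cov_inv = np.linalg.inv(cov)

# log|C| + r^T C^{-1} r, dropping the 2*pi constant as above
nll = np.linalg.slogdet(cov)[1] + residual @ cov_inv @ residual
print(nll)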
Example #10
def multinomial_entropy(p):
    return -1 * np.nansum(p * np.log(p))
Example #11
 def sum_to_1(r):
     R = r.reshape((r.shape[0], -1))
     R = R / np.nansum(R[:, ~np.isnan(R.sum(0))], axis=1)[:, np.newaxis]
     return R
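A toy call, assuming sum_to_1 is available as a plain function. Note that this variant divides each row by the sum over fully observed columns only; Examples #17 and #24 below divide by the nansum over all columns instead.

import numpy as np

r = np.array([[1.0, 1.0, np.nan],
              [2.0, 2.0, 2.0]])
print(sum_to_1(r))
# column 2 contains a NaN, so each row is divided by the sum of its
# first two entries only: [[0.5, 0.5, nan], [0.5, 0.5, 0.5]]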
Example #12
 def evaluate(self, params):
     return np.nansum((self.data - self.mean(*params))**2 / (2 * self.var))
Example #13
 def evaluate(self, params):
     return np.nansum(
         self.mean(*params) - self.data * np.log(self.mean(*params)))
Example #14
 def evaluate(self, params):
     r = self.data - self.mean(*params)
     return 0.5 * np.nansum(r * r / self.var)
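Examples #12 and #14 are the same Gaussian chi-square cost written two ways; a quick numeric check on toy data with one missing value:

import numpy as np

data = np.array([1.0, 2.0, np.nan, 4.0])
model = np.array([1.1, 1.8, 3.0, 4.2])
var = 0.25

r = data - model
print(np.nansum(r ** 2 / (2 * var)))    # Example #12 form
print(0.5 * np.nansum(r * r / var))     # Example #14 form, same value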
Example #15
 def gradient(self, theta):
     mean_theta = self.mean(*theta)
     grad = self.mean.gradient(*theta)
     return - np.nansum(self.data * grad / mean_theta
                        - (1 - self.data) * grad / (1 - mean_theta),
                        axis=-1)
Example #16
 def _evaluate_w_regularization(self, *params):
     return np.nansum(
         np.absolute(self.data - self.model(*params[:-1])) +
         params[-1] * self.regularization(*params[:-1]))
Example #17
 def sum_to_1(r):
     R = r.reshape((r.shape[0], -1))
     R = R / np.nansum(R, axis=1)[:, np.newaxis]  # changed 8/28
     return R
Example #18
 def evaluate(self, params):
     return np.nansum(np.abs(self.data - self.mean(*params)) / np.sqrt(.5 * self.var))
Example #19
def svm_cost(x, data):
    # computes cost given parameters (kappa, amplitude[, optionally offset])
    return np.nansum((data - svm_fn(*x)) ** 2)
Example #20
                                    columns=full_contra.columns)
        completed_y2 = completed_y2.astype(float).round(0)

        #================================================================
        # Inverse transform both datasets
        #================================================================

        for j, colname in enumerate(y.columns):
            if colname in le_dict.keys():
                completed_y[colname] = le_dict[colname].inverse_transform(
                    completed_y[colname].astype(int))
                completed_y2[colname] = le_dict[colname].inverse_transform(
                    completed_y2[colname].astype(int))

        assert np.nansum(
            np.abs(completed_y[~nan_mask].astype(float) -
                   full_contra[~nan_mask].astype(float))) == 0
        assert np.nansum(
            np.abs(completed_y2[~nan_mask].astype(float) -
                   full_contra[~nan_mask].astype(float))) == 0

        # Mimic the format
        completed_y = completed_y.astype(int)
        completed_y2 = completed_y2.astype(int)

        #completed_y2.to_csv(res + 'Run' + str(run_idx) + '/imp' + dataset_name + '.csv', index = False)

        #================================================================
        # Diagnostic
        #================================================================
Example #21
 def evaluate(self, theta):
     p = self.mean(*theta)
     N0 = np.exp(-self.data ** 2 / (2 * self.var))
     N1 = np.exp(-(self.data - 1) ** 2 / (2 * self.var))
     return -np.nansum(np.log((1 - p) * N0 + p * N1))
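This is the negative log-likelihood of a two-component Gaussian mixture with component means 0 and 1 and a shared variance, with the common normalization constant dropped. A standalone sketch with toy values, where p plays the role of self.mean(*theta):

import numpy as np

data = np.array([0.05, 0.9, np.nan, 1.1])
var = 0.04
p = np.array([0.1, 0.8, 0.5, 0.9])   # per-point probability of the "1" component

N0 = np.exp(-data ** 2 / (2 * var))
N1 = np.exp(-(data - 1) ** 2 / (2 * var))
print(-np.nansum(np.log((1 - p) * N0 + p * N1)))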
Example #22
 def evaluate(self, theta):
     mean_theta = self.mean(*theta)
     return - np.nansum(self.data * np.log(mean_theta)
                        + (1. - self.data) * np.log(1. - mean_theta))
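The summand is the Bernoulli negative log-likelihood; NaN labels simply drop out of np.nansum. A standalone sketch with toy labels and predicted probabilities:

import numpy as np

data = np.array([1.0, 0.0, np.nan, 1.0])      # labels, one missing
mean_theta = np.array([0.9, 0.2, 0.5, 0.7])   # predicted probabilities

nll = -np.nansum(data * np.log(mean_theta)
                 + (1.0 - data) * np.log(1.0 - mean_theta))
print(nll)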
Example #23
 def n_counts(self):
     """
     Returns the sum of the number of counts over all bins.
     """
     return np.nansum(self.data)
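np.nansum treats NaN bins as zero, so bins with missing counts do not propagate NaN into the total:

import numpy as np

counts = np.array([5.0, np.nan, 3.0, 2.0])
print(np.sum(counts))      # nan
print(np.nansum(counts))   # 10.0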
Example #24
 def sum_to_1(r):
     R = r.reshape((r.shape[0],-1))
     #R = R/np.nansum(R[:,~np.isnan(R.sum(0))],axis=1)[:,np.newaxis]
     R = R/np.nansum(R,axis=1)[:,np.newaxis] # changed 8/28
     return R
Example #25
 def _evaluate_wo_regularization(self, *params):
     return np.nansum(np.absolute(self.data - self.model(*params)))