Example #1
    def get_general_node_params(x, lds):
        T, p = x.shape
        C, sigma_obs = lds[-2:]

        J, Jzx, Jxx, logZ = pair_mean_to_natural(C, sigma_obs)
        h = np.einsum('tzx,tx->tz', Jzx, x)
        logZ += np.einsum('ti,tij,tj->t', x, Jxx, x) - p/2.*np.log(2*np.pi)

        return J, h, logZ
Example #2
    def log_marginal_likelihood(self, theta):
        x_train = self.X_train_
        y_train = self.y_train_

        if np.ndim(y_train) == 1:
            y_train = y_train[:, np.newaxis]

        # Gather hyper parameters
        signal_variance, noise_likelihood, length_scale = \
            self._get_kernel_params(theta)
        signal_variance = np.exp(signal_variance)
        noise_likelihood = np.exp(noise_likelihood)
        length_scale = np.exp(length_scale)

        n_samples = x_train.shape[0]

        # train kernel
        K = self.rbf_covariance(x_train,
                                length_scale=length_scale,
                                signal_variance=signal_variance)
        K += noise_likelihood * np.eye(n_samples)
        L = np.linalg.cholesky(K + self.jitter * np.eye(n_samples))
        weights = np.linalg.solve(L.T, np.linalg.solve(L, y_train))
        log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, weights)
        log_likelihood_dims -= np.log(np.diag(L)).sum()
        log_likelihood_dims -= (K.shape[0] / 2) * np.log(2 * np.pi)

        log_likelihood = log_likelihood_dims.sum(-1)

        return -log_likelihood
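The Cholesky bookkeeping in the likelihood above relies on two identities: solving twice against L gives K⁻¹y, and log|K| is twice the sum of the log-diagonal of L. A minimal standalone check (plain NumPy, illustrative names, not part of the class above):

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(5, 3))
    K = X @ X.T + 0.1 * np.eye(5)                 # small SPD stand-in for the kernel matrix
    y = rng.normal(size=(5, 1))

    L = np.linalg.cholesky(K)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))   # = K^{-1} y via two triangular solves
    assert np.allclose(alpha, np.linalg.solve(K, y))
    assert np.allclose(2 * np.log(np.diag(L)).sum(), np.linalg.slogdet(K)[1])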
def generalized_outer_product(mat):
    if len(mat.shape) == 1:
        return np.outer(mat, mat)
    elif len(mat.shape) == 2:
        return np.einsum('ij,ik->ijk', mat, mat)
    else:
        raise ArithmeticError("expected a 1-D or 2-D array, got shape %s" % (mat.shape,))
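A short usage sketch for the helper above (assuming it and numpy as np are in scope): a 1-D input reduces to np.outer, while a 2-D batch yields one outer product per row.

    import numpy as np

    v = np.arange(3.0)
    assert generalized_outer_product(v).shape == (3, 3)

    M = np.arange(6.0).reshape(2, 3)            # batch of two 3-vectors
    batched = generalized_outer_product(M)      # shape (2, 3, 3)
    assert np.allclose(batched[0], np.outer(M[0], M[0]))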
    def predict(self,
                user_id_N,
                item_id_N,
                mu=None,
                b_per_user=None,
                c_per_item=None,
                U=None,
                V=None):
        ''' Predict ratings at specific user_id, item_id pairs

        Args
        ----
        user_id_N : 1D array, size n_examples
            Specific user_id values to use to make predictions
        item_id_N : 1D array, size n_examples
            Specific item_id values to use to make predictions
            Each entry is paired with the corresponding entry of user_id_N

        Returns
        -------
        yhat_N : 1D array, size n_examples
            Scalar predicted ratings, one per provided example.
            Entry n is for the n-th pair of user_id, item_id values provided.
        '''
        # Prediction: global mean + user bias + item bias + U[user] . V[item]
        N = user_id_N.size
        yhat_N = ag_np.ones(N)*mu+b_per_user[user_id_N]+c_per_item[item_id_N]\
                +ag_np.einsum('ij, ij->i', U[user_id_N], V[item_id_N])

        return yhat_N
Example #5
    def _ll(self, m, p, a, xn, xln, **kwargs):
        """Computation of log likelihood

        Dimensions
        ----------
        m :  n_unique x n_features
        p :  n_unique x n_features x n_features
        a :  n_unique x n_lags (shared_alpha=F)
             OR     1 x n_lags (shared_alpha=T)
        xn:  N x n_features
        xln: N x n_features x n_lags
        """

        samples = xn.shape[0]
        xn = xn.reshape(samples, 1, self.n_features)
        m = m.reshape(1, self.n_unique, self.n_features)
        det = np.linalg.det(np.linalg.inv(p))
        det = det.reshape(1, self.n_unique)

        lagged = np.dot(xln, a.T)  # NFU
        lagged = np.swapaxes(lagged, 1, 2)  # NUF
        xm = xn-(lagged + m)
        tem = np.einsum('NUF,UFX,NUX->NU', xm, p, xm)

        res = (-self.n_features/2.0)*np.log(2*np.pi) - 0.5*tem - 0.5*np.log(det)

        return res
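The einsum 'NUF,UFX,NUX->NU' above is the batched quadratic form (x_n − m_u)ᵀ P_u (x_n − m_u), evaluated for every sample n and state u at once. A standalone check of that pattern against an explicit loop (illustrative shapes, plain NumPy):

    import numpy as np

    rng = np.random.default_rng(0)
    N, U, F = 4, 3, 2
    xm = rng.normal(size=(N, U, F))             # centered observations
    p = rng.normal(size=(U, F, F))              # one matrix per state

    tem = np.einsum('NUF,UFX,NUX->NU', xm, p, xm)
    loop = np.array([[xm[n, u] @ p[u] @ xm[n, u] for u in range(U)]
                     for n in range(N)])
    assert np.allclose(tem, loop)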
Example #6
def magcal_residual2(a, mb, gb, gs):
    """ residual from all observations given magnetometer eccentricity, bias,
    gyro bias, and gyro scale"""

    A = np.array([
        [a[0], a[1], a[2]],
        [0,    a[3], a[4]],
        [0,    0,    a[5]]
    ])

    mag = np.dot(MAG - mb, A)
    dt = TS[1:] - TS[:-1]
    w = gs * (GYRO[1:] - gb)
    C = so3.tensorexp(w.T * dt)
    rot_mag = np.einsum('ijl,lj->li', C, mag[:-1])
    return np.mean(np.abs(1 - np.einsum('ji,ji->j', mag[1:], rot_mag)))
Example #7
        def _ll(self, m, p, a, xn, xln, **kwargs):
            """Computation of log likelihood

            Dimensions
            ----------
            m :  n_unique x n_features
            p :  n_unique x n_features x n_features
            a :  n_unique x n_lags (shared_alpha=F)
                 OR     1 x n_lags (shared_alpha=T)
            xn:  N x n_features
            xln: N x n_features x n_lags
            """

            samples = xn.shape[0]
            xn = xn.reshape(samples, 1, self.n_features)
            m = m.reshape(1, self.n_unique, self.n_features)
            det = np.linalg.det(np.linalg.inv(p))
            det = det.reshape(1, self.n_unique)

            lagged = np.dot(xln, a.T)  # NFU
            lagged = np.swapaxes(lagged, 1, 2)  # NUF
            xm = xn-(lagged + m)
            tem = np.einsum('NUF,UFX,NUX->NU', xm, p, xm)

            # TODO division in gamma function
            res = np.log(gamma((self.degree_freedom + self.n_features)/2)) - \
                  np.log(gamma(self.degree_freedom/2)) - (self.n_features/2.0) * \
                  np.log(self.degree_freedom) - \
                  (self.n_features/2.0) * np.log(np.pi) - 0.5 * np.log(det) - \
                  ((self.degree_freedom + self.n_features) / 2.0) * \
                  np.log(1 + (1/self.degree_freedom) * tem)

            return res
Example #8
 def predictions(weights, inputs):
     inputs = np.expand_dims(inputs, 0)
     for W, b in unpack_layers(weights):
         outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
         inputs = nonlinearity(outputs)
     #return outputs - logsumexp(outputs, axis=1, keepdims=True)
     return outputs
Example #9
 def construct_sphere(self,
                      θ: "Model parameters" = None
                      ) -> "Plots a unit sphere":
     if θ is None:
         θ = self.θ_bf
     c, c0, ωv, A = self.compute_ω_sphere(θ, Npoints=10)
     w, v = self.metric_eigenproblem(self.g(θ))
     f, ax = plt.subplots(1, 3, figsize=(23, 7))
     ax[0].plot(c[0], c[1], 'k', ls='-', zorder=0)
     ax[1].plot(c0[0], c0[1], 'k', zorder=0)
     im0 = ax[0].scatter(c[0], c[1], c=ωv / (2 * np.pi), zorder=1, s=10**2)
     ax[0].set_title('Standardni sustav')  # "Standard frame"
     ax[1].set_title('Lokalni inercijalni sustav')  # "Local inertial frame"
     im1 = ax[1].scatter(c0[0],
                         c0[1],
                         c=ωv / (2 * np.pi),
                         zorder=1,
                         s=10**2)
     e, ei = self.find_tetrad(self.θ_bf)
     for i in range(2):
         vn = v[:, i]
         vn = vn / np.sqrt(np.einsum('i,ij,j', vn, self.g(θ), vn))
         vninv = np.einsum('im,m', e, vn)
         ax[0].plot([0, vn[0]], [0, vn[1]],
                    color='C%d' % i,
                    lw=3,
                    label='$\\lambda=%e$\n' % w[i] +
                    '$\\omega=%e$' % self.external_ωv(θ, vn))
         ax[1].plot(
             [0, vninv[0]],
             [0, vninv[1]],
             'C%d' % i,
             lw=3,
         )
     ax[0].legend(loc=1)
     f.colorbar(im0, ax=ax[0]).set_label(r'$\omega/v/(2\pi)$')
     f.colorbar(im1, ax=ax[1]).set_label(r'$\omega/v/(2\pi)$')
     ax[0].set_xlabel(r'$\dot\theta^{\mu=0}$')
     ax[0].set_ylabel(r'$\dot\theta^{\mu=1}$')
     ax[1].set_xlabel(r'$\dot\theta^{i=0}$')
     ax[1].set_ylabel(r'$\dot\theta^{i=1}$')
     ax[2].plot(A[0], ωv / (2 * np.pi))
     ax[2].set_xlabel(r'$\varphi^0$')
     ax[2].set_ylabel(r'$\omega/v/(2\pi)$')
     f.tight_layout()
     f.savefig(NAME + '_omegas.pdf')
     return f
Example #10
    def compute_approximate_gradients(self, nvalid=0):

        if self.layer_plastic[0]:

            self.sleep_data["fsuff0"] = self.model.dists[0].suff(
                self.sleep_data["x0"])
            self.reg.train_weights(self.sleep_data,
                                   "x0->fsuff0",
                                   nvalid=nvalid)

        for i in range(1, self.model.depth):

            if not self.layer_plastic[i]:
                continue
            n = self.nsleep
            self.sleep_data["dnorm%d" % i] = self.model.dists[i].dnorm(
                self.sleep_data["x%d" % (i - 1)])
            self.reg.train_weights(self.sleep_data,
                                   "x%d->dnorm%d" % (i - 1, i),
                                   nvalid=nvalid)

            if i == (self.model.depth - 1):

                data = self.sleep_data
                lam = self.reg.lam

                z_feat = self.reg.transform_data(
                    data, ["x%d" % (i - 1)])["x%d" % (i - 1)]
                x_suff = self.model.dists[-1].suff(data["x%d" % i])

                x = np.einsum("ij,ik->ijk", z_feat,
                              x_suff).reshape(self.nsleep, -1)
                y = z_feat
                f = self.model.dists[-1].dlogp(data["x%d" % (i - 1)],
                                               data["x%d" % (i)])
                Mx = np.eye(self.nsleep) - x.dot(
                    np.linalg.solve(
                        x.T.dot(x) + np.eye(x.shape[1]) * lam, x.T))
                My = np.eye(self.nsleep) - y.dot(
                    np.linalg.solve(
                        y.T.dot(y) + np.eye(y.shape[1]) * lam, y.T))
                A = np.linalg.solve(
                    (x.T.dot(My).dot(x)) + np.eye(x.shape[1]) * lam,
                    x.T.dot(My).dot(f))
                B = -np.linalg.solve(
                    (y.T.dot(Mx).dot(y)) + np.eye(y.shape[1]) * lam,
                    y.T.dot(Mx).dot(f))

                self.reg.Ws["A"] = A
                self.reg.Ws["B"] = B

            else:
                self.sleep_data["dnatsuff%d" %
                                i] = self.model.dists[i].dnatsuff(
                                    self.sleep_data["x%d" % (i - 1)],
                                    self.sleep_data["x%d" % (i)])
                self.reg.train_weights(self.sleep_data,
                                       "x%d->dnatsuff%d" % (i, i),
                                       nvalid=nvalid)
Example #11
 def smooth(self, gamma, x, u):
     mean = []
     for _x, _u, _gamma in zip(x, u, gamma):
         _mu = np.zeros((len(_x) - 1, self.nb_states, self.dm_obs))
         for k in range(self.nb_states):
             _mu[:, k, :] = self.mean(k, _x[:-1, :], _u[:-1, :self.dm_act])
         mean.append(np.einsum('nk,nkl->nl', _gamma[1:, ...], _mu))
     return mean
Example #12
File: surf.py  Project: asross/surf
 def weingarten_map(self, X, roll=True):
     g = np.rollaxis(self.metric_tensor(X), 2)
     h = np.rollaxis(self.shape_tensor(X), 2)
     res = np.einsum("aio, abi -> abo", np.linalg.inv(g), h)
     if roll:
         return np.rollaxis(res, 0, 3)
     else:
         return res
Example #13
 def devpays(mix):
     """Compute the dev pays"""
     profs = sample_profs(mix)
     payoffs = model_pays(profs)
     numer = rep(anp.prod(mix**profs, 2))
     denom = const_weights(profs, mix)
     weights = numer / denom / learn.num_samples
     return anp.einsum('ij,ij->j', weights, payoffs)
Example #14
 def update_hidden(self, weights, input, hidden, cells):
     concated_input = agnp.concatenate((input, hidden), axis=2)
     W_change, b_change = self.unpack_change_params(weights)
     change = agnp.tanh(
         agnp.einsum('pdh,pnd->pnh', W_change, concated_input) + b_change)
     W_forget, b_forget = self.unpack_forget_params(weights)
     forget = self.hidden_nonlinearity(
         agnp.einsum('pdh,pnd->pnh', W_forget, concated_input) + b_forget)
     W_ingate, b_ingate = self.unpack_ingate_params(weights)
     ingate = self.hidden_nonlinearity(
         agnp.einsum('pdh,pnd->pnh', W_ingate, concated_input) + b_ingate)
     W_outgate, b_outgate = self.unpack_outgate_params(weights)
     outgate = self.hidden_nonlinearity(
         agnp.einsum('pdh,pnd->pnh', W_outgate, concated_input) + b_outgate)
     cells = cells * forget + ingate * change
     hidden = outgate * agnp.tanh(cells)
     return hidden, cells
Example #15
def log_emission_probs(tau, mu_eta, mu_c, Sig_c, xi):
    """
    Calculate log psi, where psi \propto p(obs|z)
    """
    xi1 = xi[:, 1, :]
    T, U = mu_eta.shape
    _, K = mu_c.shape
    lpsi = np.einsum('u,tu,uk->tk', tau, mu_eta, mu_c)
    lpsi += 0.5 * np.einsum('u,uk,uj,tj->tk', tau, mu_c, mu_c, xi1)
    lpsi += 0.5 * np.einsum('u,ukj,tj->tk', tau, Sig_c, xi1)
    lpsi += 0.5 * np.einsum('u,uk,uk,tk->tk', tau, mu_c, mu_c, 1 - xi1)
    lpsi += 0.5 * np.einsum('u,ukk,tk->tk', tau, Sig_c, 1 - xi1)

    log_psi = np.zeros((T, 2, K))
    log_psi[:, 1, :] = lpsi

    return log_psi
Example #16
def _increment_negative_power_in_einsum_r(formula, x, exponent, args1, args2,
                                          args3):
    in_formulas, out_formula = split_einsum_formula(formula)
    new_formula = _reconstitute_einsum_formula(
        in_formulas[:len(args1) + 1 + len(args2)] +
        in_formulas[len(args1) + 2 + len(args2):], out_formula)
    return np.einsum(new_formula,
                     *(args1 + (x**(exponent + 1), ) + args2 + args3))
Example #17
def _transpose_inside_einsum(formula, args1, x, args2):
    in_formulas, out_formula = split_einsum_formula(formula)
    i = len(args1)
    new_formula = _reconstitute_einsum_formula(
        in_formulas[:i] + [in_formulas[i][::-1]] + in_formulas[i + 1:],
        out_formula)
    new_args = args1 + (x, ) + args2
    return np.einsum(new_formula, *new_args)
Example #18
 def hessian_local_log_likelihood(self, x):
     """
     d/dx  (y - lmbda)^T C = d/dx -exp(Cx + d)^T C
         = -C^T exp(Cx + d)^T C
     """
     # Observation likelihoods
     lmbda = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
     return np.einsum('tn, ni, nj ->tij', -lmbda, self.C, self.C)
Example #19
def dot(a, b):
    if b.ndim == 1:
        return _np.dot(a, b)

    if a.ndim == 1:
        return _np.dot(a, b.T)

    return _np.einsum("...i,...i->...", a, b)
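A usage sketch of this dot helper (assuming it and numpy as _np are in scope): two stacks of vectors are contracted pairwise along the last axis, while a 1-D second argument falls back to an ordinary matrix-vector product.

    import numpy as _np

    a = _np.arange(6.0).reshape(2, 3)
    b = _np.arange(6.0).reshape(2, 3) + 1.0

    assert _np.allclose(dot(a, b), (a * b).sum(axis=-1))   # pairwise dots, shape (2,)
    assert _np.allclose(dot(a, b[0]), a @ b[0])            # matrix-vector case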
Example #20
def mog_samples(N, means, chols, pis):
    K, D = means.shape
    indices = discrete(pis, (N,))
    n_means = means[indices,:]
    n_chols = chols[indices,:,:]
    white   = np.random.randn(N,D)
    color   = np.einsum('ikj,ij->ik', n_chols, white)
    return color + n_means
Example #21
def mixture_of_ts_samples(N, locs, scales, pis, df):
    K, D = locs.shape
    indices = mog.discrete(pis, (N, ))
    n_means = locs[indices, :]
    n_chols = scales[indices, :, :]
    white = tdist.rvs(df=df, size=(N, D))
    color = np.einsum('ikj,ij->ik', n_chols, white)
    return color + n_means
Example #22
 def model(X, prior_precision):
   beta = ph.norm.rvs(loc=0.0,
                      scale=1.0 / np.sqrt(prior_precision),
                      size=X.shape[1],
                      name="beta")
   loc = np.einsum('ij,j->i', X, beta)
   y = ph.norm.rvs(loc=loc, scale=1.0, name="y")
   return y
Example #23
 def predictions(weights, inputs):
     """weights is shape (num_weight_samples x num_weights)
        inputs  is shape (num_datapoints x D)"""
     inputs = np.expand_dims(inputs, 0)
     for W, b in unpack_layers(weights):
         outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
         inputs = nonlinearity(outputs)
     return outputs
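The einsum 'mnd,mdo->mno' used in these BNN forward passes is a batched matrix multiply: for each of the m weight samples, the n x d input block is multiplied by that sample's d x o weight matrix. A standalone illustration of just that contraction (hypothetical shapes, plain NumPy):

    import numpy as np

    m, n, d, o = 3, 5, 4, 2                     # weight samples, datapoints, in-dim, out-dim
    inputs = np.random.randn(m, n, d)
    W = np.random.randn(m, d, o)

    out = np.einsum('mnd,mdo->mno', inputs, W)
    assert out.shape == (m, n, o)
    assert np.allclose(out, inputs @ W)         # identical to a stacked matmul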
Example #24
def _expand_integer_power_in_einsum(formula, x, exponent, args1, args2):
    in_formulas, out_formula = split_einsum_formula(formula)
    return np.einsum(
        _reconstitute_einsum_formula(
            in_formulas[:len(args1)] + [
                in_formulas[len(args1)],
            ] * exponent + in_formulas[len(args1) + 1:], out_formula),
        *(args1 + (x, ) * exponent + args2))
Example #25
def bnn_predict(weights, inputs, layer_sizes, act):
    if len(inputs.shape)<3: inputs = np.expand_dims(inputs, 0)  # [1,N,D]
    weights = reshape_weights(weights, layer_sizes)
    for W, b in weights:
        #print(W.shape, inputs.shape)
        outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
        inputs = act_dict[act](outputs)
    return outputs
Example #26
 def update(self, x, u, r, x_, u_):
     rpe = r - self.uQx(u, x)
     z = np.outer(u, x)
     dQ = rpe * np.einsum('a,s->as', u, x)
     if r >= 0:
         self.Q += self.learning_rate_pos * dQ
     else:
         self.Q += self.learning_rate_neg * dQ
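Here np.einsum('a,s->as', u, x) is simply the outer product of the action and state vectors, i.e. the same quantity as the (unused) z computed just above. A one-line check (assuming numpy as np):

    import numpy as np

    u = np.array([0.0, 1.0, 0.0])       # one-hot action
    x = np.array([1.0, 0.0, 0.0, 0.0])  # one-hot state
    assert np.allclose(np.einsum('a,s->as', u, x), np.outer(u, x))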
Example #27
def _add_powers_within_einsum(formula, x, args1, args2, args3, exponent1,
                              exponent2):
    in_formulas, out_formula = split_einsum_formula(formula)
    new_formula = _reconstitute_einsum_formula(
        _remove_list_elements(in_formulas, [len(args1) + 1 + len(args2)]),
        out_formula)
    return np.einsum(new_formula,
                     *(args1 + (x**(exponent1 + exponent2), ) + args2 + args3))
Example #28
 def predictions(weights, inputs):
     """weights is shape (num_weight_samples x num_weights)
        inputs  is shape (num_datapoints x D)"""
     inputs = np.expand_dims(inputs, 0)
     for W, b in unpack_layers(weights):
         outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
         inputs = nonlinearity(outputs)
     return outputs
Example #29
    def sufficientStats( cls, x, constParams=None ):
        # Compute T( x )
        x, y = x
        assert isinstance( x, np.ndarray )
        assert isinstance( y, np.ndarray )

        if( x.ndim == 1 ):
            # Only 1 point was passed in
            x = x.reshape( ( 1, -1 ) )
            assert y.ndim == 1 or y.ndim == 2
            t2 = x.T.dot( x )

            if( y.ndim == 1 ):
                # 1 measurement for x
                y = y.reshape( ( 1, -1 ) )
                t1 = y.T.dot( y )
                t3 = x.T.dot( y )
            else:
                # Multiple measurements for x
                t2 *= y.shape[ 0 ]
                t1 = np.einsum( 'mi,mj->ij', y, y )
                t3 = np.einsum( 'i,mj->ij', x.ravel(), y )
        else:
            # Multiple data points were passed in
            t2 = x.T.dot( x )

            if( y.ndim == 3 ):
                # Multiple measurements of y per x
                assert x.shape[ 0 ] == y.shape[ 1 ]
                t2 *= y.shape[ 0 ]
                t1 = np.einsum( 'mti,mtj->ij', y, y )
                t3 = np.einsum( 'ti,mtj->ij', x, y )
            elif( y.ndim == 2 ):
                # One measurement of y per x
                assert x.shape[ 0 ] == y.shape[ 0 ]
                t1 = y.T.dot( y )
                t3 = x.T.dot( y )
            else:
                assert 0, 'Invalid dim'

        # # For the sake of numerical precision
        # t1 = ( t1 + t1.T ) / 2.0
        # t2 = ( t2 + t2.T ) / 2.0

        return t1, t2, t3
Example #30
    def _update_noderivatives(self, x, u, r, x_, u_):
        """ Computes the value function update of the instrumental Rescorla-Wagner learning rule without the derivative.

        This function is identical to the `.update()` method, except without the derivative computations. It is implemented solely for the purpose of unit testing the gradient calculations against `autograd`.

        """
        rpe = r - self.uQx(u, x)
        z = np.outer(u, x)
        self.Q += self.learning_rate * rpe * np.einsum('a,s->as', u, x)
Example #31
 def logprob(z):
     """z is NxD."""
     z_minus_mean = z - mean
     if len(z.shape) == 1 or z.shape[0] == 1:
         return const - 0.5 * np.dot(np.dot(z_minus_mean, pinv),
                                     z_minus_mean.T)
     else:
         return const - 0.5 * np.einsum('ij,jk,ik->i', z_minus_mean, pinv,
                                        z_minus_mean)
Example #32
 def mc_elbo(pgm_params, i):
     #Here nn_potentials are just the sufficient stats of the data
     x = get_batch(i)
     xxT = np.einsum('ij,ik->ijk', x, x)
     n = np.ones(x.shape[0]) if x.ndim == 2 else 1.
     nn_potentials = pack_dense(xxT, x, n, n)
     saved.stats, global_kl, local_kl = run_inference(
         pgm_prior, pgm_params, nn_potentials)
     return (-global_kl - num_batches * local_kl) / num_datapoints  #CHECK
Example #33
 def log_joint(x, w, epsilon, tau, alpha, beta):
   log_p_epsilon = log_probs.norm_gen_log_prob(epsilon, 0, 1)
   log_p_w = log_probs.norm_gen_log_prob(w, 0, 1)
   log_p_tau = log_probs.gamma_gen_log_prob(tau, alpha, beta)
   # TODO(mhoffman): The transposed version below should work.
   # log_p_x = log_probs.norm_gen_log_prob(x, np.dot(epsilon, w), 1. / np.sqrt(tau))
   log_p_x = log_probs.norm_gen_log_prob(x, np.einsum('ik,jk->ij', epsilon, w),
                                         1. / np.sqrt(tau))
   return log_p_epsilon + log_p_w + log_p_tau + log_p_x
Example #34
def nn_predict_tgcn_cheb(params, x):

    L = graph.rescale_L(hyper['L'][0], lmax=2)
    w = np.fft.fft(x, axis=2)
    xc = chebyshev_time_vertex(L, w, hyper['filter_order'])
    y = np.einsum('knhq,kfh->fnq', xc, params['W1'])
    y += np.expand_dims(params['b1'], axis=2)

    # nonlinear layer
    # y = np.tanh(y)
    y = ReLU(y)

    # dense layer
    y = np.einsum('fnq,cfn->cq', y, params['W2'])
    y += np.expand_dims(params['b2'], axis=1)

    outputs = np.real(y.T)
    return outputs - logsumexp(outputs, axis=1, keepdims=True)
Example #35
def outer(a, b):
    if a.ndim == 2 and b.ndim == 2:
        return _np.einsum("...i,...j->...ij", a, b)

    out = _np.outer(a, b).reshape(a.shape + b.shape)
    if b.ndim == 2:
        out = out.swapaxes(-3, -2)

    return out
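A usage sketch for this batched outer helper (assuming it and numpy as _np are in scope): two stacks of vectors give one outer product per row.

    import numpy as _np

    a = _np.arange(4.0).reshape(2, 2)
    b = _np.arange(6.0).reshape(2, 3)

    res = outer(a, b)                            # shape (2, 2, 3)
    assert res.shape == (2, 2, 3)
    assert _np.allclose(res[1], _np.outer(a[1], b[1]))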
Example #36
    def contextual_feature_map(self, features):
        """ Creates contextual feature map

        Args:
            features (np.array): observation features

        """
        features_squared = np.einsum('ij,ih->ijh', features, features).reshape(features.shape[0], -1)
        return np.hstack([features_squared, features])
Example #37
def tensorexp(r):
    """ returns a stack of rotation matrices as a tensor """
    """ r should be (3,n), n column vectors """
    theta = np.sqrt(np.sum(r*r, axis=0))  # shape = (n,)
    # note: the case where theta == 0 is not handled; we assume there is enough
    # noise and bias that this won't happen
    K = tensorhat(r / theta)  # shape = (3,3,n)
    KK = np.einsum('ijl,jkl->ikl', K, K)
    # Compute w/ Rodrigues' formula
    return np.eye(3)[:, :, np.newaxis] + np.sin(theta) * K + \
        (1 - np.cos(theta)) * KK
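The einsum 'ijl,jkl->ikl' above multiplies the 3x3 slices stacked along the last axis, i.e. K[:, :, l] @ K[:, :, l] for each l. A standalone check of that pattern (the tensorhat helper itself is not reproduced here):

    import numpy as np

    K = np.random.randn(3, 3, 5)
    KK = np.einsum('ijl,jkl->ikl', K, K)
    for l in range(K.shape[2]):
        assert np.allclose(KK[:, :, l], K[:, :, l] @ K[:, :, l])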
Example #38
def test_jacobian_against_wrapper():
    A = npr.randn(3,3,3)
    fun = lambda x: np.einsum(
        'ijk,jkl->il',
        A, np.sin(x[...,None] * np.tanh(x[None,...])))

    B = npr.randn(3,3)
    jac1 = jacobian(fun)(B)
    jac2 = old_jacobian(fun)(B)

    assert np.allclose(jac1, jac2)
Example #39
def magcal_residual(MAG, a, mb):
    """ residual from all observations given magnetometer eccentricity, bias,
    gyro bias, and gyro scale"""

    A = np.array([
        [a[0], a[1], a[2]],
        [0,    a[3], a[4]],
        [0,    0,    a[5]]
    ])

    mag = np.dot(MAG - mb, A)
    return np.mean(np.abs(1 - np.einsum('ji,ji->j', mag, mag)))
Example #40
def make_pinwheel_data(radial_std, tangential_std, num_classes, num_per_class, rate):
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = npr.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:,0] += 1.
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return 10*npr.permutation(np.einsum('ti,tij->tj', features, rotations))
Example #41
def pylds_E_step_inhomog(lds, data):
    T = data.shape[0]
    mu_init, sigma_init, A, sigma_states, C, sigma_obs = lds
    normalizer, smoothed_mus, smoothed_sigmas, E_xtp1_xtT = \
        _E_step(mu_init, sigma_init, A, sigma_states, C, sigma_obs, data)

    EyyT = np.einsum('ti,tj->tij', data, data)
    EyxT = np.einsum('ti,tj->tij', data, smoothed_mus)
    ExxT = smoothed_sigmas + np.einsum('ti,tj->tij', smoothed_mus, smoothed_mus)

    E_xt_xtT = ExxT[:-1]
    E_xtp1_xtp1T = ExxT[1:]
    E_xtp1_xtT = E_xtp1_xtT

    E_x1_x1T = smoothed_sigmas[0] + np.outer(smoothed_mus[0], smoothed_mus[0])
    E_x1 = smoothed_mus[0]

    E_init_stats = E_x1_x1T, E_x1, 1.
    E_pairwise_stats = E_xt_xtT.sum(0), E_xtp1_xtT.sum(0).T, E_xtp1_xtp1T.sum(0), T-1
    E_node_stats = ExxT, np.transpose(EyxT, (0, 2, 1)), EyyT, np.ones(T)

    return E_init_stats, E_pairwise_stats, E_node_stats
Example #42
def get_next_layer(prev_layer, offset, A, Weights):
    """
    Applies the Picard iteration to the current layer.
    """
    N, _, _ = prev_layer.shape

    # precompute A.dot(prev_layer)
    Az = np.einsum('nij,njm->nim', A, prev_layer)
    new_layer = []
    for n in range(N):
        new_layer.append(np.sum(Az * Weights[n, :][:, None, None], axis=0) + \
                         offset.T)

    return np.array(new_layer)
Example #43
def pair_mean_to_natural(A, sigma):
    assert 2 <= A.ndim == sigma.ndim <= 3
    ndim = A.ndim

    einstring = 'tji,tjk->tik' if ndim == 3 else 'ji,jk->ik'
    trans = (0, 2, 1) if ndim == 3 else (1, 0)
    temp = np.linalg.solve(sigma, A)

    Jxx = -1./2 * np.einsum(einstring, A, temp)
    Jxy = np.transpose(temp, trans)
    Jyy = -1./2 * np.linalg.inv(sigma)
    logZ = -1./2 * np.linalg.slogdet(sigma)[1]

    return Jxx, Jxy, Jyy, logZ
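Read as a mean-to-natural conversion, this expresses the conditional Gaussian N(y | Ax, Sigma) through the coefficients of its quadratic log-density in (x, y): log N(y | Ax, Sigma) = x' Jxx x + x' Jxy y + y' Jyy y + logZ - (d/2) log(2*pi), with Jxx = -1/2 A' Sigma^-1 A, Jxy = A' Sigma^-1, Jyy = -1/2 Sigma^-1 and logZ = -1/2 log|Sigma|, which is exactly what the einsum, transpose and slogdet calls compute (batched over the leading t axis when ndim == 3).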
Example #44
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations)
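A small usage sketch (assuming the function and its numpy / numpy.random imports, np and npr, are in scope as in the snippet): five arms of 100 points each give a (500, 2) array of pinwheel data.

    data = make_pinwheel(radial_std=0.3, tangential_std=0.05,
                         num_classes=5, num_per_class=100, rate=0.25)
    assert data.shape == (500, 2)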
Example #45
def mog_like(x, means, icovs, dets, pis):
    """ compute the log likelihood according to a mixture of gaussians
        with means = [mu0, mu1, ... muk]
             icovs = [C0^-1, ..., CK^-1]
             dets = [|C0|, ..., |CK|]
             pis  = [pi1, ..., piK] (sum to 1)
        at locations given by x = [x1, ..., xN]
    """
    xx = np.atleast_2d(x)
    centered = xx[:,:,np.newaxis] - means.T[np.newaxis,:,:]
    solved   = np.einsum('ijk,lji->lki', icovs, centered)
    logprobs = -0.5*np.sum(solved * centered, axis=1) - np.log(2*np.pi) - 0.5*np.log(dets) + np.log(pis)
    logprob  = scpm.logsumexp(logprobs, axis=1)
    if len(x.shape) == 1:
        return np.exp(logprob[0])
    else:
        return np.exp(logprob)
Example #46
    def _ll(self, m, p, xn, **kwargs):
        """Computation of log likelihood

        Dimensions
        ----------
        m : n_unique x n_features
        p : n_unique x n_features x n_features
        xn: N x n_features
        """

        samples = xn.shape[0]
        xn = xn.reshape(samples, 1, self.n_features)
        m = m.reshape(1, self.n_unique, self.n_features)

        det = np.linalg.det(np.linalg.inv(p))
        det = det.reshape(1, self.n_unique)
        tem = np.einsum('NUF,UFX,NUX->NU', (xn - m), p, (xn - m))
        res = (-self.n_features/2.0)*np.log(2*np.pi) - 0.5*tem - 0.5*np.log(det)

        return res  # N x n_unique
Example #47
def gmm_logprob(x, ws, mus, sigs, invsigs=None, logdets=None):
    """ Gaussian Mixture Model likelihood
        Input:
          - x    = N x D array of data (N iid)
          - ws   = K length vector that sums to 1, mixing weights
          - mus  = K x D array of mixture component means
          - sigs = K x D x D array of mixture component covariances

          - invsigs = K x D x D array of mixture component covariance inverses
          - logdets = K array of mixture component covariance logdets

        Output:
          - N length array of log likelihood values

        TODO: speed this up
    """

    if sigs is None:
        assert invsigs is not None and logdets is not None, \
                "need sigs if you don't include logdets and invsigs"

    # compute invsigs if needed
    if invsigs is None:
        invsigs = np.array([np.linalg.inv(sig) for sig in sigs])
        logdets = np.array([np.linalg.slogdet(sig)[1] for sig in sigs])

    # compute each gauss component separately
    xx = np.atleast_2d(x)
    centered = xx[:,:,np.newaxis] - mus.T[np.newaxis,:,:]
    solved   = np.einsum('ijk,lji->lki', invsigs, centered)
    logprobs = -0.5*np.sum(solved * centered, axis=1) - \
                    np.log(2*np.pi) - 0.5*logdets + np.log(ws)
    logprob  = scpm.logsumexp(logprobs, axis=1)
    if len(x.shape) == 1:
        return logprob[0]
    else:
        return logprob
Example #48
def edges_score(u):
    U = anp.reshape(u, (n, d))
    m = U[edges[:, 0]] * U[edges[:, 1]]
    return anp.einsum('ij,ji->i', m, ((1 + mu / k) * W[wc, :].T - vecc[:, np.newaxis])).mean()