Example #1
def dot_equivalent():
    # MNIST-scale convolution operation
    import autograd.scipy.signal
    dat = npr.randn(256, 3, 24, 5, 24, 5)
    kernel = npr.randn(3, 5, 5)
    with tictoc():
        np.tensordot(dat, kernel, axes=[(1, 3, 5), (0, 1, 2)])
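The axes argument reads: contract axes (1, 3, 5) of dat against axes (0, 1, 2) of kernel, leaving dat's remaining axes in order. A minimal sketch in plain NumPy (illustrative, not from the source project) checking that against an explicit einsum:

import numpy as np

dat = np.random.randn(256, 3, 24, 5, 24, 5)
kernel = np.random.randn(3, 5, 5)
out = np.tensordot(dat, kernel, axes=[(1, 3, 5), (0, 1, 2)])
# the non-contracted axes of dat remain: (256, 24, 24)
assert out.shape == (256, 24, 24)
assert np.allclose(out, np.einsum('abcdef,bdf->ace', dat, kernel))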
Example #2
def label_meanfield(label_global, gaussian_globals, gaussian_stats):
    node_potentials = np.tensordot(gaussian_stats, gaussian_globals,
                                   [[1, 2], [1, 2]])
    natparam = node_potentials + label_global
    stats = categorical.expectedstats(natparam)
    kl = np.tensordot(stats, node_potentials) - categorical.logZ(natparam)
    return natparam, stats, kl
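The [[1, 2], [1, 2]] pairing contracts the trailing (4, 4) statistic axes of both arrays, leaving one potential per (data point, component) pair. A shape sketch, assuming the (batch, 4, 4) and (K, 4, 4) shapes documented in the later copies of this function (N and K chosen arbitrarily):

import numpy as np

N, K = 8, 3
gaussian_stats = np.random.randn(N, 4, 4)
gaussian_globals = np.random.randn(K, 4, 4)
node_potentials = np.tensordot(gaussian_stats, gaussian_globals, [[1, 2], [1, 2]])
assert node_potentials.shape == (N, K)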
Example #3
def meanfield_fixed_point(label_global,
                          gaussian_globals,
                          node_potentials,
                          tol=1e-3,
                          max_iter=100):
    kl = np.inf
    label_stats = initialize_meanfield(label_global, node_potentials)
    for i in range(max_iter):
        gaussian_natparam, gaussian_stats, gaussian_kl = \
            gaussian_meanfield(gaussian_globals, node_potentials, label_stats)
        label_natparam, label_stats, label_kl = \
            label_meanfield(label_global, gaussian_globals, gaussian_stats)

        # recompute gaussian_kl linear term with new label_stats b/c labels were updated
        gaussian_global_potentials = np.tensordot(label_stats,
                                                  gaussian_globals, [1, 0])
        linear_difference = gaussian_natparam - gaussian_global_potentials - node_potentials
        gaussian_kl = gaussian_kl + np.tensordot(linear_difference,
                                                 gaussian_stats, 3)

        kl, prev_kl = label_kl + gaussian_kl, kl
        if abs(kl - prev_kl) < tol:
            break
    else:
        print('iteration limit reached')

    return label_stats
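np.tensordot(linear_difference, gaussian_stats, 3) uses the integer form of axes: the last three axes of the first array are contracted against the first three of the second, which for two arrays of the same 3-D shape is a full inner product. A quick sketch (plain NumPy, arbitrary shapes):

import numpy as np

a = np.random.randn(8, 4, 4)
b = np.random.randn(8, 4, 4)
assert np.isclose(np.tensordot(a, b, 3), np.sum(a * b))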
Example #5
 def vector_dot_grad(*args, **kwargs):
     args, vector = args[:-1], args[-1]
     try:
         return np.tensordot(fun_grad(*args, **kwargs), vector,
                             axes=vector.ndim)
     except AttributeError:
         # Assume we are on the product manifold.
         return np.sum([np.tensordot(fun_grad(*args, **kwargs)[k],
                                     vector[k], axes=vector[k].ndim)
                        for k in range(len(vector))])
Example #6
def label_meanfield(label_global, gaussian_globals, gaussian_stats):
    # Ref. Eq 39
    # label_global = E_{q(\pi)}[t(\pi)] where q(\pi) is dirichlet and t(\pi) is {log\pi_i}
    # stats = E_{q(z)}[t(z)] -> categorical expected statistics
    # gaussian_stats = E_{q(x)}[t(x)] where q(x) is NIW and t(x) is [x, xxT]
    # gaussian_globals = \eta_x^0(\theta)
    
    node_potentials = np.tensordot(gaussian_stats, gaussian_globals, [[1,2], [1,2]])
    natparam = node_potentials + label_global
    stats = categorical.expectedstats(natparam)
    kl = np.tensordot(stats, node_potentials) - categorical.logZ(natparam)
    return natparam, stats, kl
Example #7
 def vector_dot_grad(*args, **kwargs):
     args, vector = args[:-1], args[-1]
     try:
         return np.tensordot(fun_grad(*args, **kwargs),
                             vector,
                             axes=vector.ndim)
     except AttributeError:
         # Assume we are on the product manifold.
         return np.sum([
             np.tensordot(fun_grad(*args, **kwargs)[k],
                          vector[k],
                          axes=vector[k].ndim) for k in range(len(vector))
         ])
Example #8
def meanfield_update(label_global, gaussian_globals, node_potentials, label_stats):
    gaussian_natparam, gaussian_stats, gaussian_kl = \
        gaussian_meanfield(gaussian_globals, node_potentials, label_stats)
    label_natparam, label_stats, label_kl = \
        label_meanfield(label_global, gaussian_globals, gaussian_stats)

    # recompute gaussian_kl linear term with new label_stats b/c labels were updated
    gaussian_global_potentials = np.tensordot(label_stats, gaussian_globals, [1, 0])
    linear_difference = gaussian_natparam - gaussian_global_potentials - node_potentials
    gaussian_kl = gaussian_kl + np.tensordot(linear_difference, gaussian_stats, 3)
    kl = label_kl + gaussian_kl

    return (label_natparam, gaussian_natparam), (label_stats, gaussian_stats), kl
Example #9
def gaussian_meanfield(gaussian_globals, node_potentials, label_stats):
    # Ref. Eq 39
    # gaussian_globals = E_{q(\mu, \Sigma)}[t(\mu, \Sigma)] here q(\mu, \Sigma) is posterior which is NIW. Shape = (K, 4, 4)
    # label_stats = E_{q(z)}[t(z)] -> categorical expected statistics. Shape = (batch_size, K)
    # stats = E_{q(z)}[t(z)] -> Gaussian expected statistics Shape = (batch_size, 4, 4)
    # node_potentials = r(\phi, y) Shape = (batch_size, 4, 4)
    global_potentials = np.tensordot(label_stats, gaussian_globals, [1, 0])
    natparam = node_potentials + global_potentials  # using Eq. 39
    stats = gaussian.expectedstats(natparam)
    kl = np.tensordot(node_potentials, stats, 3) - gaussian.logZ(natparam)
    return natparam, stats, kl
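The [1, 0] pairing contracts the K axis of label_stats against the leading axis of gaussian_globals, mixing the per-component global parameters by each point's label responsibilities. A shape sketch with the dimensions from the comments above (N and K arbitrary):

import numpy as np

N, K = 8, 3
label_stats = np.random.randn(N, K)
gaussian_globals = np.random.randn(K, 4, 4)
global_potentials = np.tensordot(label_stats, gaussian_globals, [1, 0])
assert global_potentials.shape == (N, 4, 4)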
Example #10
def label_meanfield(label_global, gaussian_globals, gaussian_suff_stats):
    # Ref. Eq 39
    # label_global = E_{q(\pi)}[t(\pi)] where q(\pi) is dirichlet and t(\pi) is {log\pi_i}
    # stats = E_{q(z)}[t(z)] -> categorical expected statistics
    # gaussian_suff_stats = t(x) where t(x) is [x, xxT] Shape = (batch_size, 4, 4)
    # gaussian_globals = niw expected stats (Shape = (K, 4, 4))
    # node_potenials, label_global, natparam Shape = (batch_size, K)

    node_potentials = np.tensordot(gaussian_suff_stats, gaussian_globals,
                                   [[1, 2], [1, 2]])
    natparam = node_potentials + label_global
    stats = categorical.expectedstats(natparam)
    kl = np.tensordot(stats, node_potentials) - categorical.logZ(natparam)
    return natparam, stats, kl
Example #11
def local_meanfield(global_stats, node_potentials):
    label_global, gaussian_globals = global_stats
    node_potentials = gaussian.pack_dense(*node_potentials)

    def make_fpfun(params):
        label_global, gaussian_globals, node_potentials = params
        def fpfun(state):
            local_natparam, local_stats, kl = state
            return meanfield_update(label_global, gaussian_globals,
                                    node_potentials, local_stats[0])
        return fpfun

    x0 = initialize_meanfield(label_global, gaussian_globals, node_potentials)

    kl_diff = lambda a, b: abs(a[2]-b[2])

    (label_natparam, gaussian_natparam), (label_stats, gaussian_stats), _ = \
        fixed_point(make_fpfun, (label_global, gaussian_globals, node_potentials), x0, kl_diff, tol=1e-3)

    # collect sufficient statistics for gmm prior (sum across conditional iid)
    dirichlet_stats = np.sum(label_stats, 0)
    niw_stats = np.tensordot(label_stats, gaussian_stats, [0, 0])

    local_stats = label_stats, gaussian_stats
    prior_stats = dirichlet_stats, niw_stats
    natparam = label_natparam, gaussian_natparam
    kl = local_kl(getval(gaussian_globals), getval(label_global),
        label_natparam, gaussian_natparam, label_stats, gaussian_stats)

    return local_stats, prior_stats, natparam, kl
Example #12
def test_fwd_rev_hessian_matrix_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5, 4)
    V = npr.randn(5, 4)
    H = hessian(fun)(a)
    check_equivalent(np.tensordot(H, V),
                     hessian_vector_product(fun, method='fwd-rev')(a, V))
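With no axes argument, np.tensordot defaults to axes=2, contracting the last two axes of H (shape (5, 4, 5, 4)) against the two axes of V. A plain-NumPy sketch of that default:

import numpy as np

H = np.random.randn(5, 4, 5, 4)
V = np.random.randn(5, 4)
hvp = np.tensordot(H, V)  # default axes=2
assert hvp.shape == (5, 4)
assert np.allclose(hvp, np.einsum('ijkl,kl->ij', H, V))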
Example #13
 def vector_dot_gradient(*args):
     arguments, vectors = args[:-1], args[-1]
     gradients = gradient(*arguments)
     return np.sum([
         np.tensordot(gradient, vector, axes=vector.ndim)
         for gradient, vector in zip(gradients, vectors)
     ])
Example #14
    def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):

        x = np.concatenate(datas)
        weights = np.concatenate([Ez for Ez, _, _ in expectations])  # T x D
        assert x.shape[0] == weights.shape[0]

        # convert angles to 2D representation and employ closed form solutions
        x_k = np.stack((np.sin(x), np.cos(x)), axis=1)  # T x 2 x D

        r_k = np.tensordot(weights.T, x_k, axes=1)  # K x 2 x D
        r_norm = np.sqrt(np.sum(np.power(r_k, 2), axis=1))  # K x D

        mus_k = np.divide(r_k, r_norm[:, None])  # K x 2 x D
        r_bar = np.divide(r_norm, np.sum(weights, 0)[:, None])  # K x D

        mask = (r_norm.sum(1) == 0)
        mus_k[mask] = 0
        r_bar[mask] = 0

        # Approximation
        kappa0 = r_bar * (self.D + 1 - np.power(r_bar, 2)) / (
            1 - np.power(r_bar, 2))  # K,D

        kappa0[kappa0 == 0] += 1e-6

        for k in range(self.K):
            self.mus[k] = np.arctan2(*mus_k[k])
            self.log_kappas[k] = np.log(kappa0[k])  # K, D
Example #15
def local_meanfield(global_natparam, gaussian_suff_stats):
    # global_natparam = \eta_{\theta}^0
    dirichlet_natparam, niw_natparams = global_natparam

    #### compute expected global parameters using current global factors
    # label_global = E_{q(\pi)}[t(\pi)] here q(\pi) is posterior which is dirichlet with parameter dirichlet_natparam and t is [log\pi_1, log\pi_2....]
    # gaussian_globals = E_{q(\mu, \Sigma)}[t(\mu, \Sigma)] here q(\mu, \Sigma) is posterior which is NIW
    # label_stats = E_{q(z)}[t(z)] -> categorical expected statistics. Shape = (batch_size, K)
    # gaussian_suff_stats  Shape = (batch_size, 4, 4)
    label_global = dirichlet.expectedstats(dirichlet_natparam)
    gaussian_globals = niw.expectedstats(niw_natparams)

    #### compute values that depend directly on boxed node_potentials at optimum
    label_natparam, label_stats, label_kl = \
        label_meanfield(label_global, gaussian_globals, gaussian_suff_stats)

    #### collect sufficient statistics for gmm prior (sum across conditional iid)
    dirichlet_stats = np.sum(label_stats, 0)
    niw_stats = np.tensordot(label_stats, gaussian_suff_stats, [0, 0])

    local_stats = label_stats, gaussian_suff_stats
    prior_stats = dirichlet_stats, niw_stats
    natparam = label_natparam
    kl = label_kl

    return prior_stats, natparam, kl
Example #16
    def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
        from autograd.scipy.special import i0, i1
        x = np.concatenate(datas)

        weights = np.concatenate([Ez for Ez, _, _ in expectations])

        # convert angles to 2D representation and employ closed form solutions
        x_k = np.stack((np.sin(x), np.cos(x)), axis=1)

        r_k = np.tensordot(weights.T, x_k, (-1, 0))

        r_norm = np.sqrt(np.sum(r_k**2, 1))
        mus_k = r_k / r_norm[:, None]
        r_bar = r_norm / weights.sum(0)[:, None]

        # truncated newton approximation with 2 iterations
        kappa_0 = r_bar * (2 - r_bar**2) / (1 - r_bar**2)

        kappa_1 = kappa_0 - ((i1(kappa_0)/i0(kappa_0)) - r_bar) / \
                  (1 - (i1(kappa_0)/i0(kappa_0)) ** 2 - (i1(kappa_0)/i0(kappa_0)) / kappa_0)
        kappa_2 = kappa_1 - ((i1(kappa_1)/i0(kappa_1)) - r_bar) / \
                  (1 - (i1(kappa_1)/i0(kappa_1)) ** 2 - (i1(kappa_1)/i0(kappa_1)) / kappa_1)

        for k in range(self.K):
            self.mus[k] = np.arctan2(*mus_k[k])
            self.log_kappas[k] = np.log(kappa_2[k])
Example #17
 def mapping(self, s, t, s_new, k, c):
     """ Map s_new to t_new based on known mapping of
         s (source) to t (target),
         with s original/intrinsic coordinates
         and t intrinsic/original coordinates """
     n, s_dim = s.shape
     t_dim = t.shape[1]
     n_new = s_new.shape[0]
     # 1. determine nearest neighbors
     dist = np.sum((s[np.newaxis] - s_new[:, np.newaxis])**2, -1)
     nn_ids = np.argsort(dist)[:, :k]  # indices of the k nearest neighbors
     nns = np.row_stack([s[nn_ids[:, ki]] for ki in range(k)])
     nns = nns.reshape((n_new, k, s_dim), order='F')
     # 2. determine Gram matrices
     dif = s_new[:, np.newaxis] - nns
     G = np.tensordot(dif, dif, axes=([2], [2]))
     G = G[np.arange(n_new), :, np.arange(n_new)]
     # 3. determine weights (not worth vectorizing this loop)
     weights = np.zeros((n_new, k))
     for i_n in range(n_new):
         weights[i_n] = np.linalg.inv(G[i_n] + c * np.eye(k)).dot(
             np.ones((k, )))
     weights /= np.sum(weights, -1, keepdims=True)
     # 4. compute coordinates
     t_nns = np.row_stack([t[nn_ids[:, ki]] for ki in range(k)])
     t_nns = t_nns.reshape((n_new, k, t_dim), order='F')
     t_new = np.dot(weights, t_nns)
     t_new = t_new[np.arange(n_new), np.arange(n_new)]
     return t_new
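Step 3 computes standard locally-linear reconstruction weights: solve (G + c·I)w = 1, then normalize so the weights sum to one. A self-contained sketch for a single query point (shapes chosen for illustration):

import numpy as np

k, c = 5, 1e-3
dif = np.random.randn(k, 3)   # offsets from the query point to its k neighbors
G = dif @ dif.T               # local Gram matrix, shape (k, k)
w = np.linalg.solve(G + c * np.eye(k), np.ones(k))
w /= w.sum()                  # reconstruction weights now sum to one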
Example #18
def test_hessian_tensor_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4, 3)
    H = hessian(fun)(a)
    check_equivalent(np.tensordot(H, V, axes=np.ndim(V)),
                     hessian_tensor_product(fun)(a, V))
Example #19
def test_tensor_jacobian_product():
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4)
    J = jacobian(fun)(a)
    check_equivalent(np.tensordot(V, J, axes=np.ndim(V)),
                     tensor_jacobian_product(fun)(a, V))
Example #20
def local_meanfield(global_natparam, node_potentials):
    dirichlet_natparam, niw_natparams = global_natparam
    node_potentials = gaussian.pack_dense(*node_potentials)

    # compute expected global parameters using current global factors
    label_global = dirichlet.expectedstats(dirichlet_natparam)
    gaussian_globals = niw.expectedstats(niw_natparams)

    # compute mean field fixed point using unboxed node_potentials
    label_stats = meanfield_fixed_point(label_global, gaussian_globals, getval(node_potentials))

    # compute values that depend directly on boxed node_potentials at optimum
    gaussian_natparam, gaussian_stats, gaussian_kl = \
        gaussian_meanfield(gaussian_globals, node_potentials, label_stats)
    label_natparam, label_stats, label_kl = \
        label_meanfield(label_global, gaussian_globals, gaussian_stats)

    # collect sufficient statistics for gmm prior (sum across conditional iid)
    dirichlet_stats = np.sum(label_stats, 0)
    niw_stats = np.tensordot(label_stats, gaussian_stats, [0, 0])

    local_stats = label_stats, gaussian_stats
    prior_stats = dirichlet_stats, niw_stats
    natparam = label_natparam, gaussian_natparam
    kl = label_kl + gaussian_kl

    return local_stats, prior_stats, natparam, kl
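The [0, 0] pairing in the niw_stats line contracts over the batch axis, so each component's statistic is a responsibility-weighted sum of per-point Gaussian statistics. A shape sketch under the (N, K) and (N, 4, 4) shapes assumed earlier:

import numpy as np

N, K = 8, 3
label_stats = np.random.randn(N, K)
gaussian_stats = np.random.randn(N, 4, 4)
niw_stats = np.tensordot(label_stats, gaussian_stats, [0, 0])
assert niw_stats.shape == (K, 4, 4)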
Example #21
def local_meanfield(global_natparam, node_potentials):
    # global_natparam = \eta_{\theta}^0
    # node_potentials = r(\phi, y)
    
    dirichlet_natparam, niw_natparams = global_natparam
    node_potentials = gaussian.pack_dense(*node_potentials)

    #### compute expected global parameters using current global factors
    # label_global = E_{q(\pi)}[t(\pi)] here q(\pi) is posterior which is dirichlet with parameter dirichlet_natparam and t is [log\pi_1, log\pi_2....]
    # gaussian_globals = E_{q(\mu, \Sigma)}[t(\mu, \Sigma)] here q(\mu, \Sigma) is posterior which is NIW    
    label_global = dirichlet.expectedstats(dirichlet_natparam)
    gaussian_globals = niw.expectedstats(niw_natparams)

    #### compute mean field fixed point using unboxed node_potentials
    label_stats = meanfield_fixed_point(label_global, gaussian_globals, getval(node_potentials))

    #### compute values that depend directly on boxed node_potentials at optimum
    gaussian_natparam, gaussian_stats, gaussian_kl = \
        gaussian_meanfield(gaussian_globals, node_potentials, label_stats)
    label_natparam, label_stats, label_kl = \
        label_meanfield(label_global, gaussian_globals, gaussian_stats)

    #### collect sufficient statistics for gmm prior (sum across conditional iid)
    dirichlet_stats = np.sum(label_stats, 0)
    niw_stats = np.tensordot(label_stats, gaussian_stats, [0, 0])

    local_stats = label_stats, gaussian_stats
    prior_stats = dirichlet_stats, niw_stats
    natparam = label_natparam, gaussian_natparam
    kl = label_kl + gaussian_kl

    return local_stats, prior_stats, natparam, kl
Example #22
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.man = Sphere(m, n)

        # For automatic testing of ehess2rhess
        self.proj = lambda x, u: u - npa.tensordot(x, u, np.ndim(u)) * x
Example #23
def kernelpdf(scale, sigma, dataset, datasetGen):

    # dataset is binned as eta1, eta2, mass, pt2, pt1

    maxR = np.full((100), 3.3)
    minR = np.full((100), 2.9)

    valsReco = np.linspace(minR[0], maxR[0], 100)
    valsGen = valsReco

    # outer product with the gen mass values: a 5-D array covering all
    # combinations of kinematics and gen mass
    h = np.tensordot(scale, valsGen, axes=0)
    h_ext = np.swapaxes(np.swapaxes(h, 2, 4), 3, 4)[:, :, np.newaxis, :, :, :]

    sigma_ext = sigma[:, :, np.newaxis, np.newaxis, :, :]

    xscale = np.sqrt(2.) * sigma_ext

    maxR_ext = maxR[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis,
                    np.newaxis]
    minR_ext = minR[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis,
                    np.newaxis]

    maxZ = ((maxR_ext - h_ext.astype('float64')) / xscale)
    minZ = ((minR_ext - h_ext.astype('float64')) / xscale)

    arg = np.sqrt(np.pi / 2.) * sigma_ext * (erf(maxZ) - erf(minZ))

    # take the tensor product between mass and genMass dimensions, sum over
    # gen masses, and divide each bin by the sum of gen events in that bin
    den = np.where(
        np.sum(datasetGen, axis=2) > 1000., np.sum(datasetGen, axis=2),
        -1)[:, :, np.newaxis, :, :]

    I = np.sum(arg * datasetGen[:, :, np.newaxis, :, :, :], axis=3) / den

    # give vals the right shape: add a dimension for gen mass (axis=3)
    vals_ext = valsReco[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis,
                        np.newaxis]

    gaus = np.exp(-np.power(vals_ext - h_ext.astype('float64'), 2.) /
                  (2 * np.power(sigma_ext, 2.)))

    # take the tensor product between mass and genMass dimensions, sum over
    # gen masses, and divide each bin by the sum of gen events in that bin
    den2 = np.where(
        np.sum(datasetGen, axis=2) > 1000., np.sum(datasetGen, axis=2),
        1)[:, :, np.newaxis, :, :]

    pdf = np.sum(gaus * datasetGen[:, :, np.newaxis, :, :, :],
                 axis=3) / den2 / np.where(I > 0., I, -1)

    pdf = np.where(pdf > 0., pdf, 0.)

    massbinwidth = (maxR[0] - minR[0]) / 100

    pdf = pdf * massbinwidth

    return pdf
Example #24
def predict(prj, x_tensors, path=None):
    assert isinstance(x_tensors, list)

    freq_dim = len(prj)
    assert len(x_tensors) == freq_dim+2

    #subscripts = 'wrl,wrm, Ar,ABl,BCm,Co -> wo'

    # Ux contains (wrAB, wrBC)
    Ux = []
    for i in range(freq_dim):
        # (wrl, ABl) -> (wrAB)
        Ux.append(numpy.tensordot(prj[i], x_tensors[i+1], axes=([2],[2])))

    # (Ar, wrAB)->(wrAB)
    tmp = numpy.transpose(x_tensors[0])[None,:,:,None] * Ux[0]

    # wrAB -> wrB
    Ux[0] = numpy.sum(tmp, axis=2)

    # (wrB) * (wrBC) -> (wrC)
    tmp = numpy.sum(Ux[0][:,:,:,None] * Ux[1], axis=2)
    if freq_dim==3:
        # (wrC) * (wrCD) -> (wrD) [only if freq_dim==3]
        tmp = numpy.sum(tmp[:,:,:,None] * Ux[2], axis=2)

    # wrC -> wC
    tmp2 = numpy.sum(tmp, axis=1)

    #(wC, Co) -> wo
    tmp3 = numpy.dot(tmp2, x_tensors[-1])

    return tmp3, None
Example #26
    def conv_function(self, tensor_windows):
        # compute convolutions
        a = np.tensordot(tensor_windows, self.kernels.T)

        # swap axes to match up with earlier versions
        a = a.swapaxes(0, 2)
        a = a.swapaxes(1, 2)
        return a
Example #27
    def get_lds_global_stats(hmm_stats, lds_stats):
        _, _, expected_states = hmm_stats
        init_stats, pair_stats = lds_stats

        contract = lambda w: lambda p: np.tensordot(w, p, axes=1)
        global_init_stats = tuple(scale(w, init_stats) for w in expected_states[0])
        global_pair_stats = tuple(map(contract(w), pair_stats) for w in expected_states[1:].T)

        return zip(global_init_stats, global_pair_stats)
Example #28
    def setUp(self):
        self.m = m = 100
        self.n = n = 50
        self.manifold = Sphere(m, n)

        # For automatic testing of euclidean_to_riemannian_hessian
        self.projection = lambda x, u: u - np.tensordot(x, u, np.ndim(u)) * x

        super().setUp()
Example #29
 def test_inner_product(self):
     manifold = self.manifold
     k = self.k
     n = self.n
     x = manifold.random_point()
     a, b = np.random.normal(size=(2, k, n, n))
     np_testing.assert_almost_equal(
         np.tensordot(a, b.transpose((0, 2, 1)), axes=a.ndim),
         manifold.inner_product(x, x @ a, x @ b),
     )
Example #30
def predict_2d_kaf_nn(w, X, info):
    """
    Compute the outputs of a 2D-KAF feedforward network.
    """
    Dx, Dy, gamma = info
    for W, b, alpha in w:
        outputs = np.tensordot(X, W, axes=1) + b
        K = gauss_2d_kernel(outputs, (Dx, Dy), gamma)
        X = np.sum(K * alpha, axis=2)
    return X
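Here axes=1 contracts the last axis of X against the first axis of W, which for 2-D arrays is ordinary matrix multiplication, so the tensordot plus bias behaves like a dense layer. A minimal check in plain NumPy:

import numpy as np

X = np.random.randn(6, 3)
W = np.random.randn(3, 4)
assert np.allclose(np.tensordot(X, W, axes=1), X @ W)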
Example #31
    def test_random_tangent_vector(self):
        # Just make sure that things generated are in the tangent space and
        # that if you generate two they are not equal.
        s = self.manifold
        x = s.random_point()
        u = s.random_tangent_vector(x)
        v = s.random_tangent_vector(x)
        np_testing.assert_almost_equal(np.tensordot(x, u), 0)

        assert np.linalg.norm(u - v) > 1e-3
Example #32
    def avg_pred_acc(W, X, t):
        # compute the log MAP estimation error
        W_reshape = W.T.reshape(784, 10, 100)

        z = np.tensordot(X, W_reshape, axes=1)
        sf_sum = logsumexp(z, axis=1, keepdims=True)

        softmax = z - np.hstack([sf_sum for i in range(10)])
        softmax_avg = softmax.mean(axis=2)

        return np.mean(np.argmax(softmax_avg, axis=1) == np.argmax(t, axis=1))
Example #33
File: fca.py  Project: zuoshifan/FCA
def z2y(Zc, type='rectangular', nc=None):
    """Transform a centered z to y by truncate small eigen-values."""
    Czz = cov(Zc, type)
    e, U = la.eigh(Czz)
    if nc is None:
        nc = len(e[e > 0.0])
    es = e[-nc:]**0.5
    Us = U[:, -nc:]
    Y = np.tensordot((Us / es).T, Zc, axes=(1, 0))
    # NOTE: we have Cyy = I, already whitened

    return Y, es, Us
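A sanity check of the "already whitened" note, assuming cov() computes the sample covariance of the rows (plain NumPy and synthetic data, for illustration only):

import numpy as np

rng = np.random.default_rng(0)
Zc = rng.standard_normal((5, 10000))
Zc -= Zc.mean(axis=1, keepdims=True)   # center
Czz = Zc @ Zc.T / Zc.shape[1]          # sample covariance
e, U = np.linalg.eigh(Czz)
nc = len(e[e > 0.0])
es = e[-nc:] ** 0.5
Us = U[:, -nc:]
Y = np.tensordot((Us / es).T, Zc, axes=(1, 0))
Cyy = Y @ Y.T / Y.shape[1]
assert np.allclose(Cyy, np.eye(nc), atol=0.05)  # approximately identity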
Example #34
File: gmm.py  Project: mattjj/svae
def meanfield_fixed_point(label_global, gaussian_globals, node_potentials, tol=1e-3, max_iter=100):
    kl = np.inf
    label_stats = initialize_meanfield(label_global, node_potentials)
    for i in range(max_iter):
        gaussian_natparam, gaussian_stats, gaussian_kl = \
            gaussian_meanfield(gaussian_globals, node_potentials, label_stats)
        label_natparam, label_stats, label_kl = \
            label_meanfield(label_global, gaussian_globals, gaussian_stats)

        # recompute gaussian_kl linear term with new label_stats b/c labels were updated
        gaussian_global_potentials = np.tensordot(label_stats, gaussian_globals, [1, 0])
        linear_difference = gaussian_natparam - gaussian_global_potentials - node_potentials
        gaussian_kl = gaussian_kl + np.tensordot(linear_difference, gaussian_stats, 3)

        kl, prev_kl = label_kl + gaussian_kl, kl
        if abs(kl - prev_kl) < tol:
            break
    else:
        print('iteration limit reached')

    return label_stats
Example #35
def label_meanfield(label_global, gaussian_globals, gaussian_stats):
    partial_contract = lambda a, b: \
        sum(np.tensordot(x, y, axes=np.ndim(y)) for x, y in zip(a, b))

    gaussian_local_natparams = map(niw.expectedstats, gaussian_globals)
    node_params = np.array([
        partial_contract(gaussian_stats, natparam) for natparam in gaussian_local_natparams]).T

    local_natparam = dirichlet.expectedstats(label_global) + node_params
    stats = normalize(np.exp(local_natparam - logsumexp(local_natparam, axis=1, keepdims=True)))
    vlb = np.sum(logsumexp(local_natparam, axis=1)) - contract(stats, node_params)

    return local_natparam, stats, vlb
Example #36
 def get_local_natparam(gaussian_globals, node_potentials, label_stats):
     local_natparams = [np.tensordot(label_stats, param, axes=1)
                        for param in zip(*map(niw.expectedstats, gaussian_globals))]
     return add(local_natparams, make_full_potentials(node_potentials))
Example #37
def get_global_stats(label_stats, gaussian_stats):
    contract = lambda w: lambda p: np.tensordot(w, p, axes=1)
    global_label_stats = np.sum(label_stats, axis=0)
    global_gaussian_stats = tuple(map(contract(w), gaussian_stats) for w in label_stats.T)
    return global_label_stats, global_gaussian_stats
Example #38
File: gmm.py  Project: mattjj/svae
def gaussian_meanfield(gaussian_globals, node_potentials, label_stats):
    global_potentials = np.tensordot(label_stats, gaussian_globals, [1, 0])
    natparam = node_potentials + global_potentials
    stats = gaussian.expectedstats(natparam)
    kl = np.tensordot(node_potentials, stats, 3) - gaussian.logZ(natparam)
    return natparam, stats, kl
Example #39
 def vector_dot_grad(*args, **kwargs):
     args, vector = args[:-1], args[-1]
     return np.tensordot(fun_grad(*args, **kwargs), vector, np.ndim(vector))
Example #40
def test_matrix_jacobian_product():
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5, 4)
    V = npr.randn(5, 4)
    J = jacobian(fun)(a)
    check_equivalent(np.tensordot(V, J), vector_jacobian_product(fun)(a, V))
Example #41
 def fun(x):
     return np.tensordot(x * np.ones((2,2)),
                         x * np.ones((2,2)), 2)
Example #42
def test_tensor_jacobian_product():
    fun = lambda a: np.roll(np.sin(a), 1)
    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4)
    J = jacobian(fun)(a)
    check_equivalent(np.tensordot(V, J, axes=np.ndim(V)), vector_jacobian_product(fun)(a, V))
Example #43
def test_hessian_matrix_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5, 4)
    V = npr.randn(5, 4)
    H = hessian(fun)(a)
    check_equivalent(np.tensordot(H, V), hessian_vector_product(fun)(a, V))
Example #44
def test_hessian_tensor_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4, 3)
    H = hessian(fun)(a)
    check_equivalent(np.tensordot(H, V, axes=np.ndim(V)), hessian_vector_product(fun)(a, V))
Example #45
 def hess(x, g):
     return np.tensordot(ad.hessian(objective)(x), g, axes=x.ndim)
 return hess
Example #46
File: gmm.py  Project: mattjj/svae
def label_meanfield(label_global, gaussian_globals, gaussian_stats):
    node_potentials = np.tensordot(gaussian_stats, gaussian_globals, [[1,2], [1,2]])
    natparam = node_potentials + label_global
    stats = categorical.expectedstats(natparam)
    kl = np.tensordot(stats, node_potentials) - categorical.logZ(natparam)
    return natparam, stats, kl
Example #47
 def vector_dot_fun(*args, **kwargs):
     args, vector = args[:-1], args[-1]
     return np.tensordot(vector, fun(*args, **kwargs), axes=np.ndim(vector))