Example #1
0
    def __init__(self, mean, cov, d=None):
        """Build a multivariate Gaussian from a mean and covariance.

        Args:
          mean: tensor-like of shape (d,) or (d, 1); reshaped internally
            to a (d, 1) column vector.
          cov: tensor-like of shape (d, d).
          d: optional dimension; if given it is checked against the
            shapes of `mean` and `cov`.
        """
        mean = tf.convert_to_tensor(mean)
        cov = tf.convert_to_tensor(cov)

        # Accept either a flat (d,) mean or a (d, 1) column vector.
        # Unpacking a rank-2 shape into a single value raises ValueError;
        # catch only that (a bare `except:` would hide real errors).
        try:
            d1, = util.extract_shape(mean)
            mean = tf.reshape(mean, (d1, 1))
        except ValueError:
            d1, k = util.extract_shape(mean)
            assert (k == 1)

        d2, _ = util.extract_shape(cov)
        assert (d1 == d2)
        if d is None:
            d = d1
        else:
            assert (d == d1)

        super(MVGaussianMeanCov, self).__init__(d=d)

        self._mean = mean
        self._cov = cov

        self._L_cov = tf.cholesky(cov)
        self._entropy = util.dists.multivariate_gaussian_entropy(L=self._L_cov)

        # Precision quantities derived from the Cholesky factor:
        # inv(L) is triangular, prec = inv(L)' inv(L) = inv(cov).
        L_prec_transpose = util.triangular_inv(self._L_cov)
        self._L_prec = tf.transpose(L_prec_transpose)
        self._prec = tf.matmul(self._L_prec, L_prec_transpose)
        self._prec_mean = tf.matmul(self._prec, self._mean)
Example #2
0
    def __init__(self, prec_mean, prec, d=None):
        """Build a multivariate Gaussian from natural parameters.

        Args:
          prec_mean: tensor-like of shape (d,) or (d, 1); the product
            precision @ mean. Reshaped internally to a (d, 1) column.
          prec: tensor-like precision (inverse covariance) of shape (d, d).
          d: optional dimension; if given it is checked against the
            shapes of `prec_mean` and `prec`.
        """
        prec_mean = tf.convert_to_tensor(prec_mean)
        prec = tf.convert_to_tensor(prec)

        # Accept either a flat (d,) vector or a (d, 1) column vector.
        # Unpacking a rank-2 shape into a single value raises ValueError;
        # catch only that (a bare `except:` would hide real errors).
        try:
            d1, = util.extract_shape(prec_mean)
            prec_mean = tf.reshape(prec_mean, (d1, 1))
        except ValueError:
            d1, k = util.extract_shape(prec_mean)
            assert (k == 1)

        d2, _ = util.extract_shape(prec)
        assert (d1 == d2)
        if d is None:
            d = d1
        else:
            assert (d == d1)

        super(MVGaussianNatural, self).__init__(d=d)

        self._prec_mean = prec_mean
        self._prec = prec

        self._L_prec = tf.cholesky(prec)
        self._entropy = util.dists.multivariate_gaussian_entropy(
            L_prec=self._L_prec)

        # want to solve prec * mean = prec_mean for mean.
        # this is equiv to (LL') * mean = prec_mean.
        # since tf doesn't have a cholSolve shortcut, just
        # do it directly:
        #   solve L y = prec_mean
        # to get y = (L' * mean), then
        #   solve L' mean = y
        y = tf.matrix_triangular_solve(self._L_prec,
                                       self._prec_mean,
                                       lower=True,
                                       adjoint=False)
        self._mean = tf.matrix_triangular_solve(self._L_prec,
                                                y,
                                                lower=True,
                                                adjoint=True)

        # Covariance quantities derived from the Cholesky factor:
        # inv(L_prec) is triangular, cov = inv(L_prec)' inv(L_prec) = inv(prec).
        L_cov_transpose = util.triangular_inv(self._L_prec)
        self._L_cov = tf.transpose(L_cov_transpose)
        self._cov = tf.matmul(self._L_cov, L_cov_transpose)
Example #3
0
    def inverse_linear_transform(self, A):
        """Interpret self as the distribution of A @ x and return the
        implied natural-parameter Gaussian on x.

        A has shape (self.d, n); the result is an n-dimensional
        MVGaussianNatural with prec = A' P A and prec_mean = A' (P m).
        """
        n_rows, n_cols = util.extract_shape(A)
        assert (n_rows == self.d)

        A_t = tf.transpose(A)
        prec_x = tf.matmul(A_t, tf.matmul(self.prec(), A))
        prec_mean_x = tf.matmul(A_t, self.prec_mean())
        return MVGaussianNatural(prec_mean_x, prec_x, d=n_cols)
Example #4
0
def build_trait_network(sparse_ratings, mask, n_traits, weights=None):
    """Map sparse rating rows (plus an observation mask) to per-user
    trait posteriors.

    Two pathways share no weights: the ratings drive the trait means,
    the observation mask drives the trait stds.

    Args:
      sparse_ratings: (batch, n_inputs) mostly-zero ratings matrix.
      mask: (batch, n_inputs) observation-mask matrix.
      n_traits: number of output traits per user.
      weights: optional dict of network parameters; freshly initialized
        (and returned for reuse) when None.

    Returns:
      (means, stds, weights) where means/stds have shape (batch, n_traits).
    """
    from elbow.models.neural import layer, init_weights, init_biases, init_const

    _, n_inputs = util.extract_shape(sparse_ratings)
    n_hidden1 = 3 * n_traits
    n_hidden2 = 2 * n_traits

    if weights is None:
        # Dict-literal order matches the original sequential init calls so
        # any RNG-seed-dependent reproducibility is preserved.
        weights = {
            "W1": init_weights((n_inputs, n_hidden1), stddev=1e-4),
            "b1": init_biases((n_hidden1, )),
            "Wmask": init_weights((n_inputs, n_hidden1), stddev=1e-4),
            "bmask": init_biases((n_hidden1, )),
            "Wmask2": init_weights((n_hidden1, n_hidden2), stddev=1e-4),
            "bmask2": init_biases((n_hidden2, )),
            "W2": init_weights((n_hidden1, n_hidden2), stddev=1e-4),
            "b2": init_biases((n_hidden2, )),
            "W_means": init_weights((n_hidden2, n_traits), stddev=1e-4),
            "b_means": init_biases((n_traits, )),
            "W_stds": init_weights((n_hidden2, n_traits), stddev=1e-4),
            "b_stds": init_const((n_traits, ), val=-2),
        }

    # Parameter names must match the weight-dict keys (invoked as **weights).
    def build_network(W1, Wmask, W2, b1, bmask, b2, W_means, b_means, W_stds,
                      b_stds, Wmask2, bmask2):
        def sparse_affine(inp, w, b):
            # hint TF that `inp` is sparse so it can pick a sparse matmul kernel
            return tf.matmul(inp, w, a_is_sparse=True) + b

        # means pathway: ratings -> elu -> elu -> linear
        h1 = tf.nn.elu(sparse_affine(sparse_ratings, W1, b1))
        h2 = tf.nn.elu(layer(h1, W2, b2))
        means = layer(h2, W_means, b_means)

        # stds pathway: mask -> affine -> elu -> softplus (positivity)
        h1m = sparse_affine(mask, Wmask, bmask)
        h2m = tf.nn.elu(layer(h1m, Wmask2, bmask2))
        stds = tf.nn.softplus(layer(h2m, W_stds, b_stds))

        # keep both outputs in a numerically safe range
        stds = tf.clip_by_value(stds, 1e-8, 100)
        means = tf.clip_by_value(means, -100, 100)

        return means, stds

    means, stds = build_network(**weights)

    return means, stds, weights
Example #5
0
    def __init__(self,
                 z,
                 d_hidden,
                 d_x,
                 w1=None,
                 w2=None,
                 b1=None,
                 b2=None,
                 **kwargs):
        """Set up a neural Bernoulli transform of the latent `z`.

        Args:
          z: latent input, either a tf.Tensor or an object with a
            numpy-style `.shape` attribute.
          d_hidden: hidden-layer width.
          d_x: output dimension.
          w1, w2, b1, b2: optional pre-built network parameters,
            forwarded to the superclass.
        """
        # extract_shape handles tf.Tensor; anything else is assumed to
        # expose a numpy-style .shape tuple.
        z_shape = util.extract_shape(z) if isinstance(z,
                                                      tf.Tensor) else z.shape
        # Bug fix: previously read z.shape[-1] directly, discarding the
        # z_shape computed above — for a tf.Tensor that returns a
        # Dimension object (or fails), not the int extract_shape gives.
        self.d_z = z_shape[-1]
        self.d_hidden = d_hidden
        self.d_x = d_x

        super(NeuralBernoulliTransform, self).__init__(z=z,
                                                       w1=w1,
                                                       w2=w2,
                                                       b1=b1,
                                                       b2=b2,
                                                       **kwargs)
Example #6
0
 def linear_transform(self, A):
     """Return the Gaussian of A @ x for x ~ self.

     A has shape (n, self.d); the result is an n-dimensional
     MVGaussianMeanCov with mean A m and covariance A C A'.
     """
     out_dim, in_dim = util.extract_shape(A)
     assert (in_dim == self.d)
     mapped_mean = tf.matmul(A, self.mean)
     mapped_cov = tf.matmul(tf.matmul(A, self.cov), tf.transpose(A))
     return MVGaussianMeanCov(mapped_mean, mapped_cov, d=out_dim)