Example #1
0
def init_mm_params(nb_components, latent_dims, alpha_scale=.1, beta_scale=1e-5, v_init=10., m_scale=1., C_scale=10.,
                   seed=0, as_variables=True, trainable=False, device='/gpu:0', name='gmm'):
    """Initialize the natural parameters of a Dirichlet + Normal-inverse-Wishart mixture prior.

    Args:
        nb_components: number of mixture components K.
        latent_dims: dimensionality of the latent space.
        alpha_scale: scale of the Dirichlet concentration initializer.
        beta_scale: scale of the NiW beta (mean-precision) initializer.
        v_init: offset added to `latent_dims` to obtain the NiW degrees of freedom.
        m_scale: half-width of the uniform range used to initialize the means.
        C_scale: scale applied to the identity covariance initializer.
        seed: seed for the random mean initialization.
        as_variables: if True, wrap the parameters in TF variables on `device`.
        trainable: whether the created variables are trainable.
        device: device the variables are placed on (only used if `as_variables`).
        name: variable scope name for the created variables.

    Returns:
        Tuple `(alpha, A, b, beta, v_hat)` of natural parameters, as plain
        tensors or as variables depending on `as_variables`.
    """

    with tf.name_scope('gmm_initialization'):
        alpha_init = alpha_scale * tf.ones((nb_components,))
        beta_init = beta_scale * tf.ones((nb_components,))
        # Keep the tiled degrees-of-freedom tensor in its own local instead of
        # rebinding (shadowing) the scalar `v_init` parameter.
        dof_init = tf.tile([float(latent_dims + v_init)], [nb_components])
        means_init = m_scale * tf.random_uniform((nb_components, latent_dims), minval=-1, maxval=1, seed=seed)
        covariance_init = C_scale * tf.tile(tf.expand_dims(tf.eye(latent_dims), axis=0), [nb_components, 1, 1])

        # transform standard parameters to natural parameters
        A, b, beta, v_hat = niw.standard_to_natural(beta_init, means_init, covariance_init, dof_init)
        alpha = dirichlet.standard_to_natural(alpha_init)

        # optionally wrap the parameters in (non-)trainable variables
        if as_variables:
            with tf.variable_scope(name):
                alpha = variable_on_device('alpha_k', shape=None, initializer=alpha, trainable=trainable, device=device)
                A = variable_on_device('beta_k', shape=None, initializer=A, trainable=trainable, device=device)
                b = variable_on_device('m_k', shape=None, initializer=b, trainable=trainable, device=device)
                beta = variable_on_device('C_k', shape=None, initializer=beta, trainable=trainable, device=device)
                v_hat = variable_on_device('v_k', shape=None, initializer=v_hat, trainable=trainable, device=device)

        return alpha, A, b, beta, v_hat
Example #2
0
    def __init__(self,
                 nb_components,
                 dimension,
                 alpha_scale=0.1,
                 beta_scale=1e-5,
                 v_init=10.,
                 m_scale=1.,
                 C_scale=10.,
                 name='normal_inverse_wishart'):
        """Build the Dirichlet + NiW prior and store it as fixed variables.

        Standard parameters are initialized first, converted to their natural
        parameterization, and then registered as non-trainable variables
        inside this module's variable scope.
        """
        super(NormalInverseWishart, self).__init__(name=name)
        with self._enter_variable_scope():
            # standard-parameter initializers
            concentration = alpha_scale * tf.ones((nb_components, ))
            mean_precision = beta_scale * tf.ones((nb_components, ))
            dof = tf.tile([float(dimension + v_init)], [nb_components])
            locs = m_scale * tf.random_uniform(
                (nb_components, dimension), minval=-1, maxval=1)
            covariances = tf.matrix_inverse(
                C_scale * tf.tile(tf.expand_dims(tf.eye(dimension), axis=0),
                                  [nb_components, 1, 1]))

            # convert to natural parameters
            A, b, beta, v_hat = niw.standard_to_natural(
                mean_precision, locs, covariances, dof)
            alpha = dirichlet.standard_to_natural(concentration)

            # register each natural parameter as a non-trainable variable
            for attr, var_name, init in (('alpha', 'alpha_k', alpha),
                                         ('A', 'beta_k', A),
                                         ('b', 'm_k', b),
                                         ('beta', 'C_k', beta),
                                         ('v_hat', 'v_k', v_hat)):
                setattr(self, attr,
                        tf.get_variable(var_name,
                                        dtype=tf.float32,
                                        initializer=init,
                                        trainable=False))
Example #3
0
def m_step_smm(smm_prior, r_nk):
    """
    Args:
        smm_prior: Dirichlet prior for Student-t mixture model
        r_nk: responsibilities

    Returns:
        Dirichlet parameter obtained by executing Archambeau and Verleysen's M-step in their VEM algorithm for SMMs
    """

    with tf.name_scope('m_step'):
        # recover the standard-form Dirichlet parameter from its natural form
        alpha_0 = dirichlet.natural_to_standard(smm_prior[0])

        # SMM-EM m-step: effective component counts, then updated concentrations
        counts = smm.update_Nk(r_nk)
        alpha_updated = smm.update_alphak(alpha_0, counts)

        # convert back to natural parameterization
        return tf.identity(dirichlet.standard_to_natural(alpha_updated),
                           name='alpha_star')
Example #4
0
def m_step(gmm_prior, x_samples, r_nk):
    """
    Args:
        gmm_prior: Dirichlet+NiW prior for Gaussian mixture model
        x_samples: samples of shape (N, S, L)
        r_nk: responsibilities of shape (N, K)

    Returns:
        Dirichlet+NiW parameters obtained by executing Bishop's M-step in the VEM algorithm for GMMs
    """

    with tf.name_scope('m_step'):
        # recover the standard-form prior parameters from their natural form
        alpha_0 = dirichlet.natural_to_standard(gmm_prior[0])
        beta_0, m_0, C_0, v_0 = niw.natural_to_standard(*gmm_prior[1:])

        # Bishop's GMM m-step; the x_k and S_k statistics are not needed here
        alpha_k, beta_k, m_k, C_k, v_k, _, _ = gmm.m_step(
            x_samples, r_nk, alpha_0, beta_0, m_0, C_0, v_0, name='gmm_m_step')

        # convert the updated standard parameters back to natural form
        A, b, beta, v_hat = niw.standard_to_natural(beta_k, m_k, C_k, v_k)
        alpha = dirichlet.standard_to_natural(alpha_k)
        return tf.tuple([alpha, A, b, beta, v_hat], name='theta_star')