Code example #1
import numpy as np
import chainer.links as L

from lda2vec import dirichlet_likelihood  # import path assumed from the lda2vec package


def test_embed():
    """ Test that an embedding (a chainer Link) is treated like a Variable."""

    embed_dense = L.EmbedID(5, 10)
    embed_sparse = L.EmbedID(5, 10)
    embed_dense.W.data[:] = np.random.randn(5, 10).astype('float32')
    embed_sparse.W.data[:] = np.random.randn(5, 10).astype('float32')
    # shrink all but the first column so each row is nearly one-hot (sparse)
    embed_sparse.W.data[:, 1:] /= 1e5
    dhl_dense_01 = dirichlet_likelihood(embed_dense, alpha=0.1).data
    dhl_sparse_01 = dirichlet_likelihood(embed_sparse, alpha=0.1).data

    msg = "Sparse vector has higher likelihood than dense with alpha=0.1"
    assert dhl_sparse_01 > dhl_dense_01, msg
Code example #2
def forward(self, doc, wrd, window=5):
    doc, wrd = utils.move(self.xp, doc, wrd)
    proportions = self.proportions(doc)
    # Dirichlet prior over the raw (pre-softmax) document-topic weights
    ld = dirichlet_likelihood(self.proportions.W)
    context = F.matmul(F.softmax(proportions), self.factors())
    loss = self.loss_func(context, wrd)
    return loss, ld
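The forward pass returns the word-prediction loss and the Dirichlet log-likelihood separately, so the caller chooses how to weight the prior. Below is a minimal training-step sketch, assuming a standard Chainer optimizer; `model`, `optimizer`, `doc`, `wrd`, and `strength` are hypothetical names, not taken from the example above.

# Hypothetical training step; every name here is illustrative.
model.cleargrads()
loss, ld = model.forward(doc, wrd)
# ld is a log-likelihood (higher is better), so subtract it from the loss
total = loss - strength * ld
total.backward()
optimizer.update()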
Code example #3
import numpy as np
from chainer import Variable

from lda2vec import dirichlet_likelihood  # import path assumed from the lda2vec package


def test_concentration():
    """ Test that alpha > 1.0 on a dense vector has a higher likelihood
    than alpha < 1.0 on a dense vector, and test that a sparse vector
    has the opposite character. """

    dense = np.random.randn(5, 10).astype('float32')
    sparse = np.random.randn(5, 10).astype('float32')
    sparse[:, 1:] /= 1e5
    weights = Variable(dense)
    dhl_dense_10 = dirichlet_likelihood(weights, alpha=10.0).data
    dhl_dense_01 = dirichlet_likelihood(weights, alpha=0.1).data
    weights = Variable(sparse)
    dhl_sparse_10 = dirichlet_likelihood(weights, alpha=10.0).data
    dhl_sparse_01 = dirichlet_likelihood(weights, alpha=0.1).data

    msg = "Sparse vector has higher likelihood than dense with alpha=0.1"
    assert dhl_sparse_01 > dhl_dense_01, msg
    msg = "Dense vector has higher likelihood than sparse with alpha=10.0"
    assert dhl_dense_10 > dhl_sparse_10, msg
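Together, the two tests pin down the function's behavior: under a symmetric Dirichlet with concentration alpha, the log-density of the softmax-normalized rows is, up to an additive constant, (alpha - 1) * sum(log p). A minimal sketch consistent with that behavior and with the Variable-or-Link handling exercised in example #1 follows; it is a reconstruction, not the library's exact source.

import chainer.functions as F
from chainer import Variable


def dirichlet_likelihood(weights, alpha=None):
    """ Log likelihood of softmax-normalized weights under a
    symmetric Dirichlet(alpha), up to an additive constant. """
    # accept either a raw Variable or a Link with a .W parameter
    W = weights if isinstance(weights, Variable) else weights.W
    n_topics = W.data.shape[1]
    if alpha is None:
        # uniform default (1/n_topics), matching the comment in example #9
        alpha = 1.0 / n_topics
    log_proportions = F.log_softmax(W)
    # alpha < 1 rewards sparse rows; alpha > 1 rewards dense rows
    return F.sum((alpha - 1.0) * log_proportions)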
Code example #4
File: lda.py Project: 313-Ventures/lda2vec
def forward(self, ids, bow):
    bow, ids = utils.move(self.xp, bow, ids)
    # proportions: (batch, n_topics) unnormalized document-topic weights
    proportions = self.proportions(ids)
    ld = dirichlet_likelihood(proportions)
    # doc: (batch, n_units) mixture of the topic factor vectors
    doc = F.matmul(F.softmax(proportions), self.factors())
    logp = F.dropout(self.embedding(doc))
    # per-document log probability of the observed bag of words
    lpi = F.sum(bow * F.log_softmax(logp), axis=1)
    loss = -F.sum(lpi)
    return loss, ld
Code example #5
File: lda2vec_model.py Project: weiweiyan/lda2vec
def prior(self):
    dl1 = dirichlet_likelihood(self.mixture.weights)
    return dl1
Code example #6
def prior(self):
    """ Returns the log likelihood of the observed topic proportions."""

    dl1 = dirichlet_likelihood(self.mixture.weights)
    return dl1
Code example #7
File: lda2vec_model.py Project: 313-Ventures/lda2vec
def prior(self):
    # independent priors for the style and author mixtures
    dl1 = dirichlet_likelihood(self.mixture_sty.weights)
    dl2 = dirichlet_likelihood(self.mixture_aut.weights)
    return dl1 + dl2
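Summing the two calls places independent symmetric Dirichlet priors on the style and author mixtures; the next example shows the same method with the author term commented out.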
Code example #8
def prior(self):
    dl1 = dirichlet_likelihood(self.mixture_sty.weights)
    # dl2 = dirichlet_likelihood(self.mixture_aut.weights)
    return dl1  # + dl2
Code example #9
def prior(self):
    # defaults to initialization with a uniform prior (1/n_topics)
    return dirichlet_likelihood(self.mixture.W, alpha=self.alpha)
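Finally, a short usage sketch of the alpha settings exercised throughout these examples; the package-level import path is an assumption carried over from the tests above.

import numpy as np
from chainer import Variable

from lda2vec import dirichlet_likelihood

weights = Variable(np.random.randn(4, 8).astype('float32'))
ld_default = dirichlet_likelihood(weights)            # default alpha assumed 1/n_topics
ld_dense = dirichlet_likelihood(weights, alpha=10.0)  # alpha > 1 favors dense rows
ld_sparse = dirichlet_likelihood(weights, alpha=0.1)  # alpha < 1 favors sparse rows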