Example #1
# NOTE: assumes the Labeled-LDA-Python package; the module path below may
# differ depending on how the package is installed
import model.labeled_lda as llda

# data
labeled_documents = [
    ("example example example example example", ["example"]),
    ("test llda model test llda model test llda model", ["test",
                                                         "llda_model"]),
    ("example test example test example test example test",
     ["example", "test"]),
    ("good perfect good good perfect good good perfect good ", ["positive"]),
    ("bad bad down down bad bad down", ["negative"])
]

# new a Labeled LDA model
# llda_model = llda.LldaModel(labeled_documents=labeled_documents, alpha_vector="50_div_K", eta_vector=0.001)
# llda_model = llda.LldaModel(labeled_documents=labeled_documents, alpha_vector=0.02, eta_vector=0.002)
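# note: alpha_vector and eta_vector are the Dirichlet priors on the
# document-label and label-word distributions; "50_div_K" presumably
# stands for the common 50/K heuristic, where K is the number of labels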
llda_model = llda.LldaModel(labeled_documents=labeled_documents)
print(llda_model)

# training
llda_model.training(iteration=10, log=True)

# update
print "before updating: ", llda_model
update_labeled_documents = [
    ("new example test example test example test example test",
     ["example", "test"])
]
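# add the new labeled documents to the already-trained model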
llda_model.update(labeled_documents=update_labeled_documents)
print "after updating: ", llda_model

# train again
llda_model.training(iteration=10, log=True)
Example #2
# assumes the same Labeled-LDA-Python import as in Example #1
import model.labeled_lda as llda

# initialize data
labeled_documents = [
    ("example example example example example" * 10, ["example"]),
    ("test llda model test llda model test llda model" * 10,
     ["test", "llda_model"]),
    ("example test example test example test example test" * 10,
     ["example", "test"]),
    ("good perfect good good perfect good good perfect good " * 10,
     ["positive"]), ("bad bad down down bad bad down" * 10, ["negative"])
]

# new a Labeled LDA model
# llda_model = llda.LldaModel(labeled_documents=labeled_documents, alpha_vector="50_div_K", eta_vector=0.001)
# llda_model = llda.LldaModel(labeled_documents=labeled_documents, alpha_vector=0.02, eta_vector=0.002)
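# a small symmetric alpha (0.01) favors sparser per-document label mixtures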
llda_model = llda.LldaModel(labeled_documents=labeled_documents,
                            alpha_vector=0.01)
print(llda_model)

# training
# llda_model.training(iteration=10, log=True)
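# instead of a fixed number of iterations, sample one iteration at a time and
# stop once beta (the label-word distribution) changes by less than delta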
while True:
    print("iteration %s sampling..." % (llda_model.iteration + 1))
    llda_model.training(1)
    print("after iteration: %s, perplexity: %s" %
          (llda_model.iteration, llda_model.perplexity()))
    print("delta beta: %s" % llda_model.delta_beta)
    if llda_model.is_convergent(method="beta", delta=0.01):
        break

# update
print("before updating: ", llda_model)