# Example #1
# Demo: train a language model, score and sample a sentence, then
# round-trip the model through disk and verify the reloaded copy.

print()

# Fit the model on the prepared corpus, checkpointing into work_dir.
lm.train(training_data, epochs=5, backup_directory=work_dir, log_interval=20)
print()

# Score and sample with the freshly trained model.
normalized_sentence = normalizer.normalize(sents[0])
joined_sentence = ' '.join(normalized_sentence)
print('normalized sentence:')
print(joined_sentence)
print('probability: ', lm.sentence_log_probability(normalized_sentence))
print()
start_tag = normalized_sentence[0]
end_tag = normalized_sentence[-1]
print('sample:')
sampled_tokens = lm.sample([start_tag], end_tag=end_tag)
print(' '.join(sampled_tokens))
print()

# Persist the model, restore it from disk, and verify the clone
# reproduces the original's scoring and sampling behaviour.
lm.save(lm_file)
print()
lm_clone = LanguageModel(lm_file=lm_file)
print()
print('probability: ', lm_clone.sentence_log_probability(normalized_sentence))
print()
print('sample:')
clone_tokens = lm_clone.sample([start_tag], end_tag=end_tag)
print(' '.join(clone_tokens))
print()

# Demonstrate the predict and token_probabilities helpers.
print('predict:')
# Example #2
# Demo: train a language model, evaluate it on one sentence, then
# save it, reload it, and confirm the reloaded model matches.

print()

# Train on the prepared data; backups are written to work_dir.
lm.train(training_data, epochs=5, backup_directory=work_dir, log_interval=20)
print()

# Evaluate the trained model: normalize a sentence, score it, sample.
normalized_sentence = normalizer.normalize(sents[0])
print('normalized sentence:')
print(' '.join(normalized_sentence))
log_prob = lm.sentence_log_probability(normalized_sentence)
print('probability: ', log_prob)
print()
start_tag, end_tag = normalized_sentence[0], normalized_sentence[-1]
print('sample:')
print(' '.join(lm.sample([start_tag], end_tag=end_tag)))
print()

# Save the model, load it back, and re-run the same checks on the clone.
lm.save(lm_file)
print()
lm_clone = LanguageModel(lm_file=lm_file)
print()
clone_log_prob = lm_clone.sentence_log_probability(normalized_sentence)
print('probability: ', clone_log_prob)
print()
print('sample:')
print(' '.join(lm_clone.sample([start_tag], end_tag=end_tag)))
print()

# Show the predict and token_probabilities helpers in action.
print('predict:')