Example #1
# -*- coding: utf-8 -*-
import datetime
import tensorflow.compat.v1 as tf

import os
import sys

import reader
from BatchGenator import BatchGenerator
from GeneratePoetryModel import GeneratePoetryModel

# compat.v1 placeholders below require graph mode, so disable eager execution
tf.disable_eager_execution()

dictionary, poetry_vectors, _ = reader.build_dataset()

# id of the blank character ' ', passed to BatchGenerator as the padding value
empty_key = dictionary.get(' ')

batch_size = 64

batch_generator = BatchGenerator(poetry_vectors, batch_size, empty_key)

# x_data, y_data = batch_generator.next()

# input/output dimension: vocabulary size plus one
input_size = output_size = len(dictionary) + 1

# placeholders for input word-id sequences and their target sequences
train_data = tf.placeholder(tf.int32, [batch_size, None])
train_label = tf.placeholder(tf.int32, [batch_size, None])

model = GeneratePoetryModel(X=train_data,
                            batch_size=batch_size,
                            input_size=input_size,
                            output_size=output_size)
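
# A minimal training-loop sketch (not part of the original snippet). It assumes,
# hypothetically, that GeneratePoetryModel exposes `train_op` and `loss`
# attributes and that its loss is computed against `train_label`; the real
# class may expose different names.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # each batch is a pair of input sequences and their shifted targets
        x_data, y_data = batch_generator.next()
        _, loss_value = sess.run([model.train_op, model.loss],
                                 feed_dict={train_data: x_data,
                                            train_label: y_data})
        if step % 100 == 0:
            print(datetime.datetime.now(), 'step', step, 'loss', loss_value)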
Example #2
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow.compat.v1 as tf

import reader
from GeneratePoetryModel import GeneratePoetryModel

# compat.v1 placeholders require graph mode
tf.disable_eager_execution()
dictionary, _, reversed_dictionary = reader.build_dataset()


def to_word(weights):
    """
    通过传入的权重,计算向量的概率分布并通过随机采样获得最接近的词语,
    类似遗传算法的选择步骤。(个人认为不够严谨)
    """
    t = np.cumsum(weights)
    s = np.sum(weights)
    sample = int(np.searchsorted(t, np.random.rand(1) * s))
    return reversed_dictionary[sample]
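
# A quick toy check of the sampling rule used above (not part of the original
# snippet): with weights heavily favouring index 2, most draws land on 'c'.
_toy_weights = np.array([0.1, 0.1, 0.8])
_toy_words = {0: 'a', 1: 'b', 2: 'c'}
_toy_draws = [_toy_words[int(np.searchsorted(np.cumsum(_toy_weights),
                                             np.random.rand(1) * np.sum(_toy_weights)))]
              for _ in range(10)]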


# the input is a single word; the next word is predicted from the previous one
input_data = tf.placeholder(tf.int32, [1, None])
# input and output dimension: vocabulary size plus one
input_size = output_size = len(reversed_dictionary) + 1
# build the generation model (batch size 1, one word at a time)
model = GeneratePoetryModel(X=input_data,
                            batch_size=1,
                            input_size=input_size,
                            output_size=output_size)
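
# A minimal generation sketch (not part of the original snippet). It assumes,
# hypothetically, that the model exposes a `prediction` tensor of per-word
# probabilities; restoring trained weights with tf.train.Saver is omitted here.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    word = ' '                                    # start from the blank word
    generated = ''
    for _ in range(20):                           # generate a fixed number of words
        x = np.array([[dictionary[word]]])
        probs = sess.run(model.prediction, feed_dict={input_data: x})
        word = to_word(probs[-1])                 # feed each sampled word back in
        generated += word
    print(generated)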