def __init__(self, params):
    super(SequenceToSequence, self).__init__()
    # Load the pretrained word2vec weight matrix.
    self.embedding_matrix = load_word2vec(params)
    self.params = params
    # Typical hyperparameter settings:
    #   batch_size : 256
    #   embed_size : 256
    #   enc_units  : 256
    #   dec_units  : 256
    #   attn_units : 256
    self.encoder = rnn_encoder.Encoder(params["vocab_size"],   # vocabulary size
                                       params["embed_size"],   # word-embedding dimension
                                       params["enc_units"],    # number of encoder units
                                       params["batch_size"],   # mini-batch size
                                       self.embedding_matrix)
    # Attention scores are computed with Bahdanau (additive) attention.
    self.attention = rnn_decoder.BahdanauAttention(params["attn_units"])  # number of attention units
    self.decoder = rnn_decoder.Decoder(params["vocab_size"],
                                       params["embed_size"],
                                       params["dec_units"],    # number of decoder units
                                       params["batch_size"],
                                       self.embedding_matrix)
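The comment above refers to Bahdanau's additive scoring, score(s, h) = vᵀ tanh(W₁h + W₂s), where h is an encoder output and s the decoder hidden state. As a point of reference, here is a minimal sketch of such a layer in TensorFlow/Keras; it follows the standard additive formulation and is not necessarily identical to the rnn_decoder.BahdanauAttention used in this project:

import tensorflow as tf

class BahdanauAttention(tf.keras.layers.Layer):
    """Additive attention: score(s, h) = v^T tanh(W1 h + W2 s)."""

    def __init__(self, attn_units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(attn_units)  # projects encoder outputs
        self.W2 = tf.keras.layers.Dense(attn_units)  # projects decoder hidden state
        self.V = tf.keras.layers.Dense(1)            # collapses to a scalar score

    def call(self, dec_hidden, enc_output):
        # dec_hidden: (batch, dec_units) -> (batch, 1, dec_units) for broadcasting
        hidden_with_time_axis = tf.expand_dims(dec_hidden, 1)
        # Additive score for every encoder timestep: (batch, seq_len, 1)
        score = self.V(tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis)))
        # Normalize scores into attention weights over the source sequence.
        attention_weights = tf.nn.softmax(score, axis=1)
        # Context vector: weighted sum of encoder outputs, (batch, enc_units)
        context_vector = tf.reduce_sum(attention_weights * enc_output, axis=1)
        return context_vector, attention_weights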
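With the hyperparameters listed above, a params dict and model instantiation would look roughly like this (the vocab_size value is illustrative; in practice it comes from the loaded vocabulary):

params = {
    "vocab_size": 30000,  # illustrative; use the actual vocabulary size
    "embed_size": 256,
    "enc_units": 256,
    "dec_units": 256,
    "attn_units": 256,
    "batch_size": 256,
}
model = SequenceToSequence(params)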