def __init__(self, params):
        super(SequenceToSequence, self).__init__()
        # load the pretrained word2vec weight matrix
        self.embedding_matrix = load_word2vec(params)
        self.params = params
        """
        batch_size  : 256
        embed_size  : 256
        enc_units   : 256
        dec_units   : 256
        attn_units  : 256
        """
        self.encoder = rnn_encoder.Encoder(params["vocab_size"],
                                           params["embed_size"],
                                           params["enc_units"],
                                           params["batch_size"],
                                           self.embedding_matrix)

        # the attention scores use BahdanauAttention's additive formulation
        self.attention = rnn_decoder.BahdanauAttention(params["attn_units"])

        self.decoder = rnn_decoder.Decoder(params["vocab_size"],
                                           params["embed_size"],
                                           params["dec_units"],
                                           params["batch_size"],
                                           self.embedding_matrix)
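
To see how these three components interact, here is a minimal sketch of a forward pass with teacher forcing. The Encoder/Decoder call signatures are assumptions for illustration, not this repository's actual API: the encoder is assumed to return its outputs plus final hidden state, and the decoder to consume one target token per step together with the attention context.

import tensorflow as tf

def call(self, enc_inp, dec_inp):
    # encode the full source sequence
    enc_output, enc_hidden = self.encoder(enc_inp)
    dec_hidden = enc_hidden                      # initialize the decoder state from the encoder
    predictions = []
    for t in range(dec_inp.shape[1]):            # teacher forcing: feed gold target tokens
        context_vector, attn_weights = self.attention(dec_hidden, enc_output)
        pred, dec_hidden = self.decoder(tf.expand_dims(dec_inp[:, t], 1),
                                        dec_hidden, context_vector)
        predictions.append(pred)
    return tf.stack(predictions, axis=1)         # (batch, tgt_len, vocab_size)
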
 def __init__(self, params):
     super(PGN, self).__init__()
     self.embedding_matrix = load_word2vec(params)
     self.params = params
     self.encoder = Encoder(params["vocab_size"], params["embed_size"],
                            params["enc_units"], params["batch_size"],
                            self.embedding_matrix)
     self.attention = BahdanauAttention(params["attn_units"])
     self.decoder = Decoder(params["vocab_size"], params["embed_size"],
                            params["dec_units"], params["batch_size"],
                            self.embedding_matrix)
     self.pointer = Pointer()
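
The Pointer layer produces the generation probability p_gen from See et al. (2017). Since the constructor takes no arguments, three Dense(1) projections are a reasonable guess at its internals; this is a sketch, not the confirmed implementation:

import tensorflow as tf

class Pointer(tf.keras.layers.Layer):
    """Computes p_gen = sigmoid(w_c . context + w_s . dec_state + w_x . dec_input)."""
    def __init__(self):
        super(Pointer, self).__init__()
        self.w_c = tf.keras.layers.Dense(1)  # projects the attention context vector
        self.w_s = tf.keras.layers.Dense(1)  # projects the decoder state
        self.w_x = tf.keras.layers.Dense(1)  # projects the decoder input embedding

    def call(self, context_vector, dec_state, dec_inp):
        return tf.nn.sigmoid(self.w_c(context_vector) +
                             self.w_s(dec_state) +
                             self.w_x(dec_inp))
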
 def __init__(self, params):
     super(SequenceToSequence, self).__init__()
     self.embedding_matrix = load_word2vec(params)
     self.params = params
     self.encoder = rnn_encoder.Encoder(params["vocab_size"],
                                        params["embed_size"],
                                        params["enc_units"],
                                        params["batch_size"],
                                        self.embedding_matrix)
     self.attention = rnn_decoder.BahdanauAttention(params["attn_units"])
     self.decoder = rnn_decoder.Decoder(params["vocab_size"],
                                        params["embed_size"],
                                        params["dec_units"],
                                        params["batch_size"],
                                        self.embedding_matrix)
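
For reference, the hyperparameters documented in the first snippet assemble into a params dict like the one below. The vocab_size value is a placeholder; the real value comes from the project's vocabulary file.

params = {
    "vocab_size": 30000,   # placeholder; set from the actual vocabulary
    "embed_size": 256,
    "enc_units": 256,
    "dec_units": 256,
    "attn_units": 256,
    "batch_size": 256,
}
model = SequenceToSequence(params)
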
Example #4
 def __init__(self, params):
     super(SequenceToSequence, self).__init__()
     self.embedding_matrix = load_word2vec(params)  # word-embedding matrix
     self.params = params
     self.encoder = rnn_encoder.Encoder(params["vocab_size"],  # number of words in the vocabulary
                                        params["embed_size"],  # embedding vector size
                                        params["enc_units"],  # number of encoder units
                                        params["batch_size"],  # batch size for loading data
                                        self.embedding_matrix)
     self.attention = rnn_decoder.BahdanauAttention(params["attn_units"])  # number of attention units
     self.decoder = rnn_decoder.Decoder(params["vocab_size"],
                                        params["embed_size"],
                                        params["dec_units"], # decoder 单元个数
                                        params["batch_size"],
                                        self.embedding_matrix)
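
Every variant starts from load_word2vec. A plausible implementation, consistent with the comment in the next example (the table is built row-by-row from the custom vocabulary, with all-zero rows for words the word2vec model lacks); the "word2vec_path" and "vocab" keys are illustrative assumptions:

import numpy as np
from gensim.models import KeyedVectors

def load_word2vec(params):
    """Builds an embedding matrix row-aligned with the custom vocabulary.

    Words absent from the word2vec model keep an all-zero row.
    """
    wv = KeyedVectors.load(params["word2vec_path"])
    matrix = np.zeros((params["vocab_size"], params["embed_size"]), dtype=np.float32)
    for word, idx in params["vocab"].items():      # vocab: word -> row index
        if word in wv:
            matrix[idx] = wv[word]
    return matrix
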
Example #5
 def __init__(self, params):
     super(PGN, self).__init__()
     self.embedding_matrix = load_word2vec(
         params)  # vectors first come into play here: build the embedding table from the custom vocabulary, zero-filling words missing from word2vec
     self.params = params
     self.encoder = rnn_encoder.Encoder(params["vocab_size"],
                                        params["embed_size"],
                                        params["enc_units"],
                                        params["batch_size"],
                                        self.embedding_matrix)
     self.attention = rnn_decoder.BahdanauAttentionCoverage(
         params["attn_units"])
     self.decoder = rnn_decoder.Decoder(params["vocab_size"],
                                        params["embed_size"],
                                        params["dec_units"],
                                        params["batch_size"],
                                        self.embedding_matrix)
     self.pointer = rnn_decoder.Pointer()
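
BahdanauAttentionCoverage extends additive attention with a coverage vector, the running sum of past attention distributions, which is fed into the score so the model is penalized for attending to the same source positions repeatedly. A minimal sketch of that scoring step following See et al.; the layer and argument names are assumptions:

import tensorflow as tf

class BahdanauAttentionCoverage(tf.keras.layers.Layer):
    def __init__(self, units):
        super(BahdanauAttentionCoverage, self).__init__()
        self.W_h = tf.keras.layers.Dense(units)  # projects encoder outputs
        self.W_s = tf.keras.layers.Dense(units)  # projects the decoder state
        self.W_c = tf.keras.layers.Dense(units)  # projects the coverage vector
        self.V = tf.keras.layers.Dense(1)

    def call(self, dec_hidden, enc_output, coverage):
        dec_hidden = tf.expand_dims(dec_hidden, 1)       # (batch, 1, dec_units)
        cov = tf.expand_dims(coverage, -1)               # (batch, src_len, 1)
        # e_t,i = V^T tanh(W_h h_i + W_s s_t + W_c c_t,i)
        score = self.V(tf.nn.tanh(self.W_h(enc_output) +
                                  self.W_s(dec_hidden) +
                                  self.W_c(cov)))        # (batch, src_len, 1)
        attn = tf.nn.softmax(score, axis=1)              # attention distribution
        context = tf.reduce_sum(attn * enc_output, axis=1)   # (batch, enc_units)
        coverage = coverage + tf.squeeze(attn, -1)       # accumulate coverage
        return context, tf.squeeze(attn, -1), coverage
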
    def __init__(self, params):
        super(SequenceToSequence, self).__init__()
        self.embedding_matrix = load_word2vec(params)
        self.params = params
        print(params["batch_size"])  # debug: show the configured batch size
        self.encoder = encoder.Encoder(vocab_size=params["vocab_size"],
                                       embedding_dim=params["embed_size"],
                                       embedding_matrix=self.embedding_matrix,
                                       enc_units=params["enc_units"],
                                       batch_size=params["batch_size"])

        self.attention = attention.BahdanauAttention(units=params["attn_units"])  # attention units

        self.decoder = decoder.Decoder(vocab_size=params["vocab_size"],
                                       embedding_dim=params["embed_size"],
                                       embedding_matrix=self.embedding_matrix,
                                       dec_units=params["dec_units"],
                                       batch_size=params["batch_size"])
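
An Encoder with this keyword signature typically wraps an Embedding layer initialized from the pretrained matrix plus a recurrent layer. A minimal sketch under those assumptions (a GRU is assumed here; the repository may use a different cell):

import tensorflow as tf

class Encoder(tf.keras.layers.Layer):
    def __init__(self, vocab_size, embedding_dim, embedding_matrix, enc_units, batch_size):
        super(Encoder, self).__init__()
        self.batch_size = batch_size
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(
            vocab_size, embedding_dim,
            embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
            trainable=False)  # start from (and freeze) the pretrained word2vec rows
        self.gru = tf.keras.layers.GRU(enc_units,
                                       return_sequences=True,
                                       return_state=True)

    def call(self, x, hidden=None):
        x = self.embedding(x)                   # (batch, src_len, embedding_dim)
        output, state = self.gru(x, initial_state=hidden)
        return output, state                    # (batch, src_len, enc_units), (batch, enc_units)
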
    def __init__(self, params):
        super(PGN, self).__init__()
        self.embedding_matrix = load_word2vec(
            params, max_vocab_size=params["vocab_size"])
        self.params = params
        self.encoder = encoder.Encoder(vocab_size=params["vocab_size"],
                                       embedding_dim=params["embed_size"],
                                       embedding_matrix=self.embedding_matrix,
                                       enc_units=params["enc_units"],
                                       batch_size=params["batch_size"])

        self.attention = BahdanauAttention(units=params["attn_units"])

        self.decoder = Decoder(vocab_size=params["vocab_size"],
                               embedding_dim=params["embed_size"],
                               embedding_matrix=self.embedding_matrix,
                               dec_units=params["dec_units"],
                               batch_size=params["batch_size"])
        self.pointer = Pointer()
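
At each decoding step, the pointer-generator mixes the decoder's vocabulary distribution with the copy (attention) distribution using p_gen, scattering attention mass onto an extended vocabulary that includes the source's out-of-vocabulary words. A minimal sketch of that combination; the function name and shapes are assumptions:

import tensorflow as tf

def final_distribution(vocab_dist, attn_dist, p_gen, enc_inp_extended, batch_oov_count):
    """P(w) = p_gen * P_vocab(w) + (1 - p_gen) * sum_i a_i[w].

    vocab_dist: (batch, vocab_size); attn_dist: (batch, src_len);
    enc_inp_extended: (batch, src_len) token ids in the extended vocabulary;
    batch_oov_count: max number of source OOV words in the batch.
    """
    vocab_dist = p_gen * vocab_dist
    attn_dist = (1.0 - p_gen) * attn_dist
    # pad P_vocab with zero probability for the in-batch OOV ids
    batch_size = tf.shape(vocab_dist)[0]
    extra_zeros = tf.zeros([batch_size, batch_oov_count])
    vocab_dist_extended = tf.concat([vocab_dist, extra_zeros], axis=-1)
    # scatter attention mass onto the extended vocabulary by source token id
    attn_projected = tf.one_hot(enc_inp_extended, tf.shape(vocab_dist_extended)[1]) \
                     * tf.expand_dims(attn_dist, -1)
    attn_projected = tf.reduce_sum(attn_projected, axis=1)   # (batch, extended_vocab)
    return vocab_dist_extended + attn_projected
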