def __init__(self, params): super(PGN, self).__init__() self.params = params self.encoder = Encoder(params["vocab_size"], params["embed_size"], params["enc_units"], params["batch_size"]) self.attention = LuongAttention(params["attn_units"]) self.decoder = Decoder(params["vocab_size"], params["embed_size"], params["dec_units"], params["batch_size"]) self.pointer = Pointer()
# PGN constructor using Bahdanau attention; an externally supplied embeddings
# matrix is passed through to both the encoder and the decoder.
def __init__(self, params, embeddings_matrix):
    super(PGN, self).__init__()
    self.params = params
    self.encoder = Encoder(params["vocab_size"], params["embed_size"],
                           params["enc_units"], params["batch_size"],
                           embeddings_matrix)
    self.attention = BahdanauAttention(params["attn_units"])
    self.decoder = Decoder(params["vocab_size"], params["embed_size"],
                           params["dec_units"], params["batch_size"],
                           embeddings_matrix)
    self.pointer = Pointer()
def __init__(self, params): super(PGN, self).__init__() self.params = params self.encoder = Encoder(params["vocab_size"], params["embed_size"], params["enc_units"], params["batch_size"]) self.attention = BahdanauAttention(params["attn_units"]) if params["coverage"]: self.coverage = Coverage(params["attn_units"]) self.decoder = Decoder(params["vocab_size"], params["embed_size"], params["dec_units"], params["batch_size"], params["use_stats"]) self.pointer = Pointer()
# PGN constructor that builds its own embedding matrix from a word2vec model
# and a vocabulary file on disk; relies on `os` and `get_embedding` being
# available at module level.
def __init__(self, params):
    super(PGN, self).__init__()
    word_model_path = os.path.join(os.path.abspath('../'), 'data', 'w2v.model')
    vocab_path = os.path.join(os.path.abspath('../'), 'data', 'words_frequences.txt')
    self.params = params
    self.matrix = get_embedding(vocab_path, word_model_path, params)
    self.encoder = Encoder(params["vocab_size"], params["embed_size"], self.matrix,
                           params["enc_units"], params["batch_size"])
    self.attention = BahdanauAttention(params["attn_units"])
    self.decoder = Decoder(params["vocab_size"], params["embed_size"], self.matrix,
                           params["dec_units"], params["batch_size"])
    self.pointer = Pointer()
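# Minimal usage sketch (not part of the original code): the key names mirror the
# ones read by the constructors above; all values below are hypothetical and
# would need to be set to match the actual vocabulary and training setup.
params = {
    "vocab_size": 30000,
    "embed_size": 256,
    "enc_units": 256,
    "dec_units": 256,
    "attn_units": 256,
    "batch_size": 32,
    "coverage": True,    # only read by the coverage-enabled variant
    "use_stats": False,  # only read by that variant's decoder
}
model = PGN(params)  # or PGN(params, embeddings_matrix) for the variant taking a matrix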