Example #1
import torch

def forward(self, ent, ent_mask, ent_len, rel, rel_mask, graphs):
    # `graphs` is a batched DGL graph; `NODE_TYPE` and `pad` are project helpers.
    device = ent.device
    ent_mask = (ent_mask == 0)  # invert: True now marks real (non-padding) slots
    rel_mask = (rel_mask == 0)
    # Collect every graph's unpadded entity and relation vectors, in node
    # order, as the initial node features.
    init_h = []
    for i in range(graphs.batch_size):
        init_h.append(ent[i][ent_mask[i]])
        init_h.append(rel[i][rel_mask[i]])
    init_h = torch.cat(init_h, 0)
    feats = init_h
    if graphs.number_of_nodes() != len(init_h):
        print('Err', graphs.number_of_nodes(), len(init_h), ent_mask, rel_mask)
    else:
        # run `self.prop` rounds of GAT propagation over the batched graph
        for i in range(self.prop):
            feats = self.gat[i](graphs, feats)
    # read out the root node of each graph ...
    g_root = feats.index_select(
        0,
        graphs.filter_nodes(
            lambda x: x.data['type'] == NODE_TYPE['root']).to(device))
    # ... and the entity nodes, regrouped per graph and re-padded
    g_ent = pad(
        feats.index_select(
            0,
            graphs.filter_nodes(
                lambda x: x.data['type'] == NODE_TYPE['entity']).to(device)
        ).split(ent_len),
        out_type='tensor')
    return g_ent, g_root
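The snippet leans on a project-level pad helper that re-batches a list of variable-length tensors; its implementation is not shown on this page. A minimal sketch under the assumption that it wraps torch.nn.utils.rnn.pad_sequence (the name and the out_type keyword come from the usage above; everything else is guessed):

import torch
from torch.nn.utils.rnn import pad_sequence

def pad(tensors, out_type='tensor', padding_value=0.0):
    # Stack a sequence of (len_i, dim) tensors into one (batch, max_len, dim)
    # tensor, padding shorter entries with `padding_value`.
    if out_type == 'tensor':
        return pad_sequence(list(tensors), batch_first=True,
                            padding_value=padding_value)
    return list(tensors)  # hypothetical fallback for other out_type values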
Example #2
def get_sentiment():
    # `model`, `graph`, `data`, and `words_to_indices` are module-level
    # globals set up at application start.
    text = request.get_json(force=True)['text']
    words = text_to_word_sequence(text)
    # Map each word to its vocabulary index (shifted by `data.offset`);
    # unknown or too-rare words fall back to the OOV index.
    word_indices = [
        words_to_indices[word] + data.offset if word in words_to_indices
        and words_to_indices[word] < data.top_words else data.oov_index
        for word in words
    ]
    # Prepend the start-of-sequence token and pad to the model's input length.
    sequence = data.pad([[data.start_index] + word_indices])
    with graph.as_default():  # TF1-style: reuse the graph captured at load time
        sentiment = model.predict(sequence)
    return '{0:.2f}% positive'.format(sentiment[0][0] * 100)
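The handler presumes a Flask app and the globals above are created at startup; the with graph.as_default(): block is the usual TF1/Keras workaround for serving a model from Flask's worker threads. A minimal wiring sketch under those assumptions (route path and model file name are hypothetical, and the project-specific `data` and `words_to_indices` objects are not sketched):

import tensorflow as tf
from flask import Flask, request
from keras.models import load_model
from keras.preprocessing.text import text_to_word_sequence

app = Flask(__name__)
model = load_model('sentiment.h5')  # hypothetical file name
graph = tf.get_default_graph()      # capture once; worker threads reuse it

app.add_url_rule('/sentiment', view_func=get_sentiment, methods=['POST'])
# e.g.: curl -X POST -H 'Content-Type: application/json' \
#            -d '{"text": "great movie"}' http://localhost:5000/sentiment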
Example #3
from torch.nn.utils.rnn import pack_padded_sequence

def forward(self, inp, mask, ent_len=None):
    inp = self.drop(inp)
    # mask == 0 marks real tokens, so the row-wise sum gives each length
    lens = (mask == 0).sum(-1).long().tolist()
    pad_seq = pack_padded_sequence(inp,
                                   lens,
                                   batch_first=True,
                                   enforce_sorted=False)
    y, (_h, _c) = self.bilstm(pad_seq)
    _h = _h.transpose(0, 1).contiguous()
    _h = _h[:, -2:].view(_h.size(0), -1)  # two directions of the top layer
    # regroup the per-entity vectors by graph and pad to a batch tensor
    ret = pad(_h.split(ent_len), out_type='tensor')
    return ret
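This forward belongs to an entity encoder whose constructor is not shown. A minimal sketch of a module it would type-check against (class name, dimensions, and dropout rate are assumptions; only self.drop and self.bilstm are required by the code above):

import torch.nn as nn

class EntityEncoder(nn.Module):  # hypothetical name
    def __init__(self, dim_emb, dim_h, dropout=0.1):
        super().__init__()
        self.drop = nn.Dropout(dropout)
        # bidirectional, so the top layer contributes the last two rows of h_n
        self.bilstm = nn.LSTM(dim_emb, dim_h, batch_first=True,
                              bidirectional=True)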
Example #4
from torch.nn.utils.rnn import pack_padded_sequence

def forward(self, inp, mask, ent_len=None):
    inp = self.drop(inp)  # (tot_ent, max_ent_len, dim_emb)
    lens = (mask == 0).sum(-1).long().tolist()
    pad_seq = pack_padded_sequence(
        inp, lens, batch_first=True,
        enforce_sorted=False)  # data of shape (sum(ent_len_ij), dim_emb)
    # y: packed sequence (dim=dim_h*2), h_t of the last layer for each t
    y, (_h, _c) = self.bilstm(pad_seq)
    # (tot_ent, num_layers*num_directions, dim_h), hidden state for t=seq_len
    _h = _h.transpose(0, 1).contiguous()
    # two directions of the top layer: (tot_ent, dim_h*2 = d)
    _h = _h[:, -2:].view(_h.size(0), -1)
    # _h.split: list of length bs; each element is a (num_ent_i, d) tensor
    ret = pad(_h.split(ent_len), out_type="tensor")  # (bs, max_num_ent, d)
    return ret
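To make the shape comments concrete, a small smoke test using the hypothetical EntityEncoder (with the forward above attached to it) and the pad sketch after Example #1; all sizes are made up:

import torch

enc = EntityEncoder(dim_emb=32, dim_h=64)
inp = torch.randn(5, 7, 32)              # 5 entity mentions, up to 7 tokens each
mask = torch.ones(5, 7)                  # nonzero marks padding in this convention
for i, n in enumerate([7, 4, 3, 5, 2]):  # real token counts per mention
    mask[i, :n] = 0
out = enc(inp, mask, ent_len=[2, 3])     # a batch of 2 graphs with 2 and 3 entities
print(out.shape)                         # torch.Size([2, 3, 128])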