Example #1
    def __init__(self, opt, emb_matrix=None):
        super(PositionAwareRNN, self).__init__()
        self.drop = nn.Dropout(opt['dropout'])
        self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
        if opt['pos_dim'] > 0:
            self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim'],
                    padding_idx=constant.PAD_ID)
        if opt['ner_dim'] > 0:
            self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim'],
                    padding_idx=constant.PAD_ID)
        
        input_size = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
        self.rnn = nn.LSTM(input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,
                dropout=opt['dropout'])
        self.linear = nn.Linear(opt['hidden_dim'], opt['num_class'])

        if opt['attn']:
            self.attn_layer = layers.PositionAwareAttention(opt['hidden_dim'],
                    opt['hidden_dim'], 2*opt['pe_dim'], opt['attn_dim'])
            self.pe_emb = nn.Embedding(constant.MAX_LEN * 2 + 1, opt['pe_dim'])

        self.opt = opt
        self.topn = self.opt.get('topn', 1e10)
        self.use_cuda = opt['cuda']
        self.emb_matrix = emb_matrix
        self.init_weights()
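For context, here is a minimal construction sketch. The option values are hypothetical, and it assumes the surrounding project (the PositionAwareRNN class plus its constant and layers modules) is importable, since the snippet shows only the constructor.

# Hypothetical option values; only keys read by the constructor above are set.
opt = {
    'vocab_size': 50000, 'emb_dim': 300, 'pos_dim': 30, 'ner_dim': 30,
    'hidden_dim': 200, 'num_layers': 2, 'dropout': 0.5, 'num_class': 42,
    'attn': True, 'pe_dim': 30, 'attn_dim': 200, 'cuda': False,
}
model = PositionAwareRNN(opt)  # emb_matrix is optional; without it, init_weights() uses its default initialization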
Example #2
    def __init__(self, opt, emb_matrix=None, ent_emb_matrix=None):
        super(PositionAwareRNN, self).__init__()
        self.drop = nn.Dropout(opt["dropout"])
        self.emb = nn.Embedding(opt["vocab_size"],
                                opt["emb_dim"],
                                padding_idx=constant.PAD_ID)
        self.ent_emb = None
        if ("ent_emb_dim" in opt and opt["ent_emb_dim"] > 0
                and "ent_vocab_size" in opt and opt["ent_vocab_size"] > 0):
            self.ent_emb = nn.Embedding(opt["ent_vocab_size"],
                                        opt["ent_emb_dim"],
                                        padding_idx=constant.PAD_ID)
        if opt["pos_dim"] > 0:
            self.pos_emb = nn.Embedding(len(constant.POS_TO_ID),
                                        opt["pos_dim"],
                                        padding_idx=constant.PAD_ID)
        if opt["ner_dim"] > 0:
            self.ner_emb = nn.Embedding(len(constant.NER_TO_ID),
                                        opt["ner_dim"],
                                        padding_idx=constant.PAD_ID)

        # "ent_emb_dim" may be absent from opt (see the guard above), so default it to 0
        input_size = (opt["emb_dim"] + opt.get("ent_emb_dim", 0) + opt["pos_dim"] +
                      opt["ner_dim"])
        self.rnn = nn.LSTM(
            input_size,
            opt["hidden_dim"],
            opt["num_layers"],
            batch_first=True,
            dropout=opt["dropout"],
        )
        self.linear = nn.Linear(opt["hidden_dim"], opt["num_class"])

        if opt["attn"]:
            self.attn_layer = layers.PositionAwareAttention(
                opt["hidden_dim"], opt["hidden_dim"], 2 * opt["pe_dim"],
                opt["attn_dim"])
            self.pe_emb = nn.Embedding(constant.MAX_LEN * 2 + 1, opt["pe_dim"])

        self.opt = opt
        self.topn = self.opt.get("topn", 1e10)
        self.emb_topn = self.opt.get("emb_topn", 0)
        self.use_cuda = opt["cuda"]
        self.emb_matrix = emb_matrix
        self.ent_emb_matrix = ent_emb_matrix
        self.init_weights()
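This variant adds optional entity embeddings on top of Example #1. The sketch below shows only the extra keys it reads; the values are hypothetical.

opt = {
    'vocab_size': 50000, 'emb_dim': 300, 'pos_dim': 30, 'ner_dim': 30,
    'hidden_dim': 200, 'num_layers': 2, 'dropout': 0.5, 'num_class': 42,
    'attn': True, 'pe_dim': 30, 'attn_dim': 200, 'cuda': False,
    # extra keys read by this variant:
    'ent_vocab_size': 1000, 'ent_emb_dim': 50,  # set either to 0 (or omit) to disable entity embeddings
    'emb_topn': 0,                              # optional, defaults to 0
}
model = PositionAwareRNN(opt)  # emb_matrix / ent_emb_matrix may be passed to load pretrained vectors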
Example #3
    def __init__(self, opt, emb_matrix=None):
        super(PositionAwareRNN, self).__init__()
        self.object_indices = opt['object_indices']
        self.drop = nn.Dropout(opt['dropout'])
        self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
        if opt['pos_dim'] > 0:
            self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim'],
                    padding_idx=constant.PAD_ID)
        if opt['ner_dim'] > 0:
            self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim'],
                    padding_idx=constant.PAD_ID)
        
        input_size = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
        self.rnn = nn.LSTM(input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,
                dropout=opt['dropout'])
        self.linear = nn.Linear(opt['hidden_dim'], opt['num_class'])

        if opt['attn']:
            self.attn_layer = layers.PositionAwareAttention(opt['hidden_dim'],
                    opt['hidden_dim'], 2*opt['pe_dim'], opt['attn_dim'])
            self.pe_emb = nn.Embedding(constant.MAX_LEN * 2 + 1, opt['pe_dim'])

        # Optional link-prediction (LP) head, configured through opt['link_prediction']
        link_prediction_opt = opt.get('link_prediction')
        if link_prediction_opt is not None:
            link_prediction_cfg = link_prediction_opt['model']
            self.rel_emb = nn.Embedding(opt['num_relations'], link_prediction_cfg['rel_emb_dim'])
            self.register_parameter('rel_bias', torch.nn.Parameter(torch.zeros(opt['num_relations'])))
            self.object_indices = torch.from_numpy(np.array(self.object_indices))  # requires numpy imported as np
            if opt['cuda']:
                self.object_indices = self.object_indices.cuda()
            self.lp_model = initialize_link_prediction_model(link_prediction_cfg)

        self.opt = opt
        self.topn = self.opt.get('topn', 1e10)
        self.use_cuda = opt['cuda']
        self.emb_matrix = emb_matrix
        self.init_weights()
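This variant optionally attaches a link-prediction head. The sketch below shows the keys it reads beyond Example #1 with hypothetical values; the exact contents of the 'model' sub-config depend on initialize_link_prediction_model in the surrounding project and are assumed here.

opt = {
    'vocab_size': 50000, 'emb_dim': 300, 'pos_dim': 30, 'ner_dim': 30,
    'hidden_dim': 200, 'num_layers': 2, 'dropout': 0.5, 'num_class': 42,
    'attn': True, 'pe_dim': 30, 'attn_dim': 200, 'cuda': False,
    # extra keys read by this variant:
    'object_indices': [4, 7, 11],                          # candidate object ids (hypothetical)
    'num_relations': 42,
    'link_prediction': {'model': {'rel_emb_dim': 100}},    # omit this key to skip the LP head
}
model = PositionAwareRNN(opt)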