Example #1
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Unpack args
        mention_word_embs = kwargs['mention_word_embs']
        mention_ent_embs = kwargs['mention_ent_embs']
        autoencoder_state_dict = kwargs['autoencoder_state_dict']
        hidden_size = kwargs['hidden_size']
        char_embs = kwargs['char_embs']

        # Mention embeddings
        self.mention_word_embs = nn.Embedding(*mention_word_embs.shape,
                                              padding_idx=0,
                                              sparse=self.args.sparse)
        self.mention_word_embs.weight.data.copy_(
            np_to_tensor(mention_word_embs))
        self.mention_word_embs.weight.requires_grad = self.args.train_mention

        # Entity mention embeddings
        self.mention_ent_embs = nn.Embedding(*mention_ent_embs.shape,
                                             padding_idx=0,
                                             sparse=self.args.sparse)
        self.mention_ent_embs.weight.data.copy_(np_to_tensor(mention_ent_embs))
        self.mention_ent_embs.weight.requires_grad = self.args.train_mention

        # Dropout
        self.dp = nn.Dropout(self.args.dp)

        ##### Autoencoder #####
        max_char_size = self.args.max_char_size
        self.autoencoder = StringAutoEncoder(max_char_size=max_char_size,
                                             hidden_size=hidden_size,
                                             char_embs=char_embs,
                                             dp=self.args.dp,
                                             activate=self.args.activate)
        self.autoencoder.load_state_dict(autoencoder_state_dict)
        for p in self.autoencoder.parameters():
            p.requires_grad = False

        # Linear
        self.context_linear = nn.Linear(
            self.args.mention_word_dim + self.args.context_word_dim +
            hidden_size, 1)
        self.prior_linear = nn.Linear(
            self.args.mention_word_dim + self.args.context_word_dim +
            hidden_size, 1)
        self.str_linear = nn.Linear(
            self.args.mention_word_dim + self.args.context_word_dim +
            hidden_size, 1)

        init_func = getattr(nn.init, self.args.init_linear)
        logger.info(f'Initializing Linear layers using {init_func.__name__}')
        init_func(self.context_linear.weight)
        init_func(self.prior_linear.weight)
        init_func(self.str_linear.weight)

        # Sigmoid
        self.sigmoid = nn.Sigmoid()
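
Example #1 concatenates the mention word, context word, and autoencoder features and feeds them through three single-output linear heads followed by a sigmoid. A minimal shape-level sketch of one such head is below; the dimension values are illustrative, not taken from the source.

import torch

# Illustrative dimensions (the real values come from self.args and hidden_size).
mention_word_dim, context_word_dim, hidden_size = 50, 50, 100
total_dims = mention_word_dim + context_word_dim + hidden_size  # 200

context_linear = torch.nn.Linear(total_dims, 1)    # one of the three scoring heads
features = torch.randn(8, total_dims)              # batch of 8 concatenated feature vectors
scores = torch.sigmoid(context_linear(features))   # (8, 1) scores in (0, 1)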
Example #2
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Unpack args
        mention_word_embs = kwargs['mention_word_embs']
        autoencoder_state_dict = kwargs['autoencoder_state_dict']
        hidden_size = kwargs['hidden_size']
        char_embs = kwargs['char_embs']
        total_dims = self.args.mention_word_dim + self.args.context_word_dim + hidden_size
        ent_embs = torch.load(
            join(self.args.data_path, 'ent_combined_embs.pickle'))

        del self.ent_embs

        # Mention embeddings
        self.mention_word_embs = nn.Embedding(*mention_word_embs.shape,
                                              padding_idx=0,
                                              sparse=self.args.sparse)
        self.mention_word_embs.weight.data.copy_(
            np_to_tensor(mention_word_embs))
        self.mention_word_embs.weight.requires_grad = self.args.train_mention

        # Entity mention embeddings
        self.ent_combined_embs = nn.Embedding(*ent_embs.shape,
                                              padding_idx=0,
                                              sparse=self.args.sparse)
        self.ent_combined_embs.weight.data.copy_(np_to_tensor(ent_embs))

        # Dropout
        self.dp = nn.Dropout(self.args.dp)

        ##### Autoencoder #####
        max_char_size = self.args.max_char_size
        self.autoencoder = StringAutoEncoder(max_char_size=max_char_size,
                                             hidden_size=hidden_size,
                                             char_embs=char_embs,
                                             dp=self.args.dp,
                                             activate=self.args.activate)
        self.autoencoder.load_state_dict(autoencoder_state_dict)
        for p in self.autoencoder.parameters():
            p.requires_grad = False

        self.linear1 = nn.Linear(total_dims, total_dims)
        self.linear2 = nn.Linear(total_dims, total_dims)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

        init_func = getattr(nn.init, self.args.init_linear)
        init_func(self.linear1.weight)
        init_func(self.linear2.weight)

        if self.args.num_linear == 2:
            self.gate_net = nn.Sequential(self.linear1, self.relu,
                                          self.linear2, self.sigmoid)
        else:
            self.gate_net = nn.Sequential(self.linear1, self.sigmoid)
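
Example #2 only defines gate_net; how the gate is applied belongs to the forward pass, which is not shown here. A common use of such a sigmoid gate, assumed purely for illustration, is a per-dimension convex combination of two feature vectors of size total_dims:

import torch
import torch.nn as nn

total_dims = 200                                    # illustrative value
gate_net = nn.Sequential(nn.Linear(total_dims, total_dims), nn.Sigmoid())

str_feats = torch.randn(8, total_dims)              # e.g. autoencoder (string) features
ctx_feats = torch.randn(8, total_dims)              # e.g. context features
gate = gate_net(str_feats)                          # per-dimension gate in (0, 1)
combined = gate * str_feats + (1 - gate) * ctx_feats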
Example #3
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Unpack args
        mention_word_embs = kwargs['mention_word_embs']
        mention_ent_embs = kwargs['mention_ent_embs']

        # Mention embeddings
        self.mention_word_embs = nn.Embedding(*mention_word_embs.shape,
                                              padding_idx=0,
                                              sparse=self.args.sparse)
        self.mention_word_embs.weight.data.copy_(
            np_to_tensor(mention_word_embs))

        # Entity mention embeddings
        self.mention_ent_embs = nn.Embedding(*mention_ent_embs.shape,
                                             padding_idx=0,
                                             sparse=self.args.sparse)
        self.mention_ent_embs.weight.data.copy_(np_to_tensor(mention_ent_embs))

        # Dropout
        self.dp = nn.Dropout(self.args.dp)
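
The first constructor in Example #3 stops at the embeddings and dropout; the second variant below adds the frozen autoencoder and a bias-free combining layer. Both follow the same embedding-loading pattern used throughout this page, sketched here in isolation (the matrix is random and np_to_tensor is assumed to behave like torch.from_numpy):

import numpy as np
import torch
import torch.nn as nn

pretrained = np.random.randn(1000, 64).astype(np.float32)   # (vocab_size, dim), illustrative

emb = nn.Embedding(*pretrained.shape, padding_idx=0, sparse=False)
emb.weight.data.copy_(torch.from_numpy(pretrained))          # copy pretrained rows in place
emb.weight.requires_grad = False                             # freeze if they should not be trained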
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Unpack args
        mention_word_embs = kwargs['mention_word_embs']
        mention_ent_embs = kwargs['mention_ent_embs']
        autoencoder_state_dict = kwargs['autoencoder_state_dict']
        hidden_size = kwargs['hidden_size']
        char_embs = kwargs['char_embs']

        # Mention embeddings
        self.mention_word_embs = nn.Embedding(*mention_word_embs.shape, padding_idx=0, sparse=self.args.sparse)
        self.mention_word_embs.weight.data.copy_(np_to_tensor(mention_word_embs))
        self.mention_word_embs.weight.requires_grad = self.args.train_mention

        # Entity mention embeddings
        self.mention_ent_embs = nn.Embedding(*mention_ent_embs.shape, padding_idx=0, sparse=self.args.sparse)
        self.mention_ent_embs.weight.data.copy_(np_to_tensor(mention_ent_embs))
        self.mention_ent_embs.weight.requires_grad = self.args.train_mention

        # Dropout
        self.dp = nn.Dropout(self.args.dp)

        ##### Autoencoder #####
        max_char_size = self.args.max_char_size
        self.autoencoder = StringAutoEncoder(max_char_size=max_char_size,
                                             hidden_size=hidden_size,
                                             char_embs=char_embs,
                                             dp=self.args.dp,
                                             activate=self.args.activate)
        self.autoencoder.load_state_dict(autoencoder_state_dict)
        for p in self.autoencoder.parameters():
            p.requires_grad = False

        total_dims = self.args.mention_word_dim + self.args.context_word_dim + hidden_size
        self.combine_linear = nn.Linear(total_dims, total_dims, bias=False)
        init_func = getattr(nn.init, self.args.init_linear)
        init_func(self.combine_linear.weight)
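
All three examples resolve the weight initializer by name with getattr(nn.init, self.args.init_linear). A small sketch of that lookup, assuming the flag holds the name of a standard in-place initializer such as 'xavier_uniform_':

import torch.nn as nn

init_linear = 'xavier_uniform_'               # hypothetical value of args.init_linear
combine_linear = nn.Linear(200, 200, bias=False)

init_func = getattr(nn.init, init_linear)     # resolves to nn.init.xavier_uniform_
init_func(combine_linear.weight)              # initializes the weight tensor in place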