Code Example #1
File: BiLSTM.py Project: wibruce/deepke
    def __init__(self, vocab_size, config):
        super(BiLSTM, self).__init__()
        self.model_name = 'BiLSTM'
        self.word_dim = config.word_dim
        self.pos_size = config.pos_size
        self.pos_dim = config.pos_dim
        self.hidden_dim = config.hidden_dim
        self.lstm_layers = config.lstm_layers
        self.last_hn = config.last_hn
        self.out_dim = config.relation_type
        self.dropout = config.dropout

        self.embedding = Embedding(vocab_size, self.word_dim, self.pos_size,
                                   self.pos_dim)
        self.input_dim = self.word_dim + self.pos_dim * 2  # word embedding plus two entity-position embeddings
        self.lstm = VarLenLSTM(self.input_dim,
                               self.hidden_dim,
                               self.lstm_layers,
                               dropout=self.dropout,
                               last_hn=self.last_hn)
        # With last_hn, only the final layer's (bidirectional) hidden state
        # feeds the classifier; otherwise the final hidden states of all
        # layers are concatenated.
        if self.last_hn:
            linear_input_dim = self.hidden_dim * 2
        else:
            linear_input_dim = self.hidden_dim * 2 * self.lstm_layers
        self.fc1 = nn.Linear(linear_input_dim, self.hidden_dim)
        self.fc2 = nn.Linear(self.hidden_dim, self.out_dim)
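
The branch on last_hn is easiest to see with shapes: a bidirectional LSTM's final hidden state has shape (num_layers * 2, batch, hidden_dim), so keeping only the last layer yields hidden_dim * 2 features, while keeping every layer yields hidden_dim * 2 * lstm_layers. A minimal sketch with plain torch.nn.LSTM (assuming the project-internal VarLenLSTM behaves the same way in this respect):

import torch
import torch.nn as nn

hidden_dim, lstm_layers, batch = 100, 2, 4
lstm = nn.LSTM(input_size=60, hidden_size=hidden_dim,
               num_layers=lstm_layers, bidirectional=True, batch_first=True)
x = torch.randn(batch, 30, 60)       # (batch, seq_len, input_dim)
_, (hn, _) = lstm(x)                 # hn: (num_layers * 2, batch, hidden_dim)

last_hn = hn.view(lstm_layers, 2, batch, hidden_dim)[-1]   # last layer only
print(last_hn.permute(1, 0, 2).reshape(batch, -1).shape)   # (4, hidden_dim * 2)
print(hn.permute(1, 0, 2).reshape(batch, -1).shape)        # (4, hidden_dim * 2 * lstm_layers)
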
Code Example #2
    def __init__(self, vocab_size, config):
        super(Capsule, self).__init__()
        self.model_name = 'Capsule'
        self.vocab_size = vocab_size
        self.word_dim = config.model.word_dim
        self.pos_size = config.model.pos_size
        self.pos_dim = config.model.pos_dim
        self.hidden_dim = config.model.hidden_dim

        self.num_primary_units = config.capsule.num_primary_units
        self.num_output_units = config.capsule.num_output_units
        self.primary_channels = config.capsule.primary_channels
        self.primary_unit_size = config.capsule.primary_unit_size
        self.output_unit_size = config.capsule.output_unit_size
        self.num_iterations = config.capsule.num_iterations

        self.embedding = Embedding(self.vocab_size, self.word_dim,
                                   self.pos_size, self.pos_dim)
        self.input_dim = self.word_dim + self.pos_dim * 2
        self.lstm = VarLenLSTM(self.input_dim, self.hidden_dim)
        self.capsule = CapsuleNet(self.num_primary_units,
                                  self.num_output_units, self.primary_channels,
                                  self.primary_unit_size,
                                  self.output_unit_size, self.num_iterations)
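
The num_iterations parameter refers to dynamic routing between capsules. CapsuleNet is project-internal, so as an assumption about what it computes: the standard capsule nonlinearity is the squash function, which shrinks each capsule vector to a length in (0, 1) while preserving its direction:

import torch

def squash(s, dim=-1, eps=1e-8):
    """Squash: ||s||^2 / (1 + ||s||^2) * s / ||s||, output length in (0, 1)."""
    sq_norm = (s ** 2).sum(dim=dim, keepdim=True)
    return (sq_norm / (1.0 + sq_norm)) * s / torch.sqrt(sq_norm + eps)

v = squash(torch.randn(4, 8, 16))   # e.g. (batch, num_units, unit_size)
print(v.norm(dim=-1).max() < 1.0)   # every capsule vector now has length < 1

In capsule-based relation extraction, the length of each of the num_output_units vectors is typically read as the probability of the corresponding relation.
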
Code Example #3
    def __init__(self, vocab_size, config):
        super(GCN, self).__init__()
        self.model_name = 'GCN'
        self.vocab_size = vocab_size
        self.word_dim = config.model.word_dim
        self.pos_size = config.model.pos_size
        self.pos_dim = config.model.pos_dim
        self.hidden_dim = config.model.hidden_dim
        self.dropout = config.model.dropout
        self.num_layers = config.gcn.num_layers
        self.out_dim = config.relation_type
        self.embedding = Embedding(self.vocab_size, self.word_dim,
                                   self.pos_size, self.pos_dim)
        self.input_dim = self.word_dim + self.pos_dim * 2
        self.fc1 = nn.Linear(self.input_dim, self.hidden_dim)
        self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.fc3 = nn.Linear(self.hidden_dim, self.out_dim)
        # Note: self.dropout (a float rate) is replaced here by the Dropout module.
        self.dropout = nn.Dropout(self.dropout)
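
The constructor only stacks linear layers; the graph itself enters in the forward pass, where each layer is typically a multiplication by a normalized adjacency matrix followed by a nonlinearity, in the style of Kipf & Welling. A minimal sketch of one such propagation step (an assumption, since the forward pass is not shown here):

import torch
import torch.nn as nn
import torch.nn.functional as F

batch, seq_len, input_dim, hidden_dim = 2, 5, 60, 100
fc1 = nn.Linear(input_dim, hidden_dim)

x = torch.randn(batch, seq_len, input_dim)   # node features, one node per token
adj = torch.eye(seq_len)                     # normalized adjacency (here: identity)

h = F.relu(torch.matmul(adj, fc1(x)))        # one GCN layer: A_hat @ X @ W, then ReLU
print(h.shape)                               # torch.Size([2, 5, 100])
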
Code Example #4
    def __init__(self, vocab_size, config):
        super(CNN, self).__init__()
        self.model_name = 'CNN'
        self.vocab_size = vocab_size
        self.word_dim = config.model.word_dim
        self.pos_size = config.model.pos_size
        self.pos_dim = config.model.pos_dim
        self.hidden_dim = config.model.hidden_dim
        self.dropout = config.model.dropout
        self.use_pcnn = config.cnn.use_pcnn
        self.out_channels = config.cnn.out_channels
        self.kernel_size = config.cnn.kernel_size
        self.out_dim = config.relation_type

        if isinstance(self.kernel_size, int):
            self.kernel_size = [self.kernel_size]
        for k in self.kernel_size:
            assert k % 2 == 1, "kernel size has to be an odd number."

        self.embedding = Embedding(self.vocab_size, self.word_dim,
                                   self.pos_size, self.pos_dim)
        # PCNN piecewise mask embedding: index 0 marks padding, indices 1-3 mark
        # the three sentence segments split by the two entities; the fixed
        # 100-scaled rows act as additive offsets for piecewise max pooling.
        self.mask_embed = nn.Embedding(4, 3)
        masks = torch.tensor([[0, 0, 0], [100, 0, 0], [0, 100, 0], [0, 0, 100]])
        self.mask_embed.weight.data.copy_(masks)
        self.mask_embed.weight.requires_grad = False

        self.input_dim = self.word_dim + self.pos_dim * 2
        self.convs = nn.ModuleList([
            nn.Conv1d(in_channels=self.input_dim,
                      out_channels=self.out_channels,
                      kernel_size=k,
                      padding=k // 2,  # odd k with k // 2 padding keeps the sequence length
                      bias=False) for k in self.kernel_size
        ])
        self.conv_dim = len(self.kernel_size) * self.out_channels
        if self.use_pcnn:
            self.conv_dim *= 3
        self.fc1 = nn.Linear(self.conv_dim, self.hidden_dim)
        self.fc2 = nn.Linear(self.hidden_dim, self.out_dim)
        self.dropout = nn.Dropout(self.dropout)
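
The frozen mask embedding is the standard PCNN trick for piecewise max pooling: each token is tagged with the segment it belongs to (1, 2, or 3, split at the two entity positions; 0 for padding), and adding the 100-scaled one-hot rows to the convolution output lets a single max over the sequence pick out per-segment maxima. A minimal sketch of that pooling step (the shapes and the segment-id convention are assumptions; the model's forward pass is not shown here):

import torch

batch, seq_len, out_channels = 2, 6, 4
conv_out = torch.randn(batch, seq_len, out_channels)
mask = torch.tensor([[1, 1, 2, 2, 3, 0]] * batch)   # segment id per token

masks = torch.tensor([[0., 0., 0.], [100., 0., 0.], [0., 100., 0.], [0., 0., 100.]])
mask_embed = masks[mask]                            # (batch, seq_len, 3)

# Adding 100 to exactly one segment makes the max over tokens select, per
# channel, the maximum within that segment; subtracting 100 restores values.
pooled = (conv_out.unsqueeze(-1) + mask_embed.unsqueeze(-2)).max(dim=1)[0] - 100
print(pooled.shape)                                 # (batch, out_channels, 3)

Flattening the result gives len(kernel_size) * out_channels * 3 features, which is exactly why conv_dim is tripled when use_pcnn is set.
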
Code Example #5
File: Transformer.py Project: wibruce/deepke
    def __init__(self, vocab_size, config):
        super(Transformer, self).__init__()
        self.model_name = 'Transformer'
        self.word_dim = config.word_dim
        self.pos_size = config.pos_size
        self.pos_dim = config.pos_dim
        self.hidden_dim = config.hidden_dim
        self.dropout = config.dropout
        self.out_dim = config.relation_type
        self.layers = config.transformer_layers

        self.embedding = Embedding(vocab_size, self.word_dim, self.pos_size,
                                   self.pos_dim)
        self.feature_dim = self.word_dim + self.pos_dim * 2
        self.att = MultiHeadAttention(self.feature_dim, num_head=4)
        self.norm1 = nn.LayerNorm(self.feature_dim)
        self.ffn = nn.Sequential(nn.Linear(self.feature_dim, self.hidden_dim),
                                 nn.ReLU(),
                                 nn.Linear(self.hidden_dim, self.feature_dim),
                                 nn.Dropout(self.dropout))
        self.norm2 = nn.LayerNorm(self.feature_dim)
        self.fc = nn.Linear(self.feature_dim, self.out_dim)
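
These modules wire up a single post-norm encoder block: self-attention plus a residual connection and LayerNorm, then a position-wise feed-forward network with its own residual and LayerNorm, presumably applied self.layers times before the final classifier. A plausible forward pass consistent with the constructor (an assumption: MultiHeadAttention is project-internal, so torch's nn.MultiheadAttention stands in for it here):

import torch
import torch.nn as nn

feature_dim, hidden_dim, seq_len, batch = 72, 100, 12, 2
att = nn.MultiheadAttention(feature_dim, num_heads=4, batch_first=True)
norm1, norm2 = nn.LayerNorm(feature_dim), nn.LayerNorm(feature_dim)
ffn = nn.Sequential(nn.Linear(feature_dim, hidden_dim), nn.ReLU(),
                    nn.Linear(hidden_dim, feature_dim), nn.Dropout(0.3))

x = torch.randn(batch, seq_len, feature_dim)
a, _ = att(x, x, x)          # self-attention over the token features
x = norm1(x + a)             # residual + LayerNorm (post-norm)
x = norm2(x + ffn(x))        # FFN sub-layer with its own residual + norm
print(x.shape)               # torch.Size([2, 12, 72])
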