def __init__(self, init_embed,
                 num_classes,
                 hidden_size=64,
                 num_layers=1,
                 kernel_nums=(3, 4, 5),
                 kernel_sizes=(3, 4, 5),
                 padding=0,
                 dropout=0.5):
        super(RCNNText, self).__init__()
        
        #embedding
        self.embed = encoder.Embedding(init_embed)
        
        self.conv_pool = encoder.ConvMaxpool(
            in_channels=self.embed.embedding_dim+hidden_size*2,
            out_channels=kernel_nums,
            kernel_sizes=kernel_sizes,
            padding=padding)
        # RNN layer
        self.lstm = nn.LSTM(
            input_size=self.embed.embedding_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bias=True,
            bidirectional=True
        )
    
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(sum(kernel_nums), num_classes)
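The original snippet stops at the constructor. A minimal forward-pass sketch (an assumption, not from the source) makes the unusual in_channels value explicit: the BiLSTM context is concatenated onto each word embedding before the convolution, hence embedding_dim + hidden_size * 2. Input layout, ConvMaxpool expecting [batch, seq_len, channels], and the dict-style return follow common fastNLP usage; `torch` is assumed to be imported alongside `nn`.

    def forward(self, words, seq_len=None):
        # words: [batch, seq_len] token indices (assumed input layout)
        x = self.embed(words)                  # [batch, seq_len, embedding_dim]
        # the nn.LSTM above is not batch_first, so feed it time-major
        ctx, _ = self.lstm(x.transpose(0, 1))  # [seq_len, batch, hidden_size * 2]
        ctx = ctx.transpose(0, 1)              # [batch, seq_len, hidden_size * 2]
        # RCNN: concatenate each embedding with its left/right context
        feats = torch.cat([x, ctx], dim=-1)    # [batch, seq_len, embedding_dim + hidden_size * 2]
        feats = self.conv_pool(feats)          # [batch, sum(kernel_nums)]
        feats = self.dropout(feats)
        return {'pred': self.fc(feats)}        # logits: [batch, num_classes]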
Example #2
    def __init__(self, num_classes, vocab_size):
        super(ClassificationModel, self).__init__()

        self.emb = encoder.Embedding(nums=vocab_size, dims=300)
        self.enc = encoder.Conv(
            in_channels=300, out_channels=100, kernel_size=3)
        self.agg = aggregation.MaxPool()
        self.dec = decoder.MLP(size_layer=[100, num_classes])
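As a hedged sketch (not part of the original snippet), the embed → conv → max-pool → MLP modules would typically be chained like this in forward; exact tensor layouts depend on the fastNLP version in use.

    def forward(self, x):
        x = self.emb(x)   # token ids -> 300-d embeddings
        x = self.enc(x)   # convolution over the sequence
        x = self.agg(x)   # max-pool over the time dimension -> one vector per sample
        x = self.dec(x)   # MLP maps the pooled vector to class scores
        return x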
Example #3
    def __init__(self, vocab_size, input_size, hidden_layer_size, target_size,
                 dropout):
        super(RNN_Text, self).__init__()

        self.embed = encoder.Embedding((vocab_size, input_size))
        self.LSTM = encoder.lstm.LSTM(input_size=input_size,
                                      hidden_size=hidden_layer_size)
        self.dropout = nn.Dropout(dropout)
        self.hidden2tag = nn.Linear(hidden_layer_size, target_size)
Example #4
    def __init__(self, init_embed, hidden_dim, num_classes):
        super(RNN, self).__init__()
        self.embed = encoder.Embedding(init_embed)
        self.rnn = encoder.LSTM(
            input_size=self.embed.embedding_dim,
            hidden_size=hidden_dim,
            num_layers=1,
        )
        self.fc = nn.Linear(hidden_dim, num_classes)
Example #5
    def __init__(self, embed_num, input_size, hidden_size, target_size):
        super(RNN, self).__init__()
        self.embed = encoder.Embedding((embed_num, input_size))
        self.rnn = encoder.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )
        self.output = nn.Linear(hidden_size, target_size)
Example #6
    def __init__(self,
                 init_embed,
                 num_classes,
                 hidden_dim=256,
                 num_layers=1,
                 nfc=128):
        super(BiLSTMSentiment, self).__init__()
        self.embed = encoder.Embedding(init_embed)
        self.lstm = LSTM(input_size=self.embed.embedding_dim,
                         hidden_size=hidden_dim,
                         num_layers=num_layers,
                         bidirectional=True)
        self.mlp = MLP(size_layer=[hidden_dim * 2, nfc, num_classes])
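A possible forward pass for this model, assuming fastNLP's LSTM is batch_first and that the bidirectional outputs are max-pooled over time before the MLP (an assumption, not taken from the original; `torch` assumed imported):

    def forward(self, words):
        x = self.embed(words)                 # [batch, seq_len, embedding_dim]
        output, _ = self.lstm(x)              # [batch, seq_len, hidden_dim * 2]
        pooled = torch.max(output, dim=1)[0]  # max over time -> [batch, hidden_dim * 2]
        return {'pred': self.mlp(pooled)}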
Example #7
    def __init__(self, init_embed,
                 num_classes,
                 kernel_nums=(3, 4, 5),
                 kernel_sizes=(3, 4, 5),
                 padding=0,
                 dropout=0.5):
        super(CNN, self).__init__()

        self.embed = encoder.Embedding(init_embed)
        self.conv_pool = encoder.ConvMaxpool(
            in_channels=self.embed.embedding_dim,
            out_channels=kernel_nums,
            kernel_sizes=kernel_sizes,
            padding=padding)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(sum(kernel_nums), num_classes)
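The matching forward pass is not shown above; a minimal sketch, assuming ConvMaxpool accepts [batch, seq_len, channels] input and a fastNLP-style dict return, would be:

    def forward(self, words, seq_len=None):
        x = self.embed(words)        # [batch, seq_len, embedding_dim]
        x = self.conv_pool(x)        # [batch, sum(kernel_nums)]
        x = self.dropout(x)
        return {'pred': self.fc(x)}  # [batch, num_classes]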
Example #8
    def __init__(self, **kwargs):
        super(ESIM, self).__init__()
        self.vocab_size = kwargs["vocab_size"]
        self.embed_dim = kwargs["embed_dim"]
        self.hidden_size = kwargs["hidden_size"]
        self.batch_first = kwargs["batch_first"]
        self.dropout = kwargs["dropout"]
        self.n_labels = kwargs["num_classes"]
        self.gpu = kwargs["gpu"] and torch.cuda.is_available()

        self.drop = nn.Dropout(self.dropout)

        self.embedding = Encoder.Embedding(
            self.vocab_size,
            self.embed_dim,
            dropout=self.dropout,
            init_emb=kwargs["init_embedding"]
            if "init_embedding" in kwargs.keys() else None,
        )

        self.embedding_layer = Encoder.Linear(self.embed_dim, self.hidden_size)

        self.encoder = Encoder.LSTM(input_size=self.embed_dim,
                                    hidden_size=self.hidden_size,
                                    num_layers=1,
                                    bias=True,
                                    batch_first=self.batch_first,
                                    bidirectional=True)

        self.bi_attention = Aggregator.Bi_Attention()
        self.mean_pooling = Aggregator.MeanPoolWithMask()
        self.max_pooling = Aggregator.MaxPoolWithMask()

        self.inference_layer = Encoder.Linear(self.hidden_size * 4,
                                              self.hidden_size)

        self.decoder = Encoder.LSTM(input_size=self.hidden_size,
                                    hidden_size=self.hidden_size,
                                    num_layers=1,
                                    bias=True,
                                    batch_first=self.batch_first,
                                    bidirectional=True)

        self.output = Decoder.MLP(
            [4 * self.hidden_size, self.hidden_size, self.n_labels],
            'tanh',
            dropout=self.dropout)
Example #9
    def __init__(self, init_embed,
                 num_classes,
                 kernel_nums=(10, 10, 10, 10),
                 kernel_sizes=(2, 3, 4, 5),
                 padding=0,
                 dropout=0.5):
        super(CNNText, self).__init__()

        # no support for pre-trained embedding currently
        self.embed = encoder.Embedding(init_embed)
        self.conv_pool = encoder.ConvMaxpool(
            in_channels=self.embed.embedding_dim,
            out_channels=kernel_nums,
            kernel_sizes=kernel_sizes,
            padding=padding)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(sum(kernel_nums), num_classes)
Example #10
    def __init__(self,
                 vocab_size,
                 input_size,
                 target_size,
                 kernel_num=(3, 4, 5),
                 kernel_size=(3, 4, 5),
                 padding=0,
                 dropout=0.2):
        super(CNN_Text, self).__init__()

        self.embed = encoder.Embedding((vocab_size, input_size))
        self.conv = encoder.ConvMaxpool(in_channels=input_size,
                                        out_channels=kernel_num,
                                        kernel_sizes=kernel_size,
                                        padding=padding)
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(sum(kernel_num), target_size)
Example #11
    def __init__(self,
                 embed_num,
                 embed_dim,
                 num_classes,
                 kernel_nums=(3, 4, 5),
                 kernel_sizes=(3, 4, 5),
                 padding=0,
                 dropout=0.5):
        super(CNNText, self).__init__()

        # no support for pre-trained embedding currently
        self.embed = encoder.Embedding(embed_num, embed_dim)
        self.conv_pool = encoder.ConvMaxpool(in_channels=embed_dim,
                                             out_channels=kernel_nums,
                                             kernel_sizes=kernel_sizes,
                                             padding=padding)
        self.dropout = nn.Dropout(dropout)
        self.fc = encoder.Linear(sum(kernel_nums), num_classes)
        self._loss = nn.CrossEntropyLoss()
Example #12
    def __init__(self,
                 init_embed,
                 num_classes,
                 hidden_dim=256,
                 num_layers=1,
                 attention_unit=256,
                 attention_hops=1,
                 nfc=128):
        super(BiLSTM_SELF_ATTENTION, self).__init__()
        self.embed = encoder.Embedding(init_embed)
        self.lstm = LSTM(input_size=self.embed.embedding_dim,
                         hidden_size=hidden_dim,
                         num_layers=num_layers,
                         bidirectional=True)
        self.attention = SelfAttention(input_size=hidden_dim * 2,
                                       attention_unit=attention_unit,
                                       attention_hops=attention_hops)
        self.mlp = MLP(
            size_layer=[hidden_dim * 2 * attention_hops, nfc, num_classes])
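A sketch of how these layers might be wired in forward, assuming the SelfAttention aggregator returns a [batch, attention_hops, hidden_dim * 2] tensor plus a penalty term, and that its inputs are the LSTM output and the original token ids; the attention call signature is an assumption, not taken from the original:

    def forward(self, words):
        x = self.embed(words)                               # [batch, seq_len, embedding_dim]
        output, _ = self.lstm(x)                            # [batch, seq_len, hidden_dim * 2]
        attended, penalty = self.attention(output, words)   # signature assumed
        flat = attended.view(attended.size(0), -1)          # [batch, hops * hidden_dim * 2]
        return {'pred': self.mlp(flat)}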
Example #13
    def __init__(self,
                 init_embed,
                 num_classes,
                 hidden_size=64,
                 num_layers=1,
                 linear_hidden_dim=32,
                 kernel_nums=5,
                 kernel_sizes=4,
                 padding=0,
                 content_dim=100,
                 dropout=0.5):
        super(RCNNTextUpdate, self).__init__()

        #embedding
        self.embed = encoder.Embedding(init_embed)

        #RNN layer
        self.lstm = nn.LSTM(input_size=self.embed.embedding_dim,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            bias=True,
                            batch_first=False,
                            bidirectional=True)

        #CNN layer
        self.conv = nn.Sequential(
            nn.Conv1d(in_channels=hidden_size * 2 + self.embed.embedding_dim,
                      out_channels=content_dim,
                      kernel_size=kernel_sizes), nn.BatchNorm1d(content_dim),
            nn.ReLU(inplace=True),
            nn.Conv1d(in_channels=content_dim,
                      out_channels=content_dim,
                      kernel_size=kernel_sizes), nn.BatchNorm1d(content_dim),
            nn.ReLU(inplace=True))

        #fc
        self.fc = nn.Sequential(nn.Linear(content_dim, linear_hidden_dim),
                                nn.BatchNorm1d(linear_hidden_dim),
                                nn.ReLU(inplace=True),
                                nn.Linear(linear_hidden_dim, num_classes))

        self.dropout = nn.Dropout(dropout)
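The constructor above is all the original shows; a hypothetical forward pass (shapes inferred from the layer definitions, not from the source, with `torch` assumed imported) could look like:

    def forward(self, words):
        x = self.embed(words)                      # [batch, seq_len, embedding_dim]
        ctx, _ = self.lstm(x.transpose(0, 1))      # LSTM is batch_first=False, so go time-major
        ctx = ctx.transpose(0, 1)                  # [batch, seq_len, hidden_size * 2]
        feats = torch.cat([x, ctx], dim=-1)        # [batch, seq_len, embedding_dim + hidden_size * 2]
        feats = self.conv(feats.permute(0, 2, 1))  # Conv1d expects [batch, channels, seq_len]
        feats = feats.max(dim=-1)[0]               # max over time -> [batch, content_dim]
        feats = self.dropout(feats)
        return self.fc(feats)                      # [batch, num_classes]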