Example #1
    def __init__(self, nhidden, **kwargs):
        super(BiLSTM, self).__init__(**kwargs)

        with self.name_scope():
            self.hcell = rnn.BidirectionalCell(
                rnn.LSTMCell(nhidden),
                rnn.LSTMCell(nhidden),
            )
            self.vcell = rnn.BidirectionalCell(
                rnn.LSTMCell(nhidden),
                rnn.LSTMCell(nhidden),
            )
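A minimal usage sketch (not part of the original class; hidden size and shapes are illustrative) showing how a bidirectional cell like self.hcell or self.vcell can be unrolled on dummy data:

import mxnet as mx
from mxnet.gluon import rnn

# Standalone bidirectional LSTM cell, analogous to self.hcell / self.vcell above.
bi_cell = rnn.BidirectionalCell(rnn.LSTMCell(16), rnn.LSTMCell(16))
bi_cell.initialize()

# Dummy batch in the default 'NTC' layout: (batch, seq_len, features).
x = mx.nd.random.uniform(shape=(2, 5, 8))
outputs, states = bi_cell.unroll(length=5, inputs=x, merge_outputs=True)
print(outputs.shape)  # (2, 5, 32): forward and backward outputs concatenated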
Example #2
 def __init__(self,
              cell_type='lstm',
              num_layers=2,
              num_bi_layers=1,
              input_halved_layers='',
              hidden_size=128,
              dropout=0.0,
              use_residual=True,
              i2h_weight_initializer=None,
              h2h_weight_initializer=None,
              i2h_bias_initializer='zeros',
              h2h_bias_initializer='zeros',
              prefix=None,
              params=None):
     super(GNMTEncoder, self).__init__(prefix=prefix, params=params)
     self._cell_type = _get_cell_type(cell_type)
     assert num_bi_layers <= num_layers,\
         'Number of bidirectional layers must not exceed the total number of layers, ' \
         'num_bi_layers={}, num_layers={}'.format(num_bi_layers, num_layers)
     self._num_bi_layers = num_bi_layers
     self._num_layers = num_layers
     self._hidden_size = hidden_size
     self._dropout = dropout
     self._use_residual = use_residual
     # Parse comma-separated layer indices; an empty string means no such layers.
     self._input_halved_layers = \
         [int(idx) for idx in input_halved_layers.split(',')] if input_halved_layers else []
     print("encoder layers: %d" % num_layers)
     print("input_halved_layers:", self._input_halved_layers)
     with self.name_scope():
         self.dropout_layer = nn.Dropout(dropout)
         self.rnn_cells = nn.HybridSequential()
         for i in range(num_layers):
             if i < num_bi_layers:
                 self.rnn_cells.add(
                     rnn.BidirectionalCell(
                         l_cell=self._cell_type(
                             hidden_size=self._hidden_size,
                             i2h_weight_initializer=i2h_weight_initializer,
                             h2h_weight_initializer=h2h_weight_initializer,
                             i2h_bias_initializer=i2h_bias_initializer,
                             h2h_bias_initializer=h2h_bias_initializer,
                             prefix='rnn%d_l_' % i),
                         r_cell=self._cell_type(
                             hidden_size=self._hidden_size,
                             i2h_weight_initializer=i2h_weight_initializer,
                             h2h_weight_initializer=h2h_weight_initializer,
                             i2h_bias_initializer=i2h_bias_initializer,
                             h2h_bias_initializer=h2h_bias_initializer,
                             prefix='rnn%d_r_' % i)))
             else:
                 self.rnn_cells.add(
                     self._cell_type(
                         hidden_size=self._hidden_size,
                         i2h_weight_initializer=i2h_weight_initializer,
                         h2h_weight_initializer=h2h_weight_initializer,
                         i2h_bias_initializer=i2h_bias_initializer,
                         h2h_bias_initializer=h2h_bias_initializer,
                         prefix='rnn%d_' % i))
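A hedged sketch (sizes illustrative; dropout and residual connections omitted) of the stacking pattern this encoder builds, here with one bidirectional layer followed by one unidirectional layer (num_layers=2, num_bi_layers=1):

import mxnet as mx
from mxnet.gluon import rnn

# Layer 0 is bidirectional, layer 1 is a plain LSTM cell.
hidden_size = 8
bi_layer = rnn.BidirectionalCell(rnn.LSTMCell(hidden_size), rnn.LSTMCell(hidden_size))
uni_layer = rnn.LSTMCell(hidden_size)
bi_layer.initialize()
uni_layer.initialize()

x = mx.nd.random.uniform(shape=(4, 6, 8))                          # (batch, seq_len, features)
out, _ = bi_layer.unroll(length=6, inputs=x, merge_outputs=True)   # (4, 6, 16)
out, _ = uni_layer.unroll(length=6, inputs=out, merge_outputs=True)  # (4, 6, 8)
print(out.shape)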
Example #3
 def __init__(self, emb_dim, hidden_dim, vocab_size, dropout=.2, **kwargs):
     super(Sentence_Representation, self).__init__(**kwargs)
     self.vocab_size = vocab_size
     self.emb_dim = emb_dim
     self.hidden_dim = hidden_dim
     with self.name_scope():
         self.f_hidden = []
         self.b_hidden = []
         self.embed = nn.Embedding(self.vocab_size, self.emb_dim)
         self.drop = nn.Dropout(dropout)
         self.bi_rnn = rnn.BidirectionalCell(
             rnn.LSTMCell(hidden_size=self.hidden_dim // 2),
             rnn.LSTMCell(hidden_size=self.hidden_dim // 2))
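A hedged sketch of how the pieces above fit together (the class's forward pass is not shown here, and the sizes are illustrative): embed token ids, then unroll the bidirectional cell so the two hidden_dim // 2 halves concatenate back to hidden_dim:

import mxnet as mx
from mxnet.gluon import nn, rnn

embed = nn.Embedding(100, 32)                        # vocab_size=100, emb_dim=32 (illustrative)
bi_rnn = rnn.BidirectionalCell(rnn.LSTMCell(hidden_size=16),
                               rnn.LSTMCell(hidden_size=16))
embed.initialize()
bi_rnn.initialize()

tokens = mx.nd.array([[3, 7, 1, 4, 2]])              # (batch=1, seq_len=5) token ids
emb = embed(tokens)                                  # (1, 5, 32)
out, _ = bi_rnn.unroll(length=5, inputs=emb, merge_outputs=True)
print(out.shape)                                     # (1, 5, 32): 2 * (hidden_dim // 2)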
Example #4
    def __init__(self, n_hidden, vocab_size, embed_dim, max_seq_length,
                 **kwargs):
        super(korean_autospacing, self).__init__(**kwargs)
        # input sequence length
        self.in_seq_len = max_seq_length
        # output sequence length
        self.out_seq_len = max_seq_length
        # number of GRU hidden units
        self.n_hidden = n_hidden
        # number of unique characters (vocabulary size)
        self.vocab_size = vocab_size
        # maximum sequence length
        self.max_seq_length = max_seq_length
        # embedding dimension size
        self.embed_dim = embed_dim

        with self.name_scope():
            self.embedding = nn.Embedding(input_dim=self.vocab_size,
                                          output_dim=self.embed_dim)

            self.conv_unigram = nn.Conv2D(channels=128,
                                          kernel_size=(1, self.embed_dim))

            self.conv_bigram = nn.Conv2D(channels=256,
                                         kernel_size=(2, self.embed_dim),
                                         padding=(1, 0))

            self.conv_trigram = nn.Conv2D(channels=128,
                                          kernel_size=(3, self.embed_dim),
                                          padding=(1, 0))

            self.conv_forthgram = nn.Conv2D(channels=64,
                                            kernel_size=(3, self.embed_dim),
                                            padding=(2, 0))

            self.conv_fifthgram = nn.Conv2D(channels=32,
                                            kernel_size=(3, self.embed_dim),
                                            padding=(2, 0))

            self.bi_gru = rnn.BidirectionalCell(
                rnn.GRUCell(hidden_size=self.n_hidden),
                rnn.GRUCell(hidden_size=self.n_hidden))
            self.dense_sh = nn.Dense(100, activation='relu', flatten=False)
            self.dense = nn.Dense(1, activation='sigmoid', flatten=False)
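A hedged sketch of the n-gram convolution idea used above (sizes illustrative, not from the original forward pass): the embedded character sequence is treated as a one-channel image so a (k, embed_dim) kernel slides over k-character windows:

import mxnet as mx
from mxnet.gluon import nn

embed_dim, seq_len = 16, 10
conv_bigram = nn.Conv2D(channels=4, kernel_size=(2, embed_dim), padding=(1, 0))
conv_bigram.initialize()

# (batch, channels=1, seq_len, embed_dim) in the default NCHW layout.
x = mx.nd.random.uniform(shape=(2, 1, seq_len, embed_dim))
y = conv_bigram(x)
print(y.shape)   # (2, 4, seq_len + 1, 1): the asymmetric padding adds one window position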
Example #5
 def __init__(self, embed_size, vocab, hidden, dense, unit=0, headers=0):
     """
     Position Tagging with MHA
     :param embed_size: Int
     Size of the embedding dimension
     :param vocab: Int
     Size of the token vocabulary
     :param hidden: Int
     Hidden dimension of the biLSTM
     :param dense: List of Int
     Output sizes of the dense layers applied after the biLSTM
     :param unit: Int
     Number of query/key/value units for the multi-head attention
     :param headers: Int
     Number of attention heads; 0 disables attention
     """
     super(PositionTagging, self).__init__()
     with self.name_scope():
         self.embedding = nn.Embedding(vocab, embed_size)
         # bidirectional LSTM
         self.biLSTM = rnn.BidirectionalCell(
             rnn.LSTMCell(hidden_size=hidden // 2),
             rnn.LSTMCell(hidden_size=hidden // 2))
         if headers > 0:
             cell = DotProductAttentionCell(scaled=True, dropout=0.2)
             cell = MultiHeadAttentionCell(base_cell=cell,
                                           use_bias=False,
                                           query_units=unit,
                                           key_units=unit,
                                           value_units=unit,
                                           num_heads=headers)
             self.att = cell
         else:
             self.att = None
         self.dense = nn.Sequential()
         for each in dense[:-1]:
             self.dense.add(nn.Dense(each, activation="softrelu"))
             self.dense.add(nn.BatchNorm())
         self.dense.add(nn.Dense(dense[-1], activation="sigmoid"))
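A hedged sketch of the no-attention path (headers=0) through this block, with illustrative sizes: unroll the bidirectional LSTM, then apply a dense stack built the same way as above.

import mxnet as mx
from mxnet.gluon import nn, rnn

hidden, dense_sizes = 32, [64, 1]
bi = rnn.BidirectionalCell(rnn.LSTMCell(hidden_size=hidden // 2),
                           rnn.LSTMCell(hidden_size=hidden // 2))
head = nn.Sequential()
for size in dense_sizes[:-1]:
    head.add(nn.Dense(size, activation="softrelu"))
    head.add(nn.BatchNorm())
head.add(nn.Dense(dense_sizes[-1], activation="sigmoid"))
bi.initialize()
head.initialize()

emb = mx.nd.random.uniform(shape=(2, 7, 16))                     # (batch, seq_len, embed_size)
out, _ = bi.unroll(length=7, inputs=emb, merge_outputs=True)     # (2, 7, 32)
print(head(out).shape)   # (2, 1): Dense's default flatten collapses the sequence axis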