Example 1
 def __init__(self, **kwargs):
     super(seq2seq, self).__init__(**kwargs)
     with self.name_scope():
         self.Embed = nn.Embedding(vocab_size, 200)
         self.gru_1 = rnn.GRU(hidden_size=1024, layout='NTC', num_layers=4)
         self.gru_2 = rnn.GRU(hidden_size=1024, layout='NTC', num_layers=4)
         self.mlp = nn.Dense(units=vocab_size, flatten=False)
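With layout='NTC' the GRU consumes batch-major input of shape (batch, seq_len, features), so the embedding output can be fed in directly. A minimal usage sketch, assuming vocab_size = 10000 (the snippet does not define it):

    from mxnet import nd
    from mxnet.gluon import nn, rnn

    vocab_size = 10000                       # assumed; not defined in the snippet
    embed = nn.Embedding(vocab_size, 200)
    gru = rnn.GRU(hidden_size=1024, layout='NTC', num_layers=4)
    embed.initialize()
    gru.initialize()

    tokens = nd.ones((2, 5))                 # (batch, seq_len) of token ids
    out = gru(embed(tokens))                 # (2, 5, 1024); state is zero-initialized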
Example 2
 def build_model(self,
                 num_series,
                 conv_hid,
                 gru_hid,
                 skip_gru_hid,
                 skip,
                 ar_window,
                 model_dir=False):
     kernel_size = 6
     dropout_rate = 0.2
     self.skip = skip
     self.ar_window = ar_window
     self.model_dir = model_dir
     self.num_series = num_series
     self.conv_hid = conv_hid
     self.gru_hid = gru_hid
     self.skip_gru_hid = skip_gru_hid
     with self.name_scope():
         self.conv = nn.Conv1D(conv_hid,
                               kernel_size=kernel_size,
                               layout='NCW',
                               activation='relu')
         self.dropout = nn.Dropout(dropout_rate)
         self.gru = rnn.GRU(gru_hid, layout='TNC')
         self.skip_gru = rnn.GRU(skip_gru_hid, layout='TNC')
         self.fc = nn.Dense(num_series)
         self.ar_fc = nn.Dense(1)
Example 3
    def __init__(self, **kwargs):
        super(Network, self).__init__(**kwargs)
        with self.name_scope():
            self.gru = rnn.GRU(100, num_layers=1, bidirectional=True)
            self.gru_out = nn.Sequential()
            self.gru_out.add(nn.MaxPool2D(pool_size=(FIXED_WORD_LENGTH, 1)))
            self.gru_out.add(nn.Flatten())
            self.gru_out.add(nn.Activation(activation='relu'))

            self.center_att = nn.Sequential()
            self.center_att.add(nn.Dense(1, in_units=200, flatten=False,
                                         activation="sigmoid"))
            self.center_out = nn.Sequential()
            self.center_out.add(nn.Dense(200, activation="relu"))

            self.desc_gru = rnn.GRU(64, num_layers=1, bidirectional=True, layout="NTC", dropout=0.2)
            self.desc_att = nn.Sequential()
            self.desc_att.add(nn.Dense(1, flatten=False,
                                       activation="sigmoid"))
            self.desc_out = nn.Sequential()
            self.desc_out.add(nn.Dense(100, activation="relu"))

            self.output = nn.Sequential()
            self.output.add(nn.Dropout(0.5))
            self.output.add(nn.Dense(7))
Example 4
 def __init__(self, **kwargs):
     super(SMN_Last, self).__init__(**kwargs)
     with self.name_scope():
         self.Embed = nn.Embedding(136334, 200)
         self.conv = nn.Conv2D(channels=8, kernel_size=3, activation='relu')
         self.pooling = nn.MaxPool2D(pool_size=3, strides=3)
         self.mlp_1 = nn.Dense(units=50, activation='tanh', flatten=True)
         self.gru_1 = rnn.GRU(hidden_size=50, layout='NTC')
         self.gru_2 = rnn.GRU(layout='NTC', hidden_size=50)
         self.mlp_2 = nn.Dense(units=2, flatten=False)
         self.W = self.params.get('param_test', shape=(50, 50))
Example 5
 def __init__(self, num_series, conv_hid, gru_hid, skip_gru_hid, skip, ar_window):
     super(LSTNet, self).__init__()
     kernel_size = 6
     dropout_rate = 0.2
     self.skip = skip
     self.ar_window = ar_window
     with self.name_scope():
         self.conv = nn.Conv1D(conv_hid, kernel_size=kernel_size, layout='NCW', activation='relu')
         self.dropout = nn.Dropout(dropout_rate)
         self.gru = rnn.GRU(gru_hid, layout='TNC')
         self.skip_gru = rnn.GRU(skip_gru_hid, layout='TNC')
         self.fc = nn.Dense(num_series)
         self.ar_fc = nn.Dense(1)
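Note the layout mismatch: the Conv1D emits 'NCW' (batch, channels, width) while both GRUs expect 'TNC' (time, batch, channels), so the forward pass, which is not shown here, has to permute axes in between. A hedged sketch of that glue with assumed sizes:

    from mxnet import nd
    from mxnet.gluon import nn, rnn

    conv = nn.Conv1D(10, kernel_size=6, layout='NCW', activation='relu')
    gru = rnn.GRU(20, layout='TNC')
    conv.initialize()
    gru.initialize()

    x = nd.ones((16, 8, 24))                 # (batch, num_series, time) for 'NCW'
    c = conv(x)                              # (16, 10, 19), channels-first
    t = nd.transpose(c, axes=(2, 0, 1))      # (19, 16, 10): (time, batch, channels)
    out = gru(t)                             # (19, 16, 20)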
Example 6
    def __init__(self,
                 mode,
                 vocab_size,
                 num_embed,
                 num_hidden,
                 num_layers,
                 dropout=0.5,
                 **kwargs):
        super(GluonRNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(
                vocab_size, num_embed, weight_initializer=mx.init.Uniform(0.1))

            if mode == 'lstm':
                self.rnn = rnn.LSTM(
                    num_hidden,
                    num_layers,
                    dropout=dropout,
                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(
                    num_hidden,
                    num_layers,
                    dropout=dropout,
                    input_size=num_embed)
            else:
                self.rnn = rnn.RNN(
                    num_hidden,
                    num_layers,
                    activation='relu',
                    dropout=dropout,
                    input_size=num_embed)
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)
            self.num_hidden = num_hidden
Example 7
    def __init__(self, mode, embed_dim, hidden_dim,
                 num_layers, dropout=0.5, **kwargs):
        super(RNN, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            # self.encoder = nn.Embedding(vocab_size, embed_dim,
            #                             weight_initializer=mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(hidden_dim, num_layers, activation='relu',
                                   dropout=dropout, input_size=embed_dim)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(hidden_dim, num_layers, activation='tanh',
                                   dropout=dropout, input_size=embed_dim)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(hidden_dim, num_layers, dropout=dropout,
                                    input_size=embed_dim)
            elif mode == 'gru':
                self.rnn = rnn.GRU(hidden_dim, num_layers, dropout=dropout,
                                   input_size=embed_dim)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru"%mode)

            self.decoder = nn.Dense(2, in_units=hidden_dim)
            self.hidden_dim = hidden_dim
Example 8
    def __init__(self,
                 rnn_type,
                 hidden_size,
                 output_size,
                 num_layers,
                 dropout,
                 bidirectional=True,
                 ctx=cpu()):
        """TODO: to be defined.

        :hidden_size: TODO
        :num_layers: TODO
        :dropout: TODO
        :bidirectional: TODO

        """
        nn.Block.__init__(self)

        self._rnn_type = rnn_type.upper()
        self._hidden_size = hidden_size
        self._output_size = output_size
        self._num_layers = num_layers
        self._dropout = dropout
        self._bidirectional = bidirectional
        self.ctx = ctx

        if self._rnn_type == 'LSTM':
            self.rnn = rnn.LSTM(self._hidden_size, self._num_layers, 'NTC',
                                self._dropout, self._bidirectional)
        elif self._rnn_type == 'GRU':
            self.rnn = rnn.GRU(self._hidden_size, self._num_layers, 'NTC',
                               self._dropout, self._bidirectional)
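The bare positional arguments above follow Gluon's signature rnn.GRU(hidden_size, num_layers=1, layout='TNC', dropout=0, bidirectional=False), so 'NTC', self._dropout, and self._bidirectional land on layout, dropout, and bidirectional respectively. The keyword-spelled equivalent, with assumed sizes:

    from mxnet.gluon import rnn

    net = rnn.GRU(hidden_size=256, num_layers=2, layout='NTC',
                  dropout=0.3, bidirectional=True)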
Example 9
 def __init__(self, **kwargs):
     super(FeatureBlock, self).__init__(**kwargs)
     self.gru = rnn.GRU(128, layout='NTC', bidirectional=True, num_layers=1, dropout=0.2)
     self.conv3 = nn.Conv1D(channels=128, kernel_size=5, padding=2, strides=1, activation='relu')
     self.conv5 = nn.Conv1D(channels=128, kernel_size=9, padding=4, strides=1, activation='relu')
     self.conv7 = nn.Conv1D(channels=128, kernel_size=13, padding=6, strides=1, activation='relu')
     self.conv_drop = nn.Dropout(0.2)
Example 10
 def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
              attention_size, drop_prob=0, **kwargs):
     super(Decoder, self).__init__(**kwargs)
     self.embedding = nn.Embedding(vocab_size, embed_size)
     self.attention = attention_model(attention_size)
     self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob)
     self.out = nn.Dense(vocab_size, flatten=False)
Example 11
def net_define():
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Embedding(config.MAX_WORDS, config.EMBEDDING_DIM))
        net.add(
            rnn.GRU(128,
                    layout='NTC',
                    bidirectional=True,
                    num_layers=2,
                    dropout=0.2))
        net.add(transpose(axes=(0, 2, 1)))
        # net.add(nn.MaxPool2D(pool_size=(config.MAX_LENGTH,1)))
        # net.add(nn.Conv2D(128, kernel_size=(101,1), padding=(50,0), groups=128,activation='relu'))
        net.add(PrimeConvCap(8, 32, kernel_size=(1, 1), padding=(0, 0)))
        # net.add(AdvConvCap(8,32,8,32, kernel_size=(1,1), padding=(0,0)))
        net.add(
            CapFullyBlock(8 * (config.MAX_LENGTH),
                          num_cap=12,
                          input_units=32,
                          units=16,
                          route_num=5))
        # net.add(CapFullyBlock(8*(config.MAX_LENGTH-8), num_cap=12, input_units=32, units=16, route_num=5))
        # net.add(CapFullyBlock(8, num_cap=12, input_units=32, units=16, route_num=5))
        net.add(nn.Dropout(0.2))
        # net.add(LengthBlock())
        net.add(nn.Dense(6, activation='sigmoid'))
    net.initialize(init=init.Xavier())
    return net
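`transpose` above is not a Gluon built-in; it is presumably a small custom Block that permutes axes so it can sit inside nn.Sequential between the NTC-shaped GRU output and the channels-first capsule layers. A hedged sketch of what such a block could look like (an assumption, not the project's actual code):

    from mxnet.gluon import nn

    class transpose(nn.HybridBlock):
        """Permute input axes so the op can be chained inside nn.Sequential."""
        def __init__(self, axes, **kwargs):
            super(transpose, self).__init__(**kwargs)
            self.axes = axes

        def hybrid_forward(self, F, x):
            return F.transpose(x, axes=self.axes)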
Example 12
    def __init__(self, num_hiddens, num_outputs, num_layers, max_seq_len,
                 drop_prob, alignment_size, encoder_num_hiddens, **kwargs):
        super(Decoder, self).__init__(**kwargs)
        self.max_seq_len = max_seq_len
        self.encoder_num_hiddens = encoder_num_hiddens
        self.hidden_size = num_hiddens
        self.num_layers = num_layers
        with self.name_scope():
            self.embedding = nn.Embedding(num_outputs, num_hiddens)
            self.dropout = nn.Dropout(drop_prob)
            # Attention mechanism.
            self.attention = nn.Sequential()
            with self.attention.name_scope():
                self.attention.add(
                    nn.Dense(alignment_size,
                             in_units=num_hiddens + encoder_num_hiddens,
                             activation="tanh",
                             flatten=False))
                self.attention.add(
                    nn.Dense(1, in_units=alignment_size, flatten=False))

            self.rnn = rnn.GRU(num_hiddens,
                               num_layers,
                               dropout=drop_prob,
                               input_size=num_hiddens)

            self.out = nn.Dense(num_outputs,
                                in_units=num_hiddens,
                                flatten=False)
            self.rnn_concat_input = nn.Dense(num_hiddens,
                                             in_units=num_hiddens +
                                             encoder_num_hiddens,
                                             flatten=False)
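The two flatten=False Dense layers form a small MLP scoring function: for every encoder time step, the concatenated (decoder state, encoder output) vector of width num_hiddens + encoder_num_hiddens is mapped through a tanh layer to a single unnormalized score, which the forward pass (not shown) would softmax over the time axis. A hedged sketch of that scoring with assumed sizes:

    from mxnet import nd
    from mxnet.gluon import nn

    num_hiddens, encoder_num_hiddens, alignment_size = 64, 64, 32   # assumed
    attention = nn.Sequential()
    attention.add(nn.Dense(alignment_size, in_units=num_hiddens + encoder_num_hiddens,
                           activation='tanh', flatten=False))
    attention.add(nn.Dense(1, in_units=alignment_size, flatten=False))
    attention.initialize()

    # (seq_len, batch, concat_width) -> (seq_len, batch, 1) scores
    scores = attention(nd.ones((10, 4, num_hiddens + encoder_num_hiddens)))
    weights = nd.softmax(scores, axis=0)     # attention weights over time steps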
Example 13
    def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, **kwargs):
        super(GluonRNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer=mx.init.Uniform(0.1))

            if mode == 'lstm':
                #  we create an LSTM layer with a certain number of hidden LSTM cells and layers
                #  in our example num_hidden is 1000 and num_layers is 2
                #  the input to the LSTM is only passed during the forward pass (see the forward function below)
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                #  we create a GRU layer with a certain number of hidden GRU cells and layers
                #  in our example num_hidden is 1000 and num_layers is 2
                #  the input to the GRU is only passed during the forward pass (see the forward function below)
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                #  we create a vanilla RNN layer with a certain number of hidden cells and layers
                #  in our example num_hidden is 1000 and num_layers is 2
                #  the input to the vanilla RNN is only passed during the forward pass (see the forward function below)
                self.rnn = rnn.RNN(num_hidden, num_layers, activation='relu', dropout=dropout,
                                   input_size=num_embed)
            self.decoder = nn.Dense(vocab_size, in_units=num_hidden)
            self.num_hidden = num_hidden
Example 14
    def __init__(self, hidden_dim, output_dim, num_layers, max_seq_len,
                 drop_prob, alignment_dim, encoder_hidden_dim, batch_size):
        super(Decoder, self).__init__()
        self.max_seq_len = max_seq_len
        self.encoder_hidden_dim = encoder_hidden_dim
        self.hidden_size = hidden_dim
        self.num_layers = num_layers
        self.batch_size = batch_size

        with self.name_scope():
            self.embedding = nn.Embedding(output_dim, hidden_dim)
            self.dropout = nn.Dropout(drop_prob)
            # Attention mechanism.
            self.attention = nn.Sequential()
            with self.attention.name_scope():
                self.attention.add(
                    nn.Dense(alignment_dim,
                             in_units=hidden_dim + encoder_hidden_dim,
                             activation="tanh",
                             flatten=False))
                self.attention.add(
                    nn.Dense(1, in_units=alignment_dim, flatten=False))

            self.rnn = rnn.GRU(hidden_dim,
                               num_layers,
                               dropout=drop_prob,
                               input_size=hidden_dim)
            self.out = nn.Dense(output_dim, in_units=hidden_dim)
            self.rnn_concat_input = nn.Dense(hidden_dim,
                                             in_units=hidden_dim +
                                             encoder_hidden_dim,
                                             flatten=False)
Example 15
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        # Set the default Inputs
        default_parameters = {
            "data": (25, 32, 256),
            "data_initializer": nd.normal,
            "hidden_size": 100,
            "num_layers": 1,
            "layout": "TNC",
            "dropout": 0,
            "bidirectional": False,
            "run_backward": True,
            "dtype": "float32"
        }

        super().__init__(ctx=ctx,
                         warmup=warmup,
                         runs=runs,
                         default_parameters=default_parameters,
                         custom_parameters=inputs)

        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=self.inputs["data"],
                                   dtype=self.inputs["dtype"],
                                   initializer=self.inputs["data_initializer"],
                                   attach_grad=self.inputs["run_backward"])

        self.block = rnn.GRU(hidden_size=self.inputs["hidden_size"],
                             num_layers=self.inputs["num_layers"],
                             layout=self.inputs["layout"],
                             dropout=self.inputs["dropout"],
                             bidirectional=self.inputs["bidirectional"],
                             dtype=self.inputs["dtype"])

        self.block.initialize(ctx=self.ctx)
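A hedged sketch of how such a block is typically driven once initialized: data in 'TNC' layout of shape (25, 32, 256) as in default_parameters above, with an explicit initial state and a backward pass over the output:

    from mxnet import nd, autograd
    from mxnet.gluon import rnn

    block = rnn.GRU(hidden_size=100, num_layers=1, layout='TNC')
    block.initialize()

    data = nd.random.normal(shape=(25, 32, 256))   # (time, batch, features)
    begin = block.begin_state(batch_size=32)       # one (1, 32, 100) state array
    with autograd.record():
        out, state = block(data, begin)            # out: (25, 32, 100)
    out.backward()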
Example 16
 def __init__(self,
              mode,
              num_hidden,
              num_layers,
              num_slices=20,
              dropout=0.3,
              **kwargs):
     super(RNNModel, self).__init__(**kwargs)
     with self.name_scope():
         self.drop = nn.Dropout(dropout)
         if mode == 'rnn_relu':
             self.rnn = rnn.RNN(num_hidden,
                                num_layers,
                                activation='relu',
                                dropout=dropout)
         elif mode == 'rnn_tanh':
              self.rnn = rnn.RNN(num_hidden, num_layers, activation='tanh', dropout=dropout)
         elif mode == 'lstm':
             self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout)
         elif mode == 'gru':
             self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout)
         else:
             raise ValueError("Invalid mode %s. Options are rnn_relu, "
                              "rnn_tanh, lstm, and gru" % mode)
         self.decoder = nn.Dense(num_slices, in_units=num_hidden)
         self.num_hidden = num_hidden
Example 17
    def __init__(self, input_size, hidden_size, sub_sample_size=3):
        super(AudioEncoderRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sub_sample_size = sub_sample_size
        self.num_layers = 2

        with self.name_scope():
            self.proj = nn.Dense(hidden_size, flatten=False)
            self.rnn1 = rnn.GRU(hidden_size * 4,
                                input_size=self.hidden_size,
                                num_layers=self.num_layers)
            self.subsampler = SubSampler(self.sub_sample_size)
            self.rnn2 = rnn.GRU(hidden_size * 4,
                                input_size=self.hidden_size * 4,
                                num_layers=self.num_layers)
Example 18
    def __init__(self, mode,
                 num_embed,
                 num_hidden,
                 seq_len, num_layers,
                 dropout=0.0,
                 **kwargs):
        super(RNNClsModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            # self.emb = nn.Embedding(vocab_size, num_embed,
            #                         weight_initializer=mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, activation='relu', num_layers=num_layers,
                                   layout='NTC', dropout=dropout, input_size=num_embed)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(num_hidden, num_layers=num_layers, activation='tanh',
                                   layout='NTC', dropout=dropout, input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers=num_layers, layout='NTC', dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers=num_layers, layout='NTC', dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru" % mode)

            self.fc = nn.Dense(NUM_CLASSES, in_units=num_hidden * seq_len)
            self.num_hidden = num_hidden
            self.seq_len = seq_len
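The classifier Dense keeps its default flatten=True, so the 'NTC' output of shape (batch, seq_len, num_hidden) is flattened to (batch, seq_len * num_hidden), matching in_units above. A quick shape check under assumed sizes:

    from mxnet import nd
    from mxnet.gluon import nn

    seq_len, num_hidden, NUM_CLASSES = 10, 64, 5   # assumed sizes
    fc = nn.Dense(NUM_CLASSES, in_units=num_hidden * seq_len)
    fc.initialize()
    out = fc(nd.ones((4, seq_len, num_hidden)))    # flattened to (4, 640) internally
    print(out.shape)                               # (4, 5)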
Example 19
    def __init__(self, input_size, hidden_size, n_layers):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers

        with self.name_scope():
            self.embedding = nn.Embedding(input_size, hidden_size)
            self.gru = rnn.GRU(hidden_size, input_size=self.hidden_size)
Example 20
 def __init__(self, num_inputs, num_hiddens, num_layers, drop_prob, **kwargs):
     super(Encoder, self).__init__(**kwargs)
     with self.name_scope():
         # Word embeddings.
         self.embedding = nn.Embedding(num_inputs, num_hiddens)
         self.dropout = nn.Dropout(drop_prob)
         self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob,
                            input_size=num_hiddens)
Example 21
 def __init__(self, hidden_size, num_layers, drop_rate, input_size, **kwargs):
     super(BiGRU, self).__init__(**kwargs)
     self.input_size = input_size
     with self.name_scope():
         self.rnn = rnn.GRU(hidden_size=hidden_size,
                            num_layers=num_layers,
                            input_size=input_size,
                            dropout=drop_rate,
                            bidirectional=True)
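With bidirectional=True the output feature dimension doubles to 2 * hidden_size, which any downstream layer must account for. A minimal shape check under assumed sizes:

    from mxnet import nd
    from mxnet.gluon import rnn

    bigru = rnn.GRU(hidden_size=64, num_layers=1, input_size=32, bidirectional=True)
    bigru.initialize()
    out = bigru(nd.ones((10, 4, 32)))    # default layout 'TNC'
    print(out.shape)                     # (10, 4, 128): forward and backward halves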
Example 22
 def __init__(self, num_series, conv_hid, gru_hid, skip_gru_hid, skip,
              ar_window):
     super(LSTNet, self).__init__()
     kernel_size = 6  # in this case looks at 6 hour data window
     dropout_rate = 0.2  # for regularization
     self.skip = skip  # determines the seasonality/cycles
     self.ar_window = ar_window
     with self.name_scope():
         # define specific layers for the model
         self.conv = nn.Conv1D(conv_hid,
                               kernel_size=kernel_size,
                               layout='NCW',
                               activation='relu')
         self.dropout = nn.Dropout(dropout_rate)
         self.gru = rnn.GRU(gru_hid, layout='TNC')
         self.skip_gru = rnn.GRU(skip_gru_hid, layout='TNC')
         self.fc = nn.Dense(num_series)
         self.ar_fc = nn.Dense(1)
Example 23
 def __init__(self, dr_rate, **kwargs):
     super(LipNet, self).__init__(**kwargs)
     with self.name_scope():
         self.conv1 = nn.Conv3D(32, kernel_size=(3, 5, 5), strides=(1, 2, 2), padding=(1, 2, 2))
         self.bn1 = nn.InstanceNorm(in_channels=32)
         self.dr1 = nn.Dropout(dr_rate, axes=(1, 2))
         self.pool1 = nn.MaxPool3D((1, 2, 2), (1, 2, 2))
         self.conv2 = nn.Conv3D(64, kernel_size=(3, 5, 5), strides=(1, 1, 1), padding=(1, 2, 2))
         self.bn2 = nn.InstanceNorm(in_channels=64)
         self.dr2 = nn.Dropout(dr_rate, axes=(1, 2))
         self.pool2 = nn.MaxPool3D((1, 2, 2), (1, 2, 2))
         self.conv3 = nn.Conv3D(96, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding=(1, 2, 2))
         self.bn3 = nn.InstanceNorm(in_channels=96)
         self.dr3 = nn.Dropout(dr_rate, axes=(1, 2))
         self.pool3 = nn.MaxPool3D((1, 2, 2), (1, 2, 2))
         self.gru1 = rnn.GRU(256, bidirectional=True)
         self.gru2 = rnn.GRU(256, bidirectional=True)
         self.dense = nn.Dense(27+1, flatten=False)
Example 24
 def __init__(self, input_dim, hidden_dim, num_layers, drop_prob):
     super(Encoder, self).__init__()
     with self.name_scope():
         self.embedding = nn.Embedding(input_dim, hidden_dim)
         self.dropout = nn.Dropout(drop_prob)
         self.rnn = rnn.GRU(hidden_dim,
                            num_layers,
                            dropout=drop_prob,
                            input_size=hidden_dim)
Example 25
 def __init__(self,
              vocab_size,
              embed_size,
              num_hiddens,
              num_layers,
              drop_prob=0,
              **kwargs):
     super(Encoder, self).__init__(**kwargs)
     self.embedding = nn.Embedding(vocab_size, embed_size)
     self.gru = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob)
Example 26
 def __init__(self, **kwargs):
     super(SMN_Last, self).__init__(**kwargs)
     with self.name_scope():
         self.Embed = nn.Embedding(411721, 256)
         # agg param
         self.gru = rnn.GRU(1024, 2, layout='NTC')
         self.mlp_1 = nn.Dense(units=60, flatten=False, activation='relu')
         self.mlp_2 = nn.Dense(units=1, flatten=False)
         # lstm param
         self.topic_embedding = self.params.get('param_test', shape=(1024, 2000))
Example 27
    def __init__(self,
                 mode,
                 vocab_size,
                 embed_dim,
                 hidden_dim,
                 num_layers,
                 dropout=0.5,
                 **kwargs):
        """
        mode: RNN网络类型
        vocab_size: 数据集不同字符的数目
        embed_dim: 编码器神经元数目 输入数据特征数目
        hidden_dim: 隐藏状态特征数
        num_layers: 循环神经网络层数
        dropout: 随机失活
        
        stata shape: (隐藏层个数, 批量大小, 隐藏单元个数)
        input shape: (时间步数, 批量大小, 输入个数)
        """
        super(RNN_Model, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(
                vocab_size, embed_dim, weight_initializer=mx.init.Uniform(0.1))

            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(hidden_dim,
                                   num_layers,
                                   activation='relu',
                                   dropout=dropout,
                                   input_size=embed_dim)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(hidden_dim,
                                   num_layers,
                                   activation='tanh',
                                   dropout=dropout,
                                   input_size=embed_dim)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(hidden_dim,
                                    num_layers,
                                    dropout=dropout,
                                    input_size=embed_dim)
            elif mode == 'gru':
                self.rnn = rnn.GRU(hidden_dim,
                                   num_layers,
                                   dropout=dropout,
                                   input_size=embed_dim)
            else:
                raise ValueError(
                    "Invalid mode %s. Options are rnn_relu, rnn_tanh, lstm, and gru"
                    % mode)

            self.decoder = nn.Dense(vocab_size, in_units=hidden_dim)
            self.hidden_dim = hidden_dim
Example 28
 def __init__(self, input_dim, hidden_dim, num_layers, drop_prob, **kwargs):
     super(Encoder, self).__init__(**kwargs)
     with self.name_scope():
         # input_dim is len(input_vocab)
         # hidden_dim is encoder_hidden_dim, 256
         # embedding.weight.shape is (input_dim, hidden_dim)
         self.embedding = nn.Embedding(input_dim, hidden_dim)
         self.dropout = nn.Dropout(drop_prob)
         # num_layers is 1
         self.rnn = rnn.GRU(hidden_dim,
                            num_layers,
                            dropout=drop_prob,
                            input_size=hidden_dim)
Example 29
    def __init__(self,
                 num_hiddens,
                 num_layers,
                 drop_prob=0,
                 enOutsize=2,
                 **kwargs):
        super(Encoder, self).__init__(**kwargs)
        # self.embedding = nn.Embedding(vocab_size, embed_size)

        self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=drop_prob)
        # self.rnn = rnn.LSTM(num_hiddens, num_layers, dropout=drop_prob)
        self.dense = gluon.nn.Sequential()
        self.dense.add(nn.Dense(enOutsize, use_bias=True, flatten=False))
Example 30
 def __init__(self,
              mode,
              vocab_size,
              num_embed,
              num_hidden,
              num_layers,
              dropout=0.5,
              tie_weights=False,
              **kwargs):
     super(RNNModel, self).__init__(**kwargs)
     with self.name_scope():
         self.drop = nn.Dropout(dropout)
         if 0:
             self.encoder = nn.Embedding(
                 vocab_size,
                 num_embed,
                 weight_initializer=mx.init.Uniform(0.1))
         else:
             self.encoder = None
         if mode == 'rnn_relu':
             self.rnn = rnn.RNN(num_hidden,
                                num_layers,
                                activation='relu',
                                dropout=dropout,
                                input_size=num_embed)
         elif mode == 'rnn_tanh':
             self.rnn = rnn.RNN(num_hidden,
                                num_layers,
                                activation='tanh',
                                dropout=dropout,
                                input_size=num_embed)
         elif mode == 'lstm':
             self.rnn = rnn.LSTM(num_hidden,
                                 num_layers,
                                 dropout=dropout,
                                 input_size=num_embed)
         elif mode == 'gru':
             self.rnn = rnn.GRU(num_hidden,
                                num_layers,
                                dropout=dropout,
                                input_size=num_embed)
         else:
             raise ValueError("Invalid mode %s. Options are rnn_relu, "
                              "rnn_tanh, lstm, and gru" % mode)
         if tie_weights:
             self.decoder = nn.Dense(vocab_size,
                                     in_units=num_hidden,
                                     params=self.encoder.params)
         else:
             self.decoder = nn.Dense(vocab_size, in_units=num_hidden)
         self.num_hidden = num_hidden
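Weight tying shares the embedding matrix with the output projection, which requires num_embed == num_hidden; note that as written, tie_weights=True would fail, because the `if 0:` branch above leaves self.encoder as None. A minimal sketch of the tying mechanism on its own, with assumed sizes:

    from mxnet.gluon import nn

    vocab_size, num_embed = 1000, 128              # assumed; num_embed == num_hidden
    encoder = nn.Embedding(vocab_size, num_embed)
    decoder = nn.Dense(vocab_size, in_units=num_embed, params=encoder.params)
    encoder.initialize()                           # decoder reuses the same weight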