Code Example #1
File: gru.py Project: yoshall/MF-STN
    def __init__(self, rnn_hiddens, hiddens, embed_dim, prefix):
        super(GRU, self).__init__(prefix=prefix)

        with self.name_scope():
            # gated recurrent units
            self.grus = []
            for i, hidden in enumerate(rnn_hiddens):
                cell = rnn.GRUCell(hidden, prefix='gru%d_' % i)
                self.register_child(cell)
                self.grus += [cell]

            # dense layers (mf dense layers)
            self.denses = nn.Sequential()
            in_dims = [rnn_hiddens[-1]] + hiddens
            out_dims = hiddens + [FLOW_OUTPUT_DIM * FLOW_OUTPUT_LEN]
            for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
                activation = None if i == len(in_dims) - 1 else 'relu'
                if embed_dim == 0:
                    self.denses.add(
                        nn.Dense(out_dim,
                                 activation,
                                 flatten=False,
                                 prefix='dense%d_' % i))
                else:
                    self.denses.add(
                        MFDense(N_LOC,
                                embed_dim,
                                in_dim,
                                out_dim,
                                activation,
                                prefix='mf_dense%d_' % i))
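
The GRU stack above keeps its cells in a plain Python list and registers each one by hand, so a forward pass must also step the cells manually. (MFDense, N_LOC, FLOW_OUTPUT_DIM, and FLOW_OUTPUT_LEN are module-level names from the MF-STN project.) A minimal sketch of that manual-stepping pattern, assuming MXNet 1.x; the layer sizes and dummy input are illustrative, not taken from MF-STN:

import mxnet as mx
from mxnet.gluon import rnn

# Two stacked GRU cells, stepped manually over a toy sequence.
cells = [rnn.GRUCell(16), rnn.GRUCell(8)]
for cell in cells:
    cell.initialize()

x = mx.nd.random.uniform(shape=(4, 10, 32))  # (batch, time, features)
states = [cell.begin_state(batch_size=4) for cell in cells]
for t in range(x.shape[1]):
    inp = x[:, t, :]
    for i, cell in enumerate(cells):
        # each layer's output becomes the next layer's input
        inp, states[i] = cell(inp, states[i])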
Code Example #2
File: utils.py Project: batermj/gluon-nlp
def _get_rnn_cell(mode, num_layers, input_size, hidden_size, dropout,
                  weight_dropout, var_drop_in, var_drop_state, var_drop_out):
    """create rnn cell given specs"""
    rnn_cell = rnn.SequentialRNNCell()
    with rnn_cell.name_scope():
        for i in range(num_layers):
            if mode == 'rnn_relu':
                cell = rnn.RNNCell(hidden_size, 'relu', input_size=input_size)
            elif mode == 'rnn_tanh':
                cell = rnn.RNNCell(hidden_size, 'tanh', input_size=input_size)
            elif mode == 'lstm':
                cell = rnn.LSTMCell(hidden_size, input_size=input_size)
            elif mode == 'gru':
                cell = rnn.GRUCell(hidden_size, input_size=input_size)
            else:
                # guard against an undefined `cell` on an unknown mode
                raise ValueError('unsupported mode: %s' % mode)
            if var_drop_in + var_drop_state + var_drop_out != 0:
                cell = contrib.rnn.VariationalDropoutCell(
                    cell, var_drop_in, var_drop_state, var_drop_out)

            rnn_cell.add(cell)
            if i != num_layers - 1 and dropout != 0:
                rnn_cell.add(rnn.DropoutCell(dropout))

            if weight_dropout:
                apply_weight_drop(rnn_cell, 'h2h_weight', rate=weight_dropout)

    return rnn_cell
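
A hypothetical call (the argument values are illustrative): build a two-layer LSTM stack with inter-layer dropout and no variational or weight dropout, then unroll it over a dummy batch.

import mxnet as mx

stacked = _get_rnn_cell(mode='lstm', num_layers=2, input_size=32,
                        hidden_size=64, dropout=0.2, weight_dropout=0,
                        var_drop_in=0, var_drop_state=0, var_drop_out=0)
stacked.initialize()
x = mx.nd.random.uniform(shape=(4, 10, 32))  # (batch, time, features)
outputs, states = stacked.unroll(10, x, layout='NTC', merge_outputs=True)
print(outputs.shape)  # (4, 10, 64)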
Code Example #3
    def __init__(self, n_hidden, vocab_size, embed_dim, max_seq_length,
                 **kwargs):
        super(korean_autospacing, self).__init__(**kwargs)
        # input sequence length
        self.in_seq_len = max_seq_length
        # output sequence length
        self.out_seq_len = max_seq_length
        # number of GRU hidden units
        self.n_hidden = n_hidden
        # number of unique characters (vocabulary size)
        self.vocab_size = vocab_size
        # max_seq_length
        self.max_seq_length = max_seq_length
        # embedding dimension
        self.embed_dim = embed_dim

        with self.name_scope():
            self.embedding = nn.Embedding(input_dim=self.vocab_size,
                                          output_dim=self.embed_dim)

            self.conv_unigram = nn.Conv2D(channels=128,
                                          kernel_size=(1, self.embed_dim))

            self.conv_bigram = nn.Conv2D(channels=256,
                                         kernel_size=(2, self.embed_dim),
                                         padding=(1, 0))

            self.conv_trigram = nn.Conv2D(channels=128,
                                          kernel_size=(3, self.embed_dim),
                                          padding=(1, 0))

            self.conv_forthgram = nn.Conv2D(channels=64,
                                            kernel_size=(3, self.embed_dim),
                                            padding=(2, 0))

            self.conv_fifthgram = nn.Conv2D(channels=32,
                                            kernel_size=(3, self.embed_dim),
                                            padding=(2, 0))

            self.bi_gru = rnn.BidirectionalCell(
                rnn.GRUCell(hidden_size=self.n_hidden),
                rnn.GRUCell(hidden_size=self.n_hidden))
            self.dense_sh = nn.Dense(100, activation='relu', flatten=False)
            self.dense = nn.Dense(1, activation='sigmoid', flatten=False)
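
Note that the different paddings leave the n-gram convolutions with different output lengths along the sequence axis (and the 4-gram and 5-gram branches reuse kernel height 3 with wider padding), so the forward pass, which is not shown here, has to reconcile them, presumably by slicing. A quick shape check with illustrative sizes (embed_dim=8, sequence length 10; Conv2D input is (batch, 1, seq, embed)):

import mxnet as mx
from mxnet.gluon import nn

embed_dim, seq_len = 8, 10
x = mx.nd.random.uniform(shape=(2, 1, seq_len, embed_dim))
for k, p in [((1, embed_dim), (0, 0)), ((2, embed_dim), (1, 0)),
             ((3, embed_dim), (1, 0)), ((3, embed_dim), (2, 0))]:
    conv = nn.Conv2D(channels=4, kernel_size=k, padding=p)
    conv.initialize()
    # output length along the sequence axis = seq_len + 2*p[0] - k[0] + 1
    print(k, p, conv(x).shape)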
Code Example #4
    def __init__(self, samples_per_step: int = 5, **kwargs):
        """
        Args:
            samples_per_step: How many samples to generate at each decoding step.
            **kwargs: Forwarded to the parent constructor.
        """
        super(SinDecoder, self).__init__(**kwargs)

        self._samples_per_step = samples_per_step

        with self.name_scope():
            self._decoder_rnn = rnn.GRUCell(hidden_size=20)
            self._projection_layer = nn.Dense(samples_per_step, flatten=False)
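
A minimal sketch of how such a decoder cell might be stepped autoregressively, projecting each hidden state and feeding the projection back in as the next input (the loop, the zero "go" frame, and the shapes are assumptions, not taken from the project):

import mxnet as mx
from mxnet.gluon import nn, rnn

decoder_rnn = rnn.GRUCell(hidden_size=20)
projection = nn.Dense(5, flatten=False)  # samples_per_step = 5
decoder_rnn.initialize()
projection.initialize()

batch_size, n_steps = 4, 3
inp = mx.nd.zeros((batch_size, 5))  # initial "go" frame
states = decoder_rnn.begin_state(batch_size=batch_size)
outputs = []
for _ in range(n_steps):
    h, states = decoder_rnn(inp, states)
    inp = projection(h)  # next input = current output
    outputs.append(inp)
samples = mx.nd.concat(*outputs, dim=1)  # (batch, n_steps * 5)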
Code Example #5
    def __init__(self,
                 n_hidden,
                 vocab_size,
                 embed_dim,
                 max_seq_length,
                 end_idx,
                 attention=False,
                 **kwargs):
        super(korean_english_translator, self).__init__(**kwargs)
        self.end_idx = end_idx
        # input sequence length
        self.in_seq_len = max_seq_length
        # output sequence length
        self.out_seq_len = max_seq_length
        # number of GRU hidden units
        self.n_hidden = n_hidden
        # number of unique characters (vocabulary size)
        self.vocab_size = vocab_size
        # max_seq_length
        self.max_seq_length = max_seq_length
        # embedding dimension
        self.embed_dim = embed_dim

        self.attention = attention
        with self.name_scope():
            self.embedding = nn.Embedding(input_dim=vocab_size,
                                          output_dim=embed_dim,
                                          dtype="float16")

            self.encoder = rnn.GRUCell(hidden_size=n_hidden)
            self.decoder = rnn.GRUCell(hidden_size=n_hidden)
            self.batchnorm = nn.BatchNorm(axis=2)
            # with flatten=False, the fully connected layer is applied to the last dimension
            self.dense = nn.Dense(self.vocab_size, flatten=False)
            if self.attention:
                self.dropout = nn.Dropout(0.3)
                self.attdense = nn.Dense(self.max_seq_length, flatten=False)
                self.attn_combine = nn.Dense(self.n_hidden, flatten=False)
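
The forward pass is not shown, so below is one plausible wiring of the attention layers above for a single decoder step. This is an assumption made for illustration, not the project's actual logic:

from mxnet import nd

# enc_outs: (batch, max_seq_length, n_hidden); dec_h: (batch, n_hidden)
# attdense, attn_combine, dropout are the layers defined in __init__ above.
def attend(enc_outs, dec_h, attdense, attn_combine, dropout):
    scores = attdense(dec_h)  # (batch, max_seq_length)
    weights = nd.softmax(scores, axis=1)
    # weighted sum of encoder outputs -> (batch, 1, n_hidden)
    context = nd.batch_dot(weights.expand_dims(axis=1), enc_outs)
    combined = nd.concat(dec_h, context.squeeze(axis=1), dim=1)
    return dropout(attn_combine(combined))  # (batch, n_hidden)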
Code Example #6
File: utils.py Project: zhaozhengChen/gluon-nlp
def _get_rnn_cell(mode,
                  num_layers,
                  input_size,
                  hidden_size,
                  dropout,
                  weight_dropout,
                  var_drop_in,
                  var_drop_state,
                  var_drop_out,
                  skip_connection,
                  proj_size=None,
                  cell_clip=None,
                  proj_clip=None):
    """create rnn cell given specs

    Parameters
    ----------
    mode : str
        The type of RNN cell to use. Options are 'lstmpc', 'rnn_tanh', 'rnn_relu', 'lstm', 'gru'.
    num_layers : int
        The number of RNN cells in the encoder.
    input_size : int
        The initial input size of the RNN cell.
    hidden_size : int
        The hidden size of the RNN cell.
    dropout : float
        The dropout rate to use for encoder output.
    weight_dropout : float
        The dropout rate for the hidden-to-hidden connections.
    var_drop_in : float
        The variational dropout rate for inputs. Won't apply dropout if it equals 0.
    var_drop_state : float
        The variational dropout rate for state inputs on the first state channel.
        Won't apply dropout if it equals 0.
    var_drop_out : float
        The variational dropout rate for outputs. Won't apply dropout if it equals 0.
    skip_connection : bool
        Whether to add skip connections (add the RNN cell input to its output).
    proj_size : int
        The projection size of each LSTMPCellWithClip cell.
        Only available when mode is 'lstmpc'.
    cell_clip : float
        Clip the cell state to [-cell_clip, cell_clip] in each LSTMPCellWithClip cell.
        Only available when mode is 'lstmpc'.
    proj_clip : float
        Clip the projection to [-proj_clip, proj_clip] in each LSTMPCellWithClip cell.
        Only available when mode is 'lstmpc'.
    """

    assert mode == 'lstmpc' or proj_size is None, \
        'proj_size takes effect only when mode is lstmpc'
    assert mode == 'lstmpc' or cell_clip is None, \
        'cell_clip takes effect only when mode is lstmpc'
    assert mode == 'lstmpc' or proj_clip is None, \
        'proj_clip takes effect only when mode is lstmpc'

    rnn_cell = rnn.HybridSequentialRNNCell()
    with rnn_cell.name_scope():
        for i in range(num_layers):
            if mode == 'rnn_relu':
                cell = rnn.RNNCell(hidden_size, 'relu', input_size=input_size)
            elif mode == 'rnn_tanh':
                cell = rnn.RNNCell(hidden_size, 'tanh', input_size=input_size)
            elif mode == 'lstm':
                cell = rnn.LSTMCell(hidden_size, input_size=input_size)
            elif mode == 'gru':
                cell = rnn.GRUCell(hidden_size, input_size=input_size)
            elif mode == 'lstmpc':
                cell = LSTMPCellWithClip(hidden_size,
                                         proj_size,
                                         cell_clip=cell_clip,
                                         projection_clip=proj_clip,
                                         input_size=input_size)
            else:
                # guard against an undefined `cell` on an unknown mode
                raise ValueError('unsupported mode: %s' % mode)
            if var_drop_in + var_drop_state + var_drop_out != 0:
                cell = contrib.rnn.VariationalDropoutCell(
                    cell, var_drop_in, var_drop_state, var_drop_out)

            if skip_connection:
                cell = rnn.ResidualCell(cell)

            rnn_cell.add(cell)

            if i != num_layers - 1 and dropout != 0:
                rnn_cell.add(rnn.DropoutCell(dropout))

            if weight_dropout:
                apply_weight_drop(rnn_cell, 'h2h_weight', rate=weight_dropout)

    return rnn_cell
Code Example #7
File: rnn_layer.py Project: sunbc0120/gluon-ocr
def _get_rnn_cell(mode,
                  num_layers,
                  input_size,
                  hidden_size,
                  dropout,
                  var_drop_in,
                  var_drop_state,
                  var_drop_out,
                  skip_connection,
                  proj_size=None):
    """create rnn cell given specs

    Parameters
    ----------
    mode : str
        The type of RNN cell to use. Options are 'rnn_tanh', 'rnn_relu', 'lstm', 'lstmp', 'gru'.
    num_layers : int
        The number of RNN cells in the encoder.
    input_size : int
        The initial input size of the RNN cell.
    hidden_size : int
        The hidden size of the RNN cell.
    dropout : float
        The dropout rate to use for encoder output.
    var_drop_in : float
        The variational dropout rate for inputs. Won't apply dropout if it equals 0.
    var_drop_state : float
        The variational dropout rate for state inputs on the first state channel.
        Won't apply dropout if it equals 0.
    var_drop_out : float
        The variational dropout rate for outputs. Won't apply dropout if it equals 0.
    skip_connection : bool
        Whether to add skip connections (add the RNN cell input to its output).
    proj_size : int
        The projection size of each LSTMPCell.
        Only available when mode is 'lstmp'.

    """

    if mode == 'lstmp':
        assert proj_size is not None, \
            'proj_size is required when mode is lstmp'

    rnn_cell = rnn.HybridSequentialRNNCell()
    with rnn_cell.name_scope():
        for i in range(num_layers):
            if mode == 'rnn_relu':
                cell = rnn.RNNCell(hidden_size, 'relu', input_size=input_size)
            elif mode == 'rnn_tanh':
                cell = rnn.RNNCell(hidden_size, 'tanh', input_size=input_size)
            elif mode == 'lstm':
                cell = rnn.LSTMCell(hidden_size, input_size=input_size)
            elif mode == 'lstmp':
                # LSTMPCell takes (hidden_size, projection_size, ...);
                # input_size must be passed as a keyword argument
                cell = gluon.contrib.rnn.LSTMPCell(hidden_size, proj_size,
                                                   input_size=input_size)
            elif mode == 'gru':
                cell = rnn.GRUCell(hidden_size, input_size=input_size)
            else:
                # guard against an undefined `cell` on an unknown mode
                raise ValueError('unsupported mode: %s' % mode)

            if var_drop_in + var_drop_state + var_drop_out != 0:
                cell = gluon.contrib.rnn.VariationalDropoutCell(
                    cell, var_drop_in, var_drop_state, var_drop_out)

            if skip_connection:
                cell = rnn.ResidualCell(cell)

            rnn_cell.add(cell)

            if i != num_layers - 1 and dropout != 0:
                rnn_cell.add(rnn.DropoutCell(dropout))

    return rnn_cell
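
A hypothetical call exercising the projected-LSTM branch (values illustrative): note the unrolled outputs take the projection size rather than the hidden size.

import mxnet as mx

cell = _get_rnn_cell(mode='lstmp', num_layers=1, input_size=32,
                     hidden_size=64, dropout=0, var_drop_in=0,
                     var_drop_state=0, var_drop_out=0,
                     skip_connection=False, proj_size=16)
cell.initialize()
x = mx.nd.random.uniform(shape=(4, 10, 32))  # (batch, time, features)
outputs, states = cell.unroll(10, x, layout='NTC', merge_outputs=True)
print(outputs.shape)  # (4, 10, 16): the projected size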
Code Example #8
File: cell.py Project: zhuwentao2020/ST-MetaNet
    def __init__(self, hidden_size, prefix=None):
        super(MyGRUCell, self).__init__(prefix=prefix)
        self.hidden_size = hidden_size
        with self.name_scope():
            self.cell = rnn.GRUCell(self.hidden_size)
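
A minimal sketch (assuming MXNet 1.x) of unrolling such a wrapped GRUCell over a toy sequence:

import mxnet as mx
from mxnet.gluon import rnn

cell = rnn.GRUCell(hidden_size=16)
cell.initialize()
x = mx.nd.random.uniform(shape=(4, 10, 8))  # (batch, time, features)
outputs, states = cell.unroll(10, x, layout='NTC', merge_outputs=True)
print(outputs.shape)  # (4, 10, 16)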