Code example #1
    import functools

    import mxnet as mx
    from mxnet.gluon import HybridBlock, nn
    from mxnet.gluon.rnn import RNNCell

    class RNNDecoder2(HybridBlock):
        def __init__(self, vocab_size, hidden_size, prefix=None, params=None, use_tuple=False):
            super(RNNDecoder2, self).__init__(prefix=prefix, params=params)
            self._vocab_size = vocab_size
            self._use_tuple = use_tuple
            with self.name_scope():
                self._embed = nn.Embedding(input_dim=vocab_size, output_dim=hidden_size)
                self._rnn1 = RNNCell(hidden_size=hidden_size)
                self._rnn2 = RNNCell(hidden_size=hidden_size)
                self._map_to_vocab = nn.Dense(vocab_size)

        def begin_state(self, batch_size):
            ret = [self._rnn1.begin_state(batch_size=batch_size,
                                          func=functools.partial(mx.random.normal, loc=0, scale=1)),
                   self._rnn2.begin_state(batch_size=batch_size,
                                          func=functools.partial(mx.random.normal, loc=0, scale=1))]
            if self._use_tuple:
                return tuple(ret)
            else:
                return ret

        def hybrid_forward(self, F, inputs, states):
            if self._use_tuple:
                states1, states2 = states
            else:
                [states1, states2] = states
            out1, states1 = self._rnn1(self._embed(inputs), states1)
            out2, states2 = self._rnn2(out1, states2)
            log_probs = self._map_to_vocab(out2)  # In real life, we would apply a log_softmax after this.
            if self._use_tuple:
                states = (states1, states2)
            else:
                states = [states1, states2]
            return log_probs, states
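
A minimal, imperative usage sketch for the two-cell decoder above (the batch size, vocabulary size, hidden size, and token ids are illustrative assumptions, and it presumes an MXNet/Gluon version on which the original example runs): it draws the initial states with begin_state and runs one decoding step with the list state layout. Calling hybridize() before the forward pass would additionally exercise the nested-state handling, which may depend on the MXNet version.

    # Hypothetical usage with the list state layout (use_tuple=False).
    import mxnet as mx

    decoder = RNNDecoder2(vocab_size=100, hidden_size=32, use_tuple=False)
    decoder.initialize()

    states = decoder.begin_state(batch_size=4)    # [states of _rnn1, states of _rnn2]
    tokens = mx.nd.array([1, 2, 3, 4])            # one token id per batch element
    log_probs, states = decoder(tokens, states)   # one decoding step
    print(log_probs.shape)                        # (4, 100)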
Code example #2
    class RNNDecoder2(HybridBlock):
        def __init__(self, vocab_num, hidden_size, prefix=None, params=None, use_tuple=False):
            super(RNNDecoder2, self).__init__(prefix=prefix, params=params)
            self._vocab_num = vocab_num
            self._use_tuple = use_tuple
            with self.name_scope():
                self._embed = nn.Embedding(input_dim=vocab_num, output_dim=hidden_size)
                self._rnn1 = RNNCell(hidden_size=hidden_size)
                self._rnn2 = RNNCell(hidden_size=hidden_size)
                self._map_to_vocab = nn.Dense(vocab_num)

        def begin_state(self, batch_size):
            ret = [self._rnn1.begin_state(batch_size=batch_size,
                                          func=functools.partial(mx.random.normal, loc=0, scale=1)),
                   self._rnn2.begin_state(batch_size=batch_size,
                                          func=functools.partial(mx.random.normal, loc=0, scale=1))]
            if self._use_tuple:
                return tuple(ret)
            else:
                return ret

        def hybrid_forward(self, F, inputs, states):
            if self._use_tuple:
                states1, states2 = states
            else:
                [states1, states2] = states
            out1, states1 = self._rnn1(self._embed(inputs), states1)
            out2, states2 = self._rnn2(out1, states2)
            log_probs = self._map_to_vocab(out2)  # In real life, we would apply a log_softmax after this.
            if self._use_tuple:
                states = (states1, states2)
            else:
                states = [states1, states2]
            return log_probs, states
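
This variant is the same decoder with vocab_num in place of vocab_size; the sketch below (again with illustrative sizes and token ids) exercises the tuple state layout instead: with use_tuple=True, begin_state returns a tuple holding each cell's state list, and hybrid_forward unpacks and repacks it as a tuple.

    # Hypothetical usage with the tuple state layout (use_tuple=True).
    import mxnet as mx

    decoder = RNNDecoder2(vocab_num=100, hidden_size=32, use_tuple=True)
    decoder.initialize()

    states = decoder.begin_state(batch_size=4)    # (states of _rnn1, states of _rnn2)
    tokens = mx.nd.array([5, 6, 7, 8])
    log_probs, states = decoder(tokens, states)   # states comes back as a tuple as well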
Code example #3
    import functools

    import mxnet as mx
    from mxnet.gluon import HybridBlock, nn
    from mxnet.gluon.rnn import RNNCell

    class RNNDecoder(HybridBlock):
        def __init__(self, vocab_size, hidden_size, prefix=None, params=None):
            super(RNNDecoder, self).__init__(prefix=prefix, params=params)
            self._vocab_size = vocab_size
            with self.name_scope():
                self._embed = nn.Embedding(input_dim=vocab_size, output_dim=hidden_size)
                self._rnn = RNNCell(hidden_size=hidden_size)
                self._map_to_vocab = nn.Dense(vocab_size)

        def begin_state(self, batch_size):
            return self._rnn.begin_state(batch_size=batch_size,
                                         func=functools.partial(mx.random.normal, loc=0, scale=1))

        def hybrid_forward(self, F, inputs, states):
            out, states = self._rnn(self._embed(inputs), states)
            log_probs = self._map_to_vocab(out)  # In real life, we would apply a log_softmax after this.
            return log_probs, states
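
As a usage illustration for the single-cell RNNDecoder, the hypothetical greedy-decoding loop below (vocabulary size, hidden size, number of steps, and the start token id are assumptions) feeds each step's argmax back in as the next input while carrying the recurrent state between steps.

    # Hypothetical greedy-decoding loop for RNNDecoder.
    import mxnet as mx

    batch_size, num_steps = 2, 5
    decoder = RNNDecoder(vocab_size=50, hidden_size=16)
    decoder.initialize()

    states = decoder.begin_state(batch_size)
    tokens = mx.nd.zeros((batch_size,))           # assume token id 0 is the start-of-sequence symbol
    generated = []
    for _ in range(num_steps):
        log_probs, states = decoder(tokens, states)
        tokens = log_probs.argmax(axis=1)         # greedy choice becomes the next input
        generated.append(tokens)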
Code example #4
    class RNNDecoder(HybridBlock):
        def __init__(self, vocab_num, hidden_size, prefix=None, params=None):
            super(RNNDecoder, self).__init__(prefix=prefix, params=params)
            self._vocab_num = vocab_num
            with self.name_scope():
                self._embed = nn.Embedding(input_dim=vocab_num, output_dim=hidden_size)
                self._rnn = RNNCell(hidden_size=hidden_size)
                self._map_to_vocab = nn.Dense(vocab_num)

        def begin_state(self, batch_size):
            return self._rnn.begin_state(batch_size=batch_size,
                                         func=functools.partial(mx.random.normal, loc=0, scale=1))

        def hybrid_forward(self, F, inputs, states):
            out, states = self._rnn(self._embed(inputs), states)
            log_probs = self._map_to_vocab(out)  # In real life, we would apply a log_softmax after this.
            return log_probs, states