import d2lzh as d2l
from mxnet import autograd, nd
from mxnet.gluon import loss as gloss
import math, time, numpy as np

# Load the data:
#   corpus_indices - the ~10,000-character lyrics corpus as a list of indices
#   char_to_idx    - maps each character to its index
#   idx_to_char    - maps each index back to its character
#   vocab_size     - number of distinct characters
(corpus_indices, char_to_idx, idx_to_char,
 vocab_size) = d2l.load_data_jay_lyrics()

# One-hot vectors
print(nd.one_hot(nd.array([1, 2]), vocab_size))  # each row has exactly one 1, here at indices 1 and 2


def to_onehot(X, size):
    # X has shape (batch_size, num_steps); iterating over X.T yields one
    # (batch_size, vocab_size) one-hot matrix per time step
    return [nd.one_hot(x, size) for x in X.T]


# Test
X = nd.arange(10).reshape((2, 5))  # batch_size=2, num_steps=5
inputs = to_onehot(X, vocab_size)  # num_steps matrices, each of shape (batch_size, vocab_size)
np.set_printoptions(edgeitems=6)   # print 6 edge items per axis (default is 3)
print(len(inputs), inputs[0])      # 5 matrices, each of shape (2, 1027)
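# Quick sanity check on those shapes (the Jay Chou lyrics vocab has 1027 characters):
assert len(inputs) == X.shape[1] and inputs[0].shape == (X.shape[0], vocab_size)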

################################################# Initialize model parameters #####################################################
num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
ctx = d2l.try_gpu()
print('use', ctx)


def get_param():
    # The snippet was cut off at this def; the body below is a minimal sketch
    # following the standard d2l from-scratch recipe.
    def _one(shape):
        return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)

    W_xh = _one((num_inputs, num_hiddens))   # input -> hidden
    W_hh = _one((num_hiddens, num_hiddens))  # hidden -> hidden
    b_h = nd.zeros(num_hiddens, ctx=ctx)
    W_hq = _one((num_hiddens, num_outputs))  # hidden -> output
    b_q = nd.zeros(num_outputs, ctx=ctx)
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.attach_grad()  # allocate gradient buffers for autograd
    return params
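

# With the parameters in place, the from-scratch model needs an initial hidden
# state and the recurrence H_t = tanh(X_t W_xh + H_{t-1} W_hh + b_h),
# O_t = H_t W_hq + b_q. A minimal sketch, following the same d2l recipe:
def init_rnn_state(batch_size, num_hiddens, ctx):
    # Returned as a tuple so the interface also fits LSTM/GRU-style states.
    return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx),)


def rnn(inputs, state, params):
    # inputs and outputs: num_steps matrices of shape (batch_size, vocab_size)
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        H = nd.tanh(nd.dot(X, W_xh) + nd.dot(H, W_hh) + b_h)
        outputs.append(nd.dot(H, W_hq) + b_q)
    return outputs, (H,)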
################################################# Example #3: concise RNN with Gluon #####################################################
# This example was truncated at both ends when extracted; the imports here and
# the sketches below fill in the missing pieces, assuming the d2lzh recipe.
import math, time
import d2lzh as d2l
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import loss as gloss, nn, rnn
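

# A minimal sketch of the definitions the fragment below relies on, following
# the d2lzh recipe: RNNModel wraps a Gluon rnn.RNN layer plus an output Dense
# layer, and predict_rnn_gluon greedily generates characters from a prefix.
class RNNModel(nn.Block):
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.dense = nn.Dense(vocab_size)

    def forward(self, inputs, state):
        # inputs: (batch_size, num_steps) of char indices; one-hot to the
        # (num_steps, batch_size, vocab_size) layout that rnn.RNN expects.
        X = nd.one_hot(inputs.T, self.vocab_size)
        Y, state = self.rnn(X, state)
        # Collapse (num_steps, batch_size) into one axis before the Dense layer.
        output = self.dense(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, *args, **kwargs):
        return self.rnn.begin_state(*args, **kwargs)


def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx,
                      idx_to_char, char_to_idx):
    # Warm the state up on the prefix, then sample num_chars characters greedily.
    state = model.begin_state(batch_size=1, ctx=ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        X = nd.array([output[-1]], ctx=ctx).reshape((1, 1))
        (Y, state) = model(X, state)
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y.argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])


# The training code below picks up mid-loop; this head is a sketch of the part
# that was cut off, again following d2lzh's train_and_predict_rnn_gluon.
def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
                                corpus_indices, idx_to_char, char_to_idx,
                                num_epochs, num_steps, lr, clipping_theta,
                                batch_size, pred_period, pred_len, prefixes):
    loss = gloss.SoftmaxCrossEntropyLoss()
    model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
    trainer = gluon.Trainer(model.collect_params(), 'sgd',
                            {'learning_rate': lr, 'momentum': 0, 'wd': 0})
    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = d2l.data_iter_consecutive(
            corpus_indices, batch_size, num_steps, ctx)
        state = model.begin_state(batch_size=batch_size, ctx=ctx)
        for X, Y in data_iter:
            for s in state:
                s.detach()  # detach state from the previous batch's graph
            with autograd.record():
                (output, state) = model(X, state)
                y = Y.T.reshape((-1,))  # labels flattened to match `output`
                l = loss(output, y).mean()
            l.backward()
            # Clip gradients to keep backprop through time stable.
            params = [p.data() for p in model.collect_params().values()]
            d2l.grad_clipping(params, clipping_theta, ctx)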
            trainer.step(1)  # the loss is already a mean, so step with batch size 1
            l_sum += l.asscalar() * y.size
            n += y.size
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' %
                  (epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(
                    ' -',
                    predict_rnn_gluon(prefix, pred_len, model, vocab_size, ctx,
                                      idx_to_char, char_to_idx))


if __name__ == "__main__":
    (corpus_indices, char_to_idx, idx_to_char, vocab_size) \
        = d2l.load_data_jay_lyrics()
    num_hiddens = 256
    rnn_layer = rnn.RNN(num_hiddens)
    rnn_layer.initialize()
    batch_size = 2
    state = rnn_layer.begin_state(batch_size=batch_size)
    # print(state[0].shape)
    num_steps = 35
    X = nd.random.uniform(shape=(num_steps, batch_size, vocab_size))
    T, state_new = rnn_layer(X, state)
    # print(T.shape, len(state_new), state_new[0].shape)
    ctx = d2l.try_gpu()
    model = RNNModel(rnn_layer, vocab_size)
    model.initialize(force_reinit=True, ctx=ctx)
    print(
        predict_rnn_gluon('分开', 10, model, vocab_size, ctx, idx_to_char,
                          char_to_idx))
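
    # Typical next step: train and then sample. A sketch assuming the
    # hyperparameters the d2l book uses for this dataset:
    num_epochs, batch_size, lr, clipping_theta = 250, 32, 1e2, 1e-2
    pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
    train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
                                corpus_indices, idx_to_char, char_to_idx,
                                num_epochs, num_steps, lr, clipping_theta,
                                batch_size, pred_period, pred_len, prefixes)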