Example #1
from torch import nn
from d2l import torch as d2l


def init_run_gru(num_epochs, batch_size, num_steps, num_hiddens, lr):
    # Load the time machine dataset and build the vocabulary
    train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
    vocab_size, device = len(vocab), d2l.try_gpu()
    num_inputs = vocab_size
    # Concise GRU: a single nn.GRU layer wrapped in d2l's RNNModel
    gru_layer = nn.GRU(num_inputs, num_hiddens)
    model = d2l.RNNModel(gru_layer, len(vocab))
    model = model.to(device)
    # train_ch8_slim is assumed to be defined elsewhere (a variant of d2l.train_ch8)
    return train_ch8_slim(model, train_iter, vocab, lr, num_epochs, device)
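
A minimal invocation of this helper might look like the following; the hyperparameter values are assumptions borrowed from Example #2, not part of the original snippet, and train_ch8_slim must already exist in scope:

# Hypothetical call with assumed hyperparameters
init_run_gru(num_epochs=500, batch_size=32, num_steps=35,
             num_hiddens=256, lr=1)
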
Example #2
import torch
from torch import nn
from d2l import torch as d2l


def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        # Update gate, reset gate, candidate state, then the new hidden state
        Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
        R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
        H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
        H = Z * H + (1 - Z) * H_tilda
        # Output layer maps the hidden state to vocabulary logits
        Y = H @ W_hq + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)


# Hyperparameters
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1
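
# The scratch model below needs get_params and init_gru_state, which this
# snippet assumes are already defined. A minimal sketch following the d2l
# GRU-from-scratch pattern (the original helpers may differ):
def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        # Small Gaussian initialization
        return torch.randn(size=shape, device=device) * 0.01

    def three():
        # One (input weight, hidden weight, bias) triple per gate
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                torch.zeros(num_hiddens, device=device))

    W_xz, W_hz, b_z = three()  # update gate parameters
    W_xr, W_hr, b_r = three()  # reset gate parameters
    W_xh, W_hh, b_h = three()  # candidate hidden state parameters
    W_hq = normal((num_hiddens, num_outputs))  # output layer weights
    b_q = torch.zeros(num_outputs, device=device)  # output layer bias
    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params


def init_gru_state(batch_size, num_hiddens, device):
    # A GRU carries a single hidden-state tensor per batch
    return (torch.zeros((batch_size, num_hiddens), device=device),)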

model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params,
                            init_gru_state, gru)
print('scratch model')
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)

num_inputs = vocab_size
gru_layer = nn.GRU(num_inputs, num_hiddens)
model = d2l.RNNModel(gru_layer, len(vocab))
model = model.to(device)
print('concise model')
# d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
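
For reference, the scratch loop in gru above computes the standard GRU updates, written here to match the code line by line, where \sigma is the logistic sigmoid and \odot is elementwise multiplication:

Z_t = \sigma(X_t W_{xz} + H_{t-1} W_{hz} + b_z)
R_t = \sigma(X_t W_{xr} + H_{t-1} W_{hr} + b_r)
\tilde{H}_t = \tanh(X_t W_{xh} + (R_t \odot H_{t-1}) W_{hh} + b_h)
H_t = Z_t \odot H_{t-1} + (1 - Z_t) \odot \tilde{H}_t
Y_t = H_t W_{hq} + b_q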
"""