Example #1
import numpy as np
import matplotlib.pyplot as plt

# NN is this repository's own neural-network class; import it from the
# module where it is defined.

# `dataset` is the raw watermelon table loaded earlier (header row included)
dataset = np.array(dataset)

# drop the header row, the id column and the label column to get the features
xs = dataset[1:, 1:-1].astype(np.float64)
ys = (dataset[1:, -1] == '是').astype(np.int32)  # '是' ("yes") marks positives

# train a neural network on the watermelon dataset
nn = NN([xs.shape[1], 16, len(set(ys))], ["sigmoid", "softmax"],
        lr_init=0.1,
        regularization=None)
for batch_idx in range(50000):
    nn.train(xs, ys)
    if batch_idx % 100 == 0:
        print("Loss = %.4f" % nn.loss)

# calculate accuracy
preds = nn.forward(xs)
preds = np.argmax(preds, axis=-1)
print("Accuracy: %.4f" % np.mean(preds == ys))

# plot data
positive_xs = xs[ys == 1]
negative_xs = xs[ys == 0]
plt.scatter(positive_xs[:, 0],
            positive_xs[:, 1],
            c='#00CED1',
            s=60,
            label='Great (positive)')
plt.scatter(negative_xs[:, 0],
            negative_xs[:, 1],
            c='#DC143C',
            s=60,
            label='Bad (negative)')
plt.legend()
plt.show()
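
All three snippets assume the same NN class defined elsewhere in this repository. As a reading aid, here is a minimal sketch of the interface they rely on; the constructor arguments are taken from the calls in the examples, while the method behaviour is inferred and may differ from the real implementation.

class NN:
    """Sketch only: signatures inferred from the example calls, not verified."""
    def __init__(self, layer_sizes, activations,
                 lr_init=0.1, lr_decay=None, lr_min=None,
                 regularization=None, regularization_lambda=0.0): ...
    def train(self, xs, ys): ...   # one gradient step; stores the loss in self.loss
    def forward(self, xs): ...     # forward pass; returns class probabilities
    def reset(self): ...           # re-initialize all weights
    def lr_update(self): ...       # decay the current learning rate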
Example #2
import time
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

# train_xs/train_ys, test_xs/test_ys and epochs are prepared earlier in the
# script (see the split sketch after this example).

# Standard BP: update the weights after every single training sample
print("### Standard BP ###")
nn = NN([xs.shape[1], 8, len(set(ys))], ["relu", "softmax"],
        lr_init=0.05,
        regularization="L2")
stdBP_loss = []
start = time.time()
for epoch in tqdm(range(epochs)):
    this_epoch_losses = []
    for sample_xs, sample_ys in zip(train_xs, train_ys):
        nn.train(sample_xs.reshape(1, -1), sample_ys.reshape(-1))
        this_epoch_losses.append(nn.loss)
    stdBP_loss.append(np.mean(this_epoch_losses))
end = time.time()
stdBP_time = end - start
stdBP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# Accumulated BP: one update per epoch, computed over the whole training set
print("\n### Accumulated BP ###")
nn.reset()
acmlBP_loss = []
start = time.time()
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    acmlBP_loss.append(nn.loss)
end = time.time()
acmlBP_time = end - start
acmlBP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# plot the loss curves of the two training schemes
plt.figure()
plt.plot(stdBP_loss, label='Standard BP')
plt.plot(acmlBP_loss, label='Accumulated BP')
plt.xlabel('epoch')
plt.ylabel('mean loss')
plt.legend()
plt.show()
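
This snippet takes train_xs/train_ys, test_xs/test_ys and epochs as given. A minimal way to produce such a split, assuming xs and ys as in Example #1 (a sketch, not the repository's actual preprocessing):

import numpy as np

rng = np.random.default_rng(0)        # hypothetical seed for reproducibility
idx = rng.permutation(len(xs))        # shuffle sample indices
split = int(0.8 * len(xs))            # hypothetical 80/20 train/test split
train_xs, test_xs = xs[idx[:split]], xs[idx[split:]]
train_ys, test_ys = ys[idx[:split]], ys[idx[split:]]
epochs = 100                          # hypothetical value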
Example #3
import numpy as np
from tqdm import tqdm

# positive_ys/negative_ys and the slice sizes pslide/nslide come from earlier
# in the script; the last samples of each class form the test set
test_ys = np.concatenate(
    (positive_ys[-2 * pslide:], negative_ys[-2 * nslide:]))

epochs = 100

# constant lr
print("### Constant Learning Rate ###")
nn = NN([xs.shape[1], 64, 64, len(set(ys))], ["relu", "relu", "softmax"],
        lr_init=0.01,
        regularization="L2",
        regularization_lambda=0.1)
lr_const_BP_loss = []
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    lr_const_BP_loss.append(nn.loss)
lr_const_BP_acc = np.mean(np.argmax(nn.forward(test_xs), axis=-1) == test_ys)

# exponential decay lr
print("\n### Exponential Decay Learning Rate ###")
nn = NN([xs.shape[1], 64, 64, len(set(ys))], ["relu", "relu", "softmax"],
        lr_init=0.01,
        lr_decay=0.99,
        lr_min=0.0001,
        regularization="L2",
        regularization_lambda=0.1)
lr_decay_BP_loss = []
for epoch in tqdm(range(epochs)):
    nn.train(train_xs, train_ys)
    lr_decay_BP_loss.append(nn.loss)
    # learning rate decay
    nn.lr_update()
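
nn.lr_update() is invoked once per epoch. Given the lr_init, lr_decay and lr_min constructor arguments, the update is presumably a multiplicative decay clamped at a floor; a sketch of that rule (an assumption about the class internals, not verified code):

def lr_update(self):
    # shrink the learning rate by the decay factor, but never below lr_min;
    # attribute names (self.lr, self.lr_decay, self.lr_min) are assumed
    self.lr = max(self.lr * self.lr_decay, self.lr_min)

With lr_init=0.01, lr_decay=0.99 and lr_min=0.0001 this gives lr = max(0.01 * 0.99**t, 0.0001) after t epochs.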