Example #1
import scipy.io

import miniml

# load data
data = scipy.io.loadmat('../datasets/regularization.mat')
X = data['X']
Y = data['y']

# init model
model = miniml.Model()
model.dense(20, 'relu', 'xavier')
model.dense(3, 'relu', 'xavier')
model.dense(1, 'sigmoid', 'xavier')

# init params
rate = 0.3
epochs = 30000
lamb = 0.7

# train model
optimizer = miniml.GradDescent(cost='bce',
                               epochs=epochs,
                               init_seed=3,
                               dropout_seed=1,
                               store=1000,
                               verbose=10000)

costs = optimizer.train(model, X, Y, rate, lamb=lamb)

# evaluate and plot results
miniml.print_accuracy(model, X, Y)
miniml.plot_costs(epochs, costs=costs)
miniml.plot_boundaries(model, X, Y)
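
The lamb argument passed to optimizer.train above is presumably a standard L2 penalty strength (the dataset name regularization.mat points the same way). Under that assumption, a minimal NumPy sketch of an L2-regularized binary cross-entropy cost, not miniml's actual code:

import numpy as np

def l2_regularized_bce(A, Y, weights, lamb):
    # A: predicted probabilities (m, 1); Y: labels (m, 1);
    # weights: list of layer weight matrices; lamb: L2 strength.
    m = Y.shape[0]
    eps = 1e-8  # guard against log(0)
    bce = -np.mean(Y * np.log(A + eps) + (1 - Y) * np.log(1 - A + eps))
    l2 = (lamb / (2 * m)) * sum(np.sum(W ** 2) for W in weights)
    return bce + l2

Larger lamb values shrink the weights and smooth the decision boundary, which is what plot_boundaries should make visible.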
Example #2
import miniml

# shuffle data (X and Y are assumed to hold Iris features and integer class labels)
X, Y = miniml.shuffle_data(X, Y, seed=48)

# make the target label virginica = 1 and the rest 0
Y = (Y == 2).astype('int')

# create model
model = miniml.Model()
model.dense(5, 'sigmoid', 'xavier')
model.dense(3, 'sigmoid', 'xavier')
model.dense(1, 'sigmoid', 'xavier')

# init params
rate = 1
epochs = 5000

# train model
# note: the original implementation does not average across examples in LinearLayer.backward(...)
optimizer = miniml.GradDescent(cost='bce',
                               epochs=epochs,
                               init_seed=48,
                               verbose=1000)

costs = optimizer.train(model, X, Y, rate)

# evaluate and plot results
miniml.print_accuracy(model, X, Y)
miniml.plot_costs(epochs, costs=costs)
miniml.plot_boundaries(model, X, Y)
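
The (Y == 2) line above is the usual one-vs-rest reduction: class 2 (virginica) becomes the positive label and every other class becomes 0. A standalone check of that idiom:

import numpy as np

y = np.array([0, 1, 2, 2, 0])      # hypothetical Iris-style labels
print((y == 2).astype('int'))      # [0 0 1 1 0] -> virginica vs. the rest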
Example #3
import numpy as np

import miniml

# init data (the X array is truncated in the source; it should hold 20
# two-feature rows, one per label in y below)
X = np.array([...,                # ... truncated rows ...
              [-1.0, -1.4]])

y = np.array([0, 0, 1, 0, 2, 1, 1, 1, 1, 0, 0, 2, 2, 2, 1, 0, 1, 2, 2, 2])

# convert to one-hot
Y, cats = miniml.to_categorical(y)
C = len(cats)

# create model
model = miniml.Model()
# model.dense(32, 'relu', 'he')
model.dense(C, 'softmax', 'plain')

# init params
rate = 2
epochs = 40

# train model
optimizer = miniml.GradDescent(cost='ce',
                               epochs=epochs,
                               init_seed=48,
                               store=1,
                               verbose=10)

costs = optimizer.train(model, X, Y, rate)

# evaluate and plot results
miniml.print_accuracy(model, X, Y)
miniml.plot_costs(epochs, costs=costs)
miniml.plot_boundaries(model, X, Y)
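
miniml.to_categorical is not shown in this example, but judging from the call above it returns a one-hot matrix together with the list of categories. A minimal NumPy sketch of that behavior (an assumption, not miniml's actual implementation):

import numpy as np

def to_one_hot(y):
    cats = np.unique(y)                               # sorted distinct labels
    Y = (y[:, None] == cats[None, :]).astype(float)   # (m, C) indicator matrix
    return Y, cats

y = np.array([0, 2, 1, 0])
Y, cats = to_one_hot(y)
print(Y)   # each row is a one-hot vector; columns follow cats = [0 1 2]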
Example #4
# Adapted from DeepLearning.AI

import numpy as np
import sklearn.datasets

import miniml

# load data
np.random.seed(1)
X, Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)
Y = Y.reshape((len(Y), 1))

# init model
model = miniml.Model()
model.dense(10, 'relu', 'he')
model.dense(5, 'relu', 'he')
model.dense(1, 'sigmoid', 'he')

# init params
rate = 0.01
epochs = 15000

# train model
optimizer = miniml.GradDescent(cost='bce',
                               epochs=epochs,
                               init_seed=3,
                               store=1000)

costs = optimizer.train(model, X, Y, rate=rate)

# evaluate and plot results
miniml.print_accuracy(model, X, Y)
miniml.plot_costs(epochs, costs=costs)
miniml.plot_boundaries(model, X, Y)
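
The 'he' initializer requested for every layer here usually denotes He initialization, which draws Gaussian weights scaled by sqrt(2 / n_in) so that ReLU activations keep a stable variance. A sketch under that common convention (miniml's exact shapes and RNG handling may differ):

import numpy as np

def he_init(n_in, n_out, seed=3):
    rng = np.random.default_rng(seed)
    W = rng.standard_normal((n_in, n_out)) * np.sqrt(2.0 / n_in)  # He scaling
    b = np.zeros((1, n_out))                                      # biases start at zero
    return W, b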
Example #5
import numpy as np

import miniml

# Adapted from:
# https://github.com/RafayAK/NothingButNumPy/blob/master/Understanding_and_Creating_NNs/3_layer_toy_network_XOR.ipynb

# init data
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

Y = np.array([[0], [1], [1], [0]])

# create model
model = miniml.Model()
model.dense(5, 'sigmoid', 'xavier')
model.dense(3, 'sigmoid', 'xavier')
model.dense(1, 'sigmoid', 'xavier')

# init params
rate = 1
epochs = 20000

# train model
# note: the original implementation does not average across examples in LinearLayer.backward(...)
optimizer = miniml.GradDescent(cost='mse', epochs=epochs, init_seed=48)

costs = optimizer.train(model, X, Y, rate)

# evaluate and plot results
miniml.print_accuracy(model, X, Y)
miniml.plot_costs(epochs, costs=costs)
miniml.plot_boundaries(model, X, Y)
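
With cost='mse' the optimizer presumably minimizes the mean squared error between the sigmoid outputs and the XOR targets. A minimal sketch of that cost, not miniml's actual implementation:

import numpy as np

def mse_cost(A, Y):
    # A: predictions, Y: targets, both (m, 1)
    return np.mean((A - Y) ** 2)

# a perfect XOR prediction yields zero cost:
print(mse_cost(np.array([[0], [1], [1], [0]]), np.array([[0], [1], [1], [0]])))  # 0.0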