# Example 1
def model_SGD(X, y, verbose):
    """Build and train a deep, hourglass-shaped MLP classifier with SGD.

    The hidden widths run 200-100-80-40-80-100-200, each Dense layer
    followed by BatchNorm, ending in a 10-way softmax head.

    NOTE(review): reads the module-level globals ``learning_rate`` and
    ``epochs`` (defined elsewhere in this file) — TODO confirm they are
    set before this is called.

    Parameters:
        X: training inputs; only ``X.shape[1]`` is used for the input width.
        y: training targets, passed straight through to ``fit``.
        verbose: verbosity flag forwarded to ``fit``.

    Returns:
        The trained ``Sequential`` model.
    """
    nn = Sequential(learning_rate=learning_rate * 0.5, epochs=epochs, batch_size=100,
                    learning_rate_decay=0.95, weight_decay=0.01)

    # Input layer needs the feature count; the remaining widths are symmetric.
    nn.add(Dense(n=200, in_shape=X.shape[1]))
    nn.add(BatchNorm())
    for width in (100, 80, 40, 80, 100, 200):
        nn.add(Dense(n=width))
        nn.add(BatchNorm())
    nn.add(Dense(n=10, activation="softmax"))

    nn.compile(loss="cross_entropy_softmax", optimiser="SGD")

    # Keyword argument for consistency with the other examples in this file.
    nn.fit(X, y, verbose=verbose)

    return nn
# Example 2
learning_rate = 0.001

# Three-hidden-layer MLP (200-100-200) with BatchNorm after each hidden
# Dense layer and a 10-way softmax output, trained with Adam.
nn = Sequential(learning_rate=learning_rate,
                epochs=100,
                batch_size=100,
                learning_rate_decay=0.95,
                weight_decay=0.001)

nn.add(Dense(n=200, in_shape=train.shape[1]))
nn.add(BatchNorm())
for width in (100, 200):
    nn.add(Dense(n=width))
    nn.add(BatchNorm())
nn.add(Dense(n=10, activation="softmax"))
nn.compile(loss="cross_entropy_softmax", optimiser="Adam")

# Shuffle samples and labels with the same permutation.
order = list(range(len(train)))
random.shuffle(order)

train = [train[i] for i in order]
label = [label[i] for i in order]

X = scale_data(train)
y = np.array(onehot_labels(label), dtype=np.float64)

# Hold out everything past the split point as a validation set.
split = 50000
nn.fit(X[:split], y[:split], verbose=True)

print(f'Accuracy is: {accuracy(nn, X[split:], label[split:])}')
from dataset import cifar100
import numpy as np
from full import FullLayer
from softmax import SoftMaxLayer
from cross_entropy import CrossEntropyLayer
from sequential import Sequential
from relu import ReluLayer
import matplotlib.pyplot as plt
# Load the 4-class CIFAR-100 subset with a fixed seed, train a single
# hidden-layer MLP, and report overall test accuracy.
(x_train, y_train), (x_test, y_test) = cifar100(1212149859)

net = Sequential(
    layers=(
        FullLayer(32 * 32 * 3, 256),
        ReluLayer(),
        FullLayer(256, 4),
        SoftMaxLayer(),
    ),
    loss=CrossEntropyLayer(),
)

net.fit(x_train, y_train, epochs=15, lr=0.48, batch_size=128)
pred = net.predict(x_test)
acc = np.mean(pred == y_test)

print(f'Accuracy = {acc:f}')
index_0 = np.where(y_test == 0)[0]
index_1 = np.where(y_test == 1)[0]
index_2 = np.where(y_test == 2)[0]
index_3 = np.where(y_test == 3)[0]
acc0 = np.mean(y_test[index_0] == pred[index_0])
acc1 = np.mean(y_test[index_1] == pred[index_1])
acc2 = np.mean(y_test[index_2] == pred[index_2])
acc3 = np.mean(y_test[index_3] == pred[index_3])
print('class0 accuracy =%f' % acc0)
print('class1 accuracy =%f' % acc1)
print('class2 accuracy =%f' % acc2)
print('class3 accuracy =%f' % acc3)
# Example 4
from conv import ConvLayer
from maxpool import MaxPoolLayer
from flatten import FlattenLayer
from softmax import SoftMaxLayer
from cross_entropy import CrossEntropyLayer
from sequential import Sequential
from relu import ReluLayer
import matplotlib.pyplot as plt
from time import time

# Train a small CNN (two conv/pool stages + fully connected head) on the
# 4-class CIFAR-100 subset, then plot the training-loss curve.
(x_train, y_train), (x_test, y_test) = cifar100(1212149859)
model = Sequential(layers=(ConvLayer(3, 16, 3), ReluLayer(), MaxPoolLayer(),
                           ConvLayer(16, 32, 3), ReluLayer(), MaxPoolLayer(),
                           FlattenLayer(), FullLayer(2048, 4), SoftMaxLayer()),
                   loss=CrossEntropyLayer())
t0 = time()
epo = 15
# fit returns the per-epoch loss history — presumably length epo; verify.
loss = model.fit(x_train, y_train, epochs=epo, lr=0.1, batch_size=128)
space = np.arange(epo)
pred = model.predict(x_test)
# Labels arrive one-hot; collapse to class indices before comparing.
y_test = np.argmax(y_test, axis=1)
acc = np.mean(pred == y_test)
print("done in %0.3fs." % (time() - t0))
# BUG FIX: the original plotted (space, loss) once onto an implicit
# default figure that was never saved, then re-plotted the same data on
# this new figure. Only the saved figure's plot is kept.
plt.figure()
plt.plot(space, loss, label='Accuracy =' + str(acc) + ' with lr = 0.1')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.savefig('loss_plot_new.png')