# import the required packages
from pyimagesearch.nn.neuralnetwork import NeuralNetwork


def neuralNetTrainTest(X, y):
    nn = NeuralNetwork([2, 2, 1], alpha=0.5)
    nn.fit(X, y, epochs=20000)
    # now that our network is trained, loop over the XOR data points
    for (x, target) in zip(X, y):
        # make a prediction on the data point and display the result
        # to our console
        pred = nn.predict(x)[0][0]
        step = 1 if pred > 0.5 else 0  # step function
        print(f"[INFO] data={x}, ground-truth={target[0]}, "
              f"pred={pred:.4f}, step={step}")
Example #2
from sklearn import datasets
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

from pyimagesearch.nn.neuralnetwork import NeuralNetwork

print('[INFO] loading MNIST (sample) dataset...')
digits = datasets.load_digits()
data = digits.data.astype('float')
data = (data - data.min()) / (data.max() - data.min())
print('[INFO] samples: {}, dim: {}'.format(data.shape[0], data.shape[1]))

(trainX, testX, trainY, testY) = train_test_split(data,
                                                  digits.target,
                                                  test_size=0.25)
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

print('[INFO] training network...')
nn = NeuralNetwork([trainX.shape[1], 32, 16, 10])
print('[INFO] {}'.format(nn))
nn.fit(trainX, trainY, epochs=1000)

print('[INFO] evaluating network...')
predictions = nn.predict(testX)
predictions = predictions.argmax(axis=1)
print(classification_report(testY.argmax(axis=1), predictions))
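The label handling above is worth unpacking: LabelBinarizer one-hot encodes the integer digit labels (which is why trainY and testY become vectors), and argmax inverts that encoding, which is why both the predictions and testY are argmax'd before classification_report. A small standalone sketch of the round trip:

from sklearn.preprocessing import LabelBinarizer

# one-hot encode a few integer labels, then recover them with argmax
lb = LabelBinarizer()
one_hot = lb.fit_transform([0, 1, 2, 1])
print(one_hot)                  # [[1 0 0] [0 1 0] [0 0 1] [0 1 0]]
print(one_hot.argmax(axis=1))   # [0 1 2 1] -- the original integer labels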
# import the required packages
from pyimagesearch.nn.neuralnetwork import NeuralNetwork
import matplotlib.pyplot as plt
import numpy as np

# define a 2-2-1 neural network: 2 input nodes, a single hidden layer
# with 2 nodes, and 1 output node
nn = NeuralNetwork(layers=[2, 2, 1], alpha=0.5)
print(nn)

# construct the XOR dataset
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# train the network for 20,000 epochs; this fit variant returns the loss
# recorded every 100 epochs, which is plotted below
epoch_losses = nn.fit(X, y, epochs=20000)

# after training network, loop over the XOR data points
for (x, target) in zip(X, y):
    # make a prediction on the data point and display the result
    # to our console
    pred = nn.predict(x)[0][0]
    # apply a step function to binarize output class labels
    step = 1 if pred > 0.5 else 0
    print(
        "[INFO] data = {}, ground-truth = {}, pred = {:.4f}, step = {}".format(
            x, target[0], pred, step))

# construct a figure that plots the loss over time
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 20100, 100), epoch_losses)
plt.title("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.show()
from pyimagesearch.nn.neuralnetwork import NeuralNetwork
import numpy as np

X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

y = np.array([
    [0],
    [1],
    [1],
    [0]
])

print("[INFO] training perceptron...")

nn = NeuralNetwork([2, 2, 1], alpha=0.5)
nn.fit(X, y, epochs=40000)

print("[INFO] testing perceptron...")

for (x, target) in zip(X, y):
    pred = nn.predict(x)[0][0]
    step = 1 if pred > 0.5 else 0

    print("[INFO] data={}, ground-truth={}, pred={:.4f}, step={}".format(x, target[0], pred, step))
# import the required packages
from sklearn import datasets
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

from pyimagesearch.nn.neuralnetwork import NeuralNetwork

# load the MNIST (sample) digits dataset and apply min/max scaling to bring
# the pixel intensities into the range [0, 1]
print("[INFO] loading MNIST (sample) dataset...")
digits = datasets.load_digits()
data = digits.data.astype("float")
data = (data - data.min()) / (data.max() - data.min())

print("[INFO] samples: {}, dim: {}".format(data.shape[0], data.shape[1]))

# construct the training and testing splits
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  digits.target,
                                                  test_size=0.25)

# convert the labels from integers to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

print("[INFO] training network...")

nn = NeuralNetwork([data.shape[1], 4, 10])

print("[INFO] {}".format(nn))

nn.fit(trainX, trainY, epochs=1000)

print("[INFO] evaluating network...")

# predict on the test set; the predictions array has a shape of (450, 10)
# because there are 450 samples in the test set and 10 possible digit classes
# (each value is the predicted probability of the corresponding class)
predictions = nn.predict(testX)

# argmax returns the index of the class with the highest predicted probability
# for each sample; use it to recover integer labels for the report
predictions = predictions.argmax(axis=1)
print(classification_report(testY.argmax(axis=1), predictions))
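As a quick sanity check on the (450, 10) shape noted above: the scikit-learn digits dataset contains 1,797 samples, so a test_size of 0.25 holds out roughly a quarter of them (450 samples, since train_test_split rounds the test split up):

import math

# 1,797 total samples * 0.25 test fraction, rounded up
print(math.ceil(1797 * 0.25))   # 450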
Example #6
# import the required packages
from sklearn import datasets
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

from pyimagesearch.nn.neuralnetwork import NeuralNetwork

# load the MNIST (sample) digits dataset and apply min/max scaling to bring
# the pixel intensities into the range [0, 1]
print("[INFO] loading MNIST (sample) dataset...")
digits = datasets.load_digits()
data = digits.data.astype("float")
data = (data - data.min()) / (data.max() - data.min())

print("[INFO] samples: {}, dim: {}".format(data.shape[0], data.shape[1]))

# construct the training and testing splits
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  digits.target,
                                                  test_size=0.25)

# convert the labels from integers to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

print("[INFO] training network...")

nn = NeuralNetwork([data.shape[1], 32, 16, 10])

print("[INFO] {}".format(nn))

nn.fit(trainX, trainY, epochs=1000)

print("[INFO] evaluating network...")

# predict on the test set; the predictions array has a shape of (450, 10)
# because there are 450 samples in the test set and 10 possible digit classes
# (each value is the predicted probability of the corresponding class)
predictions = nn.predict(testX)

# argmax returns the index of the class with the highest predicted probability
# for each sample; use it to recover integer labels for the report
predictions = predictions.argmax(axis=1)
print(classification_report(testY.argmax(axis=1), predictions))
# neuralNetTrainTest(X, y)

# for MNIST, built into sklearn
# load the MNIST dataset and apply min/max scaling to scale the
# pixel intensity values to the range [0, 1] (each image is
# represented by an 8 x 8 = 64-dim feature vector)
print("[INFO] loading MNIST (sample) dataset...")
digits = datasets.load_digits()
data = digits.data.astype("float")
data = (data - data.min()) / (data.max() - data.min())
print("[INFO] samples: {}, dim: {}".format(data.shape[0],
                                           data.shape[1]))

# construct the training and testing splits
(trainX, testX, trainY, testY) = train_test_split(data, digits.target,
                                                  test_size=0.25)

# convert the labels (the y values) from integers to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

# train the network
print("[INFO] training network...")
nn = NeuralNetwork([trainX.shape[1], 32, 16, 10])  # 10 output digits, hence 10 nodes in the final layer
print("[INFO] {}".format(nn))
nn.fit(trainX, trainY, epochs=1000)

# evaluate the network
print("[INFO] evaluating network...")
predictions = nn.predict(testX)
predictions = predictions.argmax(axis=1)  # select the class with the highest probability for each data point
print(classification_report(testY.argmax(axis=1), predictions))
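A final note on the min/max scaling that every MNIST example above applies: subtracting the minimum and dividing by the range maps the raw pixel intensities (0 to 16 in the scikit-learn digits dataset) into [0, 1]. A small standalone check with made-up pixel values:

import numpy as np

# hypothetical pixel values spanning the digits dataset's 0..16 range
pixels = np.array([0.0, 4.0, 8.0, 16.0])
scaled = (pixels - pixels.min()) / (pixels.max() - pixels.min())
print(scaled)   # [0.   0.25 0.5  1.  ]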