Example #1
def test1():
    print('Test 1')
    # Single-layer network: 2 inputs feeding 1 neuron with a hard-limit activation,
    # i.e. a classic perceptron.
    nn = NeuralNetwork([2, 1], [nnet.hardlim])

    # Four 2-D column-vector samples; the first two belong to class 0, the last two to class 1.
    input_samples = [[[0], [0]], [[0], [2]], [[2], [1]], [[3], [2]]]
    targets = [[0], [0], [1], [1]]

    perceptron.learn(nn, input_samples, targets)
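
The snippet above depends on an external NeuralNetwork/perceptron library. For reference, here is a minimal, self-contained sketch of the same hard-limit perceptron rule in plain NumPy, using the data from Example #1; the function names and update logic are assumptions, not the library's actual implementation:

import numpy as np

def hardlim(x):
    # Hard-limit activation: 1 if the net input is non-negative, else 0.
    return 1 if x >= 0 else 0

def perceptron_learn_sketch(samples, targets, epochs=10):
    # samples: list of column vectors; targets: flat list of 0/1 labels.
    X = np.array(samples).reshape(len(samples), -1)  # flatten column vectors to rows
    t = np.array(targets).reshape(-1)
    w = np.zeros(X.shape[1])  # weights
    b = 0.0                   # bias
    for _ in range(epochs):
        for x, target in zip(X, t):
            e = target - hardlim(np.dot(w, x) + b)  # perceptron error: -1, 0, or +1
            w += e * x                              # classic perceptron update
            b += e
    return w, b

w, b = perceptron_learn_sketch([[[0], [0]], [[0], [2]], [[2], [1]], [[3], [2]]],
                               [0, 0, 1, 1])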
Example #2
def test2():
    print('Test 2')
    # 9 inputs feeding 2 output neurons with log-sigmoid activations.
    nn = NeuralNetwork([9, 2], [nnet.logsigmoid])

    # Each sample is a 9-element column vector, i.e. a flattened 3x3 binary pattern
    # (there are 32 samples in total).
    input_samples = [
        [[0], [0], [0], [0], [1], [1], [0], [1], [0]],
        [[0], [0], [0], [0], [1], [1], [0], [1], [1]],
        [[0], [0], [0], [1], [1], [0], [0], [1], [0]],
        [[0], [0], [0], [1], [1], [0], [1], [1], [0]],
        [[0], [1], [0], [0], [1], [1], [0], [0], [0]],
        [[0], [1], [1], [0], [1], [1], [0], [0], [0]],
        [[0], [1], [0], [1], [1], [0], [0], [0], [0]],
        [[1], [1], [0], [1], [1], [0], [0], [0], [0]],
        [[1], [1], [1], [1], [1], [1], [1], [1], [1]],
        [[0], [0], [0], [0], [0], [0], [0], [0], [0]],
        [[1], [0], [0], [1], [0], [0], [1], [0], [0]],
        [[1], [0], [1], [1], [0], [1], [1], [0], [1]],
        [[1], [1], [1], [0], [0], [0], [1], [1], [1]],
        [[0], [0], [0], [0], [0], [0], [1], [1], [1]],
        [[1], [1], [1], [0], [0], [0], [0], [0], [0]],
        [[1], [1], [1], [1], [1], [1], [1], [1], [1]],
        [[1], [1], [1], [1], [0], [1], [1], [1], [1]],
        [[0], [1], [0], [0], [1], [0], [0], [1], [0]],
        [[0], [1], [0], [1], [1], [1], [0], [1], [0]],
        [[0], [0], [0], [1], [1], [1], [0], [0], [0]],
        [[0], [1], [0], [0], [1], [1], [0], [1], [0]],
        [[0], [1], [0], [1], [1], [0], [0], [1], [0]],
        [[0], [0], [0], [1], [1], [1], [0], [1], [0]],
        [[0], [1], [0], [1], [1], [1], [0], [0], [0]],
        [[1], [0], [1], [0], [1], [1], [1], [1], [0]],
        [[0], [0], [1], [0], [1], [1], [1], [1], [1]],
        [[1], [0], [0], [1], [1], [0], [0], [1], [1]],
        [[1], [0], [0], [1], [1], [0], [1], [1], [1]],
        [[1], [1], [0], [0], [1], [1], [0], [0], [1]],
        [[1], [1], [1], [0], [1], [1], [0], [0], [1]],
        [[0], [1], [1], [1], [1], [0], [1], [0], [0]],
        [[1], [1], [1], [1], [1], [0], [1], [0], [0]],
    ]

    # One-hot class codes: [[1], [0]] for the first nine patterns, [[0], [1]] for the rest.
    targets = [[[1], [0]], [[1], [0]], [[1], [0]], [[1], [0]], [[1], [0]],
               [[1], [0]], [[1], [0]], [[1], [0]], [[1], [0]], [[0], [1]],
               [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]],
               [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]],
               [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]],
               [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]], [[0], [1]],
               [[0], [1]], [[0], [1]]]

    perceptron.learn(nn, input_samples, targets)

    weights = nn.get_weights()
    print('Weights: ' + str(weights))
    for w in weights:
        for i in w:
            print(i)
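
To see what the 32 samples above encode, each 9-element column vector can be rendered as a 3x3 binary grid (assuming row-major order). The helper below is hypothetical - it is not part of the snippet's library - but it consumes the sample data exactly as written:

def show_pattern(sample):
    # Render one 9-element column-vector sample as a 3x3 grid, row-major.
    bits = [v[0] for v in sample]
    for row in range(3):
        print(''.join('#' if bits[3 * row + col] else '.' for col in range(3)))

show_pattern([[0], [0], [0], [0], [1], [1], [0], [1], [0]])
# prints:
# ...
# .##
# .#.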
Example #3
def learn(X, Y):
    assert len(X) == len(Y)

    # Split the data into training, cross-validation, and test partitions according
    # to the module-level train_rate and crossval_rate fractions.
    trainSize = int(train_rate * len(X))
    crossvalSize = int(crossval_rate * len(X))
    testSize = len(X) - trainSize - crossvalSize

    trainX, trainY = X[:trainSize], Y[:trainSize]
    crossvalX, crossvalY = X[trainSize: trainSize + crossvalSize], Y[trainSize: trainSize + crossvalSize]
    testX, testY = X[-testSize:], Y[-testSize:]

    # Train classifiers for 1..max_iters iterations and measure each one's error on
    # both the training and the cross-validation partitions.
    check_iters = range(1, max_iters + 1)
    classifiers = perceptron.learnAll(trainX, trainY, max_iters)
    trainErrors = [error(trainX, trainY, classifier) for classifier in classifiers]
    crossvalErrors = [error(crossvalX, crossvalY, classifier) for classifier in classifiers]

    plotErrors(check_iters, trainErrors, crossvalErrors)

    # Pick the iteration count with the lowest cross-validation error (ties broken in
    # favour of fewer iterations) and retrain on the training partition.
    bestError, bestIters = min(zip(crossvalErrors, check_iters))
    bestClassifier = perceptron.learn(trainX, trainY, bestIters)

    return bestClassifier, bestIters, error(testX, testY, bestClassifier), f1score(testX, testY, bestClassifier)
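
Example #3 calls error and f1score helpers that the snippet does not show. A plausible minimal sketch, assuming each classifier is callable on a single sample and that labels are 0/1:

def error(X, Y, classifier):
    # Misclassification rate over the given samples (assumed behaviour).
    return sum(1 for x, y in zip(X, Y) if classifier(x) != y) / len(X)

def f1score(X, Y, classifier):
    # F1 score for the positive class (assumed behaviour).
    preds = [classifier(x) for x in X]
    tp = sum(1 for p, y in zip(preds, Y) if p == 1 and y == 1)
    fp = sum(1 for p, y in zip(preds, Y) if p == 1 and y == 0)
    fn = sum(1 for p, y in zip(preds, Y) if p == 0 and y == 1)
    if tp == 0:
        return 0.0
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)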
Example #4
    # In this snippet `n` is the NumPy module and `p` the perceptron module.
    # Draw a random "true" weight vector with components in [-10, 10).
    w = n.random.rand(dimensions) * 20 - 10
    data = n.zeros(samples,
                   dtype=[('data', 'f8', (dimensions,)), ('res', 'f8'),
                          ('names', 'a4')])
    data['data'] = n.random.rand(samples, dimensions) * 20 - 10
    print("The original weight vector: {0}".format(w))

    # Label each point by which side of the true hyperplane it falls on.  NumPy arrays
    # are fixed-size, so points that land exactly on the hyperplane are collected and
    # removed in one pass after the loop.
    on_hyperplane = []
    for i in range(samples):
        activation = n.dot(w, data['data'][i])
        if activation > 0:
            data['res'][i] = 5
        elif activation < 0:
            data['res'][i] = -5
        else:
            on_hyperplane.append(i)
    data = n.delete(data, on_hyperplane)

    res = p.learn(data)
    w = res[0]
    error = 0

    print("The learned vector: {0}".format(w))

    for i in range(len(data)):
        if (data['res'][i] > 0 and n.dot(data['data'][i], w) < 0
                or data['res'][i] < 0 and n.dot(data['data'][i], w) > 0):
            error += 1
    print("The number of wrong classifications: {0}\n"
          "The number of adjustments made to the vector: {1}\n"
          "The total number of test iterations: {2}"
          .format(error, res[1], res[2]))

    for i in range(len(data)):
        if n.dot(data['data'][i], w) < 0:
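
The snippet unpacks p.learn's result as (learned weights, number of adjustments, iterations run). A hypothetical stand-in with that return shape, inferred only from how res is used above:

import numpy as np

def learn_sketch(data, max_iters=1000):
    # Train a perceptron on the structured array built above; labels are +5 / -5.
    w = np.zeros(data['data'].shape[1])
    adjustments = 0
    for iteration in range(1, max_iters + 1):
        mistakes = 0
        for x, label in zip(data['data'], data['res']):
            if label * np.dot(w, x) <= 0:  # misclassified (or on the boundary)
                w += np.sign(label) * x    # nudge w toward the label's side
                adjustments += 1
                mistakes += 1
        if mistakes == 0:                  # converged: a full clean pass
            return w, adjustments, iteration
    return w, adjustments, max_iters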
Example #5
# Initialise the weight vector to a random state using a normal distribution centered at 0 with a
# standard deviation of 0.03.  The weight vector format is [w_1 ... w_M w_0] - the last entry is
# the bias (here M = 25 attributes, so 26 entries in total).
w = np.random.randn(26) * 0.03

# Read the shape of the input matrix to determine the number of input samples.
num_points, _ = x.shape

# Train the perceptron for a maximum of 1000 epochs.
maxEpochs = 1000
for i in range(maxEpochs):
    # Get the new weights after exposing the perceptron to all inputs.  The expected weights are in
    # format [w_1 ... w_M w_0], where M is the number of attributes of a given input.  The returned,
    # updated weight vector is in the same format.  The learning rate is specified by the alpha
    # parameter - you can change it to something else, but it should be something < 1.
    w = perceptron.learn(input=x, true_output=y, parameters=w, alpha=0.001)

    # Show the perceptron as a 2D visualisation, with the region that corresponds to the
    # perceptron's output of 1 shaded in blue.
    perceptron.show(input=x, output=y, parameters=w)

    # Compute the output (activation) of the perceptron.
    yhat = perceptron.hypothesis(input=x, parameters=w)

    # Count the number of errors it makes
    nErrors = 0
    for n in range(num_points):
        if yhat[n] != y[n]:
            nErrors += 1

    print("Epoch %d...%d errors." % (i+1, nErrors))
Example #6
                       [green_bin[4] * 1.0], [green_bin[5] * 1.0],
                       [green_bin[6] * 1.0], [green_bin[7] * 1.0],
                       [blue_bin[0] * 1.0], [blue_bin[1] * 1.0],
                       [blue_bin[2] * 1.0], [blue_bin[3] * 1.0],
                       [blue_bin[4] * 1.0], [blue_bin[5] * 1.0],
                       [blue_bin[6] * 1.0], [blue_bin[7] * 1.0]]

        training_data.append(input_data)
        targets.append(target_data)
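
The green_bin and blue_bin sequences consumed above presumably hold the 8 bits of each 8-bit colour channel. A hypothetical sketch of that encoding, assuming most-significant-bit-first order:

def channel_bits(value):
    # Unpack an 8-bit channel value (0-255) into 8 bits, MSB first.
    return [(value >> (7 - k)) & 1 for k in range(8)]

channel_bits(200)  # -> [1, 1, 0, 0, 1, 0, 0, 0]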

# 6 inputs feeding 24 output neurons with linear (purelin) activations - presumably one
# output per bit of the three 8-bit colour channels encoded in the targets above.
nn = NeuralNetwork([6, 24], [nnet.purelin])

# input_samples = [[[0], [0]], [[0], [2]], [[2], [1]], [[3], [2]]]
# targets = [[0], [0], [1], [1]]

perceptron.learn(nn, training_data, targets, epoches=2)

print(str(nn.layers[0].weights))
print(str(nn.layers[0].bias))

mse = 0

image = cv2.imread(target_image)
rows, cols, _ = image.shape  # size of the target image

new_image = np.zeros((rows, cols, 3))

# Rebuild the image pixel by pixel: each network input encodes the normalised pixel
# position together with sinusoidal features of it.
for i in range(rows):
    for j in range(cols):
        input_data = [[i * 1.0 / rows], [j * 1.0 / cols],
                      [np.sin(20 * 3.14 * i * 1.0 / rows)],