        # Create an empty list to store all errors of one test run and calculate the mean error
        # and standard deviation at the end.
        errorlist = []

        # Test the network for 500 angle combinations
        for i in range(500):
            # An array of three angles as input. Depending on the degrees of freedom, one, two
            # or three of the angles are chosen randomly (here the first joint angle is fixed at 0).
            testangles = np.asarray(
                [0, random.choice(angles),
                 random.choice(angles)])
            # Scale the input angles to the range 0.01 to 1.00.
            testinput = (((testangles + 180) / 360) * 0.99) + 0.01

            # Calculate the network's answer for the given input.
            testoutput = net.query(testinput)
            # Rescale the outputs, which are numbers between 0.2 and 0.8, so they match the real answer.
            testguess = (((testoutput - 0.2) / 0.6) * 4) - 2
            testguess[2] = (((testoutput[2] - 0.2) / 0.6) * 4) - 1
            # Calculate the real result for the randomly chosen input with homogeneous transformation matrices.
            testresult = manipulator.calculate(testangles[0], testangles[1],
                                               testangles[2])[:3]

            # Calculate the Euclidean distance between the result of the neural network and
            # the result of the homogeneous transformation matrices.
            error = distance.euclidean(testguess, testresult)
            # Save the errors in an array.
            errorlist.append(error)

            # Calculate the standard deviation for the errors of the network.
            deviation = np.std(errorlist)
        # Test the networks for 500 angle combinations
        for i in range(500):

            # Depending on the degrees of freedom, one, two or three angles are drawn randomly from
            # the list containing all integer numbers from -180 to 180 (here two, for two degrees of freedom).
            testangles = random.sample(list(angles), k=2)
            # Create the input array with value 0.01 for each element.
            testinput = np.zeros(inodes_x) + 0.01
            # Set the value of three indices to 1.0. Those indices result from the following:
            # index1 = angle1 + 180 ; index2 = angle2 + 541 ; index3 = angle3 + 902
            testinput[180] = 1.0
            testinput[testangles[0] + 541] = 1.0
            testinput[testangles[1] + 902] = 1.0

            # Calculate the networks' answers for the given input.
            output_x = net_x.query(testinput)
            output_y = net_y.query(testinput)
            output_z = net_z.query(testinput)
            # The values compared to the real result are the indices of each net's output array
            # holding the largest value. Rescale those indices, which are numbers between
            # 0 and 400, so they match the real answer.
            x_result = (np.argmax(output_x) - 200) / 100
            y_result = (np.argmax(output_y) - 200) / 100
            z_result = (np.argmax(output_z) - 100) / 100
            # Calculate the real result for the randomly chosen input with homogeneous transformation matrices.
            realresult = manipulator.calculate(0, testangles[0], testangles[1])

            # Calculate the Euclidean distance between the result of the neural networks and
            # the result of the homogeneous transformation matrices.
            error = distance.euclidean(realresult[:3],
                                       [x_result, y_result, z_result])
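
# The loop above encodes the joint angles into a single input vector with one "hot" node per
# joint (indices angle1 + 180, angle2 + 541, angle3 + 902) and decodes each net's prediction
# by taking the argmax of its output. A minimal sketch of that round trip, assuming 361 input
# nodes per joint (integer angles from -180 to 180) and a dummy output vector standing in for
# a trained net:
import numpy as np

inodes_x = 3 * 361                           # assumed total input size: 1083 nodes

angle1, angle2, angle3 = 0, 30, -45          # example joint angles in degrees

# Encode: background value 0.01, one hot node per joint.
testinput = np.zeros(inodes_x) + 0.01
testinput[angle1 + 180] = 1.0
testinput[angle2 + 541] = 1.0
testinput[angle3 + 902] = 1.0

# Decode: a dummy output vector takes the place of net_x.query(testinput).
dummy_output_x = np.zeros(401)
dummy_output_x[275] = 0.99                   # pretend node 275 fires strongest
x_result = (np.argmax(dummy_output_x) - 200) / 100
print(x_result)                              # -> 0.75
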
    # Test the network after every 'test' training runs.
    if e % test == 0:

        # Create empty lists for each test to plot the function approximated by the network
        # and to calculate the mean error and standard deviation.
        outputarray = []
        errorlist = []

        # Test the network for 300 input values between 0 and 1.
        input = np.linspace(0, 1, num=300)
        for i in range(len(input)):
            # Create the input array the same way as in training.
            inputlist = np.zeros(inodes)
            inputlist[int(input[i] * 100)] = 1.00
            # Ask the network for the result, being an array similar to the target array.
            result = net.query(inputlist)
            # The value compared to the result of the function is the index of the network's
            # output array holding the largest value, divided by 100.
            output = (np.argmax(result)) / 100
            # Save the outputs in an array.
            outputarray.append(output)
            # Calculate the absolute difference between network and function output.
            error = abs(output - (3 * input[i] * (1 - input[i])))
            # Save the errors in an array.
            errorlist.append(error)
            # Calculate the standard deviation for the errors of the network.
            d = np.std(errorlist)
            # Calculate the mean error.
            m = np.mean(errorlist)
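
# In the function approximation tests above, the network is queried on 300 points of
# f(x) = 3 * x * (1 - x), and the mean error and standard deviation are recomputed on every
# iteration although only the final values are used. A small sketch of the same bookkeeping
# done once after the loop, with a hypothetical predict(x) standing in for the query-and-decode
# step:
import numpy as np

def predict(x):
    # Hypothetical stand-in for net.query() plus decoding; it returns the true curve here,
    # so every error below comes out as zero.
    return 3 * x * (1 - x)

inputs = np.linspace(0, 1, num=300)
errors = [abs(predict(x) - 3 * x * (1 - x)) for x in inputs]

# Mean error and standard deviation, computed once over the whole test run.
m = np.mean(errors)
d = np.std(errors)
print(m, d)
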
    # Train the neural network.
    net.train(x, target)

    # Test the network after every 'test' training runs.
    if e % test == 0:

        # Create empty lists for each test to plot the function approximated by the network
        # and to calculate the mean error and standard deviation.
        outputarray = []
        errorlist = []

        # Test the network for 300 input values between 0 and 1.
        input = np.linspace(0, 1, num=300)
        for i in range(len(input)):
            # Subtract 0.1 from the output to compensate for the offset previously added to the target.
            output = float(net.query(input[i])) - 0.1
            # Save the outputs in an array.
            outputarray.append(output)
            # Calculate the absolute difference between network and function output.
            error = abs(output - (3 * input[i] * (1 - input[i])))
            # Save the errors in an array.
            errorlist.append(error)
            # Calculate the standard deviation for the errors of the network.
            d = np.std(errorlist)
            # Calculate the mean error.
            m = np.mean(errorlist)

        # Save the mean error and the standard deviation of the whole test run in arrays.
        deviationlist.append(d)
        meanerrorlist.append(m)
    # Test the network after every 'test' training runs.
    if e % test == 0:

        # Create empty lists for each test to plot the function approximated by the network
        # and to calculate the mean error and standard deviation.
        outputarray = []
        errorlist = []

        # Test the network for 300 input values between 0 and 1.
        input = np.linspace(0, 1, num=300)
        for i in range(len(input)):
            # Create the input array the same way as in training.
            inputlist = np.zeros(inodes) + 0.01
            inputlist[int(input[i] * 100)] = 1.00
            # Subtract 0.1 from the output to compensate for the offset previously added to the target.
            output = float(net.query(inputlist)) - 0.1
            # Save the outputs in an array.
            outputarray.append(output)
            # Calculate the absolute difference between network and function output.
            error = abs(output - (3 * input[i] * (1 - input[i])))
            # Save the errors in an array.
            errorlist.append(error)
            # Calculate the standard deviation for the errors of the network.
            d = np.std(errorlist)
            # Calculate the mean error.
            m = np.mean(errorlist)

        # Save the mean error and the standard deviation of the whole test run in arrays.
        deviationlist.append(d)
        meanerrorlist.append(m)
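
# The test loops above map a continuous input x in [0, 1] to a single input node via
# int(x * 100), so the input layer needs at least 101 nodes. A short sketch of that encoding,
# with inodes = 101 as an assumption:
import numpy as np

inodes = 101                      # assumed: indices int(x * 100) run from 0 to 100

def encode(x):
    # Same scheme as in the test loops: background 0.01, one hot node at int(x * 100).
    inputlist = np.zeros(inodes) + 0.01
    inputlist[int(x * 100)] = 1.00
    return inputlist

print(np.argmax(encode(0.42)))    # -> 42
print(np.argmax(encode(1.0)))     # -> 100
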
    # Train the neural network.
    net.train(x, targetlist)

    # Test the network after every 'test' training runs.
    if e % test == 0:

        # Create empty lists for each test to plot the function approximated by the network
        # and to calculate the mean error and standard deviation.
        outputarray = []
        errorlist = []

        # Test the network for 300 input values between 0 and 1.
        input = np.linspace(0, 1, num=300)
        for i in range(len(input)):
            # Ask the network for the result, being an array similar to the target array.
            result = net.query(input[i])
            # The value compared to the result of the function is the index of the network's
            # output array holding the largest value, divided by 100.
            output = (np.argmax(result)) / 100
            # Save the outputs in an array.
            outputarray.append(output)
            # Calculate the absolute difference between network and function output.
            error = abs(output - (3 * input[i] * (1 - input[i])))
            # Save the errors in an array.
            errorlist.append(error)
            # Calculate the standard deviation for the errors of the network.
            d = np.std(errorlist)
            # Calculate the mean error.
            m = np.mean(errorlist)
# Set the joint angles in degrees. Theta1 and Theta2 are assumed to be fixed at 0 here,
# matching the 1-DOF weights loaded below.
Theta1 = 0
Theta2 = 0
Theta3 = 23

# Initialize a neural network, named testnet, with the same parameters as the one you want to test.
inputnodes = 3
hiddennodes = 80
outputnodes = 3
learningrate = 1.3
testnet = NeuralNetwork(inputnodes, hiddennodes, outputnodes, learningrate)
# Load the weight matrices from the net you want to test into the testnet.
testnet.load('1DOF')

# Simulate a standard manipulator by initializing a ForwardKinematics object. It should have the same
# parameters as the one the net was trained with.
manipulator = ForwardKinematics(0, 1, 1, 1)

# Scale the input angles to the range 0.01 to 1.00.
inputs = (((np.asarray([Theta1, Theta2, Theta3]) + 180) / 360) * 0.99) + 0.01
# Calculate the network's answer for the given input.
output = testnet.query(inputs)
# Rescale the outputs, which are numbers between 0.2 and 0.8, so they match the real answer.
netanswer = (((output - 0.2) / 0.6) * 4) - 2
netanswer[2] = (((output[2] - 0.2) / 0.6) * 4) - 1
# Calculate the solution of the forward kinematics for the given input with homogeneous transformation matrices.
realanswer = manipulator.calculate(Theta1, Theta2, Theta3)[:3]

print("Networks answer:   ", "        x:", '%.3f' % float(netanswer[0]),
      "  y:", '%.3f' % float(netanswer[1]), " z:",
      '%.3f' % float(netanswer[2]))
print("Forward Kinematics answer: ", "x:", '%.3f' % realanswer[0], "  y:",
      '%.3f' % realanswer[1], " z:", '%.3f' % realanswer[2])
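
# The scaling in this script is easy to check by hand: the input mapping sends -180 degrees to
# 0.01 and +180 degrees to 1.00, and the output mapping sends 0.2 and 0.8 to the ends of each
# coordinate range ([-2, 2] for x and y, [-1, 3] for z, following the -1 offset used above).
# A short numpy check of that arithmetic:
import numpy as np

# Input scaling: degrees in [-180, 180] land in [0.01, 1.00].
thetas = np.asarray([-180, 0, 180])
scaled = (((thetas + 180) / 360) * 0.99) + 0.01
print(scaled)                     # -> [0.01  0.505 1.   ]

# Output rescaling: net outputs in [0.2, 0.8] map back to coordinates.
outputs = np.asarray([0.2, 0.5, 0.8])
xy = (((outputs - 0.2) / 0.6) * 4) - 2       # x and y
z = (((outputs - 0.2) / 0.6) * 4) - 1        # z
print(xy)                         # -> [-2.  0.  2.]
print(z)                          # -> [-1.  1.  3.]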