Пример #1
0
    def iterate(self, measurement, control ):
        """Run one predict/correct cycle of the Kalman filter.

        measurement -- (x, y) observation of the ball, or falsy when the
                       ball was not seen this frame
        control     -- (vx, vy, vtheta) robot velocity command

        Updates self.mu, self.Sigma and self.ballPos in place.
        """
        if measurement:
            self.lost = False

        # Speeds are used directly as absolute displacements here.
        dx = control[0]
        dy = control[1]
        dtheta = control[2]

        # Control vector u carries the translational part of the motion.
        self.u[0][0] = dx
        self.u[1][0] = dy

        # ________ PREDICTION ________

        # 2x2 rotation by the robot's turn dtheta.
        rotationmatrix = [[math.cos(dtheta), -math.sin(dtheta)],
                          [math.sin(dtheta),  math.cos(dtheta)]]

        # Predict the new state: the robot translates towards the ball,
        # then rotates, so the ball moves oppositely in robot frame.
        muBelief = matrix.subtract(self.mu, self.u)
        muBelief = matrix.mult(rotationmatrix, muBelief)

        # Perturb the prediction with motion noise drawn from Sigma.
        muBelief = sample(muBelief, self.Sigma, 2)

        # Grow the covariance by the process noise R.
        SigmaBelief = matrix.plus(self.Sigma, self.R)

        # ________ CORRECTION _________

        if measurement:
            self.z[0][0] = measurement[0]
            self.z[1][0] = measurement[1]

            # The measurement matrix C is the identity, so it is omitted.
            s = matrix.inverse2D(matrix.plus(SigmaBelief, self.Q))
            K = matrix.mult(SigmaBelief, s)

            self.mu = matrix.plus(muBelief, matrix.mult(K, matrix.subtract(self.z, muBelief)))
            self.Sigma = matrix.mult(matrix.subtract(self.I, K), SigmaBelief)
        else:
            # Without an observation, fall back on the predicted state.
            self.mu = muBelief
            self.Sigma = SigmaBelief

        self.ballPos = self.mu[0][0], self.mu[1][0]
Пример #2
0
    def cam_transform(self, camera):
        """camera = VCam object
        returns Position object representing the camera's view"""

        # Undo the camera translation first.
        shifted = self.translate(-camera.pos[0], -camera.pos[1], -camera.pos[2])

        # Column-vector forms of the camera position and up vector.
        cam_pos_col = [[camera.pos[i]] for i in range(3)]
        cam_upvec_col = [[camera.upvec[i]] for i in range(3)]

        # Undo the camera rotation.
        # ***maybe try quaternions here instead?
        # Build three perpendicular unit vectors, Z pointing towards the camera.
        Z = matrix.normalize(matrix.subtract(shifted.vec[0:3], cam_pos_col))
        X = matrix.normalize(matrix.crossprod(cam_upvec_col, Z))
        Y = matrix.crossprod(Z, X)

        # Inverse rotation matrix: the basis vectors laid out as rows,
        # padded to 4x4 homogeneous form.
        C = [
            [X[0][0], X[1][0], X[2][0], 0],  # [Xaxis.x, Xaxis.y, Xaxis.z, 0]
            [Y[0][0], Y[1][0], Y[2][0], 0],  # [Yaxis.x, Yaxis.y, Yaxis.z, 0]
            [Z[0][0], Z[1][0], Z[2][0], 0],  # [Zaxis.x, Zaxis.y, Zaxis.z, 0]
            [0, 0, 0, 1]
        ]
        # A single matrix multiplication applies the whole camera transform.
        view = matrix.multiply(C, shifted.vec)
        return Position(view[0][0], view[1][0], view[2][0])
Пример #3
0
    def train(self, input_array, target_array):
        """Run one step of online backpropagation.

        input_array  -- plain list of input values
        target_array -- plain list of expected output values

        Updates self.weights_ih, self.weights_ho, self.bias_h and
        self.bias_o in place.
        """
        # ---- Forward pass ----
        inputs = matrix.fromArray(input_array)
        hidden = matrix.multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # activation function
        hidden.map(sigmoid)

        # Generating the output layer's output
        outputs = matrix.multiply(self.weights_ho, hidden)
        # BUGFIX: bias_o is trained below (self.bias_o.add(gradients)) but was
        # never applied in the forward pass; add it before the activation,
        # mirroring the hidden layer above.
        outputs.add(self.bias_o)
        outputs.map(sigmoid)

        targets = matrix.fromArray(target_array)

        # error = targets - outputs
        output_errors = matrix.subtract(targets, outputs)

        # gradient = dsigmoid(outputs), then elementwise error and
        # learning-rate scaling.
        gradients = matrix.map(outputs, dsigmoid)
        # get hadamard product
        gradients.multiply(output_errors)
        # perform scalar multiplication
        gradients.multiply(self.learning_rate)

        # Calculate hidden->output weight deltas
        hidden_t = matrix.transpose(hidden)
        weight_ho_deltas = matrix.multiply(gradients, hidden_t)

        # Change weights by the calculated deltas
        self.weights_ho.add(weight_ho_deltas)
        # Adjust bias by the gradient
        self.bias_o.add(gradients)

        # Backpropagate the output errors to the hidden layer.
        weights_ho_t = matrix.transpose(self.weights_ho)
        hidden_errors = matrix.multiply(weights_ho_t, output_errors)

        # Calculate hidden gradient
        hidden_gradient = matrix.map(hidden, dsigmoid)
        # hadamard product
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.learning_rate)

        # Calculate input->hidden deltas
        inputs_t = matrix.transpose(inputs)
        weight_ih_deltas = matrix.multiply(hidden_gradient, inputs_t)

        self.weights_ih.add(weight_ih_deltas)
        self.bias_h.add(hidden_gradient)
Пример #4
0
def main(x, hidden, b, learning, test, w, g, n_d):
    """Train a one-hidden-layer network with backpropagation, then plot.

    x        -- training inputs: list of rows (row 0 = input values)
    hidden   -- number of hidden neurons
    b        -- bias value appended as an extra input row
    learning -- number of training iterations
    test     -- target values for the training inputs
    w        -- initial weights drawn uniformly from [-w, w]
    g        -- learning rate
    n_d      -- noised test inputs, same layout as x

    Relies on module globals: loading_message, loading_progress,
    iterations, inputData, x_data, y_data, start_time, plus matplotlib
    (plt, mpatches), matrix, random and time.
    """
    random.seed(1)
    # Shallow copies: the row lists themselves are still shared with the
    # caller's data; only the new bias row below is private.
    training_data = x[:]
    noise_data = n_d[:]

    # Adding bias to training data
    training_data.append([])
    noise_data.append([])

    for _ in x[0]:
        # NOTE(review): index 1 assumes x held exactly one row before the
        # append above; a safer form would be len(training_data)-1 — confirm.
        training_data[1].append(b)
        noise_data[1].append(b)

    # Random weights for synapses
    synapses0 = []  # input -> hidden weights, shape (hidden, 2)
    synapses1 = []  # hidden -> output weights, shape (hidden+1, 1)

    for _ in range(hidden):
        synapses0.append([random.uniform(w, -w), random.uniform(w, -w)])  # second rand for bias synapses
    for j in range(hidden + 1):  # +1 for bias
        synapses1.append([random.uniform(w, -w)])

    sig_layer2 = []

    global loading_message
    global loading_progress
    # learning loop (learning = iterations)
    for i in xrange(learning):
        temp = i+1
        # NOTE(review): 'iterations' is not a parameter — presumably a module
        # global equal to 'learning' (the commented line below uses
        # 'learning'); confirm, otherwise this raises NameError.
        loading_progress = round((float(temp) / float(iterations)) * 100, 1)

        # loading_progress = (temp / learning) * 100
        # # # Forward pass
        # # Input Layer

        layer1 = matrix.multiply(synapses0, training_data)

        # Activation level
        sig_layer1 = matrix.sig(layer1)

        # # Hidden Layer
        # Adding bias to layer1

        # Shallow copy so the appended bias row does not grow sig_layer1.
        b_sig_layer1 = sig_layer1[:]

        b_sig_layer1.append([])

        for _ in b_sig_layer1[0]:
            b_sig_layer1[len(b_sig_layer1) - 1].append(b)

        layer2 = matrix.multiply(matrix.transpose(synapses1), b_sig_layer1)

        sig_layer2 = matrix.sig(layer2)

        # Calculate net error (targets minus network output)
        error = [matrix.subtract(test, matrix.transpose(sig_layer2))]

        # if i % 25000 == 0:
        #     temp = 0
        #     for j in range(len(error)):
        #         temp += temp + error[0][j]
        #     print i, temp

        # Delta for neuron in output layer (1 for each training data)
        deriv_sig_layer2 = matrix.derivative(sig_layer2)
        delta_layer2 = [[]]

        for j in range(len(error[0])):
            delta_layer2[0].append(deriv_sig_layer2[0][j] * error[0][j] * g)

        # Delta for neurons in hidden layer
        deriv_sig_layer1 = matrix.derivative(sig_layer1)
        delta_layer1 = []
        # Per-hidden-neuron sum of output deltas weighted by synapses1.
        delta_weight_sum = []

        for k in range(len(synapses1)):
            delta_weight_sum.append([])
            for j in range(len(delta_layer2[0])):
                delta_weight_sum[k].append(synapses1[k][0] * delta_layer2[0][j])

        for k in range(len(deriv_sig_layer1)):
            delta_layer1.append([])
            for j in range(len(deriv_sig_layer1[0])):
                delta_layer1[k].append(deriv_sig_layer1[k][j] * delta_weight_sum[k][j] * g)

        delta_w_oh = matrix.multiply(delta_layer2, matrix.transpose(b_sig_layer1))
        delta_w_hi = matrix.multiply(delta_layer1, matrix.transpose(training_data))

        # # Update weights
        synapses1 = matrix.add(synapses1, matrix.transpose(delta_w_oh))

        synapses0 = matrix.add(synapses0, delta_w_hi)

        # Progress messages for the UI polling the module globals.
        if i > learning * 0.5:
            if i > learning * 0.95:
                loading_message = "I'm nearly done, good training."
            else:
                loading_message = "Well, I'm halfway through."

    # Testing net with noised data
    sig_noise = []
    if len(n_d) > 0:
        # print "testing with noise data"

        # Same forward pass as above, on the noised inputs.
        l1 = matrix.multiply(synapses0, noise_data)

        sig_l1 = matrix.sig(l1)

        b_sig_l1 = sig_l1[:]

        b_sig_l1.append([])

        for _ in b_sig_l1[0]:
            b_sig_l1[len(b_sig_l1) - 1].append(b)

        l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)

        sig_noise = matrix.sig(l2)

    # formatting net output for plot: map sigmoid range (0,1) back to (-1,1)
    result1 = []  # training data
    result2 = []  # noised data
    # NOTE(review): if n_d was empty, sig_noise is [] and the result2 line
    # raises IndexError — presumably n_d is always non-empty here; confirm.
    for i in range(len(sig_layer2[0])):
        result1.append(sig_layer2[0][i] * 2 - 1)
        result2.append(sig_noise[0][i] * 2 - 1)

    # Plot
    # Some code lines from: https://matplotlib.org/users/legend_guide.html
    neuron_patch = mpatches.Patch(label='Neurons: ' + str(hidden))
    bias_patch = mpatches.Patch(label='Bias: ' + str(b))
    iteration_patch = mpatches.Patch(label='Iterations: ' + str(learning))
    epsilon_patch = mpatches.Patch(label='Epsilon: ' + str(g))
    weight_patch = mpatches.Patch(label='Weight range (0 +/-): ' + str(w))
    time_patch = mpatches.Patch(label=str(round((time.time() - start_time) / 60, 2)) + " min")
    first_legend = plt.legend(
        handles=[bias_patch, time_patch, epsilon_patch, neuron_patch, iteration_patch, weight_patch],
        bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
        ncol=3, mode="expand", borderaxespad=0.)

    line1, = plt.plot(inputData[0], result1, label="Training Data", linewidth=0.75)
    line2, = plt.plot(inputData[0], result2, label="Test Data", linestyle=':', linewidth=0.75)
    line3, = plt.plot(x_data, y_data, label="sin(x)", linestyle='--', linewidth=0.75)
    ax = plt.gca().add_artist(first_legend)
    plt.legend(handles=[line1, line2, line3])
    plt.savefig('./plots/plot' + str(time.time())[2:10] + '.png')

    # Reset matplotlib state so repeated calls do not accumulate figures.
    plt.clf()
    plt.cla()
    plt.close()
Пример #5
0
def main(x, hidden, b, epochs, test, w, g, n_d, m, p_m):
    """Train a one-hidden-layer network, log errors, then plot or print.

    x      -- training inputs: list of rows (row 0 = input values)
    hidden -- number of hidden neurons
    b      -- bias value appended as an extra input row
    epochs -- number of training iterations
    test   -- target values for the training inputs
    w      -- initial weights drawn uniformly from [-w, w]
    g      -- learning rate (gamma)
    n_d    -- noised/test inputs, same layout as x
    m      -- mode: "sin" plots the results, "xor" prints them
    p_m    -- when plotting: True saves to ./plots/, False shows the window

    Relies on module globals: loading_message, loading_progress,
    iterations, inputData, x_data, y_data, error_axis, x_axis, y_axis,
    start_time, plus matplotlib (plt, mpatches), matrix, random and time.
    """
    random.seed(1)
    # Shallow copies: row lists are still shared with the caller's data;
    # only the bias row appended below is private.
    training_data = x[:]
    noise_data = n_d[:]

    # Adding bias to training data
    training_data.append([])
    noise_data.append([])

    for _ in x[0]:
        training_data[len(training_data)-1].append(b)
        noise_data[len(noise_data)-1].append(b)

    # Random weights for synapses
    synapses0 = []  # input -> hidden weights, one row per hidden neuron
    synapses1 = []  # hidden -> output weights, shape (hidden+1, 1)

    for f in range(hidden):
        synapses0.append([])
        for _ in range(len(training_data)):
            synapses0[f].append(random.uniform(w, -w))  # second rand for bias synapses
    for j in range(hidden + 1):  # +1 for bias
        synapses1.append([random.uniform(w, -w)])

    sig_layer2 = []
    error_log = []   # per-epoch training error
    error_log2 = []  # per-epoch test/noise error
    gamma_log = []
    global loading_message
    global loading_progress

    # learning loop (learning = iterations)
    for i in xrange(epochs):
        # NOTE(review): 'iterations' is not a parameter — presumably a module
        # global equal to 'epochs'; confirm, otherwise this raises NameError.
        loading_progress = round((float(i) / float(iterations)) * 100, 1)

        # # # Forward pass
        # # Input Layer
        layer1 = matrix.multiply(synapses0, training_data)

        # Activation level
        sig_layer1 = matrix.sig(layer1)

        # # Hidden Layer
        # Adding bias to layer1
        # Shallow copy so the appended bias row does not grow sig_layer1.
        b_sig_layer1 = sig_layer1[:]

        b_sig_layer1.append([])

        for _ in b_sig_layer1[0]:
            b_sig_layer1[len(b_sig_layer1) - 1].append(b)

        layer2 = matrix.multiply(matrix.transpose(synapses1), b_sig_layer1)

        sig_layer2 = matrix.sig(layer2)

        # # # ----------------
        # # Calculate net error (targets minus network output)
        error = [matrix.subtract(test, matrix.transpose(sig_layer2))]
        # error = [matrix.error(test, matrix.transpose(sig_layer2))]
        # if i % 5000 == 0:
        #     print(error)

        # NOTE(review): 'temp += temp + ...' doubles the accumulator each
        # step rather than summing — looks like it should be 'temp += ...';
        # kept as-is, only logged for the plot.
        temp = 0
        for j in range(len(error)):
            temp += temp + error[0][j]

        error_log.append(temp/len(error))

        # Test with test data: same forward pass on the noised inputs.
        sig_noise = []
        l1 = matrix.multiply(synapses0, noise_data)
        sig_l1 = matrix.sig(l1)
        b_sig_l1 = sig_l1[:]
        b_sig_l1.append([])

        for _ in b_sig_l1[0]:
            b_sig_l1[len(b_sig_l1) - 1].append(b)

        l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)
        sig_noise = matrix.sig(l2)

        error2 = [matrix.subtract(test, matrix.transpose(sig_noise))]

        # Same accumulator pattern as above (see NOTE).
        temp2 = 0
        for j in range(len(error2)):
            temp2 += temp2 + error2[0][j]

        error_log2.append(temp2 / len(error2))

        # # # ----------------
        # # Calculating weight updates
        # Delta for neuron in output layer (1 for each training data)
        deriv_sig_layer2 = matrix.derivative(sig_layer2)
        delta_layer2 = [[]]

        # temp_g = (g/(i+1))
        # gamma_log.append(temp_g)

        for j in range(len(error[0])):
            delta_layer2[0].append(deriv_sig_layer2[0][j] * error[0][j] * g)

        # Delta for neurons in hidden layer
        deriv_sig_layer1 = matrix.derivative(sig_layer1)
        delta_layer1 = []
        # Per-hidden-neuron sum of output deltas weighted by synapses1.
        delta_weight_sum = []

        for k in range(len(synapses1)):
            delta_weight_sum.append([])
            for j in range(len(delta_layer2[0])):
                delta_weight_sum[k].append(synapses1[k][0] * delta_layer2[0][j])

        for k in range(len(deriv_sig_layer1)):
            delta_layer1.append([])
            for j in range(len(deriv_sig_layer1[0])):
                delta_layer1[k].append(deriv_sig_layer1[k][j] * delta_weight_sum[k][j] * g)

        delta_w_oh = matrix.multiply(delta_layer2, matrix.transpose(b_sig_layer1))
        delta_w_hi = matrix.multiply(delta_layer1, matrix.transpose(training_data))

        # # # Backwards pass
        # # Update weights
        synapses1 = matrix.add(synapses1, matrix.transpose(delta_w_oh))

        synapses0 = matrix.add(synapses0, delta_w_hi)

        # Progress messages for the UI polling the module globals.
        if i > epochs * 0.5:
            if i > epochs * 0.95:
                loading_message = "I'm nearly done, good training."
            else:
                loading_message = "Well, I'm halfway through."

        # # # End of learning

    # Testing net with noised/test data (final forward pass)
    sig_noise = []
    l1 = matrix.multiply(synapses0, noise_data)
    sig_l1 = matrix.sig(l1)
    b_sig_l1 = sig_l1[:]
    b_sig_l1.append([])

    for _ in b_sig_l1[0]:
        b_sig_l1[len(b_sig_l1) - 1].append(b)

    l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)
    sig_noise = matrix.sig(l2)

    # formatting net output for plot: map sigmoid range (0,1) back to (-1,1)
    result1 = []  # training data
    result2 = []  # noised data
    for i in range(len(sig_layer2[0])):
        result1.append(sig_layer2[0][i] * 2 - 1)
        result2.append(sig_noise[0][i] * 2 - 1)

    if m == "sin":
        # Plot
        # Some code lines from: https://matplotlib.org/users/legend_guide.html
        neuron_patch = mpatches.Patch(label='Neurons: ' + str(hidden))
        bias_patch = mpatches.Patch(label='Bias: ' + str(b))
        iteration_patch = mpatches.Patch(label='Iterations: ' + str(epochs))
        epsilon_patch = mpatches.Patch(label='Gamma: ' + str(g))
        weight_patch = mpatches.Patch(label='Weight range: +/- ' + str(w))
        time_patch = mpatches.Patch(label=str(round((time.time() - start_time) / 60, 2)) + " min")
        first_legend = plt.legend(
            handles=[bias_patch, time_patch, epsilon_patch, neuron_patch, iteration_patch, weight_patch],
            bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
            ncol=3, mode="expand", borderaxespad=0.)

        line4, = plt.plot(error_axis[0], error_log, label="Error", linewidth=0.5)
        line6, = plt.plot(error_axis[0], error_log2, label="Error2", linewidth=0.5)
        line1, = plt.plot(inputData[0], result1, label="Training Data", linewidth=0.75)
        line2, = plt.plot(inputData[0], result2, label="Test Data", linestyle=':', linewidth=0.75)
        line3, = plt.plot(x_data, y_data, label="sin(x)", linestyle='--', linewidth=0.75)
        line5, = plt.plot(x_axis, y_axis, label="Axis", linewidth=0.5)
        ax = plt.gca().add_artist(first_legend)
        plt.legend(handles=[line4, line1, line2, line3, line5, line6])

        if p_m:
            plt.savefig('./plots/' + str(time.time())[2:10] + '.png')
        else:
            plt.show()

        # Reset matplotlib state so repeated calls do not accumulate figures.
        plt.clf()
        plt.cla()
        plt.close()

    elif m == "xor":
        print("-----")
        for i in range(len(sig_noise[0])):
            print "Input: " + str(round(noise_data[0][i], 0)) + " & " \
                  + str(round(noise_data[1][i], 0)) + " = " + str(round(sig_noise[0][i], 0)) + " (" \
                  + str(round(sig_noise[0][i] * 100, 4)) + "% for True)"
Пример #6
0
    def iterate(self, measurement, control):
        now = time.time()
        # timestamps matter. IMPORTANT: Do not use if first iteration has not been set. 
        if self.firstCall:
            self.firstCall = False
            
        timeTaken = time.time() - self.timeStamp
        # major screwup if this happens 
        if timeTaken > 1.0:
            print 'Interval was way too long: ', timeTaken, 'seconds.'
            timeTaken = 0.0
        self.timeStamp = time.time() 

        #velocity = motProxy.getRobotVelocity()
        velocity = control
                
        # known are speeds, convert to absolute movements
        nao_movement_x     = velocity[0] * timeTaken
        nao_movement_y     = velocity[1] * timeTaken
        nao_movement_t     = velocity[2] * timeTaken
        #print timeTaken
        # step forward using control vector u
        self.u[0][0] = nao_movement_x
        self.u[1][0] = nao_movement_y
        print 'Increment control ', self.u 
        muBelief = self.mu
        
        # ________ PREDICTION ________
        
        # rotate using nao_movements theta
        t = nao_movement_t
        rotationmatrix = [[ math.cos( t ), -math.sin(t) ],[ math.sin(t), math.cos(t) ]]

        # Predict new measurement based on nao movement.
        # Nao moves x,y towards the ball 
        muBelief = matrix.subtract( muBelief , self.u )
        #print muBelief
        # Nao rotates theta
        muBelief = matrix.mult( rotationmatrix, muBelief )

        # add noise to motion
        muBelief = sample( muBelief, self.Sigma, 2)
        
        # covariance matrix update
        SigmaBelief = matrix.plus( self.Sigma , self.R)

        # ________ CORRECTION _________

        if measurement:
            self.z[0][0] = measurement[0]
            self.z[1][0] = measurement[1]

            # Since C = [1,0;0,1], drop it
            s = matrix.inverse2D( matrix.plus(  SigmaBelief, self.Q) )
            K = matrix.mult(SigmaBelief, s ) 

            self.mu = matrix.plus(  muBelief,  matrix.mult(K, matrix.subtract(self.z , muBelief)) )
            self.Sigma = matrix.mult(matrix.subtract( self.I, K ), SigmaBelief)
        else:
            # if no ball is found, use the predicted state!
            self.mu = muBelief
            self.Sigma = SigmaBelief

        #print 'Mu:',self.mu
        #print 'Sigma: '
        #matrix.show(self.Sigma)
        #print ''
        return (self.mu[0][0], self.mu[1][0])