    def correction(self, H, h, measurements, estimated_cov_matrix):
        # H_hat = H * P * H^T; adding the measurement noise self.Ez gives the innovation covariance
        H_hat = multiply(H, estimated_cov_matrix, transpose(H))
        # Kalman gain K = P * H^T * (H_hat + Ez)^-1
        K = multiply(estimated_cov_matrix, transpose(H), invert(H_hat + self.Ez))
        # update the state with the weighted innovation (measurements - h)
        self.estimated_position += multiply(K, (measurements - h))

        # covariance update: P = (I - K * H) * P
        self.cov_matrix = multiply((np.eye(N=4) - multiply(K, H)), estimated_cov_matrix)
        self.prediction_sequence.append(transpose(self.estimated_position))

    def selectBestPositions(self, estimated_cov_matrix, estimated_position):
        # rank the base stations by squared Mahalanobis distance to the estimated state
        estimated_cov_matrix_inv = invert(estimated_cov_matrix)
        distances = np.ones(self.sensor_size) * -1
        for i in range(self.sensor_size):
            # pad the 2-D base-station position with zeros for the velocity components
            extended_basestation_pos = np.append(self.basestations[i].position, np.array([0, 0]))
            difference = transpose(estimated_position) - extended_basestation_pos
            distances[i] = multiply(difference, estimated_cov_matrix_inv, transpose(difference))

        # keep the (at most) three closest base stations after sorting
        valid_distances = self.sortWithIndeces(distances)
        return [valid_distances[i][0] for i in range(0, min(3, len(valid_distances)))]
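
# Illustrative NumPy-only sketch of the selection criterion in selectBestPositions:
# the squared Mahalanobis distance d^2 = v^T * P^-1 * v between the estimated state
# and each base station. The covariance, state and station positions below are
# made-up values, not taken from the snippet.
import numpy as np

P_inv = np.linalg.inv(np.diag([2.0, 2.0, 1.0, 1.0]))     # assumed 4x4 covariance
state = np.array([3.0, 4.0, 0.5, 0.0])                   # [x, y, vx, vy]
stations = [np.array([0.0, 0.0]), np.array([5.0, 5.0]), np.array([10.0, 0.0])]

d2 = []
for pos in stations:
    v = state - np.append(pos, [0.0, 0.0])               # pad with zero velocity
    d2.append(v @ P_inv @ v)

# indices of the (at most) three closest stations, mirroring the method's selection
best = sorted(range(len(d2)), key=lambda i: d2[i])[:3]
print(best, d2)
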
def main():
    A = build_matrix()
    # forward elimination: U is upper triangular, Es holds the elimination matrices
    U, Es = gauss(A)
    # combined elimination matrix E = E3 * E2 * E1, so that E * A = U
    E = multiply(multiply(Es[2], Es[1]), Es[0])
    # L undoes the elimination, so L * U = A
    L = gauss_jordan(E)
    T = transpose(A)
    # split U into a diagonal factor D and a unit upper triangular factor
    D, new_U = factorize_D(U)
    assert multiply(E, A) == U
    # matrix multiplication is not commutative
    assert multiply(E, A) != multiply(A, E)
    # LU and LDU factorizations of A
    assert multiply(L, U) == A
    assert multiply(multiply(L, D), new_U) == A
    # transposing twice gives back the original matrix
    assert transpose(T) == A
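
# Illustrative cross-check of the factorization identities asserted above, using
# NumPy/SciPy instead of the snippet's custom helpers, on the 3x3 test matrix from
# the solve() example further down. scipy.linalg.lu applies partial pivoting, so a
# permutation factor P appears that the snippet's gauss() does not use.
import numpy as np
from scipy.linalg import lu

A_np = np.array([[2.0, 3.0, -1.0],
                 [4.0, 4.0, -3.0],
                 [-2.0, 1.0, -1.0]])
P, L_np, U_np = lu(A_np)
assert np.allclose(P @ L_np @ U_np, A_np)            # A = P * L * U
D_np = np.diag(np.diag(U_np))                        # diagonal factor of U
U_unit = np.linalg.inv(D_np) @ U_np                  # unit upper triangular factor
assert np.allclose(P @ L_np @ D_np @ U_unit, A_np)   # A = P * L * D * U'
assert np.allclose(A_np.T.T, A_np)                   # transposing twice is a no-op
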
# backward substitution on an upper triangular augmented matrix; relies on
# zeros (NumPy) and the helper module referenced as mo
def backward_substitution(a):
    # currently supports only an (n x n+1) matrix: the last column holds the
    # right-hand side, so row i reads a[i][i]*p[i] + ... + a[i][n-1]*p[n-1] = a[i][n]
    n = a.shape[0]
    p = zeros(n)
    sum_of_others = 0

    # last variable: p[n-1] = b[n-1] / a[n-1][n-1]
    p[-1] = a[-1][-1] / a[-1][-2]

    # remaining variables, solved from the bottom row upwards
    for i in range(n - 2, -1, -1):
        for j in range(n - 1, i, -1):
            sum_of_others += a[i][j] * p[j]

        p[i] = (a[i][n] - sum_of_others) / a[i][i]
        sum_of_others = 0

    return mo.transpose(p)
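
# Illustrative NumPy-only check of the same back-substitution idea, independent of
# the mo helper module: solve an upper triangular system from the bottom row up
# and compare against numpy.linalg.solve. The matrix and right-hand side are made up.
import numpy as np

U_demo = np.array([[2.0, 1.0, 3.0],
                   [0.0, 4.0, 5.0],
                   [0.0, 0.0, 6.0]])
b_demo = np.array([1.0, 2.0, 3.0])
aug = np.column_stack((U_demo, b_demo))   # (n x n+1) augmented matrix

n = aug.shape[0]
x = np.zeros(n)
for i in range(n - 1, -1, -1):
    # subtract the already-solved variables, then divide by the pivot
    x[i] = (aug[i, n] - aug[i, i + 1:n] @ x[i + 1:]) / aug[i, i]

assert np.allclose(x, np.linalg.solve(U_demo, b_demo))
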
    def prediction(self):
        # state and covariance prediction: x = F * x, P = F * P * F^T + Ex (process noise)
        self.estimated_position = multiply(self.F, self.estimated_position)
        estimated_cov_matrix = multiply(self.F, self.cov_matrix, transpose(self.F)) + self.Ex
        measurements = self.selectiveMeasurements(estimated_cov_matrix)

        # expected measurement h(x) for every base station that delivered a reading
        h = np.zeros(self.sensor_size)
        for i in range(0, self.sensor_size):
            if measurements[i] != 0:
                h[i] = self.model.spaceToValue(self.estimated_position[0:2] - self.basestations[i].position)

        # Jacobian H of the measurement model; rows for unused base stations stay zero
        H = np.empty((0, 4))
        for i in range(0, len(measurements)):
            if measurements[i] != 0:
                dh_dx, dh_dy = self.model.derivative(self.estimated_position[0:2] - self.basestations[i].position)
            else:
                dh_dx, dh_dy = 0.0, 0.0
            H = np.append(H, np.array([[dh_dx, dh_dy, 0.0, 0.0]]), axis=0)

        return H, h, measurements, estimated_cov_matrix
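
# Self-contained numeric sketch of the predict/correct equations used by the two
# methods above, written with plain NumPy on a 4-state [x, y, vx, vy] constant
# velocity model. All concrete values (F_demo, Q_demo, R_demo, H_demo, the state
# and the measurement) are illustrative assumptions, and a linear position
# measurement stands in for the snippet's nonlinear base-station model.
import numpy as np

dt = 1.0
F_demo = np.array([[1, 0, dt, 0],
                   [0, 1, 0, dt],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]], dtype=float)
Q_demo = 0.01 * np.eye(4)                       # process noise (self.Ex analogue)
R_demo = 0.1 * np.eye(2)                        # measurement noise (self.Ez analogue)
H_demo = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0]], dtype=float)  # direct position measurement

x = np.array([0.0, 0.0, 1.0, 0.5])              # state estimate
P = np.eye(4)                                   # state covariance
z = np.array([1.1, 0.4])                        # observed position

# prediction step
x = F_demo @ x
P = F_demo @ P @ F_demo.T + Q_demo

# correction step
S = H_demo @ P @ H_demo.T + R_demo              # innovation covariance
K = P @ H_demo.T @ np.linalg.inv(S)             # Kalman gain
x = x + K @ (z - H_demo @ x)
P = (np.eye(4) - K @ H_demo) @ P
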
# helper dependencies assumed for this snippet (not shown here): array from NumPy
# and the project modules referenced as mo and ge
def transform_to_identity(a):
    # for each column i, subtract multiples of row i from the rows above it to
    # clear the entries above the diagonal, then scale every row by its pivot
    n = a.shape[0]
    for i in range(1, n):
        print(i)
        for j in range(0, i):
            factor = a[j][i] / a[i][i]
            for k in range(0, n + 1):
                a[j][k] -= factor * a[i][k]
    a = ge.row_scaling(a, False)
    return a


# solve a set of equations
def solve(a, b):
    print("Coefficients:\n", a)
    print("Values:\n", b)
    p = mo.augmented_matrix(a, b)
    print("Augmented:\n", p)
    p = ge.row_scaling(p)
    print("Row scaled:\n", p)
    # p = partial_pivoting(p)
    # print("Partially pivoted:\n", p)
    p = transform_to_identity(p)
    print("Transformed to identity form:\n", p)
    q = ge.backward_substitution(p)
    print("On backward substitution:\n", q)


# test
A = array([[2, 3, -1], [4, 4, -3], [-2, 1, -1]])
B = mo.transpose(array([5, 3, -3]))
# A = array([[4,1],[1,3]])
# B = mo.transpose(array([1,2]))
solve(A, B)
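
# Quick NumPy cross-check of the test system above (illustrative only; the
# snippet's own output comes from ge.backward_substitution):
#   2x + 3y -  z =  5
#   4x + 4y - 3z =  3
#  -2x +  y -  z = -3
import numpy as np

A_check = np.array([[2.0, 3.0, -1.0],
                    [4.0, 4.0, -3.0],
                    [-2.0, 1.0, -1.0]])
b_check = np.array([5.0, 3.0, -3.0])
print(np.linalg.solve(A_check, b_check))   # prints [1. 2. 3.]
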
# Lesson6, Task1

# import own module
import matrix_operations as m_op

# import numpy module
import numpy as np

# get machine epsilon
u = np.finfo(float).eps

# set initial data
A = m_op.hilbert_matrix(5)
b = m_op.transpose([[1, 2, 3, 4, 5]])

# print initial data
print("Matrix A:")
m_op.print_matrix(m_op.matrix_in_fractions(A))

print()
print("Matrix b:")
m_op.print_matrix(b)

# first solve with the LU method; r = b - A*x is the residual
x = m_op.solve_equations_by_LU(A, b)
r = m_op.subtract_two_dimensional_matricies(b,
                                            m_op.matrix_multiplication(A, x))

# print first results
print("First x:")
m_op.print_matrix(x)
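
# Illustrative NumPy/SciPy counterpart of the experiment above: build the same
# 5x5 Hilbert matrix, solve A x = b via LU, and inspect the residual r = b - A x.
# Hilbert matrices are badly conditioned, which is presumably why the snippet
# computes machine epsilon u alongside the residual.
import numpy as np
from scipy.linalg import hilbert, lu_factor, lu_solve

A_h = hilbert(5)
b_h = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

lu_piv = lu_factor(A_h)                   # LU factorization with partial pivoting
x_h = lu_solve(lu_piv, b_h)
r_h = b_h - A_h @ x_h                     # residual of the computed solution

print("condition number:", np.linalg.cond(A_h))
print("x:", x_h)
print("max |r|:", np.max(np.abs(r_h)), "vs machine eps:", np.finfo(float).eps)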