def direct_sum_decompose(U_basis, V_basis, w):
    """Split w into its U-part and V-part.

    Given bases U_basis and V_basis whose spans form a direct sum, and a Vec
    w lying in that direct sum, return the unique pair (u, v) with u + v == w,
    u in Span(U_basis) and v in Span(V_basis).

    >>> U_basis = [Vec({0, 1, 2, 3, 4, 5},{0: 2, 1: 1, 2: 0, 3: 0, 4: 6, 5: 0}), Vec({0, 1, 2, 3, 4, 5},{0: 11, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0}), Vec({0, 1, 2, 3, 4, 5},{0: 3, 1: 1.5, 2: 0, 3: 0, 4: 7.5, 5: 0})]
    >>> V_basis = [Vec({0, 1, 2, 3, 4, 5},{0: 0, 1: 0, 2: 7, 3: 0, 4: 0, 5: 1}), Vec({0, 1, 2, 3, 4, 5},{0: 0, 1: 0, 2: 15, 3: 0, 4: 0, 5: 2})]
    >>> w = Vec({0, 1, 2, 3, 4, 5},{0: 2, 1: 5, 2: 0, 3: 0, 4: 1, 5: 0})
    >>> direct_sum_decompose(U_basis, V_basis, w) == (Vec({0, 1, 2, 3, 4, 5},{0: 2.0, 1: 4.999999999999972, 2: 0.0, 3: 0.0, 4: 1.0, 5: 0.0}), Vec({0, 1, 2, 3, 4, 5},{0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0}))
    True
    """
    combined = U_basis + V_basis
    # Coordinates of w with respect to the concatenated basis.
    coords = solve(coldict2mat(combined), w)
    u_part = Vec(U_basis[0].D, {})
    v_part = Vec(V_basis[0].D, {})
    for idx, coeff in coords.f.items():
        basis_vec = combined[idx]
        # Membership is tested by value; a vector present in both bases
        # is credited to the U side (same tie-break as before).
        if basis_vec in U_basis:
            u_part = u_part + coeff * basis_vec
        elif basis_vec in V_basis:
            v_part = v_part + coeff * basis_vec
    return (u_part, v_part)
def is_superfluous(L, i, eps=1e-14):
    """Return True if L[i] lies (numerically) in the span of the other vectors of L.

    Solves A*u ~ b where A's columns are the other vectors and b = L[i];
    L[i] is superfluous when the least-squares residual is negligible.

    Args:
        L: list of Vecs.
        i: index of the candidate vector.
        eps: squared-norm tolerance below which the residual counts as zero.
             New optional parameter; the default preserves the previously
             hard-coded 1e-14.

    Returns:
        bool: True iff the squared residual norm is below eps.
    """
    A = coldict2mat(L[:i] + L[i + 1:])
    b = L[i]
    residual = b - A * solve(A, b)
    # residual * residual is the squared Euclidean norm of the residual Vec;
    # return the comparison directly instead of an if/else over True/False.
    return residual * residual < eps
def find_triangular_matrix_inverse(M):
    """Return the inverse of a triangular matrix M, or None if M is not invertible.

    Bug fix: the previous version built the result by negating M's
    off-diagonal entries (and keeping the diagonal as-is).  That equals the
    true inverse only for a unit-diagonal triangular matrix whose strictly
    triangular part N satisfies N*N == 0; for general triangular M it is
    wrong.  Each column of the inverse is now obtained by forward (lower
    triangular) or back (upper triangular) substitution.

    Assumes M's row/column labels are 0..n-1, as the original did.
    """
    if is_invertible(M) == False:
        return None
    n = len(M.D[0])
    # Any nonzero entry strictly below the diagonal => lower triangular,
    # so solve rows top-down; otherwise solve bottom-up.
    is_lower = any(M[r, c] != 0 for r in range(n) for c in range(r))
    order = list(range(n)) if is_lower else list(range(n - 1, -1, -1))
    inv_cols = []
    for i in range(n):
        # Solve M x = e_i by substitution in triangular order.
        x = [0] * n
        for r in order:
            acc = 1 if r == i else 0
            for c in range(n):
                # Unsolved positions still hold 0, and the triangular zeros
                # contribute nothing, so the full sweep is safe.
                if c != r:
                    acc -= M[r, c] * x[c]
            x[r] = acc / M[r, r]
        inv_cols.append(list2vec(x))
    return coldict2mat(inv_cols)
def find_triangular_matrix_inverse(M):
    """Return the inverse of a triangular matrix M, or None if M is not invertible.

    Bug fix (this is a duplicate definition also present earlier in the file):
    negating the off-diagonal entries is the inverse only when the strictly
    triangular part N satisfies N*N == 0.  Columns of the inverse are now
    computed by forward/back substitution instead.

    Assumes M's row/column labels are 0..n-1, as the original did.
    """
    if is_invertible(M) == False:
        return None
    n = len(M.D[0])
    # Nonzero entry strictly below the diagonal => lower triangular.
    is_lower = any(M[r, c] != 0 for r in range(n) for c in range(r))
    order = list(range(n)) if is_lower else list(range(n - 1, -1, -1))
    inv_cols = []
    for i in range(n):
        # Solve M x = e_i in triangular order.
        x = [0] * n
        for r in order:
            acc = 1 if r == i else 0
            for c in range(n):
                if c != r:
                    acc -= M[r, c] * x[c]
            x[r] = acc / M[r, r]
        inv_cols.append(list2vec(x))
    return coldict2mat(inv_cols)
def find_matrix_inverse(M):
    """Return the inverse of M, computed by solving M * X = I via solve2.

    Returns None when is_invertible reports M as singular.
    """
    if is_invertible(M) == False:
        return None
    # Column c of the identity has a 1 in position c and 0 elsewhere.
    identity_cols = []
    for col in range(len(M.D[0])):
        entries = [1 if col == row else 0 for row in range(len(M.D[1]))]
        identity_cols.append(list2vec(entries))
    return solve2(M, coldict2mat(identity_cols))
def solve2(a, b):
    """Solve the matrix equation a * X = b, returning X as a Mat.

    Flattens both Mats to row-major lists of lists, delegates to _solve, and
    rebuilds a Mat from the solution columns.

    Fixes:
    - b's entries are now read with b's own column labels (was `a.D[1]`,
      which misreads or crashes whenever b's column domain differs from a's;
      for the square inverse use case the behavior is unchanged).
    - domain labels are iterated in sorted order so row/column ordering is
      deterministic rather than set-iteration order.
    - removed the unused local `D`.
    """
    mat_rows = [[a[r, c] for c in sorted(a.D[1])] for r in sorted(a.D[0])]
    rhs_rows = [[b[r, c] for c in sorted(b.D[1])] for r in sorted(b.D[0])]
    # _solve is a project helper; [0] extracts the solution component.
    solution = _solve(mat_rows, rhs_rows)[0]
    return coldict2mat([list2vec(v) for v in solution])
# cost = tf.reduce_mean(tf.square(hypothesis - Y)) hypothesis_vec = mat2coldict(H)[0] W[0, 0] = (hypothesis_vec * hypothesis_vec) / dim return W def minimize(X, Y, W, sigma=0.01): dim = len(X.D[0]) H = X * W - Y for i in range(dim): H[i, 0] = H[i, 0] * X[i, 0] gradient_vec = mat2coldict(H)[0] gradient = (gradient_vec * gradient_vec) / dim W[0, 0] = W[0, 0] - sigma * gradient return W X = coldict2mat([list2vec(v) for v in [[1, 2, 3]]]) Y = coldict2mat([list2vec(v) for v in [[1, 2, 3]]]) W = coldict2mat([list2vec(v) for v in [[rd.random()]]]) tw = W for i in range(10): tw = minimize(X, Y, tw) print(tw[0, 0])
def direct_sum_decompose2(U_basis, V_basis, w):
    """Exploratory variant of direct_sum_decompose.

    Forms every pairwise sum u + v of basis vectors, solves for w against
    those columns (the solution is computed but not used further), prints how
    many of the pairwise sums subset_basis keeps, and returns the full
    pairwise-sum list.
    """
    pair_sums = [u_vec + v_vec for u_vec in U_basis for v_vec in V_basis]
    coeffs = solve(coldict2mat(pair_sums), w)  # computed, currently unused
    print(len(subset_basis(pair_sums)))
    return pair_sums
# NOTE(review): the indented lines below are the interior of a
# find_triangular_matrix_inverse-style loop whose `def` and outer-loop header
# are outside this view (I, i and M come from the missing lines); kept verbatim.
# NOTE(review): negating the off-diagonal entries inverts a triangular matrix
# only when the strictly triangular part N satisfies N*N == 0 -- verify.
        row_list = []
        for j in range(len(M.D[1])):
            if i == j:
                row_list.append(M[j, i])
            else:
                row_list.append(-M[j, i])
        I.append(list2vec(row_list))
    return coldict2mat(I)


# Ad-hoc check: a 4x4 unit-diagonal triangular matrix and its two inverses.
M0 = [
    list2vec(v)
    for v in [[1, 0.5, 0.2, 4], [0, 1, 0.3, 0.9], [0, 0, 1, 0.1], [0, 0, 0, 1]]
]
MM = coldict2mat(M0)
RM = find_triangular_matrix_inverse(MM)
# print(RM)
RM2 = find_matrix_inverse(MM)

# Invertibility probes on matrices of various shapes and ranks.
L0 = [list2vec(v) for v in [[1, 3], [2, 1], [3, 1]]]
# print(is_invertible(coldict2mat(L0)))
L1 = [
    list2vec(v)
    for v in [[1, 0, 0, 0], [0, 2, 0, 0], [1, 1, 3, 0], [0, 0, 1, 4]]
]
# print(is_invertible(coldict2mat(L1)))
L2 = [list2vec(v) for v in [[1, 0, 2], [0, 1, 1]]]
# print(is_invertible(coldict2mat(L2)))
L3 = [list2vec(v) for v in [[1, 0], [0, 1]]]
# print(is_invertible(coldict2mat(L3)))
# NOTE(review): the statements below, through the first `return W`, are the
# tail of an enclosing cost-style function whose `def` line is outside this
# view; kept verbatim and indented as body lines (X, Y, W come from the
# missing header).
    dim = len(X.D[0])
    H = (X * W) - Y  # residual column
    # cost = tf.reduce_mean(tf.square(hypothesis - Y))
    hypothesis_vec = mat2coldict(H)[0]
    # squared norm of the residual divided by dim -> mean squared error
    W[0, 0] = (hypothesis_vec * hypothesis_vec) / dim
    return W


def minimize(X, Y, W, sigma=0.01):
    """One gradient-descent-style update of the 1x1 weight matrix W (mutates W in place).

    NOTE(review): `gradient` is the mean of (residual_i * x_i) squared and is
    therefore never negative; a true MSE gradient would keep the sign -- verify.
    """
    dim = len(X.D[0])
    H = X * W - Y
    for i in range(dim):
        H[i, 0] = H[i, 0] * X[i, 0]  # scale each residual by its input
    gradient_vec = mat2coldict(H)[0]
    gradient = (gradient_vec * gradient_vec) / dim
    W[0,0] = W[0,0] - sigma * gradient
    return W


# Demo: drive a single random weight toward fitting X -> Y.
X = coldict2mat([list2vec(v) for v in [ [1, 2, 3] ]])
Y = coldict2mat([list2vec(v) for v in [ [1, 2, 3] ]])
W = coldict2mat([list2vec(v) for v in [ [rd.random()] ]])
tw = W
for i in range(10):
    tw = minimize(X, Y, tw)
    # assumes the print sat inside the loop (indentation lost in source) -- verify
    print(tw[0,0])
def direct_sum_decompose2(U_basis, V_basis, w):
    """Exploratory variant of direct_sum_decompose (duplicate definition).

    Builds the list of all pairwise sums u + v over the two bases, runs
    solve against those columns (result unused), prints the size of a basis
    chosen from the pairwise sums, and returns the pairwise-sum list.
    """
    pair_sums = [u_vec + v_vec for u_vec in U_basis for v_vec in V_basis]
    coeffs = solve(coldict2mat(pair_sums), w)  # computed, currently unused
    print(len(subset_basis(pair_sums)))
    return pair_sums
# NOTE(review): the indented lines below are the body of a
# find_triangular_matrix_inverse-style function whose `def` line is outside
# this view (M comes from the missing signature); kept verbatim.
# NOTE(review): negating the off-diagonal entries is a valid triangular
# inverse only when the strict part N satisfies N*N == 0 -- verify.
    I = []
    for i in range(len(M.D[0])):
        row_list = []
        for j in range(len(M.D[1])):
            if i == j:
                row_list.append(M[j,i])
            else:
                row_list.append(-M[j,i])
        I.append(list2vec(row_list))
    return coldict2mat(I)


# Ad-hoc check: a 4x4 unit-diagonal triangular matrix and its two inverses.
M0 = [list2vec(v) for v in [[1,0.5,0.2,4], [0,1,0.3,0.9], [0,0,1,0.1], [0,0,0,1]]]
MM = coldict2mat(M0)
RM = find_triangular_matrix_inverse(MM)
# print(RM)
RM2 = find_matrix_inverse(MM)

# Invertibility probes on matrices of various shapes and ranks.
L0 = [list2vec(v) for v in [[1,3], [2,1], [3,1]]]
# print(is_invertible(coldict2mat(L0)))
L1 = [list2vec(v) for v in [[1,0,0,0], [0,2,0,0], [1,1,3,0], [0,0,1,4]]]
# print(is_invertible(coldict2mat(L1)))
L2 = [list2vec(v) for v in [[1,0,2], [0,1,1]]]
# print(is_invertible(coldict2mat(L2)))
L3 = [list2vec(v) for v in [[1,0], [0,1]]]
# print(is_invertible(coldict2mat(L3)))
L4 = [list2vec(v) for v in [[1,0,1], [0,1,1], [1,1,0]]]
# print(is_invertible(coldict2mat(L4)))