def NM(game, T):
    # This is the procedure I propose for finding a Nash equilibrium.
    G1 = g(game.u[0])
    G2 = g(game.u[1])
    Z2 = np.zeros(game.shape)
    A1 = game.shape[0]
    A2 = game.shape[1]
    Z2[np.random.choice(A1), np.random.choice(A2)] += 1
    for t in range(1, T + 1):
        MZ2 = marginal(Z2 / t)
        L1 = np.sum(MZ2 * G1, axis=2)
        L2 = np.sum(np.transpose(MZ2) * G2, axis=2)
        L1[L1 < 0] = 0
        L2[L2 < 0] = 0
        # The calculation is similar to CRM, except that here we use the product
        # of the marginal distributions rather than the joint distribution.
        # The resulting Li is the Nash matrix, where
        # Li[k,j] = sum over a^{-i} of z^i(j) z^{-i}(a^{-i}) * [u^i(k,a^{-i}) - u^i(j,a^{-i})]
        D1 = np.diagflat(np.sum(L1, axis=0))
        D2 = np.diagflat(np.sum(L2, axis=0))
        q1 = prob(np.around(null(L1 - D1), 3))
        q2 = prob(np.around(null(L2 - D2), 3))
        Z2[np.random.choice(A1, p=q1), np.random.choice(A2, p=q2)] += 1
        # Z2 += q1.reshape(2, 1) * q2
        print("strategy for player 1: \n", np.sum(Z2, axis=1)[None] / t)
        print("strategy for player 2: \n", np.sum(Z2, axis=0)[None] / t)
        print("nonnegative portion of player 1's Nash matrix\n", L1)
        print("nonnegative portion of player 2's Nash matrix\n", L2)
        print()
    return marginal(Z2 / T)
def CRM(game, T):
    # Calibrated regret matching with incrementally maintained regret matrices:
    # W1/W2 accumulate the regrets via update_w, and each round plays a
    # probability vector drawn from the null space of (Li - Di).
    Z2 = np.zeros(game.shape)
    U1 = game.u[0]
    U2 = game.u[1]
    A1 = game.shape[0]
    A2 = game.shape[1]
    W1 = np.zeros((A1, A1))
    W2 = np.zeros((A2, A2))
    a1 = np.random.choice(A1)
    a2 = np.random.choice(A2)
    Z2[a1, a2] += 1
    update_w(W1, U1, a1, a2)
    update_w(W2, U2, a2, a1)
    for t in range(1, T + 1):
        L1 = np.clip(W1, 0, None) / t
        L2 = np.clip(W2, 0, None) / t
        D1 = np.diagflat(np.sum(L1, axis=0))
        D2 = np.diagflat(np.sum(L2, axis=0))
        q1 = np.sum(null(L1 - D1), axis=1)
        q1 /= np.sum(q1)
        q2 = np.sum(null(L2 - D2), axis=1)
        q2 /= np.sum(q2)
        a1 = np.random.choice(A1, p=q1)
        a2 = np.random.choice(A2, p=q2)
        Z2[a1, a2] += 1
        update_w(W1, U1, a1, a2)
        update_w(W2, U2, a2, a1)
    return Z2 / T
def CRM(game, T):
    # This function runs the Universal Calibrated Regret Matching procedure
    # described in the paper.
    G1 = g(game.u[0])
    G2 = g(game.u[1])
    Z2 = np.zeros(game.shape)
    A1 = game.shape[0]
    A2 = game.shape[1]
    Z2[np.random.choice(A1), np.random.choice(A2)] += 1
    for t in range(1, T + 1):
        L1 = np.sum(Z2 * G1, axis=2) / t
        L2 = np.sum(np.transpose(Z2) * G2, axis=2) / t
        # The expected result is
        # Li[k,j] = sum over all a^{-i} of prob(ej, a^{-i}) * [u^i(ek, a^{-i}) - u^i(ej, a^{-i})]
        L1[L1 < 0] = 0
        L2[L2 < 0] = 0
        # Li is the lambda matrix described in the paper; it is essentially
        # the nonnegative portion of the correlated regret matrix.
        D1 = np.diagflat(np.sum(L1, axis=0))
        D2 = np.diagflat(np.sum(L2, axis=0))
        # Di is a diagonal matrix whose diagonal entries are the column sums of Li.
        q1 = prob(np.around(null(L1 - D1), 3))
        q2 = prob(np.around(null(L2 - D2), 3))
        # qi is the strategy played at t+1 that satisfies the condition described
        # in the paper.
        Z2[np.random.choice(A1, p=q1), np.random.choice(A2, p=q2)] += 1
        # Z2 += q1.reshape(2, 1) * q2
        print("nonnegative portion of player 1's Correlated Equilibrium matrix\n", L1)
        print("nonnegative portion of player 2's Correlated Equilibrium matrix\n", L2)
    return Z2 / T
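# A minimal usage sketch for NM and CRM above, assuming the helper functions they
# call (g, prob, marginal, update_w) are defined in this module. The `Game`
# namedtuple is a hypothetical stand-in for whatever container supplies the
# `.u` payoff matrices and `.shape` that both procedures read.
from collections import namedtuple

Game = namedtuple('Game', ['u', 'shape'])       # hypothetical container
u1 = np.array([[1.0, -1.0], [-1.0, 1.0]])       # Matching Pennies, player 1
game = Game(u=[u1, -u1], shape=u1.shape)        # player 2 gets the negated payoffs

joint_play = CRM(game, T=1000)    # empirical joint distribution of play
product_play = NM(game, T=1000)   # product of the empirical marginals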
def get_phys_modes(phi, B, lambd=None, return_as_ix=False):
    n_lagr = B.shape[0]
    u_ = phi[0:-n_lagr, :]   # physical dofs
    l_ = phi[-n_lagr:, :]    # lagr dofs

    # Compatibility at interface
    mask1 = np.all((B @ u_) == 0, axis=0)

    # Equilibrium at interface
    L = null(B)
    g = -B.T @ l_
    mask2 = np.all((L.T @ g) == 0, axis=0)

    # A mode is physical only if it satisfies both conditions.
    phys_ix = np.logical_and(mask1, mask2)

    if return_as_ix:
        return phys_ix
    else:
        if lambd is None:
            lambd_phys = None
        else:
            lambd_phys = lambd[phys_ix]

        phi_phys = u_[:, phys_ix]

        return lambd_phys, phi_phys
def perp_vec(vecs):
    # Return a random unit vector orthogonal to every column vector of `vecs`.
    null1 = null(vecs.T)
    m = null1.shape[1]
    rand_coeff = rand.normal(size=[m, 1])
    perp = np.dot(null1, rand_coeff)
    perp = perp / la.norm(perp)
    return perp
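# A minimal usage sketch, assuming the module-level aliases used above exist
# (numpy as np, numpy.random as rand, numpy.linalg as la, scipy.linalg.null_space as null).
vecs = np.array([[1.0, 0.0],
                 [0.0, 1.0],
                 [0.0, 0.0]])    # columns: e1 and e2 in R^3
p = perp_vec(vecs)               # random unit vector in their orthogonal complement
print(vecs.T @ p)                # ~ [[0], [0]]: orthogonal to both columns
print(la.norm(p))                # ~ 1.0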
def right_null_projector(self, get_vR=False):
    _, _, r = self.eigs()
    R_ = sw(c(self[0]) @ r, 0, 1)
    R = R_.reshape(-1, self.d * R_.shape[-1])
    vR = sw(null(R).reshape(self.d, R_.shape[-1], -1), 1, 2)
    pr = ncon([inv(ch(r)), vR, c(vR), inv(ch(r))],
              [[-2, 2], [-1, 1, 2], [-3, 1, 4], [-4, 4]])
    self.vR = vR
    if get_vR:
        return pr, vR
    return pr
def make_frame(z):
    z = z.reshape((3, 1))
    z = z / np.linalg.norm(z)
    n = null(z.T)
    x = n[:, 0, None]
    x = x / np.linalg.norm(x)
    y = np.cross(z, x, axis=0)
    y = y / np.linalg.norm(y)
    R = np.zeros((3, 3), dtype='float32')
    R[:, 0, None] = x
    R[:, 1, None] = y
    R[:, 2, None] = z
    return R
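# A minimal usage sketch: build an orthonormal frame whose third column is a given
# direction. Only numpy and the `null` alias for scipy.linalg.null_space are assumed.
z_axis = np.array([0.0, 0.0, 2.0])
frame = make_frame(z_axis)
print(np.round(frame.T @ frame, 5))   # ~ identity: columns are orthonormal
print(frame[:, 2])                    # ~ [0, 0, 1]: the normalized input direction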
def __init__(self, nodes, elements, constraints=None, constraint_type='none',
             domain='3d', features=None, assemble=True):
    self.nodes = nodes
    self.elements = elements
    self.assign_node_dofcounts()
    self.k, self.m, self.c, self.kg = None, None, None, None
    self.domain = domain
    self.dim = 2 if domain == '2d' else 3

    if set([el.domain for el in self.elements]) != set([domain]):
        raise ValueError('Element domains have to match ElDef/Part/Assembly.')

    # Constraints
    self.constraints = constraints
    self.constraint_type = constraint_type
    self.dof_pairs = self.constraint_dof_ix()

    # PATCH for compatibility with nlfe2d module
    self.gdof_ix_from_nodelabels = lambda node_labels, dof_ix: gdof_ix_from_nodelabels(
        self.get_node_labels(), node_labels, dof_ix=dof_ix)

    # PATCH for compatibility with nlfe2d module
    if len(set(self.get_node_labels())) != len(self.get_node_labels()):
        raise ValueError('Non-unique node labels defined.')

    if constraints is not None:
        self.B = self.compatibility_matrix()
        self.L = null(self.B)
    else:
        self.B = None
        self.L = None

    if self.dof_pairs is not None:
        self.constrained_dofs = self.dof_pairs[self.dof_pairs[:, 1] == None, 0]
    else:
        self.constrained_dofs = []

    # self.unconstrained_dofs = np.delete(np.arange(0, np.shape(self.B)[1]), self.constrained_dofs)

    if features is None:
        features = []
    self.features = features
    self.feature_mats = self.global_matrices_from_features()

    # Update global matrices and vectors
    self.assign_node_dofcounts()
    self.assign_global_dofs()
    self.c = self.feature_mats['c']

    if assemble:
        self.assemble()
        self.update_tangent_stiffness()
        self.update_internal_forces()
        self.update_mass_matrix()
def left_null_projector(self, get_vL=False):
    """left_null_projector:
              |
    - inv(sqrt(l)) - vL = vL - inv(sqrt(l)) -
                               |
    replaces A in TDVP
    """
    _, l, _ = self.eigs()
    L_ = sw(cT(self[0]) @ ch(l), 0, 1)
    L = L_.reshape(-1, self.d * L_.shape[-1])
    vL = null(L).reshape((self.d, L.shape[1] // self.d, -1))
    pr = ncon([inv(ch(l)) @ vL, inv(ch(l)) @ c(vL)], [[-1, -2, 1], [-3, -4, 1]])
    self.vL = vL
    if get_vL:
        return pr, vL
    return pr
def left_null_projector(self, n, l=None, get_vL=False, store_envs=False, vL=None):
    """left_null_projector:
              |
    - inv(sqrt(l)) - vL = vL - inv(sqrt(l)) -
                               |
    replaces A(n) in TDVP

    :param n: site
    """
    if l is None:
        l, _ = self.get_envs(store_envs)
    if vL is None:
        L_ = sw(cT(self[n]) @ ch(l(n - 1)), 0, 1)
        L = L_.reshape(-1, self.d * L_.shape[-1])
        vL = null(L).reshape((self.d, L.shape[1] // self.d, -1))
    pr = ncon([inv(ch(l(n - 1))) @ vL, inv(ch(l(n - 1))) @ c(vL)],
              [[-1, -2, 1], [-3, -4, 1]])
    if get_vL:
        return pr, vL
    return pr
def enum_sliding_sticking_3d_proj(A, b, T, bt):
    info = dict()
    info['time zono'] = 0
    info['time lattice'] = 0

    cs_modes, cs_lattice, cs_info = enumerate_contact_separating_3d(A, b)
    all_modes = []

    # # manifolds = system.collider.manifolds
    # n_contacts = len(manifolds)
    # points = np.zeros((3,n_contacts))
    # tangents = np.zeros((3,n_contacts,2))
    # normals = np.zeros((3,n_contacts))
    # for i in range(n_contacts):
    #     m = manifolds[i]
    #     body_A = m.shape_A
    #     # if body_A.num_dofs() > 0:
    #     # g_wo = body_A.get_transform_world()
    #     g_wc = m.frame_A()
    #     g_oc = SE3.inverse(g_wo) * g_wc
    #     g_oc_m = g_oc.matrix()
    #     print(g_oc_m)
    #     points[:,i] = g_oc_m[1:3,-1]
    #     normals[:, i] = g_oc_m[1:3, 2]
    #     tangents[:, i, 0:1] = g_oc_m[1:3, 0:1]
    # print(points)
    # print(normals)
    # A, b = build_normal_velocity_constraints(system.collider.manifolds)
    # T, bt = build_tangential_velocity_constraints(system.collider.manifolds, num_sliding_planes)

    n_pts = A.shape[0]
    num_sliding_planes = int(T.shape[0] / n_pts)

    # A_, b_ = contacts_to_half(points, normals)
    # print('A')
    # print(A)
    # print('A_')
    # print(A_)

    # Get linearized sliding sections from number of sliding modes
    # D = np.array([[np.cos(np.pi*i/num_sliding_planes),np.sin(np.pi*i/num_sliding_planes),0] for i in range(num_sliding_planes)])
    # T = np.zeros((n_pts,num_sliding_planes,6))  # sliding plane normals
    # for i in range(n_pts):
    #     R = np.concatenate((tangents[:, i, :],normals[:, i].reshape(-1,1)), axis=1)
    #     for j in range(num_sliding_planes):
    #         T_i = np.dot(R,D[j])
    #         T[i,j,0:3] = T_i
    #         T[i,j,3:6] = np.dot(T_i, hat(points[:, i]))
    # T *= -1

    H = np.vstack((A, T.reshape(-1, T.shape[-1])))

    num_modes = 0
    for layer in cs_lattice.L:
        for face in layer:
            cs_mode = face.m
            print(cs_mode)
            mask_c = cs_mode == 'c'
            mask_s = ~mask_c
            # mask = np.hstack((mask_s, np.array([mask_c] * num_sliding_planes).T.flatten()))
            mask = np.hstack(
                (cs_mode == '0', np.array([mask_c] * num_sliding_planes).T.flatten()))

            if all(mask_s):
                L = FaceLattice()
                L.L = []
                L.append_empty()
                mode_sign = np.hstack((np.ones(n_pts, dtype=int),
                                       np.zeros(n_pts * num_sliding_planes, dtype=int)))
                modes = [mode_sign]
                L.L[0][0].m = mode_sign
            else:
                nc = null(A[mask_c], np.finfo(np.float32).eps)
                if not np.all(nc.shape):
                    mode_sign = np.zeros(H.shape[0])
                    L = FaceLattice()
                    L.L = []
                    L.append_empty()
                    L.L[0][0].m = [mode_sign]
                    modes = [mode_sign]
                elif nc.shape[1] == 1:
                    # only able to move in 1 dim
                    move_direc = np.array([nc, -nc, np.zeros(nc.shape)])
                    vd = np.dot(H, move_direc)
                    vd[abs(vd) < 1e-6] = 0
                    mode_sign = np.sign(vd).T.squeeze()
                    mode_cs_sign = mode_sign[:, 0:n_pts]
                    feasible_ind = np.all(mode_cs_sign[:, mask_s] == 1, axis=1)
                    mode_sign = mode_sign[feasible_ind]
                    L = FaceLattice()
                    L.L = []
                    L.append_empty()
                    L.L[0][0].m = mode_sign
                    modes = mode_sign
                else:
                    H_proj = np.dot(H[mask], nc)
                    H_proj_u, idu = unique_row(H_proj)
                    # if H_proj_u.shape[0] != H_proj.shape[0]:
                    #     print('AHH')
                    #     print(H_proj_u)
                    t_start = time()
                    V_all, Sign_all = zonotope_vertex(H_proj_u)
                    if len(V_all) == 0:
                        continue
                    # print(Sign_all)
                    info['time zono'] += time() - t_start
                    Sign_all = Sign_all[:, idu]
                    feasible_ind = np.where(
                        np.all(Sign_all[:, 0:sum(mask_s)] == 1, axis=1))[0]
                    V = V_all[feasible_ind]
                    Sign = Sign_all[feasible_ind]
                    # print(feasible_ind)
                    if not np.all(feasible_ind.shape):
                        continue
                    V_uq, ind_uq = unique_row(V)
                    if not V_uq.shape[0] == V.shape[0]:
                        Sign_uq = np.zeros((V_uq.shape[0], Sign.shape[1]))
                        for i in range(V_uq.shape[0]):
                            s_all = Sign[ind_uq == i]
                            s = np.zeros((1, Sign.shape[1]))
                            if s_all.shape[0] > 1:
                                s[:, np.all(s_all == 1, axis=0)] = 1
                                s[:, np.all(s_all == -1, axis=0)] = -1
                            Sign_uq[i] = s
                        V = V_uq
                        Sign = Sign_uq

                    # Hack.
                    V = V_all
                    Sign = Sign_all

                    # sign_cells = I.sign_vectors(I.dim(), I0)
                    # idx_sc = np.where(np.all(sign_cells[:, 0:sum(mask_s)] == 1, axis=1))[0]
                    # sign_cells = sign_cells[idx_sc]
                    # idx_sc = np.lexsort(np.rot90(sign_cells))
                    # sign_cells = sign_cells[idx_sc]
                    # print(sign_cells)
                    # print('# v', len(V_all))
                    # idx_s = np.lexsort(np.rot90(Sign))
                    # print(Sign[idx_s])

                    t_start = time()
                    L = vertex2lattice(V)
                    info['time lattice'] += time() - t_start

                    mode_sign = np.zeros((Sign.shape[0], n_pts * (1 + num_sliding_planes)))
                    mode_sign[:, mask] = Sign
                    modes = get_lattice_mode(L, mode_sign)

            print('# modes', len(modes))
            print(np.array(modes))
            num_modes += len(modes)
            all_modes.append(modes)
            face.ss_lattice = L

    info['# faces'] = num_modes
    print(num_modes)

    # np.set_printoptions(suppress=True)
    # for cs_layer in cs_lattice.L:
    #     for cs_face in cs_layer:
    #         for ss_layer in cs_face.ss_lattice.L:
    #             for ss_face in ss_layer:
    #                 if hasattr(ss_face, 'm'):
    #                     print(sample_twist_sliding_sticking(points, normals, tangentials, ss_face.m).T)

    return all_modes, cs_lattice, info
def enumerate_contact_separating_3d(A, b):
    # Create solve info.
    info = dict()

    # Create halfspace inequalities, Ax - b <= 0.
    # A, b = build_normal_velocity_constraints(system.collider.manifolds)
    # b = np.zeros(b.shape)
    if DEBUG:
        print('A')
        print(A)
        print('b')
        print(b)
    n_pts = A.shape[0]
    info['n'] = n_pts

    # Get interior point using linear programming.
    t_lp = time()
    int_pt = int_pt_cone(A)
    info['time lp'] = time() - t_lp
    if DEBUG:
        print('int_pt2')
        print(int_pt, '\n', A @ int_pt)

    # Filter contact points which are always in contact.
    mask = np.zeros(n_pts, dtype=bool)
    c = np.where(np.abs(A @ int_pt) < 1e-6)[0]  # always contacting.
    mask[c] = 1
    A_c = A[mask, :]
    b_c = b[mask, :]
    A = A[~mask, :]
    b = b[~mask, :]
    if DEBUG:
        print(c)
        print(mask)

    # Project into null space of contacting points.
    #   [A'; A_c]x ≤ 0, A_c⋅x = 0 ⇒ x ∈ NULL(A_c)
    #   let NULL(A_c) = [x0, ... , xc]
    #   A'⋅NULL(A_c)x' ≤ 0
    if np.sum(mask) > 0:
        N = null(A_c, np.finfo(np.float32).eps)
        A = A @ N
        int_pt = np.linalg.lstsq(N, int_pt, None)[0]
        if DEBUG:
            print('Null A_c')
            print(N)
            print('new int pt')
            print(int_pt)
            print(A @ int_pt)

    # Compute dual points.
    b_off = b - np.dot(A, int_pt)
    dual = A / b_off
    if DEBUG:
        print('b off')
        print(b_off)
        print('dual')
        print(dual)

    # Handle degenerate cases when d = 0 or 1.
    if np.sum(mask) == n_pts:
        cs_modes = [np.array(['c'] * n_pts)]
        lattice = FaceLattice()
        lattice.L = [[Face(range(n_pts), 0)]]
        lattice.L[0][0].m = cs_modes[0]
        return cs_modes, lattice, info
    if dual.shape[1] == 1:
        lattice = FaceLattice(M=np.ones((1, 1), int), d=1)
        dual_map = [list(np.where(~mask)[0])]
        cs_modes = lattice.csmodes(mask, dual_map)
        lattice.L = lattice.L[1:3]
        return cs_modes[1:3], lattice, info

    # Project dual points into affine space.
    dual = proj_affine(dual.T).T
    if DEBUG:
        print('proj dual')
        print(dual)
    info['d'] = dual.shape[1]

    # Filter duplicate points.
    idx = np.lexsort(np.rot90(dual))
    dual_map = []
    dual_unique = []
    i = 0
    while i < len(idx):
        if i == 0:
            dual_unique.append(dual[idx[i], :])
            dual_map.append([idx[i]])
        else:
            curr = dual[idx[i], :]
            last = dual_unique[-1]
            if np.linalg.norm(last - curr) < 1e-6:
                dual_map[-1].append(idx[i])
            else:
                dual_unique.append(dual[idx[i], :])
                dual_map.append([idx[i]])
        i += 1
    if DEBUG:
        print('dual map')
        print(dual_map)
        print('dual unique')
        print(dual_unique)

    # Handle various cases.
    dual = [list(dual_unique[i]) for i in range(len(dual_unique))]
    if len(dual_unique) == 1:
        d = 0
        M = np.array([[]])
    elif len(dual_unique[0]) == 1:
        d = 1
        M = np.zeros((len(dual), 2), int)
        i_min = np.argmin(np.array(dual).flatten())
        i_max = np.argmax(np.array(dual).flatten())
        M[i_min, 0] = 1
        M[i_max, 1] = 1
    else:
        d = len(dual[0])

        # Compute dual convex hull.
        t_start = time()
        ret = pyhull.qconvex('Fv', dual)
        info['time conv'] = time() - t_start
        if DEBUG:
            print(np.array(ret))

        # Build facet-vertex incidence matrix.
        n_facets = int(ret[0])
        M = np.zeros((len(dual), n_facets), int)
        for i in range(1, len(ret)):
            vert_set = [int(x) for x in ret[i].split(' ')][1:]
            for v in vert_set:
                M[v, i - 1] = 1
        if DEBUG:
            print('dual')
            print(np.array(dual))

    if DEBUG:
        print('M')
        print(M)

    # Build face lattice.
    t_start = time()
    lattice = FaceLattice(M, d)
    info['time lattice'] = time() - t_start

    # Build mode strings.
    cs_modes = lattice.csmodes(mask, dual_map)
    if DEBUG:
        print(cs_modes)

    info['# 0 faces'] = lattice.num_k_faces(0)
    info['# d-1 faces'] = lattice.num_k_faces(info['d'] - 1)
    info['# faces'] = lattice.num_faces()

    # Return mode strings.
    return cs_modes, lattice, info
# RA, 2019-12-14

import qsharp
import numpy as np
from scipy.linalg import null_space as null

from Quantum.Simon import SampleY

Y = np.vstack([SampleY.simulate() for __ in range(10)])
print(null(Y).T.round())
def signed_covectors(Vecs):
    # V_, signs = zonotope_vertex(Vecs)
    V, ind_V = unique_row(Vecs)
    n = V.shape[0]
    orth = sp.linalg.orth(V.T)
    d = orth.shape[1]
    if d != V.shape[1]:
        V = np.dot(V, orth)

    cocir = []
    for k in [d - 1]:
        combs = combinations(V, k)
        for comb in list(combs):
            c = np.array(comb)
            if np.linalg.matrix_rank(c) == d - 1:
                ns = null(c)
                vdot = np.dot(V, ns).reshape(-1)
                sign = np.sign(vdot)
                sign[abs(vdot) < 1e-6] = 0
                cocir.append(sign)
    cocir = np.unique(cocir, axis=0)
    # cocir = np.vstack((cocir, -cocir))

    covec = np.zeros((0, n))
    lower = cocir
    upper = np.zeros((0, n))
    while np.any(lower == 0):
        for c_i in lower:
            for c_j in lower:
                if np.all(c_i == c_j):
                    continue
                ind = np.all(np.array([c_i, c_j]) != 0, axis=0)
                if not np.all(c_i[ind] == c_j[ind]):
                    continue
                ni = np.all([c_i == 0, c_j != 0], axis=0)
                nj = np.all([c_j == 0, c_i != 0], axis=0)
                cv_i = np.matlib.repmat(c_i, sum(ni), 1)
                cv_j = np.matlib.repmat(c_j, sum(nj), 1)
                id_i = np.zeros(cv_i.shape, bool)
                id_j = np.zeros(cv_j.shape, bool)
                row_i = 0
                for id in np.where(ni)[0]:
                    id_i[row_i, id] = True
                    row_i += 1
                row_i = 0
                for id in np.where(nj)[0]:
                    id_j[row_i, id] = True
                    row_i += 1
                cv_i[id_i] = c_j[ni]
                cv_j[id_j] = c_i[nj]
                # TODO: constrain the change??
                upper = np.vstack((upper, cv_i, cv_j))
        # upper = np.array(upper).reshape(-1, n)
        # upper = np.unique(np.vstack((upper, -upper)), axis=0)
        upper = np.unique(upper, axis=0)
        covec = np.vstack((covec, upper))
        lower = upper
        upper = np.zeros((0, n))

    cc = np.vstack((covec, cocir, -covec, -cocir, np.zeros(n, int)))
    cc = cc[:, ind_V]
    # cc = np.unique(np.vstack((cc, -cc)), axis=0)
    return cc
def affine_dep(X):
    # Affine dependencies among the columns of X: coefficients c with X c = 0
    # and sum(c) = 0, i.e. the null space of [X; 1^T].
    n_cols = X.shape[1]
    return null(np.concatenate((X, np.ones((1, n_cols))), axis=0))
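# A minimal usage sketch: three collinear points in the plane have a one-dimensional
# space of affine dependencies. Only numpy and the `null` alias above are assumed.
pts = np.array([[0.0, 1.0, 2.0],
                [0.0, 1.0, 2.0]])   # columns are points on the line y = x
dep = affine_dep(pts)               # one basis vector, proportional to [1, -2, 1]
print(pts @ dep)                    # ~ 0: the weighted combination vanishes
print(dep.sum(axis=0))              # ~ 0: the weights sum to zero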