def find_direction(self, poly, plot=False):
    self.build_polys(poly)
    volumes = []
    try:
        ineq = np.array(cdd.Polyhedron(poly.inner).get_inequalities())
    except RuntimeError:
        raise SteppingException('Numerical inconsistency found')
    for line in ineq:
        key = hashlib.sha1(line).hexdigest()
        if key in poly.volume_dic:
            volumes.append(poly.volume_dic[key])
        else:
            if key in poly.hrep_dic:
                A_e = poly.hrep_dic[key]
            else:
                A_e = poly.outer.copy()
                A_e.extend(cdd.Matrix(-line.reshape(1, line.size)))
                A_e.canonicalize()
                poly.hrep_dic[key] = A_e
            if plot:
                poly.reset_fig()
                poly.plot_polyhedrons()
                poly.plot_polyhedron(A_e, 'm', 0.5)
                poly.show()
            vol = self.volume_convex(A_e)
            poly.volume_dic[key] = vol
            volumes.append(vol)
            poly.vrep_dic[key] = np.array(
                cdd.Polyhedron(A_e).get_generators())
    maxv = max(volumes)
    alli = [i for i, v in enumerate(volumes) if v == maxv]
    i = random.choice(alli)
    key = hashlib.sha1(ineq[i, :]).hexdigest()
    self.last_hrep = poly.hrep_dic[key]
    return -ineq[i, 1:]
def test_vs_cdd():
    vrep = [[1.00000000e+00, 6.49999999e-01, 4.91264259e-19, 5.67434186e-07],
            [1.00000000e+00, -6.49999999e-01, -1.10024414e-19, 5.67434187e-07],
            [1.00000000e+00, 2.68036827e-19, 6.49999999e-01, 5.67434186e-07],
            [1.00000000e+00, -2.36423280e-18, -6.49999999e-01, 5.67434187e-07],
            [1.00000000e+00, -3.38321375e-15, 4.02250642e-14, 6.50000000e-01],
            [1.00000000e+00, 1.50481564e-14, 1.78805309e-15, -6.50000000e-01]]
    mat = cdd.Matrix(np.array(vrep), number_type='fraction')
    mat.rep_type = cdd.RepType.GENERATOR
    mat.canonicalize()
    hrep_cdd = np.array(cdd.Polyhedron(mat).get_inequalities())
    f = np.vectorize(lambda x: Fraction(x))
    poly = pyparma.Polyhedron(vrep=f(np.array(vrep)))
    hrep_parma = poly.hrep()
    assert (equal_sorted(np.array(mat), poly.vrep()))
    print(hrep_cdd)
    print(hrep_parma)
    assert (equal_sorted(hrep_cdd, hrep_parma))
def _test_testcdd2(number_type=None, assert_matrix_equal=None):
    mat = cdd.Matrix([[7, -3, -0], [7, 0, -3], [1, 1, 0], [1, 0, 1]],
                     number_type=number_type)
    mat.rep_type = cdd.RepType.INEQUALITY
    assert_matrix_equal(list(mat),
                        [(7, -3, -0), (7, 0, -3), (1, 1, 0), (1, 0, 1)])
    gen = cdd.Polyhedron(mat).get_generators()
    nose.tools.assert_equal(gen.rep_type, cdd.RepType.GENERATOR)
    assert_matrix_equal(list(gen),
                        [(1, Fraction(7, 3), -1),
                         (1, -1, -1),
                         (1, -1, Fraction(7, 3)),
                         (1, Fraction(7, 3), Fraction(7, 3))])
    # add an equality and an inequality
    mat.extend([[7, 1, -3]], linear=True)
    mat.extend([[7, -3, 1]])
    assert_matrix_equal(list(mat),
                        [(7, -3, -0), (7, 0, -3), (1, 1, 0), (1, 0, 1),
                         (7, 1, -3), (7, -3, 1)])
    nose.tools.assert_equal(list(mat.lin_set), [4])
    gen2 = cdd.Polyhedron(mat).get_generators()
    nose.tools.assert_equal(gen2.rep_type, cdd.RepType.GENERATOR)
    assert_matrix_equal(list(gen2), [(1, -1, 2), (1, 0, Fraction(7, 3))])
def compute_polytope_vertices(A, b):
    """
    Compute the vertices of a polytope given in halfspace representation by
    :math:`A x \\leq b`.

    Parameters
    ----------
    A : array, shape=(m, k)
        Matrix of halfspace representation.
    b : array, shape=(m,)
        Vector of halfspace representation.

    Returns
    -------
    vertices : list of arrays
        List of polytope vertices.

    Notes
    -----
    This method won't work well if your halfspace representation includes
    equality constraints :math:`A x = b` written as :math:`(A x \\leq b
    \\wedge -A x \\leq -b)`. If this is your use case, consider using directly
    the linear set ``lin_set`` of `equality-constraint generators in pycddlib
    <https://pycddlib.readthedocs.io/en/latest/matrix.html>`_.
    """
    b = b.reshape((b.shape[0], 1))
    mat = cdd.Matrix(hstack([b, -A]), number_type='float')
    mat.rep_type = cdd.RepType.INEQUALITY
    P = cdd.Polyhedron(mat)
    g = P.get_generators()
    V = array(g)
    vertices = []
    for i in range(V.shape[0]):
        if V[i, 0] != 1:  # 1 = vertex, 0 = ray
            raise Exception("Polyhedron is not a polytope")
        elif i not in g.lin_set:
            vertices.append(V[i, 1:])
    return vertices
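# Illustrative usage (added; not from the original source): a minimal sketch
# of the H-rep -> V-rep round trip that compute_polytope_vertices wraps. The
# unit square 0 <= x, y <= 1 should yield exactly its four corners.
import cdd
import numpy as np

square = cdd.Matrix([[1, -1, 0],   # rows are [b, -A] for A x <= b:  x <= 1
                     [0, 1, 0],    # -x <= 0
                     [1, 0, -1],   # y <= 1
                     [0, 0, 1]],   # -y <= 0
                    number_type='fraction')
square.rep_type = cdd.RepType.INEQUALITY
corners = np.array(cdd.Polyhedron(square).get_generators())
# Each generator row is [t, x, y] with t == 1 for a vertex, t == 0 for a ray.
assert sorted(tuple(row[1:]) for row in corners.tolist()) == \
    [(0, 0), (0, 1), (1, 0), (1, 1)]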
def test_facet_incidence_cube(number_type):
    # We start with the H-representation for a cube
    mat = cdd.Matrix([[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1],
                      [1, -1, 0, 0], [1, 0, -1, 0], [1, 0, 0, -1]],
                     number_type=number_type)
    mat.rep_type = cdd.RepType.INEQUALITY
    poly = cdd.Polyhedron(mat)
    incidence = poly.get_input_incidence()
    # Family size should equal the number of facets of the cube (6),
    # plus 1 (the empty infinite ray)
    assert len(incidence) == 7
    # All the facets of the cube should have 4 vertices.
    # The polyhedron is closed, so the last set should be empty
    assert [len(inc) for inc in incidence] == [4, 4, 4, 4, 4, 4, 0]
    # The vertices must be numbered consistently
    # The first vertex is adjacent to the second, fourth and eighth
    # (note the conversion to a pythonic numbering system)
    incidence_list = [[4, 5, 6, 7], [0, 1, 6, 7], [0, 3, 5, 7],
                      [0, 1, 2, 3], [2, 3, 4, 5], [1, 2, 4, 6], []]
    for i in range(7):
        assert sorted(list(incidence[i])) == incidence_list[i]
def test_make_facet_adjacency_list(number_type):
    # This matrix is the same as in vtest_vo.ine
    mat = cdd.Matrix([[0, 0, 0, 1], [5, -4, -2, 1], [5, -2, -4, 1],
                      [16, -8, 0, 1], [16, 0, -8, 1], [32, -8, -8, 1]],
                     number_type=number_type)
    mat.rep_type = cdd.RepType.INEQUALITY
    poly = cdd.Polyhedron(mat)
    adjacencies = [[1, 2, 3, 4, 6], [0, 2, 3, 5], [0, 1, 4, 5],
                   [0, 1, 5, 6], [0, 2, 5, 6], [1, 2, 3, 4, 6],
                   [0, 3, 4, 5]]
    adjacency_list = poly.get_input_adjacency()
    for i in range(7):
        assert list(adjacency_list[i]) == adjacencies[i]
def face_of_span(S):
    V = vstack([
        hstack([zeros((S.shape[1], 1)), S.T]),
        hstack([1, zeros(S.shape[0])])
    ])
    # V-representation: first column is 0 for rays
    mat = cdd.Matrix(V, number_type='float')
    mat.rep_type = cdd.RepType.GENERATOR
    P = cdd.Polyhedron(mat)
    ineq = P.get_inequalities()
    H = array(ineq)
    if H.shape == (0,):  # H == []
        return H
    A = []
    for i in xrange(H.shape[0]):
        # H matrix is [b, -A] for A * x <= b
        if norm(H[i, 1:]) < 1e-10:
            continue
        elif abs(H[i, 0]) > 1e-10:  # b should be zero for a cone
            raise Exception("Polyhedron is not a cone")
        elif i not in ineq.lin_set:
            A.append(-H[i, 1:])
    return array(A)
def _pos_generators(matrix):
    """Return a matrix whose rows are the extreme rays of the pointed cone
    `matrix x = 0, x >= 0'."""
    import cdd
    if matrix == Matrix():
        return matrix
    S = matrix
    nr = S.cols
    # matrix |b -A|, with b = 0, -A = | S^t  -S^t  I |^t
    H = [[0] + [int(r[i]) for i in range(len(r))]
         for r in S.col_join(-S).tolist()] + \
        [[0] + [0 if j != i else 1 for j in range(nr)] for i in range(nr)]
    # polyhedron
    H = cdd.Matrix(H, number_type="fraction")
    H.rep_type = cdd.RepType.INEQUALITY
    # extreme rays
    ers = cdd.Polyhedron(H).get_generators()
    ers = [er[1:] for er in ers if er[0] == 0]
    return Matrix(ers)
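# Illustrative check (added; not in the original source) of the same
# inequality stacking: for S = [1, -1] the pointed cone {S x = 0, x >= 0}
# should have a single extreme ray proportional to (1, 1).
import cdd

rows = [[0, 1, -1], [0, -1, 1]]    # S x >= 0 and -S x >= 0, i.e. S x = 0
rows += [[0, 1, 0], [0, 0, 1]]     # x >= 0 componentwise
H = cdd.Matrix(rows, number_type="fraction")
H.rep_type = cdd.RepType.INEQUALITY
rays = [er[1:] for er in cdd.Polyhedron(H).get_generators() if er[0] == 0]
assert len(rays) == 1 and rays[0][0] == rays[0][1] > 0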
def compute_cone_face_matrix(S):
    """
    Compute the face matrix of a polyhedral convex cone from its span matrix.

    Parameters
    ----------
    S : array, shape=(n, m)
        Span matrix defining the cone as :math:`x = S \\lambda` with
        :math:`\\lambda \\geq 0`.

    Returns
    -------
    F : array, shape=(k, n)
        Face matrix defining the cone equivalently by :math:`F x \\leq 0`.
    """
    V = vstack([
        hstack([zeros((S.shape[1], 1)), S.T]),
        hstack([1, zeros(S.shape[0])])
    ])
    # V-representation: first column is 0 for rays
    mat = cdd.Matrix(V, number_type='float')
    mat.rep_type = cdd.RepType.GENERATOR
    P = cdd.Polyhedron(mat)
    ineq = P.get_inequalities()
    H = array(ineq)
    if H.shape == (0,):  # H == []
        return H
    A = []
    for i in range(H.shape[0]):
        # H matrix is [b, -A] for A * x <= b
        if norm(H[i, 1:]) < 1e-10:
            continue
        elif abs(H[i, 0]) > 1e-10:  # b should be zero for a cone
            raise Exception("Polyhedron is not a cone")
        elif i not in ineq.lin_set:
            A.append(-H[i, 1:])
    return array(A)
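# Usage sketch (added; not from the original source): the cone spanned by the
# standard basis of R^2 is the first quadrant, whose face matrix is -I
# (i.e. -x <= 0 and -y <= 0). Assumes the same numpy imports as the function
# above (eye, etc.).
#
#   S = eye(2)                       # span matrix: x = S * lambda, lambda >= 0
#   F = compute_cone_face_matrix(S)  # rows of F satisfy F x <= 0 on the cone
#   # expected: the rows of F are, up to order and positive scaling,
#   # [-1, 0] and [0, -1]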
def _apply_solver(self):
    start_time = time.time()
    M = self.options.dual_bound
    if not self.options.dual_bound:
        M = 1e6
        print(f'Dual bound not specified, set to default {M}')
    delta = self.options.delta
    if not self.options.delta:
        delta = 0.05
        # What should the default robustness delta be if not specified?
        # Or should I raise an error?
        print(f'Robustness parameter not specified, set to default {delta}')
    # matrix representation for bilevel problem
    matrix_repn = BilevelMatrixRepn(self._instance, standard_form=False)
    # each lower-level problem
    submodel = [block for block in self._instance.component_objects(SubModel)][0]
    if len(submodel) != 1:
        raise Exception('Problem encountered, this is not a valid bilevel '
                        'model for the solver.')
    self._instance.reclassify_component_type(submodel, Block)
    # varref(submodel)
    # dataref(submodel)
    all_vars = {key: var for (key, var) in matrix_repn._all_vars.items()}
    # get the variables that are fixed for the submodel (lower-level block)
    fixed_vars = {key: var for (key, var) in matrix_repn._all_vars.items()
                  if key in matrix_repn._fixed_var_ids[submodel.name]}
    # Is there a way to get integer, continuous, etc. for the upper level
    # rather than lumping them all into fixed?
    # continuous variables in SubModel
    c_vars = {key: var for (key, var) in matrix_repn._all_vars.items()
              if key in matrix_repn._c_var_ids - fixed_vars.keys()}
    # binary variables in SubModel -- SHOULD BE EMPTY FOR THIS SOLVER
    b_vars = {key: var for (key, var) in matrix_repn._all_vars.items()
              if key in matrix_repn._b_var_ids - fixed_vars.keys()}
    if len(b_vars) != 0:
        raise Exception('Problem encountered, this is not a valid bilevel '
                        'model for the solver. Binary variables present!')
    # integer variables in SubModel -- SHOULD BE EMPTY FOR THIS SOLVER
    i_vars = {key: var for (key, var) in matrix_repn._all_vars.items()
              if key in matrix_repn._i_var_ids - fixed_vars.keys()}
    if len(i_vars) != 0:
        raise Exception('Problem encountered, this is not a valid bilevel '
                        'model for the solver. Integer variables present!')
    # get constraint information related to constraint id, sign, and rhs value
    sub_cons = matrix_repn._cons_sense_rhs[submodel.name]
    cons = matrix_repn._cons_sense_rhs[self._instance.name]
    # construct the high-point problem (LL feasible, no LL objective)
    # s0 <- solve the high-point
    # if s0 infeasible then return high_point_infeasible
    xfrm = TransformationFactory('pao.bilevel.highpoint')
    xfrm.apply_to(self._instance)
    #
    # Solve with a specified solver
    #
    solver = self.options.solver
    if not self.options.solver:
        solver = 'gurobi'
    for c in self._instance.component_objects(Block, descend_into=False):
        if 'hp' in c.name:
        # if '_hp' in c.name:
            c.activate()
            with pyomo.opt.SolverFactory(solver) as opt:
                self.results.append(opt.solve(c, tee=self._tee,
                                              timelimit=self._timelimit))
            _check_termination_condition(self.results[-1])
            c.deactivate()
    if self.options.do_print == True:
        print('Solution to the Highpoint Relaxation')
        for _, var in all_vars.items():
            var.pprint()
    # s1 <- solve the optimistic bilevel (linear/linear) problem (call solver3)
    # if s1 infeasible then return optimistic_infeasible
    with pyomo.opt.SolverFactory('pao.bilevel.blp_global') as opt:
        opt.options.solver = solver
        self.results.append(opt.solve(self._instance, tee=self._tee,
                                      timelimit=self._timelimit))
    _check_termination_condition(self.results[-1])
    if self.options.do_print == True:
        print('Solution to the Optimistic Bilevel')
        for _, var in all_vars.items():
            var.pprint()
    # self._instance.pprint()
    # checking for active blocks left over from previous solves
    # sk <- solve the dual adversarial problem
    # if infeasible then return dual_adversarial_infeasible
    # Collect the vertices solutions for the dual adversarial problem.
    # Collect up the matrix B and the vector d for use in all adversarial
    # feasibility problems.
    n = len(c_vars.items())
    m = len(sub_cons.items())
    K = len(cons.items())
    B = np.empty([m, n])
    L = np.empty([K, 1])
    i = 0
    p = 0
    for _, var in c_vars.items():
        (A, A_q, sign, b) = matrix_repn.coef_matrices(submodel, var)
        B[:, i] = np.transpose(np.array(A))
        i += 1
    _ad_block_name = '_adversarial'
    self._instance.add_component(_ad_block_name, Block(Any))
    _Vertices_name = '_Vertices'
    _Vertices_B_name = '_VerticesB'
    self._instance.add_component(
        _Vertices_name,
        Param(cons.keys() * NonNegativeIntegers * sub_cons.keys(),
              mutable=True))
    Vertices = getattr(self._instance, _Vertices_name)
    self._instance.add_component(
        _Vertices_B_name,
        Param(cons.keys() * NonNegativeIntegers, mutable=True))
    VerticesB = getattr(self._instance, _Vertices_B_name)
    adversarial = getattr(self._instance, _ad_block_name)
    # Add Adversarial blocks
    for _cidS, _ in cons.items():  # <for each constraint in the upper-level problem>
        (_cid, _) = _cidS
        ad = adversarial[_cid]  # shorthand
        # sub_cons.keys() because alpha is a dual variable on the
        # lower-level constraints
        ad.alpha = Var(sub_cons.keys(), within=NonNegativeReals)
        ad.beta = Var(within=NonNegativeReals)
        Hk = np.empty([n, 1])
        i = 0
        d = np.empty([n, 1])
        # B^T alpha + beta*d >= H_k; v-dimension constraints, so index by c_vars
        ad.cons = Constraint(c_vars.keys())
        lhs_expr = {key: 0. for key in c_vars.keys()}
        rhs_expr = {key: 0. for key in c_vars.keys()}
        for _vid, var in c_vars.items():
            (A, A_q, sign, b) = matrix_repn.coef_matrices(submodel, var)
            coef = A  # + dot(A_q.toarray(), _fixed)
            (C, C_q, C_constant) = matrix_repn.cost_vectors(submodel, var)
            d[i, 0] = float(C)
            lhs_expr[_vid] = float(C) * ad.beta
            (A, A_q, sign, b) = matrix_repn.coef_matrices(self._instance, var)
            idx = list(cons.keys()).index(_cidS)
            Hk[i, 0] = A[idx]
            i += 1
            for _cid2 in sub_cons.keys():
                idx = list(sub_cons.keys()).index(_cid2)
                lhs_expr[_vid] += float(coef[idx]) * ad.alpha[_cid2]
            rhs_expr[_vid] = float(A[idx])
            expr = lhs_expr[_vid] >= rhs_expr[_vid]
            if not type(expr) is bool:
                ad.cons[_vid] = expr
            else:
                ad.cons[_vid] = Constraint.Skip
        ad.Obj = Objective(expr=0)  # THIS IS A FEASIBILITY PROBLEM
        with pyomo.opt.SolverFactory(solver) as opt:
            self.results.append(opt.solve(ad, tee=self._tee,
                                          timelimit=self._timelimit))
        _check_termination_condition(self.results[-1])
        ad.deactivate()
        Bd = np.hstack((np.transpose(B), d))
        Eye = np.identity(m + 1)
        Bd = np.vstack((Bd, Eye))
        Hk = np.vstack((Hk, np.zeros((m + 1, 1))))
        mat = np.hstack((-Hk, Bd))
        mat = cdd.Matrix(mat, number_type='float')
        mat.rep_type = cdd.RepType.INEQUALITY
        poly = cdd.Polyhedron(mat)
        ext = poly.get_generators()
        extreme = np.array(ext)
        if self.options.do_print == True:
            print(ext)
        (s, t) = extreme.shape
        l = 1
        for i in range(0, s):
            j = 1
            if extreme[0, i] == 1:
                for _scid in sub_cons.keys():
                    # for j in range(1, t-1):
                    # Need to loop over extreme 1 to t-1 and link those to
                    # the cons.keys for alpha?
                    Vertices[(_cidS, l, _scid)] = extreme[i, j]  # Vertex l of the k-th polytope
                    j += 1
                VerticesB[(_cidS, l)] = extreme[i, t - 1]
                l += 1
        L[p, 0] = l - 1
        p += 1
        # vertex enumeration goes from 1 to L
    # Solving the full problem sn0
    _model_name = '_extended'
    _model_name = unique_component_name(self._instance, _model_name)
    xfrm = TransformationFactory('pao.bilevel.highpoint')  # 5.6a-c
    kwds = {'submodel_name': _model_name}
    xfrm.apply_to(self._instance, **kwds)
    extended = getattr(self._instance, _model_name)
    extended.sigma = Var(c_vars.keys(), within=NonNegativeReals, bounds=(0, M))
    extended.lam = Var(sub_cons.keys(), within=NonNegativeReals, bounds=(0, M))
    # 5.d
    extended.d = Constraint(c_vars.keys())  # indexed by lower-level variables
    d_expr = {key: 0. for key in c_vars.keys()}
    for _vid, var in c_vars.items():
        (C, C_q, C_constant) = matrix_repn.cost_vectors(submodel, var)  # gets d_i
        d_expr[_vid] += float(C)
        d_expr[_vid] = d_expr[_vid] - extended.sigma[_vid]
        (A, A_q, sign, b) = matrix_repn.coef_matrices(submodel, var)
        for _cid, _ in sub_cons.items():
            idx = list(sub_cons.keys()).index(_cid)
            d_expr[_vid] += extended.lam[_cid] * float(A[idx])
        expr = d_expr[_vid] == 0
        if not type(expr) is bool:
            extended.d[_vid] = expr
        else:
            extended.d[_vid] = Constraint.Skip
    # 5.e (Complementarity)
    extended.e = ComplementarityList()
    for _cid, _ in sub_cons.items():
        idx = list(sub_cons.keys()).index(_cid)
        expr = 0
        for _vid, var in fixed_vars.items():  # A_i*x
            (A, A_q, sign, b) = matrix_repn.coef_matrices(submodel, var)
            expr += float(A[idx]) * fixed_vars[_vid]
        for _vid, var in c_vars.items():  # B_i*v
            (A, A_q, sign, b) = matrix_repn.coef_matrices(submodel, var)
            expr += float(A[idx]) * c_vars[_vid]
        expr = expr - float(b[idx])
        extended.e.add(complements(extended.lam[_cid] >= 0, expr <= 0))
    # 5.f (Complementarity)
    extended.f = ComplementarityList()
    for _vid, var in c_vars.items():
        extended.f.add(complements(extended.sigma[_vid] >= 0, var >= 0))
    # Replace 5.h-5.j with 5.7 Disjunction: one disjunction per adversarial
    # problem, one adversarial problem per upper-level constraint
    extended.disjunction = Block(cons.keys())
    k = 0
    for _cidS, _ in cons.items():
        idxS = list(cons.keys()).index(_cidS)
        [_cid, sign] = _cidS
        disjunction = extended.disjunction[_cidS]  # shorthand
        disjunction.Lset = RangeSet(1, L[k, 0])
        disjunction.disjuncts = Disjunct(disjunction.Lset)
        for i in disjunction.Lset:  # defining the L disjuncts
            l_expr = 0
            for _vid, var in c_vars.items():
                (C, C_q, C_constant) = matrix_repn.cost_vectors(submodel, var)
                l_expr += float(C) * var  # d^T v
            l_expr += delta
            l_expr = VerticesB[(_cidS, i)] * l_expr  # beta*(d^T v + delta)
            for _cid, Scons in sub_cons.items():  # SUM over i to ml
                Ax = 0
                idx = list(sub_cons.keys()).index(_cid)
                for _vid, var in fixed_vars.items():
                    (A, A_q, sign, b) = matrix_repn.coef_matrices(submodel, var)
                    Ax += float(A[idx]) * var
                l_expr += Vertices[(_cidS, i, _cid)] * (float(b[idx]) - Ax)
            r_expr = 0
            for _vid, var in fixed_vars.items():
                (A, A_q, sign, b) = matrix_repn.coef_matrices(self._instance, var)  # get q and G
                r_expr = r_expr - float(A[idxS]) * var
            r_expr += float(b[idxS])
            disjunction.disjuncts[i].cons = Constraint(expr=l_expr <= r_expr)
        disjunction.seven = Disjunction(
            expr=[disjunction.disjuncts[i] for i in disjunction.Lset],
            xor=False)
        k += 1
    # extended.pprint()
    TransformationFactory('mpec.simple_disjunction').apply_to(extended)
    bigm = TransformationFactory('gdp.bigm')
    bigm.apply_to(extended)
    with pyomo.opt.SolverFactory(solver) as opt:
        self.results.append(opt.solve(extended, tee=self._tee,
                                      timelimit=self._timelimit))
    _check_termination_condition(self.results[-1])
    # Return the sn0 solution
    if self.options.do_print == True:
        print('Robust Solution')
        for _vid, _ in fixed_vars.items():
            fixed_vars[_vid].pprint()
        for _vid, _ in c_vars.items():
            c_vars[_vid].pprint()
        extended.lam.pprint()
        extended.sigma.pprint()
    stop_time = time.time()
    self.wall_time = stop_time - start_time
    return pyutilib.misc.Bunch(rc=getattr(opt, '_rc', None),
                               log=getattr(opt, '_log', None))
def compute_feasible_region_from_block_dir(block_dirs, verbose=False):
    """
    Compute an extreme-ray representation of the feasible assembly region,
    given blocking direction vectors.

    The feasible assembly region is constrained by some hyperplanes, which
    use block_dirs as normals. The cdd package allows us to convert this
    inequality representation to a generator (vertices and rays)
    representation of the polyhedron.

    Adapted from: https://github.com/yijiangh/compas_rpc_example

    More info on cddlib:
    https://pycddlib.readthedocs.io/en/latest/index.html

    Other packages on vertex enumeration:
    https://mathoverflow.net/questions/203966/computionally-efficient-vertex-enumeration-for-convex-polytopes

    Parameters
    ----------
    block_dirs : list of 3-tuples
        A list of blocking directions.

    Returns
    -------
    f_rays : list of 3-tuples
        Extreme rays of the feasible assembly region.
    lin_set : list of int
        Indices of rays that are linear (valid in both directions).
    """
    mat_hrep = []  # "half-space" representation
    for vec in block_dirs:
        # For a polyhedron described as P = {x | A x <= b},
        # the H-representation is the matrix [b -A]
        mat_hrep.append([0, -vec[0], -vec[1], -vec[2]])
    mat = cdd.Matrix(mat_hrep, number_type='fraction')
    mat.rep_type = cdd.RepType.INEQUALITY
    poly = cdd.Polyhedron(mat)
    ext = poly.get_generators()
    lin_set = list(ext.lin_set)  # linear set, both directions
    nt = cdd.NumberTypeable('float')
    f_verts = []
    f_rays = []
    # linear_rays = []
    for i in range(ext.row_size):
        if ext[i][0] == 1:  # vertex
            f_verts.append(tuple([nt.make_number(num) for num in ext[i][1:4]]))
        elif ext[i][0] == 0:  # ray
            # TODO: numerical instability?
            # note: converted to an array so the normalization below works
            ray_vec = np.array([nt.make_number(num) for num in ext[i][1:4]])
            ray_vec /= norm(ray_vec)
            f_rays.append(tuple(ray_vec))
            # if i in lin_set:
            #     lin_vec_set.append(tuple([-nt.make_number(num)
            #                               for num in ext[i][1:4]]))
    # if f_verts:
    #     assert len(f_verts) == 1
    #     np.testing.assert_almost_equal f_verts[0] == [0,0,0]
    # TODO: QR decomposition to make orthogonal
    if verbose:
        print('##############')
        print('ext:\n {}'.format(ext))
        print('ext linset:\n {}'.format(ext.lin_set))
        print('verts:\n {}'.format(f_verts))
        print('rays:\n {}'.format(f_rays))
    return f_rays, lin_set
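# Usage sketch (added; not from the original source): with a single blocking
# direction +z, the feasible assembly region is the half-space z <= 0, so we
# expect a ray pointing into z < 0 plus rays marked linear (spanning the
# free x/y plane).
#
#   f_rays, lin_set = compute_feasible_region_from_block_dir([(0, 0, 1)])
#   # rays whose indices are in lin_set are valid in both directions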
def project_polyhedron(proj, ineq, eq=None, canonicalize=True):
    """
    Apply the affine projection :math:`y = E x + f` to the polyhedron defined
    by:

    .. math::

        A x & \\leq b \\\\
        C x & = d

    Parameters
    ----------
    proj : pair of arrays
        Pair (`E`, `f`) describing the affine projection.
    ineq : pair of arrays
        Pair (`A`, `b`) describing the inequality constraint.
    eq : pair of arrays, optional
        Pair (`C`, `d`) describing the equality constraint.
    canonicalize : bool, optional
        Apply equality constraints from `eq` to reduce the dimension of the
        input polyhedron. May be a blessing or a curse, see notes below.

    Returns
    -------
    vertices : list of arrays
        List of vertices of the projection.
    rays : list of arrays
        List of rays of the projection.

    Notes
    -----
    When the equality set `eq` of the input polytope is not empty, it is
    usually faster to use these equality constraints to reduce the dimension
    of the input polytope (cdd function: `canonicalize()`) before enumerating
    vertices (cdd function: `get_generators()`). Yet, on some descriptions
    this operation may be problematic: if it fails, or if you get empty
    outputs when the output is supposed to be non-empty, you can try setting
    `canonicalize=False`.

    See also
    --------
    This webpage: https://scaron.info/teaching/projecting-polytopes.html
    """
    # the input [b, -A] to cdd.Matrix represents (b - A * x >= 0)
    # see ftp://ftp.ifor.math.ethz.ch/pub/fukuda/cdd/cddlibman/node3.html
    (A, b) = ineq
    b = b.reshape((b.shape[0], 1))
    linsys = cdd.Matrix(hstack([b, -A]), number_type='float')
    linsys.rep_type = cdd.RepType.INEQUALITY
    # the input [d, -C] to cdd.Matrix.extend represents (d - C * x == 0)
    # see ftp://ftp.ifor.math.ethz.ch/pub/fukuda/cdd/cddlibman/node3.html
    if eq is not None:
        (C, d) = eq
        d = d.reshape((d.shape[0], 1))
        linsys.extend(hstack([d, -C]), linear=True)
        if canonicalize:
            linsys.canonicalize()
    # Convert from H- to V-representation
    P = cdd.Polyhedron(linsys)
    generators = P.get_generators()
    if generators.lin_set:
        print("Generators have linear set: {}".format(generators.lin_set))
    V = array(generators)
    # Project output wrenches to 2D set
    (E, f) = proj
    vertices, rays = [], []
    free_coordinates = []
    for i in range(V.shape[0]):
        if generators.lin_set and i in generators.lin_set:
            free_coordinates.append(list(V[i, 1:]).index(1.))
        elif V[i, 0] == 1:  # vertex
            vertices.append(dot(E, V[i, 1:]) + f)
        else:  # ray
            rays.append(dot(E, V[i, 1:]))
    return vertices, rays
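# Usage sketch (added; not from the original source): project the cube
# -1 <= x, y, z <= 1 onto its first two coordinates with E = [I_2 | 0] and
# f = 0. The projection covers the square [-1, 1]^2.
import numpy as np

E = np.hstack([np.eye(2), np.zeros((2, 1))])
f = np.zeros(2)
A = np.vstack([np.eye(3), -np.eye(3)])   # x <= 1 and -x <= 1 componentwise
b = np.ones(6)
vertices, rays = project_polyhedron((E, f), (A, b))
# the 8 cube vertices map onto the 4 corners of the square
# (projected duplicates are not pruned)
assert len(vertices) == 8 and len(rays) == 0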
def main(simplify=False):
    """This script computes the H-rep of a grasp stability constraint model.

    There are two main steps. First, find the extreme points in the space of
    concatenated component vectors. Second, find an inner approximation of
    this set using guidance from a dynamic model.
    """
    # Step 1: find the extreme points in the space of concatenated
    # component vectors.
    # Define equality and inequality constraints:
    #   A_eq f_bar == 0
    #   A_ineq f_bar <= b_ineq
    A_eq = np.zeros((3, nvars))
    b_eq = np.zeros(3)
    A_eq[:3, N * 3:N * 3 + 3] = np.eye(3)
    b_eq[:3] = [0, 0, -PA]
    A_ineq = np.zeros((4 * N + 2 * N, nvars))
    b_ineq = np.zeros(4 * N + 2 * N)
    for i in range(N):
        # inner approximation of the Coulomb friction constraint
        A_ineq[4 * i:4 * i + 4, 3 * i:3 * i + 3] = [[-1, -1, -mu],
                                                    [-1, 1, -mu],
                                                    [1, 1, -mu],
                                                    [1, -1, -mu]]
        # max/min bounds on vertical component forces
        A_ineq[4 * N + 2 * i:4 * N + 2 * i + 2,
               3 * i:3 * i + 3] = [[0, 0, -1], [0, 0, 1]]
        b_ineq[4 * N + 2 * i:4 * N + 2 * i + 2] = [0, fmax]
    # Transform from H-rep to V-rep
    t0 = time.time()
    mat = cdd.Matrix(np.hstack((b_ineq.reshape(-1, 1), -A_ineq)),
                     number_type='float')
    mat.rep_type = cdd.RepType.INEQUALITY
    mat.extend(np.hstack((b_eq.reshape(-1, 1), -A_eq)), linear=True)
    poly = cdd.Polyhedron(mat)
    ext = poly.get_generators()
    ext = np.array(ext)
    t_elapsed = time.time() - t0
    print("Approximate with N={2:d} points:\n"
          "\tFound {0:d} extreme points in {1:10.3f} secs".format(
              ext.shape[0], t_elapsed, N))
    f_extreme_pts = ext[:, 1:1 + 3 * N + 3]
    # Transform to interacting wrench space:
    #   w_O = F f, where F is defined below
    F = np.zeros((6, 3 * N + 3))
    for i in range(N):
        F[:3, 3 * i:3 * i + 3] = [[0, -l, -r * np.cos(alphas[i])],
                                  [l, 0, r * np.sin(alphas[i])],
                                  [r * np.cos(alphas[i]),
                                   -r * np.sin(alphas[i]), 0]]
    F[:3, 3 * N:3 * N + 3] = [[0, -l, 0], [l, 0, 0], [0, 0, 0]]
    for i in range(N + 1):
        F[3:, 3 * i:3 * i + 3] = np.eye(3)
    w0_extreme_pts = f_extreme_pts.dot(F.T)
    # Step 2: Use a robot model to generate better points.
    # REQUIRED OUTPUT w0_hull: the convex hull of the vertices of the
    # set of physically realizable wrenches.
    if simplify:
        env = orpy.Environment()
        env.Load('/home/hung/git/toppra-object-transport/models/'
                 'denso_ft_sensor_suction.robot.xml')
        robot = env.GetRobots()[0]
        contact_base = transport.Contact(robot, "denso_suction_cup2",
                                         np.eye(4), None, None,
                                         raw_data=w0_extreme_pts)
        solid_object = transport.SolidObject.init_from_dict(robot, {
            'object_profile': "bluenb",
            'object_attach_to': "denso_suction_cup2",
            "T_link_object": [[1, 0, 0, 0], [0, -1, 0, 0],
                              [0, 0, -1, 12.5e-3], [0, 0, 0, 1]],
            "name": "obj"
        })
        cs = transport.ContactSimplifier(robot, contact_base, solid_object,
                                         N_vertices=60)
        contact_simp, w0_hull = cs.simplify()
    else:
        w0_hull = transport.poly_contact.ConvexHull(w0_extreme_pts)
    transport.utils.preview_plot(
        [(w0_extreme_pts, 'o', {'markersize': 5}),
         (w0_hull.vertices, 'x', {'markersize': 7})], dur=100)
    print("Computed convex hull has {0:d} vertices and {1:d} faces".format(
        len(w0_hull.get_vertices()), w0_hull.get_halfspaces()[1].shape[0]))
    # save coefficients
    id_ = "analytical_rigid" + "123"
    A, b = w0_hull.get_halfspaces()
    contact_profile = {
        id_: {
            "id": id_,
            "attached_to_manipulator": "denso_suction_cup2",
            "orientation": [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
            "position": [0, 0, 0],
            "constraint_coeffs_file": id_ + ".npz",
            "params": {"simplify": simplify, "N": N, "PA": PA, "mu": mu,
                       "r": r, "fmax": fmax}
        }
    }
    print("db entry (to copy manually)\n\nbegin -----------------\n\n{:}\n"
          "end--------".format(yaml.dump(contact_profile)))
    cmd = raw_input("Save constraint coefficients A, b y/[N]?")
    if cmd == "y":
        np.savez(
            "/home/hung/Dropbox/ros_data/toppra_application/{:}.npz".format(id_),
            A=A, b=b)
        print("Saved coefficients to database!")
    else:
        exit("abc")
def get_outer_approx(self, algorithm=None):
    """Generate an outer approximation.

    :parameter algorithm: a :class:`~string` denoting the algorithm used:
        ``None``, ``'linvac'``, ``'irm'``, ``'imrm'``, or ``'lpbelfunc'``
    :rtype: :class:`~improb.lowprev.lowprob.LowProb`

    This method replaces the lower probability :math:`\underline{P}` by
    a lower probability :math:`\underline{R}` determined by the
    ``algorithm`` argument:

    ``None`` returns the original lower probability.

    >>> pspace = PSpace('abc')
    >>> lprob = LowProb(pspace,
    ...     lprob={'ab': .5, 'ac': .5, 'bc': .5},
    ...     number_type='fraction')
    >>> lprob.extend()
    >>> print(lprob)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/2
    a c   : 1/2
    b c   : 1/2
    a b c : 1
    >>> lprob == lprob.get_outer_approx()
    True

    ``'linvac'`` replaces the imprecise part :math:`\underline{Q}` by the
    vacuous lower probability :math:`\underline{R}=\min` to generate a
    simple outer approximation.

    ``'irm'`` replaces :math:`\underline{P}` by a completely monotone lower
    probability :math:`\underline{R}` that is obtained by using the IRM
    algorithm of Hall & Lawry [#hall2004]_. The Moebius transform of a
    lower probability that is not completely monotone contains negative
    belief assignments. Consider such a lower probability and an event with
    such a negative belief assignment. The approximation consists of
    removing this negative assignment and compensating for this by
    correspondingly reducing the positive masses for events below it; for
    details, see the paper.

    The following example illustrates the procedure:

    >>> pspace = PSpace('abc')
    >>> lprob = LowProb(pspace,
    ...     lprob={'ab': .5, 'ac': .5, 'bc': .5},
    ...     number_type='fraction')
    >>> lprob.extend()
    >>> print(lprob)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/2
    a c   : 1/2
    b c   : 1/2
    a b c : 1
    >>> lprob.is_completely_monotone()
    False
    >>> print(lprob.mobius)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/2
    a c   : 1/2
    b c   : 1/2
    a b c : -1/2
    >>> belfunc = lprob.get_outer_approx('irm')
    >>> print(belfunc.mobius)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/3
    a c   : 1/3
    b c   : 1/3
    a b c : 0
    >>> print(belfunc)
          : 0
    a     : 0
    b     : 0
    c     : 0
    a b   : 1/3
    a c   : 1/3
    b c   : 1/3
    a b c : 1
    >>> belfunc.is_completely_monotone()
    True

    The next is Example 2 from Hall & Lawry's 2004 paper [#hall2004]_:

    >>> pspace = PSpace('ABCD')
    >>> lprob = LowProb(pspace, lprob={'': 0, 'ABCD': 1,
    ...     'A': .0895, 'B': .2743,
    ...     'C': .2668, 'D': .1063,
    ...     'AB': .3947, 'AC': .4506,
    ...     'AD': .2959, 'BC': .5837,
    ...     'BD': .4835, 'CD': .4079,
    ...     'ABC': .7248, 'ABD': .6224,
    ...     'ACD': .6072, 'BCD': .7502})
    >>> lprob.is_avoiding_sure_loss()
    True
    >>> lprob.is_coherent()
    False
    >>> lprob.is_completely_monotone()
    False
    >>> belfunc = lprob.get_outer_approx('irm')
    >>> belfunc.is_completely_monotone()
    True
    >>> print(lprob)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3947
    A C     : 0.4506
    A D     : 0.2959
    B C     : 0.5837
    B D     : 0.4835
    C D     : 0.4079
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(belfunc)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.375789766751
    A C     : 0.405080300695
    A D     : 0.259553087227
    B C     : 0.560442004097
    B D     : 0.43812301076
    C D     : 0.399034985143
    A B C   : 0.710712071543
    A B D   : 0.603365864737
    A C D   : 0.601068373065
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(lprob.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0309
    A C     : 0.0943
    A D     : 0.1001
    B C     : 0.0426
    B D     : 0.1029
    C D     : 0.0348
    A B C   : -0.0736
    A B D   : -0.0816
    A C D   : -0.0846
    B C D   : -0.0775
    A B C D : 0.1748
    >>> print(belfunc.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0119897667507
    A C     : 0.0487803006948
    A D     : 0.0637530872268
    B C     : 0.019342004097
    B D     : 0.0575230107598
    C D     : 0.0259349851432
    A B C   : 3.33066907388e-16
    A B D   : -1.11022302463e-16
    A C D   : -1.11022302463e-16
    B C D   : 0.0
    A B C D : 0.0357768453276
    >>> sum(lprev for (lprev, uprev)
    ...     in (lprob - belfunc).itervalues())/(2 ** len(pspace))
    0.013595658498933991

    .. note::

        This algorithm is *not* invariant under permutation of the
        possibility space.

    .. warning::

        The lower probability must be defined for all events. If needed,
        call :meth:`~improb.lowprev.lowpoly.LowPoly.extend` first.

    ``'imrm'`` replaces :math:`\underline{P}` by a completely monotone
    lower probability :math:`\underline{R}` that is obtained by using an
    algorithm by Quaeghebeur that is as of yet unpublished. We apply it to
    Example 2 from Hall & Lawry's 2004 paper [#hall2004]_:

    >>> pspace = PSpace('ABCD')
    >>> lprob = LowProb(pspace, lprob={
    ...     '': 0, 'ABCD': 1,
    ...     'A': .0895, 'B': .2743,
    ...     'C': .2668, 'D': .1063,
    ...     'AB': .3947, 'AC': .4506,
    ...     'AD': .2959, 'BC': .5837,
    ...     'BD': .4835, 'CD': .4079,
    ...     'ABC': .7248, 'ABD': .6224,
    ...     'ACD': .6072, 'BCD': .7502})
    >>> belfunc = lprob.get_outer_approx('imrm')
    >>> belfunc.is_completely_monotone()
    True
    >>> print(lprob)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3947
    A C     : 0.4506
    A D     : 0.2959
    B C     : 0.5837
    B D     : 0.4835
    C D     : 0.4079
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(belfunc)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.381007057096
    A C     : 0.411644226231
    A D     : 0.26007767078
    B C     : 0.562748716673
    B D     : 0.4404197271
    C D     : 0.394394926787
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(lprob.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0309
    A C     : 0.0943
    A D     : 0.1001
    B C     : 0.0426
    B D     : 0.1029
    C D     : 0.0348
    A B C   : -0.0736
    A B D   : -0.0816
    A C D   : -0.0846
    B C D   : -0.0775
    A B C D : 0.1748
    >>> print(belfunc.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0172070570962
    A C     : 0.0553442262305
    A D     : 0.0642776707797
    B C     : 0.0216487166733
    B D     : 0.0598197271
    C D     : 0.0212949267869
    A B C   : 2.22044604925e-16
    A B D   : 0.0109955450242
    A C D   : 0.00368317620293
    B C D   : 3.66294398528e-05
    A B C D : 0.00879232466651
    >>> sum(lprev for (lprev, uprev)
    ...     in (lprob - belfunc).itervalues())/(2 ** len(pspace))
    0.010375479708342836

    .. note::

        This algorithm *is* invariant under permutation of the possibility
        space.

    .. warning::

        The lower probability must be defined for all events. If needed,
        call :meth:`~improb.lowprev.lowpoly.LowPoly.extend` first.

    ``'lpbelfunc'`` replaces :math:`\underline{P}` by a completely monotone
    lower probability :math:`\underline{R}_\mu` that is obtained via the
    zeta transform of the basic belief assignment :math:`\mu`, a solution
    of the following optimization (linear programming) problem:

    .. math::

        \min\{
            \sum_{A\subseteq\Omega}(\underline{P}(A)-\underline{R}_\mu(A)):
            \mu(A)\geq0,
            \sum_{B\subseteq\Omega}\mu(B)=1,
            \underline{R}_\mu(A)\leq\underline{P}(A), A\subseteq\Omega
        \},

    which, because constants in the objective function do not influence the
    solution and because
    :math:`\underline{R}_\mu(A)=\sum_{B\subseteq A}\mu(B)`, is equivalent
    to:

    .. math::

        \max\{
            \sum_{B\subseteq\Omega}2^{|\Omega|-|B|}\mu(B):
            \mu(A)\geq0,
            \sum_{B\subseteq\Omega}\mu(B)=1,
            \sum_{B\subseteq A}\mu(B)\leq\underline{P}(A), A\subseteq\Omega
        \},

    the version that is implemented. We apply this to Example 2 from
    Hall & Lawry's 2004 paper [#hall2004]_, which we also used for
    ``'irm'``:

    >>> pspace = PSpace('ABCD')
    >>> lprob = LowProb(pspace, lprob={'': 0, 'ABCD': 1,
    ...     'A': .0895, 'B': .2743,
    ...     'C': .2668, 'D': .1063,
    ...     'AB': .3947, 'AC': .4506,
    ...     'AD': .2959, 'BC': .5837,
    ...     'BD': .4835, 'CD': .4079,
    ...     'ABC': .7248, 'ABD': .6224,
    ...     'ACD': .6072, 'BCD': .7502})
    >>> belfunc = lprob.get_outer_approx('lpbelfunc')
    >>> belfunc.is_completely_monotone()
    True
    >>> print(lprob)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3947
    A C     : 0.4506
    A D     : 0.2959
    B C     : 0.5837
    B D     : 0.4835
    C D     : 0.4079
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(belfunc)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.3638
    A C     : 0.4079
    A D     : 0.28835
    B C     : 0.5837
    B D     : 0.44035
    C D     : 0.37355
    A B C   : 0.7248
    A B D   : 0.6224
    A C D   : 0.6072
    B C D   : 0.7502
    A B C D : 1.0
    >>> print(lprob.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0309
    A C     : 0.0943
    A D     : 0.1001
    B C     : 0.0426
    B D     : 0.1029
    C D     : 0.0348
    A B C   : -0.0736
    A B D   : -0.0816
    A C D   : -0.0846
    B C D   : -0.0775
    A B C D : 0.1748
    >>> print(belfunc.mobius)
            : 0.0
    A       : 0.0895
    B       : 0.2743
    C       : 0.2668
    D       : 0.1063
    A B     : 0.0
    A C     : 0.0516
    A D     : 0.09255
    B C     : 0.0426
    B D     : 0.05975
    C D     : 0.00045
    A B C   : 0.0
    A B D   : 1.11022302463e-16
    A C D   : 0.0
    B C D   : 0.0
    A B C D : 0.01615
    >>> sum(lprev for (lprev, uprev)
    ...     in (lprob - belfunc).itervalues())/(2 ** len(pspace)
    ...     ) # doctest: +ELLIPSIS
    0.00991562...

    .. note::

        This algorithm is *not* invariant under permutation of the
        possibility space or changes in the LP-solver: there may be a
        nontrivial convex set of optimal solutions.

    .. warning::

        The lower probability must be defined for all events. If needed,
        call :meth:`~improb.lowprev.lowpoly.LowPoly.extend` first.
    """
    if algorithm is None:
        return self
    elif algorithm == 'linvac':
        prob, coeff = self.get_precise_part()
        return prob.get_linvac(1 - coeff)
    elif algorithm == 'irm':
        # Initialize the algorithm
        pspace = self.pspace
        bba = SetFunction(pspace, number_type=self.number_type)
        bba[False] = 0

        def mass_below(event):
            subevents = pspace.subsets(event, full=False, empty=False)
            return sum(bba[subevent] for subevent in subevents)

        def basin_for_negmass(event):
            mass = 0
            index = len(event)
            while bba[event] + mass < 0:
                index -= 1
                subevents = pspace.subsets(event, size=index)
                mass += sum(bba[subevent] for subevent in subevents)
            return (index, mass)

        lprob = self.set_function
        # The algorithm itself:
        # we climb the algebra of events, calculating the belief assignment
        # for each and compensate negative ones by proportionally reducing
        # the assignments in the smallest basin of subevents needed
        for cardinality in range(1, len(pspace) + 1):
            for event in pspace.subsets(size=cardinality):
                bba[event] = lprob[event] - mass_below(event)
                if bba[event] < 0:
                    index, mass = basin_for_negmass(event)
                    subevents = chain.from_iterable(
                        pspace.subsets(event, size=k)
                        for k in range(index, cardinality))
                    for subevent in subevents:
                        bba[subevent] = (bba[subevent]
                                         * (1 + (bba[event] / mass)))
                    bba[event] = 0
        return LowProb(pspace,
                       lprob=dict((event, bba.get_zeta(event))
                                  for event in bba.iterkeys()))
    elif algorithm == 'imrm':
        # Initialize the algorithm
        pspace = self.pspace
        number_type = self.number_type
        bba = SetFunction(pspace, number_type=number_type)
        bba[False] = 0

        def mass_below(event, cardinality=None):
            subevents = pspace.subsets(event, full=False, empty=False,
                                       size=cardinality)
            return sum(bba[subevent] for subevent in subevents)

        def basin_for_negmass(event):
            mass = 0
            index = len(event)
            while bba[event] + mass < 0:
                index -= 1
                subevents = pspace.subsets(event, size=index)
                mass += sum(bba[subevent] for subevent in subevents)
            return (index, mass)

        lprob = self.set_function
        # The algorithm itself:
        cardinality = 1
        while cardinality <= len(pspace):
            temp_bba = SetFunction(pspace, number_type=number_type)
            for event in pspace.subsets(size=cardinality):
                bba[event] = lprob[event] - mass_below(event)
            offenders = dict((event, basin_for_negmass(event))
                             for event in pspace.subsets(size=cardinality)
                             if bba[event] < 0)
            if len(offenders) == 0:
                cardinality += 1
            else:
                minindex = min(pair[0] for pair in offenders.itervalues())
                for event in offenders:
                    if offenders[event][0] == minindex:
                        mass = mass_below(event, cardinality=minindex)
                        scalef = (offenders[event][1] + bba[event]) / mass
                        for subevent in pspace.subsets(event, size=minindex):
                            if subevent not in temp_bba:
                                temp_bba[subevent] = 0
                            temp_bba[subevent] = max(temp_bba[subevent],
                                                     scalef * bba[subevent])
                for event, value in temp_bba.iteritems():
                    bba[event] = value
                cardinality = minindex + 1
        return LowProb(pspace,
                       lprob=dict((event, bba.get_zeta(event))
                                  for event in bba.iterkeys()))
    elif algorithm == 'lpbelfunc':
        # Initialize the algorithm
        lprob = self.set_function
        pspace = lprob.pspace
        number_type = lprob.number_type
        n = 2 ** len(pspace)
        # Set up the linear program
        mat = cdd.Matrix(list(chain(
            [[-1] + n * [1], [1] + n * [-1]],
            [[0] + [int(event == other) for other in pspace.subsets()]
             for event in pspace.subsets()],
            [[lprob[event]] + [-int(other <= event)
                               for other in pspace.subsets()]
             for event in pspace.subsets()])),
            number_type=number_type)
        mat.obj_type = cdd.LPObjType.MAX
        mat.obj_func = (0,) + tuple(2 ** (len(pspace) - len(event))
                                    for event in pspace.subsets())
        lp = cdd.LinProg(mat)
        # Solve the linear program and check the solution
        lp.solve()
        if lp.status == cdd.LPStatusType.OPTIMAL:
            bba = SetFunction(pspace,
                              data=dict(izip(list(pspace.subsets()),
                                             list(lp.primal_solution))),
                              number_type=number_type)
            return LowProb(pspace,
                           lprob=dict((event, bba.get_zeta(event))
                                      for event in bba.iterkeys()))
        else:
            raise RuntimeError('No optimal solution found.')
    else:
        raise NotImplementedError
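# Minimal sketch (added; not from the original source) of the cdd.LinProg
# pattern used by the 'lpbelfunc' branch above: constraint rows are [b, -A]
# for A x <= b, and obj_func is (constant, coefficients...).
import cdd

mat = cdd.Matrix([[1, -1, 0],      # x <= 1
                  [1, 0, -1],      # y <= 1
                  [0, 1, 0],       # x >= 0
                  [0, 0, 1]],      # y >= 0
                 number_type='fraction')
mat.obj_type = cdd.LPObjType.MAX
mat.obj_func = (0, 3, 4)           # maximize 3x + 4y
lp = cdd.LinProg(mat)
lp.solve()
assert lp.status == cdd.LPStatusType.OPTIMAL
assert lp.obj_value == 7           # attained at the vertex (1, 1)
assert lp.primal_solution == (1, 1)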
def main():
    global pos, normals
    # bar = sum(pos)/float(len(pos))
    # pos = [p-bar for p in pos]
    mu = 0.2
    contacts = [stab.Contact(mu, p, n) for p, n in zip(pos, normals)]
    poly = stab.StabilityPolygon(60, dimension=2, force_lim=1000.)
    poly.contacts = contacts[0:8]
    for c in poly.contacts[0:4]:
        c.r[2] = 0.
        c.n = np.array([[0.4], [0.4], [np.sqrt(1 - 2 * (0.4**2))]])
        # c.n = np.array([[0., 1., 0.]]).T
    for c in poly.contacts[4:]:
        c.r[2] = 0.
        # c.n = np.array([[0., 0., 1.]]).T
        c.n = np.array([[-0.4], [-0.4], [np.sqrt(1 - 2 * (0.4**2))]])
    poly.reset_fig()
    poly.plot_contacts()
    poly.show()
    sol = 'plain'
    # Compute the unconstrained polygon and save its inequalities
    poly.compute(stab.Mode.iteration, maxIter=20, epsilon=2e-3, solver=sol,
                 plot_error=False, plot_step=False, plot_init=False,
                 plot_final=False)
    poly_ineq = poly.backend.doublepoly.inner.inequalities
    radius = 0.11
    fc = 4
    nc = 8
    for c in range(fc, nc):
        poly.addForceConstraint([poly.contacts[c]], radius)
    poly.compute(stab.Mode.iteration, maxIter=20, epsilon=2e-3, solver=sol,
                 plot_error=False, plot_step=False, plot_init=False,
                 plot_final=False)
    poly.plot()
    poly.show()
    assert (poly.dimension == 2)
    assert (len(poly.gravity_envelope) == 1)
    A1, A2, t = poly.A1, poly.A2, poly.t
    sphere_ineq = np.array([[1., -1., 1.], [1., -1., -1.], [1., 1., -1.],
                            [1., 1., 1.], [-1., 1., -1.], [-1., 1., 1.],
                            [-1., -1., -1.], [-1., -1., 1.]])
    sphere = np.zeros((8 * (nc - fc), 1 + poly.nrVars()))
    for contact_id in range(fc, nc):
        line = 8 * (contact_id - fc)
        col = 1 + 3 * contact_id
        sphere[line:line + 8, col:col + 3] = sphere_ineq
        sphere[line:line + 8, 0] = radius * poly.mass * 9.81
    nr_lines = poly_ineq.shape[0]
    exp_poly_ineq = np.hstack([
        poly_ineq[:, 0:1],
        np.zeros((nr_lines, poly.nrVars() - 2)),
        poly_ineq[:, 1:]
    ])
    eq = np.hstack((t, -A1, -A2))
    mat = cdd.Matrix(sphere, number_type='fraction')
    mat.rep_type = cdd.RepType.INEQUALITY
    mat.extend(exp_poly_ineq)
    mat.extend(eq, linear=True)
    print("Let's goooooo")
    cdd_poly = cdd.Polyhedron(mat)
    vertices = np.array(cdd_poly.get_generators())
    print(vertices.shape)
    if len(vertices.shape) > 1:
        point_mask = vertices[:, 0] == 1
        points = vertices[point_mask, -2:]
        rays = vertices[~point_mask, -2:]
        hull = ConvexHull(points)
        poly.reset_fig()
        poly.plot_contacts()
        poly.plot_polyhedron(poly.inner, 'blue', 0.5)
        poly.ax.plot(hull.points[hull.vertices, 0],
                     hull.points[hull.vertices, 1],
                     'red', label='cdd', marker='^', markersize=10)
        for ray in rays:
            if np.linalg.norm(ray) > 1e-10:
                print(ray)
                pp = np.vstack([i * ray for i in np.linspace(0.01, 1)])
                poly.ax.plot(pp[:, 0], pp[:, 1], 'red')
            else:
                print("This is a zero ray")
        poly.show()
    else:
        print("No vertices")
plt.subplot(1, len(ds) + 1, 1)
plt.imshow(FINAL.reshape((K, K)), vmin=0., vmax=len(ds),
           extent=[-2, 2, -2, 2], cmap=cmap, norm=norm, origin='lower')
plt.yticks([])
plt.xticks([])
for M in regions:
    final = utils.in_region(xx, regions[M]['ineq']).astype('float32')
    plt.tricontour(xx[:, 0], xx[:, 1], final - 0.5, levels=[0], linewidths=2)
plt.tight_layout()
plt.savefig('partition_building.pdf')
plt.close()

flips, ineq = list(regions.items())[1]
m = cdd.Matrix(np.hstack([ineq[:, [0]], ineq[:, 1:]]))
m.rep_type = cdd.RepType.INEQUALITY
v = np.array(cdd.Polyhedron(m).get_generators())[:, 1:]

####################################

simplices = utils.get_simplices(v)
plt.figure(figsize=((len(simplices) + 1) * 4, 4))
plt.subplot(1, len(simplices) + 1, 1)
mask = utils.in_region(xx, ineq[:, 1:], ineq[:, 0]).astype('float32')
plt.imshow(mask.reshape((K, K)) * 2, aspect='auto', cmap=cmap, norm=norm,
           origin='lower', extent=[-2, 2, -2, 2])
plt.xticks([])
plt.yticks([])
def conserved_moieties(self, mip_info, findCM='null', deadend=True, nameCM=0):
    '''Find conserved moieties by computing extreme rays. Called by
    compute_met_form.

    Return a conserved_moiety_info object containing the conserved moiety
    information:
    cm: list of conserved moieties, each being a metabolite-coefficient
        dictionary
    cm_generic: list of entries in cm that are generic (no known metabolites
        involved)
    cm_generic_dict: {formula: cm_generic} dictionary. formula is the
        generic_formula object with the defaulted or inputted name as the
        formula for the conserved moiety.
    '''
    model = self.model
    if (not deadend) or nameCM == 1:
        activeMets, activeRxns = active_met_rxn(model)
    else:
        activeMets, activeRxns = model.metabolites, model.reactions
    if not deadend:
        cmMets, cmRxns = activeMets, activeRxns
    else:
        cmMets, cmRxns = model.metabolites, model.reactions
    # (matrix format: [[b_1, a_11, a_12, ..., a_1N], ...,
    #  [b_M, a_M1, a_M2, ..., a_MN]] for Ax + b >= 0,
    #  where A = [a_ij], b = [b_1, ..., b_M])
    if findCM == 'null':
        # transpose of S
        S = [[j._metabolites[i] if i in j._metabolites else 0
              for i in cmMets] for j in cmRxns]
        # This method calculates a rational basis for transpose(S) from the
        # reduced row echelon form; it usually finds a subset of extreme
        # rays, quite probably the whole set.
        N = extreme_rays_from_null_basis(S)
        cm = [{cmMets[i]: N[i, k] for i in range(len(cmMets)) if N[i, k] != 0}
              for k in range(N.shape[1])]
    elif findCM == 'cdd':
        # transpose(S) >= 0
        S = [[0] + [j._metabolites[i] if i in j._metabolites else 0
                    for i in cmMets] for j in cmRxns]
        # transpose(S) <= 0
        S += [[-i for i in j] for j in S]
        # all entries >= 0
        S += [[0] + [1 if i == j else 0 for i in range(len(cmMets))]
              for j in range(len(cmMets))]
        print('Matrix size for cdd extreme ray calculation: %d x %d'
              % (len(S), len(S[0])))
        # The cdd library seems unable to cope with genome-scale models.
        # The best is to call EFMtool. To be implemented.
        mat = cdd.Matrix(S, number_type='float')
        mat.rep_type = cdd.RepType.INEQUALITY
        poly = cdd.Polyhedron(mat)
        ext = poly.get_generators()
        cm = [{cmMets[i]: ext.__getitem__(k)[i + 1]
               for i in range(len(cmMets)) if ext.__getitem__(k)[i + 1] != 0}
              for k in range(ext.row_size)]
    # generic conserved moieties involving no known metabolites
    cm_generic = [c for c in cm
                  if all([i in self.pre.met_unknown for i in c])]
    cm_generic_dict = {}
    NcmDefault = 0
    for c in cm_generic:
        # Use defaulted names for dead-end metabolites if nameCM == 1,
        # or always use defaulted names if nameCM == 0
        if nameCM == 0 or (any([not i in activeMets for i in c.keys()])
                           and nameCM == 1):
            # defaulted names
            NcmDefault += 1
            cmNameCur = 'Conserve_' + num2alpha(NcmDefault)
        else:
            print('\n\n')
            for i in c.keys():
                toPrint = self.pre.met_known[i].formula \
                    if i in self.pre.met_known else mip_info.formulae[i].formula
                if toPrint == 'Mass0':
                    toPrint = ''
                toPrint += formula_dict2str({"Conserve": c[i]})
                print('%s\t%s\t%s' % (i.id, i.name, toPrint))
            while True:
                cmNameCur = raw_input(
                    "\nEnter the formula for the conserved moiety: "
                    "(e.g. C2HRab_cd0.5Charge-1 -> "
                    "{C:2, H:1, Rab_cd: 0.5, Charge: -1}, "
                    "hit return to use default name 'Conserve_xxx')\n")
                # check if the input is empty or a correct formula
                if cmNameCur == "" or ''.join(
                        [''.join(k) for k in element_re.findall(cmNameCur)]
                        ) == cmNameCur:
                    break
                print('Incorrect format of the input formula!\n')
            if cmNameCur == '':
                # an empty string means using the default name
                NcmDefault += 1
                cmNameCur = 'Conserve_' + num2alpha(NcmDefault)
        cm_generic_dict[Formula(cmNameCur)] = c
    cm_info = conserved_moiety_info()
    cm_info.cm, cm_info.cm_generic, cm_info.cm_generic_dict = \
        cm, cm_generic, cm_generic_dict
    return cm_info
def check_feasible(self, Node, channel_alloc, SU_index):
    vertex_list = Node['vertex']
    n = SU_index
    p_dim = int(np.sum(channel_alloc[n, :]))

    def feasible_region():
        # store the constraints for the feasible region h x <= b
        h_list = []
        b_list = []
        vertex_ld = []
        for v in vertex_list:
            vertex_ld.append(v[np.where(channel_alloc[n, :] == 1)])
        for i in range(p_dim + 1):
            vertex_hyperlane = copy.deepcopy(vertex_ld)
            vertex_out = vertex_hyperlane[i]
            del vertex_hyperlane[i]
            b = np.ones(p_dim)
            A = np.zeros((p_dim, p_dim))
            for k in range(len(vertex_hyperlane)):
                A[k, :] = vertex_hyperlane[k]
            if (np.linalg.matrix_rank(A) == p_dim):
                h = np.linalg.solve(A, b)
                b = b[0]
            else:
                while (True):
                    translation = np.random.rand(p_dim)
                    vertex_hyperlane_new = copy.deepcopy(vertex_hyperlane)
                    for k in range(len(vertex_hyperlane_new)):
                        vertex_hyperlane_new[k] = vertex_hyperlane[k] + translation
                    for k in range(len(vertex_hyperlane_new)):
                        A[k, :] = vertex_hyperlane_new[k]
                    if (np.linalg.matrix_rank(A) == p_dim):
                        h = np.linalg.solve(A, b)
                        b = b[0] - np.inner(h, translation)
                        break
            if (np.inner(h, vertex_out) > b):
                h = -h
                b = -b
            h_list.append(np.round(h, 8))
            b_list.append(np.round(b, 8))
        return h_list, b_list

    # feasible region for node i
    a_feasible_list, b_feasible_list = feasible_region()
    A_arr = np.zeros(((p_dim + 1) + p_dim + len(self.minRate_h), 1 + p_dim))
    b_arr = np.zeros(((p_dim + 1) + p_dim + len(self.minRate_b), 1))
    # feasible region constraint
    for k in range(p_dim + 1):
        A_arr[k, 0] = 0
        A_arr[k, 1:] = a_feasible_list[k]
        b_arr[k, 0] = b_feasible_list[k]
    # QAM capacity constraint
    A_arr[(p_dim + 1):(p_dim + 1) + p_dim, 1:] = np.identity(p_dim)
    b_arr[(p_dim + 1):(p_dim + 1) + p_dim, 0] = \
        self.QAM_max_power[np.where(channel_alloc[n, :] == 1)]
    # minimum data rate constraint
    A_arr[(p_dim + 1) + p_dim:, 0] = 0
    A_arr[(p_dim + 1) + p_dim:, 1:] = self.minRate_h
    b_arr[(p_dim + 1) + p_dim:, 0] = self.minRate_b
    # b_arr - A_arr * x >= 0
    A = np.hstack((b_arr, -A_arr))
    A = np.round(A, 8)
    mat = cdd.Matrix(A, number_type='fraction')
    mat.rep_type = cdd.RepType.INEQUALITY
    poly = cdd.Polyhedron(mat)
    vertices = poly.get_generators()
    vertices_array = np.array(vertices, dtype=float)
    if (vertices_array.size == 0):
        # print("Cannot find a feasible solution!")
        return False
    else:
        return True
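# Illustrative note (added; not from the original source): the emptiness test
# above relies on cdd returning an empty generator matrix for an infeasible
# H-representation, e.g. x >= 1 together with x <= 0.
import cdd
import numpy as np

infeasible = cdd.Matrix([[-1, 1],    # [b, -A] row: -1 + x >= 0, i.e. x >= 1
                         [0, -1]],   # -x >= 0, i.e. x <= 0
                        number_type='fraction')
infeasible.rep_type = cdd.RepType.INEQUALITY
assert np.array(cdd.Polyhedron(infeasible).get_generators()).size == 0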
def finite_power_solver(vertex_list, vertex_total):
    # feasible region for node i
    a_feasible_list, b_feasible_list = feasible_region()
    vertex_total = copy.deepcopy(vertex_total)
    # If the previous power allocation is out of the feasible region, add a vertex
    for j in range(p_dim + 1):
        i = len(vertex_total) - 1
        if (np.dot(a_feasible_list[j],
                   vertex_total[i][np.where(channel_alloc[n, :] == 1)])
                > (b_feasible_list[j] + 0.01)):
            del vertex_total[i]
            # Find the gravity center for vertex_list
            v_gravity = np.zeros(n_channel)
            for v in vertex_list:
                v_gravity[np.where(channel_alloc[n, :] == 1)] \
                    = v_gravity[np.where(channel_alloc[n, :] == 1)] \
                    + v[np.where(channel_alloc[n, :] == 1)]
            v_gravity = v_gravity / (p_dim + 1)
            vertex_total.append(copy.deepcopy(v_gravity))
            break
    # delete the vertices that are out of the feasible region of node i
    for j in range(p_dim + 1):
        i = len(vertex_total) - 3
        while (True):
            if (i < 0):
                break
            if (np.dot(a_feasible_list[j],
                       vertex_total[i][np.where(channel_alloc[n, :] == 1)])
                    > (b_feasible_list[j] + 0.01)):
                del vertex_total[i]
            i = i - 1
    A_arr = np.zeros((len(vertex_total) + (p_dim + 1) + p_dim
                      + len(self.minRate_h), 1 + p_dim))
    b_arr = np.zeros((len(vertex_total) + (p_dim + 1) + p_dim
                      + len(self.minRate_b), 1))
    # fM(p) constraint: t - fM(p) <= 0
    A_arr[0:len(vertex_total), 0] = 1
    k = 0
    for v in vertex_total:
        tmp = -df(v)
        tmp = tmp[np.where(channel_alloc[n, :] == 1)]
        A_arr[k, 1:] = copy.deepcopy(tmp)
        b_arr[k, 0] = f(v) - np.dot(v.T, df(v))
        k = k + 1
    # feasible region constraint
    for k in range(p_dim + 1):
        A_arr[len(vertex_total) + k, 0] = 0
        A_arr[len(vertex_total) + k, 1:] = a_feasible_list[k]
        b_arr[len(vertex_total) + k, 0] = b_feasible_list[k]
    # QAM capacity constraint
    A_arr[len(vertex_total) + (p_dim + 1):
          len(vertex_total) + (p_dim + 1) + p_dim, 1:] = np.identity(p_dim)
    b_arr[len(vertex_total) + (p_dim + 1):
          len(vertex_total) + (p_dim + 1) + p_dim, 0] = \
        self.QAM_max_power[np.where(channel_alloc[n, :] == 1)]
    # minimum data rate constraint
    A_arr[len(vertex_total) + (p_dim + 1) + p_dim:, 0] = 0
    A_arr[len(vertex_total) + (p_dim + 1) + p_dim:, 1:] = self.minRate_h
    b_arr[len(vertex_total) + (p_dim + 1) + p_dim:, 0] = self.minRate_b
    # b_arr - A_arr * x >= 0
    A = np.hstack((b_arr, -A_arr))
    A = np.round(A, 8)
    mat = cdd.Matrix(A, number_type='fraction')
    mat.rep_type = cdd.RepType.INEQUALITY
    poly = cdd.Polyhedron(mat)
    vertices = poly.get_generators()
    vertices_array = np.array(vertices, dtype=float)
    if (vertices_array.size == 0):
        # print("Cannot find a feasible solution!")
        return  # os._exit()
    upperbound_max = -float("inf")
    p_sol_lowd = np.zeros(p_dim)
    for i in range(vertices_array.shape[0]):
        if (vertices_array[i, 0] == 1):
            # v = vertices_array[i, 1:]
            # q = np.round(np.matmul(A_arr, v) - b_arr.reshape(-1), 4)
            # if (np.amax(q) < 0 + 0.001):
            upperbound = vertices_array[i, 1] - g(vertices_array[i, 2:],
                                                  low_dim=True)
            if (upperbound > upperbound_max):
                p_sol_lowd = vertices_array[i, 2:]
                upperbound_max = upperbound
    p_sol = np.zeros(n_channel)
    p_lowd_index = 0
    for k in range(n_channel):
        if (channel_alloc[n, k] == 1):
            p_sol[k] = p_sol_lowd[p_lowd_index]
            p_lowd_index = p_lowd_index + 1
    return p_sol, upperbound_max
def detect_minimum_cycles(graph):
    """
    Return both V- and H-representations of the polyhedra corresponding to
    minimal cycles.
    """
    minimal_cycles = []
    for v in graph:
        for w in v.get_connections():
            # The first step should not be a boundary edge
            if not v.adjacent[w]['is_boundary'] and not v.adjacent[w]['visited']:
                # print('\n')
                # print(v)
                v.adjacent[w]['visited'] = True
                cycle = [v.id, w.id]
                rightmost_search(graph, v, v, w, cycle)
                cycle = cycle[:-1]
                minimal_cycles.append(cycle)
    # print('\nNumber of minimal cycles = ', len(minimal_cycles))
    # for cycle in minimal_cycles:
    #     print(cycle, end='\n\n')
    # print(minimal_cycles)

    # Sanity check
    # print('\nSanity check all unvisited edges: ')
    num_unvisited_edges = 0
    for v in graph:
        for w in v.get_connections():
            if v.adjacent[w]['visited'] == False:
                num_unvisited_edges += 1
                vid, wid = v.id, w.id
                # print(vid, '-->', wid)
                assert v.adjacent[w]['is_boundary'] == True, \
                    'Unvisited edge must be a boundary edge'
                # The following assert is activated only when the abstract
                # region is partitioned by refined lasers
                if graph.num_vertices != graph.num_edges / 2:
                    assert w.adjacent[v]['visited'] == True, \
                        'Counterpart of an unvisited boundary edge must be visited'
    # TODO: Check that unvisited edges cover workspace and obstacle boundaries
    # print('Number of unvisited edges = ', num_unvisited_edges)

    # Convert the V-representation of the polyhedra to an H-representation
    # NOTE: Region orders are the same in the V- and H-representations
    poly_H_rep = []
    for cycle in minimal_cycles:
        # V-representation required by pycddlib
        vertices = []
        for vertex in cycle:
            vertices.append([1, vertex[0], vertex[1]])
        # print(vertices, end='\n\n')
        # Convert by pycddlib
        mat = cdd.Matrix(vertices, number_type='float')
        # mark the rows as generators before converting
        mat.rep_type = cdd.RepType.GENERATOR
        poly = cdd.Polyhedron(mat)
        ine = poly.get_inequalities()
        # TODO: need canonicalize() to remove redundancy?
        # ine.canonicalize()
        # Represent the inequality constraints as A x <= b
        A, b = [], []
        for row in ine:
            b.append(row[0])
            a = [-x for x in list(row[1:])]
            A.append(a)
        # print('b = ', b)
        # print('A = ', A, end='\n\n')
        poly_H_rep.append({'A': A, 'b': b})
    # print(poly_H_rep)
    return minimal_cycles, poly_H_rep
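# Minimal sketch (added; not from the original source) of the V-rep -> H-rep
# conversion used above, on the triangle with vertices (0,0), (1,0), (0,1):
import cdd

tri = cdd.Matrix([[1, 0, 0], [1, 1, 0], [1, 0, 1]],  # rows are [1, x, y]
                 number_type='fraction')
tri.rep_type = cdd.RepType.GENERATOR
for row in cdd.Polyhedron(tri).get_inequalities():
    # each row is [b, -A] for a facet A x <= b
    print([-x for x in row[1:]], '<=', row[0])
# expected facets (up to order/scaling): x >= 0, y >= 0, x + y <= 1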
def sampleKnots(t0, tk, k, b=None, d=None, N=1):
    """sample knots given a set of rules"""
    # check input
    assert t0 <= tk
    assert k >= 2
    if d is not None:
        assert d.shape == (k, 2) and sum(d[:, 0]) <= 1.0 and \
            np.all(d >= 0.0) and np.all(d <= 1.0)
    else:
        d = np.repeat(np.array([[0.0, 1.0]]), k, axis=0)
    if b is not None:
        assert b.shape == (k - 1, 2) and \
            np.all(b[:, 0] <= b[:, 1]) and \
            np.all(b[:-1, 1] <= b[1:, 1]) and \
            np.all(b >= 0.0) and np.all(b <= 1.0)
    else:
        b = np.repeat(np.array([[0.0, 1.0]]), k - 1, axis=0)
    d = d * (tk - t0)
    b = b * (tk - t0) + t0
    d[0] += t0
    d[-1] -= tk
    # find the vertices of the polyhedron
    D = -colDiffMat(k - 1)
    I = np.identity(k - 1)
    A1 = np.vstack((-D, D))
    A2 = np.vstack((-I, I))
    b1 = np.hstack((-d[:, 0], d[:, 1]))
    b2 = np.hstack((-b[:, 0], b[:, 1]))
    A = np.vstack((A1, A2))
    b = np.hstack((b1, b2))
    mat = np.insert(-A, 0, b, axis=1)
    mat = cdd.Matrix(mat)
    mat.rep_type = cdd.RepType.INEQUALITY
    poly = cdd.Polyhedron(mat)
    ext = poly.get_generators()
    vertices_and_rays = np.array(ext)
    if vertices_and_rays.size == 0:
        print('there are no feasible knots')
        return None
    if np.any(vertices_and_rays[:, 0] == 0.0):
        print('polyhedron is not closed, something is wrong')
        return None
    else:
        vertices = vertices_and_rays[:, 1:]
    # sample from the convex combination of the vertices
    n = vertices.shape[0]
    s_simplex = sampleSimplex(n, N=N)
    s = s_simplex.dot(vertices)
    s = np.insert(s, 0, t0, axis=1)
    s = np.insert(s, k, tk, axis=1)
    return s
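# Usage sketch (added; not from the original source): draw five knot vectors
# on [0, 10] with k = 3, i.e. two interior knots, under the default distance
# and position rules.
#
#   knots = sampleKnots(0.0, 10.0, 3, N=5)
#   # knots has shape (5, 4): each row is [t0, knot_1, knot_2, tk]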
def check_com_positions(self, com_positions):
    X = bodies.FOOT_X
    Y = bodies.FOOT_Y
    m = self.robot.mass
    g = 9.81
    mu = 0.7
    CWC = array([
        # fx fy fz taux tauy tauz
        [-1, 0, -mu, 0, 0, 0],
        [+1, 0, -mu, 0, 0, 0],
        [0, -1, -mu, 0, 0, 0],
        [0, +1, -mu, 0, 0, 0],
        [0, 0, -Y, -1, 0, 0],
        [0, 0, -Y, +1, 0, 0],
        [0, 0, -X, 0, -1, 0],
        [0, 0, -X, 0, +1, 0],
        [-Y, -X, -(X + Y) * mu, +mu, +mu, -1],
        [-Y, +X, -(X + Y) * mu, +mu, -mu, -1],
        [+Y, -X, -(X + Y) * mu, -mu, +mu, -1],
        [+Y, +X, -(X + Y) * mu, -mu, -mu, -1],
        [+Y, +X, -(X + Y) * mu, +mu, +mu, +1],
        [+Y, -X, -(X + Y) * mu, +mu, -mu, +1],
        [-Y, +X, -(X + Y) * mu, -mu, +mu, +1],
        [-Y, -X, -(X + Y) * mu, -mu, -mu, +1]])
    nb_contacts = len(self.contacting_links)
    C = zeros((4, 6 * nb_contacts))
    d = array([0, 0, -m * g, 0])
    # [pGx, pGy] = D * w_all
    D = zeros((2, 6 * nb_contacts))
    for i, link in enumerate(self.contacting_links):
        # check orientation assumption
        pose = link.GetTransformPose()
        assert norm(pose[:4] - array([1., 0., 0., 0.])) < 5e-2, \
            str(float(norm(pose[:4] - array([1., 0., 0., 0.]))))
        x, y, z = link.GetTransformPose()[4:]
        Ci = array([[1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0],
                    [-y, x, 0, 0, 0, 1]])
        Di = 1. / (m * g) * array([[-z, 0, x, 0, -1, 0],
                                   [0, -z, y, 1, 0, 0]])
        C[0:4, (6 * i):(6 * (i + 1))] = +Ci
        D[:, (6 * i):(6 * (i + 1))] = Di
    CWC_all = block_diag(*([CWC] * nb_contacts))
    _zeros = zeros((CWC_all.shape[0], 1))
    # A * w_all + b >= 0
    # input to cdd.Matrix is [b, A]
    F = cdd.Matrix(hstack([_zeros, -CWC_all]), number_type='float')
    F.rep_type = cdd.RepType.INEQUALITY
    # C * w_all + d == 0
    _d = d.reshape((C.shape[0], 1))
    F.extend(hstack([_d, C]), linear=True)
    P = cdd.Polyhedron(F)
    V = array(P.get_generators())
    poly = []
    for i in xrange(V.shape[0]):
        if V[i, 0] != 1:  # 1 = vertex, 0 = ray
            raise Exception("Not a polygon, V =\n%s" % repr(V))
        pG = dot(D, V[i, 1:])
        poly.append(pG)
    if all_plots:
        # Check 1: plot COM trajectory and polygons
        plot_polygon(poly)
    if True:
        # Check 2: using full H-representation
        # (autonomous but time consuming when designing the motion)
        self.check_all_inequalities(com_positions, poly)
def make_extreme_n_monotone(cls, pspace, monotonicity=None): """Yield extreme lower probabilities with given monotonicity. .. warning:: Currently this doesn't work very well except for the cases below. >>> lprs = list(LowProb.make_extreme_n_monotone('abc', monotonicity=2)) >>> len(lprs) 8 >>> all(lpr.is_coherent() for lpr in lprs) True >>> all(lpr.is_n_monotone(2) for lpr in lprs) True >>> all(lpr.is_n_monotone(3) for lpr in lprs) False >>> lprs = list(LowProb.make_extreme_n_monotone('abc', monotonicity=3)) >>> len(lprs) 7 >>> all(lpr.is_coherent() for lpr in lprs) True >>> all(lpr.is_n_monotone(2) for lpr in lprs) True >>> all(lpr.is_n_monotone(3) for lpr in lprs) True >>> lprs = list(LowProb.make_extreme_n_monotone('abcd', monotonicity=2)) >>> len(lprs) 41 >>> all(lpr.is_coherent() for lpr in lprs) True >>> all(lpr.is_n_monotone(2) for lpr in lprs) True >>> all(lpr.is_n_monotone(3) for lpr in lprs) False >>> all(lpr.is_n_monotone(4) for lpr in lprs) False >>> lprs = list(LowProb.make_extreme_n_monotone('abcd', monotonicity=3)) >>> len(lprs) 16 >>> all(lpr.is_coherent() for lpr in lprs) True >>> all(lpr.is_n_monotone(2) for lpr in lprs) True >>> all(lpr.is_n_monotone(3) for lpr in lprs) True >>> all(lpr.is_n_monotone(4) for lpr in lprs) False >>> lprs = list(LowProb.make_extreme_n_monotone('abcd', monotonicity=4)) >>> len(lprs) 15 >>> all(lpr.is_coherent() for lpr in lprs) True >>> all(lpr.is_n_monotone(2) for lpr in lprs) True >>> all(lpr.is_n_monotone(3) for lpr in lprs) True >>> all(lpr.is_n_monotone(4) for lpr in lprs) True >>> # cddlib hangs on larger possibility spaces >>> #lprs = list(LowProb.make_extreme_n_monotone('abcde', monotonicity=2)) """ pspace = PSpace.make(pspace) # constraint for empty set and full set matrix = cdd.Matrix([ [0] + [1 if event.is_false() else 0 for event in pspace.subsets()], [-1] + [1 if event.is_true() else 0 for event in pspace.subsets()] ], linear=True, number_type='fraction') # constraints for monotonicity constraints = [ dict(constraint) for constraint in cls.get_constraints_n_monotone( pspace, xrange(1, monotonicity + 1)) ] matrix.extend( [[0] + [constraint.get(event, 0) for event in pspace.subsets()] for constraint in constraints]) matrix.rep_type = cdd.RepType.INEQUALITY # debug: simplify matrix #print(pspace, monotonicity) # debug #print("original:", len(matrix)) #matrix.canonicalize() #print("new :", len(matrix)) #print(matrix) # debug # calculate extreme points poly = cdd.Polyhedron(matrix) # convert these points back to lower probabilities #print(poly.get_generators()) # debug for vert in poly.get_generators(): yield cls(pspace=pspace, lprob=dict( (event, vert[1 + index]) for index, event in enumerate(pspace.subsets())), number_type='fraction')
def get_mul_comparisons(vertices, lin_set, num_vars, prime_of_index):
    """
    Returns a list of objects of the form (m1, m2, const, comp),
    where m1 and m2 are mulpairs, const is a rational constant
    (int or Fraction), comp is terms.GE/GT/LE/LT,
    and const * m1 * m2 comp 1
    """
    if all(v[1] == 0 for v in vertices):
        p = terms.MulPair(terms.IVar(0), 1)
        return [(p, p, 1, terms.LT)]

    new_comparisons = []
    for (i, j) in itertools.combinations(range(num_vars), 2):
        base_matrix = [
            [vertices[k][0], vertices[k][i + 2], vertices[k][j + 2]] +
            vertices[k][num_vars + 2:] for k in range(len(vertices))
            if k not in lin_set
        ]
        matrix = cdd.Matrix(base_matrix, number_type='fraction')
        matrix.rep_type = cdd.RepType.GENERATOR
        for k in lin_set:
            matrix.extend(
                [[vertices[k][0], vertices[k][i + 2], vertices[k][j + 2]] +
                 vertices[k][num_vars + 2:]],
                linear=True)

        ineqs = cdd.Polyhedron(matrix).get_inequalities()

        for ind in range(len(ineqs)):
            c = ineqs[ind]
            if c[2] == c[1] == 0:  # no comparison between these two variables
                continue
            strong = not any(
                v[1] != 0 and
                v[i + 2] * c[1] + v[j + 2] * c[2] +
                sum(c[k] * v[num_vars + k - 1]
                    for k in range(3, len(c))) == 0
                for v in vertices)

            const = 1
            # Don't raise the constant to a non-integer power: scale away
            # the denominators first.
            scale = int(
                num_util.lcmm(
                    fractions.Fraction(c[k]).denominator
                    for k in range(3, len(c))))
            if scale != 1:
                c = [c[0]] + [scale * v for v in c[1:]]

            skip = False
            for k in range(3, len(c)):
                if c[k] != 0:
                    if c[k] >= 1000000 or c[k] <= -1000000:
                        # Exponents this large yield no useful comparisons
                        # and cause arithmetic errors.
                        skip = True
                        break
                    else:
                        if c[k] > 0:
                            const *= (prime_of_index[k + num_vars - 3]**c[k])
                        else:
                            const *= fractions.Fraction(
                                1, prime_of_index[k + num_vars - 3]**(-c[k]))
            if skip:
                continue

            if ind in ineqs.lin_set:
                new_comp = terms.EQ
            else:
                new_comp = terms.GT if strong else terms.GE
            new_comparisons.append(
                (terms.MulPair(terms.IVar(i), c[1]),
                 terms.MulPair(terms.IVar(j), c[2]),
                 const, new_comp))
    return new_comparisons
def get_vertices(inequalities):
    # The inequalities are given as a matrix of the form [b, -A],
    # from b - A x >= 0 (i.e. A x <= b).
    m = cdd.Matrix(inequalities)
    m.rep_type = cdd.RepType.INEQUALITY
    return cdd.Polyhedron(m).get_generators()
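# Example usage of get_vertices (illustrative): the unit square
# 0 <= x, y <= 1, written as rows [b, -A] for A x <= b.
square = [[0, 1, 0],    # x >= 0
          [0, 0, 1],    # y >= 0
          [1, -1, 0],   # x <= 1
          [1, 0, -1]]   # y <= 1
for row in get_vertices(square):
    print(row)  # (1, x, y) for each of the four corners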
import numpy as np import cdd import matplotlib.pyplot as plt import utils mu = np.zeros(2) + 4.3 cov = np.array([[2.1, 0.3], [0.3, 0.8]]) m = cdd.Matrix([[30, 1., 0], [30, 0., 1.1]]) #print(utils.get_vertices(np.array(m))) m.rep_type = cdd.RepType.INEQUALITY print(utils.phis_w(np.array(m), mu, cov), np.outer(mu, mu) + cov) print('\n\n\n') m = cdd.Matrix([[40, 0., 1], [40, -1., 0], [40, 1., -1.]]) #print(utils.get_vertices(np.array(m))) m.rep_type = cdd.RepType.INEQUALITY print(utils.phis_w(np.array(m), mu, cov)) print('\n\n\n') m = cdd.Matrix([[40, 1., 0], [40, -1., 0], [40, 0., -1.], [40, 0, 1.]]) #print(utils.get_vertices(np.array(m))) m.rep_type = cdd.RepType.INEQUALITY print(utils.phis_w(np.array(m), mu, cov)) print('\n\n\n') m = cdd.Matrix([[40, 1., 0], [40, -1., 0], [40, 0., -1.], [40, 0, 1.], [40, 1., 1], [40, -1, -1]]) print('v', utils.get_vertices(np.array(m))) m.rep_type = cdd.RepType.INEQUALITY print('p', utils.phis_w(np.array(m), mu, cov))
# -*- coding: utf-8 -*- # Copyright 2015-2017 CNRS-AIST JRL # This file is part of StabiliPy. # StabiliPy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # StabiliPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with StabiliPy. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function import cdd import numpy as np radius = 0.1 points = radius * np.vstack([np.eye(3), -np.eye(3)]) mat_p = cdd.Matrix(np.hstack([np.ones((6, 1)), points])) mat_p.rep_type = cdd.RepType.GENERATOR sphere_ineq = np.array(cdd.Polyhedron(mat_p).get_inequalities()) print(sphere_ineq)
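# Round-trip check (an illustrative addition, not in the original script):
# feeding the inequalities back into cddlib as an H-representation should
# recover the six generators +/- radius * e_i.
mat_h = cdd.Matrix(sphere_ineq)
mat_h.rep_type = cdd.RepType.INEQUALITY
print(np.array(cdd.Polyhedron(mat_h).get_generators()))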
def __init__(self, cdd_hrepr):
    start = time.time()

    # krelu on variables in varsid
    #self.varsid = varsid
    self.k = len(cdd_hrepr[0]) - 1
    self.cdd_hrepr = cdd_hrepr
    #print("LENGTH ", len(cdd_hrepr[0]))
    #cdd_hrepr = self.get_ineqs(varsid)
    check_pt1 = time.time()

    # We get orthant points using exact precision, because it allows us to
    # guarantee soundness of the algorithm.
    cdd_hrepr = cdd.Matrix(cdd_hrepr, number_type='fraction')
    cdd_hrepr.rep_type = cdd.RepType.INEQUALITY
    pts = self.get_orthant_points(cdd_hrepr)

    # Generate extremal points in the space of variables before and
    # after relu
    pts = [([1] + row + [x if x > 0 else 0 for x in row]) for row in pts]

    adjust_constraints_to_make_sound = False
    # Floating-point CDD is much faster than exact CDD, but for some inputs
    # it fails due to numerical errors. In that case we fall back to the
    # exact (fraction) version.
    try:
        cdd_vrepr = cdd.Matrix(pts, number_type='float')
        cdd_vrepr.rep_type = cdd.RepType.GENERATOR
        # Convert back to H-repr.
        cons = cdd.Polyhedron(cdd_vrepr).get_inequalities()
        adjust_constraints_to_make_sound = True
        # Linearities are not adjusted, so lin_set is just set to the
        # empty set.
        self.lin_set = frozenset([])
    except Exception:
        cdd_vrepr = cdd.Matrix(pts, number_type='fraction')
        cdd_vrepr.rep_type = cdd.RepType.GENERATOR
        # Convert back to H-repr.
        cons = cdd.Polyhedron(cdd_vrepr).get_inequalities()
        self.lin_set = cons.lin_set

    cons = np.asarray(cons, dtype=np.float64)

    # If floating-point CDD was run, adjust the constraint offsets to make
    # sure that every generated point still satisfies every constraint.
    if adjust_constraints_to_make_sound:
        pts = np.asarray(pts, dtype=np.float64)
        cons_abs = np.abs(cons)
        pts_abs = np.abs(pts)
        cons_x_pts = np.matmul(cons, np.transpose(pts))
        cons_x_pts_err = np.matmul(cons_abs, np.transpose(pts_abs))
        # Since we use double precision, the fraction (mantissa) has 52
        # bits. We use a generous over-approximation with 2^-40 as the
        # relative error coefficient.
        rel_err = pow(2, -40)
        cons_x_pts_err *= rel_err
        cons_x_pts -= cons_x_pts_err
        for ci in range(len(cons)):
            min_val = np.min(cons_x_pts[ci, :])
            if min_val < 0:
                cons[ci, 0] -= min_val

    # normalize constraints for numerical stability
    # more info: http://files.gurobi.com/Numerics.pdf
    absmax = np.absolute(cons).max(axis=1)
    self.cons = cons / absmax[:, None]

    end = time.time()
    return
def test_issue25():
    # Regression test for pycddlib issue #25: constructing a polyhedron
    # from an empty matrix must not crash the interpreter.
    mat = cdd.Matrix([])
    cdd_poly = cdd.Polyhedron(mat)
def __init__(self, cdd_hrepr, approx=True):
    assert KAct.type in ["ReLU", "Tanh", "Sigmoid"]
    self.k = len(cdd_hrepr[0]) - 1
    self.cdd_hrepr = cdd_hrepr
    #print("LENGTH ", len(cdd_hrepr[0]))
    #cdd_hrepr = self.get_ineqs(varsid)

    # nikos: poly approximation
    # 2D double arrays are passed to the C library as arrays of row pointers
    array_2d_double = np.ctypeslib.ndpointer(dtype=np.uintp,
                                             ndim=1,
                                             flags='C')
    global boolean_flag
    if config.poly_dynamic is False:
        sapolib = cdll.LoadLibrary("../../Sapo/libsapo_dyn_lib.so")
        sapolib.computeSapo_small.argtypes = [
            c_int, c_int, c_int, array_2d_double, array_2d_double,
            POINTER(c_double), POINTER(c_double), array_2d_double
        ]
    else:
        sapolib = cdll.LoadLibrary("../../Sapo/libsapo_dyn_lib.so")
        sapolib.computeSapo_many.argtypes = [
            c_int, c_int, c_int, array_2d_double, array_2d_double,
            POINTER(c_double), POINTER(c_double), array_2d_double,
            POINTER(c_float), c_int
        ]
    #if boolean_flag:
    coeffs = poly_approx()
    deg = coeffs.shape[0]
    # boolean_flag = False
    sapolib.computeSapo_small.restype = int
    sapolib.computeSapo_many.restype = int

    start = time.time()
    check_pt1 = time.time()

    # List of constraints A x + b >= 0, with b = input_cons[i, 0] and
    # A = input_cons[i, 1:]
    input_cons = np.asarray(cdd_hrepr, dtype=np.double)
    dim = input_cons.shape
    n_var = dim[1] - 1
    n_dir = dim[0] // 2

    # added call to polyfit
    if n_var == 1:
        output_cons = np.concatenate(
            (np.tanh(input_cons[:, [0]]), input_cons[:, [1]]), axis=1)
        n_cons = dim[0]
    else:
        modelSapo = py_sapo(n_var, n_dir, input_cons)
        output_cons_temp = np.empty([dim[0], n_var + 1], dtype=np.double)
        output_cons_val = np.empty(0, dtype=np.double)
        [L, T, n_bundle] = modelSapo.LTmatrix()
        #modelSapo.offset(input_cons, dim[0])
        cL = (L.__array_interface__['data'][0] +
              np.arange(L.shape[0]) * L.strides[0]).astype(np.uintp)
        cT = (T.__array_interface__['data'][0] +
              np.arange(T.shape[0]) * T.strides[0]).astype(np.uintp)
        cA = (output_cons_temp.__array_interface__['data'][0] +
              np.arange(output_cons_temp.shape[0]) *
              output_cons_temp.strides[0]).astype(np.uintp)

        if config.splitting:
            regions = modelSapo.createRegions()
            for i in range(pow(3, n_var)):
                temp_cdd = cdd_hrepr.copy()
                for j in range(2 * n_var):
                    temp_cdd.append(regions[j + i * 2 * n_var])
                temp_cdd = cdd.Matrix(temp_cdd, number_type='fraction')
                temp_cdd.rep_type = cdd.RepType.INEQUALITY
                pts = cdd.Polyhedron(temp_cdd).get_generators()
                pts_np_temp = np.array(pts, dtype=np.double)
                if len(pts_np_temp) > 0:
                    print('Region', i + 1, 'is not empty!')
                    pts_np = pts_np_temp[::, 1::]
                    if i in [0, 2, 6, 8, 18, 20, 24, 26]:
                        n_cons = 2 * n_dir  #pow(3, n_var) - 1
                        output_cons_val_temp = modelSapo.comput_valOutputcons(i)
                        output_cons = modelSapo.emptyoutputcons()
                        output_cons_val = np.concatenate(
                            (output_cons_val, output_cons_val_temp), axis=0)
                    else:
                        # Reshape the input constraints
                        pts_np = pts_np.transpose()
                        val = L @ pts_np
                        offp_temp = np.max(val, 1)   # L x <= b
                        offm_temp = np.max(-val, 1)  # -L x <= b

                        # Call sapo
                        coffp = offp_temp.ctypes.data_as(POINTER(c_double))
                        coffm = offm_temp.ctypes.data_as(POINTER(c_double))
                        if config.poly_dynamic is False:
                            n_cons = sapolib.computeSapo_small(
                                n_var, n_dir, n_bundle, cL, cT,
                                coffp, coffm, cA)
                        else:
                            # add coeffs
                            c_coeffs = coeffs.ctypes.data_as(POINTER(c_float))
                            n_cons = sapolib.computeSapo_many(
                                n_var, n_dir, n_bundle, cL, cT,
                                coffp, coffm, cA, c_coeffs, deg)

                        # Reshape the output constraints (restrict to [-1,1]^n_var)
                        # A x + b >= 0
                        # x_1 >= -1  <=>   x_1 + 1 >= 0
                        # x_1 <= 1   <=>  -x_1 + 1 >= 0
                        # --------------------
                        # [b A] such that A x + b >= 0 (if n_var = 2):
                        # b0   1   0
                        # b1   0   1
                        # b2   1   1
                        # b3   1  -1
                        # b4  -1   0
                        # b5   0  -1
                        # b6  -1  -1
                        # b7  -1   1
                        if config.sanity_check:
                            if n_var == 2:
                                output_cons_temp[[0, 1, n_dir, n_dir + 1], 0] = \
                                    np.maximum(np.minimum(
                                        output_cons_temp[[0, 1, n_dir, n_dir + 1], 0],
                                        1), -1)
                                output_cons_temp[[2, 3, n_dir + 2, n_dir + 3], 0] = \
                                    np.maximum(np.minimum(
                                        output_cons_temp[[2, 3, n_dir + 2, n_dir + 3], 0],
                                        2), -2)
                            elif n_var == 3:
                                output_cons_temp[[0, 1, 2, n_dir, n_dir + 1, n_dir + 2], 0] = \
                                    np.maximum(np.minimum(
                                        output_cons_temp[[0, 1, 2, n_dir, n_dir + 1, n_dir + 2], 0],
                                        1), -1)
                                output_cons_temp[[3, 4, 5, 6, 7, 8,
                                                  n_dir + 3, n_dir + 4, n_dir + 5,
                                                  n_dir + 6, n_dir + 7, n_dir + 8], 0] = \
                                    np.maximum(np.minimum(
                                        output_cons_temp[[3, 4, 5, 6, 7, 8,
                                                          n_dir + 3, n_dir + 4, n_dir + 5,
                                                          n_dir + 6, n_dir + 7, n_dir + 8], 0],
                                        2), -2)
                                output_cons_temp[[9, 10, 11, 12,
                                                  n_dir + 9, n_dir + 10, n_dir + 11, n_dir + 12], 0] = \
                                    np.maximum(np.minimum(
                                        output_cons_temp[[9, 10, 11, 12,
                                                          n_dir + 9, n_dir + 10, n_dir + 11, n_dir + 12], 0],
                                        3), -3)
                            else:
                                print('\nNo sanity check was performed\n')

                        # Append the bounds
                        output_cons_val = np.concatenate(
                            (output_cons_val, output_cons_temp[:, 0]), axis=0)
                        output_cons = np.copy(output_cons_temp)

            # Make the union of the output sets
            output_cons_val = np.reshape(output_cons_val, (-1, n_cons))
            output_cons_val = np.max(output_cons_val, 0)
            output_cons[:, 0] = output_cons_val
        else:
            # No splitting: call sapo once on the full input polytope
            offp_temp = modelSapo.offp
            offm_temp = modelSapo.offm
            coffp = offp_temp.ctypes.data_as(POINTER(c_double))
            coffm = offm_temp.ctypes.data_as(POINTER(c_double))
            if config.poly_dynamic is False:
                n_cons = sapolib.computeSapo_small(n_var, n_dir, n_bundle,
                                                   cL, cT, coffp, coffm, cA)
            else:
                # add coeffs
                c_coeffs = coeffs.ctypes.data_as(POINTER(c_float))
                n_cons = sapolib.computeSapo_many(n_var, n_dir, n_bundle,
                                                  cL, cT, coffp, coffm, cA,
                                                  c_coeffs, deg)
            #output_cons_val = np.reshape(output_cons_temp, (-1, n_cons))
            #output_cons_val = np.max(output_cons_val, 0)
            output_cons = np.copy(output_cons_temp)
            # output_cons[:, 0] = output_cons_val

    # Collect all the input-output constraints
    elaborate_input_cons = np.concatenate(
        (input_cons, np.zeros([dim[0], n_var], dtype=np.double)), axis=1)
    elaborate_output_cons = np.concatenate(
        (output_cons[:, [0]],
         np.zeros([n_cons, n_var], dtype=np.double),
         output_cons[:, range(n_var)]), axis=1)
    cons = np.concatenate((elaborate_input_cons, elaborate_output_cons),
                          axis=0)
    cons = np.asarray(cons, dtype=np.float64)

    # normalize constraints for numerical stability
    # more info: http://files.gurobi.com/Numerics.pdf
    #absmax = np.absolute(cons).max(axis=1)
    #self.cons = cons / absmax[:, None]
    end = time.time()
    self.cons = cons
    return