def optimal_kernel_combinations(kernel_values: List[torch.Tensor]) -> torch.Tensor:
    # use a quadratic program to get the optimal kernel combination
    num_kernel = len(kernel_values)
    kernel_values_numpy = np.array(
        [float(k.detach().cpu().data.item()) for k in kernel_values])
    if np.all(kernel_values_numpy <= 0):
        beta = solve_qp(
            P=-np.eye(num_kernel),
            q=np.zeros(num_kernel),
            A=kernel_values_numpy,
            b=np.array([-1.]),
            G=-np.eye(num_kernel),
            h=np.zeros(num_kernel),
        )
    else:
        beta = solve_qp(
            P=np.eye(num_kernel),
            q=np.zeros(num_kernel),
            A=kernel_values_numpy,
            b=np.array([1.]),
            G=-np.eye(num_kernel),
            h=np.zeros(num_kernel),
        )
    beta = beta / beta.sum(axis=0) * num_kernel  # normalize
    return sum([k * b for (k, b) in zip(kernel_values, beta)])
def main():
    #### Order 2 Polynomial, No Velocity Constraints ####
    P2 = np.array([[1, 1], [1, 4 / 3]])
    A2 = np.array([1, 1])
    b2 = np.array([1])
    G2 = np.zeros((2, 2))
    h2 = np.zeros((2, ))
    q2 = np.zeros((2, ))
    p2_star = solve_qp(P2, q2, G2, h2, A2, b2)
    print("QP for polynomial of degree 2, no velocity constraints")
    print("Optimal value of cost function: {}".format(p2_star.T @ P2 @ p2_star))
    print("QP Solver for N=2, No Velocity Constraints: {} \n".format(p2_star))

    #### Order 3 Polynomial, No Velocity Constraints ####
    P3 = np.array([[1, 1, 1], [1, 4 / 3, 5 / 4], [1, 5 / 4, 9 / 5]])
    A3 = np.array([1, 1, 1])
    b3 = np.array([1])
    G3 = np.zeros((3, 3))
    h3 = np.zeros((3, ))
    q3 = np.zeros((3, ))
    p3_star = solve_qp(P3, q3, G3, h3, A3, b3)
    print("QP for polynomial of degree 3, no velocity constraints")
    print("Optimal value of cost function: {}".format(p3_star.T @ P3 @ p3_star))
    print("QP Solver for N=3, No Velocity Constraints: {} \n".format(p3_star))

    #### Order 2 Polynomial, Velocity Constraints ####
    P2v = np.array([[1, 1], [1, 4 / 3]])
    A2v = np.array([[1, 2], [1, 1]])
    b2v = np.array([-2, 1]).T
    G2v = np.zeros((2, 2))
    h2v = np.zeros((2, ))
    q2v = np.zeros((2, ))
    p2v_star = solve_qp(P2v, q2v, G2v, h2v, A2v, b2v)
    print("QP for polynomial of degree 2, velocity constraints")
    print("Optimal value of cost function: {}".format(p2v_star.T @ P2v @ p2v_star))
    print("QP Solver for N=2, Velocity Constraints: {} \n".format(p2v_star))

    #### Order 3 Polynomial, Velocity Constraints ####
    P3v = np.array([[1, 1, 1], [1, 4 / 3, 5 / 4], [1, 5 / 4, 9 / 5]])
    A3v = np.array([[1, 2, 3], [1, 1, 1]])
    b3v = np.array([-2, 1]).T
    G3v = np.zeros((3, 3))
    h3v = np.zeros((3, ))
    q3v = np.zeros((3, ))
    p3v_star = solve_qp(P3v, q3v, G3v, h3v, A3v, b3v)
    print("QP for polynomial of degree 3, velocity constraints")
    print("Optimal value of cost function: {}".format(p3v_star.T @ P3v @ p3v_star))
    print("QP Solver for N=3, Velocity Constraints: {} \n".format(p3v_star))
def solve_PGIRL(estimated_gradients, verbose=False, solver='quadprog', seed=1234):
    num_episodes, num_parameters, num_objectives = estimated_gradients.shape[:]
    mean_gradients = np.mean(estimated_gradients, axis=0)
    ns = scipy.linalg.null_space(mean_gradients)
    P = np.dot(mean_gradients.T, mean_gradients)
    if ns.shape[1] > 0:
        if (ns >= 0).all() or (ns <= 0).all():
            print("Jacobian has a null space:", ns[:, 0] / np.sum(ns[:, 0]))
            weights = ns[:, 0] / np.sum(ns[:, 0])
            loss = np.dot(np.dot(weights.T, P), weights)
            return weights, loss
        else:
            weights = solve_polyhedra(ns)
            print("Null space:", ns)
            if weights is not None and (weights != 0).any():
                print("Linear programming sol:", weights)
                weights = np.dot(ns, weights.T)
                weights = weights / np.sum(weights)
                loss = np.dot(np.dot(weights.T, P), weights)
                print("Weights from non positive null space:", weights)
                return weights, loss
            else:
                print("Linear prog did not find positive weights")

    q = np.zeros(num_objectives)
    A = np.ones((num_objectives, num_objectives))
    b = np.ones(num_objectives)
    G = np.diag(np.diag(A))
    h = np.zeros(num_objectives)
    normalized_P = P / np.linalg.norm(P)
    try:
        weights = solve_qp(P, q, -G, h, A=A, b=b, solver=solver)
    except ValueError:
        try:
            weights = solve_qp(normalized_P, q, -G, h, A=A, b=b, solver=solver)
        except:  # normalize matrix
            print("Error in Girl")
            print(P)
            print(normalized_P)
            u, s, v = np.linalg.svd(P)
            print("Singular Values:", s)
            ns = scipy.linalg.null_space(mean_gradients)
            print("Null space:", ns)
            weights, loss = solve_girl_approx(P, seed=seed)
    loss = np.dot(np.dot(weights.T, P), weights)
    if verbose:
        print('loss:', loss)
        print(weights)
    return weights, loss
def check_same_solutions(tol=0.05):
    sol0 = solve_qp(P, q, G, h, solver=sparse_solvers[0])
    for solver in sparse_solvers:
        sol = solve_qp(P, q, G, h, solver=solver)
        relvar = norm(sol - sol0) / norm(sol0)
        assert relvar < tol, "%s's solution offset by %.1f%%" % (
            solver, 100. * relvar)
    for solver in dense_solvers:
        sol = solve_qp(P_array, q, G_array, h, solver=solver)
        relvar = norm(sol - sol0) / norm(sol0)
        assert relvar < tol, "%s's solution offset by %.1f%%" % (
            solver, 100. * relvar)
def test_qpsolvers() -> None:
    """Tests qpsolvers and numpy conflicts."""
    # pylint: disable=C0415
    import numpy as np
    from qpsolvers import solve_qp

    M = np.array([[1.0, 2.0, 0.0], [-8.0, 3.0, 2.0], [0.0, 1.0, 1.0]])
    P = np.dot(M.T, M)  # this is a positive definite matrix
    q = np.dot(np.array([3.0, 2.0, 3.0]), M).reshape((3,))
    G = np.array([[1.0, 2.0, 1.0], [2.0, 0.0, 1.0], [-1.0, 2.0, -1.0]])
    h = np.array([3.0, 2.0, -2.0]).reshape((3,))
    A = np.array([1.0, 1.0, 1.0])
    b = np.array([1.0])
    solve_qp(P, q, G, h, A, b)
def move(n, robot_def, vel_ctrl, vd):
    global jobid
    try:
        w = 0.2
        Kq = .01 * np.eye(n)  # small value to make sure positive definite
        KR = np.eye(3)        # gains for position and orientation error
        q_cur = vel_ctrl.joint_position()
        J = robotjacobian(robot_def, q_cur)  # calculate current Jacobian
        Jp = J[3:, :]
        JR = J[:3, :]
        H = np.dot(np.transpose(Jp), Jp) + Kq + w * np.dot(np.transpose(JR), JR)
        H = (H + np.transpose(H)) / 2

        robot_pose = fwdkin(robot_def, q_cur.reshape((n, 1)))
        R_cur = robot_pose.R
        ER = np.dot(R_cur, np.transpose(R_ee.R_ee(0)))
        k, theta = R2rot(ER)
        k = np.array(k, dtype=float)
        s = np.sin(theta / 2) * k  # eR2
        wd = -np.dot(KR, s)
        f = -np.dot(np.transpose(Jp), vd) - w * np.dot(np.transpose(JR), wd)

        qdot = 0.5 * normalize_dq(solve_qp(H, f))
        vel_ctrl.set_velocity_command(qdot)
        jobid = top.after(10, lambda: move(n, robot_def, vel_ctrl, vd))
    except:
        traceback.print_exc()
        return
def project(self, x):
    proj_mat = self.metric_aug * 2
    q = -2 * np.dot(self.metric_aug, x)
    x_hat = solve_qp(proj_mat, q, self.constraints_mat, self.cons_rhs + 1e-5)
    return x_hat
def runOptimiser(K, u, preOptw, initialValue, maxWeight=10000):
    """
    Args:
        K (double 2d array): Similarity/distance matrix
        u (double array): Mean similarity of each prototype
        preOptw (double): Weight vector
        initialValue (double): Initialize run
        maxWeight (double): Upper bound on weight

    Returns:
        Prototypes, weights and objective values
    """
    d = u.shape[0]
    lb = np.zeros((d, 1))
    ub = maxWeight * np.ones((d, 1))
    x0 = np.append(preOptw, initialValue / K[d - 1, d - 1])

    G = np.vstack((np.identity(d), -1 * np.identity(d)))
    h = np.vstack((ub, -1 * lb))

    # Solve a QP defined as follows:
    #     minimize    (1/2) * x.T * P * x + q.T * x
    #     subject to  G * x <= h
    #                 A * x == b
    sol = solve_qp(K, -u, G, h, A=None, b=None, solver='cvxopt', initvals=x0)

    # compute objective function value
    x = sol.reshape(sol.shape[0], 1)
    P = K
    q = -u.reshape(u.shape[0], 1)
    obj_value = 1 / 2 * np.matmul(np.matmul(x.T, P), x) + np.matmul(q.T, x)
    return (sol, obj_value[0, 0])
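# A minimal usage sketch for runOptimiser, added here for illustration and not taken
# from the original source. The data is hypothetical: a small positive-definite
# similarity matrix K, its column means as u, a zero previous weight vector of length
# d-1, and an arbitrary initial value. It assumes cvxopt is installed and a qpsolvers
# version that accepts the (2d, 1)-shaped h built inside runOptimiser.
import numpy as np

def run_optimiser_demo():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(4, 3))
    K_demo = X @ X.T + 4 * np.eye(4)   # positive definite similarity matrix
    u_demo = K_demo.mean(axis=0)       # mean similarity of each prototype
    w_demo, obj = runOptimiser(K_demo, u_demo, preOptw=np.zeros(3), initialValue=1.0)
    print("weights:", w_demo, "objective:", obj)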
def opt(k):
    global w
    v = []
    b = []
    for i in range(data_qnum):
        df_data = data_ls[i]
        v.append(feature_map(k, data_ls[i], y_hat_ls[i]) - feature_map(k, data_ls[i]))
        [data_Row, data_Col] = df_data.shape
        docs_num = data_Row
        r = np.arange(1, docs_num + 1)
        bq = 1 - NDCG(k, df_data, r)
        b.append(bq)

    v_matrix = np.asarray(v)    # q*n
    K = np.dot(v_matrix, v_matrix.T)
    b_matrix = np.asarray(b)    # 1*q
    G = -1 * np.eye(data_qnum)
    h = np.zeros(data_qnum)
    A = np.ones(data_qnum)
    b = np.array([10])
    # print(K.shape, b_matrix.shape, G.shape, h.shape, A.shape, b.shape)
    try:
        alpha = solve_qp(K, b_matrix, G, h, A, b)
    except ValueError:
        print("ValueError")
        return 0
    w = np.dot(alpha, v_matrix)
def optimize(t, sigma, pi):
    nonlocal last_solution
    nr_of_assets = len(sigma)

    # only optimize if we have a re-balance trigger (early exit)
    if last_solution is not None and last_solution.sum() > 0.99:
        # so we had at least one valid solution in the past;
        # we can early exit if we do not have any signal, or no signal for any currently held asset
        if len(t.shape) > 1 and t.shape[1] == nr_of_assets:
            if t[:, last_solution >= 0.01].sum().any() < 1:
                return keep_solution
        else:
            if t.sum().any() < 1:
                return keep_solution

    # make sure covariance matrix is positive definite
    sigma = cov_nearest(sigma)

    # we perform optimization except when all expected returns are < 0,
    # then we early exit with an un-invest command
    if len(pi[:, pi[0] < 0]) == pi.shape[1]:
        return uninvest
    else:
        try:
            sol = solve_qp(risk_aversion * sigma, -pi.T, G=G, h=h, A=A, b=b, solver=solver)
            if sol is None:
                _log.error("no solution found")
                return uninvest
            else:
                return sol
        except Exception as e:
            _log.error(traceback.format_exc())
            return uninvest
def servo_cb(self, goal):
    if goal.stamped_pose.header.frame_id == '':
        goal.stamped_pose.header.frame_id = self.base_frame

    transformed = self.tf_listener.transformPose(self.base_frame, goal.stamped_pose)
    wTep = transforms.pose_msg_to_trans(transformed.pose)

    Y = 0.005
    Q = Y * np.eye(7)

    arrived = False
    rate = rospy.Rate(200)
    while not arrived and self.state.errors == 0:
        msg = JointVelocity()
        v, arrived = rp.p_servo(self.configuration.T, wTep,
                                goal.scaling if goal.scaling else 0.6)

        Aeq = self.configuration.Je
        beq = v.reshape((6, ))
        c = -self.configuration.Jm.reshape((7, ))

        dq = qp.solve_qp(Q, c, None, None, Aeq, beq)
        self.joint_velocity_cb(JointVelocity(joints=dq.tolist()))
        rate.sleep()

    return self.pose_servo_server.set_succeeded(
        ServoToPoseResult(result=0 if self.state.errors == 0 else 1))
def qplcp(P, q, G, h, A, b):
    # make sure P is symmetric
    P = 0.5 * (P + P.T)
    # negate q and flip the inequality (G x >= h  ->  -G x <= -h) to match
    # solve_qp's "minimize 1/2 x^T P x + q^T x  s.t.  G x <= h" convention
    q = -q.flatten()
    h = h.flatten()
    b = b.flatten()
    return solve_qp(P, q, -G, -h, A, b)
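# A hypothetical call, not part of the original project: it poses a problem in the
# "minimize 1/2 x^T P x - q^T x  s.t.  G x >= h,  A x == b" convention that qplcp
# rewrites into solve_qp's G x <= h form above. All names suffixed _demo are made up.
import numpy as np

P_demo = np.array([[2.0, 0.0], [0.0, 2.0]])
q_demo = np.array([[1.0], [1.0]])   # column vector; qplcp flattens and negates it
G_demo = np.eye(2)                  # x >= 0.1 and y >= 0.1 in the ">=" convention
h_demo = np.array([0.1, 0.1])
A_demo = np.array([[1.0, 1.0]])
b_demo = np.array([1.0])
x_demo = qplcp(P_demo, q_demo, G_demo, h_demo, A_demo, b_demo)  # expected: [0.5, 0.5]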
def qp(P):
    C = differentiate(P)
    P_new = np.zeros((MaxStep - 1, N, N, N))
    for k in range(len(P)):
        for i in range(N):
            for j in range(N):
                connected_roads = []
                q = []
                for l in range(N):
                    if G[i, l, 0] != -1 and (MaxStep - 1 - k >= shortestPaths()[l, j]):
                        connected_roads.append(l)
                        q.append(C[k, i, j, l] - P[k, i, j, l])
                q = np.asarray(q)
                n = len(connected_roads)
                M = np.identity(n)
                A = np.ones(n)
                b = np.array([1])
                I = -np.identity(n)
                h = np.zeros(n)
                # print(len(q))
                raw_sol = []
                if len(q) != 0:
                    raw_sol = solve_qp(M, q, I, h, A, b)
                sol = []
                for t in range(N):
                    if t in connected_roads:
                        sol.append(raw_sol[connected_roads.index(t)])
                    else:
                        sol.append(0)
                P_new[k, i, j] = sol
    return P_new
def quad_solver(self, ref_act, Xf, deltaW):
    ## set constraints
    self.M = np.zeros((self.Nc * 4, self.Nc), dtype=np.double)
    self.gamma = np.zeros((self.Nc * 4, 1), dtype=np.double)
    for i in range(0, self.Nc):
        for j in range(0, self.Np):
            if i == j:
                self.M[i, j] = 1
                self.gamma[i, ] = self.rate_constraint
                self.M[i + self.Nc, j] = -1
                self.gamma[i + self.Nc, ] = self.rate_constraint
            if i >= j:
                self.M[i + 2 * self.Nc, j] = 1
                self.M[i + 3 * self.Nc, j] = -1
                self.gamma[i + 2 * self.Nc, ] = self.amplitude_constraint - self.u0
                self.gamma[i + 3 * self.Nc, ] = self.amplitude_constraint + self.u0

    E = 2 * (self.PhiT_phi + self.rw * np.eye(self.Nc))
    Xf = np.reshape(Xf, (6, ))
    deltaW = np.reshape(deltaW, (self.Nc, ))
    Fconstraint = -2 * (np.matmul(self.PhiT_Rs, ref_act)
                        - np.matmul(self.PhiT_F, Xf)
                        - np.matmul(self.PhiT_Omega, deltaW))
    self.gamma = np.reshape(self.gamma, (self.Nc * 4, ))
    Fconstraint = np.reshape(Fconstraint, (self.Nc, ))
    x = solve_qp(E, Fconstraint, self.M, self.gamma)
    return x[0]
def svm_qp(x, y, is_bias=1, is_wconstrained=1):
    """svm_qp(x,y,is_bias=1,is_wconstrained=1) returns the weights, bias and
    margin if the given pattern set X with labels Y is linearly separable, and
    0s otherwise. x is the input matrix with dimension N (number of neurons)
    by P (number of patterns). y is the desired output vector of dimension P.
    y vector should consist of -1 and 1 only"""
    import qpsolvers
    R = x.shape[1]
    G = -(x * y).T
    if is_bias:
        N = x.shape[0] + 1
        G = np.append(G.T, -y)
        G = G.reshape(N, R)
        G = G.T
        P = np.identity(N)
        P[-1, -1] = 1e-12  # regularization
        #for j in range(N):
        #    P[j,j] += 1e-16
        #P += 1e-10
    else:
        N = x.shape[0]
        P = np.identity(N)
    if is_wconstrained:
        if is_bias:
            G = np.append(G, -np.identity(N)[:N - 1, :])
            G = G.reshape(R + N - 1, N)
            h = np.array([-1.] * R + [0] * (N - 1))
        else:
            G = np.append(G, -np.identity(N))
            G = G.reshape(R + N, N)
            h = np.array([-1.] * R + [0] * N)
    else:
        h = np.array([-1.] * R)
    w = qpsolvers.solve_qp(P, np.zeros(N), G, h)
    if is_bias:
        return w[:-1], w[-1], 2 / pylab.norm(w[:-1])
    else:
        return w, 2 / pylab.norm(w)
def getRowLNSM(v, mInp, idx=-1):
    nObj = mInp.shape[0]
    ar = np.zeros(nObj)
    for i, inp in enumerate(mInp):
        ar[i] = utils.getTanimotoScore(v, inp)
    if idx >= 0:
        ar[idx] = -10
    args = np.argsort(ar)[::-1][:const.KNN]

    P = np.ndarray((const.KNN, const.KNN))
    for i in range(const.KNN):
        for j in range(i, const.KNN):
            P[i][j] = np.dot(v - mInp[args[i]], v - mInp[args[j]])
            P[j][i] = P[i][j]
    I = np.diag(np.ones(const.KNN))
    P = P + I

    q = np.zeros(const.KNN)
    gg = np.ndarray(const.KNN)
    gg.fill(-1)
    G = np.diag(gg)
    h = np.zeros(const.KNN)
    b = np.ones(1)
    A = np.ones(const.KNN)

    re = solve_qp(P, q, G, h, A, b)
    out = np.zeros(nObj)
    for i in range(const.KNN):
        out[args[i]] = re[i]
    return out
def __find_optimal_solution(self, S, D):
    lo_bounds = []
    up_bounds = []
    bounds = []
    for i in range(D):
        lb, ub = self.__get_bounds(self.x[i])
        lo_bounds.append(lb)
        up_bounds.append(ub)
        bounds.append((lb, ub))
    bounds = tuple(bounds)

    S_d = S[:D]
    P = self.A[:, S_d].transpose()[:, S_d].transpose()
    q = self.gradient[S_d]
    G = np.zeros((D,))
    A = np.zeros((D,))
    h = np.zeros((D,))
    b = np.zeros((D,))

    solution_quadprog = solve_qp(P=P, q=q, G=G, A=A, h=h, b=b,
                                 lb=np.array(lo_bounds), ub=np.array(up_bounds))
    solution_minimize = minimize(fun=objective_function, x0=np.array([1] * D),
                                 args=(D, S, self.A, self.gradient), bounds=bounds)
    return solution_quadprog
def weight_descriptors(descs0, descs1, descs2, descs3, method='std'):
    """
    Arguments:
        - descs0,1,2,3: (D, )
    Returns:
        - w0, w1, w2, w3, weights
    """
    H = np.vstack([descs0, descs1, -descs2, -descs3]).T
    H = H.T.dot(H)
    c = np.zeros(H.shape[0])

    # constraint
    Ae = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
    be = np.array([1, 1])
    Ai = np.eye(4)
    bi = np.zeros(4)
    x0 = np.array([1, 0, 1, 0])

    def tofloat(*args):
        # cast to the builtin float dtype (np.float was removed in recent NumPy)
        return [x.astype(float) for x in args]

    [H, c, Ae, be, Ai, bi, x0] = tofloat(H, c, Ae, be, Ai, bi, x0)

    if method == 'std':
        import qpsolvers
        Ai = -Ai
        bi = -bi
        x = qpsolvers.solve_qp(H, c, Ai, bi, Ae, be)
    else:
        x, status = quadprog(H, c, x0, Ae, be, Ai, bi)

    # diff = descs0 * x[0] + descs1 * x[1] - descs2 * x[2] - descs3 * x[3]
    return x
def solve_svm(n, x, d, kernel, tol):
    # Define matrices for quadprog
    P = np.array([d[i] * d[j] * kernel(x[i], x[j])
                  for i in range(n) for j in range(n)]).reshape(n, n) + 1e-9 * np.eye(n)
    q = -np.ones(n)
    G = -np.eye(n)
    h = np.zeros(n)
    A = np.array(d)
    b = np.zeros(1)

    # Run the solver
    a = qp.solve_qp(P, q, G, h, A, b)

    # Remove all small alphas
    a[a < tol] = 0

    # Find the support vectors
    isv = np.nonzero(a)[0]
    asv = [a[i] for i in isv]
    xsv = [x[i] for i in isv]
    dsv = [d[i] for i in isv]

    # Compute the separator
    theta = dsv[0] - sum(
        [asv[i] * dsv[i] * kernel(xsv[i], xsv[0]) for i in range(len(isv))])
    sep = lambda z: sum(
        [asv[i] * dsv[i] * kernel(xsv[i], z) for i in range(len(isv))]) + theta

    # Return separator and support vectors
    return sep, xsv, dsv
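# A hypothetical usage sketch, not from the original source: a hard-margin dual SVM on
# four linearly separable 2-D points with a plain dot-product kernel. It assumes the
# module imports the function relies on, in particular `qp` being the qpsolvers module
# (import qpsolvers as qp) and numpy as np; the 1e-9 ridge inside solve_svm keeps the
# Gram-based P matrix numerically positive definite for the default solver.
import numpy as np
import qpsolvers as qp

x_demo = [np.array([0.0, 0.0]), np.array([0.0, 1.0]),
          np.array([2.0, 0.0]), np.array([2.0, 1.0])]
d_demo = [-1.0, -1.0, 1.0, 1.0]
linear_kernel = lambda u, v: float(np.dot(u, v))

sep_demo, xsv_demo, dsv_demo = solve_svm(4, x_demo, d_demo, linear_kernel, tol=1e-6)
print(sep_demo(np.array([1.5, 0.5])))  # positive: the point lies on the +1 side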
def solve_qp(self, X, y):
    """
    Solves a quadratic programming problem. In QP formulation (dual):
    m variables, 2m+1 constraints (1 equation, 2m inequations).

    :param X: array of size [n_samples, n_features] holding the training samples
    :param y: array of size [n_samples] holding the class labels
    """
    m = len(y)  # m = n_samples
    self.K = self.kernel(X)  # gram matrix
    P = np.vstack((
        np.hstack((self.K, -self.K)),     # alphas_p, alphas_n
        np.hstack((-self.K, self.K))))    # alphas_n, alphas_p
    q = np.hstack((-y, y)) + self.epsilon
    lb = np.zeros(2 * m)           # lower bounds
    ub = np.ones(2 * m) * self.C   # upper bounds
    A = np.hstack((np.ones(m), -np.ones(m)))  # equality matrix
    b = np.zeros(1)                # equality vector
    alphas = solve_qp(P, q, A=A, b=b, lb=lb, ub=ub,
                      solver='cvxopt', sym_proj=True, verbose=self.verbose)
    self.alphas_p = alphas[:m]
    self.alphas_n = alphas[m:]
def test(self):
    P, q, G, h, A, b = self.get_problem()
    print(solver)
    x = solve_qp(P, q, G, h, A, b, solver=solver)
    self.assertIsNotNone(x)
    self.assertTrue((dot(G, x) <= h).all())
    self.assertTrue(allclose(dot(A, x), b))
def quadratic_solver(mu, d_beta, d_ri, epsilon):
    # xk = (lambda, mu, beta)
    beta = pd.Series(d_beta)
    ri = pd.Series(d_ri)
    indx = ri.index
    beta = beta.values
    ri = ri.values
    mu = np.array([mu])
    i = len(ri)
    L = mu[0] + np.multiply(beta, ri)
    xk = np.concatenate((L, mu, beta))

    P = 2 * np.eye(2 * i + 1)
    q = -2 * xk
    G = np.diag(np.concatenate((-np.ones(i), np.zeros(1 + i))))
    h = -np.concatenate((np.ones(i) * epsilon, np.zeros(i + 1)))
    A = np.concatenate((np.eye(i), -1 * np.ones((i, 1)), -ri * np.eye(i)), axis=1)
    b = np.zeros(i)

    x_r = solve_qp(P, q, G, h, A, b)
    lambda_r = pd.Series(x_r[:i], index=indx).to_dict()
    mu_r = x_r[i]
    beta_r = pd.Series(x_r[-i:], index=indx).to_dict()
    return lambda_r, mu_r, beta_r
def x_star(self):
    if not hasattr(self, 'x_opt'):
        self.x_opt = solve_qp(P=self.f.Q, q=self.f.q,
                              lb=np.zeros_like(self.f.q), ub=self.ub,
                              solver='quadprog')
    return self.x_opt
def solve_qp2(P, q, G, h, lb, ub):
    # append the box constraints lb <= x <= ub to the inequality block G x <= h
    Gl = -np.eye(np.shape(P)[0])
    hl = -lb
    Gu = np.eye(np.shape(P)[0])
    hu = ub
    G = np.vstack((G, Gl, Gu))
    h = np.hstack((h, hl, hu))
    return solve_qp(P, q, G, h)
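# A hypothetical check, not from the original source: on a small problem, stacking the
# bounds into G/h via solve_qp2 should match passing lb/ub to solve_qp directly (the
# lb/ub keywords are available in recent qpsolvers releases, as other snippets in this
# collection already use them). All _demo names are made up for illustration.
import numpy as np
from qpsolvers import solve_qp

P_demo = np.array([[2.0, 0.0], [0.0, 2.0]])
q_demo = np.array([-2.0, -2.0])
G_demo = np.array([[1.0, 1.0]])
h_demo = np.array([1.5])
lb_demo = np.zeros(2)
ub_demo = np.ones(2)

x_stacked = solve_qp2(P_demo, q_demo, G_demo, h_demo, lb_demo, ub_demo)
x_direct = solve_qp(P_demo, q_demo, G_demo, h_demo,
                    lb=lb_demo, ub=ub_demo, solver='quadprog')
print(np.allclose(x_stacked, x_direct, atol=1e-6))  # both give x = [0.75, 0.75]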
def test_qp():
    M = np.array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
    P = np.dot(M.T, M)  # quick way to build a symmetric matrix
    q = np.dot(np.array([3., 2., 3.]), M).reshape((3, ))
    G = np.array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])
    h = np.array([3., 2., -2.]).reshape((3, ))
    sol = solve_qp(P, q, G, h)
    print('QP solution:', sol)
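# A small hypothetical follow-up, not in the original source: rebuild the same problem
# and verify the returned point satisfies the inequality constraints, in the spirit of
# the unit-test assertions elsewhere in this collection.
import numpy as np

def check_qp_solution():
    M = np.array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
    P = np.dot(M.T, M)
    q = np.dot(np.array([3., 2., 3.]), M).reshape((3, ))
    G = np.array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])
    h = np.array([3., 2., -2.]).reshape((3, ))
    sol = solve_qp(P, q, G, h)
    assert sol is not None
    assert (np.dot(G, sol) <= h + 1e-8).all()
    return sol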
def lcs_ind_qps(wr, wmin, wmax, alpha):
    from qpsolvers import solve_qp

    N = 200
    T = len(wr)
    vmin = 0
    vs = np.linspace(0, 1, N)
    bets = np.zeros((T + 1, len(vs), 2))
    log_wealth = np.log(0.5) * np.ones(len(vs))
    neg_log_alpha = np.log(1 / alpha)
    psi = -0.77258872224  # 2 - 4 * ln(2)

    for i, v in enumerate(vs):
        G = -np.array([[ww - 1, ww * rr - v]
                       for ww in (0, wmax) for rr in (0, 1)] + [(0, 1)])
        h = -np.array([-0.5 for ww in (0, wmax) for rr in (0, 1)] + [0])
        A = np.zeros((2, 2))
        b = np.zeros(2)
        A0 = np.zeros((2, 2))
        A1 = np.zeros((2, 2))
        A2 = np.zeros((2, 2))
        b0 = np.zeros(2)
        b1 = np.zeros(2)
        z = np.zeros(2)
        z2 = np.zeros((2, 2))
        z2[1, 1] = 1
        z3 = np.zeros(2)
        z3[1] = -1
        z4 = np.zeros((2, 2))
        reg = 1e-6 * np.eye(2)
        cur_bet = np.zeros(2)
        local_capital = np.log(0.5)

        for t, (wi, ri) in enumerate(wr):
            local_capital += np.log1p(cur_bet[0] * (wi - 1) + cur_bet[1] * (wi * ri - v))
            if local_capital > neg_log_alpha:
                break
            z[:] = [wi - 1, wi * ri]
            z4 *= 0
            z4[:, 1] -= z
            z4[1, :] -= z
            A0 = t / (t + 1.0) * A0 + np.outer(z, z) / (t + 1.0)
            A1 = t / (t + 1.0) * A1 + z4 / (t + 1.0)
            A2 = t / (t + 1.0) * A2 + z2 / (t + 1.0)
            b0 = t / (t + 1.0) * b0 + z / (t + 1.0)
            b1 = t / (t + 1.0) * b1 + z3 / (t + 1.0)
            A[...] = 2 * (-psi * (A0 + v * (A1 + v * A2)) + reg)
            b[...] = -(b0 + v * b1)
            cur_bet[:] = solve_qp(A, b, G, h, solver='quadprog')
            bets[t, i, :] = cur_bet

    these_bets = np.zeros((N, 2))
    for t, (wi, ri) in enumerate(wr):
        multi_update_wealth(log_wealth, these_bets, vs, wi, ri)
        newvmin = update_lb_2d(log_wealth, vs, neg_log_alpha)
        vmin = max(vmin, newvmin)
        yield vmin
        these_bets[...] = bets[t, :, :]
def testQP():
    A = np.array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
    b = np.array([3., 2., 3.])
    G = np.array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])
    h = np.array([3., 2., -2.])
    P = np.dot(A.T, A)
    q = -np.dot(A.T, b)
    return qpsolvers.solve_qp(P=P, q=q, G=G, h=h)
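# Side note (added, not from the original source): with P = A.T @ A and q = -A.T @ b,
# testQP is the constrained least-squares problem  min 1/2 ||A x - b||^2  s.t.  G x <= h.
# The sketch below contrasts its result with the unconstrained least-squares solution.
import numpy as np

def compare_with_unconstrained():
    A = np.array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
    b = np.array([3., 2., 3.])
    x_con = testQP()
    x_unc, *_ = np.linalg.lstsq(A, b, rcond=None)
    print("constrained:  ", x_con)
    print("unconstrained:", x_unc)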
def get_spline_expansion(self, f_eval, xgrid=None):
    """
    Returns the coefficient of the spline expansion for f_eval
    """
    out = super().get_spline_expansion(f_eval, xgrid)
    # project the unconstrained coefficients onto the constraint set (metric projection)
    proj = solve_qp(self.metric * 2, -2 * np.dot(self.metric, out),
                    self.constraints_mat, self.cons_rhs)
    return proj
def cost_function(t_input):
    global p_x_final
    global p_y_final
    global p_z_final

    t = [0.2]
    sum = 0.2
    for i in range(len(t_input)):
        sum += t_input[i]
        t.append(sum)
    # print(t)

    Q_1 = form_Q(1, t)
    Q_2 = form_Q(2, t)
    Q_3 = form_Q(3, t)
    Q = block_diag(Q_1, Q_2, Q_3)  # converts to block diagonal form in given order
    Q = Q + (0.0001 * np.identity(n * m))
    A = comp_A(t)

    b_x = [x[0], 0, 0, x[m], 0, 0, x[1], x[2]]
    b_x.extend(np.zeros(shape=(3 * (m - 1))))
    b_y = [y[0], 0, 0, y[m], 0, 0, y[1], y[2]]
    b_y.extend(np.zeros(shape=(3 * (m - 1))))
    b_z = [z[0], 0, 0, z[m], 0, 0, z[1], z[2]]
    b_z.extend(np.zeros(shape=(3 * (m - 1))))

    q = np.zeros(shape=(n * m, 1)).reshape((n * m, ))
    G = np.zeros(shape=((4 * m) + 2, n * m))
    h = np.zeros(shape=((4 * m) + 2, 1)).reshape(((4 * m) + 2, ))

    p_x = solve_qp(Q, q, G, h, A, b_x)
    p_y = solve_qp(Q, q, G, h, A, b_y)
    p_z = solve_qp(Q, q, G, h, A, b_z)
    p_x_final = np.copy(p_x)
    p_y_final = np.copy(p_y)
    p_z_final = np.copy(p_z)

    K = 10000000
    J_x = (0.00001 * (np.matmul(np.matmul(np.transpose(p_x), Q), p_x))) + \
          (0.00001 * (np.matmul(np.matmul(np.transpose(p_y), Q), p_y))) + \
          (0.00001 * (np.matmul(np.matmul(np.transpose(p_z), Q), p_z))) + \
          (K * (t[-1] - t[0]))
    return J_x / 100000
def _solve_optimization(self, agent, avoid_collision, p_mat, q_mat, g_mat, h_mat):
    """To find acceleration input by solving the qp problem. If no solution
    is found within constraints, will try to lower relaxation.

        1/2 x.T * p * x + q.T * x   s.t.   g*x <= h

    Args:
        agent (:obj:`Agent`): Current agent
        avoid_collision (:obj:`bool`): If solving for collision or not
        p_mat (:obj:`np.Array`)
        q_mat (:obj:`np.Array`)
        g_mat (:obj:`np.Array`)
        h_mat (:obj:`np.Array`)

    Returns:
        :obj:`np.Array`: Acceleration input
    """
    cur_relaxation = self.relaxation_max_bound  # To locally increase relaxation bound
    find_solution = True
    accel_input = None
    relax_vals = None

    while find_solution:
        try:
            accel_input = solve_qp(p_mat, q_mat, G=g_mat, h=h_mat, solver='quadprog')
            relax_vals = accel_input[3 * self.steps_in_horizon:]
            accel_input = accel_input[0:3 * self.steps_in_horizon]
            accel_input = accel_input.reshape(3 * self.steps_in_horizon, 1)
            find_solution = False

        # No solution within constraints
        except ValueError:
            cur_relaxation -= self.relaxation_inc

            # Relax until 2*min is reached
            if cur_relaxation > self.relaxation_min_bound * 2 and avoid_collision:
                print("No solution, relaxing constraints: %.2f" % cur_relaxation)

                # Update constraint in h matrix
                n_collision = len(agent.close_agents.keys())
                for i in range(n_collision):
                    h_mat[(-1 - i * 3), 0] = -cur_relaxation

            # Max relaxation reached
            else:
                self.in_collision = True
                find_solution = False
                if self.verbose:
                    print("ERROR: No solution in constraints, Check max space")

    return accel_input, relax_vals
if __name__ == "__main__":
    if get_ipython() is None:
        print("Usage: ipython -i %s" % basename(__file__))
        exit()

    dense_instr = {
        solver: "u = solve_qp(P, q, G, h, solver='%s')" % solver
        for solver in dense_solvers}
    sparse_instr = {
        solver: "u = solve_qp(P_csc, q, G_csc, h, solver='%s')" % solver
        for solver in sparse_solvers}

    print("\nTesting all QP solvers on a dense quadratic program...")

    sol0 = solve_qp(P, q, G, h, solver=dense_solvers[0])
    for solver in dense_solvers:
        sol = solve_qp(P, q, G, h, solver=solver)
        delta = norm(sol - sol0)
        assert delta < 1e-4, "%s's solution offset by %.1e" % (solver, delta)
    for solver in sparse_solvers:
        sol = solve_qp(P_csc, q, G_csc, h, solver=solver)
        delta = norm(sol - sol0)
        assert delta < 1e-4, "%s's solution offset by %.1e" % (solver, delta)

    print("\nDense solvers")
    print("-------------")
    for solver, instr in dense_instr.items():
        print("%s: " % solver, end='')
        get_ipython().magic(u'timeit %s' % instr)
# You should have received a copy of the GNU Lesser General Public License
# along with qpsolvers. If not, see <http://www.gnu.org/licenses/>.

from numpy import array, dot
from qpsolvers import solve_qp
from time import time

M = array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
P = dot(M.T, M)  # quick way to build a symmetric matrix
q = dot(array([3., 2., 3.]), M).reshape((3,))
G = array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])
h = array([3., 2., -2.]).reshape((3,))

t_start = time()
solver = "quadprog"  # see qpsolvers.available_solvers
x_sol = solve_qp(P, q, G, h, solver=solver)
t_end = time()

print("")
print("    min. 1/2 x^T P x + q^T x")
print("    s.t. G * x <= h")
print("")
print("P =", P)
print("q =", q)
print("G =", G)
print("h =", h)
print("")
print("Solution: x =", x_sol)
print("Solve time:", 1000. * (t_end - t_start), "[ms]")
print("Solver:", solver)
def solve_random_qp(n, solver):
    M, b = random.random((n, n)), random.random(n)
    P, q = dot(M.T, M), dot(b, M).reshape((n,))
    G = toeplitz([1., 0., 0.] + [0.] * (n - 3), [1., 2., 3.] + [0.] * (n - 3))
    h = ones(n)
    return solve_qp(P, q, G, h, solver=solver)
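# A hypothetical timing sketch, not from the original benchmark: time solve_random_qp
# on a mid-sized random problem. It assumes the module-level names the function relies
# on (numpy.random as random, numpy's dot/ones, scipy.linalg.toeplitz, solve_qp).
from time import perf_counter

def time_random_qp(n=100, solver="quadprog"):
    start = perf_counter()
    x = solve_random_qp(n, solver)
    elapsed = perf_counter() - start
    print("n=%d, solver=%s: %.1f ms" % (n, solver, 1000.0 * elapsed))
    return x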