def ERC(X, w0=None, up_bound=1., low_bound=0.):
    r""" Get weights of the Equal Risk Contribution portfolio allocation.

    Notes
    -----
    Weights of the Equal Risk Contribution portfolio, as described by
    S. Maillard, T. Roncalli and J. Teiletche [1]_, verify the following
    problem:

    .. math::
        w = \text{arg min } f(w) \\
        u.c. \begin{cases}w'e = 1 \\
                          0 \leq w_i \leq 1 \\
             \end{cases}

    With:

    .. math::
        f(w) = N \sum_{i=1}^{N} w_i^2 (\Omega w)_i^2
             - \sum_{i,j=1}^{N} w_i w_j (\Omega w)_i (\Omega w)_j

    Where :math:`\Omega` is the variance-covariance matrix of `X` and
    :math:`N` the number of assets.

    Parameters
    ----------
    X : array_like
        Each column is a series of prices or returns of an asset.
    w0 : array_like, optional
        Initial guess of weights for the optimizer.
    up_bound, low_bound : float, optional
        Respectively maximum and minimum values of weights, such that
        low_bound :math:`\leq w_i \leq` up_bound :math:`\forall i`.
        Default is 1 and 0 respectively.

    Returns
    -------
    array_like
        Weights of the Equal Risk Contribution portfolio.

    References
    ----------
    .. [1] http://thierry-roncalli.com/download/erc-slides.pdf

    """
    T, N = X.shape
    SIGMA = np.cov(X, rowvar=False)
    up_bound = max(up_bound, 1 / N)

    def f_ERC(w):
        w = w.reshape([N, 1])
        arg = N * np.sum(w**2 * (SIGMA @ w)**2)

        return arg - np.sum(w * (SIGMA @ w) * np.sum(w * (SIGMA @ w)))

    # Set initial weights
    if w0 is None:
        w0 = np.ones([N]) / N

    # Set constraints and minimize
    const_sum = LinearConstraint(np.ones([1, N]), [1], [1])
    const_ind = Bounds(low_bound * np.ones([N]), up_bound * np.ones([N]))

    result = minimize(f_ERC, w0, method='SLSQP', constraints=[const_sum],
                      bounds=const_ind)

    return result.x.reshape([N, 1])
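# A minimal usage sketch of ERC on synthetic data (an illustration added
# here, not part of the original module; it presumes the module-level
# imports the function relies on: numpy as np, and minimize,
# LinearConstraint, Bounds from scipy.optimize).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = rng.normal(5e-4, 1e-2, size=(250, 4))  # 250 days of returns, 4 assets
    w = ERC(X)
    print(w.flatten(), w.sum())  # weights lie in [0, 1] and sum to ~1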
def test_individual_constraint_objects(self):
    fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
    x0 = [2, 0, 1]

    cone = []  # with equality constraints (can't use cobyla)
    coni = []  # only inequality constraints (can use cobyla)
    methods = ["slsqp", "cobyla", "trust-constr"]

    # nonstandard data types for constraint equality bounds
    cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1))
    cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21]))
    cone.append(NonlinearConstraint(lambda x: x[0] - x[1],
                                    1.21, np.array([1.21])))

    # multiple equalities
    cone.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    1.21, 1.21))  # two same equalities
    cone.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    [1.21, 1.4],
                                    [1.21, 1.4]))  # two different equalities
    cone.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    [1.21, 1.21],
                                    1.21))  # equality specified two ways
    cone.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    [1.21, -np.inf],
                                    [1.21, np.inf]))  # equality + unbounded

    # nonstandard data types for constraint inequality bounds
    coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf))
    coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf))
    coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
                                    1.21, np.array([np.inf])))
    coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3))
    coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
                                    np.array(-np.inf), -3))

    # multiple inequalities/equalities
    coni.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    1.21, np.inf))  # two same inequalities
    cone.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    [1.21, -np.inf],
                                    [1.21, 1.4]))  # mixed equality/inequality
    coni.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    [1.1, .8],
                                    [1.2, 1.4]))  # bounded above and below
    coni.append(NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                    [-1.2, -1.4],
                                    [-1.1, -.8]))  # - bounded above and below

    # quick check of LinearConstraint class (very little new code to test)
    cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21))
    cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21))
    cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]],
                                 [1.21, -np.inf], [1.21, 1.4]))

    for con in coni:
        funs = {}
        for method in methods:
            with suppress_warnings() as sup:
                sup.filter(UserWarning)
                result = minimize(fun, x0, method=method, constraints=con)
                funs[method] = result.fun
        assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
        assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3)

    for con in cone:
        funs = {}
        for method in methods[::2]:  # skip cobyla
            with suppress_warnings() as sup:
                sup.filter(UserWarning)
                result = minimize(fun, x0, method=method, constraints=con)
                funs[method] = result.fun
        assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
def get_slope(y):
    k = np.arange(30)
    K = len(k)

    def fun(x):
        eta, beta, a, b = x
        return np.sum((eta + beta * (1 + a * k + b * k**2) - y)**2)

    def jac(x):
        eta, beta, a, b = x
        err = (eta + beta * (1 + a * k + b * k**2) - y)
        d_eta = np.sum(2 * err)
        d_beta = np.sum(2 * err * (1 + a * k + b * k**2))
        d_a = np.sum(2 * err * beta * k)
        d_b = np.sum(2 * err * beta * k**2)
        return np.array([d_eta, d_beta, d_a, d_b])

    def hess(x):
        eta, beta, a, b = x
        pol = (1 + a * k + b * k**2)
        err = (eta + beta * pol - y)
        d_eta2 = 2 * K
        d_beta2 = np.sum(2 * pol**2)
        d_a2 = np.sum(2 * (beta * k)**2)
        d_b2 = np.sum(2 * (beta * k**2)**2)
        d_eta_beta = np.sum(2 * pol)
        d_eta_a = np.sum(2 * beta * k)
        d_eta_b = np.sum(2 * beta * k**2)
        d_beta_a = 2 * np.sum(k * beta * pol + err * k)
        d_beta_b = 2 * np.sum(k**2 * beta * pol + err * k**2)
        d_a_b = 2 * np.sum(beta**2 * k**3)
        return np.array([[d_eta2, d_eta_beta, d_eta_a, d_eta_b],
                         [d_eta_beta, d_beta2, d_beta_a, d_beta_b],
                         [d_eta_a, d_beta_a, d_a2, d_a_b],
                         [d_eta_b, d_beta_b, d_a_b, d_b2]])

    def const(x):
        eta, beta, a, b = x
        return [[eta + beta * (1 + a * kk + b * kk**2)] for kk in k]

    def jac_const(x):
        eta, beta, a, b = x
        jjac = np.zeros((len(k), 4))
        for kk in k:
            d_eta = 1
            d_beta = 1 + a * kk + b * kk**2
            d_a = beta * kk
            d_b = beta * kk**2
            jjac[kk] = np.array([d_eta, d_beta, d_a, d_b])
        return jjac

    def hess_const(x, v):
        eta, beta, a, b = x
        hess = np.zeros((len(k), 4, 4))
        for kk in k:
            d_eta2 = 0
            d_beta2 = 0
            d_a2 = 0
            d_b2 = 0
            d_eta_beta = 0
            d_eta_a = 0
            d_eta_b = 0
            d_beta_a = kk
            d_beta_b = kk**2
            d_a_b = 0
            hess[kk] = np.array([[d_eta2, d_eta_beta, d_eta_a, d_eta_b],
                                 [d_eta_beta, d_beta2, d_beta_a, d_beta_b],
                                 [d_eta_a, d_beta_a, d_a2, d_a_b],
                                 [d_eta_b, d_beta_b, d_a_b, d_b2]])
        return np.sum([v[l] * hess[l] for l in k], 0)

    constr_matrix = np.zeros((3 + len(k) - 1, 4))
    constr_matrix[0, 0] = 1
    constr_matrix[1, 1] = 1
    constr_matrix[2, :2] = 1
    for j_k, k_val in enumerate(k[1:]):
        constr_matrix[j_k + 3, 2] = k_val
        constr_matrix[j_k + 3, 3] = k_val**2

    constr_left = np.zeros(3 + len(k) - 1)
    constr_left[1] = np.min(y) / 2
    constr_left[2] = np.min(y)
    constr_left[3:] = -1

    constr_right = np.zeros(3 + len(k) - 1) + np.max(y)
    constr_right[3:] = np.max(y) / np.min(y) - 1

    lin_const = LinearConstraint(constr_matrix, constr_left, constr_right)
    non_lin_const = NonlinearConstraint(const, np.min(y), np.max(y),
                                        jac_const, hess_const)

    res = minimize(fun, np.array([np.min(y) / 2, np.min(y) / 2, 0, 0]),
                   jac=jac, hess=hess, method='trust-constr',
                   constraints=[lin_const, non_lin_const])
    return res.x
def equilibrate(self, composition, temperature):
    occs = self.independent_cluster_occupancies.T
    c_arr = self.compositional_array(composition)

    def energy(rxn_amounts, p_ind_start, rxn_matrix):
        p_ind = p_ind_start + rxn_matrix.T.dot(rxn_amounts)

        # tweak to ensure feasibility of solution
        p_s = occs.dot(p_ind)
        invalid = (p_s < 0.)
        if any(invalid):
            f = min(abs(p_s_max[invalid]
                        / (p_s_max[invalid] - p_s[invalid])))
            f -= 1.e-6  # a little extra nudge into the valid domain
            p_ind = f * p_ind + (1. - f) * p_ind_max

        self.set_composition_from_p_ind(p_ind)
        # grad = rxn_matrix.dot(self.molar_chemical_potentials)
        return self.molar_helmholtz  # , grad

    self.set_state(temperature)

    # Find ordered and disordered states to use as starting points
    p_cl_grd = self.ground_state_cluster_proportions_from_composition(c_arr)
    p_cl_max = self.maximum_entropy_cluster_proportions_from_composition(
        c_arr)

    p_ind_max = self.A_ind_flat.T.dot(p_cl_max)
    p_s_max = occs.dot(p_ind_max)

    # minimize using the near-ground state as a starting point
    p_cl = 0.95 * p_cl_grd + 0.05 * p_cl_max
    p_ind0 = self.A_ind_flat.T.dot(p_cl)

    # keep_feasible=True requires some future version of scipy
    cons = LinearConstraint(occs.dot(self.isochemical_reactions.T),
                            0. - occs.dot(p_ind0), 1. - occs.dot(p_ind0))
    guess = np.array([0. for i in range(self.n_reactions)])

    res = minimize(energy, guess, method='SLSQP', constraints=cons,
                   args=(p_ind0, self.isochemical_reactions), jac=False)

    # store p_ind and E
    p_ind = self.p_ind
    E = res.fun

    # minimize using the near-maximum entropy state as a starting point
    p_cl = 0.05 * p_cl_grd + 0.95 * p_cl_max
    p_ind0 = self.A_ind_flat.T.dot(p_cl)

    # keep_feasible=True requires some future version of scipy
    cons = LinearConstraint(occs.dot(self.isochemical_reactions.T),
                            0. - occs.dot(p_ind0), 1. - occs.dot(p_ind0))
    guess = np.array([0. for i in range(self.n_reactions)])

    res = minimize(energy, guess, method='SLSQP', constraints=cons,
                   args=(p_ind0, self.isochemical_reactions), jac=False)

    # keep whichever starting point produced the lower energy
    if res.fun > E:
        self.set_composition_from_p_ind(p_ind)

    self.equilibrated_clusters = True
    self.set_cluster_proportions()
import numpy as np
from scipy import optimize
from scipy.optimize import (Bounds, LinearConstraint, NonlinearConstraint,
                            rosen)


# Constraint function from the SciPy trust-constr tutorial this example
# follows; cons_J and cons_H below are its Jacobian and Hessian.
def cons_f(x):
    return [x[0]**2 + x[1], x[0]**2 - x[1]]


def cons_J(x):
    return [[2 * x[0], 1], [2 * x[0], -1]]


def cons_H(x, v):
    return v[0] * np.array([[2, 0], [0, 0]]) + v[1] * np.array([[2, 0],
                                                                [0, 0]])


nonlinear_constraint = NonlinearConstraint(cons_f, -np.inf, 1,
                                           jac=cons_J, hess=cons_H)
bounds = Bounds([0, -0.5], [1.0, 2.0])
linear_constraint = LinearConstraint([[1, 2], [2, 1]], [-np.inf, 1], [1, 1])
x0 = np.array([0.5, 0])
res = optimize.minimize(rosen, x0, method='trust-constr',
                        constraints=[linear_constraint,
                                     nonlinear_constraint],
                        options={'verbose': 1}, bounds=bounds)
print(res.x)
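# Optional cross-check (an illustrative addition, not in the original
# example): recent SciPy versions also accept the new-style constraint
# objects with SLSQP, so the same problem can be re-solved for comparison.
res_slsqp = optimize.minimize(rosen, x0, method='SLSQP',
                              constraints=[linear_constraint,
                                           nonlinear_constraint],
                              bounds=bounds)
print(res_slsqp.x)  # should agree with the trust-constr solution above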
def trust_constr_solve(mesh, etas, hkl, tths, intensity, directions, strains,
                       omegas, dtys, weights, beam_width, grad_constraint):
    '''Fit per-element strain components by weighted least squares, subject
    to linear bounds on the strain gradient, using the trust-constr solver.
    '''
    nelm = mesh.shape[0]

    A, bad_equations = calc_A_matrix(mesh, directions, omegas, dtys,
                                     beam_width)

    A = np.delete(A, bad_equations, axis=0)
    strains = np.delete(strains, bad_equations, axis=0)
    weights = np.delete(weights, bad_equations, axis=0)

    W = np.diag(weights)
    WA = np.dot(W, A)
    m = strains
    Wm = np.dot(W, m)
    WATWA = np.dot(WA.T, WA)
    WATWm = np.dot(WA.T, Wm)

    def func(x):
        return 0.5 * np.linalg.norm((np.dot(WA, x) - Wm))**2

    def jac(x):
        return (np.dot(WATWA, x) - WATWm)

    def hess(x):
        return WATWA

    from scipy.optimize import LinearConstraint
    from scipy.optimize import minimize

    lb, c, ub = constraints(mesh, -grad_constraint, grad_constraint)
    linear_constraint = LinearConstraint(c, lb, ub, keep_feasible=True)

    x0 = np.zeros(6 * nelm)

    def callback(xk, state):
        out = " {} {}"
        if state.nit == 1:
            print(out.format("iteration", "cost"))
        print(out.format(state.nit, np.round(state.fun, 5)))
        return state.nit == 10

    res = minimize(func, x0, method='trust-constr', jac=jac, hess=hess,
                   callback=callback, tol=1e-8,
                   constraints=[linear_constraint],
                   options={'disp': True, 'maxiter': 10})
    s_tilde = res.x
    # conditions = np.dot(c, s_tilde)

    if 0:
        omegas = np.delete(omegas, bad_equations, axis=0)
        dtys = np.delete(dtys, bad_equations, axis=0)
        directions = np.delete(directions, bad_equations, axis=0)
        etas = np.delete(etas, bad_equations, axis=0)
        hkl = np.delete(hkl, bad_equations, axis=0)
        tths = np.delete(tths, bad_equations, axis=0)
        intensity = np.delete(intensity, bad_equations, axis=0)

        # np.save('/home/axel/Desktop/A.npy', A)
        # np.save('/home/axel/Desktop/W.npy', W)
        # np.save('/home/axel/Desktop/s.npy', s_tilde)
        # np.save('/home/axel/Desktop/m.npy', m)
        # np.save('/home/axel/Desktop/omegas.npy', omegas)
        # np.save('/home/axel/Desktop/dtys.npy', dtys)
        # np.save('/home/axel/Desktop/directions.npy', directions)
        # np.save('/home/axel/Desktop/etas.npy', etas)
        # np.save('/home/axel/Desktop/hkl.npy', hkl)
        # np.save('/home/axel/Desktop/tths.npy', tths)
        # np.save('/home/axel/Desktop/intensity.npy', intensity)

        WAs = np.dot(WA, s_tilde)
        # print(directions)
        xm = []
        xstrainm = []
        xc = []
        xstrainc = []
        for i in range(directions.shape[0]):
            d = directions[i, :]
            # print(d)
            ang = np.degrees(np.arccos(abs(np.dot(d, np.array([1, 0, 0])))))
            # print(ang)
            if ang < 10.0:
                xm.append(dtys[i])
                xstrainm.append(Wm[i])
                xc.append(dtys[i])
                xstrainc.append(WAs[i])

        s = 30
        t = 23

        plt.figure(1)
        plt.scatter(dtys, omegas, s=7, c=np.dot(WA, s_tilde), cmap='viridis')
        plt.xlabel(r'sample y-translation [$\mu$m]', size=s)
        plt.ylabel(r'sample rotation, $\omega$ [$^o$]', size=s)
        plt.title('Computed average strains')
        c1 = plt.colorbar()
        c1.ax.tick_params(labelsize=t)
        plt.tick_params(labelsize=t)

        plt.figure(2)
        plt.scatter(dtys, omegas, s=7, c=Wm, cmap='viridis')
        plt.xlabel(r'sample y-translation [$\mu$m]', size=s)
        plt.ylabel(r'sample rotation, $\omega$ [$^o$]', size=s)
        plt.title('Measured average strains')
        c2 = plt.colorbar()
        c2.ax.tick_params(labelsize=t)
        plt.tick_params(labelsize=t)

        plt.figure(3)
        plt.scatter(xm, xstrainm, s=85, marker="^",
                    label=r'Measured strain ($\mathbf{Wm}$)')
        plt.scatter(xc, xstrainc, s=85, marker="o",
                    label=r'Fitted strain ($\mathbf{WAs}$)')
        plt.xlabel(r'x', size=s)
        plt.ylabel(r'Integrated weighted strain', size=s)
        plt.legend(fontsize=s)
        plt.tick_params(labelsize=t)

        plt.show()

    # reformat, so that each row is the strain for one element
    s_tilde = s_tilde.reshape(nelm, 6)

    return (s_tilde[:, 0], s_tilde[:, 1], s_tilde[:, 2],
            s_tilde[:, 3], s_tilde[:, 4], s_tilde[:, 5])
async def poa(self, ctx):
    """ temp """
    # take numerical inputs, set an initial guess of evenly distributed pi,
    # attempt to iterate Newton's method until convergence of the result
    betas = [float(a) for a in ctx.message.content[5:].split()]
    if any(b <= 0 or b > 1 for b in betas[1:]):
        return await ctx.send(
            "Invalid router probability detected. Range is (0, 1].")
    players = int(betas.pop(0))
    routers = len(betas)

    def q_ary_n_seq(q, n):
        if n <= 1:
            return [[k] for k in range(q)]
        sequences_to_add = q_ary_n_seq(q, n - 1)
        cur_found = []
        for i in range(q):
            for j in sequences_to_add:
                cur_found.append([i] + j)
        return cur_found

    # betas = symbols(" ".join([f'beta{i}' for i in range(1, routers + 1)]))
    pis = symbols(" ".join([f'pi{i}' for i in range(1, routers + 1)]))
    all_routers = q_ary_n_seq(routers, players)
    equations = [0] * routers
    total_payoff = 0
    for outcome in all_routers:
        router = outcome[0]
        people_sharing = outcome.count(router)
        coefficient = betas[router] / people_sharing
        equations[router] += coefficient * np.prod(
            [pis[i] for i in outcome[1:]])
        for i in range(len(outcome)):
            total_payoff += betas[outcome[i]] * np.prod(
                [pis[j] for j in outcome]) / outcome.count(outcome[i])

    def f(mypis):
        nonlocal pis, total_payoff
        return -1 * total_payoff.subs(list(zip(pis, mypis)))

    res = optimize.minimize(
        f, [1 / routers for i in range(routers)],
        constraints=[LinearConstraint([np.ones(routers)], [1], [1])],
        bounds=Bounds(np.zeros(routers), np.inf * np.ones(routers)))
    await ctx.send(res.x)
    await ctx.send(sum(res.x))

    equationset = [equations[0] - i for i in equations[1:]] + [sum(pis) - 1]
    diffmatrix = np.array([[diff(eq, pi) for pi in pis]
                           for eq in equationset])
    # use an ndarray so that `initial_pis += delta` updates elementwise
    # (with a plain list, += would extend the list instead)
    initial_pis = np.array([1 / routers for i in range(routers)])
    a = time.time()
    while True:
        await asyncio.sleep(0)
        if time.time() - a > 10:
            return await ctx.send(
                "10 seconds have passed; iteration did not converge in time."
            )
        cursub = list(zip(pis, initial_pis))
        subber = np.vectorize(lambda x: x.subs(cursub))
        A = subber(diffmatrix)
        b = -1 * np.array([eq.subs(cursub) for eq in equationset],
                          dtype='float')
        delta = np.linalg.solve(A.astype(np.float64), b)
        initial_pis += delta
        if (np.abs(delta) <= 0.001 * np.ones(routers)).all():
            break
    await ctx.send(initial_pis)
    await ctx.send("Sum = " + str(sum(initial_pis)))
def obtain_sol(self, curr_x, g_xs):
    """ calculate the optimal inputs

    Args:
        curr_x (numpy.ndarray): current state, shape(state_size, )
        g_xs (numpy.ndarray): goal trajectory,
            shape(plan_len+1, state_size)
    Returns:
        opt_input (numpy.ndarray): optimal input, shape(input_size, )
    """
    temp_1 = np.matmul(self.phi_mat, curr_x.reshape(-1, 1))
    temp_2 = np.matmul(self.gamma_mat, self.history_u[-1].reshape(-1, 1))

    error = g_xs[1:].reshape(-1, 1) - temp_1 - temp_2

    G = np.matmul(self.theta_mat.T, np.matmul(self.Qs, error))

    H = np.matmul(self.theta_mat.T, np.matmul(self.Qs, self.theta_mat)) \
        + self.Rs
    H = H * 0.5

    # constraints
    A = []
    b = []

    if self.W is not None:
        A.append(self.W)
        b.append(self.omega.reshape(-1, 1))

    if self.F is not None:
        b_F = - np.matmul(self.F1, self.history_u[-1].reshape(-1, 1)) \
              - self.f.reshape(-1, 1)
        A.append(self.F)
        b.append(b_F)

    A = np.array(A).reshape(-1, self.input_size * self.pred_len)
    ub = np.array(b).flatten()

    # objective for scipy's solver
    def optimized_func(dt_us):
        return (np.dot(dt_us, np.dot(H, dt_us.reshape(-1, 1)))
                - np.dot(G.T, dt_us.reshape(-1, 1)))[0]

    # constraint
    lb = np.array([-np.inf for _ in range(len(ub))])  # one-sided constraint
    cons = LinearConstraint(A, lb, ub)
    # solve
    opt_sol = minimize(optimized_func, self.prev_sol.flatten(),
                       constraints=[cons])
    opt_dt_us = opt_sol.x

    """ using the cvxopt version;
    if you want to solve more quickly, please use cvxopt instead of scipy
    # make the cvxopt problem formulation
    P = 2 * matrix(H)
    q = matrix(-1 * G)
    A = matrix(A)
    b = matrix(ub)
    # solve the problem
    opt_sol = solvers.qp(P, q, G=A, h=b)
    opt_dt_us = np.array(list(opt_sol['x']))
    """

    # to dt form
    opt_dt_u_seq = np.cumsum(opt_dt_us.reshape(self.pred_len,
                                               self.input_size),
                             axis=0)
    self.prev_sol = opt_dt_u_seq.copy()

    opt_u_seq = opt_dt_u_seq + self.history_u[-1]

    # save
    self.history_u.append(opt_u_seq[0])

    # check costs
    costs = self.calc_cost(
        curr_x, opt_u_seq.reshape(1, self.pred_len, self.input_size), g_xs)

    logger.debug("Cost = {}".format(costs))

    return opt_u_seq[0]
def lasso(func, func_jac, func_hess, init_guess, lamda, options):
    nunknowns = init_guess.shape[0]
    nslack_variables = nunknowns

    def obj(lamda, x):
        vals = func(x[:nunknowns])
        if func_jac == True:
            grad = vals[1]
            vals = vals[0]
        else:
            grad = func_jac(x[:nunknowns])
        vals += lamda * np.sum(x[nunknowns:])
        grad = np.concatenate([grad, lamda * np.ones(nslack_variables)])
        return vals, grad

    def hess(x):
        H = sp.lil_matrix((x.shape[0], x.shape[0]), dtype=float)
        H[:nunknowns, :nunknowns] = func_hess(x[:nunknowns])
        return H

    if func_hess is None:
        hess = None

    I = sp.identity(nunknowns)
    tmp = np.array([[1, -1], [-1, -1]])
    # rows encode x - t <= 0 and -x - t <= 0, i.e. |x| <= t for slacks t
    A_con = sp.kron(tmp, I)
    lb_con = -np.inf * np.ones(nunknowns + nslack_variables)
    ub_con = np.zeros(nunknowns + nslack_variables)
    linear_constraint = LinearConstraint(A_con, lb_con, ub_con,
                                         keep_feasible=False)
    constraints = [linear_constraint]
    # print(A_con.A)

    lbs = np.zeros(nunknowns + nslack_variables)
    lbs[:nunknowns] = -np.inf
    ubs = np.inf * np.ones(nunknowns + nslack_variables)
    bounds = Bounds(lbs, ubs)

    x0 = np.concatenate([init_guess, np.absolute(init_guess)])

    method = get_method(options)
    # method = options.get('method', 'slsqp')
    if 'method' in options:
        del options['method']
    if method != 'ipopt':
        res = minimize(partial(obj, lamda), x0, method=method, jac=True,
                       hess=hess, options=options, bounds=bounds,
                       constraints=constraints)
    else:
        # jac_structure_old = lambda: np.nonzero(
        #     np.tile(np.eye(nunknowns), (2, 2)))
        def jac_structure():
            rows = np.repeat(np.arange(2 * nunknowns), 2)
            cols = np.empty_like(rows)
            cols[::2] = np.hstack([np.arange(nunknowns)] * 2)
            cols[1::2] = np.hstack(
                [np.arange(nunknowns, 2 * nunknowns)] * 2)
            return rows, cols
        # assert np.allclose(jac_structure()[0], jac_structure_old()[0])
        # assert np.allclose(jac_structure()[1], jac_structure_old()[1])
        # jac_structure = None

        def hess_structure():
            h = np.zeros((2 * nunknowns, 2 * nunknowns))
            h[:nunknowns, :nunknowns] = np.tril(
                np.ones((nunknowns, nunknowns)))
            return np.nonzero(h)

        if hess is None:
            hess_structure = None

        from ipopt import minimize_ipopt
        from scipy.optimize._constraints import new_constraint_to_old
        con = new_constraint_to_old(constraints[0], x0)
        res = minimize_ipopt(partial(obj, lamda), x0, method=method,
                             jac=True, options=options, constraints=con,
                             jac_structure=jac_structure,
                             hess_structure=hess_structure, hess=hess)
    return res.x[:nunknowns], res
def make_worst(eta, bm, bs, trials=500000, invtemp=10.):
    """Make matrix that is as far removed as possible from betatheo"""
    bm, bv, betatheo = hebbian_getbstats(
        np.ones((eta.shape[0], eta.shape[0])), eta, bm=bm, bv=bs**2)

    def bilin(etai, etaj, mat):
        def bi(x, A, B, C, D):
            return A * x[0] * x[1] + B * x[0] + C * x[1] + D
        res = curve_fit(bi, np.array([etai, etaj]), mat)
        # print(res)
        return res[0][0]

    def collision(phi, v1x, v2x, v1y, v2y):
        a = np.tan(phi)
        d2 = 2 * (v1x - v2x + a * (v1y - v2y)) / (1 + a**2) / 2.
        pv1x = v1x - d2
        pv2x = v2x + d2
        pv1y = v1y - a * d2
        pv2y = v2y + a * d2
        return pv1x, pv2x, pv1y, pv2y

    beta = create_synthetic_network_LV_gam0(eta, bm, bs)
    off = offdiag(beta)
    offtheo = offdiag(betatheo)
    ii = np.multiply.outer(np.arange(len(eta)),
                           np.ones(len(eta))).astype('int')
    i, j = offdiag(ii), offdiag(ii.T)
    matcons = np.array([(i == k) * (eta[list(j)]) for k in range(len(eta))])
    veccons = 1 - eta
    etai, etaj = eta[list(i)], eta[list(j)]
    slop = bilin(etai, etaj, offtheo)
    # code_debugger()

    def dist(x):
        if True or 'bilin' in sys.argv:
            s = bilin(etai, etaj, x)
            return s * np.sign(slop)
        return -np.sum(np.abs(x - offtheo))

    if 0:
        oldbeta = beta.copy()
        offold = offdiag(oldbeta)
        rej1, rej2 = 0, 0
        preverr = 0
        for trial in range(trials):
            if trial % 10000 == 0:
                print(trial)
            i, j, k, l = np.random.choice(range(len(off)), replace=False,
                                          size=4)
            phi = np.random.uniform(0, np.pi * 2)
            prevbetas = off[[i, j, k, l]]
            newbetas = np.array(collision(phi, *prevbetas))
            off2 = off.copy()
            off2[[i, j, k, l]] = newbetas
            if np.random.random() < np.exp(
                    invtemp * np.sum(-(offtheo[[i, j, k, l]] - prevbetas)
                                     * (newbetas - prevbetas))):
                err = np.mean((np.dot(matcons, off2) - veccons)**2)**.5
                if np.random.random() < np.exp(invtemp * (-err + preverr)):
                    off = off2
                    preverr = err
                else:
                    rej2 += 1
            else:
                rej1 += 1
        beta[np.diag(np.ones(eta.shape[0])) == 0] = off
        code_debugger()

    from scipy.optimize import minimize, NonlinearConstraint, LinearConstraint
    empbm = np.mean(off)
    empbs = np.std(off)
    if 0:
        eq_bm = {
            'type': 'eq',
            'fun': lambda x: np.mean(x) - empbm,
            # 'jac': lambda x: np.ones(x.shape) * 1. / len(x)
        }
        eq_bs = {
            'type': 'eq',
            'fun': lambda x: np.var(x) - empbs**2,
            # 'jac': lambda x: (2 * x - 2. * empbm) / len(x)
        }
        eq_eta = {
            'type': 'eq',
            'fun': lambda x: np.sum((np.dot(matcons, x) - veccons)**2),
        }
        res = minimize(
            dist, off, method='SLSQP',
            constraints=[eq_bm, eq_bs, eq_eta],
            options={'ftol': 1e-9, 'maxiter': 5000, 'disp': True},
        )
    else:
        b = empbm * np.ones(len(off))
        eq_bm = LinearConstraint(np.ones((len(off), len(off))), lb=b, ub=b)
        b = veccons
        eq_eta = LinearConstraint(matcons, lb=b, ub=b)
        eq_bs = NonlinearConstraint(lambda x: np.var(x), 0, empbs**2)
        eq_bm = NonlinearConstraint(lambda x: np.mean(x) - empbm, 0, 0)
        res = minimize(
            dist, off, method='trust-constr',
            constraints=[eq_bm, eq_bs, eq_eta],
            options={'maxiter': 5000, 'disp': False},
        )

    beta[np.diag(np.ones(eta.shape[0])) == 0] = res.x
    # compmats = [offdiag(create_synthetic_network_LV_gam0(eta, bm, bs))
    #             for z in range(100)]
    # scatter([np.std(b) for b in compmats],
    #         [bilin(etai, etaj, b) for b in compmats])
    # code_debugger()
    if np.std(res.x) < empbs:
        corr = snap_beta(
            create_synthetic_network_LV_gam0(
                eta, 0, (empbs**2 - np.var(res.x))**.5, val=0),
            eta, val=0, diag=0)
        beta = beta + corr
    return beta
mtest = model2()


# defining an objective function to minimize, equation 11 from the pdf
def ob_func32(arguments):
    Hf = arguments[0]
    Hnf = arguments[1]
    Y = (mtest.Af * Hf**((mtest.rho - 1) / mtest.rho)
         + ctw * mtest.Anf * Hnf**((mtest.rho - 1) / mtest.rho))**(
             mtest.rho / (mtest.rho - 1))
    return -(Y - mtest.kf * Hf - mtest.knf * Hnf - mtest.omega
             * (1 - mtest.gamma) / mtest.N * beta * mtest.io * Hf**2)


# defining the constraints: Hf + Hnf <= 1, with Hf, Hnf in the [0, 1] interval
linear_constraint = LinearConstraint([[1, 1]], [0], [1])
bounds = Bounds([0, 0], [1.0, 1.0])

# Defining the matrices where we store results
Hf = np.empty((101, 101))
Hnf = np.empty((101, 101))

# Solving for Hf, Hnf
for i, beta in enumerate(np.linspace(0, 1, 101)):
    for j, ctw in enumerate(np.linspace(0, 1, 101)):
        try:
            res = minimize(ob_func32, [0.5, 0.5], method='trust-constr',
                           constraints=[linear_constraint],
                           options={'verbose': 1}, bounds=bounds)
def fit(self, X, y, src_index, tgt_index,
        tgt_index_labeled=None, **fit_params):
    """
    Fit KMM.

    Parameters
    ----------
    X : numpy array
        Input data.

    y : numpy array
        Output data.

    src_index : iterable
        indexes of source labeled data in X, y.

    tgt_index : iterable
        indexes of target unlabeled data in X, y.

    tgt_index_labeled : iterable, optional (default=None)
        indexes of target labeled data in X, y.

    fit_params : key, value arguments
        Arguments given to the fit method of the estimator
        (epochs, batch_size...).

    Returns
    -------
    self : returns an instance of self
    """
    check_indexes(src_index, tgt_index, tgt_index_labeled)

    if tgt_index_labeled is None:
        Xs = X[src_index]
        ys = y[src_index]
    else:
        Xs = X[np.concatenate((src_index, tgt_index_labeled))]
        ys = y[np.concatenate((src_index, tgt_index_labeled))]
    Xt = X[tgt_index]

    n_s = len(Xs)
    n_t = len(Xt)

    # Get epsilon
    if self.epsilon is None:
        self.epsilon = (np.sqrt(n_s) - 1) / np.sqrt(n_s)

    # Compute Kernel Matrix
    K = pairwise.pairwise_kernels(Xs, Xs, metric=self.kernel,
                                  **self.kernel_params)
    K = (1 / 2) * (K + K.transpose())

    # Compute q
    kappa = pairwise.pairwise_kernels(Xs, Xt, metric=self.kernel,
                                      **self.kernel_params)
    kappa = (n_s / n_t) * np.dot(kappa, np.ones((n_t, 1)))

    constraints = LinearConstraint(np.ones((1, n_s)),
                                   lb=n_s * (1 - self.epsilon),
                                   ub=n_s * (1 + self.epsilon))

    def func(x):
        return (1 / 2) * x.T @ (K @ x) - kappa.T @ x

    weights = minimize(func, x0=np.ones((n_s, 1)),
                       bounds=[(0, self.B)] * n_s,
                       constraints=constraints)['x']

    self.weights_ = np.array(weights).ravel()
    self.estimator_ = check_estimator(self.get_estimator, **self.kwargs)

    try:
        self.estimator_.fit(Xs, ys,
                            sample_weight=self.weights_,
                            **fit_params)
    except Exception:
        bootstrap_index = np.random.choice(
            len(Xs), size=len(Xs), replace=True,
            p=self.weights_ / self.weights_.sum())
        self.estimator_.fit(Xs[bootstrap_index], ys[bootstrap_index],
                            **fit_params)
    return self
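# A standalone sketch of the same kernel mean matching QP on toy data,
# added for illustration (the data, kernel choice, and bound value are
# assumptions, not part of the class above). It solves
# min_w 0.5 w'Kw - kappa'w subject to the mean-matching sum constraint.
import numpy as np
from sklearn.metrics import pairwise
from scipy.optimize import minimize, LinearConstraint

rng = np.random.default_rng(0)
Xs = rng.normal(0.0, 1.0, size=(30, 2))   # source sample
Xt = rng.normal(0.5, 1.0, size=(40, 2))   # shifted target sample
n_s, n_t = len(Xs), len(Xt)

K = pairwise.pairwise_kernels(Xs, Xs, metric="rbf")
K = 0.5 * (K + K.T)  # symmetrize
kappa = (n_s / n_t) * pairwise.pairwise_kernels(Xs, Xt, metric="rbf").sum(axis=1)

eps = (np.sqrt(n_s) - 1) / np.sqrt(n_s)
cons = LinearConstraint(np.ones((1, n_s)), n_s * (1 - eps), n_s * (1 + eps))
res = minimize(lambda w: 0.5 * w @ K @ w - kappa @ w,
               x0=np.ones(n_s), bounds=[(0, 1000.0)] * n_s, constraints=cons)
weights = res.x  # larger weights on source points that resemble the target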
def ord_params_GLLVM(y_ord, nj_ord, lambda_ord_old, ps_y, pzl1_ys, zl1_s, AT,
                     tol=1E-5, maxstep=100):
    ''' Determine the GLLVM coefficients related to the ordinal variables
    by optimizing the coefficients of each column separately.

    y_ord (numobs x nb_ord nd-array): The ordinal data
    nj_ord (list of int): The number of modalities for each ordinal variable
    lambda_ord_old (list of nb_ord_j x (nj_ord + r1) elements): The ordinal
        coefficients of the previous iteration
    ps_y ((numobs, S) nd-array): p(s | y) for all s in Omega
    pzl1_ys (nd-array): p(z1 | y, s)
    zl1_s ((M1, r1, s1) nd-array): z1 | s
    AT ((r1 x r1) nd-array): Var(z1)^{-1/2}
    tol (int): Controls when to stop the optimisation process
    maxstep (int): The maximum number of optimization steps
    ----------------------------------------------------------------------
    returns (list of nb_ord_j x (nj_ord + r1) elements): The new ordinal
        coefficients
    '''

    #****************************
    # Ordinal link parameters
    #****************************

    r0 = zl1_s.shape[1]
    S0 = zl1_s.shape[2]
    nb_ord = len(nj_ord)

    new_lambda_ord = []

    for j in range(nb_ord):
        enc = OneHotEncoder(categories='auto')
        y_oh = enc.fit_transform(y_ord[:, j][..., n_axis]).toarray()

        # Define the constraints such that the threshold coefficients
        # are ordered
        nb_constraints = nj_ord[j] - 2
        nb_params = nj_ord[j] + r0 - 1

        lcs = np.full(nb_constraints, -1)
        lcs = np.diag(lcs, 1)
        np.fill_diagonal(lcs, 1)

        lcs = np.hstack([lcs[:nb_constraints, :],
                         np.zeros([nb_constraints,
                                   nb_params - (nb_constraints + 1)])])

        linear_constraint = LinearConstraint(
            lcs, np.full(nb_constraints, -np.inf),
            np.full(nb_constraints, 0), keep_feasible=True)

        opt = minimize(ord_loglik_j, lambda_ord_old[j],
                       args=(y_oh, zl1_s, S0, ps_y, pzl1_ys, nj_ord[j]),
                       tol=tol, method='trust-constr', jac=ord_grad_j,
                       constraints=linear_constraint, hess='2-point',
                       options={'maxiter': maxstep})

        res = opt.x
        if not opt.success:
            # If the optimization failed, keep the old estimate as the value
            print(opt)
            res = lambda_ord_old[j]
            warnings.warn('One of the ordinal optimisations has failed',
                          RuntimeWarning)

        # Ensure identifiability for Lambda_j
        new_lambda_ord_j = (res[-r0:].reshape(1, r0) @ AT[0]).flatten()
        new_lambda_ord_j = np.hstack([deepcopy(res[:nj_ord[j] - 1]),
                                      new_lambda_ord_j])
        new_lambda_ord.append(new_lambda_ord_j)

    return new_lambda_ord
def MDP(X, w0=None, up_bound=1., low_bound=0.):
    r""" Get weights of the Maximum Diversified Portfolio allocation.

    Notes
    -----
    Weights of the Maximum Diversification Portfolio, as described by
    Y. Choueifaty and Y. Coignard [5]_, verify the following problem:

    .. math::
        w = \text{arg max } D(w) \\
        u.c. \begin{cases}w'e = 1 \\
                          0 \leq w_i \leq 1 \\
             \end{cases}

    Where :math:`D(w)` is the diversified ratio of the portfolio weighted
    by `w`.

    Parameters
    ----------
    X : array_like
        Each column is a series of prices or returns of an asset.
    w0 : array_like, optional
        Initial guess of weights for the optimizer.
    up_bound, low_bound : float, optional
        Respectively maximum and minimum values of weights, such that
        low_bound :math:`\leq w_i \leq` up_bound :math:`\forall i`.
        Default is 1 and 0 respectively.

    Returns
    -------
    array_like
        Weights that maximize the diversified ratio of the portfolio.

    See Also
    --------
    diversified_ratio

    References
    ----------
    .. [5] tobam.fr/wp-content/uploads/2014/12/TOBAM-JoPM-Maximum-Div-2008.pdf

    """
    T, N = X.shape
    up_bound = max(up_bound, 1 / N)

    # Set function to minimize
    def f_max_divers_weights(w):
        return -metrics.diversified_ratio(X, w=w).flatten()

    # Set initial weights
    if w0 is None:
        w0 = np.ones([N]) / N

    # Set constraints and minimize
    const_sum = LinearConstraint(np.ones([1, N]), [1], [1])
    const_ind = Bounds(low_bound * np.ones([N]), up_bound * np.ones([N]))

    result = minimize(f_max_divers_weights, w0, method='SLSQP',
                      constraints=[const_sum], bounds=const_ind)

    return result.x.reshape([N, 1])
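# For reference, a standalone sketch of the diversification ratio that
# f_max_divers_weights maximizes: D(w) = (w' sigma) / sqrt(w' Omega w),
# the weighted average of asset volatilities over the portfolio volatility
# (Choueifaty & Coignard, 2008). This is a minimal re-implementation for
# illustration, not the library's metrics.diversified_ratio.
def diversified_ratio_sketch(X, w):
    sigma = np.std(X, axis=0)        # individual asset volatilities
    Omega = np.cov(X, rowvar=False)  # variance-covariance matrix
    w = np.asarray(w).flatten()
    return (w @ sigma) / np.sqrt(w @ Omega @ w)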
def calculate(self, key):
    date = key[0]
    seccode = key[1]
    time = key[2]
    step_vola_length = key[3]
    backward_num_steps = key[4]
    num_steps = key[5]
    step_length = key[6]
    try:
        DIV = DIVIDER[seccode]
    except KeyError:
        DIV = 1
    volume_to_liquidate = key[7] / DIV
    lam = key[8]

    key_tc = [date, seccode, time]
    key_vola = [date, seccode, time, step_vola_length, backward_num_steps,
                num_steps, step_length]

    transactioncosts = self._transactioncostsagent.get(key_tc)
    vola = self._volaagent.get(key_vola)

    tc_params = np.array(transactioncosts.get_data().params[0][0])
    tc_covparams = np.array(transactioncosts.get_data().cov_params[0])
    vola_estimates = np.array(vola.get_data().vola_estimates[0]) * DIV**2

    atol = 0.000000000001
    x0 = np.array([volume_to_liquidate / num_steps] * num_steps)
    bounds = Bounds(0, volume_to_liquidate, keep_feasible=True)
    linear_constraint = LinearConstraint([[1] * num_steps],
                                         [volume_to_liquidate - atol],
                                         [volume_to_liquidate + atol],
                                         keep_feasible=True)
    print(np.sum(x0))
    print(volume_to_liquidate - atol, volume_to_liquidate + atol)

    V = volume_to_liquidate
    s2 = vola_estimates

    def functional(v):
        v2_new = np.cumsum(v[::-1])[::-1]**2
        square = np.sqrt(np.sum(s2 * v2_new)
                         + np.sum(self._var_costs(v, tc_covparams)))
        first = lam * square
        second = np.sum(self._expected_costs(v, tc_params))
        func = first + second
        return func

    def functional_jac(v):
        coeff = lam / 2
        v_new = np.cumsum(v[::-1])[::-1]
        v2_new = v_new**2
        denumerator = np.sqrt(np.sum(s2 * v2_new)
                              + np.sum(self._var_costs(v, tc_covparams)))
        jac = np.empty(shape=len(v))
        for i in range(len(v)):
            v_i = v[i]
            sl = (s2 * v_new)[:i + 1]
            numerator_i = (self._var_costs_dd(v_i, tc_covparams)
                           + 2 * np.sum(sl))
            additional_i = self._expected_costs_d(v_i, tc_params)
            jac_i = coeff * numerator_i / denumerator + additional_i
            jac[i] = jac_i
        return jac

    def functional_hess(v):
        coeff = lam / 2
        v_new = np.cumsum(v[::-1])[::-1]
        v2_new = v_new**2
        g = np.sqrt(np.sum(s2 * v2_new)
                    + np.sum(self._var_costs(v, tc_covparams)))
        hess = np.zeros(shape=(len(v), len(v)))
        for i in range(len(v)):
            for j in range(len(v)):
                f_i = (self._var_costs_d(v[i], tc_covparams)
                       + 2 * np.sum((s2 * v_new)[:i]))
                f_ij = (self._var_costs_dd(v[j], tc_covparams) * (i == j)
                        + 2 * np.sum(s2[:np.min([i, j])]))
                g_j = (0.5 * (self._var_costs_d(v[j], tc_covparams)
                              + 2 * np.sum((s2 * v_new)[:j]))
                       / np.sqrt(np.sum(s2 * v2_new)
                                 + np.sum(self._var_costs(v,
                                                          tc_covparams))))
                brackets = f_ij * g - g_j * f_i
                hess_ij = (coeff * brackets / g**2
                           + self._expected_costs_dd(v[j], tc_params)
                           * (i == j))
                hess[i, j] = hess_ij
        return hess

    minimizer_kwargs = {
        'method': 'trust-constr',
        'jac': functional_jac,
        'hess': functional_hess,
        'constraints': [linear_constraint],
        'bounds': bounds
    }
    # result = basinhopping(functional, x0,
    #                       minimizer_kwargs=minimizer_kwargs)
    result = minimize(functional, x0, method='trust-constr',
                      jac=functional_jac, hess=functional_hess,
                      constraints=minimizer_kwargs['constraints'],
                      bounds=minimizer_kwargs['bounds'])
    print(result)

    strategy = result.x * DIV
    strategy_left = np.round(key[7] - np.cumsum(strategy), 0)
    strategy = key[7] - strategy_left
    for j in range(len(strategy) - 1, 0, -1):
        strategy[j] = strategy[j] - strategy[j - 1]
    strategy = np.array(list(map(int, strategy)))

    row = pd.DataFrame(
        columns=self._strategyoptimaltable.get_column_names())
    key_names = self._strategyoptimaltable.get_key()
    row[key_names[0]] = [key[0]]
    row[key_names[1]] = [key[1]]
    row[key_names[2]] = [key[2]]
    row[key_names[3]] = [key[3]]
    row[key_names[4]] = [key[4]]
    row[key_names[5]] = [key[5]]
    row[key_names[6]] = [key[6]]
    row[key_names[7]] = [key[7]]
    row[key_names[8]] = [key[8]]
    row['strategy'] = [strategy.tolist()]

    optimal_strategy = StrategyOptimal()
    optimal_strategy.set_key(key)
    optimal_strategy.set_data(row)
    return optimal_strategy
def nonlinear_basis_pursuit(func, func_jac, func_hess, init_guess, options,
                            eps=0, return_full=False):
    nunknowns = init_guess.shape[0]
    nslack_variables = nunknowns

    def obj(x):
        val = np.sum(x[nunknowns:])
        grad = np.zeros(x.shape[0])
        grad[nunknowns:] = 1.0
        return val, grad

    def hessp(x, p):
        matvec = np.zeros(x.shape[0])
        return matvec

    I = sp.identity(nunknowns)
    tmp = np.array([[1, -1], [-1, -1]])
    # rows encode x - t <= 0 and -x - t <= 0, i.e. |x| <= t for slacks t
    A_con = sp.kron(tmp, I)
    # A_con = A_con.A  # dense
    lb_con = -np.inf * np.ones(nunknowns + nslack_variables)
    ub_con = np.zeros(nunknowns + nslack_variables)
    # print(A_con.A)
    linear_constraint = LinearConstraint(A_con, lb_con, ub_con,
                                         keep_feasible=False)
    constraints = [linear_constraint]

    def constraint_obj(x):
        val = func(x[:nunknowns])
        if func_jac == True:
            return val[0]
        return val

    def constraint_jac(x):
        if func_jac == True:
            jac = func(x[:nunknowns])[1]
        else:
            jac = func_jac(x[:nunknowns])
        if jac.ndim == 1:
            jac = jac[np.newaxis, :]
        jac = sp.hstack(
            [jac, sp.csr_matrix((jac.shape[0], jac.shape[1]), dtype=float)])
        jac = sp.csr_matrix(jac)
        return jac

    if func_hess is not None:
        def constraint_hessian(x, v):
            # see https://prog.world/scipy-conditions-optimization/
            # for an example of how to define a NonlinearConstraint hess
            H = func_hess(x[:nunknowns])
            hess = sp.lil_matrix((x.shape[0], x.shape[0]), dtype=float)
            hess[:nunknowns, :nunknowns] = H * v[0]
            return hess
    else:
        constraint_hessian = BFGS()

    # experimental parameter. does not enforce interpolation but allows
    # some deviation
    nonlinear_constraint = NonlinearConstraint(constraint_obj, 0, eps,
                                               jac=constraint_jac,
                                               hess=constraint_hessian,
                                               keep_feasible=False)
    constraints.append(nonlinear_constraint)

    lbs = np.zeros(nunknowns + nslack_variables)
    lbs[:nunknowns] = -np.inf
    ubs = np.inf * np.ones(nunknowns + nslack_variables)
    bounds = Bounds(lbs, ubs)

    x0 = np.concatenate([init_guess, np.absolute(init_guess)])

    method = get_method(options)
    # method = options.get('method', 'slsqp')
    if 'method' in options:
        del options['method']
    if method != 'ipopt':
        res = minimize(obj, x0, method=method, jac=True, hessp=hessp,
                       options=options, bounds=bounds,
                       constraints=constraints)
    else:
        from ipopt import minimize_ipopt
        from scipy.optimize._constraints import new_constraint_to_old
        con = new_constraint_to_old(constraints[0], x0)
        ipopt_bounds = []
        for ii in range(len(bounds.lb)):
            ipopt_bounds.append([bounds.lb[ii], bounds.ub[ii]])
        res = minimize_ipopt(obj, x0, method=method, jac=True,
                             options=options, constraints=con,
                             bounds=ipopt_bounds)

    if return_full:
        return res.x[:nunknowns], res
    else:
        return res.x[:nunknowns]
# Objective for the allocation problem below; the enclosing `def f(x):`
# is implied by the `minimize(f, ...)` call further down.
def f(x):
    # Optimize: Maximize. Expected Return - Risk - Correlation
    # s.t. Percent of each company allocated: Sum = 1
    #      No negative allocations.
    # (=>)
    # Minimize. -Expected Return + Risk + Correlation.
    #
    # Normalize to make all three equally important.
    #
    # total = (pc_matrix.dot(x).sum() + cov_matrix.dot(x).sum()
    #          + corr_matrix.dot(x).sum())
    # return (-alpha * (pc_matrix.dot(x).sum() / total)
    #         + (1 - alpha) * ((cov_matrix.dot(x).sum() / total)
    #                          + (corr_matrix.dot(x).sum() / total)))
    return pc_matrix.dot(x).sum()  # AMZN exploits max PC return
    # return cov_matrix.dot(x).sum()   # MSFT and AAPL minimize risk/volatility
    # return corr_matrix.dot(x).sum()  # MSFT and AAPL maximize diversification/correlation


DV_SZ = len(tickers)
x0 = np.random.randn(DV_SZ)
x_bounds = [(0, None) for j in range(DV_SZ)]

A = [np.ones(DV_SZ)]
lb = [1]
ub = [1]
x_constraints = LinearConstraint(A, lb, ub)

results = {}
for a in np.arange(0, 1, 0.1):
    print(a)
    res = minimize(f, x0, bounds=x_bounds, constraints=[x_constraints])
    results[a] = res.x

df_results = pd.DataFrame(results)
print(df_results)
xys = ls.find_xys_given_abg(n, alpha_arr, beta_arr, gamma_arr)
xys = xys.reshape(n, 3)
xys = xys[:, 0:2]
xy_target_arr = xys.flatten()

# Initial guesses and bounds for stiffness
abc_arr = 0.5 + np.zeros((3 * n + 1, ))
bounds = np.zeros((3 * n + 1, 2))
bounds[:, 1] = 1
alphas = [0, stiff_max]  # [0, stiff_max]
betas = [0, stiff_max]
gammas = [0, stiff_max]

# Constraints on stiffness
cnts = LinearConstraint(np.identity(3 * n + 1), 0, 1)

# Determining alpha, beta, gamma without constraints
'''res = minimize(ls.obj_func_L2, abc_arr,
               args=(xy_target_arr, alphas, betas, gammas, n),
               method='BFGS', options={'gtol': 1e-6, 'disp': True})'''

# Determining alpha, beta, gamma with constraints
res = minimize(ls.obj_func_L2, abc_arr,
               args=(xy_target_arr, alphas, betas, gammas, n),
               method='SLSQP', constraints=cnts,
               options={'gtol': 1e-6, 'disp': True})
async def shaderecursive(level, prev, probabilities):
    nonlocal fail
    wd = points[level + 1][0]
    hd = points[level + 1][1]
    sdensity = 2
    try:
        density = max(
            3, int(round(routers * 30 / ((routers - 1)**sdensity))))
    except:
        return
    for k in range(density + 1):
        await asyncio.sleep(0)
        prob = k / density
        newprobabilities = probabilities[:]
        newprobabilities.append(prob)
        cx = wd - (1 - prob) * (wd - prev[0])
        cy = hd - (1 - prob) * (hd - prev[1])
        mid = [int(round(cx)), int(round(cy))]
        if level == routers - 1:
            if not social:
                # ndarray so `initial_pis += delta` updates elementwise
                initial_pis = np.array(
                    [1 / routers for i in range(routers)])
                a = time.time()
                try:
                    while True:
                        if time.time() - a > 0.25:
                            fail = True
                            raise Exception()
                        cursub = list(zip(pis, initial_pis)) + list(
                            zip(betas, newprobabilities))
                        subber = np.vectorize(lambda x: x.subs(cursub))
                        A = subber(diffmatrix)
                        b = -1 * np.array(
                            [eq.subs(cursub) for eq in equationset],
                            dtype='float')
                        delta = np.linalg.solve(A.astype(np.float64), b)
                        initial_pis += delta
                        if (np.abs(delta)
                                <= 0.001 * np.ones(routers)).all():
                            break
                    expected_packets = simplify(
                        total_payoff.subs(
                            list(zip(pis, initial_pis))
                            + list(zip(betas, newprobabilities))))
                except:
                    expected_packets = 0
            else:
                def f(mypis):
                    nonlocal pis, total_payoff, betas, newprobabilities
                    return -1 * total_payoff.subs(
                        list(zip(pis, mypis))
                        + list(zip(betas, newprobabilities)))

                res = optimize.minimize(
                    f, [1 / routers for i in range(routers)],
                    constraints=[
                        LinearConstraint([np.ones(routers)], [1], [1])
                    ],
                    bounds=Bounds(np.zeros(routers),
                                  np.inf * np.ones(routers)))
                expected_packets = simplify(
                    total_payoff.subs(
                        list(zip(pis, res.x))
                        + list(zip(betas, newprobabilities))))
            col = colorsys.hls_to_rgb(expected_packets / players, 0.5, 1)
            col = [
                int(round(255 * col[0])),
                int(round(255 * col[1])),
                int(round(255 * col[2])), 255
            ]
            r, g, b, _ = base.getpixel((mid[0], mid[1]))
            if (r, g, b) != (255, 255, 255):
                col[0] = (col[0] + r) // 2
                col[1] = (col[1] + g) // 2
                col[2] = (col[2] + b) // 2
            constant = 10
            draw.ellipse([(mid[0] - constant, mid[1] - constant),
                          (mid[0] + constant, mid[1] + constant)],
                         fill=tuple(col), outline=tuple(col))
            # base.putpixel(mid, tuple(col))
        else:
            await shaderecursive(level + 1, mid, newprobabilities)
def minimize_and_plot(X, Y, kernel, C, thresh):
    n = len(Y)
    # arguments to pass to the minimize function
    args = (Y, X, kernel)

    # define the constraints (page 20) as instances of
    # scipy.optimize.LinearConstraint
    # constrains each alpha to be from 0 to C
    alpha_constr = LinearConstraint(np.eye(n), lb=0, ub=C)
    # constrains the sum of (alpha * y) to be 0
    alpha_y_constr = LinearConstraint(Y, lb=0, ub=0)

    print("Starting computations...")
    # minimization. we are using the ready QP solver 'trust-constr'
    result = minimize(fun=function_to_optimize,
                      method='trust-constr',
                      x0=np.zeros(n),  # start from all-zero alphas (feasible)
                      jac='2-point',
                      hess=BFGS(exception_strategy='skip_update'),
                      constraints=[alpha_constr, alpha_y_constr],
                      args=args)
    # prints the results. If status==0, the optimizer failed to find the
    # optimal value
    print("status:", result.status)
    print("message:", result.message)

    alphas = result.x
    # indexes of support vectors
    sv_inds = find_support_vector_inds(alphas, thresh)
    print(sv_inds)
    print("alphas of support vectors:", '\n', alphas[sv_inds])

    w, b = find_w_b(alphas, Y, X, sv_inds, kernel, thresh, C)

    # create a mesh to plot points and predictions
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xrange, yrange = np.meshgrid(np.arange(x_min, x_max, 0.02),
                                 np.arange(y_min, y_max, 0.02))

    # form a grid by taking each point from the x and y ranges
    grid = np.c_[xrange.ravel(), yrange.ravel()]
    grid = grid.astype(float)

    # make predictions for each point of the grid
    grid_predictions = predict(alphas, Y, X, grid, b, sv_inds, kernel)
    grid_predictions = grid_predictions.reshape(xrange.shape)

    # color grid points according to the prediction made for each point
    plt.contourf(xrange, yrange, grid_predictions, cmap='copper', alpha=0.8)
    # plot initial data points
    plt.scatter(X[:, 0], X[:, 1], c=Y, s=50, cmap='autumn')
    # plot support vectors
    plt.scatter(X[sv_inds, 0], X[sv_inds, 1], s=3, c="black")

    if w is not None:
        # plot the lines on which support vectors should reside
        x_plot = np.linspace(x_min, x_max - 0.02, 1000)
        y_plot_1 = (-w[0] * x_plot - b + 1) / w[1]
        y_plot_2 = (-w[0] * x_plot - b - 1) / w[1]
        plt.plot(x_plot, y_plot_1)
        plt.plot(x_plot, y_plot_2)

    plt.title('SVM Results ' + kernel.__name__)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
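# The snippet above relies on a `function_to_optimize` that is not shown.
# A plausible sketch of it, assuming the standard (negated) soft-margin SVM
# dual from the course notes it cites; the name and signature match the
# `args=(Y, X, kernel)` call above, but the body is an assumption.
import numpy as np


def function_to_optimize(alphas, Y, X, kernel):
    # Gram matrix of the training points under the chosen kernel
    K = np.array([[kernel(xi, xj) for xj in X] for xi in X])
    ay = alphas * Y
    # negate the dual objective so that minimize() effectively maximizes it
    return -np.sum(alphas) + 0.5 * ay @ K @ ay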
Theta.append(np.var(theta))
Theta_hat.append(np.var(theta_hat))
theta.clear()
z.clear()

plt.plot(np.log(N), np.log(Theta), label='without control variate')
plt.plot(np.log(N), np.log(Theta_hat), label='with control variate')
plt.legend()
plt.show()

# 4.a
x0 = [0.1] * 10
A = np.ones(10)
L = [0] * 10
U = [1] * 10
bounds = Bounds(L, U)
linear_constraint = LinearConstraint(A, [1], [1])
w1 = scipy.optimize.minimize(mini_cov, x0,
                             constraints=[linear_constraint],
                             bounds=bounds)
print(w1)

# 4.b
x0 = [0.1] * 10
A = np.ones(10)
L = [0] * 10
U = [1] * 10
bounds = Bounds(L, U)
linear_constraint = LinearConstraint(A, [1], [1])
w2 = scipy.optimize.minimize(optimal_port, x0,
                             constraints=[linear_constraint],
                             bounds=bounds)
print(w2)
def test_loss_intercept_only(loss, sample_weight):
    """Test that fit_intercept_only returns the argmin of the loss.

    Also test that the gradient is zero at the minimum.
    """
    n_samples = 50
    if not loss.is_multiclass:
        y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples))
    else:
        y_true = np.arange(n_samples).astype(float) % loss.n_classes
        y_true[::5] = 0  # exceedance of class 0

    if sample_weight == "range":
        sample_weight = np.linspace(0.1, 2, num=n_samples)

    a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)

    # find minimum by optimization
    def fun(x):
        if not loss.is_multiclass:
            raw_prediction = np.full(shape=(n_samples), fill_value=x)
        else:
            raw_prediction = np.ascontiguousarray(
                np.broadcast_to(x, shape=(n_samples, loss.n_classes)))
        return loss(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
        )

    if not loss.is_multiclass:
        opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100})
        grad = loss.gradient(
            y_true=y_true,
            raw_prediction=np.full_like(y_true, a),
            sample_weight=sample_weight,
        )
        assert a.shape == tuple()  # scalar
        assert a.dtype == y_true.dtype
        assert_all_finite(a)
        assert a == approx(opt.x, rel=1e-7)
        assert grad.sum() == approx(0, abs=1e-12)
    else:
        # The constraint corresponds to sum(raw_prediction) = 0. Without it,
        # we would need to apply loss.symmetrize_raw_prediction to opt.x
        # before comparing.
        opt = minimize(
            fun,
            np.zeros((loss.n_classes)),
            tol=1e-13,
            options={"maxiter": 100},
            method="SLSQP",
            constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0),
        )
        grad = loss.gradient(
            y_true=y_true,
            raw_prediction=np.tile(a, (n_samples, 1)),
            sample_weight=sample_weight,
        )
        assert a.dtype == y_true.dtype
        assert_all_finite(a)
        assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12)
        assert_allclose(grad.sum(axis=0), 0, atol=1e-12)
def optimize(file, actif_1, actif_2, period_1, period_2):
    data = pd.read_excel(file)
    df = data[['Date', actif_1, actif_2]]

    year1, month1, day1 = period_1.split('-')
    year1 = int(year1)
    month1 = int(month1)
    day1 = int(day1)

    year2, month2, day2 = period_2.split('-')
    year2 = int(year2)
    month2 = int(month2)
    day2 = int(day2)

    df = df[datetime.datetime(year1, month1, day1, 0, 0, 0) <= df.Date]
    df = df[df.Date <= datetime.datetime(year2, month2, day2, 0, 0, 0)]

    returns = df.mean().to_dict()
    ecart_type = df.std().to_dict()
    R_f = 0.001
    rho = df.corr().to_dict()[actif_1][actif_2]
    beta = 1

    # optimize
    from scipy.optimize import minimize
    from scipy.optimize import LinearConstraint

    # Create initial point.
    x0 = [0.3, 0.8]

    # Create function to be minimized
    def objective(x):
        alpha_1 = x[0]
        alpha_2 = x[1]
        max_ = alpha_1 * returns[actif_1] + alpha_2 * returns[actif_2] - R_f
        min_ = (beta * (alpha_1**2) * (ecart_type[actif_1]**2)
                + (alpha_2**2) * (ecart_type[actif_2]**2)
                + 2 * alpha_1 * alpha_2 * ecart_type[actif_1]
                * ecart_type[actif_2] * rho)
        return max_ - min_

    A = np.array([1, 1]).reshape(1, 2)
    lbnd = upbnd = 1
    lin_cons = LinearConstraint(A, lbnd, upbnd)
    sol = minimize(objective, x0, constraints=lin_cons)['x']
    # return sol['x'], df

    pivoted = df.set_index('Date')
    pivoted.rename_axis(columns="ticker")
    cov_matrix = pivoted.apply(lambda x: np.log(1 + x)).cov()

    e_r = pivoted.resample('Y').last().pct_change().mean()
    sd = pivoted.apply(lambda x: np.log(1 + x)).std().apply(
        lambda x: x * np.sqrt(250))

    assets = pd.concat([e_r, sd], axis=1)
    assets.columns = ['Returns', 'Volatility']

    p_ret = []
    p_vol = []
    p_weights = []

    num_portfolios = 1
    for portfolio in range(num_portfolios):
        weights = sol
        p_weights.append(weights)
        returns = np.dot(weights, e_r)
        p_ret.append(returns)
        var = cov_matrix.mul(weights, axis=0).mul(weights,
                                                  axis=1).sum().sum()
        sd = np.sqrt(var)
        ann_sd = sd * np.sqrt(250)
        p_vol.append(ann_sd)

    data = {'Returns': p_ret, 'Volatility': p_vol}
    portfolios = pd.DataFrame(data)
    portfolios.index = ['portfolio1']
    op_space = pd.concat([portfolios, assets])
    return op_space
def optimize_portfolio(sd=dt.datetime(2008, 1, 1), ed=dt.datetime(2009, 1, 1),
                       syms=['GOOG', 'AAPL', 'GLD', 'XOM'], gen_plot=False):
    # Read in adjusted closing prices for given symbols, date range
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)  # automatically adds SPY
    prices = prices_all[syms]  # only portfolio symbols
    prices_SPY = prices_all['SPY']
    prices_SPY_nrm = prices_SPY / prices_SPY[0]  # only SPY, for comparison later
    # print(prices.ix[1:].head())

    # find the allocations for the optimal portfolio
    # note that the values here ARE NOT meant to be correct for a test case
    alloc = [1.0 / prices.shape[1]] * prices.shape[1]
    allocs = np.array(alloc)  # add code here to find the allocations
    # print("allocs\n", allocs)

    def optimize_alloc(allocs):
        prices_nrm = prices.copy()
        prices_nrm = prices.iloc[0:] / prices.iloc[0]
        # print("prices_nrm\n", prices_nrm.head())
        ## find values after allocation
        allocated = prices_nrm * allocs
        # print("allocated\n", allocated.head())
        ## daily portfolio values
        port_val = allocated.sum(axis=1)
        # print("port_val\n", port_val.head())
        ## find daily returns
        daily_rets = port_val.copy()
        daily_rets[1:] = port_val[1:] / (port_val[:-1].values) - 1
        daily_rets[0] = 0
        # print("daily_rets\n", daily_rets.head())
        ## sharpe ratio
        # print("daily_mean:", daily_rets.mean())
        # print("daily_std:", daily_rets.std())
        sr = np.sqrt(252.0) * daily_rets.mean() / daily_rets.std()
        # print("Sharpe Ratio\n", sr)
        ## optimize sharpe ratio to get optimal allocs
        return -sr

    bounds = Bounds([0] * prices.shape[1], [1.0] * prices.shape[1])
    linear_constraint = LinearConstraint([1] * prices.shape[1], [1], [1])
    res = spo.minimize(optimize_alloc, allocs, method='trust-constr',
                       bounds=bounds, constraints=linear_constraint,
                       options={'disp': False})
    # print("res", res['x'])
    # print("sumOfRes", res['x'].sum())

    allocs = res['x']  # [0.2, 0.2, 0.2, 0.2, 0.2]
    prices_nrm = prices.copy()
    prices_nrm = prices.iloc[0:] / prices.iloc[0]
    ## find values after allocation
    allocated = prices_nrm * allocs
    ## daily portfolio values
    port_val = allocated.sum(axis=1)
    ## find daily returns
    daily_rets = port_val.copy()
    daily_rets[1:] = port_val[1:] / (port_val[:-1].values) - 1
    daily_rets[0] = 0
    ## sharpe ratio
    sr = np.sqrt(252.0) * daily_rets.mean() / daily_rets.std()
    ## Cumulative Returns
    cr = port_val[-1] / port_val[0] - 1
    ## Average Daily Return
    adr = daily_rets.mean()
    ## Volatility (stdev of daily returns)
    sddr = daily_rets.std()
    # cr, adr, sddr, sr = [0.25, 0.001, 0.0005, 2.1]
    # add code here to compute stats

    # Get daily portfolio value
    # port_val = prices_SPY  # add code here to compute daily portfolio values

    # Compare daily portfolio value with SPY using a normalized plot
    if gen_plot:
        df_temp = pd.concat([port_val, prices_SPY_nrm],
                            keys=['Portfolio', 'SPY'], axis=1)
        ax = df_temp.plot(figsize=(12, 7.5))
        ax.xaxis.set_major_locator(mdates.MonthLocator())
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))
        plt.grid(b=True)
        plt.xlabel("Date")
        plt.ylabel("Price")
        plt.title("Daily Portfolio Value and SPY")
        plt.legend()
        plt.savefig('Figure1.png')
        # plt.show()

    return allocs, cr, adr, sddr, sr
def solve_constraints(targets, surface, l=None):
    global target
    global x_t
    global phi_t

    # Reset surface and flux history.
    x_t = []
    phi_t = []

    # Starting value guess
    x0 = surface.flatten(order='F')
    target = targets

    # Create A matrix of length constraints
    A_s = np.array([
        [1, -1, 0, 0],
        [0, 1, -1, 0],
        [0, 0, 1, -1],
        [-1, 0, 0, 1],
    ])
    A = np.zeros((12, 12))
    A[0:4, 0:4] = A_s
    A[4:8, 4:8] = A_s
    A[8:, 8:] = A_s
    AT = np.transpose(A)
    A_c = np.zeros((12))
    lc_1 = LinearConstraint(A, A_c, A_c)

    # inequality constraints on velocity
    # time step
    dt = 0.05
    # max velocity
    v = 3
    B = np.identity(12)
    # lower bound for the velocity
    B_l = np.ones((12)) * -1 * v * dt
    # upper bound for the velocity
    B_u = np.ones((12)) * 1 * v * dt
    lc_2 = LinearConstraint(B, B_l, B_u)

    n_vertices = surface.shape[0]
    # number of degrees of freedom
    n = n_vertices * 3

    phi = 0
    phi_max = 4.5
    d_phi_p = 0
    alpha_max = 1000

    for i in tqdm.tqdm(range(0, iterations)):
        # alpha = alpha_max / np.linalg.norm(p - ((r7 + r2) * 1 / 2))
        # flux through the left face of the bounding box
        phi = flux(x0)
        phi_ref = np.exp(0.1 * phi)
        J = jacobian_flux(x0)
        J = np.array(J)

        # least-squares approach
        a = (np.identity(n) + (alpha * np.outer(J, J))
             + (beta * np.matmul(AT, A)))
        b = alpha * J * phi_ref
        delta_c = np.linalg.solve(a, b)

        # save a copy of the positions
        x_t.append(np.copy(x0))

        # update vertices
        x0 = x0 + delta_c

        # don't allow the centre of the front face to come within 10 cm
        # of the target point
        x0_f = x0.reshape((4, 3), order='F')
        d = (x0_f[0] + x0_f[2]) * 0.5
        if np.linalg.norm(target - ((x0_f[2] + x0_f[0]) * 1 / 2)) < xtol:
            break

    x_j = np.reshape(x_t, (len(x_t), 4, 3), order='F')
    return x_j, phi_t
    # A_omega_o
    A_omega_o = ((1 - phi_omega_o) * 2**0.5) / (phi_omega_o**1.5)

    # mass_ro
    K_0 = A_io / A_omega_o
    C_o = r_ko - r_bxo / r_kso
    f = np.pi * r_kso**2 * (2 * rho_o * delta_p)**0.5
    g = (phi_omega_o**-2 + (A_io**2 * K_0**2) / (1 - phi_omega_o)
         + Xi_yo * n_o * (A_io / C_o)**2)**0.5
    mass_ro = f / g
    print("mass_ro = {}, u = {}".format(mass_ro, u))
    delta_mass = mass_ro - mass_ro_target
    return delta_mass**2


u0 = np.array([r_kso0, r_bxo0, r_ko0])
con = LinearConstraint([-1, 0, 1], [0], [np.inf])
result = minimize(func, u0, bounds=bnds, method="SLSQP", tol=1e-8,
                  constraints=con)
result_new = [i for i in result.x]
print('r_kso={:.1E},r_bxo={:.1E},r_ko={:.1E},l_bxo={:.1E}'.format(
    result_new[0], result_new[1], result_new[2], 6 * result_new[1]))
def row_L0(Y, Phi, noiseTol, maxK=None, l1oracle=0):
    """
    NOT used in [1] - still may need work

    Optimal solver for real-valued X's (non-integer constrained).

    Solve min_X ( ||X||_{row-0} ) s.t. ||Phi X - Y||_{frob} <= noiseTol,
    where X is a real-valued matrix.

    Try up to max K. If there are multiple solutions at the same sparsity
    within the noise tolerance, pick the one with the least measurement
    error. If none is within the noise tolerance through maxK, still return
    the one with the least measurement error.

    Parameters
    ----------
    maxK : int, optional
        Maximum sparsity to consider. The default is None.
    l1oracle : float [0, inf), optional
        The known sum of the signal matrix X (note X known to be >= 0).
        The default is 0. If set to 0, no l1 is used.

    Returns
    -------
    xHat : array_like
        recovered signal matrix. Shape ``(N, D)``
    """
    M, N = Phi.shape
    _, D = Y.shape
    if maxK is None:
        maxK = M - 1

    if l1oracle < 0:
        raise ValueError("l1oracle cannot be negative")
    elif l1oracle == 0 and maxK > (M):
        raise ValueError(
            "maxK must be <= M-1 if l1oracle=0 (or none is provided)")
    # elif l1oracle > 0:
    #     l1constr = LinearConstraint([[1] * N], l1oracle, l1oracle)

    allErrs = []
    minErr = np.inf
    for k in range(maxK):
        # Solve for all k-column submatrices of Phi
        subIndices = list(combinations(np.arange(N), k + 1))
        # subIndices = [[3, 16]]
        for i in range(len(subIndices)):
            subPhi = Phi[:, subIndices[i]]
            phiStack = block_diag(*[subPhi] * D)
            yStack = np.reshape(Y.transpose(), (D * M))

            if l1oracle == 0:
                # xTemp = lsq_linear(phiStack, yStack, bounds=(0, np.inf))
                # xTemp = lsq_linear(phiStack, yStack)
                # xTemp = xTemp.x
                # print(subPhi)
                # print(subPhi.shape)
                xTemp = np.linalg.inv(
                    subPhi.transpose() @ subPhi) @ subPhi.transpose() @ Y
                normErr = np.linalg.norm(subPhi @ xTemp - Y)
                xHat = np.zeros((N, D))
                xHat[subIndices[i], :] = np.reshape(
                    xTemp, (len(subIndices[i]), D))
                # x0 = np.ones((subPhi.shape[1], D)) / l1oracle
                # xTemp = minimize(cs_lsq, x0, args=(subPhi, Y),
                #                  bounds=(np.zeros(np.shape(x0)),
                #                          np.ones(np.shape(x0)) * np.inf))
                # normErr = np.linalg.norm(phiStack @ xTemp - yStack)
            else:
                # raise ValueError("l1oracle not coded yet")
                # x0 = np.ones((subPhi.shape[1], D)) / l1oracle
                # xTemp = minimize(cs_lsq, x0, args=(subPhi, Y),
                #                  bounds=(0, np.inf),
                #                  constraints=(l1constr))
                dim = D * subPhi.shape[1]
                # Slow
                l1constr = LinearConstraint([[1] * dim], l1oracle, l1oracle)
                x0 = np.ones(dim) / l1oracle
                B = Bounds(0, np.inf)
                xTemp = minimize(cs_lsq, x0, args=(phiStack, yStack),
                                 bounds=B, constraints=(l1constr))
                xTemp = xTemp.x
                # normErr = np.linalg.norm(subPhi @ Y - xTemp)
                normErr = np.linalg.norm(phiStack @ xTemp - yStack)
                xHat = np.zeros((N, D))
                xHat[subIndices[i], :] = np.transpose(
                    np.reshape(xTemp, (D, len(subIndices[i]))))
                """
                # cvxopt
                PHI = matrix(phiStack)
                y = matrix(yStack)
                # G = matrix(np.ones(dim))
                A = matrix([1.0] * dim, (1, dim))
                b = matrix(l1oracle)
                x = solvers.coneqp(PHI.T * PHI, -PHI.T * y, A=A, b=b)
                # x = solvers.coneqp(A.T*A, -A.T*b, G, h, dims)['x']
                xTemp = np.array(x['x'])
                normErr = np.linalg.norm(phiStack @ xTemp - yStack)
                """
            # pdb.set_trace()
            allErrs.append(normErr)
            if normErr < minErr:
                minErr = normErr
                xOut = xHat
        # print('Completed: k' + str(k + 1) + '; subPhi ' + str(i + 1)
        #       + '/' + str(len(subIndices)))
        if minErr < noiseTol:
            break

    return xOut, allErrs
    thet = x[0]
    t = x[2]
    k = get_dir(thet, phi)
    n1, n2 = get_n(k, nd)
    return np.linalg.norm(n2 * k - ninit * kinit - t * sigma)


def fresnel(x):
    return check(get_dir(x[0], phi), x[1], nd)


# constrain theta = x[0] to [-pi/2, pi/2]; LinearConstraint takes a
# coefficient matrix, so the bound on the first coordinate is written as a
# row of coefficients rather than a callable
tc = LinearConstraint([[1, 0, 0]], -np.pi / 2, np.pi / 2)
nlc = NonlinearConstraint(fresnel, 0, 0)

n1, n2 = get_n(kinit, nd)
sol1 = minimize(f1, (theta, n1, 0), constraints=nlc)
sol2 = minimize(f2, (theta, n2, 0), constraints=nlc)

ksol1 = sol1.x[1] * get_dir(sol1.x[0], phi)
ksol1plane = sol1.x[1] * np.array([np.sin(sol1.x[0]), np.cos(sol1.x[0])])
ksol2 = sol2.x[1] * get_dir(sol2.x[0], phi)
ksol2plane = sol2.x[1] * np.array([np.sin(sol2.x[0]), np.cos(sol2.x[0])])
'''
print(np.arcsin(np.sin(theta) * ninit / nd[0]))
print(np.arcsin(np.sin(theta) * ninit / n1))
def optimize_on_day_with_starting_values(date_number, method, theta0):
    """ a copy of theta0 should be supplied for L-BFGS-B and Powell,
    i.e. np.copy() """
    if method == 'L-BFGS-B':
        theta0[2] = theta0[2] + theta0[1]
        loss_func, grad = rep_loss_functions[date_number]
        bounds = Bounds([0.01, 0, 0, -np.inf], [30, np.inf, np.inf, np.inf])
        start = dt.datetime.now()
        res = minimize(loss_func, theta0, method=method,
                       options={'disp': False}, bounds=bounds)
        execution_time = (dt.datetime.now() - start).total_seconds()
        theta = res.x
        theta[2] = theta[2] - theta[1]
        return theta, execution_time
    elif method == 'Powell':
        theta0[2] = theta0[2] + theta0[1]
        loss_func, _ = rep_loss_functions[date_number]
        bounds = ((0.01, 30), (0, 1), (0, 1), (-1, 1))
        start = dt.datetime.now()
        res = minimize(loss_func, theta0, method=method,
                       options={'disp': False}, bounds=bounds)
        print(res)
        execution_time = (dt.datetime.now() - start).total_seconds()
        theta = res.x
        theta[2] = theta[2] - theta[1]
        return theta, execution_time
    elif method == 'trust-constr':
        loss_func = loss_functions[date_number]
        bounds = Bounds([0.01, 0, -1, -1], [30, 1, 1, 1])
        linear_constraint = LinearConstraint([[0, 1, 1, 0]], [0], [np.inf])
        res = minimize(loss_func, theta0, method=method,
                       constraints=[linear_constraint],
                       options={'disp': False}, bounds=bounds)
        return res.x, res.execution_time
    elif method == 'Nelder-Mead':
        loss_func = constrained_loss_functions[date_number]
        start = dt.datetime.now()
        res = minimize(loss_func, theta0, method=method,
                       options={'disp': False})
        execution_time = (dt.datetime.now() - start).total_seconds()
        return res.x, execution_time
def constr(self):
    A = [[1, 2]]
    b = 1
    return LinearConstraint(A, -np.inf, b)
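# A hypothetical usage sketch of the constraint built above: minimize a
# simple quadratic subject to x0 + 2*x1 <= 1 (the objective and starting
# point are illustrative, not from the original class).
import numpy as np
from scipy.optimize import LinearConstraint, minimize

res = minimize(lambda x: (x[0] - 1)**2 + (x[1] - 1)**2, x0=[0.0, 0.0],
               method='trust-constr',
               constraints=[LinearConstraint([[1, 2]], -np.inf, 1)])
print(res.x)  # ~(0.6, 0.2), on the boundary x0 + 2*x1 = 1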