def update_sigma(t, security):
    global SECURITIES
    global NEW_SECURITY_SIGMA
    global case_length
    global Ss
    S = Ss[-1]
    dt = (case_length - t) / case_length / 12
    K = int(security[1:-1])
    C = SECURITIES[security]['price']
    if security[-1:] == "C":  # is call option
        SECURITIES[security]['sigma'].append(
            solve(Black_Scholes_Call(C, S, K, dt), 0.4)[0])
        # print(SECURITIES[security]['sigma'][-1])
    else:
        SECURITIES[security]['sigma'].append(
            solve(Black_Scholes_Put(C, S, K, dt), 0.4)[0])
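# A minimal sketch of the assumed pricing helper: Black_Scholes_Call is
# taken to return an objective in sigma whose root (found by the
# fsolve-style `solve` above) is the implied volatility. Zero rates and
# dividends, and the exact signature, are assumptions, not from the source.
import numpy as np
from scipy.stats import norm

def Black_Scholes_Call(C, S, K, dt):
    def objective(sigma):
        # Black-Scholes call price (r = q = 0) minus the market price C.
        d1 = (np.log(S / K) + 0.5 * sigma**2 * dt) / (sigma * np.sqrt(dt))
        d2 = d1 - sigma * np.sqrt(dt)
        return S * norm.cdf(d1) - K * norm.cdf(d2) - C
    return objective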
def find_coefs(self, fun, freqs, modes):
    # `solve` is assumed to be scipy.optimize.curve_fit (it returns
    # (popt, pcov)). Note that the `fun` argument is unused; the fit is
    # run against self.F.
    popt, pconv = solve(self.F,
                        modes,
                        freqs,
                        bounds=([40, -10, -500], [300, 100, 500]),
                        method='trf')
    perr = np.sqrt(np.diag(pconv))  # 1-sigma parameter errors (unused)
    x, y, z = popt
    return x, y, z
def slow_manifold(alpha_0,
                  dx=None,
                  f_def_obs=None,
                  eps_=None,
                  time=None,
                  rho=1.e-3,
                  measure=None,
                  verbose=False,
                  offset=0.2,
                  method='BFGS',
                  isrecuit=True,
                  niter=50,
                  maxiter=[2000, 250],
                  n_recuit=50):
    '''Identify geometric constraints based on observability analysis'''
    if dx is None:
        print('run f_explore first')
        return -1
    if time is None:
        print('time needed')
        return -1
    if eps_ is None:
        print('need precision')
        return -1
    if f_def_obs is None:
        print('observable defined as y = sum alpha_i x_i')
        f_def_obs = lambda x, alpha: np.dot(alpha, x)
    argums = (f_def_obs, rho, dx, eps_, time, False, False, False, measure,
              offset, verbose)
    if type(maxiter) is not list:
        if type(maxiter) is int or type(maxiter) is float:
            maxiter = [maxiter]
        else:
            print('maxiter ill defined')
            return -1
    if isrecuit is True:
        # Annealing
        opt = {'maxiter': maxiter[1]}
        min_kwargs = {'args': argums, 'method': method, 'options': opt}
        np.random.seed(111111)
        alpha_0 = recuit(cost_gramian,
                         alpha_0,
                         niter=n_recuit,
                         minimizer_kwargs=min_kwargs,
                         T=10.)
        np.random.seed()
        alpha_0 = alpha_0.x
    opt = {'maxiter': maxiter[0]}
    # if no annealing, we finish with a regular minimization
    a = solve(cost_gramian,
              alpha_0,
              method=method,
              tol=1.e-6,
              args=argums,
              options=opt)
    return a
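# Hypothetical usage, assuming f_explore supplies the precomputed gramian
# arrays and snapshot times referenced above:
# dx, time = f_explore(model, x0)
# res = slow_manifold(np.ones(3), dx=dx, eps_=1e-6, time=time,
#                     measure='trace')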
def bootstrap1(face_values,
               coupons,
               lifetimes,
               times,
               freqs,
               bond_prix,
               t=0,
               freq2=0,
               y0=None):
    r"""
    Description:
    --------------
    Complete bootstrapping. This function finds the zero rates when the
    number of unknowns equals the number of market prices at our disposal.

    Parameters:
    ----------
    `face_values`: list
        Face values of the bonds whose prices are given
    `coupons`: list
        Annual coupon rates of the bonds whose prices are given
    `lifetimes`: list
        Lifetimes of the existing bonds/money-market instruments
    `times`: list
        Times for which the corresponding zero rates are found
    `freqs`: list
        Payment frequencies
    `bond_prix`: list
        Market bond prices from which the bootstrapping starts

    For the case when the number of available data points exceeds the
    number of parameters to be found, please refer to the bootstrap2
    function.
    """
    from bond_prices import bond_price
    f = lambda r: [bond_price(face_values[i], coupons[i], lifetimes[i],
                              (times, r.tolist()), freqs[i], t, freq2)
                   - bond_prix[i]
                   for i in range(len(face_values))]
    if y0 is None:
        y0 = np.zeros((1, len(face_values)))
        return optim.fsolve(f, y0)
    return optim.fsolve(f, np.array(y0))
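# A hypothetical call with illustrative numbers only; it assumes
# bond_price(face, coupon, lifetime, (times, rates), freq, t, freq2)
# follows the signature used inside bootstrap1:
# face_values = [100.0, 100.0]
# coupons = [0.05, 0.06]          # annual coupon rates
# lifetimes = [1.0, 2.0]          # years
# times = [1.0, 2.0]              # zero-rate pillars
# freqs = [2, 2]                  # semi-annual payments
# bond_prix = [99.2, 101.5]       # observed market prices
# zero_rates = bootstrap1(face_values, coupons, lifetimes, times, freqs,
#                         bond_prix)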
def find_coefs(self, f, freqs, modes, limits):
    """freqs are the measured frequencies;
    limits are the bounds given by the PSO."""
    popt, pconv = solve(f,
                        modes,
                        freqs,
                        bounds=(limits - self.delta, limits + self.delta),
                        method='trf')
    perr = np.sqrt(np.diag(pconv))  # 1-sigma parameter errors (unused)
    x, y, z = popt
    return x, y, z
def missing_at_random(X, Y, D):
    # print('missing_at_random')
    def score_of_phi(phi):
        res = [0, 0]
        for i in range(n):
            t = D[i] - get_pi(phi, X[i])
            res[0] += t
            res[1] += t * X[i]
        return res

    phi_init = np.array([random_init(), random_init()])
    phi_mle = solve(score_of_phi, phi_init)
    return get_mle_of_theta(phi_mle, Y, D)
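# A minimal sketch of the assumed propensity model shared by these
# estimators: get_pi is taken to be a logistic probability P(D = 1 | v)
# with parameters phi. Its actual definition is not shown in the source.
import math

def get_pi(phi, v):
    return 1.0 / (1.0 + math.exp(-(phi[0] + phi[1] * v)))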
def gaussian_mixture(X, Y, D):
    # print('gaussian_mixture')
    def score_of_phi_ck(phi):
        res = [0, 0]
        for i in range(n):
            t = D[i] / get_pi(phi, Y[i]) - 1
            res[0] += t
            res[1] += t * X[i]
        return res

    phi_init = np.array([random_init(), random_init()])
    phi_mle = solve(score_of_phi_ck, phi_init)
    return get_mle_of_theta(phi_mle, Y, D)
def fully_parametric(X, Y, D):
    # print('fully_parametric')
    # Here we use MCEM (Monte Carlo EM).
    def score_of_beta_for_mcem(beta):
        res = [0, 0]
        for j in range(m):
            for i in range(n):
                t = Y_new[j][i] - beta[0] - beta[1] * X[i]
                res[0] += t
                res[1] += t * X[i]
        return res

    def get_sigma_sq(beta):
        res = 0
        for j in range(m):
            for i in range(n):
                t = Y_new[j][i] - beta[0] - beta[1] * X[i]
                res += t * t
        return res / (n * m)

    def is_end(cur, prev):
        diff = 0
        for i in range(3):
            t = cur[i] - prev[i]
            diff += t * t
        return diff < eps * 100

    eta = [random_init(), random_init(), abs(random_init())]
    eta_prev = [eta[0] - 1, eta[1] - 1, eta[2] + 1]
    while not is_end(eta, eta_prev):
        eta_prev = deepcopy(eta)
        # E-step: impute missing responses by sampling from the current model.
        Y_new = [[
            Y[i] if D[i] == 1 else sample_for_mcem(X[i], eta_prev)
            for i in range(n)
        ] for _ in range(m)]
        # M-step: re-estimate the regression coefficients and variance.
        beta_prev = np.array([eta_prev[0], eta_prev[1]])
        beta = solve(score_of_beta_for_mcem, beta_prev)
        sigma_sq = get_sigma_sq(beta)
        eta = [beta[0], beta[1], sigma_sq]
    beta = eta[:2]
    return np.average(
        [Y[i] if D[i] == 1 else (beta[0] + beta[1] * X[i]) for i in range(n)])
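# A minimal sketch of the assumed E-step sampler: sample_for_mcem is taken
# to draw an imputed response Y | X = x from N(eta[0] + eta[1] * x, eta[2]),
# matching the eta = [beta0, beta1, sigma_sq] convention above. This is an
# assumption; the source does not show its definition.
import numpy as np

def sample_for_mcem(x, eta):
    return np.random.normal(eta[0] + eta[1] * x, np.sqrt(eta[2]))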
def generateOutputs(scaleFile, zInitial, zFinal, numSnapShots):
    from scipy.optimize import brentq as solve
    aInitial = gt.a(zInitial)
    aFinal = gt.a(zFinal)
    fInitial = outputsFunc(aInitial)
    fFinal = outputsFunc(aFinal)
    with open(scaleFile, "w") as fw:
        for i in range(numSnapShots):
            if i == 0:
                acurr = aInitial
            elif i == numSnapShots - 1:
                acurr = aFinal
            else:
                # Interpolate linearly in f-space, then invert outputsFunc
                # by root bracketing to recover the scale factor.
                f = fInitial + (fFinal - fInitial) * i / float(numSnapShots - 1)

                def func(a):
                    return outputsFunc(a) - f

                acurr = solve(func, aInitial, aFinal)
            fw.write(str(acurr) + "\n")
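# Hypothetical usage, assuming gt.a converts redshift to scale factor
# (a = 1/(1+z)) and outputsFunc defines the snapshot spacing:
# generateOutputs("outputs.txt", zInitial=99.0, zFinal=0.0, numSnapShots=64)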
def sample(n, test_integrality=False):
    '''
    Assigns all n^2 edge weights of a K_{n,n} independently as U(0, 1).
    Returns the min-weight matching cost.
    '''
    # weights iid U(0, 1)
    w = rand(size=n**2)
    # constraint matrix
    # Note: the linprog solver uses x >= 0 by default,
    # so those constraints are implicit.
    A = np.zeros((2 * n, n**2))
    for i in range(n):
        for j in range(n):
            A[i][i * n + j] = 1
            A[n + j][i * n + j] = 1
    # right-hand side
    b = np.ones((2 * n, 1))
    # solve for x and objective value
    full_results = solve(w, A_eq=A, b_eq=b)
    x = full_results.x
    obj = full_results.fun
    if test_integrality:
        # The assignment LP is integral (its constraint matrix is totally
        # unimodular), so the solution should be a 0/1 matching up to
        # numerical error.
        integrality_error = 0
        for i in range(n):
            for j in range(n):
                ndx = i * n + j
                integrality_error += abs(x[ndx] - round(x[ndx]))
        return integrality_error
    return obj
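# A hypothetical driver for sample(), with the assumed aliases spelled out
# (solve = scipy.optimize.linprog, rand = uniform weights). For the random
# assignment problem the expected minimum cost tends to pi^2/6 as n grows
# (Aldous 2001), so the empirical mean is a quick sanity check.
import numpy as np
from scipy.optimize import linprog as solve
from numpy.random import random_sample as rand

costs = [sample(20) for _ in range(50)]
print(np.mean(costs), np.pi**2 / 6)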
def reconverge(self):
    # `solve` is assumed to be a root finder in the scipy.optimize.root
    # family (the result exposes .x).
    return solve(self.eqGperp, self.merge_uh(self.u_n, self.h_n)).x
def fun_owen(ep):
    # Solve the implicit equation for h; `solve` is assumed to be
    # scipy.optimize.fsolve (it returns an array, hence the [0]).
    sigma = np.imag(ep) / np.abs(ep - 1)**2
    fun = lambda h: h - 2 / np.pi * sigma / (1 - np.sinc(2 * h)**2)
    init = 1 / np.imag(np.sqrt(ep)) / 2 / np.pi
    return solve(fun, init)[0]
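# Hypothetical usage with an illustrative lossy-metal permittivity:
# ep = -20.0 + 1.0j
# h = fun_owen(ep)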
def slow_manifold(alpha_0,
                  dx=None,
                  f_def_obs=None,
                  eps_=None,
                  time=None,
                  rho=1.e-3,
                  measure=None,
                  verbose=False,
                  offset=0.2,
                  offtype='slow',
                  method='Powell',
                  isrecuit=True,
                  niter=10,
                  maxiter=[10, 1],
                  n_recuit=100,
                  r=None):
    '''
    Identify geometric constraints based on observability analysis.

    alpha_0   array. Initial conditions for the minimization. Size is the
              number of constraints.
    dx        array. Precomputed arrays for the gramian. Output from f_explore.
    f_def_obs function that defines the observable.
              obs = f_def_obs(x) = sum a_i x_i + sum b_i x_i**2 + sum c_i x_i**3
    eps_      real. Parameter for the gramian.
    time      array. Times corresponding to the snapshots.
    rho       real. Parameter for the minimization.
    measure   str. Metric for the gramian, such as 'trace'.
    verbose   bool.
    offset    real. Offset to focus on a given time scale. Has to be between
              0 and 1, e.g. 0.2.
    offtype   str. Time scale of interest, e.g. 'slow'.
    method    str. Minimization method, e.g. 'Powell' or 'BFGS'.
    isrecuit  bool. Annealing.
    niter     int. Number of annealing steps.
    maxiter   array. Number of iterations and function evaluations.
    n_recuit  int. Number of annealing runs.
    '''
    if dx is None:
        print('run f_explore first')
        return -1
    if time is None:
        print('time needed')
        return -1
    if eps_ is None:
        print('need precision')
        return -1
    if f_def_obs is None:
        print('observable defined as y = sum alpha_i x_i')
        f_def_obs = lambda x, alpha: np.dot(alpha, x)
    argums = (f_def_obs, rho, dx, eps_, time, False, False, False, measure,
              offset, offtype, False, r)
    if type(maxiter) is not list:
        if type(maxiter) is int or type(maxiter) is float:
            maxiter = [maxiter]
        else:
            print('maxiter ill defined')
            return -1
    if isrecuit is True:
        # Annealing
        opt = {'maxiter': 0, 'disp': True, 'maxfev': len(alpha_0)}
        print(opt)
        min_kwargs = {'args': argums, 'method': 'Powell', 'options': opt}
        np.random.seed(111111)
        alpha_0 = recuit(cost_gramian,
                         alpha_0,
                         niter=n_recuit,
                         minimizer_kwargs=min_kwargs,
                         T=20.,
                         disp=True)
        np.random.seed()
        print(alpha_0)
        alpha_0 = alpha_0.x
    opt = {'maxiter': maxiter[0], 'disp': True}
    # if no annealing, we finish with a regular minimization;
    # fall back to BFGS if the requested method fails.
    try:
        a = solve(cost_gramian,
                  alpha_0,
                  method=method,
                  tol=1.e-5,
                  args=argums,
                  options=opt)
    except Exception:
        a = solve(cost_gramian,
                  alpha_0,
                  method='BFGS',
                  tol=1.e-5,
                  args=argums,
                  options=opt)
    return a
def new_method(X, Y, D):
    # print('new_method')
    beta = [0, 0]

    def score(d, y, phi):
        t = d - get_pi(phi, y)
        return t, t * y

    def odds(y, phi):
        pi = get_pi(phi, y)
        return (1 - pi) / pi

    def score_of_phi_for_em(phi):
        res = [0, 0]
        for i in range(n):
            if D[i] == 1:
                s0, s1 = score(D[i], Y[i], phi)
                res[0] += s0
                res[1] += s1
            else:
                w_sum = 0
                ans = [0, 0]
                for j in range(n):
                    if D[j] == 1:
                        w = W[i][j]
                        w_sum += w
                        s0, s1 = score(D[i], Y[j], phi)
                        ans[0] += w * s0
                        ans[1] += w * s1
                res[0] += ans[0] / w_sum
                res[1] += ans[1] / w_sum
        return res

    def score_of_beta(beta):
        res = [0, 0]
        for i in range(n):
            if D[i] == 1:
                t = Y[i] - beta[0] - beta[1] * X[i]
                res[0] += t
                res[1] += t * X[i]
        return res

    def get_sigma_sq(beta):
        r = 0
        res = 0
        for i in range(n):
            if D[i] == 1:
                t = Y[i] - beta[0] - beta[1] * X[i]
                res += t * t
                r += 1
        return res / r

    def get_diff(cur, prev):
        diff = 0
        for i in range(2):
            t = cur[i] - prev[i]
            diff += t * t
        # print(diff)
        return diff
        # return diff < 2e-2

    # Fit beta on the complete cases, retrying a few times if the root
    # finder fails to converge from a random start.
    my_iter = 0
    while True:
        beta_init = np.array([random_init(), random_init()])
        if my_iter < 10:
            try:
                beta = solve(score_of_beta, beta_init)
                break
            except RuntimeWarning:
                print('No Convergence Error! in new method')
                my_iter += 1
        else:
            beta = solve(score_of_beta, beta_init)
            break
    # print('beta: ' + str(beta))
    sigma_sq = get_sigma_sq(beta)
    # print('sigma_sq:' + str(sigma_sq))

    def f1(i, j):
        t = Y[j] - beta[0] - beta[1] * X[i]
        return math.exp(-t * t / (2 * sigma_sq)) / math.sqrt(
            2 * math.pi * sigma_sq)

    def coeff(j):
        ans = 0
        for l in range(n):
            if D[l] == 1:
                ans += f1(l, j)
        return ans

    C = [coeff(i) if D[i] == 1 else 1 for i in range(n)]
    W_base = [[0 for _ in range(n)] for __ in range(n)]
    for i in range(n):
        for j in range(n):
            if D[j] == 1:
                W_base[i][j] = f1(i, j) / C[j]
    # phi = np.array([random_init(), random_init()])
    phi = np.array(deepcopy(phi_true))
    min_diff = 100
    phi_best = deepcopy(phi)
    my_iter = 0
    # EM loop: update the weights from the current phi, re-solve the
    # weighted score equations, and stop once the update stalls.
    while True:
        phi_prev = deepcopy(phi)
        W = [[odds(Y[j], phi_prev) * W_base[i][j] for j in range(n)]
             for i in range(n)]
        phi = solve(score_of_phi_for_em, np.array(phi_prev))
        diff = get_diff(phi, phi_prev)
        if min_diff > diff:
            phi_best = deepcopy(phi)
            min_diff = diff
            my_iter = 0
        else:
            my_iter += 1
        if diff < eps or my_iter > 10:
            break
    # print('phi_best: ' + str(phi_best))
    return get_mle_of_theta(phi_best, Y, D)