def minimize(self, error_func, x):
    #print("start seed", np.count_nonzero(self.dev_steps))
    # Use "is None": comparing a numpy array to None with == is elementwise and
    # fails in a boolean context.
    if self.dev_steps is None or len(self.dev_steps) != len(x):
        print("initial simplex is None")
        isim = None
    elif np.count_nonzero(self.dev_steps) != len(x):
        print("There is zero step. Initial simplex is None")
        isim = None
    else:
        #step = np.ones(len(x))*0.05
        # Build an (n+1) x n initial simplex: the first vertex is x, each further
        # vertex steps one coordinate by its dev_step.
        isim = np.zeros((len(x) + 1, len(x)))
        isim[0, :] = x
        for i in range(len(x)):
            vertex = np.zeros(len(x))
            vertex[i] = self.dev_steps[i]
            isim[i + 1, :] = x + vertex
        print("ISIM = ", isim)
    #res = optimize.minimize(error_func, x, method='Nelder-Mead', tol=self.xtol,
    #                        options = {'disp': False, 'initial_simplex': [0.05, 0.05], 'maxiter': self.max_iter})
    # Compare parsed version numbers; a plain string comparison is lexicographic
    # and mis-orders e.g. "0.9" vs "0.18".
    scipy_version = tuple(int(v) for v in scipy.__version__.split(".")[:2])
    if scipy_version < (0, 18):
        res = optimize.fmin(error_func, x, maxiter=self.max_iter, maxfun=self.max_iter,
                            xtol=self.xtol)
    else:
        res = optimize.fmin(error_func, x, maxiter=self.max_iter, maxfun=self.max_iter,
                            xtol=self.xtol, initial_simplex=isim)
    #print("finish seed")
    return res
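# A minimal, self-contained sketch of the same idea, assuming per-parameter steps
# are known and SciPy >= 0.18: build an (n+1) x n simplex whose extra vertices step
# one coordinate at a time, and hand it to Nelder-Mead. The toy objective and step
# sizes below are illustrative only, not part of the class above.
import numpy as np
from scipy import optimize

def rosen(v):
    x, y = v
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2

x0 = np.array([-1.2, 1.0])
steps = np.array([0.1, 0.1])             # one non-zero step per parameter

isim = np.zeros((len(x0) + 1, len(x0)))  # n+1 vertices in n dimensions
isim[0, :] = x0
for i in range(len(x0)):
    vertex = np.zeros(len(x0))
    vertex[i] = steps[i]
    isim[i + 1, :] = x0 + vertex

# optimize.minimize(..., method='Nelder-Mead', options={'initial_simplex': isim})
# is the equivalent call through the newer interface.
xmin = optimize.fmin(rosen, x0, xtol=1e-6, maxiter=500, maxfun=500,
                     initial_simplex=isim, disp=False)
print(xmin)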
def test_consistency(self): if not SP: raise nose.SkipTest("SciPy not installed.") # 1D case a = 1 rho = .8 sigma = .1 r = rarlognormal(a, sigma, rho, size=1000) opt = fmin(self.like, (.8, .4, .9), args=([r],), disp=0) assert_array_almost_equal(opt, [rho, sigma, a], 1) # 2D case a = (1, 2) sigma = .1 rho = .7 r = rarlognormal(a, sigma, rho, size=1000) opt = fmin( self.like, (.75, .15, 1.1, 2.1), xtol=.05, args=(r, ), disp=0) assert_array_almost_equal(opt, (rho, sigma) + a, 1)
def periodic_model(lomb_model): """ Compute features related to the extreme points of the fitted Lomb Scargle model. """ out_dict = {} A = lomb_model['freq_fits'][0]['amplitude'] ph = lomb_model['freq_fits'][0]['rel_phase'] def model_f(t): return (A[0] * np.sin(2. * np.pi * t + ph[0]) + A[1] * np.sin(2. * np.pi * 2. * t + ph[1]) + A[2] * np.sin(2. * np.pi * 3. * t + ph[2]) + A[3] * np.sin(2. * np.pi * 4. * t + ph[3]) + A[4] * np.sin(2. * np.pi * 5. * t + ph[4]) + A[5] * np.sin(2. * np.pi * 6. * t + ph[5]) + A[6] * np.sin(2. * np.pi * 7. * t + ph[6]) + A[7] * np.sin(2. * np.pi * 8. * t + ph[7])) def model_neg(t): return -1. * model_f(t) # Start finding 1st minima, at 5% of phase (fudge/magic number) > 0.018 min_1_a = optimize.fmin(model_neg, 0.05, disp=False)[0] max_2_a = optimize.fmin(model_f, min_1_a + 0.01, disp=False)[0] min_3_a = optimize.fmin(model_neg, max_2_a + 0.01, disp=False)[0] max_4_a = optimize.fmin(model_f, min_3_a + 0.01, disp=False)[0] # TODO !!! is this wrong? seems like it should be a minus out_dict['phi1_phi2'] = (min_3_a - max_2_a) / (max_4_a / min_3_a) out_dict['min_delta_mags'] = abs(model_f(min_1_a) - model_f(min_3_a)) out_dict['max_delta_mags'] = abs(model_f(max_2_a) - model_f(max_4_a)) return out_dict
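# Hedged sketch of the extremum-walking pattern used above, on a toy two-harmonic
# model (the real code uses the Lomb-Scargle amplitudes and phases in A and ph):
# each fmin call is seeded just past the previous extremum so the simplex walks
# forward through the folded curve instead of re-finding the same point.
import numpy as np
from scipy import optimize

def model(t):
    return np.sin(2. * np.pi * t) + 0.3 * np.sin(2. * np.pi * 2. * t)

def model_neg(t):
    return -1. * model(t)

t_max1 = optimize.fmin(model_neg, 0.05, disp=False)[0]        # first maximum
t_min1 = optimize.fmin(model, t_max1 + 0.01, disp=False)[0]   # following minimum
t_max2 = optimize.fmin(model_neg, t_min1 + 0.01, disp=False)[0]
print(abs(model(t_max1) - model(t_max2)))   # e.g. difference between the two maxima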
def get_rmin(a, e, w, i, omega, direction): if direction == 'horizontal': func_min = so.fmin(partial(get_rhor, a=a, e=e, w=w, i=i, omega=omega), pi/2, disp=False) elif direction == 'vertical': func_min = so.fmin(partial(get_rver, a=a, e=e, w=w, i=i, omega=omega), pi/2, disp=False) # func_min = so.fmin(func, pi/2) return func_min
def fit_coupled_lsq(v1,v2): ns=float(len(v1)) co1=mean(v1) co2=mean(v2) ff1,a11,p11,res=maxharm_lsq(v1-co1) ff2,a22,p22,res=maxharm_lsq(v2-co2) t=arange(ns) a12,p12=_get_harm_coef(t,v1,ff2) a21,p21=_get_harm_coef(t,v2,ff1) x=a12,p12 def ftomin(x): a12,p12=x vv1=mk_harmreal_vec(t,[0,ff1,ff2],[co1,a11,a12],[0,p11,p12]) res=sum( (vv1-v1)**2) return res x=fmin(ftomin,x,xtol=1e-10, ftol=1e-10,disp=False) res=ftomin(x) a21,p21=x x=a21,p21 def ftomin(x): a21,p21=x vv2=mk_harmreal_vec(t,[0,ff1,ff2],[co1,a21,a22],[0,p21,p22]) res=sum( (vv2-v2)**2) return res x=fmin(ftomin,x,xtol=1e-10, ftol=1e-10,disp=False) a21,p21=x res+=ftomin(x) return co1,co2,ff1,ff2,a11,a12,a21,a22,p11,p12,p21,p22,res
def run_round(timestep): # Recreate the neighborhood for each node for i in range(m): neighborhoods[i] = make_neighborhood(i, blissi, ai[timestep-1], neighbors, n) # Utility maximization for central authority def UC(x): mean = sum(ai[timestep-1]) / m return (-wC[0] * (x - blissC) ** 2 - wC[1] * (x - mean) ** 2) aC[timestep] = optimize.fmin(lambda x: -UC(x), 0, disp=0)[0] # Utility maximization for non-central authority def UN(x): mean = sum(ai[timestep-1]) / m return (-wN[0] * (x - blissN) ** 2 - wN[1] * (x - mean) ** 2 - gamma * (x-aC[timestep]) ** 2) aN[timestep] = optimize.fmin(lambda x: -UN(x), 0, disp=0)[0] # Utility maximization for each agent def UI(x, node): actions = [] for neighbor in neighborhoods[node]: actions.append(ai[timestep-1][neighbor]) mean_neighbors = sum(actions) / len(neighborhoods[node]) return (-w[0] * (x - blissi[node]) ** 2 - w[1] * (x - mean_neighbors) ** 2 - w[2] * (x-aN[timestep]) ** 2 - w[3] * (x - aC[timestep]) ** 2) for i in range(m): if i == 0: ai[timestep] = [] ai[timestep].append(optimize.fmin(lambda x: -UI(x, i),0, disp=0)[0]) return
def fit_stretched_exponential(_x, _y=None, x_min=None, x_max=None, x_min_guess=None, y0=None, tau=None, beta=None, show=False, x_scale='linear', y_scale='linear', ax='None', title='None'): """Fits a stretched exponential y0 -exp([x / tau]^beta) for each x > x_min. """ x_, y_ = prepare_xy(_x, _y) x_min = x_min_2_x_min(x_min, x_, y_, _x, _y) x, y = chop_xy(x_, y_, x_min, x_max) # x -= x_min if x_min != x_min_guess or y0 is None or tau is None or beta is None: x_no_zeros, y_no_zeros = remove_zeros(x, y) logy = log(y_no_zeros) param_lambda, logy0, _, _, _ = linregress(x_no_zeros, logy) tau, beta, y0 = 1. / param_lambda, 0.7, exp(logy0) tau, beta, y0 = fmin(cost_function, (tau, beta, y0), args=(x, y), disp=0) tau, beta, y0 = fmin(cost_function_integral, (tau, beta, y0), args=(x, y), disp=0) if show: show_fit('stretched exponential', x_=x_, y_=y_, x=x, y=y, y0=y0, tau=tau, beta=beta, x_scale=x_scale, y_scale=y_scale, ax=ax, title=title) return x_min, tau, beta, y0
def estimateGaussianParams(inputdata):
    sigma, mean = 1, 0
    # fmin returns a length-1 array here, so take the scalar first element before
    # reusing and rounding it.
    mean_estimated = optimize.fmin(lambda m: gauss_likelyhood(m, sigma, inputdata), mean)[0]
    sigma_estimated = optimize.fmin(lambda s: gauss_likelyhood(mean_estimated, s, inputdata), sigma)[0]
    mean_estimated = round(mean_estimated, 2)
    sigma_estimated = round(sigma_estimated, 2)
    return mean_estimated, sigma_estimated
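# The function above alternates two 1-D fits with the other parameter held fixed.
# If gauss_likelyhood is (proportional to) a negative log-likelihood, both
# parameters can also be fit jointly in a single fmin call; a self-contained
# sketch with an explicit Gaussian negative log-likelihood (all names here are
# illustrative, not taken from the code above):
import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
data = rng.normal(loc=1.5, scale=0.7, size=500)

def neg_log_likelihood(params, x):
    mu, sigma = params
    if sigma <= 0:
        return np.inf                      # keep the simplex out of invalid territory
    return 0.5 * np.sum(((x - mu) / sigma) ** 2) + x.size * np.log(sigma)

mu_hat, sigma_hat = optimize.fmin(neg_log_likelihood, [0.0, 1.0], args=(data,), disp=False)
print(round(mu_hat, 2), round(sigma_hat, 2))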
def fit_circle(rad_guess,x_guess,y_guess,pts,method,verbose=True): def error_function(params): center = np.matrix((params[0],params[1])).T rad = params[2] #print 'pts.shape', pts.shape #print 'center.shape', center.shape #print 'ut.norm(pts-center).shape',ut.norm(pts-center).shape err = ut.norm(pts-center).A1 - rad res = np.dot(err,err) return res params_1 = [x_guess,y_guess,rad_guess] if method == 'fmin': r = so.fmin(error_function,params_1,xtol=0.0002,ftol=0.000001,full_output=1,disp=verbose) opt_params_1,fopt_1 = r[0],r[1] elif method == 'fmin_bfgs': r = so.fmin_bfgs(error_function,params_1,full_output=1,disp=verbose) opt_params_1,fopt_1 = r[0],r[1] else: raise RuntimeError('unknown method: '+method) params_2 = [x_guess,y_guess+2*rad_guess,rad_guess] if method == 'fmin': r = so.fmin(error_function,params_2,xtol=0.0002,ftol=0.000001,full_output=1,disp=verbose) opt_params_2,fopt_2 = r[0],r[1] elif method == 'fmin_bfgs': r = so.fmin_bfgs(error_function,params_2,full_output=1,disp=verbose) opt_params_2,fopt_2 = r[0],r[1] else: raise RuntimeError('unknown method: '+method) if fopt_2<fopt_1: return opt_params_2[2],opt_params_2[0],opt_params_2[1] else: return opt_params_1[2],opt_params_1[0],opt_params_1[1]
def fit_posterior ( fx, x ): post = [] I = 10000 N = 10000 mu = x[0][np.argmax(fx[0])] fx[0] /= fx[0].max() mprm = fmin ( error_gauss, [1.,mu,1.5], args=(fx[0],x[0]), maxfun=N, maxiter=I ) print mprm post.append ( "Gauss(%g,%g)" % ( mprm[1],mprm[2]**2 ) ) fx[1] /= fx[1].max() wprm = fmin ( error_gamma, [1.,2,4], args=(fx[1],x[1]), maxfun=N, maxiter=I ) post.append ( "Gamma(%g,%g)" % ( wprm[1]**2,wprm[2]**2 ) ) fx[2] /= fx[2].max() lprm = fmin ( error_beta, [1.,2,20], args=(fx[2],x[2]), maxfun=N, maxiter=I ) post.append ( "Beta(%g,%g)" % ( lprm[1]**2,lprm[2]**2 ) ) if len(fx)>3: fx[3] /= fx[3].max() gprm = fmin ( error_beta, [1.,2,20], args=(fx[3],x[3]), maxfun=N, maxiter=I ) post.append ( "Beta(%g,%g)" % ( gprm[1]**2,gprm[2]**2 ) ) return post
def optimizer(model,data,opt_function,cost_function,**kwargs): print 'Guessed Variables' print_vars(model.opt_dict) c = model.opt_dict.values() if kwargs: model.calc_freq_data(data.f) initial_error = cost_function(model,data,kwargs) else: initial_error = cost_function(model,data) print 'Initial Error: %s'%(initial_error,) if kwargs: new_var_dict = fmin(opt_function,c,args=(model,data,cost_function,kwargs)) else: new_var_dict = fmin(opt_function,c,args=(model,data,cost_function)) optimized_dict = var_dict_w_new_values(new_var_dict,model) opt_model = model.copy(var_dict=optimized_dict) if kwargs: opt_model.calc_freq_data(data.f) final_error = cost_function(opt_model,data,kwargs) else: final_error = cost_function(opt_model,data) print 'Final Error: %s'%(final_error,) print 'Optimized Variables' print_vars(optimized_dict) return optimized_dict
def getErrorAllBins(): if searchAllRange: #we want to find results of those values, then we will look for the minimum numOfBins=np.r_[2:40:1] modulo=np.r_[sigma/2:sigma*4:.1] #modulo=np.r_[0.5:4:.1] allOptions=np.matrix(list(itertools.product(numOfBins,modulo))).T #find errors if parallel: errors=np.matrix((Parallel(n_jobs=num_cores)(delayed(getErrorVec)(i[1],i[0],samples) for i in allOptions.T.tolist()))) else: errors=getErrorVec(allOptions[1],allOptions[0],samples).A1 else:#not maintain anymore... print fmin(getError,0.1,args=(4,samples)) #when using brent should try only >0 because at 0 you will get divide by 0 #find the minimum mse for each number of bins: first we sort, then we take the smallest #we double sort by mse then by #bins #errors=(sorted(sorted(errors.tolist(),key=lambda e:e[0], reverse=True),key=lambda e:e[2], reverse=True)) errors=sorted(errors.tolist(),key=lambda e:e[0], reverse=True) errors=(sorted(errors,key=lambda e:e[2], reverse=True)) #take the last value for each # of bins errors={i[2]:[i[0],i[1],i[3],i[4]] for i in errors} return errors
def fit(self, x0, maxiter=None): """ Greene p. 487 for weight matrix. """ x0 = np.array(x0).reshape(len(self.exog), 1) round_one = optimize.fmin(self.mom_gen, x0=x0, maxiter=maxiter) ### Now Solve for Optimal W # Greene p. 490; Check if this is right. # Using White's (1980) estimator. round_one = round_one.reshape(len(self.exog), 1) y = self.data[self.endog].values X = self.data[self.exog].values if self.form == 'exp': e = y - np.exp(dot(X, round_one)) else: e = y - np.exp(dot(X, round_one)) Z = self.data[self.exog + self.instruments].values for i, obs in enumerate(Z): zi = obs.reshape(Z.shape[1], -1) if i == 0: W = dot(zi, zi.T) * (e[i]) ** 2 else: W += dot(zi, zi.T) * (e[i]) ** 2 self.W = inv(W / self.n) # round_two implicitly uses W. Probably a better way. # This also caches W forever until explicity removed. round_two = optimize.fmin(self.mom_gen, x0=round_one, maxiter=maxiter) return round_two
def find_bells(self, sigmin, varsig, max_peaks=None, sh_type="Gaus"): "my new not so good algorithm" self.sh_type = sh_type self.sh_func = _SH_FUNCTIONS[sh_type] wmin = 2. * sigmin ** 2 y_ar = self.y_ar x_ar = self.x_ar area = np.trapz(y_ar, x_ar) hght = y_ar.max() if self.lambda21: area /= 1. + self.I2 hght /= 1. + self.I2 self.max_h = hght mp = (len(x_ar)) // 4 if max_peaks is None or max_peaks <= 0 or max_peaks > mp: max_peaks = mp sigma2 = (y_ar ** 2).sum() / len(y_ar) self.peaks = None proc_search = True done = 0 peak_add = True opt_x = np.array([]) while True: prev_opt_x = opt_x prev_sigma2 = sigma2 done += 1 xh = np.zeros(done * 2) h = hght / done w = ((area / done) / h) ** 2 / np.pi xh = xh.reshape(done, 2) xh[:, 0] = np.linspace(x_ar[0], x_ar[-1], done + 2)[1: -1] xh[:, 1] = h opt_x = np.zeros(done * 2 + 1) opt_x[:-1] = xh.reshape(done * 2) opt_x[-1] = w opt_x, sig2, itr, fcs, wflg = \ fmin(self.calc_deviat3, opt_x, full_output=True, disp=False) if prev_sigma2 < sigma2: if done > 1: opt_x = prev_opt_x done -= 1 break if done == max_peaks: break bls = np.zeros((done, 3)) bls[:, :2] = opt_x[:-1].reshape(done, 2) bls[:, 2] = opt_x[-1] bls = bls.reshape(done * 3) if varsig: bls, sig2 = fmin(self.calc_deviat, bls, full_output=True, disp=False)[:2] bft = [i for i in bls.reshape(done, 3) if i[2] > wmin] if len(bft) < done: done = len(bft) bls = np.array(bft).reshape(done * 3) if done > 0: bls, sig2 = fmin(self.calc_deviat, bls, full_output=True, disp=False)[:2] self.peaks = zip(*bls.reshape(done, 3).transpose()) return self.peaks, np.sqrt(sig2)
def problem_1():
    """
    This problem uses scipy.optimize.fmin to solve the 1 variable optimization
    problem for the local min and max noted above.

    Inputs:
        None
    Outputs:
        y: List of y values for the local min and max
        val: The function evaluated at the points in y.
    """
    func_min = lambda y: -28*y**3 + 39*y**2 + 39*y - 117
    func_max = lambda y: 28*y**3 - 39*y**2 - 39*y + 117
    ymin = opt.fmin(func_min, -.3)
    ymax = opt.fmin(func_max, 1.0)
    val_min = func_min(ymin)
    val_max = func_min(ymax)
    y = [ymin, ymax]
    val = [val_min, val_max]
    return y, val
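# For a 1-D problem like this, a bracketed search is an alternative to the
# simplex: fminbound minimises over an interval, and the local maximum is again
# found by negating the function. A sketch under the assumption that both extrema
# of the cubic lie in [-2, 2]:
import scipy.optimize as opt

f = lambda y: 28*y**3 - 39*y**2 - 39*y + 117

y_loc_min = opt.fminbound(f, 0.0, 2.0)                 # local minimum of f
y_loc_max = opt.fminbound(lambda y: -f(y), -2.0, 0.0)  # local maximum of f
print(y_loc_min, f(y_loc_min), y_loc_max, f(y_loc_max))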
def bad_obs_check(p, ps=0.0175797):
    # Palomar PS = 0.021, KP PS = 0.0175797
    pix_rad = []
    pix_vals = []
    core_pix_rad = []
    core_pix_vals = []
    # Icy, Icx = numpy.unravel_index(p.argmax(), p.shape)
    x_shape = p.shape[1]
    y_shape = p.shape[0]
    # Integer division so range() also works under Python 3.
    for x in range(x_shape // 2 - 20, x_shape // 2 + 20 + 1):
        for y in range(y_shape // 2 - 20, y_shape // 2 + 20 + 1):
            r = sqrt((x - x_shape / 2) ** 2 + (y - y_shape / 2) ** 2)
            if r > 3:  # remove core
                pix_rad.append(r)
                pix_vals.append(p[y][x])
            else:
                core_pix_rad.append(r)
                core_pix_vals.append(p[y][x])
    try:
        p0 = [0.0, np.max(pix_vals), 20.0, 2.0]
        p = fmin(residuals, p0, args=(pix_rad, pix_vals), maxiter=1000000, maxfun=1000000,
                 ftol=1e-3, xtol=1e-3, disp=False)
        p0 = [0.0, np.max(core_pix_vals), 5.0, 2.0]
        core_p = fmin(residuals, p0, args=(core_pix_rad, core_pix_vals), maxiter=1000000,
                      maxfun=1000000, ftol=1e-3, xtol=1e-3, disp=False)
    except OverflowError:
        return 0, 10
    _core = core_p[2] * ps
    _halo = p[2] * ps
    return _core, _halo
def create_VmaxRvmax(vmax,rvmax): """ Generates a new NFWModel with the given Vmax and R_Vmax. :param float vmax: maximum circular velocity in km/s. :param float rvmax: radius of maximum circular velocity in kpc. See Bullock et al. 2001 for reflated reference. :returns: A :class:`NFWModel` object matching the supplied `vmax` and `rvmax` """ from scipy.optimize import fmin #generate an approximate "best guess" from the scalings at z=0 m = NFWModel.create_Mvir(NFWModel.Vmax_to_Mvir(vmax),z=0) #from Bullock+ 01 m.rc = rvmax/2.16 def toopt(v,vmaxwanted,model): model.rho0 = v[0] return (model.getVmax()[0]-vmaxwanted)**2 fmin(toopt,(m.rho0,),(vmax,m),disp=0) #rho0 is now set correctly return m
def approximate_polygon_center2(pts, r=None): """ this is the ideal solution however it doesnt work as well as approximage_polygon_center when there are outliers iteratively remove points that are R from the xm,ym is faster and prefered approximate_polygon_center """ from scipy.optimize import fmin from numpy import linalg def err(p, X, Y): w, v, r = p npts = [linalg.norm([(x - w, y - v)]) - r for x, y in zip(X, Y)] return (array(npts) ** 4).sum() def fixed_radius(p, e, X, Y): w, v = p npts = [linalg.norm([(x - w, y - v)]) - r for x, y in zip(X, Y)] return (array(npts) ** 2).sum() def make_new_point_list(p, r, tol=1): """ filter points """ X, Y = p.T xm = X.mean() ym = Y.mean() def dist(pt): return ((pt[0] - xm) ** 2 + (pt[1] - ym) ** 2) ** 0.5 mask = array([dist(pp) - r < tol for pp in p], dtype=bool) # print mask return p[mask] pxs = array([]) pys = array([]) for i in range(1000): # make new point list X, Y = pts.T xm = X.mean() ym = Y.mean() if r is not None: xf, yf = fmin(fixed_radius, [xm, ym], args=(r, X, Y), disp=False) else: xf, yf, r = fmin(err, [xm, ym, 1], args=(X, Y), disp=False) pts = make_new_point_list(pts, r) if i > 5: if abs(pxs.mean() - xf) < 1e-5 and abs(pys.mean() - yf) < 1e-5: return xf, yf, r pxs = hstack((pxs[-5:], xf)) pys = hstack((pys[-5:], yf)) return xf, yf, r
def track(self, eye): self.threshold = cv2.getTrackbarPos('threshold', 'controls') self.mini = cv2.getTrackbarPos('mini', 'controls') self.maxi = cv2.getTrackbarPos('maxi', 'controls') if self.mini <= self.maxi: self.mini = min(self.mini, self.threshold-1) self.maxi = max(self.maxi, self.threshold+1) else: self.mini = max(self.mini, self.threshold+1) self.maxi = min(self.maxi, self.threshold-1) self.xdrift = (cv2.getTrackbarPos('xdrift', 'controls')/50.-1)*self.xdriftmax self.maxcontour = cv2.getTrackbarPos('maxcontour', 'controls') self.alpha = cv2.getTrackbarPos('alpha', 'controls')/100. self.dotrack = cv2.getTrackbarPos('dotrack', 'controls') self.fmintol = 10**(-cv2.getTrackbarPos('fmintol', 'controls')/10) self.maxiter = cv2.getTrackbarPos('maxiter', 'controls') eye2, eyecontour = self.preprocess(eye) # opt = dict(xtol=0.0001, ftol=0.0001, maxiter=None, maxfun=None, disp=False) opt = dict(xtol=self.fmintol, ftol=self.fmintol, maxiter=self.maxiter, maxfun=None, disp=False) if self.dotrack: self.fit = fmin(self.energycalc, self.fit, (eye2, eyecontour), **opt) else: self.fit = fmin(self.energycalc, (self.nx/2, self.ny/2, 10), (eye2, eyecontour), **opt) self.fit = np.maximum(self.fit, 0) self.fit = np.minimum(self.fit, np.max([self.nx, self.ny])) self.energycalc(self.fit, eye2, eyecontour, showimage=True) if self.dosave: self.xshift.append(self.fit[1]) self.yshift.append(self.fit[0]) self.rshift.append(self.fit[2])
def _partial_optimize(self, optimize_nodes, evaluate_nodes, fall_to_simplex): """Optimize part of the model. :Arguments: nodes : iterable list nodes to optimize. """ non_observeds = filter(lambda x: not x.observed, optimize_nodes) init_vals = [node.value for node in non_observeds] # define function to be optimized def opt(values): for value, node in zip(values, optimize_nodes): node.value = value try: logp_optimize = [node.logp for node in optimize_nodes] logp_evaluate = [node.logp for node in evaluate_nodes] return -np.sum(logp_optimize) - np.sum(logp_evaluate) except pm.ZeroProbability: return np.inf #optimize try: fmin_powell(opt, init_vals) except Exception as e: if fall_to_simplex: print "Warning: Powell optimization failed. Falling back to simplex." fmin(opt, init_vals) else: raise e
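# Stand-alone sketch of the Powell-then-simplex fallback used above, on a toy
# objective (the real code minimises a summed -logp over PyMC nodes, which is
# not reproduced here):
import numpy as np
from scipy.optimize import fmin, fmin_powell

def objective(p):
    x, y = p
    return (x - 1.0) ** 2 + 10.0 * (y + 2.0) ** 2

init_vals = np.array([0.0, 0.0])
try:
    best = fmin_powell(objective, init_vals, disp=False)
except Exception:
    print("Warning: Powell optimization failed. Falling back to simplex.")
    best = fmin(objective, init_vals, disp=False)
print(best)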
def likelihood_method(self, full_fit=True, max_sum=1000, guess=[10, -5], disp=True): """ Using likelihood method to estimate the parameters of the truncated negative binomial (mup and kp) and the host survival fxn (a, b) Parameters ---------- data : array-like Observed host parasite data full_fit : bool If True, fits mup, kp, a, b. If False, used preset mup and kp max_sum: int Upper bound on normalizing constant. Technically bounded by positive infinity, but in practice a lower upper bound works fine. guess : list Guess for a and b of the survival function disp : bool If True minimization convergence results are printed. If False, they are not printed. Returns ------- : tuple (mu, k, a, b) Notes ----- When dealing with small sample sizes and trying to fit the full distribution (i.e., not using the Crofton Method), the convergence of the likelihood method will often fail and/or multiple likelihood peaks will exist. One way around this is to use the approach of Ferguson et al. 2011 who essentailly assume a fixed k (e.g. k = 1 in Ferguson et al. 2011). """ if not full_fit: if None in list(self.get_premort_params())[1:]: raise TypeError("mup, kp must be preset") params = opt.fmin(likefxn2, guess, args=(self.data, self.mup, self.kp), disp=disp, maxiter=10000, maxfun=10000) out_params = [self.mup, self.kp] + list(params) else: mu_guess = np.mean(self.data) k_guess = 1 out_params = opt.fmin(likefxn1, [mu_guess, k_guess] + guess, args=(self.data,), disp=disp, maxiter=10000, maxfun=10000) self.mup, self.kp, self.a, self.b = out_params return tuple(out_params)
def likelihood_method(data, crof_params=None, max_sum=1000, guess=[10, -5]): """ Using likelihood method to estimate parameter of truncated NBD and survival fxn Parameters ---------- data : array-like Observed host parasite data crof_params : tuple or None If tuple contains (N_p, mu_p, k_p): Pre-mortality abundance, pre-mortality mean parasites per host, pre-mortality k. If None will to estimate all of the parameters from the data. max_sum: int Upper bound on normalizing constant. Technically bounded by positive infinity, but in practice a lower upper bound works fine. guess : list Guess for a and b of the survival function Returns ------- : tuple (mu, k, a, b) Notes ----- """ # kern = lambda x, mu, k, a, b: surv_prob(x, a, b) * \ # mod.nbinom.pmf(x, mu, k) # pmf = lambda x, mu, k, a, b: kern(x, mu, k, a, b) / \ # sum(kern(np.arange(0, max_sum), mu, k, a, b)) # def likefxn1(params, x): # """ Likelihood fxn for all parameters """ # mu, k, a, b = params # return -np.sum(np.log(pmf(x, mu, k, a, b))) # def likefxn2(params, x, mu, k): # """ Likelihood fxn for just a and b """ # a, b = params # return -np.sum(np.log(pmf(x, mu, k, a, b))) if crof_params: N, mu, k = crof_params params = opt.fmin(likefxn2, guess, args=(data, mu, k), disp=1) out_params = [mu, k] + list(params) else: mu_guess = np.mean(data) k_guess = 1 out_params = opt.fmin(likefxn1, [mu_guess, k_guess] + guess, args=(data,)) return tuple(out_params)
def parms_from_likelihood( df, offset = 0.0, initial_guess = [1.0,1.0,1.0] ) : ts = obslist( df ) + offset # tuple([ts]) notation seems to be necessary to avoid converting ts to a long tuple itself fminhood = fmin( nlog_likelihood_min, initial_guess, args=tuple([ts]) ) fminhat = fmin( nlog_likelihat, initial_guess, args=tuple([ts]) ) mle = fsolve( mleeqn, fminhood, args=ts ) #return fminhat return (fminhood, fminhat, mle)
def fitPairs(self, fx1, fy1, fx2, fy2): vx = fmin(self.RSS, self.startpoint, args = (fx1, fx2), maxiter = self.maxiter, maxfun = self.maxfun, disp = self.disp) vy = fmin(self.RSS, self.startpoint, args = (fy1, fy2), maxiter = self.maxiter, maxfun = self.maxfun, disp = self.disp) vx[1] = -vx[1] * len(fx1) * self.p / (self.lambda_*self.L) vy[1] = vy[1] * len(fy1) * self.p / (self.lambda_*self.L) return [vx[0], vx[1], vy[1]]
def test(fn, twod=True, dim=0): data = np.loadtxt( os.popen("sed 's/ch//g' %s | awk NF==10 | awk '{if \ ($4==1) { print $0} }'" % fn), usecols=[4,5,9] ) data[:,-1] -= 1 if twod: plotdata( data ) alpha, foptalpha = fit_GLC_alpha( data ) print "Final alpha: ", alpha print "alphanegloglik: ", foptalpha alpha_params = np.hstack( get_params_from_alpha( alpha, data ) ) sigma_init = var_from_b( alpha_params[:-1], data ) alpha_params = np.hstack( [[sigma_init], alpha_params] ) print "Params from alpha optimization: ", alpha_params outputs = optimize.fmin( negloglike_reduced , x0 = alpha_params , args=[ insert_ones_col( data ) ] , full_output=True ) xmin=outputs[0] fopt=outputs[1] print "Params: ", xmin print "negloglike: ", fopt print "alphaerror: ", foptalpha - fopt #thisfit = twodfit( data ) #params = thisfit.params #b, c0 = get_params_from_alpha( alpha, data ) import pylab as pl plotline( *alpha_params[1:], linestyle='--', color='k', label="Result of alpha opt" ) plotline( *xmin[1:], linestyle='-', color='r', label="Final result." ) pl.legend(loc=0) pl.title( fn ) else: #plotdata( data ) data = data[:,[dim,2]] alpha, foptalpha = fit_GLC_alpha( data ) print "Final alpha: ", alpha print foptalpha #thisfit = twodfit( data ) alpha_params = np.hstack( get_params_from_alpha( alpha, data ) ) sigma_init = var_from_b( alpha_params[:-1], data ) alpha_params = np.hstack( [[sigma_init], alpha_params] ) print "Params from alpha optimization: ", alpha_params outputs = optimize.fmin( negloglike_reduced , x0 = alpha_params , args=[ insert_ones_col( data ) ] , full_output=True ) xmin=outputs[0] fopt=outputs[1] print "Params: ", xmin print "negloglike: ", fopt print "alphaerror: ", foptalpha - fopt #thisfit = twodfit( data ) #params = thisfit.params b, c0 = get_params_from_alpha( alpha, data )
def _extrapolatePhi(phi0, V=None, tails = .2): """ Returns a list of points along the path, going linearly beyond the path to include the nearest minima. Parameters ---------- phi0 : array_like The (multi-dimensional) path to extend. V : callable or None The potential to minimize, or None if the path should be extended a fixed amount beyond its ends. tails : float The amount relative to the path length to extrapolate beyond the end of the path (if V is None) or beyond the minima (if V is not None). Returns ------- phi : array_like The extended list of points. The spacing between points in the extended regions should be approximately the same as the spacing between the input points. s : array_like The distance along the path (starting at ``phi0[0]``). L : float Total length of the path excluding tails. """ phi1 = phi = phi0 dphi = np.append(0,np.sum((phi1[1:]-phi1[:-1])**2,1)**.5) s1 = np.cumsum(dphi) L = s1[-1] npoints = phi1.shape[0] phi_hat0 = (phi[1]-phi[0])/np.sum((phi[1]-phi[0])**2)**.5 if V == None: s0min = 0.0 else: V0 = lambda x: V( phi[0] + phi_hat0*x*L) s0min = optimize.fmin(V0, 0.0, disp=0, xtol=1e-5)[0]*L if s0min > 0: s0min = 0.0 s0 = np.linspace(s0min - L*tails, 0.0, npoints*tails)[:-1] phi0 = phi[0] + phi_hat0*s0[:,np.newaxis] phi_hat2 = (phi[-1]-phi[-2])/np.sum((phi[-1]-phi[-2])**2)**.5 if V == None: s2min = 0.0 else: V2 = lambda x: V( phi[-1] + phi_hat2*(x-1)*L) s2min = optimize.fmin(V2, 1, disp=0, xtol=1e-5)[0]*L if s2min < L: s2min = L s2 = np.linspace(L, s2min + L*tails, npoints*tails)[1:] phi2 = phi[-1] + phi_hat2*(s2[:,np.newaxis]-L) phi = np.append(phi0, np.append(phi1, phi2, 0), 0) s = np.append(s0, np.append(s1, s2)) return _extrapolatePhi_rtype(phi, s, L)
def find_peaks(self, bg_sigma2, max_peaks=None): "fit by multiple gausians" y_ar = self.y_ar x_ar = self.x_ar mp = (len(x_ar)) // 4 if max_peaks is None or max_peaks <= 0 or max_peaks > mp: max_peaks = mp # No reducing self.red_allow = 0 # TODO: Improve algorithm sigma2 = (y_ar ** 2).sum() / len(y_ar) opt_x = np.array([]) self.peaks = None proc_search = True done = 0 peak_add = True while proc_search: prev_opt_x = opt_x prev_sigma2 = sigma2 dy_ar = y_ar - self.calc_shape(opt_x) maxpt = x_ar[dy_ar.argmax()] self.x0 = maxpt self.dy_ar = dy_ar wdth = (x_ar[-1] - x_ar[0]) ** 2 / 16. if self.lambda21: self.h = dy_ar.max() / 1.5 else: self.h = dy_ar.max() if peak_add: hw = fmin(self.calc_deviat2, np.array([wdth]), disp=False) opt_x = np.array(list(opt_x) + [maxpt, self.h] + hw.tolist()) else: peak_add = True opt_x, sigma2, itr, fcs, wflg = \ fmin(self.calc_deviat, opt_x, full_output=True, disp=False) done += 1 tpx = opt_x.reshape(len(opt_x) / 3, 3).transpose() if sigma2 >= prev_sigma2 or \ opt_x.min() <= 0.: print('Warning: on fail') opt_x = prev_opt_x break if done >= max_peaks: print('Warning: on max') break if sigma2 <= bg_sigma2: opt_x, reduced, sgm = self.reduce_x(opt_x) if reduced: peak_add = False sigma2 = sgm else: break self.peaks = zip(*opt_x.reshape(len(opt_x) / 3, 3).transpose()) if len(self.peaks) == 0: print(x_ar) print(y_ar) return self.peaks
def __init__(self,factor=0): self.factor = factor circ = lambda t: exp(CC(0,pi*float(t))) #Shell Thron boundary st = lambda z: exp(z*exp(-z)) self.stb0 = lambda t: st(circ(t)) self.stbtymax = fmin(lambda t: -self.stb(t).imag(), 0.5)[0] self.stbtxmax = fmin(lambda t: -self.stb(t).real(), 0.1)[0] self.stbtxmin = fmin(lambda t: self.stb(t).real(), 0.6)[0]
def optimize_fmin(self, data):
    def cost(pars):
        self.set_parameters_from_pars(pars)
        c = self.chisq(data)
        print(c)
        return c
    firstpars = self.pars()
    # Capture the optimizer's result; it was previously discarded, leaving
    # bestpars undefined on the next line.
    bestpars = op.fmin(cost, firstpars, maxfun=np.Inf, maxiter=np.Inf, ftol=1.e-5)
    self.set_parameters_from_pars(bestpars)
    return None
def imf_B_factor_get(res_N,x,ctf_params): from scipy.optimize import fmin nx = len(res_N)*2 ctf = ctf_1d(nx, ctf_params) p = [1,1] xopt = fmin(residuals_B1, p, (res_N,x)) p = xopt xopt1 = fmin(residuals_B2, p, (res_N,ctf[1][0:nx-1], x)) print xopt return xopt
def fit_to_data(experiment, parameter_space, guess=[0, 0, 1, 0]): ''' Fit to some experimental data ''' p1 = fmin(err_func, guess, args=(experiment, parameter_space)) return p1
def long_time_errorbars(fnames, fv, frac=0.1, additional_fixed=None, plotresult=False): # finding error bar in the "fixed" parameters... I think this is similar to what # I did above but with average RChi2 across all traces # I tried to recalculate a lower Fx value because DOF increased from 3282 to 3282*len(fnames), # but the calc. only could handle <10000 DOF, and Fx only changed from 1.001858 to 1.001845. lkeys = ['l0', 'l1', 'l2', 'l3'] akeys = ['a0', 'a1', 'a2', 'a3'] fixedparams = [fv] if additional_fixed is not None: fixedparams += additional_fixed #Fx = 1.001845 # Threshold found from F-statistic on 9999 points. (max of calculator, faking 3282*21). Fx = 1.001858 # Threshold found from F-statistic on 3282 pts. Fx_list = [ ] # later we'll sort these in order of increasing parameter value to enable interpolation # first get best_avg_RChi2: bestfits = [] chi2 = [] for fname in fnames: bestfit, bestacorr = load_wire(fname) bestfits.append(bestfit) chi2.append(bestfit['ReducedChi2']) best_avg_RChi2 = np.mean(chi2) # setup initial coarse scan assert bestfit.has_key("l3") fv_best = bestfit[fv] val_step = frac * fv_best / 2.0 argvals = arange(fv_best * (1.0 - frac), fv_best * (1.0 + frac) + val_step, val_step) def fmin_kernel(twoT, *args): avg_RChi2 = 0.0 fv_value = args[0] longT_floating = [key for key in ['l1', 'l2', 'l3'] if key != fv ] # the two longTime components we're optimizing chi2 = [] for i, fname in enumerate(fnames): bestfit = bestfits[i].copy() bestfit[fv] = fv_value bestfit[longT_floating[0]] = twoT[0] bestfit[longT_floating[1]] = twoT[1] l = [bestfit[key] for key in lkeys] a = [bestfit[key] for key in akeys] irf = bestfit['irf_dispersion'] # all three long-components are fixed for this fit, but the # values they are fixed at are set at different levels: # fv is the component we're finding an errorbar for, and # it is set by the function long_time_errorbars. # The other two are allowed to "float" in response, but # not float freely for each trace individually; we're # looking for an error bar for the global fit across all # cavities on a given sample, so we constrain them for each # individual fit but let fmin play with them to minimize # the mean reduced Chi squared across all data sets. 
params = do_fit(fname, l, a, ['l1', 'l2', 'l3'], irf) chi2.append(params['ReducedChi2']) return np.mean(chi2) # do a coarse (5-pt) run across the data twoT_guess = [bestfit[key] for key in ['l1', 'l2', 'l3'] if key != fv] # the two longTime components we're optimizing for val in argvals: res = fmin(fmin_kernel, twoT_guess, args=(val, ), xtol=0.005, ftol=0.005, full_output=1) avg_RChi2 = res[1] Fx_list.append([val, avg_RChi2 / best_avg_RChi2]) assert not all(array(Fx_list)[:, 1] > Fx) # if the left side (low param value) didn't exceed Fx threshold, extend val = argvals[0] while Fx_list[0][1] < Fx: val -= val_step if val < 0: if fv in ['l1', 'l2', 'l3']: raise ValueError("long-time component just went negative...") else: break res = fmin(fmin_kernel, twoT_guess, args=(val, ), xtol=0.005, ftol=0.005, full_output=1) avg_RChi2 = res[1] Fx_list.append([val, avg_RChi2 / best_avg_RChi2]) Fx_list.sort( key=lambda x: x[0]) # sort by first element (parameter value) # if the right side (high param value) didn't exceed Fx threshold, extend val = argvals[-1] while Fx_list[-1][1] < Fx: val += val_step res = fmin(fmin_kernel, twoT_guess, args=(val, ), xtol=0.005, ftol=0.005, full_output=1) avg_RChi2 = res[1] Fx_list.append([val, avg_RChi2 / best_avg_RChi2]) Fx_list.sort( key=lambda x: x[0]) # sort by first element (parameter value) # interpolate to find values at threshold Fx_array = array(Fx_list) splines = cspline1d(Fx_array[:, 1]) interp_val = linspace(Fx_array[0, 0], Fx_array[-1, 0], 500) interp_Fx = cspline1d_eval(splines, interp_val, dx=val_step, x0=Fx_array[:, 0].min()) error_bar = [ interp_val[find(interp_Fx < Fx)[0]], interp_val[find(interp_Fx < Fx)[-1]] ] if plotresult: fig = figure(1) #fig.clf() ax_chi = gca() #ax_chi = fig.add_subplot(111) #ax_chi.cla() ax_chi.plot(interp_val, interp_Fx, label=fv) ax_chi.plot(interp_val, [Fx] * len(interp_val), '--k') ax_chi.plot(Fx_array[:, 0], Fx_array[:, 1], 'sk') ax_chi.plot(error_bar, [Fx] * 2, '-k', lw=3.0) #ax_chi.set_ylim([0.99, 1.01]) fig.show() fig.canvas.draw() return error_bar
# !/usr/bin/env python # -*- coding:utf-8 -*- import scipy.optimize as spo import numpy as np __author__ = 'frm.kpmg' output=False def f((x,y)): z=np.sin(x)+0.05*x**2+0.05*y**2 if output==True: print '%f %f %f '%(x,y,z) return z # output=True ret= spo.brute(f,((-10,10.1,5),(-10.,10.1,5)),finish=None) print ret, f(ret) # output=True ret1= spo.brute(f,((-10,10.1,0.1),(-10.,10.1,0.1)),finish=None) print ret1,f(ret1) ret3=spo.fmin(f,ret1) print ret3,f(ret3) ret3=spo.fmin(f,ret) print ret3,f(ret3)
def fit(self, saveto=None): bad_fit = False self.m = PowerLaw(free=[True, False], e0=(self.energy_band.emin * self.energy_band.emax)**0.5) # fix index to 2 f = self.energyBandLikelihoodExtended self.fit = fmin(f, self.m.get_parameters(), disp=0, full_output=1, args=(self.m, )) def upper_limit(): flux_copy = self.m[0] zp = self.energyBandLikelihoodExtended(np.asarray([-20]), self.m) # NB -- the 95% upper limit is calculated by assuming the likelihood is peaked at # 0 flux and finding the flux at which it has fallen by 1.35; this is a two-sided # 90% limit, or a one-sided 95% limit -- that's how it works, right? def f95(parameters): return abs( self.energyBandLikelihoodExtended(parameters, self.m) - zp - 1.35) # for some reason, can't get fsolve to work here. good ol' fmin to the rescue self.energy_band.uflux = 10**fmin(f95, np.asarray([-11.75]), disp=0)[0] self.energy_band.lflux = None self.energy_band.flux = None self.m[0] = flux_copy # if flux below a certain level, set an upper limit if self.m[0] < 1e-20: bad_fit = True upper_limit() else: try: err = self.normUncertaintyExtended() except: bad_fit = True err = 0 self.energy_band.flux = self.m[0] self.energy_band.uflux = self.energy_band.flux * (1 + err) self.energy_band.lflux = max(self.energy_band.flux * (1 - err), 1e-30) if saveto is not None: for b, mb in zip(self.bands, self.mybands): b.__dict__[saveto] = (b.expected(self.m) * mb.er if not bad_fit else -1) if bad_fit: self.energy_band.ts = 0 else: null_ll = sum( self.bandLikelihoodExtended([0], b, mb) for b, mb in zip(self.bands, self.mybands)) alt_ll = sum( self.bandLikelihoodExtended([b.expected(self.m) * mb.er], b, mb) for b, mb in zip(self.bands, self.mybands)) self.energy_band.ts = 2 * (null_ll - alt_ll)
ax = Axes3D(figure(figsize=(10, 7))) ax.plot_surface(X, Y, Z, cmap=cm.rainbow, linewidth=0.4) ax.text2D(0.05, 0.95, '3d function function with multiple local optima', transform=ax.transAxes) show() figure(figsize=(10, 7)) CS = contour(X, Y, Z) clabel(CS, inline=1, fontsize=10) show() initial_random_point = randint(-3, 3) * rand(2) print('initial_random_point =', initial_random_point, '\n') x_min = fmin(function, initial_random_point) figure(figsize=(10, 7)) CS = contour(X, Y, Z) clabel(CS, inline=1, fontsize=10) plot(initial_random_point[0], initial_random_point[1], 'ko', label='initial point') plot(x_min[0], x_min[1], 'ro', label='local minimum point') legend() show() list_of_minima_arg = [] list_of_minima_val = []
# Print the combination that gives the smallest value
opt1 = spo.brute(fo, ((-10, 10.1, 0.1), (-10, 10.1, 0.1)), finish=None)
print(opt1)  # the smallest value comes out

# Used for nonlinear optimization when no derivative is available.
# brute force + function minimization ( fmin )
# Just understanding it is enough; not sure where it will come in handy.
# Downhill simplex (Nelder-Mead) method
output = True
# tolerance: how much error to allow (here, anything past 0.01 is accepted)
# function tolerance
# limit on the number of iterations
# Start from the rough result found above (initial value); given opt1 from spo.brute,
# fmin refines it to the precise minimum
opt2 = spo.fmin(fo, opt1, xtol=0.001, ftol=0.001, maxiter=15, maxfun=20)
print(opt2)  # comes out more precisely
fm(opt2)
output = False
spo.fmin(fo, (2.0, 2.0), maxiter=250)

# How to find the optimum when there is a single variable
def f(r):
    return 2 * np.pi * r**2 + 2 / r

r_min = spo.brent(f, brack=(0.1, 4))
print("minimum point", f(r_min))
r = np.linspace(0, 2, 100)
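# The two-step pattern above (coarse spo.brute grid, then spo.fmin polish) can also
# be collapsed into one call: brute's finish argument hands the best grid point
# straight to fmin. Self-contained sketch with a toy objective matching the one
# used earlier in this file (assumed, since fo is defined elsewhere):
import numpy as np
import scipy.optimize as spo

def fo(p):
    x, y = p
    return np.sin(x) + 0.05 * x ** 2 + 0.05 * y ** 2

opt = spo.brute(fo, ((-10, 10.1, 0.5), (-10, 10.1, 0.5)), finish=spo.fmin)
print(opt, fo(opt))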
import numpy as np from scipy.optimize import fmin norm_list_y = [] norm_list_x = [] for x in range(1, 1000, 5): x = x / 100 norm_list_x.append(x) for a in range(1, 1000, 5): a = a / 100 Argument = np.array([0, 0, 0]) y_transpose = np.array([[1], [a], [1], [a]]) def optimize(parameters): x1, x2, x3 = parameters[0], parameters[1], parameters[2] x = [x1, x2, x3] A = np.array([[1, 1, 2], [1, 2, 1], [2, 1, 1], [2, 2, 1]]) objective = np.abs((A @ x) - y_transpose) norm = np.linalg.norm(objective) return norm optimization = fmin(optimize, Argument, xtol=0.001) print(optimization) norm_list_y.append(np.linalg.norm(optimization)) print(norm_list_y) plt.plot(norm_list_x, norm_list_y) plt.ylabel('norm') plt.xlabel('parameter value') plt.show()
def main(): tension_stress_data = np.genfromtxt(os.path.expanduser('~/phase_transformations/neu_sehitoglu/fig2_tension'), delimiter=',') idx = np.argsort(tension_stress_data[:, 0]) tension_stress_data = tension_stress_data[idx, :] plt.figure(0) plt.plot(tension_stress_data[:, 0], tension_stress_data[:, 1], '-b*', ms=12) tension_volume_data = np.genfromtxt(os.path.expanduser('~/phase_transformations/neu_sehitoglu/fig3_tension'), delimiter=',') idx = np.argsort(tension_volume_data[:, 0]) tension_volume_data = tension_volume_data[idx, :] plt.figure(1) plt.plot(tension_volume_data[:, 0], tension_volume_data[:, 1], '-b*', ms=12) comp_stress_data = np.genfromtxt(os.path.expanduser('~/phase_transformations/neu_sehitoglu/fig2_compression'), delimiter=',') idx = np.argsort(comp_stress_data[:, 0]) comp_stress_data = comp_stress_data[idx, :] plt.figure(0) plt.plot(comp_stress_data[:, 0], comp_stress_data[:, 1], '-r*', ms=12) comp_volume_data = np.genfromtxt(os.path.expanduser('~/phase_transformations/neu_sehitoglu/fig3_compression'), delimiter=',') idx = np.argsort(comp_volume_data[:, 0]) comp_volume_data = comp_volume_data[idx, :] plt.figure(1) plt.plot(comp_volume_data[:, 0], comp_volume_data[:, 1], '-r*', ms=12) plt.figure(2) torsion_stress_data = np.genfromtxt(os.path.expanduser('~/phase_transformations/neu_sehitoglu/fig7'), delimiter=',') torsion_stress_data[:, 0] *= 2 plt.plot(torsion_stress_data[:, 0], torsion_stress_data[:, 1], '-b*', ms=12) plt.xlim(0, 0.14) plt.ylim(0, 2000) plt.figure(3) torsion_volume_data = np.genfromtxt(os.path.expanduser('~/phase_transformations/neu_sehitoglu/fig8'), delimiter=',') plt.plot(torsion_volume_data[:, 0], torsion_volume_data[:, 1], '-b*', ms=12) plt.xlim(0, 2000) experiments = [Experiment(name='tension', temperature=22., boundary_condition=[BC(amplitude=[(0., 0.), (1., 1300)], direction='z', mode='stress')], stress_data=tension_stress_data, volume_data=tension_volume_data), Experiment(name='compression', temperature=22., boundary_condition=[BC(amplitude=[(0., 0.), (1., -2400)], direction='z', mode='stress')], stress_data=comp_stress_data, volume_data=comp_volume_data), Experiment(name='torsion', temperature=22., boundary_condition=[BC(amplitude=[(0., 0.), (1., 4000)], direction='y', mode='stress'), BC(amplitude=[(0., 0.), (1., -4000)], direction='z', mode='stress')], stress_data=torsion_stress_data, volume_data=torsion_volume_data) ] plt.draw() plt.pause(0.001) plt.ion() plt.show() neu_sehitoglu.Ms = 185.51653373817538 parameters = {'a1': 0.028107490537271854, 'a2': 1.2959257140209983e-05, 'a3': 2.8149370838159175e-07, 'R1': -6.014172555589034e-07, 'R2': 0.024553598689201442, 'Mss': -45} print(fmin(residual, list(parameters.values()), args=(list(parameters.keys()), experiments))) plt.show()
def move(self): """ Move method for each tick update. For stated objective, determines best angle to run at given current velocity. """ # We are ignoring \delta_t by calling it unity, so V and A need to be in appropriate # units to reflect that. # Don't move if we are prone if self.state == 0: return # Don't move if we are at objective # NOTE: WHAT ABOUT SPEED? if np.sqrt((self.y_objective-self.y)**2 + (self.x_objective-self.x)**2) == 0.: return # Use Brent method to find best angle to accelerate at to reach objective. pi = 4.*math.atan(1.) # Bracket angle to be at least in hemisphere of objective try: obj_angle = np.tan((self.y_objective-self.y)/(self.x_objective-self.x)) if not np.isfinite(obj_angle): print("non finite",self.y_objective,self.y,self.x_objective,self.x) if self.x_objective-self.x < 0: obj_angle += pi except: # div by zero? print("exc in move",self.y_objective,self.y,self.x_objective,self.x) obj_angle = 0. #try: # best_acc = opt.brent(lambda angle : self.eval_move(angle), brack=(-pi,pi)) #except: # best_acc = obj_angle #acc_angle = best_acc acc_angle = opt.fmin(lambda angle : self.eval_move(angle),obj_angle,xtol=pi/180.,disp=False) self.x, self.y, self.angle, self.current_speed = self.project_move(acc_angle) # Check if we can make it to objective #if np.sqrt((self.x_objective-self.x)**2 + (self.y_objective-self.y)**2) < self.speed: # self.x = self.x_objective # self.y = self.y_objective #else: # if self.y_objective == self.y: # if self.x_objective > self.x: # self.x += self.speed # else: # self.x -= self.speed # else: # self.angle = math.atan2(self.y_objective-self.y,self.x_objective-self.x) # self.x += self.speed * math.cos(self.angle) # self.y += self.speed * math.sin(self.angle) # Ensure players stay in bounds self.x = min(self.x,self.layout.xsize) self.x = max(self.x,0) self.y = min(self.y,self.layout.ysize) self.y = max(self.y,0) # DEBUG if self.pid == self.layout.ball.carrier: print(self.pid,self.team,self.objective,self.layout.ball.carrier)
def func(x,i,j,Q,P): # print i,j,x Q[i,i] += x - Q[i,j] Q[i,j] = x return distance(expm(Q), P) n = Q.shape[0] q = np.copy(Q) for i in range(n): for j in range(n): if(i!=j): if(Q[i,j]>1e-10): calculations+=1 print calculations x = optimize.fmin(func, Q[i,j], args=(i,j,Q,P), maxiter=200)[0]#argmin(i, j, Q, c) #print "UGH:", Q[i,j],x #Q = np.copy(q) #put updated matrix in place of Q # Q[i,i] +=x # Q[i,i] -= Q[i,j] Q[i,j] = x # print "Original:\n", Q[i,:], "\nUpdated:\n", q[i,:] # if(Q[i,i]!=q[i,i]): # quit() # quit() # Q[i,j] = q[i,j] #put updated matrix in place of Q # Q[i,:] = q[i,:] matrixFunctions2d.write2dMatrix(Q, sys.argv[2]) matrixFunctions2d.writeDetailedBalanceftxt(Q)
def fdap_fitting(embryo,this_fit,gui=None): globals()['embryo']=embryo globals()['this_fit']=this_fit globals()['gui']=gui #For good measure, check if ignored vectors are correct embryo=correct_ignored_vecs(embryo) #Counter for function calls global iterations iterations=0 #Check if constrained and if we need xtransform #embryo.fits[this_fit],x0=check_constrained(embryo.fits[this_fit]) #------------------------------------------------------------------------------------------------------------------------------------- #Calling optimization algorithms #------------------------------------------------------------------------------------------------------------------------------------- #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #fit_cnaught==1 and fit_ynaught==1 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==1: #Copying x0 into local variable to pass to solver #if embryo.fits[this_fit].transform==0: x0=list(embryo.fits[this_fit].x0) #Getting bounds bnds = ((embryo.fits[this_fit].LB_k, embryo.fits[this_fit].UB_k), (embryo.fits[this_fit].LB_cnaught, embryo.fits[this_fit].UB_cnaught),(embryo.fits[this_fit].LB_ynaught,embryo.fits[this_fit].UB_ynaught)) #Calling optimizers if embryo.fits[this_fit].opt_meth=='brute': kstep=(embryo.fits[this_fit].UB_k-embryo.fits[this_fit].LB_k)/50 cstep=(embryo.fits[this_fit].UB_cnaught-embryo.fits[this_fit].LB_cnaught)/50 ystep=(embryo.fits[this_fit].UB_ynaught-embryo.fits[this_fit].LB_ynaught)/50 ranges=(slice(embryo.fits[this_fit].LB_k,embryo.fits[this_fit].UB_k,kstep),slice(embryo.fits[this_fit].LB_cnaught,embryo.fits[this_fit].UB_cnaught,cstep),slice(embryo.fits[this_fit].LB_ynaught,embryo.fits[this_fit].UB_ynaught,ystep)) res=sopt.brute(calc_exp_ssd, ranges, full_output=bool(embryo.debug_fit),finish=sopt.fmin) elif embryo.fits[this_fit].opt_meth=='Constrained Nelder-Mead': x0=transform_x0(embryo.fits[this_fit].x0,[embryo.fits[this_fit].LB_k,embryo.fits[this_fit].LB_cnaught,embryo.fits[this_fit].LB_ynaught],[embryo.fits[this_fit].UB_k,embryo.fits[this_fit].UB_cnaught,embryo.fits[this_fit].UB_ynaught]) res=sopt.fmin(constr_calc_exp_ssd,x0,ftol=embryo.fits[this_fit].opt_tol,maxiter=embryo.fits[this_fit].maxfun,disp=bool(embryo.debug_fit),full_output=True) else: if embryo.fits[this_fit].opt_meth=='Anneal': random.seed(555) res=sopt.minimize(calc_exp_ssd, x0, method='Anneal') else: res=sopt.minimize(calc_exp_ssd,x0,method=embryo.fits[this_fit].opt_meth,tol=embryo.fits[this_fit].opt_tol,bounds=bnds,options={'maxiter': embryo.fits[this_fit].maxfun, 'disp': bool(embryo.debug_fit)}) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #fit_cnaught==1 and fit_ynaught==0 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ elif embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==0: #Copying x0 into local variable to pass to solver #if embryo.fits[this_fit].transform==0: x0=list(embryo.fits[this_fit].x0) x0.pop(2) #Getting bounds bnds = ((embryo.fits[this_fit].LB_k, embryo.fits[this_fit].UB_k), (embryo.fits[this_fit].LB_cnaught, embryo.fits[this_fit].UB_cnaught)) #Calling optimizers if embryo.fits[this_fit].opt_meth=='brute': ranges=(slice(embryo.fits[this_fit].LB_k,embryo.fits[this_fit].UB_k,1),slice(embryo.fits[this_fit].LB_cnaught,embryo.fits[this_fit].UB_cnaught,10)) res=sopt.brute(calc_exp_ssd, ranges, full_output=True,finish=sopt.fmin) elif embryo.fits[this_fit].opt_meth=='Constrained Nelder-Mead': 
x0=transform_x0(embryo.fits[this_fit].x0,[embryo.fits[this_fit].LB_k,embryo.fits[this_fit].LB_cnaught,embryo.fits[this_fit].LB_ynaught],[embryo.fits[this_fit].UB_k,embryo.fits[this_fit].UB_cnaught,embryo.fits[this_fit].UB_ynaught]) res=sopt.fmin(constr_calc_exp_ssd,x0,ftol=embryo.fits[this_fit].opt_tol,maxiter=embryo.fits[this_fit].maxfun,disp=bool(embryo.debug_fit),full_output=True) else: if embryo.fits[this_fit].opt_meth=='Anneal': random.seed(555) res=sopt.minimize(calc_exp_ssd, x0, method='Anneal') else: res=sopt.minimize(calc_exp_ssd,x0,method=embryo.fits[this_fit].opt_meth,tol=embryo.fits[this_fit].opt_tol,bounds=bnds,options={'maxiter': embryo.fits[this_fit].maxfun, 'disp': bool(embryo.debug_fit)}) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #fit_cnaught==0 and fit_ynaught==1 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==1: #Copying x0 into local variable to pass to solver #if embryo.fits[this_fit].transform==0: x0=list(embryo.fits[this_fit].x0) x0.pop(1) #Getting bounds bnds = ((embryo.fits[this_fit].LB_k, embryo.fits[this_fit].UB_k), (embryo.fits[this_fit].LB_ynaught, embryo.fits[this_fit].UB_ynaught)) #Calling optimizers if embryo.fits[this_fit].opt_meth=='brute': ranges=(slice(embryo.fits[this_fit].LB_k,embryo.fits[this_fit].UB_k,1),slice(embryo.fits[this_fit].LB_ynaught,embryo.fits[this_fit].UB_ynaught,1)) res=sopt.brute(calc_exp_ssd, ranges, full_output=bool(embryo.debug_fit),finish=sopt.fmin) elif embryo.fits[this_fit].opt_meth=='Constrained Nelder-Mead': x0=transform_x0(embryo.fits[this_fit].x0,[embryo.fits[this_fit].LB_k,embryo.fits[this_fit].LB_cnaught,embryo.fits[this_fit].LB_ynaught],[embryo.fits[this_fit].UB_k,embryo.fits[this_fit].UB_cnaught,embryo.fits[this_fit].UB_ynaught]) res=sopt.fmin(constr_calc_exp_ssd,x0,ftol=embryo.fits[this_fit].opt_tol,maxiter=embryo.fits[this_fit].maxfun,disp=bool(embryo.debug_fit),full_output=True) else: if embryo.fits[this_fit].opt_meth=='Anneal': random.seed(555) res=sopt.minimize(calc_exp_ssd, x0, method='Anneal') else: res=sopt.minimize(calc_exp_ssd,x0,method=embryo.fits[this_fit].opt_meth,tol=embryo.fits[this_fit].opt_tol,bounds=bnds,options={'maxiter': embryo.fits[this_fit].maxfun, 'disp': bool(embryo.debug_fit)}) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #fit_cnaught==0 and fit_ynaught==0 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==0: #Copying x0 into local variable to pass to solver #if embryo.fits[this_fit].transform==0: x0=list(embryo.fits[this_fit].x0) #Getting bounds bnds = ((embryo.fits[this_fit].LB_k, embryo.fits[this_fit].UB_k),) #Calling optimizers if embryo.fits[this_fit].opt_meth=='brute': ranges=(1,400) res=sopt.brute(calc_exp_ssd, (ranges,), full_output=bool(embryo.debug_fit),finish=sopt.fmin) elif embryo.fits[this_fit].opt_meth=='Constrained Nelder-Mead': x0=transform_x0(embryo.fits[this_fit].x0,[embryo.fits[this_fit].LB_k,embryo.fits[this_fit].LB_cnaught,embryo.fits[this_fit].LB_ynaught],[embryo.fits[this_fit].UB_k,embryo.fits[this_fit].UB_cnaught,embryo.fits[this_fit].UB_ynaught]) print "bla" raw_input() res=sopt.fmin(constr_calc_exp_ssd,x0,ftol=embryo.fits[this_fit].opt_tol,maxiter=embryo.fits[this_fit].maxfun,disp=bool(embryo.debug_fit),full_output=True) else: if embryo.fits[this_fit].opt_meth=='Anneal': random.seed(555) res=sopt.minimize(calc_exp_ssd, x0, method='Anneal') else: 
res=sopt.minimize(calc_exp_ssd,x0,method=embryo.fits[this_fit].opt_meth,tol=embryo.fits[this_fit].opt_tol,bounds=bnds,options={'maxiter': embryo.fits[this_fit].maxfun, 'disp': bool(embryo.debug_fit)}) #------------------------------------------------------------------------------------------------------------------------------------- #Saving results in embryo object #------------------------------------------------------------------------------------------------------------------------------------- if embryo.fits[this_fit].opt_meth=='brute': if embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==1: embryo.fits[this_fit].k_opt=res[0] embryo.fits[this_fit].cnaught_opt=res[1] embryo.fits[this_fit].ynaught_opt=res[2] elif embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==0: embryo.fits[this_fit].k_opt=res[0] embryo.fits[this_fit].cnaught_opt=res[1] embryo.fits[this_fit].ynaught_opt=embryo.fits[this_fit].x0[2] elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==1: embryo.fits[this_fit].k_opt=res[0] embryo.fits[this_fit].cnaught_opt=embryo.fits[this_fit].x0[1] embryo.fits[this_fit].ynaught_opt=res[1] elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==0: embryo.fits[this_fit].k_opt=res[0] embryo.fits[this_fit].cnaught_opt=embryo.fits[this_fit].x0[1] embryo.fits[this_fit].ynaught_opt=embryo.fits[this_fit].x0[2] embryo.fits[this_fit].ssd=res[1] embryo.fits[this_fit].success=True embryo.fits[this_fit].halflife_s=log(2)/embryo.fits[this_fit].k_opt embryo.fits[this_fit].halflife_min=embryo.fits[this_fit].halflife_s/60 embryo.fits[this_fit].iterations=iterations #In bruteforce iterations = fcalls??? embryo.fits[this_fit].fcalls=iterations if embryo.fits[this_fit].fit_ext==1: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.ext_av_data_d,embryo.fits[this_fit].ssd) elif embryo.fits[this_fit].fit_int==1: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.int_av_data_d,embryo.fits[this_fit].ssd) else: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.slice_av_data_d,embryo.fits[this_fit].ssd) elif embryo.fits[this_fit].opt_meth=='Constrained Nelder-Mead': res_new=xtransform(res[0],[embryo.fits[this_fit].LB_k,embryo.fits[this_fit].LB_cnaught,embryo.fits[this_fit].LB_ynaught],[embryo.fits[this_fit].UB_k,embryo.fits[this_fit].UB_cnaught,embryo.fits[this_fit].UB_ynaught]) if embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==1: embryo.fits[this_fit].k_opt=res_new[0] embryo.fits[this_fit].cnaught_opt=res_new[1] embryo.fits[this_fit].ynaught_opt=res_new[2] elif embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==0: embryo.fits[this_fit].k_opt=res_new[0] embryo.fits[this_fit].cnaught_opt=res_new[1] embryo.fits[this_fit].ynaught_opt=embryo.fits[this_fit].x0[2] elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==1: embryo.fits[this_fit].k_opt=res_new[0] embryo.fits[this_fit].cnaught_opt=embryo.fits[this_fit].x0[1] embryo.fits[this_fit].ynaught_opt=res_new[1] elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==0: embryo.fits[this_fit].k_opt=res_new[0] embryo.fits[this_fit].cnaught_opt=embryo.fits[this_fit].x0[1] embryo.fits[this_fit].ynaught_opt=embryo.fits[this_fit].x0[2] embryo.fits[this_fit].ssd=res[1] embryo.fits[this_fit].success=not bool(res[4]) embryo.fits[this_fit].iterations=res[2] embryo.fits[this_fit].fcalls=res[3] if embryo.fits[this_fit].model=="exp": embryo.fits[this_fit].halflife_s=log(2)/embryo.fits[this_fit].k_opt 
elif embryo.fits[this_fit].model=="power": embryo.fits[this_fit].halflife_s=((2**(embryo.fits[this_fit].npower-1)-1)*embryo.fits[this_fit].cnaught_opt**(1-embryo.fits[this_fit].npower))/(embryo.fits[this_fit].k_opt*(embryo.fits[this_fit].npower-1)) embryo.fits[this_fit].halflife_min=embryo.fits[this_fit].halflife_s/60 if embryo.fits[this_fit].fit_ext==1: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.ext_av_data_d,embryo.fits[this_fit].ssd) elif embryo.fits[this_fit].fit_int==1: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.int_av_data_d,embryo.fits[this_fit].ssd) else: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.slice_av_data_d,embryo.fits[this_fit].ssd) else: if embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==1: embryo.fits[this_fit].k_opt=res.x[0] embryo.fits[this_fit].cnaught_opt=res.x[1] embryo.fits[this_fit].ynaught_opt=res.x[2] elif embryo.fits[this_fit].fit_cnaught==1 and embryo.fits[this_fit].fit_ynaught==0: embryo.fits[this_fit].k_opt=res.x[0] embryo.fits[this_fit].cnaught_opt=res.x[1] embryo.fits[this_fit].ynaught_opt=embryo.fits[this_fit].x0[2] elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==1: embryo.fits[this_fit].k_opt=res.x[0] embryo.fits[this_fit].cnaught_opt=embryo.fits[this_fit].x0[1] embryo.fits[this_fit].ynaught_opt=res.x[1] elif embryo.fits[this_fit].fit_cnaught==0 and embryo.fits[this_fit].fit_ynaught==0: embryo.fits[this_fit].k_opt=res.x[0] embryo.fits[this_fit].cnaught_opt=embryo.fits[this_fit].x0[1] embryo.fits[this_fit].ynaught_opt=embryo.fits[this_fit].x0[2] embryo.fits[this_fit].ssd=res.fun embryo.fits[this_fit].success=res.success embryo.fits[this_fit].iterations=res.nit embryo.fits[this_fit].fcalls=res.nfev if embryo.fits[this_fit].model=="exp": embryo.fits[this_fit].halflife_s=log(2)/embryo.fits[this_fit].k_opt elif embryo.fits[this_fit].model=="power": embryo.fits[this_fit].halflife_s=((2**(embryo.fits[this_fit].npower-1)-1)*embryo.fits[this_fit].cnaught_opt**(1-embryo.fits[this_fit].npower))/(embryo.fits[this_fit].k_opt*(embryo.fits[this_fit].npower-1)) embryo.fits[this_fit].halflife_min=embryo.fits[this_fit].halflife_s/60 if embryo.fits[this_fit].fit_ext==1: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.ext_av_data_d,embryo.fits[this_fit].ssd) elif embryo.fits[this_fit].fit_int==1: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.int_av_data_d,embryo.fits[this_fit].ssd) else: embryo.fits[this_fit].Rsq=fit_Rsq(embryo.slice_av_data_d,embryo.fits[this_fit].ssd) return embryo
nobs = 200 x = np.arange(nobs * 3).reshape(nobs, -1) x = np.random.randn(nobs, 3) xk = np.array([1, 2, 3]) xk = np.array([1., 1., 1.]) #xk = np.zeros(3) beta = xk y = np.dot(x, beta) + 0.1 * np.random.randn(nobs) xk = np.dot(np.linalg.pinv(x), y) epsilon = 1e-6 args = (y, x) from scipy import optimize xfmin = optimize.fmin(fun2, (0, 0, 0), args) print(approx_fprime((1, 2, 3), fun, epsilon, x)) jac = approx_fprime(xk, fun1, epsilon, args) jacmin = approx_fprime(xk, fun1, -epsilon, args) #print(jac) print(jac.sum(0)) print('\nnp.dot(jac.T, jac)') print(np.dot(jac.T, jac)) print('\n2*np.dot(x.T, x)') print(2 * np.dot(x.T, x)) jac2 = (jac + jacmin) / 2. print(np.dot(jac2.T, jac2)) #he = approx_hess(xk,fun2,epsilon,*args) print(approx_hess_old(xk, fun2, 1e-3, args)) he = approx_hess_old(xk, fun2, None, args)
def speed_to_fly(self, climb_rate, explicit=False): if explicit: return fmin(self.calc_total_time, 1, args=[climb_rate], disp=False)[0] else: return np.polyval(self._stf_model, climb_rate)
def Wing_TE_Kink(self): # First thing is to converge the area... print(' ') print( 'Computing the Real Wing Planform...Wing_Ref_Swet = Wing_Real_Swet' ) print(' ') fmin(self.Converge_Wing_Area, 2.0) # Defining the lists... self.geo['Kink_wing']['tc'] = list() self.geo['Kink_wing']['t'] = list() self.geo['Kink_wing']['zu'] = list() self.geo['Kink_wing']['zl'] = list() pi = np.arccos(-1.0) tc = np.zeros(4) t = np.zeros(4) zu = np.zeros(4) zl = np.zeros(4) # Wing Thick and t/c tc[1] = self.geo['wing']['tc'][1] tc[2] = self.geo['wing']['tc'][2] tc[3] = self.geo['wing']['tc'][3] t[1] = tc[1] * float(self.geo['Kink_wing']['chords'][1]) t[2] = tc[2] * float(self.geo['Kink_wing']['chords'][2]) t[3] = tc[3] * float(self.geo['Kink_wing']['chords'][3]) t[0] = t[1] + ((float(self.geo['wing']['y'][0])-float(self.geo['wing']['y'][1])) / \ (float(self.geo['wing']['y'][1])-float(self.geo['wing']['y'][2]))) * \ (t[1]-t[2]) tc[0] = t[0] / float(self.geo['Kink_wing']['chords'][0]) # Zupper and Zlower zu[0] = 0.0 zu[3] = zu[0] - float(self.geo['wing']['y'][3])*np.tan(float(-self.geo['wing']['dihedral'])*pi/180) + \ (t[3]/2.0 - t[0]/2.0) zu[1] = zu[0] + (zu[3]-zu[0])* ( (float(self.geo['wing']['y'][1])-float(self.geo['wing']['y'][0])) / \ (float(self.geo['wing']['y'][3])-float(self.geo['wing']['y'][0])) ) zu[2] = zu[0] + (zu[3]-zu[0])* ( (float(self.geo['wing']['y'][2])-float(self.geo['wing']['y'][0])) / \ (float(self.geo['wing']['y'][3])-float(self.geo['wing']['y'][0])) ) zl[0] = zu[0] - t[0] zl[1] = zu[1] - t[1] zl[2] = zu[2] - t[2] zl[3] = zu[3] - t[3] z0 = zu[0] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) z1 = zu[1] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) z2 = zu[2] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) z3 = zu[3] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) z00 = zl[0] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) z11 = zl[1] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) z22 = zl[2] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) z33 = zl[3] + float( self.geo['wing']['yappex']) - (1.0 / 3.0) * (zu[0] + zl[0]) zu[0] = z0 zu[1] = z1 zu[2] = z2 zu[3] = z3 zl[0] = z00 zl[1] = z11 zl[2] = z22 zl[3] = z33 ###--- ...to List()...using a list is easier to later add new sections... for i in range(0, 4): self.geo['Kink_wing']['tc'].append(str(tc[i])) self.geo['Kink_wing']['t'].append(str(t[i])) self.geo['Kink_wing']['zu'].append(str(zu[i])) self.geo['Kink_wing']['zl'].append(str(zl[i])) # Notice that I have upper and lower sides... self.geo['Kink_wing']['swetin'] = (float(self.geo['Kink_wing']['chords'][1])+float(self.geo['Kink_wing']['chords'][2])) * \ (float(self.geo['wing']['y'][2])-float(self.geo['wing']['y'][1])) self.geo['Kink_wing']['swetout'] = (float(self.geo['Kink_wing']['chords'][3])+float(self.geo['Kink_wing']['chords'][2])) * \ (float(self.geo['wing']['y'][3])-float(self.geo['wing']['y'][2])) # I also have right and left wing... self.geo['Kink_wing']['swet'] = 2.0 * ( self.geo['Kink_wing']['swetin'] + self.geo['Kink_wing']['swetout'])
leg = ax1.legend(loc='best') #plt.ylim(-.1, .1) plt.show() if 'fit' in cal: def fitti(fac2, fitb, fix=0, blabla=0): repl_line( 'INEN', 2, '%12.6f%12.6f%12.6f%12.6f%12.6f\n' % (1.0, fac2, 0.0, 0.0, 0.0)) os.system(BINpath + 'DR2END_' + mpii + '.exe') lines_output = [line for line in open('OUTPUT')] for lnr in range(0, len(lines_output)): if lines_output[lnr].find( 'EIGENWERTE DES HAMILTONOPERATORS') >= 0: E_0 = lines_output[lnr + 3].split()[fix] return abs(float(E_0) + fitb) print('>>> commencing fit...') fac = 1.001 ft_lo = fmin(fitti, fac, args=(energy2fit, 0, 1), disp=False) res_lo = fitti(ft_lo[0], 0.0, 0, 0) print('L = %2.2f: %12.4f yields B(2)= %8.4f' % (lam, cloW * ft_lo[0], res_lo)) optLECs[la] = [cloW * ft_lo[0], 0.0] prep_pot_files_pdp(lam, cloW * ft_lo[0], 0., 0., 0., 0., 0., pots) if 'fit' in cal: print(optLECs)
# Always use Euler Angles for trim calculation
# Trim Parameter Vector (OptParam):
# 1 = Stabilator, rad
# 2 = Throttle, %
# 3 = Pitch Angle, rad
if TRIM >= 1:
    print('\nTRIM Stabilator, Thrust, and Pitch Angle')
    print('========================================')
    OptParam = np.zeros(3).reshape((3, 1))
    TrimHist = np.zeros(4).reshape((4, 1))
    InitParam = np.array([0.0369, 0.1892, 0.0986]).reshape((3, 1)).ravel()
    #(OptParam,J,ExitFlag,Output) = fmin(TrimCost,InitParam)
    InitParam = [0.0369, 0.1992, 0.0986]
    OptParam = fmin(TrimCost, InitParam, xtol=1e-10)
    print(f'OptParam = {OptParam}')
    J = TrimCost(OptParam)
    print(f'J = {J}')
    print(f'Trim Cost = {J}')
    ## Optimizing Trim Error Cost with respect to dSr, dT, and Theta
    Index = list(range(0, len(TrimHist[0])))  # row length of TrimHist
    TrimStabDeg = 57.2957795 * OptParam[0]
    TrimThrusPer = 100 * OptParam[1]
    TrimPitchDeg = 57.2957795 * OptParam[2]
    TrimAlphaDeg = TrimPitchDeg - gamma
    print(f'Stabilator = {TrimStabDeg} deg, Thrust = {TrimThrusPer} x 100%')
    print(
def calc_stf_model(self): distance = 1 lower_limit = 0.0001 climb_range = np.arange(lower_limit, 10.5, 1) stf_values = [fmin(self.calc_total_time, 1, args=(x, distance), disp=False)[0] for x in climb_range] self._stf_model = np.polyfit(climb_range, stf_values, 4)
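The pair `calc_stf_model` / `speed_to_fly` implements a surrogate-model pattern: run `fmin` once over a grid of climb rates, fit a degree-4 polynomial to the optima, and afterwards evaluate the cheap polynomial instead of re-optimizing. A self-contained toy sketch of that pattern follows; the `total_time` cost below is invented for illustration and is not the glider model used above.

import numpy as np
from scipy.optimize import fmin

def total_time(v, climb_rate, distance=1.0):
    # toy stand-in for calc_total_time; fmin passes v as a length-1 array
    v = np.atleast_1d(v)[0]
    return distance / v + (v - 2.0 * climb_rate - 1.0) ** 2

climb_range = np.arange(0.0001, 10.5, 1)
stf_values = [fmin(total_time, 1, args=(c, 1.0), disp=False)[0] for c in climb_range]
stf_model = np.polyfit(climb_range, stf_values, 4)

# cheap lookup via the fitted polynomial instead of another fmin call
print(np.polyval(stf_model, 3.0))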
mycube.readfile(args.filefrag2)
x, y, z = np.array(mycube.get_grid_xyz())
data = mycube.get_data()
print('Interpolating...' + args.filefrag2)
my_interpolating_function = RegularGridInterpolator((x, y, z), data, method='linear')
y2 = my_interpolating_function(pts)
print('Interpol 1D..linear.')
ydiff = (y2 - y1)**4
fdiff = interp1d(xpt, ydiff, kind='linear')
try:
    # minimize the interpolated difference; the starting point should be close to the final result
    isodensity_point = fmin(fdiff, args.initialseed)
    print('isodensity_point =', isodensity_point)
except ValueError:
    print('Oops, problem in the minimization')
if (args.axis == 'z'):
    isodensity_value = my_interpolating_function([0.0, 0.0, float(isodensity_point)])
    print('isodensity_value=', isodensity_value)
if (args.axis == 'y'):
    isodensity_value = my_interpolating_function([0.0, float(isodensity_point), 0.0])
    print('isodensity_value=', isodensity_value)
def stopping_criterion(self, xpts, ftilde, m):
    r = self.stopping_crit.order
    ftilde = ftilde.squeeze()
    n = 2**m
    success = False
    lna_range = [-5, 0]  # reduced from [-5, 5], to avoid kernel values getting too big causing error
    # search for optimal shape parameter
    if self.stopping_crit.one_theta == True:
        lna_MLE = fminbnd(
            lambda lna: self.objective_function(exp(lna), xpts, ftilde)[0],
            x1=lna_range[0],
            x2=lna_range[1],
            xtol=1e-2,
            disp=0)
        aMLE = exp(lna_MLE)
        _, vec_lambda, vec_lambda_ring, RKHS_norm = self.objective_function(
            aMLE, xpts, ftilde)
    else:
        if self.stopping_crit.use_gradient == True:
            pass
        else:
            # Nelder-Mead Simplex algorithm
            theta0 = np.ones((xpts.shape[1], 1)) * (0.05)
            theta0 = np.ones((1, xpts.shape[1])) * (0.05)
            lna_MLE = fmin(lambda lna: self.objective_function(
                exp(lna), xpts, ftilde)[0],
                           theta0,
                           xtol=1e-2,
                           disp=False)
        aMLE = exp(lna_MLE)
        # print(n, aMLE)
        _, vec_lambda, vec_lambda_ring, RKHS_norm = self.objective_function(
            aMLE, xpts, ftilde)

    # Check error criterion
    # compute DSC
    if self.errbd_type == 'full_Bayes':
        # full Bayes
        if self.avoid_cancel_error:
            DSC = abs(vec_lambda_ring[0] / n)
        else:
            DSC = abs((vec_lambda[0] / n) - 1)
        # 1-alpha two sided confidence interval
        err_bd = self.uncert * sqrt(DSC * RKHS_norm / (n - 1))
    elif self.errbd_type == 'GCV':
        # GCV based stopping criterion
        if self.avoid_cancel_error:
            DSC = abs(vec_lambda_ring[0] / (n + vec_lambda_ring[0]))
        else:
            DSC = abs(1 - (n / vec_lambda[0]))
        temp = vec_lambda
        temp[0] = n + vec_lambda_ring[0]
        mC_inv_trace = sum(1. / temp[temp != 0])
        err_bd = self.uncert * sqrt(DSC * RKHS_norm / mC_inv_trace)
    else:
        # empirical Bayes
        if self.avoid_cancel_error:
            DSC = abs(vec_lambda_ring[0] / (n + vec_lambda_ring[0]))
        else:
            DSC = abs(1 - (n / vec_lambda[0]))
        err_bd = self.uncert * sqrt(DSC * RKHS_norm / n)

    if self.arb_mean:  # zero mean case
        muhat = ftilde[0] / n
    else:  # non zero mean case
        muhat = ftilde[0] / vec_lambda[0]

    self.error_bound = err_bd
    muhat = np.abs(muhat)
    muminus = muhat - err_bd
    muplus = muhat + err_bd

    if 2 * err_bd <= max(self.abs_tol, self.rel_tol * abs(muminus)) + max(
            self.abs_tol, self.rel_tol * abs(muplus)):
        if err_bd == 0:
            err_bd = np.finfo(float).eps
        # stopping criterion achieved
        success = True

    return success, muhat, r, err_bd
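The shape-parameter search above works in log space: it minimizes the objective over `lna` in a bounded interval and maps back with `exp`. A toy sketch of that log-space pattern is below; the stand-in objective and its optimum at a = 0.2 are made up for illustration and are unrelated to `objective_function`.

import numpy as np
from scipy.optimize import fminbound

def toy_objective(a):
    # stand-in for the MLE objective; minimized at a = 0.2
    return (np.log(a) - np.log(0.2)) ** 2

# bounded 1-D search over log(a) in [-5, 0], then map back, mirroring the snippet above
lna_mle = fminbound(lambda lna: toy_objective(np.exp(lna)), -5, 0, xtol=1e-2, disp=0)
print(np.exp(lna_mle))  # approximately 0.2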
def find_fingers(im, features): """ Find the period and locations of the grid fingers """ row_profile = np.mean(im, axis=1) # if you smooth too much you lose fingers row_profile = ndimage.gaussian_filter1d(row_profile, sigma=0.5) row_profile /= row_profile.max() if False: view = ImageViewer(im) plt.figure() plt.plot(row_profile) plt.show() sys.exit() # find the local minimums grid_rows_mask = np.logical_and(row_profile < np.roll(row_profile, 1), row_profile < np.roll(row_profile, -1)) grid_rows_mask[[0, -1]] = False peaks = np.where(row_profile > 0.2)[0] start, stop = peaks[0], peaks[-1] # make sure we don't have any fingers inside the cell edges if 'cell_edge_tb' in features: start = max(features['cell_edge_tb'], start) stop = min(stop, im.shape[0] - features['cell_edge_tb']) grid_rows_mask[:start] = False grid_rows_mask[stop:] = False grid_rows = np.where(grid_rows_mask)[0] # compute the period local_mins = grid_rows[5:-5] if len(local_mins) == 0: raise CellFingersException period = (local_mins[-1] - local_mins[0]) / float(len(local_mins) - 1) # features['mask_grid_rows'] = grid_rows_mask features['_finger_row_nums'] = grid_rows features['finger_period'] = period features['_peak_row_nums'] = (grid_rows[:-1] + (period // 2)).astype( np.int32) if False: # finger spacing analysis grid_rows_interpolated = [] for i in range(1, len(grid_rows) - 1): x = grid_rows[i] xs = [x - 1, x, x + 1] ys = [row_profile[j] for j in xs] f = interpolate.interp1d(xs, ys, kind='quadratic', bounds_error=False, fill_value=max(ys)) grid_rows_interpolated.append(optimize.fmin(f, x, disp=False)[0]) if False: print grid_rows_interpolated[-1], f(grid_rows_interpolated[-1]) xnew = np.linspace(xs[0], xs[-1], num=100, endpoint=True) ynew = f(xnew) plt.figure() plt.plot(xs, ys) plt.plot(xnew, ynew) plt.plot(grid_rows_interpolated[-1], f(grid_rows_interpolated[-1]), 'o') plt.show() grid_rows_interpolated = np.array(grid_rows_interpolated) diffs = grid_rows_interpolated[1:] - grid_rows_interpolated[:-1] xs = grid_rows[1:-1][1:] - (period / 2.0) f = np.poly1d(np.polyfit(xs, diffs, deg=2)) xnew = np.linspace(xs[0], xs[-1], num=100, endpoint=True) plt.figure() plt.plot(xs, diffs) plt.plot(xnew, f(xnew)) plt.show() if False: print "Period: {}".format(features['finger_period']) ImageViewer(im) plt.figure() plt.plot(grid_rows, row_profile[grid_rows], 'o') plt.plot(features['_peak_row_nums'], row_profile[features['_peak_row_nums']], 'o') plt.plot(row_profile) # if False: plt.vlines([grid_rows[0]+(i*features['finger_period']) for i in range(len(grid_rows))], row_profile.min(), row_profile.max()) # else: plt.vlines(grid_rows_interpolated, row_profile.min(), row_profile.max()) plt.show() sys.exit()
We often need to find the maximum or minimum of a particular function $f(x)$, where $f$ is a scalar function but $x$ may be a vector. Typical applications are the minimization of quantities such as cost, risk and error, or the maximization of productivity, efficiency and profit. Optimization routines usually provide a method to minimize a given function: if we need to maximize $f(x)$, we create a new function $g(x)$ that reverses the sign of $f$, i.e. $g(x) = -f(x)$, and minimize $g(x)$.

Below we give an example showing (i) the definition of the test function and (ii) the call to `scipy.optimize.fmin`, which takes as arguments the function $f$ to be minimized and an initial value $x_0$ from which the search for the minimum starts, and returns the value of $x$ for which $f(x)$ is (locally) minimized. Normally the search for the minimum is a local search, i.e. the algorithm follows the local gradient. We repeat the search for two starting values ($x_0 = 1.0$ and $x_0 = 2.0$, respectively) to demonstrate that, depending on the starting value, we may find different minima of the function $f$. Most of the commands (after the two `fmin` calls) in the file `fmin1.py` create the plot of the function, the starting points of the searches and the minimum found:

from numpy import arange, cos, exp
from scipy.optimize import fmin
import pylab

def f(x):
    return cos(x) - 3 * exp(-(x - 0.2) ** 2)

# find minima of f(x),
# starting from 1.0 and 2.0 respectively
minimum1 = fmin(f, 1.0)
print("Search started at x=1., minimum is", minimum1)
minimum2 = fmin(f, 2.0)
print("Search started at x=2., minimum is", minimum2)

# plot the function
x = arange(-10, 10, 0.1)
y = f(x)
pylab.plot(x, y, label=r'$\cos(x)-3e^{-(x-0.2)^2}$')
pylab.xlabel('x')
pylab.grid()
pylab.axis([-5, 5, -2.2, 0.5])

# add minimum1 to the plot
pylab.plot(minimum1, f(minimum1), 'vr', label='minimum 1')
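The text above explains that a maximum of $f$ can be found by minimizing $g(x) = -f(x)$, but the example only performs minimization. A minimal sketch of that negation trick with the same test function follows; the starting value 0.0 is an arbitrary choice made here.

from numpy import cos, exp
from scipy.optimize import fmin

def f(x):
    return cos(x) - 3 * exp(-(x - 0.2) ** 2)

def g(x):
    # minimizing g is equivalent to maximizing f
    return -f(x)

maximum1 = fmin(g, 0.0)
print("A local maximum of f is at", maximum1, "where f =", f(maximum1))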
def fit_binned_mol(mol,pinned,plot=False,fit_cnaught=True): #Make molecule global so we can do stuff with it globals()['mol']=mol #Grab first fit as reference fit=mol.sel_fits[0] if fit.fit_ext==1 and fit.fit_slice==0 and fit.fit_int==0: region="ext" elif fit.fit_ext==0 and fit.fit_slice==1 and fit.fit_int==0: region="slice" elif fit.fit_ext==0 and fit.fit_slice==0 and fit.fit_int==1: region="int" #Get binned vectors and put it into molecule tvec_bin,r_bin=bin_tvec_data(mol,pinned,region,plot=plot) #Pasting into molecule object and compute errors mol.tvec_avg=mean_bin(tvec_bin) mol.tvec_errors=std_bin(tvec_bin) setattr(mol,region+"_avg",mean_bin(r_bin)) setattr(mol,region+"_err",std_bin(r_bin)) #Grab data and error for further use mol.data_av=getattr(mol,region+"_avg") mol.data_errors=getattr(mol,region+"_err") #Plot if selected if plot: fig=plt.figure() fig.show() ax=fig.add_subplot(111) ax.errorbar(mol.tvec_avg,mol.data_av,yerr=mol.data_errors,xerr=mol.tvec_errors,fmt='ro') plt.draw() raw_input() #Define x0 and other paramters mol.x0=[fit.k_opt,mol.data_av[0]-mol.data_av[-1],mol.data_av[-1]] mol.LB_k=0 mol.UB_k=None mol.LB_ynaught=0 mol.UB_ynaught=None mol.LB_cnaught=0 mol.UB_cnaught=None #Transform x0 mol.x0=transform_x0(mol.x0,[mol.LB_k,mol.LB_cnaught,mol.LB_ynaught],[mol.UB_k,mol.UB_cnaught,mol.UB_ynaught]) #Throw out x0 for cnaught x0=list(mol.x0) if not fit_cnaught: x0.pop(1) #Pass to optimization algorithm res=sopt.fmin(fit_simple_obj,x0,full_output=True) #Put results into molecule to be passed back res_new=xtransform(res[0],[mol.LB_k,mol.LB_cnaught,mol.LB_ynaught],[mol.UB_k,mol.UB_cnaught,mol.UB_ynaught]) if fit_cnaught: mol.k_opt_refit=res_new[0] mol.cnaught_opt_refit=res_new[1] mol.ynaught_opt_refit=res_new[2] else: mol.k_opt_refit=res_new[0] mol.ynaught_opt_refit=res_new[1] mol.ssd_refit=res[1] mol.success_refit=not bool(res[4]) mol.iterations_refit=res[2] mol.fcalls_refit=res[3] if plot: fig=plt.figure() fig.show() ax=fig.add_subplot(111) ax.errorbar(mol.tvec_avg,mol.data_av,yerr=mol.data_errors,xerr=mol.tvec_errors,fmt='ro') ax.plot(mol.tvec_avg,mol.fit_av,'b-') plt.draw() raw_input() return mol
def func_fit(x, y, fitting_function, p0, cost=least_squares): f = fitting_function p = fmin(cost, p0, args=(x, y, f), maxiter=10000, maxfun=10000) return p
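`func_fit` above forwards `(x, y, f)` to the cost via `args`, so the cost must have the signature `cost(p, x, y, f)` and return a scalar. The default name `least_squares` comes from the snippet, but its body is not shown there; the version below is an assumed sum-of-squared-residuals implementation, and the straight-line model and data are invented for the usage example.

import numpy as np
from scipy.optimize import fmin

def least_squares(p, x, y, f):
    # assumed cost: scalar sum of squared residuals of model f with parameters p
    return np.sum((y - f(x, *p)) ** 2)

def line(x, a, b):
    return a * x + b

x = np.linspace(0.0, 1.0, 50)
y = 2.0 * x + 1.0 + 0.05 * np.random.randn(50)
p = fmin(least_squares, [1.0, 0.0], args=(x, y, line), maxiter=10000, maxfun=10000)
# equivalently: p = func_fit(x, y, line, [1.0, 0.0])
print(p)  # roughly [2.0, 1.0]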
# Define the log-likelihood via the Huber loss function
def huber_loss(m, b, x, y, dy, c=2):
    y_fit = m * x + b
    t = abs((y - y_fit) / dy)
    flag = t > c
    return np.sum((~flag) * (0.5 * t**2) - (flag) * c * (0.5 * c - t), -1)

f_squared = lambda beta: squared_loss(beta[0], beta[1], x=x, y=y, dy=dy)
f_huber = lambda beta: huber_loss(beta[0], beta[1], x=x, y=y, dy=dy, c=1)

#------------------------------------------------------------
# compute the maximum likelihood using the huber loss
beta0 = (2, 30)
beta_squared = optimize.fmin(f_squared, beta0)
beta_huber = optimize.fmin(f_huber, beta0)

print(beta_squared)
print(beta_huber)

#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)

x_fit = np.linspace(0, 350, 10)
ax.errorbar(x, y, dy, fmt='.k', lw=1, ecolor='gray')
ax.plot(x_fit, beta_squared[0] * x_fit + beta_squared[1], '--k',
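For reference, the expression summed in `huber_loss` above is the standard Huber penalty applied to the scaled residual $t = |y - y_{\mathrm{fit}}|/dy$ with threshold $c$: $\rho_c(t) = \tfrac{1}{2}t^2$ for $t \le c$, and $\rho_c(t) = c\,(t - \tfrac{1}{2}c)$ for $t > c$. Large residuals are therefore penalized linearly rather than quadratically, which is what makes the Huber fit less sensitive to outliers than the squared-loss fit.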
def lomb(time, signal, error, f1, df, numf, ltau=2., lvar=-2.6, do_fit=True):
    """
    C version of lomb_scargle

    Inputs:
        time: time vector
        signal: data vector
        error: data uncertainty vector
        df: frequency step
        numf: number of frequencies to consider
        ltau,lvar: DRW model parameters, initial guesses if do_fit=True

    Output:
        psd: power spectrum on frequency grid: f1,f1+df,...,f1+numf*df
    """
    numt = len(time)

    dt = abs(time[1:] - time[:-1])
    dtm = log10(dt.min())
    maxt = log10(time.max() - time.min())

    wth = (1. / error).astype('float64')
    s0 = dot(wth, wth)
    wth /= sqrt(s0)

    cn = (signal * wth).astype('float64')
    cn -= dot(cn, wth) * wth

    if (do_fit):

        def fit_fun(par):
            par[0] = par[0].clip(-6., 2)
            par[1] = par[1].clip(dtm - 1, maxt + 1)
            result = qso_engine(time, signal, error, lvar=par[0], ltau=par[1])
            chi = (result['chi2_qso/nu'] + result['chi2_qso/nu_extra']) * result['nu']
            return chi

        rs = fmin(fit_fun, [lvar, ltau], disp=0)
        lvar, ltau = rs[0], rs[1]
        #print ("Noise parameters: lvar=%.3f ltau=%.3f") % (lvar,ltau)

    # sparse matrix form: ab[u + i - j, j] == a[i,j]  i<=j, (here u=1)
    T = zeros((2, numt), dtype='float64')
    arg = dt * exp(-log(10) * ltau)
    ri = exp(-arg)
    ei = 1. / (1. / ri - ri)
    T[0, 1:] = -ei
    T[1, :-1] = 1. + ri * ei
    T[1, 1:] += ri * ei
    T[1, numt - 1] += 1.
    T0 = median(T[1, :])
    T /= T0

    lvar0 = log10(0.5) + lvar + ltau
    fac = exp(log(10) * lvar0) * s0 / T0
    Tp = 1. * T
    Tp[1, :] += wth * wth * fac

    Tpi = solveh_banded(Tp, identity(numt))
    #
    # CI[i,j] = T[1+i-k,k] Tpi[k,j] (k>=i), k=i is diagonal
    # CI[i,j] = T[1,i] * Tpi[i,j] + T[0,i+1]*Tpi[i+1,j] + T[0,i]*Tpi[i-1,j]
    CI = empty((numt, numt), dtype='float64')
    CI[0, :] = T[1, 0] * Tpi[0, :] + T[0, 1] * Tpi[1, :]
    CI[numt - 1, :] = T[1, numt - 1] * Tpi[numt - 1, :] + T[0, numt - 1] * Tpi[numt - 2, :]
    for i in xrange(numt - 2):
        CI[i + 1, :] = T[1, i + 1] * Tpi[i + 1, :] + T[0, i + 2] * Tpi[i + 2, :] + T[0, i + 1] * Tpi[i, :]

    # cholesky factorization m0[i,j] (j>=i elements non-zero)  dot(m0.T,m0) = CI
    CI = dot(1. / wth * identity(numt), dot(CI, wth * identity(numt)))
    m0 = cholesky(CI, lower=False)
    #v = dot(dot(m0.T,m0),wth*wth*identity(numt))
    #print (v[:,20]/v[20,20])

    wth1 = dot(m0, wth)
    s0 = dot(wth1, wth1)
    wth1 /= sqrt(s0)

    cn = dot(m0, cn)
    cn -= dot(cn, wth1) * wth1

    tt = 2 * pi * time.astype('float64')
    sinx, cosx = sin(tt * f1) * wth, cos(tt * f1) * wth
    wpi = sin(df * tt)
    wpr = sin(0.5 * df * tt)
    wpr = -2. * wpr * wpr
    psd = empty(numf, dtype='float64')
    vcn = var(cn, ddof=1)

    lomb_scargle_support = """
    inline void update_sincos (int numt, double wpi[], double wpr[], double sinx[], double cosx[]) {
        double tmp;
        for (int i=0;i<numt;i++) {
            sinx[i] = (wpr[i]*(tmp=sinx[i]) + wpi[i]*cosx[i]) + sinx[i];
            cosx[i] = (wpr[i]*cosx[i] - wpi[i]*tmp) + cosx[i];
        }
    }

    inline double lomb_scargle(int numt, double cn[], double sinx[], double cosx[], double st, double ct, double cst) {
        double cs=0.,s2=0.,c2=0.,sh=0.,ch=0.,px=0.,detm;

        for (int i=0;i<numt;i++) {
            cs += cosx[i]*sinx[i];
            s2 += sinx[i]*sinx[i];
            c2 += cosx[i]*cosx[i];
            sh += sinx[i]*cn[i];
            ch += cosx[i]*cn[i];
        }

        cs -= cst; s2 -= st; c2 -= ct;
        detm = c2*s2 - cs*cs;
        if (detm>0) px = ( c2*sh*sh - 2.*cs*ch*sh + s2*ch*ch ) / detm;

        return px;
    }

    inline void calc_dotprod(int numt, double sinx[], double cosx[], double wt[], double *st, double *ct, double *cst) {
        double a=0,b=0;
        for (int i=0;i<numt;i++) {
            a += sinx[i]*wt[i];
            b += cosx[i]*wt[i];
        }
        *st = a*a; *ct = b*b; *cst =a*b;
    }

    inline void dered_sincos(int numt, double sinx[], double cosx[], double sinx1[], double cosx1[], double m0[]) {
        int i,k;
        unsigned long inumt;
        double tmpa,tmpb,tmpc,tmpc0,s1,s2;
        for (i=0;i<numt;i++) {
            tmpc0 = m0[i+i*numt];
            s1 = tmpc0*(tmpa=sinx[i]);
            s2 = tmpc0*(tmpb=cosx[i]);
            inumt = i*numt;
            for (k=i+1;k<numt;k++) {
                tmpc=m0[k+inumt];
                if (fabs(tmpc)<tmpc0*1.e-3) break;
                s1 += tmpc*tmpa;
                s2 += tmpc*tmpb;
            }
            sinx1[i] = s1;
            cosx1[i] = s2;
        }
    }
    """

    lomb_code = """
    double sinx1[numt],cosx1[numt],ct,st,cst;

    for (unsigned long j=0;j<numf;j++) {
        dered_sincos(numt,sinx,cosx,sinx1,cosx1,m0);
        calc_dotprod(numt,sinx1,cosx1,wth1,&st,&ct,&cst);
        psd[j] = lomb_scargle(numt,cn,sinx1,cosx1,st,ct,cst);
        update_sincos (numt, wpi, wpr, sinx, cosx);
    }
    """

    weave.inline(lomb_code, ['cn','numt','numf','psd','wpi','wpr','sinx','cosx','m0','wth1'],\
        support_code = lomb_scargle_support,force=0)

    return 0.5 * psd / vcn, lvar, ltau, vcn
from scipy import optimize def f1(x, a, b): return x[0]**2 - a * x[0] + x[1]**2 - b * x[1] x0 = [0, 0] a = 2 b = 4 xopt = optimize.fmin(f1, x0, xtol=1e-8, args=( a, b, )) print(xopt)
def joint_problem(r_max, r_min, bailout_cost, deadweight_cost, discount_rate, nr_of_periods): # Joint problem: after each stage Bank promises pi <= pi_state, but recalculates it's value opt_pi_bank_joint = [0] * nr_of_periods opt_pi_state_joint = [0] * nr_of_periods v_bank_joint = [0] * nr_of_periods v_state_joint = [0] * nr_of_periods # n=0 f_opt_bank_0 = lambda x: -x * (r_min + (r_max - r_min) * (1 - x)) / (1 - discount_rate * x) f_state_0 = lambda x: math.pow( -deadweight_cost + bailout_cost + discount_rate * deadweight_cost * (1 - x) / (1 - discount_rate * x), 2) opt_pi_bank_joint[0] = optimize.fmin( f_opt_bank_0, 0.5) # optimization using: Simplex method: the Nelder-Mead opt_pi_state_joint[0] = optimize.root(f_state_0, 0.5, method='lm').x # Bank "promises" the threshold pi in order to be saved at period n=1 even it it's optimal pi is lower: if opt_pi_bank_joint[0] < opt_pi_state_joint[0]: opt_pi_bank_joint[0] = opt_pi_state_joint[0] v_bank_joint[0] = -f_opt_bank_0(opt_pi_bank_joint[0]) v_state_joint[0] = deadweight_cost * (1 - opt_pi_bank_joint[0]) / ( 1 - discount_rate * opt_pi_bank_joint[0]) # Previous periods: f_bank = lambda x, y: -((x * (r_min + (r_max - r_min) * (1 - x)) + (1 - x) * (bailout_cost + discount_rate * y)) / (1 - discount_rate * x)) f_state = lambda x,y: math.pow(- deadweight_cost + bailout_cost + discount_rate * (bailout_cost + discount_rate * y) * \ (1 - x) / (1 - discount_rate * x), 2) # Options = optimset('TolFun', 1e-15): for i in range(1, nr_of_periods): f_opt_bank = lambda x: f_bank(x, v_bank_joint[i - 1]) f_state_opt = lambda x: f_state(x, v_state_joint[i - 1]) opt_pi_bank_joint[i] = optimize.fmin(f_opt_bank, 0.7) opt_pi_state_joint[i] = optimize.root(f_state_opt, 0.5, method='lm').x # BANKS's PROBABILITES CHANGED IF THEY ALE LOWER THAN PI_STATE AND THEN THEY ARE TAKEN FOR V's: if opt_pi_bank_joint[i] < opt_pi_state_joint[i]: opt_pi_bank_joint[i] = opt_pi_state_joint[i] v_bank_joint[i] = -f_opt_bank(opt_pi_bank_joint[i]) v_state_joint[i] = (bailout_cost + discount_rate * v_state_joint[i-1]) * (1 - opt_pi_bank_joint[i]) /\ (1 - discount_rate * opt_pi_bank_joint[i]) nr_per = [0] * nr_of_periods for i in range(0, nr_of_periods): nr_per[i] = i # Create plots and pdf file. Save pdf file into graphics folder fig8, ax = pyplot.subplots() ax.plot(nr_per, flipud(opt_pi_bank_joint), label='Bank', color='red', linestyle=':') ax.plot(nr_per, flipud(opt_pi_state_joint), label='State', color='blue', linestyle='--') ax.set_xlabel('Stage of the Game') ax.set_ylabel('Probability') ax.set_title('Optimal probabilities in joint game') ax.legend(loc='lower left', frameon=True, fontsize='medium', title='', fancybox=True) html_text_fig8 = mpld3.fig_to_html(fig8) pyplot.savefig( 'tbtf/modelfinitebounded/graphics/FB_Optimal_Probabilities_in_Joint_Game.pdf', format='pdf') pyplot.close(fig8) fig9, bx = pyplot.subplots() bx.plot(nr_per, flipud(v_state_joint), color='red') bx.set_title('State value in the joint game') bx.legend(loc='lower left', frameon=True, fontsize='medium', title='', fancybox=True) html_text_fig9 = mpld3.fig_to_html(fig9) pyplot.savefig( 'tbtf/modelfinitebounded/graphics/FB_State_Value_in_the_Joint_Game.pdf', format='pdf') pyplot.close(fig9) return html_text_fig8, html_text_fig9
# Simulate with shocks # Simulate with no shocks to find SS ndays = 100000 T = ndays*q # number of periods to simulate #epshist = sig*np.random.normal(0., 1., T+1) epshist = sig*pkl.load(open('epshist.pkl', 'rb')) inparams =np.array([xi, sig]) extraparams = (q, nu_S, nu_W, kappa, muW, muS, lambd, chiS, eta, mu, \ gamma, yvect, rho) f = lambda inparams: SMM(inparams, T, epshist, extraparams) from scipy.optimize import fmin soln, junk = fmin(f, inparams, xtol=0.001, ftol=0.001, retall=1) # soln, junk = fmin_powell(f, soln, xtol=0.001, ftol=0.001, retall=1) xi = soln[0] sig = soln[1] mparams = (q, nu_S, nu_W, kappa, muW, muS, lambd, chiS, eta, xi, mu, \ gamma, yvect, rho, sig) Hhist, yhist, Ahist, Chist, bhist, Uhist, zhist = runsim(T, epshist, mparams) HrsSlept = np.zeros(ndays) for d in range(0,ndays): HrsSlept[d] = np.sum(Ahist[d:d+q-1])/pph HrsMean = np.mean(HrsSlept)
def banks_problem_analytical_results(r_max, r_min, bailout_cost, discount_rate, nr_of_periods, R): # Bank's problem: # bank's problem at the last stage: max(pi) pi*R(pi)/(1-delta*pi) f_opt_bank_0 = lambda x: -x * (r_min + (r_max - r_min) * (1 - x)) / (1 - discount_rate * x) # Analytical results: # optimal value for period 0 opt_pi_bank = [0] * nr_of_periods v_bank = [0] * nr_of_periods opt_pi_bank[0] = optimize.fmin( f_opt_bank_0, 0.5) # optimization using: Simplex method: the Nelder-Mead v_bank[0] = -f_opt_bank_0(opt_pi_bank[0]) # bank problem in the rest of the periods, eq.(28) f_bank = lambda x, y: -((x * (r_min + (r_max - r_min) * (1 - x)) + (1 - x) * (bailout_cost + discount_rate * y)) / (1 - discount_rate * x)) for k in range(1, nr_of_periods): f_opt_bank_0 = lambda x: f_bank(x, v_bank[k - 1]) opt_pi_bank[k] = optimize.fmin(f_opt_bank_0, 0.7) v_bank[k] = -f_opt_bank_0(opt_pi_bank[k]) # Analytical results: # check that analytical solutions coinside with optimal values: pi_bank_analytical = [0] * nr_of_periods v_bank_analytical = [0] * nr_of_periods temp_v_vheck = [0] * nr_of_periods pi_bank_analytical[0] = (1 - math.sqrt(1 - r_max * discount_rate / (r_max - r_min))) / discount_rate v_bank_analytical[0] = pi_bank_analytical[0] * R( pi_bank_analytical[0]) / (1 - discount_rate * pi_bank_analytical[0]) for i in range(1, nr_of_periods): pi_bank_analytical[i] = (1 - math.sqrt( 1 - discount_rate * (r_max - (bailout_cost + discount_rate * v_bank_analytical[i - 1]) * (1 - discount_rate)) / (r_max - r_min))) / discount_rate v_bank_analytical[i] = ( pi_bank_analytical[i] * R(pi_bank_analytical[i]) + (1 - pi_bank_analytical[i]) * (bailout_cost + discount_rate * v_bank_analytical[i - 1])) / ( 1 - discount_rate * pi_bank_analytical[i]) nr_per = [0] * nr_of_periods for i in range(0, nr_of_periods): nr_per[i] = i # Create plots and pdf file. Save pdf file into graphics folder fig1, ax = pyplot.subplots() ax.plot(nr_per, flipud(opt_pi_bank), label='Machine Results', color='red', linestyle=':') ax.plot(nr_per, flipud(pi_bank_analytical), label='Analytical Results', color='blue', linestyle='--') ax.set_xlabel('Number of Periods') ax.set_ylabel('Probabilities') ax.set_title('Optimal probabilities for the bank') ax.legend(loc='upper left', frameon=0, fontsize='medium', title='', fancybox=True) html_text_fig1 = mpld3.fig_to_html(fig1) pyplot.savefig( 'tbtf/modelfinitebounded/graphics/FB_Optimal_Probabilities_for_the_Bank.pdf', format='pdf') pyplot.close(fig1) return html_text_fig1, opt_pi_bank, pi_bank_analytical
yerr = 2 * yerr / (max(y) - min(y))
y = 2 * y / (max(y) - min(y))
y = y - np.median(y)

# # generate fake data
# pars = [1., 0., .5, .5, 3]
# yerr = np.ones_like(y)*0.01
# y = simple_s_data(x, yerr, pars)

# # median normalise
# yerr /= np.median(y)
# y = y/np.median(y) -1

theta_init = [0.1, .3, .6, .4, 6.5]  # better parameterisation

# optimise hyperparameters to find the right ball-park
print("optimising..")
ml_theta = so.fmin(neglnlike, theta_init, args=(x, y, yerr))
print('max likelihood theta = ', ml_theta)

# initial hyperparameters (logarithmic)
# A, P, l2 (sin), l1 (exp)
# theta = pars # same as pars
# theta = [0., 0, .5, .5, 0.]  # initial try
theta = [0., .2, .5, .5, 6.2]  # better parameterisation
theta = ml_theta

# plot data
pl.clf()
pl.errorbar(x, y, yerr=yerr, fmt='k.')
xs = np.linspace(min(x), max(x), 500)
pl.plot(xs, predict(xs, x, y, yerr, theta)[0], 'r-')
pl.xlabel('time (days)')