def light_params(im, pts):
    im_cut = im[pts[1]:pts[1] + 31, pts[0]:pts[0] + 31]
    h, w = im_cut.shape
    y, x = np.where(im_cut <= 250)
    datapts = np.vstack((x, y)).T
    data = im_cut[datapts[:, 1], datapts[:, 0]]
    args = [data, x, y]
    x0 = [1000., 0.02, 0.02, 0, 20, 16, 16]  # initial guess
    k = optimize.fmin_powell(chisq, x0, args)
    min_fval = chisq(list(k), *args)
    min_fval_params = k
    niter = 3
    seed = 314
    np.random.seed(seed)
    # sol = np.array(k)
    # restart from randomly perturbed starting points and keep the best fit
    for i in np.arange(niter):
        x0 = k * (0.6 * np.random.rand() + 0.7)
        k = optimize.fmin_powell(chisq, x0, args=[data, x, y])
        if min_fval > chisq(list(k), *args):
            min_fval = chisq(list(k), *args)
            min_fval_params = k
            # sol = np.vstack([sol, k])
    return min_fval_params
def __init__(self, ifit, iom0, isig, p0, valueAtUnity=0.05):
    """Find the fitting parameters."""
    # fetch the Matsubara axis
    xfit = iom0[0:ifit]
    # imaginary part of the self-energy function
    yfit_i = array(isig)[0:ifit, 1]
    # real part of the self-energy function
    yfit_r = array(isig)[0:ifit, 0]
    # fit the real part and the imaginary part separately
    self.expan_i = optimize.fmin_powell(fparab, [0, 0, 0], args=(xfit, yfit_i), disp=0)
    self.expan_r = optimize.fmin_powell(fparab, [0, 0, 0], args=(xfit, yfit_r), disp=0)
    self.expan = self.expan_r.tolist() + self.expan_i.tolist()
    # fitting parameters
    self.a0 = abs(self.expan_i[0])
    self.a1 = self.expan_r[1]
    self.a2 = abs(self.expan_i[2])
    # the parabola would otherwise become negative at some point
    if abs(self.a1) > sqrt(2 * self.a0 * self.a2):
        self.a1 = sqrt(2 * self.a0 * self.a2) * sign(self.a1)
    # the function should be smaller than valueAtUnity at x = 1.0
    p0n = ((self.a0 + self.a1 + 0.5 * self.a2) / valueAtUnity - 1) * (2 / self.a2)**4
    if p0 > p0n:
        self.p0 = p0
    else:
        self.p0 = p0n
    # normalization of the function
    self.pf = sqrt(2.) * self.a2**2 * self.p0**(3 / 4.) / (2 * pi * (2 + self.a0 * self.a2 * sqrt(self.p0)))
def _partial_optimize(self, optimize_nodes, evaluate_nodes, fall_to_simplex):
    """Optimize part of the model.

    :Arguments:
        optimize_nodes : iterable
            List of nodes to optimize.
    """
    non_observeds = filter(lambda x: not x.observed, optimize_nodes)
    init_vals = [node.value for node in non_observeds]

    # define the function to be optimized
    def opt(values):
        for value, node in zip(values, optimize_nodes):
            node.value = value
        try:
            logp_optimize = [node.logp for node in optimize_nodes]
            logp_evaluate = [node.logp for node in evaluate_nodes]
            return -np.sum(logp_optimize) - np.sum(logp_evaluate)
        except pm.ZeroProbability:
            return np.inf

    # optimize
    try:
        fmin_powell(opt, init_vals)
    except Exception as e:
        if fall_to_simplex:
            print("Warning: Powell optimization failed. Falling back to simplex.")
            fmin(opt, init_vals)
        else:
            raise e
def light_params(im, pts):
    params = []
    for idx in np.arange(len(pts)):
        im_cut = im[pts[idx][1]:pts[idx][1] + 31, pts[idx][0]:pts[idx][0] + 31]
        h, w = im_cut.shape
        y, x = np.where(im_cut <= 250)
        datapts = np.vstack((x, y)).T
        data = im_cut[datapts[:, 1], datapts[:, 0]]
        x0 = [1000., 0.02, 0.02, 0, 20, 16, 16]  # initial guess
        args = [data, x, y]
        k = optimize.fmin_powell(chisq, x0, args)
        min_fval = chisq(list(k), *args)
        min_fval_params = k
        niter = 3
        seed = 314
        np.random.seed(seed)
        for i in np.arange(niter):
            x0 = k * (0.6 * np.random.rand() + 0.7)
            k = optimize.fmin_powell(chisq, x0, args=[data, x, y])
            if min_fval > chisq(list(k), *args):
                min_fval = chisq(list(k), *args)
                min_fval_params = k
        params.append(min_fval_params)
    return params
def fit_doms_imgs(doms, imgs, aspect=1):
    def cost(v):
        v = np.concatenate((v, [aspect]))
        projs = fwd7(v)(doms[:, 0] + 1j * doms[:, 1])
        err = np.abs(imgs[:, 0] + 1j * imgs[:, 1] - projs)
        return (err**2).sum()

    def cost2(v):
        projs = fwd7(v)(doms[:, 0] + 1j * doms[:, 1])
        err = np.abs(imgs[:, 0] + 1j * imgs[:, 1] - projs)
        return (err**2).sum()

    v0 = np.array([-1, -1, 1, 0, 1, 0, 1])
    # Seems like it needs to fit without the aspect ratio first.  Maybe.
    # But if we fit without the aspect ratio first, then we have to be sure
    # that at least the sign is correct (i.e. dom and img are both
    # right-handed coordinate systems).
    # With 3 nodes, there can be multiple exact answers.
    # With 4 nodes, it came close but wasn't exact.
    print("Starting f: ", cost(v0[:6]))
    vopt = fmin_powell(cost, v0[:6])
    print("midway f: ", cost(vopt))
    vopt = np.concatenate((vopt, [aspect]))
    vopt = fmin_powell(cost2, vopt)
    print("final f: ", cost2(vopt))
    print("optimized parameters: ", vopt)
    return vopt
def update_single_shift(args):
    """Update a single shift.

    Takes a single tuple (Python 3 drops the tuple-parameter unpacking the
    original used), so it stays usable with map-style parallel calls.
    """
    p0, psf_model, datum, dq, ref_shift, parms = args
    # fmin or fmin_powell seems to perform better than fmin_bfgs or
    # fmin_l_bfgs_b.  Powell seems to be as good as fmin, and quicker.
    res = fmin_powell(shift_loss, p0, full_output=True, disp=False,
                      args=(psf_model, datum, dq, ref_shift, parms))

    # if it hits the shift min/max
    shift = res[0].copy()
    ind = np.abs(shift) > parms.shift_test_thresh
    if np.any(ind):
        # flip the sign to see if it likes the other side
        new_p0 = shift
        new_p0[ind] *= -1.
        new = fmin_powell(shift_loss, new_p0, full_output=True, disp=False,
                          args=(psf_model, datum, dq, np.zeros((1, 2)), parms))
        if new[1] < res[1]:
            ref_shift = np.array([new[0].copy()])  # in case maxiter > 1
            return new
        else:
            return res
    return res
def make_and_fit_one_model(model, log2NK, log2K, iteration):
    log2N = log2NK - log2K
    dirname = "./{:02d}".format(iteration)
    mkdir_p(dirname)
    prefix = "{:02d}_{:02d}_{:02d}".format(log2N, log2K, iteration)
    picklefn = dirname + "/model_" + prefix + ".pkl"
    if log2N < 2:
        print(prefix, "is an absurd case; skipping")
        return None  # don't do absurd cases
    if len(glob.glob(picklefn)) > 0:
        print(prefix, "already exists; skipping")
        return None  # noclobber
    print("starting run", prefix)
    # lay down a "lock file"
    pickle_to_file(picklefn, 0.)
    # make fake data
    if iteration == 0:
        np.random.seed(23)  # for testing purposes: identical P_n
    else:
        np.random.seed(3**log2N + 5**log2K + 2**iteration)  # repeatability
    data = make_fake_data(N=2**log2N, K=2**log2K)
    model.set_data(data)
    # initialize empirically
    empvar = np.mean(data * data)
    x0 = np.log(1. / np.array([1.1 * empvar, empvar, 0.9 * empvar]))
    x0 = np.sort(x0)
    # optimize
    direc = np.array([[1., 1., 1.], [1., 0., -1.], [-1., 2., -1.]]) / 10.

    def bar(x):
        print(prefix, x, np.exp(-x))

    x1 = op.fmin_powell(model, x0, callback=bar, direc=direc, xtol=1.e-3, ftol=1.e-5)
    x1 = np.sort(x1)
    x2 = op.fmin_powell(model, x1, callback=bar, direc=direc, xtol=1.e-5, ftol=1.e-5)
    x2 = np.sort(x2)
    # check the size of the P sampling
    sixf = np.zeros(6)
    sixf[0] = model(x2[[0, 1, 2]])
    sixf[1] = model(x2[[1, 2, 0]])
    sixf[2] = model(x2[[2, 0, 1]])
    sixf[3] = model(x2[[2, 1, 0]])
    sixf[4] = model(x2[[1, 0, 2]])
    sixf[5] = model(x2[[0, 2, 1]])
    # save
    model.set_ivar_from_vector(x2)  # restore the final answer
    pickle_to_file(picklefn, (model, x0, x1, x2, sixf))
    print(prefix, "start", x0, np.exp(-x0), model(x0))
    print(prefix, "middle", x1, np.exp(-x1), model(x1))
    print(prefix, "end", x2, np.exp(-x2), model(x2))
    print(prefix, "badness of the sampling:", np.std(sixf) / np.sqrt(2**log2N))
    return None
def powell(x0, f, f_prime, hessian=None):
    all_x_i = [x0[0]]
    all_y_i = [x0[1]]
    all_f_i = [f(x0)]

    def store(X):
        x, y = X
        all_x_i.append(x)
        all_y_i.append(y)
        all_f_i.append(f(X))

    optimize.fmin_powell(f, x0, callback=store, ftol=1e-12)
    return all_x_i, all_y_i, all_f_i
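# A minimal usage sketch (added here, not from the original source): the
# tracker above records every Powell iterate through its callback, so a
# simple 2-D quadratic is enough to exercise it.  `quadratic` is a
# hypothetical test function; numpy and scipy.optimize are assumed to be
# imported as in the snippet above.
import numpy as np
from scipy import optimize

def quadratic(X):
    x, y = X
    return (x - 1.0)**2 + 10.0 * (y + 0.5)**2

xs, ys, fs = powell(np.array([3.0, 2.0]), quadratic, None)
print("iterates:", list(zip(xs, ys)))  # path taken by Powell's method
print("final f:", fs[-1])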
def simulation_experiment():
    data, bvals, bvecs = get_data('101_32')
    S, sticks = simulations_dipy(bvals, bvecs, angles=[(0, 0)], fractions=[0])
    xopt = fmin_powell(call, [10], (S, 100, bvals, bvecs))
    print(np.round(xopt, 2))
    S, sticks = simulations_dipy(bvals, bvecs, angles=[(90, 0)], fractions=[60])
    xopt = fmin_powell(call, [0, 90, 0, 20], (S, 100, bvals, bvecs))
    print(np.round(xopt, 2))
    S, sticks = simulations_dipy(bvals, bvecs, angles=[(0, 0), (90, 0)],
                                 fractions=[50, 50])
    xopt = fmin_powell(call, [20, 10, 10, 90, 0, 20, 30], (S, 100, bvals, bvecs))
    print(np.round(xopt, 2))
    S, sticks = simulations_dipy(bvals, bvecs, angles=[(0, 0), (90, 0), (90, 90)],
                                 fractions=[33, 33, 33])
    xopt = fmin_powell(call, [20, 10, 0, 90, 0, 45, 50, 20, 30, 20],
                       (S, 100, bvals, bvecs))
    print(np.round(xopt, 2))
def fit(self, y_pred, y_true):
    self._data = np.c_[y_pred, y_pred, y_true[None].T]
    if self._all_fit:
        self._offset = fmin_powell(self._offset_qwk_score, self._offset)
    else:
        for j in range(9):
            flg = self._data[:, 0].astype(int) == j
            self._data[flg, 1] = self._data[flg, 0] + self._offset[j]
        for j in range(9):
            train_func = lambda x: self._score_offset(x, j)
            self._offset[j] = fmin_powell(train_func, self._offset[j])
    print(self._offset)
    return self
def register(A, B, guess, metric='sd', binA=None, binB=None,
             xtol=0.1, ftol=0.01, order=0, map_type='affine2d'):
    """Register source A to target B using a modified Powell's method.

    Powell's method tries to minimize the objective function.
    """
    if metric == 'mi':
        finalT = fmin_powell(objective_mi, x0=guess,
                             args=(A, B, binA, binB, order, map_type),
                             xtol=xtol, ftol=ftol)
        # finalT = leastsq(func=objective_mi, x0=np.array(guess),
        #                  args=(A, B, binA, binB, order, map_type))
    if metric == 'sd':
        finalT = fmin_powell(objective_sd, x0=guess,
                             args=(A, B, order, map_type),
                             xtol=xtol, ftol=ftol)
        # finalT = leastsq(func=objective_sd, x0=np.array(guess),
        #                  args=(A, B, order, map_type))
    return finalT
def PRFfits(args):
    # start time
    proctime = time.time()

    # extract the image from the time series
    xdim = shape(args[6])[1]
    ydim = shape(args[6])[0]
    DATimg = numpy.empty((ydim, xdim))
    DATerr = numpy.empty((ydim, xdim))
    n = 0
    for i in range(ydim):
        for j in range(xdim):
            DATimg[i, j] = args[0][n]
            DATerr[i, j] = args[1][n]
            n += 1

    # minimize the data and model
    if args[14] and args[15]:
        argm = (args[2], args[3], DATimg, DATerr, args[4], args[5], args[6],
                args[7], args[10], args[18], args[19])
        ans = fmin_powell(kepfunc.PRFwithFocusAndBackground, args[11],
                          args=argm, xtol=args[12], ftol=args[13], disp=False)
    elif args[14] and not args[15]:
        argm = (args[2], args[3], DATimg, DATerr, args[4], args[10],
                args[18], args[19])
        ans = fmin_powell(kepfunc.PRFwithFocus, args[11], args=argm,
                          xtol=args[12], ftol=args[13], disp=False)
    elif args[15] and not args[14]:
        argm = (args[2], args[3], DATimg, DATerr, args[4], args[5], args[6],
                args[7], args[10], args[18], args[19])
        ans = fmin_powell(kepfunc.PRFwithBackground, args[11], args=argm,
                          xtol=args[12], ftol=args[13], disp=False)
    else:
        argm = (args[2], args[3], DATimg, DATerr, args[4], args[10],
                args[18], args[19])
        ans = fmin_powell(kepfunc.PRF, args[11], args=argm,
                          xtol=args[12], ftol=args[13], disp=False)

    # print progress
    if args[20]:
        txt = '\r%3d%% ' % ((float(args[16]) + 1.0) / float(args[17]) * 100.0)
        txt += 'nrow = %d ' % (args[16] + 1)
        txt += 't = %.1f sec' % (time.time() - proctime)
        txt += ' ' * 5
        sys.stdout.write(txt)
        sys.stdout.flush()
    return ans
def call_fmin(self, xtol, ftol, steps):
    opt.fmin_powell(
        self.f,
        self.x0(),
        # args=(),
        xtol=xtol,
        ftol=ftol,
        maxiter=steps,
        # maxfun=None,
        # full_output=1,
        disp=0,
        # retall=0,
        callback=self.callback,
        direc=self.direc,
    )
def testMaxent(self):
    k = 2
    data = csb.io.load(self.data_fn)
    model = MaxentModel(k)
    model.sample_weights()
    posterior = MaxentPosterior(model, data[:100000] / 180. * numpy.pi)
    model.get() * 1.
    x0 = posterior.model.get().flatten()
    target = lambda w: -posterior(w, n=50)
    x = fmin_powell(target, x0, disp=False)
    self.assertTrue(x is not None)
    self.assertTrue(len(x) == k * k * 4)
    posterior.model.set(x)
    posterior.model.normalize(True)
    xx = numpy.linspace(0, 2 * numpy.pi, 500)
    fx = posterior.model.log_prob(xx, xx)
    self.assertAlmostEqual(posterior.model.log_z(integration='simpson'),
                           posterior.model.log_z(integration='trapezoidal'),
                           places=2)
    self.assertTrue(fx is not None)
    z = numpy.exp(log_sum_exp(numpy.ravel(fx)))
    self.assertAlmostEqual(z * xx[1] ** 2, 1., places=1)
def reciprocal_lattice(self, marker, return_residual=False):
    phase_ramp = lambda g: 2. * np.pi * (np.dot(g, [x, y]))

    def residual(parameters):
        return np.sum((reference_phase - phase_ramp(parameters[:2])
                       - parameters[2])**2.)

    reference_phase = self.reference_phase(marker)
    reference_shape = reference_phase.shape
    reference_phase = reference_phase.ravel()
    x, y = np.mgrid[0:reference_shape[0], 0:reference_shape[1]]
    x, y = x.ravel(), y.ravel()
    initial = np.array(marker.coord).astype(float) / self.fft_image.shape - .5
    initial = np.hstack((initial, [0]))
    optim = fmin_powell(residual, initial, disp=False)
    if return_residual:
        optim_phase_ramp = np.reshape(phase_ramp(optim[:2]) + optim[2],
                                      reference_shape)
        residual_phase_ramp = (np.reshape(reference_phase, reference_shape)
                               - optim_phase_ramp)
        return residual_phase_ramp, optim[:2]
    else:
        return optim[:2]
def find_bounds(sizes):
    """Use an optimizer to find the bounds."""
    bounds = 100.0 * initial_bounds().flatten()
    norm_sizes = 100.0 * sizes / sizes.sum()

    def func(x):
        bounds = x.reshape(4, 2, 2)
        areas = calc_areas(bounds)
        error = calc_error(areas, norm_sizes)
        # add terms to keep the edges of the sets away from each other
        separation = calc_separation(bounds)
        squareness = calc_squareness(bounds)
        # print(error, separation, squareness)
        return error + separation / 10.0 + squareness / 50.0

    # xopt, fopt, iter, funcalls, warnflag = O.fmin(func, bounds, full_output=True)
    # xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = O.fmin_bfgs(func, bounds, full_output=True)
    # xopt, fopt, func_calls, grad_calls, warnflag = O.fmin_cg(func, bounds, full_output=True)
    xopt, fopt, direct, iter, funcalls, warnflag = O.fmin_powell(func, bounds,
                                                                 full_output=True)
    if 1 == warnflag:
        warnings.warn("Maximum number of function evaluations made.")
    elif 2 == warnflag:
        warnings.warn("Maximum number of iterations reached.")
    return xopt.reshape(4, 2, 2) / 10.0
def optimize_FullML(tree, model, paramsDict, initialGuessDict):
    # need to provide an initial guess for e, the extinction rate
    initialGuessList = [initialGuessDict['<e>']]
    for key in paramsDict.keys():
        initialGuessList.append(initialGuessDict[key])
    initialGuessList = scipy.log(initialGuessList)
    # v = optimize.fmin_bfgs(likelihood_FullML,
    v = optimize.fmin_powell(likelihood_FullML,
                             initialGuessList,
                             args=(model, tree, paramsDict),
                             full_output=True, disp=1, callback=None)
    solution = scipy.exp(v[0])
    nLogL = v[1]
    bestEst = OrderedDict()
    bestEst['nLogL'] = nLogL
    bestEst['<e>'] = solution[0]
    for i, key in enumerate(paramsDict.keys()):
        # i+1 because paramsDict does not include the parameter 'e', but
        # solution does contain an estimate for 'e' in its zeroth element
        bestEst[key] = solution[i + 1]
    return bestEst
def estimate(self, hyp0, covfunc, X, y, optimizer='cg'):
    """Estimate the model."""
    if optimizer.lower() == 'cg':  # conjugate gradients
        out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik,
                               (covfunc, X, y), disp=True, gtol=self.tol,
                               maxiter=self.n_iter, full_output=1)
    elif optimizer.lower() == 'powell':  # Powell's method
        out = optimize.fmin_powell(self.loglik, hyp0, (covfunc, X, y),
                                   full_output=1)
    else:
        raise ValueError("unknown optimizer")

    self.hyp = out[0]
    self.nlZ = out[1]
    self.optimizer = optimizer
    return self.hyp
def test_joint_skew_kurt(self, skew0, kurt0, return_weights=False):
    """
    Returns -2 x log-likelihood and the p-value for the joint
    hypothesis test for skewness and kurtosis.

    Parameters
    ----------
    skew0 : float
        Skewness value to be tested.
    kurt0 : float
        Kurtosis value to be tested.
    return_weights : bool
        If True, function also returns the weights that maximize the
        likelihood ratio.  Default is False.

    Returns
    -------
    test_results : tuple
        The log-likelihood ratio and p-value of the joint hypothesis test.
    """
    self.skew0 = skew0
    self.kurt0 = kurt0
    start_nuisance = np.array([self.endog.mean(), self.endog.var()])
    llr = optimize.fmin_powell(self._opt_skew_kurt, start_nuisance,
                               full_output=1, disp=0)[1]
    p_val = chi2.sf(llr, 2)
    if return_weights:
        return llr, p_val, self.new_weights.T
    return llr, p_val
def _doFit(self):
    # get some useful variables to help choose starting fit vals
    self.params = optimize.fmin_powell(self._getErr, self.params,
                                       (self.xx, self.yy, self.sems),
                                       disp=self.display)
    # self.params = optimize.fmin_bfgs(self._getErr, self.params, None,
    #                                  (self.xx, self.yy, self.sems),
    #                                  disp=self.display)
    self.ssq = self._getErr(self.params, self.xx, self.yy, 1.0)
    self.chi = self._getErr(self.params, self.xx, self.yy, self.sems)
    self.rms = self.ssq / len(self.xx)
def optcmltv(lcs, spline, verbose=True):
    """
    I will optimize the coefficients of the spline so as to minimize the mltv.
    I do not use the microlensing of the lcs at all !

    Simple Powell optimization, slow. A pity.

    Add BOK and time shifts in there and it might be bingo !

    Would be more efficient if we added knots on the fly.
    """
    inic = spline.getc(m=2)

    def setc(c):
        spline.setc(c, m=2)

    def errorfct(c):
        setc(c)
        (tv, dist) = mltv(lcs, spline, weight=False)
        print("put weight")
        return tv + 0.1 * spline.tv()

    minout = spopt.fmin_powell(errorfct, inic, full_output=1, disp=verbose)
    copt = minout[0]

    # We find a common shift to all coeffs so that the level matches
    meanc = np.mean(spline.getc(m=2))
    meanmag = np.mean(np.concatenate([l.getmags(noml=True) for l in lcs]))
    setc(copt)
    spline.c += meanmag - meanc
def optcflat(self, verbose=False):
    """
    Optimizes only the "border coeffs" so as to get zero slope at the extrema.
    Run optc() first ...
    This has to be done with an iterative optimizer.
    """
    full = self.getc(m=1)
    inip = self.getc(m=1)[[0, 1, -2, -1]]  # 4 coeffs

    def setp(p):
        full[[0, 1, -2, -1]] = p
        self.setcflat(full)

    if verbose:
        print("Starting flat coeff optimization ...")
        print("Initial pars : ", inip)

    def errorfct(p):
        setp(p)
        return self.r2(nostab=False)  # to get the same as optc would return!

    minout = spopt.fmin_powell(errorfct, inip, full_output=1, disp=verbose)
    popt = minout[0]
    if popt.shape == ():
        popt = np.array([popt])

    if verbose:
        print("Optimal pars : ", popt)
    setp(popt)
    return self.r2(nostab=False)  # we include the stab points, like optc does
def _quadfit_onewave(spec, specerr, *args, **kwargs):
    """Do a quadratic fit at one wavelength."""
    # Initialize the fit
    initscatter = numpy.var(spec) - numpy.median(specerr)**2.
    if initscatter < 0.:
        initscatter = numpy.std(spec)
    else:
        initscatter = numpy.sqrt(initscatter)
    initscatter = numpy.log(initscatter)  # fit as log
    # Set up the matrices
    vstackIn = (numpy.ones(len(spec)),)
    # Linear components
    for ii in range(len(args)):
        vstackIn = vstackIn + (args[ii],)
    # Quadratic components
    for ii in range(len(args)):
        for jj in range(ii, len(args)):
            vstackIn = vstackIn + (args[ii] * args[jj],)
    labelA = numpy.vstack(vstackIn).T
    outscatter = numpy.exp(optimize.fmin_powell(_quadfit_scatter_mloglike,
                                                initscatter,
                                                args=(spec, specerr, labelA, args),
                                                disp=False))
    out = (_polyfit_coeffs(spec, specerr, outscatter, labelA), outscatter,)
    if kwargs.get('return_residuals', False):
        out = out + (_quadfit_residuals_onewave(out[0], spec, *args),)
    return out
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
                maxiter=100, callback=None, retall=False,
                full_output=True, hess=None):
    xtol = kwargs.setdefault('xtol', 0.0001)
    ftol = kwargs.setdefault('ftol', 0.0001)
    maxfun = kwargs.setdefault('maxfun', None)
    start_direc = kwargs.setdefault('start_direc', None)
    retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
                                   ftol=ftol, maxiter=maxiter, maxfun=maxfun,
                                   full_output=full_output, disp=disp,
                                   retall=retall, callback=callback,
                                   direc=start_direc)
    if full_output:
        if not retall:
            xopt, fopt, direc, niter, fcalls, warnflag = retvals
        else:
            xopt, fopt, direc, niter, fcalls, warnflag, allvecs = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
                   'fcalls': fcalls, 'warnflag': warnflag,
                   'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        xopt = retvals
        retvals = None

    return xopt, retvals
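# A minimal usage sketch (my addition, not from the source): the wrapper
# above follows a statsmodels-style fitter signature, so a bare quadratic
# objective with an empty fargs tuple and an empty kwargs dict is enough
# to drive it; `sphere` is a hypothetical objective.
import numpy as np
from scipy import optimize

def sphere(params):
    return np.sum(np.asarray(params)**2)

xopt, retvals = _fit_powell(sphere, None, np.array([2.0, -3.0]),
                            fargs=(), kwargs={}, disp=False)
print(xopt, retvals['converged'])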
def __solver__(self, p):
    def iterfcn(x):
        p.xk, p.fk = x, p.f(x)
        p.iterfcn()
        iter = p.iter - 1
        if p.istop:
            raise isSolved

    try:
        iterfcn(p.x0)
        xf = fmin_powell(p.f, p.x0, xtol=p.xtol, ftol=p.ftol, disp=0,
                         maxiter=p.maxIter, maxfun=p.maxFunEvals,
                         callback=iterfcn)
    except isSolved:
        xf = p.xk

    ff = p.f(p.xk)
    p.xk = p.xf = xf
    p.fk = p.ff = ff
    p.istop = 1000
    p.iterfcn()
def single_fit(z, zmin=0, zmax=3, p0=[1.0], func=negsech2):
    fitnum = len(z)
    if fitnum > 100:
        res = opt.fmin_powell(func, p0, args=(z, zmin, zmax))
        return res, fitnum
    else:
        return float('NaN'), float('NaN'), fitnum
def test_powell(self):
    """Powell (direction set) optimization routine."""
    retval = optimize.fmin_powell(self.func, self.startparams,
                                  args=(), maxiter=self.maxiter,
                                  full_output=True, disp=False, retall=False)
    (params, fopt, direc, numiter, func_calls, warnflag) = retval
    err = abs(self.func(params) - self.func(self.solution))
    assert_(err < 1e-6)

    # Ensure that function call counts are 'known good'; these are from
    # SciPy 0.7.0.  Don't allow them to increase.
    #
    # However, some leeway must be added: the exact evaluation
    # count is sensitive to numerical error, and floating-point
    # computations are not bit-for-bit reproducible across
    # machines, and when using e.g. MKL, data alignment
    # etc. affect the rounding error.
    assert_(self.funccalls <= 116 + 20, self.funccalls)
    assert_(self.gradcalls == 0, self.gradcalls)

    # Ensure that the function behaves the same; this is from SciPy 0.7.0
    assert_(np.allclose(self.trace[34:39],
                        [[0.72949016, -0.44156936, 0.47100962],
                         [0.72949016, -0.44156936, 0.48052496],
                         [1.45898031, -0.88313872, 0.95153458],
                         [0.72949016, -0.44156936, 0.47576729],
                         [1.72949016, -0.44156936, 0.47576729]],
                        atol=1e-14, rtol=1e-7),
            self.trace[34:39])
def _predict(self):
    train = self.train
    test = self.test
    param = self.param
    num_rounds = self.num_rounds
    num_classes = 8
    all_test_preds = ''

    dtrain = xgb.DMatrix(train.drop(['Id', 'Response'], axis=1),
                         train['Response'].values)
    dtest = xgb.DMatrix(test.drop(['Id', 'Response'], axis=1),
                        label=self.test['Response'].values)
    model = xgb.train(param, dtrain, num_rounds)

    train_preds = model.predict(dtrain, ntree_limit=model.best_iteration)
    print('Train score is:', self._eval_wrapper(train_preds, train['Response']))
    test_preds = model.predict(dtest, ntree_limit=model.best_iteration)
    train_preds = np.clip(train_preds, -0.99, 8.99)
    test_preds = np.clip(test_preds, -0.99, 8.99)

    # fit one offset per class with Powell's method
    offsets = np.ones(num_classes) * -0.5
    offset_train_preds = np.vstack((train_preds, train_preds,
                                    train['Response'].values))
    for j in range(num_classes):
        train_offset = lambda x: -self._apply_offset(offset_train_preds, x, j)
        offsets[j] = fmin_powell(train_offset, offsets[j])

    # apply the fitted offsets to the test predictions
    data = np.vstack((test_preds, test_preds, test['Response'].values))
    for j in range(num_classes):
        data[1, data[0].astype(int) == j] = \
            data[0, data[0].astype(int) == j] + offsets[j]
    final_test_preds = np.round(np.clip(data[1], 1, 8)).astype(int)
    return final_test_preds
def _doFit(self):
    # get some useful variables to help choose starting fit vals
    self.params = optimize.fmin_powell(self._getErr, self.params,
                                       (self.xx, self.yy, self.sems),
                                       disp=self.display)
    # self.params = optimize.fmin_bfgs(self._getErr, self.params, None,
    #                                  (self.xx, self.yy, self.sems),
    #                                  disp=self.display)
    self.ssq = self._getErr(self.params, self.xx, self.yy, 1.0)
    self.chi = self._getErr(self.params, self.xx, self.yy, self.sems)
    self.rms = self.ssq / len(self.xx)
def __call__(self, x, y, sigma=None, npt=40, k=3):
    """Function-like class instance...

    :param float[:] x: coordinates along the horizontal axis
    :param float[:] y: coordinates along the vertical axis
    :param float[:] sigma: error along the vertical axis
    :param int npt: number of points of the fitting spline
    :param int k: order of the fitted spline
    :return: the background for y
    :rtype: float[:]

    Nota: due to the spline function, one needs npt >= k + 1.
    """
    if sigma is None:
        # assume sigma = sqrt(yobs)!
        w_obs = 1.0 / numpy.sqrt(y)
    else:
        w_obs = 1.0 / sigma
    # deal with 0-variance points
    mask = numpy.logical_not(numpy.isnan(w_obs))
    x_obs = x[mask]
    y_obs = y[mask]
    w_obs = w_obs[mask]
    x0 = numpy.linspace(x.min(), x.max(), npt)
    y0 = numpy.zeros(npt) + y_obs.mean()
    # Minimize
    y1 = optimize.fmin_powell(self.func_min, y0,
                              args=(x_obs, y_obs, w_obs, x0, k),
                              disp=False)
    # Result
    y_calc = UnivariateSpline(x0, y1, s=0, k=k)(x)
    return y_calc
def minimize(func, x0, method='CG', options=None, jac=None, callback=None):
    method = method.lower()
    options = options or {}  # guard against options=None
    if 'disp' in options:
        disp = options['disp']
    else:
        disp = False
    if 'maxiter' in options:
        maxiter = options['maxiter']
    else:
        maxiter = None

    if method == 'nelder-mead':
        x = fmin(func=func, x0=x0, disp=disp, maxiter=maxiter,
                 callback=callback)
    elif method == 'powell':
        x = fmin_powell(func=func, x0=x0, disp=disp, maxiter=maxiter,
                        callback=callback)
    elif method == 'cg':
        x = fmin_cg(f=func, x0=x0, fprime=jac, disp=disp, maxiter=maxiter,
                    callback=callback)
    elif method == 'bfgs':
        x = fmin_bfgs(f=func, x0=x0, fprime=jac, disp=disp, maxiter=maxiter,
                      callback=callback)
    elif method == 'l-bfgs-b':
        d = ceil(1000000 / len(x0))
        print(d)
        x, _, _ = fmin_l_bfgs_b(func=func, x0=x0, fprime=jac,
                                disp=(d if disp else 0))
    elif method == 'newton-cg':
        x = fmin_ncg(f=func, x0=x0, fprime=jac, disp=disp, maxiter=maxiter,
                     callback=callback)

    class Result(object):
        def __init__(self, x):
            self.x = x

    return Result(x)
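# A minimal usage sketch (not from the source): drive the wrapper above
# with SciPy's Rosenbrock function; the 'powell' branch needs no gradient,
# so jac can be omitted.
import numpy as np
from scipy.optimize import rosen

res = minimize(rosen, np.array([1.3, 0.7]), method='Powell',
               options={'disp': False})
print(res.x)  # should be close to [1.0, 1.0]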
def refine_frequency(self, time, amplitude, guess, verbose=False):
    # set up a minimizer fit to the best frequency
    p = ParamState(
        't',
        'y_true',
        a=1,
        b=1,
        f=guess,
    )
    p.given(t=time, y_true=amplitude)

    def model(p):
        return (p.a * np.sin(2 * np.pi * p.f * p.t)
                + p.b * np.cos(2 * np.pi * p.f * p.t))

    def cost(args, p):
        p.ingest(args)
        err = model(p) - p.y_true
        energy = np.sum(err**2)
        return energy

    x0 = p.array
    xf = fmin_powell(cost, x0, args=(p,), disp=verbose)
    p.ingest(xf)
    if (p.f - guess) > 3**2:
        raise ValueError(
            f'Guess freq: {self.f0}, Fit Freq: {p.f} too far apart')
    return p.f
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
                maxiter=100, callback=None, retall=False,
                full_output=True, hess=None):
    xtol = kwargs.setdefault('xtol', 0.0001)
    ftol = kwargs.setdefault('ftol', 0.0001)
    maxfun = kwargs.setdefault('maxfun', None)
    start_direc = kwargs.setdefault('start_direc', None)
    retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
                                   ftol=ftol, maxiter=maxiter, maxfun=maxfun,
                                   full_output=full_output, disp=disp,
                                   retall=retall, callback=callback,
                                   direc=start_direc)
    if full_output:
        if not retall:
            xopt, fopt, direc, niter, fcalls, warnflag = retvals
        else:
            xopt, fopt, direc, niter, fcalls, warnflag, allvecs = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
                   'fcalls': fcalls, 'warnflag': warnflag,
                   'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        # without full_output, fmin_powell returns xopt directly
        # (discarding it as None would lose the result)
        xopt = retvals
        retvals = None

    return xopt, retvals
def optcflat(self, verbose=False):
    """
    Optimizes only the "border coeffs" so as to get zero slope at the extrema.
    Run optc() first ...
    This has to be done with an iterative optimizer.
    """
    full = self.getc(m=1)
    inip = self.getc(m=1)[[0, 1, -2, -1]]  # 4 coeffs

    def setp(p):
        full[[0, 1, -2, -1]] = p
        self.setcflat(full)

    if verbose:
        print("Starting flat coeff optimization ...")
        print("Initial pars : ", inip)

    def errorfct(p):
        setp(p)
        return self.r2(nostab=False)  # to get the same as optc would return!

    minout = spopt.fmin_powell(errorfct, inip, full_output=1, disp=verbose)
    popt = minout[0]
    if popt.shape == ():
        popt = np.array([popt])

    if verbose:
        print("Optimal pars : ", popt)
    setp(popt)
    return self.r2(nostab=False)  # we include the stab points, like optc does
def test_powell(self):
    """Powell (direction set) optimization routine."""
    retval = optimize.fmin_powell(self.func, self.startparams,
                                  args=(), maxiter=self.maxiter,
                                  full_output=True, disp=False, retall=False)
    (params, fopt, direc, numiter, func_calls, warnflag) = retval
    err = abs(self.func(params) - self.func(self.solution))
    assert_(err < 1e-6)

    # Ensure that function call counts are 'known good'; these are from
    # SciPy 0.7.0.  Don't allow them to increase.
    assert_(self.funccalls == 116, self.funccalls)
    assert_(self.gradcalls == 0, self.gradcalls)

    # Ensure that the function behaves the same; this is from SciPy 0.7.0
    assert_(np.allclose(self.trace[34:39],
                        [[0.72949016, -0.44156936, 0.47100962],
                         [0.72949016, -0.44156936, 0.48052496],
                         [1.45898031, -0.88313872, 0.95153458],
                         [0.72949016, -0.44156936, 0.47576729],
                         [1.72949016, -0.44156936, 0.47576729]],
                        atol=1e-14, rtol=1e-7),
            self.trace[34:39])
def optcmltv(lcs, spline, verbose=True):
    """
    I will optimize the coefficients of the spline so as to minimize the mltv.
    I do not use the microlensing of the lcs at all !

    Simple Powell optimization, slow. A pity.

    Add BOK and time shifts in there and it might be bingo !

    Would be more efficient if we added knots on the fly.
    """
    inic = spline.getc(m=2)

    def setc(c):
        spline.setc(c, m=2)

    def errorfct(c):
        setc(c)
        (tv, dist) = mltv(lcs, spline, weight=False)
        print("put weight")
        return tv + 0.1 * spline.tv()

    minout = spopt.fmin_powell(errorfct, inic, full_output=1, disp=verbose)
    copt = minout[0]

    # We find a common shift to all coeffs so that the level matches
    meanc = np.mean(spline.getc(m=2))
    meanmag = np.mean(np.concatenate([l.getmags(noml=True) for l in lcs]))
    setc(copt)
    spline.c += meanmag - meanc
def maximize(L, DL, D2L, x, method=None, disp=False):
    mL = lambda x: -L(x)
    mDL = lambda x: -DL(x)
    mD2L = lambda x: -D2L(x)

    if method is None or method == 'ncg':
        func = lambda x0: opt.fmin_ncg(mL, x0, fprime=mDL, fhess=mD2L,
                                       disp=disp, full_output=True,
                                       avextol=1e-10)
        xm = check_max(func, x, 5, 'NCG', disp)
        if xm is not None:
            return xm

    if method is None or method == 'bfgs':
        func = lambda x0: opt.fmin_bfgs(mL, x0, fprime=mDL,
                                        disp=disp, full_output=True,
                                        gtol=1e-10)
        xm = check_max(func, x, 6, 'BFGS', disp)
        if xm is not None:
            return xm

    if method is None or method == 'powell':
        func = lambda x0: opt.fmin_powell(mL, x0, disp=disp,
                                          full_output=True, ftol=1e-10)
        xm = check_max(func, x, 5, 'POWELL', disp)
        if xm is not None:
            return xm

    func = lambda x0: opt.fmin(mL, x0, disp=disp, full_output=True, ftol=1e-10)
    xm = check_max(func, x, 4, 'DOWNHILL_SIMPLEX', disp)
    return xm
def estimate(self, hyp0, covfunc, X, y, optimizer='cg'):
    """Estimate the model."""
    if len(X.shape) == 1:
        X = X[:, np.newaxis]
    self.hyp0 = hyp0

    if optimizer.lower() == 'cg':  # conjugate gradients
        out = optimize.fmin_cg(self.loglik, hyp0, self.dloglik,
                               (covfunc, X, y), disp=True, gtol=self.tol,
                               maxiter=self.n_iter, full_output=1)
    elif optimizer.lower() == 'powell':  # Powell's method
        out = optimize.fmin_powell(self.loglik, hyp0, (covfunc, X, y),
                                   full_output=1)
    else:
        raise ValueError("unknown optimizer")

    # Always return a 1-d array; the optimizer sometimes changes dimensions
    if len(out[0].shape) > 1:
        self.hyp = out[0].flatten()
    else:
        self.hyp = out[0]
    self.nlZ = out[1]
    self.optimizer = optimizer
    return self.hyp
def maximize(L, DL, D2L, x, method=None, disp=False):
    """Main function to perform numerical optimization.  L, DL and D2L are
    the objective function and its derivative and Hessian, and x is the
    initial guess (current rating).

    It will attempt the maximization using four different methods, from
    fastest and least robust to slowest and most robust.  It returns the
    argmax, or None if an error occurred."""
    mL = lambda x: -L(x)
    mDL = lambda x: -DL(x)
    mD2L = lambda x: -D2L(x)

    # Newton conjugate gradient
    if method is None or method == 'ncg':
        func = lambda x0: opt.fmin_ncg(mL, x0, fprime=mDL, fhess=mD2L,
                                       disp=disp, full_output=True,
                                       avextol=1e-10)
        xm = check_max(func, x, 5, 'NCG', disp)
        if xm is not None:
            return xm

    # Broyden-Fletcher-Goldfarb-Shanno
    if method is None or method == 'bfgs':
        func = lambda x0: opt.fmin_bfgs(mL, x0, fprime=mDL,
                                        disp=disp, full_output=True,
                                        gtol=1e-10)
        xm = check_max(func, x, 6, 'BFGS', disp)
        if xm is not None:
            return xm

    # Powell
    if method is None or method == 'powell':
        func = lambda x0: opt.fmin_powell(mL, x0, disp=disp,
                                          full_output=True, ftol=1e-10)
        xm = check_max(func, x, 5, 'POWELL', disp)
        if xm is not None:
            return xm

    # Downhill simplex (last resort)
    func = lambda x0: opt.fmin(mL, x0, disp=disp, full_output=True, ftol=1e-10)
    xm = check_max(func, x, 4, 'DOWNHILL_SIMPLEX', disp)
    return xm
def update_psf(data, dq, current_flat, current_psf, patch_shape,
               patch_centers, shifts, background, eps, threads, loss_kind,
               floor, gain, clip_parms):
    """Update the psf model, using Powell's method."""
    global count
    count = 0

    psf_grid, patch_grid = get_grids(patch_shape, current_psf.shape)

    if patch_centers is None:
        c = np.ones(data.shape[0]).astype(np.int) * (patch_shape[0] + 1) / 2
        patch_centers = (c, c)

    p0 = np.log(current_psf.ravel().copy())

    # powell is most efficient w/o gradients, just do one iteration
    res = fmin_powell(psf_loss, p0, maxiter=1,
                      args=(data, dq, current_flat, psf_grid, patch_grid,
                            patch_centers, shifts, background, eps, threads,
                            loss_kind, floor, gain, clip_parms))

    # get the ssqe vector
    ssqe = psf_loss(res, data, dq, current_flat, psf_grid, patch_grid,
                    patch_centers, shifts, background, eps, threads,
                    loss_kind, floor, gain, clip_parms, summation=False)

    res = np.exp(res.reshape(current_psf.shape[0], current_psf.shape[1]))
    return res, ssqe
def background_image(self, img, sigma=None, mask=None, npt=10, k=3):
    shape = img.shape
    if sigma is not None:
        assert sigma.shape == shape
    else:
        sigma = numpy.sqrt(img)
    w = 1 / sigma

    mask_nan = numpy.isnan(w)
    if mask is not None:
        assert mask.shape == shape
        mask = numpy.logical_or(mask_nan, mask)
    else:
        mask = mask_nan
    if mask.sum() == 0:
        valid = numpy.where(numpy.logical_not(mask))
    else:
        valid = True

    d0_pos = numpy.arange(0, shape[0])
    d1_pos = numpy.arange(0, shape[1])
    d0_sparse = numpy.linspace(0, shape[0], npt)
    d1_sparse = numpy.linspace(0, shape[1], npt)
    y0 = numpy.zeros((npt, npt)) + img.mean()
    y1 = optimize.fmin_powell(self.func2d_min, y0,
                              args=(d0_sparse, d1_sparse, d0_pos, d1_pos,
                                    img, w, valid, k),
                              disp=True, callback=lambda x: print(x))
    values = y1.reshape(d0_sparse.size, d1_sparse.size)
    spline = RectBivariateSpline(d0_sparse, d1_sparse, values, k, k)
    bg = spline(d0_pos, d1_pos)
    return bg
def fit(self, l, b, dist, ext, e_ext):
    """
    NAME:
       fit
    PURPOSE:
       fit the amplitudes of the disk, spiral, and Orion parts of the
       Drimmel map to other data
    INPUT:
       l,b - Galactic longitude and latitude in degree
       dist - distance in kpc
       ext - extinction at dist
       e_ext - error in extinction
    OUTPUT:
       (fd,fs,fo,dist_stretch) amplitudes of disk, spiral, and Orion parts
       and a 'distance stretch' applied to the model
       (applied as self(l,b,dist*dist_stretch))
    HISTORY:
       2013-12-16 - Written - Bovy (IAS)
    """
    # The fit consists of
    # a) an overall amplitude A
    # b) relative amplitudes fd/A, fs/A
    # c) a distance stretch
    pars = numpy.array([0., numpy.log(1. / 3.), numpy.log(1. / 3.), 0.])
    pars = optimize.fmin_powell(_fitFunc, pars,
                                args=(self, l, b, dist, ext, e_ext))
    amp = numpy.exp(pars[0])
    fd = amp * numpy.exp(pars[1])
    fs = amp * numpy.exp(pars[2])
    fo = amp * (1. - fd - fs)
    return (fd, fs, fo, numpy.exp(pars[3]))
def _fit_orbit(orb, vxvv, vxvv_err, pot, radec=False, lb=False,
               customsky=False, lb_to_customsky=None,
               pmllpmbb_to_customsky=None,
               tintJ=100, ntintJ=1000, integrate_method='dopr54_c',
               ro=None, vo=None, obs=None, disp=False):
    """Fit an orbit to data in a given potential."""
    # Import here, because otherwise there is an infinite loop of imports
    from galpy.actionAngle import actionAngleIsochroneApprox

    # Mock this up, because we want to use its orbit-integration routines
    class mockActionAngleIsochroneApprox(actionAngleIsochroneApprox):
        def __init__(self, tintJ, ntintJ, pot, integrate_method='dopr54_c'):
            self._tintJ = tintJ
            self._ntintJ = ntintJ
            self._tsJ = nu.linspace(0., self._tintJ, self._ntintJ)
            self._pot = pot
            self._integrate_method = integrate_method
            return None

    tmockAA = mockActionAngleIsochroneApprox(tintJ, ntintJ, pot,
                                             integrate_method=integrate_method)
    opt_vxvv = optimize.fmin_powell(_fit_orbit_mlogl, orb.vxvv,
                                    args=(vxvv, vxvv_err, pot, radec, lb,
                                          customsky, lb_to_customsky,
                                          pmllpmbb_to_customsky,
                                          tmockAA, ro, vo, obs),
                                    disp=disp)
    maxLogL = -_fit_orbit_mlogl(opt_vxvv, vxvv, vxvv_err, pot, radec, lb,
                                customsky, lb_to_customsky,
                                pmllpmbb_to_customsky,
                                tmockAA, ro, vo, obs)
    return (opt_vxvv, maxLogL)
def run(self):
    optimizer = self.optimizer
    p = self.problem

    # coerce return types
    f = lambda wt: numpy.float64(p.f(wt))
    grad = lambda wt: numpy.array([numpy.float64(g) for g in p.grad(wt)])

    # negate for minimization
    neg_f = lambda wt: -f(wt)
    neg_grad = lambda wt: -grad(wt)
    # if not useGrad or not p.useGrad(): neg_grad = None
    if not p.usef:
        neg_f = lambda wt: -p._fDummy(wt)

    log = logs.getlogger(self.__class__.__name__)

    if optimizer == "bfgs":
        params = {k: v for k, v in self.optParams.items()
                  if k in ("gtol", "epsilon", "maxiter")}
        if self.verbose:
            print("starting optimization with %s... %s" % (optimizer, params))
        wt, f_opt, grad_opt, Hopt, func_calls, grad_calls, warn_flags = \
            fmin_bfgs(neg_f, self.wt, fprime=neg_grad, full_output=True,
                      **params)
        if self.verbose:
            print("optimization done with %s..." % optimizer)
            print("f-opt: %.16f\nfunction evaluations: %d\nwarning flags: %d\n"
                  % (-f_opt, func_calls, warn_flags))
    elif optimizer == "cg":
        params = {k: v for k, v in self.optParams.items()
                  if k in ("gtol", "epsilon", "maxiter")}
        log.info("starting optimization with %s... %s" % (optimizer, params))
        wt, f_opt, func_calls, grad_calls, warn_flags = \
            fmin_cg(neg_f, self.wt, fprime=neg_grad, args=(),
                    full_output=True, **params)
        log.info("optimization done with %s..." % optimizer)
        log.info("f-opt: %.16f\nfunction evaluations: %d\nwarning flags: %d\n"
                 % (-f_opt, func_calls, warn_flags))
    elif optimizer == "ncg":
        params = {k: v for k, v in self.optParams.items()
                  if k in ("avextol", "epsilon", "maxiter")}
        log.info("starting optimization with %s... %s" % (optimizer, params))
        wt, f_opt, func_calls, grad_calls, hess_calls, warn_flags = \
            fmin_ncg(neg_f, self.wt, fprime=neg_grad, args=(),
                     full_output=True, **params)
        log.info("optimization done with %s..." % optimizer)
        log.info("f-opt: %.16f\nfunction evaluations: %d\nwarning flags: %d\n"
                 % (-f_opt, func_calls, warn_flags))
    elif optimizer == "fmin":
        params = {k: v for k, v in self.optParams.items()
                  if k in ("xtol", "ftol", "maxiter")}
        log.info("starting optimization with %s... %s" % (optimizer, params))
        wt = fmin(neg_f, self.wt, args=(), full_output=True, **params)
        log.info("optimization done with %s..." % optimizer)
    elif optimizer == "powell":
        params = {k: v for k, v in self.optParams.items()
                  if k in ("xtol", "ftol", "maxiter")}
        log.info("starting optimization with %s... %s" % (optimizer, params))
        wt = fmin_powell(neg_f, self.wt, args=(), full_output=True, **params)
        log.info("optimization done with %s..." % optimizer)
    elif optimizer == 'l-bfgs-b':
        params = {k: v for k, v in self.optParams.items()
                  if k in ("gtol", "epsilon", "maxiter", "bounds")}
        log.info("starting optimization with %s... %s" % (optimizer, params))
        if 'bounds' in params:
            params['bounds'] = (params['bounds'],) * len(self.wt)
        wt, f_opt, d = fmin_l_bfgs_b(neg_f, self.wt, fprime=neg_grad, **params)
        log.info("optimization done with %s..." % optimizer)
        log.info("f-opt: %.16f\n" % (-f_opt))
    else:
        raise Exception("Unknown optimizer '%s'" % optimizer)

    return wt
def fit(self, start_params=None, method='bfgs', maxiter=35, tol=1e-08,
        penalty=False):
    """
    Fit the unconditional maximum likelihood of an AR(p) process.

    Parameters
    ----------
    start_params : array_like, optional
        A first guess on the parameters.  Default is a vector of zeros.
    method : str, optional
        Unconstrained solvers: default is 'bfgs'; also 'newton'
        (newton-raphson), 'ncg' (note that the previous three are not
        recommended at the moment) and 'powell'.
        Constrained solvers: 'bfgs-b', 'tnc'.
        See notes.
    maxiter : int, optional
        The maximum number of function evaluations.  Default is 35.
    tol : float
        The convergence tolerance.  Default is 1e-08.
    penalty : bool
        Whether or not to use a penalty function.  Default is False,
        though this is ignored at the moment and the penalty is always
        used if appropriate.  See notes.

    Notes
    -----
    The unconstrained solvers use a quadratic penalty (regardless of
    whether the penalty kwd is True or False) in order to ensure that the
    solution stays within (-1, 1).  The constrained solvers default to
    using a bound of (-.999, .999).
    """
    self.penalty = penalty
    method = method.lower()
    # TODO: allow a user-specified penalty function
    # if penalty and method not in ['bfgs_b', 'tnc', 'cobyla', 'slsqp']:
    #     minfunc = lambda params: -self.loglike(params) - \
    #         self.penfunc(params)
    # else:
    minfunc = lambda params: -self.loglike(params)
    if method in ['newton', 'bfgs', 'ncg']:
        super(AR, self).fit(start_params=start_params, method=method,
                            maxiter=maxiter, tol=tol)
    else:
        bounds = [(-.999, .999)]  # assume stationarity
        if start_params is None:
            start_params = np.array([0])  # TODO: assumes AR(1)
        if method == 'bfgs-b':
            retval = optimize.fmin_l_bfgs_b(minfunc, start_params,
                                            approx_grad=True, bounds=bounds)
            self.params, self.llf = retval[0:2]
        if method == 'tnc':
            retval = optimize.fmin_tnc(minfunc, start_params,
                                       approx_grad=True, bounds=bounds)
            self.params = retval[0]
        if method == 'powell':
            retval = optimize.fmin_powell(minfunc, start_params)
            self.params = retval[None]
def two_exp_fit_simple(r, z, rmin, rmax, zmin, zmax, func=neg2expl):
    fitnum = len(r)
    if fitnum > 100:
        hr, z0 = opt.fmin_powell(func, [1.0, 1.0],
                                 args=(r, z, rmin, rmax, zmin, zmax))
        return hr, z0, fitnum
    else:
        return float('NaN'), float('NaN'), fitnum
def test_minimise(self):
    print('#################################')
    print('# Test Equilibrium Loss Wager')
    print('#################################')
    wager_multiplier = fmin_powell(Root2, x0=1., maxiter=20)
    print("highest survivability following loss, multiply wager by %2.4f %%"
          % (wager_multiplier * 100))
def calc_mle_opt(x_data, y_data, ini_ges, m_dim):
    # This can be solved analytically, but we deliberately find it by
    # numerical optimization instead.
    res = optimize.fmin_powell(minus_log_likelihood_function, ini_ges,
                               (x_data, y_data, m_dim),
                               maxiter=5000, maxfun=5000)
    return res[0:m_dim - 1], res[m_dim - 1]
def _powell(self, f, x, it_nb, dfun=None):
    """Function minimization using Powell's method (applied to the summed
    squares of f)."""
    params = optimize.fmin_powell(lambda x: (f(x)**2).sum(), x,
                                  maxiter=it_nb)
    return params
def optimize_parameters(self, initial_pvector=None, show_fit=False):
    """Finds the MAP transit parameters to fit our data.

    Args:
        initial_pvector (np.array): if None, starts from the initial
            values given to __init__.  Otherwise, if it's a Series, it
            assumes the initial values are under ['period', 't0', 'rp'].
            TODO: this needs to be improved.
        show_fit (bool): if True, prints data and plots fit

    Returns:
        (wres, result_params, pvector)
        wres (pd.Series): contains all the params values under keyword
            indexes, plus derived values (duration)
        result_params (batman.TransitParams): best-fit params
        pvector (np.array): array of the best-fit active vector
    """
    # TODO: bring up to speed
    if initial_pvector is None:
        initial_pvector = self.get_parameter_vector()
    elif isinstance(initial_pvector, (pd.Series, dict)):
        initial_pvector = pd.Series(initial_pvector)
        self.set_active_vector(initial_pvector.index)
    else:
        self.set_parameter_vector(initial_pvector)

    self.verify_prior(info_str='inside optimize_parameters.')
    result = fmin_powell(self.neg_lnposterior, initial_pvector, disp=False)

    # Extract and wrap the results
    self.set_parameter_vector(result)
    result_params = self.params
    wres = pd.Series(self.get_parameter_dict(include_frozen=True))
    wres['u'] = self['u']
    wres['duration'] = wres['per'] / (np.pi * wres['a'])
    wres['depth'] = wres['rp']**2
    wres['log_llr'] = self.calc_likelihood_ratio(result)
    wres['snr_estimate'] = self.estimate_snr()
    wres['b'] = self.get_b()
    wres['R_p'] = self.get_R_p()

    # Optional visualization (for testing)
    if show_fit:
        print(wres)
        llh_ratio = self.calc_likelihood_ratio()
        print("Likelihood ratio of model is:", llh_ratio)
        f_plot = self.bin_model(self.m.light_curve(result_params))
        fig, ax = plt.subplots()
        ax.plot(self.t_data, self.f_data, 'k.')
        ax.plot(self.t_data, f_plot, 'r-')
        fig.show()

    return wres, result_params, result
def test():
    setupFiles()
    params = initParams()
    paramOut = opt.fmin_powell(scoreGarlic, params, xtol=0.01, ftol=0.001,
                               maxfun=2000)
    createParamFile(paramOut, param_file="Final.json")
def _minimize_log_regression_func(self, examples, labels, initial_weights):
    self._optimal_weight_vector = fmin_powell(func=self._log_regression,
                                              x0=initial_weights,
                                              args=(examples, labels),
                                              disp=False, xtol=0.1, ftol=0.1)
    self._optimal_weight_vector = np.array(self._optimal_weight_vector)
def conduct_exponential_fit(xlist, ylist, C0, C1):
    print('= = Begin exponential fit.')
    xguess = [xlist[0], xlist[1]]
    yguess = [ylist[0], ylist[1]]
    guess = obtain_exponential_guess(xguess, yguess, C1)
    print('= = = guessed initial tau: ', guess)
    fitOut = fmin_powell(powell_expdecay, guess, args=(xlist, ylist, C0, C1),
                         full_output=True)
    print('= = = = Tau obtained: ', fitOut[0][0])
    return fitOut[0][0]
def minimizePowell(objectiveFunction, parameter_guess, verbose=False):
    '''
    Minimizes the objective function using a derivative-free Powell
    algorithm, starting from an initial parameter guess.

    Parameters
    ----------
    objectiveFunction : function
        The function to be minimized.  It should take only a single
        argument, which should be a list representing the parameters to
        be estimated.
    parameter_guess : [float]
        A starting point for the Powell algorithm, which must be a valid
        input for objectiveFunction.
    verbose : boolean
        A flag for the amount of output to print.

    Returns
    -------
    xopt : [float]
        The values that minimize objectiveFunction.
    '''
    # Execute the minimization, starting from the given parameter guess,
    # and time the process
    t0 = time()
    OUTPUT = fmin_powell(objectiveFunction, parameter_guess, full_output=1,
                         maxiter=1000, disp=verbose)
    t1 = time()

    # Extract values from the optimization output:
    xopt = OUTPUT[0]      # parameters that minimize the function
    fopt = OUTPUT[1]      # value of the function at the minimum: fopt = func(xopt)
    direc = OUTPUT[2]
    optiter = OUTPUT[3]   # number of iterations performed
    funcalls = OUTPUT[4]  # number of function calls made
    warnflag = OUTPUT[5]  # 1: maximum number of function evaluations made
                          # 2: maximum number of iterations reached

    # Check that the optimization succeeded:
    if warnflag != 0:
        warnings.warn("Minimization failed! xopt=" + str(xopt) +
                      ', fopt=' + str(fopt) + ', direc=' + str(direc) +
                      ', optiter=' + str(optiter) +
                      ', funcalls=' + str(funcalls) +
                      ', warnflag=' + str(warnflag))

    # Display and return the results:
    if verbose:
        print("Time to estimate is " + str(t1 - t0) + " seconds.")
    return xopt
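# A minimal usage sketch (hypothetical, not from the source): any callable
# mapping a parameter list to a scalar works as objectiveFunction; the
# Booth function below has its minimum at [1.0, 3.0].
def booth(params):
    x, y = params
    return (x + 2.0 * y - 7.0)**2 + (2.0 * x + y - 5.0)**2

best = minimizePowell(booth, [0.0, 0.0], verbose=True)
print(best)  # should approach [1.0, 3.0]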
def bm_sigsq_optim(tree, traits, rate=1):
    # tree_utils2.assign_sigsq(tree, [rate])
    start = [rate]
    opt = optimize.fmin_powell(calc_like_sigsq, start, args=(tree, traits),
                               full_output=False, disp=True)
    return [tree.get_newick_repr(True), opt]
def _fit_pixel(fluxes, flux_uncertainties, label_vector_array, **kwargs):
    """
    Return the optimal label vector coefficients and scatter for a pixel,
    given the fluxes, uncertainties, and the label vector array.

    :param fluxes:
        The fluxes for the given pixel, from all stars.

    :param flux_uncertainties:
        The 1-sigma flux uncertainties for the given pixel, from all stars.

    :param label_vector_array:
        The label vector array.  This should have shape
        `(N_stars, N_terms + 1)`.

    :returns:
        The optimised label vector coefficients and scatter for this pixel.
    """
    _ = kwargs.get("max_uncertainty", 1)
    failed_response = (np.nan * np.ones(label_vector_array.shape[0]), _)
    if np.all(flux_uncertainties >= _):
        return failed_response

    # Get an initial guess of the scatter.
    scatter = np.var(fluxes) - np.median(flux_uncertainties)**2
    scatter = np.sqrt(scatter) if scatter >= 0 else np.std(fluxes)

    # Optimise the scatter; at each scatter value we will calculate the
    # optimal vector coefficients.
    op_scatter, fopt, direc, n_iter, n_funcs, warnflag = op.fmin_powell(
        _pixel_scatter_nll, scatter,
        args=(fluxes, flux_uncertainties, label_vector_array),
        disp=False, full_output=True)

    if warnflag > 0:
        logger.warning("Warning: {}".format([
            "Maximum number of function evaluations made during optimisation.",
            "Maximum number of iterations made during optimisation."
        ][warnflag - 1]))

    # Calculate the coefficients at the optimal scatter value.
    # Note that if we can't solve for the coefficients, we should just set
    # them to zero and send back a giant variance.
    try:
        coefficients, ATCiAinv, variance = _fit_coefficients(
            fluxes, flux_uncertainties, op_scatter, label_vector_array)
    except np.linalg.linalg.LinAlgError:
        logger.exception("Failed to calculate coefficients")
        if kwargs.get("debug", False):
            raise
        return failed_response
    else:
        return (coefficients, op_scatter)
def optimise_hp_powell(self, ts=None, initial_hp=None, diagnose=False,
                       save=True, **fmin_args):
    """Minimises the posterior w.r.t. hyperparameters with fmin_powell.

    Args:
        ts (pd.DataFrame): the lightcurve to use as the basis for
            optimization (default: self._ts_basis)
        initial_hp (array, optional): the initial points for optimization
        diagnose (bool): if True, prints and returns full output
        save (bool): whether to save the result into hp;
            cannot be done if diagnose is True
        **fmin_args (dict): other inputs to the minimise function;
            choices: 'full_output', 'disp', 'retall', 'direc',
            'xtol', 'ftol', 'maxiter', 'maxfun'

    Returns:
        result (1D array): the output from fmin_powell; may be more
            complicated if diagnose==True.
    """
    if ts is None:
        ts = self.get_basis(only='opt_basis')
    if initial_hp is None:
        initial_hp = self.hp

    if diagnose:
        for key in ('full_output', 'retall', 'disp'):
            fmin_args[key] = True
    else:
        for key in ('full_output', 'retall', 'disp'):
            fmin_args[key] = False

    f = ts['f'].values
    X = ts[self._X_cols].values
    self.compute(X)

    result = fmin_powell(self.neg_lnposterior, x0=initial_hp, args=(f,),
                         **fmin_args)

    if not diagnose:
        if isinstance(result, OptimizeResult) and save:
            self.set_hp(result.x)
        elif save:
            self.set_hp(result)
    else:
        if self.neg_lnposterior() != result[1][1]:
            print("Posterior mismatch.")
            print("Current value:", self.neg_lnposterior())
            print("Optimised value:", result[1][1])
    return result