def scipyOptimize(recipe):
    """Optimize the recipe created above using scipy."""
    from scipy.optimize.minpack import leastsq
    print("Fit using scipy's LM optimizer")
    leastsq(recipe.residual, recipe.getValues())
    return
def refine(self):
    '''Optimize the recipe created above using scipy.'''
    from scipy.optimize.minpack import leastsq
    leastsq(self.recipe.residual, self.recipe.values)
    self.results = FitResults(self.recipe)
    print("Fit results:\n")
    print(self.results)
    return
def fitting(p):
    if p >= 1:
        pars = np.random.rand(p + 1)
        r = leastsq(PolyResiduals, pars, args=(X, Y))
        return r[0]  # return the coefficients
    elif p == -1:
        pars = np.random.rand(3)
        pars = [-1.488e-07, 4.983, 11]  # fixed initial guess overrides the random one
        r = leastsq(PowerResiduals, pars, args=(X, Y))
        return r[0]
def doBestFit(compositeModel, params0, maxfev=None, factor=None):
    if not maxfev:
        maxfev = 500 * (len(params0) + 1)
    if not factor:
        factor = 100
    residual = compositeModel.residual
    if compositeModel.isAnalyticalDerivs:
        jacobian = compositeModel.jacobian
        full_output = leastsq(residual, params0,
                              maxfev=maxfev, Dfun=jacobian, col_deriv=True,
                              factor=factor, full_output=1)
    else:
        full_output = leastsq(residual, params0, maxfev=maxfev,
                              factor=factor, full_output=1)
    return full_output
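# A minimal stand-alone illustration of the calling pattern doBestFit uses
# above -- an analytic Jacobian passed via Dfun with col_deriv=True. The
# linear model and all names here are invented for illustration; they are
# not part of the original compositeModel API.
import numpy as np
from scipy.optimize import leastsq

_x = np.linspace(0, 1, 20)
_y = 2.0 * _x + 1.0

def _residual(p):
    a, b = p
    return _y - (a * _x + b)

def _jacobian(p):
    # With col_deriv=True, row j holds d(residual)/d(p[j]); shape (n_params, n_points).
    return np.vstack([-_x, -np.ones_like(_x)])

_popt = leastsq(_residual, [0.0, 0.0], Dfun=_jacobian, col_deriv=True,
                maxfev=500 * 3, factor=100, full_output=1)[0]
print(_popt)  # should land near (2.0, 1.0)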
def fit(x, y):
    fun = _Leastsq_Exp._exp
    df = {'x': x, 'y': y}
    resid = lambda p, x, y: y - fun(*p)(x)
    ls = leastsq(resid, np.array([1.0, 1.0, 1.0, 1.0]), args=(df['x'], df['y']))
    a, b, c, d = ls[0]
    return a, b, c, d
def minimize1(self, dim, theta, delta):
    p0Cor = numpy.random.uniform(-1, 1, dim**2).reshape(dim, dim)
    p0Cor = p0Cor - numpy.diag(p0Cor) + numpy.identity(dim)
    p0 = reverse(dim, numpy.identity(dim), numpy.ones(dim) / 20)
    popt, _, infodict, mesg, _ = leastsq(errfunc, p0,
                                         args=(theta, delta, self.gbest.position),
                                         full_output=True)
    print(mesg)
    ss_err = (infodict['fvec']**2).sum()
    ss_tot = ((delta - delta.mean())**2).sum()
    rsquared = 1 - (ss_err / ss_tot)
    print("rsquared", rsquared)
    corrm, var, mu = transform(dim, popt)
    var = var * self.gbest.position
    _cov = corr2cov(corrm, var)
    print("used mu:", mu)
    print("found _cov:\n", _cov)
    sigma = numpy.sqrt(numpy.diag(_cov))
    print("=> found sigma:", sigma)
    return _cov
def test_input_untouched(self):
    p0 = array([0, 0, 0], dtype=float64)
    p0_copy = array(p0, copy=True)
    full_output = leastsq(self.residuals, p0,
                          args=(self.y_meas, self.x),
                          full_output=True)
    params_fit, cov_x, infodict, mesg, ier = full_output
    assert_(ier in (1, 2, 3, 4), "solution not found: %s" % mesg)
    assert_array_equal(p0, p0_copy)
def minimize1(self, dim, theta, delta):
    # p0Cor = numpy.random.uniform(-1, 1, dim**2).reshape(dim, dim)
    # p0Cor = p0Cor - numpy.diag(p0Cor) + numpy.identity(dim)
    # p0 = reverse(dim, numpy.identity(dim), numpy.ones(dim) / 20)
    p0 = numpy.ones(dim)
    popt, _, infodict, mesg, _ = leastsq(errfunc, p0, args=(theta, delta),
                                         full_output=True)
    print(mesg)
    ss_err = (infodict['fvec']**2).sum()
    ss_tot = ((delta - delta.mean())**2).sum()
    rsquared = 1 - (ss_err / ss_tot)
    print("rsquared", rsquared)
    # corrm, var, mu = transform(dim, popt)
    # var = var * self.gbest.position
    # _cov = corr2cov(corrm, var)
    print("found diag_inv_cov:\n", abs(popt))
    _cov = numpy.linalg.inv(numpy.diag(abs(popt)))
    # print("used mu:", mu)
    print("found _cov:\n", _cov)
    # sigma = numpy.sqrt(numpy.diag(_cov))
    # print("=> found sigma:", sigma)
    return _cov
def lsqE_tq(prm, qray, dray, constants, domidx):
    Rpr, wRd, qYaw = constants
    pr = geom.YPRfromR(Rpr)[1:]  # pitch and roll
    return opt.leastsq(esserrf_tq, prm,
                       args=(qray, dray, pr, wRd, domidx),
                       warning=False)[0]
def test_full_output(self):
    p0 = array([0, 0, 0])
    full_output = leastsq(self.residuals, p0,
                          args=(self.y_meas, self.x),
                          full_output=True)
    params_fit, cov_x, infodict, mesg, ier = full_output
    assert_(ier in (1, 2, 3, 4), 'solution not found: %s' % mesg)
def test_basic(self):
    p0 = array([0, 0, 0])
    params_fit, ier = leastsq(self.residuals, p0, args=(self.y_meas, self.x))
    assert_(ier in (1, 2, 3, 4), 'solution not found (ier=%d)' % ier)
    # low precision due to random noise in the test data
    assert_array_almost_equal(params_fit, self.abc, decimal=2)
def get_nd_parameters(k1, k2, k3, nd, focal_length, sensor_diagonal):
    x = numpy.arange(0, 1, 0.001)
    y_vig = 1 + k1 * x**2 + k2 * x**4 + k3 * x**6
    y_filter = 10**(nd * (1 - numpy.sqrt(1 + x**2 / (2 * focal_length / sensor_diagonal)**2)))
    y_total = y_vig * y_filter
    return leastsq(error_function, [k1, k2, k3], args=(x, y_total))[0]
def summarize(self, report, index):
    func1 = lambda v, x, mu: v[0] + v[1] * np.exp(-np.abs((np.array(x) - mu) / v[2]))
    full1 = lambda v, x: v[0] + v[1] * np.exp(-np.abs((np.array(x) - v[2]) / v[3]))
    func2 = lambda v, x, mu: v[0] + v[1] * np.exp(-((x - mu) / v[2]) ** 2) + v[3] * np.exp(-np.abs((np.array(x) - mu) / v[4]))
    full2 = lambda v, x: v[0] + v[1] * np.exp(-((x - v[2]) / v[3]) ** 2) + v[4] * np.exp(-np.abs((np.array(x) - v[2]) / v[5]))
    func3 = lambda v, x, mu: v[0] + v[1] * np.exp(-((np.array(x) - mu) / v[2]) ** 2)
    full3 = lambda v, x: v[0] + v[1] * np.exp(-np.abs((np.array(x) - v[2]) / v[3]) ** 2)
    # Force E to go to zero at the minimum
    func4 = lambda v, x, mu: v[0] - v[0] * np.exp(-np.abs((np.array(x) - mu) / v[1]))
    full4 = lambda v, x: v[0] - v[0] * np.exp(-np.abs((np.array(x) - v[1]) / v[2]))
    funce = lambda v, x, y, mu: (func1(v, x, mu) - y)
    fulle = lambda v, x, y: (full1(v, x) - y)
    use = self.sensels
    # pylab.figure()
    for p in self.s:
        if p in use:
            f = report.figure(cols=2)
            e1_list = self.e1_lists[use.index(p)]
            for c, clist in enumerate(e1_list):
                E1 = np.sum(np.array(clist), axis=0) / self.n_sampels[c]
                V = np.std(np.array(clist), axis=0)
                v0 = np.array([1, -1, self.area / 2, 1])
                mu = np.argmin(V)
                v, _ = leastsq(funce, v0, args=(range(self.area), V, mu), maxfev=10000)
                v0 = [v[0], v[1], mu, v[3]]
                v, _ = leastsq(fulle, v0, args=(range(self.area), V), maxfev=10000)
                x = np.linspace(0, self.area - 1, 100)
                d = x[np.argmin(full1(v, x))] - self.area / 2
                # pylab.subplot(len(self.sensels), len(self.commands), use.index(p) * 2 + c + 1)
                with f.plot('p%g_c%g' % (p, c),
                            caption='Pixel: %g Command: %g Samples: %g' % (p, c, self.n_sampels[c])) as pylab:
                    pylab.hold(True)
                    pylab.errorbar(range(self.area), E1 * 0, V)
                    pylab.xlim((0, self.area))
                    pylab.plot(x, full1(v, x), color='g')
                report.text('test%g_cmd_p%g_c%g' % (index, p, c), str(d))
def fitDecayFunc(x, y):
    isFitted = False
    function = decayFunc
    A0 = [y[0], 0.9, y[-1]]
    param = (x, y, function)
    A1, cov_x, infodict, mesg, ier = leastsq(objective, A0, args=param, full_output=True)
    if ier in [1, 2, 3, 4]:
        isFitted = True
    return A1, isFitted
def interpolate(x, df, fun):
    """Interpolate y at x by fitting fun to df, a dataframe with columns 'x' and 'y'."""
    resid = lambda p, x, y: y - fun(*p)(x)
    ls = leastsq(resid, [1.0, 1.0, 1.0, 1.0], args=(df['x'], df['y']))
    a, b, c, d = ls[0]
    y = fun(a, b, c, d)(x)
    return y
def scipyOptimize(recipe):
    """Optimize the recipe created above using scipy.

    The FitRecipe we created in makeRecipe has a 'residual' method that can
    be minimized using a scipy optimizer. The details are described in the
    source.
    """
    # We're going to use the least-squares (Levenberg-Marquardt) optimizer from
    # scipy. We simply have to give it the function to minimize
    # (recipe.residual) and the starting values of the Variables
    # (recipe.getValues()).
    from scipy.optimize.minpack import leastsq
    print("Fit using scipy's LM optimizer")
    leastsq(recipe.residual, recipe.getValues())
    return
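# The pattern above generalizes beyond FitRecipe: leastsq only needs a
# callable returning the residual vector and a starting parameter array.
# A hedged, self-contained sketch -- the data and model below are invented
# for illustration and are not part of the recipe API.
import numpy as np
from scipy.optimize import leastsq

_x = np.linspace(0, 5, 50)
_y = 2.5 * np.exp(-0.7 * _x)

def _residual(p):
    a, rate = p
    return _y - a * np.exp(-rate * _x)

_p_fit, _ier = leastsq(_residual, [1.0, 1.0])
print(_p_fit)  # roughly (2.5, 0.7); _ier in (1, 2, 3, 4) signals convergence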
def fitGaussianNLLS(xp, yp, patch):
    # print("patch: ", patch)
    paramsGuess = array([0, patch[hw, hw], hw, hw, 1])
    tA = time.time()
    (paramsOut, cov, infodict, msg, ier) = minpack.leastsq(
        residualFunction, paramsGuess, (xp, yp, patch),
        full_output=1, ftol=.1, xtol=.1)
    # print("leastsq")
    # print("nfev = %d" % infodict['nfev'])
    # print(time.time() - tA)
    return paramsOut
def test_mp_pool(self):
    """Test using a multiprocessing pool."""
    p0 = matrix([0, 0, 0])
    mpool = multiprocessing.Pool(2)
    full_output = leastsq(residual_func, p0,
                          args=(self.y_meas, self.x),
                          full_output=True, mp_pool=mpool)
    params_fit, cov_x, infodict, mesg, ier = full_output
    assert_(ier in (1, 2, 3, 4), 'solution not found: %s' % mesg)
def test_reentrant_func(self):
    def func(*args):
        self.test_basic()
        return self.residuals(*args)

    p0 = array([0, 0, 0])
    params_fit, ier = leastsq(func, p0, args=(self.y_meas, self.x))
    assert_(ier in (1, 2, 3, 4), 'solution not found (ier=%d)' % ier)
    # low precision due to random noise in the test data
    assert_array_almost_equal(params_fit, self.abc, decimal=2)
def interpolate(x, df, fun=None):
    """Interpolate y at x by fitting fun to df, a dataframe with columns 'x' and 'y'."""
    if fun is None:
        fun = _Leastsq_Exp._exp
    resid = lambda p, x, y: y - fun(*p)(x)
    ls = leastsq(resid, np.array([1.0, 1.0, 1.0, 1.0]), args=(df['x'], df['y']))
    a, b, c, d = ls[0]
    y = fun(a, b, c, d)(x)
    return y, a, b, c, d
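# Both interpolate() variants assume `fun` is a curried four-parameter
# model: fun(a, b, c, d) returns a callable of x. _Leastsq_Exp._exp is not
# shown in this corpus; the stand-in below merely mimics that calling
# convention and is purely illustrative.
import numpy as np
from scipy.optimize import leastsq

def _exp_model(a, b, c, d):
    return lambda x: a * np.exp(b * x) + c * x + d

_df = {'x': np.linspace(0.0, 2.0, 20)}
_df['y'] = _exp_model(1.5, 0.8, 0.0, 2.0)(_df['x'])

_resid = lambda p, x, y: y - _exp_model(*p)(x)
_params, _ier = leastsq(_resid, np.array([1.0, 1.0, 1.0, 1.0]),
                        args=(_df['x'], _df['y']))
print(_params)  # should land near (1.5, 0.8, 0.0, 2.0)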
def simulateSteadyState(self):
    """
    Perform a steady-state simulation.

    Call self.clear() before using this method!

    This function computes one steady-state solution of the system of
    differential equations. Which of the potentially many solutions is found
    depends on the initial guess. A steady-state solution is a vector of all
    variables. This vector is appended to the array of results. Usually one
    will compute a series of steady-state solutions, each with slightly
    different parameters.

    Initial guess: when there are no prior results, the initial values are
    (ab)used as an initial guess; otherwise the latest results are used as
    the initial guess. The count of the current simulation is stored in the
    time array. This way the graph function still produces useful graphs
    with steady-state simulations.

    The results can be displayed with the graph(...) function and stored
    with the store function. The function getAttributes(...) returns the
    simulation result of a specific attribute.
    """
    raise Exception('This method is currently broken!')
    # TODO: Use flag self._isSteadyStateStart instead of self.time
    if not hasattr(self, 'time'):
        # This is the first call in a row of steady-state simulations:
        # set up everything.
        lastResult = -1
        self.resultArray = array([[0]], 'float64')
        self.time = array([0], 'float64')
        self.initialize()
        # initial guess for root finder: initial values abused
        x0 = self.initialValues
        t0 = -1
    else:
        lastResult = shape(self.resultArray)[0] - 1
        # initial guess for root finder: last result
        x0 = self.resultArray[lastResult, 0:self.stateVectorLen]
        t0 = self.time[lastResult]
    # Compute the state variables of the steady-state solution.
    # Caution: the function will also report local minima that are no roots!
    # Note: the extra arguments must be passed as a tuple, hence (t0,).
    (xmin, msg) = minpack.leastsq(self.dynamic, x0, (t0,))
    ## xmin = optimize.fsolve(self.dynamic, x0, (0))
    ## fsolve always gets stuck in one (the trivial) minimum
    # Also compute the algebraic variables.
    currRes = ones(4)  # dummy to make pydev show no error
    # currRes = self.outputEquations(xmin)
    # Expand the storage and save the results.
    self.resultArray = resize(self.resultArray,
                              (lastResult + 2, self.stateVectorLen + self.algVectorLen))
    self.resultArray[lastResult + 1, :] = currRes[0, :]
    self.time = resize(self.time, (lastResult + 2,))
    self.time[lastResult + 1] = t0 + 1
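# Since simulateSteadyState() is marked broken, here is a working sketch of
# the same idea -- finding a steady state by driving the right-hand side of
# an ODE system to zero with leastsq. The toy system is ours, not the
# original model; note that leastsq can also converge to non-root local
# minima, so the residual should always be checked.
import numpy as np
from scipy.optimize import leastsq

def _dynamic(x, t):
    x1, x2 = x
    return [1.0 - x1 * x2, x1 - x2]

_x_ss, _ier = leastsq(_dynamic, np.array([0.5, 0.5]), args=(0.0,))
print(_x_ss)                 # approaches (1.0, 1.0)
print(_dynamic(_x_ss, 0.0))  # verify this is actually a root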
def coherence(self, referenceStream, referenceAzimuth, NFFT=None, noverlap=None, Fs=None):
    """
    Determine the angle of a north and east channel based on a reference
    north channel.

    @param referenceStream stream containing a reference North channel.
    @param referenceAzimuth reference angle of North channel.
    @param NFFT number of points to use in FFT.
    @param noverlap number of overlapping points between FFTs.
    @param Fs sampling frequency. If omitted, uses referenceStream sampling_rate.
    @return relative angle from unknown north to reference north
        (referenceAngle - angle = actual angle of unknown channel).
    """
    referenceExtent = referenceStream.getTimeExtent()
    unknownExtent = self.getTimeExtent()
    if (referenceExtent['start'] != unknownExtent['start']
            or referenceExtent['end'] != unknownExtent['end']):
        raise StreamsException("reference and unknown streams must have same time extent")
    if Fs is None:
        Fs = referenceStream.data.traces[0].stats.sampling_rate
    if noverlap is None:
        noverlap = NFFT / 4

    # Internal function to determine the coherence of unrotated and rotated data.
    def cohere1(theta):
        theta_r = math.radians(theta)
        rotated = (self.data[0].data) * math.cos(theta_r) + (self.data[1].data) * math.sin(theta_r)
        coh, fre = mlab.cohere(referenceStream.data[0].data, rotated,
                               NFFT=NFFT, noverlap=noverlap, Fs=Fs)
        return (coh - 1).sum()

    # most coherent angle of rotation
    theta1 = solv.leastsq(cohere1, 0)
    theta1 = normalize360(theta1[0][0])
    # rotate data and compare against reference stream
    rotated = self.rotate(theta1)
    rotatedData1 = rotated.data[0].data.astype('Float64')
    referenceData1 = referenceStream.data[0].data.astype('Float64')
    scale1 = sum(abs(rotatedData1)) / sum(abs(referenceData1))
    residual1 = sum(referenceData1**2 - rotatedData1 * scale1)**2
    rotatedData2 = rotated.data[1].data.astype('Float64')
    referenceData2 = referenceStream.data[1].data.astype('Float64')
    scale2 = sum(abs(rotatedData2)) / sum(abs(referenceData2))
    residual2 = sum(referenceData2**2 - rotatedData2 * scale2)**2
    return {
        'theta': theta1,
        'azimuth': referenceAzimuth - theta1,
        'rotated': rotated,
        'scale1': scale1,
        'residual1': residual1,
        'scale2': scale2,
        'residual2': residual2,
    }
def least_example():
    print('least example')

    def f(x, b0, b1):
        return b0 * sin(b1 * x)

    def res(params, xdata, ydata, function):
        return function(xdata, *params) - ydata

    x = linspace(20, 40, 100)
    y = f(x, 2.4, 1.14)
    args = (x, y, f)
    out = leastsq(res, [2.2, 1.2], args=args)
    print(out)
def __init__(self, t, z, tau0, f0, amp0, phi0):
    if issubclass(z.dtype.type, complex):
        y = z.real
    else:
        y = z
    # initial guess
    x0 = [amp0, 1.0 / tau0, f0, phi0]
    if y[1] < 0:
        x0[0] = -x0[0]
    fe = leastsq(fitall_residuals, x0, (t, y))
    xf = fe[0]
    if 0 < fe[1] < 4:
        res = fitall_residuals(xf, t, y)
        self.err = sqrt(sum(res**2) / sum(y**2))
    else:
        self.err = 1e10
    self.f = float(xf[2])
    self.tau = float(1.0 / xf[1])
    self.amp = float(abs(xf[0]))
def aproximate_lineal(t_exp, y_exp, test=False):
    # define cost function - adapt to your usage
    #
    # single exponential
    function = single
    x0 = [0, y_exp[-1], t_exp.shape[0]]
    param = (t_exp, y_exp, function)
    # perform least squares fit
    A_final, cov_x, infodict, mesg, ier = leastsq(objective, x0, args=param,
                                                  full_output=True)
    if ier != 1:
        print("No fit! %s " % mesg)
        return None
    y_final = function(A_final, t_exp)
    chi2 = sum((y_exp - y_final)**2 / y_final)
    if not test:
        return y_final
    else:
        return y_final, chi2
def curve_fit(f, xdata, ydata, p0=None, sigma=None, **kw):
    """This is scipy.optimize.curve_fit, which is only available in very
    recent scipy. It is overwritten below by the version from scipy if it
    exists there.
    """
    if p0 is None or isscalar(p0):
        # determine number of parameters by inspecting the function
        nargs = getNumArgs(f)
        if nargs < 2:
            msg = "Unable to determine number of fit parameters."
            raise ValueError(msg)
        if p0 is None:
            p0 = 1.0
        p0 = [p0] * (nargs - 1)
    args = (xdata, ydata, f)
    if sigma is None:
        func = _general_function
    else:
        func = _weighted_general_function
        args += (1.0 / asarray(sigma),)
    # Remove full_output from kw, otherwise we're passing it in twice.
    return_full = kw.pop('full_output', False)
    res = leastsq(func, p0, args=args, full_output=1, **kw)
    (popt, pcov, infodict, errmsg, ier) = res  # pylint: disable=unbalanced-tuple-unpacking
    if ier not in [1, 2, 3, 4]:
        msg = "Optimal parameters not found: " + errmsg
        raise RuntimeError(msg)
    if (len(ydata) > len(p0)) and pcov is not None:
        s_sq = (func(popt, *args)**2).sum() / (len(ydata) - len(p0))
        pcov = pcov * s_sq
    else:
        pcov = inf
    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
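# The backport above is called exactly like scipy's own curve_fit. A short
# smoke test with an invented linear model (this assumes the helper
# functions _general_function etc. referenced above are in scope):
import numpy as np

def _line(x, a, b):
    return a * x + b

_x = np.linspace(0, 10, 30)
_y = _line(_x, 3.0, 4.0) + 0.1 * np.random.randn(_x.size)

_popt, _pcov = curve_fit(_line, _x, _y, p0=[1.0, 1.0])
_perr = np.sqrt(np.diag(_pcov))  # 1-sigma parameter uncertainties
print(_popt, _perr)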
def __init__(self, *args):
    QWidget.__init__(self, *args)
    # make a QwtPlot widget
    self.plot = QwtPlot('A PyQwt and MinPack Demonstration', self)
    # initialize the noisy data
    scatter = 0.05
    x = arrayrange(-5.0, 5.0, 0.1)
    y = RandomArray.uniform(1.0 - scatter, 1.0 + scatter, shape(x)) * \
        function([1.0, 1.0, -2.0, 2.0], x)
    # fit from a reasonable initial guess
    guess = asarray([0.5, 1.5, -1.0, 3.0])
    yGuess = function(guess, x)
    solution = leastsq(function, guess, args=(x, y))
    yFit = function(solution[0], x)
    print(solution)
    # insert a few curves
    c1 = self.plot.insertCurve('data')
    c2 = self.plot.insertCurve('guess')
    c3 = self.plot.insertCurve('fit')
    # set curve styles
    self.plot.setCurvePen(c1, QPen(Qt.black))
    self.plot.setCurvePen(c2, QPen(Qt.red))
    self.plot.setCurvePen(c3, QPen(Qt.green))
    # copy the data
    self.plot.setCurveData(c1, x, y)
    self.plot.setCurveData(c2, x, yGuess)
    self.plot.setCurveData(c3, x, yFit)
    # set axis titles
    self.plot.setAxisTitle(QwtPlot.xBottom, 'x -->')
    self.plot.setAxisTitle(QwtPlot.yLeft, 'y -->')
    self.plot.enableLegend(1)
    self.plot.replot()
def minimize1(self, dim, theta, delta):
    p0 = reverse(dim, numpy.identity(dim), numpy.ones(dim) / 20)
    popt, _, infodict, mesg, _ = leastsq(errfunc, p0,
                                         args=(theta, delta, self.gbest.position),
                                         full_output=True)
    print(mesg)
    ss_err = (infodict["fvec"] ** 2).sum()
    ss_tot = ((delta - delta.mean()) ** 2).sum()
    rsquared = 1 - (ss_err / ss_tot)
    print("rsquared", rsquared)
    corrm, var, mu = transform(dim, popt)
    var = var * self.gbest.position
    _cov = corr2cov(corrm, var)
    print("used mu:", mu)
    print("found _cov:\n", _cov)
    sigma = numpy.sqrt(numpy.diag(_cov))
    print("=> found sigma:", sigma)
    return _cov
def fit_data(data, fct):
    '''Try to fit the specified model to the measured data.'''
    f = lambda p, dat: fct(dat[:, 0], *p) - dat[:, 1]  # objective fct
    num_params = fct.__code__.co_argcount - 1
    out = []
    for l in range(len(data)):
        out_l = []
        for i in range(len(data[l])):
            cell = np.array(data[l][i]) * 1e9
            gtz = cell[:, 1] > 0
            try:
                popt, pcov = curve_fit(fct, cell[gtz, 0], cell[gtz, 1])
            except Exception:
                print('automatic fitting failed. trying ls')
                try:
                    popt, pcov = leastsq(f, (1,) * num_params, args=(cell,))
                except Exception:
                    raise Exception('Could not find parameters for cell %s-%s' % (l, i))
            out_l.append(popt)
        out.append(np.array(out_l))
    return out
def fit_circle_with_endpoints(x, A, B):
    """
    Fit a circle which always passes through A and B and is as close as
    possible to the points in x.
    """
    if x.ndim > 1 and x.shape[1] > 1:
        x0 = x[:, x.shape[1] // 2].reshape(2)
        (x1, flag) = spmin.leastsq(quadratic_error, x0, (x, A, B), maxfev=5000)
        (r, C) = circumcircle(A, B, x1)
        es = quadratic_fit_objective(x, r, C)
        # Alternative parameterizations, kept for reference:
        # x0 = x[:, x.shape[1]//2].reshape(2)
        # d = B - A
        # n = np.array([d[1], -d[0]])
        # a0 = np.dot(n.T, x0 - 0.5*(A+B))
        # (a1, flag) = spmin.leastsq(quadratic_error_aparam, a0, (x, A, B), maxfev=2000)
        # x1 = 0.5*(A+B) + a1*n
        # (r, C) = circumcircle(A, B, x1)
        # es = quadratic_fit_objective(x, r, C)
        #
        # a0 = 0
        # (a1, flag) = spmin.leastsq(quadratic_error_alphaParam, a0, (x, A, B), maxfev=2000)
        # d = B - A
        # n = np.array([d[1], -d[0]])
        # if np.abs(a1) < 1e-16:
        #     normd = np.sqrt(np.sum(np.power(d, 2.0)))
        #     r = np.inf
        #     C = None
        #     es = np.dot(n.T/normd, x - 0.5*(A+B).reshape(2, 1))
        # else:
        #     C = 0.5*(A+B) + (1.0/a1)*n
        #     r = 0.5*(np.sqrt(np.sum(np.power(A.reshape(2) - C.reshape(2), 2.0))) +
        #              np.sqrt(np.sum(np.power(B.reshape(2) - C.reshape(2), 2.0))))
        #     es = np.sqrt(np.sum(np.power(x - C.reshape(2, 1), 2.0), 0)) - r
    else:
        x1 = x.reshape(2)
        (r, C) = circumcircle(A, B, x1)
        es = quadratic_fit_objective(x, r, C)
    return (C, r, es)
def fit(x_array, y_array, function, A_start):
    """
    Used to fit things.
    20101209/RB: started

    INPUT:
    x_array: the array with time or something
    y_array: the array with the values that have to be fitted
    function: one of the functions, in the format as in the file "functions"
    A_start: a starting point for the fitting

    OUTPUT:
    A_final: the final parameters of the fitting

    WARNING: Always check the result; it can be sensitive to the choice of
    starting point.
    """
    param = (x_array, y_array, function)
    A_final, cov_x, infodict, mesg, ier = leastsq(minimize, A_start, args=param,
                                                  full_output=True)  # , warning=True)
    return A_final
def fit(X_train, y_train):
    # initial guesses
    a0 = 10.0
    a1, b1, c1 = 30.0, 3.0, 1.0
    a2, b2, c2 = 80.0, 8.0, 2.0
    a3, b3, c3 = 60.0, 16.0, 2.0
    p = [a0, a1, b1, c1, a2, b2, c2, a3, b3, c3]

    def res(p, y, x):
        a0, a1, b1, c1, a2, b2, c2, a3, b3, c3 = p
        y_fit = funcArray(x, a0, a1, b1, c1, a2, b2, c2, a3, b3, c3)
        return y - y_fit

    plsq = leastsq(res, p, args=(y_train, X_train))
    # fitted parameters, in the same order as the initial guess
    return list(plsq[0])
def run(self, parDict={}, tmesh=None, verbose=False):
    parDict_new = copy(parDict)
    if tmesh is None:
        tmesh = self.get_tmesh()
    elif len(tmesh) < 4 + self.numFreePars:
        print("Warning: supplied tmesh contains too few points for "
              "algorithm to run accurately -- it may fail.")
    if 'args' in parDict:
        parDict_new['args'] = (tmesh,) + parDict['args']
    else:
        parDict_new['args'] = (tmesh,)
    parsOrig = []
    for i in range(self.numFreePars):
        parsOrig.append(self.testModel.query(self.parTypeStr[i])[self.freeParNames[i]])
    parsOrig = array(parsOrig)
    parDict_new['p_start'] = parsOrig
    parDict_new['Dfun'] = self.Jacobian
    if 'residuals' not in parDict:
        parDict_new['residuals'] = self._residual_fn
    # No need to compute the trajectory here. It gets done in the residual
    # function computation.
    # Set default minimizer pars.
    if not self._algParamsSet:
        self.setAlgParams(parDict_new)
    # Store these for use within residual functions.
    self._tmesh_len = len(tmesh)
    self._memory = zeros(self._tmesh_len * self._depvar_len, 'Float64')
    # Perform least-squares fitting.
    rout.start()
    rerr.start()
    try:
        results = minpack.leastsq(self.__residuals, self.__p_start,
                                  args=self.__args,
                                  Dfun=self.__Dfun,
                                  full_output=self.__full_output,
                                  col_deriv=self.__col_deriv,
                                  ftol=self.__ftol,
                                  xtol=self.__xtol,
                                  gtol=self.__gtol,
                                  maxfev=self.__maxfev,
                                  epsfcn=self.__epsfcn,
                                  factor=self.__factor,
                                  diag=self.__diag)
    except Exception:
        print("Calculating residual failed for pars:", parsOrig)
        raise
    out = rout.stop()
    err = rerr.stop()
    # build return information
    success = results[2] == 1
    if isinstance(results[0], float):
        res_par_list = [results[0]]
        orig_par_list = [parsOrig[0]]
    else:
        res_par_list = results[0].tolist()
        orig_par_list = parsOrig.tolist()
    pestReturn = {'success': success,
                  'pars_fit': dict(zip(self.freeParNames, res_par_list)),
                  'pars_orig': dict(zip(self.freeParNames, orig_par_list)),
                  'alg_results': results[1],
                  'sys_fit': self.testModel}
    if verbose:
        if success or results[3].find('at most') != -1:
            if success:
                print('Solution of ', self.freeParNames, ' = ', results[0])
            else:
                ## parvals = [self.testModel.pars[p] for p in self.freeParNames]
                print('Closest values of ', self.freeParNames, ' = ', results[0])  ## parvals
            print('Original values = ', parsOrig)
            print('Number of mesh points = ', len(tmesh))
            print('Number of fn evals = ', results[1]["nfev"], "(# iterations)")
            if not success:
                print('Solution not found: ' + results[3])
        else:
            print('Solution not found: ' + results[3])
    return pestReturn
# main program

random.seed(12345)  # seed for the random number generator

# load data from file
data = loadtxt(os.path.join(datadir, 'count.txt'), skiprows=1)
yvec = data[:, 0]
xmat = data[:, 1:data.shape[1]]
xmat = hstack([ones((data.shape[0], 1)), xmat])
data = {'yvec': yvec, 'xmat': xmat}

# use Bayesian regression to initialise
bayesreg = BayesRegression(yvec, xmat)
sig, beta0 = bayesreg.posterior_mean()
init_beta, info = leastsq(minfunc, beta0, args=(yvec, xmat))
data['betaprec'] = -llhessian(data, init_beta)
scale = linalg.inv(data['betaprec'])

# initialise the random walk MH algorithm
samplebeta = RWMH(posterior, scale, init_beta, 'beta')
ms = MCMC(20000, 4000, data, [samplebeta],
          loglike=(logl, xmat.shape[1], 'yvec'))
ms.sampler()
ms.output()
ms.plot('beta')
def calculate_vignetting(input_file, original_file, exif_data, distance):
    basename = os.path.splitext(input_file)[0]
    all_points_filename = "%s.all_points.dat" % basename
    bins_filename = "%s.bins.dat" % basename
    pdf_filename = "%s.pdf" % basename
    gp_filename = "%s.gp" % basename
    vig_filename = "%s.vig" % basename
    if os.path.exists(vig_filename):
        return
    print("Generating vignetting data for %s ... " % input_file, flush=True)

    # This loads the pgm file; the image data is a one-dimensional array,
    # e.g. image_data = [1009, 1036, 1071, 1106, 1140, 1169, 1202, 1239, ...]
    width, height, image_data = load_pgm(input_file)

    # Get the half diagonal of the image.
    half_diagonal = math.hypot(width // 2, height // 2)
    maximal_radius = 1

    # Only remember pixel intensities which are within the given radius.
    radii, intensities = [], []
    for y in range(image_data.shape[0]):
        for x in range(image_data.shape[1]):
            radius = math.hypot(x - width // 2, y - height // 2) / half_diagonal
            if radius <= maximal_radius:
                radii.append(radius)
                intensities.append(image_data[y, x])
    with open(all_points_filename, 'w') as f:
        for radius, intensity in zip(radii, intensities):
            f.write("%f %d\n" % (radius, intensity))

    number_of_bins = 16
    bins = [[] for i in range(number_of_bins)]
    for radius, intensity in zip(radii, intensities):
        # The zeroth and the last bin are only half bins, which means that
        # their means are skewed. But this is okay: for the zeroth, the curve
        # is supposed to be horizontal anyway, and for the last, it
        # underestimates the vignetting at the rim, which is a good thing
        # (too much correction is bad).
        bin_index = int(round(radius / maximal_radius * (number_of_bins - 1)))
        bins[bin_index].append(intensity)
    radii = [i / (number_of_bins - 1) * maximal_radius for i in range(number_of_bins)]
    intensities = [np.median(bin) for bin in bins]
    with open(bins_filename, 'w') as f:
        for radius, intensity in zip(radii, intensities):
            f.write("%f %d\n" % (radius, intensity))

    radii, intensities = np.array(radii), np.array(intensities)
    A, k1, k2, k3 = leastsq(lambda p, x, y: y - fit_function(x, *p),
                            [30000, -0.3, 0, 0], args=(radii, intensities))[0]

    vig_config = configparser.ConfigParser()
    vig_config[exif_data['lens_model']] = {
        'focal_length': exif_data['focal_length'],
        'aperture': exif_data['aperture'],
        'distance': distance,
        'A': '%.7f' % A,
        'k1': '%.7f' % k1,
        'k2': '%.7f' % k2,
        'k3': '%.7f' % k3,
    }
    with open(vig_filename, "w") as vigfile:
        vig_config.write(vigfile)

    if distance == float("inf"):
        distance = "∞"
    with codecs.open(gp_filename, "w", encoding="utf-8") as c:
        c.write('set term pdf\n')
        c.write('set print "%s"\n' % input_file)
        c.write('set output "%s"\n' % pdf_filename)
        c.write('set fit logfile "/dev/null"\n')
        c.write('set grid\n')
        c.write('set title "%s, %0.1f mm, f/%0.1f, %s m\\n%s" noenhanced\n'
                % (exif_data['lens_model'], exif_data['focal_length'],
                   exif_data['aperture'], distance, original_file))
        c.write('plot "%s" with dots title "samples", ' % all_points_filename)
        c.write('"%s" with linespoints lw 4 title "average", ' % bins_filename)
        c.write('%f * (1 + (%f) * x**2 + (%f) * x**4 + (%f) * x**6) title "fit"\n'
                % (A, k1, k2, k3))
    plot_pdf(gp_filename)
def evaluate_image_set(exif_data, filepaths):
    output_filename = "{0}--{1}--{2}--{3}".format(*exif_data).replace(" ", "_").replace("/", "__"). \
        replace(":", "___").replace("*", "++").replace("=", "##")
    gnuplot_filename = "{0}.gp".format(output_filename)
    try:
        gnuplot_line = codecs.open(gnuplot_filename, encoding="utf-8").readlines()[3]
        match = re.match(r' [-e.0-9]+ \* \(1 \+ \((?P<k1>[-e.0-9]+)\) \* x\*\*2 \+ \((?P<k2>[-e.0-9]+)\) \* x\*\*4 \+ '
                         r'\((?P<k3>[-e.0-9]+)\) \* x\*\*6\) title "fit"', gnuplot_line)
        k1, k2, k3 = [float(k) for k in match.groups()]
    except IOError:
        radii, intensities = [], []
        for filepath in filepaths:
            maximal_radius = 1
            try:
                sidecar_file = open(os.path.splitext(filepath)[0] + ".txt")
            except FileNotFoundError:
                pass
            else:
                for line in sidecar_file:
                    if line.startswith("maximal_radius"):
                        maximal_radius = float(line.partition(":")[2])
            dcraw_process = subprocess.Popen(
                generate_raw_conversion_call(filepath, ["-4", "-M", "-o", "0", "-c"] + h_option),
                stdout=subprocess.PIPE)
            image_data = subprocess.check_output(
                ["convert", "tiff:-", "-set", "colorspace", "RGB", "-resize", "250", "pgm:-"],
                stdin=dcraw_process.stdout, stderr=open(os.devnull, "w"))
            width, height = None, None
            header_size = 0
            for i, line in enumerate(image_data.splitlines(True)):
                header_size += len(line)
                if i == 0:
                    assert line == b"P5\n", "Wrong image format (must be NetPGM binary)"
                else:
                    line = line.partition(b"#")[0].strip()
                    if line:
                        if not width:
                            width, height = line.split()
                            width, height = int(width), int(height)
                        else:
                            assert line == b"65535", "Wrong grayscale depth: {} (must be 65535)".format(int(line))
                            break
            half_diagonal = math.hypot(width // 2, height // 2)
            image_data = struct.unpack("!{0}s{1}H".format(header_size, width * height), image_data)[1:]
            for i, intensity in enumerate(image_data):
                y, x = divmod(i, width)
                radius = math.hypot(x - width // 2, y - height // 2) / half_diagonal
                if radius <= maximal_radius:
                    radii.append(radius)
                    intensities.append(intensity)
        all_points_filename = "{0}-all_points.dat".format(output_filename)
        with open(all_points_filename, "w") as outfile:
            for radius, intensity in zip(radii, intensities):
                outfile.write("{0} {1}\n".format(radius, intensity))
        number_of_bins = 16
        bins = [[] for i in range(number_of_bins)]
        for radius, intensity in zip(radii, intensities):
            # The zeroth and the last bin are only half bins, which means that
            # their means are skewed. But this is okay: for the zeroth, the
            # curve is supposed to be horizontal anyway, and for the last, it
            # underestimates the vignetting at the rim, which is a good thing
            # (too much correction is bad).
            bin_index = int(round(radius / maximal_radius * (number_of_bins - 1)))
            bins[bin_index].append(intensity)
        radii = [i / (number_of_bins - 1) * maximal_radius for i in range(number_of_bins)]
        intensities = [numpy.median(bin) for bin in bins]
        bins_filename = "{0}-bins.dat".format(output_filename)
        with open(bins_filename, "w") as outfile:
            for radius, intensity in zip(radii, intensities):
                outfile.write("{0} {1}\n".format(radius, intensity))
        radii, intensities = numpy.array(radii), numpy.array(intensities)

        def fit_function(radius, A, k1, k2, k3):
            return A * (1 + k1 * radius**2 + k2 * radius**4 + k3 * radius**6)

        A, k1, k2, k3 = leastsq(lambda p, x, y: y - fit_function(x, *p),
                                [30000, -0.3, 0, 0], args=(radii, intensities))[0]
        lens_name, focal_length, aperture, distance = exif_data
        if distance == float("inf"):
            distance = "∞"
        codecs.open(gnuplot_filename, "w", encoding="utf-8").write("""set grid
set title "{6}, {7} mm, f/{8}, {9} m"
plot "{0}" with dots title "samples", "{1}" with linespoints lw 4 title "average", \\
     {2} * (1 + ({3}) * x**2 + ({4}) * x**4 + ({5}) * x**6) title "fit"
pause -1
""".format(all_points_filename, bins_filename, A, k1, k2, k3, lens_name, focal_length, aperture, distance))
    return (k1, k2, k3)
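# The radial vignetting model used in both functions above can be exercised
# in isolation. All values below are synthetic, chosen only to show the
# shape of the leastsq call:
import numpy as np
from scipy.optimize import leastsq

def _fit_function(radius, A, k1, k2, k3):
    # Even polynomial in the normalized radius, as above.
    return A * (1 + k1 * radius**2 + k2 * radius**4 + k3 * radius**6)

_radii = np.linspace(0, 1, 16)
_intensities = _fit_function(_radii, 30000, -0.3, 0.05, -0.01)

_params, _ier = leastsq(lambda p, x, y: y - _fit_function(x, *p),
                        [30000, -0.3, 0, 0], args=(_radii, _intensities))
_A, _k1, _k2, _k3 = _params
print(_A, _k1, _k2, _k3)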
# As usual, we add variables for the overall scale of the PDF and a delta2
# parameter for correlated motion of neighboring atoms.
niFit.addVar(niPDF.scale, 1)
niFit.addVar(niPDF.nickel.delta2, 5)

# We fix Qdamp based on prior information about our beamline.
niFit.addVar(niPDF.qdamp, 0.03, fixed=True)

# Turn off printout of iteration number.
niFit.clearFitHooks()

# We can now execute the fit using scipy's least-squares optimizer.
print("Refine PDF using scipy's least-squares optimizer:")
print("  variables:", niFit.names)
print("  initial values:", niFit.values)
leastsq(niFit.residual, niFit.values)
print("  final values:", niFit.values)
print()

# Obtain and display the fit results.
niResults = FitResults(niFit)
print("FIT RESULTS\n")
print(niResults)

# Plot the observed and refined PDF.

# Get the experimental data from the recipe.
r = niFit.nickel.profile.x
gobs = niFit.nickel.profile.y

# Get the calculated PDF and compute the difference between the calculated and
# measured PDF.
def fit(x_array, y_array, function, A_start, return_all=False):
    """
    Fit data.

    Arguments
    ---------
    x_array : ndarray
        the array with time or something
    y_array : ndarray
        the array with the values that have to be fitted
    function : function
        one of the functions, in the format: function(A, t), where A are the
        function arguments and t is the variable.
    A_start : list
        a starting point for the fitting
    return_all : bool
        the function used to return only the final result. The leastsq
        method does however return more data, which may be useful for
        debugging. When this flag is True, it will return these extras as
        well. For legacy purposes the default is False. See the reference of
        the leastsq method for the extra output:
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.leastsq.html

    Returns
    -------
    A_final : list
        the final parameters of the fitting

    When return_all == True:
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an estimate of
        the Jacobian around the solution. None if a singular matrix was
        encountered (indicates very flat curvature in some direction). This
        matrix must be multiplied by the residual variance to get the
        covariance of the parameter estimates - see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:
        - "nfev" : the number of function calls
        - "fvec" : the function evaluated at the output
        - "fjac" : a permutation of the R matrix of a QR factorization of
          the final approximate Jacobian matrix, stored column wise.
          Together with ipvt, the covariance of the estimate can be
          approximated.
        - "ipvt" : an integer array of length N which defines a permutation
          matrix, p, such that fjac*p = q*r, where r is upper triangular
          with diagonal elements of nonincreasing magnitude. Column j of p
          is column ipvt(j) of the identity matrix.
        - "qtf" : the vector (transpose(q) * fvec)
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable "mesg" gives more information.

    Examples
    --------
    Fit some data to this function from Equations::

        def linear(A, t):
            return A[0] + A[1] * t

        ###
        x = x-axis
        y = some data
        A = [0, 1]  # initial guess
        A_final = fit(x, y, Equations.linear, A)
        ###

    WARNING: Always check the result; it can be sensitive to the choice of
    starting point.

    Notes
    -----
    - 2010-12-09/RB: started
    - 2013-01-31/RB: imported in Crocodile, added example to doc-string
    """
    if scipy_import:
        param = (x_array, y_array, function)
        A_final, cov_x, infodict, mesg, ier = leastsq(minimize, A_start, args=param,
                                                      full_output=True)
        if return_all:
            return A_final, cov_x, infodict, mesg, ier
        else:
            return A_final
    else:
        DEBUG.printError("Scipy.leastsq is not loaded. Fit is not done",
                         inspect.stack())
        return False
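# The docstring example above relies on an Equations module that is not
# part of this corpus. Below is a self-contained rendering of the same
# full_output pattern, with an invented linear model and the residual
# helper spelled out (the original `minimize` helper is assumed to look
# like this):
import numpy as np
from scipy.optimize import leastsq

def _linear(A, t):
    return A[0] + A[1] * t

def _minimize(A, t, y, function):
    # Residual vector handed to leastsq.
    return y - function(A, t)

_t = np.linspace(0, 10, 40)
_y = _linear([2.0, 0.5], _t)

_A_final, _cov_x, _infodict, _mesg, _ier = leastsq(
    _minimize, [0.0, 1.0], args=(_t, _y, _linear), full_output=True)
print(_A_final, _infodict['nfev'], _ier)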
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    if p0 is None:
        # determine number of parameters by inspecting the function
        from scipy._lib._util import getargspec_no_self as _getargspec
        args, varargs, varkw, defaults = _getargspec(f)
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        p0 = mp._initialize_feasible(lb, ub)
    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'
    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # NaNs cannot be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)
    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)

    weights = 1.0 / np.asarray(sigma) if sigma is not None else None
    func = mp._wrap_func(f, xdata, ydata, weights)
    if callable(jac):
        jac = mp._wrap_jac(jac, xdata, weights)
    elif jac is None and method != 'lm':
        jac = '2-point'

    # Remove full_output from kwargs, otherwise we're passing it in twice.
    return_full = kwargs.pop('full_output', False)
    if method == 'lm':
        res = mp.leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        cost = np.sum(infodict['fvec'] ** 2)
    else:
        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method, **kwargs)
        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x
        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        # infodict = dict(nfev=res.nfev, fvec=res.fun, fjac=res.jac,
        #                 ipvt=None, qtf=None)
        infodict = None
        ier = res.status
        errmsg = res.message
    if ier not in [1, 2, 3, 4]:
        raise RuntimeError("Optimal parameters not found: " + errmsg)

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = np.zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(np.inf)
        warn_cov = True
    elif not absolute_sigma:
        if ydata.size > p0.size:
            s_sq = cost / (ydata.size - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(np.inf)
            warn_cov = True
    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
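# A quick check of the dispatch above: an unbounded call goes through the
# leastsq branch (method 'lm'), a bounded one through least_squares
# (method 'trf'). The model is invented, and this assumes the mp.* helpers
# the function relies on are importable.
import numpy as np

def _decay(x, a, b):
    return a * np.exp(-b * x)

_x = np.linspace(0, 4, 50)
_y = _decay(_x, 2.0, 1.3)

_popt_lm, _ = curve_fit(_decay, _x, _y, p0=[1.0, 1.0])        # 'lm' path
_popt_trf, _ = curve_fit(_decay, _x, _y, p0=[1.0, 1.0],
                         bounds=([0, 0], [10, 10]))           # 'trf' path
print(_popt_lm, _popt_trf)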
def func(p, *args):
    a, b = p
    x, y = args
    cost = y - (a * x + b)
    return cost

x = np.arange(1, 10, 1)
y_true = 3 * x + 4
y_mean = y_true + 10 * np.random.rand(len(x))
p0 = np.array([1, 2])
print(p0)
rs1 = fmin_bfgs(func1, [1, 2], args=(x, y_mean))
rs2 = fmin_cg(func1, [1, 2], args=(x, y_mean))
rs = leastsq(func, p0, args=(x, y_mean))
# rs1 = fmin_bfgs(func, p0, args=(x, y_mean))
print("rs=", rs)
# print("rs1=", rs1)
print("rs2=", rs2)
y1 = rs[0][0] * x + rs[0][1]
y2 = rs1[0] * x + rs1[1]
pl.plot(x, y1, 'r', label="y1")
pl.plot(x, y2, 'b', label="y2")
pl.plot(x, y_mean, 'og', label='y_mean')
pl.legend()
pl.show()
# We fix Qdamp based on prior information about our beamline.
mnofit.addVar(nucpdf.qdamp, 0.03, fixed=True)

# Add the mPDF variables.
mnofit.addVar(totpdf.parascale, 4)
mnofit.addVar(totpdf.ordscale, 1.5)

# Turn off printout of iteration number.
mnofit.clearFitHooks()

# Initial structural fit
print("Refine PDF using scipy's least-squares optimizer:")
print("  variables:", mnofit.names)
print("  initial values:", mnofit.values)
leastsq(mnofit.residual, mnofit.values)
print("  final values:", mnofit.values)
print()

# Obtain and display the fit results.
mnoresults = FitResults(mnofit)
print("FIT RESULTS\n")
print(mnoresults)

# Get the experimental data from the recipe.
r = mnofit.totpdf.profile.x
gobs = mnofit.totpdf.profile.y

# Get the calculated PDF and compute the difference between the calculated and
# measured PDF.
gcalc = mnofit.totpdf.evaluate()
def _curve_fit(f, xdata, ydata, degrees, version, p0=None,
               absolute_sigma=False, method=None, jac=None, **kwargs):
    from scipy.optimize.optimize import OptimizeWarning
    from scipy.optimize._lsq.least_squares import prepare_bounds
    from scipy.optimize.minpack import leastsq, _wrap_jac

    bounds = (-np.inf, np.inf)
    lb, ub = prepare_bounds(bounds, np.sum(degrees))
    if p0 is None:
        if version == "C":
            p0 = np.ones(np.sum(degrees) + 2)
        else:
            p0 = np.ones(np.sum(degrees) + 1)
    method = 'lm'

    ydata = np.asarray_chkfinite(ydata, float)
    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        xdata = np.asarray_chkfinite(xdata, float)

    func = _wrap_func(f, xdata, ydata, degrees)  # Modification here !!!
    if callable(jac):
        jac = _wrap_jac(jac, xdata, None)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if 'args' in kwargs:
        raise ValueError("'args' is not a supported keyword argument.")

    # Remove full_output from kwargs, otherwise we're passing it in twice.
    return_full = kwargs.pop('full_output', False)
    res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
    popt, pcov, infodict, errmsg, ier = res
    ysize = len(infodict['fvec'])
    cost = np.sum(infodict['fvec']**2)
    if ier not in [1, 2, 3, 4]:
        raise RuntimeError("Optimal parameters not found: " + errmsg)

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = np.zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(np.inf)
        warn_cov = True
    elif not absolute_sigma:
        if ysize > p0.size:
            s_sq = cost / (ysize - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(np.inf)
            warn_cov = True
    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
# Here is a simple way to assign the zoomscale to the structure. Note that
# this only works for a NON-PERIODIC structure.
lattice = cdsePDF.CdSe.phase.getLattice()
cdseFit.constrain(lattice.a, zoomscale)
cdseFit.constrain(lattice.b, zoomscale)
cdseFit.constrain(lattice.c, zoomscale)

# Turn off printout of iteration number.
cdseFit.clearFitHooks()

# We can now execute the fit using scipy's least-squares optimizer.
print("Refine PDF using scipy's least-squares optimizer:")
print("  variables:", cdseFit.names)
print("  initial values:", cdseFit.values)
leastsq(cdseFit.residual, cdseFit.values)
print("  final values:", cdseFit.values)
print()

# Obtain and display the fit results.
cdseResults = FitResults(cdseFit)
print("FIT RESULTS\n")
print(cdseResults)

# Plot the observed and refined PDF.

# Get the experimental data from the recipe.
r = cdseFit.CdSe.profile.x
gobs = cdseFit.CdSe.profile.y

# Get the calculated PDF and compute the difference between the calculated and
# measured PDF.
def fitting(self, p):
    pars = np.random.rand(p + 1)
    r = leastsq(PolyResiduals, pars, args=(X, Y))
    return r[0]  # return the coefficients
def fit(self):
    """
    Fits the paraboloid to the swarm particles.

    :return: the mean = global best position and the estimated covariance matrix
    """
    scale = 10**0
    dim = len(self.gbest.position)
    x = numpy.array([particle.position * scale for particle in self.swarm])
    theta = (x - self.gbest.position * scale) / (self.gbest.position * scale)
    norms = numpy.array(list(map(norm, theta)))
    # print(Counter(b))
    b = (norms < 0.1)
    theta = theta[b]
    fitness = numpy.array([particle.fitness * scale for particle in self.swarm])
    # b = numpy.logical_and((norms < 0.1), fitness != -numpy.inf)
    fitness = fitness[b]
    delta = -2 * (fitness - self.gbest.fitness * scale)
    # integer division: zeros() needs an integral length
    p0 = numpy.zeros(dim * (dim + 1) // 2 + dim)
    popt, _cov, infodict, mesg, ier = leastsq(errfunc, p0, args=(theta, delta),
                                              full_output=True)
    print(mesg)
    ss_err = (infodict['fvec']**2).sum()
    ss_tot = ((delta - delta.mean())**2).sum()
    rsquared = 1 - (ss_err / ss_tot)
    print("rsquared", rsquared)
    R, mu = transform(dim, popt)
    print(mu)
    # print("found R:\n", R)
    _cov = rescale(R, self.gbest.position, dim)
    print("found _cov:\n", _cov)
    # Alternative: constrained SLSQP minimization, kept for reference.
    # cons = ({'type': 'ineq', 'fun': lambda x: bound(x)},)
    # res = minimize(errfunc2, p0, args=(theta, delta), constraints=cons,
    #                method='SLSQP', options={'disp': True, "ftol": 10**-10})
    # popt = res.x
    # R, mu = transform(dim, popt)
    # _cov = rescale(R, self.gbest.position, dim)
    # eigen = numpy.linalg.eigvals(R)
    # print("-->eigen:", min(eigen), max(eigen), min(eigen)/max(eigen))
    # sigma = numpy.sqrt(numpy.diag(_cov))
    # print("=> found sigma:", sigma)
    return self.gbest.position, _cov
int0 = numpy.argsort(tau)
tau = tau[int0]
delflx2 = delflx2[int0]
delflxerr = delflxerr[int0]

# Fitting function; exponential model: sf = sf_inf * (1 - e^(-t/tau))^0.5
fp = lambda v, x: v[0] * numpy.power((1. - numpy.exp(-numpy.abs(x) / v[1])), 0.5)
# Error function for the leastsq
e = lambda v, x, y, dy: (fp(v, x) - y) / dy
# Initial guess for the exponential model
v0 = [0.2, 100.]
# leastsq
v_whole, cov, info, mesg, ier = sc.leastsq(e, v0,
                                           args=(tau, numpy.sqrt(delflx2), delflxerr),
                                           full_output=True)
if ier != 1:
    print('Fail in fitting SF for ' + str(inFiles[i]))
if cov is None:
    t0 = 0.0
    t1 = 0.0
else:
    t0 = numpy.sqrt(cov[0][0])
    t1 = numpy.sqrt(cov[1][1])
# Expected value
expected = v_whole[0] * numpy.power((1. - numpy.exp(-numpy.abs(tau) / v_whole[1])), 0.5)
chi2 = numpy.sum((numpy.sqrt(delflx2) - expected)**2 / expected)
WriteColumns(outDir + 'tau%s.dat' % (file), ((reshape(tau, (len(tau), 1))),), header='# SF')
WriteColumns(outDir + 'sf%s.dat' % (file), ((reshape(numpy.sqrt(delflx2), (len(tau), 1))),), header='# SF')
outf.write('%.5f %.5f %.5f %.5f %.5f %i\n' % (v_whole[0], t0, v_whole[1], t1, chi2, len(tau)))
outf.close()