def testMonteCarlo1(self):
    print("====== MonteCarlo 1 ===================")
    N = 100
    ran = numpy.random
    ran.seed(12345)
    noise = ran.standard_normal(N)
    x = numpy.arange(N, dtype=float) - 3
    nn = 0.1
    for k in range(5):
        y = noise * nn
        m = PolynomialModel(0)
        ftr = Fitter(x, m)
        par = ftr.fit(y)
        std = ftr.getStandardDeviations()
        chisq = ftr.chisq
        mc = MonteCarlo(x, m, ftr.covariance)
        mc.mcycles = 1000
        lmce = ftr.monteCarloError(monteCarlo=mc)

        print("noise  : ", fmt(nn), "===========================================")
        print("params : ", fmt(par, format="%8.5f"))
        print("stdevs : ", fmt(std, format="%8.5f"))
        print("scale  : ", fmt(ftr.scale, format="%8.5f"), fmt(nn))
        print("chisq  : ", fmt(chisq, format="%8.5f"), fmt(mc._eigenvalues, format="%8.5f"),
              fmt(mc._eigenvectors, format="%8.5f"))
        print("covar  : ", fmt(ftr.covariance, format="%8.5f"))
        print("mcerr  : ", fmt(lmce[0], format="%8.5f"))

        self.assertTrue(abs(std[0] - lmce[0]) < 0.1 * std[0])
        self.assertTrue(par[0] < 0.05 * nn)
        nn *= 10
def test5( self, plot=None ) :
    print( "====test5============================" )
    nn = 10
    x = numpy.linspace( 0, 2, nn, dtype=float )
    ym = 0.3 + 0.5 * x
    nf = 0.1
    numpy.random.seed( 2345 )
    noise = numpy.random.randn( nn )
    y = ym + nf * noise

    limits = [-1,2]

    model = PolynomialModel( 1 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )

    s = 0.0
    s2 = 0.0
    mr = 10
    for k in range( mr ) :
        dis = GaussErrorDistribution( x, y, scale=0.5 )
        ns = NestedSampler( x, model, y, distribution=dis, verbose=0, seed=k )

        yfit = ns.sample()
        par2 = ns.parameters
        logz2 = ns.logZ
        dlz2 = ns.logZprecision
        s += logz2
        s2 += logz2 * logz2
        print( "pars  ", fmt( par2 ), "  logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )

    # mean and rms spread of logZ over the mr independent runs
    logz = s / mr
    dlz = math.sqrt( s2 / mr - logz * logz )
    print( "Average  ", fmt( logz ), " +- ", fmt( dlz ) )
def test2_1( self, plot=False ) :
    print( "====test2_1============================" )
    nn = 100
    x = numpy.arange( nn, dtype=float ) / 50
    ym = 0.2 + 0.5 * x
    nf = 0.1
    numpy.random.seed( 2345 )
    noise = numpy.random.randn( nn )
    y = ym + nf * noise

    limits = [-1,2]

    pm = PolynomialModel( 1 )
    bf = Fitter( x, pm )
    pars = bf.fit( y )
    logz0 = bf.getLogZ( limits=limits )
    logl0 = bf.logLikelihood
    print( "pars  ", fmt( pars ) )
    print( "stdv  ", fmt( bf.stdevs ) )
    print( "logZ  ", fmt( logz0 ), "  logL  ", fmt( logl0 ) )

    errdis = GaussErrorDistribution( x, y )
    logz1, logl1 = plotErrdis2d( errdis, pm, limits=limits, max=0, plot=plot )
    if plot :
        plt.plot( pars[0], pars[1], 'k.' )
    print( "logZ  ", fmt( logz1 ), "  logL  ", fmt( logl1 ) )

    model = PolynomialModel( 1 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
    ns = NestedSampler( x, model, y, verbose=0 )

    yfit = ns.sample()
    par2 = ns.parameters
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print( "pars  ", fmt( par2 ) )
    print( "stdv  ", fmt( ns.stdevs ) )
    print( "logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )
    self.assertTrue( abs( logz2 - logz0 ) < dlz2 )
    # print( logz0 - logz1, logz0 - logz2 )

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()
    assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

    if plot :
        plt.show()
def normalizetest( self, fitter ) :
    p = numpy.asarray( [2.0, 1.3] )
    x = numpy.asarray( [1.0, 1.3, 1.5, 1.8, 2.0] )
    numpy.random.seed( 12345 )
    y = p[0] + p[1] * x + 0.5 * numpy.random.randn( 5 )

    m = PolynomialModel( 1 )
    ftr = fitter( x, m )
    print( "=============================================================" )
    print( str( ftr ) )
    print( fmt( x ) )
    print( fmt( y ) )

    par = ftr.fit( y )
    print( fmt( p ) )
    print( fmt( par ) )

    conpr = numpy.asarray( [1.0, 0.0] )
    for w in [0, 1, 10, 100, 1000] :
        m2 = m.copy()
        ftr = fitter( x, m2 )
        ftr.normalize( conpr, p[0], weight=w )
        par = ftr.fit( y )
        print( fmt( w ), fmt( par ), fmt( ftr.chisq ) )

    self.assertTrue( abs( p[0] - par[0] ) < 1e-3 )
    print( fmt( ftr.hessian ) )
def doVerbose(self, name, chisq, par, force=False):
    if self.verbose > 1 and (self.iter % 100 == 0 or force):
        if self.verbose < 4:
            mx = 5
        elif self.verbose == 4:
            mx = None
        else:
            mx = self.verbose
        print("%6d %-8.8s " % (self.iter, name),
              ("%6.1f" % self.temp if self.temp > 0 else ""),
              " %8.1f " % chisq, fmt(par, max=mx))
def histo(self, x, pr, fun=None):
    print(pr)
    print(fmt(x))

    num_bins = 50
    # the histogram of the data
    n, bins, patches = plt.hist(x, num_bins, density=True, facecolor='green',
                                alpha=0.5)
    # add a 'best fit' line
    if fun is not None:
        print(fmt(fun[0]))
        print(fmt(fun[1]))
        plt.plot(fun[0], fun[1], 'r--')
    plt.xlabel('Error')
    plt.ylabel('Probability')
    plt.title('Histogram of ' + str(pr))

    # Tweak spacing to prevent clipping of ylabel
    plt.subplots_adjust(left=0.15)
    plt.show()
def test2(self):
    print("===== formatter test2 ===========================")
    arr = numpy.asarray([k for k in range(36)], dtype=float)
    arr = arr.reshape((3, 12))

    fmtinit(max=None)
    print(fmt(arr, max=None))

    fmtinit(linelength=60)
    print(fmt(arr, max=None))

    fmtinit(linelength=80, format={"float64": " %7.2f"})
    print(fmt(arr, max=None))

    print("arr", fmt(arr, indent=4, format=" %7.2f"))
    print(fmt(arr, max=2))

    alist = [1, 2, 3, 4, 5]
    print(fmt(alist))
    self.assertTrue(isinstance(alist, list))
    print(fmt(3), fmt(3.4))
def test1(self):
    print("===== formatter test1 ===========================")
    arr = numpy.asarray([k for k in range(120)], dtype=float)
    arr = arr.reshape((3, 40))
    print(fmt(arr, max=None))
    print("arr", fmt(arr[1], indent=4, format=" %7.2f", max=20))
    print(fmt(arr.reshape((3, 4, 10)), max=2))

    alist = [1, 2, 3, 4, 5]
    print(fmt(alist))
    self.assertTrue(isinstance(alist, list))
    print(fmt(3), fmt(3.4))
def dofit(self, ns, pp):
    yfit = ns.sample()
    par = ns.parameters
    std = ns.standardDeviations
    mlp = ns.samples.maxLikelihoodParameters
    scale = ns.scale
    scdev = ns.stdevScale

    print("truth  ", fmt(pp))
    print("par    ", fmt(par))
    print("st dev ", fmt(std))
    print("ML par ", fmt(mlp))
    print("scale  ", fmt(scale), " +- ", fmt(scdev))
def sample(self, keep=None):
    """
    Sample the posterior and return the weighted average result of the Model.

    The more informative result of this method is the SampleList, which
    contains samples taken from the posterior distribution.

    Parameters
    ----------
    keep : None or dict of {int:float}
        Dictionary of indices (int) to be kept at a fixed value (float).
        Hyperparameters follow the model parameters in the indexing.
        The values will override those at initialization.
        They are only used in this call of sample().

    """
    if keep is None:
        keep = self.keep
    fitlist = self.makeFitlist(keep=keep)

    self.initWalkers(fitlist=fitlist)
    for eng in self.engines:
        eng.walkers = self.walkers

    self.distribution.ncalls = 0                    # reset number of calls

    self.plotData()

    if self.verbose >= 1:
        print("Fit", ("all" if keep is None else fitlist), "parameters of")
        print("  ", self.model._toString("  "))
        print("Using a", self.distribution, "with")
        np = self.model.npchain
        for name, hyp in zip(self.distribution.PARNAMES, self.distribution.hyperpar):
            print("  %-7.7s " % name, end="")
            if np in fitlist:
                print("unknown")
            else:
                print("  (fixed)  ", hyp.hypar)
                # print( "%7.2f (fixed)" % hyp.hypar )
            np += 1
        print("Moving the walkers with")
        for eng in self.engines:
            print("  ", eng)

    if self.verbose >= 2:
        print("Iteration     logZ        H      LowL   npar   parameters")

    explorer = Explorer(self)

    self.logZ = -sys.float_info.max
    self.info = 0

    logWidth = math.log(1.0 - math.exp((-1.0 * self.discard) / self.ensemble))

    if self.optionalRestart():
        logWidth -= self.iteration * (1.0 * self.discard) / self.ensemble

    while self.iteration < self.getMaxIter():
        # find worst walker(s) in ensemble
        worst = self.findWorst()
        worstLogW = logWidth + self.walkers[worst[-1]].logL

        # Keep posterior samples
        self.storeSamples(worst, worstLogW - math.log(self.discard))

        # Update Evidence Z and Information H
        logZnew = numpy.logaddexp(self.logZ, worstLogW)
        self.info = (math.exp(worstLogW - logZnew) * self.lowLhood +
                     math.exp(self.logZ - logZnew) * (self.info + self.logZ) -
                     logZnew)
        self.logZ = logZnew

        if self.verbose >= 3 or (self.verbose >= 2 and self.iteration % 100 == 0):
            kw = worst[0]
            pl = self.walkers[kw].parlist[self.walkers[kw].fitIndex]
            np = len(pl)
            print("%8d %8.1f %8.1f %8.1f %6d " % (self.iteration, self.logZ,
                  self.info, self.lowLhood, np), fmt(pl))

        self.plotResult(worst[0], self.iteration)

        self.samples.weed(self.maxsize)             # remove overflow in samplelist

        self.copyWalker(worst)

        # Explore the copied walker(s)
        explorer.explore(worst, self.lowLhood, fitlist)

        # Shrink the interval
        logWidth -= (1.0 * self.discard) / self.ensemble
        self.iteration += 1

        self.optionalSave()

    # End of Sampling
    self.addEnsembleToSamples(logWidth)

    # Calculate weighted average and stdevs for the parameters
    self.samples.LogZ = self.logZ
    self.samples.info = self.info
    self.samples.normalize()

    # put the info into the model
    self.model.parameters = self.samples.parameters
    self.model.stdevs = self.samples.stdevs

    if self.verbose >= 1:
        self.report()

    return self.samples.average(self.xdata)
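# Usage sketch (added for illustration; not part of the original source).
# A minimal call sequence for sample() with the `keep` option, following the
# BayesicFitting-style API used in the tests above. The data arrays x, y and
# the fixed index/value pair are assumptions.
#
#   model = PolynomialModel( 1 )
#   model.setLimits( lowLimits=-1.0, highLimits=2.0 )
#   ns = NestedSampler( x, model, y )
#   yfit = ns.sample( keep={0: 0.3} )      # hold parameter 0 fixed at 0.3
#   print( "logZ ", fmt( ns.logZ ), " +- ", fmt( ns.logZprecision ) )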
def test1(self, plot=False):
    print("====test1============================")
    nn = 10000
    x = numpy.linspace(0, 1, nn, dtype=float)
    ym = 0.0 * x
    nf = 0.01

    model = PolynomialModel(0)
    model.parameters = 0.0
    numpy.random.seed(2345)
    noise = numpy.random.randn(nn)

    errdis = GaussErrorDistribution(x, ym)
    print(errdis)
    for k in range(5):
        noise = numpy.random.randn(nn)
        y = ym + nf * noise
        errdis.data = y
        print(fmt(k), fmt(nf), fmt(errdis.getScale(model)))
        nf *= 10

    parlist = [0.0, 1.0]
    if plot:
        self.ploterrdis(noise, errdis, model, parlist)

    nf = 0.01
    errdis = LaplaceErrorDistribution(x, ym)
    print(errdis)
    for k in range(5):
        noise = numpy.random.laplace(size=nn)
        y = ym + nf * noise
        errdis.data = y
        print(fmt(k), fmt(nf), fmt(errdis.getScale(model)))
        nf *= 10

    nf = 0.01
    errdis = CauchyErrorDistribution(x, ym)
    cp = CauchyPrior()
    print(errdis)
    for k in range(5):
        noise = numpy.random.rand(nn)
        noise = cp.unit2Domain(noise)
        y = ym + nf * noise
        errdis.data = y
        print(fmt(k), fmt(nf), fmt(errdis.getScale(model)))
        nf *= 10

    nf = 0.01
    errdis = GenGaussErrorDistribution(x, ym, power=1)
    print(errdis, "  power=1")
    for k in range(5):
        noise = numpy.random.laplace(size=nn)
        y = ym + nf * noise
        errdis.data = y
        print(fmt(k), fmt(nf), fmt(errdis.getScale(model)))
        nf *= 10

    nf = 0.01
    errdis = GenGaussErrorDistribution(x, ym, power=2)
    print(errdis, "  power=2")
    for k in range(5):
        noise = numpy.random.randn(nn)
        y = ym + nf * noise
        errdis.data = y
        print(fmt(k), fmt(nf), fmt(errdis.getScale(model)))
        nf *= 10

    nf = 0.01
    power = 10
    errdis = GenGaussErrorDistribution(x, ym, power=power)
    print(errdis, "  power=%d" % power)
    for k in range(5):
        noise = 2 * numpy.random.rand(nn) - 1.0
        y = ym + nf * noise
        errdis.data = y
        print(fmt(k), fmt(nf), fmt(errdis.getScale(model)))
        nf *= 10

    parlist = [0.0, 1.0, power]
    if plot:
        self.ploterrdis(noise, errdis, model, parlist)
def test1(self, plot=False):
    c0 = 3.2
    c1 = -0.1
    c2 = 0.3
    c3 = 1.1
    c4 = 2.1
    y = (self.x - c1) / c2
    y = c0 * numpy.exp(-y * y) + self.noise

    print("++++++++++++++++++++++++++++++++++++++++++++++++++")
    print("Testing Nonlinear Fitters: LevenbergMarquardt (lm)")
    print("++++++++++++++++++++++++++++++++++++++++++++++++++")

    modl1 = GaussModel()
    amfit = CurveFitter(self.x, modl1)
    print([c0, c1, c2, c3, c4])
    par1 = amfit.fit(y)
    print(self.x)
    print(y)
    print(par1)

    modl2 = GaussModel()
    modl2.addModel(PolynomialModel(1))
    z = y + c4 * self.x + c3

    modl2.parameters = numpy.append(par1, [0, 0])
    lmfit = CurveFitter(self.x, modl2)
    par2 = lmfit.fit(z)
    print(z)
    print(par2)
    print("chisq1 = ", amfit.chisq, "  chisq2 = ", lmfit.chisq)

    self.assertTrue(self.eq(par2[0], par1[0], 0.1))
    self.assertTrue(self.eq(par2[1], par1[1], 0.1))
    self.assertTrue(self.eq(abs(par2[2]), abs(par1[2]), 0.1))
    self.assertTrue(self.eq(par2[0], c0, self.ss))
    self.assertTrue(self.eq(par2[1], c1, self.ss))
    self.assertTrue(self.eq(abs(par2[2]), c2, self.ss))
    self.assertTrue(self.eq(par2[3], c3, self.ss))
    self.assertTrue(self.eq(par2[4], c4, self.ss))

    if plot:
        xx = numpy.linspace(-1, +1, 1001)
        plt.plot(self.x, y, 'k+')
        plt.plot(xx, modl1.result(xx), 'k-')
        plt.plot(self.x, z, 'r+')
        plt.plot(xx, modl2.result(xx), 'r-')
        plt.show()

    print(fmt(amfit.hessian))
    print(fmt(amfit.design, max=None))

    print("++++++++++++++++++++++++++++++++++++++++++++++++++")
    print("Testing LevenbergMarquardt (normalized)")
    print("++++++++++++++++++++++++++++++++++++++++++++++++++")

    modl1 = GaussModel()
    amfit = CurveFitter(self.x, modl1)
    conpr1 = numpy.asarray([1.0, 0.0, 0.0])
    amfit.normalize(conpr1, c0, weight=10.0)

    print([c0, c1, c2])
    par3 = amfit.fit(y)
    print(fmt(amfit.hessian))
    print(fmt(amfit.design, max=None))
    print(par1)
    print(par3)
def test7( self, plot=False ) :
    print( "====test7  Poisson ================" )
    nn = 100
    x = numpy.linspace( 0, 10, nn, dtype=float )
    ym = 1.9 + 2.2 * x
    numpy.random.seed( 2345 )
    y = numpy.random.poisson( ym, size=nn )

    limits = [0,4]

    if plot :
        plt.plot( x, ym, 'k-' )
        plt.plot( x, y, 'r.' )

    model = PolynomialModel( 1 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )

    bf = AmoebaFitter( x, model, errdis="poisson" )
    pars = bf.fit( y, tolerance=1e-20 )
    print( "pars  ", fmt( pars ) )
    print( "stdv  ", fmt( bf.stdevs ) )
    logz0 = bf.getLogZ( limits=limits )
    logl0 = bf.logLikelihood
    print( "logZ  ", fmt( logz0 ), "  logl  ", fmt( logl0 ) )

    errdis = PoissonErrorDistribution( x, y )
    logz1, logl1 = plotErrdis2d( errdis, model, limits=limits, max=0, plot=plot )
    if plot :
        plt.plot( pars[0], pars[1], 'k.' )
    print( "logZ  ", fmt( logz1 ), "  logL  ", fmt( logl1 ) )

    model = PolynomialModel( 1 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
    ns = NestedSampler( x, model, y, distribution='poisson', verbose=0 )

    yfit = ns.sample()
    par2 = ns.parameters
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    samples = ns.samples
    print( "pars  ", fmt( par2 ), fmt( samples.maxLikelihoodParameters ) )
    print( "stdv  ", fmt( ns.stdevs ) )
    print( "logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )
    self.assertTrue( abs( logz2 - logz1 ) < dlz2 )

    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()
    assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

    if plot :
        plt.plot( parevo[:,0], parevo[:,1], 'k,' )
        plt.show()
def stdFittertest(myfitter, npt, xmin=-10.0, xmax=10.0, noise=0.1, plot=False,
                  map=False, keep=None, errdis=None, scale=None, power=2.0,
                  options={}):
    numpy.set_printoptions(precision=3, suppress=True)
    tc = unittest.TestCase()

    ## make data
    x = numpy.linspace(xmin, xmax, npt, dtype=float)
    m = SincModel()
    p = [3.0, 1.0, 2.0]
    ym = m.result(x, p)
    numpy.random.seed(5753258)
    y = ym + noise * numpy.random.randn(npt)

    knots = numpy.linspace(xmin, xmax, 13, dtype=float)

    lmdl = BSplinesModel(knots)
    lftr = Fitter(x, lmdl)
    lpar = lftr.fit(y)
    lchi = lftr.chisq
    lfit = lmdl(x)

    mdl = BSplinesModel(knots)
    ftr = myfitter(x, mdl, map=map, keep=keep, errdis=errdis, scale=scale,
                   power=power)
    print("############### Test ", ftr, ' ###################################')

    par = ftr.fit(y, **options)
    chi = ftr.chisq
    yfit = mdl(x)

    print("lpar ", fmt(lpar, indent=4))
    print("lstd ", fmt(lftr.stdevs, indent=4))
    print("lchi ", fmt(lchi), " scale ", fmt(lftr.scale))
    print("par  ", fmt(par, indent=4))
    print("std  ", fmt(ftr.stdevs, indent=4))
    print("chi  ", fmt(chi), " scale ", fmt(ftr.scale), " iter ", fmt(ftr.iter))

    lmce = ftr.monteCarloError(x)

    # tc.assertTrue( abs( lchi - chi ) < noise )
    # tc.assertTrue( numpy.all( numpy.abs( yfit - lfit ) < 2 * lmce ) )

    if plot:
        plt.figure(str(ftr))
        plt.plot(x, ym, 'k-')
        plt.plot(x, y, 'k*')
        plt.plot(x, lfit, 'g-')
        plt.plot(x, yfit, 'r-')
        plt.plot(x, yfit - lmce, 'm-')
        plt.plot(x, yfit + lmce, 'm-')
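# Usage sketch (added for illustration; not from the original source): how
# stdFittertest is typically driven from a unit test. AmoebaFitter and the
# tolerance value are assumptions, mirroring their use elsewhere in these tests.
#
#   def testAmoeba(self):
#       stdFittertest( AmoebaFitter, 201, plot=False,
#                      options={"tolerance": 1e-10} )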
def test1( self, plot=False ) :
    print( "====test1============================" )
    nn = 100
    x = numpy.zeros( nn, dtype=float )
    ym = 0.2 + 0.5 * x
    nf = 1.0
    nf = 0.1
    numpy.random.seed( 2345 )
    noise = numpy.random.randn( nn )
    y = ym + nf * noise
    limits = [-2,2]

    pm = PolynomialModel( 0 )
    bf = Fitter( x, pm )
    pars = bf.fit( y )
    logz0 = bf.getLogZ( limits=limits )
    logl0 = bf.logLikelihood
    print( "pars  ", fmt( pars ) )
    print( "stdv  ", fmt( bf.stdevs ) )
    print( "logZ  ", fmt( logz0 ) )
    print( "logl  ", fmt( logl0 ) )

    errdis = GaussErrorDistribution( x, y )
    logz1, maxll = plotErrdis( errdis, pm, limits=limits, max=0, plot=plot )
    print( "logZ  ", fmt( logz1 ) )

    model = PolynomialModel( 0 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
    ns = NestedSampler( x, model, y )

    yfit = ns.sample()
    par2 = ns.parameters
    stdv = ns.stdevs
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print( "pars  ", fmt( par2 ) )
    print( "stdv  ", fmt( stdv ) )
    print( "logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )
    self.assertTrue( abs( logz2 - logz0 ) < dlz2 )

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()
    assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

    if plot :
        plt.plot( parevo, numpy.exp( llevo ), 'r,' )

        mxl = numpy.exp( numpy.max( llevo ) ) * 1.2
        plt.plot( [pars,pars], [0.0,mxl], 'b-' )
        plt.plot( [par2,par2], [0.0,mxl], 'r-' )
        plt.plot( [par2,par2]+stdv, [0.0,mxl], 'g-' )
        plt.plot( [par2,par2]-stdv, [0.0,mxl], 'g-' )

        plt.show()
def test1( self, plot=False ):
    x, y, p1 = self.makeData()

    print( "++++++++++++++++++++++++++++++++++++++++++++++++++" )
    print( "Testing Nonlinear Fitters: LevenbergMarquardt (lm)" )
    print( "++++++++++++++++++++++++++++++++++++++++++++++++++" )

    modl1 = GaussModel( )
    amfit = LMFitter( x, modl1 )
    print( fmt( p1 ) )
    par1 = amfit.fit( y )
    print( fmt( par1 ) )
    assertAAE( par1, p1, 1 )

    x, z, p2 = self.makeData( bg=True )

    modl2 = GaussModel( )
    modl2.addModel( PolynomialModel( 1 ) )
    modl2.parameters = numpy.append( par1, [0, 0] )
    lmfit = LMFitter( x, modl2 )
    par2 = lmfit.fit( z )
    print( fmt( p2 ) )
    print( fmt( par2 ) )
    print( "chisq1 = ", amfit.chisq, "  chisq2 = ", lmfit.chisq )
    assertAAE( par2, p2, 1 )

    if plot :
        xx = numpy.linspace( -1, +1, 1001 )
        plt.plot( x, y, 'k+' )
        plt.plot( xx, modl1.result( xx ), 'k-' )
        plt.plot( x, z, 'r+' )
        plt.plot( xx, modl2.result( xx ), 'r-' )

    print( fmt( amfit.hessian ) )
    print( fmt( amfit.design[-4:,:], max=None ) )

    print( "++++++++++++++++++++++++++++++++++++++++++++++++++" )
    print( "Testing LevenbergMarquardt (normalized)" )
    print( "++++++++++++++++++++++++++++++++++++++++++++++++++" )

    modl1 = GaussModel( )
    bmfit = LMFitter( x, modl1 )
    conpr1 = numpy.asarray( [1.0, 0.0, 0.0] )
    bmfit.normalize( conpr1, p1[0], weight=10.0 )
    print( "Normweight = ", bmfit.normweight )
    print( fmt( p1 ) )
    par3 = bmfit.fit( y )
    print( fmt( par1 ) )
    print( fmt( par3 ) )
    print( fmt( amfit.chisq ), fmt( bmfit.chisq ) )

    hes1 = amfit.getHessian( params=par3 )
    hes3 = bmfit.getHessian( params=par3 )
    print( fmt( hes1 ) )
    print( fmt( hes3 ) )
    f = bmfit.normweight
    for h1, h3 in zip( hes1.flat, hes3.flat ) :
        self.assertTrue( abs( h1 + f - h3 ) < 1e-8 )
        f = 0.0

    if plot :
        plt.plot( xx, modl1.result( xx ), 'g-' )
        plt.show()
def test4( self, plot=False ) :
    print( "====test4============================" )
    nn = 100
    x = numpy.arange( nn, dtype=float ) / 50
    ym = 0.4 + 0.0 * x
    nf = 0.5
    numpy.random.seed( 2345 )
    noise = numpy.random.randn( nn )
    y = ym + nf * noise
    limits = [0,1]
    nslim = [0.1,1.0]

    pm = PolynomialModel( 0 )
    bf = Fitter( x, pm )
    pars = bf.fit( y )
    scale = bf.scale
    logz0 = bf.getLogZ( limits=limits, noiseLimits=nslim )
    logl0 = bf.logLikelihood
    print( "pars  ", fmt( pars ), "  scale  ", fmt( scale ) )
    print( "stdv  ", fmt( bf.stdevs ) )
    print( "logZ  ", fmt( logz0 ), "  logL  ", fmt( logl0 ) )

    if plot :
        plt.figure( "model" )
        plotFit( x, data=y, model=pm, ftr=bf, truth=ym, show=False )

    errdis = GaussErrorDistribution( x, y )
    logz1, logl1 = plotErrdis2d( errdis, pm, limits=limits, nslim=nslim,
                                 plot=plot )
    if plot :
        plt.plot( pars[0], scale, 'k.' )
    print( "logZ  ", fmt( logz1 ), "  logL  ", fmt( logl1 ) )

    model = PolynomialModel( 0 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
    dis = GaussErrorDistribution( x, y )
    dis.setLimits( nslim )
    ns = NestedSampler( x, model, y, distribution=dis, verbose=0 )

    yfit = ns.sample()
    par2 = ns.parameters
    scl2 = ns.scale
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print( "pars  ", fmt( par2 ), "  scale  ", fmt( scl2 ) )
    print( "stdv  ", fmt( ns.stdevs ) )
    print( "logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )
    self.assertTrue( abs( logz2 - logz1 ) < 2 * dlz2 )

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    scevo = samples.getScaleEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()
    assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

    if plot :
        plt.plot( parevo[:,0], scevo, 'k,' )

        plt.figure( "model" )                       # grab again
        err = samples.monteCarloError( x )
        plt.plot( x, yfit + err, 'b-' )
        plt.plot( x, yfit - err, 'b-' )

        plt.show()
def test3( self, plot=False ) :
    print( "====test3============================" )
    nn = 10
    x = numpy.linspace( 0, 2, nn, dtype=float )
    ym = 0.3 + 0.5 * x
    nf = 0.1
    numpy.random.seed( 2345 )
    noise = numpy.random.randn( nn )
    y = ym + nf * noise
    limits = [-1,2]

    pm = PolynomialModel( 1 )
    bf = Fitter( x, pm, fixedScale=0.5 )
    pars = bf.fit( y )
    logz0 = bf.getLogZ( limits=limits )
    logl0 = bf.logLikelihood
    print( "pars  ", fmt( pars ) )
    print( "stdv  ", fmt( bf.stdevs ) )
    print( "logZ  ", fmt( logz0 ), "  logL  ", fmt( logl0 ) )

    if plot :
        plt.figure( "model" )
        plotFit( x, data=y, model=pm, ftr=bf, truth=ym, show=False )

    errdis = GaussErrorDistribution( x, y, scale=0.5 )
    logz1, logl1 = plotErrdis2d( errdis, pm, limits=limits, max=0, plot=plot )
    if plot :
        plt.plot( pars[0], pars[1], 'k.' )
    print( "logZ  ", fmt( logz1 ), "  logL  ", fmt( logl1 ) )

    model = PolynomialModel( 1 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
    dis = GaussErrorDistribution( x, y, scale=0.5 )
    ns = NestedSampler( x, model, y, distribution=dis, verbose=0 )

    yfit = ns.sample()
    par2 = ns.parameters
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print( "pars  ", fmt( par2 ) )
    print( "stdv  ", fmt( ns.stdevs ) )
    print( "logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )
    # print( logz0 - logz1, logz0 - logz2 )
    self.assertTrue( abs( logz2 - logz0 ) < dlz2 )

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()
    assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

    if plot :
        plt.plot( parevo[:,0], parevo[:,1], 'k,' )

        plt.figure( "model" )                       # grab again
        err = samples.monteCarloError( x )
        plt.plot( x, yfit + err, 'b-' )
        plt.plot( x, yfit - err, 'b-' )

        plt.show()
def test1( self, plot=False ):
    """ test slope fit """
    print( "\n   Robust fitter Test 1\n" )
    ndata = 101
    aa = 5.0            #  average offset
    bb = 2.0            #  slope
    ss = 0.3            #  noise Normal distr.

    x = numpy.linspace( -1, +1, ndata, dtype=float )
    numpy.random.seed( 12345 )
    y = aa + bb * x + ss * numpy.random.randn( ndata )

    model = PolynomialModel( 1 )
    fitter = Fitter( x, model )

    print( "Testing Straight Line fit" )
    par = fitter.fit( y )
    std = fitter.stdevs
    chisq = fitter.chisq
    print( "truth   " + fmt( aa ) + fmt( bb ) )
    print( "params  " + fmt( par ) )
    print( "stdevs  " + fmt( std ) )
    print( "chisq   " + fmt( chisq ) )
    assertAAE( par, numpy.asarray( [aa, bb] ), 2 )

    if plot :
        plt.plot( x, y, 'k.' )
        plt.plot( x, model( x ), 'k-' )

    # make some outliers
    ko = [10 * k + 4 for k in range( 10 )]
    ko = numpy.asarray( ko )
    y[ko] += numpy.linspace( -5, 2, 10 )

    romod = PolynomialModel( 1 )
    altfit = Fitter( x, romod )
    alt = altfit.fit( y )
    ast = altfit.stdevs
    altch = altfit.chisq
    print( "params  " + fmt( alt ) )
    print( "stdevs  " + fmt( ast ) )
    print( "chisq   " + fmt( altch ) )

    if plot :
        plt.plot( x, romod( x ), 'b-' )

    rf = RobustShell( altfit )
    Tools.printclass( rf )
    alt = rf.fit( y )
    ast = altfit.stdevs
    altch = altfit.chisq
    print( rf )
    print( "params  " + fmt( alt ) )
    print( "stdevs  " + fmt( ast ) )
    print( "chisq   " + fmt( altch ) + fmt( rf.scale ) )
    print( "weight  " + fmt( rf.weights[ko], max=None ) )
    if plot :
        plt.plot( x, romod( x ), 'g-' )
        plt.plot( x, rf.weights, 'g-' )
    assertAAE( par, alt, 1 )
    assertAAE( std, ast, 1 )

    rf = RobustShell( altfit, kernel=Cosine(), domain=4.0 )
    alt = rf.fit( y )
    ast = altfit.stdevs
    altch = altfit.chisq
    print( rf )
    print( "params  " + fmt( alt ) )
    print( "stdevs  " + fmt( ast ) )
    print( "chisq   " + fmt( altch ) + fmt( rf.scale ) )
    print( "weight  " + fmt( rf.weights[ko], max=None ) )
    if plot :
        plt.plot( x, romod( x ), 'r-' )
        plt.plot( x, rf.weights, 'r-' )
    assertAAE( par, alt, 1 )
    assertAAE( std, ast, 1 )

    rf = RobustShell( altfit, kernel=Huber, domain=1.0 )
    # rf.setNoiseScale( 0.3 )
    alt = rf.fit( y )
    ast = altfit.stdevs
    altch = altfit.chisq
    print( rf )
    print( "params  " + fmt( alt ) )
    print( "stdevs  " + fmt( ast ) )
    print( "chisq   " + fmt( altch ) + fmt( rf.scale ) )
    print( "weight  " + fmt( rf.weights[ko], max=None ) )
    if plot :
        plt.plot( x, romod( x ), 'c-' )
        plt.plot( x, rf.weights, 'c-' )
    assertAAE( par, alt, 1 )
    assertAAE( std, ast, 1 )

    rf = RobustShell( altfit, kernel=Uniform )
    alt = rf.fit( y )
    ast = altfit.stdevs
    altch = altfit.chisq
    print( rf )
    print( "params  " + fmt( alt ) )
    print( "stdevs  " + fmt( ast ) )
    print( "chisq   " + fmt( altch ) + fmt( rf.scale ) )
    print( "weight  " + fmt( rf.weights[ko], max=None ) )
    if plot :
        plt.plot( x, romod( x ), 'm-' )
        plt.plot( x, rf.weights, 'm-' )
    assertAAE( par, alt, 1 )
    assertAAE( std, ast, 1 )
def test2( self, plot=False ):
    ndata = 101
    c0 = 3.2
    c1 = -0.1
    c2 = 0.3
    c3 = 1.1
    c4 = 2.1
    ss = 0.2

    x = numpy.linspace( -1, +1, ndata, dtype=float )
    y = ( x - c1 ) / c2
    numpy.random.seed( 12345 )
    y = c0 * numpy.exp( -0.5 * y * y ) + x * c3 + c4 + ss * numpy.random.randn( ndata )

    print( "Testing Nonlinear Fitters with RobustShell." )
    print( fmt( [c0, c1, c2, c4, c3] ) )

    modl2 = GaussModel( )
    modl2 += PolynomialModel( 1 )
    # lmfit = CurveFitter( x, modl2 )
    lmfit = LevenbergMarquardtFitter( x, modl2 )
    par2 = lmfit.fit( y )
    std2 = lmfit.stdevs
    print( fmt( par2, max=5 ) )
    print( fmt( std2, max=5 ) )

    # make some outliers
    ko = [10 * k + 4 for k in range( 10 )]
    ko = numpy.asarray( ko )
    y[ko] += numpy.linspace( -5, 2, 10 )

    if plot :
        plt.plot( x, y, 'k.' )
        plt.plot( x, modl2( x ), 'k-' )

    # lmfit = CurveFitter( x, modl2 )
    lmfit = LevenbergMarquardtFitter( x, modl2 )
    # lmfit.setParameters( initpar2 )
    rf = RobustShell( lmfit )
    # rf.setVerbose( 1 )
    print( str( rf ) )
    par1 = rf.fit( y )
    std1 = rf.stdevs
    print( fmt( par1, max=5 ) )
    print( fmt( std1, max=5 ) )
    print( fmt( par2, max=5 ) )
    print( fmt( rf.weights[ko], max=20 ) )

    if plot :
        plt.plot( x, modl2( x ), 'g-' )
        plt.plot( x, rf.weights, 'g-' )

    assertAAE( par2, par1, 1 )
    assertAAE( std2, std1, 1 )

    # lmfit = CurveFitter( x, modl2 )
    lmfit = LevenbergMarquardtFitter( x, modl2 )
    # lmfit.setParameters( initpar2 )
    rf = RobustShell( lmfit, onesided="negative" )
    # rf.setVerbose( 1 )
    print( rf )
    par3 = rf.fit( y )
    std3 = rf.stdevs
    print( fmt( par3, max=5 ) )
    print( fmt( std3, max=5 ) )
    print( fmt( par2, max=5 ) )
    print( fmt( rf.weights[ko], max=20 ) )

    if plot :
        plt.plot( x, modl2( x ), 'r-' )
        plt.plot( x, rf.weights, 'r-' )

    assertAAE( par2, par3, 1 )
    assertAAE( std2, std3, 1 )
def report( self, verbose, param, chi, more=None, force=False ) :
    if verbose > 1 and ( self.iter % 100 == 0 or force ) :
        mr = "" if more is None else fmt( more, format=' %6.1f ' )
        if verbose < 4 :
            mx = 5
        elif verbose == 4 :
            mx = None
        else :
            mx = verbose
        print( fmt( self.iter, format='%6d' ), mr,
               fmt( chi, format="%8.1f " ), fmt( param, max=mx ) )
def test6_1( self, plot=False ) :
    print( "====test6_1  Laplace ================" )
    nn = 20
    x = numpy.linspace( 0, 2, nn, dtype=float )
    ym = 0.3 + 0.5 * x
    nf = 0.9
    numpy.random.seed( 2345 )
    noise = numpy.random.laplace( size=nn )
    y = ym + nf * noise

    limits = [-1,2]

    if plot :
        plt.plot( x, ym, 'k-' )
        plt.plot( x, y, 'r.' )

    model = PolynomialModel( 1 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )

    bf = AmoebaFitter( x, model, errdis="laplace" )
    pars = bf.fit( y, tolerance=1e-20 )
    print( "pars  ", fmt( pars ) )
    print( "stdv  ", fmt( bf.stdevs ) )
    logz0 = bf.getLogZ( limits=limits )
    logl0 = bf.logLikelihood
    print( "logZ  ", fmt( logz0 ), "  logL  ", fmt( logl0 ) )

    errdis = LaplaceErrorDistribution( x, y, scale=nf )
    logz1, logl1 = plotErrdis2d( errdis, model, limits=limits, max=0, plot=plot )
    if plot :
        plt.plot( pars[0], pars[1], 'k.' )
    print( "logZ  ", fmt( logz1 ), "  logL  ", fmt( logl1 ) )

    model = PolynomialModel( 1 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
    ns = NestedSampler( x, model, y, distribution='laplace', verbose=0 )

    yfit = ns.sample()
    par2 = ns.parameters
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print( "pars  ", fmt( par2 ) )
    print( "stdv  ", fmt( ns.stdevs ) )
    print( "logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )
    self.assertTrue( abs( logz2 - logz0 ) < 5 * dlz2 )

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()
    assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

    if plot :
        plt.plot( parevo[:,0], parevo[:,1], 'k,' )
        plt.show()