def test1a(self):
    print("====test1a============================")

    # constant dataset with gaussian noise
    nn = 100
    x = numpy.zeros(nn, dtype=float)
    ym = 0.2 + 0.5 * x
    nf = 0.1

    numpy.random.seed(2345)
    noise = numpy.random.randn(nn)
    y = ym + nf * noise
    limits = [-20, 20]

    # evidence from a straight (analytic) Fitter
    pm = PolynomialModel(0)
    bf = Fitter(x, pm)
    pars = bf.fit(y)
    logz0 = bf.getLogZ(limits=limits)
    logl0 = bf.logLikelihood
    print("pars ", fmt(pars))
    print("stdv ", fmt(bf.stdevs))
    print("logZ ", fmt(logz0), " logl ", fmt(logl0))

    # numeric evidence from the error distribution itself
    errdis = GaussErrorDistribution()
    problem = ClassicProblem(pm, xdata=x, ydata=y)
    logz1, maxll = plotErrdis(errdis, problem, limits=limits,
                              max=0, plot=self.doplot)
    print("logZ ", fmt(logz1))

    # evidence from NestedSampler
    model = PolynomialModel(0)
    model.setLimits(lowLimits=limits[0], highLimits=limits[1])
    ns = NestedSampler(x, model, y, verbose=0)
    logE = ns.sample()

    par2 = ns.parameters
    stdv = ns.stdevs
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print("pars ", fmt(par2))
    print("stdv ", fmt(stdv))
    print("logZ ", fmt(logz2), " +- ", fmt(dlz2))

    self.assertTrue(abs(logz2 - logz0) < 2 * dlz2)

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()

    # posterior sample weights must sum to 1
    assertAAE(numpy.sum(numpy.exp(lwevo)), 1.0)
def test6_0(self):
    print("====test6_0 Laplace ================")

    plot = self.doplot

    # constant dataset with Laplace (double-exponential) noise
    nn = 20
    x = numpy.linspace(0, 2, nn, dtype=float)
    ym = 0.3 + 0.0 * x
    nf = 0.9
    numpy.random.seed(2345)
    noise = numpy.random.laplace(size=nn)
    y = ym + nf * noise
    limits = [-1, 2]

    if plot:
        plt.plot(x, ym, 'k-')
        plt.plot(x, y, 'r.')

    # maximum-likelihood solution with a Laplace error distribution
    model = PolynomialModel(0)
    model.setLimits(lowLimits=limits[0], highLimits=limits[1])
    bf = PowellFitter(x, model, errdis="laplace")
    pars = bf.fit(y, tolerance=1e-20)
    print("pars ", pars)
    print("stdv ", fmt(bf.stdevs))
    logz0 = bf.getLogZ(limits=limits)
    logl0 = bf.logLikelihood
    print("logZ ", fmt(logz0), " logL ", fmt(logl0))

    # numeric evidence from the error distribution itself
    errdis = LaplaceErrorDistribution(scale=nf)
    problem = ClassicProblem(model, xdata=x, ydata=y)
    logz1, logl1 = plotErrdis(errdis, problem, limits=limits, max=0, plot=plot)
    if plot:
        plt.plot(pars[0], logl1, 'k.')
    print("logZ ", fmt(logz1), " logL ", fmt(logl1))

    # evidence from NestedSampler with a Laplace distribution
    model = PolynomialModel(0)
    model.setLimits(lowLimits=limits[0], highLimits=limits[1])
    ns = NestedSampler(x, model, y, distribution='laplace', verbose=0)
    logE = ns.sample()

    par2 = ns.parameters
    stdv = ns.stdevs
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print("pars ", fmt(par2))
    print("stdv ", fmt(stdv))
    print("logZ ", fmt(logz2), " +- ", fmt(dlz2))

    # self.assertTrue(abs(logz2 - logz0) < dlz2)
    print(logz1 - logz2)

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()

    # posterior sample weights must sum to 1
    assertAAE(numpy.sum(numpy.exp(lwevo)), 1.0)

    if plot:
        plt.plot(parevo, numpy.exp(llevo), 'r,')
        mxl = numpy.exp(numpy.max(llevo)) * 1.2
        plt.plot([pars, pars], [0.0, mxl], 'b-')
        plt.plot([par2, par2], [0.0, mxl], 'r-')
        plt.plot([par2, par2] + stdv, [0.0, mxl], 'g-')
        plt.plot([par2, par2] - stdv, [0.0, mxl], 'g-')
        plt.show()
def test1( self, plot=False ) :
    print( "====test1============================" )

    # constant dataset with gaussian noise
    nn = 100
    x = numpy.zeros( nn, dtype=float )
    ym = 0.2 + 0.5 * x
    nf = 0.1

    numpy.random.seed( 2345 )
    noise = numpy.random.randn( nn )
    y = ym + nf * noise
    limits = [-2,2]

    # evidence from a straight (analytic) Fitter
    pm = PolynomialModel( 0 )
    bf = Fitter( x, pm )
    pars = bf.fit( y )
    logz0 = bf.getLogZ( limits=limits )
    logl0 = bf.logLikelihood
    print( "pars ", fmt( pars ) )
    print( "stdv ", fmt( bf.stdevs ) )
    print( "logZ ", fmt( logz0 ) )
    print( "logl ", fmt( logl0 ) )

    # numeric evidence from the error distribution itself
    errdis = GaussErrorDistribution( x, y )
    logz1, maxll = plotErrdis( errdis, pm, limits=limits, max=0, plot=plot )
    print( "logZ ", fmt( logz1 ) )

    # evidence from NestedSampler
    model = PolynomialModel( 0 )
    model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
    ns = NestedSampler( x, model, y )
    yfit = ns.sample()

    par2 = ns.parameters
    stdv = ns.stdevs
    logz2 = ns.logZ
    dlz2 = ns.logZprecision
    print( "pars ", fmt( par2 ) )
    print( "stdv ", fmt( stdv ) )
    print( "logZ ", fmt( logz2 ), " +- ", fmt( dlz2 ) )

    self.assertTrue( abs( logz2 - logz0 ) < dlz2 )

    samples = ns.samples
    parevo = samples.getParameterEvolution()
    llevo = samples.getLogLikelihoodEvolution()
    lwevo = samples.getLogWeightEvolution()

    # posterior sample weights must sum to 1
    assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

    if plot :
        plt.plot( parevo, numpy.exp( llevo ), 'r,' )
        mxl = numpy.exp( numpy.max( llevo ) ) * 1.2
        plt.plot( [pars,pars], [0.0,mxl], 'b-' )
        plt.plot( [par2,par2], [0.0,mxl], 'r-' )
        plt.plot( [par2,par2]+stdv, [0.0,mxl], 'g-' )
        plt.plot( [par2,par2]-stdv, [0.0,mxl], 'g-' )
        plt.show()
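# ----------------------------------------------------------------------
# Illustration only, not part of the test suite: the tests above compare
# three evidence estimates (analytic Fitter, numeric plotErrdis, and
# NestedSampler).  Below is a minimal sketch of the numeric one for the
# simplest case exercised here: a 0-degree PolynomialModel (a constant),
# a uniform prior over `limits`, and a Gaussian error distribution with
# a known noise scale.  The helper name `_numeric_log_evidence` is
# hypothetical and does not claim to reproduce plotErrdis exactly.

def _numeric_log_evidence( y, lo, hi, scale, npt=1001 ) :
    """Brute-force log-evidence for a constant model with gaussian noise.

    Z = 1/(hi-lo) * integral over p of prod_i N( y_i | p, scale ) dp
    (numpy is assumed to be imported at module level, as elsewhere here).
    """
    p = numpy.linspace( lo, hi, npt )           # grid over the prior range
    # log-likelihood of the constant model at every grid point
    logl = numpy.array( [numpy.sum( -0.5 * ( ( y - pk ) / scale ) ** 2
                         - numpy.log( scale * numpy.sqrt( 2 * numpy.pi ) ) )
                         for pk in p] )
    # integrate exp(logL) with the trapezoid rule, shifted for stability
    lmax = numpy.max( logl )
    z = numpy.trapz( numpy.exp( logl - lmax ), p ) / ( hi - lo )
    return lmax + numpy.log( z )

# For the data of test1 this should land close to logz0 and logz2;
# small differences reflect the grid resolution and the prior range.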