Example #1
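These snippets are test methods lifted from the BayesicFitting test suite, so they assume module-level imports and a test-class context (self.doplot). A sketch of the imports they rely on; the grouping is an assumption inferred from how the snippets use the names, and plotErrdis2d is a plotting helper defined alongside the tests rather than part of the package proper:

    import numpy
    from matplotlib import pyplot as plt
    from numpy.testing import assert_array_almost_equal as assertAAE

    from BayesicFitting import PolynomialModel, ClassicProblem
    from BayesicFitting import Fitter, AmoebaFitter, NestedSampler
    from BayesicFitting import GaussErrorDistribution
    from BayesicFitting import LaplaceErrorDistribution
    from BayesicFitting import PoissonErrorDistribution
    from BayesicFitting import formatter as fmt
    from BayesicFitting import plotFit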
    def test2_1(self):
        print("====test2_1============================")
        nn = 100
        x = numpy.arange(nn, dtype=float) / 50
        ym = 0.2 + 0.5 * x
        nf = 0.1
        numpy.random.seed(2345)
        noise = numpy.random.randn(nn)

        y = ym + nf * noise
        limits = [-1, 2]

        pm = PolynomialModel(1)          # straight line: p_0 + p_1 * x
        bf = Fitter(x, pm)               # linear least-squares fitter

        pars = bf.fit(y)
        logz0 = bf.getLogZ(limits=limits)
        logl0 = bf.logLikelihood
        print("pars  ", fmt(pars))
        print("stdv  ", fmt(bf.stdevs))
        print("logZ  ", fmt(logz0), "   logL  ", fmt(logl0))

        errdis = GaussErrorDistribution()
        problem = ClassicProblem(pm, xdata=x, ydata=y)

        logz1, logl1 = plotErrdis2d(errdis,
                                    problem,
                                    limits=limits,
                                    max=0,
                                    plot=self.doplot)
        if self.doplot:
            plt.plot(pars[0], pars[1], 'k.')

        print("logZ  ", fmt(logz1), "   logL  ", fmt(logl1))

        model = PolynomialModel(1)
        model.setLimits(lowLimits=limits[0], highLimits=limits[1])
        ns = NestedSampler(x, model, y, verbose=0)

        logE = ns.sample()

        par2 = ns.parameters
        logz2 = ns.logZ
        dlz2 = ns.logZprecision
        print("pars  ", fmt(par2))
        print("stdv  ", fmt(ns.stdevs))
        print("logZ  ", fmt(logz2), " +- ", fmt(dlz2))

        # the nested-sampling evidence should reproduce the Fitter's logZ
        self.assertTrue(abs(logz2 - logz0) < 1.0)
        #        print( logz0 - logz1, logz0 - logz2 )

        samples = ns.samples
        parevo = samples.getParameterEvolution()
        llevo = samples.getLogLikelihoodEvolution()
        lwevo = samples.getLogWeightEvolution()

        # the posterior weights of a completed run should sum to 1
        assertAAE(numpy.sum(numpy.exp(lwevo)), 1.0)

        if self.doplot:
            plt.show()
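The same workflow reduced to a standalone sketch, without the unittest scaffolding; it uses only calls that already appear in the test above:

    import numpy
    from BayesicFitting import PolynomialModel, NestedSampler

    numpy.random.seed(2345)
    x = numpy.arange(100, dtype=float) / 50
    y = 0.2 + 0.5 * x + 0.1 * numpy.random.randn(100)   # straight line plus gaussian noise

    model = PolynomialModel(1)                       # p_0 + p_1 * x
    model.setLimits(lowLimits=-1, highLimits=2)      # uniform priors for both parameters
    ns = NestedSampler(x, model, y, verbose=0)
    logE = ns.sample()                               # run the sampler

    print(ns.parameters, "logZ", ns.logZ, "+-", ns.logZprecision)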
Example #2
    def test7(self):
        print("====test7  Poisson ================")
        plot = self.doplot

        nn = 100
        x = numpy.linspace(0, 10, nn, dtype=float)
        ym = 1.9 + 2.2 * x
        numpy.random.seed(2345)
        y = numpy.random.poisson(ym, size=nn)

        limits = [0, 4]

        if plot:
            plt.plot(x, ym, 'k-')
            plt.plot(x, y, 'r.')

        model = PolynomialModel(1)
        model.setLimits(lowLimits=limits[0], highLimits=limits[1])

        bf = AmoebaFitter(x, model, errdis="poisson")

        pars = bf.fit(y, tolerance=1e-20)
        print("pars  ", fmt(pars))
        print("stdv  ", fmt(bf.stdevs))
        logz0 = bf.getLogZ(limits=limits)
        logl0 = bf.logLikelihood
        print("logZ  ", fmt(logz0), "   logl  ", fmt(logl0))

        errdis = PoissonErrorDistribution()
        problem = ClassicProblem(model, xdata=x, ydata=y)
        logz1, logl1 = plotErrdis2d(errdis,
                                    problem,
                                    limits=limits,
                                    max=0,
                                    plot=plot)
        if plot:
            plt.plot(pars[0], pars[1], 'k.')

        print("logZ  ", fmt(logz1), "   logL  ", fmt(logl1))

        model = PolynomialModel(1)
        model.setLimits(lowLimits=limits[0], highLimits=limits[1])
        ns = NestedSampler(x,
                           model,
                           y,
                           distribution='poisson',
                           verbose=0,
                           seed=23456)

        logE = ns.sample()

        par2 = ns.parameters
        logz2 = ns.logZ
        dlz2 = ns.logZprecision
        samples = ns.samples

        print("pars  ", fmt(par2), fmt(samples.maxLikelihoodParameters))
        print("stdv  ", fmt(ns.stdevs))
        print("logZ  ", fmt(logz2), " +- ", fmt(dlz2))

        self.assertTrue(abs(logz2 - logz1) < 3 * dlz2)

        parevo = samples.getParameterEvolution()
        llevo = samples.getLogLikelihoodEvolution()
        lwevo = samples.getLogWeightEvolution()

        assertAAE(numpy.sum(numpy.exp(lwevo)), 1.0)

        if plot:
            plt.plot(parevo[:, 0], parevo[:, 1], 'k,')
            plt.show()
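Note the two ways the Poisson likelihood is selected above: AmoebaFitter takes it as the string errdis="poisson", while NestedSampler takes distribution='poisson'. NestedSampler also accepts a ready-made ErrorDistribution object, as Examples #4 and #5 show.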
Example #3
    def test6(self):
        print("====test6  Laplace ================")
        plot = self.doplot

        nn = 20
        x = numpy.linspace(0, 2, nn, dtype=float)
        ym = 0.3 + 0.5 * x
        nf = 0.9
        numpy.random.seed(12345)
        noise = numpy.random.laplace(size=nn)

        y = ym + nf * noise
        limits = [-1, 2]

        if plot:
            plt.plot(x, ym, 'k-')
            plt.plot(x, y, 'r.')

        model = PolynomialModel(1)
        model.setLimits(lowLimits=limits[0], highLimits=limits[1])

        bf = AmoebaFitter(x, model, errdis="laplace")

        pars = bf.fit(y, tolerance=1e-20)
        print("pars  ", fmt(pars))
        print("stdv  ", fmt(bf.stdevs))
        logz0 = bf.getLogZ(limits=limits)
        logl0 = bf.logLikelihood
        print("logZ  ", fmt(logz0), "   logL  ", fmt(logl0))

        errdis = LaplaceErrorDistribution(scale=nf)
        problem = ClassicProblem(model, xdata=x, ydata=y)
        logz1, logl1 = plotErrdis2d(errdis,
                                    problem,
                                    limits=limits,
                                    max=0,
                                    plot=plot)
        if plot:
            plt.plot(pars[0], pars[1], 'k.')

        print("logZ  ", fmt(logz1), "   logL  ", fmt(logl1))

        model = PolynomialModel(1)
        model.setLimits(lowLimits=limits[0], highLimits=limits[1])
        ns = NestedSampler(x,
                           model,
                           y,
                           distribution='laplace',
                           seed=8945,
                           verbose=0,
                           rate=0.5)

        logE = ns.sample()

        par2 = ns.parameters
        logz2 = ns.logZ
        dlz2 = ns.logZprecision
        print("pars  ", fmt(par2))
        print("stdv  ", fmt(ns.stdevs))
        print("logZ  ", fmt(logz2), " +- ", fmt(dlz2))

        self.assertTrue(abs(logz2 - logz1) < 4 * dlz2)
        #        print( logz0 - logz1, logz0 - logz2 )

        samples = ns.samples
        parevo = samples.getParameterEvolution()
        llevo = samples.getLogLikelihoodEvolution()
        lwevo = samples.getLogWeightEvolution()

        assertAAE(numpy.sum(numpy.exp(lwevo)), 1.0)

        if plot:
            plt.plot(parevo[:, 0], parevo[:, 1], 'k,')

            plt.show()
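Here NestedSampler is seeded (seed=8945), so the run is reproducible and the logZ check against the 4 * dlz2 tolerance is deterministic; rate=0.5 further tunes the sampler (see the NestedSampler documentation for its exact meaning).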
Example #4
    def test4(self):
        print("====test4===unknown noisescale=========================")
        plot = self.doplot

        nn = 100
        x = numpy.arange(nn, dtype=float) / 50
        ym = 0.4 + 0.0 * x
        nf = 0.5
        numpy.random.seed(2345)
        noise = numpy.random.randn(nn)

        y = ym + nf * noise
        limits = [0, 1]
        nslim = [0.1, 1.0]

        pm = PolynomialModel(0)
        bf = Fitter(x, pm)

        pars = bf.fit(y)
        scale = bf.scale
        logz0 = bf.getLogZ(limits=limits, noiseLimits=nslim)
        logl0 = bf.logLikelihood
        print("pars  ", fmt(pars), "  scale  ", fmt(scale))
        print("stdv  ", fmt(bf.stdevs))
        print("logZ  ", fmt(logz0), "   logL  ", fmt(logl0))

        if plot:
            plt.figure("model")
            plotFit(x, data=y, model=pm, ftr=bf, truth=ym, show=False)

        errdis = GaussErrorDistribution()
        problem = ClassicProblem(pm, xdata=x, ydata=y)

        logz1, logl1 = plotErrdis2d(errdis,
                                    problem,
                                    limits=limits,
                                    nslim=nslim,
                                    plot=plot)
        if plot:
            plt.plot(pars[0], scale, 'k.')

        print("logZ  ", fmt(logz1), "   logL  ", fmt(logl1))

        model = PolynomialModel(0)
        model.setLimits(lowLimits=limits[0], highLimits=limits[1])

        dis = GaussErrorDistribution()
        dis.setLimits(nslim)
        ns = NestedSampler(x, model, y, distribution=dis, verbose=0)

        logE = ns.sample()
        par2 = ns.parameters
        scl2 = ns.scale
        logz2 = ns.logZ
        dlz2 = ns.logZprecision
        print("pars  ", fmt(par2), "  scale  ", fmt(scl2))
        print("stdv  ", fmt(ns.stdevs))
        print("logZ  ", fmt(logz2), " +- ", fmt(dlz2))

        self.assertTrue(abs(logz2 - logz1) < 2 * dlz2)

        samples = ns.samples
        parevo = samples.getParameterEvolution()
        scevo = samples.getScaleEvolution()
        llevo = samples.getLogLikelihoodEvolution()
        lwevo = samples.getLogWeightEvolution()

        assertAAE(numpy.sum(numpy.exp(lwevo)), 1.0)

        if plot:
            plt.plot(parevo[:, 0], scevo, 'k,')

            plt.figure("model")  # grab again
            yfit = ns.yfit
            err = samples.monteCarloError(x)
            plt.plot(x, yfit + err, 'b-')
            plt.plot(x, yfit - err, 'b-')

            plt.show()
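Because the GaussErrorDistribution is given limits on its noise scale (dis.setLimits(nslim)), NestedSampler samples the scale as an extra hyperparameter: the estimate comes back as ns.scale and its evolution as samples.getScaleEvolution().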
Example #5
    def test3(self):
        print("====test3======fixed noisescale======================")
        plot = self.doplot

        nn = 10
        x = numpy.linspace(0, 2, nn, dtype=float)
        ym = 0.3 + 0.5 * x
        nf = 0.2
        numpy.random.seed(2345)
        noise = numpy.random.randn(nn)

        y = ym + nf * noise
        limits = [-1, 2]

        pm = PolynomialModel(1)
        bf = Fitter(x, pm, fixedScale=nf)

        pars = bf.fit(y)
        logz0 = bf.getLogZ(limits=limits)
        logl0 = bf.logLikelihood
        print("pars  ", fmt(pars))
        print("stdv  ", fmt(bf.stdevs))
        print("logZ  ", fmt(logz0), "   logL  ", fmt(logl0))

        if plot:
            plt.figure("model")
            plotFit(x, data=y, model=pm, ftr=bf, truth=ym, show=False)

        errdis = GaussErrorDistribution(scale=nf)
        problem = ClassicProblem(pm, xdata=x, ydata=y)

        logz1, logl1 = plotErrdis2d(errdis,
                                    problem,
                                    limits=limits,
                                    max=0,
                                    plot=plot)

        if plot:
            plt.plot(pars[0], pars[1], 'k.')

        print("logZ  ", fmt(logz1), "   logL  ", fmt(logl1))

        model = PolynomialModel(1)
        model.setLimits(lowLimits=limits[0], highLimits=limits[1])

        dis = GaussErrorDistribution(scale=nf)
        ns = NestedSampler(x,
                           model,
                           y,
                           distribution=dis,
                           verbose=0,
                           seed=34512)

        logE = ns.sample()
        par2 = ns.parameters
        logz2 = ns.logZ
        dlz2 = ns.logZprecision
        print("pars  ", fmt(par2))
        print("stdv  ", fmt(ns.stdevs))
        print("logZ  ", fmt(logz2), " +- ", fmt(dlz2))

        #        print( logz0 - logz1, logz0 - logz2 )
        self.assertTrue(abs(logz2 - logz0) < 2 * dlz2)

        samples = ns.samples
        parevo = samples.getParameterEvolution()
        llevo = samples.getLogLikelihoodEvolution()
        lwevo = samples.getLogWeightEvolution()

        assertAAE(numpy.sum(numpy.exp(lwevo)), 1.0)

        if plot:
            plt.plot(parevo[:, 0], parevo[:, 1], 'k,')

            plt.figure("model")  # grab again
            yfit = ns.yfit
            err = samples.monteCarloError(x)
            plt.plot(x, yfit + err, 'b-')
            plt.plot(x, yfit - err, 'b-')

            plt.show()
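Here the noise scale is known and held fixed: GaussErrorDistribution(scale=nf) pins the scale for both the evidence map and the NestedSampler run, mirroring fixedScale=nf in the Fitter, so only the two polynomial parameters are sampled.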