Example #1
    def normalizetest( self, fitter ) :

        p = numpy.asarray( [2.0, 1.3] )
        x = numpy.asarray( [1.0, 1.3, 1.5, 1.8, 2.0] )
        numpy.random.seed( 12345 )
        y = p[0] + p[1] * x + 0.5 * numpy.random.randn( 5 )

        m = PolynomialModel( 1 )

        ftr = fitter( x, m )
        print( "=============================================================" )
        print( str( ftr ) )
        print( fmt( x ) )
        print( fmt( y ) )

        par = ftr.fit( y )
        print( fmt( p ) )
        print( fmt( par ) )

        conpr = numpy.asarray( [1.0,0.0] )
        for w in [0,1,10,100,1000] :
            m2 = m.copy()
            ftr = fitter( x, m2 )

            ftr.normalize( conpr, p[0], weight=w )
            par = ftr.fit( y )
            print( fmt( w ), fmt( par ), fmt( ftr.chisq ) )

        self.assertTrue( abs( p[0] - par[0] ) < 1e-3 )

        print( fmt( ftr.hessian ) )
Example #2
    def test5( self, plot=None ) :
        print( "====test5============================" )
        nn = 10
        x = numpy.linspace( 0, 2, nn, dtype=float )
        ym = 0.3 + 0.5 * x
        nf = 0.1
        numpy.random.seed( 2345 )
        noise = numpy.random.randn( nn )

        y = ym + nf * noise
        limits = [-1,2]

        model = PolynomialModel( 1 )
        model.setLimits( lowLimits=limits[0], highLimits=limits[1] )

        s = 0.0
        s2 = 0.0
        mr = 10
        for k in range( mr ) :
            dis = GaussErrorDistribution( x, y, scale=0.5 )
            ns = NestedSampler( x, model, y, distribution=dis, verbose=0, seed=k )

            yfit = ns.sample()
            par2 = ns.parameters
            logz2 = ns.logZ
            dlz2 = ns.logZprecision
            s += logz2
            s2 += logz2 * logz2
            print( "pars  ", fmt( par2 ), "  logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )

        logz = s / mr
        dlz = math.sqrt( s2 / mr - logz * logz )

        print( "Average  ", fmt( logz ), " +- ", fmt( dlz ) )
Example #3
    def testLaplace(self, plot=None):
        print("====testEvidence for Laplace============")
        nn = 100
        x = np.arange(nn, dtype=float) / (nn / 2) - 1
        ym = 1.3
        nf = 0.1
        noise = np.random.laplace(0.0, 1.0, nn)

        y = ym + nf * noise

        pm = PolynomialModel(0)
        bf = PowellFitter(x, pm, errdis="laplace")

        pars = bf.fit(y, tolerance=1e-10)
        print("pars  ", pars)
        yfit = pm.result(x, pars)
        std = bf.stdevs
        print("stdv  ", std)
        print("scale %f  sumwgt %f" % (bf.scale, bf.sumwgt))

        errdis = LaplaceErrorDistribution(x, y, scale=nf)
        p0 = np.linspace(1.2, 1.5, 201)
        L0 = np.ndarray(201, dtype=float)
        for k, p in enumerate(p0):
            pp = [p, 0.1]
            L0[k] = errdis.logLikelihood(pm, pp)
        L0 -= np.max(L0)

        if plot:
            gm = GaussModel()
            gm.parameters = [1.0, pars[0], std[0]]
            plt.plot(p0, gm(p0), 'b-')
            plt.plot(p0, np.exp(L0), 'k-')

            plt.show()
Example #4
    def testImplicit( self, plot=False ) :
        print( "====  BracketModel Test =======================" )
        m1 = GaussModel( )
        m1 += PolynomialModel( 0 )              # Gauss on a constant background
        m2 = BracketModel( m1 )
        m3 = SineModel( )
        m3 *= m2                                # sine * ( gauss + const )
        print( "Explicit Use" )
        print( m3 )

        g1 = GaussModel( )
        g1 += PolynomialModel( 0 )              # m1 is a chain of models
        g3 = SineModel( )
        g3 *= g1                                # sine * ( gauss + const )
        print( "Implicit Use" )
        print( g3 )                             # exactly the same

        p = [8.0, 1.0, 0.0, 1.0, 0.0, 0.2, 1.0]
        x = numpy.asarray( [ -1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0] )

        assertAE( g3.result( x, p ), m3.result( x, p ) )
        assertAE( g3.partial( x, p ), m3.partial( x, p ) )
        assertAE( g3.derivative( x, p ), m3.derivative( x, p ) )


        stdModeltest( g3, p, plot=plot )
Example #5
 def testPolynomialModel(self, plot=False):
     x = numpy.asarray(
         [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
     print("******POLYNOMIAL**********************")
     m = PolynomialModel(3)
     self.assertTrue(m.getNumberOfParameters() == 4)
     self.assertTrue(m.npbase == 4)
     p = numpy.asarray([1, -2, 3, -2], dtype=float)
     stdModeltest(m, p, plot=plot)
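A degree-n PolynomialModel carries n + 1 parameters (the coefficients c0 … cn), which is what the two assertions above check for n = 3. A minimal standalone sketch of the same relation, assuming the top-level BayesicFitting import used throughout these examples:

from BayesicFitting import PolynomialModel

# f(x) = c0 + c1*x + ... + cn*x^n  ->  n + 1 coefficients
for n in range(5):
    m = PolynomialModel(n)
    assert m.getNumberOfParameters() == n + 1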
Example #6
    def testMonteCarlo3(self, doplot=False):
        print("====== MonteCarlo 3 ===================")

        N = 101
        x = numpy.arange(N, dtype=float) * 0.1
        ran = numpy.random
        ran.seed(1235)
        noise = ran.standard_normal(N)

        ym = x * x + 0.03 * x + 0.05
        y1 = ym + 10 * noise

        pm = PolynomialModel(2)

        ftr = Fitter(x, pm)

        pars1 = ftr.fit(y1)
        stdv1 = ftr.getStandardDeviations()
        print("parameters : ", pars1)
        print("std devs   : ", stdv1)
        print("chisquared : ", ftr.chisq)

        lmce = ftr.monteCarloError()
        chisq = ftr.chisq

        mce = MonteCarlo(x, pm, ftr.covariance)
        mce1 = mce.getError()
        assertAAE(lmce, mce1)

        yfit = pm.result(x)
        s2 = numpy.sum(numpy.square((yfit - ym) / lmce))
        print(s2, math.sqrt(s2 / N))

        integral = numpy.sum(yfit)
        s1 = 0
        s2 = 0
        k = 0
        for k in range(1, 100001):
            rv = mce.randomVariant(x)
            s1 += numpy.sum(rv)
            s2 += numpy.sum(numpy.square(rv - yfit))
            if k % 10000 == 0:
                print("%6d  %10.3f %10.3f %10.3f" %
                      (k, integral, s1 / k, math.sqrt(s2 / k)))

        ### TBC: don't know why the factor 1000 is there. ########
        print(abs(integral - s1 / k), math.sqrt(s2 / (k * 1000)))
        self.assertTrue(abs(integral - s1 / k) < math.sqrt(s2 / (k * 1000)))

        if doplot:
            pyplot.plot(x, y1, 'b.')

            pyplot.plot(x, ym, 'k-')
            pyplot.plot(x, yfit, 'g-')
            pyplot.plot(x, yfit + lmce, 'r-')
            pyplot.plot(x, yfit - lmce, 'r-')
            pyplot.show()
Example #7
    def test2_1( self, plot=False ) :
        print( "====test2_1============================" )
        nn = 100
        x = numpy.arange( nn, dtype=float ) / 50
        ym = 0.2 + 0.5 * x
        nf = 0.1
        numpy.random.seed( 2345 )
        noise = numpy.random.randn( nn )

        y = ym + nf * noise
        limits = [-1,2]

        pm = PolynomialModel( 1 )
        bf = Fitter( x, pm )

        pars = bf.fit( y )
        logz0 = bf.getLogZ( limits=limits )
        logl0 = bf.logLikelihood
        print( "pars  ", fmt( pars ) )
        print( "stdv  ", fmt( bf.stdevs ) )
        print( "logZ  ", fmt( logz0 ), "   logL  ", fmt( logl0 ) )

        errdis = GaussErrorDistribution ( x, y )

        logz1, logl1 = plotErrdis2d( errdis, pm, limits=limits, max=0,
                    plot=plot )
        if plot :
            plt.plot( pars[0], pars[1], 'k.' )

        print( "logZ  ", fmt( logz1 ), "   logL  ", fmt( logl1 ) )

        model = PolynomialModel( 1 )
        model.setLimits( lowLimits=limits[0], highLimits=limits[1] )
        ns = NestedSampler( x, model, y, verbose=0 )

        yfit = ns.sample()

        par2 = ns.parameters
        logz2 = ns.logZ
        dlz2 = ns.logZprecision
        print( "pars  ", fmt( par2 ) )
        print( "stdv  ", fmt( ns.stdevs ) )
        print( "logZ  ", fmt( logz2 ), " +- ", fmt( dlz2 ) )

        self.assertTrue( abs( logz2 - logz0 ) < dlz2 )
#        print( logz0 - logz1, logz0 - logz2 )

        samples = ns.samples
        parevo = samples.getParameterEvolution()
        llevo = samples.getLogLikelihoodEvolution()
        lwevo = samples.getLogWeightEvolution()

        assertAAE( numpy.sum( numpy.exp( lwevo ) ), 1.0 )

        if plot :
            plt.show()
Example #8
    def testToString(self):
        model = PadeModel(1, 2)
        model += PolynomialModel(0)
        name = model.__str__()
        print(name)
        self.assertTrue(name[-1] == '4')

        model = PadeModel(1, 2)
        model += PolynomialModel(1)
        print(model)
    def testThreeModel(self):
        print("  Test three models")
        m = GaussModel()
        self.assertTrue(m.chainLength() == 1)
        p = PolynomialModel(1)
        m.addModel(p)
        self.assertTrue(m.chainLength() == 2)
        s = VoigtModel()
        m.addModel(s)
        self.assertTrue(m.chainLength() == 3)

        params = numpy.arange(9, dtype=float)
        m.parameters = params

        self.assertTrue(m._next == p)
        self.assertTrue(m._next._next == s)
        self.assertTrue(p._next == s)
        self.assertTrue(s._head == m)
        self.assertTrue(p._head == m)
        self.assertTrue(m._head == m)
        self.assertTrue(m.getNumberOfParameters() == 9)
        self.assertTrue(p.getNumberOfParameters() == 9)
        self.assertTrue(s.getNumberOfParameters() == 9)
        self.assertTrue(m.npbase == 3)
        self.assertTrue(p.npbase == 2)
        self.assertTrue(s.npbase == 4)
        self.assertTrue(len(m.parameters) == 9)
        self.assertTrue(p.parameters is None)
        self.assertTrue(s.parameters is None)

        print("Isolate second model into p1")
        p1 = m.isolateModel(1)
        print(p1)
        print(p1.parameters)
        self.assertTrue(isinstance(p1, PolynomialModel))

        self.assertTrue(p1._next == None)
        self.assertTrue(p1._head == p1)
        self.assertTrue(p1.npbase == 2)
        self.assertTrue(p1.getNumberOfParameters() == 2)

        self.assertTrue(m._next == p)
        self.assertTrue(m._next._next == s)
        self.assertTrue(p._next == s)
        self.assertTrue(s._head == m)
        self.assertTrue(p._head == m)
        self.assertTrue(m._head == m)
        self.assertTrue(m.getNumberOfParameters() == 9)
        self.assertTrue(p.getNumberOfParameters() == 9)
        self.assertTrue(s.getNumberOfParameters() == 9)
        self.assertTrue(m.npbase == 3)
        self.assertTrue(p.npbase == 2)
        self.assertTrue(s.npbase == 4)
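A condensed sketch of the chain bookkeeping exercised above: addModel appends a component to the chain, each component keeps its own npbase, and getNumberOfParameters() reports the total for the whole chain (counts taken from the assertions in the test; imports assumed as elsewhere in these examples):

from BayesicFitting import GaussModel, PolynomialModel, VoigtModel

m = GaussModel()                    # npbase == 3
m.addModel(PolynomialModel(1))      # npbase == 2
m.addModel(VoigtModel())            # npbase == 4

assert m.npbase == 3                            # base parameters of the head component only
assert m.getNumberOfParameters() == 3 + 2 + 4   # one shared parameter list for the whole chain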
Example #10
    def testEtalonDrift2Model2(self, plot=False):
        x = numpy.asarray([[-1.0, -0.8], [-0.6, -0.4], [-0.2, 0.0], [0.2, 0.4],
                           [0.6, 0.8], [1.0, -1.0], [-0.8, -0.6], [-0.4, -0.2],
                           [0.0, 0.2], [0.4, 0.6], [0.8, 1.0]])
        print("******ETALON DRIFT 2.2 ********************")

        pm = PolynomialModel(1)
        pm.parameters = [1.0, 0.5]
        m = EtalonDrift2Model(model=pm)
        self.assertTrue(m.npchain == 5)
        self.assertTrue(m.npbase == 5)
        p = [-1.1, 0.5, 0.04, 1.2, -0.5]
        self.stdModeltest(m, p, plot=plot)
Example #11
    def initEngine(self, order=1, np=101):
        m = PolynomialModel(order)

        up1 = UniformPrior(limits=[-1.0, 1.0])
        up2 = UniformPrior(limits=[-1.0, 1.0])

        m.priors = [up1, up2]

        xdata = numpy.linspace(-1.0, 1.0, np, dtype=float)
        data = -0.4 * xdata + 0.5

        numpy.random.seed(345345)
        numpy.set_printoptions(precision=3, suppress=True)
        return (m, xdata, data)
Example #12
    def testWeights1(self, plot=None):
        print("====testWeights 1====================")
        nn = 5
        x = np.arange(nn, dtype=float)
        y = x % 2
        y[2] = 0.5
        x4 = np.arange(4 * nn, dtype=float)
        y4 = x4 % 2
        y4[range(2, 20, 5)] = 0.5

        pm = PolynomialModel(0)
        bf = Fitter(x, pm)
        pars = bf.fit(y)
        var = bf.makeVariance()
        print("======= 5 data points; no weights; no noiseScale ===")
        print("pars  ", pm.parameters, "stdv  ", bf.stdevs, bf.hessian,
              bf.covariance, var)
        print("chisq %f  scale %f %f  sumwgt %f" %
              (bf.chisq, bf.scale, bf.makeVariance(), bf.sumwgt))

        pm = PolynomialModel(0)
        bf = Fitter(x4, pm)
        pars = bf.fit(y4)
        print("======= 20 data points; no weights; no noiseScale ===")
        print("pars  ", pm.parameters, "stdv  ", bf.stdevs, bf.hessian,
              bf.covariance)
        print("chisq %f  scale %f %f  sumwgt %f" %
              (bf.chisq, bf.scale, bf.makeVariance(), bf.sumwgt))

        print("======= 5 data points; weights = 4; no noiseScale ===")
        pm = PolynomialModel(0)
        bf = Fitter(x, pm)
        w = np.zeros(nn, dtype=float) + 4
        pars = bf.fit(y, w)
        print("pars  ", pm.parameters, "stdv  ", bf.stdevs, bf.hessian,
              bf.covariance)
        print("chisq %f  scale %f %f  sumwgt %f" %
              (bf.chisq, bf.scale, bf.makeVariance(), bf.sumwgt))

        pm = PolynomialModel(0)
        bf = Fitter(x, pm)
        pars = bf.fit(y)
        print("======= 5 data points; no weights; noiseScale ===")
        print("pars  ", pm.parameters, "stdv  ", bf.stdevs, bf.hessian,
              bf.covariance)
        print("chisq %f  scale %f %f  sumwgt %f" %
              (bf.chisq, bf.scale, bf.makeVariance(), bf.sumwgt))
        print("noise ", bf.scale, " +- ", bf.stdevScale, "  keep ", bf.keep)
Example #13
    def initEngine(self, order=1, np=101):
        m = PolynomialModel(order)

        up1 = UniformPrior(limits=[-10, 10])
        up2 = UniformPrior(limits=[0, 10])

        m.priors = [up1, up2]

        xdata = numpy.linspace(0.0, 10.0, np)
        data = numpy.ceil(numpy.arange(np, dtype=float) / 2.3 + 2.4)
        numpy.random.seed(345345)
        data += 0.4 * numpy.random.randn(np)
        #        print( data )
        numpy.set_printoptions(precision=3, suppress=True)
        return (m, xdata, data)
Example #14
    def testFixedModel( self ):
        print( "====  BracketModel Test 3 =======================" )
        x = [ -1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0]

        fix = {2:0.3, 4:0.01, 7:-0.002}
        pm = PolynomialModel( 8 )
        self.assertRaises( AttributeError, BracketModel, pm, fixed=fix )
    def testThreeModelLimits(self):
        print("  Test three models: limits and domain <> unit")

        m = GaussModel()
        p = PolynomialModel(1)
        m.addModel(p)
        s = VoigtModel()
        m.addModel(s)

        print(m.npchain)

        lo = numpy.zeros(9)
        hi = numpy.arange(9) + 10.0
        m.setLimits(lo, hi)

        par = numpy.arange(9, dtype=float)
        unp = m.domain2Unit(par)
        print(par)
        print(unp)
        print(m.unit2Domain(unp))
        print(m.partialDomain2Unit(par))

        numpy.testing.assert_array_almost_equal(m.unit2Domain(unp), par, 8)
        self.assertTrue(m.domain2Unit(par[2], 2) == unp[2])
        self.assertTrue(m.domain2Unit(par[3], 3) == unp[3])
        self.assertTrue(m.domain2Unit(par[6], 6) == unp[6])
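The essential property checked above is that unit2Domain inverts domain2Unit once limits are set. A standalone round-trip sketch with an arbitrary 3-parameter model (limits and values are illustrative, not taken from the test):

import numpy
from BayesicFitting import GaussModel

m = GaussModel()                                  # 3 parameters
m.setLimits(numpy.zeros(3), numpy.ones(3) * 10)   # per-parameter low / high limits

par = numpy.asarray([1.0, 2.0, 3.0])
unp = m.domain2Unit(par)                          # parameter space -> unit range
assert numpy.allclose(m.unit2Domain(unp), par)    # and back again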
Example #16
    def testExceptions(self):
        print("====testExceptions======================")
        x1 = np.arange(10, dtype=float)
        x2 = np.ndarray([10, 2])
        pm = PolynomialModel(1)
        gm = GaussModel()
        pm += gm

        self.assertRaises(ValueError, BaseFitter, x1, gm)
        self.assertRaises(ValueError, BaseFitter, x2, pm)

        bf = BaseFitter(x1, pm)
        y = np.arange(10, dtype=float)
        self.assertRaises(NotImplementedError, bf.fit, y)

        self.assertRaises(AttributeError, bf.__getattr__, 'sumwgt')
        self.assertRaises(AttributeError, bf.__getattr__, 'chisq')
        self.assertRaises(AttributeError, bf.__getattr__, 'logOccam')
        self.assertRaises(AttributeError, bf.__getattr__, 'logLikelihood')
        self.assertRaises(AttributeError, bf.__getattr__, 'xyz')
        bf.chisq = -1.0
        print('chisq', bf.chisq)

        y[2] = -np.inf
        w = np.ones(10, dtype=float)
        self.assertRaises(ValueError, bf.checkNan, y, weights=w)
        w[8] = np.nan
        y[2] = 0
        self.assertRaises(ValueError, bf.checkNan, y, weights=w)

        x1[3] = np.nan
        self.assertRaises(ValueError, BaseFitter, x1, pm)
    def test3(self, plot=False):
        print("=========== Nested Sampler test 3 ======================")

        pp, y0, x, y, w = self.makeData(3)

        gm = GaussModel()
        gm.addModel(PolynomialModel(1))
        gm.addModel(SineModel())

        print(gm.shortName())
        print(gm._next.shortName())
        print(gm._next._next.shortName())
        print(gm._next._next._next)

        lolim = numpy.asarray([-10, -10, 0, -10, -10, 0, -10, -10],
                              dtype=float)
        hilim = numpy.asarray([10, 10, 10, 10, 10, 2, 10, 10], dtype=float)

        gm.setLimits(lolim, hilim)

        ns = NestedSampler(x, gm, y, w)
        ns.verbose = 2
        ns.distribution.setLimits([0.01, 100])

        Tools.printclass(ns)

        print("truth  ", pp)
        self.dofit(ns, pp)

        if plot:
            plotFit(x, y, gm, ftr=ns.samples)
Example #18
    def testMonteCarlo1(self):
        print("====== MonteCarlo 1 ===================")

        N = 100
        ran = numpy.random
        ran.seed(12345)
        noise = ran.standard_normal(N)
        x = numpy.arange(N, dtype=float) - 3
        nn = 0.1
        for k in range(5):
            y = noise * nn

            m = PolynomialModel(0)
            ftr = Fitter(x, m)
            par = ftr.fit(y)
            std = ftr.getStandardDeviations()
            chisq = ftr.chisq

            mc = MonteCarlo(x, m, ftr.covariance)
            mc.mcycles = 1000
            lmce = ftr.monteCarloError(monteCarlo=mc)

            print("noise  : ", fmt(nn),
                  "===========================================")
            print("params : ", fmt(par, format="%8.5f"))
            print("stdevs : ", fmt(std, format="%8.5f"))
            print("scale  : ", fmt(ftr.scale, format="%8.5f"), fmt(nn))
            print("chisq  : ", fmt(chisq, format="%8.5f"),
                  fmt(mc._eigenvalues, format="%8.5f"),
                  fmt(mc._eigenvectors, format="%8.5f"))
            print("covar  : ", fmt(ftr.covariance, format="%8.5f"))
            print("mcerr  : ", fmt(lmce[0], format="%8.5f"))
            self.assertTrue(abs(std[0] - lmce[0]) < 0.1 * std[0])
            self.assertTrue(par[0] < 0.05 * nn)
            nn *= 10
    def testPoissonErrorDistribution(self):
        print("====== Test Poisson Error Distribution ======================")
        poly = PolynomialModel(1)
        param = numpy.asarray([12, 10], dtype=float)
        data = numpy.asarray(self.data + 12, dtype=int)
        print("Data : ", data)
        ped = PoissonErrorDistribution(self.x, data)
        self.assertFalse(ped.acceptWeight())

        logL = ped.logLikelihood(poly, param)
        print("logL  = %8.3f" % (logL))
        scale = 0.1
        logL = ped.logLikelihood(poly, param)
        mok = poly.result(self.x, param)
        # Poisson log-likelihood: sum over data of  k * log(mu) - mu - log(k!)
        altL = numpy.sum(data * numpy.log(mok) - mok - logFactorial(data))

        print("logL  = %8.3f  %8.3f" % (logL, altL))
        assertAAE(logL, altL)

        logL = ped.logLikelihood(poly, [-5, 0])
        print("logL  = %8.3f" % (logL))
        self.assertTrue(math.isinf(logL))

        scale = 1.0
        fitIndex = [0, 1]
        dL = ped.partialLogL(poly, param, fitIndex)
        nL = ped.numPartialLogL(poly, param, fitIndex)
        print("partial = ", dL)
        print("numpart = ", nL)
        assertAAE(dL, nL, 5)

        scale = 0.5
        dL = ped.partialLogL(poly, param, fitIndex)
        nL = ped.numPartialLogL(poly, param, fitIndex)
        print("partial = ", dL)
        print("numpart = ", nL)
        assertAAE(dL, nL, 5)

        scale = 1.0
        for i in range(10):
            param = numpy.asarray([8 + i, 4], dtype=float)
            print(param, "  :  ", end="")
            for k in range(9):
                print(" %8.3f" % ped.logLikelihood(poly, param), end="")
                param[1] += 1
            print("")
    def testLaplaceErrorDistribution(self):
        print("\n   Test Laplace Error Distribution\n")
        poly = PolynomialModel(1)
        ced = LaplaceErrorDistribution(self.x, self.data)
        self.assertTrue(ced.acceptWeight())

        param = numpy.asarray([1, 10, 1], dtype=float)
        logL = ced.logLikelihood(poly, param)
        print("logL  =  %8.3f" % (logL))
        scale = 0.1
        param[2] = scale
        s2 = scale * scale
        logL = ced.logLikelihood(poly, param)

        # Laplace log-likelihood: -N * log(2 * scale) - sum(|residuals|) / scale
        altL = -len(self.data) * math.log(2.0 * scale)
        res = ced.getResiduals(poly, param[:2])
        altL -= numpy.sum(numpy.abs(res)) / scale
        print("logL  =  %8.3f  alt %8.3f" % (logL, altL))
        assertAAE(logL, altL)

        scale = 1.0
        param[2] = scale
        fi = [0, 1, 2]
        dL = ced.partialLogL(poly, param, fi)
        nL = ced.numPartialLogL(poly, param, fi)
        print("params  = ", param, scale)
        print("partial = ", dL)
        print("numpart = ", nL)
        assertAAE(dL, nL, 2)

        scale = 0.5
        param[2] = scale
        ced.weights = self.wgt
        dL = ced.partialLogL(poly, param, fi)
        nL = ced.numPartialLogL(poly, param, fi)
        print("params  = ", param, scale)
        print("partial = ", dL)
        print("numpart = ", nL)
        assertAAE(dL, nL, 2)

        logL = ced.logLikelihood(poly, param)
        cced = ced.copy()
        logLc = cced.logLikelihood(poly, param)
        print(cced)
        print("logL  =  %8.3f  copy %8.3f" % (logL, logLc))
        dLc = cced.partialLogL(poly, param, fi)
        print("params  = ", param, scale)
        print("partial = ", dL)
        assertAAE(logL, logLc, 6)
        assertAAE(dL, dLc, 6)

        for i in range(11):
            param = numpy.asarray([i - 5, 5, 1], dtype=float)
            print(param, ":  ", end="")
            for k in range(9):
                print(" %8.3f" % ced.logLikelihood(poly, param), end="")
                param[1] += 1
            print("")
Example #21
 def testInit(self):
     print("====testInit======================")
     x1 = np.arange(10, dtype=float)
     pm = PolynomialModel(1)
     bf = BaseFitter(x1, pm)
     print(bf)
     print(bf.model)
     print(bf.xdata)
     print(bf.nxdata, bf.ndim)
Example #22
    def testEtalonModel2(self, plot=False):
        x = numpy.asarray(
            [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
        print("******ETALON 2***********************")
        fm = PolynomialModel(1)
        m = EtalonModel(fixed={0: 1.5, 1: fm})
        p = numpy.asarray([2.0, 0.2, 1.0, 0.9], dtype=float)

        stdModeltest(m, p, plot=plot)
Example #23
    def __init__(self,
                 knots,
                 order=3,
                 degree=1,
                 copy=None,
                 fixed=None,
                 **kwargs):
        """
        Sine model of a fixed frequency with spline-like changing
        amplitude/phase and a polynomially changing frequency.

        Number of parameters is 2 * ( len(knots) + order - 1 ) + degree + 1.

        Parameters
        ----------
        knots : array_like
            positions of the spline knots
        order : int
            order of the splines (default: 3)
        degree : int
            degree of the frequency-drift polynomial (default: 1)
        copy : SineSplineDriftModel
            model to be copied
        fixed : dict
            If not None raise AttributeError.

        Raises
        ------
        AttributeError : When fixed is not None

        """
        if fixed is not None:
            raise AttributeError("SineSplineDriftModel cannot have fixed parameters")

        np = 2 * (len(knots) + order - 1) + degree + 1
        super(SineSplineDriftModel, self).__init__(np, copy=copy, **kwargs)

        self.knots = knots
        self.order = order
        self.degree = degree
        if copy is not None:
            self.pm = copy.pm.copy()
            self.cm = copy.cm.copy()
            self.sm = copy.sm.copy()
        else:
            self.pm = PolynomialModel(self.degree)
            self.cm = SplinesModel(knots, order=order)
            self.sm = SplinesModel(knots, order=order)
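The parameter count in the docstring mirrors the construction above: the two SplinesModels presumably contribute len(knots) + order - 1 amplitude parameters each (hence the factor 2), and the PolynomialModel of the frequency drift adds degree + 1 more. A worked check of the formula with illustrative values:

knots = [0.0, 0.5, 1.0, 1.5, 2.0]     # illustrative knot positions
order, degree = 3, 1
npars = 2 * (len(knots) + order - 1) + degree + 1
print(npars)                          # 2 * 7 + 2 = 16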
Example #24
    def __init__(self,
                 ndim=1,
                 copy=None,
                 fixedModel=None,
                 values=None,
                 table=None,
                 **kwargs):
        """
        The ConstantModel implementation.
        <br>
        Number of parameters = 0.

        Parameters
        ----------
        ndim : int
            number of dimensions for the model. (default: 1)
        copy : ConstantModel
            model to be copied. (default: None)
        fixedModel : Model
            a fixed model to be returned. (default: 0 everywhere)
        values : array_like
            parameters to be used in the fixedModel. (default: None)
        table : array_like
            array of tabulated results

        """
        super(ConstantModel, self).__init__(0, ndim=ndim, copy=copy, **kwargs)
        if copy is not None:  # copy from model
            self.fixedModel = copy.fixedModel
            self.table = copy.table
        elif table is not None:  # store tabular results
            self.table = table
            self.fixedModel = None
        else:  # make a fixedModel
            self.table = None
            if fixedModel is not None:
                self.fixedModel = fixedModel
            else:
                self.fixedModel = (PolynomialModel(0) if ndim == 1
                                   else PolySurfaceModel(0))
            if values is not None:
                self.fixedModel.parameters = values
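A minimal construction sketch for the branches handled above (imports assumed as elsewhere; values are illustrative): a ConstantModel exposes no fittable parameters of its own, it simply returns whatever its fixedModel or table produces.

from BayesicFitting import ConstantModel, PolynomialModel

cm0 = ConstantModel()                       # default: 0 everywhere (PolynomialModel(0))
cm1 = ConstantModel(fixedModel=PolynomialModel(1),
                    values=[0.5, -0.4])     # a fixed straight line 0.5 - 0.4*x

assert cm0.getNumberOfParameters() == 0
assert cm1.getNumberOfParameters() == 0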
    def testGaussErrorDistribution(self):

        print("=======   Test Gauss Error Distribution  ==================")

        poly = PolynomialModel(1)
        param = numpy.asarray([1, 10, 1], dtype=float)
        ged = GaussErrorDistribution(self.x, self.data)
        self.assertTrue(ged.acceptWeight())

        #   data = { -11, -9, -5, -5, -1, 1, 1, 5, 5, 8, 11 }
        #   f( x ) = { -10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10 }
        #   f - d=     1   1  -1   1  -1 -1  1 -1  1  0  -1

        chisq = ged.getChisq(ged.getResiduals(poly, param[:2]), param[2])
        print("chisq = %8.3f" % (chisq))
        logL = ged.logLikelihood(poly, param)
        # Gauss log-likelihood at unit scale: -N/2 * log(2*pi) - chisq/2
        altL = -0.5 * (11 * math.log(2 * math.pi) + chisq)
        print("logL  = %8.3f  %8.3f" % (logL, altL))
        assertAAE(logL, altL)

        scale = 0.1
        param[2] = scale
        logL = ged.logLikelihood(poly, param)
        altL = -11 * (0.5 * math.log(2 * math.pi) +
                      math.log(scale)) - 0.5 * chisq / (scale * scale)
        print("logL  = %8.3f  %8.3f" % (logL, altL))
        assertAAE(logL, altL)

        scale = 1.0
        param[2] = scale
        fitIndex = numpy.arange(3)
        dL = ged.partialLogL(poly, param, fitIndex)
        nL = ged.numPartialLogL(poly, param, fitIndex)
        print("partial = ", dL)
        print("numpart = ", nL)
        assertAAE(dL, nL, 5)

        scale = 0.5
        param[2] = scale
        ged.weights = self.wgt
        dL = ged.partialLogL(poly, param, fitIndex)
        nL = ged.numPartialLogL(poly, param, fitIndex)
        print("partial = ", dL)
        print("numpart = ", nL)
        assertAAE(dL, nL, 5)

        scale = 1.0
        for i in range(11):
            param = numpy.asarray([i - 5, 5, 1], dtype=float)
            print(param, "  :  ", end="")
            for k in range(9):
                print(" %8.3f" % ged.logLikelihood(poly, param), end="")
                param[1] += 1
            print("")
    def __init__(self, order, frequency, copy=None, fixed=None, **kwargs):
        """
        Sine model of a fixed frequency and polynomials as coefficients.

        Number of parameters is 2n+2.

        Parameters
        ----------
        order : int
            order of the polynomials
        frequency : float
            the frequency
        copy : PolySineAmpModel
            model to be copied
        fixed : dict
            If not None raise AttributeError.

        Raises
        ------
        AttributeError : When fixed is not None

        """
        if fixed is not None:
            raise AttributeError(
                "PolySineAmpModel cannot have fixed parameters")

        super(PolySineAmpModel, self).__init__(2 * order + 2,
                                               ndim=2,
                                               copy=copy,
                                               **kwargs)

        if copy is None:
            self.frequency = frequency
            self.order = order
        else:
            self.frequency = copy.frequency
            self.order = copy.order

        self._pm = PolynomialModel(self.order)
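The 2n+2 parameter count stated in the docstring is just 2 * (order + 1): presumably one polynomial of the given order for each of the two quadrature amplitudes of the sine. A quick construction check with illustrative values:

from BayesicFitting import PolySineAmpModel

m = PolySineAmpModel(2, 1.3)                    # order=2, frequency=1.3
assert m.getNumberOfParameters() == 2 * 2 + 2   # 2 * (order + 1)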
Example #27
    def testEtalonModel3(self, plot=False):
        x = numpy.asarray(
            [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
        print("******ETALON 3***********************")
        fm = PolynomialModel(1)
        fm += SineModel()
        am = SplinesModel(nrknots=2, min=-1, max=1)
        m = EtalonModel(fixed={0: am, 1: fm})
        par = [10.0, 0.2, 1.0, 0.1, 0.0, 0.02, 1.0, 0.5, 2.0, 0.1, 0.0]
        p = numpy.asarray(par, dtype=float)

        stdModeltest(m, p, plot=plot)
Example #28
    def testGaussPlusBackgroundModel(self, plot=False):
        print("******GAUSS + BG**********************")
        gm = GaussModel()
        print(gm)
        print(gm.parameters)
        pm = PolynomialModel(1)
        print(pm)
        print(pm.parameters)
        gm.addModel(pm)
        par = numpy.asarray([3, 0.2, 0.2, 0.1, 0.1], dtype=float)

        stdModeltest(gm, par, plot=plot)
    def testProductModel( self, plot=False ):
        rng = numpy.random
        x = rng.rand( 100, 2 )
        x[:,0] *= 3
        x[:,1] *= 2

        xy  = numpy.asarray( [[-1.0, -0.8], [-0.6, -0.4], [-0.2, 0.0], [0.2, 0.4], [0.6, 0.8],
                [1.0, -1.0], [-0.8, -0.6], [-0.4, -0.2], [0.0, 0.2], [0.4, 0.6], [0.8, 1.0]] )
        print( "******2D PRODUCT*****************" )
        gm = GaussModel()
        pm = PolynomialModel( 2 )
        self.assertRaises( AttributeError, ProductModel, [gm,pm], fixed={3:1.1} )
Example #30
    def test3(self, plot=False):

        c0 = 3.2
        c1 = -0.1
        c2 = 0.3
        c3 = 1.1
        c4 = 2.1
        y = (self.x - c1) / c2
        y = c0 * numpy.exp(-y * y) + self.noise

        print("++++++++++++++++++++++++++++++++++++++++++++++++++")
        print("Testing Nonlinear Fitters: dogbox")
        print("++++++++++++++++++++++++++++++++++++++++++++++++++")
        modl1 = GaussModel()

        amfit = CurveFitter(self.x, modl1, method='dogbox')
        par1 = amfit.fit(y)

        print([c0, c1, c2])
        print(par1)

        print(self.x)
        print(y)

        modl2 = GaussModel()
        modl2.addModel(PolynomialModel(1))
        z = y + c4 * self.x + c3

        #        modl2.parameters = numpy.append( par1, [0,0] )
        lmfit = CurveFitter(self.x, modl2, method='dogbox')

        par2 = lmfit.fit(z)
        print([c0, c1, c2, c3, c4])
        print(par2)
        print(z)

        print("chisq1 = ", amfit.chisq, "  chisq2 = ", lmfit.chisq)

        self.assertTrue(self.eq(par2[0], par1[0], 0.1))
        self.assertTrue(self.eq(par2[1], par1[1], 0.1))
        self.assertTrue(self.eq(abs(par2[2]), abs(par1[2]), 0.1))
        self.assertTrue(self.eq(par2[0], c0, self.ss))
        self.assertTrue(self.eq(par2[1], c1, self.ss))
        self.assertTrue(self.eq(abs(par2[2]), c2, self.ss))
        self.assertTrue(self.eq(par2[3], c3, self.ss))
        self.assertTrue(self.eq(par2[4], c4, self.ss))
        if plot:
            xx = numpy.linspace(-1, +1, 1001)
            plt.plot(self.x, y, 'k+')
            plt.plot(xx, modl1.result(xx), 'k-')
            plt.plot(self.x, z, 'r+')
            plt.plot(xx, modl2.result(xx), 'r-')
            plt.show()