Example 1
    def _get_trial_stats(self):
        import mcmc

        g1vals=self.trials[:,2]
        g2vals=self.trials[:,3]

        prior = self.gprior(g1vals,g2vals)

        w,=numpy.where(prior > 0)
        if w.size == 0:
            print_pars(self._lm_result['pars'],front='lm pars:', stream=stderr)
            print_pars(self._lm_result['perr'],front='lm perr:', stream=stderr)
            print >>stderr,"no prior values > 0!"
            return None

        dpri_by_g1 = self.gprior.dbyg1(g1vals,g2vals)
        dpri_by_g2 = self.gprior.dbyg2(g1vals,g2vals)

        # prior is already in the distribution of points.  This is simpler for
        # most things but for lensfit sensitivity we need a factor of
        # (1/P)dP/de

        pars,pcov = mcmc.extract_stats(self.trials, weights=self.iweights)

        g = pars[2:4].copy()
        gcov = pcov[2:4, 2:4].copy()

        g1diff = g[0]-g1vals
        g2diff = g[1]-g2vals

        gsens = numpy.zeros(2)
        gsens[0]= 1.-(g1diff[w]*dpri_by_g1[w]/prior[w]).mean()
        gsens[1]= 1.-(g2diff[w]*dpri_by_g2[w]/prior[w]).mean()

        return pars, pcov, g, gcov, gsens
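
The sensitivity computed above is the lensfit-style correction described in the comment: with the prior P(g) already folded into the sampled points, each component is

    gsens_i = 1 - < (ghat_i - g_i) * (1/P) * dP/dg_i >,    averaged over trials with P > 0,

where ghat is the weighted mean shear (pars[2:4]); this is exactly what the two gsens lines implement.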
Example 2
def test_coellip2(nstep=10000, burnin=1000, s2n=None):
    ngauss=2
    dims=array([21,21])
    cen=(dims-1)/2.
    covar1=[2.0,0.0,2.0]
    covar2=[2.5,0.0,2.5]
    counts1=1.0
    counts2=0.3
    sky=0.0
    skysig=0.001

    im1 = model_image('gauss',
                     dims,
                     cen,covar1,
                     counts=counts1,
                     nsub=1)
    im2 = model_image('gauss',
                     dims,
                     cen,covar2,
                     counts=counts2,
                     nsub=1)

    im = im1 + im2 + skysig*randn(dims[0]*dims[1]).reshape(dims)

    guess=zeros(2+3+2*ngauss)
    guess[0:2] = cen + 0.1*(random(2)-0.5)
    guess[2:5] = covar1 + 0.1*(random(3)-0.5)  # start the shared covariance from the first gaussian's covar
    guess[5:7] = array([0.5,0.5]) # p vals
    guess[7:9] = array([1.0,1.0]) # f vals

    print 'guess:',guess

    # step widths
    #[censig,covsig,psig,fsig]
    stepsize=array([0.01,0.01,0.01,0.01])

    # prior widths
    width=array([0.2,0.2,0.2,0.2])

    obj=MCMCCoellip(im, sky, skysig, guess, width, stepsize)
    m=mcmc.MCMC(obj)

    res = m.run(nstep, guess)

    means, errs = mcmc.extract_stats(res, burnin,sigma_clip=False)
    print 'means +/- err'
    for i in xrange(len(means)):
        print '  %.16g +/- %.16g' % (means[i],errs[i])

     
    return res
Example 3
    def _calc_result(self):
        """
        We marginalize over all parameters but g1,g2, which
        are indices 2 and 3 in the pars array
        """
        import mcmc

        g=numpy.zeros(2)
        gsum=numpy.zeros(2)
        gcov=numpy.zeros((2,2))

        gsum[0] = self._trials[:,2].sum()
        gsum[1] = self._trials[:,3].sum()

        pars,pcov = mcmc.extract_stats(self._trials)

        g[:] = pars[2:4]
        gcov[:,:] = pcov[2:4, 2:4]
 
        arates = self.sampler.acceptance_fraction
        arate = arates.mean()

        max_epars=self._get_maxprob_epars()
        gmix=self._get_convolved_gmix(max_epars)

        stats=calculate_some_stats(self.image, 
                                   self.ivar, 
                                   gmix,
                                   self.npars)

        Tmean=pars[4]
        Terr=sqrt(pcov[4,4])
        Ts2n=pars[4]/sqrt(pcov[4,4])

        self._result={'model':self.model,
                      'g':g,
                      'gcov':gcov,
                      'gsum':gsum,
                      'nsum':self._trials.shape[0],
                      'pars':pars,
                      'perr':sqrt(diag(pcov)),
                      'pcov':pcov,
                      'Tmean':Tmean,
                      'Terr':Terr,
                      'Ts2n':Ts2n,
                      'arate':arate}

        self._result.update(stats)
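
Marginalizing over the other parameters costs nothing with MCMC samples: the marginal distribution of (g1,g2) is just the chain restricted to those columns, so the marginal mean and covariance are read directly off pars[2:4] and pcov[2:4,2:4] above.  Equivalently, in plain numpy (a sketch, not part of the original code):

    g    = self._trials[:, 2:4].mean(axis=0)
    gcov = numpy.cov(self._trials[:, 2:4], rowvar=False)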
Example 4
    def _calc_result(self):
        import mcmc
        pars,pcov=mcmc.extract_stats(self.trials)

        d=diag(pcov)
        perr = sqrt(d)

        self.result={'arate':self.arate,
                     'A':pars[0],
                     'A_err':perr[0],
                     'b':pars[1],
                     'b_err':perr[1],
                     'c':pars[2],
                     'c_err':perr[2],
                     'pars':pars,
                     'pcov':pcov,
                     'perr':perr}
Example 5
    def _calc_result_nogprior(self):
        import mcmc

        g=numpy.zeros(2)
        gcov=numpy.zeros((2,2))

        pars,pcov = mcmc.extract_stats(self._trials)

        g[:] = pars[0:2]
        gcov[:,:] = pcov[0:2, 0:2]
 
        arate = self.get_arate()

        self._result={'g':g,
                      'gcov':gcov,
                      'gsens':numpy.ones(2),
                      'pars':pars,
                      'pcov':pcov,
                      'arate':arate}
Example 6
    def _dofit_mcmc(self):
        import emcee
        import mcmc
        sampler = emcee.EnsembleSampler(self.nwalkers, 
                                        self.npars, 
                                        self._get_lnprob,
                                        a=self.a)

        pos_burn, prob, state = sampler.run_mcmc(self.guess, self.burnin)
        pos, prob, state = sampler.run_mcmc(pos_burn, self.nstep)

        trials  = sampler.flatchain
        pars, pcov = mcmc.extract_stats(trials)

        self.sampler=sampler
        self.trials=trials
        perr=numpy.sqrt(numpy.diag(pcov))

        self._result={'pars':pars, 'pcov':pcov, 'perr':perr}
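
mcmc.extract_stats itself is not shown in these examples.  In most of the usages here it returns the per-parameter means and the parameter covariance matrix of the flattened chain (some older calls pass a burnin and get back means and errors instead, and example 1 passes per-sample weights).  A minimal numpy-only sketch of the unweighted case, for orientation only:

    import numpy

    def extract_stats_sketch(trials):
        # trials has shape (nsamples, npars), one row per post-burnin sample
        pars = trials.mean(axis=0)               # mean of each parameter
        pcov = numpy.cov(trials, rowvar=False)   # npars x npars sample covariance
        return pars, pcov

    # perr, as used above, is then numpy.sqrt(numpy.diag(pcov))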
Example 7
    def _calc_result(self):
        import mcmc
        pars,pcov=mcmc.extract_stats(self.trials)

        d=diag(pcov)
        perr = sqrt(d)

        self.result={'arate':self.arate,
                     'A':pars[0],
                     'A_err':perr[0],
                     'a':pars[1],
                     'a_err':perr[1],
                     'g0':pars[2],
                     'g0_err':perr[2],
                     #'s':pars[4],
                     #'s_err':perr[4],
                     #'r':pars[5],
                     #'r_err':perr[5],
                     'pars':pars,
                     'pcov':pcov,
                     'perr':perr}
Example 8
    def _calc_result_gprior(self):
        import mcmc

        g=numpy.zeros(2)
        gcov=numpy.zeros((2,2))
        gsens = numpy.zeros(2)

        g1vals=self._trials[:,0]
        g2vals=self._trials[:,1]

        prior = self._gprior(g1vals,g2vals)
        dpri_by_g1 = self._gprior.dbyg1(g1vals,g2vals)
        dpri_by_g2 = self._gprior.dbyg2(g1vals,g2vals)

        psum = prior.sum()

        pars,pcov = mcmc.extract_stats(self._trials)

        g[:] = pars[0:2]
        gcov[:,:] = pcov[0:2, 0:2]

        g1diff = g[0]-g1vals
        g2diff = g[1]-g2vals

        w,=numpy.where(prior > 0)
        if w.size == 0:
            raise ValueError("no prior values > 0!")

        gsens[0]= 1.-(g1diff[w]*dpri_by_g1[w]/prior[w]).mean()
        gsens[1]= 1.-(g2diff[w]*dpri_by_g2[w]/prior[w]).mean()
 
        arate = self.get_arate()

        self._result={'g':g,
                      'gcov':gcov,
                      'gsens':gsens,
                      'pars':pars,
                      'pcov':pcov,
                      'arate':arate}
Example 9
def fit_gprior_m_style(cat_type, version=None,
                       a=0.25, g0=0.1, gmax=0.87, gmax_min=None, Awidth=1.0,
                       binsize=0.02, doplot=False):
    """
    cat_type should be "galfit" or "ngmix-exp" "ngmix-dev" "ngmix-bdf"

    If cat_type=="galfit" then fit to the shapes from the sersic fits.
    
    If cat=="ngmix-exp" use my fits, same for dev.  Must send version= as well

    This works much better than an lm fitter

    for all cosmos galaxies I get
        [840.0, 1.05, 0.087, 0.810]
    """
    import mcmc
    import emcee
    import esutil as eu
    from esutil.random import srandu

    g=get_shapes(cat_type, version=version)

    bs=eu.stat.Binner(g)
    bs.dohist(binsize=binsize)
    bs.calc_stats()
    xdata=bs['center']
    ydata=bs['hist']

    nwalkers=200
    burnin=500
    nstep=100

    print 'fitting m-style gprior'

    A=ydata.sum()*(xdata[1]-xdata[0])

    pcen=[A,a,g0,gmax]
    npars=4
    guess=numpy.zeros( (nwalkers,npars) )
    guess[:,0] = pcen[0]*(1.+0.1*srandu(nwalkers))
    guess[:,1] = pcen[1]*(1.+0.1*srandu(nwalkers))
    guess[:,2] = pcen[2]*(1.+0.1*srandu(nwalkers))
    guess[:,3] = pcen[3]*(1.+0.1*srandu(nwalkers))

    ivar = numpy.ones(xdata.size)
    w,=numpy.where(ydata > 0)
    ivar[w] = 1./ydata[w]
    gfitter=GPriorMFitter(xdata, ydata, ivar, Aprior=A, Awidth=Awidth, gmax_min=gmax_min)

    print 'pcen:',pcen

    sampler = emcee.EnsembleSampler(nwalkers, 
                                    npars,
                                    gfitter.get_lnprob,
                                    a=2)

    pos, prob, state = sampler.run_mcmc(guess, burnin)
    sampler.reset()
    pos, prob, state = sampler.run_mcmc(pos, nstep)

    trials  = sampler.flatchain

    pars,pcov=mcmc.extract_stats(trials)

    d=numpy.diag(pcov)
    perr = numpy.sqrt(d)

    res={'A':pars[0],
         'A_err':perr[0],
         'a':pars[1],
         'a_err':perr[1],
         'g0':pars[2],
         'g0_err':perr[2],
         'gmax': pars[3],
         'gmax_err':perr[3],
         'pars':pars,
         'pcov':pcov,
         'perr':perr}


    fmt="""
A:    %(A).6g +/- %(A_err).6g
a:    %(a).6g +/- %(a_err).6g
g0:   %(g0).6g +/- %(g0_err).6g
gmax: %(gmax).6g +/- %(gmax_err).6g
    """.strip()

    print fmt % res

    if doplot:
        import mcmc
        import ngmix
        mcmc.plot_results(trials,names=['A','a','g0','gmax'],
                          title=cat_type)
        p=ngmix.priors.GPriorM(pars)
        gsamp=p.sample1d(g.size)
        plt=eu.plotting.bhist(g, binsize=binsize, show=False)
        eu.plotting.bhist(gsamp, binsize=binsize,
                          plt=plt, color='blue',
                          xlabel='|g|',
                          xrange=[0.,1.],
                          title=cat_type)

    return res
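
A typical invocation, assuming the cosmos shape catalogs are in place (the "galfit" branch does not need version=); this is a hypothetical usage sketch, not taken from the original code:

    res = fit_gprior_m_style("galfit", binsize=0.02, doplot=False)
    print(res['pars'])
    print(res['perr'])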
Example 10
def fit_gprior_exp_mcmc(xdata, ydata, ivar, a=0.25, g0=0.1, gmax=0.87, gmax_min=None, Awidth=1.0):
    """
    This works much better than the lm fitter
    Input is the histogram data.
    """
    import mcmc
    import emcee

    nwalkers=200
    burnin=100
    nstep=100

    print 'fitting exp'

    A=ydata.sum()*(xdata[1]-xdata[0])

    pcen=[A,a,g0,gmax]
    npars=4
    guess=zeros( (nwalkers,npars) )
    guess[:,0] = pcen[0]*(1.+0.1*srandu(nwalkers))
    guess[:,1] = pcen[1]*(1.+0.1*srandu(nwalkers))
    guess[:,2] = pcen[2]*(1.+0.1*srandu(nwalkers))
    guess[:,3] = pcen[3]*(1.+0.1*srandu(nwalkers))


    gfitter=GPriorExpFitter(xdata, ydata, ivar, Aprior=A, Awidth=Awidth, gmax_min=gmax_min)

    print 'pcen:',pcen

    sampler = emcee.EnsembleSampler(nwalkers, 
                                    npars,
                                    gfitter.get_lnprob,
                                    a=2)

    pos, prob, state = sampler.run_mcmc(guess, burnin)
    sampler.reset()
    pos, prob, state = sampler.run_mcmc(pos, nstep)

    trials  = sampler.flatchain

    pars,pcov=mcmc.extract_stats(trials)

    d=diag(pcov)
    perr = sqrt(d)

    res={'A':pars[0],
         'A_err':perr[0],
         'a':pars[1],
         'a_err':perr[1],
         'g0':pars[2],
         'g0_err':perr[2],
         'gmax': pars[3],
         'gmax_err':perr[3],
         'pars':pars,
         'pcov':pcov,
         'perr':perr}


    fmt="""
A:    %(A).6g +/- %(A_err).6g
a:    %(a).6g +/- %(a_err).6g
g0:   %(g0).6g +/- %(g0_err).6g
gmax: %(gmax).6g +/- %(gmax_err).6g
    """.strip()

    print fmt % res

    return res
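
If esutil is not available, the histogram inputs can be built directly with numpy.  This mirrors what fit_gprior_m_style does with eu.stat.Binner; make_gprior_histogram is a sketch, not part of the original code:

    import numpy

    def make_gprior_histogram(g, binsize=0.02):
        # bin |g| on [0,1) and return bin centers, counts, and inverse variance
        nbin = int(1.0/binsize)
        ydata, edges = numpy.histogram(g, bins=nbin, range=(0.0, 1.0))
        xdata = 0.5*(edges[:-1] + edges[1:])
        ydata = ydata.astype('f8')

        # poisson-like weights: ivar = 1/counts where counts > 0, else 1
        ivar = numpy.ones(xdata.size)
        w, = numpy.where(ydata > 0)
        ivar[w] = 1.0/ydata[w]

        return xdata, ydata, ivar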
Example 11
def test_mcmc1(sigma, nstep=10000, burnin=1000, ntrial=1, s2n=35.0):
    """
    S/N is adaptive weighted S/N
    """
    covar=[sigma**2,0.0,sigma**2]
    ngauss=1
    dim=int(2*4*sigma)
    if (dim % 2) == 0:
        dim += 1
    dims=array([dim,dim])
    cen=(dims-1)/2.
    det = covar[0]*covar[2] - covar[1]**2


    counts=1.0
    sky=0.0
    #skysig = counts/s2n/sqrt(4*pi*det)
    skysig = counts/sqrt(dims[0]*dims[1])/s2n

    print 'dims:  ',dims
    print 'cen:   ',cen
    print 'skysig:',skysig

    im0 = model_image('gauss',
                      dims,
                      cen,covar,
                      counts=counts,
                      nsub=1)
    allmeans = zeros( (ntrial, 2+3+2*ngauss) )
    allerrs  = zeros( (ntrial, 2+3+2*ngauss) )
    for j in xrange(ntrial):
        print '-'*70
        print '%d/%d' % ((j+1),ntrial)
        im = im0 + skysig*randn(dims[0]*dims[1]).reshape(dims)

        # guess is prior
        guess=zeros(2+3+2*ngauss)
        guess[0:2] = cen + 0.1*(random(2)-0.5)
        guess[2:5] = covar + 0.1*(random(3)-0.5)
        guess[5] = 1.0
        guess[6] = 1.0

        print 'guess:',guess


        # prior widths, generally broad
        width=array([0.1, # pixels
                     1.0, # pixels**2
                     1.0, 
                     1.0])

        # step widths
        #[censig,covsig,psig,fsig]
        stepsize=array([0.01,0.01,0.01,0.01])
        
        obj=MCMCCoellip(im, sky, skysig, guess, width, stepsize)
        m=mcmc.MCMC(obj)

        res = m.run(nstep, guess)

        means, errs = mcmc.extract_stats(res, burnin,sigma_clip=False)
        print 'means +/- err'
        for i in xrange(len(means)):
            print '  %.16g +/- %.16g' % (means[i],errs[i])

        allmeans[j,:] = means
        allerrs[j,:] = errs
     
    return allmeans, allerrs
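
The per-pixel noise is set from the requested S/N of a flat-weighted flux estimate: summing all pixels gives an estimate with value counts and variance Npix*skysig**2, so

    S/N = counts / (skysig * sqrt(Npix))   =>   skysig = counts / (sqrt(Npix) * s2n),

which is the skysig line above, with Npix = dims[0]*dims[1].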
Example 12
def fit_gprior_2gauss_cut(xdata, ydata, ivar):
    """
    This works much better than the lm fitter
    Input is the histogram data.
    """
    import mcmc
    import emcee

    nwalkers=800
    burnin=1000
    nstep=100

    A=ydata.sum()#*(xdata[1]-xdata[0])

    A1 = 0.6*A
    A2 = 0.4*A

    sigma1 = 0.02
    sigma2 = 0.3

    pcen = numpy.array([A1,sigma1,A2,sigma2])

    npars=pcen.size
    guess=zeros( (nwalkers,npars) )
    guess[:,0] = pcen[0]*(1.+0.2*srandu(nwalkers))
    guess[:,1] = pcen[1]*(1.+0.2*srandu(nwalkers))
    guess[:,2] = pcen[2]*(1.+0.2*srandu(nwalkers))
    guess[:,3] = pcen[3]*(1.+0.2*srandu(nwalkers))

    gfitter=GPrior2GaussCutFitter(xdata, ydata, ivar)

    print 'pcen:',pcen

    sampler = emcee.EnsembleSampler(nwalkers, 
                                    npars,
                                    gfitter.get_lnprob,
                                    a=2)

    pos, prob, state = sampler.run_mcmc(guess, burnin)
    sampler.reset()
    pos, prob, state = sampler.run_mcmc(pos, nstep)

    arate = sampler.acceptance_fraction.mean()
    print 'arate:',arate
    trials  = sampler.flatchain
    mcmc.plot_results(trials, ptypes=['log','linear','log','linear'])
    

    pars,pcov=mcmc.extract_stats(trials)

    d=diag(pcov)
    perr = sqrt(d)

    gprior=GPrior2GaussCut(pars)

    res={'A1':pars[0],
         'A1_err':perr[0],
         'sigma1':pars[1],
         'sigma1_err':perr[1],
         'A2':pars[2],
         'A2_err':perr[2],
         'sigma2':pars[3],
         'sigma2_err':perr[3],

         'pars':pars,
         'pcov':pcov,
         'perr':perr}


    fmt="""
A1:        %(A1).6g +/- %(A1_err).6g
sigma1:    %(sigma1).6g +/- %(sigma1_err).6g
A2:        %(A2).6g +/- %(A2_err).6g
sigma2:    %(sigma2).6g +/- %(sigma2_err).6g
    """.strip()

    print fmt % res

    return gprior,res