Example #1
    def fitSky(self, sci, unc, dqa, ddt):

        xg, yg = indices.one2two(ddt.xyg, sci.shape)

        y0 = max(np.amin(yg) - self.skypars['width'], 0)
        y1 = min(np.amax(yg) + self.skypars['width'], sci.shape[0] - 1)  # rows are axis 0
        x0, x1 = np.amin(xg), np.amax(xg)
        box = slice(y0, y1 + 1), slice(x0, x1 + 1)

        sky = np.zeros([y1 - y0 + 1, x1 - x0 + 1])
        yy = np.arange(y0, y1 + 1)

        for j, xu in enumerate(indices.unique(xg)):
            y, f, u, g = self.crossDispersion(sci, unc, dqa, xg, yg, xu)

            if self.models['source'].form == 'gaussian':
                w = np.absolute(f)
                yave = np.average(y, weights=w)
                ysig = np.sqrt(np.average((y - yave)**2, weights=w))

                self.models['source'].p0[0] = np.amax(f)
                self.models['source'].p0[1] = yave
                if ysig > 0.:
                    self.models['source'].p0[2] = ysig
            elif self.models['source'].form == 'tabulated':
                v = np.array(ddt.val)[g]
                tabv, taby = indices.decimate(yg[g], v)
                tabv = tabv / np.sum(tabv)
                self.models['source'].table = {'y': taby, 'v': tabv}

            # update the initial conditions
            p0 = []
            for name, model in self.models.items():
                p0.extend(model.p0)

            # curve_fit raises a RuntimeError when it fails to converge;
            # fall back to the initial guess in that case
            try:
                p, pcov = optimize.curve_fit(self.totModel, y, f, sigma=u,
                                             p0=p0, absolute_sigma=True)
            except RuntimeError:
                p = p0

            sky[:, j] = self.models['sky'](yy, p[-self.models['sky'].npar:])

        return sky, box
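
The indices helpers used throughout these examples (one2two, unique,
decimate) come from pyLINEAR and are not shown here. A minimal sketch of
the semantics assumed by the calling code, in plain NumPy (the shape
ordering and return conventions are inferred from usage, not taken from
the library source):

import numpy as np

def one2two(xyg, shape):
    # assumed: split 1-D (flattened) pixel indices into (x, y) pairs
    # for an image of the given shape, taken here as NumPy-style (ny, nx)
    yg, xg = np.unravel_index(np.asarray(xyg, dtype=int), shape)
    return xg, yg

def unique(arr):
    # assumed: sorted unique values of a 1-D index array
    return np.unique(np.asarray(arr))

def decimate(idx, val):
    # assumed: sum val over repeated idx entries, returning
    # (summed values, unique indices), matching calls such as
    # tabv, taby = indices.decimate(yg[g], v)
    uidx, inv = np.unique(np.asarray(idx), return_inverse=True)
    uval = np.bincount(inv, weights=np.asarray(val, dtype=float))
    return uval, uidx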
Example #2
def makeOMTs(flt,sources,grismconf,path,remake,nsub):
    print("[info]Making the OMTs")
    # create the table
    tab=h5table.H5Table(flt.dataset,'omt',path=path)

    # remake the table?
    if remake and os.path.isfile(tab.filename):
        os.remove(tab.filename)

    with tab as h5:

        # add some stuff to that header
        h5utils.writeAttr(h5,'segmap',sources.segmap)
        h5utils.writeAttr(h5,'nsource',np.uint16(len(sources)))
        h5utils.writeAttr(h5,'detimage',sources.obsdata.detImage)
        h5utils.writeAttr(h5,'detband',sources.obsdata.detName)
        h5utils.writeAttr(h5,'maglimit',np.float32(sources.maglimit))


        for det,detconf in grismconf:

            detgrp=h5.require_group(det)

            # get the center of the detector
            xc,yc=detconf.naxis/2.
            thisGrism=flt[det]
                
            for beam,beamconf in detconf:
                beamgrp=detgrp.require_group(beam)
                if remake:
                    sourcesDone = []
                else:
                    sourcesDone = list(beamgrp.keys())

                wav=beamconf.wavelengths(xc,yc,1)      # force nsub=1

                # add some stuff to table
                h5utils.writeAttr(beamgrp,'wav0',np.float32(wav[0]))
                h5utils.writeAttr(beamgrp,'wav1',np.float32(wav[-1]))
                h5utils.writeAttr(beamgrp,'dwav',np.float32(wav[1]-wav[0]))

                # process each source
                for segid,src in sources:
                    if src.name not in sourcesDone:
                        
                        xd,yd=src.convexHull
                        xg,yg=src.xy2xy(xd,yd,thisGrism)
                        xyg,lam,val=beamconf.specDrizzle(xg,yg,wav)
                        if len(xyg)!=0:
                            omt=h5table.OMT(segid)
                            xyg=indices.unique(np.array(xyg))
                            omt.extend(xyg)
                            omt.writeH5(beamgrp, RA=src.adc[0],
                                        Dec=src.adc[1],
                                        # NOTE: the original passed an
                                        # undefined 'xyc'; the detector
                                        # center computed above is assumed
                                        xc=xc, yc=yc,
                                        mag=np.float32(src.mag),
                                        area=np.float32(src.area),
                                        npix=np.uint32(src.npix))
    return tab.filename
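
h5py's require_group, used above, is idempotent: it returns the group if
it already exists and creates it otherwise, which is what makes explicit
get-or-create branching unnecessary. A minimal sketch of the pattern (the
file, group, and attribute names are illustrative):

import h5py
import numpy as np

with h5py.File('example_omt.h5', 'a') as h5:
    detgrp = h5.require_group('IR')       # created on the first call,
    beamgrp = detgrp.require_group('+1')  # reused on every later call
    beamgrp.attrs['wav0'] = np.float32(8000.0)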
Example #3
    def loadBeams(self, h5det, detconf, detimg, unc, gpx, sources, grismFF,
                  thresh=-np.inf):
        thresh = np.float64(thresh)
        
        # output containers for the matrix triplets and pixel indices
        i = []
        j = []
        aij = []
        xyg = []
        
        
        # loop over beams in question
        for beam,beamconf in detconf:
            h5beam=h5det[beam]
            
            # loop over the sources
            for srcindex,(segid,src) in enumerate(sources):

                if self.TTYPE=='ODT':                 # Read the ODT
                    odt=h5table.ODT(src.segid)
                    odt.readH5(h5beam)
                    ddt=odt.decimate()
                    del odt
                elif self.TTYPE=='DDT':               # Read the DDT
                    ddt=h5table.DDT(src.segid)
                    ddt.readH5(h5beam)
                else:
                    msg="Invalid Table Type: {}".format(self.TTYPE)
                    raise NotImplementedError(msg)

                if len(ddt)!=0:
                    
                    # get limits 
                    limits=src.limits
                    wav0=np.amin(limits)
                    wav1=np.amax(limits)
                    
                    # remove pixels out of range and/or in GPX
                    xg,yg=indices.one2two(ddt.xyg,detimg.naxis)
                    g=np.where((ddt.wav >= wav0) & (ddt.wav <= wav1) &
                               (gpx[yg,xg]) & (ddt.val > thresh))[0]
                    if len(g)!=0:
                        # select the items that are good
                        ddt.select(g)
                        xg,yg=xg[g],yg[g]
                        del g
                                                
                        # compute the scaling terms
                        ff=grismFF(xg,yg,ddt.wav,detconf.detector)
                        pa=detimg.pixelArea(xg,yg)    # pixel area map
                        sens=beamconf.sensitivity(ddt.wav)*FLUXSCALE
                        
                        
                        # scale the DDT
                        ddt*=(ff*pa*sens)
                        del ff,pa,sens
                                                
                        # compute the wavelength indices
                        lamind=np.digitize(ddt.wav,limits)-1
                        
                        # divide the matrix elements by the pixel uncertainty
                        val=ddt.val/unc[yg,xg]
                        
                        # compute the matrix element
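                        #   the row index encodes (image, pixel):
                        #       i = imgindex*npix + xyg
                        #   the column index encodes (source, wavelength):
                        #       j = cwav[srcindex] + lamind
                        #   both are packed into one key ij = i*npar + j so
                        #   that repeated (i,j) pairs can be summed in a
                        #   single decimate pass below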
                        iii=ddt.xyg.astype(np.uint64)+\
                             self.imgindex*detimg.npix
                        jjj=lamind+self.cwav[srcindex]
                        ij=jjj+self.npar*iii
                        ij=ij.astype(np.uint64)
                        del iii,jjj
                        
                        # decimate over repeated indices
                        aiju,iju=indices.decimate(ij,val)
                        
                        # compute matrix coordinates
                        iu,ju=np.divmod(iju,self.npar)
                    
                        # compute pixel positions (imgind is currently unused;
                        # only the unique xygind values are kept below)
                        imgind,xygind=np.divmod(iu,detimg.npix)

                        # downtype to save space
                        if self.downtype:
                            iu=iu.astype(np.uint32)
                            ju=ju.astype(np.uint32)
                            aiju=aiju.astype(np.float32)

                        
                        # save the matrix elements
                        i.append(iu)
                        j.append(ju)
                        aij.append(aiju)
                        del iu,aiju
                        
                        # compute the unique positions
                        xygind=indices.unique(xygind)
                        xyg.append(xygind)
                        del xygind
                                                
        i = np.hstack(i)
        j = np.hstack(j)
        aij = np.hstack(aij)
        xyg = np.hstack(xyg)
        return i,j,aij,xyg
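
The (i, j, aij) triplets returned by loadBeams are sparse-matrix elements
in coordinate form. A minimal sketch of how such triplets could be
assembled with SciPy (the arrays and shape are illustrative stand-ins):

import numpy as np
from scipy import sparse

# dummy triplets standing in for the output of loadBeams
i = np.array([0, 2, 2], dtype=np.uint32)
j = np.array([1, 0, 3], dtype=np.uint32)
aij = np.array([0.5, 1.2, 0.7], dtype=np.float32)

# coordinate-format sparse matrix; duplicate (i, j) pairs would be summed
# on conversion, mirroring what indices.decimate already does above
A = sparse.coo_matrix((aij, (i, j)), shape=(3, 4)).tocsr()
print(A.toarray())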
Example #4
    def loadFLT(self,flt,sources,extconf,mskconf,grismFF,pb,path):

        # output stuff
        i = []
        j = []
        aij = []
        
        # make mask for this FLT
        masks=self.maskBeams(flt,mskconf,path)
        # open the H5Table
        with h5table.H5Table(flt.dataset,self.TTYPE,path=path) as h5:
            # loop over detectors
            for detname,detimg in flt:
                h5det=h5[detname]              # get the group
                detconf=extconf[detname]     # grism config

                # save this for easy access later
                self.npix=detimg.npix
                
                # read the images
                sci,scihdr=flt.readfits(detconf.sciext,detconf.extver)
                unc,unchdr=flt.readfits(detconf.uncext,detconf.extver)
                dqa,dqahdr=flt.readfits(detconf.dqaext,detconf.extver)

                # make a good pixel mask
                gpx=(dqa == 0) & (unc > 0)
                if len(masks)!=0:
                    gpx &= masks[detname]
                del dqa,dqahdr,unchdr      # don't need these anymore
                
                # load the beams for this detector
                data=self.loadBeams(h5det,detconf,detimg,unc,gpx,sources,\
                                    grismFF)
                self.imgindex+=1

                # collect the results
                if len(data[3])!=0:
                    # collect the matrix terms
                    i.append(data[0])
                    j.append(data[1])
                    aij.append(data[2])

                    # compute pixel (x,y) pairs
                    xyg=indices.unique(np.array(data[3]))

                    # this sort was encapsulated in uniqify (written by
                    # R Ryan), but must be explicit here because of how
                    # indices.unique is implemented (a sort flag in
                    # indices.unique could absorb it)
                    xyg=np.sort(xyg)
                    
                    xg,yg=indices.one2two(xyg,detimg.naxis)
                    xg=xg.astype(int)
                    yg=yg.astype(int)
                    bi=sci[yg,xg]/unc[yg,xg]
                    del xg,yg     # clean up memory usage
                    
                    
                    # check for bad values in bi
                    g=np.where(np.isinf(bi))[0]
                    if len(g)!=0:
                        print('[warn]Infinite values in bi; is UNC image ok?')
                        print(bi[g])
                        raise RuntimeError("Infinite values. aborting.")

                    # append to the running data vector (like IDL's push)
                    self.bi = np.hstack((self.bi,bi))
                    del bi    # again, every little bit helps
                    
                # save the memory usage
                del data
        i = np.hstack(i)
        j = np.hstack(j)
        aij = np.hstack(aij)

        return i,j,aij
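
loadFLT returns the design-matrix triplets (i, j, aij) and accumulates the
whitened data vector self.bi = sci/unc, i.e. both sides of a weighted
linear system A x ~ b. A minimal, hypothetical sketch of solving such a
system with SciPy (pyLINEAR's actual solver and damping choice are not
shown in this excerpt):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import lsqr

# dummy system standing in for A built from (i, j, aij) and b = self.bi
A = sparse.coo_matrix((np.array([1.0, 2.0, 0.5]),
                       (np.array([0, 1, 2]), np.array([0, 1, 1]))),
                      shape=(3, 2)).tocsr()
b = np.array([1.0, 2.0, 0.5])

# damped least squares; damp acts as a regularization parameter
x = lsqr(A, b, damp=0.1)[0]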
Example #5
File: omt.py  Project: npirzkal/pyLINEAR
    def uniqify(self):
        xyg = indices.unique(self.xyg)
        self.xyg = columns.XYG(xyg)
Example #6
def groupFLT(flt, sources, extconf, path, minarea=0.1):

    # get the polygons for this FLT:
    with h5table.H5Table(flt.dataset, TTYPE, path=path) as h5:
        for detname, detimg in flt:
            h5det = h5[detname]
            detconf = extconf[detname]

            for beam, beamconf in detconf:
                h5beam = h5det[beam]

                ids = []
                polys = []

                for segid, src in sources:
                    # read the DDT
                    ddt = h5table.DDT(src.segid)
                    ddt.readH5(h5beam)
                    if len(ddt) != 0:

                        # collect the points accordingly
                        xyg = ddt.xyg.to_numpy
                        xyg = indices.unique(xyg)
                        x, y = indices.one2two(xyg, detimg.naxis)
                        del xyg

                        # get the vertices
                        xy = convexhull.vertices(x, y)

                        # reform to (x,y) pairs
                        xy = list(zip(*xy))

                        # make into a polygon from Shapely
                        poly = Polygon(xy)

                        # save the results
                        ids.append([segid])
                        polys.append(poly)
    # at this point each source's DDT has been turned into a shapely Polygon

    # group those sources with Shapely math
    data = list(zip(ids, polys))
    nnew = ndata = len(ids)
    if nnew == 0:
        return []

    while nnew != 0:
        groups = []

        while len(data) != 0:
            thisid, thispoly = data.pop(0)

            for i, (testid, testpoly) in enumerate(data):
                inter = thispoly.intersection(testpoly)

                # require the overlap to be a significant fraction of BOTH areas
                r1 = inter.area / testpoly.area
                r2 = inter.area / thispoly.area
                if r1 > minarea and r2 > minarea:
                    # NOTE: popping while iterating skips the next element,
                    # but the outer while-loop repeats until no new merges
                    # occur, so missed pairs are caught on a later pass
                    data.pop(i)  # it was grouped, so remove it from the list

                    # merge the matched polygon and IDs into the current group
                    thispoly = thispoly.union(testpoly)
                    thisid.extend(testid)

            groups.append((thisid, thispoly))
        data = groups
        nnew = ndata - len(data)
        ndata = len(data)

    # get just the IDs
    groups = list(zip(*groups))[0]

    # return a list of sets
    ids = [set(group) for group in groups]

    return ids
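
The grouping criterion above merges two sources only when their dispersed
footprints mutually overlap by more than minarea of each polygon's area.
A minimal sketch of that test with Shapely (the box coordinates are
illustrative):

from shapely.geometry import box

minarea = 0.1
p1 = box(0, 0, 10, 10)   # footprint of source 1
p2 = box(8, 0, 18, 10)   # footprint of source 2, overlapping on the right

inter = p1.intersection(p2).area
# merge only if the overlap is a significant fraction of BOTH footprints
if inter / p1.area > minarea and inter / p2.area > minarea:
    merged = p1.union(p2)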
Example #7
    def fitSource(self, sci, unc, dqa, ddt, beamconf):
        xg, yg = indices.one2two(ddt.xyg, sci.shape)

        # outputs
        flam, fvar, wave, chi2 = [], [], [], []

        skymod = self.models['sky']

        # temporarily remove the sky model; it is restored before returning
        del self.models['sky']
        for xu in indices.unique(xg):
            y, f, u, g = self.crossDispersion(sci, unc, dqa, xg, yg, xu)

            # get moments of the cross dispersion
            w = np.absolute(f)
            yave = np.average(y, weights=w)
            ysig = np.sqrt(np.average((y - yave)**2, weights=w))

            if self.models['source'].form == 'gaussian':
                # update the init
                self.models['source'].p0[0] = np.amax(f)
                self.models['source'].p0[1] = yave
                if ysig > 0.:
                    self.models['source'].p0[2] = ysig

            elif self.models['source'].form == 'tabulated':
                v = np.array(ddt.val)[g]
                tabv, taby = indices.decimate(yg[g], v)
                tabv = tabv / np.sum(tabv)
                self.models['source'].table = {'y': taby, 'v': tabv}

            # update the initial conditions
            p0 = []
            for name, model in self.models.items():
                p0.extend(model.p0)

            # get wavelength values from DDT
            wav = np.array(ddt.wav)[g]
            val = np.array(ddt.val)[g]
            w = np.average(wav, weights=val)

            # curve_fit raises a RuntimeError when it fails to converge;
            # skip this cross-dispersion column in that case
            try:
                p, pcov = optimize.curve_fit(self.totModel, y, f, sigma=u,
                                             p0=p0, absolute_sigma=True)
                r = (f - self.totModel(y, *p)) / u

                # get grism corrections
                sens = beamconf.sensitivity(w) * self.fluxscale  # sensitivity
                disp = beamconf.dispersion(float(xu), yave)  # dispersion
                unit = disp * sens

                # apply the dispersion and sensitivity curve
                f = p[0] / unit
                v = pcov[0, 0] / (unit * unit)

                # save the results
                flam.append(f)
                fvar.append(v)
                wave.append(w)
                chi2.append(np.sum(r * r))
            except RuntimeError:
                pass

        self.models['sky'] = skymod
        return flam, fvar, wave, chi2
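
Both fitSky and fitSource follow the same pattern: seed curve_fit with
moment-based initial guesses and fall back (or skip the column) when the
fit fails to converge. A minimal, self-contained sketch of that pattern
for a single Gaussian (the model and the synthetic data are illustrative):

import numpy as np
from scipy import optimize

def gauss(y, amp, y0, sig):
    # illustrative cross-dispersion model: a single Gaussian
    return amp * np.exp(-0.5 * ((y - y0) / sig)**2)

rng = np.random.default_rng(1)
y = np.arange(20, dtype=float)
u = np.full_like(y, 0.05)                 # per-pixel uncertainties
f = gauss(y, 1.0, 9.5, 2.0) + rng.normal(0.0, 0.05, y.size)

# moment-based initial guesses, as in fitSource above
w = np.abs(f)
yave = np.average(y, weights=w)
ysig = np.sqrt(np.average((y - yave)**2, weights=w))
p0 = [f.max(), yave, ysig]

try:
    p, pcov = optimize.curve_fit(gauss, y, f, sigma=u, p0=p0,
                                 absolute_sigma=True)
except RuntimeError:                      # the fit failed to converge
    p, pcov = p0, None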