Example 1
def computeResiduals(conf, grisms, grismconf, mat, results):
    if not conf['perform']:
        return
    print("[info]computing the residuals")

    # compute the model
    model = mat.A.matvec(results.x)

    # get the image and xy indices
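    # each entry of mat.iuniq packs an (image, pixel) pair as
    # image*npix + pixel, so a single divmod unpacks both at once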
    imgindex, pixindex = divmod(mat.iuniq, mat.npix)
    index = 0  # a counter
    for fltfile, flt in grisms:
        # make some output array
        hdul = fits.HDUList()

        # process each detector within the FLT
        for detname, det in flt:
            detconf = grismconf[detname]

            # get the pixels if there are some valid ones
            g = np.where(imgindex == index)[0]
            if len(g) != 0:
                # read the images
                sci, scihdr = flt.readfits(detconf.sciext, detconf.extver)
                unc, unchdr = flt.readfits(detconf.uncext, detconf.extver)
                dtype = np.dtype(sci.dtype.name)

                # make a zeroth extension
                hdr = fits.Header()
                hdul.append(fits.PrimaryHDU(header=hdr))

                # put the science image in for safe keeping
                hdul.append(fits.ImageHDU(data=sci, header=scihdr))

                # make a model image
                mod = np.zeros_like(sci)
                modhdr = det.mkhdr(dtype, extname='MOD', extver=detconf.extver)

                # get the (x, y) coordinates
                x, y = indices.one2two(pixindex[g], det.naxis)
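                # the solver works on uncertainty-normalized pixels, so
                # multiply by the UNC image to put the model back into
                # the units of the SCI image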
                mod[y, x] = (model[g] * unc[y, x])
                hdul.append(fits.ImageHDU(data=mod, header=modhdr))

                # make the residual image
                res = sci - mod
                reshdr = det.mkhdr(dtype, extname='RES', extver=detconf.extver)
                hdul.append(fits.ImageHDU(data=res, header=reshdr))

        outfile = '{}_res.fits'.format(flt.dataset)
        hdul.writeto(outfile, overwrite=True)
        if conf['gzip']:
            gzip.gzip(outfile)

        index += 1
Example 2
    def fromClassic(self, conf, seglist, imglist):
        ''' load sources via a classic segmentation map '''
        print('[info]Loading sources from CLASSIC segmentation map')

        # load the images
        seg = fitsimage.FitsImage()
        seg.loadHDU(seglist[0])

        img = fitsimage.FitsImage()
        img.loadHDU(imglist[0])

        # get the reverse indices (this step is potentially slow)
        revind = indices.reverse(seg.data.astype(self.SEGTYPE))
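        # revind is a sequence of (segid, flat-pixel-indices) pairs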
        if revind[0][0] == 0:
            del revind[0]  # remove the sky index from the segmentation

        # get a progress bar
        pb = progressbar.ProgressBar(len(revind))

        # get the detection filter
        detzpt = self.obsdata.detZeropoint

        # process each index
        for segid, ri in revind:
            # set the prefix
            pb.prefix = self.PREFIX.format(segid)

            # compute (x,y) pairs
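            # (indices.one2two unravels flat 1-D pixel indices into x and
            # y coordinates, given the image dimensions)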
            x, y = indices.one2two(ri, seg.naxis)

            # get bounding box
            x0, x1 = np.amin(x), np.amax(x)
            y0, y1 = np.amin(y), np.amax(y)

            # call something like hextract
            subseg = seg.extract(x0, x1, y0, y1)
            subimg = img.extract(x0, x1, y0, y1)

            # put the segID in the header
            subseg['SEGID'] = segid

            # create the source
            self[segid] = Source(subimg, subseg, detzpt, segid=segid,
                                 maglim=conf['maglim'], minpix=conf['minpix'])

            # update the progress bar
            pb.increment()
Example 3
    def fitSky(self, sci, unc, dqa, ddt):

        xg, yg = indices.one2two(ddt.xyg, sci.shape)

        y0 = max(np.amin(yg) - self.skypars['width'], 0)
        y1 = min(np.amax(yg) + self.skypars['width'], sci.shape[1] - 1)
        x0, x1 = np.amin(xg), np.amax(xg)
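        # the cutout spans the full x-range of the object, padded in y by
        # the sky-fitting width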
        box = slice(y0, y1 + 1), slice(x0, x1 + 1)

        sky = np.zeros([y1 - y0 + 1, x1 - x0 + 1])
        yy = np.arange(y0, y1 + 1)

        for j, xu in enumerate(indices.unique(xg)):
            y, f, u, g = self.crossDispersion(sci, unc, dqa, xg, yg, xu)

            if self.models['source'].form == 'gaussian':
                w = np.absolute(f)
                yave = np.average(y, weights=w)
                ysig = np.sqrt(np.average((y - yave)**2, weights=w))

                self.models['source'].p0[0] = np.amax(f)
                self.models['source'].p0[1] = yave
                if ysig > 0.:
                    self.models['source'].p0[2] = ysig
            elif self.models['source'].form == 'tabulated':
                v = np.array(ddt.val)[g]
                tabv, taby = indices.decimate(yg[g], v)
                tabv = tabv / np.sum(tabv)
                self.models['source'].table = {'y': taby, 'v': tabv}

            # update the initial conditions
            p0 = []
            for name, model in self.models.items():
                p0.extend(model.p0)

            # curve_fit can fail to converge; fall back to the initial guess
            try:
                p, pcov = optimize.curve_fit(self.totModel, y, f, sigma=u,
                                             p0=p0, absolute_sigma=True)
            except (RuntimeError, ValueError):
                p = p0

            # evaluate the fitted sky model for this column; its parameters
            # are the last npar entries of the parameter vector
            sky[:, j] = self.models['sky'](yy, p[-self.models['sky'].npar:])

        return sky, box
Example 4
    def maskBeams(self,flt,mskconf,path):

        masks={}
        if len(mskconf.beams)!=0:
            print("[info]Making beam mask for: {}".format(flt.filename))
            with h5table.H5Table(flt.dataset,path=path,suffix='omt') as h5:
                
                # loop over detectors
                for detname,detimg in flt:
                    h5det=h5[detname]              # get the group
                    detconf=mskconf[detname]       # grism config
                    mask=np.ones(detimg.naxis,dtype=bool)
                    for beam,beamconf in detconf:
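                        # unset the mask for every pixel covered by this beam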

                        h5beam=h5det[beam]
                        for segid in h5beam:
                            xyg=h5beam[segid][:]
                            xg,yg=indices.one2two(xyg,detimg.naxis)
                            mask[yg,xg]=False
                    masks[detname]=mask
        return masks
Example 5
    def loadBeams(self, h5det, detconf, detimg, unc, gpx, sources, grismFF,
                  thresh=-np.inf):
        thresh=np.float64(thresh)
        
        # output lists; matrix triplets are accumulated in Python lists
        # and stacked into arrays once at the end (appending to numpy
        # arrays inside the loop would repeatedly reallocate)
        i = []
        j = []
        aij = []
        xyg = []
        
        
        # loop over beams in question
        for beam,beamconf in detconf:
            h5beam=h5det[beam]
            
            # loop over the sources
            for srcindex,(segid,src) in enumerate(sources):

                if self.TTYPE=='ODT':                 # Read the ODT
                    odt=h5table.ODT(src.segid)
                    odt.readH5(h5beam)
                    ddt=odt.decimate()
                    del odt
                elif self.TTYPE=='DDT':               # Read the DDT
                    ddt=h5table.DDT(src.segid)
                    ddt.readH5(h5beam)
                else:
                    msg="Invalid Table Type: {}".format(self.TTYPE)
                    raise NotImplementedError(msg)

                if len(ddt)!=0:
                    
                    # get limits 
                    limits=src.limits
                    wav0=np.amin(limits)
                    wav1=np.amax(limits)
                    
                    # remove pixels out of range and/or in GPX
                    xg,yg=indices.one2two(ddt.xyg,detimg.naxis)
                    g=np.where((ddt.wav >= wav0) & (ddt.wav <= wav1) &
                               (gpx[yg,xg]) & (ddt.val > thresh))[0]
                    if len(g)!=0:
                        # select the items that are good
                        ddt.select(g)
                        xg,yg=xg[g],yg[g]
                        del g
                                                
                        # compute the scaling terms
                        ff=grismFF(xg,yg,ddt.wav,detconf.detector)
                        pa=detimg.pixelArea(xg,yg)    # pixel area map
                        sens=beamconf.sensitivity(ddt.wav)*FLUXSCALE
                        
                        
                        # scale the DDT
                        ddt*=(ff*pa*sens)
                        del ff,pa,sens
                                                
                        # compute the wavelength indices
                        lamind=np.digitize(ddt.wav,limits)-1
                        
                        # scale the matrix elements by the uncertainty
                        val=ddt.val/unc[yg,xg]
                        
                        # compute the matrix element
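                        # pack the (pixel, parameter) pair into one 1-D
                        # index, ij = j + npar*i, so repeated entries can be
                        # summed by decimate and unpacked below with divmod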
                        iii=(ddt.xyg.astype(np.uint64)+
                             self.imgindex*detimg.npix)
                        jjj=lamind+self.cwav[srcindex]
                        ij=jjj+self.npar*iii
                        ij=ij.astype(np.uint64)
                        del iii,jjj
                        
                        # decimate over repeated indices
                        aiju,iju=indices.decimate(ij,val)
                        
                        # compute matrix coordinates
                        iu,ju=np.divmod(iju,self.npar)
                    
                        # compute pixel positions
                        imgind,xygind=np.divmod(iu,detimg.npix)

                        # downtype to save space
                        if self.downtype:
                            iu=iu.astype(np.uint32)
                            ju=ju.astype(np.uint32)
                            aiju=aiju.astype(np.float32)

                        
                        # save the matrix elements
                        i.append(iu)
                        j.append(ju)
                        aij.append(aiju)
                        del iu,aiju
                        
                        # compute the unique pixel positions
                        xygind=indices.unique(xygind)
                        xyg.append(xygind)
                        del xygind
                                                
        i = np.hstack(i)
        j = np.hstack(j)
        aij = np.hstack(aij)
        xyg = np.hstack(xyg)
        return i,j,aij,xyg
Example 6
    def loadFLT(self,flt,sources,extconf,mskconf,grismFF,pb,path):

        # output stuff
        i = []
        j = []
        aij = []
        
        # make mask for this FLT
        masks=self.maskBeams(flt,mskconf,path)
        # open the H5Table
        with h5table.H5Table(flt.dataset,self.TTYPE,path=path) as h5:
            # loop over detectors
            for detname,detimg in flt:
                h5det=h5[detname]              # get the group
                detconf=extconf[detname]     # grism config

                # save this for easy access later
                self.npix=detimg.npix
                
                # read the images
                sci,scihdr=flt.readfits(detconf.sciext,detconf.extver)
                unc,unchdr=flt.readfits(detconf.uncext,detconf.extver)
                dqa,dqahdr=flt.readfits(detconf.dqaext,detconf.extver)
                xyg=[]         # a container

                # make a good pixel mask
                gpx=(dqa == 0) & (unc > 0)
                if len(masks)!=0:
                    gpx &= masks[detname]
                del dqa,dqahdr,unchdr      # don't need these anymore
                
                # load the beams for this detector
                data=self.loadBeams(h5det,detconf,detimg,unc,gpx,sources,\
                                    grismFF)
                self.imgindex+=1

                # collect the results
                if len(data[3])!=0:
                    # collect the matrix terms
                    i.append(data[0])
                    j.append(data[1])
                    aij.append(data[2])

                    # compute pixel (x,y) pairs
                    xyg=indices.unique(np.array(data[3]))

                    # indices.unique does not sort its output the way the
                    # original uniqify routine (by R. Ryan) did, so sort
                    # explicitly here (a sort flag in indices.unique would
                    # also do)
                    xyg=np.sort(xyg)
                    
                    xg,yg=indices.one2two(xyg,detimg.naxis)
                    xg=xg.astype(int)
                    yg=yg.astype(int)
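                    # right-hand-side vector in uncertainty-normalized
                    # units, matching the 1/unc scaling of the matrix
                    # elements in loadBeams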
                    bi=sci[yg,xg]/unc[yg,xg]
                    del xg,yg     # clean up memory usage
                    
                    
                    # check for bad values in bi
                    g=np.where(np.isinf(bi))[0]
                    if len(g)!=0:
                        print('[warn]Infinite values in bi; is UNC image ok?')
                        print(bi[g])
                        raise RuntimeError("Infinite values. aborting.")

                    # append to the running data vector (like IDL's push)
                    self.bi = np.hstack((self.bi,bi))
                    del bi    # again, every little bit helps
                    
                # save the memory usage
                del data
        i = np.hstack(i)
        j = np.hstack(j)
        aij = np.hstack(aij)

        return i,j,aij
Example 7
def simulateWorker(flt, conf, grismconf, grismflat, sources, overwrite=True):
    ''' helper function to facilitate multiprocessing '''

    path = conf['tables']['path']

    # make the output fits file
    hdul = fits.HDUList()

    # make a fresh primary header for the output image
    hdr = fits.Header()

    # make a timestamp
    now = datetime.datetime.now()

    # update the PHDU for the output image
    hdr.append(('ORIGIN', 'pyLINEAR', 'how the file was created'), end=True)
    hdr.append(('VERSION', __version__, 'pyLINEAR version'), end=True)
    hdr.append(('DATE', now.strftime("%Y-%m-%d"),
                'date this file was written (yyyy-mm-dd)'), end=True)

    hdr.add_blank(value='', after='DATE')
    hdr.add_blank(value='/ Observational Properties')
    hdr.add_blank(value='')
    hdr.append(('TELESCOP', grismconf.telescope,
                'telescope used to "acquire" data'), end=True)
    hdr.append(('INSTRUME', grismconf.camera,
                'instrument used to "acquire" data'), end=True)
    hdr.append(('DETECTOR', grismconf.instrument, 'detector in use'), end=True)
    hdr.append(('ROOTNAME', flt.dataset, 'rootname of the observation set'),
               end=True)
    hdr.append(('OBSTYPE', 'SPECTROSCOPIC',
                'observation type - imaging or spectroscopic'), end=True)
    hdr.append(('FILTER', grismconf.grism,
                'element selected from filter wheel'), end=True)

    hdr.add_blank(value='', after='FILTER')
    hdr.add_blank(value='/ Simulation Properties')
    hdr.add_blank(value='')
    hdr.append(('NSOURCE', len(sources), 'number of simulated sources'),
               end=True)
    hdr.append(('SEGMAP', sources.segmap, 'segmentation map'), end=True)
    hdr.append(('DETIMG', sources.obsdata.detImage, 'detection image'),
               end=True)

    hdr.add_blank(value='', after='DETIMG')
    hdr.add_blank(value='/ Noise Properties')
    hdr.add_blank(value='')
    hdr.append(('NOISE', conf['noise']['perform'], 'is noise added?'),
               end=True)
    if conf['noise']['perform']:
        hdr.append(('SKYRATE', conf['noise']['skyrate'],
                    'sky count rate [e-/s]'), end=True)
        hdr.append(('EXPTIME', conf['noise']['exptime'],
                    'exposure time [s]'), end=True)
        after = 'EXPTIME'
    else:
        after = 'NOISE'

    hdr.add_blank(value='', after=after)
    hdr.add_blank(value='/ YAML Input')
    hdr.add_blank(value='')
    hdr.add_blank(value='')

    for value in conf:
        hdr.add_comment(value=value)

    # put this in the FITS FILE
    hdul.append(fits.PrimaryHDU(header=hdr))

    # open the H5table
    with h5table.H5Table(flt.dataset, path=path, suffix=TTYPE) as h5:
        # loop over detectors within an FLT
        for detname, det in flt:

            detgrp = h5[detname]

            detconf = grismconf[detname]

            # get the EXTVER, which describes which detector this is
            extver = detconf.extver

            # create an empty array
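            # (det.naxis is FITS-ordered (nx, ny) while numpy arrays are
            # (ny, nx), hence the flip)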
            sci = np.zeros(np.flip(det.naxis, 0), dtype=SCITYPE)

            for beam, beamconf in detconf:
                beamgrp = detgrp[beam]

                for segid, src in sources:
                    if TTYPE == 'odt':
                        odt = h5table.ODT(segid)
                        odt.readH5(beamgrp)
                        ddt = odt.decimate()
                        del odt
                    elif TTYPE == 'ddt':
                        ddt = h5table.DDT(segid)
                        ddt.readH5(beamgrp)
                    else:
                        raise NotImplementedError("Invalid TTYPE")

                    if len(ddt) != 0:

                        # compute the (x,y) pair for each val in the DDT
                        xg, yg = indices.one2two(ddt.xyg, det.naxis)

                        # get scaling terms
                        s = beamconf.sensitivity(ddt.wav)
                        f = src.sed.interpolate(ddt.wav)
                        p = det.pixelArea(xg, yg)
                        ff = grismflat(xg, yg, ddt.wav, detname)

                        # scale the DDT
                        ddt *= (s * f * p * ff)
                        del s, f, p, ff

                        # sum over pixels
                        val, xyu = indices.decimate(ddt.xyg, ddt.val)
                        del ddt

                        # get unique coordinates
                        xg, yg = indices.one2two(xyu, det.naxis)
                        del xyu

                        # put flux in the image
                        sci[yg, xg] += val
                        del val

            # update the SCI image for noise and make an UNC image
            sci, unc = addNoise(conf['noise'], sci)

            # create a DQA image (set to all 0)
            dqa = np.full_like(sci, 0, dtype=DQATYPE)

            # the SCI image
            hdr = det.mkhdr(SCITYPE, extname=detconf.sciext, extver=extver)
            hdul.append(fits.ImageHDU(data=sci, header=hdr))

            # the UNC image
            hdr = det.mkhdr(UNCTYPE, extname=detconf.uncext, extver=extver)
            hdul.append(fits.ImageHDU(data=unc, header=hdr))

            # the DQA image
            hdr = det.mkhdr(DQATYPE, extname=detconf.dqaext, extver=extver)
            hdul.append(fits.ImageHDU(data=dqa, header=hdr))

    # output the file
    outfile = flt.filename
    print('writing simulated image {}'.format(outfile))
    hdul.writeto(outfile, overwrite=overwrite)

    # do we gzip?
    if conf['gzip']:
        gzip.gzip(outfile)
        outfile += '.gz'

    return outfile
Example 8
def groupFLT(flt, sources, extconf, path, minarea=0.1):

    # get the polygons for this FLT:
    with h5table.H5Table(flt.dataset, TTYPE, path=path) as h5:
        for detname, detimg in flt:
            h5det = h5[detname]
            detconf = extconf[detname]

            for beam, beamconf in detconf:
                h5beam = h5det[beam]

                ids = []
                polys = []

                for segid, src in sources:
                    # read the DDT
                    ddt = h5table.DDT(src.segid)
                    ddt.readH5(h5beam)
                    if len(ddt) != 0:

                        # collect the points accordingly
                        xyg = ddt.xyg.to_numpy()
                        xyg = indices.unique(xyg)
                        x, y = indices.one2two(xyg, detimg.naxis)
                        del xyg

                        # get the vertices
                        xy = convexhull.vertices(x, y)

                        # reform to (x,y) pairs
                        xy = list(zip(*xy))

                        # make into a polygon from Shapely
                        poly = Polygon(xy)

                        # save the results
                        ids.append([segid])
                        polys.append(poly)
    # At this point, each DDT has been turned into a shapely Polygon

    # group those sources with Shapely math
    data = list(zip(ids, polys))
    nnew = ndata = len(ids)
    if nnew == 0:
        return []

    while nnew != 0:
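        # each pass merges mutually-overlapping polygons; iterate until a
        # pass produces no new merges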
        groups = []

        while len(data) != 0:
            thisid, thispoly = data.pop(0)

            for i, (testid, testpoly) in enumerate(data):
                inter = thispoly.intersection(testpoly)

                r1 = inter.area / testpoly.area
                r2 = inter.area / thispoly.area
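                # merge only if the overlap is a significant fraction of
                # BOTH polygons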
                if r1 > minarea and r2 > minarea:
                    data.pop(i)  # it was grouped, so remove it from the list

                    # merge the matched polygon and IDs into the current group
                    thispoly = thispoly.union(testpoly)
                    thisid.extend(testid)

            groups.append((thisid, thispoly))
        data = groups
        nnew = ndata - len(data)
        ndata = len(data)

    # get just the IDs
    groups = list(zip(*groups))[0]

    # return a list of sets
    ids = [set(group) for group in groups]

    return ids
Example 9
    def fitSource(self, sci, unc, dqa, ddt, beamconf):
        xg, yg = indices.one2two(ddt.xyg, sci.shape)

        # outputs
        flam, fvar, wave, chi2 = [], [], [], []

        skymod = self.models['sky']

        # temporarily remove the sky model from the fit
        del self.models['sky']
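        # (it is restored just before returning, below)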
        for xu in indices.unique(xg):
            y, f, u, g = self.crossDispersion(sci, unc, dqa, xg, yg, xu)

            # get moments of the cross dispersion
            w = np.absolute(f)
            yave = np.average(y, weights=w)
            ysig = np.sqrt(np.average((y - yave)**2, weights=w))

            if self.models['source'].form == 'gaussian':
                # update the init
                self.models['source'].p0[0] = np.amax(f)
                self.models['source'].p0[1] = yave
                if ysig > 0.:
                    self.models['source'].p0[2] = ysig

            elif self.models['source'].form == 'tabulated':
                v = np.array(ddt.val)[g]
                tabv, taby = indices.decimate(yg[g], v)
                tabv = tabv / np.sum(tabv)
                self.models['source'].table = {'y': taby, 'v': tabv}

            # update the initial conditions
            p0 = []
            for name, model in self.models.items():
                p0.extend(model.p0)

            # get wavelength values from DDT
            wav = np.array(ddt.wav)[g]
            val = np.array(ddt.val)[g]
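            # flux-weighted mean wavelength assigned to this column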
            w = np.average(wav, weights=val)

            # curve_fit can fail to converge; skip this column if it does
            try:
                p, pcov = optimize.curve_fit(self.totModel, y, f, sigma=u,
                                             p0=p0, absolute_sigma=True)
                r = (f - self.totModel(y, *p)) / u

                # get grism corrections
                sens = beamconf.sensitivity(w) * self.fluxscale  # sensitivity
                disp = beamconf.dispersion(float(xu), yave)  # dispersion
                unit = disp * sens

                # apply the dispersion and sensitivity curve
                f = p[0] / unit
                v = pcov[0, 0] / (unit * unit)

                # save the results
                flam.append(f)
                fvar.append(v)
                wave.append(w)
                chi2.append(np.sum(r * r))
            except (RuntimeError, ValueError):
                pass

        self.models['sky'] = skymod
        return flam, fvar, wave, chi2