def getSpectrum(self, grism, extconf, path):
    fluxs, fvars, waves, chi2s = [], [], [], []

    with h5table.H5Table(grism.dataset, 'ddt', path=path) as h5:
        for detname, detimg in grism:
            # open the detector
            h5det = h5[detname]
            detconf = extconf[detname]

            # read the images
            sci, scihdr = grism.readfits(detconf.sciext, detconf.extver)
            unc, unchdr = grism.readfits(detconf.uncext, detconf.extver)
            dqa, dqahdr = grism.readfits(detconf.dqaext, detconf.extver)

            # loop over the beams
            for beam, beamconf in detconf:
                h5beam = h5det[beam]

                # read the table
                ddt = h5table.DDT(self.segid)
                ddt.readH5(h5beam)

                # apply flat, pam, etc. to sci/unc

                # fit the sky
                sky, box = self.fitSky(sci, unc, dqa, ddt)

                # clean the sky
                cln = self.cleanSky(sky)
                sci[box] -= cln
                del cln, sky

                # fit the object
                vals = self.fitSource(sci, unc, dqa, ddt, beamconf)

                # output the results
                fluxs.extend(vals[0])
                fvars.extend(vals[1])
                waves.extend(vals[2])
                chi2s.extend(vals[3])
                del vals

            # clean up: only free the images once every beam is done,
            # so later beams still see sci/unc/dqa
            del sci, unc, dqa

    return fluxs, fvars, waves, chi2s
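# NOTE (illustrative, not pyLINEAR source): getSpectrum returns flat lists in
# which the same wavelength bin can appear once per beam and detector. A
# caller will typically co-add those repeats; combine_spectrum is a
# hypothetical helper sketching one way to do that with inverse-variance
# weighting. numpy is imported here only to keep the sketch self-contained.
import numpy as np

def combine_spectrum(fluxs, fvars, waves):
    fluxs = np.asarray(fluxs, dtype=float)
    fvars = np.asarray(fvars, dtype=float)
    waves = np.asarray(waves, dtype=float)

    out_w, out_f, out_v = [], [], []
    for w in np.unique(waves):
        g = (waves == w)                  # all samples of this bin
        ivar = 1.0 / fvars[g]             # inverse-variance weights
        out_w.append(w)
        out_f.append(np.sum(fluxs[g] * ivar) / np.sum(ivar))
        out_v.append(1.0 / np.sum(ivar))  # variance of the weighted mean
    return np.array(out_w), np.array(out_f), np.array(out_v)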
def loadBeams(self, h5det, detconf, detimg, unc, gpx, sources, grismFF,
              thresh=-np.inf):
    thresh = np.float64(thresh)

    # output lists; stacked into arrays at the end
    i = []
    j = []
    aij = []
    xyg = []

    # loop over beams in question
    for beam, beamconf in detconf:
        h5beam = h5det[beam]

        # loop over the sources
        for srcindex, (segid, src) in enumerate(sources):
            if self.TTYPE == 'ODT':
                # read the ODT and decimate it to a DDT
                odt = h5table.ODT(src.segid)
                odt.readH5(h5beam)
                ddt = odt.decimate()
                del odt
            elif self.TTYPE == 'DDT':
                # read the DDT
                ddt = h5table.DDT(src.segid)
                ddt.readH5(h5beam)
            else:
                msg = "Invalid Table Type: {}".format(self.TTYPE)
                raise NotImplementedError(msg)

            if len(ddt) != 0:
                # get limits
                limits = src.limits
                wav0 = np.amin(limits)
                wav1 = np.amax(limits)

                # remove pixels out of range and/or in GPX
                xg, yg = indices.one2two(ddt.xyg, detimg.naxis)
                g = np.where((ddt.wav >= wav0) & (ddt.wav <= wav1) &
                             (gpx[yg, xg]) & (ddt.val > thresh))[0]
                if len(g) != 0:
                    # select the items that are good
                    ddt.select(g)
                    xg, yg = xg[g], yg[g]
                    del g

                    # compute the scaling terms
                    ff = grismFF(xg, yg, ddt.wav, detconf.detector)
                    pa = detimg.pixelArea(xg, yg)    # pixel area map
                    sens = beamconf.sensitivity(ddt.wav) * FLUXSCALE

                    # scale the DDT
                    ddt *= (ff * pa * sens)
                    del ff, pa, sens

                    # compute the wavelength indices
                    lamind = np.digitize(ddt.wav, limits) - 1

                    # scale the matrix elements by the uncertainty
                    val = ddt.val / unc[yg, xg]

                    # compute the matrix element
                    iii = ddt.xyg.astype(np.uint64) + \
                        self.imgindex * detimg.npix
                    jjj = lamind + self.cwav[srcindex]
                    ij = jjj + self.npar * iii
                    ij = ij.astype(np.uint64)
                    del iii, jjj

                    # decimate over repeated indices
                    aiju, iju = indices.decimate(ij, val)

                    # compute matrix coordinates
                    iu, ju = np.divmod(iju, self.npar)

                    # compute pixel positions
                    imgind, xygind = np.divmod(iu, detimg.npix)

                    # downtype to save space
                    if self.downtype:
                        iu = iu.astype(np.uint32)
                        ju = ju.astype(np.uint32)
                        aiju = aiju.astype(np.float32)

                    # save the matrix elements
                    i.append(iu)
                    j.append(ju)
                    aij.append(aiju)
                    del iu, aiju

                    # compute the unique positions
                    xygind = indices.unique(xygind)
                    xyg.append(xygind)
                    del xygind

    i = np.hstack(i)
    j = np.hstack(j)
    aij = np.hstack(aij)
    xyg = np.hstack(xyg)

    return i, j, aij, xyg
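# NOTE (illustrative, not pyLINEAR source): the heavy lifting in loadBeams is
# the sparse-matrix bookkeeping. Each (pixel, wavelength) pair is folded into
# a single composite index ij = jjj + npar*iii, and indices.decimate is then
# assumed to sum all values that share an index. Under that assumption, its
# behavior can be reproduced with np.unique and np.add.at:
import numpy as np

def decimate_sketch(ij, val):
    iju, inv = np.unique(ij, return_inverse=True)   # unique composite indices
    aiju = np.zeros(len(iju), dtype=float)
    np.add.at(aiju, inv, val)                       # accumulate duplicates
    return aiju, iju

# Recovering the matrix coordinates is then just np.divmod(iju, npar),
# exactly as done above.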
def simulateWorker(flt, conf, grismconf, grismflat, sources, overwrite=True):
    ''' helper function to facilitate multiprocessing '''

    path = conf['tables']['path']

    # make the output fits file
    hdul = fits.HDUList()

    # get the primary header from the FLT
    #hdr=flt.phdu
    hdr = fits.Header()

    # make a timestamp
    now = datetime.datetime.now()

    # update the PHDU for the output image
    hdr.append(('ORIGIN', 'pyLINEAR', 'how the file was created'), end=True)
    hdr.append(('VERSION', __version__, 'pyLINEAR version'), end=True)
    hdr.append(('DATE', now.strftime("%Y-%m-%d"),
                'date this file was written (yyyy-mm-dd)'), end=True)

    hdr.add_blank(value='', after='DATE')
    hdr.add_blank(value='/ Observational Properties')
    hdr.add_blank(value='')
    hdr.append(('TELESCOP', grismconf.telescope,
                'telescope used to "acquire" data'), end=True)
    hdr.append(('INSTRUME', grismconf.camera,
                'instrument used to "acquire" data'), end=True)
    hdr.append(('DETECTOR', grismconf.instrument, 'detector in use'), end=True)
    hdr.append(('ROOTNAME', flt.dataset, 'rootname of the observation set'),
               end=True)
    hdr.append(('OBSTYPE', 'SPECTROSCOPIC',
                'observation type - imaging or spectroscopic'), end=True)
    hdr.append(('FILTER', grismconf.grism,
                'element selected from filter wheel'), end=True)

    hdr.add_blank(value='', after='FILTER')
    hdr.add_blank(value='/ Simulation Properties')
    hdr.add_blank(value='')
    hdr.append(('NSOURCE', len(sources), 'number of simulated sources'),
               end=True)
    hdr.append(('SEGMAP', sources.segmap, 'segmentation map'), end=True)
    hdr.append(('DETIMG', sources.obsdata.detImage, 'detection image'),
               end=True)

    hdr.add_blank(value='', after='DETIMG')
    hdr.add_blank(value='/ Noise Properties')
    hdr.add_blank(value='')
    hdr.append(('NOISE', conf['noise']['perform'], 'is noise added?'),
               end=True)
    if conf['noise']['perform']:
        hdr.append(('SKYRATE', conf['noise']['skyrate'],
                    'sky count rate [e-/s]'), end=True)
        hdr.append(('EXPTIME', conf['noise']['exptime'],
                    'exposure time [s]'), end=True)
        after = 'EXPTIME'
    else:
        after = 'NOISE'

    hdr.add_blank(value='', after=after)
    hdr.add_blank(value='/ YAML Input')
    hdr.add_blank(value='')
    hdr.add_blank(value='')
    for value in conf:
        hdr.add_comment(value=value)

    # put this in the FITS file
    hdul.append(fits.PrimaryHDU(header=hdr))

    # open the H5table
    with h5table.H5Table(flt.dataset, path=path, suffix=TTYPE) as h5:
        # loop over detectors within an FLT
        for detname, det in flt:
            detgrp = h5[detname]
            detconf = grismconf[detname]

            # get the EXTVER, which describes which detector this is
            extver = detconf.extver

            # create an empty array
            sci = np.zeros(np.flip(det.naxis, 0), dtype=SCITYPE)

            for beam, beamconf in detconf:
                beamgrp = detgrp[beam]
                for segid, src in sources:
                    if TTYPE == 'odt':
                        odt = h5table.ODT(segid)
                        odt.readH5(beamgrp)
                        ddt = odt.decimate()
                        del odt
                    elif TTYPE == 'ddt':
                        ddt = h5table.DDT(segid)
                        ddt.readH5(beamgrp)
                    else:
                        raise NotImplementedError("Invalid TTYPE")

                    if len(ddt) != 0:
                        # compute the (x,y) pair for each val in the DDT
                        xg, yg = indices.one2two(ddt.xyg, det.naxis)

                        # get scaling terms
                        s = beamconf.sensitivity(ddt.wav)
                        f = src.sed.interpolate(ddt.wav)
                        p = det.pixelArea(xg, yg)
                        ff = grismflat(xg, yg, ddt.wav, detname)

                        # scale the DDT
                        ddt *= (s * f * p * ff)
                        del s, f, p, ff

                        # sum over pixels
                        val, xyu = indices.decimate(ddt.xyg, ddt.val)
                        del ddt

                        # get unique coordinates
                        xg, yg = indices.one2two(xyu, det.naxis)
                        del xyu

                        # put flux in the image
                        sci[yg, xg] += val
                        del val

            # update the SCI image for noise and make an UNC image
            sci, unc = addNoise(conf['noise'], sci)

            # create a DQA image (set to all 0)
            dqa = np.full_like(sci, 0, dtype=DQATYPE)

            # the SCI image
            hdr = det.mkhdr(SCITYPE, extname=detconf.sciext, extver=extver)
            hdul.append(fits.ImageHDU(data=sci, header=hdr))

            # the UNC image
            hdr = det.mkhdr(UNCTYPE, extname=detconf.uncext, extver=extver)
            hdul.append(fits.ImageHDU(data=unc, header=hdr))

            # the DQA image
            hdr = det.mkhdr(DQATYPE, extname=detconf.dqaext, extver=extver)
            hdul.append(fits.ImageHDU(data=dqa, header=hdr))

    # output the file
    outfile = flt.filename
    print('writing simulated image {}'.format(outfile))
    hdul.writeto(outfile, overwrite=overwrite)

    # do we gzip?
    if conf['gzip']:
        gzip.gzip(outfile)
        outfile += '.gz'

    return outfile
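# NOTE (illustrative, not pyLINEAR source): simulateWorker defers to addNoise
# to turn the noiseless rate image into a SCI/UNC pair. addNoise_sketch below
# is a hypothetical stand-in, assuming only a flat sky background and Poisson
# counting noise; the real routine may also include read noise, dark current,
# etc.
import numpy as np

def addNoise_sketch(noisepars, sci, seed=None):
    if not noisepars['perform']:
        return sci, np.ones_like(sci)

    rng = np.random.default_rng(seed)
    sky = noisepars['skyrate']     # sky count rate [e-/s]
    time = noisepars['exptime']    # exposure time [s]

    # draw Poisson counts for source+sky, then convert back to a rate
    counts = rng.poisson(np.maximum(sci + sky, 0.0) * time).astype(float)
    unc = np.sqrt(counts) / time   # 1-sigma uncertainty [e-/s]
    sci = counts / time - sky      # sky-subtracted count rate [e-/s]
    return sci, unc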
def groupFLT(flt, sources, extconf, path, minarea=0.1):
    # get the polygons for this FLT
    with h5table.H5Table(flt.dataset, TTYPE, path=path) as h5:
        for detname, detimg in flt:
            h5det = h5[detname]
            detconf = extconf[detname]
            for beam, beamconf in detconf:
                h5beam = h5det[beam]

                ids = []
                polys = []
                for segid, src in sources:
                    # read the DDT
                    ddt = h5table.DDT(src.segid)
                    ddt.readH5(h5beam)
                    if len(ddt) != 0:
                        # collect the points accordingly
                        xyg = ddt.xyg.to_numpy()
                        xyg = indices.unique(xyg)
                        x, y = indices.one2two(xyg, detimg.naxis)
                        del xyg

                        # get the vertices
                        xy = convexhull.vertices(x, y)

                        # reform to (x,y) pairs
                        xy = list(zip(*xy))

                        # make into a polygon from Shapely
                        poly = Polygon(xy)

                        # save the results
                        ids.append([segid])
                        polys.append(poly)

    # At this point, we've made shapely.Polygons out of a given DDT

    # group those sources with Shapely math
    data = list(zip(ids, polys))
    nnew = ndata = len(ids)
    if nnew == 0:
        # no objects to group for this FLT
        return []

    while nnew != 0:
        groups = []
        while len(data) != 0:
            thisid, thispoly = data.pop(0)

            # walk the remaining polygons; index-based so we can pop safely
            i = 0
            while i < len(data):
                testid, testpoly = data[i]

                # fractional intersection areas
                inter = thispoly.intersection(testpoly)
                r1 = inter.area / testpoly.area
                r2 = inter.area / thispoly.area
                if r1 > minarea and r2 > minarea:
                    # it was grouped, so remove it from the list
                    data.pop(i)

                    # update this group
                    thispoly = thispoly.union(testpoly)
                    thisid.extend(testid)
                else:
                    i += 1

            groups.append((thisid, thispoly))

        data = groups
        nnew = ndata - len(data)
        ndata = len(data)

    # get just the IDs
    groups = list(zip(*groups))[0]

    # return a list of sets
    ids = [set(group) for group in groups]
    return ids
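# NOTE (illustrative, not pyLINEAR source): groupFLT returns the groups for a
# single exposure. When the same sources appear in several FLTs, the
# per-exposure ID sets still have to be merged into disjoint global groups;
# mergeGroups is a hypothetical sketch of that set-union step.
def mergeGroups(list_of_idsets):
    groups = []
    for ids in list_of_idsets:
        ids = set(ids)
        keep = []
        for g in groups:
            if g & ids:
                ids |= g           # absorb any overlapping group
            else:
                keep.append(g)
        keep.append(ids)
        groups = keep
    return groups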