def load_skyspect(fn = r'T:\data\galprop\ring_21month_P6v11.fits',
                  # r'D:\fermi\data\galprop\gll_iem_v02.fit',
                  nside=192,
                  show_kw = dict(fun=np.log10, cmap='hot'),
                  ):
    """ load a galactic diffuse distribution.
        Save the HEALpix representation at an energy (1 GeV default)

        fn      : string
            filename for the FITS representation of a SkySpectrum
        nside   : int
            HEALpix nside to use for the representation -- note that 192 is 12*16,
            about 0.25 deg
        show_kw : dict
            fun: weighting function, cmap, vmin, vmax
    """
    t = SkyImage(fn)
    galname = os.path.split(fn)[-1]
    print '%s: nx, ny, layers: %d %d %d' % (galname, t.naxis1(), t.naxis2(), t.layers())
    hpdir = Band(nside).dir
    dmap = map(lambda i: t(hpdir(i)), xrange(12*nside**2))
    tdm = DisplayMap(dmap)
    tdm.fill_ait(fignum=12, source_kw=dict(edgecolor='w'), show_kw=show_kw)
    plt.title(galname + ' (1 GeV)')
    sfn = galname.split('.')[0] + '.png'
    plt.savefig(sfn, bbox_inches='tight', pad_inches=0)
    print 'saved figure to %s' % sfn
    return tdm
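# The "about 0.25 deg" note in the docstring above can be checked from standard
# HEALPix geometry.  This small helper is a sketch added here for illustration
# (not part of the original module); it assumes numpy is imported as np, as in
# the function above, and returns the square root of the pixel solid angle.
def healpix_pixel_scale_deg(nside):
    npix = 12 * nside**2                          # number of HEALPix pixels
    return np.degrees(np.sqrt(4 * np.pi / npix))  # ~0.3 deg for nside=192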
class HPfitscube(HPskyfun):
    """ generate from a FITS cube """
    def __init__(self, name, filename, nside=512,):
        self.skyimage = SkyImage(filename)
        super(HPfitscube, self).__init__(name, self.skyimage, nside)

    def set_layer(self, n):
        # use 1-based indexing for the layer number
        return self.skyimage.setLayer(n - 1) + 1

    def layers(self):
        return self.skyimage.layers()
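# Minimal usage sketch (not from the original source): instantiate HPfitscube
# from a FITS cube and step through its layers using the 1-based convention
# defined above.  The file path and name are placeholders.
cube = HPfitscube('galdiffuse', '/path/to/diffuse_cube.fits', nside=256)
for layer in range(1, cube.layers() + 1):
    cube.set_layer(layer)  # select one energy plane of the cube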
def __init__(self, filename):
    t = os.path.split(os.path.splitext(filename)[0])[-1].split('_')
    self.sourcename = ' '.join(t[:-1]).replace('p', '+')
    self.df = df = SkyImage(filename)
    wcs = df.projector()
    self.tsmap = np.array(df.image())
    nx, ny = df.naxis1(), df.naxis2()
    assert nx == ny, 'Array not square?'
    vals = np.exp(-0.5 * self.tsmap**2)  # convert to likelihood from TS
    norm = 1. / sum(vals)
    self.peak_fract = norm * vals.max()
    center, variance = self.moments_analysis(vals)
    ra, dec = wcs.pix2sph(center[1], center[0])
    self.peak = SkyDir(ra, dec)
    self.scale = wcs.pix2sph(center[0], center[1] + 1)[1] - dec
    self.size = nx * self.scale
    self.variance = self.scale**2 * variance
    rac, decc = wcs.pix2sph(nx / 2, ny / 2)
    self.offset = np.degrees(self.peak.difference(SkyDir(rac, decc)))
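# The moments_analysis helper used above is referenced but not shown here.  A
# minimal sketch of what such a weighted first/second-moment estimate over the
# pixel grid might look like (an assumed implementation, not the original code;
# it assumes the flat likelihood array covers a square n-by-n image):
def moments_analysis(vals):
    n = int(np.sqrt(vals.size))
    img = vals.reshape(n, n)
    iy, ix = np.mgrid[0:n, 0:n]                  # pixel index grids
    norm = img.sum()
    cy = (img * iy).sum() / norm                 # weighted centroid, y (row)
    cx = (img * ix).sum() / norm                 # weighted centroid, x (column)
    var = (img * ((iy - cy)**2 + (ix - cx)**2)).sum() / norm  # second moment
    return (cy, cx), var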
Author: Joshua Lande <*****@*****.**>
"""
from shutil import copy
from os.path import expandvars

from skymaps import SkyImage

from simulate import get_catalog, get_spatial

# First, load 2FGL w44
catalog = get_catalog()
w44 = catalog.get_source('W44')
w44_spatial_map = w44.spatial_model
w44_file = expandvars(w44_spatial_map.file)

# Define a new analytic shape
w44_ring = get_spatial('EllipticalRing')

# Create a new spatial map with the same binning as the 2FGL template,
# but filled with the new analytic shape
new_template = 'pointlike_ring_template.fits'
copy(w44_file, new_template)
x = SkyImage(new_template)
x.fill(w44_ring.get_PySkyFunction())
x.save()

print 'original', w44_file
print 'new', new_template
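# Optional sanity check (a sketch, not part of the original script): re-open the
# template just written and inspect its dimensions and peak value, using only
# SkyImage methods that appear elsewhere in this code (layers, naxis1, naxis2,
# image).  Assumes numpy is available.
import numpy as np
check = SkyImage(new_template)
print 'layers, naxis1, naxis2:', check.layers(), check.naxis1(), check.naxis2()
print 'maximum pixel value:', np.array(check.image()).max()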
class ROIImage(object):
    """ This object is suitable for creating a SkyImage object
        and filling it with some physically meaningful quantity
        gotten from an ROIAnalysis object.
        The actual work is done by subclasses. """

    defaults = (
        ('size',      2,     'size of image in degrees'),
        ('pixelsize', 0.1,   'size, in degrees, of pixels'),
        ('galactic',  False, 'galactic or equatorial coordinates'),
        ('proj',      'ZEA', 'projection name: can change if desired'),
        ('center',    None,  'Center of image. If None, use roi center.'),
        ('conv_type', -1,    'Conversion type'),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, roi, **kwargs):
        """ Note, unlike ZEA, can support non-square images. To specify
            a nonsquare image, set the size parameter to a length-two tuple:

                size=(10,5) # dx=10 degrees, dy=5 degrees.
        """
        keyword_options.process(self, kwargs)

        if self.size < self.pixelsize:
            raise Exception("Can only create images with >=1 pixel in them.")

        self.roi = roi

        self.selected_bands = tuple(self.roi.bands if self.conv_type < 0 else \
            [band for band in self.roi.bands if band.ct == self.conv_type])

        # by default, get the energy range and image center from the roi.
        if self.center is None:
            self.center = self.roi.roi_dir

        # set up, then create a SkyImage object to perform the projection
        # to a grid and manage an image
        if not isinstance(self.size, collections.Iterable):

            # make sure size and pixelsize are commensurate (helpful for
            # various downsampling code later).
            self.size = int(self.size / self.pixelsize + 0.01) * self.pixelsize

            self.skyimage = SkyImage(self.center, '', self.pixelsize,
                                     self.size, 1, self.proj, self.galactic, False)
        else:
            self.skyimage = SkyImage(self.center, '', self.pixelsize,
                                     float(self.size[0]), 1, self.proj,
                                     self.galactic, False, float(self.size[1]))

        self.fill()

        self.nx, self.ny = self.skyimage.naxis1(), self.skyimage.naxis2()

        self.image = ROIImage.skyimage2numpy(self.skyimage)

    @staticmethod
    def skyimage2numpy(skyimage):
        nx, ny = skyimage.naxis1(), skyimage.naxis2()
        image = np.array(skyimage.image()).reshape((ny, nx))
        return image

    @abstractmethod
    def fill(self):
        pass

    def get_ZEA(self, axes=None, nticks=None):
        """ axes and nticks can be created by this object's constructor,
            but are more logically specified here. If they are not
            specified, get values from initial object creation. """
        from uw.utilities.image import ZEA

        # get out of the object all parameters which should be passed to ZEA.
        if hasattr(self.size, '__iter__'):
            raise Exception("Can only create ZEA object for square objects.")

        zea_dict = dict((d[0], self.__dict__[d[0]]) for d in ZEA.defaults
                        if hasattr(d, '__iter__') and hasattr(self, d[0]))
        if axes is not None:
            zea_dict['axes'] = axes
        if nticks is not None:
            zea_dict['nticks'] = nticks

        zea = ZEA(self.center, **zea_dict)
        zea.skyimage = self.skyimage

        # recalculate, in case the sky image has changed
        zea.image = ROIImage.skyimage2numpy(self.skyimage)

        # The old one gets removed by python's garbage collector (when zea.skyimage is replaced).
        zea.projector = zea.skyimage.projector()

        zea.vmin, zea.vmax = zea.skyimage.minimum(), zea.skyimage.maximum()

        return zea

    def get_pyfits(self):
        """ Create and return a pyfits object that corresponds to the ROIImage object.
            The fits file created is supposed to be consistent with the internal
            representation that SkyImage/SkyProj uses. """

        if self.galactic:
            ctype1 = "GLON-%s" % self.proj
            ctype2 = "GLAT-%s" % self.proj
            # for some reason, SkyDir(0,0,SkyDir.GALACTIC).l() = 360
            crval1, crval2 = self.center.l() % 360, self.center.b()
        else:
            ctype1 = "RA-%s" % self.proj
            ctype2 = "DEC-%s" % self.proj
            crval1, crval2 = self.center.ra(), self.center.dec()

        cdelt1, cdelt2 = -self.pixelsize, self.pixelsize

        # from SkyImage.cxx line 92:
        # "center pixel; WCS convention is that center of a pixel is a half-integer"
        crpix1, crpix2 = (self.skyimage.naxis1() + 1) / 2.0, (self.skyimage.naxis2() + 1) / 2.0

        values = [
            ["TELESCOP", "GLAST"],
            ["INSTRUME", "LAT"],
            ["DATE-OBS", ""],
            ["DATE-END", ""],
            ["EQUINOX", 2000.0, "Equinox of RA & DEC specifications"],
            ["CTYPE1", ctype1, "[RA|GLON]---%%%, %%% represents the projection method such as AIT"],
            ["CRPIX1", crpix1, "Reference pixel"],
            ["CRVAL1", crval1, "RA or GLON at the reference pixel"],
            ["CDELT1", cdelt1, "X-axis incr per pixel of physical coord at position of ref pixel(deg)"],
            ["CTYPE2", ctype2, "[DEC|GLAT]---%%%, %%% represents the projection method such as AIT"],
            ["CRPIX2", crpix2, "Reference pixel"],
            ["CRVAL2", crval2, "DEC or GLAT at the reference pixel"],
            ["CDELT2", cdelt2, "Y-axis incr per pixel of physical coord at position of ref pixel(deg)"],
            ["CROTA2", 0, "Image rotation (deg)"],
        ]

        # pyfits Card comments have a limited length; truncate long ones.
        for i in values:
            if len(i) > 2 and len(i[2]) > 47:
                i[2] = i[2][0:47]

        cards = [pyfits.Card(*i) for i in values]
        header = pyfits.Header(cards=cards)

        hdu = pyfits.PrimaryHDU(data=self.image, header=header)
        fits = pyfits.HDUList([hdu])

        return fits


class ModelImage(ROIImage):
    """ This ROIImage subclass fills the sky image with the model
        predicted counts for a fermi sky model described by an
        ROIAnalysis object.

        This code is forced to deal with the fact that model intensity
        can vary significantly across a spatial pixel. The rest of the
        pointlike code can avoid this whole issue by scaling the healpix
        pixel size with the PSF to ensure that pixels are always small
        compared to the instrument's intrinsic resolution. But since
        model predicted counts maps can be generated with arbitrary
        pixel size, this issue must be dealt with directly.

        The solution this code uses is simply to sample from a finer
        grid, with an integer number of subpixels in each dimension.
        After calculating the model predictions on the fine grid,
        neighboring blocks of predictions are averaged to downsample
        back to the requested pixel size.

        This formulation assumes that each of the subpixels has the
        same solid angle, so it is only suitable for relatively small
        images where pixels have equal area. For that reason, it is
        advised to use the ZEA projection.

        For point and extended sources, the characteristic scale that
        the sampling must be small compared to is the PSF. So the
        formula for determining the factor is

            factor = ceil(pixelsize/r10)

        where pixelsize is the plotting pixel size and r10 is the 10%
        containment radius of the PSF.

        For background sources, the characteristic scale is not the PSF
        but the convolution grid pixelsize. So the formula for
        determining the factor is instead

            factor = ceil(pixelsize/(conv_pixelsize/4))

        where conv_pixelsize is the size of the convolution grid's pixels.

        For background sources, this algorithm is generally efficient
        since we expect the background to vary on this smaller scale
        all across the image. But for point and (small) extended
        sources, this algorithm is generally very poor because it
        requires calculating the PSF (or PDF) at many points where the
        value is very close to 0. A better algorithm would be an
        adaptive quadrature integration algorithm which would evaluate
        the integral in each pixel, then do a more accurate integral
        and iterate until the integral converged. This would avoid
        having to evaluate the model predictions for a source very
        finely far from the source. On the other hand, adding this
        feature (presumably to C++ for optimization) would be very
        costly, and this code runs fast enough...
    """

    defaults = ROIImage.defaults + (
        ('override_point_sources', None,
         """ If either is specified, use these override_point_sources
             and override_diffuse_sources to generate the image instead
             of the sources in the ROI."""),
        ('override_diffuse_sources', None, 'Same as override_point_sources'),
    )

    @keyword_options.decorate(defaults)
    def __init__(self, *args, **kwargs):
        if kwargs.has_key('proj') and kwargs['proj'] != 'ZEA':
            print "Warning, it is strongly advised to use the 'ZEA' projection when creating model counts maps."
        super(ModelImage, self).__init__(*args, **kwargs)

    def fill(self):
        self.wsdl = self.skyimage.get_wsdl()

        self.solid_angle = np.radians(self.pixelsize)**2

        model_counts = np.zeros(len(self.wsdl), dtype=float)

        model_counts += self.all_point_source_counts()
        model_counts += self.all_diffuse_sources_counts()
        model_counts *= self.roi.phase_factor  # don't forget about the phase factor!
        # NB -- this will need to be fixed if we want to account for bracketing IRFs

        PythonUtilities.set_wsdl_weights(model_counts, self.wsdl)

        self.skyimage.set_wsdl(self.wsdl)

    @staticmethod
    def downsample(myarr, factor):
        """ Code taken from
            http://code.google.com/p/agpy/source/browse/trunk/agpy/downsample.py

            Downsample a 1D or 2D array by averaging over *factor* pixels in
            each axis; this version requires the shape to be a multiple of
            factor. This code is pure numpy and should be fast.
        """
        assert isinstance(factor, numbers.Integral)
        assert len(myarr.shape) <= 2
        if len(myarr.shape) == 1:
            xs = myarr.shape[0]
            assert xs % factor == 0
            dsarr = np.concatenate([[myarr[i::factor]]
                                    for i in range(factor)]).mean(axis=0)
            return dsarr
        elif len(myarr.shape) == 2:
            xs, ys = myarr.shape
            assert xs % factor == 0 and ys % factor == 0
            dsarr = np.concatenate([[myarr[i::factor, j::factor]
                                     for i in range(factor)]
                                    for j in range(factor)]).mean(axis=0)
            return dsarr

    def bigger_wsdl(self, band, compare=None):
        """ Want to sample on a grid that is comparable in size to (or
            smaller than) the 10% containment radius of the PSF, to
            ensure we get a reasonable sampling of the grid. """
        if compare is None:
            r10 = band.psf.inverse_integral_on_axis(0.10)
            compare = r10

        self.factor = int(np.ceil(self.pixelsize / compare))

        if self.factor == 1:
            return self.wsdl
        else:
            # hold onto this thing since it is needed by downsample_model
            if not hasattr(self.size, '__iter__'):
                self.fine_skyimage = SkyImage(self.center, '',
                                              float(self.pixelsize) / self.factor,
                                              self.size, 1, self.proj,
                                              self.galactic, False)
            else:
                self.fine_skyimage = SkyImage(self.center, '',
                                              float(self.pixelsize) / self.factor,
                                              float(self.size[0]), 1, self.proj,
                                              self.galactic, False, float(self.size[1]))

            wsdl = self.fine_skyimage.get_wsdl()
            return wsdl

    def downsample_model(self, rvals):
        if self.factor == 1:
            return rvals
        else:
            rvals = rvals.reshape((self.fine_skyimage.naxis2(),
                                   self.fine_skyimage.naxis1()))
            rvals = ModelImage.downsample(rvals, self.factor).flatten()
            return rvals

    @staticmethod
    def get_point_sources(roi, override_point_sources, override_diffuse_sources):
        if override_point_sources is None and override_diffuse_sources is None:
            return roi.psm.point_sources
        if override_point_sources is None:
            return []
        elif not isinstance(override_point_sources, collections.Iterable):
            return [override_point_sources]
        else:
            return override_point_sources

    def all_point_source_counts(self):
        """ Calculate the point source contributions. """
        point_sources = ModelImage.get_point_sources(self.roi,
                                                     self.override_point_sources,
                                                     self.override_diffuse_sources)
        if len(point_sources) == 0:
            return 0

        point_counts = np.zeros(len(self.wsdl), dtype=float)

        for band in self.selected_bands:
            cpsf = band.psf.cpsf

            # generate a list of skydirs on a finer grid.
            wsdl = self.bigger_wsdl(band)

            rvals = np.empty(len(wsdl), dtype=float)

            for nps, ps in enumerate(point_sources):
                # evaluate the PSF at the center of each pixel
                cpsf.wsdl_val(rvals, ps.skydir, wsdl)

                # average the finer grid back to the original resolution.
                temp = self.downsample_model(rvals)

                temp *= self.solid_angle         # multiply by pixel solid angle
                temp *= band.expected(ps.model)  # scale by total expected counts

                point_counts += temp

        return point_counts

    def extended_source_counts(self, extended_model):
        rd = self.roi.roi_dir
        es = extended_model.extended_source
        sm = es.smodel

        extended_counts = np.zeros(len(self.wsdl), dtype=float)

        for band in self.selected_bands:
            extended_model.set_state(band)

            exposure = band.exp.value
            er = exposure(es.spatial_model.center, extended_model.current_energy) / \
                 exposure(rd, extended_model.current_energy)

            es_counts = band.expected(sm) * er

            wsdl = self.bigger_wsdl(band)

            es_pix_counts = extended_model._pix_value(wsdl) * self.solid_angle

            es_pix_counts = self.downsample_model(es_pix_counts)

            bg_pix_counts = es_pix_counts * es_counts

            extended_counts += bg_pix_counts

        return extended_counts

    def otf_source_counts(self, bg):
        roi = self.roi
        mo = bg.smodel

        background_counts = np.zeros(len(self.wsdl), dtype=float)

        for band in self.selected_bands:

            ns, bg_points, bg_vector = ROIDiffuseModel_OTF.sub_energy_binning(band, bg.nsimps)

            pi_evals = np.empty([len(self.wsdl), ns + 1])

            wsdl = self.bigger_wsdl(band, compare=bg.pixelsize / 4.0)

            for ne, e in enumerate(bg_points):
                bg.set_state(e, band.ct, band)
                temp = self.downsample_model(bg._pix_value(wsdl))
                pi_evals[:, ne] = temp

            pi_evals *= (self.solid_angle * bg_vector)
            mo_evals = mo(bg_points)
            pi_counts = (pi_evals * mo_evals).sum(axis=1)

            background_counts += pi_counts

        return background_counts

    def diffuse_source_counts(self, bg):
        if isinstance(bg, ROIDiffuseModel_OTF):
            return self.otf_source_counts(bg)
        elif isinstance(bg, ROIExtendedModel):
            return self.extended_source_counts(bg)
        else:
            raise Exception("Unable to calculate model predictions for diffuse source %s" % bg.name)

    @staticmethod
    def get_diffuse_sources(roi, override_point_sources, override_diffuse_sources):
        if override_point_sources is None and override_diffuse_sources is None:
            return roi.dsm.bgmodels
        else:
            mapper = get_default_diffuse_mapper(roi.sa, roi.roi_dir, roi.quiet)
            if override_diffuse_sources is None:
                return []
            elif not isinstance(override_diffuse_sources, collections.Iterable):
                return [mapper(override_diffuse_sources)]
            else:
                return [mapper(ds) for ds in override_diffuse_sources]

    def all_diffuse_sources_counts(self):
        """ Calculate the diffuse source contributions. """
        bgmodels = ModelImage.get_diffuse_sources(self.roi,
                                                  self.override_point_sources,
                                                  self.override_diffuse_sources)

        return sum(self.diffuse_source_counts(bg) for bg in bgmodels)
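# A quick numerical check of the block averaging performed by
# ModelImage.downsample (a sketch added here for illustration; it is not part
# of the original module).  Each output pixel is the mean of a factor x factor
# block of input pixels.
import numpy as np
arr = np.arange(16, dtype=float).reshape(4, 4)
small = ModelImage.downsample(arr, 2)
# small -> array([[ 2.5,  4.5],
#                 [10.5, 12.5]])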