def f16x3_to_rgb(pio, start_depth, clip=1, parallel=None, cli_progress=False):
    transform = viz.SqrtStretch() + viz.ManualInterval(0, clip)
    _float_to_rgb(pio, start_depth, ImageMode.F16x3, transform,
                  parallel=parallel, cli_progress=cli_progress)
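# Usage sketch (not from the original source; the array below is a placeholder):
# the composed transform above first rescales values through ManualInterval(0, clip)
# to the [0, 1] range and then applies the square-root stretch, so the result can
# be scaled straight into 8-bit RGB channels.
import numpy as np
from astropy import visualization as viz

data = np.linspace(0.0, 2.0, 16).reshape(4, 4)      # hypothetical float image
transform = viz.SqrtStretch() + viz.ManualInterval(0, 1)
scaled = (255 * transform(data)).astype(np.uint8)   # stretched values in [0, 255]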
def plot_norm(self, stretch='linear', power=1.0, asinh_a=0.1, min_cut=None,
              max_cut=None, min_percent=None, max_percent=None, percent=None,
              clip=True):
    """Create a matplotlib norm object for plotting.

    This is a copy of `astropy.visualization.mpl_normalize.simple_norm`,
    which will be available in Astropy 1.3.

    See the parameter description there!

    Examples
    --------
    >>> image = SkyImage()
    >>> norm = image.plot_norm(stretch='sqrt', max_percent=99)
    >>> image.plot(norm=norm)
    """
    import astropy.visualization as v
    from astropy.visualization.mpl_normalize import ImageNormalize

    if percent is not None:
        interval = v.PercentileInterval(percent)
    elif min_percent is not None or max_percent is not None:
        interval = v.AsymmetricPercentileInterval(min_percent or 0.,
                                                  max_percent or 100.)
    elif min_cut is not None or max_cut is not None:
        interval = v.ManualInterval(min_cut, max_cut)
    else:
        interval = v.MinMaxInterval()

    if stretch == 'linear':
        stretch = v.LinearStretch()
    elif stretch == 'sqrt':
        stretch = v.SqrtStretch()
    elif stretch == 'power':
        stretch = v.PowerStretch(power)
    elif stretch == 'log':
        stretch = v.LogStretch()
    elif stretch == 'asinh':
        stretch = v.AsinhStretch(asinh_a)
    else:
        raise ValueError('Unknown stretch: {0}.'.format(stretch))

    vmin, vmax = interval.get_limits(self.data)

    return ImageNormalize(vmin=vmin, vmax=vmax, stretch=stretch, clip=clip)
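# Usage sketch (assumes a modern astropy installation): the equivalent norm can
# now be built directly with astropy.visualization.simple_norm, the function
# this method anticipates.
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import simple_norm

data = np.random.default_rng(0).random((64, 64))    # placeholder image data
norm = simple_norm(data, stretch='sqrt', max_percent=99)
plt.imshow(data, norm=norm, origin='lower')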
def to_fig(self, rowrange, colrange, extension=1, cmap='Greys_r', cut=None, dpi=50):
    """Turns a FITS file into a cropped and contrast-stretched matplotlib figure."""
    fts = fitsio.FITS(self.fits_filename)
    if (np.isfinite(fts[extension].read())).sum() == 0:
        raise InvalidFrameException()
    image = fts[extension].read()[rowrange[0]:rowrange[1],
                                  colrange[0]:colrange[1]]
    fts.close()
    if cut is None:
        cut = np.percentile(image[np.isfinite(image)], [10, 99.5])
    transform = visualization.LogStretch() + visualization.ManualInterval(
        vmin=cut[0], vmax=cut[1])
    image_scaled = transform(image)

    px_per_kepler_px = 20
    dimensions = [image.shape[0] * px_per_kepler_px,
                  image.shape[1] * px_per_kepler_px]
    figsize = [dimensions[1] / dpi, dimensions[0] / dpi]
    dpi = 440 / float(figsize[0])

    fig = pl.figure(figsize=figsize, dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)
    ax.matshow(image_scaled, aspect='auto', cmap=cmap, origin='lower',
               interpolation='nearest')
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')
    #ax.set_axis_bgcolor('red')
    fig.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
    fig.canvas.draw()
    return fig
def get_im_interval(pmin=10, pmax=99.9, vmin=None, vmax=None):
    '''Returns an interval, to feed the ImageNormalize routine from Astropy.

    :param pmin: lower-limit percentile
    :type pmin: float
    :param pmax: upper-limit percentile
    :type pmax: float
    :param vmin: absolute lower limit
    :type vmin: float
    :param vmax: absolute upper limit
    :type vmax: float

    :return: an :class:`astropy.visualization.interval` instance
    :rtype: :class:`astropy.visualization.interval`

    .. note:: Specifying *both* vmin and vmax will override pmin and pmax.
    '''
    if vmin is not None and vmax is not None:
        return astrovis.ManualInterval(vmin, vmax)
    return astrovis.AsymmetricPercentileInterval(pmin, pmax)
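# Usage sketch (placeholder data): the returned interval is meant to be passed
# to astropy's ImageNormalize together with a stretch, as the docstring suggests.
import numpy as np
from astropy import visualization as astrovis

data = np.random.default_rng(1).normal(size=(32, 32))       # placeholder image
interval = get_im_interval(pmin=10, pmax=99.9)              # percentile-based cut
norm = astrovis.ImageNormalize(data, interval=interval,
                               stretch=astrovis.LinearStretch())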
def create_figure(self, frameno=0, binning=1, dpi=None, stretch='log',
                  vmin=1, vmax=5000, cmap='gray', data_col='FLUX',
                  annotate=True, time_format='ut', show_flags=False,
                  label=None):
    """Returns a matplotlib Figure object that visualizes a frame.

    Parameters
    ----------
    frameno : int
        Image number in the target pixel file.

    binning : int
        Number of frames around `frameno` to co-add. (default: 1).

    dpi : float, optional [dots per inch]
        Resolution of the output in dots per Kepler CCD pixel.
        By default the dpi is chosen such that the image is 440px wide.

    vmin : float, optional
        Minimum cut level (default: 1).

    vmax : float, optional
        Maximum cut level (default: 5000).

    cmap : str, optional
        The matplotlib color map name.  The default is 'gray',
        can also be e.g. 'gist_heat'.

    raw : boolean, optional
        If `True`, show the raw pixel counts rather than
        the calibrated flux. Default: `False`.

    annotate : boolean, optional
        Annotate the Figure with a timestamp and target name?
        (Default: `True`.)

    show_flags : boolean, optional
        Show the quality flags?
        (Default: `False`.)

    label : str
        Label text to show in the bottom left corner of the movie.

    Returns
    -------
    figure : `matplotlib.figure.Figure`
        A figure visualizing the frame.
    """
    # Get the flux data to visualize
    flx = self.flux_binned(frameno=frameno, binning=binning, data_col=data_col)

    # Determine the figsize and dpi
    shape = list(flx.shape)
    shape = [shape[1], shape[0]]
    if dpi is None:
        # Twitter timeline requires dimensions between 440x220 and 1024x512
        # so we make 440 the default
        dpi = 440 / float(shape[0])

    # libx264 requires the height to be divisible by 2, we ensure this here:
    shape[0] -= ((shape[0] * dpi) % 2) / dpi

    # Create the figure and display the flux image using matshow
    fig = pl.figure(figsize=shape, dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)
    if self.verbose:
        print('{} vmin/vmax = {}/{} (median={})'.format(
            data_col, vmin, vmax, np.nanmedian(flx)))

    if stretch == 'linear':
        stretch_fn = visualization.LinearStretch()
    elif stretch == 'sqrt':
        stretch_fn = visualization.SqrtStretch()
    elif stretch == 'power':
        stretch_fn = visualization.PowerStretch(1.0)
    elif stretch == 'log':
        stretch_fn = visualization.LogStretch()
    elif stretch == 'asinh':
        stretch_fn = visualization.AsinhStretch(0.1)
    else:
        raise ValueError('Unknown stretch: {0}.'.format(stretch))

    transform = (stretch_fn +
                 visualization.ManualInterval(vmin=vmin, vmax=vmax))
    flx_transform = 255 * transform(flx)
    # Make sure to remove all NaNs!
    flx_transform[~np.isfinite(flx_transform)] = 0
    ax.imshow(flx_transform.astype(int), aspect='auto',
              origin='lower', interpolation='nearest',
              cmap=cmap, norm=NoNorm())

    if annotate:  # Annotate the frame with a timestamp and target name?
        fontsize = 3. * shape[0]
        margin = 0.03
        # Print target name in lower left corner
        if label is None:
            label = self.objectname
        txt = ax.text(margin, margin, label,
                      family="monospace", fontsize=fontsize,
                      color='white', transform=ax.transAxes)
        txt.set_path_effects([
            path_effects.Stroke(linewidth=fontsize / 6., foreground='black'),
            path_effects.Normal()
        ])
        # Print a timestring in the lower right corner
        txt2 = ax.text(1 - margin, margin,
                       self.timestamp(frameno, time_format=time_format),
                       family="monospace", fontsize=fontsize,
                       color='white', ha='right',
                       transform=ax.transAxes)
        txt2.set_path_effects([
            path_effects.Stroke(linewidth=fontsize / 6., foreground='black'),
            path_effects.Normal()
        ])
        # Print quality flags in upper right corner
        if show_flags:
            flags = self.quality_flags(frameno)
            if len(flags) > 0:
                txt3 = ax.text(margin, 1 - margin, '\n'.join(flags),
                               family="monospace", fontsize=fontsize * 1.3,
                               color='white', ha='left', va='top',
                               transform=ax.transAxes,
                               linespacing=1.5, backgroundcolor='red')
                txt3.set_path_effects([
                    path_effects.Stroke(linewidth=fontsize / 6.,
                                        foreground='black'),
                    path_effects.Normal()
                ])

    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')
    fig.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
    fig.canvas.draw()
    return fig
neb_subtracted[z, :, :] = neb_subtracted[z, :, :] - neb_spect[z]

if not os.path.exists(os.path.join(data_path, 'HH305E_nebsub.fits')):
    hdr = hdul[0].header
    now = dt.utcnow().strftime('%Y/%m/%d %H:%M:%S UT')
    hdr.set('HISTORY', f'Background subtracted {now}')
    hdu = fits.PrimaryHDU(data=neb_subtracted, header=hdr)
    hdu.writeto(os.path.join(data_path, 'HH305E_nebsub.fits'))

##-------------------------------------------------------------------------
## Plot mask of low H-beta emission
plt.figure(figsize=(8, 8))

plt.subplot(1, 2, 1)
plt.title('Sum of H-beta Bins')
norm = v.ImageNormalize(image,
                        interval=v.ManualInterval(vmin=image.min() - 5,
                                                  vmax=image.max() + 10),
                        stretch=v.LogStretch(10))
im = plt.imshow(image, origin='lower', norm=norm, cmap='Greys')
plt.colorbar(im)

plt.subplot(1, 2, 2)
plt.title('Nebular Emission Mask')
mimage = np.ma.MaskedArray(image)
mimage.mask = ~nmask
mimagef = np.ma.filled(mimage, fill_value=0)
norm = v.ImageNormalize(mimagef,
                        interval=v.ManualInterval(
                            vmin=image.min() - 5,
                            vmax=np.percentile(image, mask_pcnt) + 5),
                        stretch=v.LinearStretch())
im = plt.imshow(mimagef, origin='lower', norm=norm, cmap='Greys')
def set_normalization(self, stretch=None, interval=None, stretchkwargs={},
                      intervalkwargs={}, perm_linear=None):
    if stretch is None:
        if self.stretch is None:
            stretch = 'linear'
        else:
            stretch = self.stretch
    if isinstance(stretch, str):
        print(stretch,
              ' '.join([f'{k}={v}' for k, v in stretchkwargs.items()]))
        if self.data is None:  # can not calculate objects yet
            self.stretch_kwargs = stretchkwargs
        else:
            kwargs = self.prepare_kwargs(self.stretch_kws_defaults[stretch],
                                         self.stretch_kwargs, stretchkwargs)
            if perm_linear is not None:
                perm_linear_kwargs = self.prepare_kwargs(
                    self.stretch_kws_defaults['linear'], perm_linear)
                print('linear',
                      ' '.join([f'{k}={v}'
                                for k, v in perm_linear_kwargs.items()]))
                if stretch == 'asinh':  # arg: a=0.1
                    stretch = vis.CompositeStretch(
                        vis.LinearStretch(**perm_linear_kwargs),
                        vis.AsinhStretch(**kwargs))
                elif stretch == 'contrastbias':  # args: contrast, bias
                    stretch = vis.CompositeStretch(
                        vis.LinearStretch(**perm_linear_kwargs),
                        vis.ContrastBiasStretch(**kwargs))
                elif stretch == 'histogram':
                    stretch = vis.CompositeStretch(
                        vis.HistEqStretch(self.data, **kwargs),
                        vis.LinearStretch(**perm_linear_kwargs))
                elif stretch == 'log':  # args: a=1000.0
                    stretch = vis.CompositeStretch(
                        vis.LogStretch(**kwargs),
                        vis.LinearStretch(**perm_linear_kwargs))
                elif stretch == 'powerdist':  # args: a=1000.0
                    stretch = vis.CompositeStretch(
                        vis.LinearStretch(**perm_linear_kwargs),
                        vis.PowerDistStretch(**kwargs))
                elif stretch == 'power':  # args: a
                    stretch = vis.CompositeStretch(
                        vis.PowerStretch(**kwargs),
                        vis.LinearStretch(**perm_linear_kwargs))
                elif stretch == 'sinh':  # args: a=0.33
                    stretch = vis.CompositeStretch(
                        vis.LinearStretch(**perm_linear_kwargs),
                        vis.SinhStretch(**kwargs))
                elif stretch == 'sqrt':
                    stretch = vis.CompositeStretch(
                        vis.SqrtStretch(),
                        vis.LinearStretch(**perm_linear_kwargs))
                elif stretch == 'square':
                    stretch = vis.CompositeStretch(
                        vis.LinearStretch(**perm_linear_kwargs),
                        vis.SquaredStretch())
                else:
                    raise ValueError('Unknown stretch:' + stretch)
            else:
                if stretch == 'linear':  # args: slope=1, intercept=0
                    stretch = vis.LinearStretch(**kwargs)
                else:
                    raise ValueError('Unknown stretch:' + stretch)
    self.stretch = stretch

    if interval is None:
        if self.interval is None:
            interval = 'zscale'
        else:
            interval = self.interval
    if isinstance(interval, str):
        print(interval,
              ' '.join([f'{k}={v}' for k, v in intervalkwargs.items()]))
        kwargs = self.prepare_kwargs(self.interval_kws_defaults[interval],
                                     self.interval_kwargs, intervalkwargs)
        if self.data is None:
            self.interval_kwargs = intervalkwargs
        else:
            if interval == 'minmax':
                interval = vis.MinMaxInterval()
            elif interval == 'manual':  # args: vmin, vmax
                interval = vis.ManualInterval(**kwargs)
            elif interval == 'percentile':  # args: percentile, n_samples
                interval = vis.PercentileInterval(**kwargs)
            elif interval == 'asymetric':
                # args: lower_percentile, upper_percentile, n_samples
                interval = vis.AsymmetricPercentileInterval(**kwargs)
            elif interval == 'zscale':
                # args: nsamples=1000, contrast=0.25, max_reject=0.5,
                #       min_npixels=5, krej=2.5, max_iterations=5
                interval = vis.ZScaleInterval(**kwargs)
            else:
                raise ValueError('Unknown interval:' + interval)
    self.interval = interval

    if self.img is not None:
        self.img.set_norm(
            vis.ImageNormalize(self.data, interval=self.interval,
                               stretch=self.stretch, clip=True))
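# Standalone sketch (not part of the class above; data and parameters are made up):
# the method ultimately pairs a CompositeStretch (a permanent linear stretch plus
# the requested stretch) with an interval inside an ImageNormalize, roughly so:
import numpy as np
from astropy import visualization as vis

data = np.random.default_rng(2).random((128, 128))          # placeholder image
stretch = vis.CompositeStretch(vis.LinearStretch(slope=0.5, intercept=0.25),
                               vis.AsinhStretch(a=0.1))
interval = vis.ZScaleInterval(contrast=0.25)
norm = vis.ImageNormalize(data, interval=interval, stretch=stretch, clip=True)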
import astropy.visualization as vis
from astropy.wcs import utils as wcsutils
import pylab as pl
import pyspeckit
import paths
from astropy import modeling
from astropy import stats

cube = SpectralCube.read(
    '/Users/adam/work/w51/alma/FITS/longbaseline/velo_cutouts/w51e2e_csv0_j2-1_r0.5_medsub.fits'
)
cs21cube = subcube = cube.spectral_slab(16 * u.km / u.s, 87 * u.km / u.s)[::-1]

norm = vis.ImageNormalize(
    subcube,
    interval=vis.ManualInterval(-0.002, 0.010),
    stretch=vis.AsinhStretch(),
)

pl.rcParams['font.size'] = 12

szinch = 18
fig = pl.figure(1, figsize=(szinch, szinch))
pl.pause(0.1)
for ii in range(5):
    fig.set_size_inches(szinch, szinch)
    pl.pause(0.1)
    try:
        assert np.all(fig.get_size_inches() == np.array([szinch, szinch]))
        break
    except AssertionError:
        pass  # size not applied yet; retry on the next iteration
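# Usage sketch (synthetic 2D array, not the W51 cube): a norm like the one above
# is typically handed to imshow when plotting a channel map or moment image.
import numpy as np
import pylab as pl
from astropy import visualization as vis

demo = np.random.default_rng(6).normal(0.004, 0.002, (50, 50))   # placeholder
demo_norm = vis.ImageNormalize(demo,
                               interval=vis.ManualInterval(-0.002, 0.010),
                               stretch=vis.AsinhStretch())
pl.imshow(demo, norm=demo_norm, origin='lower')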
def create_figure(self, output_filename, survey, stretch='log', vmin=1,
                  vmax=None, min_percent=1, max_percent=95, cmap='gray',
                  contour_color='red', data_col='FLUX'):
    """Returns a matplotlib Figure object that visualizes a frame.

    Parameters
    ----------
    vmin : float, optional
        Minimum cut level (default: 1).

    vmax : float, optional
        Maximum cut level.  By default (`None`), the cut levels are
        computed from `min_percent` and `max_percent`.

    cmap : str, optional
        The matplotlib color map name.  The default is 'gray',
        can also be e.g. 'gist_heat'.

    raw : boolean, optional
        If `True`, show the raw pixel counts rather than
        the calibrated flux. Default: `False`.

    Returns
    -------
    figure : `matplotlib.figure.Figure`
        The figure, which is also written to `output_filename`.
    """
    # Get the flux data to visualize
    # Update to use TPF
    flx = self.TPF.flux_binned()
    # print(np.shape(flx))

    # calculate cut_levels
    if vmax is None:
        vmin, vmax = self.cut_levels(min_percent, max_percent, data_col)

    # Determine the figsize
    shape = list(flx.shape)
    # print(shape)

    # Create the figure and display the flux image using matshow
    fig = plt.figure(figsize=shape)
    # Update to generate axes using WCS axes instead of plain axes
    ax = plt.subplot(projection=self.TPF.wcs)
    ax.set_xlabel('RA')
    ax.set_ylabel('Dec')

    if self.verbose:
        print('{} vmin/vmax = {}/{} (median={})'.format(
            data_col, vmin, vmax, np.nanmedian(flx)))

    if stretch == 'linear':
        stretch_fn = visualization.LinearStretch()
    elif stretch == 'sqrt':
        stretch_fn = visualization.SqrtStretch()
    elif stretch == 'power':
        stretch_fn = visualization.PowerStretch(1.0)
    elif stretch == 'log':
        stretch_fn = visualization.LogStretch()
    elif stretch == 'asinh':
        stretch_fn = visualization.AsinhStretch(0.1)
    else:
        raise ValueError('Unknown stretch: {0}.'.format(stretch))

    transform = (stretch_fn +
                 visualization.ManualInterval(vmin=vmin, vmax=vmax))
    ax.imshow((255 * transform(flx)).astype(int), aspect='auto',
              origin='lower', interpolation='nearest',
              cmap=cmap, norm=NoNorm())
    ax.set_xticks([])
    ax.set_yticks([])

    current_ylims = ax.get_ylim()
    current_xlims = ax.get_xlim()

    pixels, header = surveyquery.getSVImg(self.TPF.position, survey)
    levels = np.linspace(np.min(pixels), np.percentile(pixels, 95), 10)
    ax.contour(pixels, transform=ax.get_transform(WCS(header)),
               levels=levels, colors=contour_color)

    ax.set_xlim(current_xlims)
    ax.set_ylim(current_ylims)

    fig.canvas.draw()
    plt.savefig(output_filename, bbox_inches='tight', dpi=300)
    return fig
img2.data = img2.data * photflam / exptime / 0.0455**2  # get into units of erg/s/cm^2/A/arcsec^2

# Zoom into region of interest
cut_ctr = SkyCoord('12h18m57.5s 47d18m14s')
cut_dims = np.array([4.0, 4.0]) * u.arcmin
cut = Cutout2D(img2.data, cut_ctr, cut_dims, wcs=img.wcs)

# Plot first subplot: raw data gathered by the telescope
plt.subplot(131, projection=img.wcs)
plt.imshow(cut.data, origin='lower', cmap='plasma')
plt.grid(color='yellow', ls='solid')
plt.title('Raw Telescope Data', weight='bold')
plt.ylabel('Declination (J2000)')

# Features of raw data are hard to see, so time to stretch the values
trans = viz.LogStretch() + viz.ManualInterval(0, 5e-19)
cut.data = trans(cut.data)

# Plot second subplot: enhanced so all bright regions are more visible
plt.subplot(132, projection=img.wcs)
plt.imshow(cut.data, origin='lower', cmap="plasma")
plt.grid(color='yellow', ls='solid')
plt.title('Enhanced', weight='bold')
plt.xlabel('Right Ascension (J2000)')

# Gaussian filter to suppress point sources of light, like other stars
def destar(I, sigma, t):
    D = np.zeros_like(I)
    B = gauss(I, sigma)
    M = I - B
wcs_sio54 = wcs.WCS(e2siored[0].header)

#cont3mm_proj,_ = reproject.reproject_interp((cont3mm[0].data.squeeze(),
#                                             wcs.WCS(cont3mm[0].header).celestial),
#                                            e2siored[0].header)
cont1mm_proj, _ = reproject.reproject_interp(
    (cont1mm[0].data.squeeze(), wcs.WCS(cont1mm[0].header).celestial),
    e2siored[0].header)

fig = pl.figure(1)
fig.set_size_inches(8, 8)
fig.clf()
ax = pl.subplot(projection=wcs_sio54)

pixscale = np.mean(wcs.utils.proj_plane_pixel_scales(wcs_sio54)) * u.deg

contnorm = vis.ManualInterval(-0.0005, 0.007)
siorednorm = vis.ManualInterval(-0.05, 0.15)
siobluenorm = vis.ManualInterval(-0.15, 0.60)

rgbim = np.array([
    siorednorm(e2siored[0].data),
    contnorm(cont1mm_proj),
    siobluenorm(e2sioblue[0].data)
])

ax.imshow(rgbim.T.swapaxes(0, 1), origin='lower', interpolation='none')

#csblue_reproj,_ = reproject.reproject_interp(e2sioj21blue, e2siored[0].header)
ax.contour(
    #e2CSj21blue[0].data,
    cs21max.value,
    #transform=ax.get_transform(wcs.WCS(e2CSj21blue[0].header)),
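# Standalone sketch (synthetic channels): the same pattern as above, stacking
# three ManualInterval-normalized images into an (N, M, 3) float array that
# imshow renders as an RGB composite.
import numpy as np
import pylab as pl
from astropy import visualization as vis

rng = np.random.default_rng(3)
red = rng.random((64, 64)) * 0.15        # placeholder channel images
green = rng.random((64, 64)) * 0.007
blue = rng.random((64, 64)) * 0.60

rgb = np.dstack([vis.ManualInterval(-0.05, 0.15)(red),
                 vis.ManualInterval(-0.0005, 0.007)(green),
                 vis.ManualInterval(-0.15, 0.60)(blue)])
pl.imshow(rgb, origin='lower', interpolation='none')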
def make_hiidust_plot(
    reg,
    mgpsfile,
    width=1 * u.arcmin,
    surveys=['atlasgal'],
    figure=None,
    regname='GAL_031',
    fifth_panel_synchro=False,
    alpha=-0.12,
    cmap=None,
):
    if cmap is None:
        cmap = pl.cm.viridis
        cmap.set_bad('w')

    mgps_fh = fits.open(mgpsfile)[0]
    frame = wcs.utils.wcs_to_celestial_frame(wcs.WCS(mgps_fh.header))

    coordinate = reg.center
    coordname = "{0:06.3f}_{1:06.3f}".format(coordinate.galactic.l.deg,
                                             coordinate.galactic.b.deg)

    mgps_cutout = Cutout2D(mgps_fh.data,
                           coordinate.transform_to(frame.name),
                           size=width * 2,
                           wcs=wcs.WCS(mgps_fh.header))
    print()
    print(reg.meta['text'])
    print(
        f"Retrieving MAGPIS data for {coordname} ({coordinate.to_string()} {coordinate.frame.name})"
    )
    # we're treating 'width' as a radius elsewhere, here it's a full width
    images = {
        survey: getimg(coordinate, image_size=width * 2, survey=survey)
        for survey in surveys
    }
    images = {x: y for x, y in images.items() if y is not None}
    assert len(images) > 0
    #images['mgps'] = [mgps_cutout]

    # coordinate stuff so images can be reprojected to same frame
    ww = mgps_cutout.wcs.celestial
    mgps_pixscale = (wcs.utils.proj_plane_pixel_area(ww) * u.deg**2)**0.5

    if figure is None:
        figure = pl.gcf()
    figure.clf()

    (survey, img), = images.items()

    new_img = img[0].data
    if hasattr(img[0], 'header'):
        outwcs = wcs.WCS(img[0].header)
    else:
        outwcs = img[0].wcs
    reproj_pixscale = (wcs.utils.proj_plane_pixel_area(outwcs) * u.deg**2)**0.5

    agal_bm = tgt_bm = Beam(beam_map[survey])
    convbm = tgt_bm.deconvolve(mgps_beam)

    mgps_sm = convolution.convolve_fft(mgps_cutout.data,
                                       convbm.as_kernel(mgps_pixscale))
    mgps_reproj, _ = reproject.reproject_interp((mgps_sm, mgps_cutout.wcs),
                                                outwcs,
                                                shape_out=img[0].data.shape)

    mgpsMjysr = mgps_cutout.data / mgps_beam.sr.value / 1e6

    dust_pred = dust_emissivity.blackbody.modified_blackbody(
        u.Quantity([wlmap[survey].to(u.GHz, u.spectral()),
                    mustang_central_frequency]),
        assumed_temperature,
        beta=assumed_dustbeta)

    # assumes "surv" is dust
    surv_to_mgps = new_img * dust_pred[1] / dust_pred[0]
    print(f"{regname} {survey}")
    print(f"{survey} to mgps ratio: {dust_pred[1]/dust_pred[0]}")

    dusty = surv_to_mgps.value / tgt_bm.sr.value / 1e6
    freefree = (mgps_reproj / mgps_beam.sr.value / 1e6 - dusty)
    assert not hasattr(freefree, 'unit')
    print("Max values: ", img[0].data.max(), mgps_sm.max())
    print("More max values: ", np.nanmax(dusty), np.nanmax(freefree),
          np.nanmax(mgps_reproj / mgps_beam.sr.value / 1e6))

    norm = visualization.ImageNormalize(
        freefree,
        interval=visualization.ManualInterval(
            np.nanpercentile(freefree, 0.1),
            np.nanpercentile(freefree, 99.9)),
        stretch=visualization.LogStretch(),
    )
    mgpsnorm = visualization.ImageNormalize(
        mgps_cutout.data,
        interval=visualization.PercentileInterval(99.95),
        stretch=visualization.LogStretch(),
    )
    print(f"interval: {norm.interval.vmin}, {norm.interval.vmax}")
    assert not hasattr(norm.vmin, 'unit')
    assert not hasattr(norm.vmax, 'unit')
    assert not hasattr(mgpsnorm.vmin, 'unit')
    assert not hasattr(mgpsnorm.vmax, 'unit')

    Magpis.cache_location = '/Volumes/external/mgps/cache/'

    ax0 = figure.add_subplot(1, 6, 3, projection=mgps_cutout.wcs)
    ax0.imshow(mgpsMjysr, origin='lower', interpolation='none', norm=norm,
               cmap=cmap)
    ax0.set_title("3 mm")
    ax1 = figure.add_subplot(1, 6, 1, projection=outwcs)
    ax1.imshow(dusty, origin='lower', interpolation='none', norm=norm,
               cmap=cmap)
    ax1.set_title("870 $\\mu$m scaled")
    ax1.set_ylabel("Galactic Latitude")
    ax2 = figure.add_subplot(1, 6, 2, projection=outwcs)
    ax2.imshow(freefree, origin='lower', interpolation='none', norm=norm,
               cmap=cmap)
    ax2.set_title("3 mm Free-Free")

    for ax in (ax0, ax1, ax2):
        #ax.set_xlabel("Galactic Longitude")
        ax.tick_params(direction='in')
        ax.tick_params(color='w')

    ax0.coords[1].set_axislabel("")
    ax0.coords[1].set_ticklabel_visible(False)
    ax2.coords[1].set_axislabel("")
    ax2.coords[1].set_ticklabel_visible(False)

    pl.subplots_adjust(hspace=0, wspace=0)

    if 'G01' in regname:
        gps20im = fits.open('/Users/adam/work/gc/20cm_0.fits',)
    elif 'G49' in regname:
        gps20im = fits.open(
            '/Users/adam/work/w51/vla_old/W51-LBAND-feathered_ABCD.fits')
        #gps20im = fits.open('/Users/adam/work/w51/vla_old/W51-LBAND_Carray.fits')
    else:
        gps20im = getimg(coordinate, image_size=width * 2, survey='gps20new')

    reproj_gps20, _ = reproject.reproject_interp(
        (gps20im[0].data.squeeze(), wcs.WCS(gps20im[0].header).celestial),
        #mgps_fh.header)
        # refactoring to make a smaller cutout would make this faster....
        mgps_cutout.wcs,
        shape_out=mgps_cutout.data.shape)

    gps20cutout = Cutout2D(reproj_gps20,
                           #gps20im[0].data.squeeze(),
                           coordinate.transform_to(frame.name),
                           size=width * 2,
                           wcs=mgps_cutout.wcs)
                           #wcs=wcs.WCS(mgps_fh.header))
                           #wcs.WCS(gps20im[0].header).celestial)
    ax3 = figure.add_subplot(1, 6, 5, projection=gps20cutout.wcs)

    gps20_bm = Beam.from_fits_header(gps20im[0].header)
    print(f"GPS 20 beam: {gps20_bm.__repr__()}")

    norm20 = visualization.ImageNormalize(
        gps20cutout.data,
        interval=visualization.ManualInterval(
            np.nanpercentile(gps20cutout.data, 0.5),
            np.nanpercentile(gps20cutout.data, 99.9)),
        stretch=visualization.LogStretch(),
    )

    # use 0.12 per Loren's suggestion
    freefree_20cm_to_3mm = (90 * u.GHz / (1.4 * u.GHz))**alpha

    gps20_Mjysr = gps20cutout.data / gps20_bm.sr.value / 1e6

    ax3.imshow((gps20_Mjysr * freefree_20cm_to_3mm).value,
               origin='lower', interpolation='none', norm=norm, cmap=cmap)
    ax3.set_title("20 cm scaled")

    ax3.coords[1].set_axislabel("")
    ax3.coords[1].set_ticklabel_visible(False)
    ax3.tick_params(direction='in')
    ax3.tick_params(color='w')

    # Fifth Panel:

    # use freefree_proj to get the 20cm-estimated free-free contribution even
    # if we're not using it for plotting
    # MAGPIS data are high-resolution (comparable to but better than MGPS)
    # Zadeh data are low-resolution, 30ish arcsec
    # units: Jy/sr
    freefree_proj, _ = reproject.reproject_interp(
        (freefree, outwcs), gps20cutout.wcs, shape_out=gps20cutout.data.shape)

    gps20_pixscale = (wcs.utils.proj_plane_pixel_area(gps20cutout.wcs) *
                      u.deg**2)**0.5

    # depending on which image has higher resolution, convolve one to the other
    try:
        gps20convbm = tgt_bm.deconvolve(gps20_bm)
        gps20_Mjysr_sm = convolution.convolve_fft(
            gps20_Mjysr, gps20convbm.as_kernel(gps20_pixscale))
    except ValueError:
        gps20_Mjysr_sm = gps20_Mjysr
        ff_convbm = gps20_bm.deconvolve(tgt_bm)
        freefree_proj = convolution.convolve_fft(
            freefree_proj, ff_convbm.as_kernel(gps20_pixscale))

    if fifth_panel_synchro:
        ax4 = figure.add_subplot(1, 6, 5, projection=gps20cutout.wcs)

        # use the central frequency corresponding to an approximately flat spectrum (flat -> 89.72)
        freefree_3mm_to_20cm = 1 / (90 * u.GHz / (1.4 * u.GHz))**-0.12
        #empirical_factor = 3 # freefree was coming out way too high, don't understand why yet
        synchro = gps20_Mjysr_sm - freefree_proj * freefree_3mm_to_20cm
        synchro[np.isnan(gps20_Mjysr) | (gps20_Mjysr == 0)] = np.nan
        synchroish_ratio = gps20_Mjysr_sm / (freefree_proj *
                                             freefree_3mm_to_20cm)
        #synchro = synchroish_ratio

        normsynchro = visualization.ImageNormalize(
            gps20_Mjysr_sm,
            interval=visualization.ManualInterval(
                np.nanpercentile(gps20_Mjysr_sm, 0.5),
                np.nanpercentile(gps20_Mjysr_sm, 99.9)),
            stretch=visualization.LogStretch(),
        )
        ax4.imshow(synchro.value, origin='lower', interpolation='none',
                   norm=normsynchro, cmap=cmap)
        ax4.set_title("Synchrotron")
        ax4.tick_params(direction='in')
        ax4.tick_params(color='w')
        ax4.coords[1].set_axislabel("")
        ax4.coords[1].set_ticklabel_visible(False)
        pl.tight_layout()
    else:
        # scale 20cm to match MGPS and subtract it
        gps20_pixscale = (wcs.utils.proj_plane_pixel_area(gps20cutout.wcs) *
                          u.deg**2)**0.5

        if gps20_bm.sr < mgps_beam.sr:
            # smooth GPS20 to MGPS
            gps20convbm = mgps_beam.deconvolve(gps20_bm)
            gps20_Mjysr_sm = convolution.convolve_fft(
                gps20_Mjysr, gps20convbm.as_kernel(gps20_pixscale))
            gps20_Mjysr_sm[~np.isfinite(gps20_Mjysr)] = np.nan
            gps20_proj = gps20_Mjysr_sm
            #gps20_proj,_ = reproject.reproject_interp((gps20_Mjysr_sm, gps20cutout.wcs),
            #                                          ww,
            #                                          shape_out=mgps_cutout.data.shape)
        else:
            gps20_proj = gps20_Mjysr
            gps20_convbm = gps20_bm.deconvolve(mgps_beam)
            mgpsMjysr = convolution.convolve_fft(
                mgpsMjysr, gps20_convbm.as_kernel(mgps_pixscale))

        ax4 = figure.add_subplot(1, 6, 4, projection=mgps_cutout.wcs)

        # use the central frequency corresponding to an approximately flat spectrum (flat -> 89.72)
        freefree20 = gps20_proj * freefree_20cm_to_3mm
        dust20 = (mgpsMjysr - freefree20).value
        dust20[np.isnan(gps20_proj) | (gps20_proj == 0)] = np.nan

        normdust20 = visualization.ImageNormalize(
            mgpsMjysr,
            interval=visualization.ManualInterval(
                np.nanpercentile(mgpsMjysr, 0.5),
                np.nanpercentile(mgpsMjysr, 99.9)),
            stretch=visualization.LogStretch(),
        )

        # show smoothed 20 cm
        ax3.imshow((freefree20).value, origin='lower', interpolation='none',
                   norm=norm, cmap=cmap)

        ax4.imshow(dust20, origin='lower', interpolation='none', norm=norm,
                   cmap=cmap)
        ax4.set_title("3 mm Dust")
        ax4.tick_params(direction='in')
        ax4.tick_params(color='w')
        ax4.coords[1].set_axislabel("")
        ax4.coords[1].set_ticklabel_visible(False)
        pl.tight_layout()

    #elif 'G01' not in regname:
    #    norm.vmin = np.min([np.nanpercentile(dust20, 0.5), np.nanpercentile(freefree, 0.1)])
    if np.abs(np.nanpercentile(dust20, 0.5) -
              np.nanpercentile(freefree, 0.1)) < 1e2:
        norm.vmin = np.min([np.nanpercentile(dust20, 0.5),
                            np.nanpercentile(freefree, 0.1)])

    if 'arches' in reg.meta['text']:
        norm.vmin = 0.95  # force 1 to be on-scale
    if 'w49b' in reg.meta['text']:
        norm.vmin = np.min([np.nanpercentile(dust20, 8),
                            np.nanpercentile(freefree, 0.1)])

    norm.vmin = -4
    norm.vmax = 11

    ax0.imshow(mgpsMjysr, origin='lower', interpolation='none', norm=norm,
               cmap=cmap)
    ax1.imshow(dusty, origin='lower', interpolation='none', norm=norm,
               cmap=cmap)
    ax2.imshow(freefree, origin='lower', interpolation='none', norm=norm,
               cmap=cmap)
    ax3.imshow((gps20_proj * freefree_20cm_to_3mm).value, origin='lower',
               interpolation='none', norm=norm, cmap=cmap)
    ax4.imshow(dust20, origin='lower', interpolation='none', norm=norm,
               cmap=cmap)

    print(
        f"{reg}: dusty sum: {dusty[dusty>0].sum()} freefreeish sum: {freefree[freefree>0].sum()}"
    )

    area = mgps_reproj.size * (reproj_pixscale**2).to(u.sr)

    mgps_reproj_Mjysr = mgps_reproj / mgps_beam.sr.value / 1e6

    # only label the middle axis
    for ax in figure.axes:
        ax.set_xlabel("Galactic Longitude")
    for ax in figure.axes:
        ax.set_xlabel(" ")
    ax0.set_xlabel("Galactic Longitude")

    lastax = ax3
    bbox = lastax.get_position()

    # this is a painful hack to force the bbox to update
    while bbox.height > 0.9:
        print(f"bbox_height = {bbox.height}")
        pl.pause(0.1)
        bbox = lastax.get_position()

    cax = figure.add_axes([bbox.x1 + 0.01, bbox.y0, 0.02, bbox.height])
    cb = figure.colorbar(mappable=lastax.images[-1], cax=cax)
    cb.set_ticks([-3, 0, 10, 50, 100])
    if 'w51' in reg.meta['text']:
        cb.set_ticks([-10, 0, 20, 200])
    if 'w49b' in reg.meta['text']:
        cb.set_ticks([-3, 0, 3, 10])
    if 'arches' in reg.meta['text']:
        cb.set_ticks([0, 1, 5, 10])
    cb.set_label('MJy sr$^{-1}$')

    return {
        'dust': dusty[dusty > 0].sum(),
        'dust20': dust20[dust20 > 0].sum(),
        'freefree': freefree[freefree > 0].sum(),
        'freefree20': freefree20[freefree20 > 0].sum(),
        'totalpos': mgps_reproj_Mjysr[mgps_reproj_Mjysr > 0].sum(),
        'total': mgps_reproj_Mjysr.sum(),
        'totalpos20': mgpsMjysr[mgpsMjysr > 0].sum(),
        'total20': mgpsMjysr.sum(),
    }