def extract_class():
    """Set up an Extract class and load a particular shotid"""
    e = Extract()
    e.load_shot("20190201012", survey="hdr2.1")
    return e
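# Example (a sketch; assumes the hdr2.1 shot H5 files referenced by load_shot()
# are available through HDRconfig):
#
# >>> e = extract_class()
# >>> waves = e.get_wave()   # rectified wavelength grid in Angstrom
# >>> e.close()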
def fit_ellipse_for_source( friendid=None, detectid=None, coords=None, shotid=None, subcont=True, convolve_image=False, pixscale=pixscale, imsize=imsize, wave_range=None, ): if detectid is not None: global deth5 detectid_obj = detectid if detectid_obj <= 2190000000: det_info = deth5.root.Detections.read_where("detectid == detectid_obj")[0] linewidth = det_info["linewidth"] wave_obj = det_info["wave"] redshift = wave_obj / (1216) - 1 else: det_info = conth5.root.Detections.read_where("detectid == detectid_obj")[0] redshift = 0 wave_obj = 4500 coords_obj = SkyCoord(det_info["ra"], det_info["dec"], unit="deg") shotid_obj = det_info["shotid"] fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")["fwhm_virus"][0] amp = det_info["multiframe"] if wave_range is not None: wave_range_obj = wave_range else: if detectid_obj <= 2190000000: wave_range_obj = [wave_obj - 2 * linewidth, wave_obj + 2 * linewidth] else: wave_range_obj = [4100, 4200] if coords is not None: coords_obj = coords if shotid is not None: shotid_obj = shotid fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")[ "fwhm_virus" ][0] try: hdu = make_narrowband_image( coords=coords_obj, shotid=shotid_obj, wave_range=wave_range_obj, imsize=imsize * u.arcsec, pixscale=pixscale * u.arcsec, subcont=subcont, convolve_image=convolve_image, include_error=True, ) except: print("Could not make narrowband image for {}".format(detectid)) return np.nan, np.nan elif friendid is not None: global friend_cat sel = friend_cat["friendid"] == friendid group = friend_cat[sel] coords_obj = SkyCoord(ra=group["icx"][0] * u.deg, dec=group["icy"][0] * u.deg) wave_obj = group["icz"][0] redshift = wave_obj / (1216) - 1 linewidth = group["linewidth"][0] shotid_obj = group["shotid"][0] fwhm = group["fwhm"][0] amp = group["multiframe"][0] if wave_range is not None: wave_range_obj = wave_range else: wave_range_obj = [wave_obj - 2 * linewidth, wave_obj + 2 * linewidth] if shotid is not None: shotid_obj = shotid fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")[ "fwhm_virus" ][0] try: hdu = make_narrowband_image( coords=coords_obj, shotid=shotid_obj, wave_range=wave_range_obj, imsize=imsize * u.arcsec, pixscale=pixscale * u.arcsec, subcont=subcont, convolve_image=convolve_image, include_error=True, ) except: print("Could not make narrowband image for {}".format(friendid)) return None elif coords is not None: coords_obj = coords if wave_range is not None: wave_range_obj = wave_range else: print( "You need to supply wave_range=[wave_start, wave_end] for collapsed image" ) if shotid is not None: shotid_obj = shotid fwhm = surveyh5.root.Survey.read_where("shotid == shotid_obj")[ "fwhm_virus" ][0] else: print("Enter the shotid to use (eg. 
20200123003)") hdu = make_narrowband_image( coords=coords_obj, shotid=shotid_obj, wave_range=wave_range_obj, imsize=imsize * u.arcsec, pixscale=pixscale * u.arcsec, subcont=subcont, convolve_image=convolve_image, include_error=True, ) else: print("You must provide a detectid, friendid or coords/wave_range/shotid") return np.nan, np.nan w = wcs.WCS(hdu[0].header) if friendid is not None: sel_friend_group = friend_cat["friendid"] == friendid group = friend_cat[sel_friend_group] eps = 1 - group["a2"][0] / group["b2"][0] pa = group["pa"][0] * np.pi / 180.0 - 90 sma = group["a"][0] * 3600 / pixscale coords = SkyCoord(ra=group["icx"][0] * u.deg, dec=group["icy"][0] * u.deg) wave_obj = group["icz"][0] redshift = wave_obj / (1216) - 1 linewidth = np.nanmedian(group["linewidth"]) shotid_obj = group["shotid"][0] fwhm = group["fwhm"][0] geometry = EllipseGeometry( x0=w.wcs.crpix[0], y0=w.wcs.crpix[0], sma=sma, eps=eps, pa=pa ) else: geometry = EllipseGeometry( x0=w.wcs.crpix[0], y0=w.wcs.crpix[0], sma=20, eps=0.2, pa=20.0 ) geometry = EllipseGeometry( x0=w.wcs.crpix[0], y0=w.wcs.crpix[0], sma=20, eps=0.2, pa=20.0 ) # geometry.find_center(hdu.data) # aper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma, # geometry.sma*(1 - geometry.eps), geometry.pa) # plt.imshow(hdu.data, origin='lower') # aper.plot(color='white') ellipse = Ellipse(hdu[0].data) isolist = ellipse.fit_image() iso_tab = isolist.to_table() if len(iso_tab) == 0: geometry.find_center(hdu[0].data, verbose=False, threshold=0.5) ellipse = Ellipse(hdu[0].data, geometry) isolist = ellipse.fit_image() iso_tab = isolist.to_table() if len(iso_tab) == 0: return np.nan, np.nan, np.nan try: # compute iso's manually in steps of 3 pixels ellipse = Ellipse(hdu[0].data) # reset ellipse iso_list = [] for sma in np.arange(1, 60, 2): iso = ellipse.fit_isophote(sma) if np.isnan(iso.intens): # print('break at {}'.format(sma)) break else: iso_list.append(iso) isolist = IsophoteList(iso_list) iso_tab = isolist.to_table() except: return np.nan, np.nan, np.nan try: model_image = build_ellipse_model(hdu[0].data.shape, isolist) residual = hdu[0].data - model_image except: return np.nan, np.nan, np.nan sma = iso_tab["sma"] * pixscale const_arcsec_to_kpc = cosmo.kpc_proper_per_arcmin(redshift).value / 60.0 def arcsec_to_kpc(sma): dist = const_arcsec_to_kpc * sma return dist def kpc_to_arcsec(dist): sma = dist / const_arcsec_to_kpc return sma dist_kpc = ( sma * u.arcsec.to(u.arcmin) * u.arcmin * cosmo.kpc_proper_per_arcmin(redshift) ) dist_arcsec = kpc_to_arcsec(dist_kpc) # print(shotid_obj, fwhm) # s_exp1d = models.Exponential1D(amplitude=0.2, tau=-50) alpha = 3.5 s_moffat = models.Moffat1D( amplitude=1, gamma=(0.5 * fwhm) / np.sqrt(2 ** (1.0 / alpha) - 1.0), x_0=0.0, alpha=alpha, fixed={"amplitude": False, "x_0": True, "gamma": True, "alpha": True}, ) s_init = models.Exponential1D(amplitude=0.2, tau=-50) fit = fitting.LevMarLSQFitter() s_r = fit(s_init, dist_kpc, iso_tab["intens"]) # Fitting can be done using the uncertainties as weights. # To get the standard weighting of 1/unc^2 for the case of # Gaussian errors, the weights to pass to the fitting are 1/unc. # fitted_line = fit(line_init, x, y, weights=1.0/yunc) # s_r = fit(s_init, dist_kpc, iso_tab['intens'])#, weights=iso_tab['intens']/iso_tab['intens_err'] ) print(s_r) try: r_n = -1.0 * s_r.tau # _0 #* const_arcsec_to_kpc except: r_n = np.nan # r_n = -1. 
* s_r.tau_0 try: sel_iso = np.where(dist_kpc >= 2 * r_n)[0][0] except: sel_iso = -1 aper = EllipticalAperture( (isolist.x0[sel_iso], isolist.y0[sel_iso]), isolist.sma[sel_iso], isolist.sma[sel_iso] * (1 - isolist.eps[sel_iso]), isolist.pa[sel_iso], ) phottable = aperture_photometry(hdu[0].data, aper, error=hdu[1].data) flux = phottable["aperture_sum"][0] * 10 ** -17 * u.erg / (u.cm ** 2 * u.s) flux_err = phottable["aperture_sum_err"][0] * 10 ** -17 * u.erg / (u.cm ** 2 * u.s) lum_dist = cosmo.luminosity_distance(redshift).to(u.cm) lum = flux * 4.0 * np.pi * lum_dist ** 2 lum_err = flux_err * 4.0 * np.pi * lum_dist ** 2 if detectid: name = detectid elif friendid: name = friendid # Get Image data from Elixer catlib = catalogs.CatalogLibrary() try: cutout = catlib.get_cutouts( position=coords_obj, side=imsize, aperture=None, dynamic=False, filter=["r", "g", "f606W"], first=True, allow_bad_image=False, allow_web=True, )[0] except: print("Could not get imaging for " + str(name)) zscale = ZScaleInterval(contrast=0.5, krej=1.5) vmin, vmax = zscale.get_limits(values=hdu[0].data) fig = plt.figure(figsize=(20, 12)) fig.suptitle( "{} ra={:3.2f}, dec={:3.2f}, wave={:5.2f}, z={:3.2f}, mf={}".format( name, coords_obj.ra.value, coords_obj.dec.value, wave_obj, redshift, amp ), fontsize=22, ) ax1 = fig.add_subplot(231, projection=w) plt.imshow(hdu[0].data, vmin=vmin, vmax=vmax) plt.xlabel("RA") plt.ylabel("Dec") plt.colorbar() plt.title("Image summed across 4*linewidth") ax2 = fig.add_subplot(232, projection=w) plt.imshow(model_image, vmin=vmin, vmax=vmax) plt.xlabel("RA") plt.ylabel("Dec") plt.colorbar() plt.title("model") ax3 = fig.add_subplot(233, projection=w) plt.imshow(residual, vmin=vmin, vmax=vmax) plt.xlabel("RA") plt.ylabel("Dec") plt.colorbar() plt.title("residuals (image-model)") # fig = plt.figure(figsize=(10,5)) im_zscale = ZScaleInterval(contrast=0.5, krej=2.5) im_vmin, im_vmax = im_zscale.get_limits(values=cutout["cutout"].data) ax4 = fig.add_subplot(234, projection=cutout["cutout"].wcs) plt.imshow( cutout["cutout"].data, vmin=im_vmin, vmax=im_vmax, origin="lower", cmap=plt.get_cmap("gray"), interpolation="none", ) plt.text( 0.8, 0.9, cutout["instrument"] + cutout["filter"], transform=ax4.transAxes, fontsize=20, color="w", ) plt.contour(hdu[0].data, transform=ax4.get_transform(w)) plt.xlabel("RA") plt.ylabel("Dec") aper.plot( color="white", linestyle="dashed", linewidth=2, transform=ax4.get_transform(w) ) ax5 = fig.add_subplot(235) plt.errorbar( dist_kpc.value, iso_tab["intens"], yerr=iso_tab["intens_err"] * iso_tab["intens"], linestyle="none", marker="o", label="Lya SB profile", ) plt.plot(dist_kpc, s_r(dist_kpc), color="r", label="Lya exp SB model", linewidth=2) plt.xlabel("Semi-major axis (kpc)") # plt.xlabel('Semi-major axis (arcsec)') plt.ylabel("Flux ({})".format(10 ** -17 * (u.erg / (u.s * u.cm ** 2)))) plt.text(0.4, 0.7, "r_n={:3.2f}".format(r_n), transform=ax5.transAxes, fontsize=16) plt.text( 0.4, 0.6, "L_lya={:3.2e}".format(lum), transform=ax5.transAxes, fontsize=16 ) secax = ax5.secondary_xaxis("top", functions=(kpc_to_arcsec, kpc_to_arcsec)) secax.set_xlabel("Semi-major axis (arcsec)") # secax.set_xlabel('Semi-major axis (kpc)') plt.xlim(0, 100) # plt.plot(sma, s_r(sma), label='moffat psf') # plt.plot(dist_kpc.value, s1(kpc_to_arcsec(dist_kpc.value)), # linestyle='dashed', linewidth=2, # color='green', label='PSF seeing:{:3.2f}'.format(fwhm)) # These two are the exact same # s1 = models.Moffat1D() # s1.amplitude = iso_tab['intens'][0] # alpha=3.5 # s1.gamma = 
0.5*(fwhm*const_arcsec_to_kpc)/ np.sqrt(2 ** (1.0 / alpha) - 1.0) # s1.alpha = alpha # plt.plot(r_1d, moffat_1d, color='orange') # plt.plot(dist_kpc.value, (s1(dist_kpc.value)), # linestyle='dashed', linewidth=2, # color='blue', label='PSF seeing:{:3.2f}'.format(fwhm)) E = Extract() E.load_shot(shotid_obj) moffat_psf = E.moffat_psf(seeing=fwhm, boxsize=imsize, scale=pixscale) moffat_shape = np.shape(moffat_psf) xcen = int(moffat_shape[1] / 2) ycen = int(moffat_shape[2] / 2) moffat_1d = ( moffat_psf[0, xcen:-1, ycen] / moffat_psf[0, xcen, ycen] * iso_tab["intens"][0] ) r_1d = moffat_psf[1, xcen:-1, ycen] E.close() plt.plot( arcsec_to_kpc(pixscale * np.arange(80)), iso_tab["intens"][0] * (moffat_psf[0, 80:-1, 80] / moffat_psf[0, 80, 80]), linestyle="dashed", color="green", label="PSF seeing:{:3.2f}".format(fwhm), ) plt.legend() if friendid is not None: ax6 = fig.add_subplot(236, projection=cutout["cutout"].wcs) plot_friends(friendid, friend_cat, cutout, ax=ax6, label=False) plt.savefig("fit2d_{}.png".format(name)) # filename = 'param_{}.txt'.format(name) # np.savetxt(filename, (r_n.value, lum.value)) return r_n, lum, lum_err
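# Example usage of fit_ellipse_for_source (a sketch, in the doctest style used by
# the docstrings below; assumes the module-level deth5/conth5/surveyh5 tables and
# friend_cat are already open, and the detectid is illustrative):
#
# >>> r_n, lum, lum_err = fit_ellipse_for_source(detectid=2101602788)
#
# A diagnostic figure is also written to fit2d_<detectid>.png.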
def make_data_cube( detectid=None, coords=None, shotid=None, pixscale=0.25 * u.arcsec, imsize=30.0 * u.arcsec, wave_range=[3470, 5540], dwave=2.0, dcont=50.0, convolve_image=True, ffsky=True, subcont=False, ): """ Function to make a datacube from either a detectid or from a coordinate/shotid combination. Paramaters ---------- detectid: int detectid from the continuum or lines catalog. Default is None. Provide a coords/shotid combo if this isn't given coords: SkyCoords object coordinates to define the centre of the data cube pixscale: astropy angle quantity plate scale imsize: astropy angle quantity spatial length of cube (equal dims is only option) wave_range: list start and stop value for the wavelength range in Angstrom dwave step in wavelength range in Angstrom convolve_image: bool option to convolve image with shotid seeing ffsky: bool option to use full frame calibrated fibers. Default is True. subcont: bool option to subtract continuum. Default is False. This will measure the continuum 50AA below and above the input wave_range dcont width in angstrom to measure the continuum. Default is to measure 50 AA wide regions on either side of the line Returns ------- hdu: PrimaryHDU object the data cube 3D array and associated 3d header Units are '10^-17 erg cm-2 s-1 per spaxel' Examples -------- Can either pass in a detectid: >>> detectid_obj=2101602788 >>> hdu = make_data_cube( detectid=detectid_obj) >>> hdu.writeto( str(detectid_obj) + '.fits', overwrite=True) or can put in an SkyCoord object: >>> star_coords = SkyCoord(9.625181, -0.043587, unit='deg') >>> hdu = make_data_cube( coords=star_coords[0], shotid=20171016108, dwave=2.0) >>> hdu.writeto( 'star.fits', overwrite=True) """ global config, detecth5, surveyh5 if detectid is not None: detectid_obj = detectid det_info = detecth5.root.Detections.read_where( 'detectid == detectid_obj')[0] shotid = det_info["shotid"] coords = SkyCoord(det_info["ra"], det_info["dec"], unit="deg") if coords is None or shotid is None: print("Provide a detectid or both a coords and shotid") E = Extract() E.load_shot(shotid, fibers=False) # get spatial dims: ndim = int(imsize / pixscale) center = int(ndim / 2) # get wave dims: nwave = int((wave_range[1] - wave_range[0]) / dwave + 1) w = wcs.WCS(naxis=3) w.wcs.crval = [coords.ra.deg, coords.dec.deg, wave_range[0]] w.wcs.crpix = [center, center, 1] w.wcs.ctype = ["RA---TAN", "DEC--TAN", "WAVE"] w.wcs.cdelt = [-pixscale.to(u.deg).value, pixscale.to(u.deg).value, dwave] rad = imsize.to(u.arcsec).value info_result = E.get_fiberinfo_for_coord(coords, radius=rad, ffsky=False) ifux, ifuy, xc, yc, ra, dec, data, error, mask = info_result # get ifu center: ifux_cen, ifuy_cen = E.convert_radec_to_ifux_ifuy(ifux, ifuy, ra, dec, coords.ra.deg, coords.dec.deg) # get FWHM and PA surveyh5 = tb.open_file(config.surveyh5, "r") shotid_obj = shotid pa = surveyh5.root.Survey.read_where("shotid == shotid_obj")["pa"][0] if convolve_image: fwhm = surveyh5.root.Survey.read_where( "shotid == shotid_obj")["fwhm_virus"][0] else: fwhm = 1.8 # just a dummy variable as convolve_image=False surveyh5.close() # add in rotation sys_rot = 1.55 rot = 360. - (90. 
+ pa + sys_rot) w.wcs.crota = [0, rot, 0] # rrot = np.deg2rad(rot) # w.wcs.pc = [[np.cos(rrot), # np.sin(rrot),0], # [-1.0*np.sin(rrot), # np.cos(rrot),0], [0,0,0]] im_cube = np.zeros((nwave, ndim, ndim)) wave_i = wave_range[0] i = 0 while wave_i <= wave_range[1]: try: im_src = E.make_narrowband_image( ifux_cen, ifuy_cen, ifux, ifuy, data, mask, scale=pixscale.to(u.arcsec).value, wrange=[wave_i, wave_i + dwave], nchunks=1, seeing_fac=fwhm, convolve_image=convolve_image, boxsize=imsize.to(u.arcsec).value, ) im_slice = im_src[0] if subcont: zarray_blue = E.make_narrowband_image( ifux_cen, ifuy_cen, ifux, ifuy, data, mask, seeing_fac=fwhm, scale=pixscale.to(u.arcsec).value, boxsize=imsize.to(u.arcsec).value, nchunks=2, wrange=[wave_i - dcont, wave_i], convolve_image=convolve_image, ) zarray_red = E.make_narrowband_image( ifux_cen, ifuy_cen, ifux, ifuy, data, mask, seeing_fac=fwhm, nchunks=2, scale=pixscale.to(u.arcsec).value, boxsize=imsize.to(u.arcsec).value, wrange=[wave_i + dwave, wave_i + dwave + dcont], convolve_image=convolve_image, ) im_cont = (zarray_blue[0] + zarray_red[0]) / (2 * dcont) im_slice = im_src[0] - dwave * im_cont im_cube[i, :, :] = im_slice except Exception: im_cube[i, :, :] = np.zeros((ndim, ndim)) wave_i += dwave i += 1 hdu = fits.PrimaryHDU(im_cube, header=w.to_header()) E.close() return hdu
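# Example (a sketch complementing the docstring examples above): build a
# continuum-subtracted data cube around the same illustrative detectid and
# write it to disk.
#
# >>> hdu = make_data_cube(detectid=2101602788, dwave=4.0,
# ...                      subcont=True, convolve_image=False)
# >>> hdu.writeto('2101602788_cube.fits', overwrite=True)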
def make_narrowband_image( detectid=None, coords=None, shotid=None, pixscale=0.25 * u.arcsec, imsize=30.0 * u.arcsec, wave_range=None, convolve_image=True, ffsky=True, subcont=False, dcont=50., include_error=False, ): """ Function to make narrowband image from either a detectid or from a coordinate/shotid combination. Paramaters ---------- detectid: int detectid from the continuum or lines catalog. Default is None. Provide a coords/shotid combo if this isn't given coords: SkyCoords object coordinates to define the centre of the data cube pixscale: astropy angle quantity plate scale imsize: astropy angle quantity image size wave_range: list or None start and stop value for the wavelength range in Angstrom. If not given, the detectid linewidth is used convolve_image: bool option to convolve image with shotid seeing ffsky: bool option to use full frame calibrated fibers. Default is True. subcont: bool option to subtract continuum. Default is False. This will measure the continuum 50AA below and above the input wave_range dcont width in angstrom to measure the continuum. Default is to measure 50 AA wide regions on either side of the line include_error bool option to include error array Returns ------- hdu: PrimaryHDU object the 2D summed data array and associated 2d header Units are '10^-17 erg cm-2 s-1' If include_error=True will include addiional hdu Examples -------- For a specific detectid: >>> hdu = make_narrowband_image(detectid=2101046271) For a SkyCoords object. You must provide shotid and wavelength range >>> coords = SkyCoord(188.79312, 50.855747, unit='deg') >>> wave_obj = 4235.84 #in Angstrom >>> hdu = make_narrowband_image(coords=coords, shotid=20190524021, wave_range=[wave_obj-10, wave_obj+10]) """ global config, detecth5, surveyh5 if detectid is not None: detectid_obj = detectid det_info = detecth5.root.Detections.read_where( 'detectid == detectid_obj')[0] shotid_obj = det_info["shotid"] wave_obj = det_info["wave"] linewidth = det_info["linewidth"] wave_range = [wave_obj - 2.0 * linewidth, wave_obj + 2.0 * linewidth] coords = SkyCoord(det_info["ra"], det_info["dec"], unit="deg") elif coords is not None: if shotid is not None: shotid_obj = shotid else: print("Provide a shotid") if wave_range is None: print("Provide a wavelength range to collapse. 
\ Example wave_range=[4500,4540]") else: print("Provide a detectid or both a coords and shotid") fwhm = surveyh5.root.Survey.read_where( "shotid == shotid_obj")["fwhm_virus"][0] pa = surveyh5.root.Survey.read_where("shotid == shotid_obj")["pa"][0] E = Extract() E.load_shot(shotid_obj, fibers=False) # get spatial dims: ndim = int(imsize / pixscale) center = int(ndim / 2) rad = imsize.to(u.arcsec).value # convert to arcsec value, not quantity info_result = E.get_fiberinfo_for_coord(coords, radius=rad, ffsky=ffsky) ifux, ifuy, xc, yc, ra, dec, data, error, mask = info_result # get ifu center: ifux_cen, ifuy_cen = E.convert_radec_to_ifux_ifuy(ifux, ifuy, ra, dec, coords.ra.deg, coords.dec.deg) if include_error: zarray = E.make_narrowband_image( ifux_cen, ifuy_cen, ifux, ifuy, data, mask, error=error, seeing_fac=fwhm, scale=pixscale.to(u.arcsec).value, boxsize=imsize.to(u.arcsec).value, wrange=wave_range, convolve_image=convolve_image, ) imslice = zarray[0] imerror = zarray[1] else: zarray = E.make_narrowband_image( ifux_cen, ifuy_cen, ifux, ifuy, data, mask, seeing_fac=fwhm, scale=pixscale.to(u.arcsec).value, boxsize=imsize.to(u.arcsec).value, wrange=wave_range, convolve_image=convolve_image, ) imslice = zarray[0] if subcont: zarray_blue = E.make_narrowband_image( ifux_cen, ifuy_cen, ifux, ifuy, data, mask, seeing_fac=fwhm, scale=pixscale.to(u.arcsec).value, boxsize=imsize.to(u.arcsec).value, wrange=[wave_range[0] - dcont - 10, wave_range[0] - 10], convolve_image=convolve_image, ) zarray_red = E.make_narrowband_image( ifux_cen, ifuy_cen, ifux, ifuy, data, mask, seeing_fac=fwhm, scale=pixscale.to(u.arcsec).value, boxsize=imsize.to(u.arcsec).value, wrange=[wave_range[1] + 10, wave_range[1] + dcont + 10], convolve_image=convolve_image, ) dwave = wave_range[1] - wave_range[0] im_cont = (zarray_blue[0] + zarray_red[0]) / (2 * dcont) imslice = zarray[0] - dwave * im_cont w = wcs.WCS(naxis=2) imsize = imsize.to(u.arcsec).value w.wcs.crval = [coords.ra.deg, coords.dec.deg] w.wcs.crpix = [center, center] w.wcs.ctype = ["RA---TAN", "DEC--TAN"] w.wcs.cdelt = [-pixscale.to(u.deg).value, pixscale.to(u.deg).value] # get rotation: sys_rot = 1.55 rot = 360. - (90. + pa + sys_rot) rrot = np.deg2rad(rot) # w.wcs.crota = [ 0, rot] w.wcs.pc = [[np.cos(rrot), np.sin(rrot)], [-1.0 * np.sin(rrot), np.cos(rrot)]] hdu = fits.PrimaryHDU(imslice, header=w.to_header()) E.close() if include_error: hdu_error = fits.ImageHDU(imerror, header=w.to_header()) hdu_x = fits.ImageHDU(zarray[2], header=w.to_header()) hdu_y = fits.ImageHDU(zarray[3], header=w.to_header()) return fits.HDUList([hdu, hdu_error, hdu_x, hdu_y]) else: return hdu
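# Example (a sketch): with include_error=True the function returns an HDUList
# whose second extension holds the error image (see the Returns block above);
# the detectid is the illustrative value from the docstring.
#
# >>> hdul = make_narrowband_image(detectid=2101046271, include_error=True)
# >>> im, im_err = hdul[0].data, hdul[1].data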
def get_source_spectra_mp(source_dict, shotid, manager, args): E = Extract() FibIndex = FiberIndex(args.survey) if args.survey == "hdr1": source_num_switch = 20 else: source_num_switch = 0 if len(args.matched_sources[shotid]) > 0: args.log.info("Working on shot: %s" % shotid) if args.survey == "hdr1": fwhm = args.survey_class.fwhm_moffat[args.survey_class.shotid == shotid][0] else: fwhm = args.survey_class.fwhm_virus[args.survey_class.shotid == shotid][0] moffat = E.moffat_psf(fwhm, 10.5, 0.25) if len(args.matched_sources[shotid]) > source_num_switch: E.load_shot(shotid, fibers=True, survey=args.survey) else: E.load_shot(shotid, fibers=False, survey=args.survey) for ind in args.matched_sources[shotid]: try: info_result = E.get_fiberinfo_for_coord( args.coords[ind], radius=args.rad, ffsky=args.ffsky, return_fiber_info=True, ) except TypeError: info_result = E.get_fiberinfo_for_coord( args.coords, radius=args.rad, ffsky=args.ffsky, return_fiber_info=True, ) if info_result is not None: if np.size(args.ID) > 1: args.log.info("Extracting %s" % args.ID[ind]) else: args.log.info("Extracting %s" % args.ID) ifux, ifuy, xc, yc, ra, dec, data, error, mask, fiberid, \ multiframe = info_result weights = E.build_weights(xc, yc, ifux, ifuy, moffat) # added by EMC 20210609 norm = np.sum(weights, axis=0) weights = weights / norm[np.newaxis, :] result = E.get_spectrum(data, error, mask, weights, remove_low_weights=False) spectrum_aper, spectrum_aper_error = [res for res in result] # apply aperture correction spectrum_aper /= norm spectrum_aper_error /= norm weights *= norm[np.newaxis, :] #add in the total weight of each fiber (as the sum of its weight per wavebin) if args.fiberweights: try: fiber_weights = np.array([ x for x in zip(ra, dec, np.sum(weights * mask, axis=1)) ]) except: fiber_weights = [] else: fiber_weights = [] # get fiber info no matter what so we can flag try: fiber_info = np.array([ x for x in zip(fiberid, multiframe, ra, dec, np.sum(weights * mask, axis=1)) ]) except: args.log.warning( 'Could not get fiber info, no flagging created') fiber_info = [] if len(fiber_info) > 0: try: flags = FibIndex.get_fiber_flags( coord=args.coords[ind], shotid=shotid) except: flags = FibIndex.get_fiber_flags(coord=args.coords, shotid=shotid) else: flags = None if np.size(args.ID) > 1: if args.ID[ind] in source_dict: source_dict[args.ID[ind]][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] else: source_dict[args.ID[ind]] = manager.dict() source_dict[args.ID[ind]][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] else: if args.ID in source_dict: source_dict[args.ID][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] else: source_dict[args.ID] = manager.dict() source_dict[args.ID][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] E.shoth5.close() FibIndex.close() return source_dict
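# Note: each source_dict[ID][shotid] entry filled above is a six-element list:
# [spectrum_aper, spectrum_aper_error, summed fiber weights (aperture correction),
#  fiber_weights, fiber_info, flags].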
def get_flim_sig_erin(detectid=None, coord=None, wave=None, datevobs=None, shotid=None):
    """
    Grab flim_1sigma and sn_sig on demand from calibrated fiber extractions

    Parameters
    ----------
    detectid: int
        detectid for source; its shotid, wave, coord and fwhm are re-read
        from the module-level source_table
    coord
        astropy SkyCoord object
    wave
        central wavelength
    datevobs: str
        observation string YYYYMMDDvSSS; built from shotid if not given
    shotid
        observation ID

    Returns
    -------
    flim_1sigma
        the 1 sigma sensitivity calculated over 7 pixels of the PSF-weighted
        extracted spectrum, divided by the aperture correction
    apcor
        the mean aperture correction over the same 7 pixels
    """
    detectid_obj = detectid
    det_info = source_table[source_table['detectid'] == detectid][0]
    shotid = det_info['shotid']
    wave = det_info['wave']
    coord = SkyCoord(ra=det_info['ra'], dec=det_info['dec'], unit='deg')
    fwhm = det_info['fwhm']

    if datevobs is None:
        datevobs = '{}v{}'.format(str(shotid)[0:8], str(shotid)[8:])

    if shotid is None:
        shotid_obj = int(datevobs[0:8] + datevobs[9:])
    else:
        shotid_obj = shotid

    try:
        E = Extract()
        E.load_shot(datevobs, fibers=False)
        info_result = E.get_fiberinfo_for_coord(coord, radius=3.5,
                                                fiber_lower_limit=2)
        ifux, ifuy, xc, yc, ra, dec, data, error, mask = info_result

        # build the PSF-weighted extraction weights
        moffat = E.moffat_psf(fwhm, 10.5, 0.25)
        I = None
        fac = None
        weights, I, fac = E.build_weights(xc, yc, ifux, ifuy, moffat,
                                          I=I, fac=fac, return_I_fac=True)

        # normalize the weights in the fiber direction
        norm = np.sum(weights, axis=0)
        weights = weights / norm

        result = E.get_spectrum(data, error, mask, weights,
                                remove_low_weights=False)
        spec, spec_err = [res for res in result]

        w_index = np.where(E.wave >= wave)[0][0]
        nfib = np.shape(weights)[0]

        # 1 sigma limit and S/N over the 7 spectral pixels centred on the line
        flim_1sigma = 2 * np.sqrt(np.nansum((spec_err[w_index - 3:w_index + 4]) ** 2))
        sn_sig = 2 * np.sum(spec[w_index - 3:w_index + 4]) / flim_1sigma
        npix = np.sum(np.isfinite(spec[w_index - 3:w_index + 4]))
        apcor = np.sum(norm[w_index - 3:w_index + 4]) / len(norm[w_index - 3:w_index + 4])

        E.close()

        # XXX divide by apcor ....
        return flim_1sigma / apcor, apcor
    except Exception:
        return 999
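# Example (a sketch; assumes source_table is already loaded at module level and
# that the illustrative detectid appears in it):
#
# >>> flim_1sigma, apcor = get_flim_sig_erin(detectid=2101602788)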
# Imports implied by this script fragment:
import sys

import astropy.units as u
from bokeh.layouts import column, row
from bokeh.models import RangeTool
from bokeh.plotting import output_file, save

from hetdex_api.extract import Extract

select.ygrid.grid_line_color = None

range_tool = RangeTool(x_range=plot.x_range)
range_tool.overlay.fill_color = "navy"
range_tool.overlay.fill_alpha = 0.2

select.add_tools(range_tool)
select.toolbar.active_multi = range_tool

imageplot.image(image=[image[0]], x=image[1].min(), y=image[2].min(),
                dw=image[1].max() - image[1].min(),
                dh=image[2].max() - image[2].min())
imageplot.scatter(sx, sy, marker='circle_x', size=15,
                  line_color="orange", fill_color="red", alpha=0.75)

output_file(name + ".html", title=name)
save(row(column(plot, select), imageplot))

# Initiate class
E = Extract()

# Load a given shot
E.load_shot(sys.argv[1])

RA = E.fibers.hdfile.root.Shot.cols.ra[:][0]
Dec = E.fibers.hdfile.root.Shot.cols.dec[:][0]

# Get SDSS spectra in the field
# pip install --user astroquery
from astroquery.sdss import SDSS
from astropy import coordinates as coords

pos = coords.SkyCoord(RA * u.deg, Dec * u.deg, frame='fk5')
xid = SDSS.query_region(pos, radius=11 * u.arcmin, spectro=True,
                        photoobj_fields=['ra', 'dec', 'u', 'g', 'r', 'i', 'z'],
                        specobj_fields=['plate', 'mjd', 'fiberID', 'z',
                                        'specobjid', 'run2d', 'instrument'])
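# The query above returns an astropy Table of SDSS spectroscopic matches in the
# field, e.g. (a sketch):
#
# >>> print(xid['ra', 'dec', 'z'][:5])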
def get_source_spectra(shotid, args): E = Extract() source_dict = {} if args.survey == "hdr1": source_num_switch = 20 else: source_num_switch = 0 if len(args.matched_sources[shotid]) > 0: args.log.info("Working on shot: %s" % shotid) if args.survey == "hdr1": fwhm = args.survey_class.fwhm_moffat[args.survey_class.shotid == shotid][0] else: fwhm = args.survey_class.fwhm_virus[args.survey_class.shotid == shotid][0] moffat = E.moffat_psf(fwhm, 10.5, 0.25) if len(args.matched_sources[shotid]) > source_num_switch: E.load_shot(shotid, fibers=True, survey=args.survey) else: E.load_shot(shotid, fibers=False, survey=args.survey) for ind in args.matched_sources[shotid]: try: info_result = E.get_fiberinfo_for_coord( args.coords[ind], radius=args.rad, ffsky=args.ffsky, return_fiber_info=True, ) except TypeError: info_result = E.get_fiberinfo_for_coord( args.coords, radius=args.rad, ffsky=args.ffsky, return_fiber_info=True, ) if info_result is not None: try: args.log.info("Extracting %s" % args.ID[ind]) except: args.log.info("Extracting %s" % args.ID) ifux, ifuy, xc, yc, ra, dec, data, error, mask, fiberid, \ multiframe = info_result weights = E.build_weights(xc, yc, ifux, ifuy, moffat) result = E.get_spectrum(data, error, mask, weights) spectrum_aper, spectrum_aper_error = [res for res in result] #add in the total weight of each fiber (as the sum of its weight per wavebin) if args.fiberweights: try: fiber_weights = np.array( [x for x in zip(ra, dec, np.sum(weights*mask, axis=1))]) except: fiber_weights = [] else: fiber_weights = [] # get fiber info no matter what so we can flag try: fiber_info = np.array( [ x for x in zip(fiberid, multiframe, ra, dec, np.sum(weights*mask, axis=1))]) except: fiber_info = [] if len(fiber_info) > 0: flags = get_flags(fiber_info) else: flags = None if np.size(args.ID) > 1: if args.ID[ind] in source_dict: source_dict[args.ID[ind]][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] else: source_dict[args.ID[ind]] = dict() source_dict[args.ID[ind]][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] else: if args.ID in source_dict: source_dict[args.ID][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] else: source_dict[args.ID] = dict() source_dict[args.ID][shotid] = [ spectrum_aper, spectrum_aper_error, weights.sum(axis=0), fiber_weights, fiber_info, flags, ] E.shoth5.close() return source_dict
from hetdex_api.extract import Extract

import argparse
import sys

from astropy.io import fits

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--shot", type=str, help="Shotid.")
args = parser.parse_args(sys.argv[1:])
shot = args.shot

E = Extract()
E.load_shot(shot, survey="hdr2")

gmag_limit = 22.
radius = 50.
psf = E.model_psf(gmag_limit=gmag_limit, radius=radius)

hdr = fits.Header()
hdr["SHOT"] = shot
hdr["GMAG_LIMIT"] = gmag_limit
hdr["RADIUS"] = radius

hdu = fits.PrimaryHDU(psf[0], header=hdr)
hdu_x, hdu_y = fits.ImageHDU(psf[1]), fits.ImageHDU(psf[2])
hdul = fits.HDUList([hdu, hdu_x, hdu_y])
hdul.writeto("/data/05865/maja_n/im2d/psf_hdr2/" + shot + ".fits", overwrite=True)
print("Wrote to /data/05865/maja_n/im2d/psf_hdr2/" + shot + ".fits")
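# The output file holds the PSF model in the primary HDU and the x/y grids in the
# two image extensions, so it can be read back with, e.g. (a sketch):
#
# >>> psf, xgrid, ygrid = [h.data for h in
# ...                      fits.open("/data/05865/maja_n/im2d/psf_hdr2/" + shot + ".fits")]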
class ShotSensitivity(object): """ Generate completeness estimates for a shot on the fly, using the Extract class. This is written to be as backward compatible as possible with the user interface of hdf5_sensitivity_cubes:SensitivityCubeHDF5Container A lot of this is adapted from `hetdex_tools/get_spec.py` `hetdex_api/extract.py` and scripts written by Erin Mentuch Cooper. The aperture correction stuff was taken from 'Remedy' by Greg Zeimann: grzeimann Remedy Parameters ---------- datevshot : str the name of the shot in the form YYYYMMDDvSSS release : str (Optional) The name of the release e.g. hdr2.1 defaults to latest flim_model : str The flux limit model to convert the noise to completeness, defaults to the latest (which might not be compatible with the release, see README) rad : float A radius in arcseconds to grab fibers from when computing the flux limit, default 3.5 ffsky : boolean Full frame sky subtraction (default: False) wavenpix : int Number of wave pixels either side of the pixel the source is on to add in quadrature, or sets the size of tophat convolution when producing data cubes as 2*wavenpix + 1 (default 3) d25scale : float Sets the multiplier for the galaxy masks applied (default 3.0) sclean_bad : bool Replace bad data using the sclean tool (see hetdex_api.extract:Extract) verbose : bool Print information about the flux limit model to the screen """ def __init__(self, datevshot, release=None, flim_model=None, rad=3.5, ffsky=False, wavenpix=3, d25scale=3.0, verbose=False, sclean_bad=True, log_level="WARNING"): self.conf = HDRconfig() self.extractor = Extract() self.shotid = int(datevshot.replace("v", "")) self.date = datevshot[:8] self.rad = rad self.ffsky = ffsky self.wavenpix = wavenpix self.sclean_bad = sclean_bad logger = logging.getLogger(name="ShotSensitivity") logger.setLevel(log_level) if verbose: raise DeprecationWarning( "Using verbose is deprecated, set log_level instead") logger.setLevel("DEBUG") logger.info("shotid: {:d}".format(self.shotid)) if not release: self.release = self.conf.LATEST_HDR_NAME else: self.release = release logger.info("Data release: {:s}".format(self.release)) self.survey = Survey(survey=self.release) # Set up flux limit model self.f50_from_noise, self.sinterp, interp_sigmas \ = return_flux_limit_model(flim_model, cache_sim_interp=False, verbose=verbose) # Generate astrometry for this shot survey_sel = (self.survey.shotid == self.shotid) self.shot_pa = self.survey.pa[survey_sel][0] self.shot_ra = self.survey.ra[survey_sel][0] self.shot_dec = self.survey.dec[survey_sel][0] rot = 360.0 - (self.shot_pa + 90.) self.tp = TangentPlane(self.shot_ra, self.shot_dec, rot) #Set up masking logger.info("Using d25scale {:f}".format(d25scale)) self.setup_mask(d25scale) # Set up spectral extraction if release == "hdr1": fwhm = self.survey.fwhm_moffat[survey_sel][0] else: fwhm = self.survey.fwhm_virus[survey_sel][0] logger.info("Using Moffat PSF with FWHM {:f}".format(fwhm)) self.moffat = self.extractor.moffat_psf(fwhm, 3. 
* rad, 0.25) self.extractor.load_shot(self.shotid, fibers=True, survey=self.release) # Set up the focal plane astrometry fplane_table = self.extractor.shoth5.root.Astrometry.fplane # Bit of a hack to avoid changing pyhetdex with NamedTemporaryFile(mode='w') as tpf: for row in fplane_table.iterrows(): tpf.write( "{:03d} {:8.5f} {:8.5f} {:03d} {:03d} {:03d} {:8.5f} {:8.5f}\n" .format(row['ifuslot'], row['fpx'], row['fpy'], row['specid'], row['specslot'], row['ifuid'], row['ifurot'], row['platesc'])) tpf.seek(0) self.fplane = FPlane(tpf.name) def setup_mask(self, d25scale): """ Setup the masking, to speed up checking if sources are in the mask later. This is run at initialisation, so you need only run again to change `d25scale` Parameters ---------- d25scale : float Sets the multiplier for the galaxy masks applied (default 3.5) """ logger = logging.getLogger(name="ShotSensitivity") # see if this is a bad shot #print("Bad shot from ", self.conf.badshot) badshot = loadtxt(self.conf.badshot, dtype=int) badtpshots = loadtxt(self.conf.lowtpshots, dtype=int) if (self.shotid in badshot) or (self.shotid in badtpshots): logger.warn("Shot is in bad. Making mask zero everywhere") self.badshot = True else: self.badshot = False # set up bad amps logger.info("Bad amps from {:s}".format(self.conf.badamp)) self.bad_amps = Table.read(self.conf.badamp) sel_shot = (self.bad_amps["shotid"] == self.shotid) self.bad_amps = self.bad_amps[sel_shot] # set up galaxy mask logger.info("Galaxy mask from {:s}".format(self.conf.rc3cat)) galaxy_cat = Table.read(self.conf.rc3cat, format='ascii') gal_coords = SkyCoord(galaxy_cat['Coords'], frame='icrs') shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec, unit="deg") sel_reg = where(shot_coords.separation(gal_coords) < 1. * u.deg)[0] self.gal_regions = [] if len(sel_reg) > 0: for idx in sel_reg: self.gal_regions.append( create_gal_ellipse(galaxy_cat, row_index=idx, d25scale=d25scale)) # set up meteor mask # check if there are any meteors in the shot: logger.info("Meteors from {:s}".format(self.conf.meteor)) self.met_tab = Table.read(self.conf.meteor, format="ascii") self.met_tab = self.met_tab[self.shotid == self.met_tab["shotid"]] def extract_ifu_sensitivity_cube(self, ifuslot, nx=31, ny=31, ifusize=62, generate_sigma_array=True, cache_sim_interp=True): """ Extract the sensitivity cube from IFU `ifuslot` Parameters ---------- ifuslot : string the IFU slot to extract nx, ny : int the dimensions in pixels of the cube (default 31,31) ifusize : float the length of the side of the cube in arcseconds, default is 62 arcseconds generate_sigma_array: bool this fills the 3D array of noise, this makes it quite slow to run, so if you want to just use the cube for the astrometry do not use this option (default: True) cache_sim_interp : bool cache the simulation interpolator to speed up execution (default: True) Returns ------- scube : hetdex_api.flux_limits.sensitivity_cube:SensitivityCube the sensitivity cube """ waves = self.extractor.get_wave() wrange = [waves[0], waves[-1]] nz = len(waves) pa = self.shot_pa ifu = self.fplane.difus_ifuslot[ifuslot.replace("ifuslot_", "")] ra_ifu, dec_ifu = self.tp.xy2raDec(ifu.y, ifu.x) scube = create_sensitivity_cube_from_astrom( float(ra_ifu), float(dec_ifu), pa, nx, ny, nz, ifusize, wrange=wrange, cache_sim_interp=cache_sim_interp) if generate_sigma_array: ix, iy = meshgrid(arange(0, nx, 1.0), arange(0, ny, 1.0)) all_ra, all_dec, junk = scube.wcs.all_pix2world( ix.ravel(), iy.ravel(), [500.], 0) noises, norm, mask = self.get_f50(all_ra, 
all_dec, None, 1.0, direct_sigmas=True) sigmas = noises.ravel(order="F").reshape(nz, ny, nx) mask = logical_not(mask.reshape(ny, nx)) mask3d = repeat(mask[newaxis, :, :], sigmas.shape[0], axis=0) scube.sigmas = MaskedArray(sigmas, mask=mask3d, fill_value=999.0) return scube def get_f50(self, ra, dec, wave, sncut, direct_sigmas=False, nmax=5000, return_amp=False, linewidth=None): """ Return flux at 50% for the input positions most of this is cut and paste from `hetdex_tools/get_spec.py` and the old sensitivity_cube:SensitivityCube class. This class splits the data up into sets of up to `nmax` to save memory Parameters ---------- ra, dec : array right ascension and dec in degrees wave : array wavelength in Angstroms. If None, then return flux limits for all wave bins. sncut : float cut in detection significance that defines this catalogue direct_sigmas : bool return the noise values directly without passing them through the noise to 50% completeness flux (default = False) nmax : int maximum number of sources to consider at once, otherwise split up and loop. return_amp : bool if True return amplifier information for the closest fiber to each source (default = False) linewidth : array optionally pass the linewidth of the source (in AA) to activate the linewidth dependent part of the completeness model (default = None). Returns ------- f50s : array 50% completeness. If outside of cube return 999. If None was passed for wave this is a 2D array of ra, dec and all wavelengths mask : array Only returned if `wave=None`. This mask is True where the ra/dec positions passed are in good regions of data amp : array Only returned in `return_amp=True`, it's an array of amplifier information for the closest fiber to each source """ if type(wave) != type(None): wave_passed = True else: wave_passed = False try: nsrc = len(ra) if wave_passed: # Trim stuff very far away gal_coords = SkyCoord(ra=ra, dec=dec, unit="deg") shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec, unit="deg") sel = array(shot_coords.separation(gal_coords) < 2.0 * u.deg) ra_sel = array(ra)[sel] dec_sel = array(dec)[sel] wave_sel = array(wave)[sel] nsel = len(ra_sel) else: # If not passing wave always loop # over all ra/dec in range ra_sel = ra dec_sel = dec wave_sel = None nsel = len(ra) nsplit = int(ceil(float(nsel) / float(nmax))) except TypeError as e: # If the user does not pass arrays nsplit = 1 nsrc = 1 nsel = 1 sel = True ra_sel = array([ra]) dec_sel = array([dec]) wave_sel = array([wave]) # Array to store output actually in the shot f50s_sel = [] mask_sel = [] amp_sel = [] norm_sel = [] wave_rect = self.extractor.get_wave() pixsize_aa = wave_rect[1] - wave_rect[0] # This will give 999 once the noise is scaled suitably badval = 999 * 1e17 / pixsize_aa # Arrays to store full output f50s = badval * ones(nsrc) mask = ones(nsrc) norm = ones(nsrc) amp = array(["notinshot"] * nsrc) if nsel > 0: for i in range(nsplit): tra = ra_sel[i * nmax:(i + 1) * nmax] tdec = dec_sel[i * nmax:(i + 1) * nmax] if wave_passed: twave = wave_sel[i * nmax:(i + 1) * nmax] if not self.badshot: tf50s, tamp, tnorm = self._get_f50_worker( tra, tdec, twave, sncut, direct_sigmas=direct_sigmas, linewidth=linewidth) else: tamp = ["bad"] * len(tra) tf50s = [badval] * len(tra) tnorm = [1.0] * len(tra) else: # if bad shot then the mask is all set to zero tf50s, tmask, tamp, tnorm = \ self._get_f50_worker(tra, tdec, None, sncut, direct_sigmas = direct_sigmas, linewidth = linewidth) mask_sel.extend(tmask) f50s_sel.extend(tf50s) amp_sel.extend(tamp) 
norm_sel.extend(tnorm) if return_amp: if wave_passed: # copy to output f50s[sel] = f50s_sel amp[sel] = amp_sel norm[sel] = norm_sel return f50s, norm, amp else: return array(f50s_sel), array(norm_sel), array( mask_sel), array(amp_sel) else: if wave_passed: f50s[sel] = f50s_sel norm[sel] = norm_sel return f50s, norm else: return array(f50s_sel), array(norm_sel), array(mask_sel) def _get_f50_worker(self, ra, dec, wave, sncut, direct_sigmas=False, linewidth=None): """ Return flux at 50% for the input positions most of this is cut and paste from `hetdex_tools/get_spec.py` and the old sensitivity_cube:SensitivityCube class. Parameters ---------- ra, dec : array right ascension and dec in degrees wave : array wavelength in Angstroms. If None, then return flux limits for all wave bins. sncut : float cut in detection significance that defines this catalogue direct_sigmas : bool return the noise values directly without passing them through the noise to 50% completeness flux linewidth : array optionally pass the linewidth of the source (in AA) to activate the linewidth dependent part of the completeness model (default = None). Returns ------- f50s : array 50% completeness. If outside of cube return 999. If None was passed for wave this is a 2D array of ra, dec and all wavelengths norm_all : array the aperture corrections mask : array Only returned if `wave=None`. This mask is True where the ra/dec positions passed are in good regions of data amp : array Only returned in `return_amp=True`, it's an array of amplifier information for the closest fiber to each source """ logger = logging.getLogger(name="ShotSensitivity") try: [x for x in ra] except TypeError: ra = array([ra]) dec = array([dec]) wave = array([wave]) coords = SkyCoord(ra=ra, dec=dec, unit="deg") wave_rect = self.extractor.get_wave() pixsize_aa = wave_rect[1] - wave_rect[0] # This will give 999 once the noise is scaled suitably badval = 999 * 1e17 / pixsize_aa # Size of window in wave elements filter_len = 2 * self.wavenpix + 1 if type(wave) != type(None): wave_passed = True else: wave_passed = False convolution_filter = ones(filter_len) mask = True * ones(len(coords), dtype=int) noise = [] info_results = self.extractor.get_fiberinfo_for_coords( coords, radius=self.rad, ffsky=self.ffsky, return_fiber_info=True, fiber_lower_limit=2, verbose=False) id_, aseps, aifux, aifuy, axc, ayc, ara, adec, adata, aerror, afmask, afiberid, \ amultiframe = info_results I = None fac = None norm_all = [] amp = [] nan_fib_mask = [] for i, c in enumerate(coords): sel = (id_ == i) if type(wave) != type(None): logger.debug("Running on source {:f} {:f} {:f}".format( ra[i], dec[i], wave[i])) else: logger.debug("Running on position {:f} {:f}".format( ra[i], dec[i])) logger.debug("Found {:d} fibers".format(sum(sel))) if sum(sel) > 0: # fiber properties xc = axc[sel][0] yc = ayc[sel][0] ifux = aifux[sel] ifuy = aifuy[sel] data = adata[sel] error = aerror[sel] fmask = afmask[sel] fiberid = afiberid[sel] multiframe = amultiframe[sel] seps = aseps[sel] # Flag the zero elements as bad fmask[(abs(data) < 1e-30) | (abs(error) < 1e-30)] = False iclosest = argmin(seps) amp.append(fiberid[iclosest]) if len(self.bad_amps) > 0: amp_flag = amp_flag_from_fiberid(fiberid[iclosest], self.bad_amps) else: amp_flag = True # XXX Could be faster - reloads the file every run meteor_flag = meteor_flag_from_coords(c, self.shotid) if not (amp_flag and meteor_flag): logger.debug("The data here are bad, position is masked") if wave_passed: noise.append(badval) norm_all.append(1.0) # value 
doesn't matter as in amp flag nan_fib_mask.append(True) continue else: mask[i] = False weights, I, fac = self.extractor.build_weights( xc, yc, ifux, ifuy, self.moffat, I=I, fac=fac, return_I_fac=True) # (See Greg Zeimann's Remedy code) # normalized in the fiber direction norm = sum(weights, axis=0) weights = weights / norm result = self.extractor.get_spectrum( data, error, fmask, weights, remove_low_weights=False, sclean_bad=self.sclean_bad, return_scleaned_mask=True) spectrum_aper, spectrum_aper_error, scleaned = [ res for res in result ] if wave_passed: index = where(wave_rect >= wave[i])[0][0] ilo = index - self.wavenpix ihi = index + self.wavenpix + 1 # If lower index less than zero, truncate if ilo < 0: ilo = 0 if ihi < 0: ihi = 0 # Output lots of information for very detailed debugging if logger.getEffectiveLevel() == logging.DEBUG: logger.debug("Table of fibers:") logger.debug( "# fiberid wave_index ifux ifuy weight noise" ) for fibidx, fid in enumerate(fiberid): for wi, (tw, tnoise) in enumerate( zip((weights * norm)[fibidx, ilo:ihi], error[fibidx, ilo:ihi]), ilo): logger.debug( "{:s} {:d} {:f} {:f} {:f} {:f}".format( fid, wi, ifux[fibidx], ifuy[fibidx], tw, tnoise)) # Mask source if bad values within the central 3 wavebins nan_fib = bad_central_mask(weights * norm, logical_not(fmask), index) nan_fib_mask.append(nan_fib) # Account for NaN and masked spectral bins bad = isnan(spectrum_aper_error[ilo:ihi]) goodfrac = 1.0 - sum(bad) / len(bad) if all(isnan(spectrum_aper_error[ilo:ihi])): sum_sq = badval else: sum_sq = \ sqrt(nansum(square(spectrum_aper_error[ilo:ihi])/goodfrac)) norm_all.append(mean(norm[ilo:ihi])) noise.append(sum_sq) else: logger.debug( "Convolving with window to get flux limits versus wave" ) # Use astropy convolution so NaNs are ignored convolved_variance = convolve(square(spectrum_aper_error), convolution_filter, normalize_kernel=False) std = sqrt(convolved_variance) # Also need to convolve aperture corrections to get # a total apcor across the wavelength window convolved_norm = convolve(norm, convolution_filter, normalize_kernel=True) # To get mean account for the edges in # the convolution for iend in range(self.wavenpix): fac = filter_len / (filter_len + iend - self.wavenpix) convolved_norm[iend] *= fac convolved_norm[-iend - 1] *= fac # Mask wavelengths with too many bad pixels # equivalent to nan_fib in the wave != None mode wunorm = weights * norm for index in range(len(convolved_variance)): if not bad_central_mask(wunorm, logical_not(fmask), index): std[index] = badval noise.append(std) norm_all.append(convolved_norm) else: if wave_passed: noise.append(badval) norm_all.append(1.0) amp.append("000") nan_fib_mask.append(True) else: noise.append(badval * ones(len(wave_rect))) norm_all.append(ones(len(wave_rect))) amp.append("000") mask[i] = False # Apply the galaxy mask gal_mask = ones(len(coords), dtype=int) for gal_region in self.gal_regions: dummy_wcs = create_dummy_wcs(gal_region.center, imsize=2 * gal_region.height) # zero if near galaxy gal_mask = gal_mask & invert(gal_region.contains( coords, dummy_wcs)) noise = array(noise) snoise = pixsize_aa * 1e-17 * noise if wave_passed: bad = (gal_mask < 0.5) | (snoise > 998) | isnan(snoise) | invert(nan_fib_mask) normnoise = snoise / norm_all if not direct_sigmas: normnoise = self.f50_from_noise(normnoise, wave, sncut, linewidth=linewidth) normnoise[bad] = 999. 
return normnoise, amp, norm_all else: mask[gal_mask < 0.5] = False if self.badshot: mask[:] = False bad = (snoise > 998) | logical_not(isfinite(snoise)) normnoise = snoise / norm_all if not direct_sigmas: normnoise = self.f50_from_noise(normnoise, wave, sncut, linewidth=linewidth) normnoise[bad] = 999 return normnoise, mask, amp, norm_all def return_completeness(self, flux, ra, dec, lambda_, sncut, f50s=None, linewidth=None): """ Return completeness at a 3D position as an array. If for whatever reason the completeness is NaN, it's replaced by 0.0. This is cut and paste from sensitivity_cube:SensitivityCube Parameters ---------- flux : array fluxes of objects ra, dec : array right ascension and dec in degrees lambda_ : array wavelength in Angstrom sncut : float the detection significance (S/N) cut applied to the data f50s : array (optional) optional array of precomputed 50% completeness fluxes. Otherwise the method will compute them itself from the ra/dec/linewidth (default:None) linewidth : array (optional) optionally pass the linewidth of the source (in AA) to activate the linewidth dependent part of the completeness model (default = None). Only does anything when you don't pass the f50s (default: None) Return ------ fracdet : array fraction detected Raises ------ WavelengthException : Annoys user if they pass wavelength outside of VIRUS range """ logger = logging.getLogger(name="ShotSensitivity") try: if lambda_[0] < 3000.0 or lambda_[0] > 6000.0: raise WavelengthException("""Odd wavelength value. Are you sure it's in Angstrom?""") except TypeError as e: if lambda_ < 3000.0 or lambda_ > 6000.0: raise WavelengthException("""Odd wavelength value. Are you sure it's in Angstrom?""") if type(f50s) == type(None): f50s, norm = self.get_f50(ra, dec, lambda_, sncut, linewidth=linewidth) try: # to stop bad values breaking interpolation bad = (f50s > 998) f50s[bad] = 1e-16 fracdet = self.sinterp(flux, f50s, lambda_, sncut) #print(min(flux), max(flux), min(f50s), max(f50s)) # check to see if we're passing multiple fluxes # for one f50 value if any(bad): logger.debug("There are bad values here to mask") if len(f50s) == 1: logger.debug("Just one ra/dec/wave passed.") fracdet[:] = 0.0 f50s[:] = 999.0 else: fracdet[bad] = 0.0 f50s[bad] = 999.0 except IndexError as e: print("Interpolation failed!") print(min(flux), max(flux), min(f50s), max(f50s)) print(min(lambda_), max(lambda_)) raise e try: fracdet[isnan(fracdet)] = 0.0 except TypeError: if isnan(fracdet): fracdet = 0.0 return fracdet def close(self): """ Close the Extractor object (especially if it has a Shot HDF file open) """ self.extractor.close() def __enter__(self): """ Added to support using the `with` statement """ return self def __exit__(self, type_, value, traceback): """ Support tidying up after using the `with` statement """ self.close()
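# Example (a sketch; the datevshot string is a placeholder and the positions,
# wavelength and S/N cut are illustrative). ShotSensitivity supports the context
# manager protocol defined above:
#
# >>> with ShotSensitivity("20190201v012", release="hdr2.1") as s:
# ...     f50, apcor = s.get_f50([150.025], [2.087], [4500.0], 5.0)
# ...     comp = s.return_completeness(array([1e-16]), [150.025], [2.087],
# ...                                  [4500.0], 5.0, f50s=f50)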