def get_destriped(self, data_minus,
                  destripe_pattern=64,
                  use_destripe_mask=True,
                  sub_horizontal_median=True,
                  remove_vertical=False):
    """Remove readout stripe pattern from *data_minus*.

    When *use_destripe_mask* is set, pixels that are non-finite or flagged
    in ``self.destripe_mask`` are excluded from the stripe estimation;
    otherwise no mask is used.  The remaining keyword arguments are passed
    through to ``destriper.get_destriped``.
    """
    from libs.destriper import destriper

    if use_destripe_mask:
        mask = ~np.isfinite(data_minus) | self.destripe_mask
    else:
        mask = None

    return destriper.get_destriped(data_minus,
                                   mask,
                                   pattern=destripe_pattern,
                                   hori=sub_horizontal_median,
                                   remove_vertical=remove_vertical)
def get_data1(self, i, hori=True, vert=False):
    """Load the i-th object frame, destripe it, and optionally remove the
    per-row median (masked) from each row when *vert* is True."""
    from libs.destriper import destriper

    filename = self.obj_filenames[i]
    data = pyfits.open(filename)[0].data

    destrip_mask = ~np.isfinite(data) | self.destripe_mask
    data = destriper.get_destriped(data, destrip_mask,
                                   pattern=64, hori=hori)

    if not vert:
        return data

    #m = [np.median(row[4:-4].compressed()) for row in dd1]
    masked = np.ma.array(data, mask=destrip_mask)
    row_median = np.ma.median(masked, axis=1)
    #m = [np.ma.median(d) for d in dd1]
    return data - row_median[:, np.newaxis]
def process(self, recipe, band, obsids, frametypes):
    """Extract 1-d spectra (spec / variance / S/N) from an AB-nodded IGRINS
    observation and store them as pipeline products.

    Parameters
    ----------
    recipe : str
        One of "A0V_AB", "STELLAR_AB", "EXTENDED_AB", "EXTENDED_ONOFF".
        A0V standards additionally get a quick telluric flattening.
    band : str
        Spectrograph band; used to query calibration DBs and the gain.
    obsids : list
        Observation ids; ``obsids[0]`` is used as the master obsid.
    frametypes : list
        Per-obsid nod positions ("A"/"B"), parallel to *obsids*.

    Side effects: writes combined-image, variance-map, spec, variance and
    S/N FITS products via ``igr_storage`` and (for A0V) updates the "a0v"
    product database.

    NOTE(review): a second ``process`` definition later in this file has
    the same name -- if both live in the same scope the later one shadows
    this one; confirm whether this version is dead code.
    """
    igr_path = self.igr_path
    igr_storage = self.igr_storage

    # Only A0V standards get the telluric flattening step at the end.
    if recipe == "A0V_AB":
        DO_STD = True
        #FIX_TELLURIC=False
    elif recipe == "STELLAR_AB":
        DO_STD = False
        #FIX_TELLURIC=True
    elif recipe == "EXTENDED_AB":
        DO_STD = False
        #FIX_TELLURIC=True
    elif recipe == "EXTENDED_ONOFF":
        DO_STD = False
        #FIX_TELLURIC=True
    # NOTE(review): an unrecognized recipe leaves DO_STD undefined and
    # would raise NameError at the "if DO_STD:" check below.

    if 1:  # set up product databases and query calibration basenames
        obj_filenames = igr_path.get_filenames(band, obsids)
        master_obsid = obsids[0]
        tgt_basename = os.path.splitext(os.path.basename(obj_filenames[0]))[0]

        db = {}
        basenames = {}

        db_types = ["flat_off", "flat_on", "thar", "sky"]
        for db_type in db_types:
            db_name = igr_path.get_section_filename_base("PRIMARY_CALIB_PATH",
                                                         "%s.db" % db_type,
                                                         )
            db[db_type] = ProductDB(db_name)

        # db on output path
        db_types = ["a0v"]
        for db_type in db_types:
            db_name = igr_path.get_section_filename_base("OUTDATA_PATH",
                                                         "%s.db" % db_type,
                                                         )
            db[db_type] = ProductDB(db_name)

        # to get basenames
        db_types = ["flat_off", "flat_on", "thar", "sky"]
        # if FIX_TELLURIC:
        #     db_types.append("a0v")
        for db_type in db_types:
            basenames[db_type] = db[db_type].query(band, master_obsid)

    if 1: # make aperture
        from libs.storage_descriptions import SKY_WVLSOL_JSON_DESC

        sky_basename = db["sky"].query(band, master_obsid)
        wvlsol_products = igr_storage.load([SKY_WVLSOL_JSON_DESC],
                                           sky_basename)[SKY_WVLSOL_JSON_DESC]

        orders_w_solutions = wvlsol_products["orders"]
        # Python 2 map: list of per-order wavelength arrays.
        wvl_solutions = map(np.array, wvlsol_products["wvl_sol"])

        from libs.storage_descriptions import ONED_SPEC_JSON_DESC
        raw_spec_products = igr_storage.load([ONED_SPEC_JSON_DESC],
                                             sky_basename)

        from recipe_wvlsol_sky import load_aperture2

        ap = load_aperture2(igr_storage, band, master_obsid,
                            db["flat_on"],
                            raw_spec_products[ONED_SPEC_JSON_DESC]["orders"],
                            orders_w_solutions)

        # This should be saved somewhere and loaded, instead of making it
        # every time.
        order_map = ap.make_order_map()
        slitpos_map = ap.make_slitpos_map()
        order_map2 = ap.make_order_map(mask_top_bottom=True)

    if 1:  # load pixel masks, the order flat, and the slit-offset map
        from libs.storage_descriptions import (HOTPIX_MASK_DESC,
                                               DEADPIX_MASK_DESC,
                                               ORDER_FLAT_IM_DESC,
                                               ORDER_FLAT_JSON_DESC,
                                               FLAT_MASK_DESC)

        hotpix_mask = igr_storage.load([HOTPIX_MASK_DESC],
                                       basenames["flat_off"])[HOTPIX_MASK_DESC]
        deadpix_mask = igr_storage.load([DEADPIX_MASK_DESC],
                                        basenames["flat_on"])[DEADPIX_MASK_DESC]

        pix_mask = hotpix_mask.data | deadpix_mask.data

        # aperture_solution_products = PipelineProducts.load(aperture_solutions_name)

        orderflat_ = igr_storage.load([ORDER_FLAT_IM_DESC],
                                      basenames["flat_on"])[ORDER_FLAT_IM_DESC]
        orderflat = orderflat_.data
        # bad pixels are NaN-ed so they propagate through the flattening
        orderflat[pix_mask] = np.nan

        orderflat_json = igr_storage.load([ORDER_FLAT_JSON_DESC],
                                          basenames["flat_on"])[ORDER_FLAT_JSON_DESC]
        order_flat_meanspec = np.array(orderflat_json["mean_order_specs"])

        # flat_normed = igr_storage.load([FLAT_NORMED_DESC],
        #                                basenames["flat_on"])[FLAT_NORMED_DESC]

        flat_mask = igr_storage.load([FLAT_MASK_DESC],
                                     basenames["flat_on"])[FLAT_MASK_DESC]
        # pixels illuminated in the flat AND falling inside an order
        bias_mask = flat_mask.data & (order_map2 > 0)

        SLITOFFSET_FITS_DESC = ("PRIMARY_CALIB_PATH", "SKY_", ".slitoffset_map.fits")
        prod_ = igr_storage.load([SLITOFFSET_FITS_DESC],
                                 basenames["sky"])[SLITOFFSET_FITS_DESC]
        #fn = sky_path.get_secondary_path("slitoffset_map.fits")
        slitoffset_map = prod_.data

    if 1:  # split object frames into A and B nod positions
        abba_names = obj_filenames

        def filter_abba_names(abba_names, frametypes, frametype):
            # keep only the filenames whose frametype matches
            return [an for an, ft in zip(abba_names, frametypes) if ft == frametype]

        a_name_list = filter_abba_names(abba_names, frametypes, "A")
        b_name_list = filter_abba_names(abba_names, frametypes, "B")

        if recipe in ["A0V_AB", "STELLAR_AB"]:
            IF_POINT_SOURCE = True
        elif recipe in ["EXTENDED_AB", "EXTENDED_ONOFF"]:
            IF_POINT_SOURCE = False
        else:
            print "Unknown recipe : %s" % recipe
            # NOTE(review): falls through with IF_POINT_SOURCE undefined.

    if 1:
        #ab_names = ab_names_list[0]

        # master_hdu = pyfits.open(a_name_list[0])[0]

        a_list = [pyfits.open(name)[0].data \
                  for name in a_name_list]
        b_list = [pyfits.open(name)[0].data \
                  for name in b_name_list]

        # we may need to detrip

        # first define extract profile (gaussian).

        # dx = 100

        if IF_POINT_SOURCE: # if point source
            # for point sources, variance estimation becomes wrong
            # if lenth of two is different,
            assert len(a_list) == len(b_list)

        # a_b != 1 for the cases when len(a) != len(b)
        a_b = float(len(a_list)) / len(b_list)

        a_data = np.sum(a_list, axis=0)
        b_data = np.sum(b_list, axis=0)

        # A - scaled B: sky/background subtraction via nod differencing
        data_minus = a_data - a_b*b_data
        #data_minus0 = data_minus

        from libs.destriper import destriper
        if 1:
            data_minus = destriper.get_destriped(data_minus,
                                                 ~np.isfinite(data_minus),
                                                 pattern=64)

        data_minus_flattened = data_minus / orderflat
        data_minus_flattened[~flat_mask.data] = np.nan
        #data_minus_flattened[order_flat_meanspec<0.1*order_flat_meanspec.max()] = np.nan

        # for variance, we need a square of a_b
        data_plus = (a_data + (a_b**2)*b_data)

        import scipy.ndimage as ni
        bias_mask2 = ni.binary_dilation(bias_mask)

        from libs import instrument_parameters
        gain = instrument_parameters.gain[band]

        # random noise: estimate read-noise statistics from the difference
        # image with illuminated/bad pixels masked out
        variance0 = data_minus
        variance_ = variance0.copy()
        variance_[bias_mask2] = np.nan
        variance_[pix_mask] = np.nan

        # column-wise median of the finite pixels, removed before clipping
        mm = np.ma.array(variance0, mask=~np.isfinite(variance0))
        ss = np.ma.median(mm, axis=0)
        variance_ = variance_ - ss

        # iterate over fixed number of times.
        # need to be improved.
        for i in range(5):
            # 3-sigma clip per column
            st = np.nanstd(variance_, axis=0)
            variance_[np.abs(variance_) > 3*st] = np.nan

        #st = np.nanstd(variance_, axis=0)

        variance = destriper.get_destriped(variance0,
                                           ~np.isfinite(variance_),
                                           pattern=64)
        variance_ = variance.copy()
        variance_[bias_mask2] = np.nan
        variance_[pix_mask] = np.nan

        # refine the global sigma, then clip against the column median
        st = np.nanstd(variance_)
        st = np.nanstd(variance_[np.abs(variance_) < 3*st])

        variance_[np.abs(variance_-ss) > 3*st] = np.nan

        # per-column noise, median-smoothed over 11 columns
        x_std = ni.median_filter(np.nanstd(variance_, axis=0), 11)

        variance_map0 = np.zeros_like(variance) + x_std**2
        variance_map = variance_map0 + np.abs(data_plus)/gain # add poisson noise in ADU
        # we ignore effect of flattening

        # now estimate lsf

        # estimate lsf
        ordermap_bpixed = order_map.copy()
        ordermap_bpixed[pix_mask] = 0
        ordermap_bpixed[~np.isfinite(orderflat)] = 0

        #

        if IF_POINT_SOURCE: # if point source
            # build an empirical slit profile from detector columns 800-1200
            x1, x2 = 800, 1200

            bins, lsf_list = ap.extract_lsf(ordermap_bpixed, slitpos_map,
                                            data_minus_flattened,
                                            x1, x2, bins=None)

            hh0 = np.sum(lsf_list, axis=0)
            # positive (A) and negative (B) peak heights of the A-B profile
            peak1, peak2 = max(hh0), -min(hh0)
            lsf_x = 0.5*(bins[1:]+bins[:-1])
            lsf_y = hh0/(peak1+peak2)

            from scipy.interpolate import UnivariateSpline
            lsf_ = UnivariateSpline(lsf_x, lsf_y, k=3, s=0,
                                    bbox=[0, 1])
            roots = list(lsf_.roots())
            #assert(len(roots) == 1)

            # integrate the profile piecewise between sign changes so the
            # positive and negative lobes both contribute magnitude
            integ_list = []
            from itertools import izip, cycle
            for ss, int_r1, int_r2 in izip(cycle([1, -1]),
                                           [0] + roots,
                                           roots + [1]):
                #print ss, int_r1, int_r2
                integ_list.append(lsf_.integral(int_r1, int_r2))
            integ = np.abs(np.sum(integ_list))
            # NOTE(review): the loop variable rebinds ``ss`` (previously
            # the column median); ``ss`` is not referenced again in this
            # branch, but the shadowing is fragile.

            def lsf(o, x, slitpos):
                # normalized slit profile; independent of order/column here
                return lsf_(slitpos) / integ

            # make weight map
            profile_map = ap.make_profile_map(order_map, slitpos_map, lsf)

            # extract spec
            s_list, v_list = ap.extract_stellar(ordermap_bpixed, profile_map,
                                                variance_map,
                                                data_minus_flattened,
                                                slitoffset_map=slitoffset_map)

            # make synth_spec : profile * spectra
            synth_map = ap.make_synth_map(order_map, slitpos_map,
                                          profile_map, s_list,
                                          slitoffset_map=slitoffset_map)

            # residual significance of the data against the model
            sig_map = (data_minus_flattened - synth_map)**2/variance_map
            ## mark sig_map > 100 as cosmicray. The threshold need to be fixed.

            # reextract with new variance map and CR is rejected
            variance_map_r = variance_map0 + np.abs(synth_map)/gain
            variance_map2 = np.max([variance_map, variance_map_r], axis=0)
            variance_map2[np.abs(sig_map) > 100] = np.nan

            # extract spec

            s_list, v_list = ap.extract_stellar(ordermap_bpixed, profile_map,
                                                variance_map2,
                                                data_minus_flattened,
                                                slitoffset_map=slitoffset_map)

        else: # if extended source
            from scipy.interpolate import UnivariateSpline
            if recipe in ["EXTENDED_AB", "EXTENDED_ABBA"]:
                # AB nod: +1 over the lower half of the slit, -1 over the
                # upper half, with a narrow transition of width 2*delta
                delta = 0.01
                lsf_ = UnivariateSpline([0, 0.5-delta, 0.5+delta, 1],
                                        [1., 1., -1., -1.],
                                        k=1, s=0,
                                        bbox=[0, 1])
            else:
                # ON-OFF: uniform profile along the whole slit
                lsf_ = UnivariateSpline([0, 1], [1., 1.],
                                        k=1, s=0,
                                        bbox=[0, 1])

            def lsf(o, x, slitpos):
                return lsf_(slitpos)

            profile_map = ap.make_profile_map(order_map, slitpos_map, lsf)

            # we need to update the variance map by rejecting
            # cosmicray sources, but it is not clear how we do this
            # for extended source.
            variance_map2 = variance_map
            s_list, v_list = ap.extract_stellar(ordermap_bpixed,
                                                profile_map,
                                                variance_map2,
                                                data_minus_flattened,
                                                slitoffset_map=slitoffset_map
                                                )

    if 1: # calculate S/N per resolution
        sn_list = []
        for wvl, s, v in zip(wvl_solutions,
                             s_list, v_list):
            dw = np.gradient(wvl)
            # R ~ 40000 assumed for the resolution element
            pixel_per_res_element = (wvl/40000.)/dw
            #print pixel_per_res_element[1024]
            # len(pixel_per_res_element) = 2047. But we ignore it.
            sn = (s/v**.5)*(pixel_per_res_element**.5)
            sn_list.append(sn)

    if 1: # save the product
        from libs.storage_descriptions import (COMBINED_IMAGE_DESC,
                                               VARIANCE_MAP_DESC)
        from libs.products import PipelineImage

        r = PipelineProducts("1d specs")
        r.add(COMBINED_IMAGE_DESC, PipelineImage([],
                                                 data_minus_flattened))
        r.add(VARIANCE_MAP_DESC, PipelineImage([],
                                               variance_map2))
        # r.add(VARIANCE_MAP_DESC, PipelineImage([],
        #                                        variance_map.data))

        igr_storage.store(r,
                          mastername=obj_filenames[0],
                          masterhdu=None)

    if 1: # save spectra, variance, sn
        from libs.storage_descriptions import SKY_WVLSOL_FITS_DESC
        fn = igr_storage.get_path(SKY_WVLSOL_FITS_DESC,
                                  basenames["sky"])
        # fn = sky_path.get_secondary_path("wvlsol_v1.fits")
        # reuse the wavelength-solution FITS as a header template for output
        f = pyfits.open(fn)

        d = np.array(s_list)
        f[0].data = d.astype("f32")

        from libs.storage_descriptions import (SPEC_FITS_DESC,
                                               VARIANCE_FITS_DESC,
                                               SN_FITS_DESC)

        fout = igr_storage.get_path(SPEC_FITS_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

        d = np.array(v_list)
        f[0].data = d.astype("f32")
        fout = igr_storage.get_path(VARIANCE_FITS_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

        d = np.array(sn_list)
        f[0].data = d.astype("f32")
        fout = igr_storage.get_path(SN_FITS_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

    if 1:
        # from libs.storage_descriptions import ORDER_FLAT_JSON_DESC
        prod = igr_storage.load([ORDER_FLAT_JSON_DESC],
                                basenames["flat_on"])[ORDER_FLAT_JSON_DESC]

        new_orders = prod["orders"]
        # fitted_response = orderflat_products["fitted_responses"]
        i1i2_list = prod["i1i2_list"]

        # map each extracted order to its index in the flat's order list
        order_indices = []
        for o in ap.orders:
            o_new_ind = np.searchsorted(new_orders, o)
            order_indices.append(o_new_ind)

    if DO_STD:
        # a quick and dirty flattening for A0V stars
        from libs.master_calib import get_master_calib_abspath
        fn = get_master_calib_abspath("A0V/vegallpr25.50000resam5")
        d = np.genfromtxt(fn)
        wvl_a0v, flux_a0v, cont_a0v = (d[:,i] for i in [0, 1, 2])
        # assumes model wavelengths are in nm, converted to um -- TODO confirm
        wvl_a0v = wvl_a0v/1000.

        wvl_limits = []
        for wvl_ in wvl_solutions:
            wvl_limits.extend([wvl_[0], wvl_[-1]])

        dwvl = abs(wvl_[0] - wvl_[-1])*0.1 # padding

        mask_wvl1 = min(wvl_limits) - dwvl
        mask_wvl2 = max(wvl_limits) + dwvl

        #print mask_wvl1, mask_wvl2

        # if band == "H":
        #     mask_wvl1, mask_wvl2 = 1.450, 1.850
        # else:
        #     mask_wvl1, mask_wvl2 = 1.850, 2.550

        mask_igr = (mask_wvl1 < wvl_a0v) & (wvl_a0v < mask_wvl2)

        fn = get_master_calib_abspath("telluric/LBL_A15_s0_w050_R0060000_T.fits")
        telluric = pyfits.open(fn)[1].data
        telluric_lam = telluric["lam"]
        tel_mask_igr = (mask_wvl1 < telluric_lam) & (telluric_lam < mask_wvl2)

        #plot(telluric_lam[tel_mask_H], telluric["trans"][tel_mask_H])

        from scipy.interpolate import interp1d, UnivariateSpline
        # spl = UnivariateSpline(telluric_lam[tel_mask_igr],
        #                        telluric["trans"][tel_mask_igr],
        #                        k=1,s=0)
        spl = interp1d(telluric_lam[tel_mask_igr],
                       telluric["trans"][tel_mask_igr],
                       bounds_error=False
                       )
        trans = spl(wvl_a0v[mask_igr])

        # ax1.plot(wvl_a0v[mask_igr], flux[mask_igr]/cont[mask_igr]*trans,
        #          color="0.5", zorder=0.5)

        # smooth upper envelope of the transmission; pixels well below it
        # (deep telluric lines) are rejected below
        trans_m = ni.maximum_filter(trans, 128)
        trans_mg = ni.gaussian_filter(trans_m, 32)

        zzz0 = (flux_a0v/cont_a0v)[mask_igr]
        zzz = zzz0*trans
        mmm = trans/trans_mg > 0.95
        zzz[~mmm] = np.nan

        wvl_zzz = wvl_a0v[mask_igr]

        #ax2.plot(, zzz)

        # #ax2 = subplot(212)

        # if DO_STD:
        #     telluric_cor = []

        a0v_flattened = []
        for o_index, wvl, s in zip(order_indices, wvl_solutions, s_list):
            i1, i2 = i1i2_list[o_index]
            #sl = slice(i1, i2)
            wvl1, wvl2 = wvl[i1], wvl[i2]
            #wvl1, wvl2 = wvl[0], wvl[-1]
            z_m = (wvl1 < wvl_zzz) & (wvl_zzz < wvl2)

            wvl1, wvl2 = min(wvl), max(wvl)
            # NOTE(review): z_m2 is computed but never used.
            z_m2 = (wvl1 < wvl_zzz) & (wvl_zzz < wvl2)
            #z_m = z_m2

            ss = interp1d(wvl, s)
            s_interped = ss(wvl_zzz[z_m])

            # ratio of observed A0V spectrum to (model * transmission)
            xxx, yyy = wvl_zzz[z_m], s_interped/zzz[z_m]

            from astropy.modeling import models, fitting
            # fit the smooth instrument response with a Chebyshev polynomial
            p_init = models.Chebyshev1D(domain=[xxx[0], xxx[-1]],
                                        degree=6)
            fit_p = fitting.LinearLSQFitter()
            x_m = np.isfinite(yyy)
            p = fit_p(p_init, xxx[x_m], yyy[x_m])

            #ax2.plot(xxx, yyy)
            #ax2.plot(xxx, p(xxx))

            res_ = p(wvl)

            z_interp = interp1d(wvl_zzz[z_m], zzz0[z_m], bounds_error=False)
            A0V = z_interp(wvl)
            #res_[res_<0.3*res_.max()] = np.nan

            # divide out the fitted response and the A0V model spectrum
            s_f = (s/res_)/A0V
            s_f[:i1] = np.nan
            s_f[i2:] = np.nan

            a0v_flattened.append(s_f)

        d = np.array(a0v_flattened)
        #d[~np.isfinite(d)] = 0.
        f[0].data = d.astype("f32")

        from libs.storage_descriptions import SPEC_FITS_FLATTENED_DESC
        fout = igr_storage.get_path(SPEC_FITS_FLATTENED_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

        db["a0v"].update(band, tgt_basename)
def process(self, recipe, band, obsids, frametypes):
    """Extract 1-d spectra (spec / variance / S/N) from an AB-nodded IGRINS
    observation and store them as pipeline products.

    Revised version: destripes using the bias mask, supports extracting
    only a fraction of the slit via ``self.frac_slit``, masks cosmic-ray
    pixels in the saved combined image, and delegates the A0V telluric
    flattening to helpers in ``libs.a0v_spec``.

    Parameters
    ----------
    recipe : str
        One of "A0V_AB", "STELLAR_AB", "EXTENDED_AB", "EXTENDED_ONOFF".
    band : str
        Spectrograph band; used to query calibration DBs and the gain.
    obsids : list
        Observation ids; ``obsids[0]`` is used as the master obsid.
    frametypes : list
        Per-obsid nod positions ("A"/"B"), parallel to *obsids*.

    NOTE(review): this redefinition shadows the earlier ``process`` above
    if both live in the same scope.
    """
    igr_path = self.igr_path
    igr_storage = self.igr_storage

    # Only A0V standards get the telluric flattening step at the end.
    if recipe == "A0V_AB":
        DO_STD = True
        #FIX_TELLURIC=False
    elif recipe == "STELLAR_AB":
        DO_STD = False
        #FIX_TELLURIC=True
    elif recipe == "EXTENDED_AB":
        DO_STD = False
        #FIX_TELLURIC=True
    elif recipe == "EXTENDED_ONOFF":
        DO_STD = False
        #FIX_TELLURIC=True
    # NOTE(review): an unrecognized recipe leaves DO_STD undefined and
    # would raise NameError at the "if DO_STD:" check below.

    if 1:  # set up product databases and query calibration basenames
        obj_filenames = igr_path.get_filenames(band, obsids)
        master_obsid = obsids[0]
        tgt_basename = os.path.splitext(os.path.basename(
            obj_filenames[0]))[0]

        db = {}
        basenames = {}

        db_types = ["flat_off", "flat_on", "thar", "sky"]
        for db_type in db_types:
            db_name = igr_path.get_section_filename_base(
                "PRIMARY_CALIB_PATH",
                "%s.db" % db_type,
            )
            db[db_type] = ProductDB(db_name)

        # db on output path
        db_types = ["a0v"]
        for db_type in db_types:
            db_name = igr_path.get_section_filename_base(
                "OUTDATA_PATH",
                "%s.db" % db_type,
            )
            db[db_type] = ProductDB(db_name)

        # to get basenames
        db_types = ["flat_off", "flat_on", "thar", "sky"]
        # if FIX_TELLURIC:
        #     db_types.append("a0v")
        for db_type in db_types:
            basenames[db_type] = db[db_type].query(band, master_obsid)

    if 1: # make aperture
        from libs.storage_descriptions import SKY_WVLSOL_JSON_DESC

        sky_basename = db["sky"].query(band, master_obsid)
        wvlsol_products = igr_storage.load(
            [SKY_WVLSOL_JSON_DESC],
            sky_basename)[SKY_WVLSOL_JSON_DESC]

        orders_w_solutions = wvlsol_products["orders"]
        # Python 2 map: list of per-order wavelength arrays.
        wvl_solutions = map(np.array, wvlsol_products["wvl_sol"])

        from libs.storage_descriptions import ONED_SPEC_JSON_DESC
        raw_spec_products = igr_storage.load([ONED_SPEC_JSON_DESC],
                                             sky_basename)

        from recipe_wvlsol_sky import load_aperture2

        ap = load_aperture2(
            igr_storage, band, master_obsid,
            db["flat_on"],
            raw_spec_products[ONED_SPEC_JSON_DESC]["orders"],
            orders_w_solutions)

        # This should be saved somewhere and loaded, instead of making it
        # every time.
        order_map = ap.make_order_map()
        slitpos_map = ap.make_slitpos_map()
        order_map2 = ap.make_order_map(mask_top_bottom=True)

    if 1:  # load pixel masks, the order flat, and the slit-offset map
        from libs.storage_descriptions import (HOTPIX_MASK_DESC,
                                               DEADPIX_MASK_DESC,
                                               ORDER_FLAT_IM_DESC,
                                               ORDER_FLAT_JSON_DESC,
                                               FLAT_MASK_DESC)

        hotpix_mask = igr_storage.load(
            [HOTPIX_MASK_DESC],
            basenames["flat_off"])[HOTPIX_MASK_DESC]
        deadpix_mask = igr_storage.load(
            [DEADPIX_MASK_DESC],
            basenames["flat_on"])[DEADPIX_MASK_DESC]

        pix_mask = hotpix_mask.data | deadpix_mask.data

        # aperture_solution_products = PipelineProducts.load(aperture_solutions_name)

        orderflat_ = igr_storage.load(
            [ORDER_FLAT_IM_DESC],
            basenames["flat_on"])[ORDER_FLAT_IM_DESC]
        orderflat = orderflat_.data
        # bad pixels are NaN-ed so they propagate through the flattening
        orderflat[pix_mask] = np.nan

        orderflat_json = igr_storage.load(
            [ORDER_FLAT_JSON_DESC],
            basenames["flat_on"])[ORDER_FLAT_JSON_DESC]
        order_flat_meanspec = np.array(orderflat_json["mean_order_specs"])

        # flat_normed = igr_storage.load([FLAT_NORMED_DESC],
        #                                basenames["flat_on"])[FLAT_NORMED_DESC]

        flat_mask = igr_storage.load([FLAT_MASK_DESC],
                                     basenames["flat_on"])[FLAT_MASK_DESC]
        # pixels illuminated in the flat AND falling inside an order
        bias_mask = flat_mask.data & (order_map2 > 0)

        SLITOFFSET_FITS_DESC = ("PRIMARY_CALIB_PATH", "SKY_", ".slitoffset_map.fits")
        prod_ = igr_storage.load([SLITOFFSET_FITS_DESC],
                                 basenames["sky"])[SLITOFFSET_FITS_DESC]
        #fn = sky_path.get_secondary_path("slitoffset_map.fits")
        slitoffset_map = prod_.data

    if 1:  # split object frames into A and B nod positions
        abba_names = obj_filenames

        def filter_abba_names(abba_names, frametypes, frametype):
            # keep only the filenames whose frametype matches
            return [
                an for an, ft in zip(abba_names, frametypes)
                if ft == frametype
            ]

        a_name_list = filter_abba_names(abba_names, frametypes, "A")
        b_name_list = filter_abba_names(abba_names, frametypes, "B")

        if recipe in ["A0V_AB", "STELLAR_AB"]:
            IF_POINT_SOURCE = True
        elif recipe in ["EXTENDED_AB", "EXTENDED_ONOFF"]:
            IF_POINT_SOURCE = False
        else:
            print "Unknown recipe : %s" % recipe
            # NOTE(review): falls through with IF_POINT_SOURCE undefined.

    if 1:
        #ab_names = ab_names_list[0]

        # master_hdu = pyfits.open(a_name_list[0])[0]

        a_list = [pyfits.open(name)[0].data \
                  for name in a_name_list]
        b_list = [pyfits.open(name)[0].data \
                  for name in b_name_list]

        # we may need to detrip

        # first define extract profile (gaussian).

        # dx = 100

        if IF_POINT_SOURCE: # if point source
            # for point sources, variance estimation becomes wrong
            # if lenth of two is different,
            assert len(a_list) == len(b_list)

        # a_b != 1 for the cases when len(a) != len(b)
        a_b = float(len(a_list)) / len(b_list)

        a_data = np.sum(a_list, axis=0)
        b_data = np.sum(b_list, axis=0)

        # A - scaled B: sky/background subtraction via nod differencing
        data_minus = a_data - a_b * b_data
        #data_minus0 = data_minus

        from libs.destriper import destriper
        if 1:
            # unlike the older version, also exclude illuminated (bias
            # mask) pixels from the stripe estimation
            destrip_mask = ~np.isfinite(data_minus) | bias_mask
            data_minus = destriper.get_destriped(data_minus,
                                                 destrip_mask,
                                                 hori=True,
                                                 pattern=64)

        data_minus_flattened = data_minus / orderflat
        data_minus_flattened[~flat_mask.data] = np.nan
        #data_minus_flattened[order_flat_meanspec<0.1*order_flat_meanspec.max()] = np.nan

        # for variance, we need a square of a_b
        data_plus = (a_data + (a_b**2) * b_data)

        import scipy.ndimage as ni
        bias_mask2 = ni.binary_dilation(bias_mask)

        from libs import instrument_parameters
        gain = instrument_parameters.gain[band]

        # random noise: estimate read-noise statistics from the difference
        # image with illuminated/bad pixels masked out
        variance0 = data_minus
        variance_ = variance0.copy()
        variance_[bias_mask2] = np.nan
        variance_[pix_mask] = np.nan

        # column-wise median of the finite pixels, removed before clipping
        mm = np.ma.array(variance0, mask=~np.isfinite(variance0))
        ss = np.ma.median(mm, axis=0)
        variance_ = variance_ - ss

        # iterate over fixed number of times.
        # need to be improved.
        for i in range(5):
            # 3-sigma clip per column
            st = np.nanstd(variance_, axis=0)
            variance_[np.abs(variance_) > 3 * st] = np.nan

        #st = np.nanstd(variance_, axis=0)

        variance = destriper.get_destriped(variance0,
                                           ~np.isfinite(variance_),
                                           pattern=64)
        variance_ = variance.copy()
        variance_[bias_mask2] = np.nan
        variance_[pix_mask] = np.nan

        # refine the global sigma, then clip against the column median
        st = np.nanstd(variance_)
        st = np.nanstd(variance_[np.abs(variance_) < 3 * st])

        variance_[np.abs(variance_ - ss) > 3 * st] = np.nan

        # per-column noise, median-smoothed over 11 columns
        x_std = ni.median_filter(np.nanstd(variance_, axis=0), 11)

        variance_map0 = np.zeros_like(variance) + x_std**2
        variance_map = variance_map0 + np.abs(
            data_plus) / gain  # add poisson noise in ADU
        # we ignore effect of flattening

        # now estimate lsf

        # estimate lsf
        ordermap_bpixed = order_map.copy()
        ordermap_bpixed[pix_mask] = 0
        ordermap_bpixed[~np.isfinite(orderflat)] = 0

        #

        if IF_POINT_SOURCE: # if point source
            # build an empirical slit profile from detector columns 800-1200
            x1, x2 = 800, 1200

            bins, lsf_list = ap.extract_lsf(ordermap_bpixed, slitpos_map,
                                            data_minus_flattened,
                                            x1, x2, bins=None)

            hh0 = np.sum(lsf_list, axis=0)
            # positive (A) and negative (B) peak heights of the A-B profile
            peak1, peak2 = max(hh0), -min(hh0)
            lsf_x = 0.5 * (bins[1:] + bins[:-1])
            lsf_y = hh0 / (peak1 + peak2)

            from scipy.interpolate import UnivariateSpline
            lsf_ = UnivariateSpline(lsf_x, lsf_y, k=3, s=0,
                                    bbox=[0, 1])
            roots = list(lsf_.roots())
            #assert(len(roots) == 1)

            # integrate the profile piecewise between sign changes so the
            # positive and negative lobes both contribute magnitude
            integ_list = []
            from itertools import izip, cycle
            for ss, int_r1, int_r2 in izip(cycle([1, -1]),
                                           [0] + roots,
                                           roots + [1]):
                #print ss, int_r1, int_r2
                integ_list.append(lsf_.integral(int_r1, int_r2))
            integ = np.abs(np.sum(integ_list))
            # NOTE(review): the loop variable rebinds ``ss`` (previously
            # the column median); ``ss`` is not referenced again in this
            # branch, but the shadowing is fragile.

            def lsf(o, x, slitpos):
                # normalized slit profile; independent of order/column here
                return lsf_(slitpos) / integ

            # make weight map
            profile_map = ap.make_profile_map(order_map, slitpos_map, lsf)

            # try to select portion of the slit to extract
            if self.frac_slit is not None:
                frac1, frac2 = min(self.frac_slit), max(self.frac_slit)
                slitpos_msk = (slitpos_map < frac1) | (slitpos_map > frac2)
                profile_map[slitpos_msk] = np.nan

            # extract spec
            s_list, v_list = ap.extract_stellar(
                ordermap_bpixed, profile_map,
                variance_map,
                data_minus_flattened,
                slitoffset_map=slitoffset_map)

            # make synth_spec : profile * spectra
            synth_map = ap.make_synth_map(order_map, slitpos_map,
                                          profile_map, s_list,
                                          slitoffset_map=slitoffset_map)

            # residual significance of the data against the model
            sig_map = (data_minus_flattened - synth_map)**2 / variance_map
            ## mark sig_map > 100 as cosmicray. The threshold need to be fixed.

            # reextract with new variance map and CR is rejected
            variance_map_r = variance_map0 + np.abs(synth_map) / gain
            variance_map2 = np.max([variance_map, variance_map_r], axis=0)
            variance_map2[np.abs(sig_map) > 100] = np.nan

            # masking this out will affect the saved combined image.
            data_minus_flattened[np.abs(sig_map) > 100] = np.nan

            # extract spec

            s_list, v_list = ap.extract_stellar(
                ordermap_bpixed, profile_map,
                variance_map2,
                data_minus_flattened,
                slitoffset_map=slitoffset_map)

        else: # if extended source
            from scipy.interpolate import UnivariateSpline
            if recipe in ["EXTENDED_AB", "EXTENDED_ABBA"]:
                # AB nod: +1 over the lower half of the slit, -1 over the
                # upper half, with a narrow transition of width 2*delta
                delta = 0.01
                lsf_ = UnivariateSpline([0, 0.5 - delta, 0.5 + delta, 1],
                                        [1., 1., -1., -1.],
                                        k=1, s=0,
                                        bbox=[0, 1])
            else:
                # ON-OFF: uniform profile along the whole slit
                lsf_ = UnivariateSpline([0, 1], [1., 1.],
                                        k=1, s=0,
                                        bbox=[0, 1])

            def lsf(o, x, slitpos):
                return lsf_(slitpos)

            profile_map = ap.make_profile_map(order_map, slitpos_map, lsf)

            # optionally restrict extraction to a fraction of the slit
            if self.frac_slit is not None:
                frac1, frac2 = min(self.frac_slit), max(self.frac_slit)
                slitpos_msk = (slitpos_map < frac1) | (slitpos_map > frac2)
                profile_map[slitpos_msk] = np.nan

            # we need to update the variance map by rejecting
            # cosmic rays, but it is not clear how we do this
            # for extended source.
            variance_map2 = variance_map
            s_list, v_list = ap.extract_stellar(
                ordermap_bpixed,
                profile_map,
                variance_map2,
                data_minus_flattened,
                slitoffset_map=slitoffset_map)

    if 1: # calculate S/N per resolution
        sn_list = []
        for wvl, s, v in zip(wvl_solutions,
                             s_list, v_list):
            dw = np.gradient(wvl)
            # R ~ 40000 assumed for the resolution element
            pixel_per_res_element = (wvl / 40000.) / dw
            #print pixel_per_res_element[1024]
            # len(pixel_per_res_element) = 2047. But we ignore it.
            sn = (s / v**.5) * (pixel_per_res_element**.5)
            sn_list.append(sn)

    if 1: # save the product
        from libs.storage_descriptions import (COMBINED_IMAGE_DESC,
                                               VARIANCE_MAP_DESC)
        from libs.products import PipelineImage

        r = PipelineProducts("1d specs")
        r.add(COMBINED_IMAGE_DESC, PipelineImage([],
                                                 data_minus_flattened))
        r.add(VARIANCE_MAP_DESC, PipelineImage([],
                                               variance_map2))
        # r.add(VARIANCE_MAP_DESC, PipelineImage([],
        #                                        variance_map.data))

        igr_storage.store(r,
                          mastername=obj_filenames[0],
                          masterhdu=None)

    if 1: # save spectra, variance, sn
        from libs.storage_descriptions import SKY_WVLSOL_FITS_DESC
        fn = igr_storage.get_path(SKY_WVLSOL_FITS_DESC,
                                  basenames["sky"])
        # fn = sky_path.get_secondary_path("wvlsol_v1.fits")
        # reuse the wavelength-solution FITS as a header template for output
        f = pyfits.open(fn)

        d = np.array(s_list)
        f[0].data = d.astype("f32")

        from libs.storage_descriptions import (SPEC_FITS_DESC,
                                               VARIANCE_FITS_DESC,
                                               SN_FITS_DESC)

        fout = igr_storage.get_path(SPEC_FITS_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

        d = np.array(v_list)
        f[0].data = d.astype("f32")
        fout = igr_storage.get_path(VARIANCE_FITS_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

        d = np.array(sn_list)
        f[0].data = d.astype("f32")
        fout = igr_storage.get_path(SN_FITS_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

    if 1:
        # from libs.storage_descriptions import ORDER_FLAT_JSON_DESC
        prod = igr_storage.load([ORDER_FLAT_JSON_DESC],
                                basenames["flat_on"])[ORDER_FLAT_JSON_DESC]

        new_orders = prod["orders"]
        # fitted_response = orderflat_products["fitted_responses"]
        i1i2_list_ = prod["i1i2_list"]

        # gather the (i1, i2) usable-pixel range for each extracted order
        #order_indices = []
        i1i2_list = []
        for o in ap.orders:
            o_new_ind = np.searchsorted(new_orders, o)
            #order_indices.append(o_new_ind)
            i1i2_list.append(i1i2_list_[o_new_ind])

    if DO_STD:
        # flatten the A0V spectrum with the Vega model and telluric
        # transmission via the helpers in libs.a0v_spec
        from libs.a0v_spec import (A0VSpec, TelluricTransmission,
                                   get_a0v, get_flattend)
        a0v_spec = A0VSpec()
        tel_trans = TelluricTransmission()

        wvl_limits = []
        for wvl_ in wvl_solutions:
            wvl_limits.extend([wvl_[0], wvl_[-1]])

        dwvl = abs(wvl_[0] - wvl_[-1]) * 0.2  # padding

        wvl1 = min(wvl_limits) - dwvl
        wvl2 = max(wvl_limits) + dwvl

        a0v_wvl, a0v_tel_trans, a0v_tel_trans_masked = get_a0v(
            a0v_spec, wvl1, wvl2, tel_trans)

        a0v_flattened = get_flattend(a0v_spec,
                                     a0v_wvl, a0v_tel_trans_masked,
                                     wvl_solutions, s_list,
                                     i1i2_list=i1i2_list)

        d = np.array(a0v_flattened)
        #d[~np.isfinite(d)] = 0.
        f[0].data = d.astype("f32")

        from libs.storage_descriptions import SPEC_FITS_FLATTENED_DESC
        fout = igr_storage.get_path(SPEC_FITS_FLATTENED_DESC,
                                    tgt_basename)
        f.writeto(fout, clobber=True)

        db["a0v"].update(band, tgt_basename)