def current_sett_file(apath, sfile):
    # Load archive files
    ftype = sfile.split('.')[-1]
    archive_files = glob.glob(apath + 'settings.*.{:s}'.format(ftype))
    if len(archive_files) == 0:
        msgs.warn("No archival files found for {:s}".format(sfile))
        return None
    # Find the most recent
    dates = []
    for afile in archive_files:
        dates.append(afile.split('.')[-2])
    times = Time(dates)
    imax = np.argmax(times)
    arch_file = archive_files[imax]
    # Return
    return arch_file
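# Example usage (a hedged sketch; the filename below is invented).  Archive files are
# assumed to follow the 'settings.<YYYY-MM-DD>.<ext>' pattern written by archive(), e.g.
# 'settings.2018-04-26.armlsd':
#
#   >>> arch = current_sett_file(archive_path, 'settings.armlsd')
#   >>> # arch is the most recent 'settings.*.armlsd' file under archive_path, or None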
def compare_dicts(top_key, dict1, dict2, skip_keys=()):
    for key in dict1.keys():
        if isinstance(dict1[key], dict):
            if key in dict2.keys():
                compare_dicts(top_key + ',' + key, dict1[key], dict2[key])
        else:
            try:
                test = dict1[key] == dict2[key]
            except KeyError:
                pass
            else:
                if test:
                    if key not in skip_keys:
                        msgs.info("{:s},{:s} is a duplicate".format(top_key, key))
                else:
                    msgs.warn("{:s},{:s} is different".format(top_key, key))
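# Example usage (a hedged sketch; the dictionaries below are invented for illustration).
# Matching leaf values are logged as duplicates via msgs.info(), differing ones via
# msgs.warn(), with the comma-joined key path locating the entry:
#
#   >>> d1 = {'reduce': {'skysub': 'bspline', 'trim': 1}}
#   >>> d2 = {'reduce': {'skysub': 'bspline', 'trim': 2}}
#   >>> compare_dicts('top', d1, d2)
#   # -> "top,reduce,skysub is a duplicate" and "top,reduce,trim is different"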
def instconfig(fitsrow=None, binning=None):
    """ Returns a unique config string

    Parameters
    ----------
    fitsrow : Row
    binning : str, optional

    Returns
    -------
    config : str
    """
    config_dict = OrderedDict()
    config_dict['S'] = 'slitwid'
    config_dict['D'] = 'dichroic'
    config_dict['G'] = 'dispname'
    config_dict['T'] = 'dispangle'
    #
    config = ''
    for key in config_dict.keys():
        try:
            comp = str(fitsrow[config_dict[key]])
        except (KeyError, TypeError):
            comp = '0'
        #
        val = ''
        for s in comp:
            if s.isdigit():
                val += s
        config = config + key + '{:s}-'.format(val)
    # Binning
    if binning is None:
        msgs.warn("Assuming 1x1 binning for your detector")
        binning = '1x1'
    val = ''
    for s in binning:
        if s.isdigit():
            val += s
    config += 'B{:s}'.format(val)
    # Return
    return config
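# Example of the config string assembled above (a hedged sketch; the header values are
# invented).  Only the digits of each keyword value are kept, so a row with slitwid=1.0,
# dichroic='560', dispname='600/4000', dispangle=8750.0 and 2x2 binning gives:
#
#   >>> instconfig(fitsrow=row, binning='2x2')
#   'S10-D560-G6004000-T87500-B22'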
def bg_subtraction(slf, tilts, det, sciframe, varframe, bpix, crpix, **kwargs):
    """ Wrapper to run the background subtraction on a series of slits

    Parameters
    ----------
    slf
    det : int
    sciframe : ndarray
    varframe : ndarray
    crpix : ndarray
    kwargs
       Passed to bg_subtraction_slit

    Returns
    -------
    bgframe : ndarray
    """
    # Setup
    bgframe = np.zeros_like(sciframe)
    gdslits = np.where(~slf._maskslits[det-1])[0]
    for slit in gdslits:
        msgs.info("Working on slit: {:d}".format(slit))
        # TODO -- Replace this try/except when a more stable b-spline is used..
        try:
            slit_bgframe = bg_subtraction_slit(slf, det, slit, tilts, sciframe, varframe,
                                               bpix, crpix, **kwargs)
        except ValueError:  # Should have been bspline..
            msgs.warn("B-spline sky subtraction failed.  Slit {:d} will no longer be processed..".format(slit))
            #msgs.warn("Continue if you wish..")
            slf._maskslits[det-1][slit] = True
        else:
            bgframe += slit_bgframe
    # Return
    return bgframe
def archive():
    """ Generate archival file for the baseargf file or spect file
    and instrument setting files
    """
    settings_path = resource_filename('pypit', '/data/settings/')
    archive_path = resource_filename('pypit', '/data/settings/archive/')
    # Files
    sett_files = glob.glob(settings_path + 'settings.*')
    for sfile in sett_files:
        # Extension
        ext = sfile.split('.')[-1]
        if ext in ['py', 'pyc']:
            continue
        msgs.info("===============================================")
        sroot = sfile.split('/')[-1]
        msgs.info("Working on settings file {:s}".format(sroot))
        msgs.info("===============================================")
        # Archive
        arch_file = current_sett_file(archive_path, sfile)
        if arch_file is None:
            match = False
            arch_root = ''
        else:
            # Compare
            match = filecmp.cmp(sfile, arch_file)
            arch_root = arch_file.split('/')[-1]
        if not match:
            msgs.warn("Current archive {:s} does not match {:s}".format(arch_root, sfile))
            new_arch = archive_path + '/settings.{:s}.{:s}'.format(time.strftime("%Y-%m-%d"), ext)
            msgs.warn("Generating a new archive file: {:s}".format(new_arch.split('/')[-1]))
            copyfile(sfile, new_arch)
            msgs.warn("Add it to the repository!")
        else:
            msgs.info("Current archive file {:s} matches current settings file".format(arch_root))
def reduce_frame(slf, sciframe, rawvarframe, modelvarframe, bpix, datasec_img, bgframe,
                 scidx, fitsdict, det, crmask, tilts, mswave, scitrace=None, standard=False):
    """ Run standard extraction steps on a frame

    Parameters
    ----------
    sciframe : image
      Bias subtracted, trimmed, and flatfielded image
    rawvarframe : ndarray
      Variance array using the raw detector counts
    modelvarframe : ndarray
      Model variance array using the raw detector counts and an image of the sky background frame.
    bgframe : ndarray
      Sky background image
    scidx : int
      Index of the frame
    fitsdict : dict
      Contains relevant information from fits header files
    det : int
      Detector index
    scitrace : list of dict
      List containing dictionaries of the object trace parameters
    standard : bool, optional
      Standard star frame?
    """
    dnum = settings.get_dnum(det)
    ###############
    # Determine the final trace of the science objects
    if scitrace is None:
        msgs.info("Performing final object trace")
        scitrace = artrace.trace_objects_in_slits(slf, det, sciframe - bgframe, modelvarframe,
                                                  crmask, bgreg=20, doqa=(not standard),
                                                  standard=standard)
    if standard:
        # slf._msstd[det-1]['trace'] = scitrace
        # specobjs = arspecobj.init_exp(slf, scidx, det, fitsdict, scitrace, objtype='standard')
        # slf._msstd[det-1]['spobjs'] = specobjs
        specobjs = arspecobj.init_exp(slf, scidx, det, fitsdict, scitrace, objtype='standard')
    else:
        # Generate SpecObjExp list
        specobjs = arspecobj.init_exp(slf, scidx, det, fitsdict, scitrace, objtype='science')
        slf._scitrace[det - 1] = scitrace
        slf._specobjs[det - 1] = specobjs

    ###############
    # Extract
    noobj = True
    for sl in range(len(scitrace)):
        if 'nobj' in scitrace[sl].keys():
            if scitrace[sl]['nobj'] != 0:
                noobj = False
    if noobj is True:
        msgs.warn("No objects to extract for science frame" + msgs.newline()
                  + fitsdict['filename'][scidx])
        return True

    # Boxcar
    msgs.info("Performing boxcar extraction")
    bgcorr_box = arextract.boxcar(slf, det, specobjs, sciframe - bgframe, rawvarframe, bpix,
                                  bgframe, crmask, scitrace, mswave)

    # Optimal
    if not standard:
        # KBW: Using variance_frame() in arextract leads to a circular
        # import.  I've changed the arextract.optimal_extract() function
        # to return the object model, then the last step of generating
        # the new variance image is done here.
        msgs.info("Attempting optimal extraction with model profile")
        arextract.obj_profiles(slf, det, specobjs, sciframe - bgframe - bgcorr_box,
                               modelvarframe, bgframe + bgcorr_box, crmask, scitrace, tilts,
                               doqa=False)
        # newvar = arextract.optimal_extract(slf, det, specobjs, sciframe-bgframe-bgcorr_box,
        #                                    modelvarframe, bgframe+bgcorr_box, crmask, scitrace)
        obj_model = arextract.optimal_extract(slf, det, slf._specobjs[det - 1],
                                              sciframe - bgframe - bgcorr_box, modelvarframe,
                                              bgframe + bgcorr_box, crmask, scitrace, tilts,
                                              mswave)
        newvar = arprocimg.variance_frame(datasec_img, det, sciframe - bgframe - bgcorr_box, -1,
                                          settings.spect[dnum], skyframe=bgframe + bgcorr_box,
                                          objframe=obj_model)
        msgs.work("Should update variance image (and trace?) and repeat")
        #
        arextract.obj_profiles(slf, det, slf._specobjs[det - 1], sciframe - bgframe - bgcorr_box,
                               newvar, bgframe + bgcorr_box, crmask, scitrace, tilts)
        # finalvar = arextract.optimal_extract(slf, det, specobjs, sciframe-bgframe-bgcorr_box,
        #                                      newvar, bgframe+bgcorr_box, crmask, scitrace)
        obj_model = arextract.optimal_extract(slf, det, specobjs,
                                              sciframe - bgframe - bgcorr_box, newvar,
                                              bgframe + bgcorr_box, crmask, scitrace, tilts,
                                              mswave)
        finalvar = arprocimg.variance_frame(datasec_img, det, sciframe - bgframe - bgcorr_box,
                                            -1, settings.spect[dnum],
                                            skyframe=bgframe + bgcorr_box, objframe=obj_model)
        slf._modelvarframe[det - 1] = finalvar.copy()

    # Flexure correction?
    if settings.argflag['reduce']['flexure']['perform'] and (not standard):
        if settings.argflag['reduce']['flexure']['method'] is not None:
            flex_list = arwave.flexure_obj(slf, det)
            arwave.flexure_qa(slf, det, flex_list)

    # Correct Earth's motion
    if (settings.argflag['reduce']['calibrate']['refframe'] in ['heliocentric', 'barycentric']) and \
       (settings.argflag['reduce']['calibrate']['wavelength'] != "pixel"):
        if settings.argflag['science']['extraction']['reuse']:
            msgs.warn("{0:s} correction will not be applied if an extracted science frame exists, and is used".format(
                settings.argflag['reduce']['calibrate']['refframe']))
        if slf._specobjs[det - 1] is not None:
            msgs.info("Performing a {0:s} correction".format(
                settings.argflag['reduce']['calibrate']['refframe']))
            arwave.geomotion_correct(slf, det, fitsdict)
        else:
            msgs.info("There are no objects on detector {0:d} to perform a {1:s} correction".format(
                det, settings.argflag['reduce']['calibrate']['refframe']))
    else:
        msgs.info("A heliocentric correction will not be performed")

    # Final
    if not standard:
        slf._bgframe[det - 1] += bgcorr_box

    # Return
    return True
def reduce_multislit(slf, tilts, sciframe, bpix, datasec_img, scidx, fitsdict, det, mswave,
                     mspixelflatnrm=None, standard=False, slitprof=None, debug=False):
    """ Run standard extraction steps on a multislit frame

    Parameters
    ----------
    sciframe : image
      Bias subtracted image (using arload.load_frame)
    bpix : ndarray
      Bad pixel mask
    scidx : int
      Index of the frame
    fitsdict : dict
      Contains relevant information from fits header files
    det : int
      Detector index
    standard : bool, optional
      Standard star frame?
    """
    #
    dnum = settings.get_dnum(det)
    sciframe, rawvarframe, crmask = reduce_prepare(slf, sciframe, bpix, datasec_img, scidx,
                                                   fitsdict, det,
                                                   mspixelflatnrm=mspixelflatnrm,
                                                   slitprof=slitprof)
    # Save sciframe
    slf._sciframe[det - 1] = sciframe.copy()

    ###############
    # Estimate Sky Background
    if settings.argflag['reduce']['skysub']['perform']:
        # Perform an iterative background/science extraction
        if debug:
            debugger.set_trace()  # JXP says THIS MAY NOT WORK AS EXPECTED
            msgs.warn("Reading background from 2D image on disk")
            datfil = settings.argflag['run']['directory']['science'] + \
                '/spec2d_{:s}.fits'.format(slf._basename.replace(":", "_"))
            hdu = fits.open(datfil)
            bgframe = hdu[1].data - hdu[2].data
        else:
            msgs.info("First estimate of the sky background")
            bgframe = bg_subtraction(slf, tilts, det, sciframe, rawvarframe, bpix, crmask)
        modelvarframe = arprocimg.variance_frame(datasec_img, det, sciframe, scidx,
                                                 settings.spect[dnum], fitsdict=fitsdict,
                                                 skyframe=bgframe)
    else:
        modelvarframe = rawvarframe.copy()
        bgframe = np.zeros_like(sciframe)
    if not standard:  # Need to save
        slf._modelvarframe[det - 1] = modelvarframe
        slf._bgframe[det - 1] = bgframe

    ###############
    # Find objects and estimate their traces
    scitrace = artrace.trace_objects_in_slits(slf, det, sciframe - bgframe, modelvarframe,
                                              crmask, bgreg=20, doqa=False, standard=standard)
    if scitrace is None:
        msgs.info("Not performing extraction for science frame" + msgs.newline()
                  + fitsdict['filename'][scidx[0]])
        debugger.set_trace()
        #continue

    # Make sure that there are objects
    noobj = True
    for sl in range(len(scitrace)):
        if 'nobj' in scitrace[sl].keys():  # There can be empty dict's (skipped slits)
            if scitrace[sl]['nobj'] != 0:
                noobj = False
    if noobj is True:
        msgs.warn("No objects to extract for science frame" + msgs.newline()
                  + fitsdict['filename'][scidx])
        return True

    ###############
    # Finalize the Sky Background image
    if settings.argflag['reduce']['skysub']['perform']:
        # Perform an iterative background/science extraction
        msgs.info("Finalizing the sky background image")
        # Create a trace mask of the object
        trcmask = np.zeros_like(sciframe)
        for sl in range(len(scitrace)):
            if 'nobj' in scitrace[sl].keys():
                if scitrace[sl]['nobj'] > 0:
                    trcmask += scitrace[sl]['object'].sum(axis=2)
        trcmask[np.where(trcmask > 0.0)] = 1.0
        # Do it
        bgframe = bg_subtraction(slf, tilts, det, sciframe, modelvarframe, bpix, crmask,
                                 tracemask=trcmask)
        # Redetermine the variance frame based on the new sky model
        modelvarframe = arprocimg.variance_frame(datasec_img, det, sciframe, scidx,
                                                 settings.spect[dnum], fitsdict=fitsdict,
                                                 skyframe=bgframe)
        # Save
        if not standard:
            slf._modelvarframe[det - 1] = modelvarframe
            slf._bgframe[det - 1] = bgframe

    ###############
    # Flexure down the slit? -- Not currently recommended
    if settings.argflag['reduce']['flexure']['method'] == 'slitcen':
        flex_dict = arwave.flexure_slit(slf, det)
        arwave.flexure_qa(slf, det, flex_dict, slit_cen=True)

    # Perform an optimal extraction
    msgs.work("For now, perform extraction -- really should do this after the flexure+heliocentric correction")
    return reduce_frame(slf, sciframe, rawvarframe, modelvarframe, bpix, datasec_img, bgframe,
                        scidx, fitsdict, det, crmask, tilts, mswave, standard=standard)
def reduce_echelle(slf, sciframe, scidx, fitsdict, det, standard=False, triml=1, trimr=1,
                   mspixelflatnrm=None, doqa=True):
    """ Run standard extraction steps on an echelle frame

    Parameters
    ----------
    sciframe : image
      Bias subtracted image (using arload.load_frame)
    scidx : int
      Index of the frame
    fitsdict : dict
      Contains relevant information from fits header files
    det : int
      Detector index
    standard : bool, optional
      Standard star frame?
    triml : int (optional)
      Number of pixels to trim from the left slit edge
    trimr : int (optional)
      Number of pixels to trim from the right slit edge
    """
    msgs.work("Multiprocess this algorithm")
    nspec = sciframe.shape[0]
    nord = slf._lordloc[det - 1].shape[1]
    # Prepare the frames for tracing and extraction
    sciframe, rawvarframe, crmask = reduce_prepare(slf, sciframe, scidx, fitsdict, det,
                                                   mspixelflatnrm=mspixelflatnrm,
                                                   standard=standard, slitprof=slitprof)
    bgframe = np.zeros_like(sciframe)
    bgnl, bgnr = np.zeros(nord, dtype=np.int), np.zeros(nord, dtype=np.int)
    skysub = True
    if settings.argflag['reduce']['skysub']['perform']:
        # Identify background pixels, and generate an image of the sky spectrum in each slit
        for o in range(nord):
            word = np.where((slf._slitpix[det - 1] == o + 1) & (slf._scimask[det - 1] == 0))
            if word[0].size == 0:
                msgs.warn("There are no pixels in slit {0:d}".format(o + 1))
                continue
            tbgframe, nl, nr = background_subtraction(slf, sciframe, rawvarframe, o, det)
            bgnl[o], bgnr[o] = nl, nr
            bgframe += tbgframe
            if nl == 0 and nr == 0:
                pass
                # If just one slit cannot do sky subtraction, don't do sky subtraction
                # msgs.warn("A sky subtraction will not be performed")
                # skysub = False
                # bgframe = np.zeros_like(sciframe)
                # modelvarframe = rawvarframe.copy()
                # break
        if skysub:
            # Provided the for loop above didn't break early, model the variance frame
            dnum = settings.get_dnum(det)
            modelvarframe = arprocimg.variance_frame(datasec_img, det, sciframe, scidx,
                                                     settings.spect[dnum], fitsdict=fitsdict,
                                                     skyframe=bgframe)
    else:
        modelvarframe = rawvarframe.copy()
        bgframe = np.zeros_like(sciframe)
    if not standard:  # Need to save
        slf._modelvarframe[det - 1] = modelvarframe
        slf._bgframe[det - 1] = bgframe
    # Obtain a first estimate of the object trace then
    # fit the traces and perform a PCA for the refinements
    trccoeff = np.zeros((settings.argflag['trace']['object']['order'] + 1, nord))
    trcxfit = np.arange(nspec)
    extrap_slit = np.zeros(nord)
    for o in range(nord):
        trace, error = artrace.trace_weighted(sciframe - bgframe, slf._lordloc[det - 1][:, o],
                                              slf._rordloc[det - 1][:, o],
                                              mask=slf._scimask[det - 1], wght="flux")
        if trace is None:
            extrap_slit[o] = 1
            continue
        # Find only the good pixels
        w = np.where((error != 0.0) & (~np.isnan(error)))
        if w[0].size <= 2 * settings.argflag['trace']['object']['order']:
            extrap_slit[o] = 1
            continue
        # Convert the trace locations to be a fraction of the slit length,
        # measured from the left slit edge.
        trace -= slf._lordloc[det - 1][:, o]
        trace /= (slf._rordloc[det - 1][:, o] - slf._lordloc[det - 1][:, o])
        try:
            msk, trccoeff[:, o] = arutils.robust_polyfit(
                trcxfit[w], trace[w], settings.argflag['trace']['object']['order'],
                function=settings.argflag['trace']['object']['function'],
                weights=1.0 / error[w]**2, minv=0.0, maxv=nspec - 1.0)
        except:
            msgs.info("arproc.reduce_echelle")
            debugger.set_trace()
    refine = 0.0
    if settings.argflag['trace']['object']['method'] == "pca":
        # Identify the orders to be extrapolated during reconstruction
        orders = 1.0 + np.arange(nord)
        msgs.info("Performing a PCA on the object trace")
        ofit = settings.argflag['trace']['object']['params']
        lnpc = len(ofit) - 1
        maskord = np.where(extrap_slit == 1)[0]
        xcen = trcxfit[:, np.newaxis].repeat(nord, axis=1)
        trccen = arutils.func_val(trccoeff, trcxfit,
                                  settings.argflag['trace']['object']['function'],
                                  minv=0.0, maxv=nspec - 1.0).T
        if np.sum(1.0 - extrap_slit) > ofit[0] + 1:
            fitted, outpar = arpca.basis(xcen, trccen, trccoeff, lnpc, ofit, skipx0=False,
                                         mask=maskord,
                                         function=settings.argflag['trace']['object']['function'])
            if doqa:
                # arqa.pca_plot(slf, outpar, ofit, "Object_Trace", pcadesc="PCA of object trace")
                arpca.pca_plot(slf.setup, outpar, ofit, "Object_Trace",
                               pcadesc="PCA of object trace")
            # Extrapolate the remaining orders requested
            trccen, outpar = arpca.extrapolate(outpar, orders,
                                               function=settings.argflag['trace']['object']['function'])
            #refine = trccen-trccen[nspec//2, :].reshape((1, nord))
        else:
            msgs.warn("Could not perform a PCA on the object trace" + msgs.newline()
                      + "Not enough well-traced orders")
            msgs.info("Using direct determination of the object trace instead")
            pass
    else:
        msgs.error("Not ready for object trace method:" + msgs.newline()
                   + settings.argflag['trace']['object']['method'])
    # Construct the left and right traces of the object profile
    # The following code ensures that the fraction of the slit
    # containing the object remains constant along the spectral
    # direction
    trcmean = np.mean(trccen, axis=0)
    trobjl = (trcmean - (1 + bgnl) / slf._pixwid[det - 1].astype(np.float)).reshape(
        (1, nord)).repeat(nspec, axis=0)
    trobjl = trccen - trobjl
    trobjr = (-trcmean + (slf._pixwid[det - 1] - bgnr - 1) / slf._pixwid[det - 1].astype(np.float)).reshape(
        (1, nord)).repeat(nspec, axis=0)
    trobjr = trccen + trobjr
    # Convert trccen to the actual trace locations
    trccen *= (slf._rordloc[det - 1] - slf._lordloc[det - 1])
    trccen += slf._lordloc[det - 1]
    trobjl *= (slf._rordloc[det - 1] - slf._lordloc[det - 1])
    trobjl += slf._lordloc[det - 1]
    trobjr *= (slf._rordloc[det - 1] - slf._lordloc[det - 1])
    trobjr += slf._lordloc[det - 1]

    # Generate an image of pixel weights for each object. Each weight can
    # take any floating point value from 0 to 1 (inclusive). For the rec_obj_img,
    # a weight of 1 means that the pixel is fully contained within the object
    # region, and 0 means that the pixel is fully contained within the background
    # region. The opposite is true for the rec_bg_img array. A pixel that is on
    # the border of object/background is assigned a value between 0 and 1.
    msgs.work("Eventually allow ARMED to find multiple objects in the one slit")
    nobj = 1
    rec_obj_img = np.zeros(sciframe.shape + (nobj,))
    rec_bg_img = np.zeros(sciframe.shape + (nobj,))
    for o in range(nord):
        # Prepare object/background regions
        objl = np.array([bgnl[o]])
        objr = np.array([slf._pixwid[det - 1][o] - bgnr[o] - triml - trimr])
        bckl = np.zeros((slf._pixwid[det - 1][o] - triml - trimr, 1))
        bckr = np.zeros((slf._pixwid[det - 1][o] - triml - trimr, 1))
        bckl[:bgnl[o]] = 1
        if bgnr[o] != 0:
            bckr[-bgnr[o]:] = 1
        tobj_img, tbg_img = artrace.trace_objbg_image(slf, det, sciframe - bgframe, o,
                                                      [objl, objr], [bckl, bckr],
                                                      triml=triml, trimr=trimr)
        rec_obj_img += tobj_img
        rec_bg_img += tbg_img
    # Create trace dict
    scitrace = artrace.trace_object_dict(nobj, trccen[:, 0].reshape(trccen.shape[0], 1),
                                         object=rec_obj_img, background=rec_bg_img)
    for o in range(1, nord):
        scitrace = artrace.trace_object_dict(nobj, trccen[:, o].reshape(trccen.shape[0], 1),
                                             tracelist=scitrace)
    # Save the quality control
    if doqa:
        artrace.obj_trace_qa(slf, sciframe, trobjl, trobjr, None, det,
                             root="object_trace", normalize=False)
    # Finalize the Sky Background image
    if settings.argflag['reduce']['skysub']['perform'] and (nobj > 0) and skysub:
        msgs.info("Finalizing the sky background image")
        # Identify background pixels, and generate an image of the sky spectrum in each slit
        bgframe = np.zeros_like(sciframe)
        for o in range(nord):
            tbgframe, nl, nr = background_subtraction(slf, sciframe, rawvarframe, o, det,
                                                      refine=refine)
            bgnl[o], bgnr[o] = nl, nr
            bgframe += tbgframe
        modelvarframe = arprocimg.variance_frame(datasec_img, det, sciframe, scidx,
                                                 settings.spect[dnum], fitsdict=fitsdict,
                                                 skyframe=bgframe)
    # Perform an optimal extraction
    return reduce_frame(slf, sciframe, rawvarframe, modelvarframe, bgframe, scidx, fitsdict,
                        det, crmask, scitrace=scitrace, standard=standard)
def background_subtraction(slf, sciframe, varframe, slitn, det, refine=0.0, doqa=True):
    """ Generate a frame containing the background sky spectrum

    Parameters
    ----------
    slf : Class
      Science Exposure Class
    sciframe : ndarray
      science frame
    varframe : ndarray
      variance frame
    slitn : int
      Slit number
    det : int
      Detector index
    refine : float or ndarray
      refine the object traces. This should be a small value around 0.0.
      If a float, a constant offset will be applied.
      Otherwise, an array needs to be specified of the same length as
      sciframe.shape[0] that contains the refinement of each pixel along
      the spectral direction.

    Returns
    -------
    bgframe : ndarray
      An image, the same size as sciframe, that contains
      the background spectrum within the specified slit.
    nl : int
      number of pixels from the left slit edge to use as background pixels
    nr : int
      number of pixels from the right slit edge to use as background pixels
    """
    # Obtain all pixels that are within the slit edges, and are not masked
    word = np.where((slf._slitpix[det - 1] == slitn + 1) & (slf._scimask[det - 1] == 0))
    if word[0].size == 0:
        msgs.warn("There are no pixels in slit {0:d}".format(slitn))
        debugger.set_trace()
        nl, nr = 0, 0
        return np.zeros_like(sciframe), nl, nr
    # Calculate the oversampled object profiles
    oversampling_factor = 3  # should be an integer according to the description in object_profile()
    xedges, modvals = object_profile(slf, sciframe, slitn, det, refine=refine,
                                     factor=oversampling_factor)
    bincent = 0.5 * (xedges[1:] + xedges[:-1])
    npix = slf._pixwid[det - 1][slitn]
    tilts = slf._tilts[det - 1].copy()
    lordloc = slf._lordloc[det - 1][:, slitn]
    rordloc = slf._rordloc[det - 1][:, slitn]
    # For each pixel, calculate the fraction along the slit's spatial direction
    spatval = (word[1] - lordloc[word[0]] + refine) / (rordloc[word[0]] - lordloc[word[0]])
    # Cumulative sum and normalize
    csum = np.cumsum(modvals)
    csum -= csum[0]
    csum /= csum[-1]
    # Find a first guess of the edges of the object profile - assume this is the innermost 90 percent of the flux
    argl = np.argmin(np.abs(csum - 0.05))
    argr = np.argmin(np.abs(csum - 0.95))
    # Considering the possible background pixels that are left of the object,
    # find the first time where the object profile no longer decreases as you
    # move toward the edge of the slit. This is the beginning of the noisy
    # object profile, which is where the object can no longer be distinguished
    # from the background.
    wl = np.where((modvals[1:] < modvals[:-1]) & (bincent[1:] < bincent[argl]))
    wr = np.where((modvals[1:] > modvals[:-1]) & (bincent[1:] > bincent[argr]))
    nl, nr = 0, 0
    if wl[0].size != 0:
        # This is the index of the first time where the object profile
        # no longer decreases as you move towards the slit edge
        nl_index = np.max(wl[0])
        # Calculate nl, defined as:
        # "number of pixels from the left slit edge to use as background pixels",
        # which is just nl_index with the sampling factor taken out
        nl_index_origscale = int(nl_index / oversampling_factor + 0.5)
        nl = nl_index_origscale
    if wr[0].size != 0:
        # This is the index of the first time where the object profile
        # no longer decreases as you move towards the slit edge
        nr_index = np.min(wr[0])
        # Calculate nr, defined as:
        # "number of pixels from the right slit edge to use as background pixels",
        # which is npix minus nr_index with the sampling factor taken out
        nr_index_origscale = int(nr_index / oversampling_factor + 0.5)
        nr = npix - nr_index_origscale
    if nl + nr < 5:
        msgs.warn("The object profile appears to extrapolate to the edge of the slit")
        msgs.info("A background subtraction will not be performed for slit {0:d}".format(slitn + 1))
        nl, nr = 0, 0
        return np.zeros_like(sciframe), nl, nr
    # Find background pixels and fit
    wbgpix_spatval = np.where((spatval <= float(nl) / npix) |
                              (spatval >= float(npix - nr) / npix))  # this cannot be used to index the 2D array tilts
    wbgpix = (word[0][wbgpix_spatval], word[1][wbgpix_spatval])  # this may be appropriate for indexing the 2D array tilts
    if settings.argflag['reduce']['skysub']['method'].lower() == 'bspline':
        msgs.info("Using bspline sky subtraction")
        srt = np.argsort(tilts[wbgpix])
        ivar = arutils.calc_ivar(varframe)
        # Perform a weighted b-spline fit to the sky background pixels
        mask, bspl = arutils.robust_polyfit(tilts[wbgpix][srt], sciframe[wbgpix][srt], 3,
                                            function='bspline',
                                            weights=np.sqrt(ivar)[wbgpix][srt], sigma=5.,
                                            maxone=False,
                                            **settings.argflag['reduce']['skysub']['bspline'])
        bgf_flat = arutils.func_val(bspl, tilts.flatten(), 'bspline')
        bgframe = bgf_flat.reshape(tilts.shape)
        if doqa:
            plt_bspline_sky(tilts, sciframe, bgf_flat, gdp)
            debugger.set_trace()
    else:
        msgs.error('Not ready for this method for skysub {:s}'.format(
            settings.argflag['reduce']['skysub']['method'].lower()))
    if np.any(np.isnan(bgframe)):
        msgs.warn("NAN in bgframe.  Replacing with 0")
        bad = np.isnan(bgframe)
        bgframe[bad] = 0.
    return bgframe, nl, nr
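# Minimal standalone sketch of the nl/nr edge-finding used above (illustration only; the
# synthetic profile is invented and nothing here calls into the pipeline).  The cumulative
# flux brackets the object between its 5% and 95% points; the outermost oversampled bin on
# each side where the noisy profile is not rising toward the object marks the transition
# from background to object, and dividing by the oversampling factor converts that bin
# index back into detector pixels.
def _sketch_bg_edges(npix=30, factor=3, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    nbins = factor * npix
    bincent = (np.arange(nbins) + 0.5) / nbins      # fractional slit position of each bin
    modvals = np.exp(-0.5 * ((bincent - 0.5) / 0.05)**2) + 0.05 * rng.randn(nbins)  # fake object + noise
    csum = np.cumsum(modvals)
    csum -= csum[0]
    csum /= csum[-1]
    argl = np.argmin(np.abs(csum - 0.05))           # left edge of the object (~5% of the flux)
    argr = np.argmin(np.abs(csum - 0.95))           # right edge of the object (~95% of the flux)
    wl = np.where((modvals[1:] < modvals[:-1]) & (bincent[1:] < bincent[argl]))
    wr = np.where((modvals[1:] > modvals[:-1]) & (bincent[1:] > bincent[argr]))
    nl = int(np.max(wl[0]) / factor + 0.5) if wl[0].size != 0 else 0
    nr = npix - int(np.min(wr[0]) / factor + 0.5) if wr[0].size != 0 else 0
    return nl, nr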
def object_profile(slf, sciframe, slitn, det, refine=0.0, factor=3):
    """ Generate an array of the object profile

    Parameters
    ----------
    slf : Class
      Science Exposure Class
    sciframe : ndarray
      science frame
    slitn : int
      Slit number
    det : int
      Detector index
    refine : float or ndarray
      refine the object traces. This should be a small value around 0.0.
      If a float, a constant offset will be applied.
      Otherwise, an array needs to be specified of the same length as
      sciframe.shape[0] that contains the refinement of each pixel along
      the spectral direction.
    factor : int, optional
      Sampling factor. factor=1 samples the object profile
      with the number of pixels along the length of the slit.
      factor=2 samples with twice the number of pixels along
      the length of the slit, etc.

    Returns
    -------
    xedges : ndarray
      bin edges
    profile : ndarray
      object profile
    """
    # Obtain the indices of the pixels that are in slit number 'slitn', and are not masked
    word = np.where((slf._slitpix[det - 1] == slitn + 1) & (slf._scimask[det - 1] == 0))
    if word[0].size == 0:
        msgs.warn("There are no pixels in slit {0:d}".format(slitn))
        return None, None
    # Determine the width of the slit in pixels, and calculate the
    # number of bins needed to oversample the object profile.
    npix = slf._pixwid[det - 1][slitn]
    nbins = factor * npix
    # Extract the left and right order locations, and estimate the spatial positions
    # of all pixels within the slit.
    lordloc = slf._lordloc[det - 1][:, slitn]
    rordloc = slf._rordloc[det - 1][:, slitn]
    spatval = (word[1] - lordloc[word[0]] + refine) / (rordloc[word[0]] - lordloc[word[0]])
    # Create an array to store the oversampled object profile
    profile = np.zeros(nbins)
    # Determine the bin edges of the oversampled array
    xedges = np.linspace(np.min(spatval), np.max(spatval), nbins + 1)
    # Assign each detector pixel within the slit to an oversampled pixel
    groups = np.digitize(spatval, xedges)
    flxfr = sciframe[word]
    # For each oversampled pixel, calculate the median flux
    msgs.work("It might be a good idea to use a weighted mean (where weights=flux), instead of the median here")
    for mm in range(1, xedges.size):
        medpix = flxfr[groups == mm]
        if medpix.size == 0:
            profile[mm - 1] = 0.0
        else:
            profile[mm - 1] = np.median(medpix)
    return xedges, profile
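# Minimal standalone sketch of the digitize-and-median binning used in object_profile()
# (illustration only; the spatial fractions and fluxes are whatever you pass in).  Pixels
# are assigned to factor*npix bins along the slit with np.digitize and each bin takes the
# median flux of its members, with empty bins set to zero.
def _sketch_profile(spatval, flux, npix, factor=3):
    import numpy as np
    nbins = factor * npix
    xedges = np.linspace(np.min(spatval), np.max(spatval), nbins + 1)
    groups = np.digitize(spatval, xedges)   # bin assignment for each pixel (matches the loop below)
    profile = np.zeros(nbins)
    for mm in range(1, xedges.size):
        medpix = flux[groups == mm]
        profile[mm - 1] = np.median(medpix) if medpix.size > 0 else 0.0
    return xedges, profile
# e.g.  xe, prof = _sketch_profile(np.random.rand(500), np.random.rand(500), npix=20)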
def global_skysub(sciimg, sciivar, piximg, slitmask, edgmask, skymask=None, bsp=0.6,
                  islit=None, sigrej=3., debug=False):
    # Python indexing
    ny = sciimg.shape[0]
    #
    nslit = np.max(slitmask)
    sky_image = np.zeros_like(sciimg)

    if skymask is None:
        skymask = np.ones_like(slitmask, dtype=int)
    # Mask edges and more
    sky_slitmask = slitmask * (skymask * (sciivar > 0) & (edgmask == 0) & (sciimg != maskval))

    if islit is None:
        nreduce = nslit
        slit_vec = np.arange(nslit) + 1
    else:
        nreduce = 1
        slit_vec = [islit]

    for jj in range(nreduce):
        slitid = slit_vec[jj]
        # Select only the pixels on this slit
        all = (slitmask == slitid) & (sciimg != maskval) & (sciivar > 0.)
        isky = sky_slitmask == slitid
        debugger.set_trace()  # The 2 lines above seem a bit wrong
        if np.sum(isky) < 10:
            msgs.warn('Not enough sky pixels found in slit {:d} ({:d} sky, {:d} total)'.format(
                slitid, np.sum(isky), np.sum(all)))
            continue

        # Setup (sort)
        isrt = np.argsort(piximg[isky])
        wsky = piximg[isky][isrt]
        sky = sciimg[isky][isrt]
        sky_ivar = sciivar[isky][isrt]

        pos_sky = np.where((sky > 1.0) & (sky_ivar > 0.))[0]
        # Pre-fit!
        if len(pos_sky) > ny:
            lsky = np.log(sky[pos_sky])
            lsky_ivar = lsky * 0. + 0.1
            # Init bspline to get the sky breakpoints (kludgy)
            tmp = bspline(wsky[pos_sky], nord=4, bkspace=bsp)
            #skybkpt = bspline_bkpts(wsky[pos_sky], nord=4, bkspace=bsp $
            #, / silent)
            lskyset, outmask, lsky_fit, red_chi = dev_extract.bspline_longslit(
                wsky[pos_sky], lsky, lsky_ivar, np.ones_like(pos_sky),
                fullbkpt=tmp.breakpoints, upper=sigrej, lower=sigrej,
                kwargs_reject={'groupbadpix': True})
            res = (sky[pos_sky] - np.exp(lsky_fit)) * np.sqrt(sky_ivar[pos_sky])
            lmask = (res < 5.0) & (res > -4.0)
            sky_ivar[pos_sky] = sky_ivar[pos_sky] * lmask

        # Full
        full_bspline = bspline(wsky, nord=4, bkspace=bsp)
        skyset, full_out, yfit, _ = dev_extract.bspline_longslit(
            wsky, sky, sky_ivar, np.ones_like(sky), fullbkpt=full_bspline.breakpoints,
            upper=sigrej, lower=sigrej, kwargs_reject={'groupbadpix': True, 'maxrej': 10})
        sky_image[all] = skyset.value(piximg[all])[0]  #, skyset)
        #debugger.set_trace()

        if debug:
            from matplotlib import pyplot as plt
            plt.clf()
            ax = plt.gca()
            ax.scatter(wsky[full_out], sky[full_out])
            ax.scatter(wsky[~full_out], sky[~full_out], color='red')
            ax.plot(wsky, yfit, color='green')
            plt.show()
    # Return
    return sky_image
                                                 PROF_NSIGMA=specobjs[iobj].prof_nsigma,
                                                 SN_GAUSS=SN_GAUSS)
            # Update the object profile and the fwhm and mask parameters
            obj_profiles[ipix[0], ipix[1], ii] = profile_model
            specobjs[iobj].trace_spat = xnew + mincol
            specobjs[iobj].fwhmfit = fwhmfit
            specobjs[iobj].fwhm = np.median(fwhmfit)
            mask_fact = 1.0 + 0.5*np.log10(np.fmax(np.sqrt(np.fmax(med_sn2, 0.0)), 1.0))
            maskwidth = 3.0*np.median(fwhmfit)*mask_fact
            if specobjs[iobj].prof_nsigma is None:
                specobjs[iobj].maskwidth = maskwidth
            else:
                specobjs[iobj].maskwidth = specobjs[iobj].prof_nsigma*(specobjs[iobj].fwhm/2.3548)
        else:
            msgs.warn("Bad extracted wavelengths in local_skysub")
            msgs.warn("Skipping this profile fit and continuing.....")

    sky_bmodel = np.array(0.0)
    iterbsp = 0
    while (sky_bmodel.any() == False) & (iterbsp <= 5):
        bsp_now = (1.2**iterbsp)*bsp
        # if skysample is set, determine optimal break-point spacing
        # directly measuring how well we are sampling of the sky. The
        # bsp in this case corresponds to the minimum distance between
        # breakpoints which we allow.
        if SKYSAMPLE:
            sampmask = (waveimg > 0.0) & (thismask == True)
            # fullbkpt = skybkpts()
            # TODO Port long_skybkpts.pro code and put it here.
        else:
def init_exp(lordloc, rordloc, shape, maskslits, det, scidx, fitstbl, tracelist, settings,
             ypos=0.5, **kwargs):
    """ Generate a list of SpecObjExp objects for a given exposure

    Parameters
    ----------
    self
       Instrument "setup" (min=10,max=99)
    scidx : int
       Index of file
    det : int
       Detector index
    tracelist : list of dict
       Contains trace info
    ypos : float, optional [0.5]
       Row on trimmed detector (fractional) to define slit (and object)

    Returns
    -------
    specobjs : list
      List of SpecObjExp objects
    """
    # Init
    specobjs = []
    if fitstbl is None:
        fitsrow = None
    else:
        fitsrow = fitstbl[scidx]
    config = instconfig(fitsrow=fitsrow, binning=settings['detector']['binning'])
    slits = range(len(tracelist))
    gdslits = np.where(~maskslits)[0]

    # Loop on slits
    for sl in slits:
        specobjs.append([])
        # Analyze the slit?
        if sl not in gdslits:
            specobjs[sl].append(None)
            continue
        # Object traces
        if tracelist[sl]['nobj'] != 0:
            # Loop on objects
            #for qq in range(trc_img[sl]['nobj']):
            for qq in range(tracelist[sl]['traces'].shape[1]):
                slitid, slitcen, xslit = artraceslits.get_slitid(shape, lordloc, rordloc,
                                                                 sl, ypos=ypos)
                # xobj
                _, xobj = get_objid(lordloc, rordloc, sl, qq, tracelist, ypos=ypos)
                # Generate
                if tracelist[sl]['object'] is None:
                    specobj = SpecObjExp((tracelist[0]['object'].shape[:2]), config, scidx,
                                         det, xslit, ypos, xobj, **kwargs)
                else:
                    specobj = SpecObjExp((tracelist[sl]['object'].shape[:2]), config, scidx,
                                         det, xslit, ypos, xobj, **kwargs)
                # Add traces
                specobj.trace = tracelist[sl]['traces'][:, qq]
                # Append
                specobjs[sl].append(copy.deepcopy(specobj))
        else:
            msgs.warn("No objects for slit {0:d}".format(sl+1))
            specobjs[sl].append(None)
    # Return
    return specobjs
def init_exp(lordloc, rordloc, shape, maskslits, det, scidx, fitstbl, tracelist, binning=None,
             ypos=0.5, objtype='unknown'):  #**kwargs):
    """ Generate a list of SpecObjExp objects for a given exposure

    Parameters
    ----------
    self
       Instrument "setup" (min=10,max=99)
    scidx : int
       Index of file
    det : int
       Detector index
    tracelist : list of dict
       Contains trace info
    ypos : float, optional [0.5]
       Row on trimmed detector (fractional) to define slit (and object)

    Returns
    -------
    specobjs : list
      List of SpecObjExp objects
    """
    # Init
    specobjs = []
    if fitstbl is None:
        fitsrow = None
    else:
        fitsrow = fitstbl[scidx]
    config = instconfig(fitsrow=fitsrow, binning=binning)
    slits = range(len(tracelist))
    gdslits = np.where(~maskslits)[0]

    # Loop on slits
    for sl in slits:
        specobjs.append([])
        # Analyze the slit?
        if sl not in gdslits:
            specobjs[sl].append(None)
            continue
        # Object traces
        if tracelist[sl]['nobj'] != 0:
            # Loop on objects
            #for qq in range(trc_img[sl]['nobj']):
            for qq in range(tracelist[sl]['traces'].shape[1]):
                slitid, slitcen, xslit = artraceslits.get_slitid(shape, lordloc, rordloc,
                                                                 sl, ypos=ypos)
                # xobj
                _, xobj = get_objid(lordloc, rordloc, sl, qq, tracelist, ypos=ypos)
                # Generate
                spec_obj_shape = (tracelist[0]['object'].shape[:2]) \
                    if tracelist[sl]['object'] is None \
                    else (tracelist[sl]['object'].shape[:2])
                specobj = SpecObjExp(spec_obj_shape, config, scidx, det, xslit, ypos, xobj,
                                     objtype=objtype)  #**kwargs)
                # Add traces
                specobj.trace = tracelist[sl]['traces'][:, qq]
                # Append
                specobjs[sl].append(copy.deepcopy(specobj))
        else:
            msgs.warn("No objects for slit {0:d}".format(sl + 1))
            specobjs[sl].append(None)
    # Return
    return specobjs
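# Example of the returned structure (a hedged sketch; the slit contents are invented).
# For a three-slit exposure where slit 0 is masked, slit 1 holds two traced objects and
# slit 2 holds none, init_exp() returns a per-slit list of lists:
#
#   >>> sobjs = init_exp(lordloc, rordloc, shape, maskslits, det, scidx, fitstbl,
#   ...                  tracelist, binning='1x1', objtype='science')
#   >>> # sobjs ~ [[None], [<SpecObjExp>, <SpecObjExp>], [None]]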