class adjustWCSToReferenceConfig(config.Config):
    """Parameters for adjusting the WCS of inputs to match a reference image."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_wcsCorrected", optional=True)
    # Primary alignment strategy.
    method = config.ChoiceField(
        "Alignment method", str,
        allowed={"sources_wcs": "Match sources using WCS",
                 "sources_offsets": "Match sources using telescope offsets",
                 "offsets": "Use telescope offsets only"},
        default="sources_wcs")
    # Strategy used if `method` fails; None disables any fallback.
    fallback = config.ChoiceField(
        "Fallback method", str,
        allowed={"sources_offsets": "Match sources using telescope offsets",
                 "offsets": "Use telescope offsets only"},
        default="offsets", optional=True)
    tolerance = config.RangeField(
        "Maximum distance from the header offset, "
        "for the correlation method (arcsec)",
        float, 1, min=0., optional=True)
class core_stacking_config(config.Config):
    """Parameters relevant to ALL stacking primitives."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_stack", optional=True)
    apply_dq = config.Field(doc="Use DQ to mask bad pixels?", dtype=bool,
                            default=True)
    statsec = config.Field(doc="Section for statistics", dtype=str,
                           default=None, optional=True, check=statsec_check)
    operation = config.ChoiceField(
        "Averaging operation", str,
        allowed={"mean": "arithmetic mean",
                 "wtmean": "variance-weighted mean",
                 "median": "median",
                 "lmedian": "low-median"},
        default="mean", optional=False)
    reject_method = config.ChoiceField(
        "Pixel rejection method", str,
        allowed={"none": "no rejection",
                 "minmax": "reject highest and lowest pixels",
                 "sigclip": "reject pixels based on scatter",
                 "varclip": "reject pixels based on variance array"},
        default="varclip", optional=False)
    # Sigma-clipping controls (used by sigclip/varclip rejection).
    hsigma = config.RangeField("High rejection threshold (sigma)",
                               float, 3., min=0)
    lsigma = config.RangeField("Low rejection threshold (sigma)",
                               float, 3., min=0)
    mclip = config.Field(doc="Use median for sigma-clipping?", dtype=bool,
                         default=True)
    max_iters = config.RangeField("Maximum number of clipping iterations",
                                  int, None, min=1, optional=True)
    # minmax rejection controls.
    nlow = config.RangeField("Number of low pixels to reject", int, 0, min=0)
    nhigh = config.RangeField("Number of high pixels to reject", int, 0, min=0)
    memory = config.RangeField("Memory available for stacking (GB)",
                               float, 1, min=0.1, optional=True)
    debug_pixel = config.RangeField("Debugging pixel location",
                                    int, None, min=0, optional=True)
class adjustWCSToReferenceConfig(config.Config):
    """Parameters for registering images to a reference via WCS or sources."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_wcsCorrected", optional=True)
    method = config.ChoiceField(
        "Alignment method", str,
        allowed={"header": "Use WCS in header",
                 "sources": "Match sources in images"},
        default="sources")
    # Used if source matching fails or finds too few sources.
    fallback = config.ChoiceField(
        "Fallback method", str,
        allowed={"header": "Use WCS in header"},
        default="header", optional=True)
    first_pass = config.RangeField(
        "Search radius for source matching (arcseconds)", float, 5., min=0)
    min_sources = config.RangeField(
        "Minimum number of sources required to use source matching",
        int, 3, min=1)
    cull_sources = config.Field(doc="Use only point sources for alignment?",
                                dtype=bool, default=False)
    rotate = config.Field(doc="Allow rotation for alignment?", dtype=bool,
                          default=False)
    scale = config.Field(doc="Allow magnification for alignment?", dtype=bool,
                         default=False)
class makeBPMConfig(config.Config):
    """Thresholds used to build a bad-pixel mask from darks and flats."""
    override_thresh = config.ChoiceField(
        "Apply user-specified thresholds, overriding any default calculation?",
        bool, {True: 'Must be True where no default algorithm is implemented'},
        default=True, optional=False)
    dark_lo_thresh = config.Field(doc="Low rejection threshold for dark (ADU)",
                                  dtype=float, default=None, optional=True)
    dark_hi_thresh = config.Field(doc="High rejection threshold for dark (ADU)",
                                  dtype=float, default=None, optional=True)
    # Flat thresholds are bounded either side of 1.0 (normalized flat).
    flat_lo_thresh = config.RangeField(
        "Low rejection threshold for normalized flat", float, None,
        max=1.0, optional=True)
    flat_hi_thresh = config.RangeField(
        "High rejection threshold for normalized flat", float, None,
        min=1.0, optional=True)

    def validate(self):
        """Check that the dark thresholds, when both given, are ordered."""
        config.Config.validate(self)
        lo, hi = self.dark_lo_thresh, self.dark_hi_thresh
        if lo is not None and hi is not None and lo >= hi:
            raise ValueError(
                "dark_hi_thresh must be greater than dark_lo_thresh")
class subtractOverscanConfig(config.core_1Dfitting_config):
    """Parameters for fitting and subtracting the overscan level."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_overscanSubtracted", optional=True)
    function = config.ChoiceField(
        "Fitting function", str,
        allowed={"none": "Row-by-row values",
                 "spline3": "Cubic spline",
                 "chebyshev": "Chebyshev polynomial"},
        default="spline3", optional=False)
    order = config.RangeField("Order of fitting function", int, None,
                              min=0, optional=True)
    nbiascontam = config.RangeField(
        "Number of columns to exclude from averaging", int, 0, min=0)

    def validate(self):
        """Cross-field checks beyond the per-field range constraints."""
        config.Config.validate(self)
        # BUG FIX: the original compared against "poly" and "spline", neither
        # of which is in `allowed` ({"none", "spline3", "chebyshev"}), so both
        # checks were unreachable. Test the actual choice values instead.
        if self.function == "chebyshev" and self.order is None:
            raise ValueError("Polynomial order must be specified")
        if self.function.startswith("spline") and self.order == 0:
            raise ValueError("Must specify a positive spline order, or None")
class extract1DSpectraConfig(config.Config):
    """Parameters for extracting 1D spectra from a 2D spectral image."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_extracted", optional=True)
    method = config.ChoiceField(
        "Extraction method", str,
        allowed={"standard": "no weighting",
                 "weighted": "inverse-variance weighted",
                 "optimal": "optimal extraction"},
        default="standard")
    width = config.RangeField("Width of extraction aperture (pixels)",
                              float, None, min=1, optional=True)
    grow = config.RangeField("Source aperture avoidance region (pixels)",
                             float, 10, min=0, optional=True)
    subtract_sky = config.Field(
        doc="Subtract sky spectra if the data have not been sky corrected?",
        dtype=bool, default=True)
    debug = config.Field(doc="Draw extraction apertures on image display?",
                         dtype=bool, default=False)
class normalizeFlatConfig(config.Config):
    """Parameters for normalizing a flatfield frame."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_normalized", optional=True)
    scale = config.ChoiceField(
        "Statistic for scaling", str,
        allowed={"mean": "Scale by mean",
                 "median": "Scale by median"},
        default="median")
    separate_ext = config.Field(doc="Scale extensions separately?",
                                dtype=bool, default=False)
class calRequirementConfig(config.Config):
    """Controls whether a calibration is required for a correction step."""
    # TYPO FIX: the implicit string concatenations below previously produced
    # "processingmode." and "theprocessing mode." (missing spaces).
    do_cal = config.ChoiceField(
        "Calibration requirement", str,
        allowed={"procmode": "Use the default rules set by the processing "
                             "mode.",
                 "force": "Require a calibration regardless of the "
                          "processing mode.",
                 "skip": "Skip this correction, no calibration required."},
        default="procmode")
class stackFlatsConfig(config.Config):
    """Parameters for stacking flatfield frames."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_stack", optional=True)
    apply_dq = config.Field(doc="Use DQ to mask bad pixels?", dtype=bool,
                            default=True)
    scale = config.Field(doc="Scale images to the same intensity?",
                         dtype=bool, default=False)
    operation = config.ChoiceField(
        "Averaging operation", str,
        allowed={"mean": "arithmetic mean",
                 "wtmean": "variance-weighted mean",
                 "median": "median",
                 "lmedian": "low-median"},
        default="mean", optional=False)
    reject_method = config.ChoiceField(
        "Pixel rejection method", str,
        allowed={"none": "no rejection",
                 "minmax": "reject highest and lowest pixels",
                 "sigclip": "reject pixels based on scatter",
                 "varclip": "reject pixels based on variance array"},
        default="minmax", optional=False)
    # Sigma-clipping controls (sigclip/varclip).
    hsigma = config.RangeField("High rejection threshold (sigma)",
                               float, 3., min=0)
    lsigma = config.RangeField("Low rejection threshold (sigma)",
                               float, 3., min=0)
    mclip = config.Field(doc="Use median for sigma-clipping?", dtype=bool,
                         default=True)
    max_iters = config.RangeField("Maximum number of clipping iterations",
                                  int, None, min=1, optional=True)
    # minmax controls.
    nlow = config.RangeField("Number of low pixels to reject", int, 0, min=0)
    nhigh = config.RangeField("Number of high pixels to reject", int, 0, min=0)
    memory = config.RangeField("Memory available for stacking (GB)",
                               float, None, min=0.1, optional=True)
class resampleToCommonFrameConfig(config.Config):
    """Parameters for resampling images onto a common pixel frame."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_align", optional=True)
    interpolator = config.ChoiceField(
        "Type of pixel interpolation", str,
        allowed={"nearest": "nearest pixel",
                 "linear": "linear interpolation",
                 "spline2": "quadratic spline",
                 "spline3": "cubic spline",
                 "spline4": "quartic spline",
                 # TYPO FIX: was "qunitic spline"
                 "spline5": "quintic spline"},
        default="linear")
    trim_data = config.Field(doc="Trim to field of view of reference image?",
                             dtype=bool, default=False)
class scaleByIntensityConfig(config.Config):
    """Parameters for scaling frames to a common intensity level."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_scaled", optional=True)
    section = config.Field(doc="Statistics section", dtype=str,
                           default=None, optional=True)
    scaling = config.ChoiceField(
        "Statistic for scaling", str,
        allowed={"mean": "Scale by mean",
                 "median": "Scale by median"},
        default="mean")
    separate_ext = config.Field(doc="Scale extensions separately?",
                                dtype=bool, default=False)
class determineWavelengthSolutionConfig(config.Config):
    """Parameters for fitting a wavelength solution to an arc spectrum."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_wavelengthSolutionDetermined",
                          optional=True)
    order = config.RangeField("Order of fitting polynomial", int, 2, min=1)
    center = config.RangeField("Central row/column to extract", int, None,
                               min=1, optional=True)
    nsum = config.RangeField("Number of lines to sum", int, 10, min=1)
    min_snr = config.RangeField("Minimum SNR for peak detection",
                                float, 10., min=1.)
    min_sep = config.RangeField("Minimum feature separation (pixels)",
                                float, 2., min=1.)
    weighting = config.ChoiceField(
        "Weighting of identified peaks", str,
        allowed={"none": "no weighting",
                 "natural": "natural weighting",
                 "relative": "relative to local peaks"},
        default="natural")
    fwidth = config.RangeField("Feature width in pixels", float, None,
                               min=2., optional=True)
    # Accepts an int or a comma-separated string of per-segment minima.
    min_lines = config.Field(doc="Minimum number of lines to fit each segment",
                             dtype=(str, int), default='15,20',
                             check=min_lines_check)
    central_wavelength = config.RangeField(
        "Estimated central wavelength (nm)", float, None,
        min=300., max=25000., optional=True)
    dispersion = config.Field(doc="Estimated dispersion (nm/pixel)",
                              dtype=float, default=None, optional=True)
    linelist = config.Field(doc="Filename of arc line list", dtype=str,
                            default=None, optional=True)
    alternative_centers = config.Field(
        doc="Try alternative wavelength centers?", dtype=bool, default=False)
    debug = config.Field(doc="Make diagnostic plots?", dtype=bool,
                         default=False)
class storeCalibrationConfig(config.Config):
    """Selects the type of calibration product being stored."""
    caltype = config.ChoiceField(
        "Type of calibration", str,
        allowed={"processed_arc": "processed ARC",
                 # TYPO FIX: was "procsessed BIAS"
                 "processed_bias": "processed BIAS",
                 "processed_dark": "processed DARK",
                 "processed_flat": "processed FLAT",
                 "processed_fringe": "processed fringe",
                 "bpm": "bad pixel mask"},
        optional=False)
class addCalibrationConfig(config.Config):
    """Manually attaches a user-supplied calibration file to the data."""
    caltype = config.ChoiceField(
        "Type of calibration required", str,
        allowed={"processed_arc": "processed ARC",
                 # TYPO FIX: was "procsessed BIAS"
                 "processed_bias": "processed BIAS",
                 "processed_dark": "processed DARK",
                 "processed_flat": "processed FLAT",
                 "processed_fringe": "processed fringe"})
    calfile = config.Field("Filename of calibration", str)
class formatOutputConfig(config.Config):
    """Controls how much ancillary data is bundled into the output file."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_formattedOutput", optional=True)
    detail = config.ChoiceField(
        "Level of detail", str,
        {'default': 'Default output',
         'processed_image': 'Include processed CCD image',
         'flat_profile': 'Include flat profile',
         'sensitivity_curve': 'Include computed sensitivity curve'},
        default='default')
class interpolateAndCombineConfig(config.Config):
    """Parameters for interpolating spectra onto one scale and combining."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_interpdAndCombined", optional=True)
    scale = config.ChoiceField(
        "Output wavelength scale", str,
        {'linear': 'Linear wavelength scale',
         'loglinear': 'Log-linear wavelength scale'},
        default='loglinear')
    skip = config.Field(doc="No-op this primitive?", dtype=bool,
                        default=False, optional=True)
    oversample = config.Field(
        doc="(Approx.) oversampling of output wavelength scale",
        dtype=float, default=2.0)
class getCalibrationConfig(parameters_calibdb.getCalibrationConfig):
    """Extends the base caltype list with slit-viewer products and BPMs."""
    caltype = config.ChoiceField(
        "Type of calibration", str,
        allowed={"processed_arc": "processed ARC",
                 # TYPO FIX: was "procsessed BIAS"
                 "processed_bias": "processed BIAS",
                 "processed_dark": "processed DARK",
                 "processed_flat": "processed FLAT",
                 "processed_fringe": "processed fringe",
                 "processed_slitflat": "processed slit-viewer flat",
                 "processed_slit": "processed slit-viewer",
                 "bpm": "bad pixel mask"},
        optional=False)
class addReferenceCatalogConfig(config.Config):
    """Parameters for attaching a reference catalog to the data."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_refcatAdded", optional=True)
    radius = config.RangeField("Search radius (degrees)",
                               float, 0.067, min=0.)
    source = config.ChoiceField(
        "Name of catalog to search", str,
        allowed={"gmos": "Gemini optical catalog",
                 "2mass": "2MASS Infrared catalog",
                 "sdss9": "SDSS DR9 optical catalog",
                 "ukidss9": "UKIDSS DR9 infrared catalog"},
        default="gmos", optional=False)
class findSourceAperturesConfig(config.Config):
    """Parameters for locating source apertures along the spatial direction."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_aperturesFound", optional=True)
    max_apertures = config.RangeField("Maximum number of sources to find",
                                      int, None, min=1, optional=True)
    percentile = config.RangeField(
        "Percentile to determine signal for each spatial pixel",
        float, 95, min=1, max=100, optional=True)
    section = config.Field(
        doc="Pixel section(s) for measuring the spatial profile",
        dtype=str, default=None, optional=True, check=check_section)
    min_sky_region = config.RangeField(
        "Minimum number of contiguous pixels between sky lines",
        int, 20, min=1)
    use_snr = config.Field(
        doc="Use signal-to-noise ratio rather than data to find peaks?",
        dtype=bool, default=True)
    threshold = config.RangeField(
        "Threshold for automatic width determination", float, 0.01,
        min=0, max=1)
    sizing_method = config.ChoiceField(
        "Method for automatic width determination", str,
        allowed={"peak": "height relative to peak",
                 "integral": "integrated flux"},
        default="peak")
class write1DSpectraConfig(config.Config):
    """Parameters for writing extracted 1D spectra to disk."""
    format = config.ChoiceField("Format for writing", str,
                                allowed=table_writing_formats(),
                                default="ascii", optional=False)
    header = config.Field(doc="Write full FITS header?", dtype=bool,
                          default=False)
    extension = config.Field(doc="Filename extension", dtype=str,
                             default="dat")
    apertures = config.Field(doc="Apertures to write", dtype=(str, int),
                             default=None, optional=True,
                             check=list_of_ints_check)
    dq = config.Field(doc="Write Data Quality values?", dtype=bool,
                      default=False)
    var = config.Field(doc="Write Variance values?", dtype=bool,
                       default=False)
    overwrite = config.Field(doc="Overwrite existing files?", dtype=bool,
                             default=False)

    def validate(self):
        """A full FITS header can only accompany ASCII output formats."""
        config.Config.validate(self)
        if self.header and not self.format.startswith("ascii"):
            raise ValueError("FITS header can only be written with ASCII formats")
class storeCalibrationConfig(config.Config):
    """Selects the type of calibration product being stored."""
    caltype = config.ChoiceField(
        "Type of calibration", str,
        allowed={"processed_arc": "processed ARC",
                 # TYPO FIX: was "procsessed BIAS"
                 "processed_bias": "processed BIAS",
                 "processed_dark": "processed DARK",
                 "processed_flat": "processed FLAT",
                 "processed_fringe": "processed fringe",
                 "bpm": "bad pixel mask",
                 "sq": "science quality",
                 "qa": "QA",
                 "ql": "quick look",
                 "processed_standard": "processed standard",
                 "processed_slitillum": "processed slitillum",
                 },
        optional=False)
class adjustSlitOffsetToReferenceConfig(config.Config):
    """Parameters for registering slit frames to a reference frame."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_slitOffsetCorrected", optional=True)
    tolerance = config.RangeField(
        "Maximum distance from the header offset, "
        "for the correlation method (arcsec)",
        float, 1, min=0., optional=True)
    method = config.ChoiceField(
        "Alignment method", str,
        allowed={"offsets": "Use telescope offsets",
                 "correlation": "Correlate the slit profile"},
        default="correlation")
class getCalibrationConfig(config.Config):
    """Parameters for retrieving a calibration from the calibration DB."""
    caltype = config.ChoiceField(
        "Type of calibration required", str,
        allowed={"processed_arc": "processed ARC",
                 # TYPO FIX: was "procsessed BIAS"
                 "processed_bias": "processed BIAS",
                 "processed_dark": "processed DARK",
                 "processed_flat": "processed FLAT",
                 "processed_fringe": "processed fringe",
                 "processed_standard": "processed standard",
                 "processed_slitillum": "processed slitillum",
                 },
        optional=False)
    procmode = config.Field(doc="Processing mode", dtype=str,
                            default=None, optional=True)
    refresh = config.Field(doc="Refresh existing calibration associations?",
                           dtype=bool, default=True)
    howmany = config.RangeField("Maximum number of calibrations to return",
                                int, None, min=1, optional=True)
class subtractOverscanConfig(config.Config):
    """Parameters for fitting and subtracting the overscan level."""
    suffix = config.Field(doc="Filename suffix", dtype=str,
                          default="_overscanSubtracted", optional=True)
    niterate = config.RangeField("Maximum number of iterations",
                                 int, 2, min=1)
    high_reject = config.RangeField(
        "High rejection limit (standard deviations)", float, 3.,
        min=0., optional=True)
    low_reject = config.RangeField(
        "Low rejection limit (standard deviations)", float, 3.,
        min=0., optional=True)
    function = config.ChoiceField(
        "Type of function", str,
        # TYPO FIX: was "Cublic spline"
        allowed={"spline": "Cubic spline",
                 "poly": "Polynomial",
                 "none": "Row-by-row"},
        default="spline", optional=True)
    nbiascontam = config.RangeField(
        "Number of columns to exclude from averaging",
        int, None, min=0, optional=True)
    order = config.RangeField("Order of fitting function", int, None,
                              min=0, optional=True)

    def validate(self):
        """A polynomial fit needs an explicit order."""
        config.Config.validate(self)
        if self.function == "poly" and self.order is None:
            raise ValueError("Polynomial order must be specified")
class flagCosmicRaysConfig(config.Config):
    """Parameters for detecting and flagging cosmic rays.

    Background fitting is done first (object continuum and sky-line models),
    then the remaining parameters are passed through to astroscrappy's
    ``detect_cosmics`` (LA Cosmic algorithm).
    """
    suffix = config.Field(
        doc="Filename suffix",
        dtype=str,
        default="_CRMasked",
        optional=True,
    )
    bitmask = config.Field(
        doc="Bits in the input data quality `flags` that are to be used to "
        "exclude bad pixels from cosmic ray detection and cleaning. Default "
        "65535 (all non-zero bits, up to 16 planes).",
        dtype=int,
        optional=True,
        default=65535,
    )
    debug = config.Field(
        doc="Make diagnostic plots?",
        dtype=bool,
        default=False
    )
    # Fit parameters --------------------------------------------------------
    x_order = config.Field(
        doc="Order for fitting and subtracting object continuum and sky line "
        "models, prior to running the main cosmic ray detection algorithm. "
        "When None, defaults are used, according to the image size (as in "
        "the IRAF task gemcrspec). When 0, no fit is done.",
        dtype=int,
        optional=True,
        default=None,
    )
    y_order = config.Field(
        doc="Order for fitting and subtracting object continuum and sky line "
        "models, prior to running the main cosmic ray detection algorithm. "
        "When None, defaults are used, according to the image size (as in "
        "the IRAF task gemcrspec). When 0, no fit is done.",
        dtype=int,
        optional=True,
        default=None,
    )
    bkgfit_niter = config.Field(
        doc="Maximum number of iterations for the objects and sky fits.",
        dtype=int,
        optional=True,
        default=3,
    )
    bkgfit_lsigma = config.Field(
        doc="Rejection threshold in standard deviations below the mean, "
        "for the objects and sky fits.",
        dtype=float,
        optional=True,
        default=4.0,
    )
    bkgfit_hsigma = config.Field(
        doc="Rejection threshold in standard deviations above the mean, "
        "for the objects and sky fits.",
        dtype=float,
        optional=True,
        default=4.0,
    )
    # Astroscrappy's detect_cosmics parameters ------------------------------
    sigclip = config.Field(
        doc="Laplacian-to-noise limit for cosmic ray detection. Lower "
        "values will flag more pixels as cosmic rays.",
        dtype=float,
        optional=True,
        default=4.5,
    )
    sigfrac = config.Field(
        # TYPO FIX: was "lapacian-to-noise detection limit ofsigfrac"
        doc="Fractional detection limit for neighboring pixels. For cosmic "
        "ray neighbor pixels, a laplacian-to-noise detection limit of "
        "sigfrac * sigclip will be used.",
        dtype=float,
        optional=True,
        default=0.3,
    )
    objlim = config.Field(
        doc="Minimum contrast between Laplacian image and the fine structure "
        "image. Increase this value if cores of bright stars are flagged as "
        "cosmic rays.",
        dtype=float,
        optional=True,
        default=5.0,
    )
    niter = config.Field(
        doc="Number of iterations of the LA Cosmic algorithm to perform",
        dtype=int,
        optional=True,
        default=4,
    )
    sepmed = config.Field(
        doc="Use the separable median filter instead of the full median "
        "filter. The separable median is not identical to the full median "
        "filter, but they are approximately the same and the separable median "
        "filter is significantly faster and still detects cosmic rays well.",
        dtype=bool,
        optional=True,
        default=True,
    )
    cleantype = config.ChoiceField(
        doc="Set which clean algorithm is used.",
        allowed={
            # TYPO FIX: was "An umasked 5x5 median filter"
            'median': 'An unmasked 5x5 median filter',
            'medmask': 'A masked 5x5 median filter',
            'meanmask': 'A masked 5x5 mean filter',
            'idw': 'A masked 5x5 inverse distance weighted interpolation',
        },
        dtype=str,
        optional=True,
        default="meanmask",
    )
    fsmode = config.ChoiceField(
        doc="Method to build the fine structure image.",
        allowed={
            'median': 'Use the median filter in the standard LA Cosmic '
                      'algorithm',
            'convolve': 'Convolve the image with the psf kernel to calculate '
                        'the fine structure image.',
        },
        dtype=str,
        optional=True,
        default='median',
    )
    psfmodel = config.ChoiceField(
        doc="Model to use to generate the psf kernel if fsmode == 'convolve' "
        "and psfk is None. The current choices are Gaussian and Moffat "
        "profiles.",
        allowed={
            'gauss': 'Circular Gaussian kernel',
            'moffat': 'Circular Moffat kernel',
            'gaussx': 'Gaussian kernel in the x direction',
            'gaussy': 'Gaussian kernel in the y direction',
        },
        dtype=str,
        optional=True,
        default="gauss",
    )
    psffwhm = config.Field(
        doc="Full Width Half Maximum of the PSF to use for the kernel.",
        dtype=float,
        optional=True,
        default=2.5,
    )
    psfsize = config.Field(
        doc="Size of the kernel to calculate. Returned kernel will have size "
        "psfsize x psfsize. psfsize should be odd.",
        dtype=int,
        optional=True,
        default=7,
    )
    psfbeta = config.Field(
        doc="Moffat beta parameter. Only used if psfmodel=='moffat'.",
        dtype=float,
        optional=True,
        default=4.765,
    )
    verbose = config.Field(
        doc="Print to the screen or not.",
        dtype=bool,
        optional=True,
        default=False,
    )