def filter(mask, cube, header, clipMethod, threshold, rmsMode, fluxRange, verbose):
    err.message("Running threshold finder.")
    
    # Sanity checks of user input
    err.ensure(clipMethod in {"absolute", "relative"},
        "Threshold finder failed. Illegal clip method: '" + str(clipMethod) + "'.")
    err.ensure(rmsMode in {"std", "mad", "gauss", "negative"},
        "Threshold finder failed. Illegal RMS mode: '" + str(rmsMode) + "'.")
    err.ensure(fluxRange in {"positive", "negative", "all"},
        "Threshold finder failed. Illegal flux range: '" + str(fluxRange) + "'.")
    
    # Scale threshold by RMS if requested
    if clipMethod == "relative":
        threshold *= GetRMS(cube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose)
    
    # Print some information and check sign of threshold
    err.message(" Using threshold of " + str(threshold) + ".")
    err.ensure(threshold >= 0.0, "Threshold finder failed. Threshold value is negative.")
    
    # Run the threshold finder, setting bit 1 of the mask for |cube| >= |threshold|
    np.bitwise_or(mask, np.greater_equal(np.absolute(cube), threshold), out=mask)
    
    return
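# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): the threshold finder above reduces
# to a single vectorised comparison that switches mask bits on wherever
# |cube| >= threshold. The toy function below, with hypothetical names and
# values, reproduces just that step so it can be tested in isolation.
# ---------------------------------------------------------------------------
def _demo_threshold_mask():
    import numpy as np
    toy_cube = np.array([[-3.0, 0.5], [2.0, -0.1]])
    toy_mask = np.zeros(toy_cube.shape, dtype=bool)
    # Set mask to True wherever the absolute flux reaches the threshold of 1.5
    np.bitwise_or(toy_mask, np.greater_equal(np.absolute(toy_cube), 1.5), out=toy_mask)
    return toy_mask  # [[True, False], [True, False]]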
def print_memory_usage(t0):
    err.message("\x1B[36mPeak memory usage: {0:.3f} MB at {1:.3f} s\x1B[0m".format(
        float(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / MEM_FACTOR, time() - t0))
    return
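# ---------------------------------------------------------------------------
# Note (illustrative, not part of SoFiA): resource.getrusage() reports
# ru_maxrss in kilobytes on Linux but in bytes on macOS, which is why a
# platform-dependent MEM_FACTOR is needed to convert the value to megabytes.
# A minimal sketch of how such a factor could be chosen; the helper name is
# hypothetical and MEM_FACTOR itself is defined elsewhere in SoFiA.
# ---------------------------------------------------------------------------
def _demo_mem_factor():
    import sys
    # Bytes -> MB on macOS (Darwin), kB -> MB elsewhere (e.g. Linux)
    return 1024.0 * 1024.0 if sys.platform == "darwin" else 1024.0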
def apply_weights_file(data, weightsFile, subcube):
    # Load weights cube
    err.message("Applying weights cube:\n " + str(weightsFile))
    try:
        f = fits.open(weightsFile, memmap=False)
        header_weights = f[0].header
    except:
        err.error("Failed to read weights cube.")
    
    # Extract axis sizes and types
    n_axes_weights, axis_size_weights, axis_type_weights = extract_axis_size(header_weights)
    
    # Ensure correct dimensionality
    check_cube_dimensions(n_axes_weights, axis_size_weights, cube_name="weights cube", min_dim=1, max_dim=4)
    
    # Multiply data by weights
    # 1-D spectrum
    if n_axes_weights == 1:
        err.warning("Weights cube has 1 axis; interpreted as spectrum.\nAdding first and second axis.")
        if len(subcube):
            err.ensure(len(subcube) == 6, "Subcube list must have 6 entries ({0:d} given).".format(len(subcube)))
            data *= np.reshape(f[0].section[subcube[4]:subcube[5]], (-1, 1, 1))
        else:
            data *= np.reshape(f[0].data, (-1, 1, 1))
    
    # 2-D image
    elif n_axes_weights == 2:
        if len(subcube) == 6 or len(subcube) == 4:
            data *= np.array([f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
        else:
            data *= np.array([f[0].data])
    
    # 3-D cube
    elif n_axes_weights == 3:
        if len(subcube) == 6:
            data *= f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
        else:
            data *= f[0].data
    
    # 4-D hypercube
    else:
        if len(subcube) == 6:
            data *= f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
        else:
            data *= f[0].section[0]
    
    f.close()
    err.message(" Weights cube applied.")
    
    return data
def regridMaskedChannels(datacube, maskcube, header):
    import numpy as np
    import scipy.constants
    from scipy import interpolate
    from sofia import error as err
    
    if not check_wcs_info(header, spatial=True):
        err.warning("Axis descriptors missing from FITS file header.\nIgnoring the effect of CELLSCAL = 1/F.")
        return datacube
    
    maskcubeFlt = maskcube.astype("float")
    maskcubeFlt[maskcube > 1] = 1.0
    
    err.message("Regridding...")
    z = (np.arange(1.0, header["NAXIS3"] + 1) - header["CRPIX3"]) * header["CDELT3"] + header["CRVAL3"]
    
    if check_header_keywords(KEYWORDS_VELO, header["CTYPE3"]):
        pixscale = (1.0 - header["CRVAL3"] / scipy.constants.c) / (1.0 - z / scipy.constants.c)
    elif check_header_keywords(KEYWORDS_FREQ, header["CTYPE3"]):
        pixscale = header["CRVAL3"] / z
    else:
        err.warning("Cannot convert 3rd axis coordinates to frequency.\nIgnoring the effect of CELLSCAL = 1/F.")
        pixscale = np.ones((header["NAXIS3"]))
    
    x0 = header["CRPIX1"] - 1
    y0 = header["CRPIX2"] - 1
    xs = np.arange(datacube.shape[2], dtype=float) - x0
    ys = np.arange(datacube.shape[1], dtype=float) - y0
    
    for zz in range(datacube.shape[0]):
        regrid_channel = interpolate.RectBivariateSpline(ys * pixscale[zz], xs * pixscale[zz], datacube[zz])
        datacube[zz] = regrid_channel(ys, xs)
        regrid_channel_mask = interpolate.RectBivariateSpline(ys * pixscale[zz], xs * pixscale[zz], maskcubeFlt[zz])
        maskcubeFlt[zz] = regrid_channel_mask(ys, xs)
    
    datacube[abs(maskcubeFlt) <= abs(np.nanmin(maskcubeFlt))] = np.nan
    del maskcubeFlt
    
    return datacube
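# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): regridMaskedChannels() rescales the
# pixel grid of each channel by a per-channel factor and re-evaluates the
# image on the original grid with a bivariate spline. The hypothetical helper
# below shows that operation for a single 2-D channel (at least 4 pixels per
# axis are needed for the default cubic spline).
# ---------------------------------------------------------------------------
def _demo_regrid_channel(channel, x0, y0, pixscale):
    import numpy as np
    from scipy import interpolate
    ys = np.arange(channel.shape[0], dtype=float) - y0
    xs = np.arange(channel.shape[1], dtype=float) - x0
    # Spline defined on the scaled grid, evaluated back on the original grid
    spline = interpolate.RectBivariateSpline(ys * pixscale, xs * pixscale, channel)
    return spline(ys, xs)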
def apply_weights_function(data, weightsFunction):
    err.message("Applying weights function:\n " + str(weightsFunction))
    
    # Define whitelist of allowed character sequences and import relevant Numpy functions
    whitelist = ["x", "y", "z", "e", "E", "sin", "cos", "tan", "arcsin", "arccos", "arctan", "arctan2",
        "sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh", "exp", "log", "sqrt", "square",
        "power", "absolute", "fabs", "sign"]
    from numpy import sin, cos, tan, arcsin, arccos, arctan, arctan2, sinh, cosh, tanh, arcsinh, arccosh, arctanh, exp, log, sqrt, square, power, absolute, fabs, sign
    
    # Search for all keywords consisting of consecutive sequences of alphabetical characters
    keywordsFound = filter(None, re.split("[^a-zA-Z]+", str(weightsFunction)))
    
    # Check for non-whitelisted sequences
    for keyword in keywordsFound:
        err.ensure(keyword in whitelist,
            "Unknown keyword '" + str(keyword) + "' found in weights function:\n"
            " " + str(weightsFunction) + "\n"
            "Please check your input.")
    
    # Loop over all channels
    for i in range(data.shape[0]):
        # Create index arrays over 2-D planes (i.e. of width dz = 1)
        z, y, x = np.indices((1, data.shape[1], data.shape[2]))
        z += i
        
        # Multiply each plane by weights function
        try:
            # NOTE: eval() should be safe now as we don't allow for non-whitelisted keywords.
            data[z, y, x] *= eval(str(weightsFunction))
        except:
            err.error("Failed to evaluate weights function:\n"
                " " + str(weightsFunction) + "\n"
                "Please check your input.")
    
    err.message(" Weights function applied.")
    
    return data
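# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): apply_weights_function() only
# passes the user expression to eval() after verifying that every alphabetic
# token is whitelisted. The hypothetical helper below isolates that check.
# ---------------------------------------------------------------------------
def _demo_is_whitelisted(expression, whitelist):
    import re
    # Split on non-alphabetic characters and drop empty tokens
    tokens = [t for t in re.split("[^a-zA-Z]+", expression) if t]
    return all(t in whitelist for t in tokens)

# Example: _demo_is_whitelisted("sqrt(x*x + y*y)", ["x", "y", "sqrt"]) -> True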
def flag(cube, regions):
    if regions:
        err.message("Flagging data cube.")
        dim = len(cube.shape)
        try:
            for region in regions:
                # Replace empty upper limits with the full axis size
                for i in range(0, len(region) // 2):
                    if region[2 * i + 1] == "":
                        region[2 * i + 1] = cube.shape[dim - i - 1]
                if len(region) == 4:
                    cube[0, region[2]:region[3], region[0]:region[1]] = np.nan
                else:
                    cube[region[4]:region[5], region[2]:region[3], region[0]:region[1]] = np.nan
            err.message("Cube has been flagged.")
        except:
            err.warning("Flagging did not succeed. Please check the dimensions of your cube and filters.")
    return cube
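# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): a flag region is a list of pixel
# ranges, [x1, x2, y1, y2] for a 2-D image (stored as a single-plane cube) or
# [x1, x2, y1, y2, z1, z2] for a sub-cube, with empty strings standing in for
# "up to the axis size". The toy call below (hypothetical values) blanks a
# 2 x 2 x 2 corner of a small cube.
# ---------------------------------------------------------------------------
def _demo_flag():
    import numpy as np
    toy_cube = np.ones((4, 4, 4))
    flagged = flag(toy_cube, [[0, 2, 0, 2, 0, 2]])
    return np.isnan(flagged).sum()  # 8 voxels flagged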
def import_data(doSubcube, inFile, weightsFile, maskFile, weightsFunction=None, subcube=[], subcubeMode="pixel", doFlag=False, flagRegions=False, flagFile="", cubeOnly=False):
    # Basic sanity checks on user input
    err.ensure(os.path.isfile(inFile), "Data file not found:\n " + str(inFile))
    
    # -------------------------------
    # Open input cube and read header
    # -------------------------------
    err.message("Loading input data cube.")
    try:
        f = fits.open(inFile, mode="readonly", memmap=False, do_not_scale_image_data=False)
        header = f[0].header
    except:
        err.error("Failed to load primary HDU of data file:\n " + str(inFile))
    
    # Extract axis sizes and types
    n_axes, axis_size, axis_type = extract_axis_size(header)
    
    # Check dimensionality of data cube
    check_cube_dimensions(n_axes, axis_size, cube_name="data cube")
    
    # Print some information
    err.message(" Data cube has {0:d} axes.".format(header["NAXIS"]))
    err.message(" Types: " + str(axis_type))
    err.message(" Sizes: " + str(axis_size))
    
    # Extract subcube boundaries if requested
    if len(subcube):
        subcube = get_subcube_range(header, n_axes, axis_size, subcube, subcubeMode)
    else:
        subcube = []
    
    # --------------------------------
    # Read requested subregion of data
    # --------------------------------
    # 2-D image
    if n_axes == 2:
        fullshape = [axis_size[1], axis_size[0]]
        if len(subcube):
            data = np.array([f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
            header["CRPIX1"] -= subcube[0]
            header["CRPIX2"] -= subcube[2]
            header["NAXIS1"] = subcube[1] - subcube[0]
            header["NAXIS2"] = subcube[3] - subcube[2]
        else:
            data = np.array([f[0].data])
    
    # 3-D cube
    elif n_axes == 3:
        fullshape = [axis_size[2], axis_size[1], axis_size[0]]
        if len(subcube):
            data = f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
            header["CRPIX1"] -= subcube[0]
            header["CRPIX2"] -= subcube[2]
            header["CRPIX3"] -= subcube[4]
            header["NAXIS1"] = subcube[1] - subcube[0]
            header["NAXIS2"] = subcube[3] - subcube[2]
            header["NAXIS3"] = subcube[5] - subcube[4]
        else:
            data = f[0].data
    
    # 4-D hypercube
    else:
        fullshape = [axis_size[2], axis_size[1], axis_size[0]]
        if len(subcube):
            data = f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
            header["CRPIX1"] -= subcube[0]
            header["CRPIX2"] -= subcube[2]
            header["CRPIX3"] -= subcube[4]
            header["NAXIS1"] = subcube[1] - subcube[0]
            header["NAXIS2"] = subcube[3] - subcube[2]
            header["NAXIS3"] = subcube[5] - subcube[4]
        else:
            data = f[0].section[0]
    
    # Close input cube
    f.close()
    err.message("Input data cube loaded.")
    
    # ---------------------------------------------------------
    # If no additional actions required, return data and header
    # ---------------------------------------------------------
    if cubeOnly:
        return data, header
    
    # ---------------------------------------------------
    # Otherwise carry out additional actions as requested
    # ---------------------------------------------------
    # Weighting
    if weightsFile:
        data = apply_weights_file(data, weightsFile, subcube)
    elif weightsFunction:
        data = apply_weights_function(data, weightsFunction)
    
    # Flagging
    if doFlag:
        data = apply_flagging(data, flagFile, flagRegions, subcube)
    
    # Masking
    if maskFile:
        mask = import_mask(maskFile, header, axis_size, subcube)
    else:
        # Create an empty mask if none is provided.
        mask = np.zeros(data.shape, dtype=bool)
    
    return data, header, mask, subcube
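# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): import_data() avoids loading the
# full data array when a subcube is requested by using astropy.io.fits
# section access on the primary HDU. The hypothetical helper below isolates
# that read pattern for a 3-D cube.
# ---------------------------------------------------------------------------
def _demo_read_subcube(path, x1, x2, y1, y2, z1, z2):
    from astropy.io import fits
    with fits.open(path, memmap=False) as f:
        # Axis order in the array is (z, y, x), i.e. reversed w.r.t. NAXIS1..3
        return f[0].section[z1:z2, y1:y2, x1:x2]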
def EstimateRel(data, pdfoutname, parNames, parSpace=["snr_sum", "snr_max", "n_pix"], logPars=[1, 1, 1], autoKernel=True, scaleKernel=1, negPerBin=1, skellamTol=-0.5, kernel=[0.15, 0.05, 0.1], usecov=False, doscatter=1, docontour=1, doskellam=1, dostats=0, saverel=1, threshold=0.99, fMin=0, verb=0, makePlot=False): # Always work on logarithmic parameter values; the reliability.logPars parameter should be removed if 0 in logPars: err.warning( " Setting all reliability.logPars entries to 1. This parameter is no longer editable by users." ) logPars = [1 for pp in parSpace] # Import Matplotlib if diagnostic plots requested if makePlot: import matplotlib # The following line is necessary to run SoFiA remotely matplotlib.use("Agg") import matplotlib.pyplot as plt # -------------------------------- # Build array of source parameters # -------------------------------- idCOL = parNames.index("id") ftotCOL = parNames.index("snr_sum") fmaxCOL = parNames.index("snr_max") fminCOL = parNames.index("snr_min") # Get columns of requested parameters parCol = [] for ii in range(len(parSpace)): parCol.append(parNames.index(parSpace[ii])) # Get position and number of positive and negative sources pos = data[:, ftotCOL] > 0 neg = data[:, ftotCOL] <= 0 Npos = pos.sum() Nneg = neg.sum() err.ensure(Npos, "No positive sources found; cannot proceed.") err.ensure(Nneg, "No negative sources found; cannot proceed.") # Get array of relevant source parameters (and take log of them if requested) ids = data[:, idCOL] pars = np.empty((data.shape[0], 0)) for ii in range(len(parSpace)): if parSpace[ii] == "snr_max": parsTmp = data[:, fmaxCOL] * pos - data[:, fminCOL] * neg if logPars[ii]: parsTmp = np.log10(parsTmp) pars = np.concatenate((pars, parsTmp.reshape(-1, 1)), axis=1) elif parSpace[ii] == "snr_sum" or parSpace[ii] == "snr_mean": parsTmp = abs(data[:, parCol[ii]].reshape(-1, 1)) if logPars[ii]: parsTmp = np.log10(parsTmp) pars = np.concatenate((pars, parsTmp), axis=1) else: parsTmp = data[:, parCol[ii]].reshape(-1, 1) if logPars[ii]: parsTmp = np.log10(parsTmp) pars = np.concatenate((pars, parsTmp), axis=1) err.message(" Working in parameter space {0:}".format(str(parSpace))) err.message( " Will convolve the distribution of positive and negative sources in this space to derive the P and N density fields" ) pars = np.transpose(pars) # ---------------------------------------------------------- # Set parameters to work with and gridding/plotting for each # ---------------------------------------------------------- # Axis labels when plotting labs = [] for ii in range(len(parSpace)): labs.append("") if logPars[ii]: labs[ii] += "log " labs[ii] += parSpace[ii] # Axis limits when plotting pmin, pmax = pars.min(axis=1), pars.max(axis=1) pmin, pmax = pmin - 0.1 * (pmax - pmin), pmax + 0.1 * (pmax - pmin) lims = [[pmin[i], pmax[i]] for i in range(len(parSpace))] # Grid on which to evaluate Np and Nn in order to plot contours grid = [[pmin[i], pmax[i], 0.02 * (pmax[i] - pmin[i])] for i in range(len(parSpace))] # Calculate the number of rows and columns in figure projections = [subset for subset in combinations(range(len(parSpace)), 2)] nr = int(np.floor(np.sqrt(len(projections)))) nc = int(np.ceil(float(len(projections)) / nr)) # --------------------------------------- # Set smoothing kernel in parameter space # --------------------------------------- # If autoKernel is True, then the initial kernel is taken as a scaled version of the covariance matrix # of the negative sources. 
The kernel size along each axis is such that the number of sources per kernel # width (sigma**2) is equal to "negPerBin". Optionally, the user can decide to use only the diagonal # terms of the covariance matrix. The kernel is then grown until convergence is reached on the Skellam # plot. If autoKernel is False, then use the kernel given by "kernel" parameter (argument of EstimateRel); # this is sigma, and is squared to be consistent with the auto kernel above. if autoKernel: # Set the kernel shape to that of the variance or covariance matrix kernel = np.cov(pars[:, neg]) kernelType = "covariance" # Check if kernel matrix can be inverted try: np.linalg.inv(kernel) except: err.error( "The reliability cannot be calculated because the smoothing kernel\n" "derived from " + str(pars[:, neg].shape[1]) + " negative sources cannot be inverted.\n" "This is likely due to an insufficient number of negative sources.\n" "Try to increase the number of negative sources by changing the\n" "source finding and/or filtering settings.", fatal=True, frame=True) if np.isnan(kernel).sum(): err.error( "The reliability cannot be calculated because the smoothing kernel\n" "derived from " + str(pars[:, neg].shape[1]) + " negative sources contains NaNs.\n" "A good kernel is required to calculate the density field of positive\n" "and negative sources in parameter space.\n" "Try to increase the number of negative sources by changing the\n" "source finding and/or filtering settings.", fatal=True, frame=True) if not usecov: kernel = np.diag(np.diag(kernel)) kernelType = "variance" kernelIter = 0.0 deltplot = [] # Scale the kernel size as requested by the user (scaleKernel>0) or use the autoscale algorithm (scaleKernel=0) if scaleKernel > 0: # Scale kernel size as requested by the user # Note that the scale factor is squared because users are asked to give a factor to apply to sqrt(kernel) kernel *= scaleKernel**2 err.message( " Using the {0:s} matrix scaled by a factor {1:.2f} as convolution kernel" .format(kernelType, scaleKernel)) err.message(" The sqrt(kernel) size is:") err.message(" " + str(np.sqrt(np.abs(kernel)))) elif scaleKernel == 0: # Scale kernel size to get started the kernel-growing loop # The scale factor for sqrt(kernel) is elevated to the power of 1.0 / len(parCol) err.message( " Will search for the best convolution kernel by scaling the {0:s} matrix" .format(kernelType)) err.message(" The {0:s} matrix has sqrt:".format(kernelType)) err.message(" " + str(np.sqrt(np.abs(kernel)))) # negPerBin must be >=1 err.ensure( negPerBin >= 1, "The parameter reliability.negPerBin used to start the convolution kernel search was set to {0:.1f} but must be >= 1. Please change your settings." .format(negPerBin)) kernel *= ((negPerBin + kernelIter) / Nneg)**(2.0 / len(parCol)) err.message(" Search starting from the kernel with sqrt:") err.message(" " + str(np.sqrt(np.abs(kernel)))) err.message( " Iteratively growing kernel until the distribution of (P-N)/sqrt(P+N) reaches median/width = {0:.2f} ..." .format(skellamTol)) err.ensure( skellamTol <= 0, "The parameter reliability.skellamTol was set to {0:.2f} but must be <= 0. Please change your settings." 
.format(skellamTol)) else: err.ensure(scaleKernel>=0,\ "The reliability.scaleKernel parameter cannot be negative.\n"\ "It should be = 0 if you want SoFiA to find the optimal kernel scaling\n"\ "or > 0 if you want to set the scaling yourself.\n"\ "Please change your settings.") #deltOLD=-1e+9 # Used to stop kernel growth if P-N stops moving closer to zero [NOT USED CURRENTLY] if doskellam and makePlot: fig0 = plt.figure() else: # Note that the user must give sigma, which then gets squared err.message( " Using user-defined variance kernel with sqrt(kernel) size: {0}". format(kernel)) err.ensure( len(parSpace) == len(kernel), "The number of entries in the kernel above does not match the number of parameters you requested for the reliability calculation." ) kernel = np.identity(len(kernel)) * np.array(kernel)**2 # Set grow_kernel to 1 to start the kernel growing loop below. grow_kernel = 1 # This loop will estimate the reliability, check whether the kernel is large enough, # and if not pick a larger kernel. If autoKernel = 0 or scaleKernel = 0, we will do # just one pass (i.e., we will not grow the kernel). while grow_kernel: # ------------------------ # Evaluate N-d reliability # ------------------------ if verb: err.message( " estimate normalised positive and negative density fields ..." ) Np = gaussian_kde_set_covariance(pars[:, pos], kernel) Nn = gaussian_kde_set_covariance(pars[:, neg], kernel) # Calculate the number of positive and negative sources at the location of positive sources Nps = Np(pars[:, pos]) * Npos Nns = Nn(pars[:, pos]) * Nneg # Calculate the number of positive and negative sources at the location of negative sources nNps = Np(pars[:, neg]) * Npos nNns = Nn(pars[:, neg]) * Nneg # Calculate the reliability at the location of positive sources Rs = (Nps - Nns) / Nps # The reliability must be <= 1. If not, something is wrong. 
err.ensure( Rs.max() <= 1, "Maximum reliability greater than 1; something is wrong.\nPlease ensure that enough negative sources are detected\nand decrease your source finding threshold if necessary.", frame=True) # Find pseudo-reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0 # if threshold == 0; Rs may be < 0 because of insufficient statistics) # These are called pseudo-reliable because some objects may be discarded later based on additional criteria below pseudoreliable = np.maximum(Rs, 0) >= threshold # Find reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0 if # threshold == 0; Rs may be < 0 because of insufficient statistics) #reliable=(np.maximum(Rs, 0)>=threshold) * (data[pos, ftotCOL].reshape(-1,) > fMin) * (data[pos, fmaxCOL].reshape(-1,) > 4) reliable = (np.maximum(Rs, 0) >= threshold) * ( (data[pos, ftotCOL] / np.sqrt(data[pos, parNames.index("n_pix")])).reshape(-1, ) > fMin) if autoKernel: # Calculate quantities needed for comparison to Skellam distribution delt = (nNps - nNns) / np.sqrt(nNps + nNns) deltstd = delt.std() deltmed = np.median(delt) deltmin = delt.min() deltmax = delt.max() if deltmed / deltstd > -100 and doskellam and makePlot: plt.hist(delt / deltstd, bins=np.arange(deltmin / deltstd, max(5.1, deltmax / deltstd), 0.01), cumulative=True, histtype="step", color=(min( 1, float(max(1., negPerBin) + kernelIter) / Nneg), 0, 0), density=True) deltplot.append([((max(1., negPerBin) + kernelIter) / Nneg)**(1.0 / len(parCol)), deltmed / deltstd]) if scaleKernel: grow_kernel = 0 else: err.message( " iteration, median, width, median/width = %3i, %9.2e, %9.2e, %9.2e" % (kernelIter, deltmed, deltstd, deltmed / deltstd)) if deltmed / deltstd > skellamTol or negPerBin + kernelIter >= Nneg: grow_kernel = 0 err.message( " Found good kernel after %i kernel growth iterations. 
The sqrt(kernel) size is:" % kernelIter) err.message(np.sqrt(np.abs(kernel))) elif deltmed / deltstd < 5 * skellamTol: kernel *= (float(negPerBin + kernelIter + 20) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 20 elif deltmed / deltstd < 2 * skellamTol: kernel *= (float(negPerBin + kernelIter + 10) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 10 elif deltmed / deltstd < 1.5 * skellamTol: kernel *= (float(negPerBin + kernelIter + 3) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 3 else: kernel *= (float(negPerBin + kernelIter + 1) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 1 else: grow_kernel = 0 # ------------ # Skellam plot # ------------ if autoKernel and deltmed / deltstd > -100 and doskellam and makePlot: plt.plot(np.arange(-10, 10, 0.01), stats.norm().cdf(np.arange(-10, 10, 0.01)), "k-") plt.plot(np.arange(-10, 10, 0.01), stats.norm(scale=0.4).cdf(np.arange(-10, 10, 0.01)), "k:") plt.legend(("Gaussian (sigma=1)", "Gaussian (sigma=0.4)"), loc="lower right", prop={"size": 13}) plt.hist(delt / deltstd, bins=np.arange(deltmin / deltstd, max(5.1, deltmax / deltstd), 0.01), cumulative=True, histtype="step", color="r", density=True) plt.xlim(-5, 5) plt.ylim(0, 1) plt.xlabel("(P-N)/sqrt(N+P)") plt.ylabel("cumulative distribution") plt.plot([0, 0], [0, 1], "k--") fig0.savefig("%s_rel_skellam.pdf" % pdfoutname, rasterized=True) if not scaleKernel: fig3 = plt.figure() deltplot = np.array(deltplot) plt.plot(deltplot[:, 0], deltplot[:, 1], "ko-") plt.xlabel("kernel size (1D-sigma, aribtrary units)") plt.ylabel("median/std of (P-N)/sqrt(P+N)") plt.axhline(y=skellamTol, linestyle="--", color="r") fig3.savefig("%s_rel_skellam-delta.pdf" % pdfoutname, rasterized=True) # ----------------------- # Scatter plot of sources # ----------------------- specialids = [] if doscatter and makePlot: if verb: err.message(" plotting sources ...") fig1 = plt.figure(figsize=(18, 4.5 * nr)) plt.subplots_adjust(left=0.06, bottom=0.15 / nr, right=0.97, top=1 - 0.08 / nr, wspace=0.35, hspace=0.25) n_p = 0 for jj in projections: if verb: err.message(" projection %i/%i" % (projections.index(jj) + 1, len(projections))) n_p, p1, p2 = n_p + 1, jj[0], jj[1] plt.subplot(nr, nc, n_p) plt.scatter(pars[p1, pos], pars[p2, pos], marker="o", c="b", s=10, edgecolor="face", alpha=0.5) plt.scatter(pars[p1, neg], pars[p2, neg], marker="o", c="r", s=10, edgecolor="face", alpha=0.5) for si in specialids: plt.plot(pars[p1, ids == si], pars[p2, ids == si], "kd", zorder=10000, ms=7, mfc="none", mew=2) # Plot Integrated SNR threshold if fMin > 0 and (parSpace[jj[0]], parSpace[jj[1]]) == ("snr_sum", "snr_mean"): xArray = np.arange( lims[p1][0], lims[p1][1] + (lims[p1][1] - lims[p1][0]) / 100, (lims[p1][1] - lims[p1][0]) / 100) plt.plot(xArray, np.log10(fMin) * 2 - xArray, 'k:') elif fMin > 0 and (parSpace[jj[0]], parSpace[jj[1]]) == ("snr_mean", "snr_sum"): yArray = np.arange( lims[p2][0], lims[p2][1] + (lims[p2][1] - lims[p2][0]) / 100, (lims[p2][1] - lims[p2][0]) / 100) plt.plot(np.log10(fMin) * 2 - yArray, yArray, 'k:') plt.xlim(lims[p1][0], lims[p1][1]) plt.ylim(lims[p2][0], lims[p2][1]) plt.xlabel(labs[p1]) plt.ylabel(labs[p2]) plt.grid(color='k', linestyle='-', linewidth=0.2) fig1.savefig("%s_rel_scatter.pdf" % pdfoutname, rasterized=True) # ------------- # Plot contours # ------------- if docontour and makePlot: levs = 10**np.arange(-1.5, 2, 0.5) if verb: err.message(" plotting contours ...") fig2 = plt.figure(figsize=(18, 4.5 * nr)) plt.subplots_adjust(left=0.06, 
bottom=0.15 / nr, right=0.97, top=1 - 0.08 / nr, wspace=0.35, hspace=0.25) n_p = 0 for jj in projections: if verb: err.message(" projection %i/%i" % (projections.index(jj) + 1, len(projections))) n_p, p1, p2 = n_p + 1, jj[0], jj[1] g1, g2 = grid[p1], grid[p2] x1 = np.arange(g1[0], g1[1], g1[2]) x2 = np.arange(g2[0], g2[1], g2[2]) pshape = (x2.shape[0], x1.shape[0]) # Get array of source parameters on current projection parsp = np.concatenate((pars[p1:p1 + 1], pars[p2:p2 + 1]), axis=0) # Derive Np and Nn density fields on the current projection setcov = kernel[p1:p2 + 1:p2 - p1, p1:p2 + 1:p2 - p1] try: Np = gaussian_kde_set_covariance(parsp[:, pos], setcov) Nn = gaussian_kde_set_covariance(parsp[:, neg], setcov) except: err.error( "Reliability determination failed because of issues with the\n" "smoothing kernel. This is likely due to an insufficient number\n" "of negative detections. Please review your filtering and source\n" "finding settings to ensure that a sufficient number of negative\n" "detections is found.", fatal=True, frame=True) # Evaluate density fields on grid on current projection g = np.transpose( np.transpose(np.mgrid[slice(g1[0], g1[1], g1[2]), slice(g2[0], g2[1], g2[2])]).reshape( -1, 2)) Np = Np(g) Nn = Nn(g) Np = Np / Np.sum() * Npos Nn = Nn / Nn.sum() * Nneg Np.resize(pshape) Nn.resize(pshape) plt.subplot(nr, nc, n_p) plt.contour(x1, x2, Np, origin="lower", colors="b", levels=levs, zorder=2) plt.contour(x1, x2, Nn, origin="lower", colors="r", levels=levs, zorder=1) # Plot Integrated SNR threshold if fMin > 0 and (parSpace[jj[0]], parSpace[jj[1]]) == ("snr_sum", "snr_mean"): xArray = np.arange( lims[p1][0], lims[p1][1] + (lims[p1][1] - lims[p1][0]) / 100, (lims[p1][1] - lims[p1][0]) / 100) plt.plot(xArray, np.log10(fMin) * 2 - xArray, 'k:') elif fMin > 0 and (parSpace[jj[0]], parSpace[jj[1]]) == ("snr_mean", "snr_sum"): yArray = np.arange( lims[p2][0], lims[p2][1] + (lims[p2][1] - lims[p2][0]) / 100, (lims[p2][1] - lims[p2][0]) / 100) plt.plot(np.log10(fMin) * 2 - yArray, yArray, 'k:') if reliable.sum(): plt.scatter(pars[p1, pos][reliable], pars[p2, pos][reliable], marker="o", s=10, edgecolor="k", facecolor="k", zorder=4) if (pseudoreliable * (reliable == False)).sum(): plt.scatter(pars[p1, pos][pseudoreliable * (reliable == False)], pars[p2, pos][pseudoreliable * (reliable == False)], marker="x", s=40, edgecolor="0.5", facecolor="0.5", zorder=3) for si in specialids: plt.plot(pars[p1, ids == si], pars[p2, ids == si], "kd", zorder=10000, ms=7, mfc="none", mew=2) plt.xlim(lims[p1][0], lims[p1][1]) plt.ylim(lims[p2][0], lims[p2][1]) plt.xlabel(labs[p1]) plt.ylabel(labs[p2]) plt.grid(color='k', linestyle='-', linewidth=0.2) fig2.savefig("%s_rel_contour.pdf" % pdfoutname, rasterized=True) # ------------------------- # Add Np, Nn and R to table # ------------------------- # This allows me not to calculate R every time I want to do some plot analysis, # but just read it from the file if saverel: if not (docontour or dostats): Nps = Np(pars[:, pos]) * Npos Nns = Nn(pars[:, pos]) * Nneg Np = np.zeros((data.shape[0], )) Np[pos] = Nps Nn = np.zeros((data.shape[0], )) Nn[pos] = Nns R = -np.ones((data.shape[0], )) # R will be -1 for negative sources # Set R to zero for positive sources if R < 0 because of Nn > Np R[pos] = np.maximum(0, (Np[pos] - Nn[pos]) / Np[pos]) data = np.concatenate( (data, Np.reshape(-1, 1), Nn.reshape(-1, 1), R.reshape(-1, 1)), axis=1) data = [list(jj) for jj in list(data)] return data, ids[pos][reliable].astype(int)
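# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): the kernel-growth loop in
# EstimateRel() judges whether the smoothing kernel is large enough from the
# statistic (P - N) / sqrt(P + N), evaluated at the positions of negative
# sources, where P and N are the smoothed densities of positive and negative
# detections. For pure noise this should scatter symmetrically about zero, so
# the loop grows the kernel until the median-to-width ratio of the statistic
# reaches skellamTol. The hypothetical helper below computes that ratio.
# ---------------------------------------------------------------------------
def _demo_skellam_ratio(nNps, nNns):
    import numpy as np
    delt = (nNps - nNns) / np.sqrt(nNps + nNns)
    return np.median(delt) / delt.std()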
def add_wcs_coordinates(objects, catParNames, catParFormt, catParUnits, Parameters):
    try:
        hdulist = fits.open(Parameters["import"]["inFile"])
        header = hdulist[0].header
        hdulist.close()
        
        # Fix headers where "per second" is written "/S" instead of "/s"
        # (assuming they mean "per second" and not "per Siemens").
        if "cunit3" in header and "/S" in header["cunit3"]:
            err.warning("Converting '/S' to '/s' in CUNIT3.")
            header["cunit3"] = header["cunit3"].replace("/S", "/s")
        
        # Check if there is a Nmap/GIPSY FITS header keyword value present
        gipsyKey = [k for k in ["FREQ-OHEL", "FREQ-OLSR", "FREQ-RHEL", "FREQ-RLSR"] if (k in [header[key] for key in header if ("CTYPE" in key)])]
        
        if gipsyKey:
            err.message("GIPSY header found. Trying to convert to FITS standard.")
            from astropy.wcs import Wcsprm
            header = fix_gipsy_header(header)
            wcsin = Wcsprm(str(header))
            wcsin.sptr("VOPT-F2W")
            #if header["naxis"] == 4:
            #   objects = np.concatenate((objects, wcsin.p2s(np.concatenate((objects[:, catParNames.index("x"):catParNames.index("x") + 3], np.zeros((objects.shape[0], 1))), axis=1), 0)["world"][:,:-1]), axis=1)
            #else:
            #   objects = np.concatenate((objects, wcsin.p2s(objects[:, catParNames.index("x"):catParNames.index("x") + 3], 0)["world"]), axis=1)
            objects = np.concatenate((objects, wcsin.p2s(objects[:, catParNames.index("x"):catParNames.index("x") + 3], 0)["world"]), axis=1)
            catParUnits = tuple(list(catParUnits) + [str(cc).replace(" ", "") for cc in wcsin.cunit])
            catParNames = tuple(list(catParNames) + [(cc.split("--")[0]).lower() for cc in wcsin.ctype])
            catParFormt = tuple(list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])
        else:
            # Constrain the RA axis reference value CRVAL_ to be between 0 and 360 deg
            rafound = 0
            for kk in range(header["naxis"]):
                if header["ctype%i" % (kk + 1)][:2] == "RA":
                    rafound = 1
                    break
            if rafound:
                if header["crval%i" % (kk + 1)] < 0:
                    err.warning("Adding 360 deg to RA reference value.")
                    header["crval%i" % (kk + 1)] += 360
                elif header["crval%i" % (kk + 1)] > 360:
                    err.warning("Subtracting 360 deg from RA reference value.")
                    header["crval%i" % (kk + 1)] -= 360
            
            #if header["naxis"] == 4: wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL, wcs.WCSSUB_STOKES])
            #else: wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL])
            wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL])
            xyz = objects[:, catParNames.index("x"):catParNames.index("x") + 3].astype(float)
            
            if "cellscal" in header and header["cellscal"] == "1/F":
                err.warning("CELLSCAL keyword with value of 1/F found.\nWill account for varying pixel scale in WCS coordinate calculation.")
                x0, y0 = header["crpix1"] - 1, header["crpix2"] - 1
                # Will calculate the pixscale factor of each channel as:
                # pixscale = ref_frequency / frequency
                if header["ctype3"] == "VELO-HEL":
                    pixscale = (1 - header["crval3"] / scipy.constants.c) / (1 - (((xyz[:, 2] + 1) - header["crpix3"]) * header["cdelt3"] + header["crval3"]) / scipy.constants.c)
                else:
                    err.warning("Cannot convert 3rd axis coordinates to frequency. Ignoring the effect of CELLSCAL = 1/F.")
                    pixscale = 1.0
                xyz[:, 0] = (xyz[:, 0] - x0) * pixscale + x0
                xyz[:, 1] = (xyz[:, 1] - y0) * pixscale + y0
            
            #if header["naxis"] == 4: objects = np.concatenate((objects, wcsin.wcs_pix2world(np.concatenate((xyz, np.zeros((objects.shape[0], 1))), axis=1), 0)[:, :-1]), axis=1)
            #else: objects = np.concatenate((objects, wcsin.wcs_pix2world(xyz, 0)), axis=1)
            objects = np.concatenate((objects, wcsin.wcs_pix2world(xyz, 0)), axis=1)
            catParUnits = tuple(list(catParUnits) + [str(cc).replace(" ", "") for cc in wcsin.wcs.cunit])
            catParNames = tuple(list(catParNames) + [(cc.split("--")[0]).lower() for cc in wcsin.wcs.ctype])
            catParFormt = tuple(list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])
            #if header["naxis"] == 4:
            #   catParUnits = catParUnits[:-1]
            #   catParNames = catParNames[:-1]
        
        err.message("WCS coordinates added to catalogue.")
        
        # Create IAU-compliant source name:
        # WARNING: This currently assumes a regular, ≥ 2-dim. data cube where the first two axes are longitude and latitude.
        n_src = objects.shape[0]
        n_par = objects.shape[1]
        iau_names = np.empty([n_src, 1], dtype=object)
        
        if header["ctype1"][:4] == "RA--":
            # Equatorial coordinates; try to figure out equinox:
            iau_coord = "equ"
            if "equinox" in header:
                if int(header["equinox"]) >= 2000: iau_equinox = "J"
                else: iau_equinox = "B"
            elif "epoch" in header:
                # Assume that EPOCH has been abused to record the equinox:
                if int(header["epoch"]) >= 2000: iau_equinox = "J"
                else: iau_equinox = "B"
            else:
                # Equinox undefined:
                iau_equinox = "X"
        elif header["ctype1"][:4] == "GLON":
            # Galactic coordinates:
            iau_coord = "gal"
            iau_equinox = "G"
        else:
            # Unsupported coordinate system:
            iau_coord = ""
            iau_equinox = ""
        
        for src in range(n_src):
            lon = objects[src][n_par - 3]
            lat = objects[src][n_par - 2]
            
            if iau_coord == "equ":
                ra = Longitude(lon, unit=u.deg)
                dec = Latitude(lat, unit=u.deg)
                iau_pos = ra.to_string(unit=u.h, decimal=False, sep="", precision=2, alwayssign=False, pad=True, fields=3)
                iau_pos += dec.to_string(unit=u.deg, decimal=False, sep="", precision=1, alwayssign=True, pad=True, fields=3)
            else:
                iau_pos = "{0:08.4f}".format(lon)
                if lat < 0.0: iau_pos += "-"
                else: iau_pos += "+"
                iau_pos += "{0:07.4f}".format(abs(lat))
            
            iau_names[src][0] = "SoFiA " + iau_equinox + iau_pos
        
        objects = np.concatenate((objects, iau_names), axis=1)
        catParUnits = tuple(list(catParUnits) + ["-"])
        catParNames = tuple(list(catParNames) + ["name"])
        catParFormt = tuple(list(catParFormt) + ["%30s"])
    except:
        err.warning("WCS conversion of parameters failed.")
    
    return (objects, catParNames, catParFormt, catParUnits)
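# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): add_wcs_coordinates() above builds
# IAU-style source names from the WCS longitude and latitude of each source.
# The hypothetical helper below shows the same formatting calls for a single
# position, assuming J2000 equatorial coordinates.
# ---------------------------------------------------------------------------
def _demo_iau_name(lon_deg, lat_deg):
    from astropy.coordinates import Longitude, Latitude
    from astropy import units as u
    ra = Longitude(lon_deg, unit=u.deg)
    dec = Latitude(lat_deg, unit=u.deg)
    # HHMMSS.ss for right ascension, +DDMMSS.s for declination
    pos = ra.to_string(unit=u.h, decimal=False, sep="", precision=2, alwayssign=False, pad=True, fields=3)
    pos += dec.to_string(unit=u.deg, decimal=False, sep="", precision=1, alwayssign=True, pad=True, fields=3)
    return "SoFiA J" + pos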
"1) Enable automatic overwrite in the GUI or parameter file\n" "2) Change base name and/or output directory in the GUI or\n" " parameter file\n" "3) Delete or rename the existing directory", fatal=True, frame=True) return # ----------------------------------------------- # ---- Check if parameter file name provided ---- # ----------------------------------------------- if len(sys.argv) != 2: err.message( "\n\033[1;4mUsage:\033[24m sofia_pipeline.py \033[3m<filename>\033[0m\n\nThe filename of a valid SoFiA parameter file must be specified. Please\nadd the full path if the file is not located in the current directory.\n\n" ) sys.exit(1) # ----------------------------------------------- # ---- Print some initial status information ---- # ----------------------------------------------- err.print_progress_message("Running the SoFiA pipeline") err.message(" Using: SoFiA " + sofia_version + "\n" " Python " + str(sys.version_info[0]) + "." + str(sys.version_info[1]) + "." + str(sys.version_info[2]) + "\n" " NumPy " + np.__version__ + "\n" " SciPy " + scipy_version + "\n" " Astropy " + astropy_version + "\n")
def write_catalog_from_array(mode, objects, catHeader, catUnits, catFormat, parList, outName, flagCompress, flagOverwrite, flagUncertainties):
    # Check output format and compression
    availableModes = ["ASCII", "XML", "SQL"]
    if mode not in availableModes:
        err.warning("Unknown catalogue format: " + str(mode) + ". Defaulting to ASCII.")
        mode = "ASCII"
    modeIndex = availableModes.index(mode)
    
    if flagCompress: outName += ".gz"
    err.message("Writing " + availableModes[modeIndex] + " catalogue: " + outName + ".")
    
    # Exit if file exists and overwrite flag is set to false
    func.check_overwrite(outName, flagOverwrite, fatal=True)
    
    # Do we need to write all parameters?
    if parList == ["*"] or not parList: parList = list(catHeader)
    
    # Remove undefined parameters
    parList = [item for item in parList if item in catHeader]
    
    # Remove statistical uncertainties if not requested
    if not flagUncertainties:
        for item in ["err_x", "err_y", "err_z", "err_w20", "err_w50"]:
            while item in parList: parList.remove(item)
    
    # Check whether there is anything left
    if not len(parList):
        err.error("No valid output parameters selected. No output catalogue written.", fatal=False)
        return
    
    # Create and write catalogue in requested format
    # -------------------------------------------------------------------------
    if mode == "XML":
        # Define basic XML header information
        votable = Element("VOTABLE")
        resource = SubElement(votable, "RESOURCE", name="SoFiA catalogue (version %s)" % sofia_version)
        description = SubElement(resource, "DESCRIPTION")
        description.text = "Source catalogue from the Source Finding Application (SoFiA) version %s" % sofia_version
        coosys = SubElement(resource, "COOSYS", ID="J2000")
        table = SubElement(resource, "TABLE", ID="sofia_cat", name="sofia_cat")
        
        # Load list of parameters and unified content descriptors (UCDs)
        ucdList = {}
        fileUcdPath = os.environ["SOFIA_PIPELINE_PATH"]
        fileUcdPath = fileUcdPath.replace("sofia_pipeline.py", "SoFiA_source_parameters.dat")
        
        try:
            with open(fileUcdPath) as fileUcd:
                for line in fileUcd:
                    (key, value) = line.split()
                    ucdList[key] = value
        except:
            err.warning("Failed to read UCD file.")
        
        # Create parameter fields
        for par in parList:
            ucdEntity = ucdList[par] if par in ucdList else ""
            index = list(catHeader).index(par)
            if catFormat[index] == "%30s":
                field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="char", arraysize="30", unit=catUnits[index])
            else:
                field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="float", unit=catUnits[index])
        
        # Create data table entries
        data = SubElement(table, "DATA")
        tabledata = SubElement(data, "TABLEDATA")
        
        for obj in objects:
            tr = SubElement(tabledata, "TR")
            for par in parList:
                td = SubElement(tr, "TD")
                index = list(catHeader).index(par)
                td.text = (catFormat[index] % obj[index]).strip()
        
        # Write XML catalogue:
        try:
            f1 = gzopen(outName, "wb") if flagCompress else open(outName, "w")
        except:
            err.error("Failed to write to XML catalogue: " + outName + ".", fatal=False)
            return
        f1.write(prettify(votable))
        #f1.write(tostring(votable, "utf-8"))  # without prettifying, which is faster and uses much less memory
        f1.close()
    # -----------------------------------------------------------------End-XML-
    
    elif mode == "SQL":
        # Record if there is an ID column in the catalogue
        # (if no ID is present, we will later create one for use as primary key)
        noID = "id" not in parList
        
        # Write some header information:
        content = "-- SoFiA catalogue (version %s)\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\n\n" % sofia_version
        
        # Construct and write table structure:
        flagProgress = False
        content += "CREATE TABLE IF NOT EXISTS `SoFiA-Catalogue` (\n"
        if noID: content += " `id` INT NOT NULL,\n"
        for par in parList:
            index = list(catHeader).index(par)
            if flagProgress: content += ",\n"
            content += " " + sqlHeaderItem(par) + sqlFormat(catFormat[index])
            flagProgress = True
        content += ",\n PRIMARY KEY (`id`),\n KEY (`id`)\n) DEFAULT CHARSET=utf8 COMMENT=\'SoFiA source catalogue\';\n\n"
        
        # Insert data:
        flagProgress = False
        content += "INSERT INTO `SoFiA-Catalogue` ("
        if noID: content += "`id`, "
        for par in parList:
            if flagProgress: content += ", "
            content += sqlHeaderItem(par)
            flagProgress = True
        content += ") VALUES\n"
        
        source_count = 0
        for obj in objects:
            flagProgress = False
            source_count += 1
            content += "("
            if noID: content += str(source_count) + ", "
            for par in parList:
                index = list(catHeader).index(par)
                if flagProgress: content += ", "
                content += sqlDataItem(obj[index], catFormat[index])
                flagProgress = True
            if source_count < len(objects): content += "),\n"
            else: content += ");\n"
        
        # Write catalogue
        try:
            fp = gzopen(outName, "wb") if flagCompress else open(outName, "w")
        except:
            err.error("Failed to write to SQL catalogue: " + outName + ".", fatal=False)
            return
        fp.write(content)
        fp.close()
    # -----------------------------------------------------------------End-SQL-
    
    else: # mode == "ASCII" by default
        # Determine header sizes based on variable-length formatting
        lenCathead = []
        for j in catFormat:
            lenCathead.append(int(j.split("%")[1].split("e")[0].split("f")[0].split("i")[0].split("d")[0].split(".")[0].split("s")[0]) + 1)
        
        # Create header
        headerName = ""
        headerUnit = ""
        headerCol = ""
        outFormat = ""
        colCount = 0
        header = "SoFiA catalogue (version %s)\n" % sofia_version
        
        for par in parList:
            index = list(catHeader).index(par)
            headerName += catHeader[index].rjust(lenCathead[index])
            headerUnit += catUnits[index].rjust(lenCathead[index])
            headerCol += ("(%i)" % (colCount + 1)).rjust(lenCathead[index])
            outFormat += catFormat[index] + " "
            colCount += 1
        header += headerName[3:] + '\n' + headerUnit[3:] + '\n' + headerCol[3:]
        
        # Create catalogue
        outObjects = []
        for obj in objects:
            outObjects.append([])
            for par in parList:
                outObjects[-1].append(obj[list(catHeader).index(par)])
        
        # Write ASCII catalogue
        try:
            np.savetxt(outName, np.array(outObjects, dtype=object), fmt=outFormat, header=header)
        except:
            err.error("Failed to write to ASCII catalogue: " + outName + ".", fatal=False)
            return
    # ---------------------------------------------------------------End-ASCII-
    
    return
def dilate(cube, mask, objects, cathead, Parameters):
    dilateThreshold = Parameters["parameters"]["dilateThreshold"]
    dilatePixMax = Parameters["parameters"]["dilatePixMax"]
    dilateChanMax = Parameters["parameters"]["dilateChanMax"]
    
    # Stops dilating when (flux_new - flux_old) / flux_new < dilateThreshold
    sourceIDs = np.unique(mask)
    # Remove first element, which should be zero
    if sourceIDs[0] == 0:
        sourceIDs = np.delete(sourceIDs, 0)
    
    for i in range(0, len(sourceIDs)):
        obj = objects[i]
        xmin = max(0, obj[list(cathead).index("x_min")] - dilatePixMax)
        xmax = min(cube.shape[2] - 1, obj[list(cathead).index("x_max")] + dilatePixMax)
        ymin = max(0, obj[list(cathead).index("y_min")] - dilatePixMax)
        ymax = min(cube.shape[1] - 1, obj[list(cathead).index("y_max")] + dilatePixMax)
        zmin = max(0, obj[list(cathead).index("z_min")] - dilateChanMax)
        zmax = min(cube.shape[0] - 1, obj[list(cathead).index("z_max")] + dilateChanMax)
        [zmin, zmax, ymin, ymax, xmin, xmax] = map(int, [zmin, zmax, ymin, ymax, xmin, xmax])
        
        objcube = cube[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        objmask = mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        allmask = mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        otherobjs = (allmask > 0) * (allmask != sourceIDs[i])
        
        if (otherobjs).sum():
            # Ensure that objects with different source IDs within dilatePixMax, dilateChanMax are not
            # included in the flux growth calculation
            err.warning("Object {0:d} has possible overlapping objects within {1:d} pix, {2:d} chan.".format(sourceIDs[i], dilatePixMax, dilateChanMax))
            objcube[(allmask > 0) * (allmask != sourceIDs[i])] = 0
        
        fluxes = []
        
        # Loop through Z dilation kernels until the flux converges or the maximum allowed Z dilation is reached
        for dilchan in range(dilateChanMax + 1):
            dd = dilchan * 2 + 1
            dilstruct = np.ones((dd, 1, 1))
            fluxes.append(objcube[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct)].sum())
            if dilchan > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
                dilchan -= 1
                break
        # Pick the best Z dilation kernel for current object and update mask
        dd = dilchan * 2 + 1
        dilstruct = np.ones((dd, 1, 1))
        # Only grow the mask of object sourceIDs[i] even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct).astype(int) == 1] = sourceIDs[i]
        
        # Loop through XY dilation kernels until the flux converges or the maximum allowed XY dilation is reached
        for dilpix in range(dilatePixMax + 1):
            dd = dilpix * 2 + 1
            dilstruct = (np.sqrt(((np.indices((dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
            dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
            fluxes.append(objcube[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct)].sum())
            if dilpix > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
                dilpix -= 1
                break
        # Pick the best XY dilation kernel for current object and update mask
        dd = dilpix * 2 + 1
        dilstruct = (np.sqrt(((np.indices((dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
        dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
        # Only grow the mask of object sourceIDs[i] even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct).astype(int) == 1] = sourceIDs[i]
        
        err.message("Mask of source {0:d} dilated by {2:d} chan and then by {1:d} pix.".format(sourceIDs[i], dilpix, dilchan))
        
        # Put back in objmask objects != sourceIDs[i] that may have been inside objmask before
        # dilation or may have been temporarily replaced by the dilated object sourceIDs[i]
        if (otherobjs).sum():
            objmask[otherobjs] = allmask[otherobjs]
        mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1] = objmask
        
        # Update n_pix, x_geo and n_chan
        n_pix = objmask[objmask == sourceIDs[i]].sum() / sourceIDs[i]
        ind = np.vstack(np.where(objmask == sourceIDs[i]))
        cgeo = (ind.sum(axis=1)).astype(float) / float(n_pix)
        x_geo, y_geo, z_geo = cgeo[2] + xmin, cgeo[1] + ymin, cgeo[0] + zmin
        zmin, zmax = min(ind[0]), max(ind[0]) + 1
        n_chan = zmax - zmin
        
        # Update n_los
        objmask[objmask != sourceIDs[i]] = 0
        maskSumA0 = objmask.sum(axis=0)
        maskSumA0[maskSumA0 > 1] = 1
        n_los = maskSumA0.sum()
        
        del objcube
        del objmask
        del allmask
        del otherobjs
        
        objects[i, list(cathead).index("x_min")] = max(0, obj[list(cathead).index("x_min")] - dilpix)
        objects[i, list(cathead).index("x_max")] = min(cube.shape[2] - 1, obj[list(cathead).index("x_max")] + dilpix)
        objects[i, list(cathead).index("y_min")] = max(0, obj[list(cathead).index("y_min")] - dilpix)
        objects[i, list(cathead).index("y_max")] = min(cube.shape[1] - 1, obj[list(cathead).index("y_max")] + dilpix)
        objects[i, list(cathead).index("z_min")] = max(0, obj[list(cathead).index("z_min")] - dilchan)
        objects[i, list(cathead).index("z_max")] = min(cube.shape[0] - 1, obj[list(cathead).index("z_max")] + dilchan)
        objects[i, list(cathead).index("n_pix")] = n_pix
        objects[i, list(cathead).index("n_chan")] = n_chan
        objects[i, list(cathead).index("n_los")] = n_los
        objects[i, list(cathead).index("x_geo")] = x_geo
        objects[i, list(cathead).index("y_geo")] = y_geo
        objects[i, list(cathead).index("z_geo")] = z_geo
    
    return mask, objects
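# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SoFiA): dilate() grows a source mask with
# increasingly large structuring elements and stops once the relative flux
# growth drops below dilateThreshold. The hypothetical helper below shows
# that convergence test for spectral (Z) dilation only and returns the
# adopted number of dilation channels.
# ---------------------------------------------------------------------------
def _demo_dilate_z(objcube, objmask, source_id, chan_max, threshold):
    import numpy as np
    from scipy import ndimage as nd
    fluxes = []
    best = 0
    for dilchan in range(chan_max + 1):
        struct = np.ones((2 * dilchan + 1, 1, 1))
        grown = nd.binary_dilation(objmask == source_id, structure=struct)
        fluxes.append(objcube[grown].sum())
        # Stop as soon as the relative flux increase falls below the threshold
        if dilchan > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < threshold:
            break
        best = dilchan
    return best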
def parametrise(cube, mask, objects, cathead, catformt, catparunits, Parameters, dunits): cathead = np.array(cathead) objects = np.array(objects) initcatalog = cp.PySourceCatalog() for obj in objects: # Check flags source_flag = create_source_flags(cube, mask, cathead, obj[cathead == "id"], obj[cathead == "x_min"], obj[cathead == "x_max"], obj[cathead == "y_min"], obj[cathead == "y_max"], obj[cathead == "z_min"], obj[cathead == "z_max"]) newSource = cp.PySource() newSource.ID = obj[cathead == "id"] newParamsDict = { "flag": cp.PyMeasurement("flag", source_flag, 0.0, ""), "x": cp.PyMeasurement("x", obj[cathead == "x_geo"], 0.0, ""), "y": cp.PyMeasurement("y", obj[cathead == "y_geo"], 0.0, ""), "z": cp.PyMeasurement("z", obj[cathead == "z_geo"], 0.0, ""), "x_min": cp.PyMeasurement("x_min", obj[cathead == "x_min"], 0.0, ""), "x_max": cp.PyMeasurement("x_max", obj[cathead == "x_max"], 0.0, ""), "y_min": cp.PyMeasurement("y_min", obj[cathead == "y_min"], 0.0, ""), "y_max": cp.PyMeasurement("y_max", obj[cathead == "y_max"], 0.0, ""), "z_min": cp.PyMeasurement("z_min", obj[cathead == "z_min"], 0.0, ""), "z_max": cp.PyMeasurement("z_max", obj[cathead == "z_max"], 0.0, "") } newSource.setParameters(newParamsDict) initcatalog.insert(newSource) moduleParametrizer = cp.PyModuleParametrisation() moduleParametrizer.setFlags(Parameters["parameters"]["optimiseMask"], Parameters["parameters"]["fitBusyFunction"]) cube = cube.astype("<f4", copy=False) mask = mask.astype("<i2", copy=False) moduleParametrizer.run(cube, mask, initcatalog) results = moduleParametrizer.getCatalog() # Append the results to the objects array or reset replParam = ["x_min", "x_max", "y_min", "y_max", "z_min", "z_max", "id", "x", "y", "z", "n_pix"] origParam = ["x_min", "x_max", "y_min", "y_max", "z_min", "z_max", "id", "x", "y", "z", "n_pix"] d = results.getSources() # Select data set with maximum number of parameters parsListLen = [len(d[list(d.keys())[i]].getParameters()) for i in range(0, len(d))] index = parsListLen.index(max(parsListLen)) # Add parameter names from parameterisation pars = d[list(d.keys())[index]].getParameters() cathead = list(cathead) newunits = { "id": "-", "flag": "-", "x": "pix", "y": "pix", "z": "pix", "err_x": "pix", "err_y": "pix", "err_z": "pix", "x_min": "pix", "x_max": "pix", "y_min": "pix", "y_max": "pix", "z_min": "chan", "z_max": "chan", "w50": "chan", "w20": "chan", "err_w50": "chan", "err_w20": "chan", "wm50": "chan", "f_wm50": dunits, "ell_maj": "pix", "ell_min": "pix", "ell_pa": "deg", "ell3s_maj": "pix", "ell3s_min": "pix", "ell3s_pa": "deg", "kin_pa": "deg", "f_int": dunits, "bf_flag": "-", "bf_chi2": "-", "bf_z": "chan", "bf_a": dunits, "bf_b1": "chan**(-1)", "bf_b2": "chan**(-1)", "bf_c": "chan**(-2)", "bf_xe": "chan", "bf_xp": "chan", "bf_w": "chan", "bf_w50": "chan", "bf_w20": "chan", "bf_f_peak": dunits, "bf_f_int": dunits, "rms": dunits, "f_peak": dunits, "snr_int": "-" } catformt = list(catformt) catparunits = list(catparunits) for i in sorted(pars): if i not in replParam: cathead.append(i) catformt.append("%16.8f") catparunits.append(newunits[i]) # Extend the parameter array tmpObjects = np.empty((objects.shape[0], len(cathead))) tmpObjects[:, :] = np.nan tmpObjects[:, 0:objects.shape[1]] = objects objects = tmpObjects for i in d: source_dict = d[i].getParameters() # Check source index ID = int(source_dict["id"].getValue()) IDind = cathead.index("id") ind = np.where(objects[:,IDind]==ID)[0][0] for j in sorted(source_dict): if j in replParam: 
objects[ind][cathead.index(origParam[replParam.index(j)])] = source_dict[j].getValue() else: objects[ind][cathead.index(j)] = source_dict[j].getValue() objects = np.array(objects) cathead = np.array(cathead) catparunits = np.array(catparunits) catformt = np.array(catformt) # if mask optimization is enabled, some parameters from the linker have to be updated if Parameters["parameters"]["optimiseMask"]: for i in range(objects.shape[0]): # bounding box coordinates coord = [] for c in ["x_min", "x_max", "y_min", "y_max", "z_min", "z_max"]: coord.append(int(objects[i, cathead == c])) # cut out object submask submask = mask[coord[4]:coord[5] + 1, coord[2]:coord[3] + 1, coord[0]:coord[1] + 1] objID = objects[i, cathead == "id"] submask[submask!=objID] = 0 # Update n_pix, x_geo and n_chan n_pix = submask.sum() / objID ind = np.vstack(np.where(submask == objID)) cgeo = (ind.sum(axis=1)).astype(float) / float(n_pix) x_geo, y_geo, z_geo = cgeo[2] + coord[0], cgeo[1] + coord[2], cgeo[0] + coord[4] zmin, zmax = min(ind[0]), max(ind[0]) + 1 n_chan = zmax - zmin # Update n_los submaskSumA0 = submask.sum(axis=0) submaskSumA0[submaskSumA0 > 1] = 1 n_los = submaskSumA0.sum() objects[i, cathead == "n_pix"] = n_pix objects[i, cathead == "n_chan"] = n_chan objects[i, cathead == "n_los"] = n_los objects[i, cathead == "x_geo"] = x_geo objects[i ,cathead == "y_geo"] = y_geo objects[i, cathead == "z_geo"] = z_geo del submask err.message("Parameterisation complete.") return cube, mask, objects, cathead, catformt, catparunits
def sigma_scale(cube, scaleX=False, scaleY=False, scaleZ=True, edgeX=0, edgeY=0, edgeZ=0, statistic="mad", fluxRange="all", method="global", windowSpatial=20, windowSpectral=20, gridSpatial=0, gridSpectral=0, interpolation="none"): # Print some informational messages err.message("Generating noise-scaled data cube:") err.message(" Selecting " + str(method) + " noise measurement method.") if statistic == "mad": err.message(" Applying median absolute deviation to " + str(fluxRange) + " pixels.") if statistic == "std": err.message(" Applying standard deviation to " + str(fluxRange) + " pixels.") if statistic == "gauss": err.message(" Applying Gaussian fit to " + str(fluxRange) + " pixels.") if statistic == "negative": err.message(" Applying Gaussian fit to negative pixels.") # Check the dimensions of the cube (could be obtained from header information) dimensions = np.shape(cube) # LOCAL noise measurement within running window (slower and less memory-friendly) if method == "local": # Make window sizes integers >= 1 windowSpatial = max(int(windowSpatial), 1) windowSpectral = max(int(windowSpectral), 1) # Ensure that window sizes are odd windowSpatial += (1 - windowSpatial % 2) windowSpectral += (1 - windowSpectral % 2) # Set grid sizes to half the window sizes if undefined if not gridSpatial: gridSpatial = windowSpatial // 2 if not gridSpectral: gridSpectral = windowSpectral // 2 # Make grid sizes integers >= 1 gridSpatial = max(int(gridSpatial), 1) gridSpectral = max(int(gridSpectral), 1) # Ensure that grid sizes are odd gridSpatial += (1 - gridSpatial % 2) gridSpectral += (1 - gridSpectral % 2) # Print grid and window sizes adopted err.message(" Using grid size of [" + str(gridSpatial) + ", " + str(gridSpectral) + "]") err.message(" and window size of [" + str(windowSpatial) + ", " + str(windowSpectral) + "].") # Generate grid points to be used gridPointsZ = np.arange((dimensions[0] - gridSpectral * (int(math.ceil(float(dimensions[0]) / float(gridSpectral))) - 1)) // 2, dimensions[0], gridSpectral) gridPointsY = np.arange((dimensions[1] - gridSpatial * (int(math.ceil(float(dimensions[1]) / float(gridSpatial))) - 1)) // 2, dimensions[1], gridSpatial) gridPointsX = np.arange((dimensions[2] - gridSpatial * (int(math.ceil(float(dimensions[2]) / float(gridSpatial))) - 1)) // 2, dimensions[2], gridSpatial) # Divide grid and window sizes by 2 to get radii radiusGridSpatial = gridSpatial // 2 radiusGridSpectral = gridSpectral // 2 radiusWindowSpatial = windowSpatial // 2 radiusWindowSpectral = windowSpectral // 2 # Create empty cube (filled with NaN) to hold noise values rms_cube = np.full(cube.shape, np.nan, dtype=cube.dtype) # Determine RMS across window centred on grid cell for z in gridPointsZ: for y in gridPointsY: for x in gridPointsX: grid = (max(0, z - radiusGridSpectral), min(dimensions[0], z + radiusGridSpectral + 1), max(0, y - radiusGridSpatial), min(dimensions[1], y + radiusGridSpatial + 1), max(0, x - radiusGridSpatial), min(dimensions[2], x + radiusGridSpatial + 1)) window = (max(0, z - radiusWindowSpectral), min(dimensions[0], z + radiusWindowSpectral + 1), max(0, y - radiusWindowSpatial), min(dimensions[1], y + radiusWindowSpatial + 1), max(0, x - radiusWindowSpatial), min(dimensions[2], x + radiusWindowSpatial + 1)) if not np.all(np.isnan(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]])): if interpolation == "linear" or interpolation == "cubic": # Write value into grid point for later interpolation rms_cube[z, y, x] = GetRMS(cube[window[0]:window[1], 
window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) else: # Fill entire grid cell rms_cube[grid[0]:grid[1], grid[2]:grid[3], grid[4]:grid[5]] = GetRMS(cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) del grid, window # Carry out interpolation if requested, taking NaNs into account if interpolation == "linear" or interpolation == "cubic": err.message(" Interpolating in between grid points (" + str(interpolation) + ").") # First across each spatial plane if gridSpatial > 1: for z in gridPointsZ: for y in gridPointsY: data_values = rms_cube[z, y, gridPointsX] not_nan = np.logical_not(np.isnan(data_values)) if any(not_nan): interp_coords = np.arange(0, dimensions[2]) if interpolation == "cubic": spline = InterpolatedUnivariateSpline(gridPointsX[not_nan], data_values[not_nan]) rms_cube[z, y, 0:dimensions[2]] = spline(interp_coords) del spline else: interp_values = np.interp(interp_coords, gridPointsX[not_nan], data_values[not_nan]) rms_cube[z, y, 0:dimensions[2]] = interp_values del interp_values del interp_coords del data_values, not_nan for x in range(dimensions[2]): data_values = rms_cube[z, gridPointsY, x] not_nan = np.logical_not(np.isnan(data_values)) if any(not_nan): interp_coords = np.arange(0, dimensions[1]) if interpolation == "cubic": spline = InterpolatedUnivariateSpline(gridPointsY[not_nan], data_values[not_nan]) rms_cube[z, 0:dimensions[1], x] = spline(interp_coords) del spline else: interp_values = np.interp(interp_coords, gridPointsY[not_nan], data_values[not_nan]) rms_cube[z, 0:dimensions[1], x] = interp_values del interp_values del interp_coords del data_values, not_nan # Alternative option: 2-D spatial interpolation using SciPy's interp2d #from scipy.interpolate import interp2d #xx, yy = np.meshgrid(gridPointsX, gridPointsY) #data_values = rms_cube[z, yy, xx] #f = interp2d(gridPointsX, gridPointsY, data_values, kind="cubic") #interp_coords_x = np.arange(0, dimensions[2]) #interp_coords_y = np.arange(0, dimensions[1]) #rms_cube[z, :, :] = f(interp_coords_x, interp_coords_y) # Then along the spectral axis if gridSpectral > 1: for y in range(dimensions[1]): for x in range(dimensions[2]): data_values = rms_cube[gridPointsZ, y, x] not_nan = np.logical_not(np.isnan(data_values)) if any(not_nan): interp_coords = np.arange(0, dimensions[0]) if interpolation == "cubic": spline = InterpolatedUnivariateSpline(gridPointsZ[not_nan], data_values[not_nan]) rms_cube[0:dimensions[0], y, x] = spline(interp_coords) del spline else: interp_values = np.interp(interp_coords, gridPointsZ[not_nan], data_values[not_nan]) rms_cube[0:dimensions[0], y, x] = interp_values del interp_values del interp_coords del data_values, not_nan # Replace any invalid RMS values with NaN with np.errstate(invalid="ignore"): rms_cube[rms_cube <= 0] = np.nan # Divide data cube by RMS cube cube /= rms_cube # Delete the RMS cube again to release its memory #del rms_cube # GLOBAL noise measurement on entire 2D plane (faster and more memory-friendly) else: # Define the range over which statistics are calculated z1 = int(edgeZ) z2 = int(dimensions[0] - edgeZ) y1 = int(edgeY) y2 = int(dimensions[1] - edgeY) x1 = int(edgeX) x2 = int(dimensions[2] - edgeX) # Make sure edges don't exceed cube size err.ensure(z1 < z2 and y1 < y2 and x1 < x2, "Edge size exceeds cube size for at least one axis.") # Create empty cube (filled with 1) to hold noise values rms_cube = 
np.ones(cube.shape, dtype=cube.dtype) # Measure noise across 2D planes and scale cube accordingly if scaleZ: for i in range(dimensions[0]): if not np.all(np.isnan(cube[i, y1:y2, x1:x2])): rms = GetRMS(cube[i, y1:y2, x1:x2], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) if rms > 0: rms_cube[i, :, :] *= rms cube[i, :, :] /= rms if scaleY: for i in range(dimensions[1]): if not np.all(np.isnan(cube[z1:z2, i, x1:x2])): rms = GetRMS(cube[z1:z2, i, x1:x2], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) if rms > 0: rms_cube[:, i, :] *= rms cube[:, i, :] /= rms if scaleX: for i in range(dimensions[2]): if not np.all(np.isnan(cube[z1:z2, y1:y2, i])): rms = GetRMS(cube[z1:z2, y1:y2, i], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) if rms > 0: rms_cube[:, :, i] *= rms cube[:, :, i] /= rms err.message("Noise-scaled data cube generated.\n") return cube, rms_cube
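# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of SoFiA): how sigma_scale might be
# called on a synthetic cube whose noise level grows with channel number.
# The helper name _demo_sigma_scale is hypothetical and is never executed on
# import; it assumes the module-level imports and helpers used by
# sigma_scale and GetRMS (np, err, nanmedian) are available.
# ---------------------------------------------------------------------------
def _demo_sigma_scale():
    rng = np.random.RandomState(0)
    cube = rng.normal(0.0, 1.0, (16, 32, 32)).astype("float32")
    cube *= np.linspace(1.0, 4.0, 16).reshape((16, 1, 1))  # noise ramp along z
    # Note: sigma_scale scales the cube in place and also returns it
    scaled, rms_cube = sigma_scale(cube, scaleZ=True, statistic="mad", method="global")
    # After scaling, every channel should have an RMS of roughly unity
    return scaled, rms_cube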
def import_mask(maskFile, header, axis_size, subcube): err.message("Loading mask cube:\n " + str(maskFile)) try: f = fits.open(maskFile, memmap=False) header_mask = f[0].header except: err.error("Failed to read mask cube.") # Extract axis sizes and types n_axes_mask, axis_size_mask, axis_type_mask = extract_axis_size(header_mask) # Ensure correct dimensionality check_cube_dimensions(n_axes_mask, axis_size_mask, cube_name="mask cube", min_dim = 1, max_dim = 4) # 1-D spectrum if n_axes_mask == 1: err.warning("Mask cube has 1 axis; interpreted as spectrum.\nAdding first and second axis.") ensure(header_mask['CRVAL1'] == header['CRVAL1'], "Input cube and mask are not on the same WCS grid.") if len(subcube) == 6: if header_mask["NAXIS1"] == axis_size[2]: err.message(" Input mask cube already matches size of data subcube.\n No subcube selection applied.") mask = np.reshape(f[0].data, (-1, 1, 1)) elif header_mask["NAXIS1"] == fullshape[0]: err.message(" Subcube selection applied to input mask cube.") mask = np.reshape(f[0].section[subcube[4]:subcube[5]], (-1, 1, 1)) else: err.error("Data subcube does not match size of mask subcube or full mask.") elif not len(subcube): mask = np.reshape(f[0].data, (-1, 1, 1)) else: err.error("The subcube list must have 6 entries ({0:d} given).".format(len(subcube))) # 2-D image elif n_axes_mask == 2: err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"], "Input cube and mask are not on the same WCS grid.") if len(subcube) == 6 or len(subcube) == 4: if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1]: err.message(" Input mask cube already matches size of data subcube.\n No subcube selection applied.") mask = np.array([f[0].data]) elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1]: err.message(" Subcube selection applied to input mask cube.") mask = np.array([f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]]) else: err.error("Data subcube does not match size of mask subcube or full mask.") else: mask = np.array([f[0].data]) # 3-D cube elif n_axes_mask == 3: err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"] and header_mask["CRVAL3"] == header["CRVAL3"], "Input cube and mask are not on the same WCS grid.") if len(subcube) == 6: if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1] and header_mask["NAXIS3"] == axis_size[2]: err.message(" Input mask cube already matches size of data subcube.\n No subcube selection applied.") mask = f[0].data elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1] and header_mask["NAXIS3"] == fullshape[0]: err.message(" Subcube selection applied to input mask cube.") mask = f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]] else: err.error("Data subcube does not match size of mask subcube or full mask.") else: mask = f[0].data # 4-D hypercube else: err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"] and header_mask["CRVAL3"] == header["CRVAL3"], "Input cube and mask are not on the same WCS grid.") if len(subcube) == 6: if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1] and header_mask["NAXIS3"] == axis_size[2]: err.message(" Input mask cube already matches size of data subcube.\n No subcube selection applied.") mask = f[0].section[0] elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == 
fullshape[1] and header_mask["NAXIS3"] == fullshape[0]: err.message(" Subcube selection applied to input mask cube.") mask = f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]] else: err.error("Data subcube does not match size of mask subcube or full mask.") else: mask = f[0].section[0] mask[mask > 0] = 1 f.close() err.message("Mask cube loaded.") # In all cases, convert mask to Boolean with masked pixels set to 1. return (mask > 0).astype(bool)
def apply_flagging(data, flagFile, flagRegions, subcube):
    # -------------------------------
    # Apply flagging cube if provided
    # -------------------------------
    if flagFile:
        err.message("Applying flagging cube:\n " + str(flagFile))
        try:
            f = fits.open(flagFile, memmap=False)
            header_flags = f[0].header
        except:
            err.error("Failed to read flagging cube.")
        # Extract axis sizes and types
        n_axes_flags, axis_size_flags, axis_type_flags = extract_axis_size(header_flags)
        # Ensure correct dimensionality
        check_cube_dimensions(n_axes_flags, axis_size_flags, cube_name="flagging cube")
        # Apply flagging
        # 2-D image
        if n_axes_flags == 2:
            for chan in range(data.shape[0]):
                if len(subcube) == 6 or len(subcube) == 4:
                    data[chan][np.isnan(f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]])] = np.nan
                else:
                    data[chan][np.isnan(f[0].data)] = np.nan
        # 3-D cube
        elif n_axes_flags == 3:
            if len(subcube) == 6:
                data[np.isnan(f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]])] = np.nan
            else:
                data[np.isnan(f[0].data)] = np.nan
        # 4-D hypercube
        else:
            if len(subcube) == 6:
                data[np.isnan(f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]])] = np.nan
            else:
                data[np.isnan(f[0].section[0])] = np.nan
        f.close()
        err.message("Flagging cube applied.")

    # ----------------------------------
    # Apply flagging regions if provided
    # ----------------------------------
    if flagRegions:
        err.message("Applying flagging regions:\n " + str(flagRegions))
        dim = len(data.shape)
        try:
            for region in flagRegions:
                # Replace empty upper boundaries with the full axis size
                for i in range(0, len(region) // 2):
                    if region[2 * i + 1] == "":
                        region[2 * i + 1] = data.shape[dim - i - 1]
                # Four entries specify an (x, y) box; six entries additionally specify a channel range
                if len(region) == 4:
                    data[0, region[2]:region[3], region[0]:region[1]] = np.nan
                else:
                    data[region[4]:region[5], region[2]:region[3], region[0]:region[1]] = np.nan
            err.message("Flagging regions applied.")
        except:
            err.error("Flagging did not succeed. Please check the dimensions\nof your data cube and flagging regions.")
    return data
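# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of SoFiA): blanking a rectangular
# pixel/channel region of a synthetic cube without a flagging file. The
# helper name _demo_apply_flagging is hypothetical and is never executed on
# import; it assumes the module-level imports of this file (np, err).
# ---------------------------------------------------------------------------
def _demo_apply_flagging():
    rng = np.random.RandomState(1)
    data = rng.normal(0.0, 1.0, (8, 64, 64))
    # Blank channels 2..3 of the pixel box x = 10..19, y = 20..29
    region = [10, 20, 20, 30, 2, 4]
    flagged = apply_flagging(data, flagFile="", flagRegions=[region], subcube=[])
    assert np.all(np.isnan(flagged[2:4, 20:30, 10:20]))
    return flagged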
def get_subcube_range(header, n_axes, axis_size, subcube, subcubeMode): # Basic sanity checks err.ensure( subcubeMode in {"pixel", "world"}, "Subcube mode must be 'pixel' or 'world'.") err.ensure( (len(subcube) == 4 and n_axes == 2) or (len(subcube) == 6 and n_axes > 2), "Subcube range must contain 4 values for 2-D cubes\n" "or 6 values for 3-D/4-D cubes.") # ----------------- # World coordinates # ----------------- if subcubeMode == "world": # Read WCS information try: wcsin = wcs.WCS(header) except: err.error("Failed to read WCS information from data cube header.") # Calculate cos(dec) correction for RA range: if wcsin.wcs.cunit[0] == "deg" and wcsin.wcs.cunit[1] == "deg": corrfact = math.cos(math.radians(subcube[1])) if n_axes == 4: subcube = wcsin.wcs_world2pix(np.array([[subcube[0] - subcube[3] / corrfact, subcube[1] - subcube[4], subcube[2] - subcube[5], 0], [subcube[0] + subcube[3] / corrfact, subcube[1] + subcube[4], subcube[2] + subcube[5], 0]]), 0)[:, :3] elif n_axes == 3: subcube = wcsin.wcs_world2pix(np.array([[subcube[0] - subcube[3] / corrfact, subcube[1] - subcube[4], subcube[2] - subcube[5]], [subcube[0] + subcube[3] / corrfact, subcube[1] + subcube[4], subcube[2] + subcube[5]]]), 0) elif n_axes == 2: subcube = wcsin.wcs_world2pix(np.array([[subcube[0] - subcube[2] / corrfact, subcube[1] - subcube[3]], [subcube[0] + subcube[2] / corrfact, subcube[1] + subcube[3]]]), 0) else: err.error("Unsupported number of axes.") # Flatten array subcube = np.ravel(subcube, order="F") # Ensure that min pix coord < max pix coord for all axes. # This operation is meaningful because wcs_world2pix returns negative pixel coordinates # only for pixels located before an axis' start (i.e., negative pixel coordinates should # not be interpreted as counting backward from an axis' end). subcube[0], subcube[1] = correct_order(subcube[0], subcube[1]) subcube[2], subcube[3] = correct_order(subcube[2], subcube[3]) if len(subcube) == 6: subcube[4], subcube[5] = correct_order(subcube[4], subcube[5]) # Convert to integer subcube = list(subcube.astype(int)) # Constrain subcube to be within cube boundaries for axis in range(min(3, n_axes)): err.ensure(subcube[2 * axis + 1] >= 0 and subcube[2 * axis] < axis_size[axis], "Subcube outside input cube range for axis {0:d}.".format(axis)) subcube[2 * axis] = max(subcube[2 * axis], 0) subcube[2 * axis + 1] = min(subcube[2 * axis + 1] + 1, axis_size[axis]) # ----------------- # Pixel coordinates # ----------------- else: # Ensure that pixel coordinates are integers for value in subcube: err.ensure(type(value) == int, "Subcube boundaries must be integer values.") # Sanity checks on boundaries for axis in range(min(3, n_axes)): # Ensure correct order err.ensure(subcube[2 * axis] < subcube[2 * axis + 1], "Lower subcube boundary greater than upper boundary.\nPlease check your input.") # Adjust lower boundary subcube[2 * axis] = max(subcube[2 * axis], 0) subcube[2 * axis] = min(subcube[2 * axis], axis_size[axis] - 1) # Adjust upper boundary: subcube[2 * axis + 1] = max(subcube[2 * axis + 1], 1) subcube[2 * axis + 1] = min(subcube[2 * axis + 1], axis_size[axis]) # Report final subcube boundaries err.message(" Loading subcube of range " + str(subcube) + '.') return subcube
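# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of SoFiA): clipping a pixel-coordinate
# subcube request to the boundaries of a 150 x 120 x 40 cube. The helper
# name _demo_get_subcube_range is hypothetical; the header argument is not
# used by get_subcube_range in "pixel" mode, so None is passed here.
# ---------------------------------------------------------------------------
def _demo_get_subcube_range():
    axis_size = [150, 120, 40]            # NAXIS1, NAXIS2, NAXIS3
    request = [10, 200, -5, 50, 0, 30]    # x1, x2, y1, y2, z1, z2
    clipped = get_subcube_range(None, 3, axis_size, request, "pixel")
    # -> [10, 150, 0, 50, 0, 30]: boundaries truncated to the cube size
    return clipped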
"The directory already exists and is not empty. You can do one\n" "of the following:\n\n" "1) Enable automatic overwrite in the GUI or parameter file\n" "2) Change base name and/or output directory in the GUI or\n" " parameter file\n" "3) Delete or rename the existing directory", fatal=True, frame=True) return # ----------------------------------------------- # ---- Check if parameter file name provided ---- # ----------------------------------------------- if len(sys.argv) != 2: err.message("\n\033[1;4mUsage:\033[24m sofia_pipeline.py \033[3m<filename>\033[0m\n\nThe filename of a valid SoFiA parameter file must be specified. Please\nadd the full path if the file is not located in the current directory.\n\n") sys.exit(1) # ----------------------------------------------- # ---- Print some initial status information ---- # ----------------------------------------------- err.print_progress_message("Running the SoFiA pipeline") err.message( " Using: SoFiA " + sofia_version + "\n" " Python " + str(sys.version_info[0]) + "." + str(sys.version_info[1]) + "." + str(sys.version_info[2]) + "\n" " NumPy " + np.__version__ + "\n" " SciPy " + scipy_version + "\n" " Astropy " + astropy_version + "\n")
def GetRMS(cube, rmsMode="negative", fluxRange="all", zoomx=1, zoomy=1, zoomz=1, verbose=0, min_hist_peak=0.05, sample=1, twoPass=False): """ Description of arguments ------------------------ rmsMode Select which algorithm should be used for calculating the noise. Allowed options: 'std' Standard deviation about 0. 'mad' Median absolute deviation about 0. 'moment' 2nd moment of flux histogram, assuming a 1st moment of 0. 'gauss' Width of Gaussian fitted to flux histogram, assuming a centroid of 0. 'negative' Width of Gaussian fitted to negative side of the flux histogram, again assuming a centroid of 0. This is a legacy option and may be removed from SoFiA in the future. fluxRange Define which part of the data are to be used in the noise measurement. Allowed options: 'negative' Use only pixels with negative flux. 'positive' Use only pixels with positive flux. 'all' Use both positive and negative (i.e. all) pixels. verbose Print additional progress messages if set to True. twoPass Run a second pass of MAD and STD, this time with a clip level of 5 times the RMS from the first pass. """ # Check input for sanity if fluxRange != "all" and fluxRange != "positive" and fluxRange != "negative": sys.stderr.write("WARNING: Illegal value of fluxRange = '" + str(fluxRange) + "'.\n") sys.stderr.write(" Using default value of 'all' instead.\n") fluxRange = "all" if rmsMode != "std" and rmsMode != "mad" and rmsMode != "negative" and rmsMode != "gauss" and rmsMode != "moment": sys.stderr.write("WARNING: Illegal value of rmsMode = '" + str(rmsMode) + "'.\n") sys.stderr.write(" Using default value of 'mad' instead.\n") rmsMode = "mad" # Ensure that we have a 3D cube if len(cube.shape) == 2: cube = np.array([cube]) x0, x1 = int(math.ceil((1 - 1.0 / zoomx) * cube.shape[2] / 2)), int(math.floor((1 + 1.0 / zoomx) * cube.shape[2] / 2)) + 1 y0, y1 = int(math.ceil((1 - 1.0 / zoomy) * cube.shape[1] / 2)), int(math.floor((1 + 1.0 / zoomy) * cube.shape[1] / 2)) + 1 z0, z1 = int(math.ceil((1 - 1.0 / zoomz) * cube.shape[0] / 2)), int(math.floor((1 + 1.0 / zoomz) * cube.shape[0] / 2)) + 1 err.message(" Estimating rms on subcube (x,y,z zoom = %.0f,%.0f,%.0f) ..." % (zoomx, zoomy, zoomz), verbose) err.message(" Estimating rms on subcube sampling every %i voxels ..." % (sample), verbose) err.message(" ... Subcube shape is " + str(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample].shape) + " ...", verbose) # Check if only negative or positive pixels are to be used: if fluxRange == "negative": with np.errstate(invalid="ignore"): halfCube = cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][cube[z0:z1:sample, y0:y1:sample, x0:x1:sample] < 0] err.ensure(halfCube.size, "Cannot measure noise from negative flux values.\nNo negative fluxes found in data cube.") elif fluxRange == "positive": with np.errstate(invalid="ignore"): halfCube = cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][cube[z0:z1:sample, y0:y1:sample, x0:x1:sample] > 0] err.ensure(halfCube.size, "Cannot measure noise from positive flux values.\nNo positive fluxes found in data cube.") # NOTE: The purpose of the with... statement is to temporarily disable certain warnings, as otherwise the # Python interpreter would print a warning whenever a value of NaN is compared to 0. The comparison # is defined to yield False, which conveniently removes NaNs by default without having to do that # manually in a separate step, but the associated warning message is unfortunately a nuisance. 
# GAUSSIAN FIT TO NEGATIVE FLUXES if rmsMode == "negative": nrbins = max(100, int(math.ceil(float(cube.size) / 1e+5))) cubemin = np.nanmin(cube) err.ensure(cubemin < 0, "Cannot estimate noise from Gaussian fit to negative flux\nhistogram; no negative fluxes found in data cube.") bins = np.arange(cubemin, abs(cubemin) / nrbins - 1e-12, abs(cubemin) / nrbins) fluxval = (bins[:-1] + bins[1:]) / 2 rmshisto = np.histogram(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][~np.isnan(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample])], bins=bins)[0] nrsummedbins = 0 while rmshisto[-nrsummedbins-1:].sum() < min_hist_peak * rmshisto.sum(): nrsummedbins += 1 if nrsummedbins: if verbose: sys.stdout.write(" ... adjusting bin size to get a fraction of voxels in central bin >= " + str(min_hist_peak) + "\n") nrbins /= (nrsummedbins + 1) bins = np.arange(cubemin, abs(cubemin) / nrbins - 1e-12, abs(cubemin) / nrbins) fluxval = (bins[:-1] + bins[1:]) / 2.0 rmshisto = np.histogram(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][~np.isnan(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample])], bins=bins)[0] rms = abs(sp.optimize.curve_fit(Gaussian, fluxval, rmshisto, p0=[rmshisto.max(), -fluxval[rmshisto < rmshisto.max() / 2.0].max() * 2.0 / 2.355])[0][1]) # GAUSSIAN FIT TO FLUX HISTOGRAM / SECOND MOMENT OF FLUX HISTOGRAM elif rmsMode == "gauss" or rmsMode == "moment": nBins = 100 dataMin = float(np.nanmin(cube)) dataMax = float(np.nanmax(cube)) err.ensure(dataMin < dataMax, "Maximum not greater than minimum. Cannot determine noise level.") if fluxRange == "negative": # Set upper limit to 0 err.ensure(dataMin < 0.0, "Minimum > 0. Cannot determine noise level for negative pixels.") dataMax = 0.0 elif fluxRange == "positive": # Set lower limit to 0 err.ensure(dataMax > 0.0, "Maximum < 0. Cannot determine noise level for positive pixels.") dataMin = 0.0 else: # Select the smallest of the two for both limits err.ensure(dataMin < 0.0 and dataMax > 0.0, "Noise values not scattered around 0. Cannot measure noise level.") dataMin = -min(abs(dataMin), abs(dataMax)) dataMax = min(abs(dataMin), abs(dataMax)) binWidth = (dataMax - dataMin) / float(nBins) bins = np.arange(dataMin, dataMax, binWidth) binCtr = (bins[:-1] + bins[1:]) / 2.0 hist = np.histogram(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][~np.isnan(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample])], bins=bins)[0] # Calculate 2nd moment mom2 = moment2(binCtr, hist) # Adjust bin size if necessary counter = 0 while mom2 < 5.0 * binWidth and counter < 2: counter += 1 err.message("Increasing number of bins by factor of " + str(int(20.0 * binWidth / mom2)) + " for Gaussian fit.") nBins = int(nBins * 20.0 * binWidth / mom2) binWidth = (dataMax - dataMin) / float(nBins) binCtr = (bins[:-1] + bins[1:]) / 2.0 hist = np.histogram(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][~np.isnan(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample])], bins=bins)[0] mom2 = moment2(binCtr, hist) # Carry out Gaussian fitting if requested if rmsMode == "gauss": rms = abs(sp.optimize.curve_fit(Gaussian, binCtr, hist, p0=[hist.max(), mom2])[0][1]) else: rms = mom2 # MEDIAN ABSOLUTE DEVIATION elif rmsMode == "mad": if fluxRange == "all": # NOTE: Here we assume that the median of the data is zero! 
rms = 1.4826 * nanmedian(abs(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample]), axis=None) if twoPass: err.message("Repeating noise estimation with 5-sigma clip.", verbose) with np.errstate(invalid="ignore"): rms = 1.4826 * nanmedian(abs(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][abs(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample]) < 5.0 * rms]), axis=None) else: # NOTE: Here we assume that the median of the data is zero! There are no more NaNs in halfCube. rms = 1.4826 * np.median(abs(halfCube), axis=None) if twoPass: err.message("Repeating noise estimation with 5-sigma clip.", verbose) rms = 1.4826 * np.median(abs(halfCube[abs(halfCube) < 5.0 * rms]), axis=None) # STANDARD DEVIATION elif rmsMode == "std": if fluxRange == "all": # NOTE: Here we assume that the mean of the data is zero! rms = nan_standard_deviation(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample]) if twoPass: err.message("Repeating noise estimation with 5-sigma clip.", verbose) with np.errstate(invalid="ignore"): rms = nan_standard_deviation(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample][abs(cube[z0:z1:sample, y0:y1:sample, x0:x1:sample]) < 5.0 * rms]) else: # NOTE: Here we assume that the mean of the data is zero! There are no more NaNs in halfCube. rms = standard_deviation(halfCube) if twoPass: err.message("Repeating noise estimation with 5-sigma clip.", verbose) rms = standard_deviation(halfCube[abs(halfCube) < 5.0 * rms]) err.message(" ... %s rms = %.2e (data units)" % (rmsMode, rms), verbose) return rms
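# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of SoFiA): comparing the noise
# estimators of GetRMS on pure Gaussian noise of known sigma. The helper
# name _demo_get_rms is hypothetical and is never executed on import; it
# assumes the module-level imports and helpers used by GetRMS (np, sp, math,
# err, nanmedian, nan_standard_deviation, Gaussian, moment2) are available.
# ---------------------------------------------------------------------------
def _demo_get_rms():
    rng = np.random.RandomState(2)
    cube = rng.normal(0.0, 2.5, (32, 64, 64))
    results = {}
    for mode in ("std", "mad", "gauss", "negative"):
        # Each estimator should recover a value close to the true sigma of 2.5
        results[mode] = GetRMS(cube, rmsMode=mode, fluxRange="all", verbose=0)
    return results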
def SCfinder_mem(cube, mask, header, t0, kernels=[[0, 0, 0, "b"],], threshold=3.5, sizeFilter=0, maskScaleXY=2.0, maskScaleZ=2.0, kernelUnit="pixel", edgeMode="constant", rmsMode="negative", fluxRange="all", verbose=0): # Define a few constants FWHM_CONST = 2.0 * math.sqrt(2.0 * math.log(2.0)) # Conversion between sigma and FWHM of Gaussian function MAX_PIX_CONST = 1.0e+6 # Maximum number of pixels for noise calculation; sampling is set accordingly # Check for NaN in cube found_nan = np.isnan(cube).any() # Set sampling sampleRms for rms measurement sampleRms = max(1, int((float(cube.size) / MAX_PIX_CONST)**(1.0 / min(3, len(cube.shape))))) # Measure noise in original cube with sampling "sampleRms" rms = GetRMS(cube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms) # Loop over all kernels for kernel in kernels: [kx, ky, kz, kt] = kernel if verbose: err.linebreak() err.print_progress_time(t0) err.message(" Filter {0:} {1:} {2:} {3:} ...".format(kx, ky, kz, kt)) if kernelUnit == "world" or kernelUnit == "w": if verbose: err.message(" Converting filter size to pixels ...") kx = abs(float(kx) / header["CDELT1"]) ky = abs(float(ky) / header["CDELT2"]) kz = abs(float(kz) / header["CDELT3"]) if kt == "b": if kz != int(math.ceil(kz)) and verbose: err.warning("Rounding width of boxcar z kernel to next integer.") kz = int(math.ceil(kz)) # Create a copy of the original cube cube_smooth = np.copy(cube) # Replace all NaNs with zero if found_nan: cube_smooth[np.isnan(cube)] = 0.0 cube_smooth[(cube_smooth > 0) & (mask > 0)] = maskScaleXY * rms cube_smooth[(cube_smooth < 0) & (mask > 0)] = -maskScaleXY * rms # Spatial smoothing if kx + ky: cube_smooth = ndimage.filters.gaussian_filter(cube_smooth, [0, ky / FWHM_CONST, kx / FWHM_CONST], mode=edgeMode) # Spectral smoothing if kz: if kt == "b": cube_smooth = ndimage.filters.uniform_filter1d(cube_smooth, kz, axis=0, mode=edgeMode) elif kt == "g": cube_smooth = ndimage.filters.gaussian_filter1d(cube_smooth, kz / FWHM_CONST, axis=0, mode=edgeMode) # Re-insert the NaNs taken out earlier if found_nan: cube_smooth[np.isnan(cube)] = np.nan # Calculate the RMS of the smoothed cube: rms_smooth = GetRMS(cube_smooth, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms) # Add pixels above threshold to mask by setting bit 1 with np.errstate(invalid="ignore"): mask |= (np.absolute(cube_smooth) >= threshold * rms_smooth) #mask = np.bitwise_or(mask, np.greater_equal(np.absolute(cube_smooth), threshold * rms_smooth)) # Delete smoothed cube again del cube_smooth return
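# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of SoFiA): running the smooth-and-clip
# finder above on a synthetic cube containing one faint, extended source.
# The helper name _demo_scfinder is hypothetical and is never executed on
# import; it assumes the module-level imports used by SCfinder_mem
# (np, math, ndimage, err) are available. The mask is filled in place.
# ---------------------------------------------------------------------------
def _demo_scfinder():
    rng = np.random.RandomState(3)
    cube = rng.normal(0.0, 1.0, (20, 50, 50)).astype("float32")
    # Inject a weak Gaussian blob in the centre of the cube
    z, y, x = np.indices(cube.shape, dtype=float)
    cube += 2.0 * np.exp(-((z - 10)**2 / 8.0 + (y - 25)**2 / 18.0 + (x - 25)**2 / 18.0))
    mask = np.zeros(cube.shape, dtype=bool)
    kernels = [[0, 0, 0, "b"], [3, 3, 0, "b"], [3, 3, 3, "b"]]
    # t0 is only used for progress messages, which are disabled here
    SCfinder_mem(cube, mask, header=None, t0=0.0, kernels=kernels, threshold=4.0, rmsMode="mad", fluxRange="all", verbose=0)
    return mask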
def writeMoments(datacube, maskcube, filename, debug, header, compress, write_mom, flagOverwrite): # Exit if nothing is to be done if not any(write_mom): err.warning("No moment maps requested; skipping moment map generation.") return # --------------------------- # Number of detected channels # --------------------------- nrdetchan = (maskcube > 0).sum(axis=0) if np.nanmax(nrdetchan) < 65535: nrdetchan = nrdetchan.astype("int16") else: nrdetchan = nrdetchan.astype("int32") hdu = pyfits.PrimaryHDU(data=nrdetchan, header=header) hdu.header["BUNIT"] = "Nchan" hdu.header["DATAMIN"] = np.nanmin(nrdetchan) hdu.header["DATAMAX"] = np.nanmax(nrdetchan) hdu.header["ORIGIN"] = sofia_version_full glob.delete_header(hdu.header, "CTYPE3") glob.delete_header(hdu.header, "CRPIX3") glob.delete_header(hdu.header, "CRVAL3") glob.delete_header(hdu.header, "CDELT3") name = str(filename) + "_nrch.fits" if compress: name += ".gz" # Check for overwrite flag if not flagOverwrite and os.path.exists(name): err.error("Output file exists: " + str(name) + ".", fatal=False) else: hdu.writeto(name, output_verify="warn", **__astropy_arg_overwrite__) # ---------------------- # Moment 0, 1 and 2 maps # ---------------------- # WARNING: The generation of moment maps will mask the copy of the data cube held # in memory by SoFiA. If you wish to use the original data cube after # this point, please reload it first! datacube[maskcube == 0] = 0 # Regrid cube if necessary if "CELLSCAL" in header and header["CELLSCAL"] == "1/F": err.warning( "CELLSCAL keyword with value of 1/F found.\n" "Will regrid data cube before creating moment images.") datacube = glob.regridMaskedChannels(datacube, maskcube, header) # ALERT: Why are we doing this? #datacube = np.array(datacube, dtype=np.single) # Extract relevant WCS parameters if glob.check_wcs_info(header): width = header["CDELT3"] chan0 = header["CRPIX3"] freq0 = header["CRVAL3"] mom_scale_factor = 1.0 # Velocity if glob.check_header_keywords(glob.KEYWORDS_VELO, header["CTYPE3"]): if not "CUNIT3" in header or header["CUNIT3"].lower() == "m/s": # Assuming m/s and converting to km/s mom_scale_factor = 1.0e-3 unit_spec = "km/s" elif header["CUNIT3"].lower() == "km/s": # Assuming km/s unit_spec = "km/s" else: # Working with whatever velocity units the cube has unit_spec = str(header["CUNIT3"]) # Frequency elif glob.check_header_keywords(glob.KEYWORDS_FREQ, header["CTYPE3"]): if not "CUNIT3" in header or header["CUNIT3"].lower() == "hz": # Assuming Hz unit_spec = "Hz" elif header["CUNIT3"].lower() == "khz": # Assuming kHz and converting to Hz mom_scale_factor = 1.0e+3 unit_spec = "Hz" else: # Working with whatever frequency units the cube has unit_spec = str(header["CUNIT3"]) else: err.warning("Axis descriptors missing from FITS file header.\nMoment maps will not be scaled!") width = 1.0 chan0 = 0.0 freq0 = 0.0 mom_scale_factor = 1.0 unit_spec = "chan" # Prepare moment maps # NOTE: We are making use of NumPy's array broadcasting rules here to avoid # having to cast array sizes to the full 3-D data cube size! 
moments = [None, None, None] with np.errstate(invalid="ignore"): if any(write_mom): # Definition of moment 0 moments[0] = np.nansum(datacube, axis=0) if write_mom[1] or write_mom[2]: # Definition of moment 1 velArr = ((np.arange(datacube.shape[0]) + 1.0 - chan0) * width + freq0).reshape((datacube.shape[0], 1, 1)) moments[1] = np.divide(np.nansum(velArr * datacube, axis=0), moments[0]) if write_mom[2]: # Definition of moment 2 velArr = velArr - moments[1] moments[2] = np.sqrt(np.divide(np.nansum(velArr * velArr * datacube, axis=0), moments[0])) # Multiply moment 0 by channel width moments[0] *= abs(width) # Set up unit strings if "BUNIT" in header: unit_flux = str(header["BUNIT"]) # Correct for common misspellings of "Jy[/beam]" if unit_flux.lower() == "jy": unit_flux = "Jy." + unit_spec elif unit_flux.lower() == "jy/beam": unit_flux = "Jy/beam." + unit_spec else: unit_flux += "." + unit_spec else: err.warning("Cannot determine flux unit; BUNIT missing from header.") unit_flux = "" unit_mom = [unit_flux, unit_spec, unit_spec] # Writing moment maps to disk for i in range(3): if write_mom[i] and moments[i] is not None: err.message("Writing moment {0:d} image.".format(i)) moments[i] *= mom_scale_factor hdu = pyfits.PrimaryHDU(data=moments[i], header=header) hdu.header["BUNIT"] = unit_mom[i] hdu.header["DATAMIN"] = np.nanmin(moments[i]) hdu.header["DATAMAX"] = np.nanmax(moments[i]) hdu.header["ORIGIN"] = sofia_version_full hdu.header["CELLSCAL"] = "CONSTANT" glob.delete_header(hdu.header, "CRPIX3") glob.delete_header(hdu.header, "CRVAL3") glob.delete_header(hdu.header, "CDELT3") glob.delete_header(hdu.header, "CTYPE3") if debug: hdu.writeto(str(filename) + "_mom{0:d}.debug.fits".format(i), output_verify="warn", **__astropy_arg_overwrite__) else: name = str(filename) + "_mom{0:d}.fits".format(i) if compress: name += ".gz" # Check for overwrite flag if not flagOverwrite and os.path.exists(name): err.error("Output file exists: " + str(name) + ".", fatal=False) else: hdu.writeto(name, output_verify="warn", **__astropy_arg_overwrite__) return
def SCfinder_mem(cube, header, t0, kernels=[[0, 0, 0, "b"],], threshold=3.5, sizeFilter=0, maskScaleXY=2.0, maskScaleZ=2.0, kernelUnit="pixel", edgeMode="constant", rmsMode="negative", fluxRange="all", verbose=0): # Define a few constants FWHM_CONST = 2.0 * math.sqrt(2.0 * math.log(2.0)) # Conversion between sigma and FWHM of Gaussian function MAX_PIX_CONST = 1e+6 # Maximum number of pixels for noise calculation; sampling is set accordingly # Create binary mask array msk = np.zeros(cube.shape, np.bool) found_nan = np.isnan(cube).sum() # Set sampling sampleRms for rms measurement sampleRms = max(1, int((float(np.array(cube.shape).prod()) / MAX_PIX_CONST)**(1.0 / min(3, len(cube.shape))))) # Measure noise in original cube with sampling "sampleRms" rms = GetRMS(cube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms) # Loop over all kernels for kernel in kernels: [kx, ky, kz, kt] = kernel if verbose: err.linebreak() err.print_progress_time(t0) err.message(" Filter %s %s %s %s ..." % (kx, ky, kz, kt)) if kernelUnit == "world" or kernelUnit == "w": if verbose: err.message(" Converting filter size to pixels ...") kx = abs(float(kx) / header["CDELT1"]) ky = abs(float(ky) / header["CDELT2"]) kz = abs(float(kz) / header["CDELT3"]) if kt == "b": if kz != int(math.ceil(kz)) and verbose: err.warning("Rounding width of boxcar z kernel to next integer.") kz = int(math.ceil(kz)) # Create a copy of the original cube smoothedCube = np.copy(cube) # Replace all NaNs with zero (and INFs with a finite number) if found_nan: smoothedCube = np.nan_to_num(smoothedCube) smoothedCube[(smoothedCube > 0) & (msk > 0)] = +maskScaleXY * rms smoothedCube[(smoothedCube < 0) & (msk > 0)] = -maskScaleXY * rms # Spatial smoothing if kx + ky: smoothedCube = ndimage.filters.gaussian_filter(smoothedCube, [0, ky / FWHM_CONST, kx / FWHM_CONST], mode=edgeMode) # Spectral smoothing if kz: if kt == "b": smoothedCube = ndimage.filters.uniform_filter1d(smoothedCube, kz, axis=0, mode=edgeMode) elif kt == "g": smoothedCube = ndimage.filters.gaussian_filter1d(smoothedCube, kz / FWHM_CONST, axis=0, mode=edgeMode) # Re-insert the NaNs (but not the INFs) taken out earlier if found_nan: smoothedCube[np.isnan(cube)] = np.nan # Calculate the RMS of the smoothed cube: smoothedrms = GetRMS(smoothedCube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms) # Get rid of the NaNs a second time #if found_nan: smoothedCube = np.nan_to_num(smoothedCube) # NOTE: This should not be necessary because any comparison with NaN will always yield False. # Hence, NaN pixels will never be included in the mask below. # Add pixels above threshold to mask by setting bit 1 with np.errstate(invalid="ignore"): msk = np.bitwise_or(msk, np.greater_equal(np.absolute(smoothedCube), threshold * smoothedrms)) # Delete smoothed cube again del smoothedCube return msk
def fix_gipsy_header(header_orig):
    # GIPSY keys for spectral axis
    key_opt = ["FREQ-OHEL", "FREQ-OLSR"]
    key_rad = ["FREQ-RHEL", "FREQ-RLSR"]
    header = header_orig.copy()
    naxis = header["NAXIS"]

    for i in range(1, naxis + 1):
        ctype = header["CTYPE%d" % i]
        if ctype in key_opt + key_rad:
            axis = i
            # Read reference velocity - from VELR or DRVAL
            try:
                if "VELR" in header:
                    vel = header["VELR"]
                elif "DRVAL%d" % axis in header:
                    vel = header["DRVAL%d" % axis]
                    unit = header["DUNIT%d" % axis]
                    if unit.lower() == "km/s":
                        vel = vel * 1000.0
                    elif unit.lower() != "m/s":
                        break
            except:
                err.warning("Problem with reference velocity.")
                break

            # Convert reference frequency to Hz
            try:
                freq  = header["CRVAL%d" % axis]
                dfreq = header["CDELT%d" % axis]
                unit  = header["CUNIT%d" % axis]
                freqUnits = ["hz", "khz", "mhz", "ghz"]
                j = freqUnits.index(unit.lower())
                freq  *= 1000**j
                dfreq *= 1000**j
            except:
                err.warning("Problem with reference frequency.")
                break

            # Need rest frequency for conversion
            try:
                freq0Names = ["FREQ0", "FREQR", "RESTFRQ"]
                for key in freq0Names:
                    try:
                        freq0 = header[key]
                    except:
                        pass
                header["RESTFRQ"] = freq0
            except:
                err.warning("Rest frequency not found.")
                break

            # Calculate reference frequency in the barycentric system
            if ctype in key_opt:
                freqB = freq0 / (1.0 + vel / scipy.constants.c)
            else:
                freqB = freq0 / (1.0 - vel / scipy.constants.c)

            # Calculate topocentric velocity
            velT = scipy.constants.c * ((freqB**2 - freq**2) / (freqB**2 + freq**2))
            dfreqB = dfreq * math.sqrt((scipy.constants.c - velT) / (scipy.constants.c + velT))

            header["CTYPE%d" % axis] = "FREQ"
            header["CUNIT%d" % axis] = "Hz"
            header["CRVAL%d" % axis] = freqB
            header["CDELT%d" % axis] = dfreqB

    ## GIPSY headers seem to contain the unit "DEGREE" for RA/Dec
    ## WCS lib does not like that
    for key in header:
        if "CUNIT" in key and header[key] == "DEGREE":
            header[key] = "deg"

    err.message("Header repaired successfully.")
    return header
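# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of SoFiA): repairing a minimal GIPSY-
# style header with an optical heliocentric frequency axis. The helper name
# _demo_fix_gipsy_header is hypothetical and is never executed on import; it
# assumes the module-level imports of this file (fits, math, scipy, err).
# ---------------------------------------------------------------------------
def _demo_fix_gipsy_header():
    hdr = fits.Header()
    hdr["NAXIS"] = 3
    hdr["CTYPE1"], hdr["CUNIT1"] = "RA---NCP", "DEGREE"
    hdr["CTYPE2"], hdr["CUNIT2"] = "DEC--NCP", "DEGREE"
    hdr["CTYPE3"], hdr["CUNIT3"] = "FREQ-OHEL", "HZ"   # GIPSY optical-heliocentric axis
    hdr["CRVAL3"], hdr["CDELT3"] = 1.415e+9, 1.0e+5    # topocentric frequency in Hz
    hdr["VELR"] = 1.0e+6                               # reference velocity in m/s
    hdr["FREQ0"] = 1.420405752e+9                      # rest frequency in Hz
    fixed = fix_gipsy_header(hdr)
    # The spectral axis is now a plain barycentric FREQ axis in Hz,
    # and the "DEGREE" units have been replaced by "deg".
    return fixed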
def smooth(indata, kernel, edgeMode, kernelX, kernelY, kernelZ): """ Smooth a data cube with the specified kernel type and size. Arguments: indata: The input data cube. kernel: The smoothing kernel; "gaussian", "boxcar" or "median". edgeMode: Determines how borders are handled; "reflect", "constant", "nearest", "mirror" or "wrap". kernelX/Y/Z: Size of the kernel (standard deviation in the case of a Gaussian kernel). Returns: Smoothed copy of the data cube. """ err.message("Smoothing data cube") # Sanity checks of user input err.ensure( kernel in {"gaussian", "boxcar", "median"}, "Smoothing failed. Illegal smoothing type: '" + str(kernel) + "'.") err.ensure( edgeMode in {"reflect", "constant", "nearest", "mirror", "wrap"}, "Smoothing failed. Illegal edge mode: '" + str(edgeMode) + "'.") err.ensure(kernelX or kernelY or kernelZ, "Smoothing failed. All smoothing kernels are zero.") err.ensure( kernel != "median" or (kernelX and kernelY and kernelZ), "Smoothing failed. Cannot determine median for kernel size of zero.") # Print some information err.message(" Kernel type: " + str(kernel).title()) err.message(" Kernel size: [" + str(kernelX) + ", " + str(kernelY) + ", " + str(kernelZ) + "]") err.message(" Edge mode: " + str(edgeMode)) # Create copy of input cube to be smoothed outdata = np.copy(indata) # Remove NaNs (and INFs) if necessary found_nan = np.isnan(indata).sum() if found_nan: outdata = np.nan_to_num(outdata) # Smooth with the selected kernel if kernel == "gaussian": outdata = ndimage.filters.gaussian_filter(outdata, sigma=(kernelZ, kernelX, kernelY), mode=edgeMode) elif kernel == "boxcar": outdata = ndimage.filters.uniform_filter(outdata, size=(kernelZ, kernelX, kernelY), mode=edgeMode) else: # kernel == "median" outdata = ndimage.filters.median_filter(outdata, size=(kernelZ, kernelX, kernelY), mode=edgeMode) # Put NaNs back in if necessary if found_nan: outdata[np.isnan(indata)] = np.nan return outdata
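# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of SoFiA): Gaussian smoothing of a
# noisy cube that contains blanked (NaN) voxels. The helper name
# _demo_smooth is hypothetical and is never executed on import; it assumes
# the module-level imports of this file (np, ndimage, err) are available.
# ---------------------------------------------------------------------------
def _demo_smooth():
    rng = np.random.RandomState(4)
    cube = rng.normal(0.0, 1.0, (10, 40, 40))
    cube[:, 5:8, 5:8] = np.nan  # blanked region
    # Sigma of 2 pixels spatially and 1 channel spectrally
    smoothed = smooth(cube, kernel="gaussian", edgeMode="constant", kernelX=2.0, kernelY=2.0, kernelZ=1.0)
    # NaN voxels of the input remain NaN in the output
    assert np.isnan(smoothed[0, 6, 6])
    return smoothed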
def write_catalog_from_array(mode, objects, catHeader, catUnits, catFormat, parList, outName, flagCompress, flagOverwrite, flagUncertainties): # Check output format and compression availableModes = ["ASCII", "XML", "SQL"] if mode not in availableModes: err.warning("Unknown catalogue format: " + str(mode) + ". Defaulting to ASCII.") mode = "ASCII" modeIndex = availableModes.index(mode) if flagCompress: outName += ".gz" err.message("Writing " + availableModes[modeIndex] + " catalogue: " + outName + ".") # Exit if file exists and overwrite flag is set to false func.check_overwrite(outName, flagOverwrite, fatal=True) # Do we need to write all parameters? if parList == ["*"] or not parList: parList = list(catHeader) # Remove undefined parameters parList = [item for item in parList if item in catHeader] # Remove statistical uncertainties if not requested if not flagUncertainties: for item in ["err_x", "err_y", "err_z", "err_w20", "err_w50"]: while item in parList: parList.remove(item) # Check whether there is anything left if not len(parList): err.error( "No valid output parameters selected. No output catalogue written.", fatal=False) return # Create and write catalogue in requested format # ------------------------------------------------------------------------- if mode == "XML": # Define basic XML header information votable = Element("VOTABLE") resource = SubElement(votable, "RESOURCE", name="SoFiA catalogue (version %s)" % sofia_version) description = SubElement(resource, "DESCRIPTION") description.text = "Source catalogue from the Source Finding Application (SoFiA) version %s" % sofia_version coosys = SubElement(resource, "COOSYS", ID="J2000") table = SubElement(resource, "TABLE", ID="sofia_cat", name="sofia_cat") # Load list of parameters and unified content descriptors (UCDs) ucdList = {} fileUcdPath = os.environ["SOFIA_PIPELINE_PATH"] fileUcdPath = fileUcdPath.replace("sofia_pipeline.py", "SoFiA_source_parameters.dat") try: with open(fileUcdPath) as fileUcd: for line in fileUcd: (key, value) = line.split() ucdList[key] = value except: err.warning("Failed to read UCD file.") # Create parameter fields for par in parList: ucdEntity = ucdList[par] if par in ucdList else "" index = list(catHeader).index(par) if catFormat[index] == "%30s": field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="char", arraysize="30", unit=catUnits[index]) else: field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="float", unit=catUnits[index]) # Create data table entries data = SubElement(table, "DATA") tabledata = SubElement(data, "TABLEDATA") for obj in objects: tr = SubElement(tabledata, "TR") for par in parList: td = SubElement(tr, "TD") index = list(catHeader).index(par) td.text = (catFormat[index] % obj[index]).strip() # Write XML catalogue: try: f1 = gzopen(outName, "wb") if flagCompress else open(outName, "w") except: err.error("Failed to write to XML catalogue: " + outName + ".", fatal=False) return f1.write(prettify(votable)) #f1.write(tostring(votable, "utf-8")) // without prettifying, which is faster and uses much less memory f1.close # -----------------------------------------------------------------End-XML- elif mode == "SQL": # Record if there is an ID column in the catalogue # (if no ID is present, we will later create one for use as primary key) noID = "id" not in parList # Write some header information: content = "-- SoFiA catalogue (version %s)\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\n\n" % sofia_version # Construct and write table structure: flagProgress = 
False content += "CREATE TABLE IF NOT EXISTS `SoFiA-Catalogue` (\n" if noID: content += " `id` INT NOT NULL,\n" for par in parList: index = list(catHeader).index(par) if flagProgress: content += ",\n" content += " " + sqlHeaderItem(par) + sqlFormat(catFormat[index]) flagProgress = True content += ",\n PRIMARY KEY (`id`),\n KEY (`id`)\n) DEFAULT CHARSET=utf8 COMMENT=\'SoFiA source catalogue\';\n\n" # Insert data: flagProgress = False content += "INSERT INTO `SoFiA-Catalogue` (" if noID: content += "`id`, " for par in parList: if flagProgress: content += ", " content += sqlHeaderItem(par) flagProgress = True content += ") VALUES\n" source_count = 0 for obj in objects: flagProgress = False source_count += 1 content += "(" if noID: content += str(source_count) + ", " for par in parList: index = list(catHeader).index(par) if flagProgress: content += ", " content += sqlDataItem(obj[index], catFormat[index]) flagProgress = True if (source_count < len(objects)): content += "),\n" else: content += ");\n" # Write catalogue try: fp = gzopen(outName, "wb") if flagCompress else open(outName, "w") except: err.error("Failed to write to SQL catalogue: " + outName + ".", fatal=False) return fp.write(content) fp.close() # -----------------------------------------------------------------End-SQL- else: # mode == "ASCII" by default # Determine header sizes based on variable-length formatting lenCathead = [] for j in catFormat: lenCathead.append( int( j.split("%")[1].split("e")[0].split("f")[0].split("i") [0].split("d")[0].split(".")[0].split("s")[0]) + 1) # Create header headerName = "" headerUnit = "" headerCol = "" outFormat = "" colCount = 0 header = "SoFiA catalogue (version %s)\n" % sofia_version for par in parList: index = list(catHeader).index(par) headerName += catHeader[index].rjust(lenCathead[index]) headerUnit += catUnits[index].rjust(lenCathead[index]) headerCol += ("(%i)" % (colCount + 1)).rjust(lenCathead[index]) outFormat += catFormat[index] + " " colCount += 1 header += headerName[3:] + '\n' + headerUnit[3:] + '\n' + headerCol[3:] # Create catalogue outObjects = [] for obj in objects: outObjects.append([]) for par in parList: outObjects[-1].append(obj[list(catHeader).index(par)]) # Write ASCII catalogue try: np.savetxt(outName, np.array(outObjects, dtype=object), fmt=outFormat, header=header) except: err.error("Failed to write to ASCII catalogue: " + outName + ".", fatal=False) return # ---------------------------------------------------------------End-ASCII- return
def dilate(cube, mask, objects, cathead, Parameters):
    dilateThreshold = Parameters["parameters"]["dilateThreshold"]
    dilatePixMax = Parameters["parameters"]["dilatePixMax"]
    dilateChanMax = Parameters["parameters"]["dilateChanMax"]

    # Stop dilating when (flux_new - flux_old) / flux_new < dilateThreshold
    sourceIDs = np.unique(mask)
    # Remove first element, which should be zero
    if sourceIDs[0] == 0:
        sourceIDs = np.delete(sourceIDs, 0)

    for i in range(0, len(sourceIDs)):
        obj = objects[i]
        xmin = max(0, obj[list(cathead).index("x_min")] - dilatePixMax)
        xmax = min(cube.shape[2] - 1, obj[list(cathead).index("x_max")] + dilatePixMax)
        ymin = max(0, obj[list(cathead).index("y_min")] - dilatePixMax)
        ymax = min(cube.shape[1] - 1, obj[list(cathead).index("y_max")] + dilatePixMax)
        zmin = max(0, obj[list(cathead).index("z_min")] - dilateChanMax)
        zmax = min(cube.shape[0] - 1, obj[list(cathead).index("z_max")] + dilateChanMax)
        [zmin, zmax, ymin, ymax, xmin, xmax] = map(int, [zmin, zmax, ymin, ymax, xmin, xmax])

        objcube = cube[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        objmask = mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        allmask = mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        otherobjs = (allmask > 0) * (allmask != sourceIDs[i])

        if (otherobjs).sum():
            # Ensure that objects with different source IDs within dilatePixMax, dilateChanMax
            # are not included in the flux growth calculation
            err.warning("Object {0:d} has possible overlapping objects within {1:d} pix, {2:d} chan.".format(int(sourceIDs[i]), dilatePixMax, dilateChanMax))
            objcube[(allmask > 0) * (allmask != sourceIDs[i])] = 0

        fluxes = []

        # Loop through Z dilation kernels until the flux converges or the maximum allowed Z dilation is reached
        for dilchan in range(dilateChanMax + 1):
            dd = dilchan * 2 + 1
            dilstruct = np.ones((dd, 1, 1))
            fluxes.append(objcube[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct)].sum())
            if dilchan > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
                dilchan -= 1
                break
        # Pick the best Z dilation kernel for the current object and update the mask
        dd = dilchan * 2 + 1
        dilstruct = np.ones((dd, 1, 1))
        # Only grow the mask of object sourceIDs[i], even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct).astype(int) == 1] = sourceIDs[i]

        # Loop through XY dilation kernels until the flux converges or the maximum allowed XY dilation is reached
        for dilpix in range(dilatePixMax + 1):
            dd = dilpix * 2 + 1
            dilstruct = (np.sqrt(((np.indices((dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
            dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
            fluxes.append(objcube[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct)].sum())
            if dilpix > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
                dilpix -= 1
                break
        # Pick the best XY dilation kernel for the current object and update the mask
        dd = dilpix * 2 + 1
        dilstruct = (np.sqrt(((np.indices((dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
        dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
        # Only grow the mask of object sourceIDs[i], even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == sourceIDs[i], structure=dilstruct).astype(int) == 1] = sourceIDs[i]

        err.message("Mask of source {0:d} dilated by {2:d} chan and then by {1:d} pix.".format(int(sourceIDs[i]), dilpix, dilchan))

        # Put back into objmask the objects != sourceIDs[i] that may have been inside objmask before
        # dilation or may have been temporarily replaced by the dilated object sourceIDs[i]
        if (otherobjs).sum():
            objmask[otherobjs] = allmask[otherobjs]
        mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1] = objmask

        # Update n_pix, x_geo and n_chan
        n_pix = objmask[objmask == sourceIDs[i]].sum() / sourceIDs[i]
        ind = np.vstack(np.where(objmask == sourceIDs[i]))
        cgeo = (ind.sum(axis=1)).astype(float) / float(n_pix)
        x_geo, y_geo, z_geo = cgeo[2] + xmin, cgeo[1] + ymin, cgeo[0] + zmin
        zmin, zmax = min(ind[0]), max(ind[0]) + 1
        n_chan = zmax - zmin

        # Update n_los
        objmask[objmask != sourceIDs[i]] = 0
        maskSumA0 = objmask.sum(axis=0)
        maskSumA0[maskSumA0 > 1] = 1
        n_los = maskSumA0.sum()

        del objcube
        del objmask
        del allmask
        del otherobjs

        objects[i, list(cathead).index("x_min")] = max(0, obj[list(cathead).index("x_min")] - dilpix)
        objects[i, list(cathead).index("x_max")] = min(cube.shape[2] - 1, obj[list(cathead).index("x_max")] + dilpix)
        objects[i, list(cathead).index("y_min")] = max(0, obj[list(cathead).index("y_min")] - dilpix)
        objects[i, list(cathead).index("y_max")] = min(cube.shape[1] - 1, obj[list(cathead).index("y_max")] + dilpix)
        objects[i, list(cathead).index("z_min")] = max(0, obj[list(cathead).index("z_min")] - dilchan)
        objects[i, list(cathead).index("z_max")] = min(cube.shape[0] - 1, obj[list(cathead).index("z_max")] + dilchan)
        objects[i, list(cathead).index("n_pix")] = n_pix
        objects[i, list(cathead).index("n_chan")] = n_chan
        objects[i, list(cathead).index("n_los")] = n_los
        objects[i, list(cathead).index("x_geo")] = x_geo
        objects[i, list(cathead).index("y_geo")] = y_geo
        objects[i, list(cathead).index("z_geo")] = z_geo

    return mask, objects
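# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the SoFiA pipeline): the flux-growth test
# used by dilate() above, reduced to a single 2-D source. The source mask is
# dilated with increasingly large discs until the relative flux increase drops
# below a convergence threshold; all data below are synthetic.
def _demo_mask_dilation(threshold=0.02, max_radius=5):
    import numpy as np
    from scipy import ndimage as nd
    # Synthetic image: a circular Gaussian blob
    y, x = np.indices((51, 51))
    image = np.exp(-((x - 25)**2 + (y - 25)**2) / (2.0 * 4.0**2))
    seed_mask = image > 0.5          # initial (deliberately too small) source mask
    fluxes = [image[seed_mask].sum()]
    for radius in range(1, max_radius + 1):
        dd = 2 * radius + 1
        disc = np.sqrt(((np.indices((dd, dd)) - radius)**2).sum(axis=0)) <= radius
        grown = nd.binary_dilation(seed_mask, structure=disc)
        fluxes.append(image[grown].sum())
        # Same convergence criterion as in dilate(): stop once the relative
        # flux growth falls below the threshold
        if (fluxes[-1] - fluxes[-2]) / fluxes[-1] < threshold:
            radius -= 1
            break
    return radius, fluxes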
def import_data(doSubcube, inFile, weightsFile, maskFile, weightsFunction=None, subcube=[], subcubeMode="pixel", doFlag=False, flagRegions=False, flagFile="", cubeOnly=False):
    # Basic sanity checks on user input
    err.ensure(os.path.isfile(inFile), "Data file not found:\n  " + str(inFile))

    # -------------------------------
    # Open input cube and read header
    # -------------------------------
    err.message("Loading input data cube.")
    try:
        f = fits.open(inFile, mode="readonly", memmap=False, do_not_scale_image_data=False)
        header = f[0].header
    except:
        err.error("Failed to load primary HDU of data file:\n  " + str(inFile))

    # Extract axis sizes and types
    n_axes, axis_size, axis_type = extract_axis_size(header)

    # Check dimensionality of data cube
    check_cube_dimensions(n_axes, axis_size, cube_name="data cube")

    # Print some information
    err.message("  Data cube has {0:d} axes.".format(header["NAXIS"]))
    err.message("    Types: " + str(axis_type))
    err.message("    Sizes: " + str(axis_size))

    # Extract subcube boundaries if requested
    if len(subcube):
        subcube = get_subcube_range(header, n_axes, axis_size, subcube, subcubeMode)
    else:
        subcube = []

    # --------------------------------
    # Read requested subregion of data
    # --------------------------------
    # 2-D image
    if n_axes == 2:
        fullshape = [axis_size[1], axis_size[0]]
        if len(subcube):
            data = np.array([f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
            header["CRPIX1"] -= subcube[0]
            header["CRPIX2"] -= subcube[2]
            header["NAXIS1"] = subcube[1] - subcube[0]
            header["NAXIS2"] = subcube[3] - subcube[2]
        else:
            data = np.array([f[0].data])
    # 3-D cube
    elif n_axes == 3:
        fullshape = [axis_size[2], axis_size[1], axis_size[0]]
        if len(subcube):
            data = f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
            header["CRPIX1"] -= subcube[0]
            header["CRPIX2"] -= subcube[2]
            header["CRPIX3"] -= subcube[4]
            header["NAXIS1"] = subcube[1] - subcube[0]
            header["NAXIS2"] = subcube[3] - subcube[2]
            header["NAXIS3"] = subcube[5] - subcube[4]
        else:
            data = f[0].data
    # 4-D hypercube
    else:
        fullshape = [axis_size[2], axis_size[1], axis_size[0]]
        if len(subcube):
            data = f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
            header["CRPIX1"] -= subcube[0]
            header["CRPIX2"] -= subcube[2]
            header["CRPIX3"] -= subcube[4]
            header["NAXIS1"] = subcube[1] - subcube[0]
            header["NAXIS2"] = subcube[3] - subcube[2]
            header["NAXIS3"] = subcube[5] - subcube[4]
        else:
            data = f[0].section[0]

    # Close input cube
    f.close()
    err.message("Input data cube loaded.")

    # ---------------------------------------------------------
    # If no additional actions required, return data and header
    # ---------------------------------------------------------
    if cubeOnly:
        return data, header

    # ---------------------------------------------------
    # Otherwise carry out additional actions as requested
    # ---------------------------------------------------
    # Weighting
    if weightsFile:
        data = apply_weights_file(data, weightsFile, subcube)
    elif weightsFunction:
        data = apply_weights_function(data, weightsFunction)

    # Flagging
    if doFlag:
        data = apply_flagging(data, flagFile, flagRegions, subcube)

    # Masking
    if maskFile:
        # Pass the full cube dimensions (fullshape) on so that the mask import
        # can compare the mask against the full cube size as well.
        mask = import_mask(maskFile, header, axis_size, fullshape, subcube)
    else:
        # Create an empty mask if none is provided.
        mask = np.zeros(data.shape, dtype=bool)

    return data, header, mask, subcube
def import_mask(maskFile, header, axis_size, fullshape, subcube):
    err.message("Loading mask cube:\n  " + str(maskFile))
    try:
        f = fits.open(maskFile, memmap=False)
        header_mask = f[0].header
    except:
        err.error("Failed to read mask cube.")

    # Extract axis sizes and types
    n_axes_mask, axis_size_mask, axis_type_mask = extract_axis_size(header_mask)

    # Ensure correct dimensionality
    check_cube_dimensions(n_axes_mask, axis_size_mask, cube_name="mask cube", min_dim=1, max_dim=4)

    # 1-D spectrum
    if n_axes_mask == 1:
        err.warning("Mask cube has 1 axis; interpreted as spectrum.\nAdding first and second axis.")
        err.ensure(header_mask["CRVAL1"] == header["CRVAL1"], "Input cube and mask are not on the same WCS grid.")
        if len(subcube) == 6:
            if header_mask["NAXIS1"] == axis_size[2]:
                err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
                mask = np.reshape(f[0].data, (-1, 1, 1))
            elif header_mask["NAXIS1"] == fullshape[0]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = np.reshape(f[0].section[subcube[4]:subcube[5]], (-1, 1, 1))
            else:
                err.error("Data subcube does not match size of mask subcube or full mask.")
        elif not len(subcube):
            mask = np.reshape(f[0].data, (-1, 1, 1))
        else:
            err.error("The subcube list must have 6 entries ({0:d} given).".format(len(subcube)))
    # 2-D image
    elif n_axes_mask == 2:
        err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"], "Input cube and mask are not on the same WCS grid.")
        if len(subcube) == 6 or len(subcube) == 4:
            if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1]:
                err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
                mask = np.array([f[0].data])
            elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = np.array([f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
            else:
                err.error("Data subcube does not match size of mask subcube or full mask.")
        else:
            mask = np.array([f[0].data])
    # 3-D cube
    elif n_axes_mask == 3:
        err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"] and header_mask["CRVAL3"] == header["CRVAL3"], "Input cube and mask are not on the same WCS grid.")
        if len(subcube) == 6:
            if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1] and header_mask["NAXIS3"] == axis_size[2]:
                err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
                mask = f[0].data
            elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1] and header_mask["NAXIS3"] == fullshape[0]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
            else:
                err.error("Data subcube does not match size of mask subcube or full mask.")
        else:
            mask = f[0].data
    # 4-D hypercube
    else:
        err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"] and header_mask["CRVAL3"] == header["CRVAL3"], "Input cube and mask are not on the same WCS grid.")
        if len(subcube) == 6:
            if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1] and header_mask["NAXIS3"] == axis_size[2]:
                err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
                mask = f[0].section[0]
            elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1] and header_mask["NAXIS3"] == fullshape[0]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
            else:
                err.error("Data subcube does not match size of mask subcube or full mask.")
        else:
            mask = f[0].section[0]

    mask[mask > 0] = 1
    f.close()
    err.message("Mask cube loaded.")

    # In all cases, convert mask to Boolean with masked pixels set to 1.
    return (mask > 0).astype(bool)
def SCfinder_mem(cube, mask, header, t0, kernels=[[0, 0, 0, "b"],], threshold=3.5, sizeFilter=0, maskScaleXY=2.0, maskScaleZ=2.0, kernelUnit="pixel", edgeMode="constant", rmsMode="negative", fluxRange="all", verbose=0, perSCkernel=False, scaleX=False, scaleY=False, scaleZ=True, edgeX=0, edgeY=0, edgeZ=0, method="1d2d", windowSpatial=20, windowSpectral=20, gridSpatial=0, gridSpectral=0, interpolation="none"):
    # Define a few constants
    FWHM_CONST = 2.0 * math.sqrt(2.0 * math.log(2.0))  # Conversion between sigma and FWHM of Gaussian function
    MAX_PIX_CONST = 1.0e+6                              # Maximum number of pixels for noise calculation; sampling is set accordingly

    # Check for NaN in cube
    found_nan = np.isnan(cube).any()

    # Set sampling sampleRms for rms measurement
    sampleRms = max(1, int((float(cube.size) / MAX_PIX_CONST)**(1.0 / min(3, len(cube.shape)))))

    # Measure noise in original cube with sampling "sampleRms"
    rms = GetRMS(cube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms)

    # Loop over all kernels
    for kernel in kernels:
        [kx, ky, kz, kt] = kernel
        if verbose:
            err.linebreak()
            err.print_progress_time(t0)
        err.message("    Filter {0:} {1:} {2:} {3:} ...".format(kx, ky, kz, kt))
        if kernelUnit == "world" or kernelUnit == "w":
            if verbose:
                err.message("    Converting filter size to pixels ...")
            kx = abs(float(kx) / header["CDELT1"])
            ky = abs(float(ky) / header["CDELT2"])
            kz = abs(float(kz) / header["CDELT3"])
        if kt == "b":
            if kz != int(math.ceil(kz)) and verbose:
                err.warning("Rounding width of boxcar z kernel to next integer.")
            kz = int(math.ceil(kz))

        # Create a copy of the original cube
        cube_smooth = np.copy(cube)

        # Replace all NaNs with zero
        if found_nan:
            cube_smooth[np.isnan(cube)] = 0.0

        cube_smooth[(cube_smooth > 0) & (mask > 0)] = maskScaleXY * rms
        cube_smooth[(cube_smooth < 0) & (mask > 0)] = -maskScaleXY * rms

        # Spatial smoothing
        if kx + ky:
            cube_smooth = ndimage.filters.gaussian_filter(cube_smooth, [0, ky / FWHM_CONST, kx / FWHM_CONST], mode=edgeMode)

        # Spectral smoothing
        if kz:
            if kt == "b":
                cube_smooth = ndimage.filters.uniform_filter1d(cube_smooth, kz, axis=0, mode=edgeMode)
            elif kt == "g":
                cube_smooth = ndimage.filters.gaussian_filter1d(cube_smooth, kz / FWHM_CONST, axis=0, mode=edgeMode)

        # Re-insert the NaNs taken out earlier
        if found_nan:
            cube_smooth[np.isnan(cube)] = np.nan

        # Per-kernel noise normalisation (time consuming!)
        if perSCkernel:
            cube_smooth, noise_smooth = sigma_scale(cube_smooth, scaleX=scaleX, scaleY=scaleY, scaleZ=scaleZ, edgeX=edgeX, edgeY=edgeY, edgeZ=edgeZ, statistic=rmsMode, fluxRange=fluxRange, method=method, windowSpatial=windowSpatial, windowSpectral=windowSpectral, gridSpatial=gridSpatial, gridSpectral=gridSpectral, interpolation=interpolation)

        # Calculate the RMS of the smoothed (possibly normalised) cube
        rms_smooth = GetRMS(cube_smooth, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms)

        # Add pixels above threshold to mask by setting bit 1
        err.message("    Applying +/- {0:} sigma detection threshold".format(threshold))
        with np.errstate(invalid="ignore"):
            mask |= (np.absolute(cube_smooth) >= threshold * rms_smooth)

        # Delete smoothed cube again
        del cube_smooth
    return
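# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the SoFiA pipeline): the core of the
# smooth + clip operation performed by SCfinder_mem() above, reduced to a
# single 1-D spectrum and one Gaussian kernel. The kernel width is given as a
# FWHM and converted to the sigma expected by SciPy, as in SCfinder_mem();
# a simple MAD estimator stands in for GetRMS(), and the data are synthetic.
def _demo_smooth_and_clip(fwhm_channels=5.0, threshold=3.5, seed=0):
    import math
    import numpy as np
    from scipy import ndimage
    FWHM_CONST = 2.0 * math.sqrt(2.0 * math.log(2.0))   # FWHM = FWHM_CONST * sigma
    rng = np.random.default_rng(seed)
    spectrum = rng.normal(0.0, 1.0, 1024)
    spectrum[500:520] += 2.0                             # weak, broad synthetic "line"
    smoothed = ndimage.gaussian_filter1d(spectrum, fwhm_channels / FWHM_CONST)
    # Robust noise estimate of the smoothed spectrum (median absolute deviation)
    rms = 1.4826 * np.median(np.abs(smoothed - np.median(smoothed)))
    # Flag |data| >= threshold * rms, the same clipping rule used above
    detection_mask = np.absolute(smoothed) >= threshold * rms
    return detection_mask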
def add_wcs_coordinates(objects, catParNames, catParFormt, catParUnits, Parameters):
    try:
        hdulist = fits.open(Parameters["import"]["inFile"])
        header = hdulist[0].header
        hdulist.close()

        # Fix headers where "per second" is written "/S" instead of "/s"
        # (assuming they mean "per second" and not "per Siemens").
        if "cunit3" in header and "/S" in header["cunit3"]:
            err.warning("Converting '/S' to '/s' in CUNIT3.")
            header["cunit3"] = header["cunit3"].replace("/S", "/s")

        # Check if there is an Nmap/GIPSY FITS header keyword value present
        gipsyKey = [k for k in ["FREQ-OHEL", "FREQ-OLSR", "FREQ-RHEL", "FREQ-RLSR"] if (k in [header[key] for key in header if ("CTYPE" in key)])]
        if gipsyKey:
            err.message("GIPSY header found. Trying to convert to FITS standard.")
            from astropy.wcs import Wcsprm
            header = fix_gipsy_header(header)
            wcsin = Wcsprm(str(header))
            wcsin.sptr("VOPT-F2W")
            objects = np.concatenate((objects, wcsin.p2s(objects[:, catParNames.index("x"):catParNames.index("x") + 3], 0)["world"]), axis=1)
            catParUnits = tuple(list(catParUnits) + [str(cc).replace(" ", "") for cc in wcsin.cunit])
            catParNames = tuple(list(catParNames) + [(cc.split("--")[0]).lower() for cc in wcsin.ctype])
            catParFormt = tuple(list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])
        else:
            # Constrain the RA axis reference value CRVAL_ to be between 0 and 360 deg
            rafound = 0
            for kk in range(header["naxis"]):
                if header["ctype%i" % (kk + 1)][:2] == "RA":
                    rafound = 1
                    break
            if rafound:
                if header["crval%i" % (kk + 1)] < 0:
                    err.warning("Adding 360 deg to RA reference value.")
                    header["crval%i" % (kk + 1)] += 360
                elif header["crval%i" % (kk + 1)] > 360:
                    err.warning("Subtracting 360 deg from RA reference value.")
                    header["crval%i" % (kk + 1)] -= 360

            wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL])
            xyz = objects[:, catParNames.index("x"):catParNames.index("x") + 3].astype(float)
            if "cellscal" in header and header["cellscal"] == "1/F":
                err.warning("CELLSCAL keyword with value of 1/F found.\nWill account for varying pixel scale in WCS coordinate calculation.")
                x0, y0 = header["crpix1"] - 1, header["crpix2"] - 1
                # Calculate the pixscale factor of each channel as:
                # pixscale = ref_frequency / frequency
                if header["ctype3"] == "VELO-HEL":
                    pixscale = (1 - header["crval3"] / scipy.constants.c) / (1 - (((xyz[:, 2] + 1) - header["crpix3"]) * header["cdelt3"] + header["crval3"]) / scipy.constants.c)
                else:
                    err.warning("Cannot convert 3rd axis coordinates to frequency.\nIgnoring the effect of CELLSCAL = 1/F.")
                    pixscale = 1.0
                xyz[:, 0] = (xyz[:, 0] - x0) * pixscale + x0
                xyz[:, 1] = (xyz[:, 1] - y0) * pixscale + y0
            objects = np.concatenate((objects, wcsin.wcs_pix2world(xyz, 0)), axis=1)
            catParUnits = tuple(list(catParUnits) + [str(cc).replace(" ", "") for cc in wcsin.wcs.cunit])
            catParNames = tuple(list(catParNames) + [(cc.split("--")[0]).lower() for cc in wcsin.wcs.ctype])
            catParFormt = tuple(list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])

        err.message("WCS coordinates added to catalogue.")

        # Create IAU-compliant source name:
        # WARNING: This currently assumes a regular, >= 2-dim. data cube where the first two axes are longitude and latitude.
        n_src = objects.shape[0]
        n_par = objects.shape[1]
        iau_names = np.empty([n_src, 1], dtype=object)
        if header["ctype1"][:4] == "RA--":
            # Equatorial coordinates; try to figure out equinox:
            iau_coord = "equ"
            if "equinox" in header:
                if int(header["equinox"]) >= 2000:
                    iau_equinox = "J"
                else:
                    iau_equinox = "B"
            elif "epoch" in header:
                # Assume that EPOCH has been abused to record the equinox:
                if int(header["epoch"]) >= 2000:
                    iau_equinox = "J"
                else:
                    iau_equinox = "B"
            else:
                # Equinox undefined:
                iau_equinox = "X"
        elif header["ctype1"][:4] == "GLON":
            # Galactic coordinates:
            iau_coord = "gal"
            iau_equinox = "G"
        else:
            # Unsupported coordinate system:
            iau_coord = ""
            iau_equinox = ""

        for src in range(n_src):
            lon = objects[src][n_par - 3]
            lat = objects[src][n_par - 2]
            if iau_coord == "equ":
                ra = Longitude(lon, unit=u.deg)
                dec = Latitude(lat, unit=u.deg)
                iau_pos = ra.to_string(unit=u.h, decimal=False, sep="", precision=2, alwayssign=False, pad=True, fields=3)
                iau_pos += dec.to_string(unit=u.deg, decimal=False, sep="", precision=1, alwayssign=True, pad=True, fields=3)
            else:
                iau_pos = "{0:08.4f}".format(lon)
                if lat < 0.0:
                    iau_pos += "-"
                else:
                    iau_pos += "+"
                iau_pos += "{0:07.4f}".format(abs(lat))
            iau_names[src][0] = "SoFiA " + iau_equinox + iau_pos

        objects = np.concatenate((objects, iau_names), axis=1)
        catParUnits = tuple(list(catParUnits) + ["-"])
        catParNames = tuple(list(catParNames) + ["name"])
        catParFormt = tuple(list(catParFormt) + ["%30s"])
    except:
        err.warning("WCS conversion of parameters failed.")

    return (objects, catParNames, catParFormt, catParUnits)
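# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the SoFiA pipeline): the pixel-to-world
# conversion that add_wcs_coordinates() applies to the (x, y, z) source
# centroids. The WCS below is a minimal, made-up 3-D description; the real
# code builds the WCS from the input cube's header instead.
def _demo_pix2world():
    import numpy as np
    from astropy import wcs
    w = wcs.WCS(naxis=3)
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", "FREQ"]
    w.wcs.crval = [150.0, 2.0, 1.42e+9]
    w.wcs.crpix = [50.0, 50.0, 1.0]
    w.wcs.cdelt = [-1.5e-3, 1.5e-3, 1.0e+4]
    # Three made-up source centroids in 0-based (x, y, z) pixel coordinates,
    # in the same column order used by add_wcs_coordinates()
    xyz = np.array([[49.0, 49.0, 0.0],
                    [10.5, 80.2, 120.0],
                    [75.0, 20.0, 300.0]])
    world = w.wcs_pix2world(xyz, 0)    # columns: RA [deg], Dec [deg], frequency [Hz]
    return world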
def EstimateRel(data, pdfoutname, parNames, parSpace=["snr_sum", "snr_max", "n_pix"], logPars=[1, 1, 1], autoKernel=True, scaleKernel=1, negPerBin=1, skellamTol=-0.5, kernel=[0.15, 0.05, 0.1], usecov=False, doscatter=1, docontour=1, doskellam=1, dostats=0, saverel=1, threshold=0.99, fMin=0, verb=0, makePlot=False): # Always work on logarithmic parameter values; the reliability.logPars parameter should be removed if 0 in logPars: err.warning(" Setting all reliability.logPars entries to 1. This parameter is no longer editable by users.") logPars=[1 for pp in parSpace] # Import Matplotlib if diagnostic plots requested if makePlot: import matplotlib # The following line is necessary to run SoFiA remotely matplotlib.use("Agg") import matplotlib.pyplot as plt # -------------------------------- # Build array of source parameters # -------------------------------- idCOL = parNames.index("id") ftotCOL = parNames.index("snr_sum") fmaxCOL = parNames.index("snr_max") fminCOL = parNames.index("snr_min") # Get columns of requested parameters parCol = [] for ii in range(len(parSpace)): parCol.append(parNames.index(parSpace[ii])) # Get position and number of positive and negative sources pos = data[:, ftotCOL] > 0 neg = data[:, ftotCOL] <= 0 Npos = pos.sum() Nneg = neg.sum() err.ensure(Npos, "No positive sources found; cannot proceed.") err.ensure(Nneg, "No negative sources found; cannot proceed.") # Get array of relevant source parameters (and take log of them if requested) ids = data[:,idCOL] pars = np.empty((data.shape[0], 0)) for ii in range(len(parSpace)): if parSpace[ii] == "snr_max": parsTmp = data[:,fmaxCOL] * pos - data[:,fminCOL] * neg if logPars[ii]: parsTmp = np.log10(parsTmp) pars = np.concatenate((pars, parsTmp.reshape(-1, 1)), axis=1) elif parSpace[ii] == "snr_sum" or parSpace[ii] == "snr_mean": parsTmp = abs(data[:,parCol[ii]].reshape(-1, 1)) if logPars[ii]: parsTmp = np.log10(parsTmp) pars = np.concatenate((pars, parsTmp), axis=1) else: parsTmp = data[:,parCol[ii]].reshape(-1, 1) if logPars[ii]: parsTmp = np.log10(parsTmp) pars = np.concatenate((pars, parsTmp), axis=1) err.message(" Working in parameter space {0:}".format(str(parSpace))) err.message(" Will convolve the distribution of positive and negative sources in this space to derive the P and N density fields") pars = np.transpose(pars) # ---------------------------------------------------------- # Set parameters to work with and gridding/plotting for each # ---------------------------------------------------------- # Axis labels when plotting labs = [] for ii in range(len(parSpace)): labs.append("") if logPars[ii]: labs[ii] += "log " labs[ii] += parSpace[ii] # Axis limits when plotting pmin, pmax = pars.min(axis=1), pars.max(axis=1) pmin, pmax = pmin - 0.1 * (pmax - pmin), pmax + 0.1 * (pmax - pmin) lims = [[pmin[i], pmax[i]] for i in range(len(parSpace))] # Grid on which to evaluate Np and Nn in order to plot contours grid = [[pmin[i], pmax[i], 0.02 * (pmax[i] - pmin[i])] for i in range(len(parSpace))] # Calculate the number of rows and columns in figure projections = [subset for subset in combinations(range(len(parSpace)), 2)] nr = int(np.floor(np.sqrt(len(projections)))) nc = int(np.ceil(float(len(projections)) / nr)) # --------------------------------------- # Set smoothing kernel in parameter space # --------------------------------------- # If autoKernel is True, then the initial kernel is taken as a scaled version of the covariance matrix # of the negative sources. 
The kernel size along each axis is such that the number of sources per kernel # width (sigma**2) is equal to "negPerBin". Optionally, the user can decide to use only the diagonal # terms of the covariance matrix. The kernel is then grown until convergence is reached on the Skellam # plot. If autoKernel is False, then use the kernel given by "kernel" parameter (argument of EstimateRel); # this is sigma, and is squared to be consistent with the auto kernel above. if autoKernel: # Set the kernel shape to that of the variance or covariance matrix kernel = np.cov(pars[:, neg]) kernelType = "covariance" # Check if kernel matrix can be inverted try: np.linalg.inv(kernel) except: err.error( "The reliability cannot be calculated because the smoothing kernel\n" "derived from " + str(pars[:,neg].shape[1]) + " negative sources cannot be inverted.\n" "This is likely due to an insufficient number of negative sources.\n" "Try to increase the number of negative sources by changing the\n" "source finding and/or filtering settings.", fatal=True, frame=True) if np.isnan(kernel).sum(): err.error( "The reliability cannot be calculated because the smoothing kernel\n" "derived from " + str(pars[:,neg].shape[1]) + " negative sources contains NaNs.\n" "A good kernel is required to calculate the density field of positive\n" "and negative sources in parameter space.\n" "Try to increase the number of negative sources by changing the\n" "source finding and/or filtering settings.", fatal=True, frame=True) if not usecov: kernel = np.diag(np.diag(kernel)) kernelType = "variance" kernelIter = 0.0 deltplot = [] # Scale the kernel size as requested by the user (scaleKernel>0) or use the autoscale algorithm (scaleKernel=0) if scaleKernel>0: # Scale kernel size as requested by the user # Note that the scale factor is squared because users are asked to give a factor to apply to sqrt(kernel) kernel *= scaleKernel**2 err.message(" Using the {0:s} matrix scaled by a factor {1:.2f} as convolution kernel".format(kernelType, scaleKernel)) err.message(" The sqrt(kernel) size is:") err.message(" " + str(np.sqrt(np.abs(kernel)))) elif scaleKernel==0: # Scale kernel size to get started the kernel-growing loop # The scale factor for sqrt(kernel) is elevated to the power of 1.0 / len(parCol) err.message(" Will search for the best convolution kernel by scaling the {0:s} matrix".format(kernelType)) err.message(" The {0:s} matrix has sqrt:".format(kernelType)) err.message(" " + str(np.sqrt(np.abs(kernel)))) # negPerBin must be >=1 err.ensure(negPerBin>=1,"The parameter reliability.negPerBin used to start the convolution kernel search was set to {0:.1f} but must be >= 1. Please change your settings.".format(negPerBin)) kernel *= ((negPerBin + kernelIter) / Nneg)**(2.0 / len(parCol)) err.message(" Search starting from the kernel with sqrt:") err.message(" " + str(np.sqrt(np.abs(kernel)))) err.message(" Iteratively growing kernel until the distribution of (P-N)/sqrt(P+N) reaches median/width = {0:.2f} ...".format(skellamTol)) err.ensure(skellamTol<=0,"The parameter reliability.skellamTol was set to {0:.2f} but must be <= 0. 
Please change your settings.".format(skellamTol)) else: err.ensure(scaleKernel>=0,\ "The reliability.scaleKernel parameter cannot be negative.\n"\ "It should be = 0 if you want SoFiA to find the optimal kernel scaling\n"\ "or > 0 if you want to set the scaling yourself.\n"\ "Please change your settings.") #deltOLD=-1e+9 # Used to stop kernel growth if P-N stops moving closer to zero [NOT USED CURRENTLY] if doskellam and makePlot: fig0 = plt.figure() else: # Note that the user must give sigma, which then gets squared err.message(" Using user-defined variance kernel with sqrt(kernel) size: {0}".format(kernel)) err.ensure(len(parSpace)==len(kernel),"The number of entries in the kernel above does not match the number of parameters you requested for the reliability calculation.") kernel = np.identity(len(kernel)) * np.array(kernel)**2 # Set grow_kernel to 1 to start the kernel growing loop below. grow_kernel = 1 # This loop will estimate the reliability, check whether the kernel is large enough, # and if not pick a larger kernel. If autoKernel = 0 or scaleKernel = 0, we will do # just one pass (i.e., we will not grow the kernel). while grow_kernel: # ------------------------ # Evaluate N-d reliability # ------------------------ if verb: err.message(" estimate normalised positive and negative density fields ...") Np = gaussian_kde_set_covariance(pars[:,pos], kernel) Nn = gaussian_kde_set_covariance(pars[:,neg], kernel) # Calculate the number of positive and negative sources at the location of positive sources Nps = Np(pars[:,pos]) * Npos Nns = Nn(pars[:,pos]) * Nneg # Calculate the number of positive and negative sources at the location of negative sources nNps = Np(pars[:,neg]) * Npos nNns = Nn(pars[:,neg]) * Nneg # Calculate the reliability at the location of positive sources Rs = (Nps - Nns) / Nps # The reliability must be <= 1. If not, something is wrong. 
err.ensure(Rs.max() <= 1, "Maximum reliability greater than 1; something is wrong.\nPlease ensure that enough negative sources are detected\nand decrease your source finding threshold if necessary.", frame=True) # Find pseudo-reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0 # if threshold == 0; Rs may be < 0 because of insufficient statistics) # These are called pseudo-reliable because some objects may be discarded later based on additional criteria below pseudoreliable = np.maximum(Rs, 0) >= threshold # Find reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0 if # threshold == 0; Rs may be < 0 because of insufficient statistics) #reliable=(np.maximum(Rs, 0)>=threshold) * (data[pos, ftotCOL].reshape(-1,) > fMin) * (data[pos, fmaxCOL].reshape(-1,) > 4) reliable = (np.maximum(Rs, 0) >= threshold) * ((data[pos, ftotCOL] / np.sqrt(data[pos, parNames.index("n_pix")])).reshape(-1,) > fMin) if autoKernel: # Calculate quantities needed for comparison to Skellam distribution delt = (nNps - nNns) / np.sqrt(nNps + nNns) deltstd = delt.std() deltmed = np.median(delt) deltmin = delt.min() deltmax = delt.max() if deltmed / deltstd > -100 and doskellam and makePlot: plt.hist(delt / deltstd, bins=np.arange(deltmin / deltstd, max(5.1, deltmax / deltstd), 0.01), cumulative=True, histtype="step", color=(min(1, float(max(1.,negPerBin) + kernelIter) / Nneg), 0,0), normed=True) deltplot.append([((max(1.,negPerBin) + kernelIter) / Nneg)**(1.0 / len(parCol)), deltmed / deltstd]) if scaleKernel: grow_kernel = 0 else: err.message(" iteration, median, width, median/width = %3i, %9.2e, %9.2e, %9.2e" % (kernelIter, deltmed, deltstd, deltmed / deltstd)) if deltmed / deltstd > skellamTol or negPerBin + kernelIter >= Nneg: grow_kernel = 0 err.message(" Found good kernel after %i kernel growth iterations. 
The sqrt(kernel) size is:" % kernelIter) err.message(np.sqrt(np.abs(kernel))) elif deltmed / deltstd < 5 * skellamTol: kernel *= (float(negPerBin + kernelIter + 20) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 20 elif deltmed / deltstd < 2 * skellamTol: kernel *= (float(negPerBin + kernelIter + 10) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 10 elif deltmed / deltstd < 1.5 * skellamTol: kernel *= (float(negPerBin + kernelIter + 3) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 3 else: kernel *= (float(negPerBin + kernelIter + 1) / (negPerBin + kernelIter))**(2.0 / len(parCol)) kernelIter += 1 else: grow_kernel = 0 # ------------ # Skellam plot # ------------ if autoKernel and deltmed / deltstd > -100 and doskellam and makePlot: plt.plot(np.arange(-10, 10, 0.01), stats.norm().cdf(np.arange(-10, 10, 0.01)), "k-") plt.plot(np.arange(-10, 10, 0.01), stats.norm(scale=0.4).cdf(np.arange(-10, 10, 0.01)), "k:") plt.legend(("Gaussian (sigma=1)", "Gaussian (sigma=0.4)"), loc="lower right", prop={"size":13}) plt.hist(delt / deltstd, bins=np.arange(deltmin / deltstd, max(5.1, deltmax / deltstd), 0.01), cumulative=True, histtype="step", color="r", normed=True) plt.xlim(-5, 5) plt.ylim(0, 1) plt.xlabel("(P-N)/sqrt(N+P)") plt.ylabel("cumulative distribution") plt.plot([0, 0], [0, 1], "k--") fig0.savefig("%s_rel_skellam.pdf" % pdfoutname, rasterized=True) if not scaleKernel: fig3 = plt.figure() deltplot = np.array(deltplot) plt.plot(deltplot[:,0], deltplot[:,1], "ko-") plt.xlabel("kernel size (1D-sigma, aribtrary units)") plt.ylabel("median/std of (P-N)/sqrt(P+N)") plt.axhline(y=skellamTol, linestyle="--", color="r") fig3.savefig("%s_rel_skellam-delta.pdf" % pdfoutname, rasterized=True) # ----------------------- # Scatter plot of sources # ----------------------- specialids = [] if doscatter and makePlot: if verb: err.message(" plotting sources ...") fig1 = plt.figure(figsize=(18, 4.5 * nr)) plt.subplots_adjust(left=0.06, bottom=0.15/nr, right = 0.97, top=1-0.08/nr, wspace=0.35, hspace=0.25) n_p = 0 for jj in projections: if verb: err.message(" projection %i/%i" % (projections.index(jj) + 1, len(projections))) n_p, p1, p2 = n_p + 1, jj[0], jj[1] plt.subplot(nr, nc, n_p) plt.scatter(pars[p1,pos], pars[p2,pos], marker="o", c="b", s=10, edgecolor="face", alpha=0.5) plt.scatter(pars[p1,neg], pars[p2,neg], marker="o", c="r", s=10, edgecolor="face", alpha=0.5) for si in specialids: plt.plot(pars[p1, ids==si], pars[p2, ids==si], "kd", zorder=10000, ms=7, mfc="none", mew=2) # Plot Integrated SNR threshold if fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_sum","snr_mean"): xArray=np.arange(lims[p1][0],lims[p1][1]+(lims[p1][1]-lims[p1][0])/100,(lims[p1][1]-lims[p1][0])/100) plt.plot(xArray,np.log10(fMin)*2-xArray,'k:') elif fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_mean","snr_sum"): yArray=np.arange(lims[p2][0],lims[p2][1]+(lims[p2][1]-lims[p2][0])/100,(lims[p2][1]-lims[p2][0])/100) plt.plot(np.log10(fMin)*2-yArray,yArray,'k:') plt.xlim(lims[p1][0], lims[p1][1]) plt.ylim(lims[p2][0], lims[p2][1]) plt.xlabel(labs[p1]) plt.ylabel(labs[p2]) plt.grid(color='k',linestyle='-',linewidth=0.2) fig1.savefig("%s_rel_scatter.pdf" % pdfoutname, rasterized=True) # ------------- # Plot contours # ------------- if docontour and makePlot: levs = 10**np.arange(-1.5, 2, 0.5) if verb: err.message(" plotting contours ...") fig2 = plt.figure(figsize=(18, 4.5 * nr)) plt.subplots_adjust(left=0.06, bottom=0.15/nr, right=0.97, top=1-0.08/nr, wspace=0.35, hspace=0.25) n_p = 
0 for jj in projections: if verb: err.message(" projection %i/%i" % (projections.index(jj) + 1, len(projections))) n_p, p1, p2 = n_p + 1, jj[0], jj[1] g1, g2 = grid[p1], grid[p2] x1 = np.arange(g1[0], g1[1], g1[2]) x2 = np.arange(g2[0], g2[1], g2[2]) pshape = (x2.shape[0], x1.shape[0]) # Get array of source parameters on current projection parsp = np.concatenate((pars[p1:p1+1], pars[p2:p2+1]), axis=0) # Derive Np and Nn density fields on the current projection setcov = kernel[p1:p2+1:p2-p1,p1:p2+1:p2-p1] try: Np = gaussian_kde_set_covariance(parsp[:,pos], setcov) Nn = gaussian_kde_set_covariance(parsp[:,neg], setcov) except: err.error( "Reliability determination failed because of issues with the\n" "smoothing kernel. This is likely due to an insufficient number\n" "of negative detections. Please review your filtering and source\n" "finding settings to ensure that a sufficient number of negative\n" "detections is found.", fatal=True, frame=True) # Evaluate density fields on grid on current projection g = np.transpose(np.transpose(np.mgrid[slice(g1[0], g1[1], g1[2]), slice(g2[0], g2[1], g2[2])]).reshape(-1, 2)) Np = Np(g) Nn = Nn(g) Np = Np / Np.sum() * Npos Nn = Nn / Nn.sum() * Nneg Np.resize(pshape) Nn.resize(pshape) plt.subplot(nr, nc, n_p) plt.contour(x1, x2, Np, origin="lower", colors="b", levels=levs, zorder=2) plt.contour(x1, x2, Nn, origin="lower", colors="r", levels=levs, zorder=1) # Plot Integrated SNR threshold if fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_sum","snr_mean"): xArray=np.arange(lims[p1][0],lims[p1][1]+(lims[p1][1]-lims[p1][0])/100,(lims[p1][1]-lims[p1][0])/100) plt.plot(xArray,np.log10(fMin)*2-xArray,'k:') elif fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_mean","snr_sum"): yArray=np.arange(lims[p2][0],lims[p2][1]+(lims[p2][1]-lims[p2][0])/100,(lims[p2][1]-lims[p2][0])/100) plt.plot(np.log10(fMin)*2-yArray,yArray,'k:') if reliable.sum(): plt.scatter(pars[p1,pos][reliable], pars[p2,pos][reliable], marker="o", s=10, edgecolor="k", facecolor="k", zorder=4) if (pseudoreliable * (reliable == False)).sum(): plt.scatter(pars[p1,pos][pseudoreliable * (reliable == False)], pars[p2,pos][pseudoreliable * (reliable == False)], marker="x", s=40, edgecolor="0.5", facecolor="0.5", zorder=3) for si in specialids: plt.plot(pars[p1,ids==si], pars[p2,ids==si], "kd", zorder=10000, ms=7, mfc="none", mew=2) plt.xlim(lims[p1][0], lims[p1][1]) plt.ylim(lims[p2][0], lims[p2][1]) plt.xlabel(labs[p1]) plt.ylabel(labs[p2]) plt.grid(color='k',linestyle='-',linewidth=0.2) fig2.savefig("%s_rel_contour.pdf" % pdfoutname, rasterized=True) # ------------------------- # Add Np, Nn and R to table # ------------------------- # This allows me not to calculate R every time I want to do some plot analysis, # but just read it from the file if saverel: if not (docontour or dostats): Nps = Np(pars[:,pos]) * Npos Nns = Nn(pars[:,pos]) * Nneg Np = np.zeros((data.shape[0],)) Np[pos] = Nps Nn = np.zeros((data.shape[0],)) Nn[pos] = Nns R = -np.ones((data.shape[0],)) # R will be -1 for negative sources # Set R to zero for positive sources if R < 0 because of Nn > Np R[pos] = np.maximum(0, (Np[pos] - Nn[pos]) / Np[pos]) data = np.concatenate((data, Np.reshape(-1, 1), Nn.reshape(-1, 1), R.reshape(-1, 1)), axis=1) data = [list(jj) for jj in list(data)] return data, ids[pos][reliable].astype(int)
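# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the SoFiA pipeline): the reliability
# estimate R = (P - N) / P computed by EstimateRel() above, reduced to one
# parameter dimension. For simplicity this uses scipy.stats.gaussian_kde with
# its default bandwidth instead of the fixed-covariance kernel
# (gaussian_kde_set_covariance) used by the real code; all detections are synthetic.
def _demo_reliability_1d(seed=0):
    import numpy as np
    from scipy.stats import gaussian_kde
    rng = np.random.default_rng(seed)
    # Negative detections: pure noise; positive detections: noise plus a real population
    neg = rng.normal(0.5, 0.2, 200)                      # e.g. log10(summed S/N) of negative sources
    pos = np.concatenate((rng.normal(0.5, 0.2, 200),     # noise peaks also appear as positives
                          rng.normal(1.5, 0.3, 50)))     # genuine sources
    kde_pos = gaussian_kde(pos)
    kde_neg = gaussian_kde(neg)
    # Densities of positive and negative detections evaluated at the positions
    # of the positive detections, scaled to source counts as in EstimateRel()
    P = kde_pos(pos) * pos.size
    N = kde_neg(pos) * neg.size
    R = np.maximum(0.0, (P - N) / P)     # clip negative values, as in the real code
    return R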
def sigma_scale(cube, scaleX=False, scaleY=False, scaleZ=True, edgeX=0, edgeY=0, edgeZ=0, statistic="mad", fluxRange="all", method="global", windowSpatial=20, windowSpectral=20, gridSpatial=0, gridSpectral=0, interpolation="none"): # Print some informational messages err.message("Generating noise-scaled data cube:") err.message(" Selecting " + str(method) + " noise measurement method.") if statistic == "mad": err.message(" Applying median absolute deviation to " + str(fluxRange) + " pixels.") if statistic == "std": err.message(" Applying standard deviation to " + str(fluxRange) + " pixels.") if statistic == "gauss": err.message(" Applying Gaussian fit to " + str(fluxRange) + " pixels.") if statistic == "negative": err.message(" Applying Gaussian fit to negative pixels.") # Check the dimensions of the cube (could be obtained from header information) dimensions = np.shape(cube) # LOCAL noise measurement within running window (slower and less memory-friendly) if method == "local": # Make window sizes integers >= 1 windowSpatial = max(int(windowSpatial), 1) windowSpectral = max(int(windowSpectral), 1) # Ensure that window sizes are odd windowSpatial += (1 - windowSpatial % 2) windowSpectral += (1 - windowSpectral % 2) # Set grid sizes to half the window sizes if undefined if not gridSpatial: gridSpatial = windowSpatial // 2 if not gridSpectral: gridSpectral = windowSpectral // 2 # Make grid sizes integers >= 1 gridSpatial = max(int(gridSpatial), 1) gridSpectral = max(int(gridSpectral), 1) # Ensure that grid sizes are odd gridSpatial += (1 - gridSpatial % 2) gridSpectral += (1 - gridSpectral % 2) # Print grid and window sizes adopted err.message(" Using grid size of [" + str(gridSpatial) + ", " + str(gridSpectral) + "]") err.message(" and window size of [" + str(windowSpatial) + ", " + str(windowSpectral) + "].") # Generate grid points to be used gridPointsZ = np.arange( (dimensions[0] - gridSpectral * (int(math.ceil(float(dimensions[0]) / float(gridSpectral))) - 1)) // 2, dimensions[0], gridSpectral) gridPointsY = np.arange( (dimensions[1] - gridSpatial * (int(math.ceil(float(dimensions[1]) / float(gridSpatial))) - 1)) // 2, dimensions[1], gridSpatial) gridPointsX = np.arange( (dimensions[2] - gridSpatial * (int(math.ceil(float(dimensions[2]) / float(gridSpatial))) - 1)) // 2, dimensions[2], gridSpatial) # Divide grid and window sizes by 2 to get radii radiusGridSpatial = gridSpatial // 2 radiusGridSpectral = gridSpectral // 2 radiusWindowSpatial = windowSpatial // 2 radiusWindowSpectral = windowSpectral // 2 # Create empty cube (filled with NaN) to hold noise values rms_cube = np.full(cube.shape, np.nan, dtype=cube.dtype) # Determine RMS across window centred on grid cell for z in gridPointsZ: for y in gridPointsY: for x in gridPointsX: grid = (max(0, z - radiusGridSpectral), min(dimensions[0], z + radiusGridSpectral + 1), max(0, y - radiusGridSpatial), min(dimensions[1], y + radiusGridSpatial + 1), max(0, x - radiusGridSpatial), min(dimensions[2], x + radiusGridSpatial + 1)) window = (max(0, z - radiusWindowSpectral), min(dimensions[0], z + radiusWindowSpectral + 1), max(0, y - radiusWindowSpatial), min(dimensions[1], y + radiusWindowSpatial + 1), max(0, x - radiusWindowSpatial), min(dimensions[2], x + radiusWindowSpatial + 1)) if not np.all( np.isnan( cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]])): if interpolation == "linear" or interpolation == "cubic": # Write value into grid point for later interpolation rms_cube[z, y, x] = GetRMS(cube[window[0]:window[1], 
window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) else: # Fill entire grid cell rms_cube[grid[0]:grid[1], grid[2]:grid[3], grid[4]:grid[5]] = GetRMS( cube[window[0]:window[1], window[2]:window[3], window[4]:window[5]], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) del grid, window # Carry out interpolation if requested, taking NaNs into account if interpolation == "linear" or interpolation == "cubic": err.message(" Interpolating in between grid points (" + str(interpolation) + ").") # First across each spatial plane if gridSpatial > 1: for z in gridPointsZ: for y in gridPointsY: data_values = rms_cube[z, y, gridPointsX] not_nan = np.logical_not(np.isnan(data_values)) if any(not_nan): interp_coords = np.arange(0, dimensions[2]) if interpolation == "cubic": spline = InterpolatedUnivariateSpline( gridPointsX[not_nan], data_values[not_nan]) rms_cube[z, y, 0:dimensions[2]] = spline( interp_coords) del spline else: interp_values = np.interp( interp_coords, gridPointsX[not_nan], data_values[not_nan]) rms_cube[z, y, 0:dimensions[2]] = interp_values del interp_values del interp_coords del data_values, not_nan for x in range(dimensions[2]): data_values = rms_cube[z, gridPointsY, x] not_nan = np.logical_not(np.isnan(data_values)) if any(not_nan): interp_coords = np.arange(0, dimensions[1]) if interpolation == "cubic": spline = InterpolatedUnivariateSpline( gridPointsY[not_nan], data_values[not_nan]) rms_cube[z, 0:dimensions[1], x] = spline(interp_coords) del spline else: interp_values = np.interp( interp_coords, gridPointsY[not_nan], data_values[not_nan]) rms_cube[z, 0:dimensions[1], x] = interp_values del interp_values del interp_coords del data_values, not_nan # Alternative option: 2-D spatial interpolation using SciPy's interp2d #from scipy.interpolate import interp2d #xx, yy = np.meshgrid(gridPointsX, gridPointsY) #data_values = rms_cube[z, yy, xx] #f = interp2d(gridPointsX, gridPointsY, data_values, kind="cubic") #interp_coords_x = np.arange(0, dimensions[2]) #interp_coords_y = np.arange(0, dimensions[1]) #rms_cube[z, :, :] = f(interp_coords_x, interp_coords_y) # Then along the spectral axis if gridSpectral > 1: for y in range(dimensions[1]): for x in range(dimensions[2]): data_values = rms_cube[gridPointsZ, y, x] not_nan = np.logical_not(np.isnan(data_values)) if any(not_nan): interp_coords = np.arange(0, dimensions[0]) if interpolation == "cubic": spline = InterpolatedUnivariateSpline( gridPointsZ[not_nan], data_values[not_nan]) rms_cube[0:dimensions[0], y, x] = spline(interp_coords) del spline else: interp_values = np.interp( interp_coords, gridPointsZ[not_nan], data_values[not_nan]) rms_cube[0:dimensions[0], y, x] = interp_values del interp_values del interp_coords del data_values, not_nan # Replace any invalid RMS values with NaN with np.errstate(invalid="ignore"): rms_cube[rms_cube <= 0] = np.nan # Divide data cube by RMS cube cube /= rms_cube # Delete the RMS cube again to release its memory #del rms_cube # GLOBAL noise measurement on entire 2D plane (faster and more memory-friendly) else: # Define the range over which statistics are calculated z1 = int(edgeZ) z2 = int(dimensions[0] - edgeZ) y1 = int(edgeY) y2 = int(dimensions[1] - edgeY) x1 = int(edgeX) x2 = int(dimensions[2] - edgeX) # Make sure edges don't exceed cube size err.ensure(z1 < z2 and y1 < y2 and x1 < x2, "Edge size exceeds cube size for at least one axis.") # Create empty cube (filled with 1) to hold noise values rms_cube 
= np.ones(cube.shape, dtype=cube.dtype) # Measure noise across 2D planes and scale cube accordingly if scaleZ: for i in range(dimensions[0]): if not np.all(np.isnan(cube[i, y1:y2, x1:x2])): rms = GetRMS(cube[i, y1:y2, x1:x2], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) if rms > 0: rms_cube[i, :, :] *= rms cube[i, :, :] /= rms if scaleY: for i in range(dimensions[1]): if not np.all(np.isnan(cube[z1:z2, i, x1:x2])): rms = GetRMS(cube[z1:z2, i, x1:x2], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) if rms > 0: rms_cube[:, i, :] *= rms cube[:, i, :] /= rms if scaleX: for i in range(dimensions[2]): if not np.all(np.isnan(cube[z1:z2, y1:y2, i])): rms = GetRMS(cube[z1:z2, y1:y2, i], rmsMode=statistic, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=0) if rms > 0: rms_cube[:, :, i] *= rms cube[:, :, i] /= rms err.message("Noise-scaled data cube generated.\n") return cube, rms_cube
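# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the SoFiA pipeline): the "global" per-channel
# noise scaling applied by sigma_scale() above when scaleZ=True, with a simple
# MAD-based noise estimate standing in for GetRMS(). The cube is synthetic,
# with noise that grows linearly with channel number.
def _demo_scale_noise_per_channel(seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    nz, ny, nx = 50, 32, 32
    channel_rms = np.linspace(1.0, 3.0, nz)                 # true per-channel noise
    cube = rng.normal(0.0, 1.0, (nz, ny, nx)) * channel_rms[:, None, None]
    rms_cube = np.ones_like(cube)
    for i in range(nz):
        plane = cube[i]
        rms = 1.4826 * np.median(np.abs(plane - np.median(plane)))   # MAD noise estimate
        if rms > 0:
            rms_cube[i] *= rms
            cube[i] /= rms
    # After scaling, every channel of "cube" has a noise level of about 1,
    # and "rms_cube" stores the scaling that was applied, as in sigma_scale().
    return cube, rms_cube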