Example #1
def apply_weights_file(data, weightsFile, subcube):
	# Load weights cube
	err.message("Applying weights cube:\n  " + str(weightsFile))
	try:
		f = fits.open(weightsFile, memmap=False)
		header_weights = f[0].header
	except:
		err.error("Failed to read weights cube.")
	
	# Extract axis sizes and types
	n_axes_weights, axis_size_weights, axis_type_weights = extract_axis_size(header_weights)
	
	# Ensure correct dimensionality
	check_cube_dimensions(n_axes_weights, axis_size_weights, cube_name="weights cube", min_dim=1, max_dim=4)
	
	# Multiply data by weights
	# 1-D spectrum
	if n_axes_weights == 1:
		err.warning("Weights cube has 1 axis; interpreted as spectrum.\nAdding first and second axis.")
		if len(subcube):
			err.ensure(len(subcube) == 6, "Subcube list must have 6 entries ({0:d} given).".format(len(subcube)))
			data *= np.reshape(f[0].section[subcube[4]:subcube[5]], (-1, 1, 1))
		else:
			data *= np.reshape(f[0].data, (-1, 1, 1))
	
	# 2-D image
	elif n_axes_weights == 2:
		if len(subcube) == 6 or len(subcube) == 4:
			data *= np.array([f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
		else:
			data *= np.array([f[0].data])
	
	# 3-D cube
	elif n_axes_weights == 3:
		if len(subcube) == 6:
			data *= f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
		else:
			data *= f[0].data
	
	# 4-D hypercube
	else:
		if len(subcube) == 6:
			data *= f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
		else:
			data *= f[0].section[0]
	
	f.close()
	err.message("  Weights cube applied.")
	
	return data
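
A minimal, self-contained sketch of the broadcasting trick used above when the weights cube is a 1-D spectrum: reshaping the weights to (-1, 1, 1) lets NumPy broadcast one weight per channel across both spatial axes of a 3-D data cube. The array shapes and values below are illustrative, not taken from SoFiA.

import numpy as np

# Toy cube with 5 channels and 4 x 3 spatial pixels, plus one weight per channel
data = np.ones((5, 4, 3))
weights_spectrum = np.linspace(0.5, 1.0, 5)

# Reshape to (channels, 1, 1) so the weights broadcast over the two spatial axes
data *= np.reshape(weights_spectrum, (-1, 1, 1))
print(data[:, 0, 0])  # each channel is scaled by its own weight
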
Example #2
def regridMaskedChannels(datacube, maskcube, header):
    import numpy as np
    import scipy.constants
    from scipy import interpolate
    from sofia import error as err

    if not check_wcs_info(header, spatial=True):
        err.warning(
            "Axis descriptors missing from FITS file header.\nIgnoring the effect of CELLSCAL = 1/F."
        )
        return datacube

    maskcubeFlt = maskcube.astype("float")
    maskcubeFlt[maskcube > 1] = 1.0

    err.message("Regridding...")
    z = (np.arange(1.0, header["NAXIS3"] + 1) -
         header["CRPIX3"]) * header["CDELT3"] + header["CRVAL3"]

    if check_header_keywords(KEYWORDS_VELO, header["CTYPE3"]):
        pixscale = (1.0 - header["CRVAL3"] / scipy.constants.c) / (
            1.0 - z / scipy.constants.c)
    elif check_header_keywords(KEYWORDS_FREQ, header["CTYPE3"]):
        pixscale = header["CRVAL3"] / z
    else:
        err.warning(
            "Cannot convert 3rd axis coordinates to frequency.\nIgnoring the effect of CELLSCAL = 1/F."
        )
        pixscale = np.ones((header["NAXIS3"]))

    x0 = header["CRPIX1"] - 1
    y0 = header["CRPIX2"] - 1
    xs = np.arange(datacube.shape[2], dtype=float) - x0
    ys = np.arange(datacube.shape[1], dtype=float) - y0

    for zz in range(datacube.shape[0]):
        regrid_channel = interpolate.RectBivariateSpline(
            ys * pixscale[zz], xs * pixscale[zz], datacube[zz])
        datacube[zz] = regrid_channel(ys, xs)
        regrid_channel_mask = interpolate.RectBivariateSpline(
            ys * pixscale[zz], xs * pixscale[zz], maskcubeFlt[zz])
        maskcubeFlt[zz] = regrid_channel_mask(ys, xs)

    datacube[abs(maskcubeFlt) <= abs(np.nanmin(maskcubeFlt))] = np.nan
    del maskcubeFlt

    return datacube
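
A minimal sketch of the per-channel regridding step performed in the loop above: a RectBivariateSpline is built on coordinates scaled by that channel's pixscale factor and then evaluated back on the unscaled grid. The channel data, reference pixel and pixscale value are illustrative.

import numpy as np
from scipy import interpolate

channel = np.random.rand(16, 16)                  # one spectral channel (y, x)
x0, y0 = 8.0, 8.0                                 # reference pixel (CRPIX - 1)
xs = np.arange(channel.shape[1], dtype=float) - x0
ys = np.arange(channel.shape[0], dtype=float) - y0
pixscale = 1.01                                   # illustrative scale factor for this channel

# Interpolate the channel defined on the scaled grid back onto the original grid
spline = interpolate.RectBivariateSpline(ys * pixscale, xs * pixscale, channel)
regridded = spline(ys, xs)
print(regridded.shape)                            # (16, 16)
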
Example #3
def flag(cube, regions):
    if regions:
        err.message("Flagging data cube.")
        dim = len(cube.shape)

        try:
            for region in regions:
                for i in range(0, len(region) // 2):
                    if region[2 * i + 1] == "":
                        region[2 * i + 1] = cube.shape[dim - i - 1]
                if len(region) == 4:
                    cube[0, region[2]:region[3], region[0]:region[1]] = np.nan
                else:
                    cube[region[4]:region[5], region[2]:region[3],
                         region[0]:region[1]] = np.nan
            err.message("Cube has been flagged.")
        except:
            err.warning(
                "Flagging did not succeed. Please check the dimensions of your cube and filters."
            )

    return cube
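
A minimal sketch of what a single 6-entry flagging region does: set a (z, y, x) sub-box of the cube to NaN. The cube size and region bounds are illustrative.

import numpy as np

cube = np.zeros((10, 20, 20))
region = [5, 10, 0, 8, 2, 4]   # [x1, x2, y1, y2, z1, z2], illustrative

cube[region[4]:region[5], region[2]:region[3], region[0]:region[1]] = np.nan
print(np.isnan(cube).sum())    # 2 channels * 8 rows * 5 columns = 80 flagged pixels
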
Example #4
def regridMaskedChannels(datacube, maskcube, header):
	import numpy as np
	import scipy.constants
	from scipy import interpolate
	from sofia import error as err
	
	if not check_wcs_info(header, spatial=True):
		err.warning("Axis descriptors missing from FITS file header.\nIgnoring the effect of CELLSCAL = 1/F.")
		return datacube
	
	maskcubeFlt = maskcube.astype("float")
	maskcubeFlt[maskcube > 1] = 1.0
	
	err.message("Regridding...")
	z = (np.arange(1.0, header["NAXIS3"] + 1) - header["CRPIX3"]) * header["CDELT3"] + header["CRVAL3"]
	
	if check_header_keywords(KEYWORDS_VELO, header["CTYPE3"]):
		pixscale = (1.0 - header["CRVAL3"] / scipy.constants.c) / (1.0 - z / scipy.constants.c)
	elif check_header_keywords(KEYWORDS_FREQ, header["CTYPE3"]):
		pixscale = header["CRVAL3"] / z
	else:
		err.warning("Cannot convert 3rd axis coordinates to frequency.\nIgnoring the effect of CELLSCAL = 1/F.")
		pixscale = np.ones((header["NAXIS3"]))
	
	x0 = header["CRPIX1"] - 1
	y0 = header["CRPIX2"] - 1
	xs = np.arange(datacube.shape[2], dtype=float) - x0
	ys = np.arange(datacube.shape[1], dtype=float) - y0
	
	for zz in range(datacube.shape[0]):
		regrid_channel = interpolate.RectBivariateSpline(ys * pixscale[zz], xs * pixscale[zz], datacube[zz])
		datacube[zz] = regrid_channel(ys, xs)
		regrid_channel_mask = interpolate.RectBivariateSpline(ys * pixscale[zz], xs * pixscale[zz], maskcubeFlt[zz])
		maskcubeFlt[zz] = regrid_channel_mask(ys, xs)
	
	datacube[abs(maskcubeFlt) <= abs(np.nanmin(maskcubeFlt))] = np.nan
	del maskcubeFlt
	
	return datacube
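
For reference, a small worked sketch of the CELLSCAL = 1/F scale factor used above for a velocity axis: pixscale = (1 - CRVAL3/c) / (1 - z/c), with z the velocity of each channel. The header values below are made up.

import numpy as np
import scipy.constants

# Illustrative spectral-axis keywords (velocities in m/s)
crval3, crpix3, cdelt3, naxis3 = 1.0e6, 1.0, 5.0e3, 4

z = (np.arange(1.0, naxis3 + 1) - crpix3) * cdelt3 + crval3
pixscale = (1.0 - crval3 / scipy.constants.c) / (1.0 - z / scipy.constants.c)
print(pixscale)   # all close to 1, increasing slightly with channel velocity
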
Example #5
		np.add(mask, CNHI.find_sources(np_Cube, mask, **Parameters["CNHI"]), out=mask, casting="unsafe") # Add CNHI detections to the existing mask in place
		if Parameters["pipeline"]["trackMemory"]: print_memory_usage(t0)
	
	# --- THRESHOLD ---	
	if Parameters["steps"]["doThreshold"]:
		err.message("Running threshold filter")
		threshold_filter.filter(mask, np_Cube, dict_Header, **Parameters["threshold"])
		if Parameters["pipeline"]["trackMemory"]: print_memory_usage(t0)
	
	err.message("Source finding complete.")

# Check if positivity flag is set; if so, remove negative pixels from mask:
if Parameters["merge"]["positivity"]:
	err.warning(
		"Enabling mask.positivity is dangerous and will render some of SoFiA's\n"
		"most  powerful  algorithms useless,  including mask  optimisation and\n"
		"reliability calculation.  Only use this option if you are fully aware\n"
		"of its risks and consequences!", frame=True)
	mask[np_Cube < 0.0] = 0
	if Parameters["pipeline"]["trackMemory"]: print_memory_usage(t0)

# Check whether any pixels are detected
NRdet = (mask > 0).sum()
if not NRdet:
	err.warning("No pixels detected. Exiting pipeline.", fatal=True)
else:
	err.message("{0:,d} out of {1:,d} pixels detected ({2:.4f}%)".format(NRdet, np.array(mask.shape).prod(), 100.0 * float(NRdet) / float(np.array(mask.shape).prod())))



# -----------------
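
A minimal sketch of the mask-merging pattern used in the source-finding steps above: np.add with out= and casting="unsafe" accumulates a Boolean detection map into an integer mask in place. The mask shape and detection map are illustrative.

import numpy as np

mask = np.zeros((3, 4, 4), dtype=np.int16)
detections = np.random.rand(3, 4, 4) > 0.8   # Boolean map as a finder might return

# Add the Boolean detections into the integer mask without an intermediate copy
np.add(mask, detections, out=mask, casting="unsafe")
print((mask > 0).sum(), "pixels detected")
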
Example #6
def add_wcs_coordinates(objects, catParNames, catParFormt, catParUnits, Parameters):
	try:
		hdulist = fits.open(Parameters["import"]["inFile"])
		header = hdulist[0].header
		hdulist.close()
		
		# Fix headers where "per second" is written "/S" instead of "/s"
		# (assuming they mean "per second" and not "per Siemens").
		if "cunit3" in header and "/S" in header["cunit3"]:
			err.warning("Converting '/S' to '/s' in CUNIT3.")
			header["cunit3"] = header["cunit3"].replace("/S","/s")
		
		# Check if there is a Nmap/GIPSY FITS header keyword value present
		gipsyKey = [k for k in ["FREQ-OHEL", "FREQ-OLSR", "FREQ-RHEL", "FREQ-RLSR"] if (k in [header[key] for key in header if ("CTYPE" in key)])]
		if gipsyKey:
			err.message("GIPSY header found. Trying to convert to FITS standard.")
			from astropy.wcs import Wcsprm
			header = fix_gipsy_header(header)
			wcsin = Wcsprm(str(header))
			wcsin.sptr("VOPT-F2W")
			#if header["naxis"] == 4:
			#	objects = np.concatenate((objects, wcsin.p2s(np.concatenate((objects[:, catParNames.index("x"):catParNames. index("x") + 3], np.zeros((objects.shape[0], 1))), axis=1), 0)["world"][:,:-1]), axis=1)
			#else:
			#	objects = np.concatenate((objects, wcsin.p2s(objects[:, catParNames.index("x"):catParNames.index("x") + 3], 0)["world"]), axis=1)
			objects = np.concatenate((objects, wcsin.p2s(objects[:, catParNames.index("x"):catParNames.index("x") + 3], 0)["world"]), axis=1)
			catParUnits = tuple(list(catParUnits) + [str(cc).replace(" ", "") for cc in wcsin.cunit])
			catParNames = tuple(list(catParNames) + [(cc.split("--")[0]).lower() for cc in wcsin.ctype])
			catParFormt = tuple(list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])
		
		else:
			# Constrain the RA axis reference value CRVAL_ to be between 0 and 360 deg
			rafound = 0
			for kk in range(header["naxis"]):
				if header["ctype1"][:2] == "RA":
					rafound = 1
					break
			if rafound:
				if header["crval%i" % (kk + 1)] < 0:
					err.warning("Adding 360 deg to RA reference value.")
					header["crval%i" % (kk + 1)] += 360
				elif header["crval%i" % (kk + 1)] > 360:
					err.warning("Subtracting 360 deg from RA reference value.")
					header["crval%i" % (kk + 1)] -= 360
			
			#if header["naxis"] == 4: wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL, wcs.WCSSUB_STOKES])
			#else: wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL])
			wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL])
			xyz = objects[:, catParNames.index("x"):catParNames.index("x") + 3].astype(float)
			if "cellscal" in header and header["cellscal"] == "1/F":
				err.warning(
					"CELLSCAL keyword with value of 1/F found.\n"
					"Will account for varying pixel scale in WCS coordinate calculation.")
				x0, y0 = header["crpix1"] - 1, header["crpix2"] - 1
				# Will calculate the pixscale factor of each channel as:
				# pixscale = ref_frequency / frequency
				if header["ctype3"] == "VELO-HEL":
					pixscale = (1 - header["crval3"] / scipy.constants.c) / (1 - (((xyz[:, 2] + 1) - header["crpix3"]) * header["cdelt3"] + header["crval3"]) / scipy.constants.c)
				else:
					err.warning("Cannot convert 3rd axis coordinates to frequency. Ignoring the effect of CELLSCAL = 1/F.")
					pixscale = 1.0
				xyz[:, 0] = (xyz[:, 0] - x0) * pixscale + x0
				xyz[:, 1] = (xyz[:, 1] - y0) * pixscale + y0
			#if header["naxis"] == 4: objects = np.concatenate((objects, wcsin.wcs_pix2world(np.concatenate((xyz, np.zeros((objects.shape[0], 1))), axis=1), 0)[:, :-1]), axis=1)
			#else: objects = np.concatenate((objects, wcsin.wcs_pix2world(xyz, 0)), axis=1)
			objects = np.concatenate((objects, wcsin.wcs_pix2world(xyz, 0)), axis=1)
			catParUnits = tuple(list(catParUnits) + [str(cc).replace(" ", "") for cc in wcsin.wcs.cunit])
			catParNames = tuple(list(catParNames) + [(cc.split("--")[0]).lower() for cc in wcsin.wcs.ctype])
			catParFormt = tuple(list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])
		#if header["naxis"] == 4:
		#	catParUnits = catParUnits[:-1]
		#	catParNames= catParNames[:-1]
		err.message("WCS coordinates added to catalogue.")
		
		# Create IAU-compliant source name:
		# WARNING: This currently assumes a regular, ≥ 2-dim. data cube where the first two axes are longitude and latitude.
		n_src = objects.shape[0]
		n_par = objects.shape[1]
		
		iau_names = np.empty([n_src, 1], dtype=object)
		
		if header["ctype1"][:4] == "RA--":
			# Equatorial coordinates; try to figure out equinox:
			iau_coord = "equ"
			if "equinox" in header:
				if int(header["equinox"]) >= 2000: iau_equinox = "J"
				else: iau_equinox = "B"
			elif "epoch" in header:
				# Assume that EPOCH has been abused to record the equinox:
				if int(header["epoch"]) >= 2000: iau_equinox = "J"
				else: iau_equinox = "B"
			else:
				# Equinox undefined:
				iau_equinox = "X"
		elif header["ctype1"][:4] == "GLON":
			# Galactic coordinates:
			iau_coord = "gal"
			iau_equinox = "G"
		else:
			# Unsupported coordinate system:
			iau_coord = ""
			iau_equinox = ""
		
		for src in xrange(n_src):
			lon = objects[src][n_par - 3]
			lat = objects[src][n_par - 2]
			
			if iau_coord == "equ":
				ra = Longitude(lon, unit=u.deg)
				dec = Latitude(lat, unit=u.deg)
				iau_pos = ra.to_string(unit=u.h, decimal=False, sep="", precision=2, alwayssign=False, pad=True, fields=3)
				iau_pos += dec.to_string(unit=u.deg, decimal=False, sep="", precision=1, alwayssign=True, pad=True, fields=3)
			else:
				iau_pos = "{0:08.4f}".format(lon)
				if lat < 0.0: iau_pos += "-"
				else: iau_pos += "+"
				iau_pos += "{0:07.4f}".format(abs(lat))
			
			iau_names[src][0] = "SoFiA " + iau_equinox + iau_pos
		
		objects = np.concatenate((objects, iau_names), axis = 1)
		catParUnits = tuple(list(catParUnits) + ["-"])
		catParNames = tuple(list(catParNames) + ["name"])
		catParFormt = tuple(list(catParFormt) + ["%30s"])
	except:
		err.warning("WCS conversion of parameters failed.")
	
	return (objects, catParNames, catParFormt, catParUnits)
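
A minimal sketch of the core pixel-to-world conversion used above, built on astropy.wcs. Here the WCS is constructed by hand with illustrative keywords instead of being read from a FITS header.

import numpy as np
from astropy import wcs

# Illustrative 3-axis WCS: two celestial axes plus one spectral axis
w = wcs.WCS(naxis=3)
w.wcs.ctype = ["RA---SIN", "DEC--SIN", "FREQ"]
w.wcs.crpix = [50.0, 50.0, 1.0]
w.wcs.crval = [150.0, 2.0, 1.42e9]
w.wcs.cdelt = [-0.001, 0.001, 1.0e4]

# Convert (x, y, z) pixel positions to world coordinates (0-based pixel convention)
xyz = np.array([[10.0, 20.0, 5.0], [49.0, 49.0, 0.0]])
world = w.wcs_pix2world(xyz, 0)
print(world)   # rows of [RA (deg), Dec (deg), frequency (Hz)]
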
Example #7
def import_mask(maskFile, header, axis_size, subcube):
    err.message("Loading mask cube:\n  " + str(maskFile))

    try:
        f = fits.open(maskFile, memmap=False)
        header_mask = f[0].header
    except:
        err.error("Failed to read mask cube.")

    # Extract axis sizes and types
    n_axes_mask, axis_size_mask, axis_type_mask = extract_axis_size(
        header_mask)

    # Ensure correct dimensionality
    check_cube_dimensions(n_axes_mask,
                          axis_size_mask,
                          cube_name="mask cube",
                          min_dim=1,
                          max_dim=4)

    # 1-D spectrum
    if n_axes_mask == 1:
        err.warning(
            "Mask cube has 1 axis; interpreted as spectrum.\nAdding first and second axis."
        )
        err.ensure(header_mask['CRVAL1'] == header['CRVAL1'],
                   "Input cube and mask are not on the same WCS grid.")

        if len(subcube) == 6:
            if header_mask["NAXIS1"] == axis_size[2]:
                err.message(
                    "  Input mask cube already matches size of data subcube.\n  No subcube selection applied."
                )
                mask = np.reshape(f[0].data, (-1, 1, 1))
            elif header_mask["NAXIS1"] == fullshape[0]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = np.reshape(f[0].section[subcube[4]:subcube[5]],
                                  (-1, 1, 1))
            else:
                err.error(
                    "Data subcube does not match size of mask subcube or full mask."
                )
        elif not len(subcube):
            mask = np.reshape(f[0].data, (-1, 1, 1))
        else:
            err.error(
                "The subcube list must have 6 entries ({0:d} given).".format(
                    len(subcube)))

    # 2-D image
    elif n_axes_mask == 2:
        err.ensure(
            header_mask["CRVAL1"] == header["CRVAL1"]
            and header_mask["CRVAL2"] == header["CRVAL2"],
            "Input cube and mask are not on the same WCS grid.")

        if len(subcube) == 6 or len(subcube) == 4:
            if header_mask["NAXIS1"] == axis_size[0] and header_mask[
                    "NAXIS2"] == axis_size[1]:
                err.message(
                    "  Input mask cube already matches size of data subcube.\n  No subcube selection applied."
                )
                mask = np.array([f[0].data])
            elif header_mask["NAXIS1"] == fullshape[2] and header_mask[
                    "NAXIS2"] == fullshape[1]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = np.array([
                    f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]
                ])
            else:
                err.error(
                    "Data subcube does not match size of mask subcube or full mask."
                )
        else:
            mask = np.array([f[0].data])

    # 3-D cube
    elif n_axes_mask == 3:
        err.ensure(
            header_mask["CRVAL1"] == header["CRVAL1"]
            and header_mask["CRVAL2"] == header["CRVAL2"]
            and header_mask["CRVAL3"] == header["CRVAL3"],
            "Input cube and mask are not on the same WCS grid.")

        if len(subcube) == 6:
            if header_mask["NAXIS1"] == axis_size[0] and header_mask[
                    "NAXIS2"] == axis_size[1] and header_mask[
                        "NAXIS3"] == axis_size[2]:
                err.message(
                    "  Input mask cube already matches size of data subcube.\n  No subcube selection applied."
                )
                mask = f[0].data
            elif header_mask["NAXIS1"] == fullshape[2] and header_mask[
                    "NAXIS2"] == fullshape[1] and header_mask[
                        "NAXIS3"] == fullshape[0]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = f[0].section[subcube[4]:subcube[5],
                                    subcube[2]:subcube[3],
                                    subcube[0]:subcube[1]]
            else:
                err.error(
                    "Data subcube does not match size of mask subcube or full mask."
                )
        else:
            mask = f[0].data

    # 4-D hypercube
    else:
        err.ensure(
            header_mask["CRVAL1"] == header["CRVAL1"]
            and header_mask["CRVAL2"] == header["CRVAL2"]
            and header_mask["CRVAL3"] == header["CRVAL3"],
            "Input cube and mask are not on the same WCS grid.")

        if len(subcube) == 6:
            if header_mask["NAXIS1"] == axis_size[0] and header_mask[
                    "NAXIS2"] == axis_size[1] and header_mask[
                        "NAXIS3"] == axis_size[2]:
                err.message(
                    "  Input mask cube already matches size of data subcube.\n  No subcube selection applied."
                )
                mask = f[0].section[0]
            elif header_mask["NAXIS1"] == fullshape[2] and header_mask[
                    "NAXIS2"] == fullshape[1] and header_mask[
                        "NAXIS3"] == fullshape[0]:
                err.message("  Subcube selection applied to input mask cube.")
                mask = f[0].section[0, subcube[4]:subcube[5],
                                    subcube[2]:subcube[3],
                                    subcube[0]:subcube[1]]
            else:
                err.error(
                    "Data subcube does not match size of mask subcube or full mask."
                )
        else:
            mask = f[0].section[0]

    mask[mask > 0] = 1
    f.close()
    err.message("Mask cube loaded.")

    # In all cases, convert mask to Boolean with masked pixels set to 1.
    return (mask > 0).astype(bool)
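
A minimal sketch of the subcube selection pattern used above, assuming a 3-D mask file on disk: f[0].section slices the data straight from the file before the full array is loaded, and the result is then turned into a Boolean mask. The file name and slice bounds are illustrative.

import numpy as np
from astropy.io import fits

# Write a small illustrative mask cube to disk
fits.writeto("example_mask.fits", (np.random.rand(10, 32, 32) > 0.9).astype(np.int16), overwrite=True)

subcube = [4, 20, 4, 20, 2, 8]   # [x1, x2, y1, y2, z1, z2], illustrative
with fits.open("example_mask.fits", memmap=False) as f:
    # .section reads only the requested slice rather than the entire array
    mask = f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]

mask = mask > 0   # Boolean mask, True wherever the input mask is set
print(mask.shape, mask.sum())
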
Example #8
File: addrel.py  Project: whagood/SoFiA
def EstimateRel(data,
                pdfoutname,
                parNames,
                parSpace=["snr_sum", "snr_max", "n_pix"],
                logPars=[1, 1, 1],
                autoKernel=True,
                scaleKernel=1,
                negPerBin=1,
                skellamTol=-0.5,
                kernel=[0.15, 0.05, 0.1],
                usecov=False,
                doscatter=1,
                docontour=1,
                doskellam=1,
                dostats=0,
                saverel=1,
                threshold=0.99,
                fMin=0,
                verb=0,
                makePlot=False):

    # Always work on logarithmic parameter values; the reliability.logPars parameter should be removed
    if 0 in logPars:
        err.warning(
            "  Setting all reliability.logPars entries to 1. This parameter is no longer editable by users."
        )
    logPars = [1 for pp in parSpace]

    # Import Matplotlib if diagnostic plots requested
    if makePlot:
        import matplotlib
        # The following line is necessary to run SoFiA remotely
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

    # --------------------------------
    # Build array of source parameters
    # --------------------------------

    idCOL = parNames.index("id")
    ftotCOL = parNames.index("snr_sum")
    fmaxCOL = parNames.index("snr_max")
    fminCOL = parNames.index("snr_min")

    # Get columns of requested parameters
    parCol = []
    for ii in range(len(parSpace)):
        parCol.append(parNames.index(parSpace[ii]))

    # Get position and number of positive and negative sources
    pos = data[:, ftotCOL] > 0
    neg = data[:, ftotCOL] <= 0
    Npos = pos.sum()
    Nneg = neg.sum()

    err.ensure(Npos, "No positive sources found; cannot proceed.")
    err.ensure(Nneg, "No negative sources found; cannot proceed.")

    # Get array of relevant source parameters (and take log of them if requested)
    ids = data[:, idCOL]
    pars = np.empty((data.shape[0], 0))

    for ii in range(len(parSpace)):
        if parSpace[ii] == "snr_max":
            parsTmp = data[:, fmaxCOL] * pos - data[:, fminCOL] * neg
            if logPars[ii]: parsTmp = np.log10(parsTmp)
            pars = np.concatenate((pars, parsTmp.reshape(-1, 1)), axis=1)
        elif parSpace[ii] == "snr_sum" or parSpace[ii] == "snr_mean":
            parsTmp = abs(data[:, parCol[ii]].reshape(-1, 1))
            if logPars[ii]: parsTmp = np.log10(parsTmp)
            pars = np.concatenate((pars, parsTmp), axis=1)
        else:
            parsTmp = data[:, parCol[ii]].reshape(-1, 1)
            if logPars[ii]: parsTmp = np.log10(parsTmp)
            pars = np.concatenate((pars, parsTmp), axis=1)

    err.message("  Working in parameter space {0:}".format(str(parSpace)))
    err.message(
        "  Will convolve the distribution of positive and negative sources in this space to derive the P and N density fields"
    )
    pars = np.transpose(pars)

    # ----------------------------------------------------------
    # Set parameters to work with and gridding/plotting for each
    # ----------------------------------------------------------

    # Axis labels when plotting
    labs = []
    for ii in range(len(parSpace)):
        labs.append("")
        if logPars[ii]: labs[ii] += "log "
        labs[ii] += parSpace[ii]

    # Axis limits when plotting
    pmin, pmax = pars.min(axis=1), pars.max(axis=1)
    pmin, pmax = pmin - 0.1 * (pmax - pmin), pmax + 0.1 * (pmax - pmin)
    lims = [[pmin[i], pmax[i]] for i in range(len(parSpace))]

    # Grid on which to evaluate Np and Nn in order to plot contours
    grid = [[pmin[i], pmax[i], 0.02 * (pmax[i] - pmin[i])]
            for i in range(len(parSpace))]

    # Calculate the number of rows and columns in figure
    projections = [subset for subset in combinations(range(len(parSpace)), 2)]
    nr = int(np.floor(np.sqrt(len(projections))))
    nc = int(np.ceil(float(len(projections)) / nr))

    # ---------------------------------------
    # Set smoothing kernel in parameter space
    # ---------------------------------------

    # If autoKernel is True, then the initial kernel is taken as a scaled version of the covariance matrix
    # of the negative sources. The kernel size along each axis is such that the number of sources per kernel
    # width (sigma**2) is equal to "negPerBin". Optionally, the user can decide to use only the diagonal
    # terms of the covariance matrix. The kernel is then grown until convergence is reached on the Skellam
    # plot. If autoKernel is False, then use the kernel given by "kernel" parameter (argument of EstimateRel);
    # this is sigma, and is squared to be consistent with the auto kernel above.

    if autoKernel:
        # Set the kernel shape to that of the variance or covariance matrix
        kernel = np.cov(pars[:, neg])
        kernelType = "covariance"
        # Check if kernel matrix can be inverted
        try:
            np.linalg.inv(kernel)
        except:
            err.error(
                "The reliability cannot be calculated because the smoothing kernel\n"
                "derived from " + str(pars[:, neg].shape[1]) +
                " negative sources cannot be inverted.\n"
                "This is likely due to an insufficient number of negative sources.\n"
                "Try to increase the number of negative sources by changing the\n"
                "source finding and/or filtering settings.",
                fatal=True,
                frame=True)

        if np.isnan(kernel).sum():
            err.error(
                "The reliability cannot be calculated because the smoothing kernel\n"
                "derived from " + str(pars[:, neg].shape[1]) +
                " negative sources contains NaNs.\n"
                "A good kernel is required to calculate the density field of positive\n"
                "and negative sources in parameter space.\n"
                "Try to increase the number of negative sources by changing the\n"
                "source finding and/or filtering settings.",
                fatal=True,
                frame=True)

        if not usecov:
            kernel = np.diag(np.diag(kernel))
            kernelType = "variance"

        kernelIter = 0.0
        deltplot = []

        # Scale the kernel size as requested by the user (scaleKernel>0) or use the autoscale algorithm (scaleKernel=0)
        if scaleKernel > 0:
            # Scale kernel size as requested by the user
            # Note that the scale factor is squared because users are asked to give a factor to apply to sqrt(kernel)
            kernel *= scaleKernel**2
            err.message(
                "  Using the {0:s} matrix scaled by a factor {1:.2f} as convolution kernel"
                .format(kernelType, scaleKernel))
            err.message("  The sqrt(kernel) size is:")
            err.message(" " + str(np.sqrt(np.abs(kernel))))
        elif scaleKernel == 0:
            # Scale kernel size to get started the kernel-growing loop
            # The scale factor for sqrt(kernel) is elevated to the power of 1.0 / len(parCol)
            err.message(
                "  Will search for the best convolution kernel by scaling the {0:s} matrix"
                .format(kernelType))
            err.message("  The {0:s} matrix has sqrt:".format(kernelType))
            err.message(" " + str(np.sqrt(np.abs(kernel))))
            # negPerBin must be >=1
            err.ensure(
                negPerBin >= 1,
                "The parameter reliability.negPerBin used to start the convolution kernel search was set to {0:.1f} but must be >= 1. Please change your settings."
                .format(negPerBin))
            kernel *= ((negPerBin + kernelIter) / Nneg)**(2.0 / len(parCol))
            err.message("  Search starting from the kernel with sqrt:")
            err.message(" " + str(np.sqrt(np.abs(kernel))))
            err.message(
                "  Iteratively growing kernel until the distribution of (P-N)/sqrt(P+N) reaches median/width = {0:.2f} ..."
                .format(skellamTol))
            err.ensure(
                skellamTol <= 0,
                "The parameter reliability.skellamTol was set to {0:.2f} but must be <= 0. Please change your settings."
                .format(skellamTol))
        else:
            err.ensure(scaleKernel>=0,\
             "The reliability.scaleKernel parameter cannot be negative.\n"\
             "It should be = 0 if you want SoFiA to find the optimal kernel scaling\n"\
             "or > 0 if you want to set the scaling yourself.\n"\
             "Please change your settings.")

        #deltOLD=-1e+9 # Used to stop kernel growth if P-N stops moving closer to zero [NOT USED CURRENTLY]
        if doskellam and makePlot: fig0 = plt.figure()
    else:
        # Note that the user must give sigma, which then gets squared
        err.message(
            "  Using user-defined variance kernel with sqrt(kernel) size: {0}".
            format(kernel))
        err.ensure(
            len(parSpace) == len(kernel),
            "The number of entries in the kernel above does not match the number of parameters you requested for the reliability calculation."
        )
        kernel = np.identity(len(kernel)) * np.array(kernel)**2

    # Set grow_kernel to 1 to start the kernel growing loop below.
    grow_kernel = 1

    # This loop will estimate the reliability, check whether the kernel is large enough,
    # and if not pick a larger kernel. If autoKernel = 0 or scaleKernel = 0, we will do
    # just one pass (i.e., we will not grow the kernel).
    while grow_kernel:
        # ------------------------
        # Evaluate N-d reliability
        # ------------------------

        if verb:
            err.message(
                "   estimate normalised positive and negative density fields ..."
            )

        Np = gaussian_kde_set_covariance(pars[:, pos], kernel)
        Nn = gaussian_kde_set_covariance(pars[:, neg], kernel)

        # Calculate the number of positive and negative sources at the location of positive sources
        Nps = Np(pars[:, pos]) * Npos
        Nns = Nn(pars[:, pos]) * Nneg

        # Calculate the number of positive and negative sources at the location of negative sources
        nNps = Np(pars[:, neg]) * Npos
        nNns = Nn(pars[:, neg]) * Nneg

        # Calculate the reliability at the location of positive sources
        Rs = (Nps - Nns) / Nps

        # The reliability must be <= 1. If not, something is wrong.
        err.ensure(
            Rs.max() <= 1,
            "Maximum reliability greater than 1; something is wrong.\nPlease ensure that enough negative sources are detected\nand decrease your source finding threshold if necessary.",
            frame=True)

        # Find pseudo-reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0
        # if threshold == 0; Rs may be < 0 because of insufficient statistics)
        # These are called pseudo-reliable because some objects may be discarded later based on additional criteria below
        pseudoreliable = np.maximum(Rs, 0) >= threshold

        # Find reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0 if
        # threshold == 0; Rs may be < 0 because of insufficient statistics)
        #reliable=(np.maximum(Rs, 0)>=threshold) * (data[pos, ftotCOL].reshape(-1,) > fMin) * (data[pos, fmaxCOL].reshape(-1,) > 4)
        reliable = (np.maximum(Rs, 0) >= threshold) * (
            (data[pos, ftotCOL] /
             np.sqrt(data[pos, parNames.index("n_pix")])).reshape(-1, ) > fMin)

        if autoKernel:
            # Calculate quantities needed for comparison to Skellam distribution
            delt = (nNps - nNns) / np.sqrt(nNps + nNns)
            deltstd = delt.std()
            deltmed = np.median(delt)
            deltmin = delt.min()
            deltmax = delt.max()

            if deltmed / deltstd > -100 and doskellam and makePlot:
                plt.hist(delt / deltstd,
                         bins=np.arange(deltmin / deltstd,
                                        max(5.1, deltmax / deltstd), 0.01),
                         cumulative=True,
                         histtype="step",
                         color=(min(
                             1,
                             float(max(1., negPerBin) + kernelIter) / Nneg), 0,
                                0),
                         density=True)
                deltplot.append([((max(1., negPerBin) + kernelIter) /
                                  Nneg)**(1.0 / len(parCol)),
                                 deltmed / deltstd])

            if scaleKernel: grow_kernel = 0
            else:
                err.message(
                    "  iteration, median, width, median/width = %3i, %9.2e, %9.2e, %9.2e"
                    % (kernelIter, deltmed, deltstd, deltmed / deltstd))

                if deltmed / deltstd > skellamTol or negPerBin + kernelIter >= Nneg:
                    grow_kernel = 0
                    err.message(
                        "  Found good kernel after %i kernel growth iterations. The sqrt(kernel) size is:"
                        % kernelIter)
                    err.message(np.sqrt(np.abs(kernel)))
                elif deltmed / deltstd < 5 * skellamTol:
                    kernel *= (float(negPerBin + kernelIter + 20) /
                               (negPerBin + kernelIter))**(2.0 / len(parCol))
                    kernelIter += 20
                elif deltmed / deltstd < 2 * skellamTol:
                    kernel *= (float(negPerBin + kernelIter + 10) /
                               (negPerBin + kernelIter))**(2.0 / len(parCol))
                    kernelIter += 10
                elif deltmed / deltstd < 1.5 * skellamTol:
                    kernel *= (float(negPerBin + kernelIter + 3) /
                               (negPerBin + kernelIter))**(2.0 / len(parCol))
                    kernelIter += 3
                else:
                    kernel *= (float(negPerBin + kernelIter + 1) /
                               (negPerBin + kernelIter))**(2.0 / len(parCol))
                    kernelIter += 1
        else:
            grow_kernel = 0

    # ------------
    # Skellam plot
    # ------------

    if autoKernel and deltmed / deltstd > -100 and doskellam and makePlot:
        plt.plot(np.arange(-10, 10, 0.01),
                 stats.norm().cdf(np.arange(-10, 10, 0.01)), "k-")
        plt.plot(np.arange(-10, 10, 0.01),
                 stats.norm(scale=0.4).cdf(np.arange(-10, 10, 0.01)), "k:")
        plt.legend(("Gaussian (sigma=1)", "Gaussian (sigma=0.4)"),
                   loc="lower right",
                   prop={"size": 13})
        plt.hist(delt / deltstd,
                 bins=np.arange(deltmin / deltstd, max(5.1, deltmax / deltstd),
                                0.01),
                 cumulative=True,
                 histtype="step",
                 color="r",
                 density=True)
        plt.xlim(-5, 5)
        plt.ylim(0, 1)
        plt.xlabel("(P-N)/sqrt(N+P)")
        plt.ylabel("cumulative distribution")
        plt.plot([0, 0], [0, 1], "k--")
        fig0.savefig("%s_rel_skellam.pdf" % pdfoutname, rasterized=True)

        if not scaleKernel:
            fig3 = plt.figure()
            deltplot = np.array(deltplot)
            plt.plot(deltplot[:, 0], deltplot[:, 1], "ko-")
            plt.xlabel("kernel size (1D-sigma, aribtrary units)")
            plt.ylabel("median/std of (P-N)/sqrt(P+N)")
            plt.axhline(y=skellamTol, linestyle="--", color="r")
            fig3.savefig("%s_rel_skellam-delta.pdf" % pdfoutname,
                         rasterized=True)

    # -----------------------
    # Scatter plot of sources
    # -----------------------

    specialids = []

    if doscatter and makePlot:
        if verb: err.message("  plotting sources ...")
        fig1 = plt.figure(figsize=(18, 4.5 * nr))
        plt.subplots_adjust(left=0.06,
                            bottom=0.15 / nr,
                            right=0.97,
                            top=1 - 0.08 / nr,
                            wspace=0.35,
                            hspace=0.25)

        n_p = 0
        for jj in projections:
            if verb:
                err.message("    projection %i/%i" %
                            (projections.index(jj) + 1, len(projections)))
            n_p, p1, p2 = n_p + 1, jj[0], jj[1]
            plt.subplot(nr, nc, n_p)
            plt.scatter(pars[p1, pos],
                        pars[p2, pos],
                        marker="o",
                        c="b",
                        s=10,
                        edgecolor="face",
                        alpha=0.5)
            plt.scatter(pars[p1, neg],
                        pars[p2, neg],
                        marker="o",
                        c="r",
                        s=10,
                        edgecolor="face",
                        alpha=0.5)
            for si in specialids:
                plt.plot(pars[p1, ids == si],
                         pars[p2, ids == si],
                         "kd",
                         zorder=10000,
                         ms=7,
                         mfc="none",
                         mew=2)
            # Plot Integrated SNR threshold
            if fMin > 0 and (parSpace[jj[0]], parSpace[jj[1]]) == ("snr_sum",
                                                                   "snr_mean"):
                xArray = np.arange(
                    lims[p1][0],
                    lims[p1][1] + (lims[p1][1] - lims[p1][0]) / 100,
                    (lims[p1][1] - lims[p1][0]) / 100)
                plt.plot(xArray, np.log10(fMin) * 2 - xArray, 'k:')
            elif fMin > 0 and (parSpace[jj[0]],
                               parSpace[jj[1]]) == ("snr_mean", "snr_sum"):
                yArray = np.arange(
                    lims[p2][0],
                    lims[p2][1] + (lims[p2][1] - lims[p2][0]) / 100,
                    (lims[p2][1] - lims[p2][0]) / 100)
                plt.plot(np.log10(fMin) * 2 - yArray, yArray, 'k:')
            plt.xlim(lims[p1][0], lims[p1][1])
            plt.ylim(lims[p2][0], lims[p2][1])
            plt.xlabel(labs[p1])
            plt.ylabel(labs[p2])
            plt.grid(color='k', linestyle='-', linewidth=0.2)
        fig1.savefig("%s_rel_scatter.pdf" % pdfoutname, rasterized=True)

    # -------------
    # Plot contours
    # -------------

    if docontour and makePlot:
        levs = 10**np.arange(-1.5, 2, 0.5)

        if verb: err.message("  plotting contours ...")
        fig2 = plt.figure(figsize=(18, 4.5 * nr))
        plt.subplots_adjust(left=0.06,
                            bottom=0.15 / nr,
                            right=0.97,
                            top=1 - 0.08 / nr,
                            wspace=0.35,
                            hspace=0.25)
        n_p = 0
        for jj in projections:
            if verb:
                err.message("    projection %i/%i" %
                            (projections.index(jj) + 1, len(projections)))
            n_p, p1, p2 = n_p + 1, jj[0], jj[1]
            g1, g2 = grid[p1], grid[p2]
            x1 = np.arange(g1[0], g1[1], g1[2])
            x2 = np.arange(g2[0], g2[1], g2[2])
            pshape = (x2.shape[0], x1.shape[0])

            # Get array of source parameters on current projection
            parsp = np.concatenate((pars[p1:p1 + 1], pars[p2:p2 + 1]), axis=0)

            # Derive Np and Nn density fields on the current projection
            setcov = kernel[p1:p2 + 1:p2 - p1, p1:p2 + 1:p2 - p1]
            try:
                Np = gaussian_kde_set_covariance(parsp[:, pos], setcov)
                Nn = gaussian_kde_set_covariance(parsp[:, neg], setcov)
            except:
                err.error(
                    "Reliability  determination  failed  because of issues  with the\n"
                    "smoothing kernel.  This is likely due to an insufficient number\n"
                    "of negative detections. Please review your filtering and source\n"
                    "finding settings to ensure that a sufficient number of negative\n"
                    "detections is found.",
                    fatal=True,
                    frame=True)

            # Evaluate density fields on grid on current projection
            g = np.transpose(
                np.transpose(np.mgrid[slice(g1[0], g1[1], g1[2]),
                                      slice(g2[0], g2[1], g2[2])]).reshape(
                                          -1, 2))
            Np = Np(g)
            Nn = Nn(g)
            Np = Np / Np.sum() * Npos
            Nn = Nn / Nn.sum() * Nneg
            Np.resize(pshape)
            Nn.resize(pshape)
            plt.subplot(nr, nc, n_p)
            plt.contour(x1,
                        x2,
                        Np,
                        origin="lower",
                        colors="b",
                        levels=levs,
                        zorder=2)
            plt.contour(x1,
                        x2,
                        Nn,
                        origin="lower",
                        colors="r",
                        levels=levs,
                        zorder=1)

            # Plot Integrated SNR threshold
            if fMin > 0 and (parSpace[jj[0]], parSpace[jj[1]]) == ("snr_sum",
                                                                   "snr_mean"):
                xArray = np.arange(
                    lims[p1][0],
                    lims[p1][1] + (lims[p1][1] - lims[p1][0]) / 100,
                    (lims[p1][1] - lims[p1][0]) / 100)
                plt.plot(xArray, np.log10(fMin) * 2 - xArray, 'k:')
            elif fMin > 0 and (parSpace[jj[0]],
                               parSpace[jj[1]]) == ("snr_mean", "snr_sum"):
                yArray = np.arange(
                    lims[p2][0],
                    lims[p2][1] + (lims[p2][1] - lims[p2][0]) / 100,
                    (lims[p2][1] - lims[p2][0]) / 100)
                plt.plot(np.log10(fMin) * 2 - yArray, yArray, 'k:')

            if reliable.sum():
                plt.scatter(pars[p1, pos][reliable],
                            pars[p2, pos][reliable],
                            marker="o",
                            s=10,
                            edgecolor="k",
                            facecolor="k",
                            zorder=4)
            if (pseudoreliable * (reliable == False)).sum():
                plt.scatter(pars[p1,
                                 pos][pseudoreliable * (reliable == False)],
                            pars[p2,
                                 pos][pseudoreliable * (reliable == False)],
                            marker="x",
                            s=40,
                            edgecolor="0.5",
                            facecolor="0.5",
                            zorder=3)
            for si in specialids:
                plt.plot(pars[p1, ids == si],
                         pars[p2, ids == si],
                         "kd",
                         zorder=10000,
                         ms=7,
                         mfc="none",
                         mew=2)
            plt.xlim(lims[p1][0], lims[p1][1])
            plt.ylim(lims[p2][0], lims[p2][1])
            plt.xlabel(labs[p1])
            plt.ylabel(labs[p2])
            plt.grid(color='k', linestyle='-', linewidth=0.2)
        fig2.savefig("%s_rel_contour.pdf" % pdfoutname, rasterized=True)

    # -------------------------
    # Add Np, Nn and R to table
    # -------------------------

    # This allows me not to calculate R every time I want to do some plot analysis,
    # but just read it from the file
    if saverel:
        if not (docontour or dostats):
            Nps = Np(pars[:, pos]) * Npos
            Nns = Nn(pars[:, pos]) * Nneg
        Np = np.zeros((data.shape[0], ))
        Np[pos] = Nps
        Nn = np.zeros((data.shape[0], ))
        Nn[pos] = Nns
        R = -np.ones((data.shape[0], ))  # R will be -1 for negative sources
        # Set R to zero for positive sources if R < 0 because of Nn > Np
        R[pos] = np.maximum(0, (Np[pos] - Nn[pos]) / Np[pos])
        data = np.concatenate(
            (data, Np.reshape(-1, 1), Nn.reshape(-1, 1), R.reshape(-1, 1)),
            axis=1)

    data = [list(jj) for jj in list(data)]
    return data, ids[pos][reliable].astype(int)
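
A minimal, self-contained sketch of the reliability idea at the heart of the routine above: estimate the density of positive and negative sources in parameter space with a kernel density estimator and compute R = (P - N) / P at the positions of the positive sources. It uses scipy's standard gaussian_kde rather than SoFiA's gaussian_kde_set_covariance, and the toy parameter values are made up.

import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
# Toy 2-D parameter space (e.g. log snr_sum, log snr_max); shape (n_dim, n_sources)
pos = rng.normal([1.0, 0.5], 0.3, size=(200, 2)).T   # positive sources
neg = rng.normal([0.4, 0.1], 0.3, size=(150, 2)).T   # negative sources

Np = gaussian_kde(pos)
Nn = gaussian_kde(neg)

# Source counts of positives and negatives at the positions of the positive sources
P = Np(pos) * pos.shape[1]
N = Nn(pos) * neg.shape[1]
R = np.maximum(0.0, (P - N) / P)   # reliability, clipped at zero
print((R >= 0.99).sum(), "of", pos.shape[1], "positive sources deemed reliable")
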
Example #9
def add_wcs_coordinates(objects, catParNames, catParFormt, catParUnits,
                        Parameters):
    try:
        hdulist = fits.open(Parameters["import"]["inFile"])
        header = hdulist[0].header
        hdulist.close()

        # Fix headers where "per second" is written "/S" instead of "/s"
        # (assuming they mean "per second" and not "per Siemens").
        if "cunit3" in header and "/S" in header["cunit3"]:
            err.warning("Converting '/S' to '/s' in CUNIT3.")
            header["cunit3"] = header["cunit3"].replace("/S", "/s")

        # Check if there is a Nmap/GIPSY FITS header keyword value present
        gipsyKey = [
            k for k in ["FREQ-OHEL", "FREQ-OLSR", "FREQ-RHEL", "FREQ-RLSR"]
            if (k in [header[key] for key in header if ("CTYPE" in key)])
        ]
        if gipsyKey:
            err.message(
                "GIPSY header found. Trying to convert to FITS standard.")
            from astropy.wcs import Wcsprm
            header = fix_gipsy_header(header)
            wcsin = Wcsprm(str(header))
            wcsin.sptr("VOPT-F2W")
            #if header["naxis"] == 4:
            #	objects = np.concatenate((objects, wcsin.p2s(np.concatenate((objects[:, catParNames.index("x"):catParNames. index("x") + 3], np.zeros((objects.shape[0], 1))), axis=1), 0)["world"][:,:-1]), axis=1)
            #else:
            #	objects = np.concatenate((objects, wcsin.p2s(objects[:, catParNames.index("x"):catParNames.index("x") + 3], 0)["world"]), axis=1)
            objects = np.concatenate(
                (objects,
                 wcsin.p2s(
                     objects[:,
                             catParNames.index("x"):catParNames.index("x") +
                             3], 0)["world"]),
                axis=1)
            catParUnits = tuple(
                list(catParUnits) +
                [str(cc).replace(" ", "") for cc in wcsin.cunit])
            catParNames = tuple(
                list(catParNames) + [(cc.split("--")[0]).lower()
                                     for cc in wcsin.ctype])
            catParFormt = tuple(
                list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])

        else:
            # Constrain the RA axis reference value CRVAL_ to be between 0 and 360 deg
            rafound = 0
            for kk in range(header["naxis"]):
                if header["ctype1"][:2] == "RA":
                    rafound = 1
                    break
            if rafound:
                if header["crval%i" % (kk + 1)] < 0:
                    err.warning("Adding 360 deg to RA reference value.")
                    header["crval%i" % (kk + 1)] += 360
                elif header["crval%i" % (kk + 1)] > 360:
                    err.warning("Subtracting 360 deg from RA reference value.")
                    header["crval%i" % (kk + 1)] -= 360

            #if header["naxis"] == 4: wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL, wcs.WCSSUB_STOKES])
            #else: wcsin = wcs.WCS(header, naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL])
            wcsin = wcs.WCS(header,
                            naxis=[wcs.WCSSUB_CELESTIAL, wcs.WCSSUB_SPECTRAL])
            xyz = objects[:,
                          catParNames.index("x"):catParNames.index("x") +
                          3].astype(float)
            if "cellscal" in header and header["cellscal"] == "1/F":
                err.warning(
                    "CELLSCAL keyword with value of 1/F found.\n"
                    "Will account for varying pixel scale in WCS coordinate calculation."
                )
                x0, y0 = header["crpix1"] - 1, header["crpix2"] - 1
                # Will calculate the pixscale factor of each channel as:
                # pixscale = ref_frequency / frequency
                if header["ctype3"] == "VELO-HEL":
                    pixscale = (1 - header["crval3"] / scipy.constants.c) / (
                        1 - (((xyz[:, 2] + 1) - header["crpix3"]) *
                             header["cdelt3"] + header["crval3"]) /
                        scipy.constants.c)
                else:
                    err.warning(
                        "Cannot convert 3rd axis coordinates to frequency. Ignoring the effect of CELLSCAL = 1/F."
                    )
                    pixscale = 1.0
                xyz[:, 0] = (xyz[:, 0] - x0) * pixscale + x0
                xyz[:, 1] = (xyz[:, 1] - y0) * pixscale + y0
            #if header["naxis"] == 4: objects = np.concatenate((objects, wcsin.wcs_pix2world(np.concatenate((xyz, np.zeros((objects.shape[0], 1))), axis=1), 0)[:, :-1]), axis=1)
            #else: objects = np.concatenate((objects, wcsin.wcs_pix2world(xyz, 0)), axis=1)
            objects = np.concatenate((objects, wcsin.wcs_pix2world(xyz, 0)),
                                     axis=1)
            catParUnits = tuple(
                list(catParUnits) +
                [str(cc).replace(" ", "") for cc in wcsin.wcs.cunit])
            catParNames = tuple(
                list(catParNames) + [(cc.split("--")[0]).lower()
                                     for cc in wcsin.wcs.ctype])
            catParFormt = tuple(
                list(catParFormt) + ["%15.7e", "%15.7e", "%15.7e"])
        #if header["naxis"] == 4:
        #	catParUnits = catParUnits[:-1]
        #	catParNames= catParNames[:-1]
        err.message("WCS coordinates added to catalogue.")

        # Create IAU-compliant source name:
        # WARNING: This currently assumes a regular, ≥ 2-dim. data cube where the first two axes are longitude and latitude.
        n_src = objects.shape[0]
        n_par = objects.shape[1]

        iau_names = np.empty([n_src, 1], dtype=object)

        if header["ctype1"][:4] == "RA--":
            # Equatorial coordinates; try to figure out equinox:
            iau_coord = "equ"
            if "equinox" in header:
                if int(header["equinox"]) >= 2000: iau_equinox = "J"
                else: iau_equinox = "B"
            elif "epoch" in header:
                # Assume that EPOCH has been abused to record the equinox:
                if int(header["epoch"]) >= 2000: iau_equinox = "J"
                else: iau_equinox = "B"
            else:
                # Equinox undefined:
                iau_equinox = "X"
        elif header["ctype1"][:4] == "GLON":
            # Galactic coordinates:
            iau_coord = "gal"
            iau_equinox = "G"
        else:
            # Unsupported coordinate system:
            iau_coord = ""
            iau_equinox = ""

        for src in xrange(n_src):
            lon = objects[src][n_par - 3]
            lat = objects[src][n_par - 2]

            if iau_coord == "equ":
                ra = Longitude(lon, unit=u.deg)
                dec = Latitude(lat, unit=u.deg)
                iau_pos = ra.to_string(unit=u.h,
                                       decimal=False,
                                       sep="",
                                       precision=2,
                                       alwayssign=False,
                                       pad=True,
                                       fields=3)
                iau_pos += dec.to_string(unit=u.deg,
                                         decimal=False,
                                         sep="",
                                         precision=1,
                                         alwayssign=True,
                                         pad=True,
                                         fields=3)
            else:
                iau_pos = "{0:08.4f}".format(lon)
                if lat < 0.0: iau_pos += "-"
                else: iau_pos += "+"
                iau_pos += "{0:07.4f}".format(abs(lat))

            iau_names[src][0] = "SoFiA " + iau_equinox + iau_pos

        objects = np.concatenate((objects, iau_names), axis=1)
        catParUnits = tuple(list(catParUnits) + ["-"])
        catParNames = tuple(list(catParNames) + ["name"])
        catParFormt = tuple(list(catParFormt) + ["%30s"])
    except:
        err.warning("WCS conversion of parameters failed.")

    return (objects, catParNames, catParFormt, catParUnits)
Example #10
def write_catalog_from_array(mode, objects, catHeader, catUnits, catFormat, parList, outName, flagCompress, flagOverwrite, flagUncertainties):
	# Check output format and compression
	availableModes = ["ASCII", "XML", "SQL"]
	if mode not in availableModes:
		err.warning("Unknown catalogue format: " + str(mode) + ". Defaulting to ASCII.")
		mode = "ASCII"
	modeIndex = availableModes.index(mode)
	
	if flagCompress: outName += ".gz"
	err.message("Writing " + availableModes[modeIndex] + " catalogue: " + outName + ".")
	
	# Exit if file exists and overwrite flag is set to false
	func.check_overwrite(outName, flagOverwrite, fatal=True)
	
	# Do we need to write all parameters?
	if parList == ["*"] or not parList: parList = list(catHeader)
	
	# Remove undefined parameters
	parList = [item for item in parList if item in catHeader]
	
	# Remove statistical uncertainties if not requested
	if not flagUncertainties:
		for item in ["err_x", "err_y", "err_z", "err_w20", "err_w50"]:
			while item in parList: parList.remove(item)
	
	# Check whether there is anything left
	if not len(parList):
		err.error("No valid output parameters selected. No output catalogue written.", fatal=False)
		return
	
	
	# Create and write catalogue in requested format
	# -------------------------------------------------------------------------
	if mode == "XML":
		# Define basic XML header information
		votable          = Element("VOTABLE")
		resource         = SubElement(votable, "RESOURCE", name="SoFiA catalogue (version %s)" % sofia_version)
		description      = SubElement(resource, "DESCRIPTION")
		description.text = "Source catalogue from the Source Finding Application (SoFiA) version %s" % sofia_version
		coosys           = SubElement(resource, "COOSYS", ID="J2000")
		table            = SubElement(resource, "TABLE", ID="sofia_cat", name="sofia_cat")
		
		# Load list of parameters and unified content descriptors (UCDs)
		ucdList = {}
		fileUcdPath = os.environ["SOFIA_PIPELINE_PATH"]
		fileUcdPath = fileUcdPath.replace("sofia_pipeline.py", "SoFiA_source_parameters.dat")
		
		try:
			with open(fileUcdPath) as fileUcd:
				for line in fileUcd:
					(key, value) = line.split()
					ucdList[key] = value
		except:
			err.warning("Failed to read UCD file.")
		
		# Create parameter fields
		for par in parList:
			ucdEntity = ucdList[par] if par in ucdList else ""
			index = list(catHeader).index(par)
			if catFormat[index] == "%30s":
				field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="char", arraysize="30", unit=catUnits[index])
			else:
				field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="float", unit=catUnits[index])
		
		# Create data table entries
		data = SubElement(table, "DATA")
		tabledata = SubElement(data, "TABLEDATA")
		
		for obj in objects:
			tr = SubElement(tabledata, "TR")
			for par in parList:
				td = SubElement(tr, "TD")
				index = list(catHeader).index(par)
				td.text = (catFormat[index] % obj[index]).strip()
		
		# Write XML catalogue:
		try:
			f1 = gzopen(outName, "wb") if flagCompress else open(outName, "w")
		except:
			err.error("Failed to write to XML catalogue: " + outName + ".", fatal=False)
			return
		f1.write(prettify(votable))
		#f1.write(tostring(votable, "utf-8"))  # without prettifying, which is faster and uses much less memory
		f1.close()
	
	# -----------------------------------------------------------------End-XML-
	
	elif mode == "SQL":
		# Record if there is an ID column in the catalogue
		# (if no ID is present, we will later create one for use as primary key)
		noID = "id" not in parList
		
		# Write some header information:
		content = "-- SoFiA catalogue (version %s)\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\n\n" % sofia_version
		
		# Construct and write table structure:
		flagProgress = False
		content += "CREATE TABLE IF NOT EXISTS `SoFiA-Catalogue` (\n"
		if noID: content += "  `id` INT NOT NULL,\n"
		for par in parList:
			index = list(catHeader).index(par)
			if flagProgress: content += ",\n"
			content += "  " + sqlHeaderItem(par) + sqlFormat(catFormat[index])
			flagProgress = True
		content += ",\n  PRIMARY KEY (`id`),\n  KEY (`id`)\n) DEFAULT CHARSET=utf8 COMMENT=\'SoFiA source catalogue\';\n\n"
		
		# Insert data:
		flagProgress = False
		content += "INSERT INTO `SoFiA-Catalogue` ("
		if noID: content += "`id`, "
		for par in parList:
			if flagProgress: content += ", "
			content += sqlHeaderItem(par)
			flagProgress = True
		content += ") VALUES\n"
		
		source_count = 0
		for obj in objects:
			flagProgress = False
			source_count += 1
			content += "("
			if noID: content += str(source_count) + ", "
			
			for par in parList:
				index = list(catHeader).index(par)
				if flagProgress: content += ", "
				content += sqlDataItem(obj[index], catFormat[index])
				flagProgress = True
			
			if(source_count < len(objects)): content += "),\n"
			else: content += ");\n"
		
		# Write catalogue
		try:
			fp = gzopen(outName, "wb") if flagCompress else open(outName, "w")
		except:
			err.error("Failed to write to SQL catalogue: " + outName + ".", fatal=False)
			return
		fp.write(content)
		fp.close()
	
	# -----------------------------------------------------------------End-SQL-
	
	else: # mode == "ASCII" by default
		# Determine header sizes based on variable-length formatting
		lenCathead = []
		for j in catFormat: lenCathead.append(int(j.split("%")[1].split("e")[0].split("f")[0].split("i")[0].split("d")[0].split(".")[0].split("s")[0]) + 1)
		
		# Create header
		headerName = ""
		headerUnit = ""
		headerCol  = ""
		outFormat  = ""
		colCount   =  0
		header     = "SoFiA catalogue (version %s)\n" % sofia_version
		
		for par in parList:
			index = list(catHeader).index(par)
			headerName += catHeader[index].rjust(lenCathead[index])
			headerUnit += catUnits[index].rjust(lenCathead[index])
			headerCol  += ("(%i)" % (colCount + 1)).rjust(lenCathead[index])
			outFormat  += catFormat[index] + " "
			colCount += 1
		header += headerName[3:] + '\n' + headerUnit[3:] + '\n' + headerCol[3:]
		
		# Create catalogue
		outObjects = []
		for obj in objects:
			outObjects.append([])
			for par in parList: outObjects[-1].append(obj[list(catHeader).index(par)])
		
		# Write ASCII catalogue
		try:
			np.savetxt(outName, np.array(outObjects, dtype=object), fmt=outFormat, header=header)
		
		except:
			err.error("Failed to write to ASCII catalogue: " + outName + ".", fatal=False)
			return
	
	# ---------------------------------------------------------------End-ASCII-
	
	return
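The SQL branch above relies on three small helpers, sqlHeaderItem(), sqlFormat() and sqlDataItem(), which are defined elsewhere in SoFiA. A minimal sketch of what such helpers might look like is given below; this is an assumption for illustration, not the actual SoFiA implementation.

# --- Illustrative sketch only; the real SoFiA helpers may differ ---
def sqlHeaderItem(par):
	# Quote the column name with backticks, e.g. `f_sum`
	return "`" + str(par) + "`"

def sqlFormat(fmt):
	# Map a printf-style catalogue format onto an SQL column type
	if "s" in fmt: return " VARCHAR(256)"
	if "i" in fmt or "d" in fmt: return " INT"
	return " DOUBLE"

def sqlDataItem(value, fmt):
	# Quote strings; write numbers using the catalogue format
	if "s" in fmt: return "'" + str(value).strip() + "'"
	return (fmt % value).strip()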
Example #11
0
def dilate(cube, mask, objects, cathead, Parameters):
	dilateThreshold = Parameters["parameters"]["dilateThreshold"]
	dilatePixMax = Parameters["parameters"]["dilatePixMax"]
	dilateChanMax = Parameters["parameters"]["dilateChanMax"]
	
	# Stops dilating when (flux_new - flux_old) / flux_new < dilateThreshold
	sourceIDs = np.unique(mask)
	# remove first element which should be zero
	if sourceIDs[0] == 0:
		sourceIDs = np.delete(sourceIDs,0)
	
	for i in range(0, len(sourceIDs)):
		obj = objects[i]
		xmin = max(0, obj[list(cathead).index("x_min")] - dilatePixMax)
		xmax = min(cube.shape[2] - 1, obj[list(cathead).index("x_max")] + dilatePixMax)
		ymin = max(0, obj[list(cathead).index("y_min")] - dilatePixMax)
		ymax = min(cube.shape[1] - 1, obj[list(cathead).index("y_max")] + dilatePixMax)
		zmin = max(0, obj[list(cathead).index("z_min")] - dilateChanMax)
		zmax = min(cube.shape[0] - 1, obj[list(cathead).index("z_max")] + dilateChanMax)
		
		[zmin, zmax, ymin, ymax, xmin, xmax] = map(int, [zmin, zmax, ymin, ymax, xmin, xmax])
		
		objcube = cube[zmin:zmax+1, ymin:ymax+1, xmin:xmax+1].copy()
		objmask = mask[zmin:zmax+1, ymin:ymax+1, xmin:xmax+1].copy()
		allmask = mask[zmin:zmax+1, ymin:ymax+1, xmin:xmax+1].copy()
		otherobjs = (allmask > 0) * (allmask != sourceIDs[i])
		
		if (otherobjs).sum():
			# Ensure that objects with different source IDs within dilatePixMax, dilateChanMax are not
			# included in the flux growth calculation
			err.warning("Object {0:d} has possible overlapping objects within {1:d} pix, {2:d} chan.".format(sourceIDs[i], dilatePixMax, dilateChanMax))
			objcube[(allmask > 0) * (allmask != sourceIDs[i])] = 0
		
		fluxes = []
		
		# Loop through Z dilation kernels until the flux converges or the maximum allowed Z dilation is reached
		for dilchan in range(dilateChanMax + 1):
			dd = dilchan * 2 + 1
			dilstruct = np.ones((dd,1,1))
			fluxes.append(objcube[nd.morphology.binary_dilation(objmask==sourceIDs[i], structure=dilstruct)].sum())
			if dilchan > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
				dilchan -= 1
				break
		# Pick the best Z dilation kernel for current object and update mask
		dd = dilchan * 2 + 1
		dilstruct = np.ones((dd,1,1))
		# Only grow the mask of object sourceIDs[i] even when other objects are present in objmask
		objmask[nd.morphology.binary_dilation(objmask==sourceIDs[i], structure=dilstruct).astype(int) == 1] = sourceIDs[i]

		# Loop through XY dilation kernels until the flux converges or the maximum allowed XY dilation is reached
		for dilpix in range(dilatePixMax + 1):
			dd = dilpix * 2 + 1
			dilstruct = (np.sqrt(((np.indices((dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
			dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
			fluxes.append(objcube[nd.morphology.binary_dilation(objmask==sourceIDs[i], structure=dilstruct)].sum())
			if dilpix > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
				dilpix -= 1
				break
		# Pick the best XY dilation kernel for current object and update mask
		dd = dilpix * 2 + 1
		dilstruct = (np.sqrt(((np.indices((dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
		dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
		# Only grow the mask of object sourceIDs[i] even when other objects are present in objmask
		objmask[nd.morphology.binary_dilation(objmask==sourceIDs[i], structure=dilstruct).astype(int) == 1] = sourceIDs[i]
		
		err.message("Mask of source {0:d} dilated by {2:d} chan and then by {1:d} pix.".format(sourceIDs[i], dilpix, dilchan))
		# Put back in objmask objects != sourceIDs[i] that may have been inside objmask before 
		# dilation or may have been temporarily replaced by the dilated object sourceIDs[i]
		if (otherobjs).sum():
			objmask[otherobjs] = allmask[otherobjs]
		mask[zmin:zmax+1, ymin:ymax+1, xmin:xmax+1] = objmask
		
		# Update n_pix, x_geo and n_chan
		n_pix = objmask[objmask == sourceIDs[i]].sum() / sourceIDs[i]
		ind = np.vstack(np.where(objmask == sourceIDs[i]))
		cgeo = (ind.sum(axis=1)).astype(float) / float(n_pix)
		x_geo, y_geo, z_geo = cgeo[2] + xmin, cgeo[1] + ymin, cgeo[0] + zmin
		zmin, zmax = min(ind[0]), max(ind[0]) + 1
		n_chan = zmax - zmin
		
		# Update n_los
		objmask[objmask != sourceIDs[i]] = 0
		maskSumA0 = objmask.sum(axis=0)
		maskSumA0[maskSumA0 > 1] = 1
		n_los = maskSumA0.sum()
		

		del objcube
		del objmask
		del allmask
		del otherobjs
	
		objects[i,list(cathead).index("x_min")]  = max(0, obj[list(cathead).index("x_min")] - dilpix)
		objects[i,list(cathead).index("x_max")]  = min(cube.shape[2] - 1, obj[list(cathead).index("x_max")] + dilpix)
		objects[i,list(cathead).index("y_min")]  = max(0, obj[list(cathead).index("y_min")] - dilpix)
		objects[i,list(cathead).index("y_max")]  = min(cube.shape[1] - 1, obj[list(cathead).index("y_max")] + dilpix)
		objects[i,list(cathead).index("z_min")]  = max(0, obj[list(cathead).index("z_min")] - dilchan)
		objects[i,list(cathead).index("z_max")]  = min(cube.shape[0] - 1, obj[list(cathead).index("z_max")] + dilchan)
		objects[i,list(cathead).index("n_pix")]  = n_pix
		objects[i,list(cathead).index("n_chan")] = n_chan
		objects[i,list(cathead).index("n_los")]  = n_los
		objects[i,list(cathead).index("x_geo")]  = x_geo
		objects[i,list(cathead).index("y_geo")]  = y_geo
		objects[i,list(cathead).index("z_geo")]  = z_geo
		
		
        
	return mask, objects
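The XY dilation kernel used in dilate() above is a flat disc of radius dilpix placed in a single spectral plane. A small standalone check of that construction (requires only NumPy):

# --- Illustrative check of the disc-shaped XY dilation kernel ---
import numpy as np

dilpix = 2
dd = dilpix * 2 + 1
# Pixels within a distance of dilpix from the kernel centre
dilstruct = (np.sqrt(((np.indices((dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
dilstruct.resize((1, dd, dd))   # single plane along the spectral axis
print(dilstruct[0])
# [[0 0 1 0 0]
#  [0 1 1 1 0]
#  [1 1 1 1 1]
#  [0 1 1 1 0]
#  [0 0 1 0 0]]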
Example #12
0
def import_mask(maskFile, header, axis_size, subcube):
	err.message("Loading mask cube:\n  " + str(maskFile))
	
	try:
		f = fits.open(maskFile, memmap=False)
		header_mask = f[0].header
	except:
		err.error("Failed to read mask cube.")
	
	# Extract axis sizes and types
	n_axes_mask, axis_size_mask, axis_type_mask = extract_axis_size(header_mask)
	
	# Ensure correct dimensionality
	check_cube_dimensions(n_axes_mask, axis_size_mask, cube_name="mask cube", min_dim = 1, max_dim = 4)
	
	# 1-D spectrum
	if n_axes_mask == 1:
		err.warning("Mask cube has 1 axis; interpreted as spectrum.\nAdding first and second axis.")
		err.ensure(header_mask["CRVAL1"] == header["CRVAL1"], "Input cube and mask are not on the same WCS grid.")
		
		if len(subcube) == 6:
			if header_mask["NAXIS1"] == axis_size[2]:
				err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
				mask = np.reshape(f[0].data, (-1, 1, 1))
			elif header_mask["NAXIS1"] == fullshape[0]:
				err.message("  Subcube selection applied to input mask cube.")
				mask = np.reshape(f[0].section[subcube[4]:subcube[5]], (-1, 1, 1))
			else:
				err.error("Data subcube does not match size of mask subcube or full mask.")
		elif not len(subcube):
			mask = np.reshape(f[0].data, (-1, 1, 1))
		else:
			err.error("The subcube list must have 6 entries ({0:d} given).".format(len(subcube)))
	
	# 2-D image
	elif n_axes_mask == 2:
		err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"],
			"Input cube and mask are not on the same WCS grid.")
		
		if len(subcube) == 6 or len(subcube) == 4:
			if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1]:
				err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
				mask = np.array([f[0].data])
			elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1]:
				err.message("  Subcube selection applied to input mask cube.")
				mask = np.array([f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
			else:
				err.error("Data subcube does not match size of mask subcube or full mask.")
		else: mask = np.array([f[0].data])
	
	# 3-D cube
	elif n_axes_mask == 3:
		err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"] and header_mask["CRVAL3"] == header["CRVAL3"], "Input cube and mask are not on the same WCS grid.")
		
		if len(subcube) == 6:
			if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1] and header_mask["NAXIS3"] == axis_size[2]:
				err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
				mask = f[0].data
			elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1] and header_mask["NAXIS3"] == fullshape[0]:
				err.message("  Subcube selection applied to input mask cube.")
				mask = f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
			else:
				err.error("Data subcube does not match size of mask subcube or full mask.")
		else: mask = f[0].data
	
	# 4-D hypercube
	else:
		err.ensure(header_mask["CRVAL1"] == header["CRVAL1"] and header_mask["CRVAL2"] == header["CRVAL2"] and header_mask["CRVAL3"] == header["CRVAL3"], "Input cube and mask are not on the same WCS grid.")
		
		if len(subcube) == 6:
			if header_mask["NAXIS1"] == axis_size[0] and header_mask["NAXIS2"] == axis_size[1] and header_mask["NAXIS3"] == axis_size[2]:
				err.message("  Input mask cube already matches size of data subcube.\n  No subcube selection applied.")
				mask = f[0].section[0]
			elif header_mask["NAXIS1"] == fullshape[2] and header_mask["NAXIS2"] == fullshape[1] and header_mask["NAXIS3"] == fullshape[0]:
				err.message("  Subcube selection applied to input mask cube.")
				mask = f[0].section[0, subcube[4]:subcube[5], subcube[2]:subcube[3], subcube[0]:subcube[1]]
			else:
				err.error("Data subcube does not match size of mask subcube or full mask.")
		else: mask = f[0].section[0]
	
	mask[mask > 0] = 1
	f.close()
	err.message("Mask cube loaded.")
	
	# In all cases, convert mask to Boolean with masked pixels set to 1.
	return (mask > 0).astype(bool)
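A hedged usage sketch for import_mask(): the file names are placeholders, and the data header and axis sizes are assumed to have been read beforehand with astropy.

# --- Illustrative usage only; file names are placeholders ---
from astropy.io import fits

with fits.open("datacube.fits", memmap=False) as f:
	header = f[0].header
axis_size = [header["NAXIS1"], header["NAXIS2"], header["NAXIS3"]]

# Empty subcube list: load the full mask without any subcube selection
mask = import_mask("mask.fits", header, axis_size, [])
print(mask.shape, mask.dtype)   # Boolean mask on the same grid as the data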
Example #13
0
def SCfinder_mem(cube, header, t0, kernels=[[0, 0, 0, "b"],], threshold=3.5, sizeFilter=0, maskScaleXY=2.0, maskScaleZ=2.0, kernelUnit="pixel", edgeMode="constant", rmsMode="negative", fluxRange="all", verbose=0):
	# Define a few constants
	FWHM_CONST    = 2.0 * math.sqrt(2.0 * math.log(2.0))   # Conversion between sigma and FWHM of Gaussian function
	MAX_PIX_CONST = 1e+6                                   # Maximum number of pixels for noise calculation; sampling is set accordingly
	
	# Create binary mask array
	msk = np.zeros(cube.shape, bool)
	found_nan = np.isnan(cube).sum()
	
	# Set sampling sampleRms for rms measurement
	sampleRms = max(1, int((float(np.array(cube.shape).prod()) / MAX_PIX_CONST)**(1.0 / min(3, len(cube.shape)))))
	
	# Measure noise in original cube with sampling "sampleRms"
	rms = GetRMS(cube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms)
	
	# Loop over all kernels
	for kernel in kernels:
		[kx, ky, kz, kt] = kernel
		if verbose:
			err.linebreak()
			err.print_progress_time(t0)
			err.message("    Filter %s %s %s %s ..." % (kx, ky, kz, kt))
		if kernelUnit == "world" or kernelUnit == "w":
			if verbose: err.message("    Converting filter size to pixels ...")
			kx = abs(float(kx) / header["CDELT1"])
			ky = abs(float(ky) / header["CDELT2"])
			kz = abs(float(kz) / header["CDELT3"])
		if kt == "b":
			if kz != int(math.ceil(kz)) and verbose: err.warning("Rounding width of boxcar z kernel to next integer.")
			kz = int(math.ceil(kz))
		
		# Create a copy of the original cube
		smoothedCube = np.copy(cube)
		
		# Replace all NaNs with zero (and INFs with a finite number)
		if found_nan: smoothedCube = np.nan_to_num(smoothedCube)
		
		smoothedCube[(smoothedCube > 0) & (msk > 0)] = +maskScaleXY * rms
		smoothedCube[(smoothedCube < 0) & (msk > 0)] = -maskScaleXY * rms
		
		# Spatial smoothing
		if kx + ky:
			smoothedCube = ndimage.filters.gaussian_filter(smoothedCube, [0, ky / FWHM_CONST, kx / FWHM_CONST], mode=edgeMode)
		
		# Spectral smoothing
		if kz:
			if   kt == "b": smoothedCube = ndimage.filters.uniform_filter1d(smoothedCube, kz, axis=0, mode=edgeMode)
			elif kt == "g": smoothedCube = ndimage.filters.gaussian_filter1d(smoothedCube, kz / FWHM_CONST, axis=0, mode=edgeMode)
		
		# Re-insert the NaNs (but not the INFs) taken out earlier
		if found_nan: smoothedCube[np.isnan(cube)] = np.nan
		
		# Calculate the RMS of the smoothed cube:
		smoothedrms = GetRMS(smoothedCube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms)
		
		# Get rid of the NaNs a second time
		#if found_nan: smoothedCube = np.nan_to_num(smoothedCube)
		# NOTE: This should not be necessary because any comparison with NaN will always yield False.
		#       Hence, NaN pixels will never be included in the mask below.
		
		# Add pixels above threshold to mask by setting bit 1
		with np.errstate(invalid="ignore"):
			msk = np.bitwise_or(msk, np.greater_equal(np.absolute(smoothedCube), threshold * smoothedrms))
		
		# Delete smoothed cube again
		del smoothedCube
	return msk
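A rough usage sketch for the smooth-and-clip finder defined above (this version returns the mask), assuming the function and its helpers (GetRMS, err) are available from the SoFiA module. The toy cube below is synthetic, and with kernelUnit set to "pixel" the header is never accessed.

# --- Illustrative usage with a synthetic cube (not SoFiA test data) ---
import numpy as np

np.random.seed(0)
cube = np.random.normal(0.0, 1.0, (50, 64, 64)).astype(np.float32)
cube[25, 32, 32] = 50.0   # one bright voxel

kernels = [[0, 0, 0, "b"], [3, 3, 0, "b"], [3, 3, 3, "b"]]
msk = SCfinder_mem(cube, header={}, t0=0, kernels=kernels, threshold=4.0, kernelUnit="pixel")
print(msk.sum(), "voxels above the detection threshold")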
Example #14
0
def readPipelineOptions(filename = "pipeline.options"):
	# Try to open parameter file
	try:
		f = open(filename, "r")
	except IOError as e:
		err.error("Failed to read parameter file: " + str(filename) + "\n" + str(e), fatal=True)
	
	# Extract lines from parameter file
	lines = f.readlines()
	f.close()
	
	# Remove leading/trailing whitespace and empty lines
	lines = [line.strip() for line in lines]
	
	# Check for version number
	for line in lines:
		if "# Creator: SoFiA" in line:
			par_file_version = line[17:]
			if par_file_version != sofia_version:
				err.warning(
					"The parameter file was created with a different version of SoFiA\n"
					"(" + str(par_file_version) + ") than the one you are currently using (" + str(sofia_version) + ").\n"
					"Some settings defined in the parameter file may not be recognised\n"
					"by SoFiA, which could lead to unexpected results.", frame=True)
	
	# Remove comments
	lines = [line for line in lines if len(line) > 0 and line[0] != "#"]
	
	# Some additional setup
	datatypes = allowedDataTypes()
	tasks = {}
	pedantic = True     # Controls whether to exit on unrecognised or redefined parameters
	pedantic_counter = 0
	
	# Loop over all lines:
	for line in lines:
		# Extract parameter name and value
		try:
			parameter, value = tuple(line.split("=", 1))
			parameter = parameter.strip()
			value = value.split("#")[0].strip()
			module, parname = tuple(parameter.split(".", 1))
		except:
			err.error("Failed to read parameter: " + str(line) + "\nExpected format: module.parameter = value", fatal=True)
		
		# Ensure that module and parameter names are not empty
		if len(module) < 1 or len(parname) < 1:
			err.error("Failed to read parameter: " + str(line) + "\nExpected format: module.parameter = value", fatal=True)
		
		subtasks = tasks
		if module not in subtasks: subtasks[module] = {}
		subtasks = subtasks[module]
		
		if parname in subtasks:
			err.warning("Multiple definitions of parameter " + str(parameter) + " encountered.\nIgnoring all additional definitions.")
			pedantic_counter += 1
			continue
		
		if parameter in datatypes:
			try:
				if datatypes[parameter]   == "bool":  subtasks[parname] = str2bool(value)
				elif datatypes[parameter] == "float": subtasks[parname] = float(value)
				elif datatypes[parameter] == "int":   subtasks[parname] = int(value)
				elif datatypes[parameter] == "array": subtasks[parname] = literal_eval(value)
				else: subtasks[parname] = str(value)
			except:
				err.error("Failed to parse parameter value:\n" + str(line) + "\nExpected data type: " + str(datatypes[parameter]), fatal=True)
			if parameter == "pipeline.pedantic": pedantic = subtasks[parname]  # Update 'pedantic' setting
		else:
			err.warning("Ignoring unknown parameter: " + str(parameter) + " = " + str(value))
			pedantic_counter += 1
			continue
	
	if pedantic and pedantic_counter:
		err.error("Multiply-defined or unrecognised parameter(s) encountered.\nPlease check your parameter file or set pipeline.pedantic = false\nto ignore unknown or already defined parameters.", fatal=True)
	
	return tasks
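A hedged example of the parameter-file format parsed above. The parameter names shown are illustrative; the set of recognised parameters and their data types comes from allowedDataTypes().

# --- Illustrative parameter file and call (names are examples only) ---
#
# Contents of a hypothetical pipeline.options file:
#
#   # Creator: SoFiA 1.3.2
#   pipeline.pedantic  =  true
#   import.inFile      =  datacube.fits   # input data cube
#   import.subcube     =  []
#
# Reading it back yields a nested dictionary keyed by module and parameter:
tasks = readPipelineOptions("pipeline.options")
print(tasks["import"]["inFile"])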
Example #15
0
def read_data(doSubcube,
              inFile,
              invertData,
              weightsFile,
              maskFile,
              sources,
              weightsFunction=None,
              subcube=[],
              subcubeMode="pixel",
              doFlag=False,
              flagRegions=False,
              flagFile="",
              cubeOnly=False):
    # Import the FITS file into a NumPy array for the cube and a dictionary for the header;
    # the data cube is converted into a 3-D array.

    # First check if the data file exists:
    if not os.path.isfile(inFile):
        sys.stderr.write(
            "ERROR: The specified data cube does not exist.\n       Cannot find: "
            + inFile + "\n")
        raise SystemExit(1)

    # Handle sub-cube if requested by user:
    # ALERT: Note that subcube boundaries for any axis are interpreted as [min, max) rather than [min, max] as expected by the user!!!
    #        This should be changed to avoid confusion. In addition, we should agree at some point whether SoFiA should be 0 or 1-based.
    if doSubcube:
        # Ensure that sub-cube specifications are as expected:
        if len(subcube) not in [4, 6] or subcubeMode not in ["pixel", "world"]:
            sys.stderr.write(
                "ERROR: import.subcubeMode can only be 'pixel' or 'world',\n       and import.subcube must have 4 or 6 entries.\n"
            )
            raise SystemExit(1)

        # Read the file header:
        from astropy import wcs
        hdulist = fits.open(inFile, memmap=False)
        header = hdulist[0].header
        hdulist.close()

        # Extract cube size information:
        axisSize = []
        for axis in range(min(3, header["NAXIS"])):
            axisSize.append(int(header["NAXIS%i" % (axis + 1)]))

        # Sub-cube in world coordinates:
        if subcubeMode == "world":
            sys.stdout.write(
                "Calculating subcube boundaries from input WCS centre and radius\n"
            )

            # Read WCS information:
            try:
                wcsin = wcs.WCS(header)
            except:
                sys.stderr.write(
                    "ERROR: Failed to read WCS information from data cube header.\n"
                )
                raise SystemExit(1)

            # Calculate cos(Dec) correction for RA range:
            if wcsin.wcs.cunit[0] == "deg" and wcsin.wcs.cunit[1] == "deg":
                corrfact = cos(subcube[1] / 180.0 * pi)

            if header["NAXIS"] == 4:
                subcube = wcsin.wcs_world2pix(
                    array([[
                        subcube[0] - subcube[3] / corrfact,
                        subcube[1] - subcube[4], subcube[2] - subcube[5], 0
                    ],
                           [
                               subcube[0] + subcube[3] / corrfact,
                               subcube[1] + subcube[4],
                               subcube[2] + subcube[5], 0
                           ]]), 0)[:, :3]
            elif header["NAXIS"] == 3:
                subcube = wcsin.wcs_world2pix(
                    array([[
                        subcube[0] - subcube[3] / corrfact,
                        subcube[1] - subcube[4], subcube[2] - subcube[5]
                    ],
                           [
                               subcube[0] + subcube[3] / corrfact,
                               subcube[1] + subcube[4], subcube[2] + subcube[5]
                           ]]), 0)
            elif header["NAXIS"] == 2:
                subcube = wcsin.wcs_world2pix(
                    array([[
                        subcube[0] - subcube[2] / corrfact,
                        subcube[1] - subcube[3]
                    ],
                           [
                               subcube[0] + subcube[2] / corrfact,
                               subcube[1] + subcube[3]
                           ]]), 0)
            else:
                sys.stderr.write("ERROR: Unsupported number of axes.\n")
                raise SystemExit(1)

            subcube = ravel(subcube, order="F")
            # make sure min pix coord is < max pix coord for all axes
            # this operation is meaningful because wcs_world2pix returns negative pixel coordinates only for pixels located before an axis' start
            # (i.e., negative pixel coordinates should not be interpreted as counting backward from an axis' end)
            if subcube[0] > subcube[1]:
                subcube[0], subcube[1] = subcube[1], subcube[0]
            if subcube[2] > subcube[3]:
                subcube[2], subcube[3] = subcube[3], subcube[2]
            if len(subcube) == 6:
                if subcube[4] > subcube[5]:
                    subcube[4], subcube[5] = subcube[5], subcube[4]
            # constrain subcube to be within the cube boundaries; if this is not possible then exit
            for axis in range(min(3, header["NAXIS"])):
                if ceil(subcube[1 + 2 * axis]) < 0 or floor(
                        subcube[2 * axis]) >= header["NAXIS%i" % (axis + 1)]:
                    sys.stderr.write(
                        "ERROR: The requested subcube is outside the input cube along axis %i \n"
                        % (axis))
                    raise SystemExit(1)
                else:
                    subcube[2 * axis] = max(0, floor(subcube[2 * axis]))
                    subcube[1 +
                            2 * axis] = min(header["NAXIS%i" % (axis + 1)] - 1,
                                            ceil(subcube[1 + 2 * axis])) + 1
            subcube = list(subcube.astype(int))

        # Sub-cube in pixel coordinates:
        else:
            # Ensure that pixel coordinates are integers:
            for ss in subcube:
                if type(ss) != int:
                    sys.stderr.write(
                        "ERROR: For subcubeMode = pixel, subcube boundaries must be integer.\n"
                    )
                    sys.stderr.write(
                        "       The %i-th coordinate is not an integer value.\n"
                        % subcube.index(ss))
                    raise SystemExit(1)

            # Ensure to be within cube boundaries:
            for axis in range(min(3, header["NAXIS"])):
                # Lower boundary:
                if subcube[2 * axis] < 0:
                    subcube[2 * axis] = 0
                    sys.stderr.write(
                        "WARNING: Adjusting lower subcube boundary to 0 for axis %i.\n"
                        % (axis + 1))
                elif subcube[2 * axis] >= axisSize[axis]:
                    subcube[2 * axis] = axisSize[axis] - 1
                    sys.stderr.write(
                        "WARNING: Adjusting lower subcube boundary to %i for axis %i.\n"
                        % (axisSize[axis] - 1, axis + 1))
                # Upper boundary:
                if subcube[2 * axis + 1] < 1:
                    subcube[2 * axis + 1] = 1
                    sys.stderr.write(
                        "WARNING: Adjusting upper subcube boundary to 1 for axis %i.\n"
                        % (axis + 1))
                elif subcube[2 * axis + 1] > axisSize[axis]:
                    subcube[2 * axis + 1] = axisSize[axis]
                    sys.stderr.write(
                        "WARNING: Adjusting upper subcube boundary to %i for axis %i.\n"
                        % (axisSize[axis], axis + 1))

            # Ensure that boundaries are internally consistent:
            for axis in range(min(3, header["NAXIS"])):
                if subcube[2 * axis] >= subcube[2 * axis + 1]:
                    sys.stderr.write(
                        "ERROR: Lower subcube boundary greater than upper subcube boundary.\n"
                    )
                    sys.stderr.write("       Please check your input.\n")
                    raise SystemExit(1)

        # Report final subcube boundaries:
        if len(subcube) == 4:
            sys.stdout.write("Loading subcube of " + str(inFile) +
                             "\n  defined by [x1 x2 y1 y2] = " + str(subcube) +
                             "\n")
        else:
            sys.stdout.write("Loading subcube of " + str(inFile) +
                             "\n  defined by [x1 x2 y1 y2 z1 z2] = " +
                             str(subcube) + "\n")
    else:
        sys.stdout.write("Loading cube " + str(inFile) + "\n")
        subcube = []

    # Open FITS file:
    try:
        f = fits.open(inFile, memmap=False)
        dict_Header = f[0].header
    except:
        sys.stderr.write("ERROR: Failed to load primary HDU of FITS file " +
                         str(inFile) + "\n")
        raise SystemExit(1)

    # Check whether the number of dimensions is acceptable and read data accordingly.
    # The default is three axes:
    if dict_Header["NAXIS"] == 3:
        print("The input cube has 3 axes:")
        print("type: " + str(dict_Header["CTYPE1"]) + " " +
              str(dict_Header["CTYPE2"]) + " " + str(dict_Header["CTYPE3"]))
        print("dimensions: " + str(dict_Header["NAXIS1"]) + " " +
              str(dict_Header["NAXIS2"]) + " " + str(dict_Header["NAXIS3"]))

        fullshape = [
            dict_Header["NAXIS3"], dict_Header["NAXIS2"], dict_Header["NAXIS1"]
        ]

        if len(subcube) == 6:
            np_Cube = f[0].section[subcube[4]:subcube[5],
                                   subcube[2]:subcube[3],
                                   subcube[0]:subcube[1]]
            dict_Header["CRPIX1"] -= subcube[0]
            dict_Header["CRPIX2"] -= subcube[2]
            dict_Header["CRPIX3"] -= subcube[4]
            dict_Header["NAXIS1"] = subcube[1] - subcube[0]
            dict_Header["NAXIS2"] = subcube[3] - subcube[2]
            dict_Header["NAXIS3"] = subcube[5] - subcube[4]
        elif not len(subcube):
            np_Cube = f[0].data
        else:
            sys.stderr.write(
                "ERROR: The subcube list must have 6 entries (%i given).\n" %
                len(subcube))
            raise SystemExit(1)
    # 4 axes:
    elif dict_Header["NAXIS"] == 4:
        if dict_Header["NAXIS4"] != 1:
            print("type: " + str(dict_Header["CTYPE1"]) + " " +
                  str(dict_Header["CTYPE2"]) + " " +
                  str(dict_Header["CTYPE3"]) + " " +
                  str(dict_Header["CTYPE4"]))
            print("dimensions: " + str(dict_Header["NAXIS1"]) + " " +
                  str(dict_Header["NAXIS2"]) + " " +
                  str(dict_Header["NAXIS3"]) + " " +
                  str(dict_Header["NAXIS4"]))
            sys.stderr.write("ERROR: The size of the 4th dimension is > 1.\n")
            raise SystemExit(1)
        else:
            fullshape = [
                dict_Header["NAXIS3"], dict_Header["NAXIS2"],
                dict_Header["NAXIS1"]
            ]

            if len(subcube) == 6:
                np_Cube = f[0].section[0, subcube[4]:subcube[5],
                                       subcube[2]:subcube[3],
                                       subcube[0]:subcube[1]]
                dict_Header["CRPIX1"] -= subcube[0]
                dict_Header["CRPIX2"] -= subcube[2]
                dict_Header["CRPIX3"] -= subcube[4]
                dict_Header["NAXIS1"] = subcube[1] - subcube[0]
                dict_Header["NAXIS2"] = subcube[3] - subcube[2]
                dict_Header["NAXIS3"] = subcube[5] - subcube[4]
            elif not len(subcube):
                np_Cube = f[0].section[0]
            else:
                sys.stderr.write(
                    "ERROR: The subcube list must have 6 entries (%i given); the 4th axis is ignored.\n"
                    % len(subcube))
                raise SystemExit(1)
    # 2 axes:
    elif dict_Header["NAXIS"] == 2:
        sys.stderr.write(
            "WARNING: The input cube has 2 axes, third axis added.\n")
        print("type: " + str(dict_Header["CTYPE1"]) + " " +
              str(dict_Header["CTYPE2"]))
        print("dimensions: " + str(dict_Header["NAXIS1"]) + " " +
              str(dict_Header["NAXIS2"]))

        fullshape = [dict_Header["NAXIS2"], dict_Header["NAXIS1"]]

        if len(subcube) == 4:
            np_Cube = array(
                [f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
            dict_Header["CRPIX1"] -= subcube[0]
            dict_Header["CRPIX2"] -= subcube[2]
            dict_Header["NAXIS1"] = subcube[1] - subcube[0]
            dict_Header["NAXIS2"] = subcube[3] - subcube[2]
        elif not len(subcube):
            np_Cube = array([f[0].data])
        else:
            sys.stderr.write(
                "ERROR: The subcube list must have 4 entries (%i given).\n" %
                len(subcube))
            raise SystemExit(1)
    # 1 axis:
    elif dict_Header["NAXIS"] == 1:
        sys.stderr.write(
            "ERROR: The input has 1 axis, this is probably a spectrum instead of a 2D/3D image.\n"
        )
        sys.stderr.write("       Type: " + str(dict_Header["CTYPE1"]) + "\n")
        sys.stderr.write("       Dimensions: " + str(dict_Header["NAXIS1"]) +
                         "\n")
        raise SystemExit(1)
    else:
        sys.stderr.write(
            "ERROR: The file has fewer than 1 or more than 4 dimensions.\n")
        raise SystemExit(1)

    f.close()

    # Check if cube needs to be inverted
    if invertData:
        np_Cube *= -1.0
        print("Inverting data cube to search for negative signals")

    # check whether the axes are in the expected order:
    #if dict_Header["CTYPE1"][0:2] != "RA" or dict_Header["CTYPE2"][0:3] != "DEC":
    #	sys.stderr.write("WARNING: The dimensions are not in the expected order.\n")

    print("The data cube has been loaded.")

    if not cubeOnly:
        # Apply weights file if provided:
        if weightsFile:
            # The original data are replaced with the weighted cube!
            # If weighting is being used, the data should be read in again during parameterisation.
            # check whether the weights cube exists:
            if not os.path.isfile(weightsFile):
                sys.stderr.write(
                    "ERROR: The defined weights cube does not exist.\n")
                sys.stderr.write("       Cannot find: " + weightsFile + "\n")
                raise SystemExit(1)
            else:
                # Scale the input cube with a weights cube
                # load the weights cube and convert it into a 3D array to be applied to the data 3D array
                # (note that the data has been converted into a 3D array above)
                print("Loading and applying weights cube: " + weightsFile)
                f = fits.open(weightsFile, memmap=False)
                dict_Weights_header = f[0].header
                if dict_Weights_header["NAXIS"] == 3:
                    if len(subcube) == 6:
                        np_Cube *= f[0].section[subcube[4]:subcube[5],
                                                subcube[2]:subcube[3],
                                                subcube[0]:subcube[1]]
                    else:
                        np_Cube *= f[0].data
                elif dict_Weights_header["NAXIS"] == 4:
                    if dict_Weights_header["NAXIS4"] != 1:
                        sys.stderr.write(
                            "ERROR: The 4th dimension has more than 1 value.\n"
                        )
                        raise SystemExit(1)
                    else:
                        sys.stderr.write(
                            "WARNING: The weights cube has 4 axes; first axis ignored.\n"
                        )
                        if len(subcube) == 6:
                            np_Cube *= f[0].section[0, subcube[4]:subcube[5],
                                                    subcube[2]:subcube[3],
                                                    subcube[0]:subcube[1]]
                        else:
                            np_Cube *= f[0].section[0]
                elif dict_Weights_header["NAXIS"] == 2:
                    sys.stderr.write(
                        "WARNING: The weights cube has 2 axes; third axis added.\n"
                    )
                    if len(subcube) == 6 or len(subcube) == 4:
                        np_Cube *= array([
                            f[0].section[subcube[2]:subcube[3],
                                         subcube[0]:subcube[1]]
                        ])
                    else:
                        np_Cube *= array([f[0].data])
                elif dict_Weights_header["NAXIS"] == 1:
                    sys.stderr.write(
                        "WARNING: The weights cube has 1 axis; interpreted as third axis; first and second axes added.\n"
                    )
                    if len(subcube) == 6:
                        np_Cube *= reshape(f[0].section[subcube[4]:subcube[5]],
                                           (-1, 1, 1))
                    elif not len(subcube):
                        np_Cube *= reshape(f[0].data, (-1, 1, 1))
                    else:
                        sys.stderr.write(
                            "ERROR: The subcube list must have 6 entries (%i given).\n"
                            % len(subcube))
                        raise SystemExit(1)
                else:
                    sys.stderr.write(
                        "ERROR: The weights cube has fewer than 1 or more than 4 dimensions.\n"
                    )
                    raise SystemExit(1)

                f.close()
                print("Weights cube loaded and applied.")

        # Else apply weights function if defined:
        elif weightsFunction:
            # WARNING: I'm not sure if there is a safe way to properly implement multiplication of a data array
            # WARNING: with a user-specified function in Python without the need for a whitelist, nested loops,
            # WARNING: or the creation of multiple copies of the cube.
            print("Evaluating function: %s" % weightsFunction)

            # Define whitelist of allowed character sequences:
            whitelist = [
                "x", "y", "z", "e", "E", "sin", "cos", "tan", "arcsin",
                "arccos", "arctan", "arctan2", "sinh", "cosh", "tanh",
                "arcsinh", "arccosh", "arctanh", "exp", "log", "sqrt",
                "square", "power", "absolute", "fabs", "sign"
            ]

            # Search for all keywords consisting of consecutive sequences of alphabetical characters:
            # NOTE: Explicit conversion to string is required unless readoptions.py is modified!
            keywordsFound = filter(
                None, re.split("[^a-zA-Z]+", str(weightsFunction)))

            # Check for non-whitelisted sequences:
            for keyword in keywordsFound:
                if keyword not in whitelist:
                    sys.stderr.write(
                        "ERROR: Unsupported keyword/function found in weights function:\n"
                    )
                    sys.stderr.write("         %s\n" % weightsFunction)
                    sys.stderr.write("       Please check your input.\n")
                    raise SystemExit(1)

            # ALERT: Why are we not applying the weights function continuously (channel-by-channel)?!?
            # hardcoded number of weights chunks
            Nz = 50
            # check that the number of weights z-chunks is at most equal to the total nr of chans
            Nz = min(Nz, np_Cube.shape[0])
            # calculate the size of each chunk along the Z axis (rounding to integer)
            Lz = int(round(float(np_Cube.shape[0]) / Nz))
            # calculate number of chunks really needed given above rounding
            Nz = int(ceil(float(np_Cube.shape[0]) / Lz))
            print(
                "Evaluating and applying weights function in %i chunks along the Z axis"
                % Nz)
            for zz in range(Nz):
                # last chunk may have different length than the others
                if zz == Nz - 1:
                    z, y, x = indices((np_Cube.shape[0] - Lz * zz,
                                       np_Cube.shape[1], np_Cube.shape[2]))
                else:
                    z, y, x = indices((Lz, np_Cube.shape[1], np_Cube.shape[2]))
                z += zz * Lz
                try:
                    # NOTE: eval() should be safe now as we don't allow for non-whitelisted sequences...
                    np_Cube[z, y, x] *= eval(str(weightsFunction))
                    # WARNING: There is no check here whether the expression to be evaluated is actually valid,
                    #          e.g. SoFiA will crash if the weights function is sqrt(-1). 'try' doesn't catch this!
                    #          Even if we set np.seterr(all="raise"), we still run into problems with expressions
                    #          that are valid but not floating-point numbers, e.g. sqrt((1,2)).
                except:
                    sys.stderr.write(
                        "ERROR: Failed to evaluate weights function:\n")
                    sys.stderr.write("         %s\n" % weightsFunction)
                    sys.stderr.write("       Please check your input.\n")
                    raise SystemExit(1)
            print("Function-weighted cube created.\n")

        if doFlag:
            # Apply blanks cube if provided:
            if flagFile:
                # check whether the weights cube exists:
                if not os.path.isfile(flagFile):
                    sys.stderr.write(
                        "ERROR: The defined flag cube does not exist.\n")
                    sys.stderr.write("       Cannot find: " + flagFile + "\n")
                    raise SystemExit(1)

                # Loading and applying flag file
                print("Loading and applying flag cube: " + flagFile)
                f = fits.open(flagFile, memmap=False)
                dict_Flag_header = f[0].header
                if dict_Flag_header["NAXIS"] == 3:
                    if len(subcube) == 6:
                        flags = f[0].section[subcube[4]:subcube[5],
                                             subcube[2]:subcube[3],
                                             subcube[0]:subcube[1]]
                        np_Cube[isnan(flags)] = nan
                    else:
                        np_Cube[isnan(f[0].data)] = nan
                elif dict_Flag_header["NAXIS"] == 4:
                    if dict_Flag_header["NAXIS4"] != 1:
                        sys.stderr.write(
                            "ERROR: The 4th dimension has more than 1 value.\n"
                        )
                        raise SystemExit(1)
                    else:
                        sys.stderr.write(
                            "WARNING: The flag cube has 4 axes; first axis ignored.\n"
                        )
                        if len(subcube) == 6:
                            flags = f[0].section[0, subcube[4]:subcube[5],
                                                 subcube[2]:subcube[3],
                                                 subcube[0]:subcube[1]]
                            np_Cube[isnan(flags)] = nan
                        else:
                            np_Cube[isnan(f[0].section[0])] = nan
                elif dict_Flag_header["NAXIS"] == 2:
                    sys.stderr.write(
                        "WARNING: The flag cube has 2 axes; third axis added.\n"
                    )
                    if len(subcube) == 6 or len(subcube) == 4:
                        flags = f[0].section[subcube[2]:subcube[3],
                                             subcube[0]:subcube[1]]
                        for channel in range(np_Cube.shape[0]):
                            np_Cube[channel][isnan(flags)] = nan
                    else:
                        for channel in range(np_Cube.shape[0]):
                            np_Cube[channel][isnan(f[0].data)] = nan
                else:
                    sys.stderr.write(
                        "ERROR: The flag cube has fewer than 1 or more than 4 dimensions.\n"
                    )
                    raise SystemExit(1)

                f.close()
                print("Flag cube loaded and applied.")
            # Apply flag regions if provided
            if flagRegions:
                flag(np_Cube, flagRegions)

        if maskFile:
            # check whether the mask cube exists:
            if not os.path.isfile(maskFile):
                sys.stderr.write(
                    "ERROR: The specified mask cube does not exist.\n")
                sys.stderr.write("       Cannot find: " + maskFile + "\n")
                raise SystemExit(1)

            else:
                print("Loading mask cube: " + maskFile)
                g = fits.open(maskFile,
                              memmap=False,
                              do_not_scale_image_data=True)
                dict_Mask_header = g[0].header
                if dict_Mask_header["NAXIS"] == 3:
                    if dict_Mask_header["CRVAL1"] != dict_Header[
                            "CRVAL1"] or dict_Mask_header[
                                "CRVAL2"] != dict_Header[
                                    "CRVAL2"] or dict_Mask_header[
                                        "CRVAL3"] != dict_Header["CRVAL3"]:
                        err.warning(
                            "Input cube and mask may not be on the same WCS grid."
                        )
                        err.warning(
                            str(dict_Mask_header["CRVAL1"]) + " " +
                            str(dict_Header["CRVAL1"]) + " " +
                            str(dict_Mask_header["CRVAL2"]) + " " +
                            str(dict_Header["CRVAL2"]) + " " +
                            str(dict_Mask_header["CRVAL3"]) + " " +
                            str(dict_Header["CRVAL3"]))
                    if len(subcube) == 6:
                        if dict_Mask_header["NAXIS1"] == np_Cube.shape[
                                2] and dict_Mask_header[
                                    "NAXIS2"] == np_Cube.shape[
                                        1] and dict_Mask_header[
                                            "NAXIS3"] == np_Cube.shape[0]:
                            print(
                                "Subcube selection NOT applied to input mask. The full input mask cube matches size of the selected data subcube."
                            )
                            mask = g[0].data
                        elif dict_Mask_header["NAXIS1"] == fullshape[
                                2] and dict_Mask_header["NAXIS2"] == fullshape[
                                    1] and dict_Mask_header[
                                        "NAXIS3"] == fullshape[0]:
                            print(
                                "Subcube selection applied also to input mask. The mask subcube matches size of the selected data subcube."
                            )
                            mask = g[0].section[subcube[4]:subcube[5],
                                                subcube[2]:subcube[3],
                                                subcube[0]:subcube[1]]
                        else:
                            sys.stderr.write(
                                "ERROR: Neither the full mask nor the subcube of the mask match size of the selected data subcube.\n"
                            )
                            raise SystemExit(1)
                    else:
                        mask = g[0].data
                elif dict_Mask_header["NAXIS"] == 4:
                    if dict_Mask_header["CRVAL1"] != dict_Header[
                            "CRVAL1"] or dict_Mask_header[
                                "CRVAL2"] != dict_Header[
                                    "CRVAL2"] or dict_Mask_header[
                                        "CRVAL3"] != dict_Header["CRVAL3"]:
                        err.warning(
                            "Input cube and mask may not be on the same WCS grid."
                        )
                    if dict_Mask_header["NAXIS4"] != 1:
                        sys.stderr.write(
                            "ERROR: The 4th dimension has more than 1 value.\n"
                        )
                        raise SystemExit(1)
                    elif len(subcube) == 6:
                        sys.stderr.write(
                            "WARNING: The mask cube has 4 axes; first axis ignored.\n"
                        )
                        if dict_Mask_header["NAXIS1"] == np_Cube.shape[
                                2] and dict_Mask_header[
                                    "NAXIS2"] == np_Cube.shape[
                                        1] and dict_Mask_header[
                                            "NAXIS3"] == np_Cube.shape[0]:
                            print(
                                "Subcube selection NOT applied to input mask. The full input mask cube matches size of the selected data subcube."
                            )
                            mask = g[0].section[0]
                        elif dict_Mask_header["NAXIS1"] == fullshape[
                                2] and dict_Mask_header["NAXIS2"] == fullshape[
                                    1] and dict_Mask_header[
                                        "NAXIS3"] == fullshape[0]:
                            print(
                                "Subcube selection applied also to input mask. The mask subcube matches size of the selected data subcube."
                            )
                            mask = g[0].section[0, subcube[4]:subcube[5],
                                                subcube[2]:subcube[3],
                                                subcube[0]:subcube[1]]
                        else:
                            sys.stderr.write(
                                "ERROR: Neither the full mask nor the subcube of the mask match size of the selected data subcube.\n"
                            )
                            raise SystemExit(1)
                    else:
                        mask = g[0].section[0]
                elif dict_Mask_header["NAXIS"] == 2:
                    if dict_Mask_header["CRVAL1"] != dict_Header[
                            "CRVAL1"] or dict_Mask_header[
                                "CRVAL2"] != dict_Header["CRVAL2"]:
                        err.warning(
                            "Input cube and mask may not be on the same WCS grid."
                        )
                    sys.stderr.write(
                        "WARNING: The mask cube has 2 axes; third axis added.\n"
                    )
                    if len(subcube) == 6 or len(subcube) == 4:
                        if dict_Mask_header["NAXIS1"] == np_Cube.shape[
                                2] and dict_Mask_header[
                                    "NAXIS2"] == np_Cube.shape[1]:
                            print(
                                "Subcube selection NOT applied to input mask. The full input mask cube matches size of the selected data subcube."
                            )
                            mask = array([g[0].data])
                        elif dict_Mask_header["NAXIS1"] == fullshape[
                                2] and dict_Mask_header["NAXIS2"] == fullshape[
                                    1]:
                            print(
                                "Subcube selection applied also to input mask. The mask subcube matches size of the selected data subcube."
                            )
                            mask = array([
                                g[0].section[subcube[2]:subcube[3],
                                             subcube[0]:subcube[1]]
                            ])
                        else:
                            sys.stderr.write(
                                "ERROR: Neither the full mask nor the subcube of the mask match size of the selected data subcube.\n"
                            )
                            raise SystemExit(1)
                    else:
                        mask = array([g[0].data])
                elif dict_Mask_header["NAXIS"] == 1:
                    sys.stderr.write(
                        "WARNING: The mask cube has 1 axis; interpreted as third axis; first and second axes added.\n"
                    )
                    if dict_Mask_header["CRVAL1"] != dict_Header["CRVAL1"]:
                        err.warning(
                            "Input cube and mask may not be on the same WCS grid."
                        )
                    if len(subcube) == 6:
                        if dict_Mask_header["NAXIS1"] == np_Cube.shape[0]:
                            print(
                                "Subcube selection NOT applied to input mask. The full input mask cube matches size of the selected data subcube."
                            )
                            mask = reshape(g[0].data, (-1, 1, 1))
                        elif dict_Mask_header["NAXIS1"] == fullshape[0]:
                            print(
                                "Subcube selection applied also to input mask. The mask subcube matches size of the selected data subcube."
                            )
                            mask = reshape(g[0].section[subcube[4]:subcube[5]],
                                           (-1, 1, 1))
                        else:
                            sys.stderr.write(
                                "ERROR: Neither the full mask nor the subcube of the mask match size of the selected data subcube.\n"
                            )
                            raise SystemExit(1)
                    elif not len(subcube):
                        mask = reshape(g[0].data, (-1, 1, 1))
                    else:
                        sys.stderr.write(
                            "ERROR: The subcube list must have 6 entries (%i given).\n"
                            % len(subcube))
                        raise SystemExit(1)
                else:
                    sys.stderr.write(
                        "ERROR: The mask cube has fewer than 1 or more than 4 dimensions.\n"
                    )
                    raise SystemExit(1)
                #mask[mask > 0] = 1
                g.close()
                print("Mask cube loaded.")

            # In all cases, evaluate import.sources to only keep specific source IDs
            # WARNING: This assumes that source IDs are positive!
            if isinstance(sources, list):
                if sources:
                    sys.stdout.write(
                        "Only the following sources from the mask are retained:\n  "
                        + str(sources) + "\n")
                    for sid in sources:
                        mask[mask == sid] *= -1
                    mask[mask > 0] *= 0
                    mask *= -1
                else:
                    sys.stdout.write(
                        "All sources from the mask will be retained.\n")
            else:
                sys.stderr.write(
                    "\nWARNING: Ignoring parameter 'import.sources'; value is not a valid list.\n\n"
                )

            # In all cases, convert mask to Boolean with masked pixels set to 1.
            #mask = (mask > 0).astype(bool)
        else:
            # Create an empty mask if none is provided.
            mask = zeros(np_Cube.shape, dtype=int32)

    if not cubeOnly:
        return np_Cube, dict_Header, mask, subcube
    else:
        return np_Cube, dict_Header
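A minimal usage sketch for read_data() with no weighting, flagging or masking applied; the file name is a placeholder.

# --- Illustrative usage only; the file name is a placeholder ---
np_Cube, dict_Header, mask, subcube = read_data(
    doSubcube=False, inFile="datacube.fits", invertData=False,
    weightsFile="", maskFile="", sources=[])
print(np_Cube.shape, mask.shape, subcube)   # empty mask, empty subcube list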
Example #16
0
File: pyfind.py  Project: SoFiA-Admin/SoFiA
def SCfinder_mem(cube, mask, header, t0, kernels=[[0, 0, 0, "b"],], threshold=3.5, sizeFilter=0, maskScaleXY=2.0, maskScaleZ=2.0, kernelUnit="pixel", edgeMode="constant", rmsMode="negative", fluxRange="all", verbose=0):
	# Define a few constants
	FWHM_CONST    = 2.0 * math.sqrt(2.0 * math.log(2.0))   # Conversion between sigma and FWHM of Gaussian function
	MAX_PIX_CONST = 1.0e+6                                 # Maximum number of pixels for noise calculation; sampling is set accordingly
	
	# Check for NaN in cube
	found_nan = np.isnan(cube).any()
	
	# Set sampling sampleRms for rms measurement
	sampleRms = max(1, int((float(cube.size) / MAX_PIX_CONST)**(1.0 / min(3, len(cube.shape)))))
	
	# Measure noise in original cube with sampling "sampleRms"
	rms = GetRMS(cube, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms)
	
	# Loop over all kernels
	for kernel in kernels:
		[kx, ky, kz, kt] = kernel
		if verbose:
			err.linebreak()
			err.print_progress_time(t0)
			err.message("    Filter {0:} {1:} {2:} {3:} ...".format(kx, ky, kz, kt))
		if kernelUnit == "world" or kernelUnit == "w":
			if verbose: err.message("    Converting filter size to pixels ...")
			kx = abs(float(kx) / header["CDELT1"])
			ky = abs(float(ky) / header["CDELT2"])
			kz = abs(float(kz) / header["CDELT3"])
		if kt == "b":
			if kz != int(math.ceil(kz)) and verbose: err.warning("Rounding width of boxcar z kernel to next integer.")
			kz = int(math.ceil(kz))
		
		# Create a copy of the original cube
		cube_smooth = np.copy(cube)
		
		# Replace all NaNs with zero
		if found_nan:
			cube_smooth[np.isnan(cube)] = 0.0
		
		cube_smooth[(cube_smooth > 0) & (mask > 0)] = maskScaleXY * rms
		cube_smooth[(cube_smooth < 0) & (mask > 0)] = -maskScaleXY * rms
		
		# Spatial smoothing
		if kx + ky:
			cube_smooth = ndimage.filters.gaussian_filter(cube_smooth, [0, ky / FWHM_CONST, kx / FWHM_CONST], mode=edgeMode)
		
		# Spectral smoothing
		if kz:
			if   kt == "b": cube_smooth = ndimage.filters.uniform_filter1d(cube_smooth, kz, axis=0, mode=edgeMode)
			elif kt == "g": cube_smooth = ndimage.filters.gaussian_filter1d(cube_smooth, kz / FWHM_CONST, axis=0, mode=edgeMode)
		
		# Re-insert the NaNs taken out earlier
		if found_nan:
			cube_smooth[np.isnan(cube)] = np.nan
		
		# Calculate the RMS of the smoothed cube:
		rms_smooth = GetRMS(cube_smooth, rmsMode=rmsMode, fluxRange=fluxRange, zoomx=1, zoomy=1, zoomz=1, verbose=verbose, sample=sampleRms)
		
		# Add pixels above threshold to mask by setting bit 1
		with np.errstate(invalid="ignore"):
			mask |= (np.absolute(cube_smooth) >= threshold * rms_smooth)
			#mask = np.bitwise_or(mask, np.greater_equal(np.absolute(cube_smooth), threshold * rms_smooth))
		
		# Delete smoothed cube again
		del cube_smooth
	return
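To make the noise-sampling step above concrete, here is the arithmetic for a large hypothetical cube; the interpretation that GetRMS strides by sampleRms along each axis is an assumption based on the comment about MAX_PIX_CONST.

# --- Worked example of the sampleRms computation (hypothetical cube size) ---
MAX_PIX_CONST = 1.0e+6
shape = (4096, 1024, 1024)   # (z, y, x); about 4.3e9 voxels in total
n_vox = shape[0] * shape[1] * shape[2]
sampleRms = max(1, int((float(n_vox) / MAX_PIX_CONST)**(1.0 / min(3, len(shape)))))
print(sampleRms)                 # -> 16
print(n_vox / sampleRms**3)      # ~1.05e6 voxels left if every 16th voxel is sampled per axis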
Example #17
0
        err.message("Running threshold filter")
        threshold_filter.filter(mask, np_Cube, dict_Header,
                                **Parameters["threshold"])
        if Parameters["pipeline"]["trackMemory"]: print_memory_usage(t0)

    err.message("Source finding complete.")

# --------------------
# ---- POSITIVITY ----
# --------------------

# Check if positivity flag is set; if so, remove negative pixels from mask:
if Parameters["merge"]["positivity"]:
    err.warning(
        "Enabling mask.positivity is dangerous and will render some of SoFiA's\n"
        "most  powerful  algorithms useless,  including mask  optimisation and\n"
        "reliability calculation.  Only use this option if you are fully aware\n"
        "of its risks and consequences!",
        frame=True)
    mask[np_Cube < 0.0] = 0
    if Parameters["pipeline"]["trackMemory"]: print_memory_usage(t0)

# Check whether any pixels are detected
NRdet = (mask > 0).sum()
if not NRdet:
    err.warning("No pixels detected. Exiting pipeline.", fatal=True)
else:
    err.message("{0:,d} out of {1:,d} pixels detected ({2:.4f}%)".format(
        NRdet,
        np.array(mask.shape).prod(),
        100.0 * float(NRdet) / float(np.array(mask.shape).prod())))
Example #18
def writeMoments(datacube, maskcube, filename, debug, header, compress, write_mom, flagOverwrite):
	# Exit if nothing is to be done
	if not any(write_mom):
		err.warning("No moment maps requested; skipping moment map generation.")
		return
	
	# ---------------------------
	# Number of detected channels
	# ---------------------------
	nrdetchan = (maskcube > 0).sum(axis=0)
	if np.nanmax(nrdetchan) < 32768:
		nrdetchan = nrdetchan.astype("int16")
	else:
		nrdetchan = nrdetchan.astype("int32")
	
	hdu = pyfits.PrimaryHDU(data=nrdetchan, header=header)
	hdu.header["BUNIT"] = "Nchan"
	hdu.header["DATAMIN"] = np.nanmin(nrdetchan)
	hdu.header["DATAMAX"] = np.nanmax(nrdetchan)
	hdu.header["ORIGIN"] = sofia_version_full
	glob.delete_header(hdu.header, "CTYPE3")
	glob.delete_header(hdu.header, "CRPIX3")
	glob.delete_header(hdu.header, "CRVAL3")
	glob.delete_header(hdu.header, "CDELT3")
	
	name = str(filename) + "_nrch.fits"
	if compress: name += ".gz"
	
	# Check for overwrite flag
	if not flagOverwrite and os.path.exists(name):
		err.error("Output file exists: " + str(name) + ".", fatal=False)
	else:
		hdu.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)
	
	# ----------------------
	# Moment 0, 1 and 2 maps
	# ----------------------
	# WARNING: The generation of moment maps will mask the copy of the data cube held
	#          in memory by SoFiA. If you wish to use the original data cube after
	#          this point, please reload it first!
	datacube[maskcube == 0] = 0
	
	# Regrid cube if necessary
	if "CELLSCAL" in header and header["CELLSCAL"] == "1/F":
		err.warning(
			"CELLSCAL keyword with value of 1/F found.\n"
			"Will regrid data cube before creating moment images.")
		datacube = glob.regridMaskedChannels(datacube, maskcube, header)
	
	# ALERT: Why are we doing this?
	#datacube = np.array(datacube, dtype=np.single)
	
	# Extract relevant WCS parameters
	if glob.check_wcs_info(header):
		width = header["CDELT3"]
		chan0 = header["CRPIX3"]
		freq0 = header["CRVAL3"]
		mom_scale_factor = 1.0
		
		# Velocity
		if glob.check_header_keywords(glob.KEYWORDS_VELO, header["CTYPE3"]):
			if not "CUNIT3" in header or header["CUNIT3"].lower() == "m/s":
				# Assuming m/s and converting to km/s
				mom_scale_factor = 1.0e-3
				unit_spec = "km/s"
			elif header["CUNIT3"].lower() == "km/s":
				# Assuming km/s
				unit_spec = "km/s"
			else:
				# Working with whatever velocity units the cube has
				unit_spec = str(header["CUNIT3"])
		# Frequency
		elif glob.check_header_keywords(glob.KEYWORDS_FREQ, header["CTYPE3"]):
			if not "CUNIT3" in header or header["CUNIT3"].lower() == "hz":
				# Assuming Hz
				unit_spec = "Hz"
			elif header["CUNIT3"].lower() == "khz":
				# Assuming kHz and converting to Hz
				mom_scale_factor = 1.0e+3
				unit_spec = "Hz"
			else:
				# Working with whatever frequency units the cube has
				unit_spec = str(header["CUNIT3"])
	else:
		err.warning("Axis descriptors missing from FITS file header.\nMoment maps will not be scaled!")
		width = 1.0
		chan0 = 0.0
		freq0 = 0.0
		mom_scale_factor = 1.0
		unit_spec = "chan"
	
	# Prepare moment maps
	# NOTE: We are making use of NumPy's array broadcasting rules here to avoid
	#       having to cast array sizes to the full 3-D data cube size!
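	#       For example, velArr below has shape (Nz, 1, 1) while datacube has shape
	#       (Nz, Ny, Nx), so velArr * datacube broadcasts to (Nz, Ny, Nx) automatically.
	#       The quantities computed are the usual intensity-weighted moments, with
	#       v(z) = (z + 1 - CRPIX3) * CDELT3 + CRVAL3:
	#         mom0 = sum_z S(z)                              (scaled by the channel width below)
	#         mom1 = sum_z v(z) S(z) / mom0
	#         mom2 = sqrt(sum_z (v(z) - mom1)^2 S(z) / mom0)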
	moments = [None, None, None]
	with np.errstate(invalid="ignore"):
		if any(write_mom):
			# Definition of moment 0
			moments[0] = np.nansum(datacube, axis=0)
		
		if write_mom[1] or write_mom[2]:
			# Definition of moment 1
			velArr = ((np.arange(datacube.shape[0]) + 1.0 - chan0) * width + freq0).reshape((datacube.shape[0], 1, 1))
			moments[1] = np.divide(np.nansum(velArr * datacube, axis=0), moments[0])
		
		if write_mom[2]:
			# Definition of moment 2
			velArr = velArr - moments[1]
			moments[2] = np.sqrt(np.divide(np.nansum(velArr * velArr * datacube, axis=0), moments[0]))
	
	# Multiply moment 0 by channel width
	moments[0] *= abs(width)
	
	# Set up unit strings
	if "BUNIT" in header:
		unit_flux = str(header["BUNIT"])
		# Normalise the capitalisation of "Jy[/beam]" and append the spectral unit
		if unit_flux.lower() == "jy":
			unit_flux = "Jy." + unit_spec
		elif unit_flux.lower() == "jy/beam":
			unit_flux = "Jy/beam." + unit_spec
		else:
			unit_flux += "." + unit_spec
	else:
		err.warning("Cannot determine flux unit; BUNIT missing from header.")
		unit_flux = ""
	unit_mom = [unit_flux, unit_spec, unit_spec]
	
	# Writing moment maps to disk
	for i in range(3):
		if write_mom[i] and moments[i] is not None:
			err.message("Writing moment {0:d} image.".format(i))
			moments[i] *= mom_scale_factor
			
			hdu = pyfits.PrimaryHDU(data=moments[i], header=header)
			hdu.header["BUNIT"] = unit_mom[i]
			hdu.header["DATAMIN"] = np.nanmin(moments[i])
			hdu.header["DATAMAX"] = np.nanmax(moments[i])
			hdu.header["ORIGIN"] = sofia_version_full
			hdu.header["CELLSCAL"] = "CONSTANT"
			glob.delete_header(hdu.header, "CRPIX3")
			glob.delete_header(hdu.header, "CRVAL3")
			glob.delete_header(hdu.header, "CDELT3")
			glob.delete_header(hdu.header, "CTYPE3")
			
			if debug:
				hdu.writeto(str(filename) + "_mom{0:d}.debug.fits".format(i), output_verify="warn", **__astropy_arg_overwrite__)
			else:
				name = str(filename) + "_mom{0:d}.fits".format(i)
				if compress: name += ".gz"
				
				# Check for overwrite flag
				if not flagOverwrite and os.path.exists(name):
					err.error("Output file exists: " + str(name) + ".", fatal=False)
				else:
					hdu.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)
	
	return
Example #19
def fix_gipsy_header(header_orig):
    # GIPSY keys for spectral axis
    key_opt = ["FREQ-OHEL", "FREQ-OLSR"]
    key_rad = ["FREQ-RHEL", "FREQ-RLSR"]
    header = header_orig.copy()
    naxis = header["NAXIS"]

    for i in range(1, naxis + 1):
        ctype = header["CTYPE%d" % i]
        if ctype in key_opt + key_rad:
            axis = i
            # Read reference velocity - from VELR or DRVAL
            try:
                if "VELR" in header:
                    vel = header["VELR"]
                elif "DRVAL%d" % axis in header:
                    vel = header["VELR"]
                    unit = header["DUNIT%d" % axis]
                    if unit.lower() == "km/s":
                        vel = vel * 1000.0
                    elif unit.lower() != "m/s":
                        break
            except:
                err.warning("Problem with reference velocity.")
                break

            # Convert reference frequency to Hz
            try:
                freq = header["CRVAL%d" % axis]
                dfreq = header["CDELT%d" % axis]
                unit = header["CUNIT%d" % axis]
                freqUnits = ["hz", "khz", "mhz", "ghz"]
                j = freqUnits.index(unit.lower())
                freq *= 10**j
                dfreq *= 10**j
            except:
                err.warning("Problem with reference frequency.")
                break

            # Need rest frequency for conversion
            try:
                freq0Names = ["FREQ0", "FREQR", "RESTFRQ"]
                for key in freq0Names:
                    try:
                        freq0 = header[key]
                        #foundFreq0 = 1
                    except:
                        pass
                header["RESTFRQ"] = freq0
                #foundFreq0
            except:
                err.warning("Rest frequency not found.")
                break

            # Calculate reference frequency in the barycentric system
            if ctype in key_opt:
                freqB = freq0 / (1.0 + vel / scipy.constants.c)
            else:
                freqB = freq0 / (1.0 - vel / scipy.constants.c)

            # Calculate topocentric velocity
            velT = scipy.constants.c * ((freqB**2 - freq**2) /
                                        (freqB**2 + freq**2))
            dfreqB = dfreq * math.sqrt(
                (scipy.constants.c - velT) / (scipy.constants.c + velT))
            header["CTYPE%d" % axis] = "FREQ"
            header["CUNIT%d" % axis] = "Hz"
            header["CRVAL%d" % axis] = freqB
            header["CDELT%d" % axis] = dfreqB
            ## GIPSY headers seem to contain the unit "DEGREE" for RA/Dec
            ## WCS lib does not like that
            for key in header:
                if "CUNIT" in key and header[key] == "DEGREE":
                    header[key] = "deg"
            err.message("Header repaired successfully.")

            return header
Example #20
def dilate(cube, mask, objects, cathead, Parameters):
    dilateThreshold = Parameters["parameters"]["dilateThreshold"]
    dilatePixMax = Parameters["parameters"]["dilatePixMax"]
    dilateChanMax = Parameters["parameters"]["dilateChanMax"]

    # Stops dilating when (flux_new - flux_old) / flux_new < dilateThreshold
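    # For example, with dilateThreshold = 0.02 the dilation of a source stops as soon as
    # a further dilation step would add less than 2% to its total flux.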
    sourceIDs = np.unique(mask)
    # remove first element which should be zero
    if sourceIDs[0] == 0:
        sourceIDs = np.delete(sourceIDs, 0)

    for i in range(0, len(sourceIDs)):
        obj = objects[i]
        xmin = max(0, obj[list(cathead).index("x_min")] - dilatePixMax)
        xmax = min(cube.shape[2] - 1,
                   obj[list(cathead).index("x_max")] + dilatePixMax)
        ymin = max(0, obj[list(cathead).index("y_min")] - dilatePixMax)
        ymax = min(cube.shape[1] - 1,
                   obj[list(cathead).index("y_max")] + dilatePixMax)
        zmin = max(0, obj[list(cathead).index("z_min")] - dilateChanMax)
        zmax = min(cube.shape[0] - 1,
                   obj[list(cathead).index("z_max")] + dilateChanMax)

        [zmin, zmax, ymin, ymax, xmin,
         xmax] = map(int, [zmin, zmax, ymin, ymax, xmin, xmax])

        objcube = cube[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        objmask = mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        allmask = mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1].copy()
        otherobjs = (allmask > 0) * (allmask != sourceIDs[i])

        if (otherobjs).sum():
            # Ensure that objects with different source IDs within dilatePixMax, dilateChanMax are not
            # included in the flux growth calculation
            err.warning(
                "Object {0:d} has possible overlapping objects within {1:d} pix, {2:d} chan."
                .format(int(sourceIDs[i]), dilatePixMax, dilateChanMax))
            objcube[(allmask > 0) * (allmask != sourceIDs[i])] = 0

        fluxes = []

        # Loop through Z dilation kernels until the flux converges or the maximum allowed Z dilation is reached
        for dilchan in range(dilateChanMax + 1):
            dd = dilchan * 2 + 1
            dilstruct = np.ones((dd, 1, 1))
            fluxes.append(objcube[nd.morphology.binary_dilation(
                objmask == sourceIDs[i], structure=dilstruct)].sum())
            if dilchan > 0 and (fluxes[-1] -
                                fluxes[-2]) / fluxes[-1] < dilateThreshold:
                dilchan -= 1
                break
        # Pick the best Z dilation kernel for current object and update mask
        dd = dilchan * 2 + 1
        dilstruct = np.ones((dd, 1, 1))
        # Only grow the mask of object sourceIDs[i] even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == sourceIDs[i],
                                              structure=dilstruct).astype(int)
                == 1] = sourceIDs[i]

        # Loop through XY dilation kernels until the flux converges or the maximum allowed XY dilation is reached
        for dilpix in range(dilatePixMax + 1):
            dd = dilpix * 2 + 1
            dilstruct = (np.sqrt(((np.indices(
                (dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
            dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
            fluxes.append(objcube[nd.morphology.binary_dilation(
                objmask == sourceIDs[i], structure=dilstruct)].sum())
            if dilpix > 0 and (fluxes[-1] -
                               fluxes[-2]) / fluxes[-1] < dilateThreshold:
                dilpix -= 1
                break
        # Pick the best XY dilation kernel for current object and update mask
        dd = dilpix * 2 + 1
        dilstruct = (np.sqrt(((np.indices(
            (dd, dd)) - dilpix)**2).sum(axis=0)) <= dilpix).astype(int)
        dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
        # Only grow the mask of object sourceIDs[i] even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == sourceIDs[i],
                                              structure=dilstruct).astype(int)
                == 1] = sourceIDs[i]

        err.message(
            "Mask of source {0:d} dilated by {2:d} chan and then by {1:d} pix."
            .format(int(sourceIDs[i]), dilpix, dilchan))
        # Put back in objmask objects != sourceIDs[i] that may have been inside objmask before
        # dilation or may have been temporarily replaced by the dilated object sourceIDs[i]
        if (otherobjs).sum():
            objmask[otherobjs] = allmask[otherobjs]
        mask[zmin:zmax + 1, ymin:ymax + 1, xmin:xmax + 1] = objmask

        # Update n_pix, x_geo and n_chan
        n_pix = objmask[objmask == sourceIDs[i]].sum() / sourceIDs[i]
        ind = np.vstack(np.where(objmask == sourceIDs[i]))
        cgeo = (ind.sum(axis=1)).astype(float) / float(n_pix)
        x_geo, y_geo, z_geo = cgeo[2] + xmin, cgeo[1] + ymin, cgeo[0] + zmin
        zmin, zmax = min(ind[0]), max(ind[0]) + 1
        n_chan = zmax - zmin

        # Update n_los
        objmask[objmask != sourceIDs[i]] = 0
        maskSumA0 = objmask.sum(axis=0)
        maskSumA0[maskSumA0 > 1] = 1
        n_los = maskSumA0.sum()

        del objcube
        del objmask
        del allmask
        del otherobjs

        objects[i, list(cathead).index("x_min")] = max(
            0, obj[list(cathead).index("x_min")] - dilpix)
        objects[i, list(cathead).index("x_max")] = min(
            cube.shape[2] - 1, obj[list(cathead).index("x_max")] + dilpix)
        objects[i, list(cathead).index("y_min")] = max(
            0, obj[list(cathead).index("y_min")] - dilpix)
        objects[i, list(cathead).index("y_max")] = min(
            cube.shape[1] - 1, obj[list(cathead).index("y_max")] + dilpix)
        objects[i, list(cathead).index("z_min")] = max(
            0, obj[list(cathead).index("z_min")] - dilchan)
        objects[i, list(cathead).index("z_max")] = min(
            cube.shape[0] - 1, obj[list(cathead).index("z_max")] + dilchan)
        objects[i, list(cathead).index("n_pix")] = n_pix
        objects[i, list(cathead).index("n_chan")] = n_chan
        objects[i, list(cathead).index("n_los")] = n_los
        objects[i, list(cathead).index("x_geo")] = x_geo
        objects[i, list(cathead).index("y_geo")] = y_geo
        objects[i, list(cathead).index("z_geo")] = z_geo

    return mask, objects
Example #21
def apply_weights_file(data, weightsFile, subcube):
    # Load weights cube
    err.message("Applying weights cube:\n  " + str(weightsFile))
    try:
        f = fits.open(weightsFile, memmap=False)
        header_weights = f[0].header
    except:
        err.error("Failed to read weights cube.")

    # Extract axis sizes and types
    n_axes_weights, axis_size_weights, axis_type_weights = extract_axis_size(
        header_weights)

    # Ensure correct dimensionality
    check_cube_dimensions(n_axes_weights,
                          axis_size_weights,
                          cube_name="weights cube",
                          min_dim=1,
                          max_dim=4)

    # Multiply data by weights
    # 1-D spectrum
    if n_axes_weights == 1:
        err.warning(
            "Weights cube has 1 axis; interpreted as spectrum.\nAdding first and second axis."
        )
        if len(subcube):
            err.ensure(
                len(subcube) == 6,
                "Subcube list must have 6 entries ({0:d} given).".format(
                    len(subcube)))
            data *= np.reshape(f[0].section[subcube[4]:subcube[5]], (-1, 1, 1))
        else:
            data *= np.reshape(f[0].data, (-1, 1, 1))

    # 2-D image
    elif n_axes_weights == 2:
        if len(subcube) == 6 or len(subcube) == 4:
            data *= np.array(
                [f[0].section[subcube[2]:subcube[3], subcube[0]:subcube[1]]])
        else:
            data *= np.array([f[0].data])

    # 3-D cube
    elif n_axes_weights == 3:
        if len(subcube) == 6:
            data *= f[0].section[subcube[4]:subcube[5], subcube[2]:subcube[3],
                                 subcube[0]:subcube[1]]
        else:
            data *= f[0].data

    # 4-D hypercube
    else:
        if len(subcube) == 6:
            data *= f[0].section[0, subcube[4]:subcube[5],
                                 subcube[2]:subcube[3], subcube[0]:subcube[1]]
        else:
            data *= f[0].section[0]

    f.close()
    err.message("  Weights cube applied.")

    return data
Example #22
def readPipelineOptions(filename="pipeline.options"):
    # Try to open parameter file
    try:
        f = open(filename, "r")
    except IOError as e:
        err.error("Failed to read parameter file: " + str(filename) + "\n" +
                  str(e),
                  fatal=True)

    # Extract lines from parameter file
    lines = f.readlines()
    f.close()

    # Remove leading/trailing whitespace and empty lines
    lines = [line.strip() for line in lines]

    # Check for version number
    for line in lines:
        if "# Creator: SoFiA" in line:
            par_file_version = line[17:]
            sof_file_version = getVersion()
            if par_file_version != sof_file_version:
                err.warning(
                    "The parameter file was created with a different version of SoFiA\n"
                    "(" + str(par_file_version) +
                    ") than the one you are currently using (" +
                    str(sof_file_version) + ").\n"
                    "Some settings defined in the parameter file may not be recognised\n"
                    "by SoFiA, which could lead to unexpected results.",
                    frame=True)

    # Remove comments
    lines = [line for line in lines if len(line) > 0 and line[0] != "#"]

    # Some additional setup
    datatypes = allowedDataTypes()
    tasks = {}
    pedantic = True  # Controls whether to exit on unrecognised or redefined parameters
    pedantic_counter = 0

    # Loop over all lines:
    for line in lines:
        # Extract parameter name and value
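        # For example, the line "pipeline.pedantic = false  # some comment" yields
        # module = "pipeline", parname = "pedantic" and value = "false".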
        try:
            parameter, value = tuple(line.split("=", 1))
            parameter = parameter.strip()
            value = value.split("#")[0].strip()
            module, parname = tuple(parameter.split(".", 1))
        except:
            err.error("Failed to read parameter: " + str(line) +
                      "\nExpected format: module.parameter = value",
                      fatal=True)

        # Ensure that module and parameter names are not empty
        if len(module) < 1 or len(parname) < 1:
            err.error("Failed to read parameter: " + str(line) +
                      "\nExpected format: module.parameter = value",
                      fatal=True)

        subtasks = tasks
        if module not in subtasks: subtasks[module] = {}
        subtasks = subtasks[module]

        if parname in subtasks:
            err.warning("Multiple definitions of parameter " + str(parameter) +
                        " encountered.\nIgnoring all additional definitions.")
            pedantic_counter += 1
            continue

        if parameter in datatypes:
            try:
                if datatypes[parameter] == "bool":
                    subtasks[parname] = str2bool(value)
                elif datatypes[parameter] == "float":
                    subtasks[parname] = float(value)
                elif datatypes[parameter] == "int":
                    subtasks[parname] = int(value)
                elif datatypes[parameter] == "array":
                    subtasks[parname] = literal_eval(value)
                else:
                    subtasks[parname] = str(value)
            except:
                err.error("Failed to parse parameter value:\n" + str(line) +
                          "\nExpected data type: " + str(datatypes[parameter]),
                          fatal=True)
            if parameter == "pipeline.pedantic":
                pedantic = subtasks[parname]  # Update 'pedantic' setting
        else:
            err.warning("Ignoring unknown parameter: " + str(parameter) +
                        " = " + str(value))
            pedantic_counter += 1
            continue

    if pedantic and pedantic_counter:
        err.error(
            "Multiply-defined or unrecognised parameter(s) encountered.\nPlease check your parameter file or set pipeline.pedantic = false\nto ignore unknown or already defined parameters.",
            fatal=True)

    return tasks
Example #23
def write_catalog_from_array(mode, objects, catHeader, catUnits, catFormat,
                             parList, outName, flagCompress, flagOverwrite,
                             flagUncertainties):
    # Check output format and compression
    availableModes = ["ASCII", "XML", "SQL"]
    if mode not in availableModes:
        err.warning("Unknown catalogue format: " + str(mode) +
                    ". Defaulting to ASCII.")
        mode = "ASCII"
    modeIndex = availableModes.index(mode)

    if flagCompress: outName += ".gz"
    err.message("Writing " + availableModes[modeIndex] + " catalogue: " +
                outName + ".")

    # Exit if file exists and overwrite flag is set to false
    func.check_overwrite(outName, flagOverwrite, fatal=True)

    # Do we need to write all parameters?
    if parList == ["*"] or not parList: parList = list(catHeader)

    # Remove undefined parameters
    parList = [item for item in parList if item in catHeader]

    # Remove statistical uncertainties if not requested
    if not flagUncertainties:
        for item in ["err_x", "err_y", "err_z", "err_w20", "err_w50"]:
            while item in parList:
                parList.remove(item)

    # Check whether there is anything left
    if not len(parList):
        err.error(
            "No valid output parameters selected. No output catalogue written.",
            fatal=False)
        return

    # Create and write catalogue in requested format
    # -------------------------------------------------------------------------
    if mode == "XML":
        # Define basic XML header information
        votable = Element("VOTABLE")
        resource = SubElement(votable,
                              "RESOURCE",
                              name="SoFiA catalogue (version %s)" %
                              sofia_version)
        description = SubElement(resource, "DESCRIPTION")
        description.text = "Source catalogue from the Source Finding Application (SoFiA) version %s" % sofia_version
        coosys = SubElement(resource, "COOSYS", ID="J2000")
        table = SubElement(resource, "TABLE", ID="sofia_cat", name="sofia_cat")

        # Load list of parameters and unified content descriptors (UCDs)
        ucdList = {}
        fileUcdPath = os.environ["SOFIA_PIPELINE_PATH"]
        fileUcdPath = fileUcdPath.replace("sofia_pipeline.py",
                                          "SoFiA_source_parameters.dat")

        try:
            with open(fileUcdPath) as fileUcd:
                for line in fileUcd:
                    (key, value) = line.split()
                    ucdList[key] = value
        except:
            err.warning("Failed to read UCD file.")

        # Create parameter fields
        for par in parList:
            ucdEntity = ucdList[par] if par in ucdList else ""
            index = list(catHeader).index(par)
            if catFormat[index] == "%30s":
                field = SubElement(table,
                                   "FIELD",
                                   name=par,
                                   ucd=ucdEntity,
                                   datatype="char",
                                   arraysize="30",
                                   unit=catUnits[index])
            else:
                field = SubElement(table,
                                   "FIELD",
                                   name=par,
                                   ucd=ucdEntity,
                                   datatype="float",
                                   unit=catUnits[index])

        # Create data table entries
        data = SubElement(table, "DATA")
        tabledata = SubElement(data, "TABLEDATA")

        for obj in objects:
            tr = SubElement(tabledata, "TR")
            for par in parList:
                td = SubElement(tr, "TD")
                index = list(catHeader).index(par)
                td.text = (catFormat[index] % obj[index]).strip()

        # Write XML catalogue:
        try:
            f1 = gzopen(outName, "wb") if flagCompress else open(outName, "w")
        except:
            err.error("Failed to write to XML catalogue: " + outName + ".",
                      fatal=False)
            return
        f1.write(prettify(votable))
        #f1.write(tostring(votable, "utf-8")) // without prettifying, which is faster and uses much less memory
        f1.close()

    # -----------------------------------------------------------------End-XML-

    elif mode == "SQL":
        # Record if there is an ID column in the catalogue
        # (if no ID is present, we will later create one for use as primary key)
        noID = "id" not in parList

        # Write some header information:
        content = "-- SoFiA catalogue (version %s)\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\n\n" % sofia_version

        # Construct and write table structure:
        flagProgress = False
        content += "CREATE TABLE IF NOT EXISTS `SoFiA-Catalogue` (\n"
        if noID: content += "  `id` INT NOT NULL,\n"
        for par in parList:
            index = list(catHeader).index(par)
            if flagProgress: content += ",\n"
            content += "  " + sqlHeaderItem(par) + sqlFormat(catFormat[index])
            flagProgress = True
        content += ",\n  PRIMARY KEY (`id`),\n  KEY (`id`)\n) DEFAULT CHARSET=utf8 COMMENT=\'SoFiA source catalogue\';\n\n"

        # Insert data:
        flagProgress = False
        content += "INSERT INTO `SoFiA-Catalogue` ("
        if noID: content += "`id`, "
        for par in parList:
            if flagProgress: content += ", "
            content += sqlHeaderItem(par)
            flagProgress = True
        content += ") VALUES\n"

        source_count = 0
        for obj in objects:
            flagProgress = False
            source_count += 1
            content += "("
            if noID: content += str(source_count) + ", "

            for par in parList:
                index = list(catHeader).index(par)
                if flagProgress: content += ", "
                content += sqlDataItem(obj[index], catFormat[index])
                flagProgress = True

            if (source_count < len(objects)): content += "),\n"
            else: content += ");\n"

        # Write catalogue
        try:
            fp = gzopen(outName, "wb") if flagCompress else open(outName, "w")
        except:
            err.error("Failed to write to SQL catalogue: " + outName + ".",
                      fatal=False)
            return
        fp.write(content)
        fp.close()

    # -----------------------------------------------------------------End-SQL-

    else:  # mode == "ASCII" by default
        # Determine header sizes based on variable-length formatting
        lenCathead = []
        for j in catFormat:
            lenCathead.append(
                int(
                    j.split("%")[1].split("e")[0].split("f")[0].split("i")
                    [0].split("d")[0].split(".")[0].split("s")[0]) + 1)

        # Create header
        headerName = ""
        headerUnit = ""
        headerCol = ""
        outFormat = ""
        colCount = 0
        header = "SoFiA catalogue (version %s)\n" % sofia_version

        for par in parList:
            index = list(catHeader).index(par)
            headerName += catHeader[index].rjust(lenCathead[index])
            headerUnit += catUnits[index].rjust(lenCathead[index])
            headerCol += ("(%i)" % (colCount + 1)).rjust(lenCathead[index])
            outFormat += catFormat[index] + " "
            colCount += 1
        header += headerName[3:] + '\n' + headerUnit[3:] + '\n' + headerCol[3:]

        # Create catalogue
        outObjects = []
        for obj in objects:
            outObjects.append([])
            for par in parList:
                outObjects[-1].append(obj[list(catHeader).index(par)])

        # Write ASCII catalogue
        try:
            np.savetxt(outName,
                       np.array(outObjects, dtype=object),
                       fmt=outFormat,
                       header=header)

        except:
            err.error("Failed to write to ASCII catalogue: " + outName + ".",
                      fatal=False)
            return

    # ---------------------------------------------------------------End-ASCII-

    return
Example #24
def SCfinder_mem(cube,
                 mask,
                 header,
                 t0,
                 kernels=[
                     [0, 0, 0, "b"],
                 ],
                 threshold=3.5,
                 sizeFilter=0,
                 maskScaleXY=2.0,
                 maskScaleZ=2.0,
                 kernelUnit="pixel",
                 edgeMode="constant",
                 rmsMode="negative",
                 fluxRange="all",
                 verbose=0,
                 perSCkernel=False,
                 scaleX=False,
                 scaleY=False,
                 scaleZ=True,
                 edgeX=0,
                 edgeY=0,
                 edgeZ=0,
                 method="1d2d",
                 windowSpatial=20,
                 windowSpectral=20,
                 gridSpatial=0,
                 gridSpectral=0,
                 interpolation="none"):
    # Define a few constants
    FWHM_CONST = 2.0 * math.sqrt(2.0 * math.log(
        2.0))  # Conversion between sigma and FWHM of Gaussian function
    MAX_PIX_CONST = 1.0e+6  # Maximum number of pixels for noise calculation; sampling is set accordingly

    # Check for NaN in cube
    found_nan = np.isnan(cube).any()

    # Set sampling sampleRms for rms measurement
    sampleRms = max(
        1,
        int((float(cube.size) / MAX_PIX_CONST)**(1.0 /
                                                 min(3, len(cube.shape)))))

    # Measure noise in original cube with sampling "sampleRms"
    rms = GetRMS(cube,
                 rmsMode=rmsMode,
                 fluxRange=fluxRange,
                 zoomx=1,
                 zoomy=1,
                 zoomz=1,
                 verbose=verbose,
                 sample=sampleRms)

    # Loop over all kernels
    for kernel in kernels:
        [kx, ky, kz, kt] = kernel
        if verbose:
            err.linebreak()
            err.print_progress_time(t0)
            err.message("    Filter {0:} {1:} {2:} {3:} ...".format(
                kx, ky, kz, kt))
        if kernelUnit == "world" or kernelUnit == "w":
            if verbose: err.message("    Converting filter size to pixels ...")
            kx = abs(float(kx) / header["CDELT1"])
            ky = abs(float(ky) / header["CDELT2"])
            kz = abs(float(kz) / header["CDELT3"])
        if kt == "b":
            if kz != int(math.ceil(kz)) and verbose:
                err.warning(
                    "Rounding width of boxcar z kernel to next integer.")
            kz = int(math.ceil(kz))

        # Create a copy of the original cube
        cube_smooth = np.copy(cube)

        # Replace all NaNs with zero
        if found_nan:
            cube_smooth[np.isnan(cube)] = 0.0

        cube_smooth[(cube_smooth > 0) & (mask > 0)] = maskScaleXY * rms
        cube_smooth[(cube_smooth < 0) & (mask > 0)] = -maskScaleXY * rms

        # Spatial smoothing
        if kx + ky:
            cube_smooth = ndimage.filters.gaussian_filter(
                cube_smooth, [0, ky / FWHM_CONST, kx / FWHM_CONST],
                mode=edgeMode)

        # Spectral smoothing
        if kz:
            if kt == "b":
                cube_smooth = ndimage.filters.uniform_filter1d(cube_smooth,
                                                               kz,
                                                               axis=0,
                                                               mode=edgeMode)
            elif kt == "g":
                cube_smooth = ndimage.filters.gaussian_filter1d(cube_smooth,
                                                                kz /
                                                                FWHM_CONST,
                                                                axis=0,
                                                                mode=edgeMode)

        # Re-insert the NaNs taken out earlier
        if found_nan:
            cube_smooth[np.isnan(cube)] = np.nan

        # Per-kernel noise normalisation (Time consuming!)
        if perSCkernel:
            cube_smooth, noise_smooth = sigma_scale(
                cube_smooth,
                scaleX=scaleX,
                scaleY=scaleY,
                scaleZ=scaleZ,
                edgeX=edgeX,
                edgeY=edgeY,
                edgeZ=edgeZ,
                statistic=rmsMode,
                fluxRange=fluxRange,
                method=method,
                windowSpatial=windowSpatial,
                windowSpectral=windowSpectral,
                gridSpatial=gridSpatial,
                gridSpectral=gridSpectral,
                interpolation=interpolation)

        # Calculate the RMS of the smoothed (possibly normalised) cube
        rms_smooth = GetRMS(cube_smooth,
                            rmsMode=rmsMode,
                            fluxRange=fluxRange,
                            zoomx=1,
                            zoomy=1,
                            zoomz=1,
                            verbose=verbose,
                            sample=sampleRms)

        # Add pixels above threshold to mask by setting bit 1
        err.message("    Applying +/- {0:} sigma detection threshold".format(
            threshold))
        with np.errstate(invalid="ignore"):
            mask |= (np.absolute(cube_smooth) >= threshold * rms_smooth)
            #mask = np.bitwise_or(mask, np.greater_equal(np.absolute(cube_smooth), threshold * rms_smooth))

        # Delete smoothed cube again
        del cube_smooth
    return
Example #25
def fix_gipsy_header(header_orig):
	# GIPSY keys for spectral axis
	key_opt = ["FREQ-OHEL","FREQ-OLSR"]
	key_rad = ["FREQ-RHEL","FREQ-RLSR"]
	header = header_orig.copy()
	naxis = header["NAXIS"]
	
	for i in range(1, naxis + 1):
		ctype = header["CTYPE%d" % i]
		if ctype in key_opt + key_rad:
			axis = i
			# Read reference velocity - from VELR or DRVAL
			try:
				if "VELR" in header:
					vel = header["VELR"]
				elif "DRVAL%d" % axis in header:
					vel = header["VELR"]
					unit = header["DUNIT%d" % axis]
					if unit.lower() == "km/s":
						vel = vel * 1000.0
					elif unit.lower() != "m/s":
						break
			except:
				err.warning("Problem with reference velocity.")
				break
			
			# Convert reference frequency to Hz
			try:
				freq  = header["CRVAL%d" % axis]
				dfreq = header["CDELT%d" % axis]
				unit  = header["CUNIT%d" % axis]
				freqUnits = ["hz", "khz", "mhz", "ghz"]
				j = freqUnits.index(unit.lower())
				freq  *= 10**j
				dfreq *= 10**j
			except:
				err.warning("Problem with reference frequency.")
				break
			
			# Need rest frequency for conversion
			try:
				freq0Names = ["FREQ0", "FREQR", "RESTFRQ"]
				for key in freq0Names:
					try:
						freq0 = header[key]
						#foundFreq0 = 1
					except:
						pass
				header["RESTFRQ"] = freq0
				#foundFreq0
			except:
				err.warning("Rest frequency not found.")
				break
			
			# Calculate reference frequency in the barycentric system
			if ctype in key_opt:
				freqB = freq0 / (1.0 + vel / scipy.constants.c)
			else:
				freqB = freq0 / (1.0 - vel / scipy.constants.c)
			
			# Calculate topocentric velocity
			velT = scipy.constants.c * ((freqB**2 - freq**2) / (freqB**2 + freq**2))
			dfreqB = dfreq * math.sqrt((scipy.constants.c - velT) / (scipy.constants.c + velT))
			header["CTYPE%d" % axis] = "FREQ"
			header["CUNIT%d" % axis] = "Hz"
			header["CRVAL%d" % axis] = freqB
			header["CDELT%d" % axis] = dfreqB
			## GIPSY headers seem to contain the unit "DEGREE" for RA/Dec
			## WCS lib does not like that
			for key in header:
				if "CUNIT" in key and header[key] == "DEGREE":
					header[key] = "deg"
			err.message("Header repaired successfully.")
			
			return header
Example #26
File: addrel.py  Project: SoFiA-Admin/SoFiA
def EstimateRel(data, pdfoutname, parNames, parSpace=["snr_sum", "snr_max", "n_pix"], logPars=[1, 1, 1], autoKernel=True, scaleKernel=1, negPerBin=1, skellamTol=-0.5, kernel=[0.15, 0.05, 0.1], usecov=False, doscatter=1, docontour=1, doskellam=1, dostats=0, saverel=1, threshold=0.99, fMin=0, verb=0, makePlot=False):

	# Always work on logarithmic parameter values; the reliability.logPars parameter should be removed
	if 0 in logPars: err.warning("  Setting all reliability.logPars entries to 1. This parameter is no longer editable by users.")
	logPars=[1 for pp in parSpace]
	
	# Import Matplotlib if diagnostic plots requested
	if makePlot:
		import matplotlib
		# The following line is necessary to run SoFiA remotely
		matplotlib.use("Agg")
		import matplotlib.pyplot as plt
	
	# --------------------------------
	# Build array of source parameters
	# --------------------------------
	
	idCOL   = parNames.index("id")
	ftotCOL = parNames.index("snr_sum")
	fmaxCOL = parNames.index("snr_max")
	fminCOL = parNames.index("snr_min")
	
	# Get columns of requested parameters
	parCol = []
	for ii in range(len(parSpace)): parCol.append(parNames.index(parSpace[ii]))
	
	# Get position and number of positive and negative sources
	pos  = data[:, ftotCOL] >  0
	neg  = data[:, ftotCOL] <= 0
	Npos = pos.sum()
	Nneg = neg.sum()
	
	err.ensure(Npos, "No positive sources found; cannot proceed.")
	err.ensure(Nneg, "No negative sources found; cannot proceed.")
	
	# Get array of relevant source parameters (and take log of them if requested)
	ids = data[:,idCOL]
	pars = np.empty((data.shape[0], 0))
	
	for ii in range(len(parSpace)):
		if parSpace[ii] == "snr_max":
			parsTmp = data[:,fmaxCOL] * pos - data[:,fminCOL] * neg
			if logPars[ii]: parsTmp = np.log10(parsTmp)
			pars = np.concatenate((pars, parsTmp.reshape(-1, 1)), axis=1)
		elif parSpace[ii] == "snr_sum" or parSpace[ii] == "snr_mean":
			parsTmp = abs(data[:,parCol[ii]].reshape(-1, 1))
			if logPars[ii]: parsTmp = np.log10(parsTmp)
			pars = np.concatenate((pars, parsTmp), axis=1)
		else:
			parsTmp = data[:,parCol[ii]].reshape(-1, 1)
			if logPars[ii]: parsTmp = np.log10(parsTmp)
			pars = np.concatenate((pars, parsTmp), axis=1)
	
	err.message("  Working in parameter space {0:}".format(str(parSpace)))
	err.message("  Will convolve the distribution of positive and negative sources in this space to derive the P and N density fields")
	pars = np.transpose(pars)
	
	
	# ----------------------------------------------------------
	# Set parameters to work with and gridding/plotting for each
	# ----------------------------------------------------------
	
	# Axis labels when plotting
	labs = []
	for ii in range(len(parSpace)):
		labs.append("")
		if logPars[ii]: labs[ii] += "log "
		labs[ii] += parSpace[ii]
	
	# Axis limits when plotting
	pmin, pmax = pars.min(axis=1), pars.max(axis=1)
	pmin, pmax = pmin - 0.1 * (pmax - pmin), pmax + 0.1 * (pmax - pmin)
	lims = [[pmin[i], pmax[i]] for i in range(len(parSpace))]
	
	# Grid on which to evaluate Np and Nn in order to plot contours
	grid = [[pmin[i], pmax[i], 0.02 * (pmax[i] - pmin[i])] for i in range(len(parSpace))]
	
	# Calculate the number of rows and columns in figure
	projections = [subset for subset in combinations(range(len(parSpace)), 2)]
	nr = int(np.floor(np.sqrt(len(projections))))
	nc = int(np.ceil(float(len(projections)) / nr))
	
	
	# ---------------------------------------
	# Set smoothing kernel in parameter space
	# ---------------------------------------
	
	# If autoKernel is True, then the initial kernel is taken as a scaled version of the covariance matrix
	# of the negative sources. The kernel size along each axis is such that the number of sources per kernel
	# width (sigma**2) is equal to "negPerBin". Optionally, the user can decide to use only the diagonal
	# terms of the covariance matrix. The kernel is then grown until convergence is reached on the Skellam
	# plot. If autoKernel is False, then use the kernel given by "kernel" parameter (argument of EstimateRel);
	# this is sigma, and is squared to be consistent with the auto kernel above.
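	# Illustration with hypothetical numbers: for Nneg = 500 negative sources, negPerBin = 1 and
	# three parameters, the starting scale factor applied to the matrix is (1/500)**(2/3) ~ 0.016,
	# i.e. an initial kernel width of roughly 0.13 times the sample standard deviation per axis.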
	
	if autoKernel:
		# Set the kernel shape to that of the variance or covariance matrix
		kernel = np.cov(pars[:, neg])
		kernelType = "covariance"
		# Check if kernel matrix can be inverted
		try:
			np.linalg.inv(kernel)
		except:
			err.error(
				"The reliability cannot be calculated because the smoothing kernel\n"
				"derived from " + str(pars[:,neg].shape[1]) + " negative sources cannot be inverted.\n"
				"This is likely due to an insufficient number of negative sources.\n"
				"Try to increase the number of negative sources by changing the\n"
				"source finding and/or filtering settings.", fatal=True, frame=True)
		
		if np.isnan(kernel).sum():
			err.error(
				"The reliability cannot be calculated because the smoothing kernel\n"
				"derived from " + str(pars[:,neg].shape[1]) + " negative sources contains NaNs.\n"
				"A good kernel is required to calculate the density field of positive\n"
				"and negative sources in parameter space.\n"
				"Try to increase the number of negative sources by changing the\n"
				"source finding and/or filtering settings.", fatal=True, frame=True)
		
		if not usecov:
			kernel = np.diag(np.diag(kernel))
			kernelType = "variance"
		
		kernelIter = 0.0
		deltplot = []
		
		# Scale the kernel size as requested by the user (scaleKernel>0) or use the autoscale algorithm (scaleKernel=0)
		if scaleKernel>0:
			# Scale kernel size as requested by the user
			# Note that the scale factor is squared because users are asked to give a factor to apply to sqrt(kernel)
			kernel *= scaleKernel**2
			err.message("  Using the {0:s} matrix scaled by a factor {1:.2f} as convolution kernel".format(kernelType, scaleKernel))
			err.message("  The sqrt(kernel) size is:")
			err.message(" " + str(np.sqrt(np.abs(kernel))))
		elif scaleKernel==0:
			# Scale kernel size to get started the kernel-growing loop
			# The scale factor for sqrt(kernel) is elevated to the power of 1.0 / len(parCol)
			err.message("  Will search for the best convolution kernel by scaling the {0:s} matrix".format(kernelType))
			err.message("  The {0:s} matrix has sqrt:".format(kernelType))
			err.message(" " + str(np.sqrt(np.abs(kernel))))
			# negPerBin must be >=1
			err.ensure(negPerBin>=1,"The parameter reliability.negPerBin used to start the convolution kernel search was set to {0:.1f} but must be >= 1. Please change your settings.".format(negPerBin))
			kernel *= ((negPerBin + kernelIter) / Nneg)**(2.0 / len(parCol))
			err.message("  Search starting from the kernel with sqrt:")
			err.message(" " + str(np.sqrt(np.abs(kernel))))
			err.message("  Iteratively growing kernel until the distribution of (P-N)/sqrt(P+N) reaches median/width = {0:.2f} ...".format(skellamTol))
			err.ensure(skellamTol<=0,"The parameter reliability.skellamTol was set to {0:.2f} but must be <= 0. Please change your settings.".format(skellamTol))
		else:
			err.ensure(scaleKernel>=0,\
				"The reliability.scaleKernel parameter cannot be negative.\n"\
				"It should be = 0 if you want SoFiA to find the optimal kernel scaling\n"\
				"or > 0 if you want to set the scaling yourself.\n"\
				"Please change your settings.")
		
		#deltOLD=-1e+9 # Used to stop kernel growth if P-N stops moving closer to zero [NOT USED CURRENTLY]
		if doskellam and makePlot: fig0 = plt.figure()
	else:
		# Note that the user must give sigma, which then gets squared
		err.message("  Using user-defined variance kernel with sqrt(kernel) size: {0}".format(kernel))
		err.ensure(len(parSpace)==len(kernel),"The number of entries in the kernel above does not match the number of parameters you requested for the reliability calculation.")
		kernel = np.identity(len(kernel)) * np.array(kernel)**2
	
	# Set grow_kernel to 1 to start the kernel growing loop below.
	grow_kernel = 1
	
	# This loop will estimate the reliability, check whether the kernel is large enough,
	# and if not pick a larger kernel. If autoKernel = 0 or scaleKernel = 0, we will do
	# just one pass (i.e., we will not grow the kernel).
	while grow_kernel:
		# ------------------------
		# Evaluate N-d reliability
		# ------------------------
		
		if verb: err.message("   estimate normalised positive and negative density fields ...")
		
		Np = gaussian_kde_set_covariance(pars[:,pos], kernel)
		Nn = gaussian_kde_set_covariance(pars[:,neg], kernel)
		
		# Calculate the number of positive and negative sources at the location of positive sources
		Nps = Np(pars[:,pos]) * Npos
		Nns = Nn(pars[:,pos]) * Nneg
		
		# Calculate the number of positive and negative sources at the location of negative sources
		nNps = Np(pars[:,neg]) * Npos
		nNns = Nn(pars[:,neg]) * Nneg
		
		# Calculate the reliability at the location of positive sources
		Rs = (Nps - Nns) / Nps
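		# i.e. R = (P - N) / P: the estimated fraction of positive detections at this point
		# of parameter space that are expected to be genuine.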
		
		# The reliability must be <= 1. If not, something is wrong.
		err.ensure(Rs.max() <= 1, "Maximum reliability greater than 1; something is wrong.\nPlease ensure that enough negative sources are detected\nand decrease your source finding threshold if necessary.", frame=True)
		
		# Find pseudo-reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0
		# if threshold == 0; Rs may be < 0 because of insufficient statistics)
		# These are called pseudo-reliable because some objects may be discarded later based on additional criteria below
		pseudoreliable = np.maximum(Rs, 0) >= threshold

		# Find reliable sources (taking maximum(Rs, 0) in order to include objects with Rs < 0 if
		# threshold == 0; Rs may be < 0 because of insufficient statistics)
		#reliable=(np.maximum(Rs, 0)>=threshold) * (data[pos, ftotCOL].reshape(-1,) > fMin) * (data[pos, fmaxCOL].reshape(-1,) > 4)
		reliable = (np.maximum(Rs, 0) >= threshold) * ((data[pos, ftotCOL] / np.sqrt(data[pos, parNames.index("n_pix")])).reshape(-1,) > fMin)
		
		if autoKernel:
			# Calculate quantities needed for comparison to Skellam distribution
			delt = (nNps - nNns) / np.sqrt(nNps + nNns)
			deltstd = delt.std()
			deltmed = np.median(delt)
			deltmin = delt.min()
			deltmax = delt.max()
			
			if deltmed / deltstd > -100 and doskellam and makePlot:
				plt.hist(delt / deltstd, bins=np.arange(deltmin / deltstd, max(5.1, deltmax / deltstd), 0.01), cumulative=True, histtype="step", color=(min(1, float(max(1.,negPerBin) + kernelIter) / Nneg), 0,0), normed=True)
				deltplot.append([((max(1.,negPerBin) + kernelIter) / Nneg)**(1.0 / len(parCol)), deltmed / deltstd])
						
			if scaleKernel: grow_kernel = 0
			else:
				err.message("  iteration, median, width, median/width = %3i, %9.2e, %9.2e, %9.2e" % (kernelIter, deltmed, deltstd, deltmed / deltstd))

				if deltmed / deltstd > skellamTol or negPerBin + kernelIter >= Nneg:
					grow_kernel = 0
					err.message("  Found good kernel after %i kernel growth iterations. The sqrt(kernel) size is:" % kernelIter)
					err.message(np.sqrt(np.abs(kernel)))
				elif deltmed / deltstd < 5 * skellamTol:
					kernel *= (float(negPerBin + kernelIter + 20) / (negPerBin + kernelIter))**(2.0 / len(parCol)) 
					kernelIter += 20
				elif deltmed / deltstd < 2 * skellamTol:
					kernel *= (float(negPerBin + kernelIter + 10) / (negPerBin + kernelIter))**(2.0 / len(parCol))
					kernelIter += 10
				elif deltmed / deltstd < 1.5 * skellamTol:
					kernel *= (float(negPerBin + kernelIter + 3) / (negPerBin + kernelIter))**(2.0 / len(parCol))
					kernelIter += 3
				else:
					kernel *= (float(negPerBin + kernelIter + 1) / (negPerBin + kernelIter))**(2.0 / len(parCol))
					kernelIter += 1
		else:
			grow_kernel = 0
	
	
	# ------------
	# Skellam plot
	# ------------
	
	if autoKernel and deltmed / deltstd > -100 and doskellam and makePlot:
		plt.plot(np.arange(-10, 10, 0.01), stats.norm().cdf(np.arange(-10, 10, 0.01)), "k-")
		plt.plot(np.arange(-10, 10, 0.01), stats.norm(scale=0.4).cdf(np.arange(-10, 10, 0.01)), "k:")
		plt.legend(("Gaussian (sigma=1)", "Gaussian (sigma=0.4)"), loc="lower right", prop={"size":13})
		plt.hist(delt / deltstd, bins=np.arange(deltmin / deltstd, max(5.1, deltmax / deltstd), 0.01), cumulative=True, histtype="step", color="r", normed=True)
		plt.xlim(-5, 5)
		plt.ylim(0, 1)
		plt.xlabel("(P-N)/sqrt(N+P)")
		plt.ylabel("cumulative distribution")
		plt.plot([0, 0], [0, 1], "k--")
		fig0.savefig("%s_rel_skellam.pdf" % pdfoutname, rasterized=True)
		
		if not scaleKernel:
			fig3 = plt.figure()
			deltplot = np.array(deltplot)
			plt.plot(deltplot[:,0], deltplot[:,1], "ko-")
			plt.xlabel("kernel size (1D-sigma, aribtrary units)")
			plt.ylabel("median/std of (P-N)/sqrt(P+N)")
			plt.axhline(y=skellamTol, linestyle="--", color="r")
			fig3.savefig("%s_rel_skellam-delta.pdf" % pdfoutname, rasterized=True)
	
	
	# -----------------------
	# Scatter plot of sources
	# -----------------------
	
	specialids = []
	
	if doscatter and makePlot:
		if verb: err.message("  plotting sources ...")
		fig1 = plt.figure(figsize=(18, 4.5 * nr))
		plt.subplots_adjust(left=0.06, bottom=0.15/nr, right = 0.97, top=1-0.08/nr, wspace=0.35, hspace=0.25)
		
		n_p = 0
		for jj in projections:
			if verb: err.message("    projection %i/%i" % (projections.index(jj) + 1, len(projections)))
			n_p, p1, p2 = n_p + 1, jj[0], jj[1]
			plt.subplot(nr, nc, n_p)
			plt.scatter(pars[p1,pos], pars[p2,pos], marker="o", c="b", s=10, edgecolor="face", alpha=0.5)
			plt.scatter(pars[p1,neg], pars[p2,neg], marker="o", c="r", s=10, edgecolor="face", alpha=0.5)
			for si in specialids: plt.plot(pars[p1, ids==si], pars[p2, ids==si], "kd", zorder=10000, ms=7, mfc="none", mew=2)
			# Plot Integrated SNR threshold
			if fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_sum","snr_mean"):
				xArray=np.arange(lims[p1][0],lims[p1][1]+(lims[p1][1]-lims[p1][0])/100,(lims[p1][1]-lims[p1][0])/100)
				plt.plot(xArray,np.log10(fMin)*2-xArray,'k:')
			elif fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_mean","snr_sum"):
				yArray=np.arange(lims[p2][0],lims[p2][1]+(lims[p2][1]-lims[p2][0])/100,(lims[p2][1]-lims[p2][0])/100)
				plt.plot(np.log10(fMin)*2-yArray,yArray,'k:')
			plt.xlim(lims[p1][0], lims[p1][1])
			plt.ylim(lims[p2][0], lims[p2][1])
			plt.xlabel(labs[p1])
			plt.ylabel(labs[p2])
			plt.grid(color='k',linestyle='-',linewidth=0.2)
		fig1.savefig("%s_rel_scatter.pdf" % pdfoutname, rasterized=True)
	
	
	# -------------
	# Plot contours
	# -------------
	
	if docontour and makePlot:
		levs = 10**np.arange(-1.5, 2, 0.5)
		
		if verb: err.message("  plotting contours ...")
		fig2 = plt.figure(figsize=(18, 4.5 * nr))
		plt.subplots_adjust(left=0.06, bottom=0.15/nr, right=0.97, top=1-0.08/nr, wspace=0.35, hspace=0.25)
		n_p = 0
		for jj in projections:
			if verb: err.message("    projection %i/%i" % (projections.index(jj) + 1, len(projections)))
			n_p, p1, p2 = n_p + 1, jj[0], jj[1]
			g1, g2 = grid[p1], grid[p2]
			x1 = np.arange(g1[0], g1[1], g1[2])
			x2 = np.arange(g2[0], g2[1], g2[2])
			pshape = (x2.shape[0], x1.shape[0])
			
			# Get array of source parameters on current projection
			parsp = np.concatenate((pars[p1:p1+1], pars[p2:p2+1]), axis=0)
			
			# Derive Np and Nn density fields on the current projection
			setcov = kernel[p1:p2+1:p2-p1,p1:p2+1:p2-p1]
			try:
				Np = gaussian_kde_set_covariance(parsp[:,pos], setcov)
				Nn = gaussian_kde_set_covariance(parsp[:,neg], setcov)
			except:
				err.error(
					"Reliability  determination  failed  because of issues  with the\n"
					"smoothing kernel.  This is likely due to an insufficient number\n"
					"of negative detections. Please review your filtering and source\n"
					"finding settings to ensure that a sufficient number of negative\n"
					"detections is found.", fatal=True, frame=True)
			
			# Evaluate density fields on grid on current projection
			g = np.transpose(np.transpose(np.mgrid[slice(g1[0], g1[1], g1[2]), slice(g2[0], g2[1], g2[2])]).reshape(-1, 2))
			Np = Np(g)
			Nn = Nn(g)
			Np = Np / Np.sum() * Npos
			Nn = Nn / Nn.sum() * Nneg
			Np.resize(pshape)
			Nn.resize(pshape)
			plt.subplot(nr, nc, n_p)
			plt.contour(x1, x2, Np, origin="lower", colors="b", levels=levs, zorder=2)
			plt.contour(x1, x2, Nn, origin="lower", colors="r", levels=levs, zorder=1)
			
			# Plot Integrated SNR threshold
			if fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_sum","snr_mean"):
				xArray=np.arange(lims[p1][0],lims[p1][1]+(lims[p1][1]-lims[p1][0])/100,(lims[p1][1]-lims[p1][0])/100)
				plt.plot(xArray,np.log10(fMin)*2-xArray,'k:')
			elif fMin>0 and (parSpace[jj[0]],parSpace[jj[1]])==("snr_mean","snr_sum"):
				yArray=np.arange(lims[p2][0],lims[p2][1]+(lims[p2][1]-lims[p2][0])/100,(lims[p2][1]-lims[p2][0])/100)
				plt.plot(np.log10(fMin)*2-yArray,yArray,'k:')
			
			if reliable.sum(): plt.scatter(pars[p1,pos][reliable], pars[p2,pos][reliable], marker="o", s=10, edgecolor="k", facecolor="k", zorder=4)
			if (pseudoreliable * (reliable == False)).sum(): plt.scatter(pars[p1,pos][pseudoreliable * (reliable == False)], pars[p2,pos][pseudoreliable * (reliable == False)], marker="x", s=40, edgecolor="0.5", facecolor="0.5", zorder=3)
			for si in specialids: plt.plot(pars[p1,ids==si], pars[p2,ids==si], "kd", zorder=10000, ms=7, mfc="none", mew=2)
			plt.xlim(lims[p1][0], lims[p1][1])
			plt.ylim(lims[p2][0], lims[p2][1])
			plt.xlabel(labs[p1])
			plt.ylabel(labs[p2])
			plt.grid(color='k',linestyle='-',linewidth=0.2)
		fig2.savefig("%s_rel_contour.pdf" % pdfoutname, rasterized=True)
	
	
	# -------------------------
	# Add Np, Nn and R to table
	# -------------------------
	
	# This allows me not to calculate R every time I want to do some plot analysis,
	# but just read it from the file
	if saverel:
		if not (docontour or dostats):
			Nps = Np(pars[:,pos]) * Npos
			Nns = Nn(pars[:,pos]) * Nneg
		Np = np.zeros((data.shape[0],))
		Np[pos] = Nps
		Nn = np.zeros((data.shape[0],))
		Nn[pos] = Nns
		R = -np.ones((data.shape[0],)) # R will be -1 for negative sources
		# Set R to zero for positive sources if R < 0 because of Nn > Np
		R[pos] = np.maximum(0, (Np[pos] - Nn[pos]) / Np[pos])
		data = np.concatenate((data, Np.reshape(-1, 1), Nn.reshape(-1, 1), R.reshape(-1, 1)), axis=1)
	
	data = [list(jj) for jj in list(data)]
	return data, ids[pos][reliable].astype(int)