Example #1
def writeMask(cube, header, dictionary, filename, compress, flagOverwrite):
    header.add_history("SoFiA source finding")
    optionsList = []
    optionsDepth = []
    dictionary = removeOptions(dictionary)
    recursion(dictionary, optionsList, optionsDepth)
    headerList = []

    for i in range(0, len(optionsList)):
        if len(optionsList[i].split("=")) > 1:
            tmpString = optionsList[i]
            depthNumber = optionsDepth[i]
            j = i - 1

            while depthNumber > 0:
                if optionsDepth[i] > optionsDepth[j]:
                    tmpString = optionsList[j] + "." + tmpString
                    depthNumber = optionsDepth[j]
                j -= 1

            headerList.append(tmpString)

    for option in headerList:
        header.add_history(option)
    if cube.max() < 32767: cube = cube.astype("int16")

    # add axes required to make the shape of the mask cube equal to the shape of the input datacube
    while header["naxis"] > len(cube.shape):
        cube.resize((1,) + cube.shape)
    # write the mask as 2D fits if the input does not contain information for a third axis
    if 'CTYPE3' not in header:
        hdu = fits.PrimaryHDU(data=cube[0], header=header)
    else:
        hdu = fits.PrimaryHDU(data=cube, header=header)
    hdu.header["BUNIT"] = "source_ID"
    hdu.header["DATAMIN"] = cube.min()
    hdu.header["DATAMAX"] = cube.max()
    hdu.header["ORIGIN"] = sofia_version_full

    name = filename
    if compress: name += ".gz"

    # Check for overwrite flag:
    if func.check_overwrite(name, flagOverwrite):
        hdu.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)

    return
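
A minimal standalone sketch of the two array manipulations used above, namely padding the mask with degenerate axes until it matches the header's NAXIS count and downcasting to int16 when all source IDs fit (the NAXIS value here is hypothetical):

import numpy as np

mask = np.zeros((64, 64), dtype=np.int32)  # 2D mask from the source finder
naxis = 3                                  # hypothetical NAXIS from the FITS header

# Prepend length-1 axes until the mask has the same dimensionality as the cube
while naxis > mask.ndim:
    mask = mask.reshape((1,) + mask.shape)

# Halve the file size if the largest source ID fits into a 16-bit integer
if mask.max() < 32767:
    mask = mask.astype("int16")

print(mask.shape, mask.dtype)  # (1, 64, 64) int16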
Example #2
def writeMask(cube, header, dictionary, filename, compress, flagOverwrite):
	header.add_history("SoFiA source finding")
	optionsList = []
	optionsDepth = []
	dictionary = removeOptions(dictionary)
	recursion(dictionary,optionsList,optionsDepth)
	headerList = []
	
	for i in range(0, len(optionsList)):
		if len(optionsList[i].split("=")) > 1:
			tmpString = optionsList[i]
			depthNumber = optionsDepth[i]
			j = i - 1
			
			while depthNumber > 0:
				if optionsDepth[i] > optionsDepth[j]:
					tmpString = optionsList[j] + "." + tmpString
					depthNumber = optionsDepth[j]
				j -= 1
			
			headerList.append(tmpString)
	
	for option in headerList:
		header.add_history(option)
	if cube.max() < 32767: cube=cube.astype("int16")
	
	# add axes required to make the shape of the mask cube equal to the shape of the input datacube
	while header["naxis"] > len(cube.shape): cube.resize(tuple([1,] + list(cube.shape)))
	# write the mask as 2D fits if the input does not contain information for a third axis
	if 'CTYPE3' not in header:
		hdu = fits.PrimaryHDU(data=cube[0], header=header)
	else:
		hdu = fits.PrimaryHDU(data=cube, header=header)
	hdu.header["BUNIT"] = "source_ID"
	hdu.header["DATAMIN"] = cube.min()
	hdu.header["DATAMAX"] = cube.max()
	hdu.header["ORIGIN"] = sofia_version_full
	
	name = filename
	if compress: name += ".gz"
	
	# Check for overwrite flag:
	if func.check_overwrite(name, flagOverwrite):
		hdu.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)
	
	return
Example #3
def write_catalog_from_array(mode, objects, catHeader, catUnits, catFormat,
                             parList, outName, flagCompress, flagOverwrite,
                             flagUncertainties):
    # Check output format and compression
    availableModes = ["ASCII", "XML", "SQL"]
    if mode not in availableModes:
        err.warning("Unknown catalogue format: " + str(mode) +
                    ". Defaulting to ASCII.")
        mode = "ASCII"
    modeIndex = availableModes.index(mode)

    if flagCompress: outName += ".gz"
    err.message("Writing " + availableModes[modeIndex] + " catalogue: " +
                outName + ".")

    # Exit if file exists and overwrite flag is set to false
    func.check_overwrite(outName, flagOverwrite, fatal=True)

    # Do we need to write all parameters?
    if parList == ["*"] or not parList: parList = list(catHeader)

    # Remove undefined parameters
    parList = [item for item in parList if item in catHeader]

    # Remove statistical uncertainties if not requested
    if not flagUncertainties:
        for item in ["err_x", "err_y", "err_z", "err_w20", "err_w50"]:
            while item in parList:
                parList.remove(item)

    # Check whether there is anything left
    if not len(parList):
        err.error(
            "No valid output parameters selected. No output catalogue written.",
            fatal=False)
        return

    # Create and write catalogue in requested format
    # -------------------------------------------------------------------------
    if mode == "XML":
        # Define basic XML header information
        votable = Element("VOTABLE")
        resource = SubElement(votable,
                              "RESOURCE",
                              name="SoFiA catalogue (version %s)" %
                              sofia_version)
        description = SubElement(resource, "DESCRIPTION")
        description.text = "Source catalogue from the Source Finding Application (SoFiA) version %s" % sofia_version
        coosys = SubElement(resource, "COOSYS", ID="J2000")
        table = SubElement(resource, "TABLE", ID="sofia_cat", name="sofia_cat")

        # Load list of parameters and unified content descriptors (UCDs)
        ucdList = {}
        fileUcdPath = os.environ["SOFIA_PIPELINE_PATH"]
        fileUcdPath = fileUcdPath.replace("sofia_pipeline.py",
                                          "SoFiA_source_parameters.dat")

        try:
            with open(fileUcdPath) as fileUcd:
                for line in fileUcd:
                    (key, value) = line.split()
                    ucdList[key] = value
        except:
            err.warning("Failed to read UCD file.")

        # Create parameter fields
        for par in parList:
            ucdEntity = ucdList[par] if par in ucdList else ""
            index = list(catHeader).index(par)
            if catFormat[index] == "%30s":
                field = SubElement(table,
                                   "FIELD",
                                   name=par,
                                   ucd=ucdEntity,
                                   datatype="char",
                                   arraysize="30",
                                   unit=catUnits[index])
            else:
                field = SubElement(table,
                                   "FIELD",
                                   name=par,
                                   ucd=ucdEntity,
                                   datatype="float",
                                   unit=catUnits[index])

        # Create data table entries
        data = SubElement(table, "DATA")
        tabledata = SubElement(data, "TABLEDATA")

        for obj in objects:
            tr = SubElement(tabledata, "TR")
            for par in parList:
                td = SubElement(tr, "TD")
                index = list(catHeader).index(par)
                td.text = (catFormat[index] % obj[index]).strip()

        # Write XML catalogue:
        try:
            f1 = gzopen(outName, "wt") if flagCompress else open(outName, "w")  # text mode so that strings can be written under Python 3
        except:
            err.error("Failed to write to XML catalogue: " + outName + ".",
                      fatal=False)
            return
        f1.write(prettify(votable))
        # f1.write(tostring(votable, "utf-8"))  # without prettifying, which is faster and uses much less memory
        f1.close()

    # -----------------------------------------------------------------End-XML-

    elif mode == "SQL":
        # Record if there is an ID column in the catalogue
        # (if no ID is present, we will later create one for use as primary key)
        noID = "id" not in parList

        # Write some header information:
        content = "-- SoFiA catalogue (version %s)\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\n\n" % sofia_version

        # Construct and write table structure:
        flagProgress = False
        content += "CREATE TABLE IF NOT EXISTS `SoFiA-Catalogue` (\n"
        if noID: content += "  `id` INT NOT NULL,\n"
        for par in parList:
            index = list(catHeader).index(par)
            if flagProgress: content += ",\n"
            content += "  " + sqlHeaderItem(par) + sqlFormat(catFormat[index])
            flagProgress = True
        content += ",\n  PRIMARY KEY (`id`),\n  KEY (`id`)\n) DEFAULT CHARSET=utf8 COMMENT=\'SoFiA source catalogue\';\n\n"

        # Insert data:
        flagProgress = False
        content += "INSERT INTO `SoFiA-Catalogue` ("
        if noID: content += "`id`, "
        for par in parList:
            if flagProgress: content += ", "
            content += sqlHeaderItem(par)
            flagProgress = True
        content += ") VALUES\n"

        source_count = 0
        for obj in objects:
            flagProgress = False
            source_count += 1
            content += "("
            if noID: content += str(source_count) + ", "

            for par in parList:
                index = list(catHeader).index(par)
                if flagProgress: content += ", "
                content += sqlDataItem(obj[index], catFormat[index])
                flagProgress = True

            if (source_count < len(objects)): content += "),\n"
            else: content += ");\n"

        # Write catalogue
        try:
            fp = gzopen(outName, "wt") if flagCompress else open(outName, "w")  # text mode so that strings can be written under Python 3
        except:
            err.error("Failed to write to SQL catalogue: " + outName + ".",
                      fatal=False)
            return
        fp.write(content)
        fp.close()

    # -----------------------------------------------------------------End-SQL-

    else:  # mode == "ASCII" by default
        # Determine header sizes based on variable-length formatting
        lenCathead = []
        for j in catFormat:
            lenCathead.append(
                int(
                    j.split("%")[1].split("e")[0].split("f")[0].split("i")
                    [0].split("d")[0].split(".")[0].split("s")[0]) + 1)

        # Create header
        headerName = ""
        headerUnit = ""
        headerCol = ""
        outFormat = ""
        colCount = 0
        header = "SoFiA catalogue (version %s)\n" % sofia_version

        for par in parList:
            index = list(catHeader).index(par)
            headerName += catHeader[index].rjust(lenCathead[index])
            headerUnit += catUnits[index].rjust(lenCathead[index])
            headerCol += ("(%i)" % (colCount + 1)).rjust(lenCathead[index])
            outFormat += catFormat[index] + " "
            colCount += 1
        header += headerName[3:] + '\n' + headerUnit[3:] + '\n' + headerCol[3:]

        # Create catalogue
        outObjects = []
        for obj in objects:
            outObjects.append([])
            for par in parList:
                outObjects[-1].append(obj[list(catHeader).index(par)])

        # Write ASCII catalogue
        try:
            np.savetxt(outName,
                       np.array(outObjects, dtype=object),
                       fmt=outFormat,
                       header=header)

        except:
            err.error("Failed to write to ASCII catalogue: " + outName + ".",
                      fatal=False)
            return

    # ---------------------------------------------------------------End-ASCII-

    return
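
The prettify() helper called when writing the XML catalogue is not part of this listing; a common implementation based on xml.dom.minidom, consistent with how it is used above, might look like this:

from xml.dom import minidom
from xml.etree.ElementTree import Element, SubElement, tostring

def prettify(elem):
    # Serialise the ElementTree element, then re-parse it for indented output
    rough = tostring(elem, "utf-8")
    return minidom.parseString(rough).toprettyxml(indent="  ")

votable = Element("VOTABLE")
SubElement(votable, "RESOURCE", name="demo")
print(prettify(votable))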
Example #4
def writeSubcube(cube, header, mask, objects, cathead, outroot, outputDir,
                 compress, flagOverwrite):
    # Strip path variable to get the file name and the directory separately
    splitroot = outroot.split("/")
    cubename = splitroot[-1]
    #if len(splitroot) > 1:
    #	outputDir = "/".join(splitroot[:-1]) + "/objects/"
    #else:
    #	outputDir = "./objects/"

    # Check if output directory exists and create it if not
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)

    # Copy of header for manipulation
    headerCubelets = header.copy()

    # Read all important information (central pixels & values, increments) from the header
    #dX    = headerCubelets["CDELT1"]
    #dY    = headerCubelets["CDELT2"]
    dZ = headerCubelets["CDELT3"]
    #cValX = headerCubelets["CRVAL1"]
    #cValY = headerCubelets["CRVAL2"]
    cValZ = headerCubelets["CRVAL3"]
    cPixX = headerCubelets["CRPIX1"] - 1
    cPixY = headerCubelets["CRPIX2"] - 1
    cPixZ = headerCubelets["CRPIX3"] - 1
    cubeDim = cube.shape

    for obj in objects:
        # Centres and bounding boxes
        Xc = obj[cathead == "x"][0]
        Yc = obj[cathead == "y"][0]
        Zc = obj[cathead == "z"][0]
        Xmin = obj[cathead == "x_min"][0]
        Ymin = obj[cathead == "y_min"][0]
        Zmin = obj[cathead == "z_min"][0]
        Xmax = obj[cathead == "x_max"][0]
        Ymax = obj[cathead == "y_max"][0]
        Zmax = obj[cathead == "z_max"][0]

        # If centre of mass estimation is wrong replace by geometric centre
        if Xc < 0 or Xc > cubeDim[2] - 1: Xc = obj[cathead == "x_geo"][0]
        if Yc < 0 or Yc > cubeDim[1] - 1: Yc = obj[cathead == "y_geo"][0]
        if Zc < 0 or Zc > cubeDim[0] - 1: Zc = obj[cathead == "z_geo"][0]

        cPixXNew = int(Xc)
        cPixYNew = int(Yc)
        cPixZNew = int(Zc)

        # Largest distance of source limits from the centre
        maxX = 2 * max(abs(cPixXNew - Xmin), abs(cPixXNew - Xmax))
        maxY = 2 * max(abs(cPixYNew - Ymin), abs(cPixYNew - Ymax))
        maxZ = 2 * max(abs(cPixZNew - Zmin), abs(cPixZNew - Zmax))

        # Calculate the new bounding box for the mass centred cube
        XminNew = cPixXNew - maxX
        if XminNew < 0: XminNew = 0
        YminNew = cPixYNew - maxY
        if YminNew < 0: YminNew = 0
        ZminNew = cPixZNew - maxZ
        if ZminNew < 0: ZminNew = 0
        XmaxNew = cPixXNew + maxX
        if XmaxNew > cubeDim[2] - 1: XmaxNew = cubeDim[2] - 1
        YmaxNew = cPixYNew + maxY
        if YmaxNew > cubeDim[1] - 1: YmaxNew = cubeDim[1] - 1
        ZmaxNew = cPixZNew + maxZ
        if ZmaxNew > cubeDim[0] - 1: ZmaxNew = cubeDim[0] - 1

        # Calculate the centre with respect to the cutout cube
        cPixXCut = cPixX - XminNew
        cPixYCut = cPixY - YminNew
        cPixZCut = cPixZ - ZminNew

        # Update header keywords:
        headerCubelets["CRPIX1"] = cPixXCut + 1
        headerCubelets["CRPIX2"] = cPixYCut + 1
        headerCubelets["CRPIX3"] = cPixZCut + 1

        # Extract the cubelet
        [ZminNew, ZmaxNew, YminNew, YmaxNew, XminNew, XmaxNew] = map(int, [ZminNew, ZmaxNew, YminNew, YmaxNew, XminNew, XmaxNew])
        subcube = cube[ZminNew:ZmaxNew + 1, YminNew:YmaxNew + 1,
                       XminNew:XmaxNew + 1]

        # Update header keywords:
        headerCubelets["NAXIS1"] = subcube.shape[2]
        headerCubelets["NAXIS2"] = subcube.shape[1]
        headerCubelets["NAXIS3"] = subcube.shape[0]

        headerCubelets["ORIGIN"] = sofia_version_full

        # Write the cubelet
        hdu = fits.PrimaryHDU(data=subcube, header=headerCubelets)
        hdulist = fits.HDUList([hdu])
        name = outputDir + cubename + "_" + str(int(obj[0])) + ".fits"
        if compress: name += ".gz"

        # Check for overwrite flag:
        if func.check_overwrite(name, flagOverwrite):
            hdulist.writeto(name,
                            output_verify="warn",
                            **__astropy_arg_overwrite__)

        hdulist.close()

        # -------------------------
        # Position-velocity diagram
        # -------------------------

        if "kin_pa" in cathead:
            kin_pa = math.radians(float(obj[cathead == "kin_pa"][0]))
            pv_sampling = 10
            pv_r = np.arange(-max(subcube.shape[1:]),
                             max(subcube.shape[1:]) - 1 + 1.0 / pv_sampling,
                             1.0 / pv_sampling)
            pv_y = Yc - float(YminNew) + pv_r * math.cos(kin_pa)
            pv_x = Xc - float(XminNew) - pv_r * math.sin(kin_pa)
            pv_x, pv_y = pv_x[(pv_x >= 0) * (pv_x <= subcube.shape[2] - 1)], pv_y[(pv_x >= 0) * (pv_x <= subcube.shape[2] - 1)]
            pv_x, pv_y = pv_x[(pv_y >= 0) * (pv_y <= subcube.shape[1] - 1)], pv_y[(pv_y >= 0) * (pv_y <= subcube.shape[1] - 1)]
            pv_x.resize((1, pv_x.shape[0]))
            pv_y.resize((pv_x.shape))
            pv_coords = np.concatenate((pv_y, pv_x), axis=0)
            pv_array = []
            for jj in range(subcube.shape[0]):
                plane = map_coordinates(subcube[jj], pv_coords)
                plane = [plane[ii::pv_sampling] for ii in range(pv_sampling)]
                plane = np.array([ii[:plane[-1].shape[0]] for ii in plane])
                pv_array.append(plane.mean(axis=0))
            pv_array = np.array(pv_array)
            hdu = fits.PrimaryHDU(data=pv_array, header=headerCubelets)
            hdulist = fits.HDUList([hdu])
            hdulist[0].header["CTYPE1"] = "PV--DIST"
            hdulist[0].header["CDELT1"] = hdulist[0].header["CDELT2"]
            hdulist[0].header["CRVAL1"] = 0
            hdulist[0].header["CRPIX1"] = pv_array.shape[1] / 2
            hdulist[0].header["CTYPE2"] = hdulist[0].header["CTYPE3"]
            hdulist[0].header["CDELT2"] = hdulist[0].header["CDELT3"]
            hdulist[0].header["CRVAL2"] = hdulist[0].header["CRVAL3"]
            hdulist[0].header["CRPIX2"] = hdulist[0].header["CRPIX3"]
            hdulist[0].header["ORIGIN"] = sofia_version_full
            func.delete_3rd_axis(hdulist[0].header)
            name = outputDir + cubename + "_" + str(int(obj[0])) + "_pv.fits"
            if compress: name += ".gz"

            # Check for overwrite flag:
            if func.check_overwrite(name, flagOverwrite):
                hdulist.writeto(name,
                                output_verify="warn",
                                **__astropy_arg_overwrite__)
            hdulist.close()

        # -------------
        # Mask cubelets
        # -------------

        # Remove all other sources from the mask
        submask = mask[ZminNew:ZmaxNew + 1, YminNew:YmaxNew + 1,
                       XminNew:XmaxNew + 1].astype("int")
        submask[submask != obj[0]] = 0
        submask[submask == obj[0]] = 1

        # Write mask
        hdu = fits.PrimaryHDU(data=submask.astype("int16"),
                              header=headerCubelets)
        hdu.header["BUNIT"] = "Source-ID"
        hdu.header["DATAMIN"] = np.nanmin(submask)
        hdu.header["DATAMAX"] = np.nanmax(submask)
        hdu.header["ORIGIN"] = sofia_version_full
        hdulist = fits.HDUList([hdu])
        name = outputDir + cubename + "_" + str(int(obj[0])) + "_mask.fits"
        if compress: name += ".gz"

        # Check for overwrite flag:
        if func.check_overwrite(name, flagOverwrite):
            hdulist.writeto(name,
                            output_verify="warn",
                            **__astropy_arg_overwrite__)
        hdulist.close()

        # ------------------
        # Moments 0, 1 and 2
        # ------------------

        # Units of moment images
        # Velocity
        if func.check_header_keywords(func.KEYWORDS_VELO,
                                      headerCubelets["CTYPE3"]):
            if not "CUNIT3" in headerCubelets or headerCubelets["CUNIT3"].lower() == "m/s":
                # Converting m/s to km/s
                dkms = abs(headerCubelets["CDELT3"]) * 1e-3
                scalemom12 = 1e-3
                bunitExt = ".km/s"
            elif headerCubelets["CUNIT3"].lower() == "km/s":
                dkms = abs(headerCubelets["CDELT3"])
                scalemom12 = 1.0
                bunitExt = ".km/s"
            else:
                # Working with whatever units the cube has
                dkms = abs(headerCubelets["CDELT3"])
                scalemom12 = 1.0
                bunitExt = "." + headerCubelets["CUNIT3"]
        # Frequency
        elif func.check_header_keywords(func.KEYWORDS_FREQ,
                                        headerCubelets["CTYPE3"]):
            if not "CUNIT3" in headerCubelets or headerCubelets["CUNIT3"].lower() == "hz":
                dkms = abs(headerCubelets["CDELT3"])
                scalemom12 = 1.0
                bunitExt = ".Hz"
            elif headerCubelets["CUNIT3"].lower() == "khz":
                # Converting kHz to Hz
                dkms = abs(headerCubelets["CDELT3"]) * 1e+3
                scalemom12 = 1e+3
                bunitExt = ".Hz"
            else:
                # Working with whatever units the cube has
                dkms = abs(headerCubelets["CDELT3"])
                scalemom12 = 1.0
                bunitExt = "." + headerCubelets["CUNIT3"]
        # Other
        else:
            # Working with whatever units the cube has
            dkms = abs(headerCubelets["CDELT3"])
            scalemom12 = 1.0
            if not "CUNIT3" in headerCubelets:
                bunitExt = ".std_unit_" + headerCubelets["CTYPE3"]
            else:
                bunitExt = "." + headerCubelets["CUNIT3"]

        # Make copy of subcube and regrid if necessary
        # NOTE: Why on earth do we need to make a copy here? If we don't create a copy,
        #       then SoFiA will crash as all pixels in the moment map are NaN, but I
        #       don't understand why this would be the case in the first place.
        subcubeCopy = subcube.copy()
        if "cellscal" in headerCubelets and headerCubelets["cellscal"] == "1/F":
            subcubeCopy[submask == 0] = 0  # NOTE: These will later be set to NaN by the regridding task
            subcubeCopy = func.regridMaskedChannels(subcubeCopy, submask,
                                                    headerCubelets)
        else:
            subcubeCopy[submask == 0] = np.nan  # NOTE: Manually set to NaN to ensure correct generation of spectra below

        moments = [None, None, None, None, None, None]
        with np.errstate(invalid="ignore"):
            # Definition of moment 0
            moments[0] = np.nansum(subcubeCopy, axis=0)

            # Definition of moment 1
            velArr = ((np.arange(subcubeCopy.shape[0]).reshape(
                (subcubeCopy.shape[0], 1, 1)) + 1.0 - headerCubelets["CRPIX3"])
                      * headerCubelets["CDELT3"] +
                      headerCubelets["CRVAL3"]) * scalemom12
            moments[1] = np.divide(np.nansum(velArr * subcubeCopy, axis=0),
                                   moments[0])
            moments[1][~np.isfinite(moments[1])] = np.nan
            # NOTE: Here we make use of array broadcasting in NumPy, but we need to reshape the velocity array
            #       from [nz] to [nz, 1, 1] for this to work, so that [nz, 1, 1] * [nz, ny, nx] --> [nz, ny, nx].

            # Definition of moment 2
            velArrShift = velArr - moments[1]
            moments[2] = np.sqrt(
                np.divide(
                    np.nansum(velArrShift * velArrShift * subcubeCopy, axis=0),
                    moments[0]))
            moments[2][~np.isfinite(moments[2])] = np.nan
            # NOTE: The above works due to array broadcasting in NumPy and despite different array dimensions.
            #       [nz, 1, 1] - [ny, nx] --> [nz, ny, nx] according to NumPy's broadcasting rules.

            # Moment 1 and 2 images considering positive voxels only
            subcubeCopy[subcubeCopy < 0] = np.nan
            moments[3] = np.nansum(subcubeCopy, axis=0)
            moments[4] = np.divide(np.nansum(velArr * subcubeCopy, axis=0),
                                   moments[3])
            moments[4][~np.isfinite(moments[4])] = np.nan
            velArrShift = velArr - moments[4]
            moments[5] = np.sqrt(
                np.divide(
                    np.nansum(velArrShift * velArrShift * subcubeCopy, axis=0),
                    moments[3]))
            moments[5][~np.isfinite(moments[5])] = np.nan

        moments[0] *= dkms
        moments[3] *= dkms
        units = [
            headerCubelets["BUNIT"] + bunitExt, bunitExt[1:], bunitExt[1:]
        ]

        for i in range(3):
            hdu = fits.PrimaryHDU(data=moments[i], header=headerCubelets)
            func.delete_3rd_axis(hdu.header)
            hdu.header["BUNIT"] = units[i]
            hdu.header["DATAMIN"] = np.nanmin(moments[i])
            hdu.header["DATAMAX"] = np.nanmax(moments[i])
            hdu.header["ORIGIN"] = sofia_version_full
            filename = outputDir + cubename + "_{0:d}_mom{1:d}.fits".format(
                int(obj[0]), i)
            if compress: filename += ".gz"
            if func.check_overwrite(filename, flagOverwrite):
                hdu.writeto(filename,
                            output_verify="warn",
                            **__astropy_arg_overwrite__)

            if i:
                hdu = fits.PrimaryHDU(data=moments[i + 3],
                                      header=headerCubelets)
                func.delete_3rd_axis(hdu.header)
                hdu.header["BUNIT"] = units[i]
                hdu.header["DATAMIN"] = np.nanmin(moments[i + 3])
                hdu.header["DATAMAX"] = np.nanmax(moments[i + 3])
                hdu.header["ORIGIN"] = sofia_version_full
                filename = outputDir + cubename + "_{0:d}_posmom{1:d}.fits".format(
                    int(obj[0]), i)
                if compress: filename += ".gz"
                if func.check_overwrite(filename, flagOverwrite):
                    hdu.writeto(filename,
                                output_verify="warn",
                                **__astropy_arg_overwrite__)

        # -------------------
        # Integrated spectrum
        # -------------------
        spec = np.nansum(subcubeCopy, axis=(1, 2))
        nPix = np.sum(~np.isnan(subcubeCopy), axis=(1, 2))

        name = outputDir + cubename + "_" + str(int(obj[0])) + "_spec.txt"
        if compress: name += ".gz"

        # Check for overwrite flag:
        if func.check_overwrite(name, flagOverwrite):
            if compress:
                import gzip
                f = gzip.open(name, "wt")  # text mode so that strings can be written under Python 3
            else:
                f = open(name, "w")

            f.write("# Integrated source spectrum\n")
            f.write("# Creator: %s\n#\n" % sofia_version_full)
            f.write("# Description of columns:\n")
            f.write("# - Chan      Channel number.\n")
            f.write("# - Spectral  Associated value of the spectral coordinate according to\n")
            f.write("#             the WCS information in the FITS file header.\n")
            f.write("# - Sum       Sum of flux values of all spatial pixels covered by the\n")
            f.write("#             source in that channel. Note that this has not yet been\n")
            f.write("#             divided by the beam solid angle! If your data cube is in\n")
            f.write("#             Jy/beam, you will have to manually divide by the beam\n")
            f.write("#             size which, for Gaussian beams, is given as\n")
            f.write("#               PI * a * b / (4 * ln(2))\n")
            f.write("#             where a and b are the major and minor axis of the beam in\n")
            f.write("#             units of pixels.\n")
            f.write("# - Npix      Number of spatial pixels covered by the source in that\n")
            f.write("#             channel. This can be used to determine the statistical\n")
            f.write("#             uncertainty of the summed flux value. Again, this has\n")
            f.write("#             not yet been corrected for any potential spatial correla-\n")
            f.write("#             tion of pixels due to the beam solid angle!\n#\n")
            f.write("# Chan        Spectral             Sum    Npix\n")
            f.write("# --------------------------------------------\n")

            for i in range(0, len(spec)):
                xspec = cValZ + (i + float(ZminNew) - cPixZ) * dZ
                f.write("%6d %15.6e %15.6e %7d\n" % (i + ZminNew, xspec, spec[i], nPix[i]))

            f.close()
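
The moment calculation above relies on NumPy broadcasting between the [nz, 1, 1] spectral-coordinate array and the [nz, ny, nx] cubelet, as the NOTE comments explain. A condensed sketch of the same moment 0/1/2 definitions on a toy cube (all WCS values hypothetical):

import numpy as np

nz, ny, nx = 30, 16, 16
cube = np.random.default_rng(0).normal(1.0, 0.1, (nz, ny, nx))

# Spectral coordinate per channel, reshaped to [nz, 1, 1] so that
# [nz, 1, 1] * [nz, ny, nx] broadcasts to [nz, ny, nx]
crpix3, cdelt3, crval3 = 1.0, 5000.0, 1.42e9   # hypothetical header values
spec = (np.arange(nz).reshape((nz, 1, 1)) + 1.0 - crpix3) * cdelt3 + crval3

mom0 = np.nansum(cube, axis=0)
mom1 = np.nansum(spec * cube, axis=0) / mom0
# [nz, 1, 1] - [ny, nx] also broadcasts to [nz, ny, nx]
mom2 = np.sqrt(np.nansum((spec - mom1) ** 2 * cube, axis=0) / mom0)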
Example #5
def write_catalog_from_array(mode, objects, catHeader, catUnits, catFormat, parList, outName, flagCompress, flagOverwrite, flagUncertainties):
	# Check output format and compression
	availableModes = ["ASCII", "XML", "SQL"]
	if mode not in availableModes:
		err.warning("Unknown catalogue format: " + str(mode) + ". Defaulting to ASCII.")
		mode = "ASCII"
	modeIndex = availableModes.index(mode)
	
	if flagCompress: outName += ".gz"
	err.message("Writing " + availableModes[modeIndex] + " catalogue: " + outName + ".")
	
	# Exit if file exists and overwrite flag is set to false
	func.check_overwrite(outName, flagOverwrite, fatal=True)
	
	# Do we need to write all parameters?
	if parList == ["*"] or not parList: parList = list(catHeader)
	
	# Remove undefined parameters
	parList = [item for item in parList if item in catHeader]
	
	# Remove statistical uncertainties if not requested
	if not flagUncertainties:
		for item in ["err_x", "err_y", "err_z", "err_w20", "err_w50"]:
			while item in parList: parList.remove(item)
	
	# Check whether there is anything left
	if not len(parList):
		err.error("No valid output parameters selected. No output catalogue written.", fatal=False)
		return
	
	
	# Create and write catalogue in requested format
	# -------------------------------------------------------------------------
	if mode == "XML":
		# Define basic XML header information
		votable          = Element("VOTABLE")
		resource         = SubElement(votable, "RESOURCE", name="SoFiA catalogue (version %s)" % sofia_version)
		description      = SubElement(resource, "DESCRIPTION")
		description.text = "Source catalogue from the Source Finding Application (SoFiA) version %s" % sofia_version
		coosys           = SubElement(resource, "COOSYS", ID="J2000")
		table            = SubElement(resource, "TABLE", ID="sofia_cat", name="sofia_cat")
		
		# Load list of parameters and unified content descriptors (UCDs)
		ucdList = {}
		fileUcdPath = os.environ["SOFIA_PIPELINE_PATH"]
		fileUcdPath = fileUcdPath.replace("sofia_pipeline.py", "SoFiA_source_parameters.dat")
		
		try:
			with open(fileUcdPath) as fileUcd:
				for line in fileUcd:
					(key, value) = line.split()
					ucdList[key] = value
		except:
			err.warning("Failed to read UCD file.")
		
		# Create parameter fields
		for par in parList:
			ucdEntity = ucdList[par] if par in ucdList else ""
			index = list(catHeader).index(par)
			if catFormat[index] == "%30s":
				field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="char", arraysize="30", unit=catUnits[index])
			else:
				field = SubElement(table, "FIELD", name=par, ucd=ucdEntity, datatype="float", unit=catUnits[index])
		
		# Create data table entries
		data = SubElement(table, "DATA")
		tabledata = SubElement(data, "TABLEDATA")
		
		for obj in objects:
			tr = SubElement(tabledata, "TR")
			for par in parList:
				td = SubElement(tr, "TD")
				index = list(catHeader).index(par)
				td.text = (catFormat[index] % obj[index]).strip()
		
		# Write XML catalogue:
		try:
			f1 = gzopen(outName, "wt") if flagCompress else open(outName, "w")  # text mode so that strings can be written under Python 3
		except:
			err.error("Failed to write to XML catalogue: " + outName + ".", fatal=False)
			return
		f1.write(prettify(votable))
		# f1.write(tostring(votable, "utf-8"))  # without prettifying, which is faster and uses much less memory
		f1.close()
	
	# -----------------------------------------------------------------End-XML-
	
	elif mode == "SQL":
		# Record if there is an ID column in the catalogue
		# (if no ID is present, we will later create one for use as primary key)
		noID = "id" not in parList
		
		# Write some header information:
		content = "-- SoFiA catalogue (version %s)\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\n\n" % sofia_version
		
		# Construct and write table structure:
		flagProgress = False
		content += "CREATE TABLE IF NOT EXISTS `SoFiA-Catalogue` (\n"
		if noID: content += "  `id` INT NOT NULL,\n"
		for par in parList:
			index = list(catHeader).index(par)
			if flagProgress: content += ",\n"
			content += "  " + sqlHeaderItem(par) + sqlFormat(catFormat[index])
			flagProgress = True
		content += ",\n  PRIMARY KEY (`id`),\n  KEY (`id`)\n) DEFAULT CHARSET=utf8 COMMENT=\'SoFiA source catalogue\';\n\n"
		
		# Insert data:
		flagProgress = False
		content += "INSERT INTO `SoFiA-Catalogue` ("
		if noID: content += "`id`, "
		for par in parList:
			if flagProgress: content += ", "
			content += sqlHeaderItem(par)
			flagProgress = True
		content += ") VALUES\n"
		
		source_count = 0
		for obj in objects:
			flagProgress = False
			source_count += 1
			content += "("
			if noID: content += str(source_count) + ", "
			
			for par in parList:
				index = list(catHeader).index(par)
				if flagProgress: content += ", "
				content += sqlDataItem(obj[index], catFormat[index])
				flagProgress = True
			
			if (source_count < len(objects)): content += "),\n"
			else: content += ");\n"
		
		# Write catalogue
		try:
			fp = gzopen(outName, "wt") if flagCompress else open(outName, "w")  # text mode so that strings can be written under Python 3
		except:
			err.error("Failed to write to SQL catalogue: " + outName + ".", fatal=False)
			return
		fp.write(content)
		fp.close()
	
	# -----------------------------------------------------------------End-SQL-
	
	else: # mode == "ASCII" by default
		# Determine header sizes based on variable-length formatting
		lenCathead = []
		for j in catFormat: lenCathead.append(int(j.split("%")[1].split("e")[0].split("f")[0].split("i")[0].split("d")[0].split(".")[0].split("s")[0]) + 1)
		
		# Create header
		headerName = ""
		headerUnit = ""
		headerCol  = ""
		outFormat  = ""
		colCount   =  0
		header     = "SoFiA catalogue (version %s)\n" % sofia_version
		
		for par in parList:
			index = list(catHeader).index(par)
			headerName += catHeader[index].rjust(lenCathead[index])
			headerUnit += catUnits[index].rjust(lenCathead[index])
			headerCol  += ("(%i)" % (colCount + 1)).rjust(lenCathead[index])
			outFormat  += catFormat[index] + " "
			colCount += 1
		header += headerName[3:] + '\n' + headerUnit[3:] + '\n' + headerCol[3:]
		
		# Create catalogue
		outObjects = []
		for obj in objects:
			outObjects.append([])
			for par in parList: outObjects[-1].append(obj[list(catHeader).index(par)])
		
		# Write ASCII catalogue
		try:
			np.savetxt(outName, np.array(outObjects, dtype=object), fmt=outFormat, header=header)
		
		except:
			err.error("Failed to write to ASCII catalogue: " + outName + ".", fatal=False)
			return
	
	# ---------------------------------------------------------------End-ASCII-
	
	return
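
The helpers sqlHeaderItem(), sqlFormat() and sqlDataItem() are not included in this listing; plausible minimal versions, inferred purely from how they are used in the SQL branch above (the actual SoFiA implementations may differ), could look like this:

def sqlHeaderItem(name):
    # Back-quote the column name for MySQL
    return "`" + str(name) + "`"

def sqlFormat(fmt):
    # Map a printf-style catalogue format onto an SQL column type (assumption)
    if fmt.endswith("s"): return " VARCHAR(256) NOT NULL"
    if fmt.endswith(("i", "d")): return " INT NOT NULL"
    return " DOUBLE NOT NULL"

def sqlDataItem(value, fmt):
    # Quote strings, print numbers verbatim
    if fmt.endswith("s"): return "'" + str(value) + "'"
    return str(value)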
Example #6
def writeMoments(datacube, maskcube, filename, debug, header, compress,
                 write_mom, flagOverwrite):
    # Exit if nothing is to be done
    if not any(write_mom):
        err.warning(
            "No moment maps requested; skipping moment map generation.")
        return

    # ---------------------------
    # Number of detected channels
    # ---------------------------
    nrdetchan = (maskcube > 0).sum(axis=0)
    if np.nanmax(nrdetchan) < 65535:
        nrdetchan = nrdetchan.astype("int16")
    else:
        nrdetchan = nrdetchan.astype("int32")

    hdu = pyfits.PrimaryHDU(data=nrdetchan, header=header)
    hdu.header["BUNIT"] = "Nchan"
    hdu.header["DATAMIN"] = np.nanmin(nrdetchan)
    hdu.header["DATAMAX"] = np.nanmax(nrdetchan)
    hdu.header["ORIGIN"] = sofia_version_full
    func.delete_header(hdu.header, "CTYPE3")
    func.delete_header(hdu.header, "CRPIX3")
    func.delete_header(hdu.header, "CRVAL3")
    func.delete_header(hdu.header, "CDELT3")

    name = str(filename) + "_nrch.fits"
    if compress: name += ".gz"

    # Check for overwrite flag
    if func.check_overwrite(name, flagOverwrite):
        hdu.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)

    # ----------------------
    # Moment 0, 1 and 2 maps
    # ----------------------
    # WARNING: The generation of moment maps will mask the copy of the data cube held
    #          in memory by SoFiA. If you wish to use the original data cube after
    #          this point, please reload it first!
    datacube[maskcube == 0] = 0

    # Regrid cube if necessary
    # ------------------------
    if "CELLSCAL" in header and header["CELLSCAL"] == "1/F":
        err.warning("CELLSCAL keyword with value of 1/F found.\n"
                    "Will regrid data cube before creating moment images.")
        datacube = func.regridMaskedChannels(datacube, maskcube, header)

    # ALERT: Why are we doing this?
    #datacube = np.array(datacube, dtype=np.single)

    # Extract relevant WCS parameters
    # -------------------------------
    if func.check_wcs_info(header):
        width = header["CDELT3"]
        chan0 = header["CRPIX3"]
        freq0 = header["CRVAL3"]
        mom_scale_factor = 1.0

        # Velocity
        if func.check_header_keywords(func.KEYWORDS_VELO, header["CTYPE3"]):
            if not "CUNIT3" in header or header["CUNIT3"].lower() == "m/s":
                # Assuming m/s and converting to km/s
                mom_scale_factor = 1.0e-3
                unit_spec = "km/s"
            elif header["CUNIT3"].lower() == "km/s":
                # Assuming km/s
                unit_spec = "km/s"
            else:
                # Working with whatever velocity units the cube has
                unit_spec = str(header["CUNIT3"])
        # Frequency
        elif func.check_header_keywords(func.KEYWORDS_FREQ, header["CTYPE3"]):
            if not "CUNIT3" in header or header["CUNIT3"].lower() == "hz":
                # Assuming Hz
                unit_spec = "Hz"
            elif header["CUNIT3"].lower() == "khz":
                # Assuming kHz and converting to Hz
                mom_scale_factor = 1.0e+3
                unit_spec = "Hz"
            else:
                # Working with whatever frequency units the cube has
                unit_spec = str(header["CUNIT3"])
        # Other
        else:
            # Neither velocity nor frequency: fall back to the header unit if present
            unit_spec = str(header["CUNIT3"]) if "CUNIT3" in header else "chan"
    else:
        err.warning(
            "Axis descriptors missing from FITS file header.\nMoment maps will not be scaled!"
        )
        width = 1.0
        chan0 = 0.0
        freq0 = 0.0
        mom_scale_factor = 1.0
        unit_spec = "chan"

    # Calculate moment maps
    # ---------------------
    moments = [None, None, None]
    # ALERT: The order of axes and memory layout differ, so transposition
    #        and memory layout change are necessary! This will need to be
    #        fixed in some way.
    datacube = np.transpose(datacube, axes=[1, 2, 0]).astype(np.float32,
                                                             order="C")
    with np.errstate(invalid="ignore"):
        if any(write_mom):
            # Definition of moment 0
            #moments[0] = np.nansum(datacube, axis=0)
            moments[0] = stat.moment(datacube, mom=0)

        if write_mom[1] or write_mom[2]:
            # Definition of moment 1
            #velArr = ((np.arange(datacube.shape[0]) + 1.0 - chan0) * width + freq0).reshape((datacube.shape[0], 1, 1))
            #moments[1] = np.divide(np.nansum(velArr * datacube, axis=0), moments[0])
            moments[1] = stat.moment(datacube, mom=1, mom0=moments[0])

        if write_mom[2]:
            # Definition of moment 2
            #velArr = velArr - moments[1]
            #moments[2] = np.sqrt(np.divide(np.nansum(velArr * velArr * datacube, axis=0), moments[0]))
            moments[2] = stat.moment(datacube,
                                     mom=2,
                                     mom0=moments[0],
                                     mom1=moments[1])
    datacube = np.transpose(datacube, axes=[2, 0, 1])

    # Convert moments to physical units
    # ---------------------------------
    if moments[0] is not None: moments[0] *= abs(width)
    if moments[1] is not None:
        moments[1] = (moments[1] + 1.0 - chan0) * width + freq0
    if moments[2] is not None: moments[2] *= abs(width)

    # Set up unit strings
    # -------------------
    if "BUNIT" in header:
        unit_flux = str(header["BUNIT"])
        # Correct for common misspellings of "Jy[/beam]"
        if unit_flux.lower() == "jy":
            unit_flux = "Jy." + unit_spec
        elif unit_flux.lower() == "jy/beam":
            unit_flux = "Jy/beam." + unit_spec
        else:
            unit_flux += "." + unit_spec
    else:
        err.warning("Cannot determine flux unit; BUNIT missing from header.")
        unit_flux = ""
    unit_mom = [unit_flux, unit_spec, unit_spec]

    # Writing moment maps to disk
    # ---------------------------
    for i in range(3):
        if write_mom[i] and moments[i] is not None:
            err.message("Writing moment {0:d} image.".format(i))
            moments[i] *= mom_scale_factor

            hdu = pyfits.PrimaryHDU(data=moments[i], header=header)
            hdu.header["BUNIT"] = unit_mom[i]
            hdu.header["DATAMIN"] = np.nanmin(moments[i])
            hdu.header["DATAMAX"] = np.nanmax(moments[i])
            hdu.header["ORIGIN"] = sofia_version_full
            hdu.header["CELLSCAL"] = "CONSTANT"
            func.delete_header(hdu.header, "CRPIX3")
            func.delete_header(hdu.header, "CRVAL3")
            func.delete_header(hdu.header, "CDELT3")
            func.delete_header(hdu.header, "CTYPE3")

            if debug:
                hdu.writeto(str(filename) + "_mom{0:d}.debug.fits".format(i),
                            output_verify="warn",
                            **__astropy_arg_overwrite__)
            else:
                name = str(filename) + "_mom{0:d}.fits".format(i)
                if compress: name += ".gz"

                # Check for overwrite flag
                if func.check_overwrite(name, flagOverwrite):
                    hdu.writeto(name,
                                output_verify="warn",
                                **__astropy_arg_overwrite__)

    return
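
stat.moment() is a SoFiA-internal routine that is not shown here. Judging from the commented-out NumPy formulas above and from the later channel-to-WCS conversion, a rough NumPy equivalent operating on the transposed cube (spectral axis last) might be the following sketch; this is an assumption, not the actual implementation:

import numpy as np

def moment_sketch(cube, mom=0, mom0=None, mom1=None):
    # cube has shape [ny, nx, nz] after the transpose above; mom=1 returns the
    # flux-weighted mean channel index, which the caller converts to physical
    # units via (mom1 + 1 - CRPIX3) * CDELT3 + CRVAL3
    chan = np.arange(cube.shape[-1], dtype=np.float64)
    if mom == 0:
        return np.nansum(cube, axis=-1)
    if mom0 is None: mom0 = np.nansum(cube, axis=-1)
    if mom == 1:
        return np.nansum(chan * cube, axis=-1) / mom0
    if mom1 is None: mom1 = np.nansum(chan * cube, axis=-1) / mom0
    # mom == 2: flux-weighted channel dispersion
    return np.sqrt(np.nansum((chan - mom1[..., None]) ** 2 * cube, axis=-1) / mom0)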
Example #7
def writeSubcube(cube, header, mask, objects, cathead, outroot, outputDir, compress, flagOverwrite):
	# Strip path variable to get the file name and the directory separately
	splitroot = outroot.split("/")
	cubename  = splitroot[-1]
	#if len(splitroot) > 1:
	#	outputDir = "/".join(splitroot[:-1]) + "/objects/"
	#else:
	#	outputDir = "./objects/"
	
	# Check if output directory exists and create it if not
	if not os.path.exists(outputDir):
		os.makedirs(outputDir)
	
	# Copy of header for manipulation
	headerCubelets = header.copy()
	
	# Read all important information (central pixels & values, increments) from the header
	#dX    = headerCubelets["CDELT1"]
	#dY    = headerCubelets["CDELT2"]
	dZ    = headerCubelets["CDELT3"]
	#cValX = headerCubelets["CRVAL1"]
	#cValY = headerCubelets["CRVAL2"]
	cValZ = headerCubelets["CRVAL3"]
	cPixX = headerCubelets["CRPIX1"] - 1
	cPixY = headerCubelets["CRPIX2"] - 1
	cPixZ = headerCubelets["CRPIX3"] - 1
	cubeDim = cube.shape
	
	for obj in objects:
		# Centres and bounding boxes
		Xc = obj[cathead == "x"][0]
		Yc = obj[cathead == "y"][0]
		Zc = obj[cathead == "z"][0]
		Xmin = obj[cathead == "x_min"][0]
		Ymin = obj[cathead == "y_min"][0]
		Zmin = obj[cathead == "z_min"][0]
		Xmax = obj[cathead == "x_max"][0]
		Ymax = obj[cathead == "y_max"][0]
		Zmax = obj[cathead == "z_max"][0]
		
		# If centre of mass estimation is wrong replace by geometric centre
		if Xc < 0 or Xc > cubeDim[2] - 1: Xc = obj[cathead == "x_geo"][0]
		if Yc < 0 or Yc > cubeDim[1] - 1: Yc = obj[cathead == "y_geo"][0]
		if Zc < 0 or Zc > cubeDim[0] - 1: Zc = obj[cathead == "z_geo"][0]
		
		cPixXNew = int(Xc)
		cPixYNew = int(Yc)
		cPixZNew = int(Zc)
		
		# Largest distance of source limits from the centre
		maxX = 2 * max(abs(cPixXNew - Xmin), abs(cPixXNew - Xmax))
		maxY = 2 * max(abs(cPixYNew - Ymin), abs(cPixYNew - Ymax))
		maxZ = 2 * max(abs(cPixZNew - Zmin), abs(cPixZNew - Zmax))
		
		# Calculate the new bounding box for the mass centred cube
		XminNew = cPixXNew - maxX
		if XminNew < 0: XminNew = 0
		YminNew = cPixYNew - maxY
		if YminNew < 0: YminNew = 0
		ZminNew = cPixZNew - maxZ
		if ZminNew < 0: ZminNew = 0
		XmaxNew = cPixXNew + maxX
		if XmaxNew > cubeDim[2] - 1: XmaxNew = cubeDim[2] - 1
		YmaxNew = cPixYNew + maxY
		if YmaxNew > cubeDim[1] - 1: YmaxNew = cubeDim[1] - 1
		ZmaxNew = cPixZNew + maxZ
		if ZmaxNew > cubeDim[0] - 1: ZmaxNew = cubeDim[0] - 1
		
		# Calculate the centre with respect to the cutout cube
		cPixXCut = cPixX - XminNew
		cPixYCut = cPixY - YminNew
		cPixZCut = cPixZ - ZminNew
		
		# Update header keywords:
		headerCubelets["CRPIX1"] = cPixXCut + 1
		headerCubelets["CRPIX2"] = cPixYCut + 1
		headerCubelets["CRPIX3"] = cPixZCut + 1
		
		# Extract the cubelet
		[ZminNew, ZmaxNew, YminNew, YmaxNew, XminNew, XmaxNew] = map(int, [ZminNew, ZmaxNew, YminNew, YmaxNew, XminNew, XmaxNew])
		subcube = cube[ZminNew:ZmaxNew + 1, YminNew:YmaxNew + 1, XminNew:XmaxNew + 1]
		
		# Update header keywords:
		headerCubelets["NAXIS1"] = subcube.shape[2]
		headerCubelets["NAXIS2"] = subcube.shape[1]
		headerCubelets["NAXIS3"] = subcube.shape[0]
		
		headerCubelets["ORIGIN"] = sofia_version_full
		
		# Write the cubelet
		hdu = fits.PrimaryHDU(data=subcube, header=headerCubelets)
		hdulist = fits.HDUList([hdu])
		name = outputDir + cubename + "_" + str(int(obj[0])) + ".fits"
		if compress: name += ".gz"
		
		# Check for overwrite flag:
		if func.check_overwrite(name, flagOverwrite): hdulist.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)
		
		hdulist.close()
		
		
		# -------------------------
		# Position-velocity diagram
		# -------------------------
		
		if "kin_pa" in cathead:
			kin_pa = math.radians(float(obj[cathead == "kin_pa"][0]))
			pv_sampling = 10
			pv_r = np.arange(-max(subcube.shape[1:]), max(subcube.shape[1:]) - 1 + 1.0 / pv_sampling, 1.0 / pv_sampling)
			pv_y = Yc - float(YminNew) + pv_r * math.cos(kin_pa)
			pv_x = Xc - float(XminNew) - pv_r * math.sin(kin_pa)
			pv_x, pv_y = pv_x[(pv_x >= 0) * (pv_x <= subcube.shape[2] - 1)], pv_y[(pv_x >= 0) * (pv_x <= subcube.shape[2] - 1)]
			pv_x, pv_y = pv_x[(pv_y >= 0) * (pv_y <= subcube.shape[1] - 1)], pv_y[(pv_y >= 0) * (pv_y <= subcube.shape[1] - 1)]
			pv_x.resize((1, pv_x.shape[0]))
			pv_y.resize((pv_x.shape))
			pv_coords = np.concatenate((pv_y, pv_x), axis=0)
			pv_array = []
			for jj in range(subcube.shape[0]):
				plane = map_coordinates(subcube[jj], pv_coords)
				plane = [plane[ii::pv_sampling] for ii in range(pv_sampling)]
				plane = np.array([ii[:plane[-1].shape[0]] for ii in plane])
				pv_array.append(plane.mean(axis=0))
			pv_array = np.array(pv_array)
			hdu = fits.PrimaryHDU(data=pv_array, header=headerCubelets)
			hdulist = fits.HDUList([hdu])
			hdulist[0].header["CTYPE1"] = "PV--DIST"
			hdulist[0].header["CDELT1"] = hdulist[0].header["CDELT2"]
			hdulist[0].header["CRVAL1"] = 0
			hdulist[0].header["CRPIX1"] = pv_array.shape[1] / 2
			hdulist[0].header["CTYPE2"] = hdulist[0].header["CTYPE3"]
			hdulist[0].header["CDELT2"] = hdulist[0].header["CDELT3"]
			hdulist[0].header["CRVAL2"] = hdulist[0].header["CRVAL3"]
			hdulist[0].header["CRPIX2"] = hdulist[0].header["CRPIX3"]
			hdulist[0].header["ORIGIN"] = sofia_version_full
			func.delete_3rd_axis(hdulist[0].header)
			name = outputDir + cubename + "_" + str(int(obj[0])) + "_pv.fits"
			if compress: name += ".gz"
			
			# Check for overwrite flag:
			if func.check_overwrite(name, flagOverwrite): hdulist.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)
			hdulist.close()
		
		
		# -------------
		# Mask cubelets
		# -------------
		
		# Remove all other sources from the mask
		submask = mask[ZminNew:ZmaxNew + 1, YminNew:YmaxNew + 1, XminNew:XmaxNew + 1].astype("int")
		submask[submask != obj[0]] = 0
		submask[submask == obj[0]] = 1
		
		# Write mask
		hdu = fits.PrimaryHDU(data=submask.astype("int16"), header=headerCubelets)
		hdu.header["BUNIT"] = "Source-ID"
		hdu.header["DATAMIN"] = np.nanmin(submask)
		hdu.header["DATAMAX"] = np.nanmax(submask)
		hdu.header["ORIGIN"] = sofia_version_full
		hdulist = fits.HDUList([hdu])
		name = outputDir + cubename + "_" + str(int(obj[0])) + "_mask.fits"
		if compress: name += ".gz"
		
		# Check for overwrite flag:
		if func.check_overwrite(name, flagOverwrite): hdulist.writeto(name, output_verify="warn", **__astropy_arg_overwrite__)
		hdulist.close()
		
		
		# ------------------
		# Moments 0, 1 and 2
		# ------------------
		
		# Units of moment images
		# Velocity
		if func.check_header_keywords(func.KEYWORDS_VELO, headerCubelets["CTYPE3"]):
			if not "CUNIT3" in headerCubelets or headerCubelets["CUNIT3"].lower() == "m/s":
				# Converting m/s to km/s
				dkms = abs(headerCubelets["CDELT3"]) * 1e-3
				scalemom12 = 1e-3
				bunitExt = ".km/s"
			elif headerCubelets["CUNIT3"].lower() == "km/s":
				dkms = abs(headerCubelets["CDELT3"])
				scalemom12 = 1.0
				bunitExt = ".km/s"
			else:
				# Working with whatever units the cube has
				dkms = abs(headerCubelets["CDELT3"])
				scalemom12 = 1.0
				bunitExt = "." + headerCubelets["CUNIT3"]
		# Frequency
		elif func.check_header_keywords(func.KEYWORDS_FREQ, headerCubelets["CTYPE3"]):
			if not "CUNIT3" in headerCubelets or headerCubelets["CUNIT3"].lower() == "hz":
				dkms = abs(headerCubelets["CDELT3"])
				scalemom12 = 1.0
				bunitExt = ".Hz"
			elif headerCubelets["CUNIT3"].lower() == "khz":
				# Converting kHz to Hz
				dkms = abs(headerCubelets["CDELT3"]) * 1e+3
				scalemom12 = 1e+3
				bunitExt = ".Hz"
			else:
				# Working with whatever units the cube has
				dkms = abs(headerCubelets["CDELT3"])
				scalemom12 = 1.0
				bunitExt = "." + headerCubelets["CUNIT3"]
		# Other
		else:
			# Working with whatever units the cube has
			dkms = abs(headerCubelets["CDELT3"])
			scalemom12 = 1.0
			if not "CUNIT3" in headerCubelets: bunitExt = ".std_unit_" + headerCubelets["CTYPE3"]
			else: bunitExt = "." + headerCubelets["CUNIT3"]
		
		# Make copy of subcube and regrid if necessary
		# NOTE: Why on earth do we need to make a copy here? If we don't create a copy,
		#       then SoFiA will crash as all pixels in the moment map are NaN, but I
		#       don't understand why this would be the case in the first place.
		subcubeCopy = subcube.copy()
		if "cellscal" in headerCubelets and headerCubelets["cellscal"] == "1/F":
			subcubeCopy[submask == 0] = 0        # NOTE: These will later be set to NaN by the regridding task
			subcubeCopy = func.regridMaskedChannels(subcubeCopy, submask, headerCubelets)
		else:
			subcubeCopy[submask == 0] = np.nan   # NOTE: Manually set to NaN to ensure correct generation of spectra below
		
		moments = [None, None, None]
		with np.errstate(invalid="ignore"):
			# Definition of moment 0
			moments[0] = np.nansum(subcubeCopy, axis=0)
			
			# Definition of moment 1
			velArr = ((np.arange(subcubeCopy.shape[0]).reshape((subcubeCopy.shape[0], 1, 1)) + 1.0 - headerCubelets["CRPIX3"]) * headerCubelets["CDELT3"] + headerCubelets["CRVAL3"]) * scalemom12
			moments[1] = np.divide(np.nansum(velArr * subcubeCopy, axis=0), moments[0])
			# NOTE: Here we make use of array broadcasting in NumPy, but we need to reshape the velocity array
			#       from [nz] to [nz, 1, 1] for this to work, so that [nz, 1, 1] * [nz, ny, nx] --> [nz, ny, nx].
			
			# Definition of moment 2
			velArr = velArr - moments[1]
			moments[2] = np.sqrt(np.divide(np.nansum(velArr * velArr * subcubeCopy, axis=0), moments[0]))
			# NOTE: The above works due to array broadcasting in NumPy and despite different array dimensions.
			#       [nz, 1, 1] - [ny, nx] --> [nz, ny, nx] according to NumPy's broadcasting rules.
		
		moments[0] *= dkms
		units = [headerCubelets["BUNIT"] + bunitExt, bunitExt[1:], bunitExt[1:]]
		
		for i in range(3):
			hdu = fits.PrimaryHDU(data=moments[i], header=headerCubelets)
			func.delete_3rd_axis(hdu.header)
			hdu.header["BUNIT"]   = units[i]
			hdu.header["DATAMIN"] = np.nanmin(moments[i])
			hdu.header["DATAMAX"] = np.nanmax(moments[i])
			hdu.header["ORIGIN"]  = sofia_version_full
			filename = outputDir + cubename + "_{0:d}_mom{1:d}.fits".format(int(obj[0]), i)
			if compress: filename += ".gz"
			if func.check_overwrite(filename, flagOverwrite): hdu.writeto(filename, output_verify="warn", **__astropy_arg_overwrite__)
		
		
		# -------------------
		# Integrated spectrum
		# -------------------
		spec = np.nansum(subcubeCopy, axis=(1, 2))
		nPix = np.sum(~np.isnan(subcubeCopy), axis=(1, 2))
		
		name = outputDir + cubename + "_" + str(int(obj[0])) + "_spec.txt"
		if compress: name += ".gz"
		
		# Check for overwrite flag:
		if func.check_overwrite(name, flagOverwrite):
			if compress:
				import gzip
				f = gzip.open(name, "wt")  # text mode so that strings can be written under Python 3
			else:
				f = open(name, "w")
			
			f.write("# Integrated source spectrum\n")
			f.write("# Creator: %s\n#\n" % sofia_version_full)
			f.write("# Description of columns:\n")
			f.write("# - Chan      Channel number.\n")
			f.write("# - Spectral  Associated value of the spectral coordinate according to\n")
			f.write("#             the WCS information in the FITS file header.\n")
			f.write("# - Sum       Sum of flux values of all spatial pixels covered by the\n")
			f.write("#             source in that channel. Note that this has not yet been\n")
			f.write("#             divided by the beam solid angle! If your data cube is in\n")
			f.write("#             Jy/beam, you will have to manually divide by the beam\n")
			f.write("#             size which, for Gaussian beams, is given as\n")
			f.write("#               PI * a * b / (4 * ln(2))\n")
			f.write("#             where a and b are the major and minor axis of the beam in\n")
			f.write("#             units of pixels.\n")
			f.write("# - Npix      Number of spatial pixels covered by the source in that\n")
			f.write("#             channel. This can be used to determine the statistical\n")
			f.write("#             uncertainty of the summed flux value. Again, this has\n")
			f.write("#             not yet been corrected for any potential spatial correla-\n")
			f.write("#             tion of pixels due to the beam solid angle!\n#\n")
			f.write("# Chan        Spectral             Sum    Npix\n")
			f.write("# --------------------------------------------\n")
			
			for i in range(0, len(spec)):
				xspec = cValZ + (i + float(ZminNew) - cPixZ) * dZ
				f.write("%6d %15.6e %15.6e %7d\n" % (i + ZminNew, xspec, spec[i], nPix[i]))
			
			f.close()
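
The position-velocity extraction in Examples #4 and #7 samples each channel along a line through the source centre with scipy's map_coordinates before block-averaging the oversampled slice. A condensed sketch of the sampling step for a single channel (angle and centre are hypothetical):

import numpy as np
from scipy.ndimage import map_coordinates

plane = np.random.default_rng(1).normal(size=(64, 64))  # one spectral channel
kin_pa = np.radians(30.0)    # hypothetical kinematic position angle
yc, xc = 32.0, 32.0          # hypothetical source centre in pixel coordinates
r = np.linspace(-40.0, 40.0, 801)

# Pixel coordinates of the slice; positive r runs along the position angle
ys = yc + r * np.cos(kin_pa)
xs = xc - r * np.sin(kin_pa)
keep = (xs >= 0) & (xs <= 63) & (ys >= 0) & (ys <= 63)

# map_coordinates expects shape (ndim, npoints): row 0 = y, row 1 = x
pv_slice = map_coordinates(plane, np.vstack((ys[keep], xs[keep])))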