Exemplo n.º 1
0
    def __init__(self,
                 prefix="",
                 *,
                 skiFilePath=None,
                 inDirPath=None,
                 outDirPath="",
                 process=None):
        """Initialize the simulation from a prefix and/or a ski file path."""

        # at least one of prefix / ski file path must be specified
        if not prefix and skiFilePath is None:
            raise ValueError(
                "Simulation constructor must specify prefix or ski file path")

        # store absolute versions of the provided paths (None stays None)
        self._skiFilePath = None if skiFilePath is None else ut.absPath(skiFilePath)
        self._inDirPath = None if inDirPath is None else ut.absPath(inDirPath)
        self._outDirPath = ut.absPath(outDirPath)
        # fall back to the ski file stem when no explicit prefix was given
        self._prefix = prefix or self._skiFilePath.stem

        # keep track of the process, if any, and its completion status
        self._process = process
        self._running = 'unknown' if process is None else 'true'

        # placeholder for caching frequently-used objects
        self._parameters = None
Exemplo n.º 2
0
def writeStoredTable(outFilePath, axisNames, axisUnits, axisScales, axisGrids,
                     quantityNames, quantityUnits, quantityScales,
                     quantityValues):
    """Write a SKIRT stored table (.stab) file with the given axes and quantities.

    Parameters:
      outFilePath: output file path; must have the ".stab" filename extension.
      axisNames, axisUnits, axisScales: per-axis metadata strings (1 to 9 axes).
      axisGrids: per-axis grid point arrays.
      quantityNames, quantityUnits, quantityScales: per-quantity metadata
        strings (1 to 9 quantities).
      quantityValues: per-quantity value arrays, each shaped by the axis grids.

    Raises ValueError when the extension is wrong or the sequence lengths or
    array shapes are inconsistent.
    """
    outpath = ut.absPath(outFilePath)

    # verify some of the requirements/restrictions on the specified data
    if outpath.suffix != ".stab":
        raise ValueError(
            "Stored table filename extension is not '.stab': {}".format(
                outpath))
    numAxes = len(axisNames)
    # bug fix: also validate the length of axisScales, which was left unchecked
    if numAxes < 1 or numAxes > 9 or numAxes != len(axisUnits) \
            or numAxes != len(axisScales) or numAxes != len(axisGrids):
        raise ValueError("Mismatch in number of axes")
    shapeValues = tuple([len(axisGrid) for axisGrid in axisGrids])
    numQuantities = len(quantityNames)
    # bug fix: also validate the length of quantityScales, which was left unchecked
    if numQuantities < 1 or numQuantities > 9 \
            or numQuantities != len(quantityUnits) \
            or numQuantities != len(quantityScales) \
            or numQuantities != len(quantityValues):
        raise ValueError("Mismatch in number of quantities")
    for valueArray in quantityValues:
        if valueArray.shape != shapeValues:
            raise ValueError("Mismatch in number of values")

    # open the output file
    with open(outpath, 'wb') as out:
        # write the SKIRT and endianness tags
        out.write(b"SKIRT X\n")
        intToFile(out, 0x010203040A0BFEFF)

        # write the axes metadata
        intToFile(out, numAxes)
        for axisName in axisNames:
            stringToFile(out, axisName)
        for axisUnit in axisUnits:
            stringToFile(out, axisUnit)
        for axisScale in axisScales:
            stringToFile(out, axisScale)

        # write the grid for each axis, preceded by its length
        for axisGrid in axisGrids:
            intToFile(out, len(axisGrid))
            arrayToFile(out, axisGrid)

        # write the quantities metadata
        intToFile(out, numQuantities)
        for quantityName in quantityNames:
            stringToFile(out, quantityName)
        for quantityUnit in quantityUnits:
            stringToFile(out, quantityUnit)
        for quantityScale in quantityScales:
            stringToFile(out, quantityScale)

        # write the values as a single array indexed on quantity, then axes
        arrayToFile(out, np.stack(quantityValues))

        # write the EOF tag
        out.write(b"STABEND\n")

    # report file creation to user
    logging.info("Created stored table file: {}".format(outpath))
Exemplo n.º 3
0
    def __init__(self, subSuite=".", *, suitePath=None):
        """Locate the ski files for all valid test cases in the (sub-)suite."""

        # determine the top-level suite directory
        if suitePath is None:
            self._suitePath = ut.projectParentPath() / "Functional9"
        else:
            self._suitePath = ut.absPath(suitePath)

        # determine which sub-suite directories to search;
        # an empty, None, or dotted specification selects the whole suite
        if not subSuite or "." in subSuite:
            subSuitePaths = [self._suitePath]
        else:
            subSuitePaths = self._suitePath.rglob(subSuite)

        # a directory is a valid test case only when it holds exactly one ski file;
        # remember the paths to those ski files
        uniqueSkiPaths = set()
        for subSuitePath in subSuitePaths:
            for skiPath in subSuitePath.rglob("[!.]*.ski"):
                if len(list(skiPath.parent.glob("[!.]*.ski"))) == 1:
                    uniqueSkiPaths.add(skiPath)
        self._skiPaths = sorted(uniqueSkiPaths)

        # refuse to proceed without any test cases
        if not self._skiPaths:
            raise ut.UserError(
                "No valid test cases found for sub-suite specification: '{}'".
                format(subSuite))
Exemplo n.º 4
0
def listStoredTableInfo(tableFilePath):
    """Log the axis and quantity metadata of a SKIRT stored table file."""
    inpath = ut.absPath(tableFilePath)

    with open(inpath, 'rb') as infile:
        # check the leading SKIRT and endianness tags
        if stringFromFile(infile) != "SKIRT X" or intFromFile(infile) != 0x010203040A0BFEFF:
            raise ValueError("File does not have SKIRT stored table format: {}".format(inpath))

        # read the axes metadata; each grid is preceded by its own length
        numAxes = intFromFile(infile)
        axisNames = [stringFromFile(infile) for _ in range(numAxes)]
        axisUnits = [stringFromFile(infile) for _ in range(numAxes)]
        axisScales = [stringFromFile(infile) for _ in range(numAxes)]
        axisGrids = [arrayFromFile(infile, (intFromFile(infile),)) for _ in range(numAxes)]

        # read the quantities metadata
        numQuantities = intFromFile(infile)
        quantityNames = [stringFromFile(infile) for _ in range(numQuantities)]
        quantityUnits = [stringFromFile(infile) for _ in range(numQuantities)]
        quantityScales = [stringFromFile(infile) for _ in range(numQuantities)]

    # report the file path
    logging.info("Stored table file: {}".format(inpath))

    # report each axis: name, unit, grid extent, and scale
    for i, (name, unit, scale, grid) in enumerate(
            zip(axisNames, axisUnits, axisScales, axisGrids)):
        logging.info("  axis {}: {} ({}) with {} points from {:.3e} to {:.3e} on {} scale"
                     .format(i, name, unit, len(grid), grid[0], grid[-1], scale))

    # report each quantity: name, unit, and scale
    for i, (name, unit, scale) in enumerate(
            zip(quantityNames, quantityUnits, quantityScales)):
        logging.info("  quantity {}: {} ({}) on {} scale"
                     .format(i, name, unit, scale))
Exemplo n.º 5
0
def listStoredColumnsInfo(columnsFilePath):
    """Log the column metadata of a SKIRT stored columns file."""
    inpath = ut.absPath(columnsFilePath)

    with open(inpath, 'rb') as infile:
        # check the leading SKIRT, endianness, and version tags
        if stringFromFile(infile) != "SKIRT X" or intFromFile(infile) != 0x010203040A0BFEFF \
                or intFromFile(infile) != 0:
            raise ValueError(
                "File does not have SKIRT stored columns format: {}".format(
                    inpath))

        # read the table dimensions
        numRows = intFromFile(infile)
        numColumns = intFromFile(infile)

        # read the per-column name and unit strings
        columnNames = [stringFromFile(infile) for _ in range(numColumns)]
        columnUnits = [stringFromFile(infile) for _ in range(numColumns)]

    # report the file path, the columns, and the row count
    logging.info("Stored columns file: {}".format(inpath))
    for index, (name, unit) in enumerate(zip(columnNames, columnUnits), start=1):
        logging.info("  column {}: {} ({})".format(index, name, unit))
    logging.info("  nr of rows: {}".format(numRows))
Exemplo n.º 6
0
def getColumnDescriptions(path):
    """Return the column descriptions parsed from a column text file header."""
    path = ut.absPath(path)

    descriptions = []
    with open(path) as infile:
        for rawline in infile:
            stripped = rawline.strip()
            # ignore blank lines within the header
            if not stripped:
                continue
            # the first non-comment line terminates the header
            if not stripped.startswith("#"):
                break
            # drop the hash; only "column <i>: ..." lines describe columns
            body = stripped[1:].strip()
            if body.lower().startswith("column") and ":" in body:
                colon = body.find(":")
                # a trailing parenthesized segment holds the unit, not the description
                if body.endswith(')'):
                    descriptions.append(body[colon + 1:body.rfind("(")].strip())
                else:
                    descriptions.append(body[colon + 1:].strip())
    return descriptions
Exemplo n.º 7
0
    def __init__(self, outFilePath, shape=(800, 600), rate=24):
        """Open an MP4 output file and launch an ffmpeg encoder fed through a pipe."""
        outFilePath = ut.absPath(outFilePath)
        assert outFilePath.suffix.lower() == ".mp4"

        # remember the frame shape
        self._shape = shape

        # verify write access to the output file up front,
        # because any messages from ffmpeg are ignored
        open(outFilePath, 'w').close()

        # assemble the ffmpeg command line for a raw RGBA input stream
        cmdline = ['ffmpeg']                              # path to executable
        cmdline += ['-v', 'quiet']                        # be less verbose
        cmdline += ['-y']                                 # overwrite output file if it exists
        cmdline += ['-f', 'rawvideo']                     # input format: raw, uncompressed data stream
        cmdline += ['-pix_fmt', 'rgba']                   # 3 channels plus dummy alpha channel, 8 bits each
        cmdline += ['-s', '{:1d}x{:1d}'.format(*shape)]   # frame size (pixels)
        cmdline += ['-r', '{:1d}'.format(rate)]           # frame rate (frames per second)
        cmdline += ['-i', '-']                            # the input comes from a pipe
        cmdline += ['-an']                                # there is no audio
        cmdline += ['-vcodec', 'mpeg4']                   # output encoding
        cmdline += [outFilePath]

        # launch ffmpeg with its input piped from this process
        self._p = subprocess.Popen(cmdline, stdin=subprocess.PIPE)
Exemplo n.º 8
0
def do( skiDirPath : (str,"directory containing the ski files to be upgraded"),
        ) -> "upgrade ski files in a given directory to the latest version of SKIRT 9":
    """Upgrade every ski file found directly inside the given directory."""

    import pts.skiupgrade
    import pts.utils as ut

    # process the ski files in a reproducible (sorted) order
    skipaths = sorted(ut.absPath(skiDirPath).glob("*.ski"))
    for skipath in skipaths:
        pts.skiupgrade.upgradeSkiFile(skipath)
Exemplo n.º 9
0
    def __init__(self, skiFilePath):
        """Load the XML tree from the ski file at the given path."""
        # resolve the path and check for a supported filename extension
        self._path = ut.absPath(skiFilePath)
        if self._path.suffix.lower() not in (".ski", ".xml"):
            raise ValueError("Invalid filename extension for ski file")

        # parse the XML; blank text is removed so the pretty printer
        # is not confused when the file is saved later on
        parser = etree.XMLParser(remove_blank_text=True)
        self._tree = etree.parse(str(self._path), parser=parser)
Exemplo n.º 10
0
    def __init__(self, source, frameIndices=None):
        """Initialize the image from a numpy array, a matplotlib figure, or a file path.

        Parameters:
          source: a 3-D numpy array with 3 components along the last axis,
                  a matplotlib Figure, or the path (string or pathlib.Path) of
                  a standard image file (.jpg/.jpeg/.png/.tif/.tiff) or a
                  FITS file (.fits).
          frameIndices: for a 3-D FITS data cube, the (r, g, b) frame indices
                  to use; defaults to the last, middle, and first frame.

        Raises ValueError for unsupported source types, unsupported filename
        extensions, or FITS data with an unsupported shape.
        """
        # initialize private data members here to avoid warnings in the development environment
        self._shape = None
        self._dpil = None
        self._dbuf = None
        self._darr = None
        self._rangearr = None

        # numpy array with 3 components along the last axis
        if isinstance(
                source,
                np.ndarray) and source.ndim == 3 and source.shape[2] == 3:
            self._shape = source.shape[0:2]
            self._setArr(source)

        # matplotlib Figure
        elif isinstance(source, matplotlib.figure.Figure):
            source.canvas.draw()  # flush the figure to the buffer
            buffer, self._shape = source.canvas.print_to_buffer()
            self._setBuf(buffer)

        # file path
        elif isinstance(source, (str, pathlib.Path)):
            path = ut.absPath(source)

            # standard image file
            if path.suffix.lower() in (".jpg", ".jpeg", ".png", ".tif",
                                       ".tiff"):
                self._setPil(PIL.Image.open(path))
                self._shape = self._dpil.size

            # FITS file
            elif path.suffix.lower() == ".fits":
                # returns an array with shape (nx, ny) or (nx, ny, nlambda)
                data = fits.getdata(path).T
                if data.ndim == 2:
                    # replicate a single frame into the three channels
                    self._setArr(np.dstack((data, data, data)))
                elif data.ndim == 3:
                    # select three frames from the cube; default to last/middle/first
                    if frameIndices is None:
                        n = data.shape[2]
                        frameIndices = (n - 1, n // 2, 0)
                    r, g, b = frameIndices
                    self._setArr(
                        np.dstack((data[:, :, r], data[:, :, g],
                                   data[:, :, b])))
                else:
                    raise ValueError(
                        "Data in FITS file '{}' has unsupported shape".format(
                            path))
                self._shape = self._darr.shape[0:2]

            # bug fix: previously an unrecognized filename extension fell
            # through silently, leaving the image uninitialized
            else:
                raise ValueError(
                    "Image source file '{}' has unsupported filename extension"
                    .format(path))

        # unsupported type
        else:
            raise ValueError(
                "Image source '{}' has unsupported type".format(source))
Exemplo n.º 11
0
def readStoredTable(tableFilePath):
    """Read a SKIRT stored table file and return its contents as a dictionary."""
    inpath = ut.absPath(tableFilePath)

    with open(inpath, 'rb') as infile:
        # check the leading SKIRT and endianness tags
        if stringFromFile(infile) != "SKIRT X" or intFromFile(
                infile) != 0x010203040A0BFEFF:
            raise ValueError(
                "File does not have SKIRT stored table format: {}".format(
                    inpath))

        # read the axes metadata; each grid is preceded by its own length
        numAxes = intFromFile(infile)
        axisNames = [stringFromFile(infile) for _ in range(numAxes)]
        axisUnits = [stringFromFile(infile) for _ in range(numAxes)]
        axisScales = [stringFromFile(infile) for _ in range(numAxes)]
        axisGrids = [arrayFromFile(infile, (intFromFile(infile),))
                     for _ in range(numAxes)]

        # read the quantities metadata
        numQuantities = intFromFile(infile)
        quantityNames = [stringFromFile(infile) for _ in range(numQuantities)]
        quantityUnits = [stringFromFile(infile) for _ in range(numQuantities)]
        quantityScales = [stringFromFile(infile) for _ in range(numQuantities)]

        # the values form one array indexed on quantity first, then on each axis grid
        shapeValues = (numQuantities,) + tuple(len(axisGrid)
                                               for axisGrid in axisGrids)
        values = arrayFromFile(infile, shapeValues)

        # check the trailing tag
        if stringFromFile(infile) != "STABEND":
            raise ValueError(
                "File does not have the proper trailing tag: {}".format(
                    inpath))

    # assemble the result dictionary, starting with the basic metadata
    d = dict(axisNames=axisNames,
             axisUnits=axisUnits,
             axisScales=axisScales,
             quantityNames=quantityNames,
             quantityUnits=quantityUnits,
             quantityScales=quantityScales)

    # attach each axis grid with its unit
    for name, unit, grid in zip(axisNames, axisUnits, axisGrids):
        d[name] = grid << sm.unit(unit)

    # attach each quantity's values with its unit
    for i, (name, unit) in enumerate(zip(quantityNames, quantityUnits)):
        d[name] = values[i] << sm.unit(unit)

    return d
Exemplo n.º 12
0
def plotSpectralResolution(inFilePath, minWavelength=None, maxWavelength=None, decades=None, *, title=None,
                outDirPath=None, outFileName=None, outFilePath=None, figSize=(8, 5), interactive=None):
    """Plot the spectral resolution R = lambda / delta-lambda of a wavelength grid.

    The grid is loaded from a stored table (.stab) with a "lambda" axis, from
    the first column of a column text file (.dat), or from the third axis of a
    FITS file (.fits). Unless running in interactive mode, the figure is saved
    to a path determined by the out* arguments and closed afterwards.
    """

    # load the wavelength grid
    inFilePath = ut.absPath(inFilePath)
    if inFilePath.suffix.lower() == ".stab":
        table = stab.readStoredTable(inFilePath)
        if "lambda" not in table:
            raise ValueError("No wavelength axis in stored table: {}".format(inFilePath))
        grid = table["lambda"]
    elif inFilePath.suffix.lower() == ".dat":
        # the wavelength column must be the first column of the text file
        if "wavelength" not in sm.getColumnDescriptions(inFilePath)[0].lower():
            raise ValueError("First text column is not labeled 'wavelength': {}".format(inFilePath))
        grid = sm.loadColumns(inFilePath, "1")[0]
    elif inFilePath.suffix.lower() == ".fits":
        # the wavelength axis is expected as the third FITS axis
        axes = sm.getFitsAxes(inFilePath)
        if len(axes) != 3:
            raise ValueError("FITS file does not have embedded wavelength axis")
        grid = axes[2]
    else:
        raise ValueError("Filename does not have the .stab, .dat, or .fits extension: {}".format(inFilePath))

    # calculate the spectral resolution between successive grid points
    R = grid[:-1] / (grid[1:] - grid[:-1])
    Rmax = R.max()

    # choose wavelength units from grid
    wunit = grid.unit

    # setup the plot
    plt.figure(figsize=figSize)
    plt.xlabel(sm.latexForWavelengthWithUnit(wunit), fontsize='large')
    plt.ylabel(r"$R=\frac{\lambda}{\Delta\lambda}$", fontsize='large')
    plt.xscale('log')
    plt.yscale('log')
    plt.grid(which='major', axis='both', ls=":")
    plt.xlim(_adjustWavelengthRange(plt.xlim(), wunit, minWavelength, maxWavelength))
    # when requested, limit the vertical axis to the given number of decades below the maximum
    if decades is not None:
        plt.ylim(Rmax* 10 ** (-decades), Rmax * 10 ** 0.2)

    # plot the spectral resolution; default the legend title to the file name stem
    if title is None or len(title)==0: title = inFilePath.stem
    label = "{}\n{} pts from {:g} to {:g} {}".format(title, len(grid), grid[0].to_value(wunit), grid[-1].to_value(wunit),
                                              sm.latexForUnit(wunit))
    plt.plot(grid[:-1].to_value(wunit), R, label=label)
    plt.legend()

    # if not in interactive mode, save the figure; otherwise leave it open
    if not ut.interactive(interactive):
        saveFilePath = ut.savePath(inFilePath.stem+".pdf", (".pdf",".png"),
                                   outDirPath=outDirPath, outFileName=outFileName, outFilePath=outFilePath)
        plt.savefig(saveFilePath, bbox_inches='tight', pad_inches=0.25)
        plt.close()
        logging.info("Created {}".format(saveFilePath))
Exemplo n.º 13
0
def createSimulations(outDirPath="", prefix=None):
    """Return a Simulation object for each matching log file in the given directory."""
    simulations = []

    # each "<prefix>_log.txt" file marks a simulation in the directory
    for logfile in ut.absPath(outDirPath).glob("*_log.txt"):
        # strip the "_log" tail from the stem to recover the simulation prefix
        candidate = logfile.stem[:-4]
        if prefix is None or prefix == candidate:
            simulations.append(
                Simulation(outDirPath=logfile.parent, prefix=candidate))

    return simulations
Exemplo n.º 14
0
def do(infilepath: (str, "filepath pattern of the FSPS files to be converted"),
       outfilepath: (str, "filepath of the resulting stored table file")
       ) -> "Convert FSPS-generated SED family to stored table format":
    """Convert a set of FSPS files, selected by pattern, to a single stored table."""

    import glob
    import pts.utils as ut
    from pts.storedtable.convert_sed import convertFSPSSEDFamily as convertFSPSSEDFamily

    # make sure the input pattern matches at least one file
    infilepath = str(ut.absPath(infilepath))
    if not glob.glob(infilepath):
        raise ut.UserError(
            "No input files found for pattern: '{}'".format(infilepath))

    # determine the output file path, supplying a default name for a directory target
    outfilepath = ut.absPath(outfilepath)
    if outfilepath.is_dir():
        outfilepath = ut.savePath(defFilePath="CustomFSPSSEDFamily",
                                  outDirPath=outfilepath,
                                  suffix=".stab")
    else:
        outfilepath = ut.savePath(defFilePath=None,
                                  outFilePath=outfilepath,
                                  suffix=".stab")

    # perform the conversion
    convertFSPSSEDFamily([infilepath], [outfilepath])
Exemplo n.º 15
0
    def saveTo(self, saveFilePath):
        """Save the ski file to the given path, updating the producer and time stamps.

        Raises ValueError if the target path does not have a ".ski" or ".xml"
        filename extension.
        """
        # get the absolute path and verify the file name
        path = ut.absPath(saveFilePath)
        # bug fix: validate the *target* path rather than the originally loaded one
        if path.suffix.lower() not in (".ski", ".xml"):
            raise ValueError("Invalid filename extension for ski file")

        # update the producer and time attributes on the root element
        root = self._tree.getroot()
        root.set("producer", "Python toolkit for SKIRT (SkiFile class)")
        root.set("time", datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))

        # serialize the XML tree
        self._tree.write(str(path),
                         encoding="UTF-8",
                         xml_declaration=True,
                         pretty_print=True)
Exemplo n.º 16
0
    def __init__(self, path=None):
        """Locate the SKIRT executable at the given path or at the default location."""

        if path is not None:
            # an explicit path was given; it must point to an existing file
            self._path = ut.absPath(path)
            if not self._path.is_file():
                raise ValueError(
                    "Specified SKIRT executable does not exist: {}".format(
                        self._path))
        else:
            # fall back to the executable in the project directory structure
            self._path = ut.skirtPath()
            if self._path is None:
                raise ValueError(
                    "Cannot locate default SKIRT executable in PTS/SKIRT project directory structure"
                )

        # no SKIRT process is executing yet
        self._process = None
Exemplo n.º 17
0
def getFitsAxes(path):
    """Return the coordinate grids for the axes of the FITS file at the given path."""
    path = ut.absPath(path)
    with fits.open(path) as hdul:
        # construct the x and y grids from the primary header keywords
        header = hdul[0].header
        x = _grid(header['NAXIS1'], header['CRPIX1'], header['CRVAL1'],
                  header['CDELT1'], header['CUNIT1'])
        y = _grid(header['NAXIS2'], header['CRPIX2'], header['CRVAL2'],
                  header['CDELT2'], header['CUNIT2'])
        # a two-dimensional file has no z axis
        if int(header['NAXIS']) == 2:
            return x, y

        # otherwise read the z grid from the dedicated table extension
        hdu = hdul["Z-axis coordinate values"]
        z = hdu.data["GRID_POINTS"].astype(float) << smunit(
            hdu.header["TUNIT1"])
        return x, y, z
Exemplo n.º 18
0
def writeStoredColumns(outFilePath, columnNames, columnUnits, values):
    """Write a SKIRT stored columns (.scol) file with the given column data.

    Parameters:
      outFilePath: output file path; must have the ".scol" filename extension.
      columnNames: sequence of column name strings, one per column.
      columnUnits: sequence of unit strings, one per column.
      values: sequence of columns, each holding the same number of row values.

    Raises ValueError when the extension is wrong or the number of names or
    units does not match the number of value columns.
    """
    outpath = ut.absPath(outFilePath)

    # assemble all values in a single (numColumns, numRows) array
    values = np.array(values)
    numColumns, numRows = values.shape

    # verify some of the requirements/restrictions on the specified data
    if outpath.suffix != ".scol":
        raise ValueError(
            "Stored columns filename extension is not '.scol': {}".format(
                outpath))
    # bug fix: the original overwrote numColumns with len(columnNames) before
    # comparing, so mismatches between the metadata and the value columns
    # went undetected; compare against the actual number of value columns
    if numColumns < 1 or numColumns != len(columnNames) or numColumns != len(
            columnUnits):
        raise ValueError("Mismatch in number of columns")

    # open the output file
    with open(outpath, 'wb') as out:
        # write the SKIRT, endianness, and version tags
        out.write(b"SKIRT X\n")
        intToFile(out, 0x010203040A0BFEFF)
        intToFile(out, 0)

        # write the rows and columns metadata
        intToFile(out, numRows)
        intToFile(out, numColumns)
        for columnName in columnNames:
            stringToFile(out, columnName)
        for columnUnit in columnUnits:
            stringToFile(out, columnUnit)

        # write the values
        arrayToFile(out, values)

        # write the EOF tag
        out.write(b"SCOLEND\n")

    # report file creation to user
    logging.info("Created stored columns file: {}".format(outpath))
Exemplo n.º 19
0
def getQuantityFromFile(path, trigger, header):
    """Return the quantity found on a matching line of the text file at the given path.

    The '/'-separated trigger segments must each occur, in order, on earlier
    lines before a line containing the header text is considered. The value is
    taken from the end of that line; a trailing parenthesized section is
    ignored, and the last token serves as the unit string unless the line ends
    in a plain (dimensionless) number. Raises ValueError when no match is found.
    """
    path = ut.absPath(path)

    pending = trigger.split("/")
    matched = 0
    with open(path) as infile:
        for line in infile:
            if matched < len(pending) and pending[matched] in line:
                # advance to the next trigger segment
                matched += 1
            elif matched == len(pending) and header in line:
                # drop a trailing parenthesized section, if any
                if line.strip().endswith(')'):
                    line = line[:line.rfind('(')]
                segments = line.split()
                if len(segments) >= 2:
                    # a numeric final token means a dimensionless value
                    try:
                        return float(segments[-1]) << smunit("")
                    except ValueError:
                        pass
                    # otherwise the final token is the unit string
                    return float(segments[-2]) << smunit(segments[-1])

    raise ValueError("Quantity '{}' not found in text file".format(header))
Exemplo n.º 20
0
def do(
    filepath: (str, "name or path of the column text file to be converted"),
    names: (str, "white-space-separated list of column names"),
    units: (str, "white-space-separated list of unit strings"),
) -> "convert column text file to SKIRT stored columns format":
    """Convert a column text file to the binary SKIRT stored columns format."""

    import numpy as np
    import pts.storedtable as stab
    import pts.utils as ut

    # derive the output path from the input path by swapping the extension
    inpath = ut.absPath(filepath)
    outpath = inpath.with_suffix('.scol')

    # read all columns from the text file
    values = np.loadtxt(inpath, unpack=True)

    # accept commas as well as white space to separate names and units
    names = names.replace(",", " ").split()
    units = units.replace(",", " ").split()

    # write the binary file
    stab.writeStoredColumns(outpath, names, units, values)
Exemplo n.º 21
0
def readStoredColumns(columnsFilePath):
    """Read a SKIRT stored columns file and return its contents as a dictionary.

    The returned dictionary maps "columnNames" and "columnUnits" to the
    corresponding metadata lists, and each column name to its data values
    with the appropriate unit attached.
    """
    inpath = ut.absPath(columnsFilePath)

    # open the file
    with open(inpath, 'rb') as infile:
        # verify header tags
        if stringFromFile(infile) != "SKIRT X" or intFromFile(infile) != 0x010203040A0BFEFF \
                        or intFromFile(infile) != 0:
            # bug fix: the message previously said "stored table format"
            raise ValueError(
                "File does not have SKIRT stored columns format: {}".format(
                    inpath))

        # get the number of columns and rows
        numRows = intFromFile(infile)
        numColumns = intFromFile(infile)

        # get the column metadata
        columnNames = [stringFromFile(infile) for i in range(numColumns)]
        columnUnits = [stringFromFile(infile) for i in range(numColumns)]

        # get the data values
        values = arrayFromFile(infile, (numColumns, numRows))

        # verify the trailing tag
        if stringFromFile(infile) != "SCOLEND":
            raise ValueError(
                "File does not have the proper trailing tag: {}".format(
                    inpath))

    # construct the dictionary that will be returned, adding basic metadata
    d = dict(columnNames=columnNames, columnUnits=columnUnits)

    # add data values with units attached
    for i in range(numColumns):
        d[columnNames[i]] = values[i] << sm.unit(columnUnits[i])

    return d
Exemplo n.º 22
0
def upgradeSkiFile(inpath, *, backup=True, replace=True):
    """Upgrade a ski file to the latest version of the SKIRT 9 format.

    Parameters:
      inpath: path of the ski file to be upgraded.
      backup: if True, rename the original file to a timestamped backup
              before saving (only when the contents actually changed).
      replace: if True, save the upgraded contents under the original name;
              otherwise save them to a new "_upgradedski.xml" file.

    Files that are not well-formed XML, do not have the ski file format, or
    are older than format version 9 are reported via the logger and skipped.
    """
    # load the ski file
    inpath = ut.absPath(inpath)
    try:
        ski = sm.SkiFile(inpath)
    except SyntaxError:
        logging.error("File does not contain well-formed XML: {}".format(inpath))
        return

    # verify the ski file format version
    try:
        version = ski.getStringAttribute("/skirt-simulation-hierarchy", "format")
    except ValueError:
        logging.error("XML file does not have ski file format: {}".format(inpath))
        return
    if version!="9":
        logging.error("Ski file is older than version 9: {}".format(inpath))
        return

    # perform the upgrade in the XML tree in memory, keeping track of whether the contents has actually changed
    changed = False
    for condition,templates in _getUpgradeDefinitions():
        changed |= ski.transformIf(condition, templates)

    # save the upgraded version if needed
    if changed:
        if backup:
            # rename the original to a timestamped backup before writing
            inpath.rename(inpath.with_name(inpath.stem + "_" + ut.timestamp() + "_backupski.xml"))
        if replace:
            ski.saveTo(inpath)
            logging.warning("Ski file UPGRADED:  {}".format(inpath))
        else:
            outpath = inpath.with_name(inpath.stem + "_upgradedski.xml")
            ski.saveTo(outpath)
            logging.warning("Ski file UPGRADED:  {} --> {}".format(inpath, outpath.name))
    else:
        logging.info("Ski file unchanged: {}".format(inpath))
Exemplo n.º 23
0
def saveColumns(path,
                quantities,
                units,
                descriptions,
                *,
                title=None,
                fmt="%1.9e"):
    """Save a sequence of quantities as text-file columns with a descriptive header."""

    # the units and descriptions are given as comma-separated strings
    descriptions = [segment.strip() for segment in descriptions.split(",")]
    units = [segment.strip() for segment in units.split(",")]

    # each quantity needs exactly one unit and one description
    if len(quantities) != len(units) or len(quantities) != len(descriptions):
        raise ValueError(
            "Number of units or descriptions does not match number of quantities"
        )

    # convert every quantity to its requested unit
    quantities = [q.to(smunit(u)) for q, u in zip(quantities, units)]

    # write the header lines followed by the data in column format
    path = ut.absPath(path)
    with open(path, 'wt') as outfile:
        if title:
            outfile.write("# {}\n".format(title))
        for col, (description, unit) in enumerate(zip(descriptions, units)):
            outfile.write("# column {}: {} ({})\n".format(
                col + 1, description, unit))
        np.savetxt(outfile, np.stack(quantities).T, fmt=fmt)
Exemplo n.º 24
0
    def execute(self,
                skiFilePath,
                *,
                inDirPath="",
                outDirPath="",
                skiRelative=False,
                numThreadsPerProcess=0,
                numProcesses=1,
                verbose=False,
                wait=True,
                console='regular'):
        """Execute SKIRT for the given ski file and return a Simulation object.

        Parameters:
          skiFilePath: path of the ski file to execute.
          inDirPath/outDirPath: input and output directories; interpreted
              relative to the ski file directory when skiRelative is True.
          skiRelative: whether the in/out paths are relative to the ski file.
          numThreadsPerProcess: threads per process; 0 omits the -t option.
          numProcesses: number of MPI processes, or the string 'lsf'/'srun'
              to delegate process allocation to that queueing system.
          verbose: pass the -v flag to SKIRT.
          wait: block until SKIRT finishes; otherwise run asynchronously.
          console: 'regular' shows SKIRT output; 'silent' suppresses it.

        Raises ValueError when this object is already executing SKIRT or when
        numProcesses has an unsupported string value.
        """

        if self.isRunning():
            raise ValueError(
                "Calling execute on SKIRT object that is still executing")

        # --- build the argument list ---

        # mpi support
        if isinstance(numProcesses, str):
            if numProcesses == 'lsf':
                arguments = ["mpirun", "-lsf"]
            elif numProcesses == 'srun':
                arguments = ["mpirun", "-srun"]
            else:
                raise ValueError(
                    "Unsupported string value for numProcesses: {}",
                    numProcesses)
        else:
            numProcesses = int(numProcesses)
            if numProcesses > 1:
                arguments = ["mpirun", "-np", str(numProcesses)]
            else:
                arguments = []

        # skirt executable
        arguments += [str(self._path)]

        # ski file
        arguments += [str(ut.absPath(skiFilePath))]

        # i/o path options
        if skiRelative:
            base = ut.absPath(skiFilePath).parent
            inpath = ut.absPath(base / inDirPath)
            outpath = ut.absPath(base / outDirPath)
        else:
            inpath = ut.absPath(inDirPath)
            outpath = ut.absPath(outDirPath)
        arguments += ["-i", str(inpath)]
        arguments += ["-o", str(outpath)]

        # parallelization options
        numThreadsPerProcess = int(numThreadsPerProcess)
        if numThreadsPerProcess > 0:
            arguments += ["-t", str(numThreadsPerProcess)]
        if verbose:
            arguments += ["-v"]

        # logging options: brief console logging when output is redirected or asynchronous
        if console != 'regular' or not wait:
            arguments += ["-b"]

        # --- launch SKIRT ---

        if wait:
            self._process = None
            if console == 'silent':
                subprocess.run(arguments,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)
            else:
                # we pipe the SKIRT output to the console ourselves because this also works in Jupyter notebooks
                popen = subprocess.Popen(arguments,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT,
                                         universal_newlines=True)
                for line in iter(popen.stdout.readline, ""):
                    print(line, end="")
                popen.stdout.close()
                popen.wait()
        else:
            # asynchronous execution: keep the process handle, discard its output
            self._process = subprocess.Popen(arguments,
                                             stdout=subprocess.DEVNULL,
                                             stderr=subprocess.DEVNULL)

        return Simulation(skiFilePath=skiFilePath,
                          inDirPath=inpath,
                          outDirPath=outpath,
                          process=self._process)
Exemplo n.º 25
0
def loadFits(path):
    """Load the data from a FITS file and attach the units given in its header."""
    path = ut.absPath(path)
    # request the header as well so the BUNIT keyword can be read
    data, header = fits.getdata(path, header=True)
    # transpose the data and attach the units declared in the header
    return data.T.astype(float) << smunit(header['BUNIT'])
Exemplo n.º 26
0
def loadColumns(path, columns=None):
    """Load selected columns from a text column file, attaching units.

    The file header is scanned for comment lines of the form
    "# column ...: description (unit)" to discover the column layout.
    The *columns* argument selects which columns to load:
      - None: load all columns listed in the header;
      - a string: comma-separated list of one-based column indices and/or
        column description fragments;
      - any other sequence: zero-based column indices.
    Returns a list of arrays (one per selected column) with the unit from
    the corresponding header entry attached.
    """
    path = ut.absPath(path)

    # scan the header: collect (description, unit) for each column line,
    # stopping at the first non-empty line that is not a comment
    header = []
    with open(path) as infile:
        for line in infile:
            line = line.strip()
            if not line:
                continue  # blank lines do not terminate the header
            if not line.startswith("#"):
                break  # first data line ends the header
            line = line[1:].strip()
            # only "column ...:" comment lines describe a column
            if not (line.lower().startswith("column") and ":" in line):
                continue
            colon = line.find(":")
            if line.endswith(')'):
                # trailing parenthesized segment holds the unit string
                left = line.rfind("(")
                description = line[colon + 1:left].strip()
                unit = line[left + 1:-1].strip()
            else:
                description = line[colon + 1:].strip()
                unit = ""
            header.append((description, unit))

    # translate the column selection into a list of zero-based indices
    if columns is None:
        # all columns, in header order
        usecols = list(range(len(header)))
    elif isinstance(columns, str):
        # comma-separated one-based indices and/or description fragments
        usecols = []
        for item in columns.split(","):
            if _representsInteger(item):
                index = int(item)
                if not 1 <= index <= len(header):
                    raise ValueError(
                        "one-based column index is out of range: {}".format(
                            index))
                usecols.append(index - 1)
            else:
                usecols.append(_indexForDescriptionInHeader(item, header))
    else:
        # arbitrary sequence of zero-based indices
        usecols = []
        for item in columns:
            index = int(item)
            if not 0 <= index < len(header):
                raise ValueError(
                    "zero-based column index is out of range: {}".format(index))
            usecols.append(index)

    # read the selected columns and attach the units from the header
    data = np.loadtxt(path, usecols=usecols, ndmin=2, unpack=True)
    return [
        values << smunit(header[index][1])
        for index, values in zip(usecols, data)
    ]
Exemplo n.º 27
0
    def saveTo(self, outFilePath, tiff16bit=False):
        """Save the image to the specified file path.

        The filename extension selects the output format:
          - ".tif"/".tiff" with tiff16bit=True: an uncompressed RGB TIFF
            with 16 bits per sample (3 samples per pixel), written directly
            by emitting a hand-built TIFF header followed by the pixel data;
          - ".bmp", ".gif", ".jpg", ".jpeg", ".png", ".tif", ".tiff",
            ".pdf": a standard 8-bit image saved through PIL;
          - ".fits": a FITS file with the three image channels stacked in
            reversed order (presumably RGB -> BGR plane order; confirm
            against the reader).

        Raises ValueError for any other filename extension.
        """
        path = ut.absPath(outFilePath)
        suffix = path.suffix.lower()

        # 16-bit TIFF file
        if tiff16bit and suffix in (".tif", ".tiff"):
            # tiff header as uint16 words (little endian)
            lsNX, msNX = _split16(self._shape[0])
            lsNY, msNY = _split16(self._shape[1])
            # strip byte count: width * height * 3 samples * 2 bytes
            lsBYTES, msBYTES = _split16(self._shape[0] * self._shape[1] * 6)
            header = (
                0x4949,
                42,
                8,
                0,  #   0: TIFF header (little endian)
                12,  #   8: number of directory entries
                #  (directory entry: tag,type,count,0,value/offset x 2)
                256,
                4,
                1,
                0,
                lsNX,
                msNX,  #  10: ImageWidth, 1 LONG
                257,
                4,
                1,
                0,
                lsNY,
                msNY,  #  22: ImageLength, 1 LONG
                258,
                3,
                3,
                0,
                158,
                0,  #  34: BitsPerSample, 3 SHORT (-> offset!)
                259,
                3,
                1,
                0,
                1,
                0,  #  46: Compression, 1 SHORT
                262,
                3,
                1,
                0,
                2,
                0,  #  58: PhotometricInterpretation, 1 SHORT
                273,
                4,
                1,
                0,
                180,
                0,  #  70: StripOffsets, 1 LONG
                277,
                3,
                1,
                0,
                3,
                0,  #  82: SamplesPerPixel, 1 SHORT
                278,
                4,
                1,
                0,
                lsNY,
                msNY,  #  94: RowsPerStrip, 1 LONG
                279,
                4,
                1,
                0,
                lsBYTES,
                msBYTES,  # 106: StripByteCounts, 1 LONG
                282,
                5,
                1,
                0,
                164,
                0,  # 118: XResolution, 1 RATIONAL (-> offset!)
                283,
                5,
                1,
                0,
                172,
                0,  # 130: YResolution, 1 RATIONAL (-> offset!)
                296,
                3,
                1,
                0,
                2,
                0,  # 142: ResolutionUnit, 1 SHORT
                0,
                0,  # 154: IFD list terminator
                16,
                16,
                16,  # 158: BitsPerSample value
                72,
                0,
                1,
                0,  # 164: XResolution value
                72,
                0,
                1,
                0)  # 172: YResolution value
            # 180: Image data
            # NOTE: ndarray.tostring() was removed in NumPy 2.0;
            # tobytes() is the exact byte-for-byte replacement.
            # The context manager guarantees the file is closed even if
            # scaledPixelArray() raises.
            with open(path, 'wb') as out:
                out.write(np.array(header, dtype=np.uint16).tobytes())
                data = self.scaledPixelArray(0, 65535.99)
                out.write(
                    np.flipud(np.rollaxis(data, 1)).astype(np.uint16).tobytes())

        # standard 8-bit image file
        elif suffix in (".bmp", ".gif", ".jpg", ".jpeg", ".png", ".tif",
                        ".tiff", ".pdf"):
            self._ensurePil(invalidate=False)
            if suffix in (".jpg", ".jpeg"):
                self._dpil.save(path, format="JPEG", quality=80)
            elif suffix in (".tif", ".tiff"):
                self._dpil.save(path, format="TIFF")
            elif suffix == ".png":
                self._dpil.save(path, format="PNG")
            elif suffix == ".pdf":
                self._dpil.save(path, format="PDF")

        # FITS file
        elif suffix == ".fits":
            self._ensureArr(invalidate=False)
            # stack the channels in reversed order before writing
            data = np.dstack(
                (self._darr[:, :, 2], self._darr[:, :, 1], self._darr[:, :,
                                                                      0]))
            fits.writeto(path, data.T, overwrite=True)

        # unsupported type
        else:
            raise ValueError("OutFilePath has unsupported filename extension")