Example #1
def text_load(path: str, filename: str) -> Spectrum:
    """
    Loads the standardized ASCII format Spectrum file as written by text_write.
    
    Note: If the redshift and/or gmag values cannot be converted to a float for any
    reason, they are assigned a value of -1.
    
    :param path: /path/to/input file
    :param filename: input file name
    :type path: str
    :type filename: str
    :return: Loaded Spectrum
    :rtype: Spectrum
    :raises: FileNotFoundError
    """
    fileCheck(path, filename)

    with open(join(path, filename), 'r') as infile:
        """ Read header.

        File format:
            namestring=55555-4444-333,z=float(),gmag=float()
            wavelength,flux density,error

        Parse the first line, use the second as CSV reader input
         """
        header = infile.readline().strip().split(',')
        namestring = fns(header[0])
        try:
            z = float(header[1].strip("z="))
        except ValueError:
            z = -1
        try:
            gmag = float(header[2].strip("gmag="))
        except ValueError:
            gmag = -1

        reader = DictReader(infile,
                            fieldnames=infile.readline().strip().split(','))
        wls = []
        flux = []
        err = []
        for row in reader:
            try:
                wls.append(int(row['wavelength']))
            except ValueError:
                wls.append(float(row['wavelength']))
            flux.append(float(row['flux density']))
            err.append(float(row['error']))
    spec = Spectrum(namestring=namestring, z=z, gmag=gmag)
    spec.setDict(wls, flux, err)
    return spec
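A minimal usage sketch for text_load, assuming it is importable alongside Spectrum from this module; the /tmp directory, file name, and spectrum values below are purely illustrative:

from os.path import join as path_join

# Write a small file in the format text_write produces: header line, column names, CSV rows
sample = (
    "namestring=55555-4444-333,z=1.52,gmag=18.34\n"
    "wavelength,flux density,error\n"
    "3800.0,12.1,0.8\n"
    "3801.5,12.4,0.9\n"
)
with open(path_join("/tmp", "sample_spectrum.txt"), "w") as outfile:
    outfile.write(sample)

# Load it back; an unparseable z or gmag value would come back as -1
spec = text_load("/tmp", "sample_spectrum.txt")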
Example #2
def load(path: str, filename: str) -> Spectrum:
    """
    Loads the serialized spectrum file at /path/filename

    :param path: /path/to/filename
    :param filename:  file name of spectrum to load
    :type path: str
    :type filename: str
    :return: Loaded Spectrum
    :rtype: Spectrum
    :raises: FileNotFoundError
    """
    fileCheck(path, filename)
    with open(join(path, filename), 'rb') as infile:
        return pickle.load(infile)
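A short round-trip sketch for load; it assumes Spectrum, load, and the standard pickle module are importable here, and the /tmp path and file name are hypothetical:

import pickle
from os.path import join as path_join

# Pickle a Spectrum by hand, then read it back with load
spec = Spectrum(namestring="55555-4444-333", z=1.52, gmag=18.34)
with open(path_join("/tmp", "sample_spectrum.p"), "wb") as outfile:
    pickle.dump(spec, outfile)

restored = load("/tmp", "sample_spectrum.p")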
Example #3
def namestring_dict_reader(path: str,
                           filename: str,
                           top_key: str = "namestring",
                           has_header: bool = True) -> dict:
    """
    Reads a CSV file into a dictionary keyed by the top_key column (default "namestring").

    With has_header=True, the first line supplies the field names and each subsequent row
    becomes { namestring: { field: value, ... } }.  With has_header=False, each line is
    split on ',' and becomes { first_field: value } for two fields, or
    { first_field: [ values... ] } when more than two fields are present.  Numeric strings
    are converted to int or float where possible.

    :param path: /path/to/filename
    :param filename: file name to read
    :param top_key: header field whose value becomes the outer dictionary key
    :param has_header: whether the first line of the file is a header
    :type path: str
    :type filename: str
    :type top_key: str
    :type has_header: bool
    :rtype: dict
    :raises: FileNotFoundError
    """
    from fileio.utils import fileCheck, join
    fileCheck(path, filename)

    def num_conv(num: str) -> Union[float, int]:
        try:
            num = int(num)
        except ValueError:
            try:
                num = float(num)
            except ValueError:
                pass
        return num

    def form_dict(line: dict) -> dict:
        namestring = line.pop(top_key)
        for k, v in line.items():
            line[k] = num_conv(v)
        return {namestring: dict(line)}

    def form_list_value(line: str) -> dict:
        line = line.strip().split(',')
        if len(line) > 2:
            for i in range(1, len(line)):
                line[i] = num_conv(line[i])
            return {line[0]: line[1:]}

        line[1] = num_conv(line[1])
        return {line[0]: line[1]}

    with open(join(path, filename), 'r') as infile:
        outdict = {}
        reader = infile

        reader_func = None
        if has_header:
            from csv import DictReader
            reader = DictReader(
                infile, fieldnames=infile.readline().strip().split(','))
            reader_func = form_dict
        else:
            reader_func = form_list_value

        for line in reader:
            outdict.update(reader_func(line))

    return outdict
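A usage sketch for namestring_dict_reader; the catalog file name, location, and values are hypothetical:

# Write a small catalog with a header row keyed by "namestring"
with open("/tmp/catalog.csv", "w") as outfile:
    outfile.write("namestring,z,gmag\n55555-4444-333,1.52,18.34\n")

cat = namestring_dict_reader("/tmp", "catalog.csv")
# cat == { "55555-4444-333": { "z": 1.52, "gmag": 18.34 } }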
Example #4
def simple_list_reader(path: str,
                       filename: str,
                       valuespliter: Union[str, None] = ",") -> list:
    """
    Reads in a simple list from a file.  Will attempt to split each line by valuesplitter variable.
    Is capable of discerning between input types of int, float and str.  Will evaluate to these types accordingly.

    If the length of line.split( valuesplitter ) == 1, returns a simple list of values.  If that length is greater than one,
    the entry will be a tuple of all the individual values.

    :param path: /path/to/filename
    :param filename: name of the file
    :param valuespliter: value to split the line by.  Defaults to a comma ","  If you need to ensure the line is NOT split, enter valuesplitter = None
    :type path: str
    :type filename: str
    :type valuespliter: str or NoneType
    :return: List of file lines
    :rtype: list
    """
    from fileio.utils import fileCheck, join

    # Use this helper to determine the subtype of each value and convert accordingly,
    # i.e. figure out if it's an int, float, or str.
    # If the value cannot be converted to any of those three types, a ValueError is raised.
    def __get_type(value) -> type:
        types = [int, float, str]
        for t in types:
            try:
                t(value)
                return t
            except ValueError:
                continue
        raise ValueError(f"Unable to determine type if input value: { value }")

    fileCheck(path, filename)
    outlist = []
    with open(join(path, filename), 'r') as infile:
        for line in infile:
            line = line.strip().split(valuespliter)
            for i in range(len(line)):
                line[i] = __get_type(line[i].strip())(line[i])
            if len(line) == 1:
                line = line[0]
            else:
                line = tuple(line)
            outlist.append(line)
    return outlist
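A usage sketch for simple_list_reader showing the per-value type conversion and the single-value vs. tuple behavior; the file name and contents are illustrative:

with open("/tmp/simple_list.txt", "w") as outfile:
    outfile.write("alpha,1,2.5\nbeta,3,4.5\nsingleton\n")

values = simple_list_reader("/tmp", "simple_list.txt")
# values == [ ("alpha", 1, 2.5), ("beta", 3, 4.5), "singleton" ]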
Example #5
def fit_spec_loader(path: str,
                    filename: str,
                    mask_dict: dict = DEF_MASK_DICT) -> Spectrum:
    """
    Loads a FIT spectrum file from SDSS DR 7 or lower.  Converts it into Spectrum type.

    Note: mask_dict has the actual mask values as keys.  The loader iterates through these
    keys and deletes any points where they are found.  The dict format is an artifact: the
    values attached to each key are the SDSS error names in text.

    :param path: /path/to/file
    :param filename: filename.fits
    :param mask_dict: Defaults to DEF_MASK_DICT defined in this file if not passed
    :type path: str
    :type filename: str
    :type mask_dict: dict
    :rtype: Spectrum
    """
    from astropy.io.fits import getheader, getdata
    from fileio.utils import fileCheck, join
    from catalog import shenCat

    fileCheck(path, filename)

    shenCat.load()
    infile = join(path, filename)

    # Assemble basic info from the header
    # Check if the HW redshift is included in the shenCat.  If so, assign it,
    # otherwise use the one in the file
    header = getheader(infile, 0)
    namestring = "%05i-%04i-%03i" % (header['MJD'], header['PLATEID'],
                                     header['FIBERID'])
    z = shenCat.subkey(namestring, 'z') if namestring in shenCat else float(
        header['z'])
    gmag = float(header['MAG'].split()[1])  # Stored as UGRIZ

    data = getdata(infile, 0)
    flux_data = data[0].tolist()  # first aperture is the calibrated spectrum flux density
    # data[ 1 ] is the continuum-subtracted spectrum.  Not of interest
    err_data = data[2].tolist()  # third is the +/- of flux density
    mask_data = data[3].tolist()  # error mask

    # Wavelength values are not stored in FIT files.  Only three values are available, and these are used to
    # generate the wavelengths which correspond to the pixels
    #   i.e. wl[ pixel 0 ] -> flux density[ 0 ], error[ 0 ], mask[ 0 ], etc
    #
    # Those 3 values are:
    #   naxis1 : number of pixels stored
    #   coeff0 : Log10 of the first wavelength
    #   coeff1 : Log10 of the dispersion coefficient
    #
    # Log10( wavelengths ) are generated by the function:   log_wl_n( n ) = c0 + c1 * n
    # where n is the nth pixel
    # Then the wavelength, in angstroms, is given by 10^( log_wl_n )
    c0 = header['coeff0']
    c1 = header['coeff1']
    num_pixels = header['naxis1']
    # The actual wavelength generation happens here
    wavelengths = [pow(10, c0 + c1 * n) for n in range(num_pixels)]

    out_spec = Spectrum(namestring=namestring, z=z, gmag=gmag)
    out_spec.setDict(wavelengths, flux_data, err_data)

    # Mask out the errors
    for i in range(len(err_data)):
        if __bit_mask(mask_data[i], mask_dict):
            del out_spec[wavelengths[i]]
    return out_spec
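A usage sketch for fit_spec_loader; the directory and spSpec file name below are hypothetical, and a real SDSS DR7-or-earlier FIT file must exist at that location:

# Load an SDSS DR7 spectrum, masking pixels according to the default DEF_MASK_DICT
spec = fit_spec_loader("/data/dr7", "spSpec-51630-0266-063.fit")

# A custom mask_dict (mask value -> SDSS error name, per the note above) can be passed
# to change which mask values cause points to be dropped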