Example 1
    def _from_File(self, fname):
        """_from_File -- load the content of a FITS or HDF file

        Parameters
        ----------

        fname: str
            filename (incl. path) to read from
        """

        # load_seds - load wavelength and seds
        if self._get_type(fname) == "fits":
            with pyfits.open(fname) as f:
                self.seds = f[0].data[:-1]
                self.lamb = f[0].data[-1]
            self.grid = Table(fname)

        elif self._get_type(fname) == "hdf":
            with HDFStore(fname, mode="r") as s:
                self.seds = s["/seds"].read()
                self.lamb = s["/lamb"].read()
                try:
                    self.cov_diag = s["/covdiag"].read()
                except Exception:
                    self.cov_diag = None
                try:
                    self.cov_offdiag = s["/covoffdiag"].read()
                except Exception:
                    self.cov_offdiag = None
            self.grid = Table(fname, tablename="/grid")

        self._header = self.grid.header
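A minimal sketch of the FITS layout this loader expects, assuming pyfits is astropy.io.fits: the SEDs are stacked row-wise in the primary HDU with the wavelength grid appended as the last row, which is exactly what f[0].data[:-1] and f[0].data[-1] unpack (the file name is illustrative).

import numpy as np
from astropy.io import fits

lamb = np.linspace(1e3, 3e4, 100)   # wavelengths in Angstrom
seds = np.random.rand(5, 100)       # five model SEDs sampled on lamb
# stack so that the last row holds the wavelengths, matching the reader
fits.writeto("grid.fits", np.vstack([seds, lamb]), overwrite=True)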
Example 2
    def _load_grid(self, fname):
        """load_grid - load grid table"""
        # load_seds - load wavelength and seds
        if self._grid is None:
            if self._get_type(fname) == "fits":
                self._grid = Table(fname)

            elif self._get_type(fname) == "hdf":
                self._grid = Table(fname, tablename="/grid")
Example 3
    def _load_table_(self, source):
        t = Table(source)
        data = {}
        for k in list(t.keys()):
            data[k] = t[k]
        # Alias columns
        data["logM"] = log10(np.asarray(data["M_ini"]))
        data["logg"] = np.asarray(data["logG"])
        data["logT"] = np.asarray(data["logTe"])
        data["logL"] = np.asarray(data["logL/Lo"])
        data["logA"] = np.asarray(data["log(age/yr)"])
        # clean columns
        data.pop("log(age/yr)")
        data.pop("M_ini")
        data.pop("logG")
        data.pop("logTe")
        data.pop("logL/Lo")

        self.data = Table(data, name="Isochrone from %s" % self.name)
Example 4
    def _from_HDFBackend(self, b):
        """_from_HDFBackend -- convert from HDFBackend

        Parameters
        ----------

        b: GridBackend or sub class
            backend to convert from
        """
        self.lamb = b.lamb.read()
        self.seds = b.seds.read()
        self.grid = Table(b.grid.read())
        self._filters = b._filters[:]
        self._header = b.header
        self._aliases = b._aliases
Example 5
def make_extinguished_grid(
    spec_grid,
    filter_names,
    extLaw,
    avs,
    rvs,
    fAs=None,
    av_prior_model={"name": "flat"},
    rv_prior_model={"name": "flat"},
    fA_prior_model={"name": "flat"},
    chunksize=0,
    add_spectral_properties_kwargs=None,
    absflux_cov=False,
    filterLib=None,
):
    """
    Extinguish spectra and extract an SEDGrid through given series of filters
    (all wavelengths in stellar SEDs and filter response functions are assumed
    to be in Angstroms)

    Parameters
    ----------
    spec_grid: str or grid.SpectralGrid
        if a str, spec_grid is the filename of the grid file with the
        stellar spectra; the least invasive backend is used to load it:
        'hdf' if possible, 'cache' otherwise.

        if not a str, the corresponding SpectralGrid instance is expected
        (backend already set up)

    filter_names: list
        list of filter names according to the filter lib

    avs: sequence
        Av values to iterate over

    av_prior_model: dict
        prior model name and parameters

    rvs: sequence
        Rv values to iterate over

    rv_prior_model: dict
        prior model name and parameters

    fAs: sequence, optional
        f_A values to iterate over;
        f_A can be omitted if the extinction law does not use it or
        allows fixed values

    fA_prior_model: dict
        prior model name and parameters

    chunksize: int, optional (default=0)
        number of extinction model variations to generate at each cycle;
        each chunk holds len(spec_grid) * chunksize models.
        If <= 0, all models are returned at once.

    filterLib:  str
        full filename of the filter library hd5 file

    add_spectral_properties_kwargs: dict
        keyword arguments to call :func:`add_spectral_properties` at each
        iteration to add model properties from the spectra into the grid
        property table

    absflux_cov: bool
        set to calculate the absflux covariance matrices for each model
        (can be very slow, but it is the right thing to do)

    Yields
    ------
    g: grid.SpectralGrid
        chunks of the final grid of reddened SEDs and models
    """
    # Check inputs
    # ============
    # get the stellar grid (no dust yet)
    # if string is provided try to load the most memory efficient backend
    # otherwise use a cache-type backend (load only when needed)
    if isinstance(spec_grid, str):
        ext = spec_grid.split(".")[-1]
        if ext in ["hdf", "hd5", "hdf5"]:
            g0 = SpectralGrid(spec_grid, backend="hdf")
        else:
            g0 = SpectralGrid(spec_grid, backend="cache")
    else:
        helpers.type_checker("spec_grid", spec_grid, SpectralGrid)
        g0 = spec_grid

    # Tag fA usage
    if fAs is None:
        with_fA = False
    else:
        with_fA = True

    # get the min/max R(V) values necessary for the grid point definition
    min_Rv = min(rvs)
    max_Rv = max(rvs)

    # Create the sampling mesh
    # ========================
    # basically the dot product from all input 1d vectors
    # setup iteration over the full dust parameter grid
    if with_fA:
        dustpriors = PriorWeightsDust(avs, av_prior_model, rvs, rv_prior_model,
                                      fAs, fA_prior_model)

        it = np.nditer(np.ix_(avs, rvs, fAs))
        niter = np.size(avs) * np.size(rvs) * np.size(fAs)
        npts, pts = _make_dust_fA_valid_points_generator(it, min_Rv, max_Rv)

        # Pet the user
        print("number of initially requested points = {0:d}\n"
              "number of valid points = {1:d} (based on restrictions in "
              "the R(V) versus f_A plane)".format(niter, npts))

        if npts == 0:
            raise AttributeError("No valid points")
    else:
        dustpriors = PriorWeightsDust(avs, av_prior_model, rvs, rv_prior_model,
                                      [1.0], fA_prior_model)

        it = np.nditer(np.ix_(avs, rvs))
        npts = np.size(avs) * np.size(rvs)
        pts = ((float(ak), float(rk)) for ak, rk in it)

    # Generate the Grid
    # =================
    N0 = len(g0.grid)
    N = N0 * npts

    if chunksize <= 0:
        print("Generating a final grid of {0:d} points".format(N))
        chunksize = npts
    else:
        print("Generating a final grid of {0:d} points in {1:d} pieces"
              .format(N, int(float(npts) / chunksize + 1.0)))

    if add_spectral_properties_kwargs is not None:
        nameformat = add_spectral_properties_kwargs.pop("nameformat",
                                                        "{0:s}") + "_wd"

    for chunk_pts in helpers.chunks(pts, chunksize):
        # iter over chunks of models

        # setup chunk outputs
        cols = {"Av": np.empty(N, dtype=float), "Rv": np.empty(N, dtype=float)}

        if with_fA:
            cols["Rv_A"] = np.empty(N, dtype=float)
            cols["f_A"] = np.empty(N, dtype=float)

        keys = list(g0.keys())
        for key in keys:
            cols[key] = np.empty(N, dtype=float)

        n_filters = len(filter_names)
        _seds = np.empty((N, n_filters), dtype=float)
        if absflux_cov:
            n_offdiag = ((n_filters**2) - n_filters) // 2
            _cov_diag = np.empty((N, n_filters), dtype=float)
            _cov_offdiag = np.empty((N, n_offdiag), dtype=float)

        for count, pt in enumerate(tqdm(chunk_pts, desc="SED grid")):

            if with_fA:
                Av, Rv, f_A = pt
                dust_prior_weight = dustpriors.get_weight(Av, Rv, f_A)
                Rv_MW = extLaw.get_Rv_A(Rv, f_A)
                r = g0.applyExtinctionLaw(extLaw,
                                          Av=Av,
                                          Rv=Rv,
                                          f_A=f_A,
                                          inplace=False)
                # add extra "spectral bands" if requested
                if add_spectral_properties_kwargs is not None:
                    r = add_spectral_properties(
                        r,
                        nameformat=nameformat,
                        filterLib=filterLib,
                        **add_spectral_properties_kwargs)
                temp_results = r.getSEDs(filter_names, filterLib=filterLib)
                # adding the dust parameters to the models
                cols["Av"][N0 * count:N0 * (count + 1)] = Av
                cols["Rv"][N0 * count:N0 * (count + 1)] = Rv
                cols["f_A"][N0 * count:N0 * (count + 1)] = f_A
                cols["Rv_A"][N0 * count:N0 * (count + 1)] = Rv_MW

            else:
                Av, Rv = pt
                dust_prior_weight = dustpriors.get_weight(Av, Rv, 1.0)
                r = g0.applyExtinctionLaw(extLaw, Av=Av, Rv=Rv, inplace=False)

                if add_spectral_properties_kwargs is not None:
                    r = add_spectral_properties(
                        r,
                        nameformat=nameformat,
                        filterLib=filterLib,
                        **add_spectral_properties_kwargs)
                temp_results = r.getSEDs(filter_names, filterLib=filterLib)
                # adding the dust parameters to the models
                cols["Av"][N0 * count:N0 * (count + 1)] = Av
                cols["Rv"][N0 * count:N0 * (count + 1)] = Rv

            # get new attributes if they exist
            for key in list(temp_results.grid.keys()):
                if key not in keys:
                    k1 = N0 * count
                    k2 = N0 * (count + 1)
                    cols.setdefault(key, np.empty(
                        N, dtype=float))[k1:k2] = temp_results.grid[key]

            # compute the fractional absflux covariance matrices
            if absflux_cov:
                absflux_covmats = calc_absflux_cov_matrices(
                    r, temp_results, filter_names)
                _cov_diag[N0 * count:N0 * (count + 1)] = absflux_covmats[0]
                _cov_offdiag[N0 * count:N0 * (count + 1)] = absflux_covmats[1]

            # assign the extinguished SEDs to the output object
            _seds[N0 * count:N0 * (count + 1)] = temp_results.seds[:]

            # copy the rest of the parameters
            for key in keys:
                cols[key][N0 * count:N0 * (count + 1)] = g0.grid[key]

            # multiply existing prior weights by the dust prior weight
            cols["weight"][N0 * count:N0 * (count + 1)] *= dust_prior_weight
            cols["prior_weight"][N0 * count:N0 *
                                 (count + 1)] *= dust_prior_weight

            if count == 0:
                cols["lamb"] = temp_results.lamb[:]

        _lamb = cols.pop("lamb")

        # free the memory of temp_results
        # del temp_results
        # del tempgrid

        # Ship
        if absflux_cov:
            g = SpectralGrid(
                _lamb,
                seds=_seds,
                cov_diag=_cov_diag,
                cov_offdiag=_cov_offdiag,
                grid=Table(cols),
                backend="memory",
            )
        else:
            g = SpectralGrid(_lamb,
                             seds=_seds,
                             grid=Table(cols),
                             backend="memory")

        g.grid.header["filters"] = " ".join(filter_names)

        yield g
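A hypothetical driver loop for the generator above; ext_law and filters are placeholders for a real extinction-law object (anything applyExtinctionLaw accepts that also exposes get_Rv_A) and a real filter-name list, and the grid values are illustrative.

import numpy as np

avs = np.arange(0.0, 5.05, 0.5)
rvs = np.arange(2.0, 6.05, 1.0)
fAs = np.arange(0.0, 1.05, 0.25)

for chunk in make_extinguished_grid(
        "spec_grid.hd5", filters, ext_law, avs, rvs, fAs=fAs, chunksize=0):
    # each chunk is a SpectralGrid; append it to one output file
    chunk.writeHDF("seds.grid.hd5", append=True)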
Example 6
def apply_distance_grid(specgrid, distances, redshift=0):
    """
    Distances are applied to the spectral grid by copying the grid and
    applying a scaling factor.

    Parameters
    ----------

    specgrid: grid.SpectralGrid object
        spectral grid to transform

    distances: list of astropy.units.Quantity
        distances at which the models should be shifted
        (any length unit; converted to pc internally)

    redshift: float
        redshift to which wavelengths should be shifted
        Default is 0 (rest frame)
    """
    g0 = specgrid

    # Make a singleton list if a single distance is given
    if not hasattr(distances, "__iter__"):
        _distances = [distances]
    else:
        _distances = distances

    # Current and final lengths of the grid
    N0 = len(g0.grid)
    N = N0 * len(_distances)

    # Add a distance column
    cols = {}
    cols["distance"] = np.empty(N, dtype=float)

    # Existing columns
    keys0 = list(g0.keys())
    for key in keys0:
        cols[key] = np.empty(N, dtype=float)

    n_sed_points = g0.seds.shape[1]
    new_seds = np.empty((N, n_sed_points), dtype=float)

    for count, distance in enumerate(tqdm(_distances, desc="Distance grid")):

        # The range where the current distance points will live
        distance_slice = slice(N0 * count, N0 * (count + 1))

        # The seds default to 10 pc.
        # Therefore, scale them with (d / (10 pc))**(-2).
        distance_pc = distance.to(units.pc).value
        new_seds[distance_slice, :] = g0.seds / (0.1 * distance_pc)**2

        # Fill in the distance in the distance column
        cols["distance"][distance_slice] = distance_pc

        # Copy the old columns
        for key in keys0:
            cols[key][distance_slice] = g0.grid[key]

    # apply redshift
    g0.lamb = g0.lamb * (1.0 + redshift)

    # New object
    g = SpectralGrid(g0.lamb,
                     seds=new_seds,
                     grid=Table(cols),
                     backend="memory")
    return g
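A usage sketch: the loop calls distance.to(units.pc), so the distances must be astropy quantities (values below are illustrative).

from astropy import units

distances = [45e3, 50e3, 55e3] * units.pc   # any length unit works
gdist = apply_distance_grid(specgrid, distances, redshift=0.0)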
Example 7
def trim_models(
    sedgrid,
    sedgrid_noisemodel,
    obsdata,
    sed_outname,
    noisemodel_outname,
    sigma_fac=3.0,
    n_detected=4,
    inFlux=True,
    trunchen=False,
):
    """
    For a given set of observations, there will be models that are so
    bright or faint that they will always have ~0 probability of fitting
    the data.  This program trims those models out of the SED grid
    so that time is not spent calculating model points that are always
    zero probability.

    Parameters
    ----------
    sedgrid: grid.SEDgrid instance
        model grid

    sedgrid_noisemodel: beast noisemodel instance
        noise model data

    obsdata: Observation object instance
        observation catalog

    sed_outname: str
        name for output sed file

    noisemodel_outname: str
        name for output noisemodel file

    sigma_fac: float
        factor for trimming the upper and lower range of grid so that
        the model range cuts off sigma_fac above and below the brightest
        and faintest models, respectively (default: 3.)

    n_detected: int
        minimum number of bands where ASTs yielded a detection for
        a given model, if fewer detections than n_detected this model
        gets eliminated (default: 4)

    inFlux: boolean
        if True, data are in fluxes (default: True)

    trunchen: boolean
        if True, use the trunchen noise model (default: False)
    """
    # Store the brightest and faintest fluxes in each band (for data and asts)
    n_filters = len(obsdata.filters)
    min_data = np.zeros(n_filters)
    max_data = np.zeros(n_filters)
    min_models = np.zeros(n_filters)
    max_models = np.zeros(n_filters)
    for k, filtername in enumerate(obsdata.filters):
        sfiltname = obsdata.data.resolve_alias(filtername)
        if inFlux:
            min_data[k] = np.amin(obsdata.data[sfiltname] *
                                  obsdata.vega_flux[k])
            max_data[k] = np.amax(obsdata.data[sfiltname] *
                                  obsdata.vega_flux[k])
        else:
            min_data[k] = np.amin(10**(-0.4 * obsdata.data[sfiltname]) *
                                  obsdata.vega_flux[k])
            max_data[k] = np.amax(10**(-0.4 * obsdata.data[sfiltname]) *
                                  obsdata.vega_flux[k])

        min_models[k] = np.amin(sedgrid.seds[:, k])
        max_models[k] = np.amax(sedgrid.seds[:, k])

    # first remove all models that have any band with fluxes below the
    #    faintest ASTs run
    # when the noisemodel was computed, models with fluxes below the
    #    faintest ASTs were tagged with a negative error/uncertainty
    # identify the models that have been detected in enough bands
    #   the idea here is that if the ASTs are not measured that means
    #   that *none* were recovered and this implies
    #   that no model with these values would be recovered and thus the
    #   probability should always be zero
    model_unc = sedgrid_noisemodel.root.error[:]
    above_ast = model_unc > 0
    sum_above_ast = np.sum(above_ast, axis=1)
    indxs, = np.where(sum_above_ast >= n_detected)

    # cache the noisemodel values
    model_bias = sedgrid_noisemodel.root.bias[:]
    model_unc = np.fabs(sedgrid_noisemodel.root.error[:])
    model_compl = sedgrid_noisemodel.root.completeness[:]
    if trunchen:
        model_q_norm = sedgrid_noisemodel.root.q_norm[:]
        model_icov_diag = sedgrid_noisemodel.root.icov_diag[:]
        model_icov_offdiag = sedgrid_noisemodel.root.icov_offdiag[:]

    if len(indxs) <= 0:
        raise ValueError("no models are brighter than the minimum ASTs run")

    n_ast_indxs = len(indxs)

    # Find models with fluxes (with margin) between faintest and brightest data
    for k in range(n_filters):
        print("working on filter # = ", k)

        # Get upper and lower values for the models given the noise model
        #  sigma_fac defaults to 3.
        model_val = sedgrid.seds[indxs, k] + model_bias[indxs, k]
        model_down = model_val - sigma_fac * model_unc[indxs, k]
        model_up = model_val + sigma_fac * model_unc[indxs, k]

        nindxs, = np.where((model_up >= min_data[k])
                           & (model_down <= max_data[k]))
        if len(nindxs) > 0:
            indxs = indxs[nindxs]

    if len(indxs) == 0:
        raise ValueError("no models that are within the data range")

    print("number of original models = ", len(sedgrid.seds[:, 0]))
    print("number of ast trimmed models = ", n_ast_indxs)
    print("number of trimmed models = ", len(indxs))

    # Save the grid
    print("Writing trimmed sedgrid to disk into {0:s}".format(sed_outname))
    cols = {}
    for key in list(sedgrid.grid.keys()):
        cols[key] = sedgrid.grid[key][indxs]

    # New column to save the index of the model in the full grid
    cols["fullgrid_idx"] = indxs.astype(int)
    g = SpectralGrid(sedgrid.lamb,
                     seds=sedgrid.seds[indxs],
                     grid=Table(cols),
                     backend="memory")
    filternames = obsdata.filters
    g.grid.header["filters"] = " ".join(filternames)

    # trimmed grid name
    g.writeHDF(sed_outname)

    # save the trimmed noise model
    print("Writing trimmed noisemodel to disk into {0:s}".format(
        noisemodel_outname))
    with tables.open_file(noisemodel_outname, "w") as outfile:
        outfile.create_array(outfile.root, "bias", model_bias[indxs])
        outfile.create_array(outfile.root, "error", model_unc[indxs])
        outfile.create_array(outfile.root, "completeness", model_compl[indxs])
        if trunchen:
            outfile.create_array(outfile.root, "q_norm", model_q_norm[indxs])
            outfile.create_array(outfile.root, "icov_diag",
                                 model_icov_diag[indxs])
            outfile.create_array(outfile.root, "icov_offdiag",
                                 model_icov_offdiag[indxs])
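The per-filter trimming above reduces to a window test: a model survives if its noise-broadened range [model_val - sigma_fac * unc, model_val + sigma_fac * unc] overlaps the observed flux range. A toy check with made-up numbers:

import numpy as np

min_data, max_data = 1e-19, 5e-17            # faintest/brightest observed flux
model_val = np.array([5e-20, 1e-18, 1e-16])  # bias-shifted model fluxes
model_unc = np.array([1e-20, 2e-19, 2e-17])
sigma_fac = 3.0

keep = ((model_val + sigma_fac * model_unc >= min_data)
        & (model_val - sigma_fac * model_unc <= max_data))
# keep -> [False, True, True]: the first model is too faint even at +3 sigma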
Example 8
    def _get_continuous_isochrone(self, *args, **kwargs):
        """ Return a resampled isochrone accounting for variations
            useful for continuous sampling
        """
        # define the maximum allowable difference between points
        dm = kwargs.pop("dm", 0.01)
        dt = kwargs.pop("dt", 0.01)
        dl = kwargs.pop("dl", 0.01)

        iso = self._get_isochrone(*args, **kwargs)
        logT, logg, logL, logM = (iso["logT"], iso["logg"], iso["logL"],
                                  iso["logM"])

        # compute vector of discrete derivatives for each quantity
        # and the final number of points
        npts = (np.abs(np.divide(np.diff(logM), dm))).astype(int)
        npts += (np.abs(np.divide(np.diff(logT), dt))).astype(int)
        npts += (np.abs(np.divide(np.diff(logL), dl))).astype(int)
        idx = np.hstack([[0], np.cumsum(npts + 1)])
        # set up vectors for storage
        ntot = (npts + 1).sum()
        newm = np.empty(ntot, dtype=float)
        newdm = np.empty(ntot, dtype=float)
        newt = np.empty(ntot, dtype=float)
        newg = np.empty(ntot, dtype=float)
        newl = np.empty(ntot, dtype=float)

        for i in range(len(npts)):
            a, b = idx[i], idx[i] + npts[i] + 1
            if npts[i] > 0:
                # construct new 1d grids in each dimension, being careful
                #   about endpoints
                # append them to storage vectors
                newm[a:b] = np.linspace(logM[i],
                                        logM[i + 1],
                                        npts[i] + 1,
                                        endpoint=False)
                newt[a:b] = np.linspace(logT[i],
                                        logT[i + 1],
                                        npts[i] + 1,
                                        endpoint=False)
                newg[a:b] = np.linspace(logg[i],
                                        logg[i + 1],
                                        npts[i] + 1,
                                        endpoint=False)
                newl[a:b] = np.linspace(logL[i],
                                        logL[i + 1],
                                        npts[i] + 1,
                                        endpoint=False)
                newdm[a:b] = (np.ones(npts[i] + 1) * (logM[i + 1] - logM[i]) /
                              (npts[i] + 1))
            else:
                # if the differences are below the allowed maxima,
                # just store the original point
                newm[a] = logM[i]
                newt[a] = logT[i]
                newg[a] = logg[i]
                newl[a] = logL[i]
                newdm[a] = logM[i + 1] - logM[i]
        # tack on the last point on the grid, as the loop is one element short
        newm[-1] = logM[-1]
        newt[-1] = logT[-1]
        newg[-1] = logg[-1]
        newl[-1] = logL[-1]
        newdm[-1] = logM[-1] - logM[-2]

        table = Table(
            dict(logM=newm, logT=newt, logg=newg, logL=newl, dlogm=newdm))

        for k in list(iso.header.keys()):
            table.header[k] = iso.header[k]

        table.header["NAME"] = "Resampled " + table.header["NAME"]

        table.header["dlogT"] = dt
        table.header["dlogM"] = dm
        table.header["dlogg"] = dl

        return table
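To see what the resampling rule does, here is a toy computation of npts: each pair of adjacent isochrone points gets extra samples in proportion to how much each resampled quantity jumps relative to its allowed step.

import numpy as np

logM = np.array([0.0, 0.05, 0.051])
dm = 0.01
npts = (np.abs(np.diff(logM) / dm)).astype(int)
# npts -> [5, 0]: five extra samples where logM jumps by 5*dm, none where
# the step is already below dm; logT and logL add their own counts the
# same way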
Example 9
    def _get_t_isochrone(self,
                         age,
                         metal=None,
                         FeH=None,
                         masses=None,
                         *args,
                         **kwargs):
        """ Retrieve isochrone from the original source
            internal use to adapt any library
        """
        # make sure unit is in years and then only give the value (no units)
        _age = int(units.Quantity(age, units.year).value)

        _logA = np.log10(_age)

        assert (metal is not None) or (FeH is not None), \
            "Need a chemical par. value."

        if (metal is not None) and (FeH is not None):
            print("Warning: both Z & [Fe/H] provided, ignoring [Fe/H].")

        if metal is None:
            metal = self.FeHtometal(FeH)

        if self.interpolation():
            # Do the actual nd interpolation

            # Maybe already exists?
            if (metal in self.Z) and (_age in self.ages):
                t = self.selectWhere(
                    "*",
                    "(round(Z, 6) == {0}) & (round(logA, 6) == {1})".format(
                        metal, _logA),
                )
                if t.nrows > 0:
                    return t
            # apparently not
            # find 2 closest metal values
            ca1 = self.ages <= _age
            ca2 = self.ages > _age
            cz1 = self.Z <= metal
            cz2 = self.Z > metal
            if metal in self.Z:
                # perfect match in metal, need to find ages
                if _age in self.ages:
                    return self.selectWhere(
                        "*",
                        "(round(Z, 6) == {0}) & (round(logA, 6) == {1})".
                        format(metal, _logA),
                    )
                elif ca1.any() and ca2.any():
                    # bracket on _age: closest values
                    a1, a2 = (
                        np.log10(max(self.ages[ca1])),
                        np.log10(min(self.ages[ca2])),
                    )
                    iso = self.selectWhere(
                        "*",
                        "(Z == {0}) & ((abs(logA - {1}) < 1e-4) | (abs(logA - {2}) < 1e-4))"
                        .format(metal, a1, a2),
                    )
                    if masses is None:
                        _logM = np.unique(iso["logM"])
                    else:
                        _logM = masses

                    # define interpolator
                    points = np.array([self[k]
                                       for k in "logA logM Z".split()]).T
                    values = np.array(
                        [self[k] for k in list(self.data.keys())]).T
                    _ifunc = interpolate.LinearNDInterpolator(points, values)

                    pts = np.array([(_logA, logMk, metal) for logMk in _logM])
                    r = _ifunc(pts)
                    return Table(r)
                else:
                    raise Exception("Age not covered by the isochrones")
            elif cz1.any() and cz2.any():
                # bracketing in Z is not implemented yet; falls through
                # and returns None
                pass
            return
        else:
            # find the closest match
            _Z = self.Z[((metal - self.Z)**2).argmin()]
            # _logA = np.log10(self.ages[((_age - self.ages) ** 2).argmin()])
            _logA = self.logages[((np.log10(_age) - self.logages)**2).argmin()]
            tab = self.data.selectWhere(
                "*", "(round(Z, 6) == {0}) & (round(logA,6) == {1})".format(
                    _Z, _logA))
            # mass selection
            if masses is not None:
                # masses are expected in logM for interpolation
                # if masses.max() > 2.3:
                #    _m = np.log10(masses)
                # else:
                _m = masses
                data_logM = tab["logM"][:]
                # refuse extrapolation!
                # ind = np.where(_m <= max(data_logM))
                data = {}
                for kn in list(tab.keys()):
                    data[kn] = interp(_m,
                                      data_logM,
                                      tab[kn],
                                      left=np.nan,
                                      right=np.nan)
                return Table(data)
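The age/mass/metallicity interpolation relies on scipy.interpolate.LinearNDInterpolator, which triangulates the sample points and evaluates piecewise-linear interpolants; queries outside the convex hull return NaN. A self-contained illustration:

import numpy as np
from scipy import interpolate

points = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
values = points.sum(axis=1)               # f(x, y) = x + y
f = interpolate.LinearNDInterpolator(points, values)
f([[0.5, 0.5]])   # -> array([1.]): exact, since f is linear
f([[2.0, 2.0]])   # -> array([nan]): outside the convex hull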
Example 10
    def _load_table_(self, source):
        self.data = Table(source).selectWhere("*", "isfinite(logA)")
Example 11
class ezIsoch(Isochrone):
    """ Trying to make something that is easy to manipulate
    This class is basically a proxy to a table (whatever format works best)
    and tries to keep things coherent.
    """
    def __init__(self, source, interp=False):
        super().__init__()
        self.name = "<auto>"
        self.source = source
        self._load_table_(self.source)
        # round because of precision noise
        self.logages = np.unique(np.round(self.data["logA"], 6))
        self.ages = np.round(10**self.logages)
        self.Z = np.unique(np.round(self.data["Z"], 6))
        self.interpolation(interp)

    def selectWhere(self, *args, **kwargs):
        return self.data.selectWhere(*args, **kwargs)

    def interpolation(self, b=None):
        if b is not None:
            if hasattr(self, "interp"):
                print("Do not use interpolation yet, at your own risks!!")
            self.interp = bool(b)
        else:
            return self.interp

    def _load_table_(self, source):
        self.data = Table(source).selectWhere("*", "isfinite(logA)")

    def __getitem__(self, key):
        return self.data[key]

    def _get_t_isochrone(self,
                         age,
                         metal=None,
                         FeH=None,
                         masses=None,
                         *args,
                         **kwargs):
        """ Retrieve isochrone from the original source
            internal use to adapt any library
        """
        # make sure unit is in years and then only give the value (no units)
        _age = int(units.Quantity(age, units.year).value)

        _logA = np.log10(_age)

        assert (metal is not None) or (FeH is not None), \
            "Need a chemical par. value."

        if (metal is not None) and (FeH is not None):
            print("Warning: both Z & [Fe/H] provided, ignoring [Fe/H].")

        if metal is None:
            metal = self.FeHtometal(FeH)

        if self.interpolation():
            # Do the actual nd interpolation

            # Maybe already exists?
            if (metal in self.Z) and (_age in self.ages):
                t = self.selectWhere(
                    "*",
                    "(round(Z, 6) == {0}) & (round(logA, 6) == {1})".format(
                        metal, _logA),
                )
                if t.nrows > 0:
                    return t
            # apparently not
            # find 2 closest metal values
            ca1 = self.ages <= _age
            ca2 = self.ages > _age
            cz1 = self.Z <= metal
            cz2 = self.Z > metal
            if metal in self.Z:
                # perfect match in metal, need to find ages
                if _age in self.ages:
                    return self.selectWhere(
                        "*",
                        "(round(Z, 6) == {0}) & (round(logA, 6) == {1})".
                        format(metal, _logA),
                    )
                elif ca1.any() and ca2.any():
                    # bracket on _age: closest values
                    a1, a2 = (
                        np.log10(max(self.ages[ca1])),
                        np.log10(min(self.ages[ca2])),
                    )
                    iso = self.selectWhere(
                        "*",
                        "(Z == {0}) & ((abs(logA - {1}) < 1e-4) | (abs(logA - {2}) < 1e-4))"
                        .format(metal, a1, a2),
                    )
                    if masses is None:
                        _logM = np.unique(iso["logM"])
                    else:
                        _logM = masses

                    # define interpolator
                    points = np.array([self[k]
                                       for k in "logA logM Z".split()]).T
                    values = np.array(
                        [self[k] for k in list(self.data.keys())]).T
                    _ifunc = interpolate.LinearNDInterpolator(points, values)

                    pts = np.array([(_logA, logMk, metal) for logMk in _logM])
                    r = _ifunc(pts)
                    return Table(r)
                else:
                    raise Exception("Age not covered by the isochrones")
            elif cz1.any() and cz2.any():
                # bracketing in Z is not implemented yet; falls through
                # and returns None
                pass
            return
        else:
            # find the closest match
            _Z = self.Z[((metal - self.Z)**2).argmin()]
            # _logA = np.log10(self.ages[((_age - self.ages) ** 2).argmin()])
            _logA = self.logages[((np.log10(_age) - self.logages)**2).argmin()]
            tab = self.data.selectWhere(
                "*", "(round(Z, 6) == {0}) & (round(logA,6) == {1})".format(
                    _Z, _logA))
            # mass selection
            if masses is not None:
                # masses are expected in logM for interpolation
                # if masses.max() > 2.3:
                #    _m = np.log10(masses)
                # else:
                _m = masses
                data_logM = tab["logM"][:]
                # refuse extrapolation!
                # ind = np.where(_m <= max(data_logM))
                data = {}
                for kn in list(tab.keys()):
                    data[kn] = interp(_m,
                                      data_logM,
                                      tab[kn],
                                      left=np.nan,
                                      right=np.nan)
                return Table(data)
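A hypothetical round trip with ezIsoch; the source file name is illustrative, and the age is given in years, matching units.Quantity(age, units.year) above.

iso = ezIsoch("isochrones.hd5")               # any table readable by Table
print(iso.Z, iso.logages)                     # available Z values and log ages
t = iso._get_t_isochrone(1.0e8, metal=0.02)   # closest-match 100 Myr isochrone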
Example 12
    def _get_isochrone(self,
                       age,
                       metal=None,
                       FeH=None,
                       masses=None,
                       *args,
                       **kwargs):
        """ Retrieve isochrone from the original source
            internal use to adapt any library
        """
        # make sure unit is in years and then only give the value (no units)
        _age = int(units.Quantity(age, units.year).value)

        assert (metal is not None) or (FeH is not None), \
            "Need a chemical par. value."

        if (metal is not None) and (FeH is not None):
            print("Warning: both Z & [Fe/H] provided, ignoring [Fe/H].")

        if metal is None:
            metal = self.FeHtometal(FeH)

        assert metal in self.Z, "Metal %f not found in %s" % (metal, self.Z)

        data = {}
        if _age in self.ages:
            # no interpolation, isochrone already in the file
            t = self.data.getNode("/Z" + str(metal)[2:] + "/a" + str(_age))
            for kn in t.colnames:
                data[kn] = t.col(kn)
        else:
            # interpolate between isochrones
            d = (self.ages - float(_age))**2
            a1, a2 = np.sort(self.ages[np.argsort(d)[:2]] * 1e-6)
            # print "Warning: Interpolation between %d and %d Myr" % (a1, a2)
            r = np.log10(_age / a1) / np.log10(a2 / a1)

            t1 = self.data.getNode("/Z" + str(metal)[2:] + "/a" + str(int(a1)))
            t2 = self.data.getNode("/Z" + str(metal)[2:] + "/a" + str(int(a2)))

            stop = min(t1.nrows, t2.nrows)

            for kn in t1.colnames:
                y2 = t2.col(kn)[:stop]
                y1 = t1.col(kn)[:stop]
                data[kn] = y2 * r + y1 * (1.0 - r)
                del y1, y2

        # mass selection
        if masses is not None:
            # masses are expected in logM for interpolation
            if masses.max() > 2.3:
                _m = np.log10(masses)
            else:
                _m = masses
            data_logM = data["logM"][:]
            for kn in data:
                data[kn] = interp(_m, data_logM, data[kn])

        table = Table(data, name="Isochrone from %s" % self.name)
        table.header["metal"] = metal
        table.header["time"] = _age * 1e6
        return table
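The blending weight used above is linear in log10(age): r = log10(age / a1) / log10(a2 / a1), so r = 0 at the younger bracketing isochrone and r = 1 at the older one. A quick check with illustrative ages:

import numpy as np

a1, a2 = 100.0, 200.0                         # bracketing ages
age = np.sqrt(a1 * a2)                        # geometric mean, ~141.4
r = np.log10(age / a1) / np.log10(a2 / a1)    # -> 0.5: halfway in log space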
Example 13
    def _get_isochrone(self,
                       age,
                       metal=None,
                       FeH=None,
                       masses=None,
                       *args,
                       **kwargs):
        """ Retrieve isochrone from the original source
            internal use to adapt any library
        """
        # make sure unit is in years and then only give the value (no units)
        _age = int(units.Quantity(age, units.year).value)

        assert (metal is not None) or (FeH is not None), \
            "Need a chemical par. value."

        if (metal is not None) and (FeH is not None):
            print("Warning: both Z & [Fe/H] provided, ignoring [Fe/H].")

        if metal is None:
            metal = self.FeHtometal(FeH)

        assert metal in self.Z, "Metal %f not found in %s" % (metal, self.Z)

        data = {}
        t = self.data.selectWhere("*", "(Z == _z)", condvars={"_z": metal})
        if _age in self.ages:
            # no interpolation, isochrone already in the file
            t = t.selectWhere("*",
                              "(logA == _age)",
                              condvars={"_age": log10(_age)})
            for kn in list(t.keys()):
                data[kn] = np.asarray(t[kn])
        else:
            # interpolate between isochrones
            d = (self.ages - float(_age))**2
            a1, a2 = self.ages[np.argsort(d)[:2]]
            # print "Warning: Interpolation between %d and %d Myr" % (a1, a2)
            r = np.log10(_age / a1) / np.log10(a2 / a1)

            t1 = t.selectWhere("*",
                               "logA == _age",
                               condvars={"_age": log10(a1)})
            t2 = t.selectWhere("*",
                               "logA == _age",
                               condvars={"_age": log10(a2)})

            stop = min(t1.nrows, t2.nrows)

            for kn in list(t1.keys()):
                y2 = t2[kn][:stop]
                y1 = t1[kn][:stop]
                data[kn] = y2 * r + y1 * (1.0 - r)
                del y1, y2

        # mass selection
        if masses is not None:
            # masses are expected in logM for interpolation
            if masses.max() > 2.3:
                _m = np.log10(masses)
            else:
                _m = masses
            data_logM = data["logM"][:]
            for kn in data:
                data[kn] = interp(_m, data_logM, data[kn])

        del t
        table = Table(data, name="Isochrone from %s" % self.name)
        table.header["metal"] = metal
        table.header["time"] = _age
        return table
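The mass resampling at the end is plain 1-D linear interpolation; with left and right set to np.nan (here interp is presumably numpy.interp), masses outside the tabulated logM range come back as NaN instead of being extrapolated.

import numpy as np

data_logM = np.array([0.0, 0.5, 1.0])
logT = np.array([3.6, 3.7, 3.9])
np.interp([0.25, 2.0], data_logM, logT, left=np.nan, right=np.nan)
# -> array([3.65, nan]): the second requested mass is outside the grid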
Example 14
class CacheBackend(GridBackend):
    """CacheBackend -- Load content from a file only when needed

    The key idea is to be able to load the content only at the first query

    Currently the grid attribute is an eztable.Table object as it was before.
    """
    def __init__(self, fname, *args, **kwargs):
        """__init__

        Parameters
        ----------

        fname: str
            FITS or HD5 file containing the grid
        """
        super().__init__()

        self.fname = fname
        self._type = self._get_type(fname)
        self.clear()

    def clear(self, attrname=None):
        """clear current cache

        Parameters
        ----------

        attrname: str in [lamb, filters, grid, header, seds]
            if provided, clear only that attribute;
            otherwise the whole cache is erased
        """
        if attrname is None:
            self._seds = None
            self._lamb = None
            self._filters = None
            self._grid = None
            self._header = None
        else:
            setattr(self, "_{0}".format(attrname), None)

    def _load_seds(self, fname):
        """load_seds - load seds"""
        if self._seds is None:
            if self._get_type(fname) == "fits":
                with pyfits.open(fname) as f:
                    self._seds = f[0].data[:-1]

            elif self._get_type(fname) == "hdf":
                with HDFStore(self.fname, mode="r") as s:
                    self._seds = s["/seds"].read()

    def _load_lamb(self, fname):
        """load_seds - load wavelength"""
        if self._lamb is None:
            if self._get_type(fname) == "fits":
                with pyfits.open(fname) as f:
                    self._lamb = f[0].data[-1]

            elif self._get_type(fname) == "hdf":
                with HDFStore(self.fname, mode="r") as s:
                    self._lamb = s["/lamb"].read()

    def _load_grid(self, fname):
        """load_grid - load grid table"""
        # load_seds - load wavelength and seds
        if self._grid is None:
            if self._get_type(fname) == "fits":
                self._grid = Table(fname)

            elif self._get_type(fname) == "hdf":
                self._grid = Table(fname, tablename="/grid")

    def _load_filters(self, fname):
        """load_filters -- load only filters"""
        if self._filters is None:
            if self._type == "fits":
                with pyfits.open(self.fname) as f:
                    self._filters = f[1].header.get(
                        "FILTERS", None) or f[1].header.get("filters", None)
                    if self._filters is not None:
                        self._filters = self._filters.split()
            elif self._type == "hdf":
                self._filters = self.header.get(
                    "FILTERS", None) or self.header.get("filters", None)
                if self._filters is not None:
                    self._filters = self._filters.split()

    @property
    def seds(self):
        """seds - load in cache if needed """
        self._load_seds(self.fname)
        return self._seds

    @seds.setter
    def seds(self, value):
        """ replace seds value """
        self._seds = value

    @property
    def lamb(self):
        """lamb - load in cache if needed """
        self._load_lamb(self.fname)
        return self._lamb

    @lamb.setter
    def lamb(self, value):
        """ replace seds value """
        self._lamb = value

    @property
    def grid(self):
        """grid - load in cache if needed """
        self._load_grid(self.fname)
        return self._grid

    @grid.setter
    def grid(self, value):
        """ replace seds value """
        self._grid = value

    @property
    def header(self):
        """header - load in cache if needed """
        self._load_grid(self.fname)
        return self._grid.header

    @header.setter
    def header(self, value):
        """ replace seds value """
        self._header = value

    @property
    def filters(self):
        """filters - load in cache if needed """
        self._load_filters(self.fname)
        return self._filters

    @filters.setter
    def filters(self, value):
        """ replace seds value """
        self._filters = value

    def keys(self):
        """ return column names when possible, avoid loading when possible """
        if hasattr(self._grid, "coldescrs"):
            return list(self._grid.coldescrs.keys())
        elif hasattr(self._grid, "keys"):
            return list(self._grid.keys())
        elif hasattr(self.grid, "keys"):
            return list(self.grid.keys())
        else:
            return []

    def writeFITS(self, fname, *args, **kwargs):
        """write -- export to fits file

        Parameters
        ----------

        fname: str
            filename (incl. path) to export to
        """
        if (self.lamb is not None and self.seds is not None
                and self.grid is not None):
            if not isinstance(self.grid, Table):
                raise TypeError("Only eztables.Table are supported so far")
            r = numpy.vstack([self.seds, self.lamb])
            pyfits.writeto(fname, r, **kwargs)
            del r
            if getattr(self, "filters", None) is not None:
                if "FILTERS" not in list(self.grid.header.keys()):
                    self.grid.header["FILTERS"] = " ".join(self.filters)
            self.grid.write(fname, append=True)

    def writeHDF(self, fname, append=False, *args, **kwargs):
        """write -- export to HDF file

        Parameters
        ----------

        fname: str
            filename (incl. path) to export to

        append: bool, optional (default False)
            if set, it will append data to each Array or Table
        """
        if (self.lamb is not None and self.seds is not None
                and self.grid is not None):
            if not isinstance(self.grid, Table):
                raise TypeError("Only eztables.Table are supported so far")
            with HDFStore(fname, mode="a") as hd:
                if not append:
                    hd["/seds"] = self.seds[:]
                    hd["/lamb"] = self.lamb[:]
                else:
                    try:
                        node = hd.get_node("/seds")
                        node.append(self.seds[:])
                    except Exception:
                        hd["/seds"] = self.seds[:]
                        hd["/lamb"] = self.lamb[:]
            if getattr(self, "filters", None) is not None:
                if "FILTERS" not in list(self.grid.header.keys()):
                    self.grid.header["FILTERS"] = " ".join(self.filters)
            self.grid.write(fname, tablename="grid", append=True)

    def copy(self):
        """ implement a copy method """
        g = CacheBackend(self.fname)
        g._aliases = copy.deepcopy(self._aliases)
        if self._grid is not None:
            g._grid = copy.deepcopy(self._grid)
        if self._seds is not None:
            g._seds = copy.deepcopy(self._seds)
        if self._lamb is not None:
            g._lamb = copy.deepcopy(self._lamb)
        if self._header is not None:
            g._header = copy.deepcopy(self._header)
        if self._filters is not None:
            g._filters = copy.deepcopy(self._filters)

        return g
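A sketch of the lazy-loading behaviour; the file name is illustrative. Nothing is read at construction: each property reads from disk on first access and is then served from the cache until clear() drops it.

g = CacheBackend("seds.grid.hd5")
waves = g.lamb        # first access: reads "/lamb" from the file
waves = g.lamb        # second access: served from the in-memory cache
g.clear("lamb")       # drop only the cached wavelengths
g.clear()             # or drop everything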
Example 15
class MemoryBackend(GridBackend):
    """ Instanciate an grid object that has no physical storage
        Helps to create new grids on the fly. Because it deriveds from
        ModelGrid, this can be exported on disk too.
    """
    def __init__(
        self,
        lamb,
        seds=None,
        grid=None,
        cov_diag=None,
        cov_offdiag=None,
        header={},
        aliases={},
    ):
        """__init__

        Parameters
        ----------

        lamb: ndarray or GridBackend subclass
            if ndarray: wavelength of the SEDs (requires seds and grid
                                                arguments)
            if backend: ref to the given grid

        seds: ndarray[dtype=float, ndim=2]
            array of seds

        grid: eztable.Table
            table of properties associated to each sed

        header: dict
            if provided, update the grid table header

        aliases: dict
            if provided, update the grid table aliases

        """
        super().__init__()

        # read from various formats
        if isinstance(lamb, HDFBackend):
            self._from_HDFBackend(lamb)
        elif isNestedInstance(lamb, GridBackend):
            self._from_GridBackend(lamb)
        elif isinstance(lamb, str):
            self._from_File(lamb)
        else:
            if (seds is None) or (grid is None):
                raise ValueError("Wrong number of arguments")
            self.lamb = lamb
            self.seds = seds
            self.grid = grid
            if (cov_diag is not None) and (cov_offdiag is not None):
                print("including cov diag and offdiag")
                self.cov_diag = cov_diag
                self.cov_offdiag = cov_offdiag
            else:
                self.cov_diag = None
                self.cov_offdiag = None

        # update header
        if self._header is None:
            self._header = header
        else:
            for k, v in list(header.items()):
                self.grid.header[k] = v

        # update aliases
        self._aliases.update(aliases)
        self.fname = ":memory:"

    @property
    def filters(self):
        """filters"""
        r = self._header.get("filters", None) or self._header.get(
            "FILTERS", None)
        if r is not None:
            r = r.split()
        return r

    @property
    def header(self):
        return self._header

    def _from_File(self, fname):
        """_from_File -- load the content of a FITS or HDF file

        Parameters
        ----------

        fname: str
            filename (incl. path) to read from
        """

        # load_seds - load wavelength and seds
        if self._get_type(fname) == "fits":
            with pyfits.open(fname) as f:
                self.seds = f[0].data[:-1]
                self.lamb = f[0].data[-1]
            self.grid = Table(fname)

        elif self._get_type(fname) == "hdf":
            with HDFStore(fname, mode="r") as s:
                self.seds = s["/seds"].read()
                self.lamb = s["/lamb"].read()
                try:
                    self.cov_diag = s["/covdiag"].read()
                except Exception:
                    self.cov_diag = None
                try:
                    self.cov_offdiag = s["/covoffdiag"].read()
                except Exception:
                    self.cov_offdiag = None
            self.grid = Table(fname, tablename="/grid")

        self._header = self.grid.header

    def writeFITS(self, fname, *args, **kwargs):
        """write -- export to fits file

        Parameters
        ----------

        fname: str
            filename (incl. path) to export to
        """
        if (self.lamb is not None and self.seds is not None
                and self.grid is not None):
            if not isinstance(self.grid, Table):
                raise TypeError("Only eztables.Table are supported so far")
            r = numpy.vstack([self.seds, self.lamb])
            pyfits.writeto(fname, r, **kwargs)
            if getattr(self, "filters", None) is not None:
                if "FILTERS" not in list(self.grid.header.keys()):
                    self.grid.header["FILTERS"] = " ".join(self.filters)
            self.grid.write(fname, append=True)

    def writeHDF(self, fname, append=False, *args, **kwargs):
        """write -- export to HDF file

        Parameters
        ----------

        fname: str
            filename (incl. path) to export to

        append: bool, optional (default False)
            if set, it will append data to each Array or Table
        """
        if (self.lamb is not None and self.seds is not None
                and self.grid is not None):
            if not isinstance(self.grid, Table):
                raise TypeError("Only eztables.Table are supported so far")
            with HDFStore(fname, mode="a") as hd:
                if not append:
                    hd["/seds"] = self.seds[:]
                    hd["/lamb"] = self.lamb[:]
                    if self.cov_diag is not None:
                        hd["/covdiag"] = self.cov_diag[:]
                    if self.cov_offdiag is not None:
                        hd["/covoffdiag"] = self.cov_offdiag[:]
                else:
                    try:
                        node = hd.get_node("/seds")
                        node.append(self.seds[:])
                    except Exception:
                        hd["/seds"] = self.seds[:]
                        hd["/lamb"] = self.lamb[:]
                        if self.cov_diag is not None:
                            hd["/covdiag"] = self.cov_diag[:]
                        if self.cov_offdiag is not None:
                            hd["/covoffdiag"] = self.cov_offdiag[:]
            if getattr(self, "filters", None) is not None:
                if "FILTERS" not in list(self.grid.header.keys()):
                    self.grid.header["FILTERS"] = " ".join(self.filters)
            self.grid.write(fname, tablename="grid", append=True)

    def copy(self):
        """ implement a copy method """
        g = MemoryBackend(
            copy.deepcopy(self.lamb),
            seds=copy.deepcopy(self.seds),
            grid=copy.deepcopy(self.grid),
            cov_diag=copy.deepcopy(self.cov_diag),
            cov_offdiag=copy.deepcopy(self.cov_offdiag),
        )
        g._filters = copy.deepcopy(self._filters)
        g._header = copy.deepcopy(self._header)
        g._aliases = copy.deepcopy(self._aliases)
        return g
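A construction sketch, assuming Table (eztables) accepts a plain dict of columns as in the snippets above; all values are illustrative.

import numpy as np

lamb = np.linspace(1e3, 3e4, 100)
seds = np.random.rand(10, 100)
props = Table({"logT": np.random.uniform(3.5, 4.5, 10)})

g = MemoryBackend(lamb, seds=seds, grid=props, header={"comment": "demo"})
g.writeHDF("memory_grid.hd5")   # in-memory grids can still be exported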