Example #1
def ini_periods(self, **kwargs):
    datei = self.datei
    datef = self.datef
    nho = self.nho
    nhours = self.nhours

    # List of sub-simulation windows
    self.subsimu_dates = date_range(datei, datef, period=self.periods)

    # Time steps defined by the user
    nphour_ref = self.nphour_ref

    # Number of time steps per hour actually used in the simulation
    self.tstep_dates = {}
    self.tstep_all = []
    self.nhour = []
    self.subtstep = []
    self.input_dates = {}

    for dd in self.subsimu_dates[:-1]:
        # time-steps in METEO.nc, computed by diagmet
        sdc = dd.strftime("%Y%m%d%H")
        met = "{}/METEO.{}.{}.nc".format(self.meteo.dir, sdc, nho)
        nbstep = readnc(met, ["nphourm"]).astype(int)

        # Loop on hours and check CFL
        self.tstep_dates[dd] = []
        for nh in range(nhours):
            nphour = nbstep[nh] if nphour_ref < nbstep[nh] else nphour_ref

            # Saving substep indexes for matching with observation
            self.subtstep.extend(list(range(1, nphour + 1)))
            self.nhour.extend(nphour * [nh + 1])

            # Frequency in seconds
            # TODO: Check with FORTRAN: nphour rounding?
            freq = "{}s".format(int(3600 // nphour))

            # List of time steps
            # TODO: what about chemical time steps?
            # the time step actually used is nphour * ichemstep
            ddhi = dd + datetime.timedelta(hours=nh)
            ddhe = dd + datetime.timedelta(hours=nh + 1)
            drange = list(pd.date_range(ddhi, ddhe, freq=freq).to_pydatetime())
            self.tstep_dates[dd].extend(drange[:-1])
            self.tstep_all.extend(drange[:-1])

        # List of dates for which inputs are needed
        self.input_dates[dd] = pd.date_range(dd, periods=nhours + 1,
                                             freq="1H").to_pydatetime()

        # Include last time step
        self.tstep_dates[dd].append(ddhe)
        self.tstep_dates[dd] = np.array(self.tstep_dates[dd])

    # Include very last time step
    self.tstep_all.append(ddhe)
    self.tstep_all = np.array(self.tstep_all)
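
The heart of the loop above is turning a per-hour number of sub-steps (the larger of the user request and the CFL-driven value read from METEO.nc) into a list of sub-hourly time stamps. Below is a minimal standalone sketch of that logic, with hard-coded step counts standing in for the nphourm variable normally read with readnc:

import datetime
import numpy as np
import pandas as pd

nphour_ref = 2                       # user-requested sub-steps per hour
nbstep = np.array([1, 3, 2])         # CFL-driven sub-steps (made-up values)
dd = datetime.datetime(2019, 1, 1)   # start of the sub-simulation window

tsteps = []
for nh, ncfl in enumerate(nbstep):
    # Keep the more restrictive (larger) number of sub-steps
    nphour = max(nphour_ref, int(ncfl))
    freq = "{}s".format(int(3600 // nphour))
    ddhi = dd + datetime.timedelta(hours=nh)
    ddhe = dd + datetime.timedelta(hours=nh + 1)
    drange = pd.date_range(ddhi, ddhe, freq=freq).to_pydatetime()
    tsteps.extend(drange[:-1])        # drop the hour's end to avoid duplicates

tsteps.append(ddhe)                   # close the window with the very last step
tsteps = np.array(tsteps)
print(tsteps)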
Example #2
def read(
    self,
    name,
    tracdir,
    tracfile,
    varnames,
    dates,
    interpol_flx=False,
    tracer=None,
    model=None,
    filetypes=["defstoke", "fluxstoke", "fluxstokev", "phystoke"],
    **kwargs
):
    """Reads meteorology and links to the working directory

    Args:
        meteo (dictionary): dictionary defining the domain. Should include
        dirmeteo to be able to read the meteorology
        datei (datetime.datetime): initial date for the inversion window
        datef (datetime.datetime): end date for the inversion window
        workdir (str): path to the working directory where meteo files
                       should be copied
        logfile (str): path to the log file
        filetypes ([str]): list of file radicals to copy in the working
                           directory
        **kwargs (dictionary): extra arguments

    Return:
        ????????

    Notes: At some point, include option to compute mass fluxes for LMDz,
    with different physics What is needed to do that? Possible only on CCRT?
    Flexibility to define new domains Can be very heavy and not necessarily
    relevant

    """

    for date in dates:
        for filetype in filetypes:
            meteo_file = "{}.an{}.m{:02d}.nc".format(
                filetype, date.year, date.month
            )

            if filetype == "defstoke" and not os.path.isfile(
                "{}/{}".format(tracdir, meteo_file)
            ):
                meteo_file = filetype + ".nc"

            target = "{}/{}".format(tracdir, meteo_file)

            # Loading information on time steps
            if filetype == "defstoke" and not hasattr(self, "offtstep"):
                met_vars = readnc(target, ["dtvr", "istdyn"])
                offtstep = met_vars[0][0, 0] * met_vars[1][0, 0]
                self.offtstep = offtstep
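
The loop above expects monthly LMDz mass-flux files named <filetype>.anYYYY.mMM.nc (with a plain defstoke.nc fallback) and derives the off-line time-step length from the two scalars stored in the defstoke file. A small illustration; the dtvr and istdyn values below are made up, not taken from a real file:

import datetime

date = datetime.datetime(2019, 7, 15)
for filetype in ["defstoke", "fluxstoke", "fluxstokev", "phystoke"]:
    print("{}.an{}.m{:02d}.nc".format(filetype, date.year, date.month))
# defstoke.an2019.m07.nc, fluxstoke.an2019.m07.nc, ...

# Time-step information as computed above (illustrative values only)
dtvr, istdyn = 180.0, 10
offtstep = dtvr * istdyn   # -> 1800.0, stored as self.offtstep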
Example #3
def read(self, name, tracdir, tracfile, varnames, dates,
         interpol_flx=False, **kwargs):
    """Get fluxes from pre-computed fluxes and load them into a pycif
    variables

    Args:
        self: the model Plugin
        name: the name of the component
        tracdir, tracfic: flux directory and file format
        dates: list of dates to extract
        interpol_flx (bool): if True, interpolates fluxes at time t from
        values of surrounding available files

    """

    # Todo: can probably just get years in range dates[0], dates[-1]
    list_fic_flx = np.unique([dd.strftime(tracfile) for dd in dates])

    # Reading fluxes for periods within the simulation window
    trcr_flx = np.empty((0, *self.domain.zlat.shape), dtype=float)
    times = []
    for file_flx in list(list_fic_flx):
        # First load inside domain
        data, lat, lon, time_jd = \
            readnc(os.path.join(tracdir, file_flx),
                   [self.varname_flx, self.latname_flx,
                    self.lonname_flx, self.timename_flx])

        # Convert julian day (since 1-1-1900) to datetime
        times.extend(
            [datetime.datetime(1900, 1, 1) + datetime.timedelta(int(t))
             for t in time_jd]
        )

        # Convert to ng/m2/s
        numscale = float(getattr(self, 'numscale', 1.E12))
        data *= numscale / 3600.

        # Extract only data covering the inversion region
        ix0 = np.argmin(np.abs(lon - self.domain.lon_in[0]))
        iy0 = np.argmin(np.abs(lat - self.domain.lat_in[0]))

        flx_reg_in = data[:, iy0:iy0 + self.domain.nlat,
                          ix0:ix0 + self.domain.nlon]
        flx_reg_in = flx_reg_in.reshape(flx_reg_in.shape[0], -1)
        
        # Loading outside data
        out_file = os.path.join(
            os.path.dirname(os.path.join(tracdir, file_flx)),
            times[0].strftime(self.file_glob)
        )
        data, lat, lon, time_jd = \
            readnc(out_file,
                   [self.varname_flx, self.latname_flx,
                    self.lonname_flx, self.timename_flx])

        # Extract data outside nest domain
        flx_reg_out = \
            np.delete(data.reshape(data.shape[0], -1),
                      self.domain.raveled_indexes_glob, 1)
        
        # Concatenate
        trcr_flx = np.append(
            trcr_flx,
            np.append(flx_reg_in, flx_reg_out, axis=1)[:, :, np.newaxis],
            axis=0)
    
    # Put data into a DataArray
    xmod = xr.DataArray(trcr_flx[:, np.newaxis],
                        coords={'time': np.array(times)},
                        dims=('time', 'lev', 'lat', 'lon'))
    
    # Reindex to required dates
    xmod = reindex(xmod, levels={"time": dates.astype(np.datetime64)})

    # TODO: take care if several files are read
    # TODO: scale flux contribution by area weight for boxes
    # TODO: consider storing fluxes at original time resolution and
    #       interpolate as needed
    #
    # flx = np.ndarray((self.ndates, self.domain.nlat, self.domain.nlon))
    #
    # # Interpolate fluxes to start time of control period
    # for ddt in range(self.ndates):
    #     if interpol_flx:
    #         flx[ddt, :, :] = xmod.interp(time=self.dates[ddt])
    #     else:
    #         flx[ddt, :, :] = xmod.sel(time=self.dates[ddt], method='nearest')

    return xmod
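
Extracting the nested-domain block relies on locating, in the global grid, the indexes closest to the first corner of the inversion region (self.domain.lon_in[0], self.domain.lat_in[0]). A standalone sketch of that lookup with made-up coordinates and domain size:

import numpy as np

lon = np.arange(-180., 180., 1.)   # global grid longitudes (made up)
lat = np.arange(-90., 91., 1.)     # global grid latitudes (made up)
lon_in0, lat_in0 = 5.3, 43.7       # first corner of the nested domain
nlon, nlat = 101, 71               # nested domain size

ix0 = np.argmin(np.abs(lon - lon_in0))   # closest global column
iy0 = np.argmin(np.abs(lat - lat_in0))   # closest global row
print(ix0, iy0)
# The block covering the region is then data[:, iy0:iy0 + nlat, ix0:ix0 + nlon]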
Example #4
def read_glob(self,
              name,
              tracdir,
              tracfic,
              dates,
              interpol_flx=False,
              **kwargs):
    """Get global fluxes from pre-computed fluxes and load them into a pycif
    variables

    Args:
        self: the model Plugin
        name: the name of the component
        tracdir, tracfic: flux directory and file format
        dates: list of dates to extract
        interpol_flx (bool): if True, interpolates fluxes at time t from
        values of surrounding available files

    Note:
        This was originally copied from ../flexpart/read.py. May eventually 
        be moved to a different plugin

    """

    # Available files in the directory
    list_files = os.listdir(tracdir)
    dates_available = []
    for fic in list_files:
        try:
            dates_available.append(datetime.datetime.strptime(fic, tracfic))
        except ValueError:
            continue

    dates_available = np.array(dates_available)

    # Todo: can probably just get years in range dates[0], dates[-1]
    list_fic_flx = np.unique([dd.strftime(tracfic) for dd in dates])

    # Reading fluxes for periods within the simulation window
    trcr_flx = []
    times = []
    for file_flx in list(list_fic_flx):
        data, lat, lon, time_jd = readnc(os.path.join(tracdir, file_flx), [
            self.varname_flx, self.latname_flx, self.lonname_flx,
            self.timename_flx
        ])

        # Convert julian day (since 1-1-1900) to datetime
        for t in time_jd:
            times.append(
                datetime.datetime(1900, 1, 1) + datetime.timedelta(int(t)))

        # Convert to ng/m2/s
        numscale = float(getattr(self, 'numscale', 1.E12))
        data *= numscale / 3600.

        trcr_flx.append(data[:, :, :])

    xmod = xr.DataArray(trcr_flx[0],
                        coords={'time': times},
                        dims=('time', 'lat', 'lon'))

    # TODO: take care if several files are read
    # TODO: scale flux contribution by area weight for boxes
    # TODO: consider storing fluxes at original time resolution and
    #       interpolate as needed

    flx = np.ndarray(
        (self.ndates, self.domain.nlat_glob, self.domain.nlon_glob))

    # Interpolate fluxes to start time of control period
    for ddt in range(self.ndates):
        if interpol_flx:
            flx[ddt, :, :] = xmod.interp(time=self.dates[ddt])
        else:
            flx[ddt, :, :] = xmod.sel(time=self.dates[ddt], method='nearest')

    return flx
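
The final loop selects fluxes in time with xarray: interp gives a linear interpolation between the two surrounding records, while sel(method='nearest') snaps to the closest available time stamp. A small sketch on synthetic data (xarray's interp needs scipy installed):

import numpy as np
import pandas as pd
import xarray as xr

# Synthetic hourly fluxes on a 2 x 3 grid
times = pd.date_range("2019-01-01", periods=4, freq="1H")
xmod = xr.DataArray(
    np.arange(4 * 2 * 3, dtype=float).reshape(4, 2, 3),
    coords={"time": times},
    dims=("time", "lat", "lon"),
)

target = pd.Timestamp("2019-01-01 00:40")
print(xmod.interp(time=target).values)                  # linear interpolation in time
print(xmod.sel(time=target, method="nearest").values)   # closest record (01:00 here)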
Example #5
def read(self,
         name,
         tracdir,
         tracfile,
         varnames,
         dates,
         interpol_flx=False,
         comp_type=None,
         **kwargs):
    """Get fluxes from pre-computed fluxes and load them into a pyCIF
    variables

    Args:
        self: the fluxes Plugin
        name: the name of the component
        tracdir, tracfile: flux directory and file format
        dates: list of dates to extract
        interpol_flx (bool): if True, interpolates fluxes at time t from
        values of surrounding available files

    """
    # Check which type of initial/boundary condition is requested
    if comp_type is None:
        raise Exception("Trying to read initial/boundary conditions for "
                        "CHIMERE, but the type was not specified")

    # Read INI_CONCS
    if comp_type == "inicond":
        ic_file = min(dates).strftime("{}/{}".format(tracdir, tracfile))
        with Dataset(ic_file, "r") as f:
            data = f.variables[name][:][np.newaxis, :]
            xmod = xr.DataArray(
                data,
                coords={"time": [min(dates)]},
                dims=("time", "lev", "lat", "lon"),
            )

    # Read Lateral boundary conditions
    elif comp_type in ["latcond", "topcond"]:
        # Available files in the directory
        list_files = os.listdir(tracdir)
        list_available = []
        for bc_file in list_files:
            try:
                list_available.append(
                    datetime.datetime.strptime(bc_file, tracfile))
            except BaseException:
                continue

        list_available = np.array(list_available)
        list_available.sort()

        # Reading the required boundary-condition files
        trcr_bc = []
        for dd in dates:
            delta = dd - list_available
            mask = delta >= datetime.timedelta(0)
            imin = np.argmin(delta[mask])
            fdates = list_available[mask][imin]

            # Getting the data
            filein = fdates.strftime("{}/{}".format(tracdir, tracfile))
            spec = "lat_conc" if comp_type == "latcond" else "top_conc"
            data, times, specs = readnc(filein, [spec, "Times", "species"])

            # Get the correct date and species index
            ispec = ["".join(c).strip() for c in specs].index(name)
            idate = [
                datetime.datetime.strptime("".join(d), "%Y-%m-%d_%H:%M:%S")
                for d in times
            ].index(dd)

            # Appending
            trcr_bc.append(data[idate, ..., ispec])

        # Putting the data into an xarray,
        # adding a degenerate axis (lat for lateral, lev for top conditions)
        if comp_type == "latcond":
            xout = np.array(trcr_bc)[..., np.newaxis, :]
        else:
            xout = np.array(trcr_bc)[:, np.newaxis, ...]

        xmod = xr.DataArray(xout,
                            coords={"time": dates},
                            dims=("time", "lev", "lat", "lon"))
    else:
        raise Exception("Could not recognize the type of boundary condition "
                        "to read in CHIMERE: {}".format(comp_type))

    return xmod
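
For lateral and top conditions, each requested date is served by the most recent available file that does not lie in the future: the time lags are computed, negative lags are masked out, and the smallest remaining lag wins. The same selection as a standalone sketch with made-up file dates:

import datetime
import numpy as np

# Dates of the available boundary-condition files (made-up values)
list_available = np.array([datetime.datetime(2019, 1, d) for d in (1, 5, 10)])

dd = datetime.datetime(2019, 1, 7)       # requested date

delta = dd - list_available
mask = delta >= datetime.timedelta(0)    # keep only files at or before dd
imin = np.argmin(delta[mask])            # smallest non-negative lag
print(list_available[mask][imin])        # -> 2019-01-05 00:00:00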
Example #6
def build_hcorrelations(zlat,
                        zlon,
                        lsm,
                        sigma_land,
                        sigma_sea,
                        file_lsm=None,
                        evalmin=0.5,
                        dump=False,
                        dir_dump="",
                        projection="gps",
                        **kwargs):
    """Build horizontal correlation matrix based on distance between grid
    cells.
    For cells i and j, the corresponding correlation is:
    c(i,j) = exp(-dist(i, j) / sigma)
    sigma depends on the land-sea mask: land and sea cells are assumed
    uncorrelated

    Args:
        zlat (np.array): 2D array of latitudes
        zlon (np.array): 2D array of longitudes
        file_lsm (str): path to NetCDF file with land-sea mask (grid must be
        consistent with LMDZ grid); the land-sea mask is assumed to be stored
        in the variable 'lsm'
        sigma_land (float): decay distance for correlation between land cells
        sigma_sea (float): idem for sea
        evalmin (float): flag out all eigenvalues below this value. Default
        is 0.5
        dump (bool): dumps computed correlations if True
        dir_dump (str): directory where correlation matrices are stored
        projection (str): the projection used for the longitudes and latitudes

    Return:
        tuple with:
            - square roots of eigenvalues
            - eigenvectors

    """

    # Define domain dimensions
    nlon, nlat = zlat.shape

    # Try reading existing file
    try:
        evalues, evectors = read_hcorr(nlon, nlat, sigma_sea, sigma_land,
                                       dir_dump)

    # Else build correlations
    except IOError:
        info("Computing hcorr")
        # No correlation between land and sea if lsm = True
        if lsm:
            landseamask = readnc(file_lsm, ["lsm"]).flatten()
            is_land = landseamask[:, np.newaxis] >= 0.5
            is_sea = landseamask[:, np.newaxis] < 0.5
            sigma = (sigma_land * (is_land * is_land.T)
                     + sigma_sea * (is_sea * is_sea.T))
        # Otherwise, isotropic correlation with decay length sigma_land
        else:
            sigma = sigma_land

        # Compute matrix of distance
        dx = dist_matrix(zlat, zlon, projection)

        # Compute the correlation matrix itself
        corr = np.exp(-dx / sigma)
        corr[sigma <= 0] = 0

        # Component analysis
        evalues, evectors = np.linalg.eigh(corr)

        # Re-ordering values
        # (not necessary in principle in recent numpy versions)
        index = np.argsort(evalues)[::-1]

        evalues = evalues[index]
        evectors = evectors[:, index]

        # Dumping to a txt file
        if dump:
            dump_hcorr(nlon, nlat, sigma_sea, sigma_land, evalues, evectors,
                       dir_dump)

    except Exception as e:
        raise e

    # Truncating values < evalmin
    mask = evalues >= evalmin

    return evalues[mask]**0.5, evectors[:, mask]
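
Stripped of the land-sea mask and of the dump/read cache, the function builds an exponentially decaying correlation matrix, diagonalizes it, and keeps only the leading eigenpairs. A toy version on five points of a 1-D grid, with a plain Euclidean distance standing in for dist_matrix:

import numpy as np

# Toy 1-D "grid": 5 points, 100 km apart, decay length of 200 km (made up)
x = np.arange(5) * 100.0
sigma = 200.0
evalmin = 0.5

dx = np.abs(x[:, np.newaxis] - x[np.newaxis, :])   # distance matrix
corr = np.exp(-dx / sigma)                          # c(i, j) = exp(-d_ij / sigma)

# Eigen-decomposition, re-ordered from largest to smallest eigenvalue
evalues, evectors = np.linalg.eigh(corr)
index = np.argsort(evalues)[::-1]
evalues, evectors = evalues[index], evectors[:, index]

# Keep eigenvalues >= evalmin; as above, return their square roots
mask = evalues >= evalmin
sqrt_evalues, kept_vectors = evalues[mask] ** 0.5, evectors[:, mask]
print(sqrt_evalues)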
Example #7
def read(
    self,
    name,
    tracdir,
    tracfile,
    varnames,
    dates,
    interpol_flx=False,
    tracer=None,
    model=None,
    **kwargs
):
    """Get fluxes from pre-computed fluxes and load them into a pyCIF
    variables

    Args:
        self: the fluxes Plugin
        name: the name of the component
        tracdir, tracfile: flux directory and file format
        dates: list of dates to extract
        interpol_flx (bool): if True, interpolates fluxes at time t from
        values of surrounding available files

    """

    # If no file format is given, fall back on the one defined by the model
    if tracfile == "":
        tracfile = model.fluxes.file

    # Available files in the directory
    list_files = os.listdir(tracdir)
    list_available = []
    for flx_file in list_files:
        try:
            list_available.append(
                datetime.datetime.strptime(flx_file, tracfile)
            )
        except BaseException:
            continue

    list_available = np.array(list_available)

    # Reading the required flux files
    trcr_flx = []
    for dd in dates:
        delta = dd - list_available
        mask = delta >= datetime.timedelta(0)
        imin = np.argmin(delta[mask])
        fdates = list_available[mask][imin]

        filein = fdates.strftime("{}/{}".format(tracdir, tracfile))

        data, times = readnc(filein, [name, "Times"])

        # Get the correct hour in the file
        times = [
            datetime.datetime.strptime(
                str(b"".join(s), "utf-8"), "%Y-%m-%d_%H:%M:%S"
            )
            for s in times
        ]
        hour = int((dd - times[0]).total_seconds() // 3600)

        trcr_flx.append(data[hour, ...])
    
    # Building an xarray
    xmod = xr.DataArray(
        trcr_flx, coords={"time": dates}, dims=("time", "lev", "lat", "lon")
    )

    return xmod
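
Each flux file covers the hours following its first time stamp, so the record to pick is simply the whole-hour offset between the requested date and that first time. A minimal check of that indexing with made-up dates:

import datetime

times0 = datetime.datetime(2019, 1, 1, 0)   # first time stamp in the file
dd = datetime.datetime(2019, 1, 1, 7)       # requested date

hour = int((dd - times0).total_seconds() // 3600)
print(hour)   # -> 7, i.e. data[7, ...] is the record for 07:00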