Code example #1
def vcoord(obsvect, **kwargs):
    """Computes the vertical layer in which fall the observations
    """

    info("Finding model levels corresponding to observations")

    # Don't do anything if the datastore is empty
    if len(obsvect.datastore) == 0:
        return obsvect

    # If a file with fixed vertical coordinates is specified, use it
    if hasattr(obsvect, "file_statlev"):
        file_statlev = obsvect.file_statlev
        info(
            "Using pre-defined vertical coordinates for stations: {}".format(
                file_statlev
            )
        )
        obsvect.datastore = vcoordfromfile(
            obsvect.datastore, file_statlev, **kwargs
        )

    # Else compute vertical coordinates from meteo files
    # To be coded from pyCIF-CHIMERE
    # To be generalized by using meteo plugin
    else:
        obsvect.datastore = vcoordfrommeteo(
            obsvect.workdir, obsvect.datastore, **kwargs
        )

    return obsvect
Code example #2
def loop_zonal_stats(lyr,
                     raster,
                     resol=10,
                     option="mean",
                     return_weight=False):
    featList = range(lyr.GetFeatureCount())
    statList = []

    for FID in featList:
        try:
            feat = lyr.GetFeature(FID)
            meanValue = zonal_stats(
                feat,
                raster,
                resol=resol,
                option=option,
                return_weight=return_weight,
            )
        except Exception as e:
            # Log the error and fall back to NaNs for this feature
            info(e)
            meanValue = raster.RasterCount * [np.nan]

        # Report progress roughly every 10% of the features
        if FID % max(1, len(featList) // 10) == 0:
            info("{}/{}".format(FID, len(featList)))

        statList.append(meanValue)

    if return_weight:
        return statList

    else:
        return np.array(statList)
Code example #3
File: __init__.py  Project: san57/python
def ini_data(plugin, **kwargs):
    """Initializes the observation operator

    Args:
        plugin (dict): dictionary defining the plugin
        **kwargs (dictionary): possible extra parameters

    """

    info("Initializing the observation operator")

    workdir = plugin.workdir

    # Initializes the directory
    path.init_dir("{}/obsoperator".format(workdir))

    # Initializes transforms
    init_transform(plugin, plugin.statevect)
    init_transform(plugin, plugin.obsvect, transform_type="obs")

    # Re-compile model if necessary
    if hasattr(plugin.model, "compile"):
        plugin.model.compile()

    return plugin
Code example #4
File: __init__.py  Project: san57/python
def ini_data(self, **kwargs):
    """Initializes the chemistry depending on the model used
    for the inversion.

    Args:
        plugin (ChemistryPlugin): chemistry definition

    Returns:
        Updates the chemistry on the fly
    """

    info("Initializing the Chemistry")

    # Copying the chemical scheme to the working directory
    workdir = self.workdir
    dirchem_ref = "{}/chemical_scheme/{}/".format(workdir, self.schemeid)
    self.dirchem_ref = dirchem_ref

    shutil.rmtree(dirchem_ref, ignore_errors=True)
    init_dir(dirchem_ref)

    # If pre-computed scheme is specified
    if hasattr(self, "dir_precomp"):
        copy_tree("{}/{}/".format(self.dir_precomp, self.schemeid),
                  dirchem_ref)

        # Read chemistry
        self.read_chemicalscheme(**kwargs)

    # Otherwise, initialize files from the yaml
    else:
        self.create_chemicalscheme()
Code example #5
    def ini_data(self, **kwargs):
        """Initializes the domain depending on the model used for the inversion.
        Defines a domain grid from a grid file or a set of parameters if the
        domain was not already defined. Domains are model dependent, so the
        outputs can differ from one model to another.

        Args:
            plugin (DomainPlugin): domain definition

        Returns:
            Updates the domain on the fly
        """

        # Read or create a domain
        try:
            # Read domain
            self.read_grid(**kwargs)

        except (IOError, AttributeError):
            # Generate a domain
            info("Couldn't read the domain. Generating it.")
            self.create_domain(**kwargs)

        # Compute areas that can be needed for emissions or diagnostics
        if not hasattr(self, "areas") and getattr(self, "compute_areas",
                                                  False):
            info("Computing areas")
            self.calc_areas(**kwargs)
Code example #6
def check_monitor(self):
    """Check the consistency between the observation datastore and the model
    configuration set up"""

    datastore = self.datastore

    # For old monitor with no nc_attributes, do nothing
    if not hasattr(datastore, "nc_attributes"):
        info("Cannot check the datastore from a previous version."
             "Please be careful with the use of it")
        return True, True, True, False

    # Otherwise, check what part of the monitor is to be re-computed
    nc_attributes = datastore.nc_attributes

    ok_hcoord = (
        nc_attributes.get("domain nlat", None) == str(self.model.domain.nlat)
        and nc_attributes.get("domain nlon", None)
        == str(self.model.domain.nlon)
    )

    ok_vcoord = True

    ok_tstep = (
        nc_attributes.get("datei", None)
        == self.datei.strftime("%d-%m-%Y %H:%M:%S")
        and nc_attributes.get("datef", None)
        == self.datef.strftime("%d-%m-%Y %H:%M:%S")
    )

    allcorrec = ok_hcoord and ok_tstep and ok_vcoord

    return allcorrec, ok_hcoord, ok_vcoord, not ok_tstep
Code example #7
File: setup.py  Project: san57/python
    def run_simu(cls, args):
        # Dealing with relative and variable path
        def_file = os.path.abspath(os.path.expanduser(args["def_file"]))

        # Loading Yaml
        setup = cls.yaml_to_setup(def_file)
        setup = cls.load_config(setup)

        # Copying Yaml file for traceability of simulations
        os.system("cp " + setup.def_file + " " + setup.workdir + "/")

        # Saving the loaded configuration
        if getattr(setup, "dump_config", False):
            cls.to_yaml(
                setup,
                "{}/loaded.{}".format(setup.workdir,
                                      os.path.basename(setup.def_file)),
            )

        # Run the mode
        if getattr(getattr(setup, "mode", None), "loaded_requirements", False):
            return setup.mode.execute(**args)

        else:
            info("pycif has correctly been initialized "
                 "but no execution mode was specified")
Code example #8
File: fetch.py  Project: san57/python
def fetch(
    ref_dir,
    ref_file,
    input_dates,
    target_dir,
    tracer=None,
    filetypes=["defstoke", "fluxstoke", "fluxstokev", "phystoke"],
    **kwargs
):
    """Reads meteorology and links to the working directory

    Args:
        meteo (dictionary): dictionary defining the domain. Should include
        dirmeteo to be able to read the meteorology
        datei (datetime.datetime): initial date for the inversion window
        datef (datetime.datetime): end date for the inversion window
        workdir (str): path to the working directory where meteo files
                       should be copied
        logfile (str): path to the log file
        filetypes ([str]): list of file radicals to copy in the working
                           directory
        **kwargs (dictionary): extra arguments

    Return:
        ????????

    Notes: At some point, include option to compute mass fluxes for LMDz,
    with different physics What is needed to do that? Possible only on CCRT?
    Flexibility to define new domains Can be very heavy and not necessarily
    relevant

    """

    info("Copying meteo files from {} to {}".format(ref_dir, target_dir))

    # Create the sub-directory to store meteo files
    path.init_dir(target_dir)

    # Loop over dates and file types
    for date in input_dates:
        for filetype in filetypes:
            meteo_file = "{}.an{}.m{:02d}.nc".format(
                filetype, date.year, date.month
            )

            if filetype == "defstoke" and not os.path.isfile(
                ref_dir + meteo_file
            ):
                meteo_file = filetype + ".nc"

            target = "{}/{}".format(target_dir, meteo_file)
            source = "{}/{}".format(ref_dir, meteo_file)
            path.link(source, target)

    list_files = {datei: [] for datei in input_dates}
    list_dates = {datei: [] for datei in input_dates}

    return list_files, list_dates
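For reference, a minimal sketch of the meteo file-name construction used in the loop above (hypothetical date; the radicals are those of the default filetypes list):

# Sketch of the meteo file-name pattern used above (hypothetical date)
import datetime

date = datetime.datetime(2012, 3, 1)
for filetype in ["defstoke", "fluxstoke", "fluxstokev", "phystoke"]:
    meteo_file = "{}.an{}.m{:02d}.nc".format(filetype, date.year, date.month)
    print(meteo_file)  # e.g. "fluxstoke.an2012.m03.nc"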
Code example #9
File: read.py  Project: san57/python
def read(self,
         name,
         tracdir,
         tracfile,
         varnames,
         dates,
         interpol_flx=False,
         **kwargs):
    """Get fluxes from pre-computed fluxes and load them into a pycif
    variables

    Args:
        self: the model Plugin
        name: the name of the component
        tracdir, tracfile: flux directory and file format
        dates: list of dates to extract
        interpol_flx (bool): if True, interpolates fluxes at time t from
        values of surrounding available files

    """

    list_file_flx = [dd.strftime(tracfile) for dd in dates]

    trcr_dates = []
    trcr_flx = []
    trcr_flx_tl = []
    for dd, file_flx in zip(dates, list_file_flx):
        # Read binary file
        data = []

        with FortranFile("{}/{}".format(tracdir, file_flx)) as f:
            while True:
                try:
                    data.append(f.read_reals())

                except BaseException:
                    info("End of file {}".format(file_flx))
                    break

        # Reshape file
        nlon = self.domain.nlon
        nlat = self.domain.nlat

        data = np.array(data)

        flx = data[:, 0].reshape((-1, nlat, nlon))
        flx_tl = data[:, 1].reshape((-1, nlat, nlon))

        trcr_flx.append(flx)
        trcr_flx_tl.append(flx_tl)
        trcr_dates.extend(list(pd.date_range(dd, freq="D", periods=len(flx))))

    xmod = xr.DataArray(trcr_flx[0],
                        coords={"time": trcr_dates},
                        dims=("time", "lat", "lon"))

    return xmod
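The reshape step above turns the flat Fortran records into (time, lat, lon) arrays; a standalone sketch with synthetic data (hypothetical sizes, assuming one (forward, tangent-linear) pair per record, which matches the two-column indexing above):

# Standalone sketch of the reshape logic above with synthetic records
import numpy as np

nlat, nlon = 4, 5
ntimes = 3
# Hypothetical records: one (forward, tangent-linear) pair per grid cell
data = np.random.rand(ntimes * nlat * nlon, 2)

flx = data[:, 0].reshape((-1, nlat, nlon))     # -> shape (3, 4, 5)
flx_tl = data[:, 1].reshape((-1, nlat, nlon))  # -> shape (3, 4, 5)
print(flx.shape, flx_tl.shape)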
Code example #10
File: execute.py  Project: san57/python
def execute(self, **kwargs):
    """Runs the model in forward mode

    Args:
        setup (Plugin): definition of the full set-up

    """

    # Working directory
    workdir = self.workdir

    # Control vector
    statevect = self.statevect

    # Observation operator
    obsoper = self.obsoperator

    # Simulation window
    datei = self.datei
    datef = self.datef

    # Some verbose
    info("Running a direct run")

    # Putting x at xb value if available
    if hasattr(statevect, "xb"):
        statevect.x = statevect.xb

    # Running the observation operator
    obsvect = obsoper.obsoper(statevect,
                              "fwd",
                              datei=datei,
                              datef=datef,
                              workdir=workdir,
                              **kwargs)

    # Perturbs the output monitor if required in the Yaml
    if getattr(self, "perturb_obsvect", False):
        # Altering obsvect and save data
        obserror = self.obserror * obsvect.datastore["sim"].mean()

        obsvect.datastore["obs"] = (np.random.normal(
            loc=0, scale=obserror, size=obsvect.datastore.index.size) +
                                    obsvect.datastore["sim"])
        obsvect.datastore["obserror"] = obserror

        # Dumping the datastore with reference data
        dump_datastore(
            obsvect.datastore,
            file_monit=obsvect.file_obsvect,
            dump_type="nc",
            mode="w",
        )

    return obsvect
Code example #11
File: read_domain.py  Project: san57/python
def read_grid(domain, **kwargs):
    """Reads a grid from an existing file

    Args:
        domain (Plugin): dictionary defining the domain. Should include
        filegrid to be able to read the grid from a file

    Return:
        Grid dictionary with meshgrids for center lon/lat and corner lon/lat

    Notes: Coordinates are in meters from a reference point
    """

    # Try to open filelon and filelat
    try:
        zlon = np.loadtxt(domain.filelon)
        zlat = np.loadtxt(domain.filelat)

        nlon = zlon.size
        nlat = zlat.size

        # Corner coordinates
        dlon = np.ptp(zlon) / (nlon - 1) / 2.0
        zlonc = zlon - dlon
        zlonc = np.append(zlonc, zlonc[-1] + 2 * dlon)

        dlat = np.ptp(zlat) / (nlat - 1) / 2.0
        zlatc = zlat - dlat
        zlatc = np.append(zlatc, zlatc[-1] + 2 * dlat)

        # Meshgrids
        zlon, zlat = np.meshgrid(zlon, zlat)
        zlonc, zlatc = np.meshgrid(zlonc, zlatc)

        # Saving information to domain attributes
        domain.nlon = nlon
        domain.nlat = nlat
        domain.zlon = zlon
        domain.zlat = zlat
        domain.zlonc = zlonc
        domain.zlatc = zlatc

    except (IOError, AttributeError):
        info("Couldn't read longitudes and latitudes.\n"
             "Make them from given coordinates")
        domain.create_domain()

    # Compute areas in m2
    # (assuming the corner meshgrids follow numpy's default 'xy' indexing as
    # above: latitudes vary along axis 0, longitudes along axis 1)
    domain.areas = (np.diff(domain.zlatc, axis=0)[:, :-1] *
                    np.diff(domain.zlonc, axis=1)[:-1])

    # Projection not as GPS
    domain.projection = "xy"
Code example #12
File: read.py  Project: san57/python
def read(self,
         name,
         tracdir,
         tracfile,
         varnames,
         dates,
         interpol_flx=False,
         tracer=None,
         model=None,
         **kwargs):

    # tracfile can either be a single file format or a list of the same
    # length as dates
    if isinstance(tracfile, str):
        list_files = len(dates) * [tracfile]
    elif len(tracfile) != len(dates):
        raise Exception(
            "Tried to read EDGAR files from a list of dates and a "
            "list of files of different lengths:\n{}\n{}".format(
                tracfile, dates))
    else:
        list_files = list(tracfile)

    # Reading fluxes for periods within the simulation window
    trcr_flx = []
    for dd, dd_file in zip(dates, list_files):
        file_flx = dd.strftime(dd_file)
        dir_flx = dd.strftime(tracdir)

        if not os.path.isfile("{}/{}".format(dir_flx, file_flx)) and getattr(
                self, "closest_year", False):
            info("Warning: could not find correct year for EDGAR; "
                 "using closest available one")
            list_dates = [
                datetime.datetime.strptime(os.path.basename(f), tracfile)
                for f in glob.glob("{}/v50_*nc".format(dir_flx))
            ]
            delta_dates = np.abs(dd - np.array(list_dates))
            file_flx = list_dates[np.argmin(delta_dates)].strftime(tracfile)

        nc = xr.open_dataset("{}/{}".format(dir_flx, file_flx),
                             decode_times=False)
        trcr_flx.append(nc[varnames].values)

    xmod = xr.DataArray(
        np.array(trcr_flx)[:, np.newaxis, ...],
        coords={"time": dates},
        dims=("time", "lev", "lat", "lon"),
    )

    return xmod
Code example #13
File: __init__.py  Project: san57/python
def ini_data(plugin, **kwargs):
    """Initializes CHIMERE

    Args:
        plugin (dict): dictionary defining the plugin
        **kwargs (dictionary): possible extra parameters

    Returns:
        loaded plugin and directory with executable

    """

    info("Initializing the model")

    workdir = getattr(plugin, "workdir", "./")

    # Initializes the directory
    path.init_dir("{}/model".format(workdir))

    # Default values:
    # period: '1D'
    plugin.periods = getattr(plugin, "periods", "1D")

    # Number of hours per period
    plugin.nhours = int(
        pd.to_timedelta(plugin.periods).total_seconds() // 3600)
    plugin.nho = "{:.0f}".format(plugin.nhours)

    # Set nsaveconcs if not specified
    # Forces the end.nc file to contain concentration every N hours
    # By default, saves only at the end
    if not hasattr(plugin, "nsaveconcs"):
        plugin.nsaveconcs = plugin.nhours

    # Replace name for METEO files
    plugin.meteo.file = plugin.meteo.file.format(nho=plugin.nho)

    # Replace name for AEMISSION files and BEMISSIONS files
    plugin.fluxes.file = plugin.fluxes.file.format(nho=plugin.nho)
    plugin.fluxes.nlevemis = plugin.nlevemis

    plugin.biofluxes.file = plugin.biofluxes.file.format(nho=plugin.nho)
    plugin.biofluxes.nlevemis = plugin.nlevemis_bio

    # Replace name for BOUN_CONCS files
    plugin.latcond.file = plugin.latcond.file.format(nho=plugin.nho)
    plugin.topcond.file = plugin.topcond.file.format(nho=plugin.nho)

    return plugin
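The number of hours per period is derived from the pandas frequency string; a quick sketch of that conversion:

# Sketch of the periods-to-hours conversion used above
import pandas as pd

for periods in ["1D", "6H", "12H"]:
    nhours = int(pd.to_timedelta(periods).total_seconds() // 3600)
    print(periods, "->", nhours)  # 1D -> 24, 6H -> 6, 12H -> 12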
Code example #14
File: utils.py  Project: san57/python
def grib_file_reader(filepath, varname, attribute=None):
    """
        Filepath is the absolute file path
        Dimension_i is the name of dimension i, e.i. given between cotes ''
        if there is not third dimension, Dimension_3's value = None
        Variable is the variable's name, e.i. given between cotes ''
    """

    # Forcing import of attributes if not in attribute key list
    if (attribute is not None
            and attribute not in cfgrib.dataset.DATA_ATTRIBUTES_KEYS):
        cfgrib.dataset.DATA_ATTRIBUTES_KEYS.append(attribute)
        cfgrib.dataset.ALL_KEYS = sorted(cfgrib.dataset.ALL_KEYS + [attribute])

    if not os.path.exists(filepath):
        info("{} was not found".format(filepath))
        raise IOError

    info("Reading {}".format(filepath))

    df = cfgrib.open_file(filepath)

    if len(np.shape(varname)) == 0:
        varnames = [varname]
    else:
        varnames = varname[:]

    varout = []
    for name in varnames:
        var = df.variables[name].data

        if hasattr(var, "build_array"):
            var = var.build_array()
        varout.append(var)

    # Fetching attributes if needed
    if attribute is not None:
        for name in df.variables:
            attr = df.variables[name].attributes.get(
                "GRIB_{}".format(attribute), None)
            if attr is not None:
                return attr
        raise Exception("Could not find attribute {} in {}".format(
            attribute, filepath))

    if len(np.shape(varname)) == 0:
        return varout[0]
    else:
        return varout
Code example #15
File: create_domain.py  Project: san57/python
def create_domain(domain, **kwargs):
    """Creates a grid if needed

    Args:
        domain (dictionary): dictionary defining the domain.

    Raises:
         An Exception, as LMDZ shouldn't be used with unknown grids

    """

    logfile = kwargs.get("logfile", None)

    info("TO DO", logfile)

    raise Exception
Code example #16
def simul(self, chi, grad=True, run_id=-1, **kwargs):
    """Computes the cost function J (and its gradient) based on the Gaussian
    formulation of the inversion framework:
    J(x) = chi^T chi + (Hx-y)^T R^-1 (Hx-y)
         = j_b         + j_r

    gradJ(x) = 2 * chi + 2 * H^T R^(-1) (Hx-y)


    Args:
        chi (np.array): a flat vector defining the current state of the control
                        vector
        grad (bool, optional): if True, returns both the function value and
                               its gradient
        run_id (int): ID for the current run (determines the folder names)

    Returns:
        J(x), gradJ(x)
    """

    # Various variables
    datei = self.datei
    datef = self.datef
    workdir = self.workdir

    # Get the observation operator from extra arguments
    if not hasattr(self, "obsoperator"):
        raise Exception("Observation operator is missing to compute the "
                        "simulator. Please check your setup files")

    obsoper = self.obsoperator
    statevect = self.statevect

    # Saving chi to the control vector for later
    statevect.chi = chi

    zcost = np.sum((chi - np.arange(len(chi)))**2)
    zgrad = 2 * (chi - np.arange(len(chi)))

    # Verbose the norms
    znorm_grad_b = np.dot(zgrad, zgrad)**0.5
    info("In Simulator:\n"
         "    grad(Jb) = {}\n"
         "         Jb  = {}\n".format(znorm_grad_b, zcost))

    return zcost, zgrad
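The body above stands in for the full Gaussian cost with a toy quadratic whose minimum is at chi = [0, 1, ..., n-1]; a standalone sketch (purely illustrative, not part of pyCIF) checking the analytical gradient against a finite difference:

# Standalone check of the toy cost and gradient used above
import numpy as np

def toy_cost(chi):
    ref = np.arange(len(chi))
    return np.sum((chi - ref) ** 2), 2 * (chi - ref)

chi = np.array([3.0, -1.0, 2.0])
zcost, zgrad = toy_cost(chi)

# Finite-difference check of the first gradient component
eps = 1e-6
chi_pert = chi.copy()
chi_pert[0] += eps
fd = (toy_cost(chi_pert)[0] - zcost) / eps
print(zgrad[0], fd)  # both close to 2 * (3 - 0) = 6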
Code example #17
def make_fluxes(self, data, ddi, ddf, runsubdir, mode):
    """Prepare a binary file per emitted species containing flux data.

    :param self:
    :param datastore:
    :param ddi:
    :param ddf:
    :param runsubdir:
    :param mode:
    :return:
    """

    datastore = data.datastore

    for spec in self.chemistry.emis_species.attributes:
        tracer = getattr(self.chemistry.emis_species, spec)

        if not ("fluxes", spec) in datastore:
            info("{} not available for being emitted in LMDZ".format(spec))
            continue

        info("LMDZ is generating flux inputs for {}".format(spec))

        data = datastore[("fluxes", spec)]

        # If not determined by the control vector
        if "spec" not in data:
            data["spec"] = self.fluxes.read(
                spec,
                data["dirorig"],
                data["fileorig"],
                data["varname"],
                self.input_dates[ddi],
            )

        # Adds empty increments if not available
        if "incr" not in data:
            data["incr"] = 0.0 * data["spec"]

        # Put in dataset for writing by 'write'
        ds = xr.Dataset({"fwd": data["spec"], "tl": data["incr"]})

        # Write to FORTRAN binary
        flx_file = "{}/mod_{}.bin".format(runsubdir, spec)
        self.emis_species.write(spec, flx_file, ds)
Code example #18
File: crop_monitor.py  Project: san57/python
def crop_monitor(datastore, datei, datef, **kwargs):
    """Crops observation datasets to keep observations whose duration fits
    entirely during the simulation period

    Args:
        datastore (pd.DataFrame): observation dataset
        datei (datetime.datetime): start date
        datef (datetime.datetime): end date

    Returns:
        pd.DataFrame: Cropped dataframe
    """

    info("Cropping obsvect.datastore to simulation window")
    info("{} to {}".format(datei, datef))

    mask = (datastore.index >= datei) & (
        datastore.index + pd.to_timedelta(datastore["duration"], unit="h")
        <= datef
    )
    return datastore.loc[mask]
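A minimal usage sketch of this cropping with a synthetic datastore (hypothetical observations; the index holds the observation start time and 'duration' is in hours):

# Minimal sketch of crop_monitor's masking with a synthetic datastore
import datetime
import pandas as pd

datastore = pd.DataFrame(
    {"duration": [1, 24, 1]},
    index=pd.to_datetime(["2012-01-01 06:00", "2012-01-01 20:00",
                          "2012-01-02 23:30"]),
)

datei = datetime.datetime(2012, 1, 1)
datef = datetime.datetime(2012, 1, 2)

mask = (datastore.index >= datei) & (
    datastore.index + pd.to_timedelta(datastore["duration"], unit="h") <= datef
)
print(datastore.loc[mask])  # only the first observation fits entirely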
Code example #19
def default_fetch(ref_dir,
                  ref_file,
                  input_dates,
                  target_dir,
                  tracer=None,
                  **kwargs):

    # Picking info from tracer if not in inputs
    if ref_dir == "" and hasattr(tracer, "dir"):
        input_dir = tracer.dir
    else:
        input_dir = ref_dir

    if ref_file == "" and hasattr(tracer, "file"):
        input_file = tracer.file
    else:
        input_file = ref_file

    info("Fetching input files using directory and file format")
    info("{}/{}".format(input_dir, input_file))

    list_files = {}
    list_dates = {}
    for datei in input_dates:
        tmp_files = []
        tmp_dates = []
        for dd in input_dates[datei]:
            dir_dd = dd.strftime(input_dir)
            file_dd = dd.strftime(input_file)
            tmp_files.append("{}/{}".format(dir_dd, file_dd))
            # Keep track of the date attached to each file
            tmp_dates.append(dd)

        # Fetching
        local_files = []
        for f in tmp_files:
            target_file = "{}/{}".format(target_dir, os.path.basename(f))
            path.link(f, target_file)
            local_files.append(target_file)

        list_files[datei] = list(set(local_files))
        list_dates[datei] = list(set(tmp_dates))

    return list_files, list_dates
Code example #20
File: create_domain.py  Project: san57/python
def create_domain(domain, **kwargs):
    """Creates a grid if needed

    Args:
        domain (dictionary): dictionary defining the domain.

    Raises:
         An Exception, as LMDZ shouldn't be used with unknown grids

    """

    logfile = kwargs.get("logfile", None)

    info(
        "Cannot create a LMDZ grid as LMDZ should be used with "
        "pre-defined grids only",
        logfile,
    )

    raise Exception
Code example #21
def fetch(ref_dir, ref_file, input_dates, target_dir, tracer=None, **kwargs):

    list_files = {}
    list_dates = {}
    for datei in input_dates:
        tmp_files = []
        tmp_dates = []
        for dd in input_dates[datei]:
            file_flx = dd.strftime(ref_file)
            dir_flx = dd.strftime(ref_dir)
            date_flx = dd

            if not os.path.isfile(
                    "{}/{}".format(dir_flx, file_flx)) and getattr(
                        tracer, "closest_year", False):
                info("Warning: could not find correct year for EDGAR; "
                     "using closest available one")
                list_dates_avail = [
                    datetime.datetime.strptime(os.path.basename(f), ref_file)
                    for f in glob.glob("{}/v50_*nc".format(dir_flx))
                ]
                delta_dates = np.abs(dd - np.array(list_dates_avail))
                date_flx = list_dates_avail[np.argmin(delta_dates)]
                file_flx = date_flx.strftime(ref_file)

            tmp_files.append("{}/{}".format(dir_flx, file_flx))
            tmp_dates.append(date_flx)

        # Fetching
        local_files = []
        for f in tmp_files:
            target_file = "{}/{}".format(target_dir, os.path.basename(f))
            path.link(f, target_file)
            local_files.append(target_file)

        list_files[datei] = list(set(local_files))
        list_dates[datei] = list(set(tmp_dates))

    return list_files, list_dates
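The closest-year fallback above picks the available date minimising the absolute time difference to the requested one; a standalone sketch with hypothetical available years:

# Standalone sketch of the closest-year selection used above
import datetime
import numpy as np

dd = datetime.datetime(2018, 6, 1)
list_dates_avail = [datetime.datetime(y, 1, 1) for y in (2010, 2012, 2015)]

delta_dates = np.abs(dd - np.array(list_dates_avail))
closest = list_dates_avail[np.argmin(delta_dates)]
print(closest.year)  # 2015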
Code example #22
File: minimize.py  Project: san57/python
def minimize(self, finit, gradinit, chi0, **kwargs):
    # x, f, g, auxil, io, niter, nsim, iz, df1, m=5, dxmin=1.e-20,
    #  epsg=1.e-20, impres=1, mode=0, **kwargs
    """Entry point for M1QN3 algorithm.

    Args:
        finit (float): initial value for the function to minimize
        gradinit (np.array): gradient at the starting point
        chi0 (np.array): initial state for the unknown to optimize
        simulator (module): simulator module to evaluate the function and
                               its gradient
        minimizer (module): minimizer module, used to define minimizer options

    Returns:
        np.array: the optimized state vector

    """

    # Initializing options (and filling missing values with default)
    self = self.check_options(chi0, finit, **kwargs)

    # Running M1QN3
    xopt, fopt, gradopt, niter, nsim, epsg, mode = self.m1qn3(
        finit, gradinit, chi0, **kwargs
    )

    # Final verbose and output
    towrite = """
        M1QN3:
            output mode is {}
            number of iterations: {}
            number of simulations: {}
            realized relative precision on g: {}
        """.format(
        mode, niter, nsim, epsg
    )

    info(towrite)

    r1 = np.sqrt(np.dot(xopt, xopt))
    r2 = np.sqrt(np.dot(gradopt, gradopt))

    info("norm of x = " + str(r1))
    info("f         = " + str(fopt))
    info("norm of g = " + str(r2))

    return xopt
Code example #23
File: __init__.py  Project: san57/python
def ini_data(plugin, **kwargs):
    """Initializes the dummy_txt Gaussian model

    Args:
        plugin (Plugin): the model plugin to initialize
        **kwargs (dictionary): possible extra parameters

    Returns:
        loaded plugin and directory with executable

    """

    info("Initializing the model")

    workdir = getattr(plugin, "workdir", "./")

    # Initializes the directory
    path.init_dir("{}/model".format(workdir))

    # Copying the model Pasquill-Gifford matrix
    target = "{}/model/".format(workdir) + os.path.basename(plugin.file_pg)
    source = plugin.file_pg

    shutil.copy(source, target)

    # Required inputs for running an LMDz simulation
    plugin.required_inputs = ["fluxes", "meteo", "param"]

    # Initializes default values:
    # - sub-simulations of 1day
    # - time steps of 1 hour
    plugin.periods = getattr(plugin, "periods", "1D")
    plugin.tstep = getattr(plugin, "tstep", "1H")
    plugin.save_H = getattr(plugin, "save_H", False)

    plugin.H_matrix = {}

    return plugin
Code example #24
File: setup.py  Project: san57/python
    def config_info(cls, setup):
        """Prints out main input parameters for pyCIF
        """

        verbose_txt = [
            "pyCIF has been initialized with the following parameters:",
            "Yaml configuration file: {}".format(setup.def_file),
            "Log file: {}".format(setup.logfile),
            "Start date: {}".format(setup.datei),
            "End date: {}".format(setup.datef),
            "Working directory: {}".format(setup.workdir),
        ]

        for v in verbose_txt:
            info(v)
Code example #25
File: native2state.py  Project: san57/python
def native2state(transform, data, mapper, mod_input, di, df, mode, runsubdir,
                 workdir, trans_mode, **kwargs):

    datastore = data.datastore
    for trid in mapper["inputs"]:
        input_type = trid[0]

        # If trid in datastore, dumps this one
        if trid in datastore:
            todump = [trid]

        # If input parameter is '',
        # dumps all available parameters of this component
        elif trid[1] == "":
            todump = [t for t in datastore if t[0] == trid[0]]

        # Otherwise check whether there is a component
        # encompassing all parameters (i.e., with '' as parameter)
        else:
            todump = [(trid[0], "")]

        # Create new data to extract
        data2dump = {t: datastore[t] for t in todump}

        data2dump = transform.model.outputs2native(data2dump, input_type, di,
                                                   df, runsubdir, mode)

        for tr in data2dump:
            if tr in datastore:
                datastore[tr].update(data2dump[tr])

            else:
                info("{} was simulated by the model, "
                     "but could not be transferred to the control vector".
                     format(tr))

    return data
Code example #26
File: read.py  Project: san57/python
def read(self,
         name,
         tracdir,
         tracfile,
         varnames,
         dates,
         interpol_flx=False,
         **kwargs):
    """Get fluxes from pre-computed fluxes and load them into a pycif
    variables

    Args:
        self: the model Plugin
        name: the name of the component
        tracdir, tracfile: flux directory and file format
        dates: list of dates to extract
        interpol_flx (bool): if True, interpolates fluxes at time t from
        values of surrounding available files

    """

    info("READING NetCDF dummy fluxes")

    raise Exception
Code example #27
File: check.py  Project: san57/python
def check_inputs(inputs, mode):
    """Check the consistency of inputs given to the observation operator.

    """
    if mode not in ["tl", "fwd", "adj"]:
        info("The following running mode is not accepted by the "
             "observation operator: {}".format(mode))
        raise Exception

    if mode == "tl" and not (hasattr(inputs, "x") and hasattr(inputs, "dx")):
        info("The observation operator was operated in tangent-linear mode "
             "but not with both increments and control vector")
        raise Exception

    if mode == "fwd" and not hasattr(inputs, "x"):
        info("The observation operator was operated in forward mode "
             "with no control vector")
        info("All inputs will be dealt as fixed")

    return True
Code example #28
    def parse_multiple_files(self, **kwargs):
        """Parses multiple files specified by a glob pattern and stores the
        content into a datastore

        Args:
            provider_name (str):  provider of the input file
            file_format_id (str): name of the type of file with a given format
            glob_pattern (str): glob pattern: /**/ for recursive  matching
                                in subdirectories

        Keyword Args:
            encoding (str): Encoding of input files
            freq (str): frequency after re-sampling
                        see `Offset Aliases`_ for valid strings
            src_freq (str): explicit setting of the frequency in the input
                            file; shouldn't be necessary

        Notes:
            - Additional kwargs for a parser are possible or even required.
              See the respective documentation

        Returns:
            dict: {obs_file} = df[obssite_id, parameter]
        """

        # parser = cls.get_parser(provider_name, file_format_id)

        dfs = {}

        info("Reading files in " + self.dir_obs)

        for obs_file in sorted(glob.glob(self.dir_obs + "*")):
            try:
                dfs[os.path.basename(obs_file)] = self.parse_file(
                    obs_file, **kwargs
                )

            except error.PluginError as e:
                info(
                    "{} was not loaded for the following reason".format(
                        obs_file
                    )
                )
                info(e.message)

        if dfs != {}:
            return pd.concat(list(dfs.values()))
        else:
            return pd.DataFrame({})
Code example #29
File: minimize.py  Project: san57/python
def minimize(self, finit, gradinit, chi0, **kwargs):
    # x, f, g, auxil, io, niter, nsim, iz, df1, m=5, dxmin=1.e-20,
    #  epsg=1.e-20, impres=1, mode=0, **kwargs
    """Entry point for CONGRAD algorithm.

    Args:
        finit (float): initial value for the function to minimize
        gradinit (np.array): gradient at the starting point
        chi0 (np.array): initial state for the unknown to optimize
        simulator (module): simulator module to evaluate the function and
                               its gradient
        minimizer (module): minimizer module, used to define minimizer options

    Returns:
        np.array: the optimized state vector

    """

    # Initializing options (and filling missing values with default)
    self = self.check_options(chi0, **kwargs)

    # Running CONGRAD
    lanczvect0 = copy.deepcopy(gradinit)
    xopt, gradopt, preduc, pevecs, iiter = self.congrad(
        chi0, gradinit, lanczvect0, **kwargs)

    # Final verbose and output
    towrite = """
        CONGRAD:
            number of iterations: {}
            achieved relative reduction of the gradient: {}
        """.format(iiter, preduc)

    info(towrite)

    r1 = np.sqrt(np.dot(xopt, xopt))
    r2 = np.sqrt(np.dot(gradopt, gradopt))

    info("norm of x = " + str(r1))
    info("norm of g = " + str(r2))

    return xopt
Code example #30
File: setup.py  Project: san57/python
    def from_yaml(cls, def_file):
        """Generates a dictionary including all pyCIF parameters

        Args:
            def_file (string) : Path to the definition file
                                Handles both absolute and relative paths

        Returns:
            config_dict (dictionary): Dictionary populated with all pyCIF
            parameters

        """

        yml_file = os.path.abspath(os.path.expanduser(def_file))

        try:
            with open(yml_file, "r") as f:
                config_dict = ordered_load(f)

                config_dict["def_file"] = yml_file

                if "datei" in config_dict:
                    # Converting dates to datetime if necessary
                    config_dict["datei"] = dates.date2datetime(
                        config_dict["datei"])
                    config_dict["datef"] = dates.date2datetime(
                        config_dict["datef"])
                return config_dict

        except IOError as e:
            info("Couldn't find config file: {}".format(yml_file))
            info("Please check directories")
            raise e

        except yaml.scanner.ScannerError as e:
            info("Error in the syntax of config file: {}".format(yml_file))
            raise e
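A minimal Yaml configuration illustrating the keys read above (hypothetical values; real pyCIF configurations define many more sections, and from_yaml uses ordered_load rather than yaml.safe_load):

# Minimal sketch of a configuration consumed by from_yaml (hypothetical values)
import yaml

minimal_yml = """
workdir: /tmp/pycif_test
logfile: pycif.log
datei: 2012-01-01
datef: 2012-02-01
"""

config_dict = yaml.safe_load(minimal_yml)
print(config_dict["datei"], type(config_dict["datei"]))
# yaml parses the dates as datetime.date; pyCIF then converts them to
# datetime.datetime with dates.date2datetime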