Example 1
            def con_limit(z):
                """Limit-state function in standard-normal space.

                Closure over `model`, `key`, and `df_inner` from the
                enclosing scope.
                """
                ## Transform: standard-normal space to random-variable space
                df_norm = DataFrame(data=[z], columns=model.var_rand)
                df_rand = model.norm2rand(df_norm)
                df = model.var_outer(df_rand, df_det=df_inner)

                ## Evaluate the limit state
                df_res = gr.eval_df(model, df=df)
                g = df_res[key].iloc[0]

                return g
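con_limit is a closure; a minimal driver sketch, assuming model, key, and df_inner are in scope as above and that the goal is the most probable point (MPP) of the limit state in standard-normal space. The dict-style constraint mirrors the convention scipy.optimize.minimize uses in Example 11; all names here are illustrative:

from numpy import zeros
from scipy.optimize import minimize

z0 = zeros(len(model.var_rand))  # start at the standard-normal origin
res = minimize(
    lambda z: z.dot(z),  # squared distance from the origin
    z0,
    method="SLSQP",
    # "ineq" means fun(z) >= 0, so negate to enforce con_limit(z) <= 0
    constraints=[{"type": "ineq", "fun": lambda z: -con_limit(z)}],
)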
Example 2
            def objective(z):
                """Optimization objective in standard-normal space.

                Closure over `model`, `key`, and `df_inner` from the
                enclosing scope.
                """
                ## Transform: standard-normal space to random-variable space
                df_norm = DataFrame(data=[z], columns=model.var_rand)
                df_rand = model.norm2rand(df_norm)
                df = model.var_outer(df_rand, df_det=df_inner)

                ## Evaluate the objective
                df_res = gr.eval_df(model, df=df)
                g = df_res[key].iloc[0]

                # return (g, jac)
                return g
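The commented-out return hints at a variant that also computes a gradient; scipy.optimize.minimize can consume an objective that returns (value, gradient) when passed jac=True. A sketch under that assumption (z0 and the gradient computation are hypothetical, not part of the fragment):

from scipy.optimize import minimize

## Only valid if objective is changed to `return (g, jac)`:
res = minimize(objective, z0, jac=True, method="L-BFGS-B")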
Example 3
        def objective(x):
            """Fitting objective; x = [var_fit].

            Closure over `df_data`, `df_nom`, `model`, `var_feat`,
            `var_fix`, `var_fit`, and `out` from the enclosing scope.
            """
            ## Evaluate model at the candidate parameter values
            df_var = tran_outer(
                df_data[var_feat],
                concat(
                    (df_nom[var_fix].iloc[[0]], df_make(**dict(zip(var_fit, x)))),
                    axis=1,
                ),
            )
            df_tmp = eval_df(model, df=df_var)

            ## Compute MSE pooled over all outputs in `out`
            return ((df_tmp[out].values - df_data[out].values) ** 2).mean()
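A sketch of driving this objective with scipy, assuming df_nom carries nominal levels for the fitted variables; the starting point x0 and the driver are illustrative, not part of the source:

from scipy.optimize import minimize

x0 = df_nom[var_fit].iloc[0].values  # hypothetical nominal starting point
res = minimize(objective, x0, method="L-BFGS-B")
x_best = dict(zip(var_fit, res.x))   # best-fit levels keyed by variable name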
Example 4
def eval_lhs(model,
             n=1,
             df_det=None,
             seed=None,
             append=True,
             skip=False,
             criterion=None):
    r"""Latin Hypercube evaluation
    Evaluates a given model on a latin hypercube sample (LHS) using the model's
    density.
    Args:
        model (gr.Model): Model to evaluate
        n (numeric): Number of LHS samples to draw
        df_det (DataFrame): Deterministic levels for evaluation; use "nom"
            for nominal deterministic levels.
        seed (int): Random seed to use
        append (bool): Append results to conservative inputs?
        skip (bool): Skip evaluation of the functions?
        criterion (str): flag for LHS sample criterion
            allowable values: None, "center" ("c"), "maxmin" ("m"),
            "centermaxmin" ("cm"), "correlation" ("corr")
    Returns:
        DataFrame: Results of evaluation or unevaluated design
    Notes:
        - Wrapper on pyDOE.lhs
    """
    ## Set seed only if given
    if seed is not None:
        set_seed(seed)

    ## Ensure sample count is int
    if not isinstance(n, Integral):
        print("eval_lhs() is rounding n...")
        n = int(n)

    ## Draw samples
    df_quant = DataFrame(data=lhs(model.n_var_rand, samples=n),
                         columns=model.var_rand)

    ## Convert samples to desired marginals
    df_rand = model.density.pr2sample(df_quant)
    ## Construct outer-product DOE
    df_samp = model.var_outer(df_rand, df_det=df_det)

    if skip:
        return df_samp
    else:
        return gr.eval_df(model, df=df_samp, append=append)
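eval_lhs carries no Examples block; a minimal usage sketch in the style of the other evaluators here, assuming the pipe-enabled alias gr.ev_lhs follows the same ev_* convention used below:

import grama as gr
from grama.models import make_test

md = make_test()
## Draw a 100-point centered Latin hypercube over the random variables
df_lhs = md >> gr.ev_lhs(n=100, df_det="nom", criterion="center", seed=101)
df_lhs.describe()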
Example 5
def eval_sinews(
    model,
    n_density=10,
    n_sweeps=3,
    seed=None,
    df_det=None,
    varname="sweep_var",
    indname="sweep_ind",
    append=True,
    skip=False,
):
    r"""Sweep study

    Perform coordinate sweeps over each model random variable ("sinew" design). Use random starting points drawn from the joint density. Optionally sweep the deterministic variables.

    For more expensive models, it can be helpful to tune n_density and n_sweeps to achieve a reasonable runtime.

    Use gr.plot_auto() to construct a quick visualization of the output dataframe. Use `skip` version to visualize the design, and non-skipped version to visualize the results.

    Args:
        model (gr.Model): Model to evaluate
        n_density (numeric): Number of points along each sweep
        n_sweeps (numeric): Number of sweeps per random variable
        seed (int): Random seed to use
        df_det (DataFrame): Deterministic levels for evaluation;
            use "nom" for nominal deterministic levels,
            use "swp" to sweep deterministic variables
        varname (str): Column name to give for sweep variable; default="sweep_var"
        indname (str): Column name to give for sweep index; default="sweep_ind"
        append (bool): Append results to the sweep inputs?
        skip (bool): Skip evaluation of the functions?

    Returns:
        DataFrame: Results of evaluation or unevaluated design

    Examples:

        >>> import grama as gr
        >>> md = gr.make_cantilever_beam()
        >>> # Skip evaluation, vis. design
        >>> df_design = md >> gr.ev_sinews(df_det="nom", skip=True)
        >>> df_design >> gr.pt_auto()
        >>> # Vis results
        >>> df_sinew = md >> gr.ev_sinews(df_det="nom")
        >>> df_sinew >> gr.pt_auto()

    """
    ## Override model if deterministic sweeps desired
    if df_det == "swp":
        ## Collect sweep-able deterministic variables
        var_sweep = list(
            filter(
                lambda v: isfinite(model.domain.get_width(v))
                & (model.domain.get_width(v) > 0),
                model.var_det,
            ))
        ## Generate pseudo-marginals
        dicts_var = {}
        for v in var_sweep:
            dicts_var[v] = {
                "dist": "uniform",
                "loc": model.domain.get_bound(v)[0],
                "scale": model.domain.get_width(v),
            }
        ## Overwrite model
        model = comp_marginals(model, **dicts_var)
        ## Restore flag
        df_det = "nom"

    ## Set seed only if given
    if seed is not None:
        set_seed(seed)

    ## Ensure sample count is int
    if not isinstance(n_density, Integral):
        print("eval_sinews() is rounding n_density...")
        n_density = int(n_density)
    if not isinstance(n_sweeps, Integral):
        print("eval_sinews() is rounding n_sweeps...")
        n_sweeps = int(n_sweeps)

    ## Build quantile sweep data
    q_random = tile(random((1, model.n_var_rand, n_sweeps)), (n_density, 1, 1))
    q_dense = linspace(0, 1, num=n_density)
    Q_all = zeros((n_density * n_sweeps * model.n_var_rand, model.n_var_rand))
    C_var = ["tmp"] * (n_density * n_sweeps * model.n_var_rand)
    C_ind = [0] * (n_density * n_sweeps * model.n_var_rand)

    ## Interlace
    for i_input in range(model.n_var_rand):
        ind_base = i_input * n_density * n_sweeps
        for i_sweep in range(n_sweeps):
            ind_start = ind_base + i_sweep * n_density
            ind_end = ind_base + (i_sweep + 1) * n_density

            Q_all[ind_start:ind_end] = q_random[:, :, i_sweep]
            Q_all[ind_start:ind_end, i_input] = q_dense
            C_var[ind_start:ind_end] = [model.var_rand[i_input]] * n_density
            C_ind[ind_start:ind_end] = [i_sweep] * n_density

            ## Modify endpoints for infinite support
            if not isfinite(
                    model.density.marginals[model.var_rand[i_input]].q(0)):
                Q_all[ind_start, i_input] = 1 / n_density / 10
            if not isfinite(
                    model.density.marginals[model.var_rand[i_input]].q(1)):
                Q_all[ind_end - 1, i_input] = 1 - 1 / n_density / 10

    ## Assemble sampling plan
    df_pr = DataFrame(data=Q_all, columns=model.var_rand)
    df_rand = model.density.pr2sample(df_pr)
    df_rand[varname] = C_var
    df_rand[indname] = C_ind
    ## Construct outer-product DOE
    df_samp = model.var_outer(df_rand, df_det=df_det)

    if skip:
        ## Evaluation estimate
        runtime_est = model.runtime(df_samp.shape[0])
        if runtime_est > 0:
            print(
                "Estimated runtime for design with model ({0:1}):\n  {1:4.3} sec"
                .format(model.name, runtime_est))
        else:
            print(
                "Design runtime estimates unavailable; model has no timing data."
            )

        ## For autoplot
        with catch_warnings():
            simplefilter("ignore")
            df_samp._plot_info = {
                "type": "sinew_inputs",
                "var": model.var_rand
            }

        ## Pass-through
        return df_samp

    ## Apply
    df_res = eval_df(model, df=df_samp, append=append)
    ## For autoplot
    with catch_warnings():
        simplefilter("ignore")
        df_res._plot_info = {
            "type": "sinew_outputs",
            "var": model.var_rand,
            "out": model.out,
        }

    return df_res
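To make the interlacing loop concrete, here is a small worked layout (illustrative values, not from the source). With two random variables, n_sweeps=1, and n_density=3, Q_all stacks one n_density block per variable; within a block the swept column carries linspace(0, 1, 3) while the other column repeats that sweep's random anchor point:

## sweep_var = x1 block    sweep_var = x2 block
## [0.0, 0.37]             [0.91, 0.0]
## [0.5, 0.37]             [0.91, 0.5]
## [1.0, 0.37]             [0.91, 1.0]
## (0.37 and 0.91 stand in for the random anchor draws)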
Example 6
def eval_hybrid(
    model,
    n=1,
    plan="first",
    df_det=None,
    varname="hybrid_var",
    seed=None,
    append=True,
    skip=False,
):
    r"""Hybrid points for Sobol' indices

    Use the "hybrid point" design (Sobol', 1999) to support estimating Sobol'
    indices. Use gr.tran_sobol() to post-process the results and compute
    estimates.

    Args:
        model (gr.Model): Model to evaluate; must have CopulaIndependence
        n (numeric): Number of Monte Carlo samples to draw
        plan (str): Sobol' index to compute; plan={"first", "total"}
        seed (int): Random seed to use
        df_det (DataFrame): Deterministic levels for evaluation; use "nom"
            for nominal deterministic levels.
        varname (str): Column name to give for the hybrid variable; default="hybrid_var"
        append (bool): Append results to the sample inputs?
        skip (bool): Skip evaluation of the functions?

    Returns:
        DataFrame: Results of evaluation or unevaluated design

    References:
        I.M. Sobol', "Sensitivity Estimates for Nonlinear Mathematical Models"
        (1999) MMCE, Vol 1.

    Examples:

        >>> import grama as gr
        >>> md = gr.make_cantilever_beam()
        >>> df_first = md >> gr.ev_hybrid(df_det="nom", plan="first")
        >>> df_first >> gr.tf_sobol()
        >>>
        >>> df_total = md >> gr.ev_hybrid(df_det="nom", plan="total")
        >>> df_total >> gr.tf_sobol()

    """
    ## Check invariants
    if not isinstance(model.density.copula, CopulaIndependence):
        raise ValueError(
            "model must have CopulaIndependence structure;\n" +
            "Sobol' indices only defined for independent variables")

    ## Set seed only if given
    if seed is not None:
        set_seed(seed)

    if not isinstance(n, Integral):
        print("eval_hybrid() is rounding n...")
        n = int(n)

    ## Draw hybrid points
    X = random((n, model.n_var_rand))
    Z = random((n, model.n_var_rand))

    ## Reserve space
    Q_all = zeros((n * (model.n_var_rand + 1), model.n_var_rand))
    Q_all[:n] = X  # Base samples
    C_var = ["_"] * (n * (model.n_var_rand + 1))

    ## Interleave samples
    for i_in in range(model.n_var_rand):
        i_start = (i_in + 1) * n
        i_end = (i_in + 2) * n

        if plan == "first":
            Q_all[i_start:i_end, :] = Z
            Q_all[i_start:i_end, i_in] = X[:, i_in]
        elif plan == "total":
            Q_all[i_start:i_end, :] = X
            Q_all[i_start:i_end, i_in] = Z[:, i_in]
        else:
            raise ValueError("plan must be `first` or `total`")

        C_var[i_start:i_end] = [model.var_rand[i_in]] * n

    ## Construct sampling plan
    df_pr = DataFrame(data=Q_all, columns=model.var_rand)
    ## Convert samples to desired marginals
    df_rand = model.density.pr2sample(df_pr)
    df_rand[varname] = C_var
    ## Construct outer-product DOE
    df_samp = model.var_outer(df_rand, df_det=df_det)

    if skip:
        with catch_warnings():
            simplefilter("ignore")
            df_samp._meta = dict(
                type="eval_hybrid",
                varname=varname,
                plan=plan,
                var_rand=model.var_rand,
                out=model.out,
            )

        return df_samp

    df_res = eval_df(model, df=df_samp, append=append)
    with catch_warnings():
        simplefilter("ignore")
        df_res._meta = dict(
            type="eval_hybrid",
            varname=varname,
            plan=plan,
            var_rand=model.var_rand,
            out=model.out,
        )

    return df_res
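The block structure of the hybrid design can be read off the loop above; a worked illustration for two random variables (Xi/Zi denote column i of the base and shifted samples; illustrative, not from the source):

## plan="first":                    plan="total":
##   rows 0..n-1   : [X0, X1]         rows 0..n-1   : [X0, X1]
##   rows n..2n-1  : [X0, Z1]         rows n..2n-1  : [Z0, X1]
##   rows 2n..3n-1 : [Z0, X1]         rows 2n..3n-1 : [X0, Z1]

Under plan="first" each block keeps only the tagged variable from the base sample X, so agreement with the base outputs isolates that variable's first-order effect; plan="total" does the reverse.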
Example 7
def eval_monte_carlo(model,
                     n=1,
                     df_det=None,
                     seed=None,
                     append=True,
                     skip=False):
    r"""Monte Carlo evaluation

    Evaluates a given model on Monte Carlo samples drawn from the model's
    density. Generates the outer product with deterministic levels.

    Args:
        model (gr.Model): Model to evaluate
        n (numeric): Number of Monte Carlo samples to draw
        df_det (DataFrame): Deterministic levels for evaluation; use "nom"
            for nominal deterministic levels.
        seed (int): Random seed to use
        append (bool): Append results to random values?
        skip (bool): Skip evaluation of the functions?

    Returns:
        DataFrame: Results of evaluation or unevaluated design

    Examples:

        >>> import grama as gr
        >>> from grama.models import make_test
        >>> md = make_test()
        >>> df = md >> gr.ev_monte_carlo(n=1e2, df_det="nom")
        >>> df.describe()

    """
    ## Set seed only if given
    if seed is not None:
        set_seed(seed)

    ## Ensure sample count is int
    if not isinstance(n, Integral):
        print("eval_monte_carlo() is rounding n...")
        n = int(n)

    ## Draw samples
    df_rand = model.density.sample(n=n, seed=seed)
    ## Construct outer-product DOE
    df_samp = model.var_outer(df_rand, df_det=df_det)

    if skip:
        ## Evaluation estimate
        runtime_est = model.runtime(df_samp.shape[0])
        if runtime_est > 0:
            print(
                "Estimated runtime for design with model ({0:1}):\n  {1:4.3} sec"
                .format(model.name, runtime_est))
        else:
            print(
                "Design runtime estimates unavailable; model has no timing data."
            )

        ## Attach metadata
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            df_samp._plot_info = {
                "type": "monte_carlo_inputs",
                "var": model.var_rand,
            }

        return df_samp
    else:
        df_res = gr.eval_df(model, df=df_samp, append=append)

        ## Attach metadata
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            df_res._plot_info = {
                "type": "monte_carlo_outputs",
                "out": model.out
            }

        return df_res
Example 8
def fit_nls(
    df_data,
    md=None,
    out=None,
    var_fix=None,
    df_init=None,
    verbose=True,
    uq_method=None,
    **kwargs,
):
    r"""Fit a model with Nonlinear Least Squares (NLS)

    Estimate best-fit variable levels with nonlinear least squares (NLS), and
    return an executable model with those frozen best-fit levels. Optionally,
    fit a distribution on the parameters to quantify parametric uncertainty.

    Note: This is a *synonym* for eval_nls(); see the documentation for
    eval_nls() for keyword argument options available beyond those listed here.

    Args:
        df_data (DataFrame): Data for estimating best-fit variable levels.
            Variables not found in df_data are optimized for fitting.
        md (gr.Model): Model to analyze. All model variables
            selected for fitting must be bounded or random. Deterministic
            variables may have semi-infinite bounds.
        var_fix (list or None): Variables to fix to nominal levels. Note that
            variables with domain width zero will automatically be fixed.
        df_init (DataFrame): Initial guesses for parameters; overrides n_restart
        n_restart (int): Number of restarts to try; the first try is at
            the nominal conditions of the model. Returned model will use
            the least-error parameter set among restarts tested.
        n_maxiter (int): Optimizer maximum iterations
        verbose (bool): Print best-fit parameters to console?
        uq_method (str OR None): If string, select method to quantify parameter
            uncertainties. If None, provide best-fit values only. Methods:
            uq_method = "linpool": assume normal errors; linearly approximate
                parameter effects; equally pool variance matrices for each output

    Returns:
        gr.Model: Model for evaluation with best-fit variables frozen to
            optimized levels.

    Examples:
        >>> import grama as gr
        >>> from grama.data import df_trajectory_windowed
        >>> from grama.models import make_trajectory_linear
        >>> X = gr.Intention()
        >>>
        >>> md_trajectory = make_trajectory_linear()
        >>> md_fitted = (
        >>>     df_trajectory_windowed
        >>>     >> gr.ft_nls(
        >>>         md=md_trajectory,
        >>>         uq_method="linpool",
        >>>     )
        >>> )
    """
    ## Check invariants
    if md is None:
        raise ValueError("Must provide model md")

    ## Check `out` invariants
    if out is None:
        out = md.out
        print("... fit_nls setting out = {}".format(out))

    ## Determine variables to be fixed
    if var_fix is None:
        var_fix = set()
    else:
        var_fix = set(var_fix)
    for var in md.var_det:
        wid = md.domain.get_width(var)
        if wid == 0:
            var_fix.add(var)

    ## Run eval_nls to fit model parameter values
    df_fit = eval_nls(
        md,
        df_data=df_data,
        var_fix=var_fix,
        df_init=df_init,
        append=True,
        verbose=verbose,
        **kwargs,
    )
    ## Select best-fit values
    df_best = df_fit.sort_values(by="mse",
                                 axis=0).iloc[[0]].reset_index(drop=True)
    if verbose:
        print(df_fit.sort_values(by="mse", axis=0))

    ## Determine variables that were fitted
    var_fitted = list(set(md.var).intersection(set(df_best.columns)))
    var_remain = list(set(md.var).difference(set(var_fitted)))

    if len(var_remain) == 0:
        raise ValueError("Resulting model is constant!")

    ## Assemble and return fitted model
    if md.name is None:
        name = "(Fitted Model)"
    else:
        name = md.name + " (Fitted)"

    ## Calibrate parametric uncertainty, if requested
    if uq_method == "linpool":
        ## Precompute data
        df_nom = eval_nominal(md, df_det="nom")
        df_base = tran_outer(
            df_data, concat((df_best[var_fitted], df_nom[var_fix]), axis=1))
        df_pred = eval_df(md, df=df_base)
        df_grad = eval_grad_fd(md, df_base=df_base, var=var_fitted)

        ## Pool variance matrices
        n_obs = df_data.shape[0]
        n_fitted = len(var_fitted)
        Sigma_pooled = zeros((n_fitted, n_fitted))

        for output in out:
            ## Approximate sigma_sq
            sigma_sq = npsum(
                nppow(df_data[output].values - df_pred[output].values,
                      2)) / (n_obs - n_fitted)
            ## Approximate (pseudo)-inverse hessian
            var_grad = list(map(lambda v: "D" + output + "_D" + v, var_fitted))
            Z = df_grad[var_grad].values
            Hinv = pinv(Z.T.dot(Z), hermitian=True)

            ## Add variance matrix to pooled Sigma
            Sigma_pooled = Sigma_pooled + sigma_sq * Hinv / n_fitted

        ## Check model for identifiability
        kappa_out = cond(Sigma_pooled)
        if kappa_out > 1e10:
            warn(
                "Model is locally unidentifiable as measured by the " +
                "condition number of the pooled covariance matrix; " +
                "kappa = {}".format(kappa_out),
                RuntimeWarning,
            )

        ## Convert to std deviations and correlation
        sigma_comp = npsqrt(diag(Sigma_pooled))
        corr_mat = Sigma_pooled / (atleast_2d(sigma_comp).T.dot(
            atleast_2d(sigma_comp)))
        corr_data = []
        I, J = triu_indices(n_fitted, k=1)
        for ind in range(len(I)):
            i = I[ind]
            j = J[ind]
            corr_data.append([var_fitted[i], var_fitted[j], corr_mat[i, j]])
        df_corr = DataFrame(data=corr_data, columns=["var1", "var2", "corr"])

        ## Assemble marginals
        marginals = {}
        for ind, var_ in enumerate(var_fitted):
            marginals[var_] = {
                "dist": "norm",
                "loc": df_best[var_].values[0],
                "scale": sigma_comp[ind],
            }

        ## Construct model with Gaussian copula
        if len(var_fix) > 0:
            md_res = (
                Model(name)
                >> cp_function(
                    lambda x: df_nom[var_fix].values,
                    var=set(var_remain).difference(var_fix),
                    out=var_fix,
                    name="Fix variable levels",
                )
                >> cp_md_det(md=md)
                >> cp_marginals(**marginals)
                >> cp_copula_gaussian(df_corr=df_corr)
            )
        else:
            md_res = (
                Model(name)
                >> cp_md_det(md=md)
                >> cp_marginals(**marginals)
                >> cp_copula_gaussian(df_corr=df_corr)
            )

    ## Return deterministic model
    elif uq_method is None:
        md_res = (
            Model(name)
            >> cp_function(
                lambda x: df_best[var_fitted].values,
                var=var_remain,
                out=var_fitted,
                name="Fix variable levels",
            )
            >> cp_md_det(md=md)
        )

    else:
        raise ValueError(
            "uq_method option {} not recognized".format(uq_method))

    return md_res
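For reference, the "linpool" branch above implements a standard linearized-NLS covariance estimate. Restating what the loop computes (no new assumptions), with Z_out the finite-difference Jacobian of one output with respect to the fitted variables:

## sigma_out^2  = SSR_out / (n_obs - n_fitted)          # per-output noise estimate
## Sigma_out    = sigma_out^2 * pinv(Z_out.T @ Z_out)   # linearized covariance
## Sigma_pooled = sum(Sigma_out over outputs) / n_fitted

Sigma_pooled is then split into per-variable standard deviations (its diagonal) and a correlation matrix, which define the Gaussian-copula model returned.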
Example 9
def eval_contour(
    model,
    var=None,
    out=None,
    df=None,
    levels=None,
    n_side=20,
    n_levels=5,
):
    r"""Generate contours from a model

    Generates contours from a model. Evaluates the model on a dense grid,
    then runs marching squares to generate contours. Supports targeting
    multiple outputs and handling auxiliary inputs not included in the
    contour map.

    Args:
        model (gr.Model): Model to evaluate.
        var (list of str): Model inputs to target; must provide exactly
            two inputs, and both must have finite domain width.
        out (list of str): Model output(s) for contour generation.
        df (DataFrame or None): Levels for model variables not included
            in var (auxiliary inputs). If var covers all of model.var,
            df may be None.
        levels (dict): Specific output levels for contour generation;
            overrides n_levels.
        n_side (int): Side resolution for grid; n_side**2 total evaluations.
        n_levels (int): Number of contour levels.

    Returns:
        DataFrame: Points along contours, organized by output and
            auxiliary variable levels.

    Examples::

        import grama as gr
        ## Multiple outputs
        (
            gr.Model()
            >> gr.cp_vec_function(
                fun=lambda df: gr.df_make(
                    f=df.x**2 + df.y**2,
                    g=df.x + df.y,
                ),
                var=["x", "y"],
                out=["f", "g"],
            )
            >> gr.cp_bounds(
                x=(-1, +1),
                y=(-1, +1),
            )
            >> gr.ev_contour(
                var=["x", "y"],
                out=["f", "g"],
            )
            # Contours with no auxiliary variables can autoplot
            >> gr.pt_auto()
        )

        ## Auxiliary inputs
        (
            gr.Model()
            >> gr.cp_vec_function(
                fun=lambda df: gr.df_make(
                    f=df.c * df.x + (1 - df.c) * df.y,
                ),
                var=["x", "y"],
                out=["f", "g"],
            )
            >> gr.cp_bounds(
                x=(-1, +1),
                y=(-1, +1),
            )
            >> gr.ev_contour(
                var=["x", "y"],
                out=["f"],
                df=gr.df_make(c=[0, 1])
            )

            # Contours with auxiliary variables should be manually plotted
            >> gr.ggplot(gr.aes("x", "y"))
            + gr.geom_segment(gr.aes(xend="x_end", yend="y_end", group="level", color="c"))
        )

    """
    ## Check invariants
    invariants_eval_model(model)
    invariants_eval_df(df, acc_none=True)
    # Argument given
    if var is None:
        raise ValueError("No `var` given")
    # Correct number of inputs
    if len(var) != 2:
        raise ValueError("Must provide exactly 2 inputs in `var`.")
    # Inputs available
    var_diff = set(var).difference(set(model.var))
    if len(var_diff) > 0:
        raise ValueError(
            "`var` must be a subset of model.var; missing: {}".format(
                var_diff))
    # All inputs supported
    var_diff = set(model.var).difference(set(var))
    if len(var_diff) > 0:
        if df is None:
            raise ValueError(
                "Must provide values for remaining model variables using df; "
                + "missing values: {}".format(var_diff))
        # Drop the swept variables
        df = df.drop(columns=var, errors="ignore")

        # Check for unsupported inputs
        var_diff2 = var_diff.difference(set(df.columns))
        if len(var_diff2) > 0:
            raise ValueError(
                "All model variables need values in provided df; " +
                "missing values: {}".format(var_diff2))

        has_aux = df.shape[0] > 1
    else:
        has_aux = False

    # Finite bound width
    if not all(
        isfinite(model.domain.get_width(v)) and (model.domain.get_width(v) > 0)
        for v in var
    ):
        raise ValueError(
            "All model bounds for `var` must be finite and nonzero")

    # Argument given
    if out is None:
        raise ValueError("No `out` given")
    # Outputs available
    out_diff = set(out).difference(set(model.out))
    if len(out_diff) > 0:
        raise ValueError(
            "`out` must be a subset of model.out; missing: {}".format(
                out_diff))

    ## Generate data
    xv = linspace(*model.domain.get_bound(var[0]), n_side)
    yv = linspace(*model.domain.get_bound(var[1]), n_side)
    df_x = DataFrame({var[0]: xv})
    df_y = DataFrame({var[1]: yv})
    df_input = (df_x >> tf_outer(df_outer=df_y))

    # Create singleton level if necessary
    if df is None:
        df = DataFrame({"_foo": [0]})

    ## Loop over provided auxiliary levels
    df_res = DataFrame()
    for i in range(df.shape[0]):
        df_in_tmp = (df_input >> tf_outer(df_outer=df.iloc[[i]]))
        df_out = eval_df(
            model,
            df=df_in_tmp,
        )

        ## Set output threshold levels
        if levels is None:
            # Do not overwrite `levels`, to adapt per loop
            levels_wk = {
                o: linspace(df_out[o].min(), df_out[o].max(), n_levels + 2)[1:-1]
                for o in out
            }
        else:
            levels_wk = levels

        ## Run marching squares
        # Output quantity
        for o in out:
            # Reshape data
            Data = reshape(df_out[o].values, (n_side, n_side))
            # Threshold level
            for t in levels_wk[o]:
                # Run marching squares
                segments = marching_square(xv, yv, Data, t)
                sqdata = array(segments).squeeze()

                if len(sqdata) > 0:
                    # Package
                    df_tmp = DataFrame(
                        data=sqdata,
                        columns=[
                            var[0], var[1], var[0] + "_end", var[1] + "_end"
                        ],
                    )
                    df_tmp["out"] = [o] * df_tmp.shape[0]
                    df_tmp["level"] = [t] * df_tmp.shape[0]
                    df_tmp = (df_tmp >> tf_outer(df_outer=df.iloc[[i]]))

                    df_res = concat((df_res, df_tmp), axis=0)
                else:
                    warn("Output {0:} had no contours at level {1:}".format(
                        o,
                        t,
                    ))

    ## Remove dummy column, if present
    if "_foo" in df_res.columns:
        df_res.drop("_foo", axis=1, inplace=True)

    # Drop index
    df_res = df_res.reset_index(drop=True)

    ## Attach metadata
    with catch_warnings():
        simplefilter("ignore")
        df_res._plot_info = {
            "type": "contour",
            "var": var,
            "out": "out",
            "level": "level",
            "aux": has_aux,
        }

    ## Return the results
    return df_res
Example 10
def fun(x):
    """Evaluate model output `out` at x, signed by `sign`; closure over
    `model`, `out`, and `sign` (see the make_fun factory in Example 11)."""
    df = DataFrame([x], columns=model.var)
    df_res = eval_df(model, df)
    return sign * df_res[out]
Example 11
def eval_min(
    model,
    out_min=None,
    out_geq=None,
    out_leq=None,
    out_eq=None,
    method="SLSQP",
    tol=1e-6,
    n_restart=1,
    n_maxiter=50,
    seed=None,
    df_start=None,
):
    r"""Constrained minimization using functions from a model

    Perform constrained minimization using functions from a model. Model must
    have deterministic variables only.

    Wrapper for scipy.optimize.minimize

    Args:
        model (gr.Model): Model to analyze. All model variables must be
            deterministic.
        out_min (str): Output to use as minimization objective.
        out_geq (None OR list of str): Outputs to use as geq constraints; out >= 0
        out_leq (None OR list of str): Outputs to use as leq constraints; out <= 0
        out_eq (None OR list of str): Outputs to use as equality constraints; out == 0

        method (str): Optimization method; see the documentation for
            scipy.optimize.minimize for options.
        tol (float): Optimization objective convergence tolerance
        n_restart (int): Number of restarts; beyond the first, random
            restarts are used.
        n_maxiter (int): Optimizer maximum iterations
        seed (int): Random seed for generating restart points
        df_start (None or DataFrame): Specific starting values to use;
            overrides n_restart if provided.

    Returns:
        DataFrame: Results of optimization

    Examples:
        >>> import grama as gr
        >>> md = (
        >>>     gr.Model("Constrained Rosenbrock")
        >>>     >> gr.cp_function(
        >>>         fun=lambda x: (1 - x[0])**2 + 100*(x[1] - x[0]**2)**2,
        >>>         var=["x", "y"],
        >>>         out=["c"],
        >>>     )
        >>>     >> gr.cp_function(
        >>>         fun=lambda x: (x[0] - 1)**3 - x[1] + 1,
        >>>         var=["x", "y"],
        >>>         out=["g1"],
        >>>     )
        >>>     >> gr.cp_function(
        >>>         fun=lambda x: x[0] + x[1] - 2,
        >>>         var=["x", "y"],
        >>>         out=["g2"],
        >>>     )
        >>>     >> gr.cp_bounds(
        >>>         x=(-1.5, +1.5),
        >>>         y=(-0.5, +2.5),
        >>>     )
        >>> )
        >>> md >> gr.ev_min(
        >>>     out_min="c",
        >>>     out_leq=["g1", "g2"]
        >>> )

    """
    ## Check that model has only deterministic variables
    if model.n_var_rand > 0:
        raise ValueError("model must have no random variables")
    ## Check that objective is in model
    if not (out_min in model.out):
        raise ValueError("model must contain out_min")
    ## Check that constraints are in model
    if not (out_geq is None):
        out_diff = set(out_geq).difference(set(model.out))
        if len(out_diff) > 0:
            raise ValueError(
                "model must contain each out_geq; missing {}".format(out_diff))
    if not (out_leq is None):
        out_diff = set(out_leq).difference(set(model.out))
        if len(out_diff) > 0:
            raise ValueError(
                "model must contain each out_leq; missing {}".format(out_diff))
    if not (out_eq is None):
        out_diff = set(out_eq).difference(set(model.out))
        if len(out_diff) > 0:
            raise ValueError(
                "model must contain each out_eq; missing {}".format(out_diff))

    ## Formulate initial guess
    df_nom = eval_nominal(model, df_det="nom", skip=True)
    if df_start is None:
        df_start = df_nom[model.var]

        if n_restart > 1:
            if not (seed is None):
                set_seed(seed)
            ## Collect sweep-able deterministic variables
            var_sweep = list(
                filter(
                    lambda v: isfinite(model.domain.get_width(v))
                    & (model.domain.get_width(v) > 0),
                    model.var_det,
                ))
            ## Generate pseudo-marginals
            dicts_var = {}
            for v in var_sweep:
                dicts_var[v] = {
                    "dist": "uniform",
                    "loc": model.domain.get_bound(v)[0],
                    "scale": model.domain.get_width(v),
                }
            ## Overwrite model
            md_sweep = comp_marginals(model, **dicts_var)
            md_sweep = comp_copula_independence(md_sweep)
            ## Generate random start points
            df_rand = eval_sample(
                md_sweep,
                n=n_restart - 1,
                df_det="nom",
                skip=True,
            )
            df_start = concat((df_start, df_rand[model.var]),
                              axis=0).reset_index(drop=True)
    else:
        n_restart = df_start.shape[0]

    ## Factory for wrapping model's output
    def make_fun(out, sign=+1):
        def fun(x):
            df = DataFrame([x], columns=model.var)
            df_res = eval_df(model, df)
            return sign * df_res[out]

        return fun

    ## Create helper functions for constraints
    constraints = []

    if not (out_geq is None):
        for out in out_geq:
            constraints.append({
                "type": "ineq",
                "fun": make_fun(out),
            })

    if not (out_leq is None):
        for out in out_leq:
            constraints.append({
                "type": "ineq",
                "fun": make_fun(out, sign=-1),
            })

    if not (out_eq is None):
        for out in out_eq:
            constraints.append({
                "type": "eq",
                "fun": make_fun(out),
            })

    ## Parse the bounds for minimize
    bounds = list(map(lambda k: model.domain.bounds[k], model.var))

    ## Run optimization
    df_res = DataFrame()
    for i in range(n_restart):
        x0 = df_start[model.var].iloc[i].values
        res = minimize(
            make_fun(out_min),
            x0,
            args=(),
            method=method,
            jac=False,
            tol=tol,
            options={
                "maxiter": n_maxiter,
                "disp": False
            },
            constraints=constraints,
            bounds=bounds,
        )

        df_opt = df_make(
            **dict(zip(model.var, res.x)),
            **dict(zip(map(lambda s: s + "_0", model.var), x0)),
        )
        df_tmp = eval_df(model, df=df_opt)
        df_tmp["success"] = [res.success]
        df_tmp["message"] = [res.message]
        df_tmp["n_iter"] = [res.nit]

        df_res = concat((df_res, df_tmp), axis=0).reset_index(drop=True)

    return df_res