Example #1
def make_tlmc_model_2f1m():

    import numpy as np
    import grama as gr

    def fun_lev0(x):  # evaluate level 0 function at x, record cost
        P = x
        cost = 1
        return P, cost

    def fun_lev1(x):  # evaluate level 1 function at x, record cost
        P = np.sin(x)
        cost = 2
        return P, cost

    md = gr.Model(name="tlmc_model") >> \
        gr.cp_function(
            fun=fun_lev0,
            var=["x"],
            out=["P0", "cost0"],
            name="level 0 function") >> \
        gr.cp_function(
            fun=fun_lev1,
            var=["x"],
            out=["P1", "cost1"],
            name="level 1 function") >> \
        gr.cp_marginals(
            x={"dist": "norm", "loc": 0, "scale": 1, "sign": +1}) >> \
        gr.cp_copula_independence()

    return md
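A minimal usage sketch for the two-level model above: draw a handful of samples of x, evaluate both level functions, and compare the level outputs and accumulated cost. The call to gr.eval_monte_carlo is an assumption about grama's sampling interface (it is not part of this example); substitute your version's sampler if it differs.

import grama as gr

md = make_tlmc_model_2f1m()
## Sample x ~ N(0, 1) and evaluate both level functions for each draw
## (eval_monte_carlo is an assumed sampler name; adjust to your grama version)
df_res = gr.eval_monte_carlo(md, n=100, df_det="nom")
## Mean discrepancy between the two levels, and the total evaluation cost
print((df_res.P1 - df_res.P0).mean())
print((df_res.cost0 + df_res.cost1).sum())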
Example #2
def make_poly():
    md = gr.Model("Polynomials") >> \
         gr.cp_function(fun=lambda x: x, var=1, out=1, name="linear") >> \
         gr.cp_function(fun=lambda x: x**2, var=1, out=1, name="quadratic") >> \
         gr.cp_function(fun=lambda x: x**3, var=1, out=1, name="cubic") >> \
         gr.cp_marginals(
             x0={"dist": "uniform", "loc": -1, "scale": 2},
             x1={"dist": "uniform", "loc": -1, "scale": 2},
             x2={"dist": "uniform", "loc": -1, "scale": 2}
         ) >> \
         gr.cp_copula_independence()

    return md
Example #3
def make_trajectory_linear():
    ## Assemble model
    md_trajectory = (gr.Model("Trajectory Model") >> gr.cp_function(
        fun=fun_x,
        var=var_list,
        out=["x"],
        name="x_trajectory",
    ) >> gr.cp_function(
        fun=fun_y,
        var=var_list,
        out=["y"],
        name="y_trajectory",
    ) >> gr.cp_bounds(
        u0=[0.1, np.Inf], v0=[0.1, np.Inf], tau=[0.05, np.Inf], t=[0, 600]))

    return md_trajectory
Example #4
def fit_nls(
    df_data,
    md=None,
    verbose=True,
    **kwargs,
):
    r"""Fit a model with Nonlinear Least Squares (NLS)

    Estimate best-fit variable levels with nonlinear least squares (NLS), and
    return an executable model with those frozen best-fit levels.

    Note: This is a *synonym* for eval_nls(); see the documentation for
    eval_nls() for keyword argument options available beyond those listed here.

    Args:
        df_data (DataFrame): Data for estimating best-fit variable levels.
            Variables not found in df_data are optimized for fitting.
        md (gr.Model): Model to analyze. All model variables
            selected for fitting must be bounded or random. Deterministic
            variables may have semi-infinite bounds.

    Returns:
        gr.Model: Model for evaluation with best-fit variables frozen to
            optimized levels.

    """
    ## Check invariants
    if md is None:
        raise ValueError("Must provide model md")

    ## Run eval_nls to fit model parameter values
    df_fit = eval_nls(md, df_data=df_data, append=True, **kwargs)
    ## Select best-fit values
    df_best = df_fit.sort_values(by="mse", axis=0).iloc[[0]]
    if verbose:
        print(df_best)

    ## Determine variables to fix
    var_fixed = list(set(md.var).intersection(set(df_best.columns)))
    var_remain = list(set(md.var).difference(set(var_fixed)))

    if len(var_remain) == 0:
        raise ValueError("Resulting model is constant!")

    ## Assemble and return fitted model
    if md.name is None:
        name = "(Fitted Model)"
    else:
        name = md.name + " (Fitted)"

    md_res = (Model(name) >> cp_function(
        lambda x: df_best[var_fixed].values,
        var=var_remain,
        out=var_fixed,
        name="Fix variable levels",
    ) >> cp_md_det(md=md))

    return md_res
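A hedged usage sketch for fit_nls() above. The names df_data and md_trajectory are placeholders for your own observations and model (they are not defined in this example); eval_df follows the interface used in Example #12.

## Fit the free variables of a model against observed data
## (df_data and md_trajectory are placeholder names, not defined here)
md_fit = fit_nls(df_data, md=md_trajectory)
## The returned model has the best-fit levels frozen as outputs; evaluate it
## back on the observed inputs to compare predictions against the data
df_pred = eval_df(md_fit, df=df_data[md_fit.var])
print(df_pred)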
Example #5
def make_test():
    md = Model() >> \
         cp_function(fun=fun, var=3, out=1) >> \
         cp_bounds(x0=(-1,+1), x1=(-1,+1), x2=(-1,+1)) >> \
         cp_marginals(
             x0={"dist": "uniform", "loc": -1, "scale": 2},
             x1={"dist": "uniform", "loc": -1, "scale": 2}
         ) >> \
         cp_copula_independence()

    return md
Example #6
def make_ishigami():
    """Ishigami function

    The Ishigami function is commonly used as a test case for estimating Sobol'
    indices.

    Model definition:

        y0 = sin(x1) + a sin(x2)^2 + b x3^4 sin(x1)

        x1 ~ U[-pi, +pi]

        x2 ~ U[-pi, +pi]

        x3 ~ U[-pi, +pi]

    Sobol' index data:

        V[y0] = a^2/8 + b pi^4/5 + b^2 pi^8/18 + 0.5

        T1 = 0.5(1 + b pi^4/5)^2

        T2 = a^2/8

        T3 = 0

        Tt1 = 0.5(1 + b pi^4/5)^2 + 8 b^2 pi^8/225

        Tt2 = a^2/8

        Tt3 = 8 b^2 pi^8/225

    References:
        T. Ishigami and T. Homma, “An importance quantification technique in uncertainty analysis for computer models,” In the First International Symposium on Uncertainty Modeling and Analysis, Maryland, USA, Dec. 3–5, 1990. DOI:10.1109/SUMA.1990.151285
    """

    md = gr.Model(name = "Ishigami Function") >> \
        gr.cp_function(
            fun=fun,
            var=["a", "b", "x1", "x2", "x3"],
            out=1
        ) >> \
        gr.cp_bounds(a=(6.0, 8.0), b=(0, 0.2)) >> \
        gr.cp_marginals(
            x1={"dist": "uniform", "loc": -np.pi, "scale": 2 * np.pi},
            x2={"dist": "uniform", "loc": -np.pi, "scale": 2 * np.pi},
            x3={"dist": "uniform", "loc": -np.pi, "scale": 2 * np.pi}
        ) >> \
        gr.cp_copula_independence()

    return md
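The Sobol' data quoted in the docstring can be reproduced directly. The sketch below evaluates those closed-form expressions and normalizes by the total variance; the defaults a=7, b=0.1 are simply the midpoints of the bounds used above, not values fixed by the model.

import numpy as np

def ishigami_sobol_reference(a=7.0, b=0.1):
    """Closed-form variance and Sobol' indices from the docstring formulas."""
    V = a**2 / 8 + b * np.pi**4 / 5 + b**2 * np.pi**8 / 18 + 0.5
    T1 = 0.5 * (1 + b * np.pi**4 / 5)**2
    T2 = a**2 / 8
    T3 = 0.0
    Tt1 = T1 + 8 * b**2 * np.pi**8 / 225
    Tt2 = T2
    Tt3 = 8 * b**2 * np.pi**8 / 225
    ## Normalize the partial variances by V to get first-order and total indices
    return dict(
        V=V,
        S1=T1 / V, S2=T2 / V, S3=T3 / V,
        St1=Tt1 / V, St2=Tt2 / V, St3=Tt3 / V,
    )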
Example #7
def make_prlc():
    md_RLC_det = (
        gr.Model("RLC Circuit")
        >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(omega0=np.sqrt(1 / df.L / df.C)),
            var=["L", "C"],
            out=["omega0"],
        )
        >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(Q=df.omega0 * df.R * df.C),
            name="parallel RLC",
            var=["omega0", "R", "C"],
            out=["Q"],
        )
        >> gr.cp_bounds(
            R=(1e-3, 1e0),
            L=(1e-9, 1e-3),
            C=(1e-3, 100),
        )
    )

    return md_RLC_det
Example #8
def make_linear_normal():
    md = Model("Linear-Normal Reliability Problem") >> \
         cp_function(
             fun=limit_state,
             var=2,
             out=["g_linear"],
             name="limit state"
         ) >> \
         cp_marginals(
             x0={"dist": "norm", "loc": 0, "scale": 1, "sign":+1},
             x1={"dist": "norm", "loc": 0, "scale": 1, "sign":+1}
         ) >> \
         cp_copula_independence()

    return md
Example #9
def make_plate_buckle():
    md = (gr.Model("Plate Buckling") >> gr.cp_function(
        fun=function_buckle_state,
        var=["t", "h", "w", "E", "mu", "L"],
        out=["g_buckle"],
        name="limit state",
    ) >> gr.cp_bounds(
        t=(0.5 * THICKNESS, 2 * THICKNESS),
        h=(6, 18),
        w=(6, 18),
        L=(LOAD / 2, LOAD * 2),
    ) >> gr.cp_marginals(E=gr.marg_named(df_stang.E, "norm"),
                         mu=gr.marg_named(df_stang.mu, "beta")) >>
          gr.cp_copula_gaussian(df_data=df_stang))

    return md
Example #10
def md_gen(fun_gen, num_levs):
    import grama as gr
    models = list()
    costs = list()
    for i in range(num_levs):
        md = gr.Model(name="md{}".format(i)) >> \
            gr.cp_function(
                fun=fun_gen(10**(i + 1)),
                var=["x"],
                out=["P"],
                name="level {} function".format(i)) >> \
            gr.cp_marginals(
                x={"dist": "norm", "loc": 0.5, "scale": 0.2, "sign": +1}) >> \
            gr.cp_copula_independence()

        models.append(md)
        md_cost = i + 1
        costs.append(md_cost)
    return models, costs
Example #11
def make_tlmc_model_1f1m():

    import numpy as np
    import grama as gr

    def fun_lev(args):  # evaluate level "lev" function at x, record cost
        level, x = args

        def fun_lev0(x):  # evaluate level 0 function at x, record cost
            P = x
            cost = 1
            return P, cost

        def fun_lev1(x):  # evaluate level 1 function at x, record cost
            P = np.sin(x)
            cost = 2
            return P, cost

        if level == 0:
            fun = fun_lev0
        elif level == 1:
            fun = fun_lev1
        else:
            raise ValueError('Input level too high')
        P, cost = fun(x)

        return P, cost

    md = gr.Model(name="tlmc_model_1f1m") >> \
        gr.cp_function(
            fun=fun_lev,
            var=["level", "x"],
            out=["P", "cost"],
            name="level function") >> \
        gr.cp_marginals(
            x={"dist": "norm", "loc": 0, "scale": 1, "sign": +1}) >> \
        gr.cp_copula_independence()

    return md
Example #12
def fit_nls(
    df_data,
    md=None,
    out=None,
    var_fix=None,
    df_init=None,
    verbose=True,
    uq_method=None,
    **kwargs,
):
    r"""Fit a model with Nonlinear Least Squares (NLS)

    Estimate best-fit variable levels with nonlinear least squares (NLS), and
    return an executable model with those frozen best-fit levels. Optionally,
    fit a distribution on the parameters to quantify parametric uncertainty.

    Note: This is a *synonym* for eval_nls(); see the documentation for
    eval_nls() for keyword argument options available beyond those listed here.

    Args:
        df_data (DataFrame): Data for estimating best-fit variable levels.
            Variables not found in df_data are optimized for fitting.
        md (gr.Model): Model to analyze. All model variables
            selected for fitting must be bounded or random. Deterministic
            variables may have semi-infinite bounds.
        var_fix (list or None): Variables to fix to nominal levels. Note that
            variables with domain width zero will automatically be fixed.
        df_init (DataFrame): Initial guesses for parameters; overrides n_restart
        n_restart (int): Number of restarts to try; the first try is at
            the nominal conditions of the model. Returned model will use
            the least-error parameter set among restarts tested.
        n_maxiter (int): Optimizer maximum iterations
        verbose (bool): Print best-fit parameters to console?
        uq_method (str OR None): If string, select method to quantify parameter
            uncertainties. If None, provide best-fit values only. Methods:
            uq_method = "linpool": assume normal errors; linearly approximate
                parameter effects; equally pool variance matrices for each output

    Returns:
        gr.Model: Model for evaluation with best-fit variables frozen to
            optimized levels.

    Examples:
        >>> import grama as gr
        >>> from grama.data import df_trajectory_windowed
        >>> from grama.models import make_trajectory_linear
        >>> X = gr.Intention()
        >>>
        >>> md_trajectory = make_trajectory_linear()
        >>> md_fitted = (
        >>>     df_trajectory_windowed
        >>>     >> gr.ft_nls(
        >>>         md=md_trajectory,
        >>>         uq_method="linpool",
        >>>     )
        >>> )
    """
    ## Check invariants
    if md is None:
        raise ValueError("Must provide model md")

    ## Check `out` invariants
    if out is None:
        out = md.out
        print("... fit_nls setting out = {}".format(out))

    ## Determine variables to be fixed
    if var_fix is None:
        var_fix = set()
    else:
        var_fix = set(var_fix)
    for var in md.var_det:
        wid = md.domain.get_width(var)
        if wid == 0:
            var_fix.add(var)

    ## Run eval_nls to fit model parameter values
    df_fit = eval_nls(
        md,
        df_data=df_data,
        var_fix=var_fix,
        df_init=df_init,
        append=True,
        verbose=verbose,
        **kwargs,
    )
    ## Select best-fit values
    df_best = df_fit.sort_values(by="mse",
                                 axis=0).iloc[[0]].reset_index(drop=True)
    if verbose:
        print(df_fit.sort_values(by="mse", axis=0))

    ## Determine variables that were fitted
    var_fitted = list(set(md.var).intersection(set(df_best.columns)))
    var_remain = list(set(md.var).difference(set(var_fitted)))

    if len(var_remain) == 0:
        raise ValueError("Resulting model is constant!")

    ## Assemble and return fitted model
    if md.name is None:
        name = "(Fitted Model)"
    else:
        name = md.name + " (Fitted)"

    ## Calibrate parametric uncertainty, if requested
    if uq_method == "linpool":
        ## Precompute data
        df_nom = eval_nominal(md, df_det="nom")
        df_base = tran_outer(
            df_data, concat((df_best[var_fitted], df_nom[var_fix]), axis=1))
        df_pred = eval_df(md, df=df_base)
        df_grad = eval_grad_fd(md, df_base=df_base, var=var_fitted)

        ## Pool variance matrices
        n_obs = df_data.shape[0]
        n_fitted = len(var_fitted)
        Sigma_pooled = zeros((n_fitted, n_fitted))

        for output in out:
            ## Approximate sigma_sq
            sigma_sq = npsum(
                nppow(df_data[output].values - df_pred[output].values,
                      2)) / (n_obs - n_fitted)
            ## Approximate (pseudo)-inverse hessian
            var_grad = list(map(lambda v: "D" + output + "_D" + v, var_fitted))
            Z = df_grad[var_grad].values
            Hinv = pinv(Z.T.dot(Z), hermitian=True)

            ## Add variance matrix to pooled Sigma
            Sigma_pooled = Sigma_pooled + sigma_sq * Hinv / n_fitted

        ## Check model for identifiability
        kappa_out = cond(Sigma_pooled)
        if kappa_out > 1e10:
            warn(
                "Model is locally unidentifiable as measured by the " +
                "condition number of the pooled covariance matrix; " +
                "kappa = {}".format(kappa_out),
                RuntimeWarning,
            )

        ## Convert to std deviations and correlation
        sigma_comp = npsqrt(diag(Sigma_pooled))
        corr_mat = Sigma_pooled / (atleast_2d(sigma_comp).T.dot(
            atleast_2d(sigma_comp)))
        corr_data = []
        I, J = triu_indices(n_fitted, k=1)
        for ind in range(len(I)):
            i = I[ind]
            j = J[ind]
            corr_data.append([var_fitted[i], var_fitted[j], corr_mat[i, j]])
        df_corr = DataFrame(data=corr_data, columns=["var1", "var2", "corr"])

        ## Assemble marginals
        marginals = {}
        for ind, var_ in enumerate(var_fitted):
            marginals[var_] = {
                "dist": "norm",
                "loc": df_best[var_].values[0],
                "scale": sigma_comp[ind],
            }

        ## Construct model with Gaussian copula
        if len(var_fix) > 0:
            md_res = (Model(name) >> cp_function(
                lambda x: df_nom[var_fix].values,
                var=set(var_remain).difference(var_fix),
                out=var_fix,
                name="Fix variable levels",
            ) >> cp_md_det(md=md) >> cp_marginals(**marginals) >>
                      cp_copula_gaussian(df_corr=df_corr))
        else:
            md_res = (Model(name) >> cp_md_det(md=md) >> cp_marginals(
                **marginals) >> cp_copula_gaussian(df_corr=df_corr))

    ## Return deterministic model
    elif uq_method is None:
        md_res = (Model(name) >> cp_function(
            lambda x: df_best[var_fitted].values,
            var=var_remain,
            out=var_fitted,
            name="Fix variable levels",
        ) >> cp_md_det(md=md))

    else:
        raise ValueError(
            "uq_method option {} not recognized".format(uq_method))

    return md_res
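Continuing the docstring's Examples block: with uq_method="linpool" the returned model carries normal marginals and a Gaussian copula on the fitted parameters, so sampling it propagates parametric uncertainty to the outputs. The sampler name gr.eval_monte_carlo below is an assumption about grama's evaluation interface, not something defined by this function.

## md_fitted as constructed in the docstring example (gr.ft_nls, uq_method="linpool")
## Draw parameter samples and push them through the model for prediction spread
df_uq = gr.eval_monte_carlo(md_fitted, n=1000, df_det="nom")  # assumed sampler name
print(df_uq.describe())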
Example #13
def make_cantilever_beam():
    """Cantilever beam

    A standard reliability test-case, often used for benchmarking reliability
    analysis and design algorithms.

    Generally used in the following optimization problem:

        min_{w,t} c_area

        s.t.      P[g_stress <= 0] <= 1.35e-3

                  P[g_disp <= 0] <= 1.35e-3

                  1 <= w, t <= 4

    Deterministic Variables:
        w: Beam width
        t: Beam thickness
    Random Variables:
        H: Horizontal applied force
        V: Vertical applied force
        E: Elastic modulus
        Y: Yield stress
    Outputs:
        c_area: Cost; beam cross-sectional area
        g_stress: Limit state; stress
        g_disp: Limit state; tip displacement

    References:
        Wu, Y.-T., Shin, Y., Sues, R., and Cesare, M., "Safety-factor based approach for probability-based design optimization," American Institute of Aeronautics and Astronautics, Seattle, Washington, April 2001.
        Sues, R., Aminpour, M., and Shin, Y., "Reliability-based Multi-Disciplinary Optimization for Aerospace Systems," American Institute of Aeronautics and Astronautics, Seattle, Washington, April 2001.

    """

    md = gr.Model(name = "Cantilever Beam") >> \
         gr.cp_function(
             fun=function_area,
             var=["w", "t"],
             out=["c_area"],
             name="cross-sectional area",
             runtime=1.717e-7
         ) >> \
         gr.cp_function(
             fun=function_stress,
             var=["w", "t", "H", "V", "E", "Y"],
             out=["g_stress"],
             name="limit state: stress",
             runtime=8.88e-7
         ) >> \
         gr.cp_function(
             fun=function_displacement,
             var=["w", "t", "H", "V", "E", "Y"],
             out=["g_disp"],
             name="limit state: displacement",
             runtime=3.97e-6
         ) >> \
         gr.cp_bounds(
             w=(2, 4),
             t=(2, 4)
         ) >> \
         gr.cp_marginals(
             H={"dist": "norm", "loc": MU_H, "scale": TAU_H, "sign": +1},
             V={"dist": "norm", "loc": MU_V, "scale": TAU_V, "sign": +1},
             E={"dist": "norm", "loc": MU_E, "scale": TAU_E, "sign":  0},
             Y={"dist": "norm", "loc": MU_Y, "scale": TAU_Y, "sign": -1}
         ) >> \
         gr.cp_copula_independence()

    return md
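A hedged sketch of how the chance constraints in the docstring (P[g_stress <= 0] and P[g_disp <= 0]) might be estimated by plain Monte Carlo at a candidate design w = t = 3. gr.df_make matches the usage in Example #7; gr.eval_monte_carlo and passing a DataFrame for df_det are assumptions about grama's interface.

import grama as gr

md_beam = make_cantilever_beam()
## Sample the random loads and material properties at a fixed design (w=3, t=3)
## and estimate the two failure probabilities from the docstring's constraints
## (eval_monte_carlo is an assumed sampler name; adjust to your grama version)
df_mc = gr.eval_monte_carlo(md_beam, n=10000, df_det=gr.df_make(w=3, t=3))
print("P[g_stress <= 0] ~", (df_mc.g_stress <= 0).mean())
print("P[g_disp   <= 0] ~", (df_mc.g_disp <= 0).mean())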
Example #14

var_applied = ["L", "w", "t"]
out_applied = ["sig_app"]


def fun_limit(x):
    sig_cr, sig_app = x
    return sig_cr - sig_app


var_limit = ["sig_cr", "sig_app"]
out_limit = ["safety"]

## Build model
md_plate = (
    gr.Model("Plate under buckling load")
    >> gr.cp_function(
        fun=fun_critical, var=var_critical, out=out_critical, name="Critical")
    >> gr.cp_function(
        fun=fun_applied, var=var_applied, out=out_applied, name="Applied")
    >> gr.cp_function(
        fun=fun_limit, var=var_limit, out=out_limit, name="Safety")
    ## Deterministic variables
    >> gr.cp_bounds(
        t=(0.03, 0.12),      # Thickness
        w=(6, 18),           # Width
        h=(6, 18),           # Height
        L=(2.5e-1, 4.0e-1),  # Load
    )
    ## Random variables
    >> gr.cp_marginals(
        E=gr.marg_gkde(df_stang.E),
        mu=gr.marg_gkde(df_stang.mu),
    )
    ## Dependence
    >> gr.cp_copula_gaussian(df_data=df_stang)
)