Example #1
def make_prlc_rand():
    md_RLC_rand = (
        gr.Model("RLC with component tolerances") >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(
                Rr=df.R * (1 + df.dR),
                Lr=df.L * (1 + df.dL),
                Cr=df.C * (1 + df.dC),
            ),
            var=["R", "dR", "L", "dL", "C", "dC"],
            out=["Rr", "Lr", "Cr"],
        ) >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(omega0=np.sqrt(1 / df.Lr / df.Cr)),
            var=["Lr", "Cr"],
            out=["omega0"],
        ) >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(Q=df.omega0 * df.Rr * df.Cr),
            name="parallel RLC",
            var=["omega0", "Rr", "Cr"],
            out=["Q"]) >> gr.cp_bounds(
                R=(1e-3, 1e0),
                L=(1e-9, 1e-3),
                C=(1e-3, 100),
            ) >> gr.cp_marginals(
                dR=dict(dist="uniform",
                        loc=R_percent_lo,
                        scale=R_percent_up - R_percent_lo),
                dL=dict(dist="uniform",
                        loc=L_percent_lo,
                        scale=L_percent_up - L_percent_lo),
                dC=dict(dist="uniform",
                        loc=C_percent_lo,
                        scale=C_percent_up - C_percent_lo),
            ) >> gr.cp_copula_independence())

    return md_RLC_rand
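A minimal usage sketch. The factory closes over module-level tolerance constants (R_percent_lo, etc.) not shown in the snippet; the ±5% bands below are illustrative assumptions.

import numpy as np
import grama as gr

# Illustrative tolerance bands; the originals are defined elsewhere in the module
R_percent_lo, R_percent_up = -0.05, +0.05
L_percent_lo, L_percent_up = -0.05, +0.05
C_percent_lo, C_percent_up = -0.05, +0.05

md_rand = make_prlc_rand()
# Sample component tolerances at nominal R, L, C
df_rand = gr.eval_sample(md_rand, n=100, df_det="nom")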
Example #2
def make_plate_buckle():
    r"""Initialize a buckling plate model

    Variables (deterministic):
        w (in): Plate width
        h (in): Plate height
        t (in): Plate thickness
        m (-): Wavenumber
        L (kips): Applied (compressive) load;
            uniformly applied along top and bottom edges

    Variables (random):
        E (kips/in^2): Elasticity
        mu (-): Poisson's ratio

    Outputs:
        k_cr (-): Prefactor for buckling stress
        g_buckle (kips/in^2): Buckling limit state:
            critical stress - applied stress
    """
    md = (
        Model("Plate Buckling")
        >> cp_vec_function(
            fun=lambda df: df_make(
                k_cr=(df.m*df.h/df.w + df.w/df.m/df.h)**2
            ),
            var=["w", "h", "m"],
            out=["k_cr"],
        )
        >> cp_vec_function(
            fun=lambda df: df_make(
                g_buckle=df.k_cr * pi**2/12 * df.E / (1 - df.mu**2) * (df.t/df.h)**2
                - df.L / df.t / df.w
            ),
            var=["k_cr", "t", "h", "w", "E", "mu", "L"],
            out=["g_buckle"],
            name="limit state",
        )
        >> cp_bounds(
            t=(0.5 * THICKNESS, 2 * THICKNESS),
            h=(6, 18),
            w=(6, 18),
            m=(1, 5),
            L=(LOAD / 2, LOAD * 2),
        )
        >> cp_marginals(
            E=marg_fit("norm", df_stang.E),
            mu=marg_fit("beta", df_stang.mu),
        )
        >> cp_copula_gaussian(df_data=df_stang)
    )

    return md
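A usage sketch, assuming `from grama import *`-style imports as in the factory, plus illustrative values for the module constants THICKNESS and LOAD (df_stang ships with grama).

import grama as gr
from grama.data import df_stang

# Illustrative values for the module constants referenced above
THICKNESS = 0.25  # nominal plate thickness (in)
LOAD = 1.0        # nominal applied load (kips)

md_plate = make_plate_buckle()
# Sample the random material properties (E, mu) at nominal deterministic levels
df_buckle = gr.eval_sample(md_plate, n=50, df_det="nom")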
Example #3
def make_channel_nondim():
    r"""Make 1d channel model; dimensionless form

    Instantiates a model for particle and fluid temperature rise; particles are suspended in a fluid with bulk velocity along a square cross-section channel. The walls of said channel are transparent, and radiation heats the particles as they travel down the channel.

    References:
        Banko, A.J. "RADIATION ABSORPTION BY INERTIAL PARTICLES IN A TURBULENT SQUARE DUCT FLOW" (2018) PhD Thesis, Stanford University, Chapter 2

    """
    md = (
        Model("1d Particle-laden Channel with Radiation; Dimensionless Form")
        >> cp_vec_function(
            fun=lambda df: df_make(beta=120 * (1 + df.Phi_M * df.chi)),
            var=["Phi_M", "chi"],
            out=["beta"],
        ) >> cp_vec_function(
            fun=lambda df: df_make(
                T_f=(df.Phi_M * df.chi) / (1 + df.Phi_M * df.chi) *
                (df.I * df.xst - df.beta**(-1) * df.I *
                 (1 - exp(-df.beta * df.xst))),
                T_p=1 / (1 + df.Phi_M * df.chi) *
                (df.Phi_M * df.chi * df.I * df.xst + df.beta**(-1) * df.I *
                 (1 - exp(-df.beta * df.xst))),
            ),
            var=["xst", "Phi_M", "chi", "I", "beta"],
            out=["T_f", "T_p"],
        ) >> cp_bounds(
            ## Dimensionless axial location (-)
            xst=(0, 5),
        ) >> cp_marginals(
                ## Mass loading ratio (-)
                Phi_M={
                    "dist": "uniform",
                    "loc": 0,
                    "scale": 1
                },
                ## Particle-fluid heat capacity ratio (-)
                chi={
                    "dist": "uniform",
                    "loc": 0.1,
                    "scale": 0.9
                },
                ## Normalized radiative intensity (-)
                I={
                    "dist": "uniform",
                    "loc": 0.1,
                    "scale": 0.9
                },
            ) >> cp_copula_independence())

    return md
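A sketch of evaluating the dimensionless channel model: sweep the deterministic axial location while sampling the random inputs (sweep values are illustrative).

import grama as gr

md_nd = make_channel_nondim()
# Sweep xst deterministically; Phi_M, chi, I are drawn from their marginals
df_nd = gr.eval_sample(
    md_nd,
    n=20,
    df_det=gr.df_make(xst=[0.0, 1.0, 2.0, 5.0]),
)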
Example #4
def make_prlc():
    md_RLC_det = (gr.Model("RLC Circuit") >> gr.cp_vec_function(
        fun=lambda df: gr.df_make(omega0=np.sqrt(1 / df.L / df.C)),
        var=["L", "C"],
        out=["omega0"],
    ) >> gr.cp_vec_function(fun=lambda df: gr.df_make(Q=df.omega0 * df.R * df.C),
                            name="parallel RLC",
                            var=["omega0", "R", "C"],
                            out=["Q"]) >> gr.cp_bounds(
                            R=(1e-3, 1e0),
                            L=(1e-9, 1e-3),
                            C=(1e-3, 100),
                        ))

    return md_RLC_det
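Since this model is fully deterministic, a point evaluation with eval_df suffices; the component values below are illustrative.

import numpy as np
import grama as gr

md_det = make_prlc()
# Evaluate omega0 and Q at chosen component values
df_pt = gr.eval_df(md_det, df=gr.df_make(R=1e-1, L=1e-6, C=1e-1))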
Example #5
        def objective(x):
            """x = [var_fit]"""
            ## Evaluate model
            df_var = tran_outer(
                df_data[var_feat],
                concat(
                    (df_nom[var_fix].iloc[[0]], df_make(**dict(zip(var_fit, x)))),
                    axis=1,
                ),
            )
            df_tmp = eval_df(model, df=df_var)

            ## Compute joint MSE
            return ((df_tmp[out].values - df_data[out].values) ** 2).mean()
Example #6
def make_pareto_random(twoDim=True):
    """ Create a model of random points for a pareto frontier evaluation
    Args:
        twoDim (bool): determines whether to create a 2D or 3D model
    """
    if twoDim:
        # Model to make dataset
        md_true = (Model() >> cp_vec_function(
            fun=lambda df: df_make(
                y1=df.x1 * cos(df.x2),
                y2=df.x1 * sin(df.x2),
            ),
            var=["x1", "x2"],
            out=["y1", "y2"],
        ) >> cp_marginals(
            x1=dict(dist="uniform", loc=0, scale=1),
            x2=dict(dist="uniform", loc=0, scale=pi / 2),
        ) >> cp_copula_independence())

        return md_true
    else:
        # Model to make dataset
        md_true = (Model() >> cp_vec_function(
            fun=lambda df: df_make(
                y1=df.x1 * cos(df.x2),
                y2=df.x1 * sin(df.x2),
                y3=df.x1 * tan(df.x3),
            ),
            var=["x1", "x2", "x3"],
            out=["y1", "y2", "y3"],
        ) >> cp_marginals(
            x1=dict(dist="uniform", loc=0, scale=1),
            x2=dict(dist="uniform", loc=0, scale=pi / 2),
            x3=dict(dist="uniform", loc=0, scale=pi / 4),
        ) >> cp_copula_independence())

        return md_true
Example #7
    def fun_mp(i):
        x0 = df_init[var_fit].iloc[i].values

        ## Build evaluator
        def objective(x):
            """x = [var_fit]"""
            ## Evaluate model
            df_var = tran_outer(
                df_data[var_feat],
                concat(
                    (df_nom[var_fix].iloc[[0]],
                     df_make(**dict(zip(var_fit, x)))),
                    axis=1,
                ),
            )
            df_tmp = eval_df(model, df=df_var)

            ## Compute joint MSE
            return ((df_tmp[out].values - df_data[out].values)**2).mean()

        ## Run optimization
        res = minimize(
            objective,
            x0,
            args=(),
            method=method,
            jac=False,
            tol=tol,
            options={
                "maxiter": n_maxiter,
                "disp": False,
                "ftol": ftol,
                "gtol": gtol,
            },
            bounds=bounds,
        )

        df_tmp = df_make(
            **dict(zip(var_fit, res.x)),
            **dict(zip(map(lambda s: s + "_0", var_fit), x0)),
        )
        df_tmp["success"] = [res.success]
        df_tmp["message"] = [res.message]
        df_tmp["n_iter"] = [res.nit]
        df_tmp["mse"] = [res.fun]
        return df_tmp
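fun_mp is shaped for mapping over restart indices; a sketch of one way such a helper might be dispatched. Here n_restart comes from the enclosing scope, and since fun_mp is a nested function it must be picklable (or the platform must fork) for multiprocessing to work.

from multiprocessing import Pool
from pandas import concat

# Hypothetical dispatch over restart indices
with Pool() as pool:
    df_list = pool.map(fun_mp, range(n_restart))
df_res = concat(df_list, axis=0).reset_index(drop=True)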
Example #8
def sir_vtime(T, S0, I0, R0, beta, gamma, rtol=1e-4):
    r"""Solve SIR IVP, vectorized over T

    Solves the initial value problem (IVP) associated with the SIR model, given parameter values and a span of time values. This routine uses an adaptive timestep to solve the IVP to a specified tolerance, then queries the time points of interest through the solver's dense-output interpolant (t_eval).

    Args:
        T (array-like): Time points of interest
        S0 (float): Initial number of susceptible individuals (at t=0)
        I0 (float): Initial number of infected individuals (at t=0)
        R0 (float): Initial number of removed individuals (at t=0)
        beta (float): Infection rate parameter
        gamma (float): Removal rate parameter
        rtol (float): Relative tolerance for the IVP solver

    Returns:
        pandas DataFrame: Simulation timeseries results
    """

    ## Solve SIR model with an adaptive stepper; evaluate at the T points
    T_span = [0, max(T)]
    y0 = [S0, I0, R0]

    res = solve_ivp(
        sir_rhs,
        T_span,
        y0,
        args=(beta, gamma),
        rtol=rtol,
        t_eval=T,
    )

    ## Package results at the desired T points
    df_res = gr.df_make(
        t=T,
        S=res.y[0, :],
        I=res.y[1, :],
        R=res.y[2, :],
        S0=[S0],
        I0=[I0],
        R0=[R0],
        beta=[beta],
        gamma=[gamma],
    )

    return df_res
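sir_vtime relies on a sir_rhs helper not shown in this snippet; a minimal sketch of the standard SIR right-hand side it presumably wraps, using the frequency-dependent form and matching solve_ivp's (t, y, *args) signature.

def sir_rhs(t, y, beta, gamma):
    """Standard SIR dynamics; y = [S, I, R]"""
    S, I, R = y
    N = S + I + R  # total population is conserved
    dS = -beta * S * I / N
    dI = beta * S * I / N - gamma * I
    dR = gamma * I
    return [dS, dI, dR]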
Example #9
def make_sir(rtol=1e-4):
    r"""Make an SIR model

    Instantiates a Susceptible, Infected, Removed (SIR) model for disease transmission.

    Args:
        rtol (float): Relative tolerance for IVP solver

    Returns:
        grama Model: SIR model

    References:
        "Compartmental models in epidemiology," Wikipedia, url: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology

    """

    md_sir = (
        gr.Model("SIR Model") >> gr.cp_vec_function(
            fun=lambda df: gr.df_make(
                # Assume no recovered people at virus onset
                R0=0,
                # Susceptible population: everyone not initially infected
                S0=df.N - df.I0,
            ),
            var=["I0", "N"],
            out=["S0", "R0"],
            name="Population setup",
        ) >> gr.cp_vec_function(
            fun=fun_sir(rtol=rtol),
            var=["t", "S0", "I0", "R0", "beta", "gamma"],
            out=["S", "I", "R"],
            name="ODE solver & interpolation",
        ) >> gr.cp_bounds(
            N=(100, 100),  # Fixed population size
            I0=(1, 10),
            beta=(0.1, 0.5),
            gamma=(0.1, 0.5),
            t=(0, 100),
        ))

    return md_sir
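All inputs here are bounded (deterministic), so a sketch evaluates a single scenario over a sweep of times with eval_df; the parameter values are illustrative.

import grama as gr

md_sir = make_sir()
df_sir = gr.eval_df(
    md_sir,
    df=gr.df_make(
        t=list(range(0, 101, 10)),  # time sweep
        N=100, I0=5, beta=0.3, gamma=0.15,
    ),
)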
Example #10
def function_displacement(df):
    return df_make(g_disp=D_MAX - float64(4) * LENGTH**3 / df.E / df.w / df.t *
                   sqrt(df.V**2 / df.t**4 + df.H**2 / df.w**4))
Example #11
def eval_min(
    model,
    out_min=None,
    out_geq=None,
    out_leq=None,
    out_eq=None,
    method="SLSQP",
    tol=1e-6,
    n_restart=1,
    n_maxiter=50,
    seed=None,
    df_start=None,
):
    r"""Constrained minimization using functions from a model

    Perform constrained minimization using functions from a model. Model must
    have deterministic variables only.

    Wrapper for scipy.optimize.minimize

    Args:
        model (gr.Model): Model to analyze. All model variables must be
            deterministic.
        out_min (str): Output to use as minimization objective.
        out_geq (None OR list of str): Outputs to use as geq constraints; out >= 0
        out_leq (None OR list of str): Outputs to use as leq constraints; out <= 0
        out_eq (None OR list of str): Outputs to use as equality constraints; out == 0

        method (str): Optimization method; see the documentation for
            scipy.optimize.minimize for options.
        tol (float): Optimization objective convergence tolerance
        n_restart (int): Number of restarts; beyond n_restart=1 random
            restarts are used.
        n_maxiter (int): Optimizer maximum iterations
        seed (int OR None): Random seed for restarts
        df_start (None or DataFrame): Specific starting values to use; overrides
            n_restart if provided.

    Returns:
        DataFrame: Results of optimization

    Examples:
        >>> import grama as gr
        >>> md = (
        >>>     gr.Model("Constrained Rosenbrock")
        >>>     >> gr.cp_function(
        >>>         fun=lambda x: (1 - x[0])**2 + 100*(x[1] - x[0]**2)**2,
        >>>         var=["x", "y"],
        >>>         out=["c"],
        >>>     )
        >>>     >> gr.cp_function(
        >>>         fun=lambda x: (x[0] - 1)**3 - x[1] + 1,
        >>>         var=["x", "y"],
        >>>         out=["g1"],
        >>>     )
        >>>     >> gr.cp_function(
        >>>         fun=lambda x: x[0] + x[1] - 2,
        >>>         var=["x", "y"],
        >>>         out=["g2"],
        >>>     )
        >>>     >> gr.cp_bounds(
        >>>         x=(-1.5, +1.5),
        >>>         y=(-0.5, +2.5),
        >>>     )
        >>> )
        >>> md >> gr.ev_min(
        >>>     out_min="c",
        >>>     out_leq=["g1", "g2"]
        >>> )

    """
    ## Check that model has only deterministic variables
    if model.n_var_rand > 0:
        raise ValueError("model must have no random variables")
    ## Check that objective is in model
    if not (out_min in model.out):
        raise ValueError("model must contain out_min")
    ## Check that constraints are in model
    if not (out_geq is None):
        out_diff = set(out_geq).difference(set(model.out))
        if len(out_diff) > 0:
            raise ValueError(
                "model must contain each out_geq; missing {}".format(out_diff))
    if not (out_leq is None):
        out_diff = set(out_leq).difference(set(model.out))
        if len(out_diff) > 0:
            raise ValueError(
                "model must contain each out_leq; missing {}".format(out_diff))
    if not (out_eq is None):
        out_diff = set(out_eq).difference(set(model.out))
        if len(out_diff) > 0:
            raise ValueError(
                "model must contain each out_eq; missing {}".format(out_diff))

    ## Formulate initial guess
    df_nom = eval_nominal(model, df_det="nom", skip=True)
    if df_start is None:
        df_start = df_nom[model.var]

        if n_restart > 1:
            if not (seed is None):
                setseed(seed)
            ## Collect sweep-able deterministic variables
            var_sweep = list(
                filter(
                    lambda v: isfinite(model.domain.get_width(v))
                    & (model.domain.get_width(v) > 0),
                    model.var_det,
                ))
            ## Generate pseudo-marginals
            dicts_var = {}
            for v in var_sweep:
                dicts_var[v] = {
                    "dist": "uniform",
                    "loc": model.domain.get_bound(v)[0],
                    "scale": model.domain.get_width(v),
                }
            ## Overwrite model
            md_sweep = comp_marginals(model, **dicts_var)
            md_sweep = comp_copula_independence(md_sweep)
            ## Generate random start points
            df_rand = eval_sample(
                md_sweep,
                n=n_restart - 1,
                df_det="nom",
                skip=True,
            )
            df_start = concat((df_start, df_rand[model.var]),
                              axis=0).reset_index(drop=True)
    else:
        n_restart = df_start.shape[0]

    ## Factory for wrapping model's output
    def make_fun(out, sign=+1):
        def fun(x):
            df = DataFrame([x], columns=model.var)
            df_res = eval_df(model, df)
            return sign * df_res[out]

        return fun

    ## Create helper functions for constraints
    constraints = []

    if not (out_geq is None):
        for out in out_geq:
            constraints.append({
                "type": "ineq",
                "fun": make_fun(out),
            })

    if not (out_leq is None):
        for out in out_leq:
            constraints.append({
                "type": "ineq",
                "fun": make_fun(out, sign=-1),
            })

    if not (out_eq is None):
        for out in out_eq:
            constraints.append({
                "type": "eq",
                "fun": make_fun(out),
            })

    ## Parse the bounds for minimize
    bounds = list(map(lambda k: model.domain.bounds[k], model.var))

    ## Run optimization
    df_res = DataFrame()
    for i in range(n_restart):
        x0 = df_start[model.var].iloc[i].values
        res = minimize(
            make_fun(out_min),
            x0,
            args=(),
            method=method,
            jac=False,
            tol=tol,
            options={
                "maxiter": n_maxiter,
                "disp": False
            },
            constraints=constraints,
            bounds=bounds,
        )

        df_opt = df_make(
            **dict(zip(model.var, res.x)),
            **dict(zip(map(lambda s: s + "_0", model.var), x0)),
        )
        df_tmp = eval_df(model, df=df_opt)
        df_tmp["success"] = [res.success]
        df_tmp["message"] = [res.message]
        df_tmp["n_iter"] = [res.nit]

        df_res = concat((df_res, df_tmp), axis=0).reset_index(drop=True)

    return df_res
Example #12
def eval_nls(
    model,
    df_data=None,
    out=None,
    var_fix=None,
    append=False,
    tol=1e-3,
    maxiter=25,
    nrestart=1,
):
    r"""Estimate with Nonlinear Least Squares (NLS)

    Estimate best-fit variable levels with nonlinear least squares (NLS).

    Args:
        model (gr.Model): Model to analyze. All model variables
            selected for fitting must be bounded or random. Deterministic
            variables may have semi-infinite bounds.
        df_data (DataFrame): Data for estimating parameters. Variables not
            found in df_data are optimized in fitting.
        out (list or None): Output contributions to consider in computing MSE.
            Assumed to be model.out if left as None.
        var_fix (list or None): Variables to fix to nominal levels. Note that
            variables with domain width zero will automatically be fixed.
        append (bool): Append metadata? (Initial guess, MSE, optimizer status)
        tol (float): Optimizer convergence tolerance
        maxiter (int): Optimizer maximum iterations
        nrestart (int): Number of restarts; values beyond nrestart=1 are
            not yet implemented.

    Returns:
        DataFrame: Results of estimation

    Examples:
        >>> import grama as gr
        >>> from grama.data import df_trajectory_full
        >>> from grama.models import make_trajectory_linear
        >>>
        >>> md_trajectory = make_trajectory_linear()
        >>>
        >>> df_fit = (
        >>>     md_trajectory
        >>>     >> gr.ev_nls(df_data=df_trajectory_full)
        >>> )
        >>>
        >>> print(df_fit)

    """
    ## Check `out` invariants
    if out is None:
        out = model.out
        print("... eval_nls setting out = {}".format(out))
    set_diff = set(out).difference(set(df_data.columns))
    if len(set_diff) > 0:
        raise ValueError("out must be subset of df_data.columns\n" +
                         "difference = {}".format(set_diff))

    ## Determine variables to be fixed
    if var_fix is None:
        var_fix = set()
    else:
        var_fix = set(var_fix)
    for var in model.var_det:
        wid = model.domain.get_width(var)
        if wid == 0:
            var_fix.add(var)
    print("... eval_nls setting var_fix = {}".format(list(var_fix)))

    ## Determine variables for evaluation
    var_feat = set(model.var).intersection(set(df_data.columns))
    print("... eval_nls setting var_feat = {}".format(list(var_feat)))

    ## Determine variables for fitting
    var_fit = set(model.var).difference(var_fix.union(var_feat))
    if len(var_fit) == 0:
        raise ValueError("No var selected for fitting!\n" +
                         "Try checking model bounds and df_data.columns.")

    ## Separate var_fit into det and rand
    var_fit_det = list(set(model.var_det).intersection(var_fit))
    var_fit_rand = list(set(model.var_rand).intersection(var_fit))

    ## Construct bounds, fix var_fit order
    var_fit = var_fit_det + var_fit_rand
    bounds = []
    var_prob = []
    for var in var_fit_det:
        if not isfinite(model.domain.get_nominal(var)):
            var_prob.append(var)
        bounds.append(model.domain.get_bound(var))
    if len(var_prob) > 0:
        raise ValueError(
            "all variables to be fitted must finite nominal value\n" +
            "offending var = {}".format(var_prob))

    for var in var_fit_rand:
        bounds.append((
            model.density.marginals[var].q(0),
            model.density.marginals[var].q(1),
        ))

    ## Determine initial guess points
    df_nom = eval_nominal(model, df_det="nom", skip=True)

    df_init = df_nom[var_fit]
    if nrestart > 1:
        raise NotImplementedError()

    ## Iterate over initial guesses
    df_res = DataFrame()
    for i in range(df_init.shape[0]):
        x0 = df_init[var_fit].iloc[i].values

        ## Build evaluator
        def objective(x):
            """x = [var_fit]"""
            ## Evaluate model
            df_var = tran_outer(
                df_data[var_feat],
                concat(
                    (df_nom[var_fix].iloc[[0]],
                     df_make(**dict(zip(var_fit, x)))),
                    axis=1,
                ),
            )
            df_tmp = eval_df(model, df=df_var)

            ## Compute joint MSE
            return ((df_tmp[out].values - df_data[out].values)**2).mean()

        ## Run optimization
        res = minimize(
            objective,
            x0,
            args=(),
            method="SLSQP",
            jac=False,
            tol=tol,
            options={
                "maxiter": maxiter,
                "disp": False
            },
            bounds=bounds,
        )

        df_res = concat(
            (
                df_res,
                df_make(
                    **dict(zip(var_fit, res.x)),
                    **dict(zip(map(lambda s: s + "_0", var_fit), x0)),
                    status=res.status,
                    mse=res.fun,
                ),
            ),
            axis=0,
        )

    ## Post-process
    if append:
        return df_res
    else:
        return df_res[var_fit]
Example #13
def make_channel():
    r"""Make 1d channel model; dimensional form

    Instantiates a model for particle and fluid temperature rise; particles are suspended in a fluid with bulk velocity along a square cross-section channel. The walls of said channel are transparent, and radiation heats the particles as they travel down the channel.

    Note that this takes the same inputs as the builtin dataset `df_channel`.

    References:
        Banko, A.J. "RADIATION ABSORPTION BY INERTIAL PARTICLES IN A TURBULENT SQUARE DUCT FLOW" (2018) PhD Thesis, Stanford University, Chapter 2

    Examples:

    >>> import grama as gr
    >>> from grama.data import df_channel
    >>> from grama.models import make_channel
    >>> md_channel = make_channel()

    >>> (
    >>>     df_channel
    >>>     >> gr.tf_md(md_channel)

    >>>     >> gr.ggplot(gr.aes("T_f", "T_norm"))
    >>>     + gr.geom_abline(slope=1, intercept=0, linetype="dashed")
    >>>     + gr.geom_point()
    >>>     + gr.labs(x="1D Model", y="3D DNS")
    >>> )

    """
    md = (
        Model("1d Particle-laden Channel with Radiation; Dimensional Form") >>
        cp_vec_function(
            fun=lambda df: df_make(
                Re=df.U * df.H / df.nu_f,
                chi=df.cp_p / df.cp_f,
                Pr=df.nu_f / df.alpha_f,
                Phi_M=df.rho_p * 0.524 * df.d_p**3 * df.n / df.rho_f,
                tau_flow=df.L / df.U,
                tau_pt=(df.rho_p * df.cp_p * 0.318 * df.d_p) / df.h_p,
                tau_rad=(df.rho_p * df.cp_p * 0.667 * df.d_p * df.T_0) /
                (df.Q_abs * 0.78 * df.I_0),
            ),
            var=[
                "U",  # Fluid bulk velocity
                "H",  # Channel width
                "nu_f",  # Fluid kinematic viscosity
                "cp_p",  # Particle isobaric heat capacity
                "cp_f",  # Fluid isobaric heat capacity
                "alpha_f",  # Fluid thermal diffusivity
                "rho_p",  # Particle density
                "rho_f",  # Fluid density
                "d_p",  # Particle diameter
                "n",  # Particle number density
                "h_p",  # Particle-to-gas convection coefficient
                "T_0",  # Initial temperature
                "Q_abs",  # Particle radiation absorption coefficient
                "I_0",  # Incident radiation
            ],
            out=[
                "Re",  # Reynolds number
                "Pr",  # Prandtl number
                "chi",  # Particle-fluid heat capacity ratio
                "Phi_M",  # Mass Loading Ratio
                "tau_flow",  # Fluid residence time
                "tau_pt",  # Particle thermal time constant
                "tau_rad",  # Particle temperature doubling time (approximate)
            ],
            name="Dimensionless Numbers",
        ) >> cp_vec_function(
            fun=lambda df: df_make(
                ## Let xi = x / L
                xst=(df.xi * df.L) / df.H / df.Re / df.Pr,
                ## Assume an optically-thin scenario; I/I_0 = 1
                Is=df.Re * df.Pr * (df.H / df.L) *
                (df.tau_flow / df.tau_rad) * 1,
                beta=df.Re * df.Pr * (df.H / df.L) *
                (df.tau_flow / df.tau_pt) * (1 + df.Phi_M * df.chi),
            ),
            var=[
                "xi", "chi", "H", "L", "Phi_M", "tau_flow", "tau_rad", "tau_pt"
            ],
            out=[
                "xst",  # Flow-normalized channel axial location
                "Is",  # Normalized heat flux
                "beta",  # Spatial development coefficient
            ],
            name="Intermediate Dimensionless Numbers",
        ) >> cp_vec_function(
            fun=lambda df: df_make(
                T_f=(df.Phi_M * df.chi) / (1 + df.Phi_M * df.chi) *
                (df.Is * df.xst - df.Is / df.beta *
                 (1 - exp(-df.beta * df.xst))),
                T_p=1 / (1 + df.Phi_M * df.chi) *
                (df.Phi_M * df.chi * df.Is * df.xst + df.Is / df.beta *
                 (1 - exp(-df.beta * df.xst))),
            ),
            var=["xst", "Phi_M", "chi", "Is", "beta"],
            out=["T_f", "T_p"],
        ) >> cp_bounds(
            ## Normalized axial location; xi = x/L (-)
            xi=(0, 1),
        ) >> cp_marginals(
                ## Channel width (m)
                H={
                    "dist": "uniform",
                    "loc": 0.038,
                    "scale": 0.004
                },
                ## Channel length (m)
                L={
                    "dist": "uniform",
                    "loc": 0.152,
                    "scale": 0.016
                },
                ## Fluid bulk velocity (m/s)
                U={
                    "dist": "uniform",
                    "loc": 1,
                    "scale": 2.5
                },
                ## Fluid kinematic viscosity (m^2/s)
                nu_f={
                    "dist": "uniform",
                    "loc": 1.4e-5,
                    "scale": 0.1e-5
                },
                ## Particle isobaric heat capacity (J/(kg K))
                cp_p={
                    "dist": "uniform",
                    "loc": 100,
                    "scale": 900
                },
                ## Fluid isobaric heat capacity (J/(kg K))
                cp_f={
                    "dist": "uniform",
                    "loc": 1000,
                    "scale": 1000
                },
                ## Fluid thermal diffusivity (m^2/s)
                alpha_f={
                    "dist": "uniform",
                    "loc": 50e-6,
                    "scale": 50e-6
                },
                ## Particle density (kg / m^3)
                rho_p={
                    "dist": "uniform",
                    "loc": 1e3,
                    "scale": 9e3
                },
                ## Fluid density (kg / m^3)
                rho_f={
                    "dist": "uniform",
                    "loc": 0.5,
                    "scale": 1.0
                },
                ## Particle diameter (m)
                d_p={
                    "dist": "uniform",
                    "loc": 1e-6,
                    "scale": 9e-6
                },
                ## Particle number density (1 / m^3)
                n={
                    "dist": "uniform",
                    "loc": 9.5e9,
                    "scale": 1.0e9
                },
                ## Particle-to-gas convection coefficient (W / (m^2 K))
                h_p={
                    "dist": "uniform",
                    "loc": 1e3,
                    "scale": 9e3
                },
                ## Initial temperature (K)
                T_0={
                    "dist": "uniform",
                    "loc": 285,
                    "scale": 30
                },
                ## Particle radiation absorption coefficient (-)
                Q_abs={
                    "dist": "uniform",
                    "loc": 0.25,
                    "scale": 0.50
                },
                ## Incident radiation (W/m^2)
                I_0={
                    "dist": "uniform",
                    "loc": 9.5e6,
                    "scale": 1.0e6
                },
            ) >> cp_copula_independence())

    return md
Example #14
def function_area(df):
    return df_make(c_area=df.w * df.t)
Example #15
def eval_nls(
    model,
    df_data=None,
    out=None,
    var_fix=None,
    df_init=None,
    append=False,
    tol=1e-6,
    ftol=1e-9,
    gtol=1e-5,
    n_maxiter=100,
    n_restart=1,
    method="L-BFGS-B",
    seed=None,
    verbose=True,
):
    r"""Estimate with Nonlinear Least Squares (NLS)

    Estimate best-fit variable levels with nonlinear least squares (NLS).

    Args:
        model (gr.Model): Model to analyze. All model variables
            selected for fitting must be bounded or random. Deterministic
            variables may have semi-infinite bounds.
        df_data (DataFrame): Data for estimating parameters. Variables not
            found in df_data are optimized in fitting.
        out (list or None): Output contributions to consider in computing MSE.
            Assumed to be model.out if left as None.
        var_fix (list or None): Variables to fix to nominal levels. Note that
            variables with domain width zero will automatically be fixed.
        df_init (DataFrame): Initial guesses for parameters; overrides n_restart
        append (bool): Append metadata? (Initial guess, MSE, optimizer status)
        tol (float): Optimizer convergence tolerance
        ftol (float): Optimizer function-value tolerance (for L-BFGS-B)
        gtol (float): Optimizer projected-gradient tolerance (for L-BFGS-B)
        n_maxiter (int): Optimizer maximum iterations
        n_restart (int): Number of restarts; beyond n_restart=1 random
            restarts are used.
        method (str): Optimization method; see scipy.optimize.minimize
        seed (int OR None): Random seed for restarts
        verbose (bool): Print messages to console?

    Returns:
        DataFrame: Results of estimation

    Examples:
        >>> import grama as gr
        >>> from grama.data import df_trajectory_full
        >>> from grama.models import make_trajectory_linear
        >>>
        >>> md_trajectory = make_trajectory_linear()
        >>>
        >>> df_fit = (
        >>>     md_trajectory
        >>>     >> gr.ev_nls(df_data=df_trajectory_full)
        >>> )
        >>>
        >>> print(df_fit)

    """
    ## Check `out` invariants
    if out is None:
        out = model.out
        if verbose:
            print("... eval_nls setting out = {}".format(out))
    set_diff = set(out).difference(set(df_data.columns))
    if len(set_diff) > 0:
        raise ValueError("out must be subset of df_data.columns\n" +
                         "difference = {}".format(set_diff))

    ## Determine variables to be fixed
    if var_fix is None:
        var_fix = set()
    else:
        var_fix = set(var_fix)
    for var in model.var_det:
        wid = model.domain.get_width(var)
        if wid == 0:
            var_fix.add(var)
    if verbose:
        print("... eval_nls setting var_fix = {}".format(list(var_fix)))

    ## Determine variables for evaluation
    var_feat = set(model.var).intersection(set(df_data.columns))
    if verbose:
        print("... eval_nls setting var_feat = {}".format(list(var_feat)))

    ## Determine variables for fitting
    var_fit = set(model.var).difference(var_fix.union(var_feat))
    if len(var_fit) == 0:
        raise ValueError("No var selected for fitting!\n" +
                         "Try checking model bounds and df_data.columns.")

    ## Separate var_fit into det and rand
    var_fit_det = list(set(model.var_det).intersection(var_fit))
    var_fit_rand = list(set(model.var_rand).intersection(var_fit))

    ## Construct bounds, fix var_fit order
    var_fit = var_fit_det + var_fit_rand
    bounds = []
    var_prob = []
    for var in var_fit_det:
        if not isfinite(model.domain.get_nominal(var)):
            var_prob.append(var)
        bounds.append(model.domain.get_bound(var))
    if len(var_prob) > 0:
        raise ValueError(
            "all variables to be fitted must finite nominal value\n" +
            "offending var = {}".format(var_prob))

    for var in var_fit_rand:
        bounds.append((
            model.density.marginals[var].q(0),
            model.density.marginals[var].q(1),
        ))

    ## Determine initial guess points
    df_nom = eval_nominal(model, df_det="nom", skip=True)

    ## Use specified initial guess(es)
    if not (df_init is None):
        # Check invariants
        set_diff = set(var_fit).difference(set(df_init.columns))
        if len(set_diff) > 0:
            raise ValueError("var_fit must be subset of df_init.columns\n" +
                             "difference = {}".format(set_diff))
        # Pull n_restart
        n_restart = df_init.shape[0]

    ## Generate initial guess(es)
    else:

        df_init = df_nom[var_fit]
        if n_restart > 1:
            if not (seed is None):
                setseed(seed)
            ## Collect sweep-able deterministic variables
            var_sweep = list(
                filter(
                    lambda v: isfinite(model.domain.get_width(v))
                    & (model.domain.get_width(v) > 0),
                    model.var_det,
                ))
            ## Generate pseudo-marginals
            dicts_var = {}
            for v in var_sweep:
                dicts_var[v] = {
                    "dist": "uniform",
                    "loc": model.domain.get_bound(v)[0],
                    "scale": model.domain.get_width(v),
                }
            ## Overwrite model
            md_sweep = comp_marginals(model, **dicts_var)
            md_sweep = comp_copula_independence(md_sweep)
            ## Generate random start points
            df_rand = eval_monte_carlo(
                md_sweep,
                n=n_restart - 1,
                df_det="nom",
                skip=True,
            )
            df_init = concat((df_init, df_rand[var_fit]),
                             axis=0).reset_index(drop=True)

    ## Iterate over initial guesses
    df_res = DataFrame()
    for i in range(n_restart):
        x0 = df_init[var_fit].iloc[i].values

        ## Build evaluator
        def objective(x):
            """x = [var_fit]"""
            ## Evaluate model
            df_var = tran_outer(
                df_data[var_feat],
                concat(
                    (df_nom[var_fix].iloc[[0]],
                     df_make(**dict(zip(var_fit, x)))),
                    axis=1,
                ),
            )
            df_tmp = eval_df(model, df=df_var)

            ## Compute joint MSE
            return ((df_tmp[out].values - df_data[out].values)**2).mean()

        ## Run optimization
        res = minimize(
            objective,
            x0,
            args=(),
            method=method,
            jac=False,
            tol=tol,
            options={
                "maxiter": n_maxiter,
                "disp": False,
                "ftol": ftol,
                "gtol": gtol,
            },
            bounds=bounds,
        )

        ## Package results
        df_tmp = df_make(
            **dict(zip(var_fit, res.x)),
            **dict(zip(map(lambda s: s + "_0", var_fit), x0)),
        )
        df_tmp["success"] = [res.success]
        df_tmp["message"] = [res.message]
        df_tmp["n_iter"] = [res.nit]
        df_tmp["mse"] = [res.fun]

        df_res = concat(
            (
                df_res,
                df_tmp,
            ),
            axis=0,
        ).reset_index(drop=True)

    ## Post-process
    if append:
        return df_res
    else:
        return df_res[var_fit]
Example #16
def fun_y(df):
    v_inf = g * df.tau
    return df_make(
        y=df.tau * (df.v0 - v_inf) * (1 - exp(-df.t / df.tau)) + v_inf * df.t + y0
    )
Example #17
def function_stress(df):
    return df_make(g_stress=(df.Y - 600 * df.V / df.w / df.t**2 -
                             600 * df.H / df.w**2 / df.t) / MU_Y)
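function_area, function_displacement, and function_stress (Examples #14, #10, and #17) look like the limit states of a cantilever beam model; a sketch of how such helpers might compose, with the constants and bounds below as assumptions.

from numpy import float64, sqrt
import grama as gr
from grama import df_make

# Hypothetical constants referenced by the helpers above
LENGTH = 100.0   # beam length (in)
D_MAX = 2.2535   # displacement limit (in)
MU_Y = 40000.0   # mean yield strength (psi)

md_beam = (
    gr.Model("Cantilever Beam (sketch)")
    >> gr.cp_vec_function(fun=function_area, var=["w", "t"], out=["c_area"])
    >> gr.cp_vec_function(
        fun=function_stress, var=["w", "t", "H", "V", "Y"], out=["g_stress"])
    >> gr.cp_vec_function(
        fun=function_displacement, var=["w", "t", "H", "V", "E"], out=["g_disp"])
    >> gr.cp_bounds(w=(2, 4), t=(2, 4))  # illustrative bounds
)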
Example #18
def tlmc_1f1m(md, N0, eps):

    import numpy as np
    import grama as gr
    X = gr.Intention()

    md1f1m = gr.make_tlmc_model_1f1m()

    # Check that md is OK --> same # inputs/outputs
    # Check inputs
    try:
        r = md.functions[0].func(0, 0)
    except TypeError:
        raise TypeError(
            'Input model must have 2 inputs: level and point at which to evaluate.'
        )

    # Check outputs
    r = md.functions[0].func(0, 0)
    if len(r) != 2:
        raise ValueError(
            'Level 0 function must have 2 outputs: result and cost.')
    r = md.functions[0].func(1, 0)
    if len(r) != 2:
        raise ValueError(
            'Level 1 function must have 2 outputs: result and cost.')

    # Check that md has 1 function
    if len(md.functions) != 1:
        raise ValueError('Input model must have 1 function.')

    # Make sure N0 and eps are greater than 0
    if (N0 <= 0) or (eps <= 0):
        raise ValueError('N0 and eps must be > 0.')

    its = 0  # initialize iteration counter

    Nlev = np.zeros((1, 2))  # samples taken per level (initialize)
    dNlev = np.array([[N0, N0]])  # samples left to take per level (initialize)
    Vlev = np.zeros((1, 2))  # variance per level (initialize)
    sumlev = np.zeros((2, 2))  # sample results per level (initialize)
    costlev = np.zeros((1, 2))  # total cost per level (initialize)

    while np.sum(dNlev) > 0:  # check if there are samples left to be evaluated
        for lev in range(2):
            # Check if there are samples to be evaluated on level 'lev'
            if dNlev[0, lev] > 0:
                df_mc_lev = md1f1m >> gr.ev_monte_carlo(
                    n=dNlev[0, lev], df_det=gr.df_make(level=lev))
                if lev > 0:
                    df_prev = df_mc_lev >> gr.tf_select(
                        gr.columns_between(
                            "x", "level")) >> gr.tf_mutate(level=X.level - 1)
                    df_mc_lev_prev = md1f1m >> gr.ev_df(df_prev)
                    Y = df_mc_lev.P - df_mc_lev_prev.P
                    C = sum(df_mc_lev.cost) + sum(df_mc_lev_prev.cost)
                else:
                    Y = df_mc_lev.P
                    C = sum(df_mc_lev.cost)

                cost = C
                sums = [sum(Y), sum(Y**2)]

                Nlev[0, lev] += dNlev[0, lev]  # update samples taken on level 'lev'
                sumlev[0, lev] += sums[0]  # update sample results on level 'lev'
                sumlev[1, lev] += sums[1]
                costlev[0, lev] += cost  # update total cost on level 'lev'

        mlev = np.abs(sumlev[0, :] / Nlev)  # expected value per level
        Vlev = np.maximum(0, sumlev[1, :] / Nlev - mlev**2)  # variance per level
        Clev = costlev / Nlev  # cost per result per level

        # Lagrange multiplier to minimize variance for a fixed cost
        mu = eps**(-2) * sum(np.sqrt(Vlev * Clev))
        # Optimal number of samples per level
        Ns = np.ceil(mu * np.sqrt(Vlev / Clev))
        # Update samples left to take per level
        dNlev = np.maximum(0, Ns - Nlev)
        its += 1  # update counter

    P = np.sum(sumlev[0, :] / Nlev)  # evaluate two-level estimator
    return P, Nlev, Vlev, its
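A sketch of driving the estimator with the built-in two-level test model it checks against; the N0 and eps values are illustrative.

import grama as gr

md_test = gr.make_tlmc_model_1f1m()
P, Nlev, Vlev, its = tlmc_1f1m(md_test, N0=100, eps=0.05)
print("estimate:", P, "samples per level:", Nlev, "iterations:", its)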
Example #19
def fun_x(df):
    return df_make(
        x=df.tau * df.u0 * (1 - exp(-df.t / df.tau)) + x0
    )
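fun_x and fun_y (Examples #19 and #16) appear to be the closed-form coordinates of a linear-drag trajectory; a sketch of composing them into a model, with g, x0, y0 as assumed module constants and illustrative bounds.

from numpy import exp
import grama as gr
from grama import df_make

# Hypothetical constants referenced by fun_x and fun_y
g = -9.8  # gravitational acceleration (m/s^2)
x0 = 0.0  # initial horizontal position (m)
y0 = 0.0  # initial vertical position (m)

md_traj = (
    gr.Model("Trajectory (sketch)")
    >> gr.cp_vec_function(fun=fun_x, var=["u0", "tau", "t"], out=["x"])
    >> gr.cp_vec_function(fun=fun_y, var=["v0", "tau", "t"], out=["y"])
    >> gr.cp_bounds(u0=(1, 10), v0=(1, 10), tau=(0.05, 5), t=(0, 10))
)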