Example #1
# Assumed context (not part of the snippet): `np` is numpy and `po` is
# chaospy's polynomial module, as used throughout the chaospy code base.
def E_cond(poly, freeze, dist, **kws):
    """Conditional expectation of `poly` with the `freeze` variables held fixed."""

    # The per-dimension raw moments used below are only valid for
    # stochastically independent components.
    assert not dist.dependent()

    # Pad the polynomial with extra dimensions to match the distribution.
    if poly.dim < len(dist):
        poly = po.setdim(poly, len(dist))

    # Normalize `freeze` into an (n, dim) array of exponent masks that mark
    # which variables are to be held fixed.
    freeze = po.Poly(freeze)
    freeze = po.setdim(freeze, len(dist))
    keys = list(freeze.A.keys())
    if len(keys) == 1 and keys[0] == (0,)*len(dist):
        freeze = list(freeze.A.values())[0]
    else:
        freeze = np.array(keys)
    freeze = freeze.reshape(freeze.size // len(dist), len(dist))

    shape = poly.shape
    poly = po.flatten(poly)

    # Tabulate raw moments for every exponent combination that can occur.
    kmax = np.max(poly.keys, 0)+1
    keys = [i for i in np.ndindex(*kmax)]
    vals = dist.mom(np.array(keys).T, **kws).T
    mom = dict(zip(keys, vals))

    A = poly.A.copy()
    keys = list(A.keys())

    out = {}
    zeros = [0]*poly.dim
    for i in range(len(keys)):

        key = list(keys[i])
        a = A[tuple(key)]

        # Swap frozen exponents out of `key` and into `zeros`, so the moment
        # lookup below only integrates over the non-frozen variables.
        for d in range(poly.dim):
            for j in range(len(freeze)):
                if freeze[j, d]:
                    key[d], zeros[d] = zeros[d], key[d]
                    break

        tmp = a*mom[tuple(key)]
        if tuple(zeros) in out:
            out[tuple(zeros)] = out[tuple(zeros)] + tmp
        else:
            out[tuple(zeros)] = tmp

        # Undo the swap before handling the next term.
        for d in range(poly.dim):
            for j in range(len(freeze)):
                if freeze[j, d]:
                    key[d], zeros[d] = zeros[d], key[d]
                    break

    out = po.Poly(out, poly.dim, poly.shape, float)
    out = po.reshape(out, shape)

    return out
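
A minimal usage sketch, not from the original source: `cp` is assumed to be the public chaospy package (providing `variable`, `Poly`, `J`, and `Normal`), and `E_cond` is the function defined above.

import chaospy as cp

# Two standard-normal variables and the product polynomial q0*q1.
q0, q1 = cp.variable(2)
poly = cp.Poly([q0*q1])
dist = cp.J(cp.Normal(), cp.Normal())

# Freeze q0 and integrate out q1; since E[q1] = 0 for a standard normal,
# the conditional expectation of q0*q1 given q0 collapses to zero.
print(E_cond(poly, q0, dist))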
Example #2
# Assumed context (not part of the snippet): `np` is numpy, `la` is a linalg
# module providing `lstsq`, `lm` is sklearn.linear_model (optional), `po` is
# chaospy's polynomial module, and `rlstsq` is a local regularized solver.
def fit_regression(P, x, u, rule="LS", retall=False, **kws):
    """
Fit a polynomial chaos expansion using linear regression.

Parameters
----------
P : Poly
    Polynomial chaos expansion with `P.shape=(M,)` and `P.dim=D`.
x : array_like
    Collocation nodes with `x.shape=(D,K)`.
u : array_like
    Model evaluations with `len(u)=K`.
retall : bool
    If True, return `uhat` in addition to `R`.
rule : str
    Regression method used.

    The following methods use scikit-learn as the backend.
    See `sklearn.linear_model` for more details.

    Key     Scikit-learn    Description
    ---     ------------    -----------
        Parameters      Description
        ----------      -----------

    "BARD"  ARDRegression   Bayesian ARD Regression
        n_iter=300      Maximum iterations
        tol=1e-3        Optimization tolerance
        alpha_1=1e-6    Gamma shape parameter
        alpha_2=1e-6    Gamma inverse scale parameter
        lambda_1=1e-6   Gamma shape parameter
        lambda_2=1e-6   Gamma inverse scale parameter
        threshold_lambda=1e-4   Upper pruning threshold

    "BR"    BayesianRidge   Bayesian Ridge Regression
        n_iter=300      Maximum iterations
        tol=1e-3        Optimization tolerance
        alpha_1=1e-6    Gamma shape parameter
        alpha_2=1e-6    Gamma inverse scale parameter
        lambda_1=1e-6   Gamma shape parameter
        lambda_2=1e-6   Gamma inverse scale parameter

    "EN"    ElastiNet       Elastic Net
        alpha=1.0       Dampening parameter
        rho             Mixing parameter in [0,1]
        max_iter=300    Maximum iterations
        tol             Optimization tolerance

    "ENC"   ElasticNetCV    EN w/Cross Validation
        rho             Mixing parameter(s) in [0,1]
        eps=1e-3        min(alpha)/max(alpha)
        n_alphas        Number of alphas
        alphas          List of alphas
        max_iter        Maximum iterations
        tol             Optimization tolerance
        cv=3            Cross validation folds

    "LA"    Lars            Least Angle Regression
        n_nonzero_coefs Number of non-zero coefficients
        eps             Cholesky regularization

    "LAC"   LarsCV          LAR w/Cross Validation
        max_iter        Maximum iterations
        cv=5            Cross validation folds
        max_n_alphas    Max points for residuals in cv

    "LAS"   Lasso           Least Absolute Shrinkage and
                            Selection Operator
        alpha=1.0       Dampening parameter
        max_iter        Maximum iterations
        tol             Optimization tolerance

    "LASC"  LassoCV         LAS w/Cross Validation
        eps=1e-3        min(alpha)/max(alpha)
        n_alphas        Number of alphas
        alphas          List of alphas
        max_iter        Maximum iterations
        tol             Optimization tolerance
        cv=3            Cross validation folds

    "LL"    LassoLars       Lasso and Lars model
        max_iter        Maximum iterations
        eps             Cholesky regularization

    "LLC"   LassoLarsCV     LL w/Cross Validation
        max_iter        Maximum iterations
        cv=5            Cross validation folds
        max_n_alphas    Max points for residuals in cv
        eps             Cholesky regularization

    "LLIC"  LassoLarsIC     LL w/AIC or BIC
        criterion       "AIC" or "BIC" criterion
        max_iter        Maximum iterations
        eps             Cholesky regularization

    "OMP"   OrthogonalMatchingPursuit
        n_nonzero_coefs Number of non-zero coefficients
        tol             Max residual norm (instead of non-zero coef)

    Local methods

    Key     Description
    ---     -----------
    "LS"    Ordenary Least Squares

    "T"     Ridge Regression/Tikhonov Regularization
        order           Order of regularization (or custom matrix)
        alpha           Dampening parameter (else estimated from gcv)

    "TC"    T w/Cross Validation
        order           Order of regularization (or custom matrix)
        alpha           Dampening parameter (else estimated from gcv)


Returns
-------
R[, uhat]

R : Poly
    Fitted polynomial with `R.shape=u.shape[1:]` and `R.dim=D`.
uhat : np.ndarray
    The Fourier coefficients in the estimation.

Examples
--------
>>> P = cp.Poly([1, x, y])
>>> s = [[-1,-1,1,1], [-1,1,-1,1]]
>>> u = [0,1,1,2]
>>> print(fit_regression(P, s, u))
0.5q1+0.5q0+1.0

    """

    x = np.array(x)
    if len(x.shape) == 1:
        x = x.reshape(1, *x.shape)
    u = np.array(u)

    Q = P(*x).T
    shape = u.shape[1:]
    u = u.reshape(u.shape[0], int(np.prod(u.shape[1:])))

    rule = rule.upper()

    # Local rules
    if rule == "LS":
        uhat = la.lstsq(Q, u)[0].T

    elif rule == "T":
        uhat, alphas = rlstsq(Q, u, kws.get("order", 0),
                              kws.get("alpha", None), False, True)
        uhat = uhat.T

    elif rule == "TC":
        uhat = rlstsq(Q, u, kws.get("order", 0), kws.get("alpha", None), True)
        uhat = uhat.T

    else:

        # Scikit-learn wrapper
        try:
            _ = lm
        except NameError:
            raise NotImplementedError("sklearn not installed")

        if rule == "BARD":
            solver = lm.ARDRegression(fit_intercept=False, copy_X=False, **kws)

        elif rule == "BR":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.BayesianRidge(**kws)

        elif rule == "EN":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.ElasticNet(**kws)

        elif rule == "ENC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.ElasticNetCV(**kws)

        elif rule == "LA":  # success
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.Lars(**kws)

        elif rule == "LAC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LarsCV(**kws)

        elif rule == "LAS":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.Lasso(**kws)

        elif rule == "LASC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoCV(**kws)

        elif rule == "LL":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoLars(**kws)

        elif rule == "LLC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoLarsCV(**kws)

        elif rule == "LLIC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoLarsIC(**kws)

        elif rule == "OMP":
            solver = lm.OrthogonalMatchingPursuit(**kws)

        uhat = solver.fit(Q, u).coef_

    u = u.reshape(u.shape[0], *shape)

    R = po.sum((P * uhat), -1)
    R = po.reshape(R, shape)

    if retall == 1:
        return R, uhat
    elif retall == 2:
        if rule == "T":
            return R, uhat, Q, alphas
        return R, uhat, Q
    return R
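
A hedged usage sketch for the function above. The `cp` alias for chaospy and `cp.variable` are assumptions carried over from the doctest, and the "LAS" call additionally assumes scikit-learn is installed.

import numpy as np
import chaospy as cp

q0, q1 = cp.variable(2)
P = cp.Poly([1, q0, q1])                          # P.shape=(3,), P.dim=2
s = np.array([[-1, -1, 1, 1], [-1, 1, -1, 1]])    # nodes, shape (D, K)
u = np.array([0, 1, 1, 2])                        # model evaluations, len K

# Ordinary least squares, also returning the Fourier coefficients.
R, uhat = fit_regression(P, s, u, rule="LS", retall=True)

# The same fit through scikit-learn's Lasso; extra keyword arguments
# (here the penalty strength alpha) are forwarded to the sklearn solver.
R_lasso = fit_regression(P, s, u, rule="LAS", alpha=0.1)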
Example #3
# Assumed context (not part of the snippet): `np` is numpy; `po`, `samplegen`,
# `orth_select`, and `fit_regression` are helpers from the chaospy code base.
def pcm_lr(func,
           order,
           dist_out,
           sample=None,
           dist_in=None,
           rule="H",
           orth=3,
           regression="LS",
           retall=False):
    """
Probabilistic Collocation Method using Linear Least Squares fit

Parameters
----------
Required arguments

func : callable
    The model to be approximated.  Must accept a single argument `z`,
    a 1-dimensional array with `len(z) == len(dist)`.
order : int
    The order of chaos expansion.
dist_out : Dist
    Distribution of the model's parameters.

Optional arguments

sample : int
    The order of the sample scheme to be used.
    If omitted, it defaults to 2*len(orth).
dist_in : Dist
    If included, the space will be mapped using a Rosenblatt
    transformation from dist_out to dist_in before creating an
    expansion in terms of dist_in.
rule : str
    Rule for generating samples.

    Key     Name                Nested
    ----    ----------------    ------
    "K"     Korobov             no
    "R"     (Pseudo-)Random     no
    "L"     Latin hypercube     no
    "S"     Sobol               yes
    "H"     Halton              yes
    "M"     Hammersley          yes

orth : int, str, callable, Poly
    Orthogonal polynomial generation.

    int, str :
        orth will be passed to orth_select
        for selection of orthogonalization.
        See orth_select doc for more details.

    callable :
        the return of orth(M, dist) will be used.

    Poly :
        it will be used directly.
        It must be of length N+1 = comb(M+D, M).
regression : str
    Linear regression method used.
    See fit_regression for more details.
retall : bool
    If True, return extra values.

#  Examples
#  --------
#
#  Define function:
#  >>> func = lambda z: -z[1]**2 + 0.1*z[0]
#
#  Define distribution:
#  >>> dist = cp.J(cp.Normal(), cp.Normal())
#
#  Perform pcm:
#  >>> q, x, y = cp.pcm_lr(func, 2, dist, retall=True)
#  >>> print(cp.around(q, 10))
#  -q1^2+0.1q0
#  >>> print(len(x.T))
#  12
    """

    if dist_in is None:
        dist = dist_out
    else:
        dist = dist_in

    # orthogonalization
    if orth is None:
        if dist.dependent():
            orth = "chol"
        else:
            orth = "ttr"
    if isinstance(orth, (str, int)):
        orth = orth_select(orth)
    if not isinstance(orth, po.Poly):
        orth = orth(order, dist)

    # sampling
    if sample is None:
        sample = 2 * len(orth)

    x = samplegen(sample, dist, rule)

    # Rosenblatt
    if not (dist_in is None):
        x = dist_out.ppf(dist_in.cdf(x))

    # evals
    y = np.array([func(z) for z in x.T])
    shape = y.shape[1:]
    y = y.reshape(len(y), y.size // len(y))
    if sample == 0:
        y_ = y[:]
        R = orth * y
    else:
        R, y_ = fit_regression(orth, x, y, regression, retall=1)

    R = po.reshape(R, shape)

    if retall:
        return R, x, y
    return R
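
A minimal sketch of the dist_in option, built on the commented doctest above; the `cp` alias for chaospy and the particular distributions are illustrative assumptions.

import chaospy as cp

func = lambda z: -z[1]**2 + 0.1*z[0]
dist_out = cp.J(cp.Normal(), cp.Normal())

# Build the expansion in a uniform input space; pcm_lr then maps the
# samples into the output space via dist_out.ppf(dist_in.cdf(x)).
dist_in = cp.J(cp.Uniform(-1, 1), cp.Uniform(-1, 1))
approx = pcm_lr(func, 2, dist_out, dist_in=dist_in)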
Example #4
# Assumed context (not part of the snippet): `np` is numpy, `la` is a linalg
# module providing `lstsq`, `lm` is sklearn.linear_model (optional), `po` is
# chaospy's polynomial module, and `rlstsq` is a local regularized solver.
def fit_regression(P, x, u, rule="LS", retall=False, **kws):
    """
Fit a polynomial chaos expansion using linear regression.

Parameters
----------
P : Poly
    Polynomial chaos expansion with `P.shape=(M,)` and `P.dim=D`.
x : array_like
    Collocation nodes with `x.shape=(D,K)`.
u : array_like
    Model evaluations with `len(u)=K`.
retall : bool
    If True, return `uhat` in addition to `R`.
rule : str
    Regression method used.

    The following methods use scikit-learn as the backend.
    See `sklearn.linear_model` for more details.

    Key     Scikit-learn    Description
    ---     ------------    -----------
        Parameters      Description
        ----------      -----------

    "BARD"  ARDRegression   Bayesian ARD Regression
        n_iter=300      Maximum iterations
        tol=1e-3        Optimization tolerance
        alpha_1=1e-6    Gamma shape parameter
        alpha_2=1e-6    Gamma inverse scale parameter
        lambda_1=1e-6   Gamma shape parameter
        lambda_2=1e-6   Gamma inverse scale parameter
        threshold_lambda=1e-4   Upper pruning threshold

    "BR"    BayesianRidge   Bayesian Ridge Regression
        n_iter=300      Maximum iterations
        tol=1e-3        Optimization tolerance
        alpha_1=1e-6    Gamma shape parameter
        alpha_2=1e-6    Gamma inverse scale parameter
        lambda_1=1e-6   Gamma shape parameter
        lambda_2=1e-6   Gamma inverse scale parameter

    "EN"    ElastiNet       Elastic Net
        alpha=1.0       Dampening parameter
        rho             Mixing parameter in [0,1]
        max_iter=300    Maximum iterations
        tol             Optimization tolerance

    "ENC"   ElasticNetCV    EN w/Cross Validation
        rho             Mixing parameter(s) in [0,1]
        eps=1e-3        min(alpha)/max(alpha)
        n_alphas        Number of alphas
        alphas          List of alphas
        max_iter        Maximum iterations
        tol             Optimization tolerance
        cv=3            Cross validation folds

    "LA"    Lars            Least Angle Regression
        n_nonzero_coefs Number of non-zero coefficients
        eps             Cholesky regularization

    "LAC"   LarsCV          LAR w/Cross Validation
        max_iter        Maximum iterations
        cv=5            Cross validation folds
        max_n_alphas    Max points for residuals in cv

    "LAS"   Lasso           Least Absolute Shrinkage and
                            Selection Operator
        alpha=1.0       Dampening parameter
        max_iter        Maximum iterations
        tol             Optimization tolerance

    "LASC"  LassoCV         LAS w/Cross Validation
        eps=1e-3        min(alpha)/max(alpha)
        n_alphas        Number of alphas
        alphas          List of alphas
        max_iter        Maximum iterations
        tol             Optimization tolerance
        cv=3            Cross validation folds

    "LL"    LassoLars       Lasso and Lars model
        max_iter        Maximum iterations
        eps             Cholesky regularization

    "LLC"   LassoLarsCV     LL w/Cross Validation
        max_iter        Maximum iterations
        cv=5            Cross validation folds
        max_n_alphas    Max points for residuals in cv
        eps             Cholesky regularization

    "LLIC"  LassoLarsIC     LL w/AIC or BIC
        criterion       "AIC" or "BIC" criterion
        max_iter        Maximum iterations
        eps             Cholesky regularization

    "OMP"   OrthogonalMatchingPursuit
        n_nonzero_coefs Number of non-zero coefficients
        tol             Max residual norm (instead of non-zero coef)

    Local methods

    Key     Description
    ---     -----------
    "LS"    Ordenary Least Squares

    "T"     Ridge Regression/Tikhonov Regularization
        order           Order of regularization (or custom matrix)
        alpha           Dampening parameter (else estimated from gcv)

    "TC"    T w/Cross Validation
        order           Order of regularization (or custom matrix)
        alpha           Dampening parameter (else estimated from gcv)


Returns
-------
R[, uhat]

R : Poly
    Fitted polynomial with `R.shape=u.shape[1:]` and `R.dim=D`.
uhat : np.ndarray
    The Fourier coefficients in the estimation.

Examples
--------
>>> P = cp.Poly([1, x, y])
>>> x = [[-1,-1,1,1], [-1,1,-1,1]]
>>> u = [0,1,1,2]
>>> print(fit_regression(P, x, u))
0.5q1+0.5q0+1.0

    """

    x = np.array(x)
    if len(x.shape)==1:
        x = x.reshape(1, *x.shape)
    u = np.array(u)

    Q = P(*x).T
    shape = u.shape[1:]
    u = u.reshape(u.shape[0], int(np.prod(u.shape[1:])))

    rule = rule.upper()

    # Local rules
    if rule=="LS":
        uhat = la.lstsq(Q, u)[0]

    elif rule=="T":
        uhat = rlstsq(Q, u, kws.get("order",0),
                kws.get("alpha", None), False)

    elif rule=="TC":
        uhat = rlstsq(Q, u, kws.get("order",0),
                kws.get("alpha", None), True)

    else:

        # Scikit-learn wrapper
        try:
            _ = lm
        except NameError:
            raise NotImplementedError("sklearn not installed")

        if rule=="BARD":
            solver = lm.ARDRegression(fit_intercept=False,
                    copy_X=False, **kws)

        elif rule=="BR":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.BayesianRidge(**kws)

        elif rule=="EN":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.ElasticNet(**kws)

        elif rule=="ENC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.ElasticNetCV(**kws)

        elif rule=="LA":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.Lars(**kws)

        elif rule=="LAC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LarsCV(**kws)

        elif rule=="LAS":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.Lasso(**kws)

        elif rule=="LASC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoCV(**kws)

        elif rule=="LL":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoLars(**kws)

        elif rule=="LLC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoLarsCV(**kws)

        elif rule=="LLIC":
            kws["fit_intercept"] = kws.get("fit_intercept", False)
            solver = lm.LassoLarsIC(**kws)

        elif rule=="OMP":
            solver = lm.OrthogonalMatchingPursuit(**kws)

        uhat = solver.fit(Q, u).coef_

    u = u.reshape(u.shape[0], *shape)

    R = po.sum((P*uhat.T), -1)
    R = po.reshape(R, shape)

    if retall==1:
        return R, uhat
    elif retall==2:
        return R, uhat, Q
    return R
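
A short sketch of the retall=2 path in this variant, which also hands back the design matrix; `cp` (chaospy) and `cp.variable` are assumptions from the doctest.

import numpy as np
import chaospy as cp

q0, q1 = cp.variable(2)
P = cp.Poly([1, q0, q1])
s = np.array([[-1, -1, 1, 1], [-1, 1, -1, 1]])
u = np.array([0, 1, 1, 2])

# retall=2 additionally returns the design matrix Q = P(*s).T, where
# Q[k, m] holds polynomial m evaluated at collocation node k.
R, uhat, Q = fit_regression(P, s, u, retall=2)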
Example #5
# Assumed context (not part of the snippet): `np` is numpy; `po`, `samplegen`,
# `orth_select`, and `fit_regression` are helpers from the chaospy code base.
def pcm_lr(func, order, dist_out, sample=None,
        dist_in=None, rule="H",
        orth=3, regression="LS", retall=False):
    """
Probabilistic Collocation Method using Linear Least Squares fit

Parameters
----------
Required arguments

func : callable
    The model to be approximated.  Must accept a single argument `z`,
    a 1-dimensional array with `len(z) == len(dist)`.
order : int
    The order of chaos expansion.
dist_out : Dist
    Distribution of the model's parameters.

Optional arguments

sample : int
    The order of the sample scheme to be used.
    If omitted, it defaults to 2*len(orth).
dist_in : Dist
    If included, the space will be mapped using a Rosenblatt
    transformation from dist_out to dist_in before creating an
    expansion in terms of dist_in.
rule : str
    Rule for generating samples.

    Key     Name                Nested
    ----    ----------------    ------
    "K"     Korobov             no
    "R"     (Pseudo-)Random     no
    "L"     Latin hypercube     no
    "S"     Sobol               yes
    "H"     Halton              yes
    "M"     Hammersley          yes

orth : int, str, callable, Poly
    Orthogonal polynomial generation.

    int, str :
        orth will be passed to orth_select
        for selection of orthogonalization.
        See orth_select doc for more details.

    callable :
        the return of orth(M, dist) will be used.

    Poly :
        it will be used directly.
        It must be of length N+1 = comb(M+D, M).
regression : str
    Linear regression method used.
    See fit_regression for more details.
retall : bool
    If True, return extra values.

#  Examples
#  --------
#  
#  Define function:
#  >>> func = lambda z: -z[1]**2 + 0.1*z[0]
#  
#  Define distribution:
#  >>> dist = cp.J(cp.Normal(), cp.Normal())
#  
#  Perform pcm:
#  >>> q, x, y = cp.pcm_lr(func, 2, dist, retall=True)
#  >>> print(cp.around(q, 10))
#  -q1^2+0.1q0
#  >>> print(len(x.T))
#  12
    """

    if dist_in is None:
        dist = dist_out
    else:
        dist = dist_in

    # orthogonalization
    if orth is None:
        if dist.dependent():
            orth = "chol"
        else:
            orth = "ttr"
    if isinstance(orth, (str, int)):
        orth = orth_select(orth)
    if not isinstance(orth, po.Poly):
        orth = orth(order, dist)

    # sampling
    if sample is None:
        sample = 2*len(orth)

    x = samplegen(sample, dist, rule)


    # Rosenblatt
    if not (dist_in is None):
        x = dist_out.ppf(dist_in.cdf(x))

    # evals
    y = np.array([func(z) for z in x.T])
    shape = y.shape[1:]
    y = y.reshape(len(y), y.size // len(y))
    if sample==0:
        y_ = y[:]
        R = orth * y
    else:
        R, y_ = fit_regression(orth, x, y, regression, retall=1)

    R = po.reshape(R, shape)

    if retall:
        return R, x, y
    return R
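
A hedged sketch varying the sampling rule and budget; the model and distribution are the ones from the commented doctest, and `cp` is assumed to be chaospy.

import chaospy as cp

func = lambda z: -z[1]**2 + 0.1*z[0]
dist = cp.J(cp.Normal(), cp.Normal())

# Sobol samples instead of the Halton default, with an explicit sample
# argument instead of the 2*len(orth) fallback.
approx, x, y = pcm_lr(func, 2, dist, sample=32, rule="S", retall=True)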