Example no. 1
def getUncertainty(parametric=True, initial=False, knowledge=False):

    perturbations = {
        'parametric': None,
        'initial': None,
        'knowledge': None,
    }

    if parametric:
        # Define Uncertainty Joint PDF
        CD = cp.Uniform(-0.10, 0.10)  # CD
        CL = cp.Uniform(-0.10, 0.10)  # CL
        rho0 = cp.Normal(0, 0.0333 * 1.5)  # rho0
        scaleHeight = cp.Uniform(-0.025, 0.01)  # scaleheight
        perturbations['parametric'] = cp.J(CD, CL, rho0, scaleHeight)

    if knowledge:
        pitch = cp.Normal(
            0, np.radians(1. / 3.)
        )  # Initial attitude uncertainty in angle of attack, 1-deg 3-sigma
        perturbations['knowledge'] = cp.J(pitch)

    if initial:
        V = cp.Uniform(-150, 150)  # Entry velocity deviation
        gamma = cp.Normal(0,
                          2.0 / 3.0)  # Entry FPA deviation, +- 2 deg 3-sigma
        perturbations['initial'] = cp.J(V, gamma)

    return perturbations
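A minimal usage sketch (hypothetical, assuming chaospy is imported as cp and numpy as np): each non-None entry of the returned dict is a cp.J joint distribution that can be sampled directly.

perturbations = getUncertainty(parametric=True, initial=True)
# one row per marginal (CD, CL, rho0, scaleHeight), one column per sample
samples = perturbations['parametric'].sample(1000, rule='halton')
print(samples.shape)  # (4, 1000)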
Example no. 2
def get_joint_uniform_distribution(dim, lower=0, upper=1):
    if dim == 1:
        return cp.Uniform(lower, upper)
    elif dim == 2:
        return cp.J(cp.Uniform(lower, upper), cp.Uniform(lower, upper))
    elif dim == 3:
        return cp.J(cp.Uniform(lower, upper), cp.Uniform(lower, upper),
                    cp.Uniform(lower, upper))
    elif dim == 4:
        return cp.J(cp.Uniform(lower, upper), cp.Uniform(lower, upper),
                    cp.Uniform(lower, upper), cp.Uniform(lower, upper))
    else:
        print("Wrong input dimension")
        return None
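The dimension-by-dimension branching above can be generalized. A sketch using chaospy's cp.Iid wrapper, which repeats an independent marginal a given number of times (the helper name is hypothetical; assumes a recent chaospy release):

def get_joint_uniform_distribution_nd(dim, lower=0, upper=1):
    # same joint as above, for any positive dimension
    if dim < 1:
        raise ValueError("dimension must be a positive integer")
    if dim == 1:
        return cp.Uniform(lower, upper)
    return cp.Iid(cp.Uniform(lower, upper), dim)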
Example no. 3
    def test_SVD_equivalence(self):
        systemsize = 4
        eigen_decayrate = 2.0

        # Create Hadamard Quadratic object
        QoI = examples.HadamardQuadratic(systemsize, eigen_decayrate)

        # Create the joint distribution
        jdist = cp.J(cp.Uniform(-1,1),
                     cp.Uniform(-1,1),
                     cp.Uniform(-1,1),
                     cp.Uniform(-1,1))

        active_subspace_eigen = ActiveSubspace(QoI, n_dominant_dimensions=1,
                                               n_monte_carlo_samples=10000,
                                               use_svd=False, read_rv_samples=False,
                                               write_rv_samples=True)
        active_subspace_eigen.getDominantDirections(QoI, jdist)

        active_subspace_svd = ActiveSubspace(QoI, n_dominant_dimensions=1,
                                             n_monte_carlo_samples=10000,
                                             use_svd=True, read_rv_samples=True,
                                             write_rv_samples=False)
        active_subspace_svd.getDominantDirections(QoI, jdist)

        # Check the iso_eigenvals
        np.testing.assert_almost_equal(active_subspace_eigen.iso_eigenvals, active_subspace_svd.iso_eigenvals)
        # check the iso_eigenvecs
        self.assertEqual(active_subspace_eigen.iso_eigenvecs.shape, active_subspace_svd.iso_eigenvecs.shape)
        for i in range(active_subspace_eigen.iso_eigenvecs.shape[1]):
            arr1 = active_subspace_eigen.iso_eigenvecs[:,i]
            arr2 = active_subspace_svd.iso_eigenvecs[:,i]
            if not np.allclose(arr1, arr2):
                np.testing.assert_almost_equal(arr1, -arr2)
Example no. 4
    def __init__(self):
        #inzone perm [m^2]
        var0 = RandVar('normal', [np.log(1e-13), 0.5])
        #above zone perm [m^2]
        var1 = RandVar('normal', [np.log(5e-14), 0.5])
        #inzone phi [-]
        var2 = RandVar('uniform', [0.1, 0.2])
        #above zone phi [-]
        var3 = RandVar('uniform', [0.05, 0.3])
        #aquitard thickness [m]
        var4 = RandVar('uniform', [10, 30])
        #injection rate [Mt/yr]
        var5 = RandVar('uniform', [0.5, 5])
        self.varset = [var0, var1, var2, var3, var4, var5]

        self.Nvar = len(self.varset)

        self.jointDist = cp.J(
            cp.Normal(mu=var0.param[0], sigma=var0.param[1]),
            cp.Normal(mu=var1.param[0], sigma=var1.param[1]),
            cp.Uniform(lo=var2.param[0], up=var2.param[1]),
            cp.Uniform(lo=var3.param[0], up=var3.param[1]),
            cp.Uniform(lo=var4.param[0], up=var4.param[1]),
            cp.Uniform(lo=var5.param[0], up=var5.param[1]),
        )
Example no. 5
def ExpandBank():

    hep = HEPBankReducedSmooth

    t = np.linspace(0, 350, 3500)
    # the Uniform bounds below were superseded by the Normal definitions:
    # t1 = cp.Uniform(0, 150)
    # t2 = cp.Uniform(100, 260)
    t1 = cp.Normal(70, 1)
    t2 = cp.Normal(115, 1)
    pdf = cp.J(t1, t2)
    polynomials = cp.orth_ttr(order=2, dist=pdf)  # not suitable for dependent distributions
    # polynomials = cp.orth_bert(N=2,dist=pdf)
    # polynomials = cp.orth_gs(order=2,dist=pdf)
    # polynomials = cp.orth_chol(order=2,dist=pdf)

    if 1:
        nodes, weights = cp.generate_quadrature(order=2,
                                                domain=pdf,
                                                rule="Gaussian")
        # nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="C")
        # nodes, weights = cp.generate_quadrature(order=9, domain=pdf, rule="L")
        print(nodes.shape)
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)
    else:
        nodes = pdf.sample(10, 'S')
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_regression(polynomials, nodes, samples, rule='T')
    return hepPCE
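Note the two branches above: the quadrature path builds the expansion from Gauss nodes and weights via cp.fit_quadrature, while the fallback fits the same polynomials by regression on 10 quasi-random samples ('S' is Sobol in the older chaospy shorthand) via cp.fit_regression.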
Example no. 6
def test_dependent_density():
    """Assert that manually create dependency structure holds."""
    distribution1 = chaospy.Exponential(1)
    distribution2 = chaospy.Uniform(lower=0, upper=distribution1)
    distribution = chaospy.J(distribution1, distribution2)
    assert distribution.pdf([0.5, 0.6]) == 0
    assert distribution.pdf([0.5, 0.4]) > 0
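The first assertion holds because the second coordinate (0.6) exceeds the realized upper bound (0.5) set by the first. A quick sampling check, as a sketch (assuming numpy is imported as np):

np.random.seed(1234)
samples = distribution.sample(1000)
# the dependent coordinate never exceeds its (random) upper bound
assert (samples[1] <= samples[0]).all()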
Example no. 7
def generate_distributions_n_dim(N_terms=4):
    # Set the mean (column 0) and standard deviation (column 1) for each
    # factor; the number of factors equals the number of rows.

    c = 0.5
    zm = np.array([[0., i] for i in range(1, N_terms + 1)])
    wm = np.array([[i * c, i] for i in range(1, N_terms + 1)])

    Z_pdfs = []  # list to hold probability density functions (pdfs) for Z and w
    w_pdfs = []
    for i in range(N_terms):
        Z_pdfs.append(cp.Normal(zm[i, 0], zm[i, 1]))
        w_pdfs.append(cp.Normal(wm[i, 0], wm[i, 1]))

    pdfs_list = Z_pdfs + w_pdfs
    jpdf = cp.J(*pdfs_list)  # *-operator to unpack the arguments out of a list or tuple

    return jpdf
Example no. 8
def halton_sequence(w, h, n):
    distribution = chaospy.J(chaospy.Uniform(0, w), chaospy.Uniform(0, h))
    samples = distribution.sample(n, rule="halton")
    x_samples = samples[0] + np.random.uniform(0, 1, n)
    y_samples = samples[1] + np.random.uniform(0, 1, n)

    return np.clip(x_samples, 0, w), np.clip(y_samples, 0, h)
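A hypothetical usage sketch: the uniform jitter shifts each Halton point by up to one unit, so np.clip keeps the samples inside the w-by-h box.

xs, ys = halton_sequence(640, 480, 100)  # 100 points in a 640x480 box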
Example no. 9
    def test_normal_integration(self):
        #print("Calculating an expectation with an Integration Operation")
        d = 2
        bigvalue = 7.0
        a = np.array([-bigvalue, -bigvalue])
        b = np.array([bigvalue, bigvalue])

        distr = []
        for _ in range(d):
            distr.append(cp.Normal(0, 2))
        distr_joint = cp.J(*distr)
        f = FunctionMultilinear([2.0, 0.0])
        fw = FunctionCustom(
            lambda coords: f(coords)[0] * float(distr_joint.pdf(coords)))

        grid = GlobalBSplineGrid(a, b)
        op = Integration(fw, grid=grid, dim=d)

        error_operator = ErrorCalculatorSingleDimVolumeGuided()
        combiinstance = SpatiallyAdaptiveSingleDimensions2(a, b, operation=op)
        #print("performSpatiallyAdaptiv…")
        v = combiinstance.performSpatiallyAdaptiv(1,
                                                  2,
                                                  error_operator,
                                                  tol=10**-3,
                                                  max_evaluations=40,
                                                  min_evaluations=25,
                                                  do_plot=False,
                                                  print_output=False)
        integral = v[3][0]
Example no. 10
def test_gc_correlation_2d_force_calc():
    """Test for low dimensional special cases the results are accurate."""
    marginals, corr_desired = get_strategies("test_gc_correlation_2d_force_calc")

    rtol, atol = 0.01, 0.01
    precision = atol + rtol * abs(corr_desired[0, 1])

    candidates = dict()
    candidates["corr"], candidates["stat"] = list(), list()

    is_success = False
    for order in [5, 10, 15, 20]:
        if is_success:
            break

        kwargs = dict()
        kwargs["order"] = order

        corr_transformed = gc_correlation(
            marginals,
            corr_desired,
            **kwargs,
            force_calc=True,
        )
        copula = cp.Nataf(cp.J(*marginals), corr_transformed)
        corr_copula = np.corrcoef(copula.sample(10000000))
        candidates["stat"].append(np.abs(corr_desired[0, 1] - corr_copula[0, 1]))
        candidates["corr"].append(corr_copula)

        is_success = candidates["stat"][-1] < precision

    corr_copula = candidates["corr"][np.argmin(candidates["stat"])]
    np.testing.assert_allclose(corr_desired, corr_copula, rtol, atol)
Example no. 11
def KG(z, evls, pnts, gp, kernel, NSAMPS=30, DEG=3, sampling=False):

    # Find initial minimum value from GP model
    min_val = 1e100
    X_sample = pnts
    Y_sample = evls
    x0 = np.random.uniform(XL, XU, size=DIM)
    res = mini(gp, x0=x0,
               bounds=[(XL, XU) for ss in range(DIM)])
    min_val = res.fun
    min_x = res.x

    # estimate min(f^{n+1}) with MC simulation
    MEAN = 0
    points = np.atleast_2d(np.append(X_sample, z)).T
    m, s = gp(z, return_std=True)
    distribution = cp.J(cp.Normal(0, s))
    samples = distribution.sample(NSAMPS, rule='Halton')
    PCEevals = []
    for pp in range(NSAMPS):

        # construct future GP, using z as the next point
        evals = np.append(evls, m + samples[pp])
        #evals = np.append(evls, m + np.random.normal(0, s))
        gpnxt = GaussianProcessRegressor(kernel=kernel,
                                         n_restarts_optimizer=35,
                                         random_state=98765,
                                         normalize_y=True)
        gpnxt.fit(points, evals)

        # convenience function
        def gpf_next(x, return_std=False):
            alph, astd = gpnxt.predict(np.atleast_2d(x), return_std=True)
            alph = alph[0]
            if return_std:
                return (alph, astd)
            else:
                return alph

        res = mini(gpf_next, x0=x0, bounds=[(XL, XU) for ss in range(DIM)])
        min_next_val = res.fun
        min_next_x = res.x

        PCEevals.append(min_next_val)
    if not sampling:
        polynomial_expansion = cp.orth_ttr(DEG, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
    else:
        MEAN = np.mean(PCEevals)
    return min_val - MEAN
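The return value is the knowledge-gradient acquisition at z: the current GP minimum minus the expected posterior minimum after a hypothetical evaluation at z, where the expectation over the NSAMPS Halton draws is taken either through the PCE surrogate or, with sampling=True, as their plain mean.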
Example no. 12
def testCuba():
    from cubature import cubature as cuba

    CD = cp.Uniform(-0.10, 0.10)  # CD
    CL = cp.Uniform(-0.10, 0.10)  # CL
    rho0 = cp.Normal(0, 0.0333)  # rho0
    scaleHeight = cp.Uniform(-0.05, 0.05)  # scaleheight
    pdf = cp.J(CD, CL, rho0, scaleHeight)

    def PDF(x, *args, **kwargs):
        return pdf.pdf(np.array(x).T)

    x0 = np.array([-0.10, -0.10, -0.5, -0.05])
    xf = np.array([0.10, 0.10, 0.5, 0.05])
    P, err = cuba(PDF,
                  ndim=4,
                  fdim=1,
                  xmin=x0,
                  xmax=xf,
                  vectorized=True,
                  adaptive='p')

    print "Multi-dimensional integral of the PDF over its support = {}".format(
        P[0])
    print "Total error in integration = {}".format(err[0])
Example no. 13
def test_dist_addition_wrappers():
    dists = [
        chaospy.J(chaospy.Normal(2), chaospy.Normal(2), chaospy.Normal(3)),  # ShiftScale
        chaospy.J(chaospy.Uniform(1, 3), chaospy.Uniform(1, 3), chaospy.Uniform(1, 5)),  # LowerUpper
        chaospy.MvNormal([2, 2, 3], numpy.eye(3)),  # MeanCovariance
    ]
    for dist in dists:
        joint = chaospy.Add([1, 1, 3], dist)

        assert numpy.allclose(joint.inv([0.5, 0.5, 0.5]), [3, 3, 6])
        assert numpy.allclose(joint.fwd([3, 3, 6]), [0.5, 0.5, 0.5])
        density = joint.pdf([2, 2, 2], decompose=True)
        assert numpy.isclose(density[0], density[1]), (dist, density)
        assert not numpy.isclose(density[0], density[2]), dist
        assert numpy.isclose(joint[0].inv([0.5]), 3)
        assert numpy.isclose(joint[0].fwd([3]), 0.5)
Example no. 14
    def __init__(self, vary, n_mc_samples, **kwargs):
        """

        Parameters
        ----------
        + vary : a dictionary of chaospy input distributions
        + n_mc_samples : the number of MC samples. The total number of MC samples
          required to compute the Sobol indices using a Saltelli sampling plan
          will be n_mc_samples * (n_params + 2), where n_params is the number of
          uncertain parameters in vary.

        Returns
        -------
        None.

        """
        super().__init__(vary=vary, max_num=n_mc_samples, **kwargs)
        # the number of uncertain inputs
        self.n_params = len(vary)
        # the number of MC samples, for each of the n_params + 2 input matrices
        self.n_mc_samples = n_mc_samples
        self.vary = Vary(vary)
        self.count = 0
        # joint distribution
        self.joint = cp.J(*list(vary.values()))
        # create the Saltelli sampling plan
        self.saltelli(n_mc_samples)
Example no. 15
def test_gc_correlation_functioning():
    """Test the function runs successfully."""
    marginals, corr = get_strategies("test_gc_correlation_functioning")
    corr_transformed = gc_correlation(marginals, corr)
    cp.Nataf(cp.J(*marginals), corr_transformed)

    return "the function ended without error"
Example no. 16
def test_approx_dist():
    dist = [normal()]
    for d in range(dim-1):
        dist.append(normal() + dist[-1])
    dist = cp.J(*dist)
    out = dist.sample(samples)
    out = dist.fwd(out)
    out = dist.inv(out)
Example no. 17
def test_truncation_lower_as_dist():
    """Ensure lower bound as a distribution is supported."""
    dist1 = chaospy.Normal()
    dist2 = chaospy.Trunc(chaospy.Normal(), lower=dist1)
    joint = chaospy.J(dist1, dist2)
    ref10 = (0.5 - dist1.fwd(-1)) / (1 - dist1.fwd(-1))
    assert numpy.allclose(joint.fwd([[-1, 0, 1], [0, 0, 0]]),
                          [dist1.fwd([-1, 0, 1]), [ref10, 0, 0]])
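Here ref10 is the CDF at 0 of a standard normal truncated below at -1: (Phi(0) - Phi(-1)) / (1 - Phi(-1)), with Phi(0) = 0.5 as written in the code.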
Example no. 18
def test_dependencies():
    """Assert that mu being a 2-D distribution is fine."""
    mu = chaospy.J(chaospy.Uniform(0, 1), chaospy.Uniform(1, 2))
    dist = chaospy.MvNormal(mu=mu, sigma=[[1, 0.5], [0.5, 1]])
    joint = chaospy.J(mu, dist)

    samples = joint.sample(100000)
    mean = numpy.array([1 / 2., 3 / 2., 1 / 2., 3 / 2.])
    covariance = numpy.array([[1 / 12., 0., 1 / 12., 0.],
                              [0., 1 / 12., 0, 1 / 12.],
                              [1 / 12., 0., 13 / 12., 1 / 2.],
                              [0., 1 / 12., 1 / 2., 13 / 12.]])
    assert numpy.allclose(numpy.mean(samples, axis=-1),
                          mean,
                          rtol=1e-2,
                          atol=1e-2)
    assert numpy.allclose(numpy.cov(samples), covariance, rtol=1e-2, atol=1e-2)
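The covariance entries follow from the law of total (co)variance: each Uniform has variance 1/12; conditional on mu, the MvNormal has unit variances and covariance 1/2, so Var(dist_i) = 1 + 1/12 = 13/12, Cov(dist_1, dist_2) = 1/2 + 0, and Cov(mu_i, dist_i) = Var(mu_i) = 1/12.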
Example no. 19
def test_constant_expected():
    """Test if polynomial constant behave as expected."""
    distribution = chaospy.J(chaospy.Uniform(-1.2, 1.2),
                             chaospy.Uniform(-2.0, 2.0))
    const = chaospy.polynomial(7.)
    assert chaospy.E(const, distribution[0]) == const
    assert chaospy.E(const, distribution) == const
    assert chaospy.Var(const, distribution) == 0.
Example no. 20
def test_operator_slicing():
    dists = [
        chaospy.J(chaospy.Normal(2), chaospy.Normal(2),
                  chaospy.Normal(3)),  # ShiftScale
        chaospy.J(chaospy.Uniform(1, 3), chaospy.Uniform(1, 3),
                  chaospy.Uniform(1, 5)),  # LowerUpper
        chaospy.MvNormal([2, 2, 3], numpy.eye(3)),  # MeanCovariance
    ]
    for dist in dists:

        assert numpy.allclose(dist.inv([0.5, 0.5, 0.5]), [2, 2, 3])
        assert numpy.allclose(dist.fwd([2, 2, 3]), [0.5, 0.5, 0.5])
        density = dist.pdf([2, 2, 2], decompose=True)
        assert numpy.isclose(density[0], density[1]), dist
        assert not numpy.isclose(density[0], density[2]), dist
        assert numpy.isclose(dist[0].inv([0.5]), 2)
        assert numpy.isclose(dist[0].fwd([2]), 0.5)
Example no. 21
def test_truncation_upper_as_dist():
    """Ensure upper bound as a distribution is supported."""
    dist1 = chaospy.Normal()
    dist2 = chaospy.Trunc(chaospy.Normal(), upper=dist1)
    joint = chaospy.J(dist1, dist2)
    ref12 = 0.5 / dist1.fwd(1)
    assert numpy.allclose(joint.fwd([[-1, 0, 1], [0, 0, 0]]),
                          [dist1.fwd([-1, 0, 1]), [1, 1, ref12]])
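Analogously, ref12 is the CDF at 0 of a standard normal truncated above at 1: Phi(0) / Phi(1) = 0.5 / Phi(1).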
Example no. 22
def test_quasimc():
    dist = [cp.Normal()]
    for d in range(dim-1):
        dist.append(cp.Normal(dist[-1]))
    dist = cp.J(*dist)
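    # quasi-Monte Carlo sampling rules in the older chaospy shorthand:
    # "H" = Halton, "M" = Hammersley, "S" = Sobol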
    dist.sample(samples, "H")
    dist.sample(samples, "M")
    dist.sample(samples, "S")
Example no. 23
def test_dist():
    dist = [cp.Normal()]
    for d in range(dim-1):
        dist.append(cp.Normal(dist[-1]))
    dist = cp.J(*dist)
    out = dist.sample(samples)
    out = dist.fwd(out)
    out = dist.inv(out)
Example no. 24
def plot_figures():
    """Plot figures for multivariate distribution section."""
    rc("figure", figsize=[8., 4.])
    rc("figure.subplot", left=.08, top=.95, right=.98)
    seed(1000)

    Q1 = cp.Gamma(2)
    Q2 = cp.Normal(0, Q1)
    Q = cp.J(Q1, Q2)

    subplot(121)
    s, t = meshgrid(linspace(0, 5, 200), linspace(-6, 6, 200))
    contourf(s, t, Q.pdf([s, t]), 50)
    xlabel("$q_1$")
    ylabel("$q_2$")
    subplot(122)
    Qr = Q.sample(500)
    scatter(*Qr)
    xlabel("$Q_1$")
    ylabel("$Q_2$")
    axis([0, 5, -6, 6])

    savefig("mv1.png")
    clf()

    Q2 = cp.Gamma(1)
    Q1 = cp.Normal(Q2**2, Q2 + 1)
    Q = cp.J(Q1, Q2)

    subplot(121)
    s, t = meshgrid(linspace(-4, 7, 200), linspace(0, 3, 200))
    contourf(s, t, Q.pdf([s, t]), 30)
    xlabel("$q_1$")
    ylabel("$q_2$")
    subplot(122)
    Qr = Q.sample(500)
    scatter(*Qr)
    xlabel("$Q_1$")
    ylabel("$Q_2$")
    axis([-4, 7, 0, 3])

    savefig("mv2.png")
    clf()
Example no. 25
def EHI(x,
        gp1,
        gp2,
        xi=0.,
        x2=None,
        MD=None,
        NSAMPS=200,
        PCE=False,
        ORDER=2,
        PAR_RES=100):

    mu1, std1 = gp1(x, return_std=True)
    mu2, std2 = gp2(x, return_std=True)

    a, b, c = parEI(gp1, gp2, x2, '', EI=False, MD=MD, PAR_RES=PAR_RES)
    par = b.T[c, :]
    par += xi
    MEAN = 0  # running sum for observed hypervolume improvement
    if not PCE:  # Monte Carlo Sampling
        for ii in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improved from this point
            if idx[-1]:
                MEAN += H(newPar) - H(par)

        return (MEAN / NSAMPS)
    else:
        # Polynomial Chaos
        # (assumes 2 objective functions)
        distribution = cp.J(cp.Normal(0, std1), cp.Normal(0, std2))

        # quasi-random (Halton) samples
        samples = distribution.sample(NSAMPS, rule='Halton')
        PCEevals = []
        for pp in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improves
            if idx[-1]:
                PCEevals.append(H(newPar) - H(par))
            else:
                PCEevals.append(0)
        polynomial_expansion = cp.orth_ttr(ORDER, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
        return (MEAN)
Example no. 26
def generate_distributions(zm, wm=None):
    # define marginal distributions
    if wm is not None:
        zm = np.append(zm, wm, axis=0)
    marginal_distributions = [cp.Normal(*mu_sig) for mu_sig in zm]
    # define the joint distribution
    jpdf = cp.J(*marginal_distributions)

    return jpdf
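A hypothetical usage sketch: each row of zm (and optionally wm) holds a (mean, standard deviation) pair for one Normal marginal.

jpdf = generate_distributions(np.array([[0., 1.], [1., 2.]]))
print(len(jpdf))  # 2 marginals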
Example no. 27
    def custom_distribution(**kws):

        prm = defaults.copy()
        prm.update(kws)
        dist = ConstructedDistribution(**prm)

        for key, function in kwargs.items():
            attr_name = LEGAL_ATTRS[key]
            setattr(dist, attr_name, types.MethodType(function, dist))
        return chaospy.J(dist)
Example no. 28
def test_gc_correlation_2d():
    """Test the results from the paper are accurate."""
    marginals, corr_desired = get_strategies("test_gc_correlation_2d")
    rtol, atol = 0.01, 0.01

    corr_transformed = gc_correlation(marginals, corr_desired)
    copula = cp.Nataf(cp.J(*marginals), corr_transformed)
    corr_copula = np.corrcoef(copula.sample(10000000))

    np.testing.assert_allclose(corr_desired, corr_copula, rtol, atol)
Example no. 29
def test_2():
    """Check that each approach integrates the 2-D uniform PDF to 1."""
    p_gc_legendre_two = partial(quadrature_gauss_legendre_two, a=0, b=1)

    approaches = []
    approaches += [monte_carlo_naive_two_dimensions, monte_carlo_quasi_two_dimensions]
    approaches += [p_gc_legendre_two]
    distribution = cp.J(cp.Uniform(0, 1), cp.Uniform(0, 1))
    for approach in approaches:
        np.testing.assert_almost_equal(approach(distribution.pdf), 1.0)
Example no. 30
    def test_on_exponential(self):
        systemsize = 2
        QoI = examples.Exp_07xp03y(systemsize)

        jdist = cp.J(cp.Uniform(-1,1),
                     cp.Uniform(-1,1))

        active_subspace = ActiveSubspace(QoI, n_dominant_dimensions=1, n_monte_carlo_samples=10000)
        active_subspace.getDominantDirections(QoI, jdist)

        expected_W1 = QoI.a / np.linalg.norm(QoI.a)
        np.testing.assert_almost_equal(active_subspace.dominant_dir[:,0], expected_W1)