Example #1
def ExpandBank():

    hep = HEPBankReducedSmooth  # project-specific profile function, defined elsewhere

    t = np.linspace(0, 350, 3500)
    t1 = cp.Uniform(0, 150)
    t2 = cp.Uniform(100, 260)

    t1 = cp.Normal(70, 1)
    t2 = cp.Normal(115, 1)
    pdf = cp.J(t1, t2)
    polynomials = cp.orth_ttr(order=2, dist=pdf)  # three-terms recurrence: not valid for dependent distributions
    # polynomials = cp.orth_bert(N=2,dist=pdf)
    # polynomials = cp.orth_gs(order=2,dist=pdf)
    # polynomials = cp.orth_chol(order=2,dist=pdf)

    if 1:
        nodes, weights = cp.generate_quadrature(order=2,
                                                domain=pdf,
                                                rule="Gaussian")
        # nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="C")
        # nodes, weights = cp.generate_quadrature(order=9, domain=pdf, rule="L")
        print(nodes.shape)
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)
    else:
        nodes = pdf.sample(10, 'S')
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_regression(polynomials, nodes, samples, rule='T')
    return hepPCE
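A minimal usage sketch for the example above (hypothetical, assuming the same `np`/`cp` imports and an available `HEPBankReducedSmooth`; the statistics calls use the same pre-4.0 chaospy API as the snippet):

hepPCE = ExpandBank()
pdf = cp.J(cp.Normal(70, 1), cp.Normal(115, 1))  # same joint used for the fit
mean = cp.E(hepPCE, pdf)    # mean profile over the 3500-point time grid
std = cp.Std(hepPCE, pdf)   # pointwise standard deviation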
Example #2
def getUncertainty(parametric=True, initial=False, knowledge=False):

    perturbations = {
        'parametric': None,
        'initial': None,
        'knowledge': None,
    }

    if parametric:
        # Define Uncertainty Joint PDF
        CD = cp.Uniform(-0.10, 0.10)  # CD
        CL = cp.Uniform(-0.10, 0.10)  # CL
        rho0 = cp.Normal(0, 0.0333 * 1.5)  # rho0
        scaleHeight = cp.Uniform(-0.025, 0.01)  # scaleheight
        perturbations['parametric'] = cp.J(CD, CL, rho0, scaleHeight)

    if knowledge:
        pitch = cp.Normal(
            0, np.radians(1. / 3.)
        )  # Initial attitude uncertainty in angle of attack, 1-deg 3-sigma
        perturbations['knowledge'] = cp.J(pitch)

    if initial:
        V = cp.Uniform(-150, 150)  # Entry velocity deviation
        gamma = cp.Normal(0,
                          2.0 / 3.0)  # Entry FPA deviation, +- 2 deg 3-sigma
        perturbations['initial'] = cp.J(V, gamma)

    return perturbations
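A usage sketch (hypothetical call, assuming `cp` as above): each entry of the returned dict is either None or a chaospy joint that can be sampled directly.

perturbations = getUncertainty(parametric=True, initial=True)
parametric_samples = perturbations['parametric'].sample(500, 'S')  # shape (4, 500): CD, CL, rho0, scaleHeight
initial_samples = perturbations['initial'].sample(500, 'S')        # shape (2, 500): V, gamma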
Example #3
def generate_distributions_n_dim(N_terms=4):
    # Set the mean (column 0) and standard deviation (column 1) for each factor z.
    # The number of factors equals the number of rows.

    # zm = np.zeros((N_terms, 2))
    # zm[0, 1] = 1
    # zm[1, 1] = 2
    # zm[2, 1] = 3
    # zm[3, 1] = 4
    # wm = np.zeros_like(zm)
    # c = 0.5
    # wm[:, 0] = [i * c for i in range(1, N_terms + 1)]
    # wm[:, 1] = zm[:, 1].copy()  # the standard deviation is the same as for  zmax

    c = 0.5
    zm = np.array([[0., i] for i in range(1, N_terms + 1)])
    wm = np.array([[i * c, i] for i in range(1, N_terms + 1)])

    Z_pdfs = []  # list to hold probability density functions (pdfs) for Z and w
    w_pdfs = []
    for i in range(N_terms):
        Z_pdfs.append(cp.Normal(zm[i, 0], zm[i, 1]))
        w_pdfs.append(cp.Normal(wm[i, 0], wm[i, 1]))

    pdfs_list = Z_pdfs + w_pdfs
    jpdf = cp.J(*pdfs_list)  # *-operator to unpack the arguments out of a list or tuple

    return jpdf
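A short usage sketch under the construction above (assuming `np`/`cp` imports):

jpdf = generate_distributions_n_dim(N_terms=4)
nodes = jpdf.sample(100)  # shape (8, 100): the 4 Z factors followed by the 4 w factors
print(cp.E(jpdf))         # means: [0, 0, 0, 0, 0.5, 1.0, 1.5, 2.0]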
Example #4
    def __init__(self):
        #inzone perm [m^2]
        var0 = RandVar('normal', [np.log(1e-13), 0.5])
        #above zone perm [m^2]
        var1 = RandVar('normal', [np.log(5e-14), 0.5])
        #inzone phi [-]
        var2 = RandVar('uniform', [0.1, 0.2])
        #above zone phi [-]
        var3 = RandVar('uniform', [0.05, 0.3])
        #aquitard thickness [m]
        var4 = RandVar('uniform', [10, 30])
        #injection rate [Mt/yr]
        var5 = RandVar('uniform', [0.5, 5])
        self.varset = [var0, var1, var2, var3, var4, var5]

        self.Nvar = len(self.varset)

        self.jointDist = cp.J(
            cp.Normal(mu=var0.param[0], sigma=var0.param[1]),
            cp.Normal(mu=var1.param[0], sigma=var1.param[1]),
            cp.Uniform(lo=var2.param[0], up=var2.param[1]),
            cp.Uniform(lo=var3.param[0], up=var3.param[1]),
            cp.Uniform(lo=var4.param[0], up=var4.param[1]),
            cp.Uniform(lo=var5.param[0], up=var5.param[1]),
        )
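`RandVar` is project-specific and not shown in the snippet; a hypothetical minimal stand-in that makes the constructor self-contained could look like this:

class RandVar:
    """Hypothetical container pairing a distribution type with its parameters."""

    def __init__(self, dist_type, param):
        self.dist_type = dist_type  # 'normal' or 'uniform'
        self.param = param          # [mu, sigma] for normal, [lo, up] for uniform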
Example #5
    def test_ordered(self):
        parameter_list = [["gbar_Na", 120,
                           cp.Uniform(110, 130)],
                          ["gbar_K", 36, cp.Normal(36, 1)],
                          ["gbar_L", 0.3, cp.Chi(1, 1, 0.3)]]

        parameters = Parameters(parameter_list)

        uncertain_parameters = parameters.get_from_uncertain("name")
        self.assertEqual(uncertain_parameters, ["gbar_Na", "gbar_K", "gbar_L"])

        uncertain_parameters = parameters.get("name")
        self.assertEqual(uncertain_parameters, ["gbar_Na", "gbar_K", "gbar_L"])

        parameter_list = [["gbar_K", 36, cp.Normal(36, 1)],
                          ["gbar_Na", 120,
                           cp.Uniform(110, 130)],
                          ["gbar_L", 0.3, cp.Chi(1, 1, 0.3)]]

        parameters = Parameters(parameter_list)

        uncertain_parameters = parameters.get_from_uncertain("name")
        self.assertEqual(uncertain_parameters, ["gbar_K", "gbar_Na", "gbar_L"])

        uncertain_parameters = parameters.get("name")
        self.assertEqual(uncertain_parameters, ["gbar_K", "gbar_Na", "gbar_L"])

        uncertain_parameters = parameters.get("value")
        self.assertEqual(uncertain_parameters, [36, 120, 0.3])
Example #6
def test_dist():
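    # `dim` and `samples` are module-level parameters in the original test suite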
    dist = [cp.Normal()]
    for d in range(dim-1):
        dist.append(cp.Normal(dist[-1]))
    dist = cp.J(*dist)
    out = dist.sample(samples)
    out = dist.fwd(out)
    out = dist.inv(out)
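A concrete two-dimensional instance of the chained construction above, where the second variable's mean is the first variable itself (a sketch assuming `np`/`cp` imports, not part of the original suite):

q0 = cp.Normal()
q1 = cp.Normal(q0)      # the mean of q1 is the random variable q0
joint = cp.J(q0, q1)
x = joint.sample(100)
u = joint.fwd(x)        # forward Rosenblatt transformation into [0, 1]^2
assert np.allclose(joint.inv(u), x)  # the inverse transform recovers the samples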
Example #7
def test_truncation_lower_as_dist():
    """Ensure lower bound as a distribution is supported."""
    dist1 = chaospy.Normal()
    dist2 = chaospy.Trunc(chaospy.Normal(), lower=dist1)
    joint = chaospy.J(dist1, dist2)
    ref10 = (0.5 - dist1.fwd(-1)) / (1 - dist1.fwd(-1))
    assert numpy.allclose(joint.fwd([[-1, 0, 1], [0, 0, 0]]),
                          [dist1.fwd([-1, 0, 1]), [ref10, 0, 0]])
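The reference value `ref10` is the standard truncated-CDF formula F_trunc(x) = (F(x) - F(l)) / (1 - F(l)) for x >= l, here evaluated at x = 0 with the random lower bound fixed at l = -1. A stand-alone check with a constant bound (a sketch):

base = chaospy.Normal()
trunc = chaospy.Trunc(chaospy.Normal(), lower=-1.0)
expected = (base.fwd(0.0) - base.fwd(-1.0)) / (1 - base.fwd(-1.0))
assert numpy.allclose(trunc.fwd(0.0), expected)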
Example #8
def test_quasimc():
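    # `dim` and `samples` are module-level parameters in the original test suite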
    dist = [cp.Normal()]
    for d in range(dim-1):
        dist.append(cp.Normal(dist[-1]))
    dist = cp.J(*dist)
    dist.sample(samples, "H")
    dist.sample(samples, "M")
    dist.sample(samples, "S")
Example #9
def test_truncation_upper_as_dist():
    """Ensure upper bound as a distribution is supported."""
    dist1 = chaospy.Normal()
    dist2 = chaospy.Trunc(chaospy.Normal(), upper=dist1)
    joint = chaospy.J(dist1, dist2)
    ref12 = 0.5 / dist1.fwd(1)
    assert numpy.allclose(joint.fwd([[-1, 0, 1], [0, 0, 0]]),
                          [dist1.fwd([-1, 0, 1]), [1, 1, ref12]])
Example #10
def EHI(x,
        gp1,
        gp2,
        xi=0.,
        x2=None,
        MD=None,
        NSAMPS=200,
        PCE=False,
        ORDER=2,
        PAR_RES=100):

    mu1, std1 = gp1(x, return_std=True)
    mu2, std2 = gp2(x, return_std=True)

    a, b, c = parEI(gp1, gp2, x2, '', EI=False, MD=MD, PAR_RES=PAR_RES)
    par = b.T[c, :]
    par += xi
    MEAN = 0  # running sum for observed hypervolume improvement
    if not PCE:  # Monte Carlo Sampling
        for ii in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improved from this point
            if idx[-1]:
                MEAN += H(newPar) - H(par)

        return (MEAN / NSAMPS)
    else:
        # Polynomial Chaos
        # (assumes 2 objective functions)
        distribution = cp.J(cp.Normal(0, std1), cp.Normal(0, std2))

        # quasi-random (Halton) samples
        samples = distribution.sample(NSAMPS, rule='Halton')
        PCEevals = []
        for pp in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improves
            if idx[-1]:
                PCEevals.append(H(newPar) - H(par))
            else:
                PCEevals.append(0)
        polynomial_expansion = cp.orth_ttr(ORDER, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
        return (MEAN)
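The PCE branch fits a polynomial surrogate to the sampled improvements and then takes the surrogate's mean under the sampling distribution. The same pattern in isolation, with a toy response (a sketch using the pre-4.0 chaospy API seen throughout these examples):

dist = cp.J(cp.Normal(0, 1.0), cp.Normal(0, 2.0))
samples = dist.sample(200, rule='Halton')     # shape (2, 200)
evals = [s1**2 + s2 for s1, s2 in samples.T]  # toy objective evaluations
expansion = cp.orth_ttr(2, dist)
surrogate = cp.fit_regression(expansion, samples, evals)
print(cp.E(surrogate, dist))                  # ≈ E[s1**2] + E[s2] = 1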
Example #11
def test_truncation_both_as_dist():
    """Ensure that lower and upper bound combo is supported."""
    dist1 = chaospy.Normal()
    dist2 = chaospy.Normal()
    dist3 = chaospy.Trunc(chaospy.Normal(), lower=dist1, upper=dist2)
    joint = chaospy.J(dist1, dist2, dist3)
    ref21 = (0.5 - dist1.fwd(-1)) / (1 - dist1.fwd(-1)) / dist2.fwd(1)
    assert numpy.allclose(
        joint.fwd([[-1, -1, 1, 1], [-1, 1, -1, 1], [0, 0, 0, 0]]), [
            dist1.fwd([-1, -1, 1, 1]),
            dist2.fwd([-1, 1, -1, 1]), [1, ref21, 1, 0]
        ])
Example #12
def test_validate_similarity_wasserstein():
    validator = uq.comparison.validate.ValidateSimilarityWasserstein()
    assert(validator.element_name() == 'validate_similarity_wasserstein')
    assert(validator.element_version() == '0.1')
    d1 = cp.Normal(0, 1)
    d2 = cp.Normal(1, 2)
    xmin = min(d1.lower[0], d2.lower[0])
    xmax = max(d1.upper[0], d2.upper[0])
    x = np.linspace(xmin, xmax, 100)
    p1 = (xmax - xmin) * d1.cdf(x)
    p2 = (xmax - xmin) * d2.cdf(x)
    distance = validator.compare(p1, p2)
    assert distance >= 0.0
Example #13
def test_validate_similarity_jensen_shannon():
    validator = uq.comparison.validate.ValidateSimilarityJensenShannon()
    assert(validator.element_name() == 'validate_similarity_jensen_shannon')
    assert(validator.element_version() == '0.1')
    d1 = cp.Normal(0, 1)
    d2 = cp.Normal(1, 2)
    xmin = min(d1.lower[0], d2.lower[0])
    xmax = max(d1.upper[0], d2.upper[0])
    x = np.linspace(xmin, xmax, 100)
    p1 = d1.pdf(x)
    p2 = d2.pdf(x)
    distance = validator.compare(p1, p2)
    assert distance >= 0.0 and distance <= 1.0
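For reference, the same comparison can be made directly with SciPy, whose `jensenshannon` returns the square root of the Jensen-Shannon divergence and is bounded by 1 when base 2 is used (a sketch, not part of the original test):

from scipy.spatial.distance import jensenshannon
distance_ref = jensenshannon(p1, p2, base=2)  # inputs are normalized internally
assert 0.0 <= distance_ref <= 1.0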
Example #14
def test_truncation_multivariate():
    """Ensure that multivariate bounds works as expected."""
    dist1 = chaospy.Iid(chaospy.Normal(), 2)
    dist2 = chaospy.Trunc(chaospy.Iid(chaospy.Normal(), 2),
                          lower=dist1,
                          upper=[1, 1])
    joint = chaospy.J(dist1, dist2)
    assert numpy.allclose(
        joint.fwd([[-1, -1, -1, -1], [-1, -1, -1, -1], [0, 0, -2, 2],
                   [-2, 2, 0, 0]]),
        [[0.15865525, 0.15865525, 0.15865525, 0.15865525],
         [0.15865525, 0.15865525, 0.15865525, 0.15865525],
         [0.48222003, 0.48222003, 0., 1.], [0., 1., 0.48222003, 0.48222003]],
    )
Example #15
def test_sc(tmpdir, campaign):
    params = {
        "Pe": {
            "type": "float",
            "min": "1.0",
            "max": "2000.0",
            "default": "100.0"
        },
        "f": {
            "type": "float",
            "min": "0.0",
            "max": "10.0",
            "default": "1.0"
        },
        "out_file": {
            "type": "string",
            "default": "output.csv"
        }
    }
    output_filename = params["out_file"]["default"]
    output_columns = ["u"]
    encoder = uq.encoders.GenericEncoder(
        template_fname='tests/sc/sc.template',
        delimiter='$',
        target_filename='sc_in.json')
    decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                    output_columns=output_columns,
                                    header=0)
    collater = uq.collate.AggregateSamples(average=False)
    vary = {"Pe": cp.Uniform(100.0, 200.0), "f": cp.Normal(1.0, 0.1)}
    sampler = uq.sampling.SCSampler(vary=vary, polynomial_order=1)
    actions = uq.actions.ExecuteLocal("tests/sc/sc_model.py sc_in.json")
    stats = uq.analysis.SCAnalysis(sampler=sampler, qoi_cols=output_columns)
    campaign(tmpdir, 'sc', 'sc', params, encoder, decoder, sampler, collater,
             actions, stats, vary, 0, 1)
Example #16
    def test_normal_integration(self):
        #print("Calculating an expectation with an Integration Operation")
        d = 2
        bigvalue = 7.0
        a = np.array([-bigvalue, -bigvalue])
        b = np.array([bigvalue, bigvalue])

        distr = []
        for _ in range(d):
            distr.append(cp.Normal(0, 2))
        distr_joint = cp.J(*distr)
        f = FunctionMultilinear([2.0, 0.0])
        fw = FunctionCustom(
            lambda coords: f(coords)[0] * float(distr_joint.pdf(coords)))

        grid = GlobalBSplineGrid(a, b)
        op = Integration(fw, grid=grid, dim=d)

        error_operator = ErrorCalculatorSingleDimVolumeGuided()
        combiinstance = SpatiallyAdaptiveSingleDimensions2(a, b, operation=op)
        #print("performSpatiallyAdaptiv…")
        v = combiinstance.performSpatiallyAdaptiv(1,
                                                  2,
                                                  error_operator,
                                                  tol=10**-3,
                                                  max_evaluations=40,
                                                  min_evaluations=25,
                                                  do_plot=False,
                                                  print_output=False)
        integral = v[3][0]
Example #17
    def calcMarginals(self, jdist):
        """
        Compute the marginal density object for the dominant space. The current
        implementation only supports Gaussian distributions.
        """

        marginal_size = len(self.dominant_indices)
        orig_mean = cp.E(jdist)
        orig_covariance = cp.Cov(jdist)

        # Step 1: Rotate the mean & covariance matrix of the original joint
        # distribution along the dominant eigenvectors
        dominant_vecs = self.iso_eigenvecs[:, self.dominant_indices]
        marginal_mean = np.dot(dominant_vecs.T, orig_mean)
        marginal_covariance = np.matmul(
            dominant_vecs.T, np.matmul(orig_covariance, dominant_vecs))

        # Step 2: Create the new marginal distribution
        if marginal_size == 1:  # Univariate distributions have to be treated separately
            # np.asscalar was removed from NumPy; .item() is the modern equivalent
            marginal_std_dev = np.sqrt(marginal_covariance.item())
            self.marginal_distribution = cp.Normal(marginal_mean.item(),
                                                   marginal_std_dev)
        else:
            self.marginal_distribution = cp.MvNormal(marginal_mean,
                                                     marginal_covariance)
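A small numeric sketch of the two steps above: projecting a joint Gaussian onto a single dominant direction v yields a univariate normal with mean v^T mu and variance v^T Sigma v (hypothetical two-dimensional inputs):

jdist = cp.MvNormal([1.0, 2.0], [[2.0, 0.5], [0.5, 1.0]])
v = np.array([[1.0], [0.0]])              # dominant direction (sketch)
marginal_mean = np.dot(v.T, cp.E(jdist))  # -> [1.0]
marginal_var = np.dot(v.T, np.dot(cp.Cov(jdist), v))  # -> [[2.0]]
marginal = cp.Normal(marginal_mean.item(), np.sqrt(marginal_var.item()))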
Example #18
def test_integration():
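    # `dim`, `order`, and `size` are module-level parameters in the original test suite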
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.orth_ttr(order, dist, retall=1)
    gq = cp.generate_quadrature
    nodes, weights = gq(order, dist, rule="C")
    vals = np.zeros((len(weights), size))
    cp.fit_quadrature(orth, nodes, weights, vals, norms=norms)
Example #19
    def __init__(self, uq_systemsize):

        mean_v = 248.136  # Mean value of input random variable
        mean_alpha = 5
        mean_Ma = 0.84
        mean_re = 1.e6
        mean_rho = 0.38
        mean_cg = np.zeros((3))

        std_dev = np.diag([0.2, 0.01])  # np.diag([1.0, 0.2, 0.01, 1.e2, 0.01])
        rv_dict = {  # 'v' : mean_v,
            'alpha': mean_alpha,
            'Mach_number': mean_Ma,
            # 're' : mean_re,
            # 'rho' : mean_rho,
        }
        self.QoI = examples.OASAerodynamicWrapper(uq_systemsize, rv_dict)
        self.jdist = cp.Normal(self.QoI.rv_array, std_dev)
        self.dominant_space = DimensionReduction(
            n_arnoldi_sample=uq_systemsize + 1, exact_Hessian=False)
        self.dominant_space.getDominantDirections(self.QoI,
                                                  self.jdist,
                                                  max_eigenmodes=1)

        # print 'iso_eigenvals = ', self.dominant_space.iso_eigenvals
        # print 'iso_eigenvecs = ', '\n', self.dominant_space.iso_eigenvecs
        # print 'dominant_indices = ', self.dominant_space.dominant_indices
        # print 'ratio = ', abs(self.dominant_space.iso_eigenvals[0] / self.dominant_space.iso_eigenvals[1])
        print('std_dev = ', cp.Std(self.jdist), '\n')
        print('cov = ', cp.Cov(self.jdist), '\n')
Example #20
def KG(z, evls, pnts, gp, kernel, NSAMPS=30, DEG=3, sampling=False):

    # Find initial minimum value from GP model
    min_val = 1e100
    X_sample = pnts
    Y_sample = evls
    #for x0 in [np.random.uniform(XL, XU, size=DIM) for oo in range(20)]:
    x0 = np.random.uniform(XL, XU, size=DIM)
    res = mini(gp, x0=x0,
               bounds=[(XL, XU)
                       for ss in range(DIM)])  #, method='Nelder-Mead')
    #res = mini(expected_improvement, x0=x0[0], bounds=[(XL, XU) for ss in range(DIM)], args=(X_sample, Y_sample, gp))#, callback=callb)
    #   if res.fun < min_val:
    min_val = res.fun
    min_x = res.x

    # estimate min(f^{n+1}) with MC simulation
    MEAN = 0
    points = np.atleast_2d(np.append(X_sample, z)).T
    m, s = gp(z, return_std=True)
    distribution = cp.J(cp.Normal(0, s))
    samples = distribution.sample(NSAMPS, rule='Halton')
    PCEevals = []
    for pp in range(NSAMPS):

        # construct future GP, using z as the next point
        evals = np.append(evls, m + samples[pp])
        #evals = np.append(evls, m + np.random.normal(0, s))
        gpnxt = GaussianProcessRegressor(kernel=kernel,
                                         n_restarts_optimizer=35,
                                         random_state=98765,
                                         normalize_y=True)
        gpnxt.fit(points, evals)

        # convenience function
        def gpf_next(x, return_std=False):
            alph, astd = gpnxt.predict(np.atleast_2d(x), return_std=True)
            alph = alph[0]
            if return_std:
                return (alph, astd)
            else:
                return alph

        res = mini(gpf_next, x0=x0, bounds=[(XL, XU) for ss in range(DIM)])
        min_next_val = res.fun
        min_next_x = res.x

        #print('+++++++++ ', res.fun)
        #MEAN += min_next_val
        PCEevals.append(min_next_val)
    if not sampling:
        polynomial_expansion = cp.orth_ttr(DEG, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
    else:
        MEAN = np.mean(PCEevals)
    #print(PCEevals, '...', MEAN)
    #MEAN /= NSAMPS
    return min_val - MEAN
Example #21
def testCuba():
    from cubature import cubature as cuba

    CD = cp.Uniform(-0.10, 0.10)  # CD
    CL = cp.Uniform(-0.10, 0.10)  # CL
    rho0 = cp.Normal(0, 0.0333)  # rho0
    scaleHeight = cp.Uniform(-0.05, 0.05)  # scaleheight
    pdf = cp.J(CD, CL, rho0, scaleHeight)

    def PDF(x, *args, **kwargs):
        return pdf.pdf(np.array(x).T)

    x0 = np.array([-0.10, -0.10, -0.5, -0.05])
    xf = np.array([0.10, 0.10, 0.5, 0.05])
    P, err = cuba(PDF,
                  ndim=4,
                  fdim=1,
                  xmin=x0,
                  xmax=xf,
                  vectorized=True,
                  adaptive='p')

    print "Multi-dimensional integral of the PDF over its support = {}".format(
        P[0])
    print "Total error in integration = {}".format(err[0])
Example #22
def test_dist_addition_wrappers():
    dists = [
        chaospy.J(chaospy.Normal(2), chaospy.Normal(2), chaospy.Normal(3)),  # ShiftScale
        chaospy.J(chaospy.Uniform(1, 3), chaospy.Uniform(1, 3), chaospy.Uniform(1, 5)),  # LowerUpper
        chaospy.MvNormal([2, 2, 3], numpy.eye(3)),  # MeanCovariance
    ]
    for dist in dists:
        joint = chaospy.Add([1, 1, 3], dist)

        assert numpy.allclose(joint.inv([0.5, 0.5, 0.5]), [3, 3, 6])
        assert numpy.allclose(joint.fwd([3, 3, 6]), [0.5, 0.5, 0.5])
        density = joint.pdf([2, 2, 2], decompose=True)
        assert numpy.isclose(density[0], density[1]), (dist, density)
        assert not numpy.isclose(density[0], density[2]), dist
        assert numpy.isclose(joint[0].inv([0.5]), 3)
        assert numpy.isclose(joint[0].fwd([3]), 0.5)
Example #23
def test_operator_slicing():
    dists = [
        chaospy.J(chaospy.Normal(2), chaospy.Normal(2),
                  chaospy.Normal(3)),  # ShiftScale
        chaospy.J(chaospy.Uniform(1, 3), chaospy.Uniform(1, 3),
                  chaospy.Uniform(1, 5)),  # LowerUpper
        chaospy.MvNormal([2, 2, 3], numpy.eye(3)),  # MeanCovariance
    ]
    for dist in dists:

        assert numpy.allclose(dist.inv([0.5, 0.5, 0.5]), [2, 2, 3])
        assert numpy.allclose(dist.fwd([2, 2, 3]), [0.5, 0.5, 0.5])
        density = dist.pdf([2, 2, 2], decompose=True)
        assert numpy.isclose(density[0], density[1]), dist
        assert not numpy.isclose(density[0], density[2]), dist
        assert numpy.isclose(dist[0].inv([0.5]), 2)
        assert numpy.isclose(dist[0].fwd([2]), 0.5)
Example #24
def test_regression():
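    # `dim`, `order`, `samples`, and `size` are module-level parameters in the original test suite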
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.orth_ttr(order, dist, retall=1)
    data = dist.sample(samples)
    vals = np.zeros((samples, size))
    cp.fit_regression(orth, data, vals, "LS")
    cp.fit_regression(orth, data, vals, "T", order=0)
    cp.fit_regression(orth, data, vals, "TC", order=0)
Example #25
def plot_figures():
    """Plot figures for multivariate distribution section."""
    rc("figure", figsize=[8., 4.])
    rc("figure.subplot", left=.08, top=.95, right=.98)
    seed(1000)

    Q1 = cp.Gamma(2)
    Q2 = cp.Normal(0, Q1)
    Q = cp.J(Q1, Q2)
    #end

    subplot(121)
    s, t = meshgrid(linspace(0, 5, 200), linspace(-6, 6, 200))
    contourf(s, t, Q.pdf([s, t]), 50)
    xlabel("$q_1$")
    ylabel("$q_2$")
    subplot(122)
    Qr = Q.sample(500)
    scatter(*Qr)
    xlabel("$Q_1$")
    ylabel("$Q_2$")
    axis([0, 5, -6, 6])

    savefig("mv1.png")
    clf()

    Q2 = cp.Gamma(1)
    Q1 = cp.Normal(Q2**2, Q2 + 1)
    Q = cp.J(Q1, Q2)
    #end

    subplot(121)
    s, t = meshgrid(linspace(-4, 7, 200), linspace(0, 3, 200))
    contourf(s, t, Q.pdf([s, t]), 30)
    xlabel("$q_1$")
    ylabel("$q_2$")
    subplot(122)
    Qr = Q.sample(500)
    scatter(*Qr)
    xlabel("$Q_1$")
    ylabel("$Q_2$")
    axis([-4, 7, 0, 3])

    savefig("mv2.png")
    clf()
Example #26
def test_quadrature():
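    # `dim` and `order` are module-level parameters in the original test suite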
    dist = cp.Iid(cp.Normal(), dim)
    gq = cp.generate_quadrature
    nodes, weights = gq(order, dist, rule="C")
    nodes, weights = gq(order, dist, rule="E")
    nodes, weights = gq(order, dist, rule="G")
    nodes, weights = gq(order, dist, rule="C", sparse=True)
    nodes, weights = gq(order, dist, rule="E", sparse=True)
    nodes, weights = gq(order, dist, rule="G", sparse=True)
Example #27
def generate_distributions(zm, wm=None):
    # define marginal distributions
    if wm is not None:
        zm = np.append(zm, wm, axis=0)
    marginal_distributions = [cp.Normal(*mu_sig) for mu_sig in zm]
    # define the joint distribution
    jpdf = cp.J(*marginal_distributions)

    return jpdf
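A usage sketch, matching the per-row (mean, std) convention of Example #3 (hypothetical inputs):

zm = np.array([[0.0, 1.0], [0.0, 2.0]])  # rows of (mean, standard deviation)
wm = np.array([[0.5, 1.0], [1.0, 2.0]])
jpdf = generate_distributions(zm, wm)    # four-dimensional joint normal
print(cp.E(jpdf))                        # [0.0, 0.0, 0.5, 1.0]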
Example #28
    def dist(self):
        """Return the actual distribution.

        Returns
        -------
        chaospy.Normal
            The actual distribution.
        """
        return chaos.Normal(mu=self.mu, sigma=self.sigma)
Example #29
    def test_get_error(self):
        parameter_list = [["gbar_Na", 120, cp.Uniform(110, 130)],
                         ["gbar_K", 36, cp.Normal(36, 1)],
                         ["gbar_L", 0.3, cp.Chi(1, 1, 0.3)]]

        self.parameters = Parameters(parameter_list)

        with self.assertRaises(AttributeError):
            self.parameters.get("not_a_parameter")
Example #30
    def test_delitem(self):
        parameter_list = [["gbar_Na", 120, cp.Uniform(110, 130)],
                         ["gbar_K", 36, cp.Normal(36, 1)],
                         ["gbar_L", 0.3, cp.Chi(1, 1, 0.3)]]

        parameters = Parameters(parameter_list)

        del parameters["gbar_Na"]

        self.assertEqual(len(parameters.parameters), 2)