Example #1
def ExpandBank():

    hep = HEPBankReducedSmooth

    t = np.linspace(0, 350, 3500)
    # t1 = cp.Uniform(0, 150)   # superseded by the normal distributions below
    # t2 = cp.Uniform(100, 260)

    t1 = cp.Normal(70, 1)
    t2 = cp.Normal(115, 1)
    pdf = cp.J(t1, t2)
    polynomials = cp.orth_ttr(order=2, dist=pdf)  #No good for dependent
    # polynomials = cp.orth_bert(N=2,dist=pdf)
    # polynomials = cp.orth_gs(order=2,dist=pdf)
    # polynomials = cp.orth_chol(order=2,dist=pdf)

    if True:  # quadrature fit; set to False to use the regression branch below
        nodes, weights = cp.generate_quadrature(order=2,
                                                domain=pdf,
                                                rule="Gaussian")
        # nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="C")
        # nodes, weights = cp.generate_quadrature(order=9, domain=pdf, rule="L")
        print(nodes.shape)
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)
    else:
        nodes = pdf.sample(10, 'S')
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_regression(polynomials, nodes, samples, rule='T')
    return hepPCE
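A hedged usage sketch (not part of the original source): the expansion returned by fit_quadrature is an ordinary chaospy polynomial, so its mean and pointwise standard deviation over the bank-angle profile follow from cp.E and cp.Std, assuming HEPBankReducedSmooth and the surrounding imports resolve.

hepPCE = ExpandBank()
pdf = cp.J(cp.Normal(70, 1), cp.Normal(115, 1))  # same joint input density used for the fit
mean_profile = cp.E(hepPCE, pdf)                 # expected profile over the 3500 time points
std_profile = cp.Std(hepPCE, pdf)                # pointwise standard deviation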
Example #2
    def __init__(self):
        #inzone perm [m^2]
        var0 = RandVar('normal', [np.log(1e-13), 0.5])
        #above zone perm [m^2]
        var1 = RandVar('normal', [np.log(5e-14), 0.5])
        #inzone phi [-]
        var2 = RandVar('uniform', [0.1, 0.2])
        #above zone phi [-]
        var3 = RandVar('uniform', [0.05, 0.3])
        #aquitard thickness [m]
        var4 = RandVar('uniform', [10, 30])
        #injection rate [Mt/yr]
        var5 = RandVar('uniform', [0.5, 5])
        self.varset = [var0, var1, var2, var3, var4, var5]

        self.Nvar = len(self.varset)

        self.jointDist = cp.J(
            cp.Normal(mu=var0.param[0], sigma=var0.param[1]),
            cp.Normal(mu=var1.param[0], sigma=var1.param[1]),
            cp.Uniform(lo=var2.param[0], up=var2.param[1]),
            cp.Uniform(lo=var3.param[0], up=var3.param[1]),
            cp.Uniform(lo=var4.param[0], up=var4.param[1]),
            cp.Uniform(lo=var5.param[0], up=var5.param[1]),
        )
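A hypothetical driver sketch (ReservoirInputs is a stand-in name for the class whose __init__ is shown above): Latin-hypercube samples are drawn from the joint distribution, and the log-space permeability marginals are mapped back to physical units with np.exp.

inputs = ReservoirInputs()                                       # hypothetical class name
samples = inputs.jointDist.sample(1000, rule="latin_hypercube")  # shape (6, 1000)
perm_inzone = np.exp(samples[0])   # var0 was defined on log-permeability [m^2]
perm_above = np.exp(samples[1])    # var1 likewise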
Example #3
def test_resume():
    vary = {'a': cp.Uniform(-5, 0), 'b': cp.Uniform(2, 10)}
    sampler = QMCSampler(vary, 100, 390)
    for _ in range(10):
        sample = next(sampler)
    with pytest.raises(StopIteration):
        next(sampler)
Example #4
def getUncertainty(parametric=True, initial=False, knowledge=False):

    perturbations = {
        'parametric': None,
        'initial': None,
        'knowledge': None,
    }

    if parametric:
        # Define Uncertainty Joint PDF
        CD = cp.Uniform(-0.10, 0.10)  # CD
        CL = cp.Uniform(-0.10, 0.10)  # CL
        rho0 = cp.Normal(0, 0.0333 * 1.5)  # rho0
        scaleHeight = cp.Uniform(-0.025, 0.01)  # scaleheight
        perturbations['parametric'] = cp.J(CD, CL, rho0, scaleHeight)

    if knowledge:
        pitch = cp.Normal(
            0, np.radians(1. / 3.)
        )  # Initial attitude uncertainty in angle of attack, 1-deg 3-sigma
        perturbations['knowledge'] = cp.J(pitch)

    if initial:
        V = cp.Uniform(-150, 150)  # Entry velocity deviation
        gamma = cp.Normal(0,
                          2.0 / 3.0)  # Entry FPA deviation, +- 2 deg 3-sigma
        perturbations['initial'] = cp.J(V, gamma)

    return perturbations
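A short usage sketch (hypothetical caller, not from the source): each non-None entry of the returned dict is a chaospy joint distribution, so Monte Carlo draws come directly from its sample method.

perturbations = getUncertainty(parametric=True, initial=True)
param_samples = perturbations['parametric'].sample(500)  # shape (4, 500): CD, CL, rho0, scaleHeight
init_samples = perturbations['initial'].sample(500)      # shape (2, 500): V, gamma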
Example #5
    def test_SVD_equivalence(self):
        systemsize = 4
        eigen_decayrate = 2.0

        # Create Hadmard Quadratic object
        QoI = examples.HadamardQuadratic(systemsize, eigen_decayrate)

        # Create the joint distribution
        jdist = cp.J(cp.Uniform(-1,1),
                     cp.Uniform(-1,1),
                     cp.Uniform(-1,1),
                     cp.Uniform(-1,1))

        active_subspace_eigen = ActiveSubspace(QoI, n_dominant_dimensions=1,
                                               n_monte_carlo_samples=10000,
                                               use_svd=False, read_rv_samples=False,
                                               write_rv_samples=True)
        active_subspace_eigen.getDominantDirections(QoI, jdist)

        active_subspace_svd = ActiveSubspace(QoI, n_dominant_dimensions=1,
                                             n_monte_carlo_samples=10000,
                                             use_svd=True, read_rv_samples=True,
                                             write_rv_samples=False)
        active_subspace_svd.getDominantDirections(QoI, jdist)

        # Check the iso_eigenvals
        np.testing.assert_almost_equal(active_subspace_eigen.iso_eigenvals, active_subspace_svd.iso_eigenvals)
        # check the iso_eigenvecs
        self.assertEqual(active_subspace_eigen.iso_eigenvecs.shape, active_subspace_svd.iso_eigenvecs.shape)
        for i in range(active_subspace_eigen.iso_eigenvecs.shape[1]):
            arr1 = active_subspace_eigen.iso_eigenvecs[:,i]
            arr2 = active_subspace_svd.iso_eigenvecs[:,i]
            if not np.allclose(arr1, arr2):
                np.testing.assert_almost_equal(arr1, -arr2)
Example #6
    def test_ordered(self):
        parameter_list = [["gbar_Na", 120,
                           cp.Uniform(110, 130)],
                          ["gbar_K", 36, cp.Normal(36, 1)],
                          ["gbar_L", 0.3, cp.Chi(1, 1, 0.3)]]

        parameters = Parameters(parameter_list)

        uncertain_parameters = parameters.get_from_uncertain("name")
        self.assertEqual(uncertain_parameters, ["gbar_Na", "gbar_K", "gbar_L"])

        uncertain_parameters = parameters.get("name")
        self.assertEqual(uncertain_parameters, ["gbar_Na", "gbar_K", "gbar_L"])

        parameter_list = [["gbar_K", 36, cp.Normal(36, 1)],
                          ["gbar_Na", 120,
                           cp.Uniform(110, 130)],
                          ["gbar_L", 0.3, cp.Chi(1, 1, 0.3)]]

        parameters = Parameters(parameter_list)

        uncertain_parameters = parameters.get_from_uncertain("name")
        self.assertEqual(uncertain_parameters, ["gbar_K", "gbar_Na", "gbar_L"])

        uncertain_parameters = parameters.get("name")
        self.assertEqual(uncertain_parameters, ["gbar_K", "gbar_Na", "gbar_L"])

        uncertain_parameters = parameters.get("value")
        self.assertEqual(uncertain_parameters, [36, 120, 0.3])
Example #7
def data_vectors():
    np.random.seed(10000000)
    vary = {"x1": cp.Uniform(0.0, 1.0), "x2": cp.Uniform(0.0, 1.0)}
    sampler = uq.sampling.MCSampler(vary, n_mc_samples=100)
    data = {
        ('run_id', 0): [],
        ('x1', 0): [],
        ('x2', 0): [],
        ('g', 0): [],
        ('g', 1): [],
        ('g', 2): [],
        ('h', 0): [],
        ('h', 1): []
    }
    for run_id, sample in enumerate(sampler):
        data[('run_id', 0)].append(run_id)
        data[('x1', 0)].append(sample['x1'])
        data[('x2', 0)].append(sample['x2'])
        data[('g', 0)].append(sample['x1'])
        data[('g', 1)].append(sample['x2'])
        data[('g', 2)].append(sample['x1'] + sample['x2'])
        data[('h', 0)].append(sample['x1'] * sample['x2'])
        data[('h', 1)].append(sample['x1']**sample['x2'])
    df = pd.DataFrame(data)
    return sampler, df
Example #8
def testCuba():
    from cubature import cubature as cuba

    CD = cp.Uniform(-0.10, 0.10)  # CD
    CL = cp.Uniform(-0.10, 0.10)  # CL
    rho0 = cp.Normal(0, 0.0333)  # rho0
    scaleHeight = cp.Uniform(-0.05, 0.05)  # scaleheight
    pdf = cp.J(CD, CL, rho0, scaleHeight)

    def PDF(x, *args, **kwargs):
        return pdf.pdf(np.array(x).T)

    x0 = np.array([-0.10, -0.10, -0.5, -0.05])
    xf = np.array([0.10, 0.10, 0.5, 0.05])
    P, err = cuba(PDF,
                  ndim=4,
                  fdim=1,
                  xmin=x0,
                  xmax=xf,
                  vectorized=True,
                  adaptive='p')

    print "Multi-dimensional integral of the PDF over its support = {}".format(
        P[0])
    print "Total error in integration = {}".format(err[0])
Example #9
def halton_sequence(w, h, n):
    distribution = chaospy.J(chaospy.Uniform(0, w), chaospy.Uniform(0, h))
    samples = distribution.sample(n, rule="halton")
    x_samples = samples[0] + np.random.uniform(0, 1, n)
    y_samples = samples[1] + np.random.uniform(0, 1, n)

    return np.clip(x_samples, 0, w), np.clip(y_samples, 0, h)
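Usage might look like the following (a sketch with assumed pixel dimensions): the Halton points are jittered by up to one unit and clipped back into the frame, so every returned coordinate stays inside [0, w] x [0, h].

xs, ys = halton_sequence(640, 480, 256)
assert xs.shape == (256,) and ys.shape == (256,)
assert 0 <= xs.min() and xs.max() <= 640
assert 0 <= ys.min() and ys.max() <= 480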
Example #10
def test_lhc():
    vary = {'a': cp.Uniform(-5, 3), 'b': cp.Uniform(2, 10)}
    sampler = uq.sampling.quasirandom.LHCSampler(vary, max_num=10)
    for sample in sampler:
        assert (sample['a'] >= -5.0)
        assert (sample['a'] <= 3.0)
        assert (sample['b'] >= 2.0)
        assert (sample['b'] <= 10.0)
Example #11
def test_constant_expected():
    """Test if polynomial constant behave as expected."""
    distribution = chaospy.J(chaospy.Uniform(-1.2, 1.2),
                             chaospy.Uniform(-2.0, 2.0))
    const = chaospy.polynomial(7.)
    assert chaospy.E(const, distribution[0]) == const
    assert chaospy.E(const, distribution) == const
    assert chaospy.Var(const, distribution) == 0.
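For contrast, a hedged sketch of the non-constant case (same joint distribution, reconstructed here for self-containment): the first marginal is Uniform(-1.2, 1.2), so its mean is 0 and its variance is 2.4**2 / 12 = 0.48.

import chaospy
distribution = chaospy.J(chaospy.Uniform(-1.2, 1.2), chaospy.Uniform(-2.0, 2.0))
q0, q1 = chaospy.variable(2)
assert abs(chaospy.E(q0, distribution)) < 1e-10           # mean of Uniform(-1.2, 1.2)
assert abs(chaospy.Var(q0, distribution) - 0.48) < 1e-10  # (b - a)**2 / 12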
Example #12
def test_restart_dict():
    vary = {'a': cp.Uniform(-5, 0), 'b': cp.Uniform(2, 10)}
    sampler = QMCSampler(vary, 100)
    for _ in range(10):
        next(sampler)
    restart = sampler.get_restart_dict()
    assert(restart['vary'] == Vary(vary).serialize())
    assert(restart['count'] == 10)
    assert(restart['n_mc_samples'] == 100)
Example #13
def test_halton():
    vary = {'a': cp.Uniform(-5, 3), 'b': cp.Uniform(2, 10)}
    sampler = uq.sampling.quasirandom.HaltonSampler(vary, max_num=10)
    for sample in sampler:
        assert (sample['a'] >= -5.0)
        assert (sample['a'] <= 3.0)
        assert (sample['b'] >= 2.0)
        assert (sample['b'] <= 10.0)
    assert (sampler.n_samples() == 10)
Example #14
def test_l_n_exception():
    vary = {
        "gravity": cp.Uniform(9.8, 1.0),
        "mass": cp.Uniform(2.0, 10.0),
    }
    with pytest.raises(RuntimeError):
        sampler = uq.sampling.SCSampler(vary=vary,
                                        polynomial_order=1,
                                        sparse=True)
Example #15
def setup_cannonsim_app():
    params = {
        "angle": {
            "type": "float",
            "min": 0.0,
            "max": 6.28,
            "default": 0.79},
        "air_resistance": {
            "type": "float",
            "min": 0.0,
            "max": 1.0,
            "default": 0.2},
        "height": {
            "type": "float",
            "min": 0.0,
            "max": 1000.0,
            "default": 1.0},
        "time_step": {
            "type": "float",
            "min": 0.0001,
            "max": 1.0,
            "default": 0.01},
        "gravity": {
            "type": "float",
            "min": 0.0,
            "max": 1000.0,
            "default": 9.8},
        "mass": {
            "type": "float",
            "min": 0.0001,
            "max": 1000.0,
            "default": 1.0},
        "velocity": {
            "type": "float",
            "min": 0.0,
            "max": 1000.0,
            "default": 10.0}}

    encoder = uq.encoders.GenericEncoder(
        template_fname='tests/cannonsim/test_input/cannonsim.template',
        delimiter='#',
        target_filename='in.cannon')
    decoder = uq.decoders.SimpleCSV(
        target_filename='output.csv', output_columns=[
            'Dist', 'lastvx', 'lastvy'], header=0)
    collater = uq.collate.AggregateSamples(average=False)

    vary = {
        "gravity": cp.Uniform(9.8, 1.0),
        "mass": cp.Uniform(2.0, 10.0),
    }
    cannon_sampler = uq.sampling.RandomSampler(vary=vary, max_num=5)
    cannon_action = uq.actions.ExecuteLocal("tests/cannonsim/bin/cannonsim in.cannon output.csv")
    cannon_stats = uq.analysis.BasicStats(qoi_cols=['Dist', 'lastvx', 'lastvy'])

    return params, encoder, decoder, collater, cannon_sampler, cannon_action, cannon_stats
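A hedged sketch of wiring these pieces into a campaign, mirroring the older EasyVVUQ API used in Example #23 (the exact method signatures depend on the EasyVVUQ version installed):

params, encoder, decoder, collater, sampler, action, stats = setup_cannonsim_app()
campaign = uq.Campaign(name='cannonsim', work_dir='/tmp')
campaign.add_app(name='cannonsim', params=params, encoder=encoder,
                 decoder=decoder, collater=collater)
campaign.set_sampler(sampler)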
Example #16
def test_sampling():
    vary = {'a': cp.Uniform(-5, 0), 'b': cp.Uniform(2, 10)}
    sampler = MCSampler(vary, 100)
    assert (sampler.n_samples() == 400)
    for _ in range(sampler.n_samples()):
        sample = next(sampler)
        assert (sample['a'] >= -5 and sample['a'] <= 0)
        assert (sample['b'] >= 2 and sample['b'] <= 10)
    with pytest.raises(StopIteration):
        next(sampler)
Example #17
def test_2():
    """Return test 2."""
    p_gc_legendre_two = partial(quadrature_gauss_legendre_two, a=0, b=1)

    approaches = []
    approaches += [monte_carlo_naive_two_dimensions, monte_carlo_quasi_two_dimensions]
    approaches += [p_gc_legendre_two]
    distribution = cp.J(cp.Uniform(0, 1), cp.Uniform(0, 1))
    for approach in approaches:
        np.testing.assert_almost_equal(approach(distribution.pdf), 1.0)
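The integrators themselves are imported elsewhere; as a purely hypothetical sketch, a naive two-dimensional Monte Carlo routine compatible with the approach(distribution.pdf) call could look as follows (the real monte_carlo_naive_two_dimensions may differ):

def monte_carlo_naive_two_dimensions(f, n=100000):
    """Estimate the integral of f over the unit square by naive Monte Carlo."""
    points = np.random.uniform(size=(2, n))  # chaospy pdf evaluations expect shape (dim, n)
    return np.mean(f(points))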
Example #18
    def test_init_object_dist(self):
        parameter_list = [Parameter("gbar_Na", 120, cp.Uniform(110, 130)),
                          Parameter("gbar_K", 36),
                          Parameter("gbar_L", 10.3)]

        parameters = Parameters(parameter_list, distribution=cp.Uniform(110, 130))

        self.assertIsInstance(parameters, Parameters)
        self.assertIsInstance(parameters["gbar_Na"], Parameter)
        self.assertIsInstance(parameters["gbar_K"], Parameter)
        self.assertIsInstance(parameters["gbar_L"], Parameter)
        self.assertIsInstance(parameters.distribution, cp.Dist)
Example #19
    def test_on_exponential(self):
        systemsize = 2
        QoI = examples.Exp_07xp03y(systemsize)

        jdist = cp.J(cp.Uniform(-1,1),
                     cp.Uniform(-1,1))

        active_subspace = ActiveSubspace(QoI, n_dominant_dimensions=1, n_monte_carlo_samples=10000)
        active_subspace.getDominantDirections(QoI, jdist)

        expected_W1 = QoI.a / np.linalg.norm(QoI.a)
        np.testing.assert_almost_equal(active_subspace.dominant_dir[:,0], expected_W1)
Example #20
def test_get_active_sampler(tmp_path):
    camp = uq.Campaign(name='test', work_dir=tmp_path)
    vary = {
        "angle": cp.Uniform(0.0, 1.0),
        "height": cp.Uniform(2.0, 10.0),
        "velocity": cp.Normal(10.0, 1.0),
        "mass": cp.Uniform(1.0, 5.0)
    }
    sampler = uq.sampling.RandomSampler(vary=vary)
    camp.set_sampler(sampler)

    assert camp.get_active_sampler() == sampler
Example #21
def settings(tmpdir):
    params = {
        "temp_init": {
            "type": "float",
            "min": 0.0,
            "max": 100.0,
            "default": 95.0
        },
        "kappa": {
            "type": "float",
            "min": 0.0,
            "max": 0.1,
            "default": 0.025
        },
        "t_env": {
            "type": "float",
            "min": 0.0,
            "max": 40.0,
            "default": 15.0
        },
        "out_file": {
            "type": "string",
            "default": "output.csv"
        }
    }
    output_filename = params["out_file"]["default"]
    output_columns = ["te"]

    encoder = uq.encoders.GenericEncoder(
        template_fname='tests/cooling/cooling.template',
        delimiter='$',
        target_filename='cooling_in.json')
    decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                    output_columns=output_columns)

    vary = {"kappa": cp.Uniform(0.025, 0.075), "t_env": cp.Uniform(15, 25)}

    cooling_sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
    cooling_stats = uq.analysis.PCEAnalysis(sampler=cooling_sampler,
                                            qoi_cols=output_columns)

    settings = {
        'params': params,
        'encoder': encoder,
        'decoder': decoder,
        'cooling_sampler': cooling_sampler,
        'cooling_stats': cooling_stats
    }

    return settings
Example #22
def get_joint_uniform_distribution(dim, lower=0, upper=1):
    if dim == 1:
        return cp.Uniform(lower, upper)
    elif dim == 2:
        return cp.J(cp.Uniform(lower, upper), cp.Uniform(lower, upper))
    elif dim == 3:
        return cp.J(cp.Uniform(lower, upper), cp.Uniform(lower, upper),
                    cp.Uniform(lower, upper))
    elif dim == 4:
        return cp.J(cp.Uniform(lower, upper), cp.Uniform(lower, upper),
                    cp.Uniform(lower, upper), cp.Uniform(lower, upper))
    else:
        print("Wrong input dimension")
        return None
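A more compact alternative (a sketch; behavior is assumed equivalent for independent, identical marginals): chaospy's Iid builds an n-dimensional joint of independent copies, removing the hard-coded dimension branches.

def get_joint_uniform_distribution(dim, lower=0, upper=1):
    if dim < 1:
        raise ValueError("dimension must be a positive integer")
    if dim == 1:
        return cp.Uniform(lower, upper)
    return cp.Iid(cp.Uniform(lower, upper), dim)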
Example #23
def run_sc_samples(work_dir):
    
    # Set up a fresh campaign called "sc"
    my_campaign = uq.Campaign(name='ocean', work_dir=work_dir)

    # Define parameter space
    params = {
        "decay_time_nu": {
            "type": "float",
            "min": 0.0,
            "max": 1000.0,
            "default": 5.0},
        "decay_time_mu": {
            "type": "float",
            "min": 0.0,
            "max": 1000.0,
            "default": 90.0},
        "out_file": {
            "type": "string",
            "default": "output.csv"}}

    output_filename = params["out_file"]["default"]
    output_columns = ["E_mean", "Z_mean", "E_std", "Z_std"]

    # Create an encoder, decoder and collation element for PCE test app
    encoder = uq.encoders.GenericEncoder(
        template_fname=HOME + '/sc/ocean.template',
        delimiter='$',
        target_filename='ocean_in.json')
    decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                    output_columns=output_columns,
                                    header=0)
    collater = uq.collate.AggregateSamples(average=False)

    # Add the SC app (automatically set as current app)
    my_campaign.add_app(name="sc",
                        params=params,
                        encoder=encoder,
                        decoder=decoder,
                        collater=collater)    

    # Create the sampler
    vary = {
        "decay_time_nu": cp.Uniform(1.0, 5.0),
        "decay_time_mu": cp.Uniform(85.0, 95.0)
    }

    my_sampler = uq.sampling.SCSampler(vary=vary, polynomial_order=6)
    # Associate the sampler with the campaign
    my_campaign.set_sampler(my_sampler)
Example #24
def test_gp(tmp_path):
    campaign = uq.Campaign(name='test', work_dir=tmp_path)
    params = {
        "temp_init": {
            "type": "float",
            "min": 0.0,
            "max": 100.0,
            "default": 95.0
        },
        "kappa": {
            "type": "float",
            "min": 0.0,
            "max": 0.1,
            "default": 0.025
        },
        "t_env": {
            "type": "float",
            "min": 0.0,
            "max": 40.0,
            "default": 15.0
        },
        "out_file": {
            "type": "string",
            "default": "output.csv"
        }
    }
    output_filename = params["out_file"]["default"]
    output_columns = ["te"]
    # Create an encoder and decoder for PCE test app
    encoder = uq.encoders.GenericEncoder(
        template_fname='tests/cooling/cooling.template',
        delimiter='$',
        target_filename='cooling_in.json')
    decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                    output_columns=output_columns)
    execute = ExecuteLocal("{} cooling_in.json".format(
        os.path.abspath("tests/cooling/cooling_model.py")))
    actions = Actions(CreateRunDirectory('/tmp'), Encode(encoder), execute,
                      Decode(decoder))
    vary = {"kappa": cp.Uniform(0.025, 0.075), "t_env": cp.Uniform(15, 25)}
    sampler = uq.sampling.quasirandom.LHCSampler(vary=vary)

    campaign.add_app(name='test_app', params=params, actions=actions)
    campaign.set_app('test_app')
    campaign.set_sampler(sampler)
    campaign.execute(nsamples=100).collate()
    df = campaign.get_collation_result()
    analysis = uq.analysis.gp_analyse.GaussianProcessSurrogate(
        ['kappa', 't_env'], ['te'])
    result = analysis.analyse(df)
Example #25
def data():
    # fix random seed to make this test deterministic
    np.random.seed(10000000)
    # Create the sampler
    vary = {"x1": cp.Uniform(0.0, 1.0), "x2": cp.Uniform(0.0, 1.0)}
    sampler = uq.sampling.SCSampler(vary)
    data = {('run_id', 0): [], ('x1', 0): [], ('x2', 0): [], ('f', 0): []}
    for run_id, sample in enumerate(sampler):
        data[('run_id', 0)].append(run_id)
        data[('x1', 0)].append(sample['x1'])
        data[('x2', 0)].append(sample['x2'])
        data[('f', 0)].append(sobol_g_func([sample['x1'], sample['x2']], d=2))
    df = pd.DataFrame(data)
    return sampler, df
Example #26
def setup_params_dist(icestupa, params):
    params_range = []
    for param in params:
        y_lim = get_parameter_metadata(param)['ylim']
        if param in ['r_F', 'D_F']:
            param_range = cp.Uniform(
                getattr(icestupa, param) * y_lim[0],
                getattr(icestupa, param) * y_lim[1])
        else:
            param_range = cp.Uniform(y_lim[0], y_lim[1])
        params_range.append(param_range)
        print("\t%s : %s\n" % (param, param_range))

    tuned_params = {params[i]: params_range[i] for i in range(len(params))}
    return tuned_params
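Downstream, the returned dict of marginals can be combined into a single joint distribution for sampling or PCE (a sketch; dict order follows the params list on Python 3.7+):

# given tuned_params returned by setup_params_dist(...)
joint = cp.J(*tuned_params.values())
samples = joint.sample(100, rule="sobol")  # shape (len(params), 100)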
Example #27
def test_dependent_density():
    """Assert that manually create dependency structure holds."""
    distribution1 = chaospy.Exponential(1)
    distribution2 = chaospy.Uniform(lower=0, upper=distribution1)
    distribution = chaospy.J(distribution1, distribution2)
    assert distribution.pdf([0.5, 0.6]) == 0
    assert distribution.pdf([0.5, 0.4]) > 0
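The asserts follow from the dependency structure: the second marginal is Uniform(0, q1), so at q1 = 0.5 its support is [0, 0.5]; 0.6 lies outside it (zero density) while 0.4 lies inside. A sampling check of the same constraint (a sketch, assuming dependent joints sample through the construction order):

samples = distribution.sample(1000)
assert (samples[1] <= samples[0]).all()  # second component never exceeds the first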
Example #28
    def distribution(parameter):
        if parameter == 0:
            raise ValueError(
                "Creating a percentage distribution around 0 does not work")

        return cp.Uniform(parameter - abs(interval / 2. * parameter),
                          parameter + abs(interval / 2. * parameter))
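This distribution helper closes over an interval variable, so it presumably comes from a factory along these lines (a hypothetical sketch; uncertainpy provides a similar uniform(interval) wrapper):

def uniform(interval):
    def distribution(parameter):
        if parameter == 0:
            raise ValueError(
                "Creating a percentage distribution around 0 does not work")
        return cp.Uniform(parameter - abs(interval / 2. * parameter),
                          parameter + abs(interval / 2. * parameter))
    return distribution

make_dist = uniform(0.05)   # +/- 2.5 % of the nominal value
dist = make_dist(120)       # Uniform(117, 123)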
Example #29
    def test_str_uncertain(self):

        self.parameter = Parameter("gbar_Na", 120, cp.Uniform(110, 130))

        result = str(self.parameter)

        self.assertEqual(result, "gbar_Na: 120 - Uncertain")
Example #30
def test_sc(tmpdir, campaign):
    params = {
        "Pe": {
            "type": "float",
            "min": "1.0",
            "max": "2000.0",
            "default": "100.0"
        },
        "f": {
            "type": "float",
            "min": "0.0",
            "max": "10.0",
            "default": "1.0"
        },
        "out_file": {
            "type": "string",
            "default": "output.csv"
        }
    }
    output_filename = params["out_file"]["default"]
    output_columns = ["u"]
    encoder = uq.encoders.GenericEncoder(
        template_fname='tests/sc/sc.template',
        delimiter='$',
        target_filename='sc_in.json')
    decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                    output_columns=output_columns,
                                    header=0)
    collater = uq.collate.AggregateSamples(average=False)
    vary = {"Pe": cp.Uniform(100.0, 200.0), "f": cp.Normal(1.0, 0.1)}
    sampler = uq.sampling.SCSampler(vary=vary, polynomial_order=1)
    actions = uq.actions.ExecuteLocal("tests/sc/sc_model.py sc_in.json")
    stats = uq.analysis.SCAnalysis(sampler=sampler, qoi_cols=output_columns)
    campaign(tmpdir, 'sc', 'sc', params, encoder, decoder, sampler, collater,
             actions, stats, vary, 0, 1)