Example #1
def ExpandBank():

    hep = HEPBankReducedSmooth

    t = np.linspace(0, 350, 3500)
    t1 = cp.Uniform(0, 150)
    t2 = cp.Uniform(100, 260)

    t1 = cp.Normal(70, 1)
    t2 = cp.Normal(115, 1)
    pdf = cp.J(t1, t2)
    polynomials = cp.orth_ttr(order=2, dist=pdf)  #No good for dependent
    # polynomials = cp.orth_bert(N=2,dist=pdf)
    # polynomials = cp.orth_gs(order=2,dist=pdf)
    # polynomials = cp.orth_chol(order=2,dist=pdf)

    if 1:
        nodes, weights = cp.generate_quadrature(order=2,
                                                domain=pdf,
                                                rule="Gaussian")
        # nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="C")
        # nodes, weights = cp.generate_quadrature(order=9, domain=pdf, rule="L")
        print(nodes.shape)
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_quadrature(polynomials, nodes, weights, samples)
    else:
        nodes = pdf.sample(10, 'S')
        samples = np.array([hep(t, *node) for node in nodes.T])
        hepPCE = cp.fit_regression(polynomials, nodes, samples, rule='T')
    return hepPCE
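The function above returns a chaospy expansion object. As a rough illustration of how such a surrogate might be used downstream, here is a minimal self-contained sketch mirroring the calls in the example above (toy_bank is a hypothetical stand-in for HEPBankReducedSmooth, which is not defined in this excerpt):

import numpy as np
import chaospy as cp

def toy_bank(t, t1, t2):
    # hypothetical stand-in for HEPBankReducedSmooth
    return np.tanh(t - t1) - np.tanh(t - t2)

t = np.linspace(0, 350, 3500)
pdf = cp.J(cp.Normal(70, 1), cp.Normal(115, 1))
polynomials = cp.orth_ttr(order=2, dist=pdf)
nodes, weights = cp.generate_quadrature(order=2, domain=pdf, rule="Gaussian")
samples = np.array([toy_bank(t, *node) for node in nodes.T])
surrogate = cp.fit_quadrature(polynomials, nodes, weights, samples)

# evaluate the surrogate at the nominal switch times and summarize its statistics
nominal_profile = surrogate(70, 115)
mean_profile = cp.E(surrogate, pdf)
std_profile = cp.Std(surrogate, pdf)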
Example #2
    def multi_fidelity_update(self, y, radius, high_fidelity):
        new_points = super().multi_fidelity_update(y, radius, high_fidelity)
        new_evals = [
            high_fidelity.eval(z_) - self.eval(z_) for z_ in new_points.T
        ]
        new_poly = cpy.fit_regression(self.expansion, new_points, new_evals)
        self.proxy = self.proxy + new_poly
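The method above corrects a low-fidelity proxy by regressing the high/low-fidelity discrepancy at the new points and adding the resulting polynomial to the proxy. A standalone sketch of that additive-correction idea, with a hypothetical one-dimensional distribution and model pair:

import numpy as np
import chaospy as cp

def low_fidelity(z):
    return np.sin(z)

def high_fidelity(z):
    return np.sin(z) + 0.1 * z**2

dist = cp.Uniform(-1, 1)
expansion = cp.orth_ttr(3, dist)

points = dist.sample(20, rule="H")
discrepancy = [high_fidelity(z) - low_fidelity(z) for z in points.T]
correction = cp.fit_regression(expansion, points, discrepancy)

def corrected_proxy(z):
    # low-fidelity model plus the fitted discrepancy polynomial
    return low_fidelity(z) + correction(z)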
Example #3
def KG(z, evls, pnts, gp, kernel, NSAMPS=30, DEG=3, sampling=False):

    # Find initial minimum value from GP model
    min_val = 1e100
    X_sample = pnts
    Y_sample = evls
    #for x0 in [np.random.uniform(XL, XU, size=DIM) for oo in range(20)]:
    x0 = np.random.uniform(XL, XU, size=DIM)
    res = mini(gp, x0=x0,
               bounds=[(XL, XU)
                       for ss in range(DIM)])  #, method='Nelder-Mead')
    #res = mini(expected_improvement, x0=x0[0], bounds=[(XL, XU) for ss in range(DIM)], args=(X_sample, Y_sample, gp))#, callback=callb)
    #   if res.fun < min_val:
    min_val = res.fun
    min_x = res.x

    # estimate min(f^{n+1}) with MC simulation
    MEAN = 0
    points = np.atleast_2d(np.append(X_sample, z)).T
    m, s = gp(z, return_std=True)
    distribution = cp.J(cp.Normal(0, s))
    samples = distribution.sample(NSAMPS, rule='Halton')
    PCEevals = []
    for pp in range(NSAMPS):

        # construct future GP, using z as the next point
        evals = np.append(evls, m + samples[pp])
        #evals = np.append(evls, m + np.random.normal(0, s))
        gpnxt = GaussianProcessRegressor(kernel=kernel,
                                         n_restarts_optimizer=35,
                                         random_state=98765,
                                         normalize_y=True)
        gpnxt.fit(points, evals)

        # convenience function
        def gpf_next(x, return_std=False):
            alph, astd = gpnxt.predict(np.atleast_2d(x), return_std=True)
            alph = alph[0]
            if return_std:
                return (alph, astd)
            else:
                return alph

        res = mini(gpf_next, x0=x0, bounds=[(XL, XU) for ss in range(DIM)])
        min_next_val = res.fun
        min_next_x = res.x

        #print('+++++++++ ', res.fun)
        #MEAN += min_next_val
        PCEevals.append(min_next_val)
    if not sampling:
        polynomial_expansion = cp.orth_ttr(DEG, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
    else:
        MEAN = np.mean(PCEevals)
    #print(PCEevals, '...', MEAN)
    #hey
    #MEAN /= NSAMPS
    return min_val - MEAN
Example #4
def polynomial_chaos_sens(Ns_pc,
                          jpdf,
                          polynomial_order,
                          poly=None,
                          return_reg=False):
    N_terms = int(len(jpdf) / 2)
    # 1. generate orthogonal polynomials
    poly = poly or cp.orth_ttr(polynomial_order, jpdf)
    # 2. generate samples with random sampling
    samples_pc = jpdf.sample(size=Ns_pc, rule='R')
    # 3. evaluate the model, to do so transpose samples and hash input data
    transposed_samples = samples_pc.transpose()
    samples_z = transposed_samples[:, :N_terms]
    samples_w = transposed_samples[:, N_terms:]
    model_evaluations = linear_model(samples_w, samples_z)
    # 4. calculate generalized polynomial chaos expression
    gpce_regression = cp.fit_regression(poly, samples_pc, model_evaluations)
    # 5. get sensitivity indices
    Spc = cp.Sens_m(gpce_regression, jpdf)
    Stpc = cp.Sens_t(gpce_regression, jpdf)

    if return_reg:
        return Spc, Stpc, gpce_regression
    else:
        return Spc, Stpc
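A hypothetical call of polynomial_chaos_sens, assuming a simple additive linear_model is defined in the same module and that jpdf stacks the N_terms inputs z first and the N_terms weights w second, as the slicing above expects:

import numpy as np
import chaospy as cp

def linear_model(w, z):
    # hypothetical model: weighted sum of the inputs, one value per sample
    return np.sum(w * z, axis=1)

N_terms = 4
z_marginals = [cp.Uniform(0, 1) for _ in range(N_terms)]
w_marginals = [cp.Normal(1, 0.1) for _ in range(N_terms)]
jpdf = cp.J(*(z_marginals + w_marginals))

Spc, Stpc = polynomial_chaos_sens(Ns_pc=500, jpdf=jpdf, polynomial_order=2)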
Example #5
def EHI(x,
        gp1,
        gp2,
        xi=0.,
        x2=None,
        MD=None,
        NSAMPS=200,
        PCE=False,
        ORDER=2,
        PAR_RES=100):

    mu1, std1 = gp1(x, return_std=True)
    mu2, std2 = gp2(x, return_std=True)

    a, b, c = parEI(gp1, gp2, x2, '', EI=False, MD=MD, PAR_RES=PAR_RES)
    par = b.T[c, :]
    par += xi
    MEAN = 0  # running sum for observed hypervolume improvement
    if not PCE:  # Monte Carlo Sampling
        for ii in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improved from this point
            if idx[-1]:
                MEAN += H(newPar) - H(par)

        return (MEAN / NSAMPS)
    else:
        # Polynomial Chaos
        # (assumes 2 objective functions)
        distribution = cp.J(cp.Normal(0, std1), cp.Normal(0, std2))

        # sparse grid samples
        samples = distribution.sample(NSAMPS, rule='Halton')
        PCEevals = []
        for pp in range(NSAMPS):

            # add new point to Pareto Front
            evl = [np.random.normal(mu1, std1), np.random.normal(mu2, std2)]
            pears = np.append(par.T, evl, 1).T
            idx = is_pareto_efficient_simple(pears)
            newPar = pears[idx, :]

            # check if the Pareto front improves
            if idx[-1]:
                PCEevals.append(H(newPar) - H(par))
            else:
                PCEevals.append(0)
        polynomial_expansion = cp.orth_ttr(ORDER, distribution)
        foo_approx = cp.fit_regression(polynomial_expansion, samples, PCEevals)
        MEAN = cp.E(foo_approx, distribution)
        return (MEAN)
Example #6
def test_regression():
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.orth_ttr(order, dist, retall=1)
    data = dist.sample(samples)
    vals = np.zeros((samples, size))
    cp.fit_regression(orth, data, vals, "LS")
    cp.fit_regression(orth, data, vals, "T", order=0)
    cp.fit_regression(orth, data, vals, "TC", order=0)
Example #7
        def __init__(self,
                     dimension=None,
                     input=None,
                     output=None,
                     order=None):

            self.dimension = dimension
            self.input = np.transpose(input)
            self.output = output
            self.order = order

            self.distribution = cp.Iid(cp.Uniform(0, 1), self.dimension)
            orthogonal_expansion = cp.orth_ttr(self.order, self.distribution)
            self.poly = cp.fit_regression(orthogonal_expansion, self.input,
                                          self.output)
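The constructor above is the standard point-collocation recipe wrapped in a class; an equivalent standalone sketch with hypothetical training data (the Iid Uniform(0, 1) distribution implies the inputs are already scaled to the unit hypercube):

import numpy as np
import chaospy as cp

dimension, order, n_samples = 3, 2, 50

# hypothetical training data: inputs in [0, 1]^3 and a scalar response
input_samples = np.random.rand(n_samples, dimension)
output_samples = np.sum(input_samples**2, axis=1)

distribution = cp.Iid(cp.Uniform(0, 1), dimension)
expansion = cp.orth_ttr(order, distribution)
poly = cp.fit_regression(expansion, input_samples.T, output_samples)

prediction = poly(0.2, 0.5, 0.9)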
Example #8
    def test_circuit_model_order_2(self, order_cp: int = 2, bool_plot: bool = False):
        dim = 6
        key = ["R_b1", "R_b2", "R_f", "R_c1", "R_c2", "beta"]
        sobol_indices_quad_constantine = np.array(
            [5.0014515064e-01, 4.1167859899e-01, 7.4006053045e-02, 2.1802568214e-02, 5.1736552010e-08,
             1.4938996627e-05])

        M_constantine = np.array([50, 1e2, 5e2, 1e3, 5e3, 1e4, 5e4])
        sobol_indices_error_constantine = np.transpose(np.array(
            [[6.1114622870e-01, 2.7036543475e-01, 1.5466638009e-01, 1.2812367577e-01, 5.0229955234e-02,
              3.5420048253e-02, 1.4486328386e-02],
             [6.0074404490e-01, 3.2024096457e-01, 1.2296426366e-01, 9.6725945246e-02, 5.3143328175e-02,
              3.2748864016e-02, 1.1486316472e-02],
             [1.1789694228e-01, 4.6150927239e-02, 2.6268692965e-02, 1.8450563871e-02, 8.3656592318e-03,
              5.8550974309e-03, 2.8208921925e-03],
             [3.8013619286e-02, 1.6186288112e-02, 8.9893920304e-03, 6.3911249578e-03, 2.6219049423e-03,
              1.9215077698e-03, 9.5390224479e-04],
             [1.2340746448e-07, 4.8204289233e-08, 3.0780845307e-08, 2.5240466147e-08, 1.0551377101e-08,
              6.9506139894e-09, 3.3372151408e-09],
             [3.4241277775e-05, 1.8074628532e-05, 7.1554659714e-06, 5.0303467614e-06, 2.7593313990e-06,
              1.9529470403e-06, 7.2840043686e-07]]))

        x_lower = np.array([50, 25, 0.5, 1.2, 0.25, 50])  # table 3, constantine-2017
        x_upper = np.array([150, 70, 3.0, 2.5, 1.2, 300])  # table 3, constantine-2017

        circuit_model = CircuitModel()
        n_samples = M_constantine
        iN_vec = n_samples.astype(int)  # 8 if calc_second_order = False, else 14

        no_runs = np.zeros(len(iN_vec))
        indices = np.zeros(shape=(len(iN_vec), dim))
        indices_error = np.zeros(shape=(len(iN_vec), dim))
        idx = 0
        n_trials = 1
        no_runs_averaged = 1

        dist = cp.J(cp.Uniform(x_lower[0], x_upper[0]), cp.Uniform(x_lower[1], x_upper[1]),
                    cp.Uniform(x_lower[2], x_upper[2]), cp.Uniform(x_lower[3], x_upper[3]),
                    cp.Uniform(x_lower[4], x_upper[4]), cp.Uniform(x_lower[5], x_upper[5]))

        for iN in iN_vec:
            tmp_indices_error_av = np.zeros(dim)
            for i_trial in range(0, n_trials):
                seed = int(np.random.rand(1) * 2 ** 32 - 1)
                random_state = RandomState(seed)

                # https://github.com/jonathf/chaospy/issues/81

                dist_samples = dist.sample(iN)  # random samples or abscissas of polynomials ?
                values_f, _, _ = circuit_model.eval_model_averaged(dist_samples, no_runs_averaged,
                                                                   random_state=random_state)
                # Approximation with Chaospy
                poly = cp.orth_ttr(order_cp, dist)
                approx_model = cp.fit_regression(poly, dist_samples, values_f)
                tmp_indices_total = cp.Sens_t(approx_model, dist)

                tmp_error = relative_error_constantine_2017(tmp_indices_total, sobol_indices_quad_constantine)
                tmp_indices_error_av = tmp_indices_error_av + tmp_error
                print(iN)

            indices_error[idx, :] = tmp_indices_error_av / n_trials
            indices[idx, :] = tmp_indices_total
            no_runs[idx] = iN
            idx = idx + 1

        if bool_plot:
            col = np.array(
                [[0, 0.4470, 0.7410], [0.8500, 0.3250, 0.0980], [0.9290, 0.6940, 0.1250], [0.4940, 0.1840, 0.5560],
                 [0.4660, 0.6740, 0.1880], [0.3010, 0.7450, 0.9330]])

            plt.figure()
            for i in range(0, dim):
                plt.semilogx(no_runs, indices[:, i], '.--', label='%s (SALib)' % key[i], color=col[i, :])
                plt.semilogx([no_runs[0], max(no_runs)], sobol_indices_quad_constantine[i] * np.ones(2), 'k:',
                             label='Reference values', color=col[i, :])

            plt.xlabel('Number of samples')
            plt.ylabel('Sobol\' total indices')
            plt.legend()

            plt.figure()
            for i in range(0, dim):
                plt.loglog(no_runs, indices_error[:, i], '.--', label=key[i]+'(PC Approximation)', color=col[i, :])
                plt.loglog(M_constantine, sobol_indices_error_constantine[:, i], '.k:', color=col[i, :])

            plt.xlabel('Number of samples')
            plt.ylabel('Relative error (Sobol\' total indices)')
            plt.grid(True, 'minor', 'both')

            plt.legend()
            plt.show()

        # assert that the test ran to completion
        assert True
Example #9
def u(x, a, I):
    return I * np.exp(-a * x)


dist_R = cp.J(cp.Normal(), cp.Normal())
C = [[1, 0.5], [0.5, 1]]
mu = [0, 0]
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
nodes_R = dist_R.sample(2 * len(P), "M")
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_regression(P, nodes_R, samples_u)

# Rosenblatt transformation using the pseudo-spectral method


def u(x, a, I):
    return I * np.exp(-a * x)


C = [[1, 0.5], [0.5, 1]]
mu = np.array([0, 0])
dist_R = cp.J(cp.Normal(), cp.Normal())
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
nodes_R, weights_R = cp.generate_quadrature(3, dist_R)
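The pseudo-spectral variant is cut off after the quadrature call; a sketch of how it typically continues, mapping the independent quadrature nodes through the Rosenblatt transformation before fitting with the weights (reusing u, x, P, dist_R and dist_Q from above):

nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_quadrature(P, nodes_R, weights_R, samples_u)

mean = cp.E(u_hat, dist_R)
std = cp.Std(u_hat, dist_R)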
Example #10
print("\n Uncertainty measures (averaged)\n")
print('\n  E(Y)  |  Std(Y) \n')
plt.figure('mean')
for model, name, color in [
    (quadratic_area_model, 'Quadratic model', '#dd3c30'),
    (logarithmic_area_model, 'Logarithmic model', '#2775b5')
]:
    sample_scheme = 'R'
    # create samples
    samples = jpdf.sample(Ns, sample_scheme)
    # create orthogonal polynomials
    orthogonal_polynomials = cp.orth_ttr(polynomial_order, jpdf)
    # evaluate the model for all samples
    Y_area = model(pressure_range, samples)
    # polynomial chaos expansion
    polynomial_expansion = cp.fit_regression(orthogonal_polynomials, samples,
                                             Y_area.T)

    # calculate statistics
    plotMeanConfidenceAlpha = 5
    expected_value = cp.E(polynomial_expansion, jpdf)
    variance = cp.Var(polynomial_expansion, jpdf)
    standard_deviation = cp.Std(polynomial_expansion, jpdf)
    prediction_interval = cp.Perc(
        polynomial_expansion,
        [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
        jpdf)
    print('{:2.5f} | {:2.5f} : {}'.format(
        np.mean(expected_value) * unit_m2_cm2,
        np.mean(standard_deviation) * unit_m2_cm2, name))

    # compute sensitivity indices
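The loop body above stops at the sensitivity comment; a sketch of the lines that would typically follow, using the expansion and jpdf already in scope:

    S_first = cp.Sens_m(polynomial_expansion, jpdf)
    S_total = cp.Sens_t(polynomial_expansion, jpdf)
    print('{}: S1 = {}, ST = {}'.format(
        name, np.round(S_first, 3), np.round(S_total, 3)))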
Example #11
def test_regression():
    dist = cp.Iid(cp.Normal(), dim)
    orth, norms = cp.expansion.stieltjes(order, dist, retall=1)
    data = dist.sample(samples)
    vals = np.zeros((samples, size))
    cp.fit_regression(orth, data, vals)
Example #12
def linear_model(request, expansion_small, samples, evaluations):
    return chaospy.fit_regression(expansion_small,
                                  samples,
                                  evaluations,
                                  model=LINEAR_MODELS[request.param])
Example #13
def check_convergence(samples,
                      data,
                      joint_dist,
                      max_order,
                      norm_ord=None,
                      ref_values=None,
                      zero_offset=0.0):
    """
    Args:
        samples := (first axis must be along sample index not input index)
        model_evals :=
        norm_ord := any of the orders of norms supported by numpy.linalg.norm 
        ref_values(dict) := a dictionary of reference values to compare each case
        max_order(int) :=

    Calculate and display convergence of error with respect to the mean, and random check values.
    1. Determine "maximum/reference" order
    2. calculate reference values for E, var, Sm, St
    3. For each order calculate the error wrt to the reference value for all possible sample factors
    3.a. The results will be stored in a dict like 
        {measure: 
            {order: 
                [[sample_sizes], 
                [error_value]]
            }
        }
    4. Once complete plot the convergence of each measure
        fig_idxs = {measure:idx for idx,measure in enumerate(errors_dict.keys())}
        for measure, value in errors_dict.iteritems():
            plt.figure(fig_idxs[measure])
            for order,data in value.iteritems():

            plt.plot(value[
    """

    orders = range(1, max_order)
    measures = ["mean", "var"]
    n_pts_per_order = 10
    if len(data.shape) > 1:
        model_dim = data.shape[-1]  #TODO this is only valid for 1D arrays
    else:
        model_dim = 1
    error_dim = 3
    input_dim = len(joint_dist)

    error_dict = {
        measure:
        {order: np.zeros((error_dim, n_pts_per_order))
         for order in orders}
        for measure in measures
    }

    submeasures = ["sens_m", "sens_t"]

    suberror_dict = {
        submeasure: {
            par: {
                order: np.zeros((error_dim, n_pts_per_order))
                for order in orders
            }
            for par in range(input_dim)
        }
        for submeasure in submeasures
    }

    residual_error_dict = {
        idx:
        {order: np.zeros((error_dim, n_pts_per_order))
         for order in orders}
        for idx in range(model_dim)
    }
    raw_sensitivities = {
        submeasure: {
            par: {
                order: np.zeros((2, n_pts_per_order, *data.shape[1::]))
                for order in orders
            }
            for par in range(input_dim)
        }
        for submeasure in submeasures
    }
    raw_measures = {
        measure: {
            order: np.zeros((2, n_pts_per_order) + data.shape[1::])
            for order in orders
        }
        for measure in measures
    }

    if ref_values is None:
        #Calculate reference values
        orthoPoly = cp.orth_ttr(max_order, joint_dist)
        expansion_polynomial = cp.fit_regression(orthoPoly,
                                                 samples.transpose(), data)
        ref_values = calculate_uqsa_measures(joint_dist, expansion_polynomial)

        # Monte Carlo Estimates
        ref_values["mean"] = np.mean(data, axis=0)
        ref_values["var"] = np.var(data, axis=0)

    for order in orders:
        dim = input_dim
        ncoefs = cp.bertran.terms(order, dim)
        sample_sizes = np.linspace(ncoefs,
                                   samples.shape[0],
                                   n_pts_per_order,
                                   dtype="int")
        orthoPoly = cp.orth_ttr(order, joint_dist)
        for idx, sample_size in enumerate(sample_sizes):
            expansion_polynomial = cp.fit_regression(
                orthoPoly, samples[0:sample_size].transpose(),
                data[0:sample_size])
            uqsa_data = calculate_uqsa_measures(joint_dist,
                                                expansion_polynomial)
            uhat = np.array(
                [expansion_polynomial(*sample) for sample in samples])
            err = np.abs(uhat - data)
            rel_err = err / data
            if len(err.shape) == 1:
                err.shape = (err.shape[0], 1)
                rel_err.shape = (rel_err.shape[0], 1)

            abs_err_norm = np.array(
                [np.linalg.norm(ui) / len(ui) for ui in err.T])
            rel_err_norm = np.array(
                [np.linalg.norm(ui) / len(ui) for ui in rel_err.T])
            for uidx, err_val in enumerate(rel_err_norm):
                residual_error_dict[uidx][order][
                    0] = sample_sizes  #np.tile(sample_sizes, rel_err_norm.shape + (1,)).T
                residual_error_dict[uidx][order][1, idx] = err_val
                residual_error_dict[uidx][order][2, idx] = abs_err_norm[uidx]

            for measure in measures:
                raw_measures[measure][order][0] = np.tile(
                    sample_sizes, uqsa_data[measure].shape + (1, )).T
                err = np.abs(uqsa_data[measure] - ref_values[measure])
                u = err / ref_values[measure]  #TODO what if zero ref measure
                raw_measures[measure][order][1, idx] = u  #data[measure]
                rel_err = np.linalg.norm(u.flat, ord=norm_ord) / len(u.flat)
                abs_err = np.linalg.norm(
                    err.flat, ord=norm_ord
                )  #THIS DOESN'T MAKE ANY SENSE (averaging unnormalized absolute errors)
                # Store measures in dict position idx
                error_dict[measure][order][0] = sample_sizes
                error_dict[measure][order][1, idx] = rel_err
                error_dict[measure][order][2, idx] = abs_err

            for submeasure in submeasures:
                for par, value in suberror_dict[submeasure].items():
                    par_idx = int(par)
                    raw_sensitivities[submeasure][par][order][0] = np.tile(
                        sample_sizes,
                        uqsa_data[submeasure][par_idx].shape + (1, )).T
                    err = np.abs(uqsa_data[submeasure][par_idx] -
                                 ref_values[submeasure][par_idx])
                    u = err / (np.abs(ref_values[submeasure][par_idx]) +
                               zero_offset)  #TODO what if zero ref measure
                    raw_sensitivities[submeasure][par][order][
                        1, idx] = uqsa_data[submeasure][par_idx]
                    rel_err = np.linalg.norm(u.flat, ord=norm_ord)
                    abs_err = np.linalg.norm(err.flat, ord=norm_ord)
                    # Store measures in dict position idx
                    value[order][0] = sample_sizes
                    value[order][1, idx] = rel_err
                    value[order][2, idx] = abs_err

    error_dict.update(suberror_dict)
    error_dict["res_err"] = residual_error_dict

    return error_dict, raw_measures, raw_sensitivities
Example #14
            dataPoints[k] = ws[idx]
            samples_u[k] = u[idx]
    for j in range(0,SimLength):
        dist = cp.Normal(np.mean(WindSpeed_all[:,j]),np.std(WindSpeed_all[:,j]))
        orthPoly = cp.orth_ttr(polyOrder, dist)
        dataPoints_initial = dist.sample(NoOfSamples[i],rule='L')
        dataPoints = np.zeros(NoOfSamples[i])
        samples_u = np.zeros(NoOfSamples[i])
        #idx = np.zeros(NoOfSamples[i])
        u = Gamma_all[:,j]
        ws= WindSpeed_all[:,j]
        for k in range(0,NoOfSamples[i]):
            idx= (np.abs(ws - dataPoints_initial[k])).argmin()
            dataPoints[k] = ws[idx]
            samples_u[k] = u[idx]
        approx = cp.fit_regression(orthPoly, dataPoints, samples_u)
        GammaMeanPCE[i,j] = cp.E(approx, dist)
        GammaStdPCE[i,j] = cp.Std(approx, dist)
        print(i,j)

#%%
timestr = time.strftime("%Y%m%d")

f = open(timestr+'_GammaMean.pckl', 'wb')
pickle.dump(GammaMean, f)
f.close()

f = open(timestr+'_GammaStd.pckl', 'wb')
pickle.dump(GammaStd, f)
f.close()
Example #15
def u(x,a, I):
    return I*np.exp(-a*x)

dist_R = cp.J(cp.Normal(), cp.Normal())
C = [[1, 0.5], [0.5, 1]]
mu = [0, 0]
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
nodes_R = dist_R.sample(2*len(P), "M")
nodes_Q = dist_Q.inv(dist_R.fwd(nodes_R))

x = np.linspace(0, 1, 100)
samples_u = [u(x, *node) for node in nodes_Q.T]
u_hat = cp.fit_regression(P, nodes_R, samples_u)




# Rosenblatt transformation using the pseudo-spectral method

def u(x,a, I):
    return I*np.exp(-a*x)

C = [[1,0.5],[0.5,1]]
mu = np.array([0, 0])
dist_R = cp.J(cp.Normal(), cp.Normal())
dist_Q = cp.MvNormal(mu, C)

P = cp.orth_ttr(2, dist_R)
Example #16
	#Net.Control_Input('Trials/StochasticDemandsPWGInput.csv')
	Net.Control_Input(Directory+'Demands.csv')
	Net.MOC_Run(10)
	Output[:,i] = Net.nodes[1].TranH
	

#Net.MOC_Run(86350)
#Net.geom_Plot(plot_Node_Names = True)
#Net.transient_Node_Plot(['6','10','13','16','21','24','31'])

#Net.transient_Node_Plot(['1','2','3','4','5','6'])

#for Node in Net.nodes:
#	np.save(Directory +'MeasureData'+str(Node.Name)+'.npy',Node.TranH)
	
#for Node in Net.nodes:
#	pp.scatter([int(Node.Name)], [np.mean(Node.TranH)])
#PE = np.zeros(9999)#999)
#KE = np.zeros(9999)
#for pipe in Net.pipes:
#	PE += np.array(pipe.PE)
#	KE += np.array(pipe.KE)
polynomial_expansion = cp.orth_ttr(1, distribution)
foo_approx = cp.fit_regression(polynomial_expansion, samples[:10], Output[:,:10].T)

expected = cp.E(foo_approx, distribution)
deviation = cp.Std(foo_approx, distribution)


x,y = normal_dist(expected[-1],deviation[-1]**2)
Example #17
P, norms = cp.orth_ttr(order, dist, retall=True)
nodes, weights = cp.generate_quadrature(order+1, dist, rule="G")
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_quadrature(P, nodes, weights, solves, norms=norms)

mean = cp.E(U_hat, dist)
var = cp.Var(U_hat, dist)


## Polynomial chaos expansion
## using Point collocation method and quasi-random samples
order = 5
P = cp.orth_ttr(order, dist)
nodes = dist.sample(2*len(P), "M")
solves = [u(x, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_regression(P, nodes, solves, rule="T")

mean = cp.E(U_hat, dist)
var = cp.Var(U_hat, dist)


## Polynomial chaos expansion
## using the intrusive Galerkin method
# :math:
# u' = -a*u
# d/dx sum(c*P) = -a*sum(c*P)
# <d/dx sum(c*P),P[k]> = <-a*sum(c*P), P[k]>
# d/dx c[k]*<P[k],P[k]> = -sum(c*<a*P,P[k]>)
# d/dx c = -E( outer(a*P,P) ) / E( P*P )
#
# u(0) = I
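The comment block above is the Galerkin projection of u' = -a*u; a minimal illustrative implementation of that derivation (the joint distribution for (a, I) is hypothetical, and cp.variable/cp.outer are assumed to be available, as in the chaospy releases that provide orth_ttr):

import numpy as np
import chaospy as cp
from scipy.integrate import odeint

dist = cp.J(cp.Uniform(0.1, 0.2), cp.Normal(10, 1))  # hypothetical (a, I)
P, norms = cp.orth_ttr(3, dist, retall=True)
a, I = cp.variable(2)

E_aPP = cp.E(a * cp.outer(P, P), dist)   # <a*P[j], P[k]>
c0 = cp.E(I * P, dist) / norms           # Galerkin projection of u(0) = I

def rhs(c, x):
    # d/dx c[k] = -sum_j c[j] * <a*P[j], P[k]> / <P[k], P[k]>
    return -np.dot(E_aPP, c) / norms

x = np.linspace(0, 10, 101)
coefficients = odeint(rhs, c0, x)

mean = coefficients[:, 0]                                  # E[u](x)
var = np.sum(coefficients[:, 1:]**2 * norms[1:], axis=1)   # Var[u](x)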
Example #18
Var1 = cp.Var(U_hat, dist)
print('Polynomial chaos using the pseudo-spectral method')
print('and Gaussian quadrature:')
print('E : ',E1)
print('Var : ',Var1)

u_up1=E1+np.sqrt(Var1)
u_dw1=E1-np.sqrt(Var1)

# polynomial chaos
# using the point collocation method and pseudo-random samples
ordre = 5
P = cp.orth_ttr(ordre, dist)
nodes = dist.sample(2*len(P), "M")
solves = [u(t, s[0], s[1]) for s in nodes.T]
U_hat = cp.fit_regression(P, nodes, solves, rule="T")

E2= cp.E(U_hat, dist)
Var2 = cp.Var(U_hat, dist)
print(' ')
print('Polynomial chaos using the point collocation method')
print('and pseudo-random samples:')
print('E : ',E2)
print('Var : ',Var2)

#plot
fig, ax = plt.subplots(1)
rcParams['xtick.labelsize'] = 14
rcParams['ytick.labelsize'] = 14
rcParams['font.size']= 16
xlabel('t (s)', fontsize=14)
Example #19
        stateTensor = pool.map(Simulate,samples.T)
        savemat(saveDir+'MC',{'states':stateTensor, 'samples':samples})

    else: # Polynomial Chaos Expansion !!!!
        if args.type == 'qmc':
            # Quasi-Monte Carlo with 1/4 the number of MC samples, and a PCE built from it
            samples = pdf.sample(n,'S')
            stateTensor = pool.map(Simulate,samples.T)
            if not args.no_save:
                savemat(saveDir+'SobolMC',{'states':stateTensor, 'samples':samples})
            
            # data = loadmat('./data/SobolMC')
            # samples = data['samples']
            # stateTensor = data['states']
            polynomials = cp.orth_ttr(order=2, dist=pdf)
            PCE = cp.fit_regression(polynomials, samples, stateTensor)
        elif args.type == 'pce':
            #Quadrature based PCE
            polynomials = cp.orth_ttr(order=2, dist=pdf)
            samples,weights = cp.generate_quadrature(order=2, domain=pdf, rule="Gaussian")
            stateTensor = pool.map(Simulate,samples.T)
            PCE = cp.fit_quadrature(polynomials,samples,weights,stateTensor)
            
        data = loadmat(saveDir+'MC') #Load the MC samples for an apples-to-apples comparison
        pceTestPoints = data['samples']    
        stateTensorPCE = np.array([PCE(*point) for point in pceTestPoints.T])
        Expectation = cp.E(poly=PCE,dist=pdf)
        if not args.no_save:      
            savemat(saveDir+'PCE2',{'states':stateTensorPCE, 'samples':pceTestPoints,'mean':Expectation})
        # savemat('./data/PCE2',{'states':stateTensorPCE[:,:,:,0], 'samples':pceTestPoints,'mean':Expectation})
    
Example #20
def plot_figures():
    """Plot figures for tutorial."""
    numpy.random.seed(1000)

    def foo(coord, param):
        return param[0] * numpy.e**(-param[1] * coord)

    coord = numpy.linspace(0, 10, 200)
    distribution = cp.J(cp.Uniform(1, 2), cp.Uniform(0.1, 0.2))

    samples = distribution.sample(50)
    evals = numpy.array([foo(coord, sample) for sample in samples.T])

    plt.plot(coord, evals.T, "k-", lw=3, alpha=0.2)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.savefig("demonstration.png")
    plt.clf()

    samples = distribution.sample(1000, "H")
    evals = [foo(coord, sample) for sample in samples.T]
    expected = numpy.mean(evals, 0)
    deviation = numpy.std(evals, 0)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using Monte Carlo simulation")
    plt.savefig("results_montecarlo.png")
    plt.clf()

    polynomial_expansion = cp.orth_ttr(8, distribution)
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using point collocation method")
    plt.savefig("results_collocation.png")
    plt.clf()

    abscissas, weights = cp.generate_quadrature(8, distribution, "C")
    evals = [foo(coord, val) for val in abscissas.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, abscissas, weights,
                                   evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)

    plt.fill_between(coord,
                     expected - deviation,
                     expected + deviation,
                     color="k",
                     alpha=0.3)
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using psuedo-spectral projection method")
    plt.savefig("results_spectral.png")
    plt.clf()
Example #21
    # calculate sensitivity indices
    A_s, B_s, C_s, f_A, f_B, f_C, S_mc, ST_mc = mc_sensitivity_linear(Ns_mc, jpdf, w)

    Sensitivities=np.column_stack((S_mc,s**2))
    row_labels= ['S_'+str(idx) for idx in range(1,Nrv+1)]
    print("First Order Indices")
    print(pd.DataFrame(Sensitivities,columns=['Smc','Sa'],index=row_labels).round(3))
    # end Monte Carlo

    # Polychaos computations
    Ns_pc = 80
    samples_pc = jpdf.sample(Ns_pc)
    polynomial_order = 4
    poly = cp.orth_ttr(polynomial_order, jpdf)
    Y_pc = linear_model(w, samples_pc.T)
    approx = cp.fit_regression(poly, samples_pc, Y_pc, rule="T")

    exp_pc = cp.E(approx, jpdf)
    std_pc = cp.Std(approx, jpdf)
    print("Statistics polynomial chaos\n")
    print('\n        E(Y)  |  std(Y) \n')
    print('pc  : {:2.5f} | {:2.5f}'.format(float(exp_pc), std_pc))
    
    
    S_pc = cp.Sens_m(approx, jpdf)

    Sensitivities=np.column_stack((S_mc,S_pc, s**2))
    print("\nFirst Order Indices")
    print(pd.DataFrame(Sensitivities,columns=['Smc','Spc','Sa'],index=row_labels).round(3))

#     print("\nRelative errors")
Example #22
import chaospy
import chaospy as cp
import pandas as pd
import numpy as np

QUAD_ORDER = 18
quad = False


def f(x, y):
    return (1 - x)**2 * 10 * (y - x**2)**2


distribution = chaospy.J(chaospy.Normal(0, 1), chaospy.Normal(0, 1))

if quad:
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    X, W = chaospy.generate_quadrature(QUAD_ORDER, distribution, rule="G")
    evals = [f(x[0], x[1]) for x in X.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, X, W, evals)
else:
    dat = pd.read_csv('./dakota_tabular.dat', sep=r'\s+')
    polynomial_expansion = cp.orth_ttr(QUAD_ORDER, distribution)
    samples = np.array([dat.x1, dat.x2])
    evals = dat.response_fn_1
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)

total = chaospy.descriptives.sensitivity.total.Sens_t(foo_approx, distribution)
main = chaospy.descriptives.sensitivity.main.Sens_m(foo_approx, distribution)
Example #23
def gpc(dists, distsMeta, wallModel, order, hdf5group, sampleScheme='M'):
    print "\n GeneralizedPolynomialChaos - order {}\n".format(order)

    dim = len(dists)

    expansionOrder = order
    numberOfSamples = 4 * cp.terms(expansionOrder, dim)

    # Sample in independent space
    samples = dists.sample(numberOfSamples, sampleScheme).transpose()
    model = wallModel(distsMeta)

    # Evaluate the model (which is not linear obviously)
    pool = multiprocessing.Pool()
    data = pool.map(model, samples)
    pool.close()
    pool.join()
    C_data = [retval[0] for retval in data]
    a_data = [retval[1] for retval in data]

    C_data = np.array(C_data)
    a_data = np.array(a_data)
    # Orthogonal C_polynomial from marginals
    orthoPoly = cp.orth_ttr(expansionOrder, dists)

    for data, outputName in zip([C_data, a_data], ['Compliance', 'Area']):

        # Fit the model together in independent space
        C_polynomial = cp.fit_regression(orthoPoly, samples.transpose(), data)

        # save data to dictionary
        plotMeanConfidenceAlpha = 5

        C_mean = cp.E(C_polynomial, dists)
        C_std = cp.Std(C_polynomial, dists)

        Si = cp.Sens_m(C_polynomial, dists)
        STi = cp.Sens_t(C_polynomial, dists)

        C_conf = cp.Perc(
            C_polynomial,
            [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
            dists)

        a = np.linspace(0, 100, 1000)
        da = a[1] - a[0]
        C_cdf = cp.Perc(C_polynomial, a, dists)

        C_pdf = da / (C_cdf[1::] - C_cdf[0:-1])
        # Resample to generate full histogram
        samples2 = dists.sample(numberOfSamples * 100, sampleScheme)
        C_data2 = C_polynomial(*samples2).transpose()

        # save in hdf5 file
        solutionDataGroup = hdf5group.create_group(outputName)

        solutionData = {
            'mean': C_mean,
            'std': C_std,
            'confInt': C_conf,
            'Si': Si,
            'STi': STi,
            'cDataGPC': C_data,
            'samplesGPC': samples,
            'cData': C_data2,
            'samples': samples2.transpose(),
            'C_pdf': C_pdf
        }

        for variableName, variableValue in solutionData.items():
            solutionDataGroup.create_dataset(variableName, data=variableValue)
Example #24
def collocation_model(expansion_small, samples_small, evaluations_small):
    return chaospy.fit_regression(expansion_small, samples_small,
                                  evaluations_small)
Example #25
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : pandas DataFrame
            Input data for analysis.

        Returns
        -------
        PCEAnalysisResults
            Use it to get the sobol indices and other information.
        """
        def sobols(P, coefficients):
            """ Utility routine to calculate sobols based on coefficients
            """
            A = np.array(P.coefficients) != 0
            multi_indices = np.array(
                [P.exponents[A[:, i]].sum(axis=0) for i in range(A.shape[1])])
            sobol_mask = multi_indices != 0
            _, index = np.unique(sobol_mask, axis=0, return_index=True)
            index = np.sort(index)
            sobol_idx_bool = sobol_mask[index]
            sobol_idx_bool = np.delete(sobol_idx_bool, [0], axis=0)
            n_sobol_available = sobol_idx_bool.shape[0]
            if len(coefficients.shape) == 1:
                n_out = 1
            else:
                n_out = coefficients.shape[1]
            n_coeffs = coefficients.shape[0]
            sobol_poly_idx = np.zeros([n_coeffs, n_sobol_available])
            for i_sobol in range(n_sobol_available):
                sobol_poly_idx[:, i_sobol] = np.all(
                    sobol_mask == sobol_idx_bool[i_sobol], axis=1)
            sobol = np.zeros([n_sobol_available, n_out])
            for i_sobol in range(n_sobol_available):
                sobol[i_sobol] = np.sum(np.square(
                    coefficients[sobol_poly_idx[:, i_sobol] == 1]),
                                        axis=0)
            idx_sort_descend_1st = np.argsort(sobol[:, 0], axis=0)[::-1]
            sobol = sobol[idx_sort_descend_1st, :]
            sobol_idx_bool = sobol_idx_bool[idx_sort_descend_1st]
            sobol_idx = [0 for _ in range(sobol_idx_bool.shape[0])]
            for i_sobol in range(sobol_idx_bool.shape[0]):
                sobol_idx[i_sobol] = np.array(
                    [i for i, x in enumerate(sobol_idx_bool[i_sobol, :]) if x])
            var = ((coefficients[1:]**2).sum(axis=0))
            sobol = sobol / var
            return sobol, sobol_idx, sobol_idx_bool

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
            'fit': {},
            'Fourier_coefficients': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        #        samples = {k: [] for k in qoi_cols}
        #        for run_id in data_frame[('run_id', 0)].unique():
        #            for k in qoi_cols:
        #                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
        #                samples[k].append(data.values.flatten())

        samples = {k: [] for k in qoi_cols}
        for k in qoi_cols:
            samples[k] = data_frame[k].values

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit, fc = cp.fit_regression(P, nodes, samples[k], retall=1)
            else:
                fit, fc = cp.fit_quadrature(P,
                                            nodes,
                                            weights,
                                            samples[k],
                                            retall=1)
            results['fit'][k] = fit
            results['Fourier_coefficients'][k] = fc

            # Percentiles: 1%, 10%, 50%, 90% and 99%
            P01, P10, P50, P90, P99 = cp.Perc(
                fit, [1, 10, 50, 90, 99], self.sampler.distribution).squeeze()
            results['percentiles'][k] = {
                'p01': P01,
                'p10': P10,
                'p50': P50,
                'p90': P90,
                'p99': P99
            }

            if self.sampling:  # use chaospy's sampling method

                # Statistical moments
                mean = cp.E(fit, self.sampler.distribution)
                var = cp.Var(fit, self.sampler.distribution)
                std = cp.Std(fit, self.sampler.distribution)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
                sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
                sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
                sobols_first_dict = {}
                sobols_second_dict = {}
                sobols_total_dict = {}
                for i, param_name in enumerate(self.sampler.vary.vary_dict):
                    sobols_first_dict[param_name] = sobols_first_narr[i]
                    sobols_second_dict[param_name] = sobols_second_narr[i]
                    sobols_total_dict[param_name] = sobols_total_narr[i]

                results['sobols_first'][k] = sobols_first_dict
                results['sobols_second'][k] = sobols_second_dict
                results['sobols_total'][k] = sobols_total_dict

            else:  # use PCE coefficients

                # Statistical moments
                mean = fc[0]
                var = np.sum(fc[1:]**2, axis=0)
                std = np.sqrt(var)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobol, sobol_idx, _ = sobols(P, fc)
                varied = [_ for _ in self.sampler.vary.get_keys()]
                S1 = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                ST = {_: np.zeros(sobol.shape[-1]) for _ in varied}
                #S2 = {_ : {__: np.zeros(sobol.shape[-1]) for __ in varied} for _ in varied}
                #for v in varied: del S2[v][v]
                S2 = {
                    _: np.zeros((len(varied), sobol.shape[-1]))
                    for _ in varied
                }
                for n, si in enumerate(sobol_idx):
                    if len(si) == 1:
                        v = varied[si[0]]
                        S1[v] = sobol[n]
                    elif len(si) == 2:
                        v1 = varied[si[0]]
                        v2 = varied[si[1]]
                        #S2[v1][v2] = sobol[n]
                        #S2[v2][v1] = sobol[n]
                        S2[v1][si[1]] = sobol[n]
                        S2[v2][si[0]] = sobol[n]
                    for i in si:
                        ST[varied[i]] += sobol[n]

                results['sobols_first'][k] = S1
                results['sobols_second'][k] = S2
                results['sobols_total'][k] = ST

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
Example #26
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobol_indices',
             'correlation_matrices', 'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame[('run_id', 0)].unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
                samples[k].append(data.values.flatten())

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit = cp.fit_regression(P, nodes, samples[k])
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles: 10% and 90%
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            for i, param_name in enumerate(self.sampler.vary.vary_dict):
                sobols_first_dict[param_name] = sobols_first_narr[i]
                sobols_second_dict[param_name] = sobols_second_narr[i]
                sobols_total_dict[param_name] = sobols_total_narr[i]

            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
Example #27
u2 = cp.Uniform(0,1)
joint_distribution = cp.J(u1, u2)

# 2. generate orthogonal polynomials
polynomial_order = 3
poly = cp.orth_ttr(polynomial_order, joint_distribution)

# 3.1 generate samples
number_of_samples = 100
samples = joint_distribution.sample(size=number_of_samples, rule='R')

# 3.2 evaluate the simple model for all samples
model_evaluations = samples[0]+samples[1]*samples[0]

# 3.3 use regression to generate the polynomial chaos expansion
gpce_regression = cp.fit_regression(poly, samples, model_evaluations)
# end example linear regression


# _Spectral Projection_
# spectral projection in chaospy
cp.fit_quadrature?
# end spectral projection in chaospy


# example spectral projection
# 1. define marginal and joint distributions
u1 = cp.Uniform(0,1)
u2 = cp.Uniform(0,1)
joint_distribution = cp.J(u1, u2)
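The spectral-projection example stops after the joint distribution; a sketch of the usual remaining steps, reusing the same simple model and polynomial_order as the regression example above:

poly = cp.orth_ttr(polynomial_order, joint_distribution)
nodes, weights = cp.generate_quadrature(polynomial_order + 1, joint_distribution, rule="G")
model_evaluations = nodes[0] + nodes[1] * nodes[0]
gpce_quadrature = cp.fit_quadrature(poly, nodes, weights, model_evaluations)

mean = cp.E(gpce_quadrature, joint_distribution)
std = cp.Std(gpce_quadrature, joint_distribution)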
Example #28
####
#	Plotting input distributions
#pp.figure()
#pp.hist(samples[0,:],density=True,color='k',bins=40,alpha=0.50,label='Samples')
#d = np.linspace(min(samples[0,:]),max(samples[0,:]),1000)
#pp.plot(d,cp.Normal(1,0.4).pdf(d),'k',label='Roughness PDF')
#pp.xlabel('Roughness (mm)')
#pp.ylabel('Frequency')
#pp.show()

Node = 4
ConditionPoints = 100
Order = 3

polynomial_expansion = cp.orth_ttr(Order, distribution)
foo_approx = cp.fit_regression(polynomial_expansion, samples[:ConditionPoints], output[:ConditionPoints,Node,:])

coefs_kernal = cp.descriptives.misc.QoI_Dist(foo_approx,distribution)


#pp.figure(figsize=(10,6))
#axs = []
#axs.append(pp.subplot2grid((2,3),(0,0),colspan = 3))
#axs.append(pp.subplot2grid((2,3),(1,0)))
#axs.append(pp.subplot2grid((2,3),(1,1)))
#axs.append(pp.subplot2grid((2,3),(1,2)))

##axs[0].plot(Transient_Times,np.mean(output[:,Node,:].T,axis=1),'k--') 
##axs[0].fill_between(Transient_Times,np.percentile(output[:,Node,:].T,5,axis=1),np.percentile(output[:,Node,:].T,95,axis=1),alpha = 0.5)

##axs[0].plot(Transient_Times,cp.E(foo_approx,demand_distribution),'k')
Example #29
Input_MZ_MY_Turb2 = [yaw_1_train_scaled, yaw_2_train_scaled, Distance_train_scaled, MZ_MY_scaled]

Dist_max = np.max(Input_Power2[2])

########## Creating the individual surrogate models for the power and the DEL of the upstream and downstream turbine ###############################
####################################################################################################################################################

# Creating the surrogate model for the power of the upstream turbine
distribution1 = cp.J(cp.Normal(0, 4.95/30))
orthogonal_expansion_ttr1 = cp.orth_ttr(3, distribution1 ) 

Matrix = []
Input = Input_Power1;
for drand in range(0, 9):
    I_rand = random.sample(range(1, len(Input[0])), int(0.9*len(Input[0])))
    approx_model_ttr_Power1  = cp.fit_regression(orthogonal_expansion_ttr1, [Input[0][I_rand]],Input[1][I_rand])
    Coefs = []
    for dq in range(0, len(approx_model_ttr_Power1.keys)):
        a = approx_model_ttr_Power1.A[approx_model_ttr_Power1.keys[dq]]
        Coefs.append(a.tolist())
    Matrix.append(Coefs)
Coefs = np.mean(Matrix, axis = 0)
for dq in range(0, len(approx_model_ttr_Power1.keys)):
    approx_model_ttr_Power1.A[approx_model_ttr_Power1.keys[dq]] = Coefs[dq]

# Creating the surrogate model for the power of the downstream turbine
distribution2 = cp.J(cp.Normal(0, 4.95/30), cp.Normal(0, 4.95/30), cp.Uniform(0, 1))
orthogonal_expansion_ttr2 = cp.orth_ttr(4, distribution2 )

Input = Input_Power2;
Matrix = []

Example #30
          solves1.append(qoi[0:6])
          solves2.append(qoi[6])
          solves3.append(qoi[7])








        # Create surrogate model
        #u_hat(x,t;q)=sum_n C_n(x) * P_n(q)
        qoi1_hat =  cp.fit_regression(Polynomials, nodes, solves1,rule=rgm_rule)
        qoi2_hat = cp.fit_regression(Polynomials, nodes, solves2,rule=rgm_rule)
        qoi3_hat =  cp.fit_regression(Polynomials, nodes, solves3,rule=rgm_rule)

       #output qoi metrics
        mean = cp.E(qoi1_hat, joint_KL)
        var = cp.Var(qoi1_hat, joint_KL)
        std = np.sqrt(var)
        cv = np.divide(np.array(std), np.array(mean))
        kurt = cp.Kurt(qoi1_hat,joint_KL)
        skew = cp.Skew(qoi1_hat,joint_KL)
        metrics = np.array([mean, std,var,cv,kurt,skew]).T
        rawdata = np.array(qoi1_hat(*joint_KL.sample(10**5))).T#np.array(solves1)
        prodata =  np.column_stack((qoi_names, metrics.astype(np.object)))
        #prodata[:,1].astype(float)
Example #31
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobol_indices',
             'correlation_matrices', 'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {}
                             for k in qoi_cols},
            'sobols_second': {k: {}
                              for k in qoi_cols},
            'sobols_total': {k: {}
                             for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get the Polynomial
        P = self.sampler.P

        # Get the PCE variant to use (regression or projection)
        regression = self.sampler.regression

        # Compute nodes (and weights)
        if regression:
            nodes = cp.generate_samples(order=self.sampler.n_samples,
                                        domain=self.sampler.distribution,
                                        rule=self.sampler.rule)
        else:
            nodes, weights = cp.generate_quadrature(
                order=self.sampler.quad_order,
                dist=self.sampler.distribution,
                rule=self.sampler.rule,
                sparse=self.sampler.quad_sparse,
                growth=self.sampler.quad_growth)

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame.run_id.unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame['run_id'] == run_id][k]
                samples[k].append(data.values)

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                if samples[k][0].dtype == object:
                    for i in range(self.sampler.count):
                        samples[k][i] = samples[k][i].astype("float64")
                fit = cp.fit_regression(P, nodes, samples[k], "T")
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles (Pxx)
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            ipar = 0
            i = 0
            for param_name in self.sampler.vary.get_keys():
                j = self.sampler.params_size[ipar]
                sobols_first_dict[param_name] = sobols_first_narr[i:i + j]
                sobols_second_dict[param_name] = sobols_second_narr[i:i + j]
                sobols_total_dict[param_name] = sobols_total_narr[i:i + j]
                i += j
                ipar += 1
            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return results
Alpha = cp.Normal(1,0.1)
Beta = cp.Normal(0.1,0.01)
F = cp.Normal(0.02,0.001)
K = cp.Uniform(0.01,0.05)
Distributions = cp.J(Alpha,Beta,F,K)


maxT = 60.0
dT = 0.002
time = np.arange(0,maxT+dT,dT)

Order = 3
NoSamples = 40
Output = TurbData[:NoSamples,:,-1]
polynomial_expansion = cp.orth_ttr(Order, Distributions)
foo_approx = cp.fit_regression(polynomial_expansion, Samples[:,:NoSamples], Output[:,-1])
expected = cp.E(foo_approx, Distributions)
deviation = cp.Std(foo_approx, Distributions)
COV = cp.Cov(foo_approx, Distributions)
#Perc = cp.Perc(foo_approx,[5,95],Distributions)

f,axs = pp.subplots(figsize=(9, 6),nrows = 1,ncols = 1,sharex=True)
axs.plot(time[1:],expected,'k')
axs.fill_between(time[1:],expected+deviation,expected-deviation,color='k',alpha=0.25)
#axs.fill_between(time[1:],Perc[0],Perc[1],colour = 'k',alpha= 0.25)

pp.show()