def polynomial_chaos_sens(Ns_pc,
                          jpdf,
                          polynomial_order,
                          poly=None,
                          return_reg=False):
    N_terms = int(len(jpdf) / 2)
    # 1. generate orthogonal polynomials
    poly = poly or cp.orth_ttr(polynomial_order, jpdf)
    # 2. generate samples with random sampling
    samples_pc = jpdf.sample(size=Ns_pc, rule='R')
    # 3. evaluate the model; transpose the samples and split the input data
    transposed_samples = samples_pc.transpose()
    samples_z = transposed_samples[:, :N_terms]
    samples_w = transposed_samples[:, N_terms:]
    model_evaluations = linear_model(samples_w, samples_z)
    # 4. calculate generalized polynomial chaos expression
    gpce_regression = cp.fit_regression(poly, samples_pc, model_evaluations)
    # 5. get sensitivity indices
    Spc = cp.Sens_m(gpce_regression, jpdf)
    Stpc = cp.Sens_t(gpce_regression, jpdf)

    if return_reg:
        return Spc, Stpc, gpce_regression
    else:
        return Spc, Stpc
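A minimal usage sketch of polynomial_chaos_sens (the stand-in linear_model below and the four-dimensional jpdf are assumptions; the routine expects the joint density to stack the z-inputs first and the w-inputs second):

import chaospy as cp
import numpy as np

def linear_model(samples_w, samples_z):
    # hypothetical additive stand-in model: y = sum_i w_i * z_i (one sample per row)
    return np.sum(samples_w * samples_z, axis=1)

jpdf = cp.J(cp.Uniform(-1, 1), cp.Uniform(-1, 1),   # z_1, z_2
            cp.Normal(0, 1), cp.Normal(0, 1))       # w_1, w_2
Spc, Stpc = polynomial_chaos_sens(Ns_pc=500, jpdf=jpdf, polynomial_order=3)
print(Spc, Stpc)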
def calculate_uqsa_measures(joint_dist, polynomial, alpha=5):
    """ Use chaospy to calculate appropriate indices of uq and sa"""
    dists = joint_dist
    mean = cp.E(polynomial, dists)
    var = cp.Var(polynomial, dists)
    std = cp.Std(polynomial, dists)
    conInt = cp.Perc(polynomial, [alpha / 2., 100 - alpha / 2.], dists)
    sens_m = cp.Sens_m(polynomial, dists)
    sens_m2 = cp.Sens_m2(polynomial, dists)
    sens_t = cp.Sens_t(polynomial, dists)
    return dict(mean=mean,
                var=var,
                std=std,
                conInt=conInt,
                sens_m=sens_m,
                sens_m2=sens_m2,
                sens_t=sens_t)
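A minimal sketch of calling calculate_uqsa_measures, assuming a gPC surrogate fitted by regression to a toy model (all names are local to this sketch):

import chaospy as cp

joint = cp.J(cp.Uniform(0, 1), cp.Uniform(0, 1))
basis = cp.orth_ttr(2, joint)
samples = joint.sample(100, rule='R')
evals = samples[0] + samples[1] ** 2          # toy model y = x1 + x2**2
surrogate = cp.fit_regression(basis, samples, evals)
measures = calculate_uqsa_measures(joint, surrogate, alpha=5)
print(measures['mean'], measures['sens_m'])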
def calculate_sobol_indices(quad_deg_1D, poly_deg_1D, joint_distr, sparse_bool,
                            title_names):
    nodes, weights = cp.generate_quadrature(quad_deg_1D,
                                            joint_distr,
                                            rule='G',
                                            sparse=sparse_bool)
    c, k, f, y0, y1 = nodes

    poly = cp.orth_ttr(poly_deg_1D, joint_distr, normed=True)

    y_out = [
        discretize_oscillator_odeint(model, atol, rtol, (y0_, y1_),
                                     (c_, k_, f_, w), t)[-1]
        for c_, k_, f_, y0_, y1_ in zip(c, k, f, y0, y1)
    ]

    # find the generalized polynomial chaos expansion and its coefficients;
    # gPC_m is the polynomial surrogate of degree poly_deg_1D
    gPC_m, expansion_coeff = cp.fit_quadrature(poly,
                                               nodes,
                                               weights,
                                               y_out,
                                               retall=True)
    print(f'Expansion coeff [0] (mean) for poly {poly_deg_1D} = {expansion_coeff[0]}')
    #mu = cp.E(gPC_m, joint_distr)
    #print(f'Mean value from gPCE: {mu}')

    # Sobol indices
    first_order_Sobol_ind = cp.Sens_m(gPC_m, joint_distr)
    total_Sobol_ind = cp.Sens_t(gPC_m, joint_distr)

    print("The number of quadrature nodes for the grid is", len(nodes.T))
    print(f'The first order Sobol indices are \n {first_order_Sobol_ind}')
    print(f"The total Sobol' indices are \n {total_Sobol_ind}")

    plot_sobol_indices(first_order_Sobol_ind, title_names[0], False)
    plot_sobol_indices(total_Sobol_ind, title_names[1], False)

    return first_order_Sobol_ind, total_Sobol_ind
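The sparse_bool flag above switches between a full tensor-product grid and a Smolyak sparse grid; a short sketch (five uniform inputs as a stand-in for the oscillator's c, k, f, y0, y1) showing how strongly the node count differs:

import chaospy as cp

dist_5d = cp.J(*[cp.Uniform(0, 1) for _ in range(5)])
nodes_full, _ = cp.generate_quadrature(3, dist_5d, rule='G', sparse=False)
nodes_sparse, _ = cp.generate_quadrature(3, dist_5d, rule='G', sparse=True)
print(nodes_full.shape[1], nodes_sparse.shape[1])  # 4**5 = 1024 full nodes vs far fewer sparse nodes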
Example #4
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : pandas DataFrame
            Input data for analysis.

        Returns
        -------
        PCEAnalysisResults
            Use it to get the Sobol indices and other information.
        """
        def sobols(P, coefficients):
            """Utility routine to calculate Sobol indices from the PCE coefficients."""
            # effective multi-index (summed monomial exponents) per basis polynomial
            A = np.array(P.coefficients) != 0
            multi_indices = np.array(
                [P.exponents[A[:, i]].sum(axis=0) for i in range(A.shape[1])])
            # boolean pattern: which input dimensions each basis term depends on
            sobol_mask = multi_indices != 0
            _, index = np.unique(sobol_mask, axis=0, return_index=True)
            index = np.sort(index)
            sobol_idx_bool = sobol_mask[index]
            # drop the constant term (all-zero pattern)
            sobol_idx_bool = np.delete(sobol_idx_bool, [0], axis=0)
            n_sobol_available = sobol_idx_bool.shape[0]
            if len(coefficients.shape) == 1:
                n_out = 1
            else:
                n_out = coefficients.shape[1]
            n_coeffs = coefficients.shape[0]
            sobol_poly_idx = np.zeros([n_coeffs, n_sobol_available])
            for i_sobol in range(n_sobol_available):
                sobol_poly_idx[:, i_sobol] = np.all(
                    sobol_mask == sobol_idx_bool[i_sobol], axis=1)
            sobol = np.zeros([n_sobol_available, n_out])
            for i_sobol in range(n_sobol_available):
                sobol[i_sobol] = np.sum(np.square(
                    coefficients[sobol_poly_idx[:, i_sobol] == 1]),
                                        axis=0)
            idx_sort_descend_1st = np.argsort(sobol[:, 0], axis=0)[::-1]
            sobol = sobol[idx_sort_descend_1st, :]
            sobol_idx_bool = sobol_idx_bool[idx_sort_descend_1st]
            sobol_idx = [0 for _ in range(sobol_idx_bool.shape[0])]
            for i_sobol in range(sobol_idx_bool.shape[0]):
                sobol_idx[i_sobol] = np.array(
                    [i for i, x in enumerate(sobol_idx_bool[i_sobol, :]) if x])
            var = ((coefficients[1:]**2).sum(axis=0))
            sobol = sobol / var
            return sobol, sobol_idx, sobol_idx_bool

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {} for k in qoi_cols},
            'sobols_second': {k: {} for k in qoi_cols},
            'sobols_total': {k: {} for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
            'fit': {},
            'Fourier_coefficients': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe

        samples = {k: [] for k in qoi_cols}
        for k in qoi_cols:
            samples[k] = data_frame[k].values

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit, fc = cp.fit_regression(P, nodes, samples[k], retall=1)
            else:
                fit, fc = cp.fit_quadrature(P,
                                            nodes,
                                            weights,
                                            samples[k],
                                            retall=1)
            results['fit'][k] = fit
            results['Fourier_coefficients'][k] = fc

            # Percentiles: 1%, 10%, 50%, 90% and 99%
            P01, P10, P50, P90, P99 = cp.Perc(
                fit, [1, 10, 50, 90, 99], self.sampler.distribution).squeeze()
            results['percentiles'][k] = {
                'p01': P01,
                'p10': P10,
                'p50': P50,
                'p90': P90,
                'p99': P99
            }

            if self.sampling:  # use chaospy's sampling method

                # Statistical moments
                mean = cp.E(fit, self.sampler.distribution)
                var = cp.Var(fit, self.sampler.distribution)
                std = cp.Std(fit, self.sampler.distribution)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
                sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
                sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
                sobols_first_dict = {}
                sobols_second_dict = {}
                sobols_total_dict = {}
                for i, param_name in enumerate(self.sampler.vary.vary_dict):
                    sobols_first_dict[param_name] = sobols_first_narr[i]
                    sobols_second_dict[param_name] = sobols_second_narr[i]
                    sobols_total_dict[param_name] = sobols_total_narr[i]

                results['sobols_first'][k] = sobols_first_dict
                results['sobols_second'][k] = sobols_second_dict
                results['sobols_total'][k] = sobols_total_dict

            else:  # use PCE coefficients

                # Statistical moments
                mean = fc[0]
                var = np.sum(fc[1:]**2, axis=0)
                std = np.sqrt(var)
                results['statistical_moments'][k] = {
                    'mean': mean,
                    'var': var,
                    'std': std
                }

                # Sensitivity Analysis: First, Second and Total Sobol indices
                sobol, sobol_idx, _ = sobols(P, fc)
                varied = list(self.sampler.vary.get_keys())
                S1 = {v: np.zeros(sobol.shape[-1]) for v in varied}
                ST = {v: np.zeros(sobol.shape[-1]) for v in varied}
                S2 = {v: np.zeros((len(varied), sobol.shape[-1])) for v in varied}
                for n, si in enumerate(sobol_idx):
                    if len(si) == 1:
                        v = varied[si[0]]
                        S1[v] = sobol[n]
                    elif len(si) == 2:
                        v1 = varied[si[0]]
                        v2 = varied[si[1]]
                        S2[v1][si[1]] = sobol[n]
                        S2[v2][si[0]] = sobol[n]
                    for i in si:
                        ST[varied[i]] += sobol[n]

                results['sobols_first'][k] = S1
                results['sobols_second'][k] = S2
                results['sobols_total'][k] = ST

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
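The coefficient branch above exploits orthonormality of the basis: the mean is the zeroth Fourier coefficient and the variance is the sum of the squared remaining coefficients. A standalone check of that identity on a toy model (note normed=True is what makes it hold):

import chaospy as cp
import numpy as np

joint = cp.J(cp.Uniform(-1, 1), cp.Uniform(-1, 1))
basis = cp.orth_ttr(3, joint, normed=True)       # orthonormal basis
nodes, weights = cp.generate_quadrature(4, joint, rule='G')
evals = nodes[0] + nodes[1] ** 2                 # toy model
fit, fc = cp.fit_quadrature(basis, nodes, weights, evals, retall=True)
print(fc[0], cp.E(fit, joint))                   # mean computed two ways
print(np.sum(fc[1:] ** 2), cp.Var(fit, joint))   # variance computed two ways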
Example #5
def gpc(dists, distsMeta, wallModel, order, hdf5group, sampleScheme='M'):
    print "\n GeneralizedPolynomialChaos - order {}\n".format(order)

    dim = len(dists)

    expansionOrder = order
    numberOfSamples = 4 * cp.terms(expansionOrder, dim)

    # Sample in independent space
    samples = dists.sample(numberOfSamples, sampleScheme).transpose()
    model = wallModel(distsMeta)

    # Evaluate the model (which is not linear obviously)
    pool = multiprocessing.Pool()
    data = pool.map(model, samples)
    pool.close()
    pool.join()
    C_data = [retval[0] for retval in data]
    a_data = [retval[1] for retval in data]

    C_data = np.array(C_data)
    a_data = np.array(a_data)
    # orthogonal polynomial expansion constructed from the marginals
    orthoPoly = cp.orth_ttr(expansionOrder, dists)

    for data, outputName in zip([C_data, a_data], ['Compliance', 'Area']):

        # Fit the model together in independent space
        C_polynomial = cp.fit_regression(orthoPoly, samples.transpose(), data)

        # save data to dictionary
        plotMeanConfidenceAlpha = 5

        C_mean = cp.E(C_polynomial, dists)
        C_std = cp.Std(C_polynomial, dists)

        Si = cp.Sens_m(C_polynomial, dists)
        STi = cp.Sens_t(C_polynomial, dists)

        C_conf = cp.Perc(
            C_polynomial,
            [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
            dists)

        a = np.linspace(0, 100, 1000)
        da = a[1] - a[0]
        C_cdf = cp.Perc(C_polynomial, a, dists)

        C_pdf = (da / 100.) / (C_cdf[1:] - C_cdf[0:-1])  # percentile step -> probability step
        # Resample to generate full histogram
        samples2 = dists.sample(numberOfSamples * 100, sampleScheme)
        C_data2 = C_polynomial(*samples2).transpose()

        # save in hdf5 file
        solutionDataGroup = hdf5group.create_group(outputName)

        solutionData = {
            'mean': C_mean,
            'std': C_std,
            'confInt': C_conf,
            'Si': Si,
            'STi': STi,
            'cDataGPC': C_data,
            'samplesGPC': samples,
            'cData': C_data2,
            'samples': samples2.transpose(),
            'C_pdf': C_pdf
        }

        for variableName, variableValue in solutionData.items():
            solutionDataGroup.create_dataset(variableName, data=variableValue)
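The PDF block above inverts the percentile function numerically: for equally spaced probability levels, pdf ≈ Δp / Δquantile. A standalone check against a known density (standard normal; cp.Perc works in percent, so the probability step is da/100):

import numpy as np
import chaospy as cp

dist = cp.Normal(0, 1)
a = np.linspace(0.1, 99.9, 1000)        # percentile levels, avoiding the unbounded tails
da = (a[1] - a[0]) / 100.0              # probability step
quantiles = dist.inv(a / 100.0)         # inverse CDF at those levels
pdf_est = da / np.diff(quantiles)
midpoints = 0.5 * (quantiles[1:] + quantiles[:-1])
print(np.max(np.abs(pdf_est - dist.pdf(midpoints))))   # small discretization error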
Example #6
    def test_circuit_model_order_2(self, order_cp: int = 2, bool_plot: bool = False):
        dim = 6
        key = ["R_b1", "R_b2", "R_f", "R_c1", "R_c2", "beta"]
        sobol_indices_quad_constantine = np.array(
            [5.0014515064e-01, 4.1167859899e-01, 7.4006053045e-02, 2.1802568214e-02, 5.1736552010e-08,
             1.4938996627e-05])

        M_constantine = np.array([50, 1e2, 5e2, 1e3, 5e3, 1e4, 5e4])
        sobol_indices_error_constantine = np.transpose(np.array(
            [[6.1114622870e-01, 2.7036543475e-01, 1.5466638009e-01, 1.2812367577e-01, 5.0229955234e-02,
              3.5420048253e-02, 1.4486328386e-02],
             [6.0074404490e-01, 3.2024096457e-01, 1.2296426366e-01, 9.6725945246e-02, 5.3143328175e-02,
              3.2748864016e-02, 1.1486316472e-02],
             [1.1789694228e-01, 4.6150927239e-02, 2.6268692965e-02, 1.8450563871e-02, 8.3656592318e-03,
              5.8550974309e-03, 2.8208921925e-03],
             [3.8013619286e-02, 1.6186288112e-02, 8.9893920304e-03, 6.3911249578e-03, 2.6219049423e-03,
              1.9215077698e-03, 9.5390224479e-04],
             [1.2340746448e-07, 4.8204289233e-08, 3.0780845307e-08, 2.5240466147e-08, 1.0551377101e-08,
              6.9506139894e-09, 3.3372151408e-09],
             [3.4241277775e-05, 1.8074628532e-05, 7.1554659714e-06, 5.0303467614e-06, 2.7593313990e-06,
              1.9529470403e-06, 7.2840043686e-07]]))

        x_lower = np.array([50, 25, 0.5, 1.2, 0.25, 50])  # table 3, constantine-2017
        x_upper = np.array([150, 70, 3.0, 2.5, 1.2, 300])  # table 3, constantine-2017

        circuit_model = CircuitModel()
        n_samples = M_constantine
        iN_vec = n_samples.astype(int)  # 8 if calc_second_order = False, else 14

        no_runs = np.zeros(len(iN_vec))
        indices = np.zeros(shape=(len(iN_vec), dim))
        indices_error = np.zeros(shape=(len(iN_vec), dim))
        idx = 0
        n_trials = 1
        no_runs_averaged = 1

        dist = cp.J(cp.Uniform(x_lower[0], x_upper[0]), cp.Uniform(x_lower[1], x_upper[1]),
                    cp.Uniform(x_lower[2], x_upper[2]), cp.Uniform(x_lower[3], x_upper[3]),
                    cp.Uniform(x_lower[4], x_upper[4]), cp.Uniform(x_lower[5], x_upper[5]))

        for iN in iN_vec:
            tmp_indices_error_av = np.zeros(dim)
            for i_trial in range(0, n_trials):
                seed = int(np.random.rand(1) * 2 ** 32 - 1)
                random_state = RandomState(seed)

                # https://github.com/jonathf/chaospy/issues/81

                dist_samples = dist.sample(iN)  # random samples or abscissas of polynomials ?
                values_f, _, _ = circuit_model.eval_model_averaged(dist_samples, no_runs_averaged,
                                                                   random_state=random_state)
                # Approximation with Chaospy
                poly = cp.orth_ttr(order_cp, dist)
                approx_model = cp.fit_regression(poly, dist_samples, values_f)
                tmp_indices_total = cp.Sens_t(approx_model, dist)

                tmp_error = relative_error_constantine_2017(tmp_indices_total, sobol_indices_quad_constantine)
                tmp_indices_error_av = tmp_indices_error_av + tmp_error
                print(iN)

            indices_error[idx, :] = tmp_indices_error_av / n_trials
            indices[idx, :] = tmp_indices_total
            no_runs[idx] = iN
            idx = idx + 1

        if bool_plot:
            col = np.array(
                [[0, 0.4470, 0.7410], [0.8500, 0.3250, 0.0980], [0.9290, 0.6940, 0.1250], [0.4940, 0.1840, 0.5560],
                 [0.4660, 0.6740, 0.1880], [0.3010, 0.7450, 0.9330]])

            plt.figure()
            for i in range(0, dim):
                plt.semilogx(no_runs, indices[:, i], '.--', label='%s (PC approximation)' % key[i], color=col[i, :])
                plt.semilogx([no_runs[0], max(no_runs)], sobol_indices_quad_constantine[i] * np.ones(2), 'k:',
                             label='Reference values', color=col[i, :])

            plt.xlabel('Number of samples')
            plt.ylabel('Sobol\' total indices')
            plt.legend()

            plt.figure()
            for i in range(0, dim):
                plt.loglog(no_runs, indices_error[:, i], '.--', label=key[i]+'(PC Approximation)', color=col[i, :])
                plt.loglog(M_constantine, sobol_indices_error_constantine[:, i], '.k:', color=col[i, :])

            plt.xlabel('Number of samples')
            plt.ylabel('Relative error (Sobol\' total indices)')
            plt.grid(True, 'minor', 'both')

            plt.legend()
            plt.show()

        # assert that the test ran to completion
        # (the original assert(True, True) tested a tuple, which is always truthy)
        assert True
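The helper relative_error_constantine_2017 is not shown in this excerpt; a plausible stand-in (purely an assumption: elementwise error of the estimated total Sobol indices, normalized by the reference values) would be:

import numpy as np

def relative_error_constantine_2017(indices_approx, indices_ref):
    # hypothetical reconstruction of the missing helper
    indices_approx = np.asarray(indices_approx)
    indices_ref = np.asarray(indices_ref)
    return np.abs(indices_approx - indices_ref) / np.abs(indices_ref)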
for j, n in enumerate(nodes_full.T):
    # each n is a vector with 5 components:
    # n[0] = c, n[1] = k, n[2] = f, n[3] = y0, n[4] = y1
    init_cond = n[3], n[4]
    args = n[0], n[1], n[2], w
    sol_odeint_full[j] = discretize_oscillator_odeint(model, atol, rtol,
                                                      init_cond, args, t,
                                                      t_interest)[-1]

# obtain the gpc approximation
sol_gpc_full_approx = cp.fit_quadrature(P, nodes_full, weights_full,
                                        sol_odeint_full)

# compute first order and total Sobol' indices
first_order_Sobol_ind_full = cp.Sens_m(sol_gpc_full_approx, distr_5D)
total_Sobol_ind_full = cp.Sens_t(sol_gpc_full_approx, distr_5D)
##################################################################

#################### sparse grid computations ####################
# get the sparse quadrature nodes and weights
nodes_sparse, weights_sparse = cp.generate_quadrature(quad_deg_1D,
                                                      distr_5D,
                                                      rule='G',
                                                      sparse=True)
# create vector to save the solution
sol_odeint_sparse = np.zeros(len(nodes_sparse.T))

# perform sparse pseudo-spectral approximation
for j, n in enumerate(nodes_sparse.T):
    # each n is a vector with 5 components:
    # n[0] = c, n[1] = k, n[2] = f, n[3] = y0, n[4] = y1
    init_cond = n[3], n[4]
    args = n[0], n[1], n[2], w
    sol_odeint_sparse[j] = discretize_oscillator_odeint(model, atol, rtol,
                                                        init_cond, args, t,
                                                        t_interest)[-1]
Example #8
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        dict:
            Contains analysis results in sub-dicts with keys -
            ['statistical_moments', 'percentiles', 'sobols_first',
             'sobols_second', 'sobols_total', 'correlation_matrices',
             'output_distributions']
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {} for k in qoi_cols},
            'sobols_second': {k: {} for k in qoi_cols},
            'sobols_total': {k: {} for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get the Polynomial
        P = self.sampler.P

        # Get the PCE variant to use (regression or projection)
        regression = self.sampler.regression

        # Compute nodes (and weights)
        if regression:
            nodes = cp.generate_samples(order=self.sampler.n_samples,
                                        domain=self.sampler.distribution,
                                        rule=self.sampler.rule)
        else:
            nodes, weights = cp.generate_quadrature(
                order=self.sampler.quad_order,
                dist=self.sampler.distribution,
                rule=self.sampler.rule,
                sparse=self.sampler.quad_sparse,
                growth=self.sampler.quad_growth)

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame.run_id.unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame['run_id'] == run_id][k]
                samples[k].append(data.values)

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                if samples[k][0].dtype == object:
                    for i in range(self.sampler.count):
                        samples[k][i] = samples[k][i].astype("float64")
                fit = cp.fit_regression(P, nodes, samples[k], "T")
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles (Pxx)
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            ipar = 0
            i = 0
            for param_name in self.sampler.vary.get_keys():
                j = self.sampler.params_size[ipar]
                sobols_first_dict[param_name] = sobols_first_narr[i:i + j]
                sobols_second_dict[param_name] = sobols_second_narr[i:i + j]
                sobols_total_dict[param_name] = sobols_total_narr[i:i + j]
                i += j
                ipar += 1
            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return results
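For reference, the per-QoI pipeline inside the loop above reduces to a few chaospy calls; a self-contained sketch with a toy quantity of interest:

import chaospy as cp

dist = cp.J(cp.Uniform(0.9, 1.1), cp.Uniform(0.9, 1.1))
P = cp.orth_ttr(2, dist)
nodes, weights = cp.generate_quadrature(3, dist, rule='G')
evals = nodes[0] * nodes[1] ** 2                    # toy QoI evaluated at the nodes
fit = cp.fit_quadrature(P, nodes, weights, evals)
print(cp.E(fit, dist), cp.Std(fit, dist))           # statistical moments
print(cp.Perc(fit, [10, 90], dist))                 # p10 / p90 percentiles
print(cp.Sens_m(fit, dist), cp.Sens_t(fit, dist))   # first-order and total Sobol indices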
        prodata = np.column_stack((qoi_names, metrics.astype(object)))  # np.object is deprecated
        #prodata[:,1].astype(float)


        # saving qoi results
        txt_file.write("Pressure (kPa) = %f \n" % pressure)
        np.savetxt(txt_file, prodata,
                   fmt=['%s', '%.8f', '%.8f', '%.8f', '%.8f', '%.8f', '%.8f'],
                   delimiter="  ",
                   header="qoi, mean, stdv, vari, cova, kurt, skew")
        txt_file.write("\n")
        txt_file.write("\n")
        #np.savetxt(txt_file, metrics,fmt=['%.5f','%.5f', '%.5f','%.5f','%.5f','%.5f'], header = "mean, stdv, vari, cova, kurt, skew")
        # saving qoi raw data
        np.savetxt(hist_file, rawdata,
                   fmt=['%.10f', '%.10f', '%.10f', '%.10f', '%.10f', '%.10f'],
                   delimiter=" ",
                   header="dVol, dLen, dRen, dRep, dThi, dTwi")

        # output sensitivity indices
        sai1 = cp.Sens_m(qoi1_hat, joint_KL)
        sait = cp.Sens_t(qoi1_hat, joint_KL)
        # save sensitivity indices
        sai_file.write("Pressure (kPa) = %f \n" % pressure)
        sai_file.write("First order Sensitivity index \n" )
        #np.savetxt(sai_file, sai1.T, fmt=['%.5f','%.5f', '%.5f','%.5f','%.5f','%.5f'], header = " b_ff, b_xx, b_fx, C, K, fr_epi")
        #np.savetxt(sai_file, sai1.T, fmt=['%.5f','%.5f', '%.5f','%.5f','%.5f','%.5f','%.5f','%.5f','%.5f'], header = " b_ff, b_xx, b_fx, C, K, fr_epi, fr_endo, sr_epi, sr_endo")

        sai_file.write("\n")
        sai_file.write("Total-effect index \n" )
        #np.savetxt(sai_file, sait.T, fmt=['%.5f','%.5f', '%.5f','%.5f','%.5f','%.5f'], header = " b_ff, b_xx, b_fx, C, K, fr_epi")
        #np.savetxt(sai_file, sait.T, fmt=['%.5f','%.5f', '%.5f','%.5f','%.5f','%.5f','%.5f','%.5f','%.5f'], header = " b_ff, b_xx, b_fx, C, K, fr_epi,fr_endo, sr_epi, sr_endo")

        sai_file.write("\n")
        sai_file.write("\n")

        #output displacement/stress metrics
std_reg = cp.Std(gpce_regression, joint_distribution)
std_ps = cp.Std(gpce_quadrature, joint_distribution)

prediction_interval_reg = cp.Perc(gpce_regression, [5, 95], joint_distribution)
prediction_interval_ps = cp.Perc(gpce_quadrature, [5, 95], joint_distribution)

print("Expected values   Standard deviation            90 % Prediction intervals\n")
print(' E_reg |  E_ps     std_reg |  std_ps                pred_reg |  pred_ps')
print('  {} | {}       {:>6.3f} | {:>6.3f}       {} | {}'.format(exp_reg,
                                                                  exp_ps,
                                                                  std_reg,
                                                                  std_ps,
                                                                  ["{:.3f}".format(p) for p in prediction_interval_reg],
                                                                  ["{:.3f}".format(p) for p in prediction_interval_ps]))
# end example uq

# example sens
sensFirst_reg = cp.Sens_m(gpce_regression, joint_distribution)
sensFirst_ps = cp.Sens_m(gpce_quadrature, joint_distribution)

sensT_reg = cp.Sens_t(gpce_regression, joint_distribution)
sensT_ps = cp.Sens_t(gpce_quadrature, joint_distribution)

print("First Order Indices           Total Sensitivity Indices\n")
print('       S_reg |  S_ps                 ST_reg |  ST_ps  \n')
for k, (s_reg, s_ps, st_reg, st_ps) in enumerate(zip(sensFirst_reg, sensFirst_ps, sensT_reg, sensT_ps)):
    print('S_{} : {:>6.3f} | {:>6.3f}         ST_{} : {:>6.3f} | {:>6.3f}'.format(k, s_reg, s_ps, k, st_reg, st_ps))
# end example sens
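The two surrogates compared above come from the two standard gPC construction routes; a compact, self-contained sketch building both for a toy model (all names are local stand-ins for the variables used above):

import chaospy as cp
import numpy as np

joint_distribution = cp.J(cp.Uniform(0, 1), cp.Uniform(0, 1))
basis = cp.orth_ttr(3, joint_distribution)

def model(z1, z2):
    return z1 * np.exp(z2)   # toy model

# regression (point-collocation) variant
samples = joint_distribution.sample(200, rule='R')
gpce_regression = cp.fit_regression(basis, samples, model(*samples))

# pseudo-spectral (quadrature projection) variant
nodes, weights = cp.generate_quadrature(4, joint_distribution, rule='G')
gpce_quadrature = cp.fit_quadrature(basis, nodes, weights, model(*nodes))

exp_reg = cp.E(gpce_regression, joint_distribution)
exp_ps = cp.E(gpce_quadrature, joint_distribution)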

Example #11
plot(time, u(120, 36, 0.3), "y", linewidth=2)
plot(time, cp.E(U_hat, dist), "b", linewidth=2)
#plot(time, cp.Var(U_hat, dist))

plot(time, p_10, "r", linewidth=1)
fill_between(time, p_10, p_90, alpha=0.25)
plot(time, p_90, "g", linewidth=1)


title('Hodgkin-Huxley model for the action potential')
ylabel('Membrane Potential [mV]')
xlabel('Time [ms]')
xlim([0,T])
#ylim([-35,110])
#legend(["Known parameters", "Uncertain parameters","10 percentile", "90 percentile"])
#rc("figure",figsize=[6,4])
savefig("potential.png")


S_Ti = cp.Sens_t(U_hat, dist)

figure()
plot(time, S_Ti[0],linewidth=2)
plot(time, S_Ti[1],linewidth=2)
plot(time, S_Ti[2],linewidth=2)
xlabel("Time")
ylabel("Sensitivity")
legend(["gbar_Na","gbar_K","gbar_l"])

show()
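U_hat, p_10 and p_90 are produced earlier in the original script; a self-contained toy stand-in for the Hodgkin-Huxley solver showing how such time-resolved percentile and sensitivity curves arise:

import numpy as np
import chaospy as cp

time = np.linspace(0, 10, 50)
dist = cp.J(cp.Uniform(0.5, 1.5), cp.Uniform(0.5, 1.5), cp.Uniform(0.5, 1.5))
poly = cp.orth_ttr(2, dist)
samples = dist.sample(100, rule='R')
# toy time-dependent response standing in for the membrane potential
evals = [a * np.sin(time) + b * np.cos(time) + c for a, b, c in samples.T]
U_hat = cp.fit_regression(poly, samples, evals)
p_10 = cp.Perc(U_hat, 10, dist)    # pointwise-in-time 10th percentile
p_90 = cp.Perc(U_hat, 90, dist)
S_Ti = cp.Sens_t(U_hat, dist)      # one total-sensitivity curve per parameter
print(S_Ti.shape)                  # (3, len(time))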
Example #12
    def analyse(self, data_frame=None):
        """Perform PCE analysis on input `data_frame`.

        Parameters
        ----------
        data_frame : :obj:`pandas.DataFrame`
            Input data for analysis.

        Returns
        -------
        PCEAnalysisResults
            Use it to get the Sobol indices and other information.
        """

        if data_frame is None:
            raise RuntimeError("Analysis element needs a data frame to "
                               "analyse")
        elif data_frame.empty:
            raise RuntimeError(
                "No data in data frame passed to analyse element")

        qoi_cols = self.qoi_cols

        results = {
            'statistical_moments': {},
            'percentiles': {},
            'sobols_first': {k: {} for k in qoi_cols},
            'sobols_second': {k: {} for k in qoi_cols},
            'sobols_total': {k: {} for k in qoi_cols},
            'correlation_matrices': {},
            'output_distributions': {},
        }

        # Get sampler information
        P = self.sampler.P
        nodes = self.sampler._nodes
        weights = self.sampler._weights
        regression = self.sampler.regression

        # Extract output values for each quantity of interest from Dataframe
        samples = {k: [] for k in qoi_cols}
        for run_id in data_frame[('run_id', 0)].unique():
            for k in qoi_cols:
                data = data_frame.loc[data_frame[('run_id', 0)] == run_id][k]
                samples[k].append(data.values.flatten())

        # Compute descriptive statistics for each quantity of interest
        for k in qoi_cols:
            # Approximation solver
            if regression:
                fit = cp.fit_regression(P, nodes, samples[k])
            else:
                fit = cp.fit_quadrature(P, nodes, weights, samples[k])

            # Statistical moments
            mean = cp.E(fit, self.sampler.distribution)
            var = cp.Var(fit, self.sampler.distribution)
            std = cp.Std(fit, self.sampler.distribution)
            results['statistical_moments'][k] = {
                'mean': mean,
                'var': var,
                'std': std
            }

            # Percentiles: 10% and 90%
            P10 = cp.Perc(fit, 10, self.sampler.distribution)
            P90 = cp.Perc(fit, 90, self.sampler.distribution)
            results['percentiles'][k] = {'p10': P10, 'p90': P90}

            # Sensitivity Analysis: First, Second and Total Sobol indices
            sobols_first_narr = cp.Sens_m(fit, self.sampler.distribution)
            sobols_second_narr = cp.Sens_m2(fit, self.sampler.distribution)
            sobols_total_narr = cp.Sens_t(fit, self.sampler.distribution)
            sobols_first_dict = {}
            sobols_second_dict = {}
            sobols_total_dict = {}
            for i, param_name in enumerate(self.sampler.vary.vary_dict):
                sobols_first_dict[param_name] = sobols_first_narr[i]
                sobols_second_dict[param_name] = sobols_second_narr[i]
                sobols_total_dict[param_name] = sobols_total_narr[i]

            results['sobols_first'][k] = sobols_first_dict
            results['sobols_second'][k] = sobols_second_dict
            results['sobols_total'][k] = sobols_total_dict

            # Correlation matrix
            results['correlation_matrices'][k] = cp.Corr(
                fit, self.sampler.distribution)

            # Output distributions
            results['output_distributions'][k] = cp.QoI_Dist(
                fit, self.sampler.distribution)

        return PCEAnalysisResults(raw_data=results,
                                  samples=data_frame,
                                  qois=self.qoi_cols,
                                  inputs=list(self.sampler.vary.get_keys()))
Example #13
    # calculate statistics
    plotMeanConfidenceAlpha = 5
    expected_value = cp.E(polynomial_expansion, jpdf)
    variance = cp.Var(polynomial_expansion, jpdf)
    standard_deviation = cp.Std(polynomial_expansion, jpdf)
    prediction_interval = cp.Perc(
        polynomial_expansion,
        [plotMeanConfidenceAlpha / 2., 100 - plotMeanConfidenceAlpha / 2.],
        jpdf)
    print('{:2.5f} | {:2.5f} : {}'.format(
        np.mean(expected_value) * unit_m2_cm2,
        np.mean(standard_deviation) * unit_m2_cm2, name))

    # compute sensitivity indices
    S = cp.Sens_m(polynomial_expansion, jpdf)
    ST = cp.Sens_t(polynomial_expansion, jpdf)

    plt.figure('mean')
    plt.plot(pressure_range * unit_pa_mmhg,
             expected_value * unit_m2_cm2,
             label=name,
             color=color)
    plt.fill_between(pressure_range * unit_pa_mmhg,
                     prediction_interval[0] * unit_m2_cm2,
                     prediction_interval[1] * unit_m2_cm2,
                     alpha=0.3,
                     color=color)
    plt.xlabel('Pressure [mmHg]')
    plt.ylabel('Area [cm2]')
    plt.legend()
    plt.tight_layout()
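The fragment above assumes a pressure-area surrogate and plotting context prepared earlier; a minimal stand-in for that missing setup (the unit factors are standard conversions, the toy area model and parameter ranges are assumptions):

import numpy as np
import chaospy as cp

unit_pa_mmhg = 1.0 / 133.322      # Pa -> mmHg
unit_m2_cm2 = 1.0e4               # m^2 -> cm^2
pressure_range = np.linspace(0.0, 2.0e4, 50)             # Pa
jpdf = cp.J(cp.Uniform(0.9, 1.1), cp.Uniform(0.9, 1.1))  # toy wall parameters
poly = cp.orth_ttr(2, jpdf)
samples = jpdf.sample(100, rule='R')
# hypothetical area model: one area-vs-pressure curve per parameter sample
evals = [1.0e-4 * (1.0 + 0.5 * a * pressure_range / (b * 1.0e4 + pressure_range))
         for a, b in samples.T]
polynomial_expansion = cp.fit_regression(poly, samples, evals)
name, color = 'toy vessel', 'C0'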