Example #1
def UP_MPP(problem, driver):
    # Uses the MPP method for UP    
    
    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.
    
    # ----------------------  Setup  ---------------------------
    methd = 'MPP'
    method = '3'
    
    delta   = driver.MPPdelta
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    sigma   = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------

    G_temp = lambda x: run_list(problem, driver, x)
    Tinv = lambda x: Dist.Dist(stvars, x, inpt)
   
    #USING THE HASOFER-LIND ALGORITHM  
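    # The Hasofer-Lind iteration below searches standard-normal u-space for the
    # most probable point (MPP) on each limit state; beta = ||u|| at convergence
    # is the reliability index, and the PCC values follow from the standard
    # normal CDF of beta.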
    alpha = zeros((otpt, inpt))
    Xl = zeros((otpt, inpt))
    Xu = zeros((otpt, inpt))
    beta1 = zeros(otpt)
    beta2 = zeros(otpt)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    for k in range(otpt):
        print 'Testing output',k+1,'of',otpt    
        if krig == 1:        
            load("dmodel")        
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0]- predictor(x, dmodel)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: predictor(x, dmodel) - limstate[k][1]            
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - predictor(x, dmodel)
                G2 = lambda x: predictor(x, dmodel) - limstate[k][1]
        else:
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0] - G_temp(x)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: G_temp(x) - limstate[k][1]           
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - G_temp(x)            
                G2 = lambda x: G_temp(x) - limstate[k][1]

        I_sigma = ones((inpt))
        if cdist == 1 or cdist == 2:        
            u = zeros((inpt))        
            diff = 1.0
            while diff > .005:
                uk, alp = Hasofer.Hasofer(G, u, Tinv, k, delta, I_sigma, inpt, otpt)
                diff = abs(linalg.norm(uk) - linalg.norm(u))            
                u = uk
                print 'X =', Tinv(u)
            beta1[k] = linalg.norm(u)        
            beta2[k] = inf
            if cdist == 1:            
                Xl[k] = Tinv(u)            
                Xu[k] = ones((inpt)) * inf            
                alpha[k] = alp            
            else:            
                Xl[k] = ones((inpt)) * -inf            
                Xu[k] = Tinv(u)            
                alpha[k] = -alp
            PCC[k] = norm.cdf(beta1[k])
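            # note: the next line repeats alpha[k] = alp, overriding the -alp assigned in the cdist == 2 branch above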
            alpha[k] = alp        
        else:        
            u = zeros((inpt))        
            diff = 1.0
            try:
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G1, u, Tinv, k, delta, I_sigma, inpt, otpt)        
                    diff = abs(linalg.norm(uk) - linalg.norm(u))
                    u = uk
                    print 'X =', Tinv(u)
                beta1[k] = linalg.norm(u)        
                Xl[k] = Tinv(u)
            except ValueError:
                beta1[k] = inf        
                Xl[k] = nan
            u = zeros((inpt))        
            diff = 1.0
            try:        
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G2, u, Tinv, k, delta, I_sigma, inpt, otpt)            
                    diff = abs(linalg.norm(uk) - linalg.norm(u))            
                    u = uk
                    print 'X =', Tinv(u)
                beta2[k] = linalg.norm(u) 
                Xu[k] = Tinv(u)
                alpha[k] = -alp
            except ValueError:
                beta2[k] = inf  
                Xu[k] = nan
                alpha[k] = nan 
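            # two-sided case: PCC is the probability mass between the lower and upper limit states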
            PCC[k] = norm.cdf(beta2[k]) - norm.cdf(-beta1[k])
    
    corr_mat = ones((otpt,otpt))
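    # output-output correlations are approximated by inner products of the unit MPP direction vectors (alpha)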
    for j in range(otpt):    
        for k in range(j,otpt):  
            corr_mat[j, k] = matrix(alpha[j]) * matrix(alpha[k]).transpose()        
            corr_mat[k, j] = corr_mat[j, k]   
     
    # ----------------------  Analyze  ---------------------------
    
    # There's not much to analyze; that already happened.  Just print out the results

    if otpt > 1 and not 0 in PCC[0:otpt]: 
        PCC[otpt] = mvstdnormcdf(-beta1, beta2, corr_mat)  

    # requested feature: replace all "NaNs" with "None"
    Xu=where(isnan(Xu), None, Xu)
    Xl=where(isnan(Xl), None, Xl)
    corr_mat=where(isnan(corr_mat), None, corr_mat)
    
    Results = {'MPPUpperBound':Xu ,'MPPLowerBound':Xl,'CorrelationMatrix': corr_mat, 'PCC': PCC}
    
    return Results
Example #2
def UP_DPCE(driver):
    # Uses the Dakota PCE method for UP

    # ----------------------  Setup  ---------------------------
    methd = 'DPCE'
    method = 6

    inpt = len(driver.inputs)
    krig = driver.krig
    limstate = driver.limstate
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames

# preprocess inputs, as Dakota won't let us specify them one at a time, only in groups according to distribution type
    norm = []
    lnorm = []
    beta = []
    unif = []
    for idx, stvar in enumerate(driver.stvars):
        if stvar.dist == 'NORM':
            norm.append(idx)
        elif stvar.dist == 'LNORM':
            lnorm.append(idx)
        elif stvar.dist == 'BETA':
            beta.append(idx)
        elif stvar.dist == 'UNIF':
            unif.append(idx)

    if len(driver.Wrapper) == 0:
        raise Exception('Must specify a path to the model wrapper file.')

    OldDir = os.getcwd()
    WorkDir = os.path.dirname(driver.Wrapper)
    if WorkDir != "":
        os.chdir(WorkDir)

    with open('parameters.csv', 'w') as f:  # write log file of inputs and outputs
        f.write(','.join(driver.inputNames)+','+','.join(driver.outputNames)+'\n')

    f = open('dakota_pce.in', 'w')
    f.write('strategy\n')  # look at dakota input summary
    f.write('    single_method\n')  # graphics
    f.write('method\n')
    f.write('    polynomial_chaos\n')
    f.write('        quadrature_order')
    for node in driver.nodes:
        f.write(' {0}'.format(node))
    f.write('\n        variance_based_decomp\n')  # univariate_effects
#    f.write('        num_response_levels =')
#    for x in range(otpt):
#        f.write(' 2')
#    f.write('\n')
    f.write('        response_levels')
    for limits in driver.limstate:
        f.write(' {0} {1}'.format(limits[0], limits[1]))
    f.write('\n')
#    f.write('        compute reliabilities\n') #default is probabilities
    f.write('        rng\n')
    f.write('            rnum2\n')
    f.write('        samples 10000\n')
#    f.write('        seed 12347\n')
    f.write('variables')

    if len(norm) > 0:
        f.write('\n    normal_uncertain {0}\n'.format(len(norm)))
#            v[j] = norm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        f.write('        means')
        for idx in norm:
            f.write(' {0}'.format(driver.stvars[idx].param[0]))
        f.write('\n        std_deviations')
        for idx in norm:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        descriptors')
        for idx in norm:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))
    if len(lnorm) > 0:
        f.write('\n    lognormal_uncertain {0}\n'.format(len(lnorm)))
#            v[j] = lognorm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        f.write('        lnuv_means')
        for idx in lnorm:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        lnuv_std_deviations')
        for idx in lnorm:
            f.write(' {0}'.format(exp(driver.stvars[idx].param[0])))
        f.write('\n        lnuv_descriptors')
        for idx in lnorm:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))
    if len(beta) > 0:
        f.write('\n    beta_uncertain {0}\n'.format(len(beta)))
#            v[j] = beta.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        f.write('        alphas')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[0]))
        f.write('\n        betas')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        lower_bounds')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[2]))
        f.write('\n        upper_bounds')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[3]))
        f.write('\n        descriptors')
        for idx in beta:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))
    if len(unif) > 0:
        f.write('\n    uniform_uncertain {0}\n'.format(len(unif)))
        f.write('        lower_bounds')
        for idx in unif:
            f.write(' {0}'.format(driver.stvars[idx].param[0]))
        f.write('\n        upper_bounds')
        for idx in unif:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        descriptors')
        for idx in unif:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))

    f.write('\ninterface\n')
#    f.write('    fork\n')
    f.write('    fork asynchronous evaluation_concurrency = {0}\n'.format(multiprocessing.cpu_count()))
#    f.write('        analysis_drivers \'python {0}\'\n'.format(driver.workflow.__iter__().next().getFile()))
    f.write('        analysis_drivers \'python {0}\'\n'.format(driver.Wrapper))
    f.write('        parameters_file =\'params.in\'\n')
    f.write('        results_file =\'results.out\'\n')
    f.write('        work_directory\n')
    f.write('        local_evaluation_static_scheduling\n')
    f.write('        directory_tag\n')
    f.write('        copy\n')
    f.write('        template_directory =\'{0}\'\n'.format(os.path.dirname(driver.Wrapper)))
    f.write('responses\n')
    f.write('    response_functions {0}\n'.format(otpt))  # number of outputs
    f.write('    no_gradients\n')  # leave as-is?
    f.write('    no_hessians\n')  # leave as-is?
    f.close()
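    # For reference, a sketch of the kind of input deck the writes above produce
    # (two normal inputs, one output, and all values here are illustrative, not
    # from a real run):
    #
    #   strategy
    #       single_method
    #   method
    #       polynomial_chaos
    #           quadrature_order 5 5
    #           variance_based_decomp
    #           response_levels 0.0 10.0
    #           rng
    #               rnum2
    #           samples 10000
    #   variables
    #       normal_uncertain 2
    #           means 1.0 2.0
    #           std_deviations 0.1 0.2
    #           descriptors 'x0' 'x1'
    #   interface
    #       fork asynchronous evaluation_concurrency = 4
    #           analysis_drivers 'python wrapper.py'
    #           parameters_file ='params.in'
    #           results_file ='results.out'
    #   responses
    #       response_functions 1
    #       no_gradients
    #       no_hessians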

    command = 'dakota dakota_pce.in | tee dakota_output.txt'
    print 'Calling "{0}" as a subprocess.'.format(command)
    return_code = subprocess.call(command, shell=True)

    f = open('dakota_output.txt', 'r')
    dakota_output = f.read().split()
    f.close()

    os.chdir(OldDir)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)

    fn_start = dakota_output.index('Mean') + 5  # index of response_fn_1
    for out in range(otpt):
        G_mean[out] = float(dakota_output[fn_start+2])
        G_skew[out] = float(dakota_output[fn_start+7])
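        # the +3 below appears to convert Dakota's reported excess kurtosis to raw kurtosis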
        G_kurt[out] = float(dakota_output[fn_start+8])+3
        fn_start = fn_start + 9  # go to next response function, if any

    DPCC = [0]*otpt
    fn_start = dakota_output.index('(CDF)')  # index of CDF for response_fn_1
    for out in range(otpt):
        DPCC[out] = float(dakota_output[fn_start+19]) - float(dakota_output[fn_start+17])
        fn_start = fn_start + 23  # go to next response function, if any
    print 'Dakota PCCs:', DPCC

    CovarianceMatrix = zeros((otpt, otpt))
    covpos = dakota_output.index('[[')+1
    for y in range(otpt):
        for x in range(otpt):
            CovarianceMatrix[x,y] = float(dakota_output[covpos])
            covpos = covpos + 1

    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    # Calculate the PCC (same post-processing as the FFNI method)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    for k in range(otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] =\
          pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
Example #3
def UP_TS(driver):
    # Uses the TS method for UP

    # ----------------------  Setup  ---------------------------

    methd = 'TS'
    method = '2'

    delta   = driver.TSdelta
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    sigma   = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    rho = identity(inpt)
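    # inputs are treated as uncorrelated: rho is the identity correlation matrix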

    # ----------------------  Model  ---------------------------

    if krig == 1:
        load("dmodel")
        G_mean = predictor(mu, dmodel).cT
        G_k = lambda x: predictor(x, dmodel)
        F1 = taylorseries.taylorseries(G_k, mu, delta*sigma, inpt, otpt)
    else:
#        G_mean = run_model(driver, mu)
#        G = lambda x: run_model(driver, x)
        values = [mu]
        values.extend(taylorseries.pretaylorseries(mu, delta*array(sigma), inpt))
        out = iter(run_list(driver, values))
        G_mean = out.next()
        G = lambda x: out.next()
        F1 = taylorseries.taylorseries(G, mu, delta*array(sigma), inpt, otpt)

    print 'Taylor Series:\n',F1
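    # First-order propagation of the input covariance through the Jacobian F1:
    # covar_m[j, k] = sum over l, m of F1[l, j] * F1[m, k] * sigma[l] * sigma[m] * rho[l, m]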

    covar_m = zeros((otpt, otpt))
    for j in range(otpt):
        for k in range(j,otpt):
            for l in range(inpt):
                for m in range(inpt):
                    covar_m[j, k] = covar_m[j, k] + F1[l, j] * F1[m, k] * sigma[l] * sigma[m] * rho[l, m]
                covar_m[k, j] = covar_m[j, k]

    CovarianceMatrix = covar_m.transpose()
    print 'Covariance Matrix:\n',CovarianceMatrix
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': zeros((otpt, 1)), 'Kurtosis': 3 * ones((otpt, 1))}

    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
Example #4
def UP_PCE(driver):
    # Uses the PCE method for UP

    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.

    # ----------------------  Setup  ---------------------------
    methd = "PCE"
    method = 6

    inpt = len(driver.inputs)
    krig = driver.krig
    limstate = driver.limstate
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    stvars = driver.stvars
    numbins = driver.numbins

    # current settings for these two vars
    ii = 0
    jj = 0

    # ----------------------  Model  ---------------------------
    mu_g = zeros(inpt)
    sigma_g = ones(inpt)
    node_t = zeros((inpt, nodes[0]))
    weight_t = zeros((inpt, nodes[0]))
    for i in range(inpt):
        node_t[i], weight_t[i] = gaussquad.gaussquad(nodes[i], "NORM", mu_g[i], sigma_g[i])

    x = []
    for i in range(inpt):
        x.append(symbols("x" + str(i)))
    x = array(x)
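    # fullfact enumerates every combination of 1-D node indices, i.e. the full tensor-product quadrature grid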

    j = fullfact(nodes)

    pts = shape(j)[0]
    node = zeros((pts, inpt))
    wj = zeros((pts, inpt))
    for y in range(pts):
        for i in range(inpt):
            node[y][i] = node_t[i][j[y][i]]
            wj[y][i] = weight_t[i][j[y][i]]
    weight = prod(wj, 1)

    P = zeros(order)
    P[0] = 1
    for p in range(1, order):
        term2 = 0
        for s in range(1, p + 1):
            term1 = 1
            for r in range(s):
                term1 = term1 * (inpt + r)
            term2 = term2 + (1.0 / int(scipy.misc.factorial(s))) * term1
        if p == 1:
            P[p] = term2
        else:
            P[p] = term2 - sum(P[range(1, p + 1)])
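    # P[p] works out to C(inpt + p - 1, p), the number of multivariate Hermite basis terms of exact total order p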

    G_s = zeros((pts, otpt))
    if krig == 1:
        t = strcat("SS_K", num2str(ii), num2str(jj))
        load(char(t))
        for j in range(pts):
            # Rosenblatt Transformation
            T_L = Dist.Dist(stvars, node[j], inpt)
            G_s[j] = predictor(T_L, dmodel)
    else:
        values = []
        for j in range(pts):
            # Rosenblatt Transformation
            #            print 'Running simulation',j+1,'of',pts
            T_L = Dist.Dist(stvars, node[j], inpt)
            #            G_s[j] = run_model(driver, T_L)
            values.append(T_L)
        G_s = run_list(driver, values)
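    # Galerkin projection by quadrature: bn accumulates <G, h> and bd accumulates
    # <h, h> for each Hermite basis term h, so b = bn / bd gives the PCE coefficients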

    indx = 0
    bn = zeros((sum(P), otpt))
    bd = zeros(sum(P))
    for k in range(order):
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):
            for i in range(pts):
                L = node[i]
                if k == 0:
                    bn[indx] = bn[indx] + weight[i] * G_s[i]
                    bd[indx] = bd[indx] + weight[i]
                else:

                    h, h_sym = hermite.hermite(k, vec[j], L, x)
                    bn[indx] += weight[i] * G_s[i] * h
                    bd[indx] += weight[i] * (h ** 2)
            indx += 1

    b = zeros((sum(P), otpt))
    for l in range(otpt):
        b[:, l] = bn[:, l] / bd

    indx = 0
    U_sum = 0
    for k in range(order):
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):
            if k == 0:
                U_sum = b[0]
            else:
                h, h_sym = hermite.hermite(k, vec[j], L, x)
                U_sum = U_sum + b[indx] * N(h_sym)
            indx += 1
    U = U_sum
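    # U holds one symbolic PCE surrogate polynomial per output, in the standard-normal variables x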

    U_s = zeros((pts, otpt))
    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt, otpt))
    for i in range(pts):
        for k in range(otpt):
            U_s[i][k] = U[k].subs(dict(zip(x, node[i])))
    for k in range(otpt):
        #        G_mean[k] = sum(matrix(weight) * matrix(U_s[:, k]).transpose())
        G_mean[k] = sum(weight * U_s[:, k])
    for k in range(otpt):
        for j in range(k, otpt):
            covar_m[k, j] = sum(weight * (U_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5
        G_kurt[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2

    CovarianceMatrix = covar_m.transpose()
    Moments = {"Mean": G_mean, "Variance": diag(CovarianceMatrix), "Skewness": G_skew, "Kurtosis": G_kurt}

    # ----------------------  Analyze  ---------------------------

    if any(Moments["Variance"] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    # Calculate the PCC (same post-processing as the FFNI method)
    if otpt > 1:
        PCC = [0] * (otpt + 1)
    else:
        PCC = [0] * otpt
    dtype = [0] * otpt
    Inv1 = [0] * otpt
    Inv2 = [0] * otpt
    m1 = [0] * otpt
    m2 = [0] * otpt
    a1 = [0] * otpt
    a2 = [0] * otpt
    alph = [0] * otpt
    beta = [0] * otpt
    lo = [0] * otpt
    hi = [0] * otpt
    C_Y_pdf = [0] * otpt

    if any(Moments["Variance"] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(otpt):
        PCC[k], dtype[k], Inv1[k], m1[k], m2[k], a1[k], a2[k], alph[k], beta[k], lo[k], hi[k] = pearscdf.pearscdf(
            limstate[k],
            Moments["Mean"][k],
            sqrt(CovarianceMatrix[k, k]),
            Moments["Skewness"][k],
            Moments["Kurtosis"][k],
            methd,
            k,
            output,
        )
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(
                dtype[k], limstate[k], Moments["Mean"][k], Moments["Variance"][k], numbins
            )

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid="ignore")  # ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat, sigma_mat.transpose())
    Distribution = {"PearsonType": dtype, "m1": m1, "m2": m2, "a1": a1, "a2": a2, "Complexity": C_Y_pdf}
    Plotting = {"alpha": alph, "beta": beta, "lo": lo, "hi": hi}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt) - inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {
        "Moments": Moments,
        "CorrelationMatrix": CorrelationMatrix,
        "CovarianceMatrix": CovarianceMatrix,
        "Distribution": Distribution,
        "Plotting": Plotting,
        "PCC": PCC,
    }

    return Results
Example #5
def UP_UDR(driver):
# Uses the UDR method for UP

    methd = 'UDR'
    method = 5

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,weight = params.params(method, nodes, inpt, stvars)
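    # UDR varies one input at a time over its quadrature nodes (all other inputs
    # held at their means) and recombines the 1-D results into approximate joint moments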

# ----------------------  Model  ---------------------------
    # set_printoptions(precision=4)
    # set_printoptions(suppress=True)

    x = kron(mu, ones((inpt * nodes[0], 1)))
    for ii in range(0,inpt):
        k = ii * nodes[ii]
        l = (ii+1) * nodes[ii]
        x[k:l, ii] = node[ii,:]

#    G_mu = run_model(driver, mu)
    values = [mu]

    G_s = zeros((inpt,max(nodes),otpt))
    for k,inputname in enumerate(driver._json_tree['Configurations']['Configuration']['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        if krig == 1:
            load('dmodel')
            for j in  range(0,nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
                G_s[k, j] = predictor(X, dmodel)
        else:
            for j in  range(0,nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
#                print 'Running simulation on node',j,'of input',inputname['Name']
#                G_s[k, j] = run_model(driver, X)
                values.append(X)

    out = iter(run_list(driver, values))
    G_mu = out.next()
    for k,inputname in enumerate(driver._json_tree['Configurations']['Configuration']['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        for j in  range(0,nodes[k]):
            G_s[k, j] = out.next()

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    G_sigma = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    gs = zeros(otpt)
    gk = zeros(otpt)
    moms = []
    for l in range(0,otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_mean[l] = moment(1, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
    for l in range(0,otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_sigma[l] = moment(2, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        gs[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_skew[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]) / G_sigma[l] ** 1.5
        gk[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_kurt[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]) / G_sigma[l] ** 2
        for j in range(l,otpt):
            covar_m[l, j] = moment2(1, inpt, nodes[0], weight, G_s[:, :, l], l, G_s[:, :, j], j, G_mu, G_mean)
            covar_m[j, l] = covar_m[l, j]

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC (same post-processing as the FFNI method)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
Example #6
def UP_DPCE(problem, driver):
    # Uses the Dakota PCE method for UP

    # ----------------------  Setup  ---------------------------
    methd = 'DPCE'
    method = 6

    inpt = len(driver.inputs)
    krig = driver.krig
    limstate = driver.limstate
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames

# preprocess inputs, as Dakota won't let us specify them one at a time, only in groups according to distribution type
    norm = []
    lnorm = []
    beta = []
    unif = []
    for idx, stvar in enumerate(driver.stvars):
        if stvar.dist == 'NORM':
            norm.append(idx)
        elif stvar.dist == 'LNORM':
            lnorm.append(idx)
        elif stvar.dist == 'BETA':
            beta.append(idx)
        elif stvar.dist == 'UNIF':
            unif.append(idx)

    if len(driver.Wrapper) == 0:
        raise Exception('Must specify a path to the model wrapper file.')

    OldDir = os.getcwd()
    WorkDir = os.path.dirname(driver.Wrapper)
    if WorkDir != "":
        os.chdir(WorkDir)

    with open('parameters.csv', 'w') as f:  # write log file of inputs and outputs
        f.write(','.join(driver.inputNames)+','+','.join(driver.outputNames)+'\n')

    f = open('dakota_pce.in', 'w')
    f.write('strategy\n')  # look at dakota input summary
    f.write('    single_method\n')  # graphics
    f.write('method\n')
    f.write('    polynomial_chaos\n')
    f.write('        quadrature_order')
    for node in driver.nodes:
        f.write(' {0}'.format(node))
    f.write('\n        variance_based_decomp\n')  # univariate_effects
#    f.write('        num_response_levels =')
#    for x in range(otpt):
#        f.write(' 2')
#    f.write('\n')
    f.write('        response_levels')
    for limits in driver.limstate:
        f.write(' {0} {1}'.format(limits[0], limits[1]))
    f.write('\n')
#    f.write('        compute reliabilities\n') #default is probabilities
    f.write('        rng\n')
    f.write('            rnum2\n')
    f.write('        samples 10000\n')
#    f.write('        seed 12347\n')
    f.write('variables')

    if len(norm) > 0:
        f.write('\n    normal_uncertain {0}\n'.format(len(norm)))
#            v[j] = norm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        f.write('        means')
        for idx in norm:
            f.write(' {0}'.format(driver.stvars[idx].param[0]))
        f.write('\n        std_deviations')
        for idx in norm:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        descriptors')
        for idx in norm:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))
    if len(lnorm) > 0:
        f.write('\n    lognormal_uncertain {0}\n'.format(len(lnorm)))
#            v[j] = lognorm.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        f.write('        lnuv_means')
        for idx in lnorm:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        lnuv_std_deviations')
        for idx in lnorm:
            f.write(' {0}'.format(exp(driver.stvars[idx].param[0])))
        f.write('\n        lnuv_descriptors')
        for idx in lnorm:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))
    if len(beta) > 0:
        f.write('\n    beta_uncertain {0}\n'.format(len(beta)))
#            v[j] = beta.ppf(norm.cdf(value[j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        f.write('        alphas')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[0]))
        f.write('\n        betas')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        lower_bounds')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[2]))
        f.write('\n        upper_bounds')
        for idx in beta:
            f.write(' {0}'.format(driver.stvars[idx].param[3]))
        f.write('\n        descriptors')
        for idx in beta:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))
    if len(unif) > 0:
        f.write('\n    uniform_uncertain {0}\n'.format(len(unif)))
        f.write('        lower_bounds')
        for idx in unif:
            f.write(' {0}'.format(driver.stvars[idx].param[0]))
        f.write('\n        upper_bounds')
        for idx in unif:
            f.write(' {0}'.format(driver.stvars[idx].param[1]))
        f.write('\n        descriptors')
        for idx in unif:
            f.write(' \'{0}\''.format(driver.stvars[idx].name))

    f.write('\ninterface\n')
#    f.write('    fork\n')
    f.write('    fork asynchronous evaluation_concurrency = {0}\n'.format(multiprocessing.cpu_count()))
#    f.write('        analysis_drivers \'python {0}\'\n'.format(driver.workflow.__iter__().next().getFile()))
    f.write('        analysis_drivers \'python {0}\'\n'.format(driver.Wrapper))
    f.write('        parameters_file =\'params.in\'\n')
    f.write('        results_file =\'results.out\'\n')
    f.write('        work_directory\n')
    f.write('        local_evaluation_static_scheduling\n')
    f.write('        directory_tag\n')
    f.write('        copy\n')
    f.write('        template_directory =\'{0}\'\n'.format(os.path.dirname(driver.Wrapper)))
    f.write('responses\n')
    f.write('    response_functions {0}\n'.format(otpt))  # number of outputs
    f.write('    no_gradients\n')  # leave as-is?
    f.write('    no_hessians\n')  # leave as-is?
    f.close()

    command = 'dakota dakota_pce.in | tee dakota_output.txt'
    print 'Calling "{0}" as a subprocess.'.format(command)
    return_code = subprocess.call(command, shell=True)

    f = open('dakota_output.txt', 'r')
    dakota_output = f.read().split()
    f.close()

    os.chdir(OldDir)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)

    fn_start = dakota_output.index('Mean') + 5  # index of response_fn_1
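    # the fixed token offsets used below assume the standard layout of Dakota's console output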
    for out in range(otpt):
        G_mean[out] = float(dakota_output[fn_start+2])
        G_skew[out] = float(dakota_output[fn_start+7])
        G_kurt[out] = float(dakota_output[fn_start+8])+3
        fn_start = fn_start + 9  # go to next response function, if any

    DPCC = [0]*otpt
    fn_start = dakota_output.index('(CDF)')  # index of CDF for response_fn_1
    for out in range(otpt):
        DPCC[out] = float(dakota_output[fn_start+19]) - float(dakota_output[fn_start+17])
        fn_start = fn_start + 23  # go to next response function, if any
    print 'Dakota PCCs:', DPCC

    CovarianceMatrix = zeros((otpt, otpt))
    covpos = dakota_output.index('[[')+1
    for y in range(otpt):
        for x in range(otpt):
            CovarianceMatrix[x,y] = float(dakota_output[covpos])
            covpos = covpos + 1

    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    # Calculate the PCC (same post-processing as the FFNI method)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    for k in range(otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] =\
          pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
Example #7
def UP_PCE(problem, driver):
    # Uses the PCE method for UP
    
    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.
    
    # ----------------------  Setup  ---------------------------
    methd = 'PCE'
    method = 6

    inpt    = len(driver.inputs)
    krig    = driver.krig
    limstate= driver.limstate
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    stvars  = driver.stvars
    numbins = driver.numbins
    
    #current settings for these two vars
    ii = 0
    jj = 0
    
    # ----------------------  Model  ---------------------------
    mu_g = zeros(inpt)
    sigma_g = ones(inpt)
    node_t = zeros((inpt,nodes[0]))
    weight_t = zeros((inpt,nodes[0]))
    for i in range(inpt):
        node_t[i], weight_t[i] = gaussquad.gaussquad(nodes[i], 'NORM', mu_g[i], sigma_g[i])

    x=[]
    for i in range(inpt):
        x.append(symbols('x'+str(i)))
    x=array(x)

    j = fullfact(nodes)

    pts = shape(j)[0]
    node=zeros((pts,inpt))
    wj=zeros((pts,inpt))
    for y in range(pts):
        for i in range(inpt):
            node[y][i] = node_t[i][j[y][i]]
            wj[y][i] = weight_t[i][j[y][i]]
    weight = prod(wj, 1)
    
    P = zeros(order)
    P[0] = 1
    for p in range(1,order):    
        term2 = 0
        for s in range(1,p+1):
            term1 = 1        
            for r in range(s):
                term1 = term1 * (inpt + r)
            term2 = term2 + (1.0 / int(scipy.misc.factorial(s))) * term1
        if p == 1:        
            P[p] = term2        
        else:        
            P[p] = term2 - sum(P[range(1,p+1)])
    
    G_s = zeros((pts, otpt))
    if krig == 1:    
        t = strcat('SS_K', num2str(ii), num2str(jj))    
        load(char(t))    
        for j in range(pts):       
            #Rosenblatt Transformation
            T_L = Dist.Dist(stvars, node[j], inpt)       
            G_s[j] = predictor(T_L, dmodel)
    else:
        values = []    
        for j in range(pts):     
            #Rosenblatt Transformation
#            print 'Running simulation',j+1,'of',pts
            T_L = Dist.Dist(stvars, node[j], inpt)         
#            G_s[j] = run_model(driver, T_L)
            values.append(T_L)
        G_s = run_list(problem, driver, values)

    indx = 0
    bn = zeros((sum(P), otpt))
    bd = zeros(sum(P))
    for k in range(order):    
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):   
            for i in range(pts):
                L=node[i]       
                if k == 0:
                    bn[indx] = bn[indx] + weight[i] * G_s[i]
                    bd[indx] = bd[indx] + weight[i]
                else:                

                    h, h_sym = hermite.hermite(k, vec[j], L, x)
                    bn[indx] += weight[i] * G_s[i] * h          
                    bd[indx] += weight[i] * (h ** 2)
            indx+=1

    b = zeros((sum(P),otpt))
    for l in range(otpt):    
        b[:, l] = bn[:, l] / bd
    
    indx = 0
    U_sum = 0
    for k in range(order):    
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):        
            if k == 0:            
                U_sum = b[0]
            else:       
                h, h_sym = hermite.hermite(k, vec[j], L, x)
                U_sum = U_sum + b[indx] * N(h_sym)
            indx+=1
    U = U_sum
    
    U_s = zeros((pts,otpt))
    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    for i in range(pts):
        for k in range(otpt):
            U_s[i][k] = U[k].subs(dict(zip(x, node[i])))
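    # the surrogate has been evaluated at each quadrature node; note that the covariance below mixes surrogate values (U_s) with raw model outputs (G_s)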
    for k in range(otpt):
#        G_mean[k] = sum(matrix(weight) * matrix(U_s[:, k]).transpose())
        G_mean[k] = sum(weight * U_s[:, k])
    for k in range(otpt):    
        for j in range(k,otpt):    
            covar_m[k, j] = sum(weight * (U_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))        
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5    
        G_kurt[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2
    
    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}
    
    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    # Calculate the PCC (same post-processing as the FFNI method)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    for k in range(otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] =\
          pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
Example #8
def UP_FFNI(driver):

    # Uses the FFNI method for UP

    # ----------------------  Setup  ---------------------------
    methd = 'FFNI'
    method = 4

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,w = params.params(method, nodes, inpt, stvars)    # Do I need to transpose these matrices?
    #[quadpts] = params(method, nodes, inpt, stvars)

    # ----------------------  Model  ---------------------------

    # Create full factorial experiment from individual nodes and weights
    j = fullfact(nodes)
    pts = shape(j)[0]
    x=zeros((pts,inpt))
    wj=zeros((pts,inpt))
    for y in range(pts):
        for i in range(inpt):
            x[y][i] = node[i][j[y][i]]
            wj[y][i] = w[i][j[y][i]]
    weight = prod(wj, 1)
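    # the joint weight of each full-factorial point is the product of its per-input 1-D weights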

    if krig == 1:
        load("dmodel")
        G_s = predictor(x, dmodel)
    else:
#        G_s = zeros((pts, otpt))
#        for i in range(pts):
#            print 'Running simulation',i+1,'of',pts
#            G_s[i] = run_model(driver, x[i])
            # G_s[i] = modelica.RunModelica(x[i], modelname, properties)
        G_s = run_list(driver, x)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    for k in range(otpt):
        G_mean[k] = sum(weight * G_s[:, k])
    for k in range(otpt):
        for j in range(otpt):
            covar_m[k, j] = sum(weight * (G_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5
        G_kurt[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
Example #9
def UP_TS(problem, driver):
    # Uses the TS method for UP

    # ----------------------  Setup  ---------------------------

    methd = 'TS'
    method = '2'

    delta = driver.TSdelta
    mu = [inp.get_I_mu() for inp in driver.inputs]
    sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    numbins = driver.numbins
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    rho = identity(inpt)

    # ----------------------  Model  ---------------------------

    if krig == 1:
        load("dmodel")
        G_mean = predictor(mu, dmodel).cT
        G_k = lambda x: predictor(x, dmodel)
        F1 = taylorseries.taylorseries(G_k, mu, delta * sigma, inpt, otpt)
    else:
        #        G_mean = run_model(driver, mu)
        #        G = lambda x: run_model(driver, x)
        values = [mu]
        values.extend(
            taylorseries.pretaylorseries(mu, delta * array(sigma), inpt))
        out = iter(run_list(problem, driver, values))
        G_mean = out.next()
        G = lambda x: out.next()
        F1 = taylorseries.taylorseries(G, mu, delta * array(sigma), inpt, otpt)

    print 'Taylor Series:\n', F1

    covar_m = zeros((otpt, otpt))
    for j in range(otpt):
        for k in range(j, otpt):
            for l in range(inpt):
                for m in range(inpt):
                    covar_m[j, k] = covar_m[j, k] + F1[l, j] * F1[
                        m, k] * sigma[l] * sigma[m] * rho[l, m]
                covar_m[k, j] = covar_m[j, k]

    CovarianceMatrix = covar_m.transpose()
    print 'Covariance Matrix:\n', CovarianceMatrix
    Moments = {
        'Mean': G_mean,
        'Variance': diag(CovarianceMatrix),
        'Skewness': zeros((otpt, 1)),
        'Kurtosis': 3 * ones((otpt, 1))
    }

    if otpt > 1:
        PCC = [0] * (otpt + 1)
    else:
        PCC = [0] * otpt
    dtype = [0] * otpt
    Inv1 = [0] * otpt
    Inv2 = [0] * otpt
    m1 = [0] * otpt
    m2 = [0] * otpt
    a1 = [0] * otpt
    a2 = [0] * otpt
    alph = [0] * otpt
    beta = [0] * otpt
    lo = [0] * otpt
    hi = [0] * otpt
    C_Y_pdf = [0] * otpt

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance'] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0, otpt):
        PCC[k], dtype[k], Inv1[k], m1[k], m2[k], a1[k], a2[k], alph[k], beta[
            k], lo[k], hi[k] = pearscdf.pearscdf(limstate[k],
                                                 Moments['Mean'][k],
                                                 sqrt(CovarianceMatrix[k, k]),
                                                 Moments['Skewness'][k],
                                                 Moments['Kurtosis'][k], methd,
                                                 k, output)
        if dtype[k] != None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(
                dtype[k], limstate[k], Moments['Mean'][k],
                Moments['Variance'][k], numbins)

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')  # ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())
    Distribution = {
        'PearsonType': dtype,
        'm1': m1,
        'm2': m2,
        'a1': a1,
        'a2': a2,
        'Complexity': C_Y_pdf
    }
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None,
                              CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt) - inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {
        'Moments': Moments,
        'CorrelationMatrix': CorrelationMatrix,
        'CovarianceMatrix': CovarianceMatrix,
        'Distribution': Distribution,
        'Plotting': Plotting,
        'PCC': PCC
    }

    return Results
Example #10
def UP_MPP(driver):
    # Uses the MPP method for UP    
    
    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.
    
    # ----------------------  Setup  ---------------------------
    methd = 'MPP'
    method = '3'
    
    delta   = driver.MPPdelta
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    sigma   = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------

    G_temp = lambda x: run_list(driver, x)
    Tinv = lambda x: Dist.Dist(stvars, x, inpt)
   
    #USING THE HASOFER-LIND ALGORITHM  
    alpha = zeros((otpt, inpt))
    Xl = zeros((otpt, inpt))
    Xu = zeros((otpt, inpt))
    beta1 = zeros(otpt)
    beta2 = zeros(otpt)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    for k in range(otpt):
        print 'Testing output',k+1,'of',otpt    
        if krig == 1:        
            load("dmodel")        
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0]- predictor(x, dmodel)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: predictor(x, dmodel) - limstate[k][1]            
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - predictor(x, dmodel)
                G2 = lambda x: predictor(x, dmodel) - limstate[k][1]
        else:
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0] - G_temp(x)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: G_temp(x) - limstate[k][1]           
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - G_temp(x)            
                G2 = lambda x: G_temp(x) - limstate[k][1]

        I_sigma = ones(inpt)
        if cdist == 1 or cdist == 2:        
            u = zeros((inpt))        
            diff = 1.0
            while diff > .005:
                uk, alp = Hasofer.Hasofer(G, u, Tinv, k, delta, I_sigma, inpt, otpt)
                diff = abs(linalg.norm(uk) - linalg.norm(u))            
                u = uk
                print 'X =', Tinv(u)
            beta1[k] = linalg.norm(u)        
            beta2[k] = inf
            if cdist == 1:            
                Xl[k] = Tinv(u)            
                Xu[k] = ones((inpt)) * inf            
                alpha[k] = alp            
            else:            
                Xl[k] = ones((inpt)) * -inf            
                Xu[k] = Tinv(u)            
                alpha[k] = -alp
            # One-sided limit state: PCC = Phi(beta1).
            PCC[k] = norm.cdf(beta1[k])
        else:        
            u = zeros((inpt))        
            diff = 1.0
            try:
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G1, u, Tinv, k, delta, I_sigma, inpt, otpt)        
                    diff = abs(linalg.norm(uk) - linalg.norm(u))
                    u = uk
                    print 'X =', Tinv(u)
                beta1[k] = linalg.norm(u)        
                Xl[k] = Tinv(u)
            except ValueError:
                beta1[k] = inf        
                Xl[k] = nan
            u = zeros((inpt))        
            diff = 1.0
            try:        
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G2, u, Tinv, k, delta, I_sigma, inpt, otpt)            
                    diff = abs(linalg.norm(uk) - linalg.norm(u))            
                    u = uk
                    print 'X =', Tinv(u)
                beta2[k] = linalg.norm(u) 
                Xu[k] = Tinv(u)
                alpha[k] = -alp
            except ValueError:
                beta2[k] = inf  
                Xu[k] = nan
                alpha[k] = nan 
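            # Two-sided limit state: PCC = Phi(beta2) - Phi(-beta1), the
            # probability mass between the two design points.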
            PCC[k] = norm.cdf(beta2[k]) - norm.cdf(-beta1[k])
    
    # FORM approximation: the correlation between two linearized limit states
    # is the dot product of their unit sensitivity (alpha) vectors.
    corr_mat = ones((otpt, otpt))
    for j in range(otpt):
        for k in range(j, otpt):
            corr_mat[j, k] = dot(alpha[j], alpha[k])
            corr_mat[k, j] = corr_mat[j, k]
     
    # ----------------------  Analyze  ---------------------------
    
    # There's not much left to analyze; that happened in the loop above.
    # Just assemble the results.

    if otpt > 1 and 0 not in PCC[0:otpt]:
        PCC[otpt] = mvstdnormcdf(-beta1, beta2, corr_mat)  

    # requested feature: replace all "NaNs" with "None"
    Xu = where(isnan(Xu), None, Xu)
    Xl = where(isnan(Xl), None, Xl)
    corr_mat = where(isnan(corr_mat), None, corr_mat)
    
    Results = {'MPPUpperBound': Xu, 'MPPLowerBound': Xl, 'CorrelationMatrix': corr_mat, 'PCC': PCC}
    
    return Results
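
# A minimal standalone sketch of the Hasofer-Lind (HL-RF) iteration that the
# loop above delegates to Hasofer.Hasofer: starting from u = 0 in standard
# normal space, repeatedly project onto the linearized limit state until the
# design point stops moving; beta is then ||u*||.  This toy version uses
# finite-difference gradients and is an illustration, not the project's module.
from numpy import zeros, dot, linalg

def hlrf(G, n, h=1e-6, tol=5e-3, maxit=50):
    u = zeros(n)
    for _ in range(maxit):
        g0 = G(u)
        grad = zeros(n)
        for i in range(n):                    # forward-difference gradient
            du = zeros(n)
            du[i] = h
            grad[i] = (G(u + du) - g0) / h
        u_new = (dot(grad, u) - g0) / dot(grad, grad) * grad
        if abs(linalg.norm(u_new) - linalg.norm(u)) <= tol:
            u = u_new
            break
        u = u_new
    return u, grad / linalg.norm(grad)        # design point and alpha vector

G = lambda u: 3.0 - (u[0] + u[1]) / 2.0**0.5  # linear limit state, beta = 3
u_star, alpha = hlrf(G, 2)
print(linalg.norm(u_star))                    # ~3.0
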
Example #11
def UP_FFNI(problem, driver):

    # Uses the FFNI method for UP

    # ----------------------  Setup  ---------------------------
    methd = 'FFNI'
    method = 4

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,w = params.params(method, nodes, inpt, stvars)    # Do I need to transpose these matrices?
    #[quadpts] = params(method, nodes, inpt, stvars)

    # ----------------------  Model  ---------------------------

    # Create full factorial experiment from individual nodes and weights
    j = fullfact(nodes)
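    # j is a (pts x inpt) index matrix: one row per combination of the 1-D
    # node indices, i.e. the full factorial grid over all inputs.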
    pts = shape(j)[0]
    x=zeros((pts,inpt))
    wj=zeros((pts,inpt))
    for y in range(pts):
        for i in range(inpt):
            x[y][i] = node[i][j[y][i]]
            wj[y][i] = w[i][j[y][i]]
    weight = prod(wj, 1)
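    # weight[y] is the tensor-product quadrature weight of grid point y:
    # the product of the 1-D weights along each input dimension.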

    if krig == 1:
        load("dmodel")
        G_s = predictor(x, dmodel)
    else:
        # G_s = zeros((pts, otpt))
        # for i in range(pts):
        #     print 'Running simulation', i+1, 'of', pts
        #     G_s[i] = run_model(driver, x[i])
        #     # G_s[i] = modelica.RunModelica(x[i], modelname, properties)
        G_s = run_list(problem, driver, x)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    for k in range(otpt):
        G_mean[k] = sum(weight * G_s[:, k])
    for k in range(otpt):
        for j in range(otpt):
            covar_m[k, j] = sum(weight * (G_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5
        G_kurt[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        (PCC[k], dtype[k], Inv1[k], m1[k], m2[k], a1[k], a2[k], alph[k],
         beta[k], lo[k], hi[k]) = pearscdf.pearscdf(
            limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]),
            Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(
                dtype[k], limstate[k], Moments['Mean'][k], Moments['Variance'][k], numbins)

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')  # 0/0 from zero-variance outputs just gives 'nan'
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat, sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and 0 not in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
               'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution,
               'Plotting': Plotting, 'PCC': PCC}

    return Results
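
# A minimal standalone sketch of the tensor-product ("full factorial")
# quadrature idea behind UP_FFNI: combine 1-D Gauss-Hermite rules into a
# multi-D rule and take weighted moments of a test function.  Illustrative
# only -- the routine above gets its nodes and weights from params.params.
from numpy import sqrt, pi
from numpy.polynomial.hermite import hermgauss
from itertools import product

n = 3                           # nodes per input
z, w = hermgauss(n)
node = sqrt(2.0) * z            # abscissas for a standard normal input
wt = w / sqrt(pi)               # matching weights (they sum to 1)

mean = 0.0
for i, j in product(range(n), repeat=2):
    mean += wt[i] * wt[j] * (node[i]**2 + node[j]**2)
print(mean)                     # E[x1^2 + x2^2] = 2 for x1, x2 ~ N(0, 1)
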
Example #12
def UP_UDR(problem, driver):
    # Uses the UDR method for UP

    methd = 'UDR'
    method = 5

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    numbins = driver.numbins
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    node, weight = params.params(method, nodes, inpt, stvars)

    # ----------------------  Model  ---------------------------
    # set_printoptions(precision=4)
    # set_printoptions(suppress=True)

    x = kron(mu, ones((inpt * nodes[0], 1)))
    for ii in range(0, inpt):
        k = ii * nodes[ii]
        l = (ii + 1) * nodes[ii]
        x[k:l, ii] = node[ii, :]
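    # Each block of nodes[ii] rows perturbs input ii along its quadrature
    # nodes while every other input stays at its mean (univariate slices).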


    # G_mu = run_model(driver, mu)
    values = [mu]

    G_s = zeros((inpt, max(nodes), otpt))
    dists = (driver._json_tree['Configurations']['Configuration']
             ['PCCInputArguments']['StochasticInputs']['InputDistributions'])
    for k, inputname in enumerate(dists):
        if krig == 1:
            load('dmodel')
            for j in range(0, nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
                G_s[k, j] = predictor(X, dmodel)
        else:
            for j in range(0, nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
                # print 'Running simulation on node', j, 'of input', inputname['Name']
                # G_s[k, j] = run_model(driver, X)
                values.append(X)

    # Evaluate the mean point plus every perturbed point in one batch; values
    # began with mu, so the first result is the model output at the mean.
    out = iter(run_list(problem, driver, values))
    G_mu = next(out)
    for k, inputname in enumerate(dists):
        for j in range(0, nodes[k]):
            G_s[k, j] = next(out)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    G_sigma = zeros(otpt)
    covar_m = zeros((otpt, otpt))
    gs = zeros(otpt)
    gk = zeros(otpt)
    moms = []
    for l in range(0, otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_mean[l] = moment(1, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
    for l in range(0, otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_sigma[l] = moment(2, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        gs[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_skew[l] = gs[l] / G_sigma[l]**1.5
        gk[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_kurt[l] = gk[l] / G_sigma[l]**2
        for j in range(l, otpt):
            covar_m[l, j] = moment2(1, inpt, nodes[0], weight, G_s[:, :, l], l,
                                    G_s[:, :, j], j, G_mu, G_mean)
            covar_m[j, l] = covar_m[l, j]

    CovarianceMatrix = covar_m.transpose()
    Moments = {
        'Mean': G_mean,
        'Variance': diag(CovarianceMatrix),
        'Skewness': G_skew,
        'Kurtosis': G_kurt
    }

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the UDR method
    if otpt > 1:
        PCC = [0] * (otpt + 1)
    else:
        PCC = [0] * otpt
    dtype = [0] * otpt
    Inv1 = [0] * otpt
    Inv2 = [0] * otpt
    m1 = [0] * otpt
    m2 = [0] * otpt
    a1 = [0] * otpt
    a2 = [0] * otpt
    alph = [0] * otpt
    beta = [0] * otpt
    lo = [0] * otpt
    hi = [0] * otpt
    C_Y_pdf = [0] * otpt

    if any(Moments['Variance'] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0, otpt):
        (PCC[k], dtype[k], Inv1[k], m1[k], m2[k], a1[k], a2[k], alph[k],
         beta[k], lo[k], hi[k]) = pearscdf.pearscdf(
            limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]),
            Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(
                dtype[k], limstate[k], Moments['Mean'][k],
                Moments['Variance'][k], numbins)

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')  # 0/0 from zero-variance outputs just gives 'nan'
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())
    Distribution = {
        'PearsonType': dtype,
        'm1': m1,
        'm2': m2,
        'a1': a1,
        'a2': a2,
        'Complexity': C_Y_pdf
    }
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None,
                              CorrelationMatrix)

    if otpt > 1 and 0 not in PCC[0:otpt]:
        lower = zeros(otpt) - inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {
        'Moments': Moments,
        'CorrelationMatrix': CorrelationMatrix,
        'CovarianceMatrix': CovarianceMatrix,
        'Distribution': Distribution,
        'Plotting': Plotting,
        'PCC': PCC
    }

    return Results
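
# A minimal standalone sketch of the univariate dimension-reduction (UDR)
# idea that UP_UDR applies through moment/newmoment: approximate the model
# by a sum of 1-D slices through the mean,
#     Y(x) ~ sum_i Y(mu_1, .., x_i, .., mu_n) - (n - 1) * Y(mu),
# then integrate each slice with a 1-D quadrature rule.  Illustrative only;
# the toy model and node count here are made up.
from numpy import array, sqrt, pi
from numpy.polynomial.hermite import hermgauss

Y = lambda x: x[0]**2 + 3.0 * x[1]      # toy model, inputs ~ N(0, 1)
mu = array([0.0, 0.0])
n = 2
z, w = hermgauss(5)
node = sqrt(2.0) * z                    # standard-normal abscissas
wt = w / sqrt(pi)                       # matching weights

mean = (1 - n) * Y(mu)                  # shared mean-point correction
for i in range(n):
    for j in range(len(node)):
        x = mu.copy()
        x[i] = node[j]
        mean += wt[j] * Y(x)
print(mean)                             # exact here: E[Y] = E[x1^2] = 1.0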