Code example #1
File: UP_UDR.py Project: dyao-vu/meta-core
def UP_UDR(driver):
# Uses the UDR method for UP

    methd = 'UDR'
    method = 5

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,weight = params.params(method, nodes, inpt, stvars)

# ----------------------  Model  ---------------------------
    # set_printoptions(precision=4)
    # set_printoptions(suppress=True)

    x = kron(mu, ones((inpt * nodes[0], 1)))
    for ii in range(0,inpt):
        k = ii * nodes[ii]
        l = (ii+1) * nodes[ii]
        x[k:l, ii] = node[ii,:]

#    G_mu = run_model(driver, mu)
    values = [mu]

    G_s = zeros((inpt,max(nodes),otpt))
    for k,inputname in enumerate(driver._json_tree['Configurations']['Configuration']['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        if krig == 1:
            load('dmodel')
            for j in  range(0,nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
                G_s[k, j] = predictor(X, dmodel)
        else:
            for j in  range(0,nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
#                print 'Running simulation on node',j,'of input',inputname['Name']
#                G_s[k, j] = run_model(driver, X)
                values.append(X)

    out = iter(run_list(driver, values))
    G_mu = out.next()
    for k,inputname in enumerate(driver._json_tree['Configurations']['Configuration']['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        for j in  range(0,nodes[k]):
            G_s[k, j] = out.next()

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    G_sigma = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    gs = zeros(otpt)
    gk = zeros(otpt)
    moms = []
    for l in range(0,otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_mean[l] = moment(1, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
    for l in range(0,otpt):
        moms.append(newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]))
        G_sigma[l] = moment(2, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        gs[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_skew[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]) / G_sigma[l] ** 1.5
        gk[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l])
        G_kurt[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l], G_mean[l]) / G_sigma[l] ** 2
        for j in range(l,otpt):
            covar_m[l, j] = moment2(1, inpt, nodes[0], weight, G_s[:, :, l], l, G_s[:, :, j], j, G_mu, G_mean)
            covar_m[j, l] = covar_m[l, j]

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
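
A minimal sketch (not project code) of how the UDR loop above lays out its sample matrix: kron stacks inpt*nodes copies of the mean vector, then block ii overwrites column ii with input ii's quadrature nodes, so each run perturbs exactly one input. All numbers here are made up.

import numpy as np

mu = np.array([10.0, -5.0])            # hypothetical input means
node = np.array([[9.0, 10.0, 11.0],    # hypothetical nodes for input 0
                 [-6.0, -5.0, -4.0]])  # hypothetical nodes for input 1
inpt, nodes0 = len(mu), node.shape[1]

x = np.kron(mu, np.ones((inpt * nodes0, 1)))   # every row starts at mu
for ii in range(inpt):
    k, l = ii * nodes0, (ii + 1) * nodes0
    x[k:l, ii] = node[ii, :]                   # vary one input per block
print(x)
# Rows 0-2 sweep input 0, rows 3-5 sweep input 1: inpt*nodes runs in total,
# rather than the nodes**inpt runs a full factorial design would need.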
Code example #2
File: SA_FAST.py Project: dyao-vu/meta-core
def SA_FAST(driver):
    
    # First-order indices for a given model computed with the Fourier Amplitude Sensitivity Test (FAST).
    # R. I. Cukier, C. M. Fortuin, Kurt E. Shuler, A. G. Petschek and J. H. Schaibly.
    # Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients,
    # I-III: Theory/Applications/Analysis. The Journal of Chemical Physics.
    #
    # Input:
    # inpt : no. of input factors
    #
    # Output:
    # SI[] : sensitivity indices
    # Other used variables/constants:
    # OM[] : frequencies of parameters
    # S[] : search curve
    # X[] : coordinates of sample points
    # Y[] : output of model
    # OMAX : maximum frequency
    # N : number of sample points
    # AC[],BC[]: fourier coefficients
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------
    methd = 'FAST'
    method = '9'
    
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------
    
    #
    MI = 4  # maximum number of Fourier coefficients that may be retained in
    # calculating the partial variances without interference between the assigned frequencies
    #
    # Frequency assignment to input factors.
    OM = SETFREQ(inpt)
    # Computation of the maximum frequency
    # OMAX and the no. of sample points N.
    OMAX = int(OM[inpt-1])
    N = 2 * MI * OMAX + 1
    # Setting the relation between the scalar variable S and the coordinates
    # {X(1),X(2),...X(inpt)} of each sample point.
    S = pi / 2.0 * (2 * arange(1,N+1) - N-1) / N
    ANGLE = matrix(OM).T * matrix(S)
    X = 0.5 + arcsin(sin(ANGLE.T)) / pi
    # Transform distributions from standard uniform to general.

    for j in range(inpt):    
        if stvars[j].dist == 'NORM':
            X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])        
        elif stvars[j].dist == 'LNORM':        
            X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':        
            X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':        
            X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # Do the N model evaluations.
    Y = zeros((N, otpt))        
    if krig == 1:            
        load("dmodel")            
        Y = predictor(X, dmodel)            
    else:
        values = []            
        for p in range(N):
#            print 'Running simulation on test',p+1,'of',N
#            Y[p] = run_model(driver, array(X[p])[0])
            values.append(array(X[p])[0])
        Y = run_list(driver, values)

    # Computation of Fourier coefficients.
    AC = zeros((N, otpt))# initially zero
    BC = zeros((N, otpt))# initially zero
#    q = int(N / 2)-1
    q = (N-1)/2
    for j in range(2,N+1,2):    # j is even
#        print "Y[q]",Y[q]
#        print "matrix(cos(pi * j * arange(1,q+) / N))",matrix(cos(pi * j * arange(1,q+1) / N))
#        print "matrix(Y[q + arange(0,q)] + Y[q - arange(0,q)])",matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)])
        AC[j-1] = 1.0 / N * matrix(Y[q] + matrix(cos(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)]))
    for j in range(1,N+1,2):    # j is odd
        BC[j-1] = 1.0 / N * matrix(sin(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] - Y[q - arange(1,q+1)])

    # Computation of the general variance V in the frequency domain.
    V = 2 * (matrix(AC).T * matrix(AC) + matrix(BC).T * matrix(BC))
    # Computation of the partial variances and sensitivity indices.
    # Si=zeros(inpt,otpt);
    Si = zeros((otpt,otpt,inpt))
    for i in range(inpt):    
        Vi = zeros((otpt, otpt))    
        for j in range(1,MI+1): 
            idx = j * OM[i]-1     
            Vi = Vi + AC[idx].T * AC[idx] + BC[idx].T * BC[idx]
        Vi = 2. * Vi
        Si[:, :, i] = Vi / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------
    
    Sti = []# appears right after the call to this method in the original PCC_Computation.m
    
#    if plotf == 1:    
#        piecharts(inpt, otpt, Si, Sti, method, output)
    if simple == 1:
        Si_t = zeros((inpt,otpt))
        for p in range(inpt):        
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T

    Results = {'FirstOrderSensitivity': Si}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
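
The sampling step above depends on SETFREQ, which is not shown here; the sketch below uses a small hand-picked frequency set as a stand-in to show how the search curve S places N = 2*MI*OMAX + 1 points and maps them into the unit hypercube.

import numpy as np

OM = np.array([1, 9, 15])   # stand-in frequencies; the real SETFREQ differs
MI = 4                      # harmonics retained, as in the code above
OMAX = int(OM[-1])
N = 2 * MI * OMAX + 1       # 121: enough samples to resolve MI*OMAX harmonics

S = np.pi / 2.0 * (2 * np.arange(1, N + 1) - N - 1) / N   # symmetric about 0
X = 0.5 + np.arcsin(np.sin(np.outer(OM, S).T)) / np.pi    # (N, 3), in (0, 1)
print(X.shape, float(X.min()), float(X.max()))
# Column i oscillates along the curve at frequency OM[i]; the first-order
# index of input i is then read off the Fourier coefficients at the
# harmonics j*OM[i], exactly as the Vi loop above does.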
Code example #3
def SA_FAST(problem, driver):

    # First-order indices for a given model computed with the Fourier Amplitude Sensitivity Test (FAST).
    # R. I. Cukier, C. M. Fortuin, Kurt E. Shuler, A. G. Petschek and J. H. Schaibly.
    # Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients,
    # I-III: Theory/Applications/Analysis. The Journal of Chemical Physics.
    #
    # Input:
    # inpt : no. of input factors
    #
    # Output:
    # SI[] : sensitivity indices
    # Other used variables/constants:
    # OM[] : frequencies of parameters
    # S[] : search curve
    # X[] : coordinates of sample points
    # Y[] : output of model
    # OMAX : maximum frequency
    # N : number of sample points
    # AC[],BC[]: fourier coefficients
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------
    methd = 'FAST'
    method = '9'

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    # ----------------------  Model  ---------------------------

    #
    MI = 4  # maximum number of Fourier coefficients that may be retained in
    # calculating the partial variances without interference between the assigned frequencies
    #
    # Frequency assignment to input factors.
    OM = SETFREQ(inpt)
    # Computation of the maximum frequency
    # OMAX and the no. of sample points N.
    OMAX = int(OM[inpt - 1])
    N = 2 * MI * OMAX + 1
    # Setting the relation between the scalar variable S and the coordinates
    # {X(1),X(2),...X(inpt)} of each sample point.
    S = pi / 2.0 * (2 * arange(1, N + 1) - N - 1) / N
    ANGLE = matrix(OM).T * matrix(S)
    X = 0.5 + arcsin(sin(ANGLE.T)) / pi
    # Transform distributions from standard uniform to general.

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            X[:, j] = norm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0],
                               stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            X[:, j] = lognorm.ppf(uniform.cdf(X[:, j], 0,
                                              1), stvars[j].param[1], 0,
                                  exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            X[:, j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0],
                               stvars[j].param[1], stvars[j].param[2],
                               stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            X[:, j] = uniform.ppf(uniform.cdf(X[:, j], 0, 1),
                                  stvars[j].param[0], stvars[j].param[1])

    # Do the N model evaluations.
    Y = zeros((N, otpt))
    if krig == 1:
        load("dmodel")
        Y = predictor(X, dmodel)
    else:
        values = []
        for p in range(N):
            #            print 'Running simulation on test',p+1,'of',N
            #            Y[p] = run_model(driver, array(X[p])[0])
            values.append(array(X[p])[0])
        Y = run_list(problem, driver, values)

    # Computation of Fourier coefficients.
    AC = zeros((N, otpt))  # initially zero
    BC = zeros((N, otpt))  # initially zero
    #    q = int(N / 2)-1
    q = (N - 1) / 2
    for j in range(2, N + 1, 2):  # j is even
        #        print "Y[q]",Y[q]
        #        print "matrix(cos(pi * j * arange(1,q+) / N))",matrix(cos(pi * j * arange(1,q+1) / N))
        #        print "matrix(Y[q + arange(0,q)] + Y[q - arange(0,q)])",matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)])
        AC[j - 1] = 1.0 / N * matrix(
            Y[q] + matrix(cos(pi * j * arange(1, q + 1) / N)) *
            matrix(Y[q + arange(1, q + 1)] + Y[q - arange(1, q + 1)]))
    for j in range(1, N + 1, 2):  # j is odd
        BC[j - 1] = 1.0 / N * matrix(sin(
            pi * j * arange(1, q + 1) / N)) * matrix(Y[q + arange(1, q + 1)] -
                                                     Y[q - arange(1, q + 1)])

    # Computation of the general variance V in the frequency domain.
    V = 2 * (matrix(AC).T * matrix(AC) + matrix(BC).T * matrix(BC))
    # Computation of the partial variances and sensitivity indices.
    # Si=zeros(inpt,otpt);
    Si = zeros((otpt, otpt, inpt))
    for i in range(inpt):
        Vi = zeros((otpt, otpt))
        for j in range(1, MI + 1):
            idx = j * OM[i] - 1
            Vi = Vi + AC[idx].T * AC[idx] + BC[idx].T * BC[idx]
        Vi = 2. * Vi
        Si[:, :, i] = Vi / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------

    Sti = []  # appears right after the call to this method in the original PCC_Computation.m

    #    if plotf == 1:
    #        piecharts(inpt, otpt, Si, Sti, method, output)
    if simple == 1:
        Si_t = zeros((inpt, otpt))
        for p in range(inpt):
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T

    Results = {'FirstOrderSensitivity': Si}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
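
The even/odd coefficient loops above exploit the symmetry of the sample points about the midpoint q = (N-1)/2: the cosine sum folds Y[q+m] + Y[q-m] and the sine sum folds Y[q+m] - Y[q-m]. A toy check of the cosine case (a sketch, with a random signal):

import numpy as np

N = 9
q = (N - 1) // 2
Y = np.random.default_rng(0).standard_normal(N)

j = 4   # an even harmonic, as in the loop above
direct = np.sum(Y * np.cos(np.pi * j * (np.arange(N) - q) / N)) / N
m = np.arange(1, q + 1)
folded = (Y[q] + np.sum(np.cos(np.pi * j * m / N) * (Y[q + m] + Y[q - m]))) / N
assert np.isclose(direct, folded)   # both accumulate the same coefficient
# The odd-j sine loop is the same fold with the antisymmetric combination.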
Code example #4
File: SA_EFAST.py Project: dyao-vu/meta-core
def SA_EFAST(driver):

    #[SI,STI] = EFAST(K,WANTEDN)
    # First order and total effect indices for a given model computed with
    # Extended Fourier Amplitude Sensitivity Test (EFAST).
    # Andrea Saltelli, Stefano Tarantola and Karen Chan. 1999
    # A quantitative model-independent method for global sensitivity analysis of model output.
    # Technometrics 41:39-56
    #
    # Input:
    # inpt : no. of input factors
    # WANTEDN : wanted no. of sample points
    #
    # Output:
    # SI[] : first order sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # OM[] : vector of inpt frequencies
    # OMI : frequency for the group of interest
    # OMCI[] : set of freq. used for the compl. group
    # X[] : parameter combination rank matrix
    # AC[],BC[]: fourier coefficients
    # FI[] : random phase shift
    # V : total output variance (for each curve)
    # VI : partial var. of par. i (for each curve)
    # VCI : part. var. of the compl. set of par...
    # AV : total variance in the time domain
    # AVI : partial variance of par. i
    # AVCI : part. var. of the compl. set of par.
    # Y[] : model output
    # N : no. of runs on each curve

    # ----------------------  Setup  ---------------------------
    methd = 'EFAST'
    method = '10'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------

    NR = 1  # no. of search curves
    MI = 4  # maximum number of Fourier coefficients that may be retained in calculating
    # the partial variances without interference between the assigned frequencies
    #
    # Computation of the frequency for the group of interest OMi and the no. of sample points N.
    OMi = int(floor((nEFAST / NR - 1) / (2 * MI) / inpt))
    N = 2 * MI * OMi + 1
    total_sims = N*NR*inpt
    sim = 0
    if (N * NR < 65):
        logging.error('sample size must be >= 65 per factor.')
        raise ValueError('sample size must be >= 65 per factor.')

    # Algorithm for selecting the set of frequencies. OMci(i), i=1:inpt-1, contains
    # the set of frequencies to be used by the complementary group.
    OMci = SETFREQ(N - 1, OMi / 2 / MI)
    # Loop over the inpt input factors.
    Si = zeros((otpt,otpt,inpt))
    Sti = zeros((otpt,otpt,inpt))
    for i in range(inpt):
        # Initialize AV,AVi,AVci to zero.
        AV = 0
        AVi = 0
        AVci = 0
        # Loop over the NR search curves.
        for L in range(NR):
            # Setting the vector of frequencies OM for the inpt factors.
            cj = 1
            OM = zeros(inpt)
            for j in range(inpt):
                if (j == i):
                    # For the factor of interest.
                    OM[i] = OMi
                else:
                    # For the complementary group.
                    OM[j] = OMci[cj]
                    cj = cj + 1
            # Setting the relation between the scalar variable S and the coordinates
            # {X(1),X(2),...X(inpt)} of each sample point.
            FI = zeros(inpt)
            for j in range(inpt):
                FI[j] = random.random() * 2 * pi        # random phase shift
            S_VEC = pi * (2 * arange(1,N+1) - N - 1) / N
            OM_VEC = OM[range(inpt)]
            FI_MAT = transpose(array([FI]*N))
            ANGLE = matrix(OM_VEC).T*matrix(S_VEC) + matrix(FI_MAT)
            X = 0.5 + arcsin(sin(ANGLE.T)) / pi
            # Transform distributions from standard uniform to general.

            for j in range(inpt):
                if stvars[j].dist == 'NORM':
                    X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
                elif stvars[j].dist == 'LNORM':
                    X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
                elif stvars[j].dist == 'BETA':
                    X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
                elif stvars[j].dist == 'UNIF':
                    X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

            # Do the N model evaluations.
            Y = zeros((N, otpt))
            if krig == 1:
                load("dmodel")
                Y = predictor(X, dmodel)
            else:
                values = []
                for p in range(N):
#                    sim += 1
#                    print 'Running simulation on test',sim,'of',total_sims
#                    Y[p] = run_model(driver, array(X[p])[0])
                    values.append(array(X[p])[0])
                Y = run_list(driver, values)

            # Subtract the average value.
            Y = Y - kron(mean(Y,0), ones((N, 1)))

            # Fourier coeff. at [1:OMi/2].
            NQ = int(N / 2)-1
            N0 = NQ + 1
            COMPL = 0
            Y_VECP = Y[N0+1:] + Y[NQ::-1]
            Y_VECM = Y[N0+1:] - Y[NQ::-1]
#            AC = zeros((int(ceil(OMi / 2)), otpt))
#            BC = zeros((int(ceil(OMi / 2)), otpt))
            AC = zeros((OMi * MI, otpt))
            BC = zeros((OMi * MI, otpt))
            for j in range(int(ceil(OMi / 2))+1):
                ANGLE = (j+1) * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j] = (Y[N0] +matrix(C_VEC)*matrix(Y_VECP)) / N
                BC[j] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j]).T * matrix(AC[j]) + matrix(BC[j]).T * matrix(BC[j])
            # Computation of V_{(ci)}.
            Vci = 2 * COMPL
            AVci = AVci + Vci
            # Fourier coeff. at [P*OMi, for P=1:MI].
            COMPL = 0
# Do these need to be recomputed at all?
#            Y_VECP = Y[N0 + range(NQ)] + Y[N0 - range(NQ)]
#            Y_VECM = Y[N0 + range(NQ)] - Y[N0 - range(NQ)]
            for j in range(OMi, OMi * MI + 1, OMi):
                ANGLE = j * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j-1] = (Y[N0] + matrix(C_VEC)*matrix(Y_VECP)) / N
                BC[j-1] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j-1]).T * matrix(AC[j-1]) + matrix(BC[j-1]).T * matrix(BC[j-1])
            # Computation of V_i.
            Vi = 2 * COMPL
            AVi = AVi + Vi
            # Computation of the total variance in the time domain.
            AV = AV +  matrix(Y).T * matrix(Y) / N
        # Computation of sensitivity indices.
        AV = AV / NR
        AVi = AVi / NR
        AVci = AVci / NR
        Si[:, :, i] = AVi / AV
        Sti[:, :, i] = 1 - AVci / AV

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------

#    if plotf == 1:
#        piecharts(inpt, otpt, Si, Sti, methd, output)
    if simple == 1:
        Si_t = zeros((inpt,otpt))
        for p in range(inpt):
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T
    if simple == 1:
        Sti_t = zeros((inpt,otpt))
        for p in range(inpt):
            Sti_t[p] = diag(Sti[:, :, p])
        Sti = Sti_t.T
    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
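
A quick sketch of the EFAST sizing arithmetic above, with a hypothetical budget nEFAST; it shows how OMi and N follow from the budget and why the guard acts as a floor of 65 runs per factor.

NR, MI, inpt = 1, 4, 3          # values as in the code above
nEFAST = 1000                   # hypothetical sample budget
OMi = int((nEFAST / NR - 1) // (2 * MI) // inpt)   # frequency of interest
N = 2 * MI * OMi + 1                               # runs per search curve
print(OMi, N, N * NR * inpt)    # 41 329 987 -- stays within the budget
if N * NR < 65:
    raise ValueError('sample size must be >= 65 per factor.')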
Code example #5
File: UP_PCE.py Project: hitej/meta-core
def UP_PCE(driver):
    # Uses the PCE method for UP

    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.

    # ----------------------  Setup  ---------------------------
    methd = "PCE"
    method = 6

    inpt = len(driver.inputs)
    krig = driver.krig
    limstate = driver.limstate
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    stvars = driver.stvars
    numbins = driver.numbins

    # current settings for these two vars
    ii = 0
    jj = 0

    # ----------------------  Model  ---------------------------
    mu_g = zeros(inpt)
    sigma_g = ones(inpt)
    node_t = zeros((inpt, nodes[0]))
    weight_t = zeros((inpt, nodes[0]))
    for i in range(inpt):
        node_t[i], weight_t[i] = gaussquad.gaussquad(nodes[i], "NORM", mu_g[i], sigma_g[i])

    x = []
    for i in range(inpt):
        x.append(symbols("x" + str(i)))
    x = array(x)

    j = fullfact(nodes)

    pts = shape(j)[0]
    node = zeros((pts, inpt))
    wj = zeros((pts, inpt))
    for y in range(pts):
        for i in range(inpt):
            node[y][i] = node_t[i][j[y][i]]
            wj[y][i] = weight_t[i][j[y][i]]
    weight = prod(wj, 1)

    P = zeros(order)
    P[0] = 1
    for p in range(1, order):
        term2 = 0
        for s in range(1, p + 1):
            term1 = 1
            for r in range(s):
                term1 = term1 * (inpt + r)
            term2 = term2 + (1.0 / int(scipy.misc.factorial(s))) * term1
        if p == 1:
            P[p] = term2
        else:
            P[p] = term2 - sum(P[range(1, p + 1)])

    G_s = zeros((pts, otpt))
    if krig == 1:
        t = strcat("SS_K", num2str(ii), num2str(jj))
        load(char(t))
        for j in range(pts):
            # Rosenblatt Transformation
            T_L = Dist.Dist(stvars, node[j], inpt)
            G_s[j] = predictor(T_L, dmodel)
    else:
        values = []
        for j in range(pts):
            # Rosenblatt Transformation
            #            print 'Running simulation',j+1,'of',pts
            T_L = Dist.Dist(stvars, node[j], inpt)
            #            G_s[j] = run_model(driver, T_L)
            values.append(T_L)
        G_s = run_list(driver, values)

    indx = 0
    bn = zeros((sum(P), otpt))
    bd = zeros(sum(P))
    for k in range(order):
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):
            for i in range(pts):
                L = node[i]
                if k == 0:
                    bn[indx] = bn[indx] + weight[i] * G_s[i]
                    bd[indx] = bd[indx] + weight[i]
                else:

                    h, h_sym = hermite.hermite(k, vec[j], L, x)
                    bn[indx] += weight[i] * G_s[i] * h
                    bd[indx] += weight[i] * (h ** 2)
            indx += 1

    b = zeros((sum(P), otpt))
    for l in range(otpt):
        b[:, l] = bn[:, l] / bd

    indx = 0
    U_sum = 0
    for k in range(order):
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):
            if k == 0:
                U_sum = b[0]
            else:
                h, h_sym = hermite.hermite(k, vec[j], L, x)
                U_sum = U_sum + b[indx] * N(h_sym)
            indx += 1
    U = U_sum

    U_s = zeros((pts, otpt))
    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt, otpt))
    for i in range(pts):
        for k in range(otpt):
            U_s[i][k] = U[k].subs(dict(zip(x, node[i])))
    for k in range(otpt):
        #        G_mean[k] = sum(matrix(weight) * matrix(U_s[:, k]).transpose())
        G_mean[k] = sum(weight * U_s[:, k])
    for k in range(otpt):
        for j in range(k, otpt):
            covar_m[k, j] = sum(weight * (U_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5
        G_kurt[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2

    CovarianceMatrix = covar_m.transpose()
    Moments = {"Mean": G_mean, "Variance": diag(CovarianceMatrix), "Skewness": G_skew, "Kurtosis": G_kurt}

    # ----------------------  Analyze  ---------------------------

    if any(Moments["Variance"] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    # Calculate the PCC for the FFNI method
    if otpt > 1:
        PCC = [0] * (otpt + 1)
    else:
        PCC = [0] * otpt
    dtype = [0] * otpt
    Inv1 = [0] * otpt
    Inv2 = [0] * otpt
    m1 = [0] * otpt
    m2 = [0] * otpt
    a1 = [0] * otpt
    a2 = [0] * otpt
    alph = [0] * otpt
    beta = [0] * otpt
    lo = [0] * otpt
    hi = [0] * otpt
    C_Y_pdf = [0] * otpt

    if any(Moments["Variance"] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(otpt):
        PCC[k], dtype[k], Inv1[k], m1[k], m2[k], a1[k], a2[k], alph[k], beta[k], lo[k], hi[k] = pearscdf.pearscdf(
            limstate[k],
            Moments["Mean"][k],
            sqrt(CovarianceMatrix[k, k]),
            Moments["Skewness"][k],
            Moments["Kurtosis"][k],
            methd,
            k,
            output,
        )
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(
                dtype[k], limstate[k], Moments["Mean"][k], Moments["Variance"][k], numbins
            )

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid="ignore")  # ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat, sigma_mat.transpose())
    Distribution = {"PearsonType": dtype, "m1": m1, "m2": m2, "a1": a1, "a2": a2, "Complexity": C_Y_pdf}
    Plotting = {"alpha": alph, "beta": beta, "lo": lo, "hi": hi}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt) - inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {
        "Moments": Moments,
        "CorrelationMatrix": CorrelationMatrix,
        "CovarianceMatrix": CovarianceMatrix,
        "Distribution": Distribution,
        "Plotting": Plotting,
        "PCC": PCC,
    }

    return Results
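
The P loop above counts Hermite basis terms per total degree. Its closed form is the multiset coefficient C(inpt + p - 1, p); a one-line check (a sketch, not project code):

import math

inpt, order = 3, 4
P = [1] + [math.comb(inpt + p - 1, p) for p in range(1, order)]
print(P, sum(P))   # [1, 3, 6, 10] 20 -- matches the iterative computation
# sum(P) is the number of coefficients b solved for in the expansion above.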
Code example #6
File: UP_FFNI.py Project: landunin/meta-core
def UP_FFNI(problem, driver):

    # Uses the FFNI method for UP

    # ----------------------  Setup  ---------------------------
    methd = 'FFNI'
    method = 4

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,w = params.params(method, nodes, inpt, stvars)    # Do I need to transpose these matrices?
    #[quadpts] = params(method, nodes, inpt, stvars)

    # ----------------------  Model  ---------------------------

    # Create full factorial experiment from individual nodes and weights
    j = fullfact(nodes)
    pts = shape(j)[0]
    x=zeros((pts,inpt))
    wj=zeros((pts,inpt))
    for y in range(pts):
        for i in range(inpt):
            x[y][i] = node[i][j[y][i]]
            wj[y][i] = w[i][j[y][i]]
    weight = prod(wj, 1)

    if krig == 1:
        load("dmodel")
        G_s = predictor(x, dmodel)
    else:
#        G_s = zeros((pts, otpt))
#        for i in range(pts):
#            print 'Running simulation',i+1,'of',pts
#            G_s[i] = run_model(driver, x[i])
            # G_s[i] = modelica.RunModelica(x[i], modelname, properties)
        G_s = run_list(problem, driver, x)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    for k in range(otpt):
        G_mean[k] = sum(weight * G_s[:, k])
    for k in range(otpt):
        for j in range(otpt):
            covar_m[k, j] = sum(weight * (G_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5
        G_kurt[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
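
A sketch of the full-factorial node/weight product built above, using NumPy's probabilists' Gauss-Hermite rule as a stand-in for params.params and itertools.product as a stand-in for fullfact; all sizes are made up.

import numpy as np
from itertools import product

nodes = [3, 2]                                     # hypothetical nodes per input
rules = [np.polynomial.hermite_e.hermegauss(n) for n in nodes]
rules = [(xi, wi / wi.sum()) for xi, wi in rules]  # each 1-D rule sums to 1

grid = list(product(*[range(n) for n in nodes]))   # fullfact equivalent
x = np.array([[rules[i][0][j[i]] for i in range(len(nodes))] for j in grid])
weight = np.array([np.prod([rules[i][1][j[i]] for i in range(len(nodes))])
                   for j in grid])
print(x.shape, weight.sum())   # (6, 2) 1.0 -- tensor weights stay normalized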
Code example #7
File: UP_MCS.py Project: landunin/meta-core
def UP_MCS(problem, driver):
    # Uses the MCS method for UP

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    #*****************RANDOM DRAWS FROM INPUT DISTRIBUTIONS********************
    value = asarray(LHS.LHS(inpt, nMCS))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:, j] = norm.ppf(uniform.cdf(value[:, j], 0, 1),
                                   stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:, j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[1], 0,
                                      exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:, j] = beta.ppf(uniform.cdf(value[:, j], 0,
                                               1), stvars[j].param[0],
                                   stvars[j].param[1], stvars[j].param[2],
                                   stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:, j] = uniform.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[0], stvars[j].param[1])

    # ----------------------  Model  ---------------------------

    out = zeros((nMCS, otpt))
    if krig == 1:
        load("dmodel")
        out = predictor(value, dmodel)
    else:
        #        for i in range(nMCS):
        #            print 'Running simulation', i+1, 'of', nMCS, 'with inputs', value[i]
        #            out[i] = run_model(driver, value[i])
        out = run_list(problem, driver, value)

    limstate = asarray(limstate)
    limstate1 = asarray(kron(limstate[:, 0],
                             ones(nMCS))).reshape(otpt, nMCS).transpose()
    limstate2 = asarray(kron(limstate[:, 1],
                             ones(nMCS))).reshape(otpt, nMCS).transpose()
    B = logical_and(greater_equal(out, limstate1), less_equal(out, limstate2))
    PCC = sum(B, 0) / nMCS
    B_t = B[sum(B, 1) == otpt]
    if otpt > 1 and not 0 in PCC[0:otpt]:
        PCC = append(PCC, len(B_t) / nMCS)

    #Moments
    CovarianceMatrix = matrix(cov(out, None, 0))  #.transpose()
    Moments = {
        'Mean': mean(out, 0),
        'Variance': diag(CovarianceMatrix),
        'Skewness': skew(out),
        'Kurtosis': kurtosis(out, fisher=False)
    }

    # combine the display of the correlation matrix with setting a var that will be needed below
    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance'] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    C_Y = [0] * otpt
    for k in range(0, otpt):
        if Moments['Variance'][k] != 0:
            C_Y[k] = estimate_complexity.with_samples(out[:, k], nMCS)

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(
        invalid='ignore'
    )  # ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())

    Distribution = {'Complexity': C_Y}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None,
                              CorrelationMatrix)

    Results = {
        'Moments': Moments,
        'CorrelationMatrix': CorrelationMatrix,
        'CovarianceMatrix': CovarianceMatrix,
        'Distribution': Distribution,
        'PCC': PCC
    }

    return Results
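
Note that uniform.cdf(u, 0, 1) is the identity on [0, 1], so each branch above reduces to a straight PPF (inverse-CDF) map of the Latin hypercube draw. A sketch with a simple LHS stand-in for LHS.LHS and made-up distribution parameters:

import numpy as np
from scipy.stats import norm, uniform

rng = np.random.default_rng(1)
inpt, nMCS = 2, 1000
# One draw per stratum, each column permuted independently (basic LHS).
u = rng.permuted(np.arange(nMCS)[:, None] + rng.random((nMCS, inpt)),
                 axis=0) / nMCS
value = np.empty_like(u)
value[:, 0] = norm.ppf(u[:, 0], loc=10.0, scale=2.0)    # 'NORM' branch
value[:, 1] = uniform.ppf(u[:, 1], loc=0.0, scale=5.0)  # 'UNIF' branch
print(value.mean(axis=0))   # roughly [10.0, 2.5]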
Code example #8
def UP_FFNI(driver):

    # Uses the FFNI method for UP

    # ----------------------  Setup  ---------------------------
    methd = 'FFNI'
    method = 4

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    node,w = params.params(method, nodes, inpt, stvars)    # Do I need to transpose these matrices?
    #[quadpts] = params(method, nodes, inpt, stvars)

    # ----------------------  Model  ---------------------------

    # Create full factorial experiment from individual nodes and weights
    j = fullfact(nodes)
    pts = shape(j)[0]
    x=zeros((pts,inpt))
    wj=zeros((pts,inpt))
    for y in range(pts):
        for i in range(inpt):
            x[y][i] = node[i][j[y][i]]
            wj[y][i] = w[i][j[y][i]]
    weight = prod(wj, 1)

    if krig == 1:
        load("dmodel")
        G_s = predictor(x, dmodel)
    else:
#        G_s = zeros((pts, otpt))
#        for i in range(pts):
#            print 'Running simulation',i+1,'of',pts
#            G_s[i] = run_model(driver, x[i])
            # G_s[i] = modelica.RunModelica(x[i], modelname, properties)
        G_s = run_list(driver, x)

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    for k in range(otpt):
        G_mean[k] = sum(weight * G_s[:, k])
    for k in range(otpt):
        for j in range(otpt):
            covar_m[k, j] = sum(weight * (G_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5
        G_kurt[k] = sum(weight * (G_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2

    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
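
The weighted-moment block above can be sanity-checked on the identity model: with a normalized Gauss-Hermite rule and G(x) = x, the recovered moments are those of a standard normal. A minimal sketch:

import numpy as np

node, w = np.polynomial.hermite_e.hermegauss(5)
weight = w / w.sum()          # normalize against the standard normal
G_s = node                    # identity "model", one output
G_mean = np.sum(weight * G_s)
var = np.sum(weight * (G_s - G_mean) ** 2)
skew = np.sum(weight * (G_s - G_mean) ** 3) / var ** 1.5
kurt = np.sum(weight * (G_s - G_mean) ** 4) / var ** 2
print(G_mean, var, skew, kurt)   # ~0, 1, 0, 3 as expected for N(0, 1)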
Code example #9
File: UP_PCE.py Project: landunin/meta-core
def UP_PCE(problem, driver):
    # Uses the PCE method for UP
    
    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.
    
    # ----------------------  Setup  ---------------------------
    methd = 'PCE'
    method = 6

    inpt    = len(driver.inputs)
    krig    = driver.krig
    limstate= driver.limstate
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    stvars  = driver.stvars
    numbins = driver.numbins
    
    #current settings for these two vars
    ii = 0
    jj = 0
    
    # ----------------------  Model  ---------------------------
    mu_g = zeros(inpt)
    sigma_g = ones(inpt)
    node_t = zeros((inpt,nodes[0]))
    weight_t = zeros((inpt,nodes[0]))
    for i in range(inpt):
        node_t[i], weight_t[i] = gaussquad.gaussquad(nodes[i], 'NORM', mu_g[i], sigma_g[i])

    x=[]
    for i in range(inpt):
        x.append(symbols('x'+str(i)))
    x=array(x)

    j = fullfact(nodes)

    pts = shape(j)[0]
    node=zeros((pts,inpt))
    wj=zeros((pts,inpt))
    for y in range(pts):
        for i in range(inpt):
            node[y][i] = node_t[i][j[y][i]]
            wj[y][i] = weight_t[i][j[y][i]]
    weight = prod(wj, 1)
    
    P = zeros(order)
    P[0] = 1
    for p in range(1,order):    
        term2 = 0
        for s in range(1,p+1):
            term1 = 1        
            for r in range(s):
                term1 = term1 * (inpt + r)
            term2 = term2 + (1.0 / int(scipy.misc.factorial(s))) * term1
        if p == 1:        
            P[p] = term2        
        else:        
            P[p] = term2 - sum(P[range(1,p+1)])
    
    G_s = zeros((pts, otpt))
    if krig == 1:    
        t = strcat('SS_K', num2str(ii), num2str(jj))    
        load(char(t))    
        for j in range(pts):       
            #Rosenblatt Transformation
            T_L = Dist.Dist(stvars, node[j], inpt)       
            G_s[j] = predictor(T_L, dmodel)
    else:
        values = []    
        for j in range(pts):     
            #Rosenblatt Transformation
#            print 'Running simulation',j+1,'of',pts
            T_L = Dist.Dist(stvars, node[j], inpt)         
#            G_s[j] = run_model(driver, T_L)
            values.append(T_L)
        G_s = run_list(problem, driver, values)

    indx = 0
    bn = zeros((sum(P), otpt))
    bd = zeros(sum(P))
    for k in range(order):    
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):   
            for i in range(pts):
                L=node[i]       
                if k == 0:
                    bn[indx] = bn[indx] + weight[i] * G_s[i]
                    bd[indx] = bd[indx] + weight[i]
                else:                

                    h, h_sym = hermite.hermite(k, vec[j], L, x)
                    bn[indx] += weight[i] * G_s[i] * h          
                    bd[indx] += weight[i] * (h ** 2)
            indx+=1

    b = zeros((sum(P),otpt))
    for l in range(otpt):    
        b[:, l] = bn[:, l] / bd
    
    indx = 0
    U_sum = 0
    for k in range(order):    
        vec = xvector.xvector(k, inpt)
        for j in range(int(P[k])):        
            if k == 0:            
                U_sum = b[0]
            else:       
                h, h_sym = hermite.hermite(k, vec[j], L, x)
                U_sum = U_sum + b[indx] * N(h_sym)
            indx+=1
    U = U_sum
    
    U_s = zeros((pts,otpt))
    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    covar_m = zeros((otpt,otpt))
    for i in range(pts):
        for k in range(otpt):
            U_s[i][k] = U[k].subs(dict(zip(x, node[i])))
    for k in range(otpt):
#        G_mean[k] = sum(matrix(weight) * matrix(U_s[:, k]).transpose())
        G_mean[k] = sum(weight * U_s[:, k])
    for k in range(otpt):    
        for j in range(k,otpt):    
            covar_m[k, j] = sum(weight * (U_s[:, k] - G_mean[k]) * (G_s[:, j] - G_mean[j]))        
            covar_m[j, k] = covar_m[k, j]
        G_skew[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 3) / covar_m[k, k] ** 1.5    
        G_kurt[k] = sum(weight * (U_s[:, k] - G_mean[k]) ** 4) / covar_m[k, k] ** 2
    
    CovarianceMatrix = covar_m.transpose()
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': G_skew, 'Kurtosis': G_kurt}
    
    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    # Calculate the PCC for the FFNI method
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] =\
          pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
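
The normalization bd = sum(weight * h**2) above works because probabilists' Hermite polynomials are orthogonal under the Gaussian weight, with E[He_k^2] = k!. A small quadrature check (a sketch using NumPy's HermiteE, not the project's hermite module):

import numpy as np
from numpy.polynomial.hermite_e import HermiteE, hermegauss

x, w = hermegauss(8)
w = w / w.sum()                                 # standard-normal weights
He = [HermiteE.basis(k)(x) for k in range(4)]
gram = np.array([[np.sum(w * He[j] * He[k]) for k in range(4)]
                 for j in range(4)])
print(np.round(gram, 6))   # diag(1, 1, 2, 6) = diag(k!), zeros elsewhere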
Code example #10
File: UP_MCS.py Project: hitej/meta-core
def UP_MCS(driver):
    # Uses the MCS method for UP

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    #*****************RANDOM DRAWS FROM INPUT DISTRIBUTIONS********************
    value = asarray(LHS.LHS(inpt, nMCS))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # ----------------------  Model  ---------------------------

    out = zeros((nMCS, otpt))
    if krig == 1:
        load("dmodel")
        out = predictor(value, dmodel)
    else:
#        for i in range(nMCS):
#            print 'Running simulation',i+1,'of',nMCS,'with inputs',value[i]
#            out[i] = run_model(driver, value[i])
        out = run_list(driver, value)

    limstate = asarray(limstate)
    limstate1 = asarray(kron(limstate[:, 0], ones(nMCS))).reshape(otpt,nMCS).transpose()
    limstate2 = asarray(kron(limstate[:, 1], ones(nMCS))).reshape(otpt,nMCS).transpose()
    B = logical_and(greater_equal(out,limstate1),less_equal(out,limstate2))
    PCC = sum(B,0) / nMCS
    B_t = B[sum(B,1) == otpt]
    if otpt > 1 and not 0 in PCC[0:otpt]:
        PCC = append(PCC,len(B_t) / nMCS)

    #Moments
    CovarianceMatrix = matrix(cov(out,None,0))#.transpose()
    Moments = {'Mean': mean(out,0), 'Variance': diag(CovarianceMatrix), 'Skewness': skew(out), 'Kurtosis': kurtosis(out,fisher=False)}

    # combine the display of the correlation matrix with setting a var that will be needed below
    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    C_Y = [0]*otpt
    for k in range(0,otpt):
        if Moments['Variance'][k]!=0:
            C_Y[k] = estimate_complexity.with_samples(out[:,k],nMCS)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())

    Distribution = {'Complexity': C_Y}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'PCC': PCC}

    return Results
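
A sketch of the PCC bookkeeping above with synthetic outputs: per-output PCC is the fraction of samples inside the limit-state band, and the appended joint PCC is the fraction of rows inside every band at once.

import numpy as np

rng = np.random.default_rng(2)
nMCS, otpt = 100000, 2
out = rng.standard_normal((nMCS, otpt))          # stand-in model outputs
limstate = np.array([[-1.0, 1.0], [-2.0, 2.0]])  # hypothetical bands
B = (out >= limstate[:, 0]) & (out <= limstate[:, 1])
PCC = B.sum(axis=0) / nMCS                 # ~[0.683, 0.954] for N(0, 1)
joint = np.sum(B.sum(axis=1) == otpt) / nMCS
print(PCC, joint)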
Code example #11
def UP_TS(problem, driver):
    # Uses the TS method for UP

    # ----------------------  Setup  ---------------------------

    methd = 'TS'
    method = '2'

    delta = driver.TSdelta
    mu = [inp.get_I_mu() for inp in driver.inputs]
    sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    numbins = driver.numbins
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    rho = identity(inpt)

    # ----------------------  Model  ---------------------------

    if krig == 1:
        load("dmodel")
        G_mean = predictor(mu, dmodel).cT
        G_k = lambda x: predictor(x, dmodel)
        F1 = taylorseries.taylorseries(G_k, mu, delta * sigma, inpt, otpt)
    else:
        #        G_mean = run_model(driver, mu)
        #        G = lambda x: run_model(driver, x)
        values = [mu]
        values.extend(
            taylorseries.pretaylorseries(mu, delta * array(sigma), inpt))
        out = iter(run_list(problem, driver, values))
        G_mean = out.next()
        G = lambda x: out.next()
        F1 = taylorseries.taylorseries(G, mu, delta * array(sigma), inpt, otpt)

    print 'Taylor Series:\n', F1

    covar_m = zeros((otpt, otpt))
    for j in range(otpt):
        for k in range(j, otpt):
            for l in range(inpt):
                for m in range(inpt):
                    covar_m[j, k] = covar_m[j, k] + F1[l, j] * F1[
                        m, k] * sigma[l] * sigma[m] * rho[l, m]
                covar_m[k, j] = covar_m[j, k]

    CovarianceMatrix = covar_m.transpose()
    print 'Covariance Matrix:\n', CovarianceMatrix
    Moments = {
        'Mean': G_mean,
        'Variance': diag(CovarianceMatrix),
        'Skewness': zeros((otpt, 1)),
        'Kurtosis': 3 * ones((otpt, 1))
    }

    if otpt > 1:
        PCC = [0] * (otpt + 1)
    else:
        PCC = [0] * otpt
    dtype = [0] * otpt
    Inv1 = [0] * otpt
    Inv2 = [0] * otpt
    m1 = [0] * otpt
    m2 = [0] * otpt
    a1 = [0] * otpt
    a2 = [0] * otpt
    alph = [0] * otpt
    beta = [0] * otpt
    lo = [0] * otpt
    hi = [0] * otpt
    C_Y_pdf = [0] * otpt

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance'] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0, otpt):
        PCC[k], dtype[k], Inv1[k], m1[k], m2[k], a1[k], a2[k], alph[k], beta[
            k], lo[k], hi[k] = pearscdf.pearscdf(limstate[k],
                                                 Moments['Mean'][k],
                                                 sqrt(CovarianceMatrix[k, k]),
                                                 Moments['Skewness'][k],
                                                 Moments['Kurtosis'][k], methd,
                                                 k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(
                dtype[k], limstate[k], Moments['Mean'][k],
                Moments['Variance'][k], numbins)

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore'
           )  #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())
    Distribution = {
        'PearsonType': dtype,
        'm1': m1,
        'm2': m2,
        'a1': a1,
        'a2': a2,
        'Complexity': C_Y_pdf
    }
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None,
                              CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt) - inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {
        'Moments': Moments,
        'CorrelationMatrix': CorrelationMatrix,
        'CovarianceMatrix': CovarianceMatrix,
        'Distribution': Distribution,
        'Plotting': Plotting,
        'PCC': PCC
    }

    return Results
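
The quadruple loop above assembles the first-order second-moment (delta-method) covariance Cov(Y) ~= F1' * (sigma sigma' .* rho) * F1, which with rho = identity collapses to F1' * diag(sigma^2) * F1. Here is a sketch of the same propagation under that assumption, with a central-difference Jacobian standing in for taylorseries.taylorseries; the model G and all numbers are hypothetical, not the driver API:

import numpy as np

def G(x):
    # hypothetical model: two outputs of two inputs
    return np.array([x[0] + 2.0 * x[1], x[0] * x[1]])

mu = np.array([1.0, 3.0])      # input means
sigma = np.array([0.1, 0.2])   # input standard deviations
delta = 0.01                   # relative step size, like driver.TSdelta

inpt, otpt = 2, 2
F1 = np.zeros((inpt, otpt))    # F1[l, j] = dG_j/dx_l, matching the layout above
for l in range(inpt):
    h = delta * sigma[l]
    e = np.zeros(inpt)
    e[l] = h
    F1[l] = (G(mu + e) - G(mu - e)) / (2.0 * h)

# with rho = identity the double input sum collapses to F1' diag(sigma^2) F1
CovarianceMatrix = F1.T.dot(np.diag(sigma**2)).dot(F1)
print(CovarianceMatrix)        # analytic result: [[0.17, 0.11], [0.11, 0.13]]
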
Code example #12
def SA_SOBOL(driver):
    # Uses the Sobol' method for SA.
    # Input:
    # inpt : no. of input factors
    # N: number of Sobol' samples
    #
    # Output:
    # SI[] : sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------

    methd = 'SOBOL'
    method = '7'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------
    value = asarray(LHS.LHS(2*inpt, nSOBOL))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = norm.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
            value[:,j+inpt] = lognorm.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
            value[:,j+inpt] = beta.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = uniform.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])

    values = []
    XMA = value[0:nSOBOL, 0:inpt]
    XMB = value[0:nSOBOL, inpt:2 * inpt]
    YXMA = zeros((nSOBOL, otpt))
    YXMB = zeros((nSOBOL, otpt))
    if krig == 1:
        load("dmodel")
        YXMA = predictor(XMA, dmodel)
        YXMB = predictor(XMB, dmodel)
    else:
        values.extend(list(XMA))
        values.extend(list(XMB))

    YXMC = zeros((inpt, nSOBOL, otpt))
    for i in range(inpt):
        XMC = deepcopy(XMB)
        XMC[:, i] = deepcopy(XMA[:, i])
        if krig == 1:
            YXMC[i] = predictor(XMC, dmodel)
        else:
            values.extend(list(XMC))

    if krig != 1:
        out = iter(run_list(driver, values))
        for i in range(nSOBOL):
            YXMA[i] = out.next()
        for i in range(nSOBOL):
            YXMB[i] = out.next()
        for i in range(inpt):
            for j in range(nSOBOL):
                YXMC[i, j] = out.next()

    f0 = mean(YXMA,0)
    if otpt==1:
        V = cov(YXMA,None,0,1)
    else:  #multiple outputs
        V = diag(cov(YXMA,None,0,1))
    Vi = zeros((otpt, inpt))
    Vci = zeros((otpt, inpt))
    for i in range(inpt):
        for p in range(otpt):
            Vi[p,i] = 1.0/nSOBOL*sum(YXMA[:,p]*YXMC[i,:,p])-f0[p]**2
            Vci[p,i] = 1.0/nSOBOL*sum(YXMB[:,p]*YXMC[i,:,p])-f0[p]**2

    Si = zeros((otpt,inpt))
    Sti = zeros((otpt,inpt))
    for j in range(inpt):
        Si[:, j] = Vi[:, j] / V
        Sti[:, j] = 1 - Vci[:, j] / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(XMA, YXMA, otpt, nSOBOL)

# ----------------------  Analyze  ---------------------------

    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
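
Vi and Vci above are the classic pick-freeze (Sobol'/Saltelli) estimators: Si = Vi/V comes from products of the A-sample outputs with the C-sample outputs (B with column i swapped in from A), and Sti = 1 - Vci/V from the B-sample products. A single-output sketch on a hypothetical additive model, with plain Monte Carlo standing in for the LHS sampler:

import numpy as np

np.random.seed(0)

def model(x):
    # hypothetical additive test function: variance splits 1:4 between inputs
    return x[:, 0] + 2.0 * x[:, 1]

inpt, n = 2, 100000
A = np.random.randn(n, inpt)    # sample matrix A (XMA above)
B = np.random.randn(n, inpt)    # sample matrix B (XMB above)

yA, yB = model(A), model(B)
f0 = yA.mean()
V = yA.var()

Si = np.zeros(inpt)
Sti = np.zeros(inpt)
for i in range(inpt):
    C = B.copy()
    C[:, i] = A[:, i]           # pick column i from A, freeze the rest from B (XMC above)
    yC = model(C)
    Vi = (yA * yC).mean() - f0**2    # first-order partial variance of input i
    Vci = (yB * yC).mean() - f0**2   # variance of the complementary set
    Si[i] = Vi / V
    Sti[i] = 1.0 - Vci / V

print(Si)    # ~ [0.2, 0.8]
print(Sti)   # ~ [0.2, 0.8]; first-order and total agree for an additive model
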
Code example #13
def UP_MPP(driver):
    # Uses the MPP method for UP    
    
    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.
    
    # ----------------------  Setup  ---------------------------
    methd = 'MPP'
    method = '3'
    
    delta   = driver.MPPdelta
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    sigma   = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------

    G_temp = lambda x: run_list(driver, x)
    Tinv = lambda x: Dist.Dist(stvars, x, inpt)
   
    #USING THE HASOFER-LIND ALGORITHM  
    alpha = zeros((otpt, inpt))
    Xl = zeros((otpt, inpt))
    Xu = zeros((otpt, inpt))
    beta1 = zeros(otpt)
    beta2 = zeros(otpt)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    for k in range(otpt):
        print 'Testing output',k+1,'of',otpt    
        if krig == 1:        
            load("dmodel")        
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0]- predictor(x, dmodel)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: predictor(x, dmodel) - limstate[k][1]            
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - predictor(x, dmodel)
                G2 = lambda x: predictor(x, dmodel) - limstate[k][1]
        else:
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0] - G_temp(x)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: G_temp(x) - limstate[k][1]           
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - G_temp(x)            
                G2 = lambda x: G_temp(x) - limstate[k][1]

        I_sigma = ones((inpt))
        if cdist == 1 or cdist == 2:        
            u = zeros((inpt))        
            diff = 1.0
            while diff > .005:
                uk, alp = Hasofer.Hasofer(G, u, Tinv, k, delta, I_sigma, inpt, otpt)
                diff = abs(linalg.norm(uk) - linalg.norm(u))            
                u = uk
                print 'X =', Tinv(u)
            beta1[k] = linalg.norm(u)        
            beta2[k] = inf
            if cdist == 1:            
                Xl[k] = Tinv(u)            
                Xu[k] = ones((inpt)) * inf            
                alpha[k] = alp            
            else:            
                Xl[k] = ones((inpt)) * -inf            
                Xu[k] = Tinv(u)            
                alpha[k] = -alp
            PCC[k] = norm.cdf(beta1[k])
        else:        
            u = zeros((inpt))        
            diff = 1.0
            try:
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G1, u, Tinv, k, delta, I_sigma, inpt, otpt)        
                    diff = abs(linalg.norm(uk) - linalg.norm(u))
                    u = uk
                    print 'X =', Tinv(u)
                beta1[k] = linalg.norm(u)        
                Xl[k] = Tinv(u)
            except ValueError:
                beta1[k] = inf        
                Xl[k] = nan
            u = zeros((inpt))        
            diff = 1.0
            try:        
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G2, u, Tinv, k, delta, I_sigma, inpt, otpt)            
                    diff = abs(linalg.norm(uk) - linalg.norm(u))            
                    u = uk
                    print 'X =', Tinv(u)
                beta2[k] = linalg.norm(u) 
                Xu[k] = Tinv(u)
                alpha[k] = -alp
            except ValueError:
                beta2[k] = inf  
                Xu[k] = nan
                alpha[k] = nan 
            PCC[k] = norm.cdf(beta2[k]) - norm.cdf(-beta1[k])
    
    corr_mat = ones((otpt,otpt))
    for j in range(otpt):    
        for k in range(j,otpt):  
            corr_mat[j, k] = matrix(alpha[j]) * matrix(alpha[k]).transpose()        
            corr_mat[k, j] = corr_mat[j, k]   
     
    # ----------------------  Analyze  ---------------------------
    
    # There's not much to analyze; that already happened.  Just print out the results

    if otpt > 1 and not 0 in PCC[0:otpt]: 
        PCC[otpt] = mvstdnormcdf(-beta1, beta2, corr_mat)  

    # requested feature: replace all "NaNs" with "None"
    Xu=where(isnan(Xu), None, Xu)
    Xl=where(isnan(Xl), None, Xl)
    corr_mat=where(isnan(corr_mat), None, corr_mat)
    
    Results = {'MPPUpperBound':Xu ,'MPPLowerBound':Xl,'CorrelationMatrix': corr_mat, 'PCC': PCC}
    
    return Results
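
The while loops above implement the Hasofer-Lind (FORM) iteration: linearize the limit-state function in standard-normal space, project onto the linearized failure surface, and repeat until ||u|| stabilizes; beta = ||u|| is the reliability index and the one-sided PCC is Phi(beta). A sketch of that update on a hypothetical linear limit state, with a finite-difference gradient standing in for Hasofer.Hasofer:

import numpy as np
from scipy.stats import norm

def g(u):
    # hypothetical limit state in standard-normal space; failure when g(u) < 0
    return 3.0 - u[0] - 0.5 * u[1]

def grad(f, u, h=1e-6):
    # forward-difference gradient
    g0 = f(u)
    return np.array([(f(u + h * e) - g0) / h for e in np.eye(len(u))])

u = np.zeros(2)
diff = 1.0
while diff > .005:                    # same stopping rule as the loops above
    a = grad(g, u)
    uk = (np.dot(a, u) - g(u)) * a / np.dot(a, a)   # HL-RF projection step
    diff = abs(np.linalg.norm(uk) - np.linalg.norm(u))
    u = uk

beta1 = np.linalg.norm(u)             # reliability index
print(beta1)                          # ~ 3 / sqrt(1.25) ~ 2.683
print(norm.cdf(beta1))                # one-sided PCC, ~ 0.9964
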
Code example #14
File: SA_EFAST.py  Project: landunin/meta-core
def SA_EFAST(problem, driver):

    #[SI,STI] = EFAST(K,WANTEDN)
    # First order and total effect indices for a given model computed with
    # Extended Fourier Amplitude Sensitivity Test (EFAST).
    # Andrea Saltelli, Stefano Tarantola and Karen Chan. 1999
    # A quantitative model-independent method for global sensitivity analysis of model output.
    # Technometrics 41:39-56
    #
    # Input:
    # inpt : no. of input factors
    # WANTEDN : wanted no. of sample points
    #
    # Output:
    # SI[] : first order sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # OM[] : vector of inpt frequencies
    # OMI : frequency for the group of interest
    # OMCI[] : set of freq. used for the compl. group
    # X[] : parameter combination rank matrix
    # AC[],BC[]: Fourier coefficients
    # FI[] : random phase shift
    # V : total output variance (for each curve)
    # VI : partial var. of par. i (for each curve)
    # VCI : part. var. of the compl. set of par...
    # AV : total variance in the time domain
    # AVI : partial variance of par. i
    # AVCI : part. var. of the compl. set of par.
    # Y[] : model output
    # N : no. of runs on each curve

    # ----------------------  Setup  ---------------------------
    methd = 'EFAST'
    method = '10'

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    # ----------------------  Model  ---------------------------

    NR = 1  #: no. of search curves
    MI = 4  #: maximum number of Fourier coefficients that may be retained in calculating
    # the partial variances without interference between the assigned frequencies
    #
    # Computation of the frequency for the group of interest OMi and the no. of sample points N.
    OMi = int(floor((nEFAST / NR - 1) / (2 * MI) / inpt))
    N = 2 * MI * OMi + 1
    total_sims = N * NR * inpt
    sim = 0
    if (N * NR < 65):
        logging.error('sample size must be >= 65 per factor.')
        raise ValueError, 'sample size must be >= 65 per factor.'

    # Algorithm for selecting the set of frequencies. OMci(i), i=1:inpt-1, contains
    # the set of frequencies to be used by the complementary group.
    OMci = SETFREQ(N - 1, OMi / 2 / MI)
    # Loop over the inpt input factors.
    Si = zeros((otpt, otpt, inpt))
    Sti = zeros((otpt, otpt, inpt))
    for i in range(inpt):
        # Initialize AV,AVi,AVci to zero.
        AV = 0
        AVi = 0
        AVci = 0
        # Loop over the NR search curves.
        for L in range(NR):
            # Setting the vector of frequencies OM for the inpt factors.
            cj = 1
            OM = zeros(inpt)
            for j in range(inpt):
                if (j == i):
                    # For the factor of interest.
                    OM[i] = OMi
                else:
                    # For the complementary group.
                    OM[j] = OMci[cj]
                    cj = cj + 1
            # Setting the relation between the scalar variable S and the coordinates
            # {X(1),X(2),...X(inpt)} of each sample point.
            FI = zeros(inpt)
            for j in range(inpt):
                FI[j] = random.random() * 2 * pi  # random phase shift
            S_VEC = pi * (2 * arange(1, N + 1) - N - 1) / N
            OM_VEC = OM[range(inpt)]
            FI_MAT = transpose(array([FI] * N))
            ANGLE = matrix(OM_VEC).T * matrix(S_VEC) + matrix(FI_MAT)
            X = 0.5 + arcsin(sin(ANGLE.T)) / pi
            # Transform distributions from standard uniform to general.

            for j in range(inpt):
                if stvars[j].dist == 'NORM':
                    X[:, j] = norm.ppf(uniform.cdf(X[:, j], 0, 1),
                                       stvars[j].param[0], stvars[j].param[1])
                elif stvars[j].dist == 'LNORM':
                    X[:, j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1),
                                          stvars[j].param[1], 0,
                                          exp(stvars[j].param[0]))
                elif stvars[j].dist == 'BETA':
                    X[:, j] = beta.ppf(uniform.cdf(X[:, j], 0,
                                                   1), stvars[j].param[0],
                                       stvars[j].param[1], stvars[j].param[2],
                                       stvars[j].param[3] - stvars[j].param[2])
                elif stvars[j].dist == 'UNIF':
                    X[:, j] = uniform.ppf(uniform.cdf(X[:, j], 0,
                                                      1), stvars[j].param[0],
                                          stvars[j].param[1])

            # Do the N model evaluations.
            Y = zeros((N, otpt))
            if krig == 1:
                load("dmodel")
                Y = predictor(X, dmodel)
            else:
                values = []
                for p in range(N):
                    #                    sim += 1
                    #                    print 'Running simulation on test',sim,'of',total_sims
                    #                    Y[p] = run_model(driver, array(X[p])[0])
                    values.append(array(X[p])[0])
                Y = run_list(problem, driver, values)

            # Subtract the average value.
            Y = Y - kron(mean(Y, 0), ones((N, 1)))

            # Fourier coeff. at [1:OMi/2].
            NQ = int(N / 2) - 1
            N0 = NQ + 1
            COMPL = 0
            Y_VECP = Y[N0 + 1:] + Y[NQ::-1]
            Y_VECM = Y[N0 + 1:] - Y[NQ::-1]
            #            AC = zeros((int(ceil(OMi / 2)), otpt))
            #            BC = zeros((int(ceil(OMi / 2)), otpt))
            AC = zeros((OMi * MI, otpt))
            BC = zeros((OMi * MI, otpt))
            for j in range(int(ceil(OMi / 2)) + 1):
                ANGLE = (j + 1) * 2 * arange(1, NQ + 2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j] = (Y[N0] + matrix(C_VEC) * matrix(Y_VECP)) / N
                BC[j] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j]).T * matrix(AC[j]) + matrix(
                    BC[j]).T * matrix(BC[j])
            # Computation of V_{(ci)}.
            Vci = 2 * COMPL
            AVci = AVci + Vci
            # Fourier coeff. at [P*OMi, for P=1:MI].
            COMPL = 0
            # Do these need to be recomputed at all?
            #            Y_VECP = Y[N0 + range(NQ)] + Y[N0 - range(NQ)]
            #            Y_VECM = Y[N0 + range(NQ)] - Y[N0 - range(NQ)]
            for j in range(OMi, OMi * MI + 1, OMi):
                ANGLE = j * 2 * arange(1, NQ + 2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j - 1] = (Y[N0] + matrix(C_VEC) * matrix(Y_VECP)) / N
                BC[j - 1] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j - 1]).T * matrix(
                    AC[j - 1]) + matrix(BC[j - 1]).T * matrix(BC[j - 1])
            # Computation of V_i.
            Vi = 2 * COMPL
            AVi = AVi + Vi
            # Computation of the total variance in the time domain.
            AV = AV + matrix(Y).T * matrix(Y) / N
        # Computation of sensitivity indices.
        AV = AV / NR
        AVi = AVi / NR
        AVci = AVci / NR
        Si[:, :, i] = AVi / AV
        Sti[:, :, i] = 1 - AVci / AV

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------

#    if plotf == 1:
#        piecharts(inpt, otpt, Si, Sti, methd, output)
    if simple == 1:
        Si_t = zeros((inpt, otpt))
        for p in range(inpt):
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T
    if simple == 1:
        Sti_t = zeros((inpt, otpt))
        for p in range(inpt):
            Sti_t[p] = diag(Sti[:, :, p])
        Sti = Sti_t.T
    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
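
The sampling step above builds the EFAST search curve x_j(s) = 0.5 + arcsin(sin(OM[j]*s + FI[j]))/pi, which sweeps every factor through (0, 1) at its own frequency so that the Fourier coefficients of the output at OMi and its harmonics isolate the factor of interest. A sketch of the curve construction alone, with hypothetical frequencies and no model evaluation:

import numpy as np

np.random.seed(0)
inpt = 3
MI, OMi = 4, 8                 # max harmonic and frequency of interest
N = 2 * MI * OMi + 1           # points per search curve; 65, the minimum enforced above

OM = np.array([OMi, 1, 1])     # complementary frequencies stay at or below
                               # OMi/(2*MI) = 1 to avoid harmonic interference
FI = 2 * np.pi * np.random.rand(inpt)              # random phase shifts
s = np.pi * (2 * np.arange(1, N + 1) - N - 1) / N  # S_VEC above

# each column sweeps one factor through (0, 1) at its assigned frequency
X = 0.5 + np.arcsin(np.sin(np.outer(s, OM) + FI)) / np.pi
print(X.shape)                 # (65, 3); rows go to the model, whose output is then Fourier-analyzed
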
Code example #15
File: UP_TS.py  Project: dyao-vu/meta-core
def UP_TS(driver):
    # Uses the TS method for UP

    # ----------------------  Setup  ---------------------------

    methd = 'TS'
    method = '2'

    delta   = driver.TSdelta
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    sigma   = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    rho = identity(inpt)

    # ----------------------  Model  ---------------------------

    if krig == 1:
        load("dmodel")
        G_mean = predictor(mu, dmodel).T
        G_k = lambda x: predictor(x, dmodel)
        F1 = taylorseries.taylorseries(G_k, mu, delta*array(sigma), inpt, otpt)
    else:
#        G_mean = run_model(driver, mu)
#        G = lambda x: run_model(driver, x)
        values = [mu]
        values.extend(taylorseries.pretaylorseries(mu, delta*array(sigma), inpt))
        out = iter(run_list(driver, values))
        G_mean = out.next()
        G = lambda x: out.next()
        F1 = taylorseries.taylorseries(G, mu, delta*array(sigma), inpt, otpt)

    print 'Taylor Series:\n',F1

    covar_m = zeros((otpt, otpt))
    for j in range(otpt):
        for k in range(j,otpt):
            for l in range(inpt):
                for m in range(inpt):
                    covar_m[j, k] = covar_m[j, k] + F1[l, j] * F1[m, k] * sigma[l] * sigma[m] * rho[l, m]
                covar_m[k, j] = covar_m[j, k]

    CovarianceMatrix = covar_m.transpose()
    print 'Covariance Matrix:\n',CovarianceMatrix
    Moments = {'Mean': G_mean, 'Variance': diag(CovarianceMatrix), 'Skewness': zeros((otpt, 1)), 'Kurtosis': 3 * ones((otpt, 1))}

    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    dtype = [0]*otpt
    Inv1 = [0]*otpt
    Inv2 = [0]*otpt
    m1 = [0]*otpt
    m2 = [0]*otpt
    a1 = [0]*otpt
    a2 = [0]*otpt
    alph = [0]*otpt
    beta = [0]*otpt
    lo = [0]*otpt
    hi = [0]*otpt
    C_Y_pdf = [0]*otpt

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0,otpt):
        PCC[k],dtype[k],Inv1[k],m1[k],m2[k],a1[k],a2[k],alph[k],beta[k],lo[k],hi[k] = pearscdf.pearscdf(limstate[k], Moments['Mean'][k], sqrt(CovarianceMatrix[k, k]), Moments['Skewness'][k], Moments['Kurtosis'][k], methd, k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(dtype[k],limstate[k],Moments['Mean'][k],Moments['Variance'][k],numbins)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())
    Distribution = {'PearsonType': dtype, 'm1': m1, 'm2': m2, 'a1': a1, 'a2': a2, 'Complexity': C_Y_pdf}
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt)-inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'Plotting': Plotting, 'PCC': PCC}

    return Results
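
Because the TS method fixes skewness at 0 and kurtosis at 3, the Pearson-family fit done by pearscdf above degenerates to a normal distribution, and each output's PCC is simply the normal probability mass between the limit-state bounds. A sketch of that special case with illustrative numbers (limstate[k] = [lower, upper]):

from math import sqrt
from scipy.stats import norm

mean = 10.0                    # Moments['Mean'][k]
std = sqrt(4.0)                # sqrt(CovarianceMatrix[k, k])
lo_limit, hi_limit = 7.0, 14.0 # limstate[k]

# normal special case of the Pearson-family fit: P(lower < Y < upper)
pcc = norm.cdf(hi_limit, mean, std) - norm.cdf(lo_limit, mean, std)
print(pcc)                     # ~ 0.9104
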
Code example #16
File: SA_SOBOL.py  Project: landunin/meta-core
def SA_SOBOL(problem, driver):
    # Uses the Sobol' method for SA.
    # Input:
    # inpt : no. of input factors
    # N: number of Sobol' samples
    #
    # Output:
    # SI[] : sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------

    methd = 'SOBOL'
    method = '7'

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    # ----------------------  Model  ---------------------------
    value = asarray(LHS.LHS(2 * inpt, nSOBOL))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:, j] = norm.ppf(uniform.cdf(value[:, j], 0, 1),
                                   stvars[j].param[0], stvars[j].param[1])
            value[:,
                  j + inpt] = norm.ppf(uniform.cdf(value[:, j + inpt], 0, 1),
                                       stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:, j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[1], 0,
                                      exp(stvars[j].param[0]))
            value[:, j + inpt] = lognorm.ppf(
                uniform.cdf(value[:, j + inpt], 0, 1), stvars[j].param[1], 0,
                exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:, j] = beta.ppf(uniform.cdf(value[:, j], 0,
                                               1), stvars[j].param[0],
                                   stvars[j].param[1], stvars[j].param[2],
                                   stvars[j].param[3] - stvars[j].param[2])
            value[:,
                  j + inpt] = beta.ppf(uniform.cdf(value[:, j + inpt], 0,
                                                   1), stvars[j].param[0],
                                       stvars[j].param[1], stvars[j].param[2],
                                       stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:, j] = uniform.ppf(uniform.cdf(value[:, j], 0, 1),
                                      stvars[j].param[0], stvars[j].param[1])
            value[:, j + inpt] = uniform.ppf(
                uniform.cdf(value[:, j + inpt], 0, 1), stvars[j].param[0],
                stvars[j].param[1])

    values = []
    XMA = value[0:nSOBOL, 0:inpt]
    XMB = value[0:nSOBOL, inpt:2 * inpt]
    YXMA = zeros((nSOBOL, otpt))
    YXMB = zeros((nSOBOL, otpt))
    if krig == 1:
        load("dmodel")
        YXMA = predictor(XMA, dmodel)
        YXMB = predictor(XMB, dmodel)
    else:
        values.extend(list(XMA))
        values.extend(list(XMB))

    YXMC = zeros((inpt, nSOBOL, otpt))
    for i in range(inpt):
        XMC = deepcopy(XMB)
        XMC[:, i] = deepcopy(XMA[:, i])
        if krig == 1:
            YXMC[i] = predictor(XMC, dmodel)
        else:
            values.extend(list(XMC))

    if krig != 1:
        out = iter(run_list(problem, driver, values))
        for i in range(nSOBOL):
            YXMA[i] = out.next()
        for i in range(nSOBOL):
            YXMB[i] = out.next()
        for i in range(inpt):
            for j in range(nSOBOL):
                YXMC[i, j] = out.next()

    f0 = mean(YXMA, 0)
    if otpt == 1:
        V = cov(YXMA, None, 0, 1)
    else:  #multiple outputs
        V = diag(cov(YXMA, None, 0, 1))
    Vi = zeros((otpt, inpt))
    Vci = zeros((otpt, inpt))
    for i in range(inpt):
        for p in range(otpt):
            Vi[p,
               i] = 1.0 / nSOBOL * sum(YXMA[:, p] * YXMC[i, :, p]) - f0[p]**2
            Vci[p,
                i] = 1.0 / nSOBOL * sum(YXMB[:, p] * YXMC[i, :, p]) - f0[p]**2

    Si = zeros((otpt, inpt))
    Sti = zeros((otpt, inpt))
    for j in range(inpt):
        Si[:, j] = Vi[:, j] / V
        Sti[:, j] = 1 - Vci[:, j] / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(XMA, YXMA, otpt, nSOBOL)

# ----------------------  Analyze  ---------------------------

    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
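
The repeated dist.ppf(uniform.cdf(value, 0, 1), ...) pattern above is inverse-transform sampling: the LHS sampler emits stratified uniforms on (0, 1), uniform.cdf is the identity on that interval (it only guards the range), and each marginal's percent-point function maps the uniforms onto the target distribution. A sketch of the NORM and LNORM branches with hypothetical parameters:

import numpy as np
from scipy.stats import norm, lognorm, uniform

np.random.seed(1)
u = np.random.rand(10000)          # stand-in for one LHS column on (0, 1)

# NORM branch: param[0] = mean, param[1] = standard deviation
x_norm = norm.ppf(uniform.cdf(u, 0, 1), 5.0, 2.0)

# LNORM branch: param[0] = mean and param[1] = std of log(X), hence exp(param[0]) as scale
x_lnorm = lognorm.ppf(uniform.cdf(u, 0, 1), 0.5, 0, np.exp(1.0))

print(x_norm.mean())               # ~ 5.0
print(np.log(x_lnorm).std())       # ~ 0.5
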
Code example #17
File: UP_MPP.py  Project: landunin/meta-core
def UP_MPP(problem, driver):
    # Uses the MPP method for UP    
    
    # This routine has been updated as part of refactoring code before the port
    # from MATLAB to Python/NumPy/SciPy.  Sections of PCC_Computation that apply
    # this method have been moved here.
    
    # ----------------------  Setup  ---------------------------
    methd = 'MPP'
    method = '3'
    
    delta   = driver.MPPdelta
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    sigma   = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    numbins = driver.numbins
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------

    G_temp = lambda x: run_list(problem, driver, x)
    Tinv = lambda x: Dist.Dist(stvars, x, inpt)
   
    #USING THE HASOFER-LIND ALGORITHM  
    alpha = zeros((otpt, inpt))
    Xl = zeros((otpt, inpt))
    Xu = zeros((otpt, inpt))
    beta1 = zeros(otpt)
    beta2 = zeros(otpt)
    if otpt>1:
        PCC = [0]*(otpt+1)
    else:
        PCC = [0]*otpt
    for k in range(otpt):
        print 'Testing output',k+1,'of',otpt    
        if krig == 1:        
            load("dmodel")        
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0]- predictor(x, dmodel)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: predictor(x, dmodel) - limstate[k][1]            
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - predictor(x, dmodel)
                G2 = lambda x: predictor(x, dmodel) - limstate[k][1]
        else:
            if limstate[k][1] == inf:            
                cdist = 1            
                G = lambda x: limstate[k][0] - G_temp(x)            
            elif limstate[k][0] == -inf:            
                cdist = 2            
                G = lambda x: G_temp(x) - limstate[k][1]           
            else:            
                cdist = 3            
                G1 = lambda x: limstate[k][0] - G_temp(x)            
                G2 = lambda x: G_temp(x) - limstate[k][1]

        I_sigma = ones((inpt))
        if cdist == 1 or cdist == 2:        
            u = zeros((inpt))        
            diff = 1.0
            while diff > .005:
                uk, alp = Hasofer.Hasofer(G, u, Tinv, k, delta, I_sigma, inpt, otpt)
                diff = abs(linalg.norm(uk) - linalg.norm(u))            
                u = uk
                print 'X =', Tinv(u)
            beta1[k] = linalg.norm(u)        
            beta2[k] = inf
            if cdist == 1:            
                Xl[k] = Tinv(u)            
                Xu[k] = ones((inpt)) * inf            
                alpha[k] = alp            
            else:            
                Xl[k] = ones((inpt)) * -inf            
                Xu[k] = Tinv(u)            
                alpha[k] = -alp
            PCC[k] = norm.cdf(beta1[k])
        else:        
            u = zeros((inpt))        
            diff = 1.0
            try:
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G1, u, Tinv, k, delta, I_sigma, inpt, otpt)        
                    diff = abs(linalg.norm(uk) - linalg.norm(u))
                    u = uk
                    print 'X =', Tinv(u)
                beta1[k] = linalg.norm(u)        
                Xl[k] = Tinv(u)
            except ValueError:
                beta1[k] = inf        
                Xl[k] = nan
            u = zeros((inpt))        
            diff = 1.0
            try:        
                while diff > .005:
                    uk, alp = Hasofer.Hasofer(G2, u, Tinv, k, delta, I_sigma, inpt, otpt)            
                    diff = abs(linalg.norm(uk) - linalg.norm(u))            
                    u = uk
                    print 'X =', Tinv(u)
                beta2[k] = linalg.norm(u) 
                Xu[k] = Tinv(u)
                alpha[k] = -alp
            except ValueError:
                beta2[k] = inf  
                Xu[k] = nan
                alpha[k] = nan 
            PCC[k] = norm.cdf(beta2[k]) - norm.cdf(-beta1[k])
    
    corr_mat = ones((otpt,otpt))
    for j in range(otpt):    
        for k in range(j,otpt):  
            corr_mat[j, k] = matrix(alpha[j]) * matrix(alpha[k]).transpose()        
            corr_mat[k, j] = corr_mat[j, k]   
     
    # ----------------------  Analyze  ---------------------------
    
    # There's not much to analyze; that already happened.  Just print out the results

    if otpt > 1 and not 0 in PCC[0:otpt]: 
        PCC[otpt] = mvstdnormcdf(-beta1, beta2, corr_mat)  

    # requested feature: replace all "NaNs" with "None"
    Xu=where(isnan(Xu), None, Xu)
    Xl=where(isnan(Xl), None, Xl)
    corr_mat=where(isnan(corr_mat), None, corr_mat)
    
    Results = {'MPPUpperBound':Xu ,'MPPLowerBound':Xl,'CorrelationMatrix': corr_mat, 'PCC': PCC}
    
    return Results
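
The final PCC entry above is a rectangle probability of a correlated standard-normal vector: output k contributes the interval (-beta1[k], beta2[k]) and corr_mat couples outputs through the alpha vectors. A sketch with illustrative betas that uses scipy's multivariate normal CDF and inclusion-exclusion in place of mvstdnormcdf:

import numpy as np
from scipy.stats import norm, multivariate_normal

beta1 = np.array([2.0, 1.5])      # distances to the lower limit states
beta2 = np.array([2.5, 3.0])      # distances to the upper limit states
corr_mat = np.array([[1.0, 0.3],
                     [0.3, 1.0]]) # corr_mat[j, k] = alpha_j . alpha_k

# per-output two-sided PCC, as in the cdist == 3 branch above
pcc = norm.cdf(beta2) - norm.cdf(-beta1)

# joint PCC: P(-beta1 < U < beta2) for U ~ N(0, corr_mat), via inclusion-exclusion
mvn = multivariate_normal(mean=np.zeros(2), cov=corr_mat)
joint = (mvn.cdf(beta2)
         - mvn.cdf([-beta1[0], beta2[1]])
         - mvn.cdf([beta2[0], -beta1[1]])
         + mvn.cdf(-beta1))
print(pcc)                        # ~ [0.971, 0.932]
print(joint)                      # joint probability of satisfying both limit states
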
Code example #18
def UP_UDR(problem, driver):
    # Uses the UDR method for UP

    methd = 'UDR'
    method = 5

    mu = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt = len(driver.inputs)
    input = driver.inputNames
    krig = driver.krig
    limstate = driver.limstate
    lrflag = driver.lrflag
    n_meta = driver.n_meta
    nEFAST = driver.nEFAST
    nSOBOL = driver.nSOBOL
    nMCS = driver.nMCS
    numbins = driver.numbins
    nodes = driver.nodes
    order = driver.order
    otpt = len(driver.outputNames)
    output = driver.outputNames
    p = driver.p
    plotf = 0
    r = driver.r
    simple = driver.simple
    stvars = driver.stvars

    node, weight = params.params(method, nodes, inpt, stvars)

    # ----------------------  Model  ---------------------------
    # set_printoptions(precision=4)
    # set_printoptions(suppress=True)

    x = kron(mu, ones((inpt * nodes[0], 1)))
    for ii in range(0, inpt):
        k = ii * nodes[ii]
        l = (ii + 1) * nodes[ii]
        x[k:l, ii] = node[ii, :]


#    G_mu = run_model(driver, mu)
    values = [mu]

    G_s = zeros((inpt, max(nodes), otpt))
    for k, inputname in enumerate(
            driver._json_tree['Configurations']['Configuration']
        ['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        if krig == 1:
            load('dmodel')
            for j in range(0, nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
                G_s[k, j] = predictor(X, dmodel)
        else:
            for j in range(0, nodes[k]):
                var = k * nodes[k] + j
                X = x[var, :]
                #                print 'Running simulation on node',j,'of input',inputname['Name']
                #                G_s[k, j] = run_model(driver, X)
                values.append(X)

    out = iter(run_list(problem, driver, values))
    G_mu = out.next()
    for k, inputname in enumerate(
            driver._json_tree['Configurations']['Configuration']
        ['PCCInputArguments']['StochasticInputs']['InputDistributions']):
        for j in range(0, nodes[k]):
            G_s[k, j] = out.next()

    G_mean = zeros(otpt)
    G_kurt = zeros(otpt)
    G_skew = zeros(otpt)
    G_sigma = zeros(otpt)
    covar_m = zeros((otpt, otpt))
    gs = zeros(otpt)
    gk = zeros(otpt)
    moms = []
    for l in range(0, otpt):
        moms.append(
            newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                      G_mean[l]))
        G_mean[l] = moment(1, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                           G_mean[l])
    for l in range(0, otpt):
        moms.append(
            newmoment(inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                      G_mean[l]))
        G_sigma[l] = moment(2, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                            G_mean[l])
        gs[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                       G_mean[l])
        G_skew[l] = moment(3, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                           G_mean[l]) / G_sigma[l]**1.5
        gk[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                       G_mean[l])
        G_kurt[l] = moment(4, inpt, nodes[0], weight, G_s[:, :, l], G_mu[l],
                           G_mean[l]) / G_sigma[l]**2
        for j in range(l, otpt):
            covar_m[l, j] = moment2(1, inpt, nodes[0], weight, G_s[:, :, l], l,
                                    G_s[:, :, j], j, G_mu, G_mean)
            covar_m[j, l] = covar_m[l, j]

    CovarianceMatrix = covar_m.transpose()
    Moments = {
        'Mean': G_mean,
        'Variance': diag(CovarianceMatrix),
        'Skewness': G_skew,
        'Kurtosis': G_kurt
    }

    # ----------------------  Analyze  ---------------------------

    # Calculate the PCC for the UDR method
    if otpt > 1:
        PCC = [0] * (otpt + 1)
    else:
        PCC = [0] * otpt
    dtype = [0] * otpt
    Inv1 = [0] * otpt
    Inv2 = [0] * otpt
    m1 = [0] * otpt
    m2 = [0] * otpt
    a1 = [0] * otpt
    a2 = [0] * otpt
    alph = [0] * otpt
    beta = [0] * otpt
    lo = [0] * otpt
    hi = [0] * otpt
    C_Y_pdf = [0] * otpt

    if any(Moments['Variance'] == 0):
        print "Warning: One or more outputs does not vary over given parameter variation."

    for k in range(0, otpt):
        PCC[k], dtype[k], Inv1[k], m1[k], m2[k], a1[k], a2[k], alph[k], beta[
            k], lo[k], hi[k] = pearscdf.pearscdf(limstate[k],
                                                 Moments['Mean'][k],
                                                 sqrt(CovarianceMatrix[k, k]),
                                                 Moments['Skewness'][k],
                                                 Moments['Kurtosis'][k], methd,
                                                 k, output)
        if dtype[k] is not None:
            if iscomplex(a1[k]):
                a1[k] = [a1[k].real, a1[k].imag]
            if iscomplex(a2[k]):
                a2[k] = [a2[k].real, a2[k].imag]

            C_Y_pdf[k] = estimate_complexity.with_distribution(
                dtype[k], limstate[k], Moments['Mean'][k],
                Moments['Variance'][k], numbins)

    sigma_mat = matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore'
           )  #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix = CovarianceMatrix / multiply(sigma_mat,
                                                    sigma_mat.transpose())
    Distribution = {
        'PearsonType': dtype,
        'm1': m1,
        'm2': m2,
        'a1': a1,
        'a2': a2,
        'Complexity': C_Y_pdf
    }
    Plotting = {'alpha': alph, 'beta': beta, 'lo': lo, 'hi': hi}

    CorrelationMatrix = where(isnan(CorrelationMatrix), None,
                              CorrelationMatrix)

    if otpt > 1 and not 0 in PCC[0:otpt]:
        lower = zeros(otpt) - inf
        PCC[otpt] = mvstdnormcdf(lower, Inv1, CorrelationMatrix)

    Results = {
        'Moments': Moments,
        'CorrelationMatrix': CorrelationMatrix,
        'CovarianceMatrix': CovarianceMatrix,
        'Distribution': Distribution,
        'Plotting': Plotting,
        'PCC': PCC
    }

    return Results
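
UDR rests on the univariate decomposition g(x) ~= sum_i g(mu_1, ..., x_i, ..., mu_n) - (inpt - 1)*g(mu): each cut varies one input along its quadrature nodes, which is exactly how x was packed above, and the moment/newmoment calls combine the per-cut quadrature sums. A sketch of the mean and variance for a hypothetical two-input model with unit-variance normal inputs, Gauss-Hermite nodes standing in for params.params:

import numpy as np

def g(x):
    # hypothetical single-output model of two independent N(mu_i, 1) inputs
    return x[0]**2 + 3.0 * x[1]

mu = np.array([0.0, 1.0])
node, weight = np.polynomial.hermite_e.hermegauss(3)  # stand-in for params.params
weight = weight / weight.sum()                        # probabilists' weights, normalized to 1

inpt, nn = len(mu), len(node)
g_mu = g(mu)

# one-dimensional cuts through the mean, exactly how x is packed above
G_s = np.zeros((inpt, nn))
for i in range(inpt):
    for j in range(nn):
        x = mu.copy()
        x[i] = mu[i] + node[j]    # unit input sigma assumed for this sketch
        G_s[i, j] = g(x)

# UDR mean: per-cut expectations, correcting the (inpt - 1)-fold counting of g(mu)
mean_udr = G_s.dot(weight).sum() - (inpt - 1) * g_mu

# the cuts depend on disjoint independent inputs, so their variances add
Ei = G_s.dot(weight)              # E[Y_i] by quadrature
Ei2 = (G_s**2).dot(weight)        # E[Y_i^2] by quadrature
var_udr = (Ei2 - Ei**2).sum()

print(mean_udr)                   # 4.0, exact for this polynomial model
print(var_udr)                    # 11.0
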