Example no. 1
def is_posdefinite(matrix):
    """
    The test for positive definiteness uses the determinants of the nested 
    leading principal minor matrices and is taken from Varian, "Microeconomic 
    Analysis". Returns True if the input matrix is positive definite, False 
    otherwise. 
    """

    flag = True

    ndim = squaredim(matrix, 'is_posdefinite')

    for k in range(0, ndim):
        '''# Test No. 1 - Necessary condition for positive SEMI-definiteness:
        if matrix[k][k] <= 0.0:
            flag = False
            break'''
        # (Test No. 2 -) Sufficient condition for positive definiteness:
        minor = Matrix()
        kp1 = k + 1
        minor.zero(kp1, kp1)
        for j in range(0, kp1):
            for i in range(0, kp1): minor[j][i] = matrix[j][i]
        x = determinant(minor)
        del minor
        if x <= 0.0:
            flag = False
            break

    return flag
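
A minimal, self-contained sketch of the same leading-principal-minor test, using plain nested lists and a tiny cofactor-expansion determinant instead of the Matrix class and the determinant()/squaredim() helpers assumed above (_det and is_posdef_plain below are hypothetical names used only for illustration):

def _det(m):
    # determinant by cofactor expansion along the first row (fine for tiny matrices)
    if len(m) == 1:
        return m[0][0]
    return sum((-1.0) ** j * m[0][j] * _det([row[:j] + row[j + 1:] for row in m[1:]])
               for j in range(len(m)))

def is_posdef_plain(m):
    # positive definite iff every leading principal minor has a positive determinant
    return all(_det([row[:k + 1] for row in m[:k + 1]]) > 0.0 for k in range(len(m)))

print(is_posdef_plain([[ 2.0, -1.0,  0.0],
                       [-1.0,  2.0, -1.0],
                       [ 0.0, -1.0,  2.0]]))   # True  (classic SPD matrix)
print(is_posdef_plain([[ 1.0,  2.0],
                       [ 2.0,  1.0]]))         # False (second minor is 1 - 4 = -3)
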
Example no. 2
def ludcmp_chol(matrix, test=False):
    """
    Decomposes/factorizes a square, symmetric, positive definite input matrix 
    into one lower and one upper triangular matrix (Cholesky decomposition). 
    The upper matrix is the transpose of the lower matrix. 
    
    NB. It only works on square, symmetric, positive definite matrices!!! 
    """

    if test:
        errortext1 = "Input matrix not positive definite in ludcmp_chol!"
        assert is_posdefinite(matrix), errortext1
        errortext2 = "Input matrix not symmetric in ludcmp_chol!"
        assert is_symmetrical(matrix), errortext2

    ndim = squaredim(matrix, 'ludcmp_chol')

    # Create new square matrix of the same size as the input matrix:
    clower = Matrix()
    clower.zero(ndim, ndim)

    # Perform the necessary manipulations:
    for k in range(0, ndim):
        kp1 = k + 1
        for j in range(0, kp1):
            summ = 0.0
            for i in range(0, j): summ += clower[k][i]*clower[j][i]
            if j == k: clower[k][j] = sqrt(matrix[k][j] - summ)
            else:      clower[k][j] = (matrix[k][j]-summ) / float(clower[j][j])

    clowert = transposed(clower)

    return clower, clowert
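
The same Cholesky recurrence written as a self-contained sketch on plain nested lists (no Matrix class; chol_plain is a hypothetical helper), factoring a small symmetric positive definite matrix and multiplying the factor back to check that L times L-transpose reproduces the input:

from math import sqrt

def chol_plain(a):
    # lower triangular Cholesky factor of a symmetric, positive definite a
    n = len(a)
    low = [[0.0] * n for _ in range(n)]
    for k in range(n):
        for j in range(k + 1):
            summ = sum(low[k][i] * low[j][i] for i in range(j))
            if j == k: low[k][j] = sqrt(a[k][j] - summ)
            else:      low[k][j] = (a[k][j] - summ) / low[j][j]
    return low

a   = [[4.0, 2.0, 2.0],
       [2.0, 5.0, 3.0],
       [2.0, 3.0, 6.0]]
low = chol_plain(a)
n   = len(a)
rebuilt = [[sum(low[i][k] * low[j][k] for k in range(n)) for j in range(n)]
           for i in range(n)]
print(low)       # [[2.0, 0.0, 0.0], [1.0, 2.0, 0.0], [1.0, 1.0, 2.0]]
print(rebuilt)   # reproduces a up to rounding
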
Example no. 3
    def _jake3(self, t, y, hf=0.5**20, ha=0.5**40):
        """
        Auxiliary function/method used by implicit methods to compute 
        the Jacobian. Cannot be used for dicts!!!!!
        """

        yplus  = array('d', [])
        yminus = array('d', [])
        for n in self._sequence:
            why = y[n]
            yplus.append( (1.0 + hf) * why + ha)
            yminus.append((1.0 - hf) * why - ha)

        jacob = Matrix()
        for n in self._sequence:
            derivs = array('d', [])
            for m in self._sequence:
                yn    = deepcopy(y)
                yp    = yplus[m]
                yn[m] = yp
                fp    = self._model(t, yn)[n]
                ym    = yminus[m]
                yn[m] = ym
                fm    = self._model(t, yn)[n]
                deriv = (fp-fm) / (yp-ym)
                derivs.append(deriv)
            jacob.append(derivs)

        return jacob
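
A self-contained sketch of the same finite-difference idea, with plain lists instead of the Matrix class and a hypothetical two-state model function standing in for self._model: each state variable is perturbed with the same mixed relative/absolute offsets used above, and the Jacobian entries are symmetric difference quotients. num_jacobian and model are illustrative names only.

def num_jacobian(model, t, y, hf=0.5 ** 20, ha=0.5 ** 40):
    # model(t, y) -> list of derivatives; returns jac[n][m] = df[n]/dy[m]
    ndim   = len(y)
    yplus  = [(1.0 + hf) * y[m] + ha for m in range(ndim)]
    yminus = [(1.0 - hf) * y[m] - ha for m in range(ndim)]
    jac = [[0.0] * ndim for _ in range(ndim)]
    for m in range(ndim):
        yp, ym = list(y), list(y)
        yp[m], ym[m] = yplus[m], yminus[m]
        fp, fm = model(t, yp), model(t, ym)
        for n in range(ndim):
            jac[n][m] = (fp[n] - fm[n]) / (yplus[m] - yminus[m])
    return jac

def model(t, y):                       # hypothetical model: y0' = -y1, y1' = y0*y1
    return [-y[1], y[0] * y[1]]

print(num_jacobian(model, 0.0, [1.0, 2.0]))   # close to [[0.0, -1.0], [2.0, 1.0]]
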
Example no. 4
 def _fidfi(y):
     # fi first
     f    = self._model(tnext, y)
     fi   = deepcopy(f)
     beta = deepcopy(fi)
     for n in self._sequence:
         fi[n]   = 137.0*y[n] - 300.0*s[n] + \
                   300.0*self.__prev[3][n] - \
                   200.0*self.__prev[2][n] + \
                    75.0*self.__prev[1][n] - \
                    12.0*self.__prev[0][n] -  60.0*h*f[n]
         beta[n] = - fi[n]
     # then fid = dfi[n]/dy[m] = 137*dy[n]/dy[m] - 60*h*df[n]/dy[m], 
     # or  - 60*h*df[n]/dy[m]; n != m, and
     # 137 - 60*h*df[n]/dy[m]; n == m
     jacob = self._jake3(tnext, y, hf, ha)
     fid   = Matrix()
     for n in self._sequence:
         derivs = array('d', [])
         for m in self._sequence:
             deriv = 137.0*krond(n, m) - 60.0*h*jacob[n][m]
             derivs.append(deriv)
         fid.append(derivs)
     alpha = Matrix(fid)
     return alpha, beta
Example no. 5
    def antithet_sample(self, nparams):
        """
        Generates a matrix having two rows, the first row being a list of 
        uniformly distributed random numbers p in [0.0, 1.0], each row 
        containing nparams elements. The second row contains the corresponding 
        antithetic sample with the complements 1-p. 
        """

        rstream = self.rstream

        antimatrix = Matrix()  # antimatrix belongs to the Matrix class
        for k in range(0, nparams):
            pvector = array('d', [])
            p1  =  rstream.runif01()
            pvector.append(p1)
            dum =  rstream.runif01()  # For synchronization only - never used
            p2  =  1.0 - p1
            p2 = kept_within(0.0, p2, 1.0) # Probabilities must be in [0.0, 1.0]
            pvector.append(p2)
            antimatrix.append(pvector)

        # Matrix must be transposed in order for each sample to occupy one row.
        # Sample vector k is in antimatrix[k], where k is 0 or 1
        antimatrix.transpose()

        return antimatrix
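
The reason for carrying the complements 1-p is variance reduction: when the quantity of interest is a monotone transform of p, averaging over the pair (p, 1-p) cancels much of the sampling noise. A self-contained sketch with only the standard random and statistics modules (independent of rstream and the Matrix class), estimating E[exp(U)] for U uniform on [0, 1]:

import random
from math import exp
from statistics import mean, pvariance

random.seed(12345)
npairs = 20000

# true value of E[exp(U)] is e - 1, approximately 1.71828
indep = [0.5 * (exp(random.random()) + exp(random.random())) for _ in range(npairs)]
anti  = []
for _ in range(npairs):
    p = random.random()
    anti.append(0.5 * (exp(p) + exp(1.0 - p)))

print(mean(indep), pvariance(indep))   # both means are close to 1.71828...
print(mean(anti),  pvariance(anti))    # ...but the antithetic variance is much smaller
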
Example no. 6
def scaled(matrix, scalar):
    """
    Multiply matrix by scalar. 
    """

    sized(matrix, 'scaled')

    copymx = deepcopy(matrix)

    return Matrix(xmap((lambda x: scalar * x), copymx))
Example no. 7
 def _fidfi(y):
     # fi first
     f    = self._model(tnext, y)
     fi   = deepcopy(f)
     beta = deepcopy(fi)
     for n in self._sequence:
         fi[n]   = y[n] - s[n] - h*f[n]
         beta[n] = - fi[n]
     # then fid = dfi[n]/dy[m] = dy[n]/dy[m] - h*df[n]/dy[m], or
     # - h*df[n]/dy[m]; n != m, and
     # 1.0 - h*df[n]/dy[m]; n == m
     jacob = self._jake3(tnext, y, hf, ha)
     fid   = Matrix()
     for n in self._sequence:
         derivs = array('d', [])
         for m in self._sequence:
             deriv = 1.0*krond(n, m) - h*jacob[n][m]
             derivs.append(deriv)
         fid.append(derivs)
     alpha = Matrix(fid)
     return alpha, beta
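
The alpha/beta pair returned here is set up for a Newton step: fi is the residual of the implicit (backward Euler) update y = s + h*f(t, y), alpha is its Jacobian I - h*df/dy, and each iteration solves alpha*delta = beta = -fi. A scalar, self-contained sketch of that loop; the model y' = -5*y and the values of s and h are illustrative only, not taken from the surrounding class.

def f(y):    return -5.0 * y          # model derivative
def dfdy(y): return -5.0              # its Jacobian (a scalar here)

s, h = 1.0, 0.1                       # previous value and step size
y = s                                 # starting guess for the next time point
for _ in range(8):
    fi    = y - s - h * f(y)          # residual of the backward Euler update
    beta  = -fi
    alpha = 1.0 - h * dfdy(y)         # dfi/dy
    delta = beta / alpha              # the linear solve is a scalar division here
    y    += delta
    if abs(delta) < 1.0e-12: break
print(y)                              # converges to s/(1 + 5*h) = 2/3
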
Example no. 8
def ludcmp_crout(matrix):
    """
    Decomposes/factorizes square input matrix into a lower and an 
    upper matrix using Crout's algorithm WITHOUT pivoting. 
    
    NB. It only works for square matrices!!! 
    """

    ndim = squaredim(matrix, 'ludcmp_crout')

    # Copy object instance to new matrix in order for the original instance
    # not to be destroyed.
    # Create two new square matrices of the same size as the input matrix:
    # one unity matrix (to be the lower matrix), one zero matrix (to be
    # the upper matrix)
    copymx = deepcopy(matrix)
    lower = Matrix()
    lower.unity(ndim)
    upper = Matrix()
    upper.zero(ndim, ndim)
    permlist = list(range(0, ndim))

    # Perform the necessary manipulations:
    for j in range(0, ndim):
        iu = 0
        while iu <= j:
            k = 0
            summ = 0.0
            while k < iu:
                summ += lower[iu][k] * upper[k][j]
                k = k + 1
            upper[iu][j] = copymx[iu][j] - summ
            iu = iu + 1
        il = j + 1
        while il < ndim:
            k = 0
            summ = 0.0
            while k < j:
                summ += lower[il][k] * upper[k][j]
                k = k + 1
            divisor = float(upper[j][j])
            if abs(divisor) < TINY: divisor = fsign(divisor) * TINY
            lower[il][j] = (copymx[il][j] - summ) / divisor
            il = il + 1

    parity = 1.0

    return lower, upper, permlist, parity
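
A hypothetical usage sketch of ludcmp_crout, assuming (as the other examples on this page suggest) that Matrix accepts a list of rows and that the returned factors index like nested lists: factor a small matrix and multiply lower by upper by hand to confirm the product reproduces it.

from array import array

a = Matrix([array('d', [4.0, 3.0]),
            array('d', [6.0, 3.0])])
lower, upper, permlist, parity = ludcmp_crout(a)
n = 2
product = [[sum(lower[i][k] * upper[k][j] for k in range(n)) for j in range(n)]
           for i in range(n)]
print(product)   # expected: [[4.0, 3.0], [6.0, 3.0]], i.e. the rows of a
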
Example no. 9
 def _fidfi(y):
     # fi first
     f    = self._model(tnext, y)
     fi   = deepcopy(f)
     beta = deepcopy(fi)
     for n in self._sequence:
         fi[n]   = 25.0*y[n] - 48.0*s[n] + 36.0*self.__prev[2][n] - \
                   16.0*self.__prev[1][n] + \
                    3.0*self.__prev[0][n] - 12.0*h*f[n]
         beta[n] = - fi[n]
     # then fid = dfi[n]/dy[m] = 25*dy[n]/dy[m] - 12*h*df[n]/dy[m], or
     # - 12*h*df[n]/dy[m]; n != m, and
     # 25 - 12*h*df[n]/dy[m]; n == m
     jacob = self._jake3(tnext, y, hf, ha)
     fid   = Matrix()
     for n in self._sequence:
         derivs = array('d', [])
         for m in self._sequence:
             deriv = 25.0*krond(n, m) - 12.0*h*jacob[n][m]
             derivs.append(deriv)
         fid.append(derivs)
     alpha = Matrix(fid)
     return alpha, beta
Example no. 10
def corrmatrix(inputmatrix):
    """
    Computes the correlation matrix of the input matrix. Each row is assumed 
    to contain the vector for one parameter. 
    """

    ndim = len(inputmatrix)  # = the number of rows/parameters
    
    # First create unity output matrix
    corrmatrix = Matrix()
    corrmatrix.unity(ndim)

    # Then fill it with correlation coefficients
    for k in range(0, ndim):
        kp1 = k + 1
        for j in range(0, kp1):
            if j != k:
                #amk,amj,vk,vj,covkj, rhokj  = covar(inputmatrix[k], \
                #                                    inputmatrix[j])
                #corrmatrix[k][j] = corrmatrix[j][k] = rhokj
                corrmatrix[k][j] = corrmatrix[j][k] = \
                           covar(inputmatrix[k], inputmatrix[j])[5]  # = rhokj

    return corrmatrix
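
covar(...)[5] is taken above to be the correlation coefficient rhokj. For reference, a self-contained sketch of the same Pearson coefficient for two rows, using only the statistics module (pearson below is a hypothetical helper, not part of the surrounding library):

from statistics import mean, pstdev

def pearson(x, y):
    # Pearson correlation coefficient of two equally long sequences
    mx, my = mean(x), mean(y)
    cov = sum((a - mx) * (b - my) for a, b in zip(x, y)) / len(x)
    return cov / (pstdev(x) * pstdev(y))

row_k = [1.0, 2.0, 3.0, 4.0, 5.0]
row_j = [2.0, 1.0, 4.0, 3.0, 6.0]
print(pearson(row_k, row_j))   # the value corrmatrix would put at [k][j] and [j][k]
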
Example no. 11
def transposed(matrix):
    """
    Transpose matrix. 
    """

    nrows, ncols = sized(matrix, 'transposed')

    newmatrix = ncols * [float('nan')]
    for k in range(0, ncols):  # List comprehension used for the innermost loop
        newmatrix[k] = array('d', [row[k] for row in matrix])
    tmatrix = Matrix(newmatrix)
    del newmatrix
    '''tmatrix = Matrix(matrix)
    tmatrix.transpose() # would be slower'''

    return tmatrix
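
For plain nested lists (outside the Matrix class) the same column-major rebuild can be written with zip; a tiny self-contained sketch:

rows = [[1.0, 2.0, 3.0],
        [4.0, 5.0, 6.0]]
tr = [list(col) for col in zip(*rows)]
print(tr)   # [[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]]
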
Example no. 12
def inverted(matrix, pivoting=True):
    """
    Only square matrices can be inverted! 
    """

    ndim = squaredim(matrix, 'inverted')

    # First: LU-decompose matrix to be inverted
    if pivoting:
        lower, upper, permlist, parity = ludcmp_crout_piv(matrix)
    else:
        lower, upper, permlist, parity = ludcmp_crout(matrix)

    # Create unity matrix
    unitymatrix = Matrix()
    unitymatrix.unity(ndim)

    # Loop over the columns in unity matrix and substitute
    # (uses the fact that rows and columns are the same in a unity matrix)
    columns = Matrix()
    columns.zero(ndim, ndim)
    for k in range(0, ndim):
        columns[k] = lusubs(lower, upper, unitymatrix[k], permlist)
        # preparations below for changing lusubs to handle a column vector
        # instead of a list
        #row = Matrix([unitymatrix[k]])
        #column = transpose(row)
        #columns[k] = lusubs(lower, upper, column, permlist)
        #del column

    # Transpose matrix to get inverse
    newmatrix = ndim * [float('nan')]
    for k in range(0, ndim):  # List comprehension used for the innermost loop
        newmatrix[k] = array('d', [row[k] for row in columns])
    imatrix = Matrix(newmatrix)
    del newmatrix

    return imatrix
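
A hypothetical check of the result, again assuming Matrix can be built from a list of rows and indexed like nested lists: invert a 2x2 matrix and multiply back by hand; the product should be close to the identity and the inverse close to its analytic value.

from array import array

a = Matrix([array('d', [4.0, 7.0]),
            array('d', [2.0, 6.0])])
ainv = inverted(a)
n = 2
product = [[sum(a[i][k] * ainv[k][j] for k in range(n)) for j in range(n)]
           for i in range(n)]
print(list(ainv[0]), list(ainv[1]))   # analytically [[0.6, -0.7], [-0.2, 0.4]]
print(product)                        # close to [[1.0, 0.0], [0.0, 1.0]]
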
Example no. 13
def ludcmp_crout_piv(matrix):
    """
    Decomposes/factorizes square input matrix into a lower 
    and an upper matrix using Crout's algorithm WITH pivoting. 
    
    NB. It only works on square matrices!!! 
    """

    ndim = squaredim(matrix, 'ludcmp_crout_piv')
    ndm1 = ndim - 1
    vv = array('d', ndim * [0.0])
    permlist = list(range(0, ndim))
    parity = 1.0
    imax = 0

    # Copy the input to the matrix to be processed (keeps the original intact)
    compactlu = deepcopy(matrix)

    for i in range(0, ndim):  # Find the largest element in each row for scaling
        big = 0.0
        for j in range(0, ndim):
            temp = abs(compactlu[i][j])
            if temp > big: big = temp
        assert big > 0.0
        vv[i] = 1.0 / big

    # Perform the necessary manipulations:
    for j in range(0, ndim):
        for i in range(0, j):
            sum = compactlu[i][j]
            for k in range(0, i):
                sum -= compactlu[i][k] * compactlu[k][j]
            compactlu[i][j] = sum
        big = 0.0
        for i in range(j, ndim):
            sum = compactlu[i][j]
            for k in range(0, j):
                sum -= compactlu[i][k] * compactlu[k][j]
            compactlu[i][j] = sum
            dum = vv[i] * abs(sum)
            if dum > big:
                big = dum
                imax = i
        if j != imax:
            # Swap row imax and row j
            imaxdum = permlist[imax]  # NB in !!!!!!!!!!!!!!!!
            jdum = permlist[j]  # NB in !!!!!!!!!!!!!!!!
            permlist[j] = imaxdum  # NB in !!!!!!!!!!!!!!!!
            permlist[imax] = jdum  # NB in !!!!!!!!!!!!!!!!
            for k in range(0, ndim):
                dum = compactlu[imax][k]
                compactlu[imax][k] = compactlu[j][k]
                compactlu[j][k] = dum
            parity = -parity
            vv[imax] = vv[j]
        #permlist[j] = imax   # NB out !!!!!!!!!!!!!!!!!!!!!
        divisor = float(compactlu[j][j])
        if abs(divisor) < TINY: divisor = fsign(divisor) * TINY
        dum = 1.0 / divisor
        if j != ndm1:
            jp1 = j + 1
            for i in range(jp1, ndim):
                compactlu[i][j] *= dum

    lower = Matrix()
    lower.zero(ndim, ndim)
    upper = Matrix()
    upper.zero(ndim, ndim)

    for i in range(0, ndim):
        for j in range(i, ndim):
            lower[j][i] = compactlu[j][i]
    for i in range(0, ndim):
        lower[i][i] = 1.0

    for i in range(0, ndim):
        for j in range(i, ndim):
            upper[i][j] = compactlu[i][j]

    del compactlu

    return lower, upper, permlist, parity
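
The factors and permlist are meant to be passed on to lusubs, as inverted above does. A hypothetical sketch solving a small system a*x = b, assuming Matrix accepts a list of rows and that lusubs takes the right-hand side as a plain sequence; the zero in a[0][0] is there so that the pivoting actually matters.

from array import array

a = Matrix([array('d', [0.0, 2.0, 1.0]),
            array('d', [1.0, 1.0, 0.0]),
            array('d', [2.0, 0.0, 1.0])])
b = [3.0, 2.0, 3.0]

lower, upper, permlist, parity = ludcmp_crout_piv(a)
x = lusubs(lower, upper, b, permlist)
print(list(x))   # should be close to [1.0, 1.0, 1.0] since a*[1, 1, 1] = b
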
Example no. 14
    def lhs_sample(self, nparams, nintervals, rcorrmatrix=None, checklevel=0):
        """
        Generates a full Latin Hypercube Sample of uniformly distributed 
        random variates in [0.0, 1.0] placed in a matrix with one realization 
        in each row. A target rank correlation matrix can be given (must have 
        the dimension nparams*nparams).
        
        checklevel may be 0, 1 or 2 and is used to control trace printout. 
        0 produces no trace output, whereas 2 produces the most.

        NB. IN ORDER FOR LATIN HYPERCUBE SAMPLING TO BE MEANINGFUL THE OUTPUT 
        STREAM OF RANDOM VARIATES MUST BE HANDLED BY INVERSE METHODS !!!! 

        Latin Hypercube Sampling was first described by McKay, Conover & 
        Beckman in a Technometrics article in 1979. The use of the LHS 
        technique to introduce rank correlations was first described by 
        Iman & Conover 1982 in an issue of Communications in Statistics.
        """

        # lhs_sample uses the Matrix class to a great extent

        if nparams > nintervals:
            warn("nparams > nintervals in RandomStructure.lhs_sample")

        nsamples = nintervals  # Just to remember
        rstreaminner = self.rstream
        rstreamouter = self.rstream2

        factor = 1.0 / float(nintervals)

        tlhsmatrix1 = Matrix()  # tlhsmatrix1 belongs to the Matrix class
        if rcorrmatrix: tscorematrix = Matrix()
        for k in range(0, nparams):
            if rcorrmatrix:
                tnvector, tscorevector = \
                            self.scramble_range(nsamples, rstreamouter, True)
                rowk = array('d', tscorevector)
                tscorematrix.append(rowk)
            else:
                tnvector = self.scramble_range(nsamples, rstreamouter)
            pvector = array('d', [])
            for number in tnvector:
                p = factor * (float(number) + rstreaminner.runif01())
                p = max(p, 0.0)  # Probabilities must be in [0.0, 1.0]
                p = min(p, 1.0)
                pvector.append(p)
            tlhsmatrix1.append(pvector)

        # tlhsmatrix1 (and tscorematrix) are now transposed to run with
        # one subsample per row, to fit the output format as well as the
        # Iman-Conover formulation. tlhsmatrix1 and tscorematrix are still
        # used for some manipulations that are simpler when the matrices
        # run with one variable per row

        lhsmatrix1 = transposed(tlhsmatrix1)
        if rcorrmatrix: scorematrix = transposed(tscorematrix)

        if checklevel == 2:
            print("lhs_sample: Original LHS sample matrix")
            mxdisplay(lhsmatrix1)
            if rcorrmatrix:
                print("lhs_sample: Target rank correlation matrix")
                mxdisplay(rcorrmatrix)
        if checklevel == 1 or checklevel == 2:
            print("lhs_sample: Rank correlation matrix of")
            print("            original LHS sample matrix")
            trankmatrix1 = Matrix()
            for k in range(0, nparams):
                rowk = array('d', extract_ranks(tlhsmatrix1[k]))
                trankmatrix1.append(rowk)
            mxdisplay(Matrix(corrmatrix(trankmatrix1)))

        if not rcorrmatrix:
            return lhsmatrix1

        else:
            scorecorr = Matrix(corrmatrix(tscorematrix))
            if checklevel == 2:
                print("lhs_sample: Score matrix of original LHS sample matrix")
                mxdisplay(scorematrix)
                print("lhs_sample: Correlation matrix of scores of")
                print("            original LHS sample")
                mxdisplay(scorecorr)

            slower, slowert = ludcmp_chol(scorecorr)
            slowerinverse = inverted(slower)
            tslowerinverse = transposed(slowerinverse)
            clower, clowert = ludcmp_chol(rcorrmatrix)
            scoresnostar = scorematrix * tslowerinverse  # Matrix multiplication
            if checklevel == 2:
                print("lhs_sample: Correlation matrix of scoresnostar")
                mxdisplay(corrmatrix(transposed(scoresnostar)))

            scoresstar = scoresnostar * clowert  # Matrix multiplication
            tscoresstar = transposed(scoresstar)
            trankmatrix = Matrix()
            for k in range(0, nparams):
                trankmatrix.append(extract_ranks(tscoresstar[k]))
            if checklevel == 2:
                print("lhs_sample: scoresstar matrix")
                mxdisplay(scoresstar)
                print("lhs_sample: Correlation matrix of scoresstar")
                mxdisplay(corrmatrix(tscoresstar))
                print("lhs_sample: scoresstar matrix converted to rank")
                mxdisplay(transposed(trankmatrix))
                for k in range(0, nparams):
                    tlhsmatrix1[k] = array('d', sorted(list(tlhsmatrix1[k])))
                print("RandomStructure.lhs_sample: Sorted LHS sample matrix")
                mxdisplay(transposed(tlhsmatrix1))

            tlhsmatrix2 = Matrix()
            for k in range(0, nparams):
                # Sort each row in tlhsmatrix1 and reorder
                # according to trankmatrix rows
                auxvec = reorder(tlhsmatrix1[k], trankmatrix[k], \
                                                 straighten=True)
                tlhsmatrix2.append(auxvec)
            lhsmatrix2 = transposed(tlhsmatrix2)
            if checklevel == 2:
                print("lhs_sample: Corrected/reordered LHS sample matrix")
                mxdisplay(transposed(tlhsmatrix2))

            if checklevel == 1 or checklevel == 2:
                trankmatrix2 = Matrix()
                auxmatrix2 = tlhsmatrix2
                for k in range(0, nparams):
                    trankmatrix2.append(extract_ranks(auxmatrix2[k]))
                print("lhs_sample: Rank correlation matrix of corrected/")
                print("            /reordered LHS sample matrix")
                mxdisplay(corrmatrix(trankmatrix2))

            return lhsmatrix2
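
The core stratification above is p = (k + U)/nintervals for a scrambled interval index k. A self-contained sketch of a plain (uncorrelated) Latin Hypercube sample built the same way with only the random module; lhs_plain is a hypothetical stand-in without the rank-correlation machinery.

import random

def lhs_plain(nparams, nintervals, rng=random.Random(2718)):
    # one column per parameter; each parameter gets exactly one point in each
    # of the nintervals equiprobable strata of [0.0, 1.0]
    columns = []
    for _ in range(nparams):
        idx = list(range(nintervals))
        rng.shuffle(idx)                                 # scrambled interval order
        columns.append([(k + rng.random()) / nintervals for k in idx])
    # transpose so that each row is one realization, as lhs_sample does
    return [list(row) for row in zip(*columns)]

for row in lhs_plain(3, 5):
    print(row)
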
Example no. 15
def nelder_mead(objfunc, point0, spans, \
                trace=False, tolf=SQRTMACHEPS, tola=SQRTTINY, maxniter=256, \
                rho=1.0, xsi=2.0, gamma=0.5, sigma=0.5):
    """
    The Nelder & Mead downhill simplex method is designed to find the minimum 
    of an objective function that has a multi-dimensional input, (see for 
    instance Lagarias et al. (1998), "Convergence Properties of the Nelder-Mead 
    Simplex in Low Dimensions", SIAM J. Optim., Society for Industrial and 
    Applied Mathematics Vol. 9, No. 1, pp. 112-147 for details). The algorithm 
    is said to first have been presented by Nelder and Mead in Computer Journal,
    Vol. 7, pp. 308-313 (1965).

    The initial simplex is specified by an initial point (an array of 
    coordinates) plus an array of spans for the corresponding point 
    coordinates.

    For trace=True a trace is printed to stdout consisting of the present 
    number of iterations, the present low value of the objective function, 
    the present absolute value of the difference between the high and the 
    low value of the objective function, and the present vertex having the 
    low value of the objective function = the present "best" point.
    
    tolf is the fractional tolerance and tola is the absolute tolerance of 
    the absolute value of the difference between the high and the low value 
    of the objective function.

    maxniter is the maximum allowed number of iterations.

    rho, xsi, gamma and sigma are the parameters for reflection, expansion,
    contraction and shrinkage, respectively (cf. the references above).
    """

    # Check the input parameters
    assert is_nonneginteger(maxniter), \
       "max number of iterations must be a non-negative integer in nelder_mead!"
    if tolf < MACHEPS:
        tolf = MACHEPS
        wtext  = "fractional tolerance smaller than machine epsilon is not "
        wtext += "recommended in nelder_mead. Machine epsilon is used instead"
        warn(wtext)
    assert rho > 0.0, "rho must be positive in nelder_mead!"
    assert xsi > 1.0, "xsi must be > 1.0 in nelder_mead!"
    assert xsi > rho, "xsi must be > rho in nelder_mead!"
    assert 0.0 < gamma < 1.0, "gamma must be in (0.0, 1.0) in nelder_mead!"
    assert 0.0 < sigma < 1.0, "sigma must be in (0.0, 1.0) in nelder_mead!"
    assert tola >= 0.0, "absolute tolerance must be non-negative in nelder_mead!"

    # Prepare matrix of vertices
    ndim     = len(point0)
    assert len(spans) == ndim
    vertices = Matrix()
    vertices.append(array('d', list(point0)))
    ndimp1   = ndim + 1
    fndim    = float(ndim)
    for j in range(0, ndim): vertices.append(array('d', list(point0)))
    for j in range(0, ndim): vertices[j+1][j] += spans[j]

    # Prepare a few variants of parameters
    oneprho = 1.0 + rho

    # LOOP!!!!!!!!
    niter = 0
    while True:
        niter += 1
        if niter > maxniter:
            txt1 = "nelder_mead did not converge. Absolute error = "
            txt2 = str(abs(high-low)) + " for " + str(niter-1)
            txt3 = " iterations. Consider new tols or maxniter!"
            raise Error(txt1+txt2+txt3)
        # Compute the objective function values for the vertices
        flist = array('d', [])
        for k in range(0, ndimp1):
            fk = objfunc(vertices[k])
            flist.append(fk)

        # Establish the highest point, the next highest point and the lowest
        low   = flist[0]
        high  = nxhi = low
        ilow  = 0
        ihigh = 0
        for k in range(1, ndimp1):
            fk = flist[k]
            if fk > high:
                nxhi   = high
                high   = fk
                ihigh  = k
            elif fk > nxhi:
                # keep the next-highest value up to date as well
                nxhi = fk
            elif fk < low:
                low  = fk
                ilow = k

        if trace: print(niter, low, abs(high-low), list(vertices[ilow]))
        if low < tola: tol = tola
        else:          tol = abs(low)*tolf
        if abs(high-low) < tol: return low, list(vertices[ilow])

        # Reflect the high point
        # First find a new vertex = the centroid of the non-max vertices
        cntr  = array('d', ndim*[float('nan')])
        newr  = array('d', ndim*[float('nan')])
        for j in range(0, ndim):
            xsum = 0.0
            for k in range(0, ndimp1):
                if k != ihigh:
                    xsum += vertices[k][j]
            cntr[j] = xsum/fndim
        # Then move from the centroid in an away-from-max direction
        for j in range(0, ndim):
            newr[j] = oneprho*cntr[j] - rho*vertices[ihigh][j]

        # Check the new vertex
        accepted = False
        phir = objfunc(newr)
        if low <= phir < nxhi:
            # Everything is OK!
            if trace: print("Reflection sufficient")
            vertices[ihigh] = newr
            phi             = phir
            accepted        = True
        elif phir < low:
            # Expand:
            if trace: print("Expansion")
            newe = array('d', ndim*[float('nan')])
            for j in range(0, ndim):
                newe[j] = cntr[j] + xsi*(newr[j]-cntr[j])
            phie = objfunc(newe)
            if phie < phir:
                vertices[ihigh] = newe
                phi             = phie
            else:
                vertices[ihigh] = newr
                phi             = phir
            accepted = True
        elif phir >= nxhi:
            # Contract
            if phir < high:
                # -outside:
                if trace: print("Outside contraction")
                newo = array('d', ndim*[float('nan')])
                for j in range(0, ndim):
                    newo[j] = cntr[j] + gamma*(newr[j]-cntr[j])
                phio = objfunc(newo)
                if phio <= phir:
                    vertices[ihigh] = newo
                    phi             = phio
                    accepted        = True
            else:
                # -inside:
                if trace: print("Inside contraction")
                newi = array('d', ndim*[float('nan')])
                for j in range(0, ndim):
                    newi[j] = cntr[j] - gamma*(cntr[j]-vertices[ihigh][j])
                phii = objfunc(newi)
                if phii <= high:
                    vertices[ihigh] = newi
                    phi             = phii
                    accepted        = True
        if not accepted:
            # Shrink:
            if trace: print("Shrinkage")
            for k in range(0, ndimp1):
                for j in range(0, ndim):
                    vertices[k][j] = vertices[ilow][j] + sigma*(vertices[k][j] -
                                                             vertices[ilow][j])

# end of nelder_mead

# ------------------------------------------------------------------------------
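
A usage sketch, assuming nelder_mead as defined above is importable; bowl is a hypothetical convex test objective with its minimum value 3.0 at (1.0, -2.0), and the starting point and spans are illustrative choices.

def bowl(x):
    # minimum value 3.0 at (1.0, -2.0)
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2 + 3.0

fmin, xmin = nelder_mead(bowl, point0=[0.0, 0.0], spans=[0.5, 0.5])
print(fmin, xmin)   # expected to approach 3.0 and [1.0, -2.0]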