示例#1
0
def getVectorEst(type,pimc,outName,reduceFlag,xlab,ylab, skip=0):
    ''' Return the arrays consisting of the reduced averaged vector
        estimators.

        type       -- estimator name; also the output-file prefix ('obdm'
                      triggers normalization by the first grid point)
        pimc       -- helper object providing the file list and parameter map
        outName    -- suffix of the reduced output file
        reduceFlag -- (label, parameter-key) pair used for column headers
        xlab,ylab  -- axis labels written to the output header
        skip       -- number of leading (equilibration) bins to discard

        Returns (x, ave, err), each of shape [numParams, Nx], or (0, 0, 0)
        if the reduction fails.
    '''

    fileNames = pimc.getFileList(type)
    try:
        headers   = pimchelp.getHeadersFromFile(fileNames[0])

        numParams = len(fileNames)
        Nx = len(headers)

        x   = zeros([numParams,Nx],float)
        ave = zeros([numParams,Nx],float)
        err = zeros([numParams,Nx],float)

        for i,fname in enumerate(fileNames):

            # Get the estimator data and compute averages, discarding the
            # first `skip` equilibration bins
            data = loadtxt(fname,ndmin=2)[skip:,:]
            ave[i,:],err[i,:] = getStats(data)

            # the column headers hold the x-axis values for this file
            x[i,:] = pimchelp.getHeadersFromFile(fname)

            # Normalize the OBDM averages and errors by the value at the
            # first grid point
            if type == 'obdm':
                norm = ave[i,0]
                ave[i,:] /= norm
                err[i,:] /= norm

        # output the vector data to disk; the with-statement guarantees the
        # file is closed even if a write fails
        with open('%s-%s' % (type,outName),'w') as outFile:

            # first header row: one 'flag = value' label per parameter set
            # NOTE(review): raises IndexError if pimc.id is empty -- confirm
            # callers always populate it before reducing
            for j in range(numParams):
                lab = '%s = %4.2f' % (reduceFlag[0],
                                      float(pimc.params[pimc.id[j]][reduceFlag[1]]))
                fmt = '#%15s%16s%16s' if j == 0 else '%16s%16s%16s'
                outFile.write(fmt % ('',lab,''))
            outFile.write('\n')

            # second header row: column labels repeated per parameter set
            for j in range(numParams):
                fmt = '#%15s%16s%16s' if j == 0 else '%16s%16s%16s'
                outFile.write(fmt % (xlab,ylab,'+/-'))
            outFile.write('\n')

            # the data: one row per x-value, three columns per parameter set
            for i,h in enumerate(headers):
                for j in range(numParams):
                    outFile.write('%16.8E%16.8E%16.8E' % (x[j,i],ave[j,i],err[j,i]))
                outFile.write('\n')
        return x,ave,err

    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; print() form is valid on both Python 2 and 3
        print('Problem Reducing %s files' % type)
        return 0,0,0
示例#2
0
def getKappa(pimc,outName,reduceFlag,skip=0):
    ''' Return the arrays containing the reduced averaged compressibility.

        pimc       -- helper object providing the file list and parameter map
        outName    -- suffix of the reduced output file
        reduceFlag -- (label, parameter-key) pair used for column headers
        skip       -- number of leading (equilibration) bins to discard

        Returns (aveKappa, errKappa), one entry per estimator file.
    '''

    fileNames = pimc.getFileList('estimator')
    headers   = pimchelp.getHeadersFromFile(fileNames[0])

    aveKappa = zeros([len(fileNames)],float)
    errKappa = zeros([len(fileNames)],float)

    for i,fname in enumerate(fileNames):

        # Now get the temperature, volume and linear system size
        ID = pimc.getID(fname)
        T = float(pimc.params[ID]['Temperature'])

        # We need to get the correct volume, depending on whether or not we
        # are looking at the core region of a cylinder
        if len(glob.glob('../CYLINDER')) > 0:
            # hard-coded core radius of 1.75 -- TODO confirm against the
            # simulation geometry
            V = pi*(1.75)**2*float(pimc.params[ID]['Container Length'])
        else:
            V = float(pimc.params[ID]['Container Volume'])

        # Load the estimator data, discarding the first `skip` bins
        # (BUG FIX: `skip` was previously accepted but never applied)
        estData = loadtxt(fname,ndmin=2)[skip:,:]

        N   = estData[:,headers.index('N')]
        N2  = estData[:,headers.index('N^2')]
        NN2 = N*N2

        numBins = len(N)

        # Bin averages and their errors
        aveN,errN     = getStats(N)
        aveN2,errN2   = getStats(N2)
        aveNN2,errNN2 = getStats(NN2)

        # The covariance of N and N^2 is finite, so it must be calculated!
        covNN2 = (aveNN2 - aveN*aveN2)/(1.0*numBins-1.0)

        # Get the value of rho^2 * kappa and the error via propagation,
        # including the covariance cross-term
        aveKappa[i] = (aveN2-aveN**2)/(T*V)
        errKappa[i] = sqrt(errN2**2 + 4.0*errN**2*aveN**2 - 4.0*aveN*covNN2)/(T*V)

    # output the estimator data to disk; with-statement guarantees close
    with open('%s-%s' % ('kappa',outName),'w') as outFile:

        # the headers
        outFile.write('#%15s' % reduceFlag[0])
        outFile.write('%16s%16s' % ('kappa','+/-'))
        outFile.write('\n')

        # the data: one row per parameter set
        for i in range(len(fileNames)):
            outFile.write('%16.8E' % float(pimc.params[pimc.id[i]][reduceFlag[1]]))
            outFile.write('%16.8E%16.8E\n' % (aveKappa[i],errKappa[i]))

    return aveKappa,errKappa
示例#3
0
def getScalarEst(type,pimc,outName,reduceFlag, skip=0):
    ''' Return the arrays containing the reduced averaged scalar
        estimators in question.

        type       -- estimator name; also the output-file prefix
        pimc       -- helper object providing the file list and parameter map
        outName    -- suffix of the reduced output file
        reduceFlag -- (label, parameter-key) pair used for column headers
        skip       -- number of leading (equilibration) bins to discard

        Returns (headers, ave, err) where ave/err have shape
        [numFiles, numHeaders].
    '''

    fileNames = pimc.getFileList(type)
    headers   = pimchelp.getHeadersFromFile(fileNames[0])

    ave = zeros([len(fileNames),len(headers)],float)
    err = zeros([len(fileNames),len(headers)],float)
    for i,fname in enumerate(fileNames):
        # Compute the averages and error, discarding the first `skip`
        # equilibration bins
        data = loadtxt(fname,ndmin=2)[skip:,:]
        ave[i,:],err[i,:] = getStats(data)

    # output the estimator data to disk; with-statement guarantees close
    with open('%s-%s' % (type,outName),'w') as outFile:

        # the headers
        outFile.write('#%15s' % reduceFlag[0])
        for head in headers:
            outFile.write('%16s%16s' % (head,'+/-'))
        outFile.write('\n')

        # the data: one row per file, (value, error) pair per column
        for i,f in enumerate(fileNames):
            outFile.write('%16.8E' % float(pimc.params[pimc.id[i]][reduceFlag[1]]))
            for j,h in enumerate(headers):
                outFile.write('%16.8E%16.8E' % (ave[i,j],err[i,j]))
            outFile.write('\n')

    return headers,ave,err
示例#4
0
def getScalarEst(type,pimc,outName,reduceFlag, skip=0):
    ''' Return the arrays containing the reduced averaged scalar
        estimators in question.

        type       -- estimator name; also the output-file prefix
        pimc       -- helper object providing file list, baseDir and params
        outName    -- suffix of the reduced output file
        reduceFlag -- (label, parameter-key) pair used for column headers
        skip       -- number of leading (equilibration) bins to discard

        Returns (headers, ave, err) where ave/err have shape
        [numFiles, numHeaders].
    '''

    # Get the files corresponding to this particular estimator
    fileNames = pimc.getFileList(type)

    # Paths in the file list are relative to the run's base directory
    for i in range(len(fileNames)):
        fileNames[i]=pimc.baseDir+fileNames[i]
    headers   = pimchelp.getHeadersFromFile(fileNames[0])

    # If the last header is "bins", we are dealing with a MERGED file and
    # its rows must be weight-averaged by their bin counts
    if headers[-1] == 'bins':
        merged = True
        # the bins column is unnecessary in a reduced file
        headers.pop(-1)
        # Drop the "variance" columns.  BUG FIX: use a list comprehension
        # instead of filter(), which returns a lazy iterator on Python 3
        # and would break the len(headers) calls below.
        headers = [h for h in headers if h != "variance"]
    else:
        merged = False

    ave = np.zeros([len(fileNames),len(headers)],float)
    err = np.zeros([len(fileNames),len(headers)],float)

    for i,fname in enumerate(fileNames):
        # Skip the number of bins the user specified, +2 for the two
        # comment rows
        filedata = np.loadtxt(fname,ndmin=2,skiprows=skip+2)
        if merged:
            # data occupies every column but the last; the last column
            # holds the per-row weights (bin counts)
            data = filedata[:,:-1]
            weights = filedata[:,-1:]
        else:
            # not a merged file: None means uniform weights in getScalarStats
            data = filedata
            weights = None
        ave[i,:],err[i,:] = getScalarStats(data,weights,merged)

    # output the estimator data to disk; with-statement guarantees close
    with open(pimc.baseDir+'%s-%s' % (type,outName),'w') as outFile:

        # the headers
        outFile.write('#%15s' % reduceFlag[0])
        for head in headers:
            outFile.write('%16s%16s' % (head,'+/-'))
        outFile.write('\n')

        # the data: one row per file, (value, error) pair per column
        for i,f in enumerate(fileNames):
            outFile.write('%16.8E' % float(pimc.params[pimc.id[i]][reduceFlag[1]]))
            for j,h in enumerate(headers):
                outFile.write('%16.8E%16.8E' % (ave[i,j],err[i,j]))
            outFile.write('\n')

    return headers,ave,err
示例#5
0
def getVectorEst(type,pimc,outName,reduceFlag,xlab,ylab, skip=0):
    ''' Return the arrays consisting of the reduced averaged vector
        estimators.

        type       -- estimator name; also the output-file prefix ('obdm'
                      triggers normalization by the first grid point)
        pimc       -- helper object providing file list, baseDir and params
        outName    -- suffix of the reduced output file
        reduceFlag -- (label, parameter-key) pair used for column headers
        xlab,ylab  -- axis labels written to the output header
        skip       -- number of leading (equilibration) bins to discard

        Returns (x, ave, err), each of shape [numParams, Nx], or (0, 0, 0)
        if the reduction fails.
    '''

    fileNames = pimc.getFileList(type)
    try:
        # Paths in the file list are relative to the run's base directory
        for i in range(len(fileNames)):
            fileNames[i]=pimc.baseDir+fileNames[i]
        headers   = pimchelp.getHeadersFromFile(fileNames[0])

        # A trailing 'bins' header marks a MERGED file whose rows must be
        # weight-averaged by their bin counts
        if headers[-1] == 'bins':
            merged = True
            # the bins header is unnecessary in a reduced file
            headers.pop(-1)
        else:
            merged = False

        numParams = len(fileNames)
        Nx = len(headers)

        x   = np.zeros([numParams,Nx],float)
        ave = np.zeros([numParams,Nx],float)
        err = np.zeros([numParams,Nx],float)

        # BUG FIX: the non-merged branch previously sat OUTSIDE any loop and
        # referenced undefined `i`/`fname` (a NameError silently swallowed by
        # the bare except below).  Both paths now run inside one loop.
        for i,fname in enumerate(fileNames):
            if merged:
                # Skip the user-specified bins, +2 for the two comment rows
                filedata = np.loadtxt(fname,ndmin=2,skiprows=skip+2)
                # data is every column but the last; the last column holds
                # the per-row weights (bin counts)
                data = filedata[:,:-1]
                weights = filedata[:,-1:]
                ave[i,:],err[i,:] = getVectorStats(data,weights,merged)

                # the column headers hold the x-axis values; drop the
                # trailing 'bins' entry
                headers2 = pimchelp.getHeadersFromFile(fname)
                headers2.pop(-1)
                x[i,:] = headers2
            else:
                data = np.loadtxt(fname,ndmin=2)[skip:,:]
                # NOTE(review): single-argument call kept from the original
                # non-merged path -- confirm getVectorStats accepts it
                ave[i,:],err[i,:] = getVectorStats(data)

                # the column headers hold the x-axis values
                x[i,:] = pimchelp.getHeadersFromFile(fname)

            # Normalize the OBDM by its value at the first grid point
            if type == 'obdm':
                norm = ave[i,0]
                ave[i,:] /= norm
                err[i,:] /= norm

        # output the vector data to disk; with-statement guarantees close
        with open(pimc.baseDir+'%s-%s' % (type,outName),'w') as outFile:

            # first header row: one 'flag = value' label per parameter set
            # NOTE(review): raises IndexError if pimc.id is empty -- confirm
            # callers always populate it before reducing
            for j in range(numParams):
                lab = '%s = %4.2f' % (reduceFlag[0],
                                      float(pimc.params[pimc.id[j]][reduceFlag[1]]))
                fmt = '#%15s%16s%16s' if j == 0 else '%16s%16s%16s'
                outFile.write(fmt % ('',lab,''))
            outFile.write('\n')

            # second header row: column labels repeated per parameter set
            for j in range(numParams):
                fmt = '#%15s%16s%16s' if j == 0 else '%16s%16s%16s'
                outFile.write(fmt % (xlab,ylab,'+/-'))
            outFile.write('\n')

            # the data: one row per x-value, three columns per parameter set
            for i,h in enumerate(headers):
                for j in range(numParams):
                    outFile.write('%16.8E%16.8E%16.8E' % (x[j,i],ave[j,i],err[j,i]))
                outFile.write('\n')
        return x,ave,err

    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; print() form is valid on both Python 2 and 3
        print('Problem Reducing %s files' % type)
        return 0,0,0
示例#6
0
def getScalarEst(type, pimc, outName, reduceFlag, skip=0):
    ''' Return the arrays containing the reduced averaged scalar
        estimators in question.

        type       -- estimator name; also the output-file prefix
        pimc       -- helper object providing file list, baseDir and params
        outName    -- suffix of the reduced output file
        reduceFlag -- (label, parameter-key) pair used for column headers
        skip       -- number of leading (equilibration) bins to discard

        Returns (headers, ave, err) where ave/err have shape
        [numFiles, numHeaders].
    '''

    # Get the files corresponding to this particular estimator
    fileNames = pimc.getFileList(type)

    # Paths in the file list are relative to the run's base directory
    for i in range(len(fileNames)):
        fileNames[i] = pimc.baseDir + fileNames[i]
    headers = pimchelp.getHeadersFromFile(fileNames[0])

    # A trailing 'bins' header marks a MERGED file whose rows must be
    # weight-averaged by their bin counts
    if headers[-1] == 'bins':
        merged = True
        # the bins header is unnecessary in a reduced file
        headers.pop(-1)
        # Drop the "variance" columns.  BUG FIX: use a list comprehension
        # instead of filter(), which returns a lazy iterator on Python 3
        # and would break the len(headers) calls below.
        headers = [h for h in headers if h != "variance"]
    else:
        merged = False

    ave = np.zeros([len(fileNames), len(headers)], float)
    err = np.zeros([len(fileNames), len(headers)], float)

    for i, fname in enumerate(fileNames):
        # Skip the number of bins the user specified, +2 for the two
        # comment rows
        filedata = np.loadtxt(fname, ndmin=2, skiprows=skip + 2)
        if merged:
            # data occupies every column but the last; the last column
            # holds the per-row weights (bin counts)
            data = filedata[:, :-1]
            weights = filedata[:, -1:]
        else:
            # not a merged file: None means uniform weights in getScalarStats
            data = filedata
            weights = None
        ave[i, :], err[i, :] = getScalarStats(data, weights, merged)

    # output the estimator data to disk; with-statement guarantees close
    with open(pimc.baseDir + '%s-%s' % (type, outName), 'w') as outFile:

        # the headers
        outFile.write('#%15s' % reduceFlag[0])
        for head in headers:
            outFile.write('%16s%16s' % (head, '+/-'))
        outFile.write('\n')

        # the data: one row per file, (value, error) pair per column
        for i, f in enumerate(fileNames):
            outFile.write('%16.8E' % float(pimc.params[pimc.id[i]][reduceFlag[1]]))
            for j, h in enumerate(headers):
                outFile.write('%16.8E%16.8E' % (ave[i, j], err[i, j]))
            outFile.write('\n')

    return headers, ave, err
示例#7
0
def getVectorEst(type, pimc, outName, reduceFlag, xlab, ylab, skip=0):
    ''' Return the arrays consisting of the reduced averaged vector
        estimators.

        type       -- estimator name; also the output-file prefix ('obdm'
                      triggers normalization by the first grid point)
        pimc       -- helper object providing file list, baseDir and params
        outName    -- suffix of the reduced output file
        reduceFlag -- (label, parameter-key) pair used for column headers
        xlab,ylab  -- axis labels written to the output header
        skip       -- number of leading (equilibration) bins to discard

        Returns (x, ave, err), each of shape [numParams, Nx], or (0, 0, 0)
        if the reduction fails.
    '''

    fileNames = pimc.getFileList(type)
    try:
        # Paths in the file list are relative to the run's base directory
        for i in range(len(fileNames)):
            fileNames[i] = pimc.baseDir + fileNames[i]
        headers = pimchelp.getHeadersFromFile(fileNames[0])

        # A trailing 'bins' header marks a MERGED file whose rows must be
        # weight-averaged by their bin counts
        if headers[-1] == 'bins':
            merged = True
            # the bins header is unnecessary in a reduced file
            headers.pop(-1)
        else:
            merged = False

        numParams = len(fileNames)
        Nx = len(headers)

        x = np.zeros([numParams, Nx], float)
        ave = np.zeros([numParams, Nx], float)
        err = np.zeros([numParams, Nx], float)

        # BUG FIX: the non-merged branch previously sat OUTSIDE any loop and
        # referenced undefined `i`/`fname` (a NameError silently swallowed by
        # the bare except below).  Both paths now run inside one loop.
        for i, fname in enumerate(fileNames):
            if merged:
                # Skip the user-specified bins, +2 for the two comment rows
                filedata = np.loadtxt(fname, ndmin=2, skiprows=skip + 2)
                # data is every column but the last; the last column holds
                # the per-row weights (bin counts)
                data = filedata[:, :-1]
                weights = filedata[:, -1:]
                ave[i, :], err[i, :] = getVectorStats(data, weights, merged)

                # the column headers hold the x-axis values; drop the
                # trailing 'bins' entry
                headers2 = pimchelp.getHeadersFromFile(fname)
                headers2.pop(-1)
                x[i, :] = headers2
            else:
                data = np.loadtxt(fname, ndmin=2)[skip:, :]
                # NOTE(review): single-argument call kept from the original
                # non-merged path -- confirm getVectorStats accepts it
                ave[i, :], err[i, :] = getVectorStats(data)

                # the column headers hold the x-axis values
                x[i, :] = pimchelp.getHeadersFromFile(fname)

            # Normalize the OBDM by its value at the first grid point
            if type == 'obdm':
                norm = ave[i, 0]
                ave[i, :] /= norm
                err[i, :] /= norm

        # output the vector data to disk; with-statement guarantees close
        with open(pimc.baseDir + '%s-%s' % (type, outName), 'w') as outFile:

            # first header row: one 'flag = value' label per parameter set
            # NOTE(review): raises IndexError if pimc.id is empty -- confirm
            # callers always populate it before reducing
            for j in range(numParams):
                lab = '%s = %4.2f' % (reduceFlag[0],
                                      float(pimc.params[pimc.id[j]][reduceFlag[1]]))
                fmt = '#%15s%16s%16s' if j == 0 else '%16s%16s%16s'
                outFile.write(fmt % ('', lab, ''))
            outFile.write('\n')

            # second header row: column labels repeated per parameter set
            for j in range(numParams):
                fmt = '#%15s%16s%16s' if j == 0 else '%16s%16s%16s'
                outFile.write(fmt % (xlab, ylab, '+/-'))
            outFile.write('\n')

            # the data: one row per x-value, three columns per parameter set
            for i, h in enumerate(headers):
                for j in range(numParams):
                    outFile.write('%16.8E%16.8E%16.8E' %
                                  (x[j, i], ave[j, i], err[j, i]))
                outFile.write('\n')
        return x, ave, err

    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; print() form is valid on both Python 2 and 3
        print('Problem Reducing %s files' % type)
        return 0, 0, 0