Example #1
def getScalarStats(data, weights, mergedstatus, dim=0):
    ''' Get the average and error of all columns in the data matrix for a scalar
    estimator.'''
    if data.ndim > dim:
        if mergedstatus:
            # Columns alternate between actual data and their variances
            dataArray = data[:, 0::2]
            varArray = data[:, 1::2]
            # Compute the standard error using the variances (requires the
            # unflattened weights array)
            dataErr = np.sqrt(
                np.sum(varArray * weights**2, dim) / (np.sum(weights, 0))**2)
            # Average all the data (requires flattened weights array)
            weights = weights.flatten()
            dataAve = np.average(dataArray, dim, weights=weights)
        else:
            weights = weights.flatten()
            numBins = np.size(data, dim)
            dataAve = np.average(data, dim, weights=weights)
            dataAve2 = np.average(data * data, dim, weights=weights)
            bins = MCstat.bin(data)
            dataErr = np.amax(bins, axis=0)
            dataErr2 = np.sqrt(
                abs(dataAve2 - dataAve**2) / (1.0 * numBins - 1.0))

    else:
        dataAve = data
        dataErr = 0.0 * data

    return dataAve, dataErr
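The merged branch above propagates the per-seed variances through the weighted mean: with weights $w_i$ and variances $\sigma_i^2$, the squared error is $\Delta^2 = \sum_i w_i^2 \sigma_i^2 / (\sum_i w_i)^2$. A minimal self-contained sketch of that arithmetic on synthetic data (the numbers are made up and MCstat is not needed):

import numpy as np

# Rows play the role of random seeds; columns would alternate (mean, variance)
# in a real merged file, kept here as two separate arrays for clarity.
np.random.seed(0)
means = np.random.normal(size=(5, 3))                # per-seed column means
varis = np.random.uniform(0.1, 0.2, (5, 3))          # per-seed column variances
weights = 1.0 * np.random.randint(90, 110, (5, 1))   # bins contributed per seed

# Weighted average of the means, one value per estimator column
dataAve = np.average(means, 0, weights=weights.flatten())
# Propagated standard error: Delta^2 = sum(w_i^2 sigma_i^2) / (sum w_i)^2
dataErr = np.sqrt(np.sum(varis * weights**2, 0) / np.sum(weights)**2)
print(dataAve)
print(dataErr)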
Example #2
def getStats(data,dim=0):
    ''' Get the average and error of all columns in the data matrix. '''

    if ndim(data) > dim:
        numBins  = size(data,dim) 
        dataAve  = average(data,dim) 
        dataAve2 = average(data*data,dim) 
        bins = MCstat.bin(data) 
        dataErr = amax(bins,axis=0)
        dataErr2 = sqrt( abs(dataAve2-dataAve**2)/(1.0*numBins-1.0) ) 

#        for n,d in enumerate(dataErr):
#            if d > 2.0*dataErr2[n]:
#                dataErr[n] = 2.0*dataErr2[n]

#        try:
#            bins = MCstat.bin(data) 
#            dataErr = amax(bins,axis=0)
#        except:
#            dataErr   = sqrt( abs(dataAve2-dataAve**2)/(1.0*numBins-1.0) ) 
    else:
        dataAve = data
        dataErr = 0.0*data

    return dataAve,dataErr
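Here MCstat.bin performs a binning (blocking) analysis: neighbouring measurements are repeatedly merged and the naive standard error of the mean is recomputed at each level; for correlated data the estimate grows to a plateau, so taking amax over levels gives a conservative error bar. A rough sketch of the idea (an illustration of the technique only, not MCstat's actual implementation):

import numpy as np

def bin_errors(data):
    # Naive standard error of the mean at successive pairwise-averaging
    # (blocking) levels; rows of `data` are successive measurements.
    data = np.array(data, dtype=float)
    if data.ndim == 1:
        data = data[:, np.newaxis]
    errors = []
    while data.shape[0] >= 32:   # keep enough bins for a stable estimate
        errors.append(np.std(data, 0, ddof=1) / np.sqrt(data.shape[0]))
        n = (data.shape[0] // 2) * 2
        data = 0.5 * (data[0:n:2] + data[1:n:2])  # merge neighbouring bins
    return np.array(errors)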
Example #3
def getStats(data,SpanJobAverage=-1,waverage=False,dim=0):
    ''' Get the average and error of all columns in the data matrix. '''

    # SpanJobAverage tells whether the estimator file contains the Bins
    # column added by the merge scripts when merging a spanned job
    if ndim(data) > dim:
        numBins  = size(data,dim) 
        if SpanJobAverage != -1:
            dataAve = sum(data[:,:-1]*data[:,-1][:,newaxis],dim)/(1.0*sum(data[:,-1]))
            dataErr = std(data[:,:-1],0)/sqrt(len(data[:,0])-1.0)
        elif waverage:
            vals = data[:,0::2]
            errs = data[:,1::2]
            weights = 1.0/(errs*errs)
            
            # http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Dealing_with_variance
            dataErr  = np.sqrt(1.0/np.sum(weights,axis=dim))
            dataAve  = np.average(vals,axis=0,weights=weights) 
            # Little hack of the average function to compute the weighted std
            #dataErr  = np.sqrt(np.average(1.0/weights,axis=dim, weights=weights)/weights.shape[0])
        else:
            dataAve  = average(data,dim) 
            bins = MCstat.bin(data) 
            dataErr = amax(bins,axis=0)

    else:
        dataAve = data
        dataErr = 0.0*data

    return dataAve,dataErr
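The waverage branch is the inverse-variance weighting from the linked Wikipedia section: with column values $x_i$, errors $\sigma_i$ and weights $w_i = 1/\sigma_i^2$,

$$\bar{x} = \frac{\sum_i w_i x_i}{\sum_i w_i}, \qquad \sigma_{\bar{x}} = \frac{1}{\sqrt{\sum_i w_i}},$$

which is exactly the (dataAve, dataErr) pair computed above.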
Example #4
def getScalarStats(data,weights,mergedstatus,dim=0):
    ''' Get the average and error of all columns in the data matrix for a scalar
    estimator.'''
    if data.ndim > dim: 
        if mergedstatus:
            # Columns alternate between actual data and their variances
            dataArray = data[:,0::2]
            varArray = data[:,1::2]
            # Compute the standard error using the variances (requires the
            # unflattened weights array)
            dataErr = np.sqrt(np.sum(varArray*weights**2,dim)/(np.sum(weights,0))**2)
            # Average all the data (requires flattened weights array)
            weights=weights.flatten()
            dataAve = np.average(dataArray,dim,weights=weights)
        else: 
            weights=weights.flatten()
            numBins  = np.size(data,dim) 
            dataAve  = np.average(data,dim,weights=weights) 
            dataAve2 = np.average(data*data,dim,weights=weights)
            bins = MCstat.bin(data)
            dataErr = np.amax(bins,axis=0)
            dataErr2 = np.sqrt( abs(dataAve2-dataAve**2)/(1.0*numBins-1.0) ) 

    else:
        dataAve = data
        dataErr = 0.0*data

    return dataAve,dataErr
Example #5
def addFromToSkipping(FromfileName,outFile,skip=0,average=False,waverage=False):
    '''Add SSEXY measurements from FromfileName to outFile, skipping the
    first `skip` measurements.'''
     
    #Read the measurements we want to keep
    inList,numLines = ReadSkipping(FromfileName,skip)

    if len(inList) == 0:
        return 0

    # Write them (the early return above guarantees inList is non-empty)
    if average or waverage:
        data = np.loadtxt(inList)
        aves = np.average(data, 0)
        print "From: %s" % FromfileName
        if average:
            for ave in aves:
                outFile.write('%16.8E' % ave)
            outFile.write('%16d' % len(data))
        if waverage:
            errs = amax(MCstat.bin(data), axis=0)
            # check if aves is a scalar
            if not hasattr(aves, "__len__"): aves = np.array([aves])
            for (ave, err) in zip(aves, errs):
                outFile.write('%16.8E%16.8E' % (ave, err))
        outFile.write('\n')
    else:
        outFile.writelines(inList)

    return numLines
Example #6
def CreateFileSkipping(FromfileName,oldSSEXYID,newSSEXYID,skip=0,average=False,waverage=False):
    '''Create a new file with oldSSEXYID replaced by newSSEXYID, skipping
       the first `skip` measurements.
       Set average to True in order to write out just one average.'''

    #Read and adapt the header
    inFile = open(FromfileName,'r')
    #firstLine = inFile.readline().replace(str(oldSSEXYID),str(newSSEXYID))
    secondLine = inFile.readline()
    inFile.close()

    #Read the measurements we want to keep
    inList,numLines = ReadSkipping(FromfileName,skip)
    if len(inList) == 0:
       return '',0
    # get the output file name and open the file for writing
    outName = FromfileName.replace(str(oldSSEXYID),str(newSSEXYID))
    outFile = open('MERGED/' + outName,'w')
    print('To: MERGED/%-80s' % outName)
 
    #write the header
    #outFile.write(firstLine)
    secondLine = secondLine.rstrip('\n')
    if waverage:
       headers = secondLine.split()
       temp = '#%15s%16s' %(headers[1],'+/-')
       for header in headers[2:]:
           temp += '%16s%16s' %(header,'+/-')
       secondLine = temp 
    if average:
       secondLine += '%16s' %('Bins')
    
    
    outFile.write(secondLine+'\n')
    # if there are more measurements than we want to skip
    if numLines > 0:
       #write the content
       if average or waverage:
          data = np.loadtxt(inList)
          aves = np.average(data,0)
          print "From: %s" %FromfileName
          if  average:
              for ave in aves:
                  outFile.write('%16.8E' %ave)
              outFile.write('%16d' %len(data))
          if  waverage:
              errs = amax(MCstat.bin(data),axis=0)
              for (ave,err) in zip(aves,errs):
                  outFile.write('%16.8E%16.8E' %(ave,err))
             
          outFile.write('\n')
       else:
           outFile.writelines(inList) 

    return outFile,numLines
Example #7
def main():

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--MC','-Q', help='Quantum training file',   nargs='+')
    parser.add_argument('--ED','-C', help='Classical training file', nargs='+')
    parser.add_argument('--clamped', help='Clamped simulation', default=False, action='store_true')
    args = vars(parser.parse_args())


    # vars() always contains the key, so check the parsed value instead
    if args['MC'] is not None:
        skip = 3 # skip estimators we don't care about 
        for filename in args['MC']:
            fparams = ssexyhelp.getReduceParamMap(filename)
            data    = np.loadtxt(filename)
            headers = Hfile.getHeaders(filename)
            if not(args['clamped']): (first, last) = (headers.index('X0'),  headers.index('sX0'))
            else:                    (first, last) = (headers.index('sX0'), len(headers))
            aves    = np.zeros(last-first)
            stds    = np.zeros(last-first)
            for i, c in enumerate(range(first, last)):
                cdata   = data[~np.isnan(data[:,c]),c]
                aves[i] = np.mean(cdata)
                stds[i] = np.amax(MCstat.bin(cdata[np.newaxis].T))
            
    if args['ED'] is not None:
        for filename in args['ED']:
            fparams = ssexyhelp.getReduceParamMap(filename)
            ED    = np.loadtxt(filename)
    
    print ED
    print aves
    print stds
    colors = ["#66CAAE", "#CF6BDD", "#E27844", "#7ACF57", "#92A1D6", "#E17597", "#C1B546"]


    fig = pl.figure(1, figsize=(10,5))
    pl.connect('key_press_event',kevent.press)
    ax  = pl.subplot(111)
    ax.plot((ED-aves)/stds, color=colors[0])#, lw=2, m='o', ls='')#, label=r'$data$')
    
    pl.ylabel(r'$(ED-MC)/\Delta_{MC}$')
    pl.xlabel(r'$Averages$')
    lgd = pl.legend(loc = 'best')
    
    lheaders = []
    for head in Hfile.getHeaders(args['ED'][0]):
        lheaders += [r'$%s$' %head]
    pl.xticks(range(len(lheaders)), lheaders, rotation='vertical')

    #lgd.draggable(state=True)
    #lgd.draw_frame(False)
    pl.tight_layout()
    pl.show()
Example #8
def Re_Bin(path ='', plot = True):
    """This function uses MCstat to rebin all data,
    plots and returns the data"""
    
    if path and not path.endswith('/'):   # guard against the empty default
        path += '/'
    
    bins_o = np.loadtxt(path + "01.data")

    bin_err = MCstat.bin(bins_o)
    
    if plot:
        #get names
        tags = open(path+ "01.data", "r").readline().lstrip("#").split()
        #replace("_", "-").split() #issue with latex and using '_'
        #begin plotting subroutine
        fig = plt.figure()
        ax = fig.add_subplot(111)
        bin_level = range(len(bin_err))
        #loop over all cut names
        
        print "Plotting ReBining: %s ..." %(path)
        
        #check for data to make sure data stored in each row
        for i in range(len(tags)):
            ax.plot(bin_level, bin_err[:,i], label = tags[i], linewidth = 2)
        
        ax.legend(loc = 'upper left',fontsize = 'xx-small')
        ax.set_title("Re-Bin of raw %s" %(path.strip('/').split('/')[-1]))
        ax.set_xlabel("Bin Level")
        ax.set_ylabel("Error")
        plt.savefig(path + 'ReBin_Error.png')
        plt.close() #removes figure/canvas 
        
        print "Done"
    
    # return the maximum error for each column (without nasty loops)
    index = bin_err.argmax(axis = 0)
        
    return bin_err[index, range(len(index))]
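The last two lines rely on integer fancy indexing to pick each column's maximum without a loop: argmax(axis=0) yields one row index per column, and pairing those row indices with the column indices selects the corresponding entries. A tiny illustration:

import numpy as np

a = np.array([[1., 9., 2.],
              [7., 3., 5.]])
idx = a.argmax(axis=0)               # row of the max in each column: [1 0 1]
print(a[idx, range(a.shape[1])])     # [ 7.  9.  5.]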
Example #9
def getVectorStats(data, weights, mergedstatus, dim=0):
    ''' Get the average and error of all columns in the data matrix for a vector 
    estimator.'''

    if data.ndim > dim:
        if mergedstatus:
            numBins = np.size(data, dim)
            # Average all the data (requires flattened weights array)
            weights = weights.flatten()
            dataAve = np.average(data, dim, weights=weights)
            dataAve2 = np.average(data * data, dim, weights=weights)
            # For a vector estimator get the error the usual way, ignoring the
            # weights. This introduces a slight inaccuracy in the error bars but
            # saves a lot of time and effort.

            # MCstat will not work unless you have more than 32 random seeds.
            # The binning analysis done in MCstat is for a long run of correlated
            # bins. However, for a merged file all "bins" (averages over random
            # seeds) are completely uncorrelated, so we don't even need to do
            # the binning analysis.

            #bins = MCstat.bin(data)
            #dataErr = np.amax(bins,axis=0)
            dataErr = np.sqrt(
                abs(dataAve2 - dataAve**2) / (1.0 * numBins - 1.0))
        else:
            weights = weights.flatten()
            numBins = np.size(data, dim)
            dataAve = np.average(data, dim, weights=weights)
            dataAve2 = np.average(data * data, dim, weights=weights)
            bins = MCstat.bin(data)
            dataErr = np.amax(bins, axis=0)
            dataErr2 = np.sqrt(
                abs(dataAve2 - dataAve**2) / (1.0 * numBins - 1.0))

    else:
        dataAve = data
        dataErr = 0.0 * data

    return dataAve, dataErr
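For uncorrelated merged bins the error used above is just the standard error of the (weighted) mean over the $N$ bins,

$$\Delta = \sqrt{\frac{\langle x^2 \rangle - \langle x \rangle^2}{N-1}},$$

with $\langle \cdot \rangle$ the weighted averages dataAve2 and dataAve.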
Example #10
def getVectorStats(data,weights,mergedstatus,dim=0):
    ''' Get the average and error of all columns in the data matrix for a vector 
    estimator.'''

    if data.ndim > dim: 
        if mergedstatus:
            numBins  = np.size(data,dim) 
            # Average all the data (requires flattened weights array)
            weights=weights.flatten()
            dataAve = np.average(data,dim,weights=weights)
            dataAve2 = np.average(data*data,dim,weights=weights)
            # For a vector estimator get the error the usual way, ignoring the
            # weights. This introduces a slight inaccuracy in the error bars but
            # saves a lot of time and effort.
            
            # MCstat will not work unless you have more than 32 random seeds.
            # The binning analysis done in MCstat is for a long run of correlated
            # bins. However, for a merged file all "bins" (averages over random
            # seeds) are completely uncorrelated, so we don't even need to do
            # the binning analysis.
            
            #bins = MCstat.bin(data)
            #dataErr = np.amax(bins,axis=0)
            dataErr = np.sqrt( abs(dataAve2-dataAve**2)/(1.0*numBins-1.0) ) 
        else: 
            weights  = weights.flatten()
            numBins  = np.size(data,dim) 
            dataAve  = np.average(data,dim,weights=weights) 
            dataAve2 = np.average(data*data,dim,weights=weights)
            bins = MCstat.bin(data)
            dataErr = np.amax(bins,axis=0)
            dataErr2 = np.sqrt( abs(dataAve2-dataAve**2)/(1.0*numBins-1.0) ) 

    else:
        dataAve = data
        dataErr = 0.0*data

    return dataAve,dataErr
Example #11
def main():

    # setup the command line parser options
    parser = argparse.ArgumentParser(description='Plot binning analysis for \
                                            MC Data for Scalar Estimators.')
    parser.add_argument('fileNames', help='Scalar estimator files', nargs='+')
    parser.add_argument('--estimator',
                        '-e',
                        help='A list of estimator names \
                                            that are to be plotted.',
                        type=str)
    parser.add_argument('--skip',
                        '-s',
                        help='Number of measurements to be \
                        skipped in the binning analysis.',
                        type=int,
                        default=0)
    parser.add_argument('--scale',
                        help='Option to compare binning results \
                        for different parameters',
                        action='store_true')
    args = parser.parse_args()

    fileNames = args.fileNames
    scale = args.scale

    if len(fileNames) < 1:
        parser.error("Need to specify at least one scalar estimator file")

    # Grab the headers from the first estimator file (each file's line count
    # is checked below before its data is used)
    headers = pimchelp.getHeadersDict(fileNames[0])

    # If we don't choose an estimator, provide a list of possible ones
    if not args.estimator or args.estimator not in headers:
        errorString = "Need to specify one of:\n"
        for head, index in headers.iteritems():
            errorString += "\"%s\"" % head + "   "
        parser.error(errorString)

    numFiles = len(fileNames)
    col = list([headers[args.estimator]])

    # Attempt to find a 'pretty name' for the label, otherwise just default to
    # the column heading
    label = pimchelp.Description()
    try:
        yLong = label.estimatorLongName[args.estimator]
    except KeyError:
        yLong = args.estimator
    try:
        yShort = label.estimatorShortName[args.estimator]
    except KeyError:
        yShort = args.estimator

    # ============================================================================
    # Figure 1 : Error vs. bin level
    # ============================================================================
    figure(1)
    connect('key_press_event', kevent.press)

    colors = loadgmt.getColorList('cw/1', 'cw1-029', max(numFiles, 2))

    n = 0
    for fileName in fileNames:

        dataFile = open(fileName, 'r')
        dataLines = dataFile.readlines()
        dataFile.close()

        if len(dataLines) > 2:
            data = loadtxt(fileName, usecols=col)
            if not pyutils.isList(data):
                data = list([data])

            delta = MCstat.bin(data[args.skip:])
            if n == 0:
                delta_ar = np.zeros((numFiles, delta.shape[0]))
            delta_ar[n, :] = delta.T
            #delta_ar[n,:len(delta)] = delta.T
            n += 1

    if n > 1:
        if scale:
            for m in range(n):
                plot(np.arange(len(delta_ar)),delta_ar[m],marker='s',markersize=4,\
                         linestyle='-',linewidth=1.0,color=colors[m],\
                         markeredgecolor=colors[m])
        else:
            Delta = np.average(delta_ar, 0)
            dDelta = np.std(delta_ar, 0) / np.sqrt(n)
            errorbar(np.arange(len(Delta)),Delta,dDelta,marker='s',markersize=4,\
                     linestyle='-',linewidth=1.0,color=colors[0],\
                     markeredgecolor=colors[0])
            bin_ac = MCstat.bin_ac(Delta, dDelta)
            bin_conv = MCstat.bin_conv(Delta, dDelta)
            print 'Convergence Ratio: %1.2f+/-%1.2f' % (bin_conv['CF'],
                                                        bin_conv['dCF'])
            print 'autocorrelation time: %2.1f+/-%2.1f' % \
                                                (bin_ac['tau'],bin_ac['dtau'])
    else:
        plot(delta,marker='s',markersize=4,linestyle='-',linewidth=1.0,\
             color=colors[0],markeredgecolor=colors[0])
        print 'Convergence Ratio: %1.3f' % MCstat.bin_conv(delta)['CF']
        print 'autocorrelation time: %3.3f' % MCstat.bin_ac(delta)['tau']

    ylabel(r"$\Delta_l$")
    xlabel("$l$")
    title("Bin scaling: " + yLong)

    show()
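As a reading aid for the plot: the binning curve $\Delta_l$ should plateau once the bin size exceeds the autocorrelation time, and a common estimate relating the plateau $\Delta_\infty$ to the naive level-0 error is $\tau_{\mathrm{int}} \approx \frac{1}{2}\left[(\Delta_\infty/\Delta_0)^2 - 1\right]$; MCstat's exact conventions in bin_conv and bin_ac may differ.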
Example #12
def main(): 

    # setup the command line parser options 
    parser = OptionParser() 
    parser.add_option("-s", "--skip", dest="skip", type="int",\
            help="how many input lines should we skip?")
    parser.set_defaults(skip=0)

    # parse the command line options and get the file name
    (options, args) = parser.parse_args() 
    if len(args) < 1: 
        parser.error("need a file name")
    
    fileNames = args

    for fileName in fileNames:
        normalize = False

        # We check to see if we are dealing with the one body density matrix
        if fileName.find('obdm') != -1:
            normalize = True

        # We count the number of lines in the estimator file to make sure we 
        # have some data and grab the headers
        estFile = open(fileName,'r')
        estLines = estFile.readlines()
        numLines = len(estLines) - 2    # We expect two comment lines
        pimcid = estLines[0]
        headers = estLines[1].split()
        estFile.close()

        # If we have data, compute averages and error
        if numLines-options.skip > 0:
            estData = pyutils.loadFile(fileName)

            # Now we skip data rows to test for convergence
            for n in range(options.skip):
                estData.pop(0)

            estAve = pyutils.average(estData,1)
            bins = MCstat.bin(np.array(estData))
            estErr = bins[-1,:]
            numData = len(estData)

            print pimcid, '# Number Samples %6d' %  numData
            if not normalize:
                for n,ave in enumerate(estAve):
                    if len(headers) - 1 ==  len(estAve):
                        label = headers[n+1]
                    else:
                        label = 'Col #%02d:' % n
                    print '%-16s%12.5f\t%12.5f' % (label,estAve[n],estErr[n])
            else:
                for n,ave in enumerate(estAve):
                    normAve = estAve[n]/estAve[0]
                    if abs(estAve[n]) > 1E-10:
                        normErr = (estErr[n] / estAve[n]) * normAve
                    else: 
                        normErr = 0.0

                    if len(headers) - 1 ==  len(estAve):
                        label = headers[n+1]
                    else:
                        label = 'Col #%02d:' % n
                    print '%-16s%12.5f\t%12.5f' % (label,normAve,normErr)
Example #13
def main(): 

    # setup the command line parser options 
    parser = argparse.ArgumentParser(description='Plot Raw MC Equilibration Data for Scalar Estimators.')
    parser.add_argument('fileNames', help='Scalar estimator files', nargs='+')
    parser.add_argument('--estimator','-e', help='A list of estimator names that \
                        are to be plotted.', type=str)
    parser.add_argument('--skip','-s', help='Number of measurements to be skipped \
                        in the average plot.', type=int, default=0)
    parser.add_argument('--period','-p', help='Period of the simple moving \
                        average. default=50', type=int, default=50)

    args = parser.parse_args()
    fileNames = args.fileNames
    if len(fileNames) < 1:
        parser.error("Need to specify at least one scalar estimator file")
    
    toSort = True
    for fileName in fileNames:
        if fileName.find('/') != -1:
           toSort = False
    if toSort:     
       fileNames.sort()
    
    ffile = open(fileNames[0],'r')
    headers = ffile.readline().lstrip('#').split()
    headers += ['srt']
    print headers

    # If we don't choose an estimator, provide a list of possible ones
    if (not args.estimator) or (args.estimator not in headers):
        errorString = "Need to specify one of:\n"      
        # headers is a list here (unlike the dict used in other scripts)
        for head in headers:
            errorString += "\"%s\"" % head + "   " 
        parser.error(errorString)



    numFiles = len(fileNames)

    #colors  = loadgmt.getColorList('cw/1','cw1-029',max(numFiles,2))
    colors = ["#66CAAE", "#CF6BDD", "#E27844", "#7ACF57", "#92A1D6", "#E17597", "#C1B546",'b']
 
    fig = figure(1,figsize=(13,6))
    ax = subplot(111)
    connect('key_press_event',kevent.press)
   
    #rcParams.update(mplrc.aps['params'])
    n = 0
    E0 = 0
    Es  = []
    dEs = []
    srt = False
    if args.estimator=='srt': srt=True
    for i,fileName in enumerate(fileNames):
        dataFile = open(fileName,'r')
        dataLines = dataFile.readlines()
        ffile = open(fileName,'r')
        headers = ffile.readline().lstrip('#').split()
        headers += ['srt']
        if len(dataLines) > 2:
            #params = GetFileParams(fileName)
            if  srt and not('ALRatio' in headers):
                col    = GetHeaderNumber(fileName,'nAred')
                dataR = loadtxt(fileName,usecols=col)
                col    = GetHeaderNumber(fileName,'nAext')
                dataE = loadtxt(fileName,usecols=col)
                    
                daveR = amax(MCstat.bin(dataR[args.skip:]),axis=0)
                aveR  = average(dataR[args.skip:])
                daveE = amax(MCstat.bin(dataE[args.skip:]),axis=0)
                aveE  = average(dataE[args.skip:])
                #S2 = -umath.log(ufloat(aveE,daveE)/ufloat(aveR,daveR))
                S2 = -1.0*umath.log(ufloat(aveE,daveE)/ufloat(aveR,daveR))
                print 'Entropy = ', S2
                Es  += [S2.n]
                dEs += [S2.s]
            else:
                if srt: col    = GetHeaderNumber(fileName,'ALRatio')
                else  : col    = GetHeaderNumber(fileName,args.estimator)
                data = loadtxt(fileName,usecols=col)

                ID = fileName[-14:-4]
                if  size(data) > 1:
                    sma = simpleMovingAverage(args.period,data[args.skip:])
                    ax.plot(sma,color=colors[i%len(colors)],linewidth=3,linestyle='-')
                    bins = MCstat.bin(data[args.skip:]) 
                    dataErr = amax(bins,axis=0)
                    dataAve = average(data[args.skip:])
                    if srt or (args.estimator=='ALRatio'):
                        S2 = -1.0*umath.log(ufloat(dataAve,dataErr)) 
                        print 'Entropy = ', S2
                        Es  += [S2.n]
                        dEs += [S2.s]
                    #ax.plot(bins,color=colors[i%len(colors)],linewidth=1,marker='None',linestyle='-')
                    #if E0 == 0: E0 = dataAve
                    print '%0.6f +/- %0.6f ' %(dataAve-E0,dataErr)
                    #print dataAve-E0
        else:
            print '%s contains no measurements' %fileName
    xlabel('MC bins (p=%s)' %args.period)
    ylabel(args.estimator)
    #tight_layout()
    print Es
    print dEs
    #legend() 
    show()
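For reference, the srt branch estimates the second Rényi entropy from ratio estimators, $S_2 = -\ln\left(\langle n_{A\mathrm{ext}} \rangle / \langle n_{A\mathrm{red}} \rangle\right)$ (or $S_2 = -\ln \langle \mathrm{ALRatio} \rangle$ when that column exists), with the error carried through the logarithm by the ufloat/umath machinery of the uncertainties package.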
Example #14
def main(): 
    
    # setup the command line parser options 
    parser = argparse.ArgumentParser(description='Plot binning analysis for \
                                            MC Data for Scalar Estimators.')
    parser.add_argument('fileNames', help='Scalar estimator files', nargs='+')
    parser.add_argument('--estimator','-e', help='A list of estimator names \
                                            that are to be plotted.', type=str)
    parser.add_argument('--skip','-s', help='Number of measurements to be \
                        skipped in the binning analysis.', type=int, default=0)
    parser.add_argument('--scale', help='Option to compare binning results \
                        for different parameters', action='store_true')
    args = parser.parse_args()
    
    fileNames = args.fileNames
    scale = args.scale
    
    if len(fileNames) < 1:
        parser.error("Need to specify at least one scalar estimator file")
    
    # Grab the headers from the first estimator file (each file's line count
    # is checked below before its data is used)
    headers = pimchelp.getHeadersDict(fileNames[0])
    
    # If we don't choose an estimator, provide a list of possible ones
    if not args.estimator or args.estimator not in headers:
        errorString = "Need to specify one of:\n"
        for head,index in headers.iteritems():
            errorString += "\"%s\"" % head + "   "
        parser.error(errorString)
    
    numFiles = len(fileNames)
    col = list([headers[args.estimator]])
    
    # Attempt to find a 'pretty name' for the label, otherwise just default to
    # the column heading
    label = pimchelp.Description()
    try:
        yLong = label.estimatorLongName[args.estimator]
    except KeyError:
        yLong = args.estimator
    try:
        yShort = label.estimatorShortName[args.estimator]
    except KeyError:
        yShort = args.estimator
    
    # ============================================================================
    # Figure 1 : Error vs. bin level
    # ============================================================================
    figure(1)
    connect('key_press_event',kevent.press)
    
    colors  = loadgmt.getColorList('cw/1','cw1-029',max(numFiles,2))
    
    n = 0
    for fileName in fileNames:
        
        dataFile = open(fileName,'r');
        dataLines = dataFile.readlines();
        dataFile.close()
        
        if len(dataLines) > 2:
            data = loadtxt(fileName,usecols=col)
            if not pyutils.isList(data):
               data = list([data])
            
            delta = MCstat.bin(data[args.skip:])
            if n == 0:
                delta_ar = np.zeros((numFiles,delta.shape[0]))
            delta_ar[n,:] = delta.T
            #delta_ar[n,:len(delta)] = delta.T
            n += 1
    
    if n > 1:
        if scale:
            for m in range(n):
                plot(np.arange(len(delta_ar)),delta_ar[m],marker='s',markersize=4,\
                         linestyle='-',linewidth=1.0,color=colors[m],\
                         markeredgecolor=colors[m])
        else:
            Delta = np.average(delta_ar,0)
            dDelta = np.std(delta_ar,0)/np.sqrt(n)             
            errorbar(np.arange(len(Delta)),Delta,dDelta,marker='s',markersize=4,\
                     linestyle='-',linewidth=1.0,color=colors[0],\
                     markeredgecolor=colors[0])
            bin_ac = MCstat.bin_ac(Delta,dDelta)
            bin_conv = MCstat.bin_conv(Delta,dDelta)
            print 'Convergence Ratio: %1.2f+/-%1.2f'%(bin_conv['CF'],bin_conv['dCF'])
            print 'autocorrelation time: %2.1f+/-%2.1f' % \
                                                (bin_ac['tau'],bin_ac['dtau'])
    else:
        plot(delta,marker='s',markersize=4,linestyle='-',linewidth=1.0,\
             color=colors[0],markeredgecolor=colors[0])
        print 'Convergence Ratio: %1.3f' % MCstat.bin_conv(delta)['CF']
        print 'autocorrelation time: %3.3f' % MCstat.bin_ac(delta)['tau']
    
    ylabel(r"$\Delta_l$")
    xlabel("$l$")
    title("Bin scaling: "+yLong)
    
    show()
Example #15
def main():

    # setup the command line parser options
    parser = OptionParser()
    parser.add_option("-s", "--skip", dest="skip", type="int",\
            help="how many input lines should we skip?")
    parser.set_defaults(skip=0)

    # parse the command line options and get the file name
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.error("need a file name")

    fileNames = args

    for fileName in fileNames:
        normalize = False

        # We check to see if we are dealing with the one body density matrix
        if fileName.find('obdm') != -1:
            normalize = True

        # We count the number of lines in the estimator file to make sure we
        # have some data and grab the headers
        estFile = open(fileName, 'r')
        estLines = estFile.readlines()
        numLines = len(estLines) - 2  # We expect two comment lines
        pimcid = estLines[0]
        headers = estLines[1].split()
        estFile.close()

        # If we have data, compute averages and error
        if numLines - options.skip > 0:
            estData = pyutils.loadFile(fileName)

            # Now we skip data rows to test for convergence
            for n in range(options.skip):
                estData.pop(0)

            estAve = pyutils.average(estData, 1)
            bins = MCstat.bin(np.array(estData))
            estErr = bins[-1, :]
            numData = len(estData)

            print pimcid, '# Number Samples %6d' % numData
            if not normalize:
                for n, ave in enumerate(estAve):
                    if len(headers) - 1 == len(estAve):
                        label = headers[n + 1]
                    else:
                        label = 'Col #%02d:' % n
                    print '%-16s%12.5f\t%12.5f' % (label, estAve[n], estErr[n])
            else:
                for n, ave in enumerate(estAve):
                    normAve = estAve[n] / estAve[0]
                    if abs(estAve[n]) > 1E-10:
                        normErr = (estErr[n] / estAve[n]) * normAve
                    else:
                        normErr = 0.0

                    if len(headers) - 1 == len(estAve):
                        label = headers[n + 1]
                    else:
                        label = 'Col #%02d:' % n
                    print '%-16s%12.5f\t%12.5f' % (label, normAve, normErr)
Example #16
def main(): 
    
    # setup the command line parser options 
    parser = argparse.ArgumentParser(description=
                                        'Plot PIMC estimator vs. parameter')
    parser.add_argument('fileNames', help='Scalar estimator files', nargs='+')
    args = parser.parse_args()
                                    
    fileName = args.fileNames[0]
    
    dataType = fileName.split('-')[1]
    
    # Get first line of file
    MCfile = open(fileName,'r')
    MCfile.readline()
    x = np.array(MCfile.readline().split()[1:]).astype(float)
    MCfile.close()
    
    # Get MC data
    MCdata = np.loadtxt(fileName)
    mean = np.average(MCdata,axis=0)
    # error = np.std(MCdata,axis=0)/np.sqrt(MCdata.shape[0])
    bins = MCstat.bin(np.array(MCdata))
    error = bins[-1,:]    
    
    if dataType == 'occ':
        N = np.sum(MCdata[0,:])
        y = mean/N
        dy = error/N
        n0 = y[0]
        dn0 = dy[0]
    elif dataType == 'pn0':
        N = 2*x[-1]
        y = mean
        dy = error
        n0 = np.sum( mean*2*x)/N
        
        ai = 1.0-(2.0*x)/N
        dn0 = np.sqrt( np.sum((error[0:-1]*ai[0:-1])**2) )
                
        nzInds = np.nonzero(y[:-1] > 2.0*dy[:-1])[0]        
        ynz = y[nzInds]
        dynz = dy[nzInds]
        
        y = ynz
        dy = dynz
        x = x[nzInds]
        
        Sigma = np.sum(ynz)
        Svn = -np.sum( ynz*np.log(ynz) ) - (1.0-Sigma)*np.log(1.0-Sigma)
        dSdP = -(np.log(ynz) - np.log(1.0-Sigma))
        dSvn = np.sqrt(np.sum( (dSdP*dynz)**2  ))
                
        S2 = -np.log( np.sum( ynz**2 ) + (1.0-Sigma)**2)
        dS2dP = (-1.0)*np.exp(S2)*(2.0*ynz-2.0*(1.0-Sigma))
        dS2 = np.sqrt(np.sum( (dS2dP*dynz)**2  ))
        
        print "\tvon Neumann entropy:\t" + str(Svn) + ' +/- ' + str(dSvn)
        print "\t2nd Renyi entropy:\t" + str(S2) + ' +/- ' + str(dS2) 
         
        
    print "\tCondensate fraction:\t" + str(n0) + ' +/- ' + str(dn0)    
    
    f,ax = plt.subplots()
    ax.errorbar(x,y,dy,linestyle='--',marker='o',markersize=4,capsize=4)
    ax.set_yscale('log')
    plt.xlim([x[0]-0.25,x[-1]+0.25])
    plt.xlabel(r'$m$')
    plt.ylabel(r'$n$')
    plt.show()
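The pn0 branch propagates errors through the entropies by linearizing in the occupation probabilities $p_m$, treating the truncated tail as a single weight $1-\Sigma$ with $\Sigma = \sum_m p_m$:

$$S_{vn} = -\sum_m p_m \ln p_m - (1-\Sigma)\ln(1-\Sigma), \qquad \frac{\partial S_{vn}}{\partial p_m} = -\left[\ln p_m - \ln(1-\Sigma)\right],$$

$$S_2 = -\ln\Big(\sum_m p_m^2 + (1-\Sigma)^2\Big), \qquad \frac{\partial S_2}{\partial p_m} = -e^{S_2}\left(2p_m - 2(1-\Sigma)\right),$$

$$\delta S = \sqrt{\sum_m \left(\frac{\partial S}{\partial p_m}\,\delta p_m\right)^2},$$

which is exactly what dSvn and dS2 compute above.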