def main():

    # define the mapping between short names and label names
    shortFlags = ['n','T','N','t','u','V','L','W','M']
    parMap = {'n':'Initial Density', 'T':'Temperature', 'N':'Initial Number Particles',
              't':'Imaginary Time Step', 'u':'Chemical Potential', 'V':'Container Volume',
              'L':'Container Length', 'W':'Virial Window', 'M':'Update Length'}
              #'M':'Update Slices (Mbar)'

    # setup the command line parser options
    parser = OptionParser()
    parser.add_option("-T", "--temperature", dest="T", type="float",
                      help="simulation temperature in Kelvin")
    parser.add_option("-N", "--number-particles", dest="N", type="int",
                      help="number of particles")
    parser.add_option("-n", "--density", dest="n", type="float",
                      help="number density in Angstroms^{-d}")
    parser.add_option("-t", "--imag-time-step", dest="tau", type="float",
                      help="imaginary time step")
    parser.add_option("-u", "--chemical-potential", dest="mu", type="float",
                      help="chemical potential in Kelvin")
    parser.add_option("-L", "--Lz", dest="L", type="float",
                      help="length in Angstroms")
    parser.add_option("-V", "--volume", dest="V", type="float",
                      help="volume in Angstroms^d")
    parser.add_option("-r", "--reduce", dest="reduce",
                      choices=['T','N','n','u','t','L','V','W','M'],
                      help="variable name for reduction [T,N,n,u,t,L,V,W,M]")
    parser.add_option("--canonical", action="store_true", dest="canonical",
                      help="are we in the canonical ensemble?")
    parser.add_option("-p", "--plot", action="store_true", dest="plot",
                      help="do we want to produce data plots?")
    parser.add_option("-R", "--radius", dest="R", type="float",
                      help="radius in Angstroms")
    parser.add_option("-s", "--skip", dest="skip", type="int",
                      help="number of measurements to skip")
    parser.add_option("-e", "--estimator", dest="estimator", type="str",
                      help="specify a single estimator to reduce")
    parser.add_option("-i", "--pimcid", dest="pimcid", type="str",
                      help="specify a single pimcid")
    parser.set_defaults(canonical=False)
    parser.set_defaults(plot=False)
    parser.set_defaults(skip=0)

    # parse the command line options and get the reduce flag
    (options, args) = parser.parse_args()

    # Determine the working directory
    if args:
        baseDir = args[0]
        if baseDir == '.':
            baseDir = ''
    else:
        baseDir = ''
    skip = options.skip

    if (not options.reduce):
        parser.error("need a correct reduce flag (-r, --reduce): [T,N,n,u,t,L,V,W,M]")

    # Check that we are in the correct ensemble
    pimchelp.checkEnsemble(options.canonical)

    dataName,outName = pimchelp.getFileString(options)
    reduceFlag = []
    reduceFlag.append(options.reduce)
    reduceFlag.append(parMap[options.reduce])

    # Create the PIMC analysis helper and fill up the simulation parameters maps
    pimc = pimchelp.PimcHelp(dataName,options.canonical,baseDir=baseDir)
    pimc.getSimulationParameters()

    # Form the full output file name
    if options.R is None:
        outName += '.dat'
    else:
        outName += '-R-%04.1f.dat' % options.R

    # possible types of estimators we may want to reduce
    estList = ['estimator', 'super', 'obdm', 'pair', 'radial', 'number', 'radwind',
               'radarea', 'planedensity', 'planearea', 'planewind', 'virial',
               'linedensity', 'linepotential']
    estDo = {e:False for e in estList}

    # if we specify a single estimator, only do that one
    if options.estimator:
        estDo[options.estimator] = True
    # otherwise test to see if the data files exist
    else:
        for e in estList:
            estDo[e] = bool(pimc.getFileList(e))

    # We first reduce the scalar estimators and output them to disk
    if estDo['estimator']:
        head1,scAve1,scErr1 = getScalarEst('estimator',pimc,outName,reduceFlag,skip=skip)

    if estDo['virial']:
        head1,scAve1,scErr1 = getScalarEst('virial',pimc,outName,reduceFlag,skip=skip)

    if estDo['super']:
        head2,scAve2,scErr2 = getScalarEst('super',pimc,outName,reduceFlag,skip=skip)

    # Now we do the normalized one body density matrix
    if estDo['obdm']:
        x1,ave1,err1 = getVectorEst('obdm',pimc,outName,reduceFlag,'r [A]','n(r)',skip=skip)

    # Now we do the pair correlation function
    if estDo['pair']:
        x2,ave2,err2 = getVectorEst('pair',pimc,outName,reduceFlag,'r [A]','g(r)',skip=skip)

    # The radial density
    if estDo['radial']:
        x3,ave3,err3 = getVectorEst('radial',pimc,outName,reduceFlag,'r [A]','rho(r)',skip=skip)

    # Compute the number distribution function and compressibility if we are in
    # the grand canonical ensemble
    if estDo['number']:
        x4,ave4,err4 = getVectorEst('number',pimc,outName,reduceFlag,'N','P(N)',skip=skip)

    # I don't know why this isn't working, MCStat is giving me an error, will
    # return to this later. AGD
    #kappa,kappaErr = getKappa(pimc,outName,reduceFlag)

    # The radially averaged winding superfluid density
    if estDo['radwind']:
        x5,ave5,err5 = getVectorEst('radwind',pimc,outName,reduceFlag,'r [A]','rho_s(r)',skip=skip)

    # The radially averaged area superfluid density
    if estDo['radarea']:
        x6,ave6,err6 = getVectorEst('radarea',pimc,outName,reduceFlag,'r [A]','rho_s(r)',skip=skip)

    if estDo['planewind']:
        x7,ave7,err7 = getVectorEst('planewind',pimc,outName,reduceFlag,'n','rho_s(r)',skip=skip)

    if estDo['planearea']:
        x8,ave8,err8 = getVectorEst('planearea',pimc,outName,reduceFlag,'n','rho_s(r)',skip=skip)

    if estDo['planedensity']:
        x9,ave9,err9 = getVectorEst('planedensity',pimc,outName,reduceFlag,'n','rho(r)',skip=skip)

    if estDo['linedensity']:
        x10,ave10,err10 = getVectorEst('linedensity',pimc,outName,reduceFlag,
                                       'r [A]','rho1d(r)',skip=skip)

    if estDo['linepotential']:
        x11,ave11,err11 = getVectorEst('linepotential',pimc,outName,reduceFlag,
                                       'r [A]','V1d(r)',skip=skip)

    # Do we show plots?
    if options.plot:

        figNum = 1

        # Get the changing parameter that we are plotting against
        param = []
        for ID in pimc.id:
            param.append(float(pimc.params[ID][reduceFlag[1]]))
        numParams = len(param)
        markers = loadgmt.getMarkerList()
        colors = loadgmt.getColorList('cw/1','cw1-029',10)

        # -----------------------------------------------------------------------------
        # Plot the averaged data
        # -----------------------------------------------------------------------------
        if estDo['estimator']:
            headLab = ['E/N','K/N','V/N','N','diagonal']
            dataCol = []
            for head in headLab:
                n = 0
                for h in head1:
                    if head == h:
                        dataCol.append(n)
                        break
                    n += 1
            yLabelCol = ['Energy / N', 'Kinetic Energy / N', 'Potential Energy / N',
                         'Number Particles', 'Diagonal Fraction']

            # ============================================================================
            # Figure -- Various thermodynamic quantities
            # ============================================================================
            for n in range(len(dataCol)):
                figure(figNum)
                connect('key_press_event',kevent.press)
                errorbar(param, scAve1[:,dataCol[n]], yerr=scErr1[:,dataCol[n]],
                         color=colors[n],marker=markers[n],markeredgecolor=colors[n],
                         markersize=8,linestyle='None',capsize=4)
                xlabel('%s' % options.reduce)
                ylabel(yLabelCol[n])
                tight_layout()
                figNum += 1

        # ============================================================================
        # Figure -- The superfluid density
        # ============================================================================
        if estDo['super']:
            figure(figNum)
            connect('key_press_event',kevent.press)
            errorbar(param, scAve2[:,0], yerr=scErr2[:,0],
                     color=colors[0],marker=markers[0],markeredgecolor=colors[0],
                     markersize=8,linestyle='None',capsize=4)
            tight_layout()
            xlabel('%s' % options.reduce)
            ylabel('Superfluid Density')

        # ============================================================================
        # Figure -- The one body density matrix
        # ============================================================================
        if estDo['obdm']:
            figNum += 1
            figure(figNum)
            connect('key_press_event',kevent.press)
            ax = subplot(111)

            for n in range(numParams):
                lab = '%s = %s' % (options.reduce,param[n])
                errorbar(x1[n,:], (ave1[n,:]+1.0E-15), err1[n,:],color=colors[n],marker=markers[0],
                         markeredgecolor=colors[n],markersize=8,linestyle='None',label=lab)

            #axis([0,21,1.0E-5,1.1])
            xlabel('r [Angstroms]')
            ylabel('One Body Density Matrix')
            tight_layout()
            legend(loc='best', frameon=False, prop={'size':16}, ncol=2)

        # ============================================================================
        # Figure -- The pair correlation function
        # ============================================================================
        if estDo['pair']:
            figNum += 1
            figure(figNum)
            connect('key_press_event',kevent.press)

            for n in range(numParams):
                lab = '%s = %s' % (options.reduce,param[n])
                errorbar(x2[n,:], ave2[n,:], yerr=err2[n,:],color=colors[n],marker=markers[0],
                         markeredgecolor=colors[n],markersize=8,linestyle='None',label=lab,capsize=6)

            #axis([0,256,1.0E-5,1.2])
            xlabel('r [Angstroms]')
            ylabel('Pair Correlation Function')
            legend(loc='best', frameon=False, prop={'size':16}, ncol=2)
            tight_layout()

        # We only plot the compressibility if we are in the grand-canonical ensemble
        if not options.canonical:

            # ============================================================================
            # Figure -- The number distribution
            # ============================================================================
            if estDo['number']:
                figNum += 1
                figure(figNum)
                connect('key_press_event',kevent.press)

                # Find which column contains the average number of particles
                for hn,h in enumerate(head1):
                    if h == 'N':
                        break

                for n in range(numParams):
                    lab = '%s = %s' % (options.reduce,param[n])
                    aN = scAve1[n,hn]
                    errorbar(x4[n,:]-aN, ave4[n,:], err4[n,:],color=colors[n],marker=markers[0],
                             markeredgecolor=colors[n],markersize=8,linestyle='None',
                             label=lab,capsize=6)

                axis([-30,30,0.0,1.2])
                xlabel(r'$N-\langle N \rangle$')
                ylabel('P(N)')
                tight_layout()
                legend(loc='best', frameon=False, prop={'size':16}, ncol=2)

            # ============================================================================
            # Figure -- The compressibility
            # ============================================================================
            #figNum += 1
            #figure(figNum)
            #connect('key_press_event',kevent.press)
            #errorbar(param, kappa, yerr=kappaErr, color=colors[0],marker=markers[0],
            #         markeredgecolor=colors[0],markersize=8,linestyle='None',capsize=6)
            #tight_layout()
            #xlabel('%s' % options.reduce)
            #ylabel(r'$\rho^2 \kappa$')

        # ============================================================================
        # Figure -- The radial density
        # ============================================================================
        if len(glob.glob('CYLINDER')) > 0:
            figNum += 1
            figure(figNum)
            connect('key_press_event',kevent.press)
            ax = subplot(111)

            for n in range(numParams):
                lab = '%s = %s' % (options.reduce,param[n])
                errorbar(x3[n,:], (ave3[n,:]+1.0E-15), err3[n,:],color=colors[n],marker=markers[0],
                         markeredgecolor=colors[n],markersize=8,linestyle='None',label=lab)

            #axis([0,21,1.0E-5,1.1])
            tight_layout()
            xlabel('r [Angstroms]')
            ylabel('Radial Density')
            legend(loc='best', frameon=False, prop={'size':16}, ncol=2)

        show()
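# ------------------------------------------------------------------------------
# Illustration only: a minimal, hypothetical sketch of the kind of column
# averaging that getScalarEst() performs on a single estimator file.  The real
# helper lives elsewhere in this repository; the assumed file layout (a '#'
# header line of column labels followed by whitespace-separated numeric rows)
# and the function name are assumptions, not the repository's actual API.
# ------------------------------------------------------------------------------
import numpy as np

def averageScalarFile(fileName, skip=0):
    '''Return (column labels, column averages, standard errors) for one file.'''
    with open(fileName) as inFile:
        # assume the first line holds '#'-prefixed column labels
        headers = inFile.readline().lstrip('#').split()

    # loadtxt skips '#' comment lines; drop the first `skip` measurements
    data = np.loadtxt(fileName, ndmin=2)[skip:]
    ave = data.mean(axis=0)
    err = data.std(axis=0) / np.sqrt(data.shape[0])
    return headers, ave, err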
def main():

    # setup the command line parser options
    parser = OptionParser()
    parser.add_option("-T", "--temperature", dest="T", type="float",
                      help="simulation temperature in Kelvin")
    parser.add_option("-N", "--number-particles", dest="N", type="int",
                      help="number of particles")
    parser.add_option("-n", "--density", dest="n", type="float",
                      help="number density in Angstroms^{-d}")
    parser.add_option("-P", "--number-time-slices", dest="P", type="int",
                      help="number of time slices")
    parser.add_option("-u", "--chemical-potential", dest="mu", type="float",
                      help="chemical potential in Kelvin")
    parser.add_option("-L", "--length", dest="L", type="float",
                      help="length in Angstroms")
    parser.add_option("-t", "--imag-time-step", dest="tau", type="float",
                      help="imaginary time step")
    parser.add_option("--canonical", action="store_true", dest="canonical",
                      help="are we in the canonical ensemble?")
    parser.add_option("-i", "--id", action="append", dest="pimcID", type="int",
                      help="a list of PIMC ID numbers to include")
    parser.add_option("-e", "--exclude", action="append", dest="exID", type="int",
                      help="a list of PIMC ID numbers to exclude")
    parser.add_option("-c", "--cluster", dest="cluster",
                      choices=['westgrid','sharcnet','scinet','vacc'],
                      help="target cluster: [westgrid,sharcnet,scinet,vacc]")
    parser.set_defaults(canonical=False)

    # parse the command line options and get the reduce flag
    (options, args) = parser.parse_args()
    if (not options.cluster):
        parser.error("need to specify a cluster")

    # Check that we are in the correct ensemble
    pimchelp.checkEnsemble(options.canonical)

    # create a file string that will be used to name the submit file
    outName = ''
    if options.T:
        outName += '-%06.3f' % options.T
    if options.L:
        outName += '-%07.3f' % options.L

    # Get the data string and create the pimc helper object
    dataName = pimchelp.getFileString(options,reduce=False)
    pimc = pimchelp.PimcHelp(dataName,options.canonical)

    # We get either all the log files in the current directory, or just the
    # requested files by their ID number
    logFileNames = pimc.getFileList('log',idList=options.pimcID)

    # If we have excluded any IDs we remove them from the list
    if options.exID:
        for id in options.exID:
            for n,fname in enumerate(logFileNames):
                if int(id) == pimc.getID(fname):
                    logFileNames.pop(n)
                    break

    # Now create the submission files
    if options.cluster == 'westgrid':
        westgrid(logFileNames,outName)

    if options.cluster == 'sharcnet':
        sharcnet(logFileNames,outName)

    if options.cluster == 'scinet':
        scinet(logFileNames,outName)

    if options.cluster == 'vacc':
        vacc(logFileNames,outName)
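# ------------------------------------------------------------------------------
# Illustration only: a hypothetical sketch of a cluster submit-file writer in
# the spirit of the westgrid()/sharcnet()/scinet()/vacc() calls above.  The PBS
# directives, the output file name, and the per-job command are all assumptions;
# the real writers presumably reconstruct each restart command from its log file.
# ------------------------------------------------------------------------------
def genericCluster(logFileNames, outName):
    '''Write a simple PBS job-array script that lists one job per log file.'''
    fileName = 'submit-pimc%s.pbs' % outName
    with open(fileName, 'w') as pbsFile:
        pbsFile.write('#!/bin/bash\n')
        pbsFile.write('#PBS -l walltime=120:00:00\n')
        pbsFile.write('#PBS -t 0-%d\n' % (len(logFileNames)-1))
        pbsFile.write('case $PBS_ARRAYID in\n')
        for n,logName in enumerate(logFileNames):
            # placeholder command; a real writer would emit the restart command here
            pbsFile.write('%d)\n    echo "restart job for %s"\n    ;;\n' % (n,logName))
        pbsFile.write('esac\n')
    print 'Wrote %s with %d jobs' % (fileName,len(logFileNames))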
def main():

    # setup the command line parser options
    parser = OptionParser()
    parser.add_option("-T", "--temperature", dest="T", type="float",
                      help="simulation temperature in Kelvin")
    parser.add_option("-N", "--number-particles", dest="N", type="int",
                      help="number of particles")
    parser.add_option("-n", "--density", dest="n", type="float",
                      help="number density in Angstroms^{-d}")
    parser.add_option("-t", "--imag-time-step", dest="tau", type="float",
                      help="imaginary time step")
    parser.add_option("-M", "--number-time-slices", dest="M", type="int",
                      help="number of time slices")
    parser.add_option("-u", "--chemical-potential", dest="mu", type="float",
                      help="chemical potential in Kelvin")
    parser.add_option("-V", "--volume", dest="V", type="float",
                      help="volume in Angstroms^d")
    parser.add_option("-L", "--Lz", dest="L", type="float",
                      help="length in Angstroms")
    parser.add_option("--canonical", action="store_true", dest="canonical",
                      help="are we in the canonical ensemble?")
    parser.add_option("-s", "--skip", dest="skip", type="int",
                      help="how many input lines should we skip?")
    parser.add_option("--cumulative", action="store_true", dest="cumulative",
                      help="Merge cumulative estimators?")
    parser.add_option("-i", "--id", action="append", dest="pimcID", type="int",
                      help="a list of PIMC ID numbers to include")
    parser.set_defaults(skip=0)
    parser.set_defaults(canonical=False)
    parser.set_defaults(cumulative=False)

    # parse the command line options and get the reduce flag
    (options, args) = parser.parse_args()

    # Determine the working directory
    if args:
        baseDir = args[0]
        if baseDir == '.':
            baseDir = ''
    else:
        baseDir = ''

    # We check if we have a MERGED directory, if not create it
    if len(glob.glob(baseDir + 'MERGED')) == 0:
        os.system('mkdir %sMERGED' % baseDir)
        # Create a .donotbackup file
        os.system('touch %sMERGED/.donotbackup' % baseDir)

    # Check that we are in the correct ensemble
    pimchelp.checkEnsemble(options.canonical)

    dataName = pimchelp.getFileString(options,reduce=False)

    # Create the PIMC analysis helper and fill up the simulation parameters maps
    pimc = pimchelp.PimcHelp(dataName,options.canonical,baseDir=baseDir)
    pimc.getSimulationParameters(idList=options.pimcID)

    # We try to find a new PIMCID which is the average of the ones to merge, and
    # make sure it doesn't already exist
    newID = 0
    for id in pimc.id:
        newID += int(id)
    newID = int(newID/(1.0*len(pimc.id)))

    # Now we keep incrementing the ID number until we are sure it is unique
    while ( (len(glob.glob(baseDir + '*estimator*-%09d*' % newID)) > 0) or
            (len(glob.glob(baseDir + 'MERGED/*estimator*-%09d*' % newID)) > 0) ):
        newID += 1

    # Merge all the output files
    print 'Merged data files:'
    for ftype in pimc.dataType:
        mergeData(pimc,ftype,newID,options.skip,baseDir,idList=options.pimcID)

    # Now perform the merge for possible cumulative average files
    if options.cumulative:
        for ftype in ['position','locsuper']:
            mergeCumulativeData(pimc,ftype,newID,baseDir,idList=options.pimcID)

    # copy over the log file
    oldLogName = pimc.getFileList('log',idList=options.pimcID)[0]
    newLogName = os.path.basename(oldLogName).replace(str(pimc.id[0]),str(newID))
    os.system('cp %s %s' % (oldLogName,baseDir+'MERGED/'+newLogName))

    # Do the same if we are merging cylinder files
    if len(glob.glob(baseDir + 'MERGED/CYLINDER')) > 0:
        print "CYLINDER"
        os.system('cp %s %s' % (oldLogName,baseDir+'MERGED/CYLINDER/'+newLogName))
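# ------------------------------------------------------------------------------
# Illustration only: a hypothetical sketch of the kind of concatenation that
# mergeData() performs for one estimator type, assuming plain-text data files
# whose header lines start with '#'.  The function name and the choice to keep
# only the first file's header are illustrative, not the real helper's behavior.
# ------------------------------------------------------------------------------
def concatenateEstimatorFiles(fileNames, outName, skip=0):
    '''Append the data rows of each input file to outName, keeping one header.'''
    numLines = 0
    with open(outName, 'w') as outFile:
        for i,fileName in enumerate(fileNames):
            with open(fileName) as inFile:
                lines = inFile.readlines()
            header = [line for line in lines if line.startswith('#')]
            data = [line for line in lines if not line.startswith('#')]

            # keep the header from the first file only; skip equilibration rows
            if i == 0:
                outFile.writelines(header)
            outFile.writelines(data[skip:])
            numLines += len(data[skip:])
    return numLines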
def main():

    # setup the command line parser options
    parser = OptionParser()
    parser.add_option("-T", "--temperature", dest="T", type="float",
                      help="simulation temperature in Kelvin")
    parser.add_option("-N", "--number-particles", dest="N", type="int",
                      help="number of particles")
    parser.add_option("-n", "--density", dest="n", type="float",
                      help="number density in Angstroms^{-d}")
    parser.add_option("-t", "--imag-time-step", dest="tau", type="float",
                      help="imaginary time step")
    parser.add_option("-M", "--number-time-slices", dest="M", type="int",
                      help="number of time slices")
    parser.add_option("-u", "--chemical-potential", dest="mu", type="float",
                      help="chemical potential in Kelvin")
    parser.add_option("-V", "--volume", dest="V", type="float",
                      help="volume in Angstroms^d")
    parser.add_option("-L", "--Lz", dest="L", type="float",
                      help="length in Angstroms")
    parser.add_option("-R", "--radius", dest="R", type="float",
                      help="radius in Angstroms")
    parser.add_option("-v", "--varp", dest="varp",
                      choices=['T','N','n','u','t','L','V','r'],
                      help="varying parameter, one of [T,N,n,u,t,L,V,r]")
    parser.add_option("--canonical", action="store_true", dest="canonical",
                      help="are we in the canonical ensemble?")
    parser.add_option("--restarted", action="store_true", dest="restarted",
                      help="are we merging pimcs that got restarted from a parent state?")
    parser.add_option("-s", "--skip", dest="skip", type="int",
                      help="how many input lines should we skip?")
    parser.set_defaults(skip=0)
    parser.set_defaults(canonical=False)
    parser.set_defaults(restarted=False)

    # parse the command line options and get the reduce flag
    (options, args) = parser.parse_args()
    if len(args) > 0:
        parser.error("incorrect number of arguments")

    # We check if we have a MERGED directory, if not create it
    if not os.path.exists('MERGED/CYLINDER'):
        os.makedirs('MERGED/CYLINDER')

    # Check that we are in the correct ensemble
    pimchelp.checkEnsemble(options.canonical)

    # Form a pattern for the pimc output filenames satisfying our options
    dataName = pimchelp.getFileString(options,reduce=False)

    # Create the PIMC analysis helper
    pimc = pimchelp.PimcHelp(dataName,options.canonical)

    # Fill up the simulation parameters maps
    pimc.getSimulationParameters()

    # Delete those pimcIDs that do not satisfy parameters which are not
    # contained in the pimc output filename structure ("implicit" parameters)
    pimc.ApplyImplicitParameters()

    # if there is no need to merge with a varying parameter
    if (not options.varp):
        # Create a new pimcID
        newID = getNewPIMCID(pimc)

        # Merge all the output files
        print 'Merged data files:'
        for ftype in pimc.dataType:
            mergeData(pimc,ftype,newID,options.skip,options.restarted)

        # copy over the log file
        oldLogName = pimc.getFileList('log')[0]
        newLogName = oldLogName.replace(str(pimc.id[0]),str(newID))
        os.system('cp %s %s' % (oldLogName,'MERGED/'+newLogName))

    # with a varying parameter, one needs to group the corresponding pimcIDs
    else:
        # group pimcIDs with the same varying parameter
        MergeSets = getMergeSets(pimc,options.varp)
        for varp in sorted(MergeSets.iterkeys()):
            mergeSet = MergeSets[varp]
            print '\nMerged data files for %s=%s:\n' % (options.varp,varp)
            print 'PIMCids to merge: %s' % mergeSet

            # if there is only one pimcID with this varp, then just copy the files
            if (len(mergeSet) == 1):
                lsCommand = 'ls *log*%s*' % mergeSet[0]
                LogName = os.popen(lsCommand).read().split('\n')[0]
                shutil.copyfile(LogName,'MERGED/'+LogName)

                for ftype in pimc.dataType:
                    lsCommand = "ls *%s*%s*" % (ftype,mergeSet[0])
                    fileName = os.popen(lsCommand).read().split('\n')[0]
                    outFile,numLines = CreateFileSkipping(fileName,mergeSet[0],mergeSet[0],options.skip)
                    print '%10d' % numLines
                    outFile.close()

                lsCommand = "ls CYLINDER/*%s*" % mergeSet[0]
                fileNames = os.popen(lsCommand).read().split('\n')
                fileNames.pop()
                for files in fileNames:
                    outFile,numLines = CreateFileSkipping(files,mergeSet[0],mergeSet[0],options.skip)
                    print '%10d' % numLines
                    outFile.close()

            # otherwise we need to be careful about which files we merge together
            else:
                # Create a new pimcID
                newID = getNewPIMCID(pimc)
                for ftype in pimc.dataType:
                    mergeData(pimc,ftype,newID,options.skip,options.restarted,mergeSet)

                lsCommand = 'ls *log*%s*' % mergeSet[0]
                oldLogName = os.popen(lsCommand).read().split('\n')[0]
                newLogName = oldLogName.replace(str(mergeSet[0]),str(newID))
                shutil.copyfile(oldLogName,'MERGED/'+newLogName)
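# ------------------------------------------------------------------------------
# Illustration only: a sketch of what a getNewPIMCID() helper could look like,
# following the same recipe used inline in the merge driver above (average the
# IDs being merged, then increment until no existing estimator file uses that
# ID).  The glob patterns are assumptions about the output naming convention.
# ------------------------------------------------------------------------------
import glob

def getNewPIMCIDSketch(pimc):
    '''Return an unused PIMC ID near the average of the IDs being merged.'''
    newID = int(sum(int(cid) for cid in pimc.id)/(1.0*len(pimc.id)))

    # keep incrementing until the candidate does not collide with existing output
    while (glob.glob('*estimator*-%09d*' % newID) or
           glob.glob('MERGED/*estimator*-%09d*' % newID)):
        newID += 1
    return newID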