def run(args):
    if len(args) == 0:
        usage()
    subcmd = command_importer(double_dash_commands.get(args[0], args[0]))
    subcmd.run(args[1:])
    sys.exit(0)
def __init__(self, name, specification):
    match = FIELD_SPECIFICATION_RE.match(specification)
    if match is None:
        usage('invalid field specification: ' + specification)
    field_type, field_options, field_optional = match.group(1), match.group(2), match.group(3)
    if field_options is not None:
        field_options = field_options[1:-1]
    self.name = name
    self.optional = field_optional
    self.options = field_options
    self.type = field_type
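# The pattern behind FIELD_SPECIFICATION_RE is not shown in this excerpt. A
# minimal hypothetical sketch, assuming a specification of the form "type",
# optional parenthesized options (whose delimiters the [1:-1] slice above
# strips), and a trailing optional marker:
import re

FIELD_SPECIFICATION_RE = re.compile(r'^(\w+)(\([^)]*\))?(\?)?$')  # hypothetical

m = FIELD_SPECIFICATION_RE.match('int(unsigned)?')
# m.group(1) == 'int', m.group(2) == '(unsigned)', m.group(3) == '?'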
def find_schema(config, database, table):
    found_schema = None
    for schema in config['mysql']['schemas']:
        databases = config['mysql']['schemas'][schema]['databases']
        tables = config['mysql']['schemas'][schema]['tables']
        if database in databases and table in tables:
            if found_schema:
                usage('two different schemas cannot have identical database names')
            found_schema = schema
    if not found_schema:
        usage('could not find schema with specified database and table')
    return found_schema
def produce_messages(f_consume, args, config):
    seed = config['generator']['seed']
    producers = []
    # iterate all schema, database and table
    for schema in config['mysql']['schemas']:
        for database in config['mysql']['schemas'][schema]['databases']:
            for table in config['mysql']['schemas'][schema]['tables']:
                producers.extend(generate_producers_for_table(seed, schema, database, table, config))
    # Filter producers by arguments
    producers = filter(lambda x: args.schema is None or x.table.schema == args.schema, producers)
    if len(producers) == 0:
        usage('could not find specified schema')
    producers = filter(lambda x: args.database is None or x.table.database == args.database, producers)
    if len(producers) == 0:
        usage('could not find specified database')
    producers = filter(lambda x: args.table is None or x.table.table_name == args.table, producers)
    if len(producers) == 0:
        usage('could not find specified table')
    # Check lag and try to produce every 10 ms
    start_time = time() * 1000.0
    while True:
        time_elapsed = time() * 1000.0 - start_time
        # generate the list of (message timestamp, producer) pairs, sorted to ensure the output order
        ts_producer_pairs = flatmap(lambda p: zip_with(p.msg_timings(time_elapsed), p), producers)
        ts_producer_pairs.sort()
        for _, producer in ts_producer_pairs:
            f_consume(*producer.produce_one())
        sleep(0.01)
def engine(switches):
    """main engine loop"""
    if switches.has_key('-d'):  # file deletion mode
        if switches.has_key('-i'):  # single file mode
            file_name = switches['-i']
            if os.path.exists(file_name):
                utils.check_prompt_and_delete(switches['-i'], switches.has_key('-y'))
            else:
                print "filename ERROR : " + file_name
        else:  # batch mode
            if switches.has_key('-m'):
                if switches['-m'] == '1':
                    print "mode 1 selected"
                    for i in os.listdir('.'):
                        if os.path.isdir(i):
                            os.chdir(i)
                            for j in os.listdir('.'):
                                if os.path.isdir(j):
                                    os.chdir(j)
                                    for k in os.listdir('.'):
                                        utils.check_prompt_and_delete(k, switches.has_key('-y'))
                                    os.chdir('..')
                            os.chdir('..')
                elif switches['-m'] == '2':
                    print "mode 2 selected"
                    for i in os.listdir('.'):
                        if os.path.isdir(i):
                            os.chdir(i)
                            for j in os.listdir('.'):
                                utils.check_prompt_and_delete(j, switches.has_key('-y'))
                            os.chdir('..')  # return to the parent dir; the original never chdir'd back in mode 2
                else:
                    print "incorrect mode!"
                    utils.usage()
                    sys.exit(1)
def produce_messages(f_consume, args, config):
    seed = config['generator']['seed']
    producers = []
    # iterate all schema, database and table
    for schema in config['mysql']['schemas']:
        for database in config['mysql']['schemas'][schema]['databases']:
            for table in config['mysql']['schemas'][schema]['tables']:
                topic = config['mysql']['schemas'][schema]['tables'][table][database]['topic']
                producers.extend(generate_producers_for_table(topic, seed, schema, database, table, config))
    # Filter producers by arguments
    producers = filter(lambda x: args.schema is None or x.table.schema == args.schema, producers)
    if len(producers) == 0:
        usage('could not find specified schema')
    producers = filter(lambda x: args.database is None or x.table.database == args.database, producers)
    if len(producers) == 0:
        usage('could not find specified database')
    producers = filter(lambda x: args.table is None or x.table.table_name == args.table, producers)
    if len(producers) == 0:
        usage('could not find specified table')
    # Check lag and try to produce every 10 ms
    start_time = time() * 1000.0
    while True:
        time_elapsed = time() * 1000.0 - start_time
        # generate the list of (message timestamp, producer) pairs, sorted to ensure the output order
        ts_producer_pairs = flatmap(lambda p: zip_with(p.msg_timings(time_elapsed), p), producers)
        ts_producer_pairs.sort()
        for _, producer in ts_producer_pairs:
            f_consume(*producer.produce_one())
        sleep(0.01)
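# The loop above leans on two helpers, flatmap and zip_with, that are not
# defined in this excerpt. A minimal sketch consistent with how they are
# called (zip_with pairs every timestamp from msg_timings with the producer
# itself; flatmap concatenates the per-producer pair lists):
def zip_with(xs, y):
    # pair each element of xs with the constant y
    return [(x, y) for x in xs]

def flatmap(f, xs):
    # apply f to each element and flatten the resulting lists
    return [item for x in xs for item in f(x)]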
#!/usr/bin/python
import sys
from math import *
import subprocess
import os
from scipy import array, zeros
import pylab as pl
# mine
import outcarIO
import lammpsIO
import orderParam
import utils

utils.usage(["<outcar> <l> -2neighb"], 2, 3, "-2neighb: 2nd shell neighbors are used when calculating Ql")

rcut = None
filename = sys.argv[1]
lval = int(sys.argv[2])

sh2 = False
if "-2neighb" in sys.argv:
    sh2 = True

outcarFlag = False
lammpsFlag = False
if "OUTCAR" in filename:
    outcarFlag = True
    lammpsFlag = False
else:
    outcarFlag = False
    lammpsFlag = True  # completed from the identical pattern in the other scripts; the snippet was cut off here
import plotRemote
#theirs
import sys
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy import weave
from scipy.weave import converters
from numpy import *
from mpl_toolkits.mplot3d.art3d import Line3DCollection
#mine
import rootMeanSquareDist
from struct_tools import minImageTranslation
import utils

utils.usage(["<xdatfile>", "opfile (tetra,cn,rmsd or \"time\" or \"number\")"], 2, 2)

coarseGrainN = 1
xdatcarFileName, opFile = sys.argv[1], sys.argv[2]
#centralAtomIndex=atomsRequested[0]
#centralAtomIndex=atomsRequested.index(centralAtomIndex)

##Parsing the xdatcar for atomic trajectories
startBlockFlag = False
headlen = 0
nAtom = 0
atomsTraj = []
count = 0
for i, line in enumerate(open(xdatcarFileName, "r")):
    if i == 0:
    q = qmax
    weave.inline(ISFFullSphereRefCode, ['nqVecs', 'steps', 'nStep', 'isfs', 'atoms', 'nTime', 'nAtom', 'q'])
    return steps, isfs

if __name__ == "__main__":
    flags = "Possible Flags\n\
-noPlot: turns off plotting duh\n\
-logt : turns on log of the time steps\n\
-scale #: the time scale (how much time per step in seconds)\n\
-nStep #: number of steps on the time scale (100 small, 500 big)\n\
-2sh : applies 2 shell averaging before criterion are set\
"
    utils.usage(["<dump.dat or OUTCAR>", "<\'s\'-self or \'t\'-total', deflt: t>", "<criteria>", "<ensemble file>"], 4, 8, flags)
    RMAX = 10.0

    #Parse Flags
    plotEnable = True
    logtEnable = False
    sh2Enable = False
    scale = None  #units in seconds
    nStep = 250
    if "-noPlot" in sys.argv:
        sys.argv.remove("-noPlot")
        plotEnable = False
    if "-logt" in sys.argv:
        sys.argv.remove("-logt")
        logtEnable = True  # completed from the identical flag-parsing pattern elsewhere; the snippet was cut off here
            atomCR = atoms[i][where(cr[i] > cutoff)[0]]
            print "nAtom=%d atoms meet criteria %s" % (len(atomCR), criteria)
            rbins, rdist = rdf_periodic(atomCR, basis, rcut, nbins)
        else:
            rbins, rdist = rdf_periodic(atoms[i], basis, rcut, nbins)
        rdf += rdist
    rdf /= nTime
    return rbins, rdf

if __name__ == "__main__":
    flags = "Possible Flags\n\
-noPlot: turns off plotting duh\n\
-perTS: turns on logging per timestep per atom instead of using last step\n\
"
    utils.usage(["<dump.dat or OUTCAR>", "<criteria>", "<ensemble file>"], 3, 8, flags)
    RMAX = 10.0

    #Parse Flags
    plotEnable = True
    perTSEnable = False
    if "-noPlot" in sys.argv:
        sys.argv.remove("-noPlot")
        plotEnable = False
    if "-perTS" in sys.argv:
        sys.argv.remove("-perTS")
        perTSEnable = True

    #Parse Args
    inputFile = sys.argv[1]
    q = qmax
    weave.inline(ISFFullSphereRefCode, ['nqVecs', 'steps', 'nStep', 'isfs', 'atoms', 'nTime', 'nAtom', 'q'])
    return steps, isfs

if __name__ == "__main__":
    flags = "Possible Flags\n\
-noPlot: turns off plotting duh\n\
-logt : turns on log of the time steps\n\
-scale #: the time scale (how much time per step in seconds)\n\
-nStep #: number of steps on the time scale (100 small, 500 big)\n\
-linear\
"
    utils.usage(["<dump.dat or OUTCAR>", "<\'s\'-self \'d\'-distinct or \'t\'-total'>, default total"], 2, 6, flags)
    RMAX = 10.0

    plotEnable = True
    logtEnable = False
    linEnable = False
    scale = None  #units in seconds
    nStep = 250
    if "-noPlot" in sys.argv:
        sys.argv.remove("-noPlot")
        plotEnable = False
    if "-logt" in sys.argv:
        sys.argv.remove("-logt")
        logtEnable = True
    if "-scale" in sys.argv:
def default():
    msg_to_send = {'type': 'all', 'msg': None}
    while True:
        msg = yield msg_to_send
        msg_to_send['type'] = 'all'
        msg_to_send['msg'] = utils.usage(' '.join(msg.content))
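# A minimal usage sketch for the coroutine above, assuming (as the join
# suggests) that messages carry a .content sequence. The generator must be
# primed once before send() can deliver a message:
responder = default()
responder.next()                 # prime: runs up to the first yield (Python 2)
reply = responder.send(message)  # 'message' is a hypothetical object with .content
# reply is the dict {'type': 'all', 'msg': <usage text>}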
class SGMLtools:
    _globals = {}
    _classes = {}
    _autoconf = {}

    def __init__(self, autoconf):
        """Create an SGMLtools object.

        This method hunts for backend modules and does some other
        assorted initialization things. The autoconf argument
        contains some assorted settings that are passed down from
        autoconf.
        """
        self._autoconf = autoconf

        #
        # Expand path
        #
        sys.path.append(os.path.join(autoconf['shrdir'], 'python'))
        sys.path = sys.path + autoconf['backends']

        #
        # Import backends, instantiate a BackendGlobals object for
        # each of them, and stash it away.
        #
        files = []
        for dir in autoconf['backends']:
            pattern = os.path.join(dir, '*.py')
            files = files + glob.glob(pattern)

        for file in files:
            name, junk = os.path.splitext(file)
            dir, module = os.path.split(name)
            cmd = 'from %s import %s, %s' % (module, module, module + 'Globals')
            exec cmd
            cmd = 'glob = %sGlobals()' % module
            exec cmd
            self._globals[glob.getName()] = glob
            cmd = 'cls = %s' % module
            exec cmd
            self._classes[glob.getName()] = cls

        #
        # Read alias file
        #
        self._aliases = utils.readAliases(autoconf)

        #
        # Setup SGML environment
        #
        if not os.environ.has_key('SGML_CATALOG_FILES'):
            os.environ['SGML_CATALOG_FILES'] = \
                os.path.join(autoconf['etcdir'], 'catalog') \
                + ":" + "/usr/share/sgml/stylesheets/sgmltools/sgmltools.cat" \
                + ":" + "/usr/share/sgml/CATALOG.docbkdsl"

    def processOptions(self, args):
        """Process command line options.

        Process command line options, dynamically expanding them
        based on the --backend option, and returning the list of
        files that's left.
        """
        #
        # Hunt down the backend option. The first test tests for
        # "-b x", the second for "-bx" (or the equivalent long versions).
        #
        numArgs = len(args)
        for i in range(numArgs):
            arg = args[i]
            if arg in ["-b", "--backend"]:
                if i + 1 >= numArgs:
                    raise getopt.error, "option %s requires an argument" % arg
                miniargs = [arg, args[i + 1]]
                break
            if arg[:2] == "-b" or arg[:10] == "--backend=":
                miniargs = [arg]
                break
        else:
            #
            # Default to the HTML backend.
            #
            miniargs = ["--backend=onehtml"]

        #
        # We should have a backend option now. Ask getopt to parse it. Once
        # we have it, ask the backend for extra options so we can get
        # down to business.
        #
        opt, junk = getopt.getopt(miniargs, 'b:', ['backend='])

        #
        # if opt = 'txt', check for 'w3m' else fall back to 'lynx'
        #
        if opt[0][1] == "txt":
            if not self._autoconf['progs']['w3m'] == 'N/A':
                self._curbackend = "w3m"
            else:
                self._curbackend = "lynx"
        else:
            self._curbackend = opt[0][1]

        try:
            self._curglobal = self._globals[self._curbackend]
        except KeyError:
            utils.usage(None, "Unknown backend " + self._curbackend)
        if not self._globals.has_key(self._curbackend):
            utils.usage(None, "Unknown backend " + self._curbackend)

        #
        # Merge all the options and parse them. Return whatever is
        # left (the list of files we need to run).
        #
        shortopts, longopts = utils.makeOpts(self._curglobal)
        try:
            options, retval = getopt.getopt(args, shortopts, longopts)
        except getopt.error, e:
            utils.usage(self._curglobal, 'Error parsing arguments: ' + `e`)

        self._options = utils.normalizeOpts(self._curglobal, options)

        #
        # Check for help/version/... options
        #
        if utils.findOption(self._options, 'help'):
            utils.version(self._autoconf['shrdir'])
            print
            utils.usage(self._curglobal, None)
        if utils.findOption(self._options, 'version'):
            utils.version(self._autoconf['shrdir'])
            sys.exit(0)
        if utils.findOption(self._options, 'license'):
            utils.license()

        return retval
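# For reference, a small self-contained illustration of the mini-argument
# parse that processOptions performs (standard-library getopt; the backend
# value here is just the default from the code above):
import getopt

opt, junk = getopt.getopt(["--backend=onehtml"], 'b:', ['backend='])
# opt == [('--backend', 'onehtml')], junk == []
# so opt[0][1] is the backend name, "onehtml"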
#!/usr/bin/env python2.6
#
# Copyright (C) 2011 by Brian Weck
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
import utils
from cssbot import log, queue

#
utils.usage(1, "usage: %s id")
thing_id = utils.argv(1)

#
log = log.getLogger("cssbot.dequeue")
queue = queue.Queue()

thing = queue.contains({"data.id": thing_id})
# if we have an item, and up for queueing.
if thing and "next_ts" in thing:
    log.warn("removing item %s from queue", thing_id)
    queue.dequeue(thing)
else:
    log.error("did not remove thing %s from the queue", thing_id)
#!/usr/bin/python
import sys
from math import *
import subprocess
import os
from numpy import array
#mine
import outcarIO
import lammpsIO
import parserGens
import utils

possibleSuffix = [".rmsd", ".cn", ".tetra", ".rmsd50", ".rmsd100", ".rmsd1000", ".rmsd5000", ".rmsd10000", ".2shelltetra", ".2shellcn", ".2shellrmsd100", ".tetra.avg1000", ".tetra.avg5000", ".tetra.avg100", ".tetra.avg500"]

utils.usage(["<outcar,lammps dump>"], 1, 1, "Note: Automatically looks for <OUTCAR/dump.suffix> for per atom ensembles to include in the dump")

fname = sys.argv[1]
dumpFilename = fname + "lmp.dump"

possibleSuffix = [i for i in possibleSuffix if os.path.isfile(fname + i)]
print possibleSuffix
ensembleHead = " ".join([i.strip(".") for i in possibleSuffix])
ensembles = None
if len(possibleSuffix) > 0:
    ensembles = [parserGens.parseEnsemble(fname + i) for i in possibleSuffix]

if "OUTCAR" in fname:
    nAtoms = outcarIO.nAtoms(fname)
    basis = array(map(array, outcarIO.basis(fname)))
    bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
    types = outcarIO.types(fname)
#!/usr/bin/python
import sys
#mine
import utils
import parserGens
import datatools

utils.usage(["<.ensembleFile>", "<window size (int or all)>"], 2, 2)

inputFile = sys.argv[1]
possibleSuffixes = [".tetra", ".rmsd", ".cn"]
if not any([suffix for suffix in possibleSuffixes if suffix in inputFile]):
    print "wrong input file dummy."
    exit(0)

windowSize = sys.argv[2]
outputFile = inputFile + ".avg" + windowSize

#assume first line is a header
head = open(inputFile, "r").readline()
configIterator = parserGens.parseEnsemble(inputFile, keepAvg=True)
ops = zip(*[a for a in configIterator])

if windowSize == "all":
    opAvg = [sum(op) / len(op) for op in ops]
    opLines = [" ".join(map(str, opAvg)) + "\n"]
else:
    windowSize = int(windowSize)
#!/usr/bin/python
import sys
#mine
import lammpsIO
import utils

utils.usage(["<input dump file>", "<configuration #>", "<output config file>"], 3, 3, "LAMMPS dump file must have bounds, atom type and atomic locations.")

iDump = sys.argv[1]
cfg = int(sys.argv[2])
bounds, types, atoms = lammpsIO.readConfig(iDump, cfg)

oCnfg = open(sys.argv[3], "w")
lammpsIO.dumpWriteConfig(oCnfg, bounds, types, atoms, "config made by %s, from file %s config %d" % (sys.argv[0].split("/")[-1], iDump, cfg))
#!/usr/bin/env python2.6
#
# Copyright (C) 2011 by Brian Weck
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
#
import utils
from cssbot import reddit

utils.usage(1, "usage: %s id")
fetch_id = utils.argv(1)

r = reddit.APIWrapper()
r.num_retries = 0
x = r.get_comments(fetch_id)
print utils.format_json(x)
#!/usr/bin/python
import sys
# mine
import utils
import parserGens
import datatools

utils.usage(["<.tetraFile>", "<window size (int or all)>"], 2, 2)

inputFile = sys.argv[1]
if ".tetra" not in inputFile:
    print "wrong input file dummy."
    exit(0)

windowSize = sys.argv[2]
outputFile = inputFile + ".avg" + windowSize

configIterator = parserGens.parseEnsemble(inputFile, keepAvg=True)
tetras = zip(*[a for a in configIterator])

if windowSize == "all":
    tetraAvg = [sum(atet) / len(atet) for atet in tetras]
    tetraLines = [" ".join(map(str, tetraAvg)) + "\n"]
else:
    windowSize = int(windowSize)
    tetraAvg = zip(*[datatools.wsmooth(atet, windowSize) for atet in tetras])
    tetraLines = [" ".join(map(str, line)) + "\n" for line in tetraAvg]
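# datatools.wsmooth is not shown in this excerpt. A plausible minimal sketch,
# assuming it is a centered moving average that returns a sequence the same
# length as its input (the actual implementation may differ):
def wsmooth(values, window):
    # hypothetical sliding-window average; the window is clamped at the ends
    # so the output has the same length as the input
    half = window // 2
    out = []
    for i in range(len(values)):
        lo = max(0, i - half)
        hi = min(len(values), i + half + 1)
        out.append(sum(values[lo:hi]) / float(hi - lo))
    return out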
#!/usr/bin/python
import sys
import itertools
#mine
import utils, parserGens, datatools

utils.usage(["<file.neighbs>", "<ensemble file (tetra, cn, rmsd, w/e)>"], 2, 2)

neighbFile = sys.argv[1]
ensembFile = sys.argv[2]

neighbGen = parserGens.parseNeighbor(neighbFile)
ensembGen = parserGens.parseEnsemble(ensembFile)

#secondShellEnsemb=list()
efp = ensembFile.split(".")
outfile = ".".join(efp[0:-1]) + ".2shell" + efp[-1]
out = open(outfile, "w")
out.write("2ndShellAvg%s 2ndShellPerAtom%s\n" % (efp[1], efp[1]))

stopFlag = False
for ensembG, neighbG in itertools.izip(ensembGen, neighbGen):
    secondShellEnsemb = list()
    for a, firstNeighbs in enumerate(neighbG):
        secondNeighbs = [neighbG[i] for i in firstNeighbs]
        totalNeighb = set(datatools.flatten([firstNeighbs] + secondNeighbs))
        N = len(totalNeighb)
        if N == 0:
            secondShellEnsemb.append(ensembG[a])
print dat """ if dat == "": print "UN OWEN WAS HER" if dat and "mort\n" in dat: print "mort\n" break elif dat and "ok\n" in dat: print "ok" s.sendall("avance") print "j'avance\n" elif dat and "elevation en cours\n" in dat: print "elevation en cours\n" dat = receive(s) print dat elif not dat: print "NO DATA ARGHFGDHFH" continue else: print dat """ s.shutdown(socket.SHUT_WR) s.close() if (len(sys.argv) < 5) or (len(sys.argv) > 7): utils.usage() else: infos = infos() connect.init_infos(sys.argv, infos) s = connect.connect(infos) play(s)
#!/usr/bin/python
import sys
import operator
#mine
from parserGens import parseLammpsColumns
import utils, lammpsIO

#Given a lammps dump and some criteria, dumps the atoms that have valid criteria
#also gives easy access to their coordinates for further analysis

utils.usage(["<lmpdump file>", "<selection crit>", "lt/gt val eg: gt0.8"], 3, 3)

lmpFile = sys.argv[1]
selHead = sys.argv[2]
criteria = sys.argv[3]

outFileAtoms = lmpFile + "_" + selHead + "_" + criteria + ".atoms"
outFileSize = lmpFile + "_" + selHead + "_" + criteria + ".len"

op = operator.lt
if "gt" in criteria:
    op = operator.gt
val = float(criteria[2:])

def condition(compareVal):
    return op(compareVal, val)

nAtom = lammpsIO.nAtoms(lmpFile)
basis = lammpsIO.basis(lmpFile)

#find the header labels
#!/usr/bin/python
import sys
import numpy as np
import scipy.optimize as optimize
import pylab as pl
#mine
import utils

utils.usage(["<ISFs file> <cutoff>"], 1, 3)

def parseISF(isfFile):
    dat = open(isfFile, "r").readlines()[1:]
    head = dat.pop(0)
    time, isfs = zip(*map(lambda x: map(float, x.split()), dat))
    return time, isfs

def vtf(t, beta, a, tao):
    return a * np.exp(-(t / tao) ** beta)

time, isfs = parseISF(sys.argv[1])

cutoff = 1.0
if len(sys.argv) == 3:
    cutoff = float(sys.argv[2])
icut = sum([1 for i in time if i < cutoff])

#Fitting: fit the stretched exponential defined above (the original called
#an undefined name "kww" here; vtf is the same KWW functional form)
beta, a, tao = optimize.curve_fit(vtf, time[icut:], isfs[icut:])[0]
print "Beta = %f\n A = %f\n tao = %f\n" % (beta, a, tao)
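# A quick sanity check of the fit (synthetic data generated from vtf itself;
# curve_fit returns the optimal parameters in the order of the function
# signature):
t = np.linspace(0.1, 10.0, 100)
y = vtf(t, 0.7, 1.0, 2.0)                # known beta, a, tao
popt = optimize.curve_fit(vtf, t, y)[0]  # popt ~= [0.7, 1.0, 2.0]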
    ['access_token=', 'The access token.', True, None],
    ['access_token_secret=', 'The access token secret.', True, None],
    ['stream_type=', 'The type of stream to run: sample, location, keyword.', True, None],
    ['output_directory=', 'Where to save output files.', True, None],
    ['stream_filename=', 'The name of the file containing parameters for this stream. Required for location and keyword.', False, ''],
    ['pid_file=', 'Save the pid of this job to the given file.', False, None],
    ['log_filename=', 'The log file.', True, None],
]

# Start main method here.
command_line = '%s'
options_hash, remainder = parseCommandLine(options, command_line=command_line)
if (len(remainder) != 0):
    print usage(sys.argv, command_line, options)
    sys.exit()

consumer_key = options_hash['consumer_key']
consumer_secret = options_hash['consumer_secret']
access_token = options_hash['access_token']
access_token_secret = options_hash['access_token_secret']
output_directory = options_hash['output_directory']
stream_type = options_hash['stream_type']
stream_filename = options_hash.setdefault('stream_filename', None)
log_filename = options_hash['log_filename']

logger = logging.getLogger('tweepy_streaming')
handler = logging.FileHandler(log_filename, mode='a')
#!/usr/bin/python
import sys
from math import *
import subprocess
import os
#mine
import utils
import outcarIO
import lammpsIO
import rootMeanSquareDist
from scipy import array, zeros
import pylab as pl

utils.usage(["<Outcar/Lammpsdump>", "<windowsz>"], 2, 2)

rcut = 3.2
filename = sys.argv[1]

outcarFlag = False
lammpsFlag = False
if "OUTCAR" in filename:
    outcarFlag = True
    lammpsFlag = False
else:
    outcarFlag = False
    lammpsFlag = True

window = int(sys.argv[2])
#!/usr/bin/python
import sys
from numpy import *
import utils
#mine
import plotRemote as pr

utils.usage(["<OUTCAR.rmsd>"], 1, 1)

rmsdFile = sys.argv[1]
if rmsdFile[-4:] != "rmsd":
    exit(0)

rcut = 1.5
rmsdAvg = list()
rmsdPerAtom = list()

jumpCount50 = [0] * 50
jumpCount100 = [0] * 100
jumpCount200 = [0] * 200
jumpCount300 = [0] * 300
jumpCount400 = [0] * 400
jumpCount500 = [0] * 500
jumpCount1000 = [0] * 1000

jumpedFlag50 = []   #how many times has the atom jumped >2.5A in 50ts
jumpedFlag100 = []  #how many times has the atom jumped >2.5A in 100ts
jumpedFlag200 = []
jumpedFlag300 = []
jumpedFlag400 = []
#!/usr/bin/python
import sys
import numpy as np
# mine
import utils

utils.usage(["<columns file>"], 1, 1)

colf = sys.argv[1]
if ".col" not in colf:
    print "run on a column file from outcar2columns.py"
    exit(0)

def grabV(colf):
    d = open(colf)
    [d.readline() for i in range(3)]
    l = d.readline()
    V = float(l.split()[-1])
    n = int(l.split()[0].split("=")[-1].strip(","))
    return V * n

def grabT(colf):
    d = open(colf)
    [d.readline() for i in range(8)]
    return float(d.readline().split()[1])
#!/usr/bin/python
import sys
#mine
import utils
import lammpsIO

utils.usage(["<siesta MD_CAR>", "<optional: dumpfile, default:prefix.dat>"], 1, 2)

inputFile = sys.argv[1]
if len(sys.argv) == 3:
    outputFile = sys.argv[2]
else:
    outputFile = ".".join(inputFile.split(".")[:-1] + ["dat"])

mdcar = open(inputFile, "r")
opdat = open(outputFile, "w")

count = 0
three = range(3)
while True:
    mdcar.readline()  #header
    mdcar.readline()  #volume ratio
    v1 = map(float, mdcar.readline().split())
    v2 = map(float, mdcar.readline().split())
    v3 = map(float, mdcar.readline().split())
    nAtom = int(mdcar.readline())
    mdcar.readline()  #direct
    atoms = [map(float, mdcar.readline().split()) for i in range(nAtom)]
    if norm:
        Ndensity = nAtom / volume(basis)
        for i, r in enumerate(rbins):
            if i == 0:
                vol = 4.0 * pi * dr * dr * dr / 3.0
            else:
                vol = 4.0 * pi * r * r * dr
            for k in range(nStep):
                bins[k][i] /= vol * Ndensity
    return rbins, bins

if __name__ == "__main__":
    utils.usage(["<dump.dat or OUTCAR>", "<comma separated step sizes [e.g. 1,2,3]>", "<\'s\'-self \'d\'-distinct or \'t\'-total'>, default total"], 2, 4)

    RMAX = 10.0

    norm = False  #an arbitrary normalization is applied
    if "-norm" in sys.argv:
        norm = True
        sys.argv.remove("-norm")

    inputFile = sys.argv[1]
    steps = map(int, sys.argv[2].split(","))

    vhType = "total"
    if len(sys.argv) == 4:
        if sys.argv[3][0] in ['s', 'S']:
            vhType = "self"
        if sys.argv[3][0] in ['d', 'D']:
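# For context, a sketch of the ideal-gas normalization the fragment above
# applies: with nAtom atoms in volume V, a uniform system would place
# (nAtom/V) * 4*pi*r^2*dr atoms in the shell [r, r+dr), so dividing each bin
# by that factor sends the correlation toward 1 at large r (the function and
# variable names here are illustrative, not from the original):
from math import pi

def shell_norm(r, dr, nAtom, V):
    # expected ideal-gas count in a spherical shell of radius r and width dr
    return (float(nAtom) / V) * 4.0 * pi * r * r * dr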
#!/usr/bin/python
#calculates the dynamic susceptibility of the structure factor
import plotRemote as pr
import sys
#mine
import utils
from interpolate import interp1d
from scipy import fftpack
import numpy as np

utils.usage(["<.isfS file>"], 1, 1)

isfsFile = sys.argv[1]
if "isfS" not in isfsFile:
    print "Expected .isfS file for input"

logTime = list()
isfs = list()
for isfSline in open(isfsFile, "r").readlines():
    try:
        a = map(float, isfSline.split())
        t = a.pop(0)
        i = sum(a) / len(a)
    except ValueError:
        continue
    logTime.append(t)
    isfs.append(i)
    isfsen = zeros([nStep, nAtom]).ravel()
    q = qmax
    weave.inline(ISFSelfEnsembleSphereRefCode, ['nqVecs', 'steps', 'nStep', 'isfsen', 'atoms', 'nTime', 'nAtom', 'q'])
    isfsen.shape = [nStep, nAtom]
    return steps, isfsen

if __name__ == "__main__":
    flags = "Possible Flags\n\
-logt : turns on log of the time steps\n\
-scale #: the time scale (how much time per step in seconds)\n\
-nStep #: number of steps on the time scale (100 small, 500 big)\n\
-nqVec #: the number of random qvectors to generate and average over\
"
    utils.usage(["<dump.dat or OUTCAR>"], 1, 6, flags)

    RMAX = 10.0

    logtEnable = False
    linEnable = False
    scale = None  #units in seconds
    nStep = 250
    nqVecs = 5
    if "-logt" in sys.argv:
        sys.argv.remove("-logt")
        logtEnable = True
    if "-scale" in sys.argv:
        i = sys.argv.index("-scale")
        scale = float(sys.argv[i + 1])
#!/usr/bin/python
import sys
from math import *
import subprocess
import os
from numpy import *
#mine
import outcarIO
import lammpsIO
import rootMeanSquareDist
import utils

utils.usage(["<Outcar/Lammpsdump>"], 1, 1)

filename = sys.argv[1]

header = ["TimeStep AverageRMSD\n"]
rmsddata = header

outcarFlag = False
lammpsFlag = False
if "OUTCAR" in filename:
    outcarFlag = True
    lammpsFlag = False
else:
    outcarFlag = False
    lammpsFlag = True

atoms = list()
if outcarFlag:
    nAtoms = outcarIO.nAtoms(filename)
    nStep = steps.shape[0]
    isfs = zeros(nStep)
    k = kmax
    weave.inline(ISFSelfSphereRefCode, ["nqVecs", "steps", "nStep", "isfs", "atoms", "nTime", "nAtom", "k", "tetra"])
    return steps, isfs

def fourierDensity(atoms, basis):
    atoms = array(atoms)
    basis = array(basis)

if __name__ == "__main__":
    utils.usage(["<dump.dat or OUTCAR>", "<'s'-self 'd'-distinct or 't'-total'>, default total"], 1, 3)

    RMAX = 10.0

    plotEnable = True
    logtEnable = False
    if "-noPlot" in sys.argv:
        sys.argv.remove("-noPlot")
        plotEnable = False
    if "-logt" in sys.argv:
        sys.argv.remove("-logt")
        logtEnable = True

    inputFile = sys.argv[1]
    configIterator = parserGens.parseEnsemble(inputFile + ".tetra")
    tetraTime = array([tetra for tetra in configIterator])