def WriteMemory(baseadd, dat):
    clear_rxq()
    length = len(dat)
    add1 = 0
    num1 = length
    ans = bytearray(b'')
    bk = bytearray(256)
    while 256 < num1:
        sz = 256
        for i in range(sz):
            bk[i] = dat[i + add1]
        # print('*', end='')
        print('--------------------------- {:08X}'.format(baseadd + add1))
        dump(baseadd + add1, bk)
        cmdWriteMemory(baseadd + add1, bk)
        add1 += sz
        num1 -= sz
    if 0 < num1:
        bk1 = bytearray(num1)
        for i in range(num1):
            bk1[i] = dat[add1 + i]
        print('\r--------------------------- {:08X}'.format(baseadd + add1))
        dump(baseadd + add1, bk1)
        cmdWriteMemory(baseadd + add1, bk1)
    print('')
    return
def adv_delay(self, T, pid):
    if self.roundD[pid] + T <= self.delta:
        self.D[pid] = self.D[pid] + T
        self.roundD[pid] = self.roundD[pid] + T
        self.f2a.write(('delay-set', pid))
    else:
        # ignore the message
        dump.dump()
def WriteMemory(self, baseadd, dat):
    self.set_loadermode()
    self.clear_rxq()
    self.timeout = 0.1
    length = len(dat)
    add1 = 0
    num1 = length
    bk = bytearray(256)
    while 256 < num1:
        sz = 256
        for i in range(sz):
            bk[i] = dat[i + add1]
        # print('*', end='')
        print('--------------------------- {:08X}'.format(baseadd + add1))
        dump(baseadd + add1, bk)
        self.cmdWriteMemory(baseadd + add1, bk)
        add1 += sz
        num1 -= sz
    if 0 < num1:
        bk1 = bytearray(num1)
        for i in range(num1):
            bk1[i] = dat[add1 + i]
        print('\r--------------------------- {:08X}'.format(baseadd + add1))
        dump(baseadd + add1, bk1)
        self.cmdWriteMemory(baseadd + add1, bk1)
    print('')
    return
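# A minimal alternative sketch of the chunking in the two WriteMemory variants
# above, using slices instead of per-byte copy loops. `write_block` is a
# hypothetical stand-in for the device-specific cmdWriteMemory call; only a
# bytes-like `dat` is assumed.
def write_chunked(baseadd, dat, write_block, blocksize=256):
    for off in range(0, len(dat), blocksize):
        blk = bytes(dat[off:off + blocksize])  # final block may be shorter
        write_block(baseadd + off, blk)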
def adversary_msg(self, msg):
    if msg[0] == 'corrupt':
        self.adv_corrupt(msg[1])
    elif msg[0] == 'get-leaks':
        self.adv_get_leaks()
    else:
        dump.dump()
def __init__(self, sid, pid, _p2f, _f2p, _p2a, _a2p, _p2z, _z2p):
    # All protocols do this (below)
    self.channels = {
        'a2p': _a2p, 'p2a': _p2a,
        'f2p': _f2p, 'p2f': _p2f,
        'z2p': _z2p, 'p2z': _p2z
    }
    #self.p2f = _p2f; self.f2p = _f2p
    #self.p2a = _p2a; self.a2p = _a2p
    #self.p2z = _p2z; self.z2p = _z2p
    #self.channels_to_read = [self.a2p, self.z2p, self.f2p]
    self.handlers = {
        _a2p: lambda x: dump.dump(),
        _f2p: lambda x: dump.dump(),
        _z2p: self.input_msg
    }
    ITMSyncProtocol.__init__(self, sid, pid, self.channels, self.handlers)

    # specific to the broadcast
    if self.pid == 1:
        self.leader = True
    else:
        self.leader = False
    self.leaderpid = 11
    self.valaccepted = False
    self.val = None
    self.echoreceived = 0
    self.readyreceived = 0
def spell_correction(s):
    dump.dump()
    s1 = s.split(' ')
    x = list()
    for i in s1:
        x.append(correction(i))
    fsearch = ' '.join(x)
    return fsearch
def input_msg(self, sender, msg):
    sid, pid = sender
    if msg[0] == 'RoundOK' and pid in self.parties:
        self.input_roundok(pid)
    elif msg[0] == 'RequestRound' and pid in self.parties:
        self.input_requestround(pid)
    else:
        dump.dump()
def func_msg(self, msg):
    # Assumed to be F_sfe
    fro, msg = msg
    if msg[0] == 'input':
        self.leak_input(*msg)
    elif msg[0] == 'activated':
        self.leak_activation(*msg)
    else:
        dump.dump()
def input_msg(self, msg):
    sender, msg = msg
    sid, pid = sender
    if msg[0] == 'input' and pid in self.parties:
        self.input_input(pid, msg[1])
    elif msg[0] == 'output' and pid in self.parties:
        self.input_output(pid)
    else:
        dump.dump()
def adv_msg(self, msg):
    if msg[0] == 'corrupt':
        self.adv_corrupt(msg[1])
    elif msg[0] == 'get-leaks':
        self.adv_get_leaks()
    elif msg[0] == 'delay':
        self.adv_delay(msg[1], msg[2])
    else:
        dump.dump()
def handlePacket(self, data, src_ip, src_port=-1):
    if len(data) < 5:
        return []
    if data[4] == 'i':  # A2A_PING
        #print 'Got A2A_PING from %s:%i' % (src_ip,src_port)
        self.ping += 1
        return self._createResponse(self.A2A_PING_RESPONSE, "")
    elif data[4] == '\x57':  # A2S_SERVERQUERY_GETCHALLENGE
        #print 'Got A2S_SERVERQUERY_GETCHALLENGE from %s:%i' % (src_ip,src_port)
        self.challenge += 1
        return self._createResponse(self.A2S_CHALLENGE_RESPONSE,
                                    struct.pack("i", random.randint(2, 999999)))
    elif data[4] == 'T':  # A2S_INFO
        #print 'Got A2S_INFO from %s:%i' % (src_ip,src_port)
        self.info += 1
        return self._createResponse(self.A2S_INFO_RESPONSE, self._getInfoData())
    elif data[4] == '\x55':  # A2S_PLAYER
        #print 'Got A2S_PLAYER from %s:%i' % (src_ip,src_port)
        if len(data) < 9:
            print 'not enough data'
            return []
        # struct.unpack returns a tuple, so compare its first element
        if struct.unpack("i", data[5:9])[0] == -1:
            self.challenge += 1
            return self._createResponse(self.A2S_CHALLENGE_RESPONSE,
                                        struct.pack("i", random.randint(2, 999999)))
        self.player += 1
        return self._createResponse(self.A2S_PLAYER_RESPONSE, self._getPlayerData())
    elif data[4] == '\x56':  # A2S_RULES
        #print 'Got A2S_RULES from %s:%i' % (src_ip,src_port)
        if len(data) < 9:
            print 'not enough data'
            return []
        # struct.unpack returns a tuple, so compare its first element
        if struct.unpack("i", data[5:9])[0] == -1:
            self.challenge += 1
            return self._createResponse(self.A2S_CHALLENGE_RESPONSE,
                                        struct.pack("i", random.randint(2, 999999)))
        self.rules += 1
        return self._createResponse(self.A2S_RULES_RESPONSE, self._getRulesData())
    elif data[4] == HL2QueryProtocol.A2S_INFO_RESPONSE:
        #print "got info packet"
        #print dump(data)
        serverinfo = self.parseInfoPacket(src_ip, src_port, data)
        self.callback(serverinfo)
    elif data[4] == HL2QueryProtocol.A2S_CHALLENGE_RESPONSE:
        #print "got challenge response"
        return self.getRulesPacket(data[5:])
    elif data[4] == HL2QueryProtocol.A2S_RULES_RESPONSE:
        #print "got rules response"
        rules = self.parseRulesPacket(data)
        self.rulesCallback(src_ip, src_port, rules)
    else:
        print "got invalid packet"
        print dump(data)
    return []
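# Why the [0] matters above: struct.unpack always returns a tuple, even for a
# single format code, so comparing the bare result to -1 is always False.
# Minimal stdlib-only illustration:
import struct
assert struct.unpack("i", struct.pack("i", -1)) == (-1,)
assert struct.unpack("i", struct.pack("i", -1))[0] == -1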
def input_msg(self, sender, msg):
    sid, pid = sender
    if msg[0] == 'send' and sender == self.sender and ishonest(self.sid, self.sender):
        self.input_send(msg[1])
    elif msg[0] == 'fetch' and sender == self.receiver and ishonest(self.sid, self.receiver):
        self.input_fetch()
    else:
        dump.dump()
def adv_delay(self, T):
    if self.Dhat + T <= self.delta:
        self.D += T
        self.Dhat += T
        self.f2a.write(('delay-set',))
        dump.dump()
    else:
        print('Delay failed with T=', T, 'Dhat=', self.Dhat, 'delta=', self.delta)
        dump.dump()
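# Both adv_delay variants above enforce the same bounded-delay rule: an
# adversarial delay T is accepted only while the accumulated delay stays
# within delta. A minimal sketch of the check in isolation (names are
# illustrative, not from the source):
def accept_delay(accumulated, T, delta):
    """Return True iff adding T keeps the accumulated delay within delta."""
    return accumulated + T <= delta

assert accept_delay(0, 3, 5)
assert not accept_delay(3, 3, 5)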
def every_page(page):
    companies = data_getter.companies_list(page)
    pool = ThreadPoolExecutor(max_workers=5)
    all_task = []
    for company in companies:
        all_task.append(pool.submit(company_detail, company))
        #all_task.append(pool.submit(company_jobs, company))
    for task in as_completed(all_task):
        dump.dump(task.result())
    return "page %d is finished" % page
def adversary_msg(self, msg):
    if msg[0] == 'delay':
        self.adv_delay(msg[1])
    elif msg[0] == 'get-leaks':
        self.adv_get_leaks()
    elif msg[0] == 'send' and isdishonest(self.sid, self.sender):
        self.input_send(msg[1])
    elif msg[0] == 'fetch' and isdishonest(self.sid, self.receiver):
        self.input_fetch()
    else:
        dump.dump()
def input_msg(self, msg):
    self.start_sync()
    if msg[0] == 'input' and self.leader:
        self.input_input(msg[1])
    # TODO change this to be automatically handled in base class
    elif msg[0] == 'output':
        self.check_round_ok()
    elif msg[0] == 'sync':
        self.sync()
    else:
        dump.dump()
def load():
    legs = leg.load()
    for x in sorted(legs['wp'].keys()):
        idsobj = legs['wp'][x]['id']
        if 'govtrack' in idsobj:
            congid = idsobj['govtrack']
            cache.cacheweb('http://api.opencongress.org/people?person_id=%d' % congid)
            htmlstr = cache.cacheweb('http://www.opencongress.org/people/show/%d' % congid)
            parse(htmlstr, idsobj)
    dump.dump(legs)
def load(filename):
    #filename = "maplight-convert/FEC2012c2.csv"
    fecs = {}
    legs = leg.load()
    for x in sorted(legs['wp'].keys()):
        obj = legs['wp'][x]
        idsobj = obj['id']
        name = obj['name']['official_full']
        if 'fec' in idsobj:
            for fec in idsobj['fec']:
                #print "fec",fec
                fecs[fec] = obj
    fieldnames = [
        "TransactionTypeCode", "TransactionType", "ElectionCycle",
        "ReportingCommitteeMLID", "ReportingCommitteeFECID", "ReportingCommitteeName",
        "ReportingCommitteeNameNormalized", "PrimaryGeneralIndicator", "TransactionID",
        "FileNumber", "RecordNumberML", "RecordNumberFEC", "TransactionDate", "TransactionAmount",
        "RecipientName", "RecipientNameNormalized", "RecipientCity", "RecipientState",
        "RecipientZipCode", "RecipientCommitteeMLID", "RecipientCommitteeFECID",
        "RecipientCommitteeName", "RecipientCommitteeNameNormalized", "RecipientCommitteeTreasurer",
        "RecipientCommitteeDesignationCode", "RecipientCommitteeDesignation",
        "RecipientCommitteeTypeCode", "RecipientCommitteeType", "RecipientCommitteeParty",
        "RecipientCandidateMLID", "RecipientCandidateFECID", "RecipientCandidateName",
        "RecipientCandidateNameNormalized", "RecipientCandidateParty", "RecipientCandidateICO",
        "RecipientCandidateStatus", "RecipientCandidateOfficeState", "RecipientCandidateOffice",
        "RecipientCandidateDistrict", "RecipientCandidateGender",
        "DonorName", "DonorNameNormalized", "DonorCity", "DonorState", "DonorZipCode",
        "DonorEmployer", "DonorEmployerNormalized", "DonorOccupation", "DonorOccupationNormalized",
        "DonorOrganization", "DonorEntityTypeCode", "DonorEntityType", "DonorCommitteeMLID",
        "DonorCommitteeFECID", "DonorCommitteeName", "DonorCommitteeNameNormalized",
        "DonorCommitteeTreasurer", "DonorCommitteeDesignationCode", "DonorCommitteeDesignation",
        "DonorCommitteeTypeCode", "DonorCommitteeType", "DonorCommitteeParty",
        "DonorCandidateMLID", "DonorCandidateFECID", "DonorCandidateName",
        "DonorCandidateNameNormalized", "DonorCandidateParty", "DonorCandidateICO",
        "DonorCandidateStatus", "DonorCandidateOfficeState", "DonorCandidateOffice",
        "DonorCandidateDistrict", "DonorCandidateGender", "UpdateTimestamp"
    ]
    fec_dict_reader = csv.DictReader(open(filename), delimiter=',', quotechar='"',
                                     restkey=100, fieldnames=fieldnames)
    from collections import defaultdict
    matrix = defaultdict(dict)
    #print fec_dict_reader.fieldnames
    f = open(filename + ".xml", 'w')
    for line in fec_dict_reader:
        for k in fieldnames:
            d = line[k]
            if isinstance(d, int):
                d = str(d)
            if d is not None and d != '':
                val = line['TransactionAmount']
                if val == 'TransactionAmount':
                    continue
                if len(val) > 0:
                    try:
                        val = int(val)
                    except:
                        print "'%s'" % val, "failed"
                else:
                    val = 0
                if string.find(k, "CandidateFECID") > 0:
                    if d not in fecs:
                        fecs[d] = {}
                        fecs[d]["fec_2012_2total"] = val
                    else:
                        if "fec_2012_2total" in fecs[d]:
                            fecs[d]["fec_2012_2total"] = fecs[d]["fec_2012_2total"] + val
                        else:
                            fecs[d]["fec_2012_2total"] = val
    dump.dump(fecs)
def party_msg(self, msg):
    sender, msg = msg
    sid, pid = sender
    if sid != self.sid:
        dump.dump()
        return
    #print('Sender={}, msg={}, self.sid={}, parties={}'.format(sender, msg, self.sid, self.parties))
    if msg[0] == 'RoundOK' and pid in self.parties:
        self.input_roundok(pid)
    elif msg[0] == 'RequestRound' and pid in self.parties:
        self.input_requestround(pid)
    else:
        dump.dump()
def run(self):
    while True:
        ready = gevent.wait(objects=[self.a2f, self.z2f, self.p2f], count=1)
        r = ready[0]
        if r == self.z2f:
            self.z2f.reset()
            dump.dump()
        elif r == self.a2f:
            # the source is truncated here; completed to match the
            # adversary-channel handling of the other run() loops in this set
            msg = r.read()
            self.a2f.reset()
            self.adversary_msg(msg)
def train(self):
    self.product_freq = self.database.productFreq()  # mapping from product to purchase count, {product: freq}
    if os.path.exists(dump_file):
        print 'loading matrix file : {0}'.format(dump_file)
        self.matrix = load()
        return
    # product correlation matrix; a sparse matrix saves memory
    self.matrix = dok_matrix((len(self.products), len(self.products)), dtype=float)
    users = self.database.usersMoreThanOnce()
    for i in range(len(users)):
        userid = users[i]
        print 'taking step : {0} , {1} / {2}'.format(userid, i, len(users))
        self._takestep(userid)
    dump(self.matrix)
def input_input(self, v):
    if self.clock_round != 1:
        dump.dump()
        return
    self.newtodo = []
    for p in self.except_me():
        fbdsid = (self.ssid, (self.sid, self.pid), (self.sid, p), self.clock_round)
        self.newtodo.append((self.send_message, (fbdsid, ('send', ('VAL', v)))))
    print('Sending VAL for', self.clock_round)
    self.val = v  # dealer won't deliver to himself, so just set it now
    self.todo = self.newtodo
    dump.dump()
def get_prediction_content():
    dump.dump()
    l = list()
    wb = openpyxl.load_workbook("c:\\Python27\\myproject\\myapp\\MASTER.xlsx")
    first_sheet = wb.get_sheet_names()[0]
    worksheet = wb.get_sheet_by_name(first_sheet)
    for row in range(1, worksheet.max_row + 1):
        for column in "A":
            cell_name = "{}{}".format(column, row)
            text2 = worksheet[cell_name].value
            if text2 is None:
                break
            else:
                l.append(text2)
    return l
def main():
    inifile = configparser.ConfigParser()
    inifile.read('bootloader.ini')
    comport = inifile.get('settings', 'comport')
    # baudrate = inifile.get('settings', 'baudrate')
    print('--------------------------------------')
    bl = stm32bootloader(comport)
    if bl.init() < 0:
        return 1  # error
    if 0 != bl.set_loadermode():
        print('Cannot synchronize with the boot loader.')
        return 1  # error
    add = 0x08000000
    buf = bl.ReadMemoryQuiet(add, 0x100)
    dump(add, buf)
    print('Program start')
    bl.ProgramStart()
    return 0  # no error
def ProgramStart(self):
    self.set_loadermode()
    jmp = self.ReadMemoryQuiet(0x08000004, 4)
    dump(0x08000004, jmp)
    add = 0
    add <<= 8
    add |= jmp[3]
    add <<= 8
    add |= jmp[2]
    add <<= 8
    add |= jmp[1]
    add <<= 8
    add |= jmp[0]
    if 0xffffffff == add:
        print('ProgramStart():ERROR add: {:08X}'.format(add))
        return
    # print('add: {:08X}'.format(add))
    self.cmdGo(add)
def ProgramStart():
    clear_rxq()
    jmp = ReadMemoryQuiet(0x08000004, 4)
    dump(0x08000004, jmp)
    add = 0
    add <<= 8
    add |= jmp[3]
    add <<= 8
    add |= jmp[2]
    add <<= 8
    add |= jmp[1]
    add <<= 8
    add |= jmp[0]
    if 0xffffffff == add:
        print('ProgramStart():ERROR add: {:08X}'.format(add))
        return
    print('ProgramStart(0x{:08X})'.format(add))
    cmdGo(add)
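# The shift-and-or loops in both ProgramStart variants assemble a little-endian
# 32-bit reset vector from the four bytes read at 0x08000004. In Python 3 the
# same value can be had in one call; a minimal equivalent sketch:
def le32(buf):
    """Interpret the first four bytes of buf as a little-endian uint32."""
    return int.from_bytes(buf[:4], 'little')

assert le32(bytes([0x00, 0x10, 0x00, 0x08])) == 0x08001000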
def main(argv):
    mydump = ''
    myparaview = ''
    mysingle = ''
    try:
        # '-s' takes a value, so it needs a trailing colon in the optstring
        opts, args = getopt.getopt(argv, "hi:o:s:", ["input=", "output=", "single="])
    except getopt.GetoptError:
        print 'convert.py -i <dump filename> -o <paraview output name> -s <snapshot number>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'convert.py -i <dump filename> -o <paraview output name> -s <snapshot number>'
            sys.exit()
        elif opt in ("-i", "--input"):
            mydump = arg
        elif opt in ("-o", "--output"):
            myparaview = arg
        elif opt in ("-s", "--single"):
            mysingle = arg
    patt = 'ITEM: ATOMS(.*)'
    fd = open(mydump, 'r')
    for line in fd:
        m = re.search(patt, line)
        if m:
            after = m.group(1)
            after = after.strip()
            dumplist = after.split(' ')
            break
    fd.close()
    maplist = list(dumplist)
    for i in range(len(dumplist), 0, -1):
        maplist.insert(i - 1, i)
    tmplist = list(dumplist[5:])
    print tmplist
    paralist = list()
    for i in range(0, len(tmplist), 1):
        paralist.append(tmplist[i])
        paralist.append(tmplist[i])
    print "maplist: ", maplist
    d = dump(int(mysingle), mydump)
    print "paraview: ", paralist
    d.map(*maplist)
    e = ensight(d)
    print "single snapshot: ", str(mysingle).zfill(4)
    e.single(int(mysingle), myparaview, *paralist)
def main():
    inifile = configparser.ConfigParser()
    inifile.read('bootloader.ini')
    comport = inifile.get('settings', 'comport')
    # baudrate = inifile.get('settings', 'baudrate')
    fn = inifile.get('settings', 'hex_filename')
    print('filename is[{}]'.format(fn))
    return
    bl = stm32bootloader(comport)
    print('-------------------------------------------------------- START {}'.format(sys.argv[0]))
    # bl.cmdGet()
    mem = maketest()
    dump(0x20004000, mem)
    add = 0x20004000
    bl.WriteMemory(add, mem)
    mem = bl.ReadMemory(0x20004000, 0x100)
    dump(0x0000, mem)
    return
def run(self):
    while True:
        ready = gevent.wait(objects=[self.p2f, self.a2f, self.z2f], count=1)
        assert len(ready) == 1
        r = ready[0]
        if r == self.a2f:
            msg = r.read()
            self.a2f.reset()
            self.adversary_msg(msg)
        elif r == self.p2f:
            msg = r.read()
            sender, msg = msg
            self.p2f.reset()
            self.input_msg(sender, msg)
        elif r == self.z2f:
            self.z2f.reset()
            dump.dump()
        else:
            dump.dump()
def lppWorker(input):
    flist = input["filelist"]
    debugMode = input["debugMode"]
    outfileName = input["output"]
    overwrite = input["overwrite"]
    flistlen = len(flist)

    # generate name of manyGran
    splitfname = flist[0].rsplit(".")
    if outfileName == "":
        granName = splitfname[len(splitfname) - 1]
    elif outfileName.endswith("/"):
        granName = outfileName + splitfname[len(splitfname) - 1]
    else:
        granName = outfileName

    # if no-overwrite: read timestamp in first line of file
    # if corresponding dump-file does not already exist: add it to 'shortFlist'
    # shortFlist ... list of files to finally be processed by dump, and vtk.
    # elements of flist that are not in shortFlist already exist and will not be
    # converted anew and replaced
    shortFlist = []
    if overwrite == True:
        shortFlist = flist
    else:
        for f in flist:
            try:
                # read time
                ff = open(f)
                ff.readline()
                time = int(ff.readline())
                sys.stdout.flush()
                ff.close()
            except:
                continue
            # generate filename from time like in vtk,
            # check if file exists; if yes: do not add to list
            filename, file_bb, file_walls = vtk.generateFilename(granName, [time], 0)
            if not os.path.isfile(filename):
                shortFlist.append(f)

    # call dump, vtk, manyGran on shortFlist
    try:
        d = dump({"filelist": shortFlist, "debugMode": debugMode})
        v = vtk.vtk(d)
        if debugMode:
            print "\nfileNums: ", d.fileNums, "\n"
        v.manyGran(granName, fileNos=d.fileNums, output=debugMode)
    except KeyboardInterrupt:
        raise
    return 0
def check_round_ok(self):
    if self.outputset:
        if len(self.todo) > 0:
            self.todo.pop(0)
            dump.dump()
        else:
            self.p2z.write(self.val)
        return

    # If RoundOK has been sent, then wait until we have a new round
    if self.roundok:
        self.p2f.write(((self.sid, 'F_clock'), ('RequestRound',)))
        fro, di = self.wait_for(self.f2p)
        if di == 0:  # this means the round has ended
            self.clock_round += 1
            self.read_messages()  # reads messages and queues the messages to be sent
            self.roundok = False
        else:
            self.p2z.write(('early',))
        return

    # TODO change to check
    if len(self.todo) > 0:
        # pop off todo and do it
        f, args = self.todo.pop(0)
        if f:
            f(*args)
        else:
            dump.dump()
    elif len(self.todo) == 0 and not self.outputset:
        self.p2f.write(((self.sid, 'F_clock'), ('RoundOK',)))
        self.roundok = True
    else:
        dump.dump()
def run(self):
    while True:
        ready = gevent.wait(
            objects=[self.z2a, self.f2a, self.p2a, self._a2z, self._p2z],
            count=1)
        r = ready[0]
        if r == self.z2a:
            m = self.z2a.read()
            self.z2a.reset()
            self.env_msg(m)
            #self._z2a.write( m )
        elif r == self.f2a:
            m = self.f2a.read()
            self.f2a.reset()
            self.func_msg(m)
        elif r == self.p2a:
            self.p2a.reset()
            dump.dump()
        elif r == self._a2z:
            m = self._a2z.read()
            self._a2z.reset()
            self.a2z.write(m)
        elif r == self._p2z:
            m = self._p2z.read()
            self._p2z.reset()
            dump.dump()
        else:
            dump.dump()
def lppWorker(input):
    flist = input["filelist"]
    debugMode = input["debugMode"]
    outfileName = input["output"]
    overwrite = input["overwrite"]
    flistlen = len(flist)

    # generate name of manyGran
    splitfname = flist[0].rsplit(".")
    if outfileName == "":
        granName = splitfname[len(splitfname) - 1]
    elif outfileName.endswith("/"):
        granName = outfileName + splitfname[len(splitfname) - 1]
    else:
        granName = outfileName

    # if no-overwrite: read timestamp in first line of file
    # if corresponding dump-file does not already exist: add it to 'shortFlist'
    # shortFlist ... list of files to finally be processed by dump, and vtk.
    # elements of flist that are not in shortFlist already exist and will not be
    # converted anew and replaced
    shortFlist = []
    if overwrite == True:
        shortFlist = flist
    else:
        for f in flist:
            try:
                # read time
                ff = open(f)
                ff.readline()
                time = int(ff.readline())
                sys.stdout.flush()
                ff.close()
            except:
                continue
            # generate filename from time like in vtk,
            # check if file exists; if yes: do not add to list
            filename, file_bb, file_walls = vtk.generateFilename(granName, [time], 0)
            if not os.path.isfile(filename):
                shortFlist.append(f)

    # call dump, vtkp, manyGran on shortFlist
    try:
        d = dump({"filelist": shortFlist, "debugMode": debugMode})
        v = vtkp.vtkp(d)
        if debugMode:
            print "\nfileNums: ", d.fileNums, "\n"
        v.manyGran(granName, fileNos=d.fileNums, output=debugMode)
    except KeyboardInterrupt:
        raise
    return 0
def main():
    try:
        # '-f' takes a value, so it needs a trailing colon like '-g'
        opts, args = getopt.getopt(sys.argv[1:], "whif:g:v",
                                   ["help", "field=", "field2=", "int", "wiki", "verbose"])
    except getopt.GetoptError as err:
        # print help information and exit:
        print str(err)  # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    field = None
    field2 = None
    verbose = False
    wiki = False
    convertInt = False
    for o, a in opts:
        if o in ("-v", "--verbose"):
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-f", "--field"):
            field = a
        elif o in ("-w", "--wiki"):
            wiki = True
        elif o in ("-g", "--field2"):
            field2 = a
        elif o in ("-i", "--int"):
            convertInt = True
        else:
            assert False, "unhandled option"
    compare(legs, legs2, field, field2, convertInt, verbose, wiki)
    # leg.apply(legs)
    dump.dump(legs2)
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
from dump import dump

if len(sys.argv) != 3:
    print 'execute: python -f PostSubChainAnl.py d R'
    sys.exit()
d = float(sys.argv[1])
print 'd=', d
R = float(sys.argv[2])
print 'R=', R
ascale = 2 * R + d
InterCri = (R + 3) * (R + 3)

#Load in the Post Position
dP = dump("../Run/step2.dump.PA.lammpstrj")
Px, Py, Pz = dP.vecs(0, "x", "y", "z")

for Nindex in range(1, 5, 1):
    ##Load in Chain
    dC = dump("P" + str(Nindex) + "step3.postforvtk")
    dC.map(1, "id", 2, "type", 3, "x", 4, "y", 5, "z", 6, "xu", 7, "yu", 8, "zu",
           9, "vx", 10, "vy", 11, "vz")
    time, box, atoms, bonds, tris, lines = dC.viz(0)
    dC.sort("id")
    t = dC.time()
    t0 = t[0]
    tdiff = t[2] - t[1]
    #Ilist = open('P' + str(Nindex) + 'Interactionlist.dat', 'w')
    Iliststat = open('NewP' + str(Nindex) + 'InteractionStat.dat', 'w')      # t, number of contact beads for each post
    Iliststatplus = open('NewP' + str(Nindex) + 'InteractionStat.dat', 'w')  # t, number of contacting posts,
#!/usr/bin/env python

# Script:  dumpsort.py
# Purpose: sort the snapshots in a LAMMPS dump file by atom ID
# Syntax:  dumpsort.py oldfile N newfile
#          oldfile = old LAMMPS dump file in native LAMMPS format
#          N = column # for atom ID (usually 1)
#          newfile = new sorted LAMMPS dump file
# Author:  Steve Plimpton (Sandia), sjplimp at sandia.gov

import sys, os
path = os.environ["LAMMPS_PYTHON_TOOLS"]
sys.path.append(path)
from dump import dump

if len(sys.argv) != 4:
    raise StandardError, "Syntax: dumpsort.py oldfile N newfile"

oldfile = sys.argv[1]
ncolumn = int(sys.argv[2])
newfile = sys.argv[3]

d = dump(oldfile)
d.map(ncolumn, "id")
d.sort()
d.write(newfile)
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz

if me == 0:
    tkroot = None
    try:
        import Tkinter
    except:
        import tkinter as Tkinter
    tkroot = Tkinter.Tk()
    tkroot.withdraw()

    from dump import dump
    from gl import gl
    d = dump("tmp.dump", 0)
    g = gl(d)

    d.next()
    d.unscale()
    g.zoom(1)
    g.shift(0, 0)
    g.rotate(0, 270)
    g.q(10)
    g.box(1)
    g.show(ntimestep)

# run nfreq steps at a time w/out pre/post, read dump snapshot, display it

while ntimestep < nsteps:
    lmp.command("run %d pre no post no" % nfreq)
    ntimestep += nfreq
def convert_lammpsdump2ttree(dump_file, root_file):
    """
    Convert a LAMMPS dump output file to a ROOT TTree structure.

    def convert_lammpsdump2ttree(dump_file, root_file)

    Note: needs to be run through the pizza.py package.

    Input:
        dump_file: string of the name of the lammps output dump file (dump information)
        root_file: string of the name of the .root file to generate (overwrites)
    Output: Writes information to file.

    Structure: Tree with one branch for main data content.
        Branch contains TH2D. x is atom number. y is property.
        Bin content is value. Leaves are snapshots.

    Derek Fujimoto
    April 2016
    Last Updated: November 2016
    """

    # Check that file exists
    if not os.path.isfile(dump_file):
        print "convert_lammpsdump2ttree:: %s not found" % dump_file
        return

    # count timesteps in dumpfile
    n_steps = count_timesteps(dump_file)

    # Make a new root file
    if not root_file.endswith(".root", -5):
        root_file = root_file + ".root"
    fid = TFile(root_file, "UPDATE")

    # Get dump information
    d = dump.dump(dump_file, 0)
    t = d.next()  # get timestep at the same time
    ncol = len(d.names)  # number of columns (y)
    nameinv = d.names2str().split()  # list of names in order of appearance

    # get first step info
    natom = d.snaps[0].natoms  # number of atoms (x)
    boxsize = [d.snaps[0].xlo, d.snaps[0].xhi,  # box size
               d.snaps[0].ylo, d.snaps[0].yhi,
               d.snaps[0].zlo, d.snaps[0].zhi]
    boxsizelabels = ["xlo", "xhi", "ylo", "yhi", "zlo", "zhi"]

    # make arrays for tree linking
    tlink = array("i", [t])  # t pointer
    natomslink = array("i", [natom])  # natoms pointer
    bounds = TString(d.snaps[0].boxstr)  # box boundaries type
    boxlink = list()  # box size
    for i in range(6):
        boxlink.append(array("d", [boxsize[i]]))

    # prevent printing from pizza.py
    sys.stdout = open(os.devnull, "w")

    # make TH2D
    hist = make_th2d(natom, ncol, d.names2str())

    # Make ttree and link ttree
    tree = TTree("dump_dat", "Dump data from LAMMPS run")
    tree.Branch("snapshot", "TH2D", hist)
    tree.Branch("natoms", natomslink, "natom/I")
    tree.Branch("timestep", tlink, "t/I")
    tree.Branch("boxbounds", "TString", bounds)
    for i in range(6):
        tree.Branch(boxsizelabels[i], boxlink[i], boxsizelabels[i] + "/D")

    # iterate over snapshots and fill hist and fill tree
    istep = 0
    while t >= 0:
        # save memory
        d.tselect.one(t)
        d.delete()
        d.sort()

        # check natoms
        if natom != d.snaps[0].natoms:
            natom = d.snaps[0].natoms
            natomslink[0] = natom
            hist.Delete()
            hist = make_th2d(natom, ncol, d.names2str())

        # check bounds type
        if d.snaps[0].boxstr != bounds:
            bounds.Delete()
            bounds = TString(d.snaps[0].boxstr)
            tree.SetBranchAddress("boxbounds", bounds)

        # update box size
        boxlink[0][0] = d.snaps[0].xlo
        boxlink[1][0] = d.snaps[0].xhi
        boxlink[2][0] = d.snaps[0].ylo
        boxlink[3][0] = d.snaps[0].yhi
        boxlink[4][0] = d.snaps[0].zlo
        boxlink[5][0] = d.snaps[0].zhi

        # fill hist
        for i in range(natom):
            for j in range(ncol):
                hist.SetBinContent(i, j, d.snaps[0].atoms[i][j])

        # fill tree and get next
        tlink[0] = t
        tree.Fill()
        t = d.next()
        istep += 1
        # print progress every 100 snapshots
        if istep % 100 == 0:
            sys.__stdout__.write("converting dump file %4.2f %s \r"
                                 % (istep * 100.0 / n_steps, "%"))
            sys.__stdout__.flush()
    sys.__stdout__.write("converting dump file ... done\n")
    tree.Write("", TObject.kOverwrite)
    fid.Close()
#P"+str(Nindex)+"step3.postforvtk #Output #f:Distance from center post (Bead level) #f: P1OrientationCorr.dat #Active: TAP1OrientationCorr.dat #Efficiency: 2m48.106s 1 Sample 1000 timsteps from numpy import * from math import * from dump import dump Nlist=[160,80,40,20] for Nindex in range(1,5,1): N=Nlist[Nindex-1]; #print N #Load in Chain dC=dump("P"+str(Nindex)+"step3.postforvtk"); dC.map(1,"id",2,"type",3,"x",4,"y",5,"z",6,"xu",7,"yu",8,"zu",9,"vx",10,"vy",11,"vz") t = dC.time(); #f= open('P'+str(Nindex)+'OrientationCorr.dat', 'w') f2= open('TAP'+str(Nindex)+'OrientationCorr.dat', 'w') TC3D=[0]*(int(N/2)); TC2D=[0]*(int(N/2)); #print f for j in range(len(t)): xt,yt,zt=dC.vecs(j*(t[1]-t[0]),"xu","yu","zu") ##Absolute Unwrapped Value C3D=[0]*int(len(xt)/2); C2D=[0]*int(len(xt)/2); for k in range(0,int(len(xt)/2),1): #k=i-j v1=[0]*3; v2=[0]*3; l=0;
def dump(args):
    import dump
    dump.dump(args)
runflag = 0
temptarget = 1.0

# wrapper on VMD window via Pizza.py vmd tool
# just proc 0 handles reading of dump file and viz

if me == 0:
    from vmd import vmd
    v = vmd()
    v('menu main off')
    v.rep('VDW')

    from dump import dump
    from pdbfile import pdbfile
    d = dump('tmp.dump', 0)
    p = pdbfile(d)
    d.next()
    d.unscale()
    p.single(ntimestep)
    v.new('tmp.pdb', 'pdb')

# display GUI with run/stop buttons and slider for temperature

if me == 0:
    try:
        from Tkinter import *
    except:
        from tkinter import *
    tkroot = Tk()
    tkroot.withdraw()
def load(self):
    self.__p = []
    dirList = os.listdir(self.__configpath)
    for i in dirList:
        self.__p.append(dump.dump(self.__backuppath,
                                  os.path.join(self.__configpath, i),
                                  self.__mailer))
for i in xrange(nbonds):
    btype[i] = int(bond[i][1] - 1)
    iatom[i] = int(bond[i][2] - 1)
    jatom[i] = int(bond[i][3] - 1)

ntypes = 0
for i in xrange(nbonds):
    ntypes = max(bond[i][1], ntypes)
ntypes = int(ntypes)

ncount = ntypes * [0]
bin = nbins * [0]
for i in xrange(nbins):
    bin[i] = ntypes * [0]

# read snapshots one-at-a-time

d = dump(files, 0)
d.map(1, "id", 2, "type", 3, "x", 4, "y", 5, "z")

while 1:
    time = d.next()
    if time == -1:
        break

    box = (d.snaps[-1].xlo, d.snaps[-1].ylo, d.snaps[-1].zlo,
           d.snaps[-1].xhi, d.snaps[-1].yhi, d.snaps[-1].zhi)
    xprd = box[3] - box[0]
    yprd = box[4] - box[1]
    zprd = box[5] - box[2]

    d.unscale()
    d.sort()
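# The row-by-row initialisation of `bin` above is the safe way to build an
# nbins x ntypes zero matrix: each row is a distinct list. The comprehension
# below is equivalent, whereas `[[0] * ntypes] * nbins` would alias one row.
nbins_demo, ntypes_demo = 4, 2  # illustrative sizes, not from the source
bin2d = [ntypes_demo * [0] for _ in range(nbins_demo)]
assert bin2d[0] is not bin2d[1]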
        ilast = iarg + 1
        while ilast < narg and argv[ilast][0] != '-':
            ilast += 1
        rfiles = argv[iarg + 1:ilast]
        iarg = ilast
    else:
        break

if iarg < narg or not outfile or not rfiles:
    print "Syntax: neb_final.py -o outfile -b backfile -r dump1 dump2 ..."
    sys.exit()

if os.path.exists(outfile):
    os.remove(outfile)

# nback = additional atoms in each snapshot

if backfile:
    back = dump(backfile)
    t = back.time()
    back.tselect.one(t[-1])
    nback = back.snaps[-1].nselect
else:
    nback = 0

# write out each snapshot
# time = replica #
# natoms = ntotal, by overwriting nselect
# add background atoms if requested

n = 1
for file in rfiles:
    neb = dump(file)
    t = neb.time()
    neb.tselect.one(t[-1])
def test(self, req):
    req.content_type = "text/plain"
    apache.log_error("request!", apache.APLOG_NOTICE)

    req.write("\nParsed URI:\n-------------\n")
    req.write(dump.dump(req.parsed_uri))

    req.write("\nModPython Options:\n-------------\n")
    req.write(dump.dump(req.get_options()))

    req.write("\nModPython Config:\n-------------\n")
    req.write(dump.dump(req.get_config()))

    req.write("\nOS Env:\n-------------\n")
    req.write(dump.dump(os.environ))

    req.write("\nProcess Env:\n-------------\n")
    req.add_common_vars()
    req.write(dump.dump(req.subprocess_env))

    req.write("\n")
    req.write("server_root=" + apache.server_root() + "\n")
    req.write("document_root=" + req.document_root() + "\n")
    req.write("loglevel=" + dump.dump(req.server.loglevel))
    req.write("is_virtual=" + dump.dump(req.server.is_virtual))
    req.write("phase=" + dump.dump(req.phase))
    req.write("handler=" + dump.dump(req.handler))
    req.write("uri=" + dump.dump(req.uri))
    req.write("filename=" + dump.dump(req.filename))
    req.write("py interpreter=" + dump.dump(req.interpreter))

    req.write("\n")
    req.write("__file__=" + __file__ + "\n")
    req.write("dir=" + os.path.dirname(__file__) + "\n")

    req.write("\n")
    if apache.mpm_query(apache.AP_MPMQ_IS_THREADED):
        req.write("mpm is threaded\n")
    else:
        req.write("mpm is NOT threaded\n")
    if apache.mpm_query(apache.AP_MPMQ_IS_FORKED):
        req.write("mpm is forked\n")
    else:
        req.write("mpm is NOT forked\n")

    req.write("\n")
    req.write("sys.path: %s\n" % sys.path)

    req.write("\n")
    req.write("POST form data:\n")
    req.write("content length: " + dump.dump(req.clength))
    req.write(dump.dump(req.read()))
    #req.write(dump.dump(apache.config_tree()))

    return apache.OK
# Purpose: launch vcr tool on LAMMPS dump files
# Syntax:  dview.py dump.1 dump.2 ...
#          files = one or more dump files
# Example: dview.py dump.*
# Author:  Steve Plimpton (Sandia)

# enable script to run from Python directly w/out Pizza.py

import sys
from dump import dump

# w/out Pizza.py these lines need to come before import of gl tool

import Tkinter
tkroot = Tkinter.Tk()
tkroot.withdraw()

from gl import gl
from vcr import vcr

if not globals().has_key("argv"):
    argv = sys.argv

# main script

if len(argv) < 2:
    raise StandardError, "Syntax: dview.py dump.1 ..."

files = ' '.join(argv[1:])

d = dump(files)
g = gl(d)
v = vcr(g)
def parse():
    index = {}
    with open("maplight-convert/active_incumbents.json") as infile:
        for line in infile:
            if 'person_id' in line:
                data = json.loads(line[:-2])
                person_id = str(data['person_id'])
                # print person_id, data
                index[person_id] = data['display_name']  #data
            else:
                print line
    return index

data = parse()
#print data
for x in sorted(legs['wp'].keys()):
    idsobj = legs['wp'][x]['id']
    if 'maplight' in idsobj:
        maplight_id = str(idsobj['maplight'])
        if maplight_id in data:
            # print "found", maplight_id
            mp = data[maplight_id]
            idsobj['maplight_name'] = mp
        else:
            # pass
            print "missing", maplight_id

leg.apply(legs)
dump.dump(legs)
#            (usually 1,2,3,4,5)
# pdbfile  = new PDB file
# template = PDB file to use as template for creating new PDB file
#            this arg is optional, if not used a generic PDB file is created
# Author: Steve Plimpton (Sandia), sjplimp at sandia.gov

import sys, os
path = os.environ["LAMMPS_PYTHON_TOOLS"]
sys.path.append(path)
from dump import dump
from pdbfile import pdbfile

if len(sys.argv) != 8 and len(sys.argv) != 9:
    raise StandardError, "Syntax: dump2pdb.py dumpfile Nid Ntype Nx Ny Nz pdbfile template"

dumpfile = sys.argv[1]
nid = int(sys.argv[2])
ntype = int(sys.argv[3])
nx = int(sys.argv[4])
ny = int(sys.argv[5])
nz = int(sys.argv[6])
pfile = sys.argv[7]
if len(sys.argv) == 9:
    template = sys.argv[8]
else:
    template = ""

d = dump(dumpfile)
d.map(nid, "id", ntype, "type", nx, "x", ny, "y", nz, "z")

if template:
    p = pdbfile(template, d)
else:
    p = pdbfile(d)
p.one(pfile)
def save():
    dump.dump(_legs)
nprads = len(nave)
prad = np.zeros(nprads)
prad2 = np.zeros(nprads)
for i in range(nprads):
    prad[i] = (nave[i] + 8.61906) / 7.56055
    prad2[i] = prad[i] * prad[i]
tol = 0  # exactly equal number of particles

#Input file name
trajectory = "{:}.lammpstrj".format(filename)
print "Input file:{0:s}".format(trajectory)

#Commands from the pizza.py library. Very useful for reading lammps trajectories; will be useful in the future.
d = dump(trajectory)
d.sort()
time = d.time()

stats = []
snap = []
loc = []
frac = []
for i in range(nprads):
    stats.append([])
    snap.append([])
    loc.append([])
    frac.append([])
#!/usr/bin/env python

import sys, os, dump

if len(sys.argv) < 4:
    print "Specify (1) a fieldname, (2) a number of bins, (3) a run-dir with rootfiles with phasespaces."
    sys.exit()

fieldname = sys.argv[-3]  # Ekine and EmissionPointZ most likely
nrbin = float(sys.argv[-2])
rootfiles = [x.strip() for x in
             os.popen("find " + sys.argv[-1] + " -print | grep -i 'interps.*.root$'").readlines()]
outname = filter(str.isalnum, sys.argv[-1]) + fieldname

print >> sys.stderr, 'Dumping', rootfiles

out = dump.dump(rootfiles, fieldname, nrbin, -nrbin, nrbin)  # assume 1 bin = 2 mm
out.tofile(outname + '.txt', sep='\n')
def dump(val):
    import dump, os
    dump_file = os.environ.get('DUMP_FILE', '')
    os.environ['DUMP_FILE'] = ''
    dump.dump(val)
    os.environ['DUMP_FILE'] = dump_file
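# The save/blank/restore of DUMP_FILE above maps naturally onto a context
# manager; a minimal stdlib-only sketch (not part of the original source):
import os
from contextlib import contextmanager

@contextmanager
def env_blanked(name):
    """Temporarily blank an environment variable, restoring it on exit."""
    saved = os.environ.get(name, '')
    os.environ[name] = ''
    try:
        yield
    finally:
        os.environ[name] = saved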
ntags = Pe * (x2 - x1) * L

#Input file name
#filename = "SDFlatSinMagnet_phi{0:g}_pe100_step1000000_Tau10.L{1:g}.Lx100.lammpstrj".format(Pe,L)
if Lx == 100:
    filename = "FlatSinMagnet_phi{0:g}_pe100_step1000000_Tau10.L{1:g}.lammpstrj".format(Pe, L)
else:
    filename = "FlatSinMagnet_phi{0:g}_pe100_step1000000_Tau10.L{1:g}.Lx{2:g}.lammpstrj".format(Pe, L, Lx)
print filename

#Commands from the pizza.py library. Very useful for reading lammps trajectories; will be useful in the future.
d = dump(filename)
d.sort()
time = d.time()

if collect * skip1 + skip > len(time):
    print "Warning, not enough frames in trajectory for collection parameters"

dens = np.zeros(100 * Lx)
count = 0
sumx = 0
sumy = 0
grandsumx = 0
grandsumy = 0
for t in time:
    if count > skip and count % skip1 == 0 and ((count - skip) / skip1) < collect + 1:
        # elegant module in the pizza.py library; an easy way to process the
        # lammpstrj files. d.vecs() goes frame by frame.
#!/usr/bin/env python
"""
convert a SPPARKS multi-timestep dump file to single-timestep vtks
with pizza.py
"""

import sys
import dump  # pizza.py module
import vtk   # pizza.py module

infile = ""
try:
    infile = sys.argv[1]
except:
    sys.exit("Please supply the filename of a valid SPPARKS dump file.")

print("loading dump file timestep:")
d = dump.dump(infile)
v = vtk.vtk(d)
print("writing vtk file for timestep:")
v.many("potts")
def readFile(self, index=0):
    """
    Read dump file with dump.py.
    """
    self.d = dp.dump(self.ifile)
    self.nsnaps = self.d.nsnaps
    # choose a snapshot to analyze, between 0 and nsnaps-1.
    # for now choose the first snapshot, e.g. index = 0!
    assert index <= self.nsnaps - 1 and index >= 0
    self.snapsIndex = self.d.time()[index]
    self.natoms = self.d.snaps[index].natoms
    assert self.natoms == self.numBeads
    self.xBox = self.d.snaps[index].xhi - self.d.snaps[index].xlo
    self.yBox = self.d.snaps[index].yhi - self.d.snaps[index].ylo
    self.zBox = self.d.snaps[index].zhi - self.d.snaps[index].zlo
    self.xhi = self.d.snaps[index].xhi
    self.xlo = self.d.snaps[index].xlo
    self.yhi = self.d.snaps[index].yhi
    self.ylo = self.d.snaps[index].ylo
    self.zhi = self.d.snaps[index].zhi
    self.zlo = self.d.snaps[index].zlo
    self.box = np.array([self.xBox, self.yBox, self.zBox])

    # Label each group with a molecule ID number: one PC/PA chain is one
    # molecule; each counterion and salt atom is considered one molecule.
    self.atomsInfo = sorted(self.d.snaps[index].atoms, key=lambda x: x[0])
    self.atomsType = np.array([self.atomsInfo[i][1] for i in range(0, self.natoms)])
    self.atomsType = self.atomsType.astype(int)
    self.atomsId = np.array([self.atomsInfo[i][0] for i in range(0, self.natoms)])
    self.atomsId = self.atomsId.astype(int)
    self.atomsCoord = np.array([self.atomsInfo[i][2:] for i in range(0, self.natoms)])
    self.molId = np.zeros(self.natoms, dtype=int)
    self.molId[:self.numPcBeads] = (self.atomsId[:self.numPcBeads] - 1) / self.lenPc + 1
    self.molId[self.numPcBeads:self.numPaPcBeads] = self.numPc + \
        (self.atomsId[self.numPcBeads:self.numPaPcBeads] - self.numPcBeads - 1) / self.lenPa + 1
    self.molId[self.numPaPcBeads:] = self.atomsId[self.numPaPcBeads:] - self.numPaPcBeads + self.numPa + self.numPc
    self.numMols = self.natoms - self.numPaPcBeads + self.numPa + self.numPc
    assert self.numMols == self.molId[-1]

    self.atomsCharge = np.zeros(self.natoms, dtype=int)
    for i in range(self.natoms):
        if i < self.numPcBeads:
            if (i + 1) % self.chargeRepeat != 0:
                self.atomsCharge[i] = 0
            else:
                self.atomsCharge[i] = 1
        elif i < self.numPaPcBeads:
            if (i + 1) % self.chargeRepeat != 0:
                self.atomsCharge[i] = 0
            else:
                self.atomsCharge[i] = -1
        elif i < (self.numPaPcBeads + self.numPcCounterion):
            self.atomsCharge[i] = -1
        elif i < (self.numPaPcBeads + self.numCounterion):
            self.atomsCharge[i] = 1
        elif i < (self.numPaPcBeads + self.numCounterion + self.numSalt / 2):
            self.atomsCharge[i] = -1
        else:
            self.atomsCharge[i] = 1

    self.numBonds = self.numPc * (self.lenPc - 1) + self.numPa * (self.lenPa - 1)
    self.bondList = []
    for i in range(self.numBonds):
        if i < self.numPc * (self.lenPc - 1):
            j = i / (self.lenPc - 1)
            k = i % (self.lenPc - 1)
            self.bondList.append([self.lenPc * j + k + 1, self.lenPc * j + k + 2])
        else:
            i = i - self.numPc * (self.lenPc - 1)
            j = i / (self.lenPa - 1)
            k = i % (self.lenPa - 1)
            self.bondList.append([self.numPcBeads + self.lenPa * j + k + 1,
                                  self.numPcBeads + self.lenPa * j + k + 2])
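# Worked check of the molecule-ID formula above: 1-based atom IDs map onto
# 1-based chain IDs via integer division (Python 2 `/` on ints, i.e. `//`).
# Chain length is illustrative, not from the source:
lenPc_demo = 3
assert [(a - 1) // lenPc_demo + 1 for a in range(1, 7)] == [1, 1, 1, 2, 2, 2]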
        iarg = ilast
    else:
        break

if iarg < narg or not outfile or not rfiles:
    print "Syntax: neb_combine.py -o outfile -b backfile -r dump1 dump2 ..."
    sys.exit()

if os.path.exists(outfile):
    os.remove(outfile)

# ntotal = total atoms in each snapshot
# reset IDs of atoms in each NEB dump file

ntotal = 0
d = []
for file in rfiles:
    one = dump(file)
    nnew = one.snaps[0].nselect
    idvec = range(ntotal + 1, ntotal + nnew + 1)
    one.setv("id", idvec)
    ntotal += nnew
    d.append(one)

# nback = additional atoms in each snapshot
# reset IDs of atoms in background file

if backfile:
    back = dump(backfile)
    t = back.time()
    back.tselect.one(t[0])
    nback = back.snaps[0].nselect
    idvec = range(ntotal + 1, ntotal + nback + 1)
pgsrc_rpct_x = pgsrc_rpct_x + (volume_offset - pgsrc_rpct_x[0])  # offset for pg source image
pgsrc_rpct_xhist = pgsrc_rpct_xhist + (volume_offset - pgsrc_rpct_xhist[0])  # same

#pgsrc_ctfo = auger.get_fop(pgsrc_ct_x, pgsrc_ct_y)
pgsrc_rpctfo = auger.get_fop(pgsrc_rpct_x, pgsrc_rpct_y)

###########################################################################################################
# emission in worldframe

smooth_param = 20  # IBA: 20, Priegnitz 2015

pgemis_ct_x = np.linspace(-149.5, 149.5, 300)
pgemis_rpct_x = np.linspace(-149.5, 149.5, 300)

#pgemis_ct = dump.thist2np_xy('fromcluster/run.8ZQJ/output.2399064/GammProdCount-Prod.root')
pgemis_rpct_y = np.array(dump.dump(['fromcluster/run.13px/output.2401114/pgprod-worldframe.root'],
                                   'X', 300, -150, 150))

#pgemis_ct_y = pgemis_ct_y / pgemis_ct_y.max()
pgemis_rpct_y = pgemis_rpct_y / pgemis_rpct_y.max()

#pgemis_ct_y_smooth = gaussian_filter(pgemis_ct_y, sigma=smooth_param)
#pgemis_ct_y_smooth = pgemis_ct_y_smooth / pgemis_ct_y_smooth.max()
pgemis_rpct_y_smooth = gaussian_filter(pgemis_rpct_y, sigma=smooth_param)
pgemis_rpct_y_smooth = pgemis_rpct_y_smooth / pgemis_rpct_y_smooth.max()

#pgemis_ctfo = auger.get_fop(pgemis_ct_x, pgemis_ct_y)
pgemis_rpctfo = auger.get_fop(pgemis_rpct_x, pgemis_rpct_y)
#pgemis_smooth_ctfo = auger.get_fop(pgemis_ct_x, pgemis_ct_y_smooth)
pgemis_smooth_rpctfo = auger.get_fop(pgemis_rpct_x, pgemis_rpct_y_smooth)