def nm_to_u(s):
    """Get the user part of a nickmask.

    (The source of an Event is a nickmask.)
    """
    s = string.split(s, "!")[1]
    return string.split(s, "@")[0]
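# Usage sketch (hypothetical nickmask; assumes `import string` at module
# level): an IRC-style nickmask has the form nick!user@host, so this
# yields the user part between "!" and "@".
print nm_to_u("alice!alice_@irc.example.org")  # -> alice_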
def readLine(self, line):
    # this is the inner loop
    self._lineNo = self._lineNo + 1
    stripped = string.lstrip(line)
    if len(stripped) == 0:
        if self._mode == PLAIN:
            self.endPara()
        else:
            # preformatted, append it verbatim
            self._buf.append(line)
    elif line[0] == '.':
        # we have a command of some kind
        self.endPara()
        words = string.split(stripped[1:])
        cmd, args = words[0], words[1:]
        # is it a parser method?
        if hasattr(self.__class__, cmd):
            # this was very bad; any type error in the method was hidden
            # we have to hack the traceback
            try:
                getattr(self, cmd)(*args)
            except TypeError, err:
                sys.stderr.write("Parser method: %s(*%s) %s at line %d\n" %
                                 (cmd, args, err, self._lineNo))
                raise
        else:
            # assume it is a paragraph style -
            # becomes the formatter's problem
            self.endPara()  # end the last one
            words = string.split(stripped, ' ', 1)
            assert len(words) == 2, "Style %s but no data at line %d" % (
                words[0], self._lineNo)
            (styletag, data) = words
            self._style = styletag[1:]
            self._buf.append(data)
def parser():
    text = open("dadesED2014-100.txt", "r")  # Open the document
    lines = text.readlines()                 # and store its lines in 'lines'
    text.close()
    newTable = [0 for x in range(100)]  # Table where we store the users' information
    i = 0
    for users in lines:  # Iterate over each user; one user per line
        newUser = []
        TableOR = string.split(users, "||")         # Split the user record on '||'
        TableAND = string.split(TableOR[2], "&&&")  # Do the same with '&&&'
        Table100 = string.split(TableOR[3], "&&&")
        nTable100 = []  # Temporary table holding the list of listened-to artists
        for elem in Table100:  # Split on '%%' and add each artist to the list
            nElem = elem.split("%%")
            nTable100.append(nElem)
        for j in range(2):  # Keep the usable data from the first fields (ID and country)
            newUser.append(TableOR[j])
        # Store all the information about the favourite artist (name, plays and average)
        newUser.append(TableAND)
        newUser.append(nTable100)
        newTable[i] = newUser  # Finally, add the user's row to the final table
        i += 1  # Repeat for every user until done
    return newTable
def init2():
    global condor_sbin_path
    # try using condor commands to find it out
    try:
        condor_sbin_path = iexe_cmd("condor_config_val SBIN")[0][:-1]  # remove trailing newline
    except ExeError, e:
        # try to find the RELEASE_DIR, and append sbin
        try:
            release_path = iexe_cmd("condor_config_val RELEASE_DIR")
            condor_sbin_path = os.path.join(release_path[0][:-1], "sbin")
        except ExeError, e:
            # try condor_advertise in the path
            try:
                condora_sbin_path = iexe_cmd("which condor_advertise")
                condor_sbin_path = os.path.dirname(condora_sbin_path[0][:-1])
            except ExeError, e:
                # look for condor_config in /etc
                if os.environ.has_key("CONDOR_CONFIG"):
                    condor_config = os.environ["CONDOR_CONFIG"]
                else:
                    condor_config = "/etc/condor/condor_config"
                try:
                    # SBIN = <path>
                    bin_def = iexe_cmd('grep "^ *SBIN" %s' % condor_config)
                    condor_sbin_path = string.split(bin_def[0][:-1])[2]
                except ExeError, e:
                    try:
                        # RELEASE_DIR = <path>
                        release_def = iexe_cmd('grep "^ *RELEASE_DIR" %s' % condor_config)
                        condor_sbin_path = os.path.join(
                            string.split(release_def[0][:-1])[2], "sbin")
                    except ExeError, e:
                        pass  # don't know what else to try
def readUNIPROTACC():
    #
    # parse UniProt-to-InterPro associations via the UniProt/Acc file
    #
    # dictionary contains:
    #   key   = uniprot id
    #   value = interpro id (IPR#####)
    #
    fp = open(uniprotFile, 'r')
    for line in fp.readlines():
        tokens = string.split(line[:-1], '\t')
        key = tokens[0]
        # not all uniprot ids have interpro ids...
        if len(tokens[6]) == 0:
            continue
        values = string.split(tokens[6], ',')
        if not uniprot_to_ip.has_key(key):
            uniprot_to_ip[key] = []
        for v in values:
            uniprot_to_ip[key].append(v)
    fp.close()
    return 0
def authToESMySQL(mysqlHost, userMySQL="", passwordMySQL="", adminLogin=""):
    """Provides authentication with the EventStore DB."""
    # if userMySQL/passwordMySQL are not given, read them from $HOME/.esdb.conf
    loginInfo, adminInfo, dbName, host, port, socket = readConfigFile()
    try:
        if adminLogin:
            userMySQL, passwordMySQL = string.split(adminInfo, ":")
        else:
            userMySQL, passwordMySQL = string.split(loginInfo, ":")
    except:
        print "Failed to decode login/password information from your $HOME/.esdb.conf"
        raise
    # we used md5crypt.md5crypt(password,'') to generate these hashes
    md5user = ["$1$$e9gJaPbMtkdCzRfQ60OFF/", "$1$$ix2ie71gC4Xwad5LhaC3S1"]
    md5pass = ["$1$$sbkdtWBHO51xPwl./H9N81", "$1$$sbkdtWBHO51xPwl./H9N81"]
    if not md5user.count(md5crypt.unix_md5_crypt(userMySQL, "")):
        print "Failed to log in to mysql server, incorrect login. Please try again."
        sys.exit()
    if not md5pass.count(md5crypt.unix_md5_crypt(passwordMySQL, "")):
        print "Failed to log in to mysql server, incorrect password. Please try again."
        sys.exit()
    # the actual DB isn't protected by a password right now
    return (userMySQL, "")
def update(self, objfile):
    self.filename = objfile
    (sysname, nodename, release, version, machine) = os.uname()
    if sysname == "Linux":
        fp = os.popen("nm -P " + objfile, "r")
        symbols = map(self.split_, fp.readlines())
    elif sysname == "SunOS":
        fp = os.popen("nm -p " + objfile, "r")
        symbols = map(lambda l: string.split(l[12:]), fp.readlines())
    elif sysname == "IRIX":
        fp = os.popen("nm -B " + objfile, "r")
        symbols = map(lambda l: string.split(l[8:]), fp.readlines())
    object = objfile
    for (type, symbol) in symbols:
        if not self.objects.has_key(object):
            self.objects.update(
                {object: dict({"exports": dict(), "imports": dict()})})
        if type == "U":
            self.objects[object]["imports"].update({symbol: dict()})
        elif type in ["C", "D", "T"]:
            self.objects[object]["exports"].update({symbol: dict()})
    fp.close()
    return (None)
def read_uxyz(xyz_name):
    'Takes the last unit cell and structure from an xyz file'
    xyz_file = open(xyz_name, 'r')
    while 1:
        line = xyz_file.readline()
        if not line:
            break
        words = string.split(line)
        if not words:
            break
        nat = int(words[0])
        title = xyz_file.readline()
        if title[:4] == 'CELL':
            uc = map(float, string.split(title)[1:])
        else:
            uc = None
        atoms = []
        for i in range(nat):
            line = xyz_file.readline()
            words = string.split(line)
            sym = words[0]
            sym = cleansym(sym)
            x, y, z = float(words[1]), float(words[2]), float(words[3])
            atoms.append((sym, x, y, z))
    xyz_file.close()
    return uc, atoms
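# Sketch of the frame layout read_uxyz expects (hypothetical data; assumes
# cleansym() simply normalizes the element symbol): an atom count, a title
# line starting with CELL carrying the unit cell, then one line per atom.
# Only the last frame in the file is returned.
example = """3
CELL 10.0 10.0 10.0
O   0.000  0.000  0.000
H   0.757  0.586  0.000
H  -0.757  0.586  0.000
"""
open("last_frame.xyz", "w").write(example)
print read_uxyz("last_frame.xyz")
# -> ([10.0, 10.0, 10.0], [('O', 0.0, 0.0, 0.0), ('H', 0.757, 0.586, 0.0), ...])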
def dissassembleDBName(db):
    """Disassemble the input db string.

    @type db: string
    @param db: input db string, e.g. EventStore@lnx151:3306:/var/log/mysql
    @rtype: tuple (dbName,dbHost,dbPort,dbSocket)
    @return: DB name, hostname of DB, its port and socket.
    """
    dbPort = ""
    dbSocket = ""
    # check that the db name doesn't contain a "%" character
    if string.find(db, "%") != -1:
        raise "'%' is not allowed in DB name, if you'd like to specify port, please use ':' separator"
    # we need to parse db into dbName, dbHost, dbPort, dbSocket
    dbComponents = string.split(db, "@")
    if len(dbComponents) == 2:
        dbName = dbComponents[0]
        content = string.split(dbComponents[1], ":")
        dbHost = content[0]
        if len(content) > 1 and content[1]:
            dbPort = content[1]
        if len(content) == 3 and content[2]:
            dbSocket = content[2]
    else:
        dbName = "EventStoreTMP"
        dbHost = dbComponents[0]
        dbPort = ""
        dbSocket = ""
    return (dbName, dbHost, dbPort, dbSocket)
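# Usage sketch, based on the docstring's name@host:port:socket format:
print dissassembleDBName("EventStore@lnx151:3306:/var/log/mysql")
# -> ('EventStore', 'lnx151', '3306', '/var/log/mysql')
print dissassembleDBName("lnx151")
# -> ('EventStoreTMP', 'lnx151', '', '')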
def process(dataMatrix, samples, sample, genes, cancer, infile, flog,
            valuePOS, LOG2, maxLength):
    # one sample per file
    fin = open(infile, 'U')
    fin.readline()
    for line in fin.readlines():
        data = string.split(line[:-1], "\t")
        hugo = data[0]
        value = data[valuePOS]
        hugo = string.split(hugo, "|")[0]
        if hugo == "?":
            hugo = data[0]
        if hugo not in genes:
            p = len(genes)
            genes[hugo] = p
            l = []
            for j in range(0, maxLength):
                l.append("")
            dataMatrix.append(l)
        if value not in ["", "null", "NULL", "Null", "NA"]:
            if LOG2:
                value = float(value)
                if value < 0:
                    value = ""
                else:
                    value = math.log(value + 1, 2)
            x = genes[hugo]
            y = samples[sample]
            dataMatrix[x][y] = value
    fin.close()
    return
def deg2HMS(ra='', dec='', round=False):
    import string
    RA, DEC = '', ''
    if dec:
        if string.count(str(dec), ':') == 2:
            dec00 = string.split(dec, ':')
            dec0, dec1, dec2 = float(dec00[0]), float(dec00[1]), float(dec00[2])
            if '-' in str(dec0):
                DEC = (-1) * ((dec2 / 60. + dec1) / 60. + ((-1) * dec0))
            else:
                DEC = (dec2 / 60. + dec1) / 60. + dec0
        else:
            if str(dec)[0] == '-':
                dec0 = (-1) * abs(int(dec))
            else:
                dec0 = abs(int(dec))
            dec1 = int((abs(dec) - abs(dec0)) * 60)
            dec2 = (((abs(dec) - abs(dec0)) * 60) - abs(dec1)) * 60
            DEC = '00'[len(str(dec0)):] + str(dec0) + ':' + \
                  '00'[len(str(dec1)):] + str(dec1) + ':' + \
                  '00'[len(str(int(dec2))):] + str(dec2)
    if ra:
        if string.count(str(ra), ':') == 2:
            ra00 = string.split(ra, ':')
            ra0, ra1, ra2 = float(ra00[0]), float(ra00[1]), float(ra00[2])
            RA = ((ra2 / 60. + ra1) / 60. + ra0) * 15.
        else:
            ra0 = int(ra / 15.)
            ra1 = int(((ra / 15.) - ra0) * 60)
            ra2 = ((((ra / 15.) - ra0) * 60) - ra1) * 60
            RA = '00'[len(str(ra0)):] + str(ra0) + ':' + \
                 '00'[len(str(ra1)):] + str(ra1) + ':' + \
                 '00'[len(str(int(ra2))):] + str(ra2)
    if ra and dec:
        return RA, DEC
    else:
        return RA or DEC
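# Round-trip sketch, traced from the code above: sexagesimal strings go to
# decimal degrees, bare numbers go to zero-padded sexagesimal strings.
print deg2HMS(ra='12:00:00')    # -> 180.0
print deg2HMS(dec='-30:30:00')  # -> -30.5
print deg2HMS(ra=180.0)         # -> '12:00:00.0'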
def urlParse(self, url):
    """ return path as list and query string as dictionary
        strip / from path
        ignore empty values in query string
        for example:
        if url is: /xyz?a1=&a2=0%3A1 then result is: (['xyz'], { 'a2' : '0:1' } )
        if url is: /a/b/c/ then result is: (['a', 'b', 'c'], {} )
        if url is: /? then result is: ([], {} )
    """
    x = string.split(url, '?')
    pathlist = filter(None, string.split(x[0], '/'))
    d = {}
    if len(x) > 1:
        q = x[-1]
        # eval query string
        x = string.split(q, '&')
        for kv in x:
            y = string.split(kv, '=')
            k = y[0]
            try:
                v = urllib.unquote_plus(y[1])
                if v:  # ignore empty values
                    d[k] = v
            except:
                pass
    return (pathlist, d)
def _getHeight(self):
    "splits into lines"
    self.comment1lines = string.split(self.comment1, "\n")
    self.codelines = string.split(self.code, "\n")
    self.comment2lines = string.split(self.comment2, "\n")
    # count code lines, not characters of the code string
    textheight = (len(self.comment1lines) + len(self.codelines) +
                  len(self.comment2lines) + 18)
    return max(textheight, self.drawHeight)
def testObject(test, obj, expect):
    contents = test.read(test.workpath('work1', obj))
    line1 = string.split(contents, '\n')[0]
    actual = string.join(string.split(line1))
    if not expect == actual:
        print "%s: %s != %s\n" % (obj, repr(expect), repr(actual))
        test.fail_test()
def getClusterRecord(self, cl):
    #print 'in getClusterRecord'
    clRecList = []
    curList = []  # curList gets list of conf info
    curInd = int(split(cl[0])[0])
    ctr = 1
    for l in cl:
        ll = split(l)
        # when built, newList is
        # [Rank, SubRank, Run, DockedEnergy, ClusterRMSD, RefRMSD]
        newList = map(lambda x: int(x), ll[:3])
        # 3/29/05
        if self.wroteAll and self.version != 4.0:
            #print "setting run number to ", ctr
            newList[2] = ctr
            ctr = ctr + 1
        newList2 = map(lambda x: float(x), ll[3:-1])
        newList.extend(newList2)
        if newList[0] == curInd:
            curList.append(newList)
        else:
            clRecList.append(curList)
            curList = [newList]
            curInd = newList[0]
    clRecList.append(curList)
    self.clusterRecord = clRecList
def cleanup_output(infile):
    skiplines(infile, 5)
    result = []
    while True:
        line = infile.readline()
        cols = string.split(line)
        if len(cols) == 0:
            break
        Vm = float(cols[6])
        result.append(["B", cols[1], Vm])
    skiplines(infile, 2)
    while True:
        line = infile.readline()
        cols = string.split(line)
        if len(cols) == 0:
            break
        P1 = float(cols[len(cols) - 4])
        P2 = float(cols[len(cols) - 2])
        P = max(abs(P1), abs(P2))
        result.append(["L", cols[0], cols[1], P])
    return result
def getHMDBData(species):
    program_type, database_dir = unique.whatProgramIsThis()
    filename = database_dir + '/' + species + '/gene/HMDB.txt'
    x = 0
    fn = filepath(filename)
    for line in open(fn, 'rU').xreadlines():
        data = cleanUpLine(line)
        if x == 0:
            x = 1
        else:
            t = string.split(data, '\t')
            try:
                hmdb_id, symbol, description, secondary_id, iupac, cas_number, \
                    chebi_id, pubchem_compound_id, Pathways, ProteinNames = t
            except Exception:
                ### Bad tab introduced from HMDB
                hmdb_id = t[0]; symbol = t[1]; ProteinNames = t[-1]
            symbol_hmdb_db[symbol] = hmdb_id
            hmdb_symbol_db[hmdb_id] = symbol
            ProteinNames = string.split(ProteinNames, ',')
            ### Add gene-metabolite interactions to databases
            for protein_name in ProteinNames:
                try:
                    for ensembl in symbol_ensembl_db[protein_name]:
                        z = InteractionInformation(hmdb_id, ensembl, 'HMDB', 'Metabolic')
                        ### This is the interaction direction that is appropriate
                        interaction_annotation_dbase[ensembl, hmdb_id] = z
                        try: interaction_db[hmdb_id][ensembl] = 1
                        except KeyError: db = {ensembl: 1}; interaction_db[hmdb_id] = db  ### weight of 1 (weights currently not supported)
                        try: interaction_db[ensembl][hmdb_id] = 1
                        except KeyError: db = {hmdb_id: 1}; interaction_db[ensembl] = db  ### weight of 1 (weights currently not supported)
                except Exception:
                    None
def listdirectory_nux(path, taille_min, ext_i, ext_e):
    l = glob.glob(path + '/*')
    for i in l:
        if os.path.isdir(i):
            #print "Directory: ", i
            listdirectory_nux(i, taille_min, ext_i, ext_e)
        else:
            if os.path.getsize(i) >= taille_min:
                chemin = string.split(i, "/")
                nom = chemin[len(chemin) - 1]
                del chemin[len(chemin) - 1]
                chemin = string.join(chemin, "/") + "/"
                ext = string.split(nom, ".")
                if len(ext) == 1:
                    ext = "."
                else:
                    ext = ext[len(ext) - 1]
                if (ext_i != {} and ext.lower() in ext_i) or \
                   (ext_i == {} and not ext.lower() in ext_e):
                    if not (chemin + nom in (os.getcwd() + "/arbow_d.py",
                                             os.getcwd() + "/arbow_s.py",
                                             os.getcwd() + "/arbow_d.txt",
                                             os.getcwd() + "/arbow_s.txt")):
                        liste.append(fichier(chemin, nom, os.path.getsize(i)))
                        if verbose:
                            print "File found:"
                            print "Path: ", chemin
                            print "Name: ", nom
                            print "Size: ", liste[len(liste) - 1].taille, " bytes"
                            print "--------------------"
                elif verbose:
                    if ext != '.':
                        print "File " + i + " with extension ." + ext
                    else:
                        print "File " + i + " without extension"
                    print "It will be ignored."
                    print "--------------------"
            elif verbose:
                print "File ", i, "of size ", format_taille(os.path.getsize(i)), "will be ignored!"
def main(argv):
    inputfile = ''
    outputfile = ''
    name = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:n:", ["ifile=", "ofile=", "name="])
    except getopt.GetoptError:
        print "plotEigenValues.py -i <inputfile> -o <outputfile> -n <name>"
        sys.exit()
    for opt, arg in opts:
        if opt == '-h':
            print "plotEigenValues.py -i <inputfile> -o <outputfile> -n <name>"
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):  # long option is registered as --ofile
            outputfile = arg
        elif opt in ("-n", "--name"):
            name = arg
    print "Input file is:", inputfile
    print "Output file is:", outputfile
    vals = []
    files = string.split(inputfile, ":")
    for f in files:
        val = []
        file = open(f, "rb")
        for line in file.xreadlines():
            val.append([float(v) for v in string.split(line, ",") if v != '\n'])
        vals.append(val)
    plot(vals, name)
def formatElement(el, path):
    if el.find("{http://www.w3.org/2001/XMLSchema}annotation") is not None:
        splitPath = string.split(path, '/')
        # set default component to "scenario", i.e. write to the "Scenario" worksheet below
        component = "scenario"
        printPath = string.replace(path, "/" + component, "")
        # update component if a known child element of scenario, i.e. write to another worksheet below
        if len(splitPath) > 2:
            if splitPath[2] in worksheets:
                component = splitPath[2]
                printPath = string.replace(printPath, "/" + component, "")
        sheet = worksheets[component][0]
        row = worksheets[component][1]
        sheet.write(row, 0, string.lstrip(printPath, "/"))
        annotation = el.find("{http://www.w3.org/2001/XMLSchema}annotation")
        docuElem = annotation.find("{http://www.w3.org/2001/XMLSchema}documentation")
        if docuElem is not None:
            docu = docuElem.text
        else:
            docu = "TODO"
        content = string.strip(docu)
        sheet.write(row, 1, normalizeNewlines(content), docu_xf)
        appInfoElem = annotation.find("{http://www.w3.org/2001/XMLSchema}appinfo")
        if appInfoElem is not None:
            appInfo = string.strip(appInfoElem.text)
        else:
            appInfo = "name:TODO"
        appInfoList = string.split(appInfo, ";")[0:-1]
        for keyValue in appInfoList:
            splitPair = string.split(keyValue, ":")
            colIndex = appinfoOrder.index(string.strip(str(splitPair[0]))) + 2
            sheet.write(row, colIndex, splitPair[1], docu_xf)
        # update next row to be written in that sheet
        worksheets[component][1] = worksheets[component][1] + 1
def lire_arguments(argv):
    taille_min = -1
    extensions_i = {}
    extensions_e = {"db"}  # ignore *.db files by default!
    if "win" in sys.platform.lower():
        output = os.getcwd() + "\\arbow_d.txt"
    else:
        output = os.getcwd() + "/arbow_d.txt"
    global verbose
    chemin = os.getcwd()
    i = 0
    while i < len(argv) - 1:
        if argv[i] == "-s" or argv[i] == "--size":
            taille_min = deformat_taille(argv[i + 1])
        elif argv[i] == "-ei" or argv[i] == "--extension_incluses":
            extensions_i = string.split(argv[i + 1].lower(), ",")
        elif argv[i] == "-ee" or argv[i] == "--extension_excluses":
            extensions_e = string.split(argv[i + 1].lower(), ",")
        elif argv[i] == "-o" or argv[i] == "--output":
            output = chemin_abs(argv[i + 1])
        elif argv[i] == "-c" or argv[i] == "--chemin":
            chemin = chemin_abs(argv[i + 1])
        elif argv[i + 1] == "-h" or argv[i + 1] == "--help" or argv[i] == "-h" or argv[i] == "--help":
            print "Usage: py arbow_s.py [-s/--size minimum size] [-ei/--extensions_incluses ext1,ext2,...] [-ee/--extensions_excluses ext1,ext2,...] [-c/--chemin relative or absolute path of the directory to scan (default: . )] [-v/--verbose]"
            print "To match files without an extension, use .\n"
            print "Example: py arbo.py -s 1M -ei py,txt,.,pyc -c C:\python27"
            print "Looks for files larger than 1 MB, with extension py, txt, pyc or no extension, in the directory C:\python27"
            exit(0)
        if ("-v" in argv) or ("--verbose" in argv):
            verbose = True
        i += 1
    return (taille_min, extensions_i, extensions_e, verbose, chemin, output)
def run(self):
    self.thread_running = True
    while self.thread_running:
        if self.thread_message == "get":
            try:
                url = "http://mulx.playonlinux.com/wine/linux-i386/LIST"
                req = urllib2.Request(url)
                handle = urllib2.urlopen(req)
                time.sleep(1)
                available_versions = handle.read()
                available_versions = string.split(available_versions, "\n")
                self.i = 0
                self.versions_ = []
                while self.i < len(available_versions) - 1:
                    informations = string.split(available_versions[self.i], ";")
                    version = informations[1]
                    package = informations[0]
                    sha1sum = informations[2]
                    if not os.path.exists(Variables.playonlinux_rep + "/WineVersions/" + version):
                        self.versions_.append(version)
                    self.i += 1
                self.versions_.reverse()
                self.versions = self.versions_[:]
                self.thread_message = "Ok"
            except:
                time.sleep(1)
                self.thread_message = "Err"
                self.versions = ["Wine packages website is unavailable"]
        else:
            time.sleep(0.2)
def convertVersionToInt(version):
    # Written by MulX in Bash, adapted to Python by Tinou.
    # Also handles dev versions -> the stable release may come out later,
    # and people using a dev version are still notified of an update.
    # e.g. 3.8.1 < 3.8.2-dev < 3.8.2
    print "Deprecated !"
    if "dev" in version or "beta" in version or "alpha" in version or "rc" in version:
        version = string.split(version, "-")
        version = version[0]
        versionDev = -5
    else:
        versionDev = 0
    version_s = string.split(version, ".")
    # some maths, part 1: cube the first field and multiply by 1000
    try:
        versionP1 = int(version_s[0]) * int(version_s[0]) * int(version_s[0]) * 1000
    except:
        versionP1 = 0
    try:
        versionP2 = int(version_s[1]) * int(version_s[1]) * 100
    except:
        versionP2 = 0
    try:
        versionP3 = int(version_s[2]) * 10
    except:
        versionP3 = 0
    return (versionDev + versionP1 + versionP2 + versionP3)
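# Worked check of the ordering claimed in the comments above:
#   3.8.1     -> 3**3*1000 + 8**2*100 + 1*10     = 33410
#   3.8.2-dev -> 3**3*1000 + 8**2*100 + 2*10 - 5 = 33415
#   3.8.2     -> 3**3*1000 + 8**2*100 + 2*10     = 33420
print convertVersionToInt("3.8.1") < convertVersionToInt("3.8.2-dev") \
    < convertVersionToInt("3.8.2")  # -> True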
def get_setup():
    global image_directory
    file_name = image_directory + "setup.dat"
    try:
        f = open(file_name, "r")
    except IOError:  # open() raises IOError, not ValueError, on a missing file
        sys.exit("ERROR: golf_setup must be run before golf")
    # find limb
    s = string.split(f.readline())
    if len(s) >= 3:
        if s[2] == "left" or s[2] == "right":
            limb = s[2]
        else:
            sys.exit("ERROR: invalid limb in %s" % file_name)
    else:
        sys.exit("ERROR: missing limb in %s" % file_name)
    # find distance to table
    s = string.split(f.readline())
    if len(s) >= 3:
        try:
            distance = float(s[2])
        except ValueError:
            sys.exit("ERROR: invalid distance in %s" % file_name)
    else:
        sys.exit("ERROR: missing distance in %s" % file_name)
    return limb, distance
def query(self, query_string, max_rows=1000):
    self._use_TM and self._register()
    desc = None
    result = ()
    # XXX deal with a typical mistake that the user appends
    # an unnecessary and rather harmful semicolon at the end.
    # Unfortunately, MySQLdb does not want to be graceful.
    if query_string[-1:] == ";":
        query_string = query_string[:-1]
    for qs in filter(None, map(strip, split(query_string, "\0"))):
        qtype = upper(split(qs, None, 1)[0])
        if qtype == "SELECT" and max_rows:
            qs = "%s LIMIT %d" % (qs, max_rows)
            r = 0
        c = self._query(qs)
        if desc is not None:
            if c and (c.describe() != desc):
                raise "Query Error", ("Multiple select schema are not allowed")
        if c:
            desc = c.describe()
            result = c.fetch_row(max_rows)
        else:
            desc = None
    if desc is None:
        return (), ()
    items = []
    func = items.append
    defs = self.defs
    for d in desc:
        item = {"name": d[0],
                "type": defs.get(d[1], "t"),
                "width": d[2],
                "null": d[6]}
        func(item)
    return items, result
def WhereIs(file, path=None, pathext=None, reject=[]):
    if path is None:
        try:
            path = os.environ['PATH']
        except KeyError:
            return None
    if is_String(path):
        path = string.split(path, os.pathsep)
    if pathext is None:
        try:
            pathext = os.environ['PATHEXT']
        except KeyError:
            pathext = '.COM;.EXE;.BAT;.CMD'
    if is_String(pathext):
        pathext = string.split(pathext, os.pathsep)
    for ext in pathext:
        if string.lower(ext) == string.lower(file[-len(ext):]):
            pathext = ['']
            break
    if not is_List(reject) and not is_Tuple(reject):
        reject = [reject]
    for dir in path:
        f = os.path.join(dir, file)
        for ext in pathext:
            fext = f + ext
            if os.path.isfile(fext):
                try:
                    reject.index(fext)
                except ValueError:
                    return os.path.normpath(fext)
                continue
    return None
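# Usage sketch (Windows-flavoured, since the PATHEXT fallback is DOS-style;
# the paths are hypothetical and is_String/is_List/is_Tuple come from the
# surrounding utility module): find cmd.exe on PATH, skipping a known-bad copy.
found = WhereIs('cmd', reject=[r'C:\old\cmd.exe'])
if found:
    print "using", found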
def decrypt(data, msgtype, servername, args):
    global decrypted
    hostmask, chanmsg = string.split(args, "PRIVMSG ", 1)
    channelname, message = string.split(chanmsg, " :", 1)
    if re.match(r'^\[\d{2}:\d{2}:\d{2}]\s', message):
        timestamp = message[:11]
        message = message[11:]
    else:
        timestamp = ''
    if channelname[0] == "#":
        username = channelname
    else:
        username, rest = string.split(hostmask, "!", 1)
        username = username[1:]
    nick = channelname.strip()
    if os.path.exists(weechat_dir + '/' + username + '.db'):
        a = Axolotl(nick, dbname=weechat_dir + '/' + username + '.db',
                    dbpassphrase=getPasswd(username))
        a.loadState(nick, username)
        decrypted = a.decrypt(a2b_base64(message))
        a.saveState()
        del a
        if decrypted == "":
            return args
        decrypted = ''.join(c for c in decrypted
                            if ord(c) > 31 or ord(c) == 9 or ord(c) == 2
                            or ord(c) == 3 or ord(c) == 15)
        return (hostmask + "PRIVMSG " + channelname + " :" + chr(3) + "04" +
                weechat.config_get_plugin("message_indicator") + chr(15) +
                timestamp + decrypted)
    else:
        return args
def getPrefix(shortcut):
    # Get prefix name from shortcut
    if os.path.isdir(os.environ["POL_USER_ROOT"] + "/shortcuts/" + shortcut):
        return ""
    fichier = open(os.environ["POL_USER_ROOT"] + "/shortcuts/" + shortcut, 'r').read()
    fichier = string.split(fichier, "\n")
    i = 0
    while i < len(fichier):
        if "export WINEPREFIX=" in fichier[i]:
            break
        i += 1
    try:
        prefix = string.split(fichier[i], "\"")
        prefix = prefix[1].replace("//", "/")
        prefix = string.split(prefix, "/")
        if os.environ["POL_OS"] == "Mac":
            index_of_dotPOL = prefix.index("PlayOnMac")
        else:
            index_of_dotPOL = prefix.index(".PlayOnLinux")
        prefix = prefix[index_of_dotPOL + 2]
    except:
        prefix = ""
    return prefix
def CheckFillFile(fillfile):
    myfile = open(fillfile, 'r').readlines()
    valid = True
    for r in myfile[1:]:
        temp = string.split(r)
        if len(temp) < 3:
            valid = False
            print "Bad line '%s' in fillfile '%s': not enough arguments" % (r, fillfile)
            return valid
        try:
            fill = string.atoi(temp[0])
        except:
            print "Bad line '%s' in fillfile '%s': can't read fill # as an integer" % (r, fillfile)
            valid = False
            return valid
        try:
            runs = string.strip(temp[2])
            runs = string.split(runs, ",")
            for run in runs:
                try:
                    string.atoi(run)
                except:
                    valid = False
                    print "Bad line '%s' in fillfile '%s': can't parse runs" % (r, fillfile)
                    return valid
        except:
            print "Bad line '%s' in fillfile '%s': can't ID runs" % (r, fillfile)
            valid = False
            return valid
    print "<CheckFillFile> fill file '%s' appears to be formatted correctly!" % fillfile
    return valid
def run(self):
    global STATUS
    while 1:
        data = ""
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((config.HDDTEMP_HOST, config.HDDTEMP_PORT))
            while 1:
                # give hddtemp time to send data
                d = sock.recv(1024)
                if d == "":
                    break
                data = data + d
        except socket.error, msg:
            print "Hddtemp_Thread could not connect to hddtemp daemon: %s" % msg
        else:
            if (len(data) < 3) or (data[0] != '|') or (data[len(data) - 1] != '|'):
                print "Hddtemp_Thread received malformed data: %s" % data
            else:
                # data = |/dev/hdb|Maxtor 6Y120P0|41|C||/dev/hdc|Maxtor 6Y120P0|30|C|
                data = data[1:len(data) - 1]
                # data = /dev/hdb|Maxtor 6Y120P0|41|C||/dev/hdc|Maxtor 6Y120P0|30|C
                list = string.split(data, '||')
                # list = ["/dev/hdb|Maxtor 6Y120P0|41|C", "/dev/hdc|Maxtor 6Y120P0|30|C"]
                for i in range(len(list)):
                    drive = string.split(list[i], '|')
                    STATUS[drive[0]] = drive[2]
            sock.close()
        time.sleep(config.HDDTEMP_POLL)
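# Standalone parse of the daemon's wire format, exactly as in the comments
# above (hypothetical sample data):
import string
raw = "|/dev/hdb|Maxtor 6Y120P0|41|C||/dev/hdc|Maxtor 6Y120P0|30|C|"
for rec in string.split(raw[1:-1], '||'):
    fields = string.split(rec, '|')
    print fields[0], "->", fields[2]  # /dev/hdb -> 41, /dev/hdc -> 30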
def __init__(self, filename, dictionary={}):
    self.indx = {}
    if '.gz' in filename:
        f = gzip.open(filename, 'rb')
    else:
        f = open(filename, 'r')
    for line in f:
        # if ("@ " in line and "%le" in line):  # FIX to take DPP %s
        if ("@ " not in line and "@" in line):
            line = replace(line, "@", "@ ")
        if ("@ " in line and "%" in line and "s" not in split(line)[2]):
            label = split(line)[1]
            try:
                exec "self." + label + "= " + str(float(split(replace(line, '"', ''))[3]))
            except:
                print "Problem parsing:", line
                print "Going to be parsed as string"
                try:
                    exec "self." + label + "= \"" + replace(split(line)[3], '"', '') + "\""
                except:
                    print "Problem persists, let's ignore it!"
        elif ("@ " in line and "s" in split(line)[2]):
            label = split(line)[1]
            exec "self." + label + "= \"" + split(replace(line, '"', ''))[3] + "\""
        if ("* " in line):
            alllabels = split(line)
            print "alllabels", len(alllabels)
            for j in range(1, len(alllabels)):
                exec "self." + alllabels[j] + "= []"
        if ("$ " in line):
            alltypes = split(line)
        if ("@" not in line and "*" not in line and "$ " not in line):
            values = split(line)
            for j in range(0, len(values)):
                if ("%hd" in alltypes[j + 1]):
                    exec "self." + alllabels[j + 1] + ".append(" + str(int(values[j])) + ")"
                if ("%le" in alltypes[j + 1]):
                    exec "self." + alllabels[j + 1] + ".append(" + str(float(values[j])) + ")"
                if ("s" in alltypes[j + 1]):
                    try:
                        exec "self." + alllabels[j + 1] + ".append(" + values[j] + ")"
                    except:
                        # to allow values with or without ""
                        exec "self." + alllabels[j + 1] + ".append(\"" + values[j] + "\")"
                if "NAME" == alllabels[j + 1]:
                    self.indx[replace(values[j], '"', '')] = len(self.NAME) - 1
                    self.indx[replace(values[j], '"', '').upper()] = len(self.NAME) - 1
                    self.indx[replace(values[j], '"', '').lower()] = len(self.NAME) - 1
    f.close()
    try:
        alllabels
        alltypes
    except:
        print "From Metaclass: Bad format or empty file ", filename
        print "Leaving Metaclass"
        exit()
    for j in range(1, len(alllabels)):
        if ("%le" in alltypes[j]) | ("%hd" in alltypes[j]):
            exec "self." + alllabels[j] + "= array(self." + alllabels[j] + ")"
    if len(dictionary) > 0:
        self.forknames(dictionary)
import string
import pylab

WORDLIST_FILENAME = "words.txt"

print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# line: string
line = inFile.readline()
# wordlist: list of strings
wordList = string.split(line)
print "  ", len(wordList), "words loaded."

letterOccurrence = {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0,
                    'f': 0, 'g': 0, 'h': 0, 'i': 0, 'j': 0,
                    'k': 0, 'l': 0, 'm': 0, 'n': 0, 'o': 0,
                    'p': 0, 'q': 0, 'r': 0, 's': 0, 't': 0,
                    'u': 0, 'v': 0, 'w': 0, 'x': 0, 'y': 0, 'z': 0}

# Fill in the dictionary with occurrences of each letter
for word in wordList:
    for letter in word:
        letterOccurrence[letter] += 1

# Due to the arbitrary ordering of a dictionary, a list [(letter, count)] is used instead
listForPlot = []
for c in letterOccurrence.keys():
    listForPlot.append((c, letterOccurrence[c]))
listForPlot.sort()

occurrences = []
for tup in listForPlot:
    occurrences.append(tup[1])  # collect counts in letter order for plotting
def finalize_options(self):
    # This method (and its pliant slaves, like 'finalize_unix()',
    # 'finalize_other()', and 'select_scheme()') is where the default
    # installation directories for modules, extension modules, and
    # anything else we care to install from a Python module
    # distribution are determined.  Thus, this code makes a pretty
    # important policy statement about how third-party stuff is added
    # to a Python installation!  Note that the actual work of
    # installation is done by the relatively simple 'install_*'
    # commands; they just take their orders from the installation
    # directory options determined here.

    # Check for errors/inconsistencies in the options; first, stuff
    # that's wrong on any platform.
    if ((self.prefix or self.exec_prefix or self.home) and
        (self.install_base or self.install_platbase)):
        raise DistutilsOptionError, \
              ("must supply either prefix/exec-prefix/home or " +
               "install-base/install-platbase -- not both")

    if self.home and (self.prefix or self.exec_prefix):
        raise DistutilsOptionError, \
              "must supply either home or prefix/exec-prefix -- not both"

    if self.user and (self.prefix or self.exec_prefix or self.home or
                      self.install_base or self.install_platbase):
        raise DistutilsOptionError("can't combine user with prefix, "
                                   "exec_prefix/home, or install_(plat)base")

    # Next, stuff that's wrong (or dubious) only on certain platforms.
    if os.name != "posix":
        if self.exec_prefix:
            self.warn("exec-prefix option ignored on this platform")
            self.exec_prefix = None

    # Now the interesting logic -- so interesting that we farm it out
    # to other methods.  The goal of these methods is to set the final
    # values for the install_{lib,scripts,data,...} options, using as
    # input a heady brew of prefix, exec_prefix, home, install_base,
    # install_platbase, user-supplied versions of
    # install_{purelib,platlib,lib,scripts,data,...}, and the
    # INSTALL_SCHEME dictionary above.  Phew!

    self.dump_dirs("pre-finalize_{unix,other}")

    if os.name == 'posix':
        self.finalize_unix()
    else:
        self.finalize_other()

    self.dump_dirs("post-finalize_{unix,other}()")

    # Expand configuration variables, tilde, etc. in self.install_base
    # and self.install_platbase -- that way, we can use $base or
    # $platbase in the other installation directories and not worry
    # about needing recursive variable expansion (shudder).

    py_version = (string.split(sys.version))[0]
    (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
    self.config_vars = {'dist_name': self.distribution.get_name(),
                        'dist_version': self.distribution.get_version(),
                        'dist_fullname': self.distribution.get_fullname(),
                        'py_version': py_version,
                        'py_version_short': py_version[0:3],
                        'py_version_nodot': py_version[0] + py_version[2],
                        'sys_prefix': prefix,
                        'prefix': prefix,
                        'sys_exec_prefix': exec_prefix,
                        'exec_prefix': exec_prefix,
                        'userbase': self.install_userbase,
                        'usersite': self.install_usersite,
                       }
    self.expand_basedirs()

    self.dump_dirs("post-expand_basedirs()")

    # Now define config vars for the base directories so we can expand
    # everything else.
    self.config_vars['base'] = self.install_base
    self.config_vars['platbase'] = self.install_platbase

    if DEBUG:
        from pprint import pprint
        print "config vars:"
        pprint(self.config_vars)

    # Expand "~" and configuration variables in the installation
    # directories.
    self.expand_dirs()

    self.dump_dirs("post-expand_dirs()")

    # Create directories in the home dir:
    if self.user:
        self.create_home_path()

    # Pick the actual directory to install all modules to: either
    # install_purelib or install_platlib, depending on whether this
    # module distribution is pure or not.  Of course, if the user
    # already specified install_lib, use their selection.
    if self.install_lib is None:
        if self.distribution.ext_modules:  # has extensions: non-pure
            self.install_lib = self.install_platlib
        else:
            self.install_lib = self.install_purelib

    # Convert directories from Unix /-separated syntax to the local
    # convention.
    self.convert_paths('lib', 'purelib', 'platlib',
                       'scripts', 'data', 'headers',
                       'userbase', 'usersite')

    # Well, we're not actually fully completely finalized yet: we still
    # have to deal with 'extra_path', which is the hack for allowing
    # non-packagized module distributions (hello, Numerical Python!) to
    # get their own directories.
    self.handle_extra_path()
    self.install_libbase = self.install_lib  # needed for .pth file
    self.install_lib = os.path.join(self.install_lib, self.extra_dirs)

    # If a new root directory was supplied, make all the installation
    # dirs relative to it.
    if self.root is not None:
        self.change_roots('libbase', 'lib', 'purelib', 'platlib',
                          'scripts', 'data', 'headers')

    self.dump_dirs("after prepending root")

    # Find out the build directories, ie. where to install from.
    self.set_undefined_options('build',
                               ('build_base', 'build_base'),
                               ('build_lib', 'build_lib'))
def build_extension(self, ext):
    sources = ext.sources
    if sources is None or type(sources) not in (ListType, TupleType):
        raise DistutilsSetupError, \
              ("in 'ext_modules' option (extension '%s'), " +
               "'sources' must be present and must be " +
               "a list of source filenames") % ext.name
    sources = list(sources)

    fullname = self.get_ext_fullname(ext.name)
    if self.inplace:
        # ignore build-lib -- put the compiled extension into
        # the source tree along with pure Python modules
        modpath = string.split(fullname, '.')
        package = string.join(modpath[0:-1], '.')
        base = modpath[-1]

        build_py = self.get_finalized_command('build_py')
        package_dir = build_py.get_package_dir(package)
        ext_filename = os.path.join(package_dir,
                                    self.get_ext_filename(base))
    else:
        ext_filename = os.path.join(self.build_lib,
                                    self.get_ext_filename(fullname))
    depends = sources + ext.depends
    if not (self.force or newer_group(depends, ext_filename, 'newer')):
        log.debug("skipping '%s' extension (up-to-date)", ext.name)
        return
    else:
        log.info("building '%s' extension", ext.name)

    # First, scan the sources for SWIG definition files (.i), run
    # SWIG on 'em to create .c files, and modify the sources list
    # accordingly.
    sources = self.swig_sources(sources)

    # Next, compile the source code to object files.

    # XXX not honouring 'define_macros' or 'undef_macros' -- the
    # CCompiler API needs to change to accommodate this, and I
    # want to do one thing at a time!

    # Two possible sources for extra compiler arguments:
    #   - 'extra_compile_args' in Extension object
    #   - CFLAGS environment variable (not particularly
    #     elegant, but people seem to expect it and I
    #     guess it's useful)
    # The environment variable should take precedence, and
    # any sensible compiler will give precedence to later
    # command line args.  Hence we combine them in order:
    extra_args = ext.extra_compile_args or []

    macros = ext.define_macros[:]
    for undef in ext.undef_macros:
        macros.append((undef,))

    objects = self.compiler.compile(sources,
                                    output_dir=self.build_temp,
                                    macros=macros,
                                    include_dirs=ext.include_dirs,
                                    debug=self.debug,
                                    extra_postargs=extra_args,
                                    depends=ext.depends)

    # XXX -- this is a Vile HACK!
    #
    # The setup.py script for Python on Unix needs to be able to
    # get this list so it can perform all the clean up needed to
    # avoid keeping object files around when cleaning out a failed
    # build of an extension module.  Since Distutils does not
    # track dependencies, we have to get rid of intermediates to
    # ensure all the intermediates will be properly re-built.
    #
    self._built_objects = objects[:]

    # Now link the object files together into a "shared object" --
    # of course, first we have to figure out all the other things
    # that go into the mix.
    if ext.extra_objects:
        objects.extend(ext.extra_objects)
    extra_args = ext.extra_link_args or []

    # Detect target language, if not provided
    language = ext.language or self.compiler.detect_language(sources)

    self.compiler.link_shared_object(
        objects, ext_filename,
        libraries=self.get_libraries(ext),
        library_dirs=ext.library_dirs,
        runtime_library_dirs=ext.runtime_library_dirs,
        extra_postargs=extra_args,
        export_symbols=self.get_export_symbols(ext),
        debug=self.debug,
        build_temp=self.build_temp,
        target_lang=language)
def finalize_options(self):
    from distutils import sysconfig

    self.set_undefined_options('build',
                               ('build_lib', 'build_lib'),
                               ('build_temp', 'build_temp'),
                               ('compiler', 'compiler'),
                               ('debug', 'debug'),
                               ('force', 'force'))

    if self.package is None:
        self.package = self.distribution.ext_package

    self.extensions = self.distribution.ext_modules

    # Make sure Python's include directories (for Python.h, pyconfig.h,
    # etc.) are in the include search path.
    py_include = sysconfig.get_python_inc()
    plat_py_include = sysconfig.get_python_inc(plat_specific=1)
    if self.include_dirs is None:
        self.include_dirs = self.distribution.include_dirs or []
    if type(self.include_dirs) is StringType:
        self.include_dirs = string.split(self.include_dirs, os.pathsep)

    # Put the Python "system" include dir at the end, so that
    # any local include dirs take precedence.
    self.include_dirs.append(py_include)
    if plat_py_include != py_include:
        self.include_dirs.append(plat_py_include)

    if type(self.libraries) is StringType:
        self.libraries = [self.libraries]

    # Life is easier if we're not forever checking for None, so
    # simplify these options to empty lists if unset
    if self.libraries is None:
        self.libraries = []
    if self.library_dirs is None:
        self.library_dirs = []
    elif type(self.library_dirs) is StringType:
        self.library_dirs = string.split(self.library_dirs, os.pathsep)

    if self.rpath is None:
        self.rpath = []
    elif type(self.rpath) is StringType:
        self.rpath = string.split(self.rpath, os.pathsep)

    # for extensions under windows use different directories
    # for Release and Debug builds.
    # also Python's library directory must be appended to library_dirs
    if os.name == 'nt':
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
        if self.debug:
            self.build_temp = os.path.join(self.build_temp, "Debug")
        else:
            self.build_temp = os.path.join(self.build_temp, "Release")

        # Append the source distribution include and library directories,
        # this allows distutils on windows to work in the source tree
        self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'PCBuild'))

    # OS/2 (EMX) doesn't support Debug vs Release builds, but has the
    # import libraries in its "Config" subdirectory
    if os.name == 'os2':
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))

    # for extensions under Cygwin and AtheOS Python's library directory must be
    # appended to library_dirs
    if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
        if string.find(sys.executable, sys.exec_prefix) != -1:
            # building third party extensions
            self.library_dirs.append(os.path.join(sys.prefix, "lib",
                                                  "python" + get_python_version(),
                                                  "config"))
        else:
            # building python standard extensions
            self.library_dirs.append('.')

    # The argument parsing will result in self.define being a string, but
    # it has to be a list of 2-tuples.  All the preprocessor symbols
    # specified by the 'define' option will be set to '1'.  Multiple
    # symbols can be separated with commas.
    if self.define:
        defines = string.split(self.define, ',')
        self.define = map(lambda symbol: (symbol, '1'), defines)

    # The option for macros to undefine is also a string from the
    # option parsing, but has to be a list.  Multiple symbols can also
    # be separated with commas here.
    if self.undef:
        self.undef = string.split(self.undef, ',')
#!/usr/bin/env python
# portable serial port access with python
# this is a wrapper module for different platform implementations
#
# (C)2001-2002 Chris Liechti <*****@*****.**>
# this is distributed under a free software license, see license.txt

import sys, os, string

VERSION = string.split("$Revision: 1.2 $")[1]  # extract CVS version

# choose an implementation, depending on os
if os.name == 'nt':  # sys.platform == 'win32':
    from serialwin32 import *
elif os.name == 'posix':
    from serialposix import *
elif os.name == 'java':
    from serialjava import *
else:
    raise "Sorry no implementation for your platform available."
    # no "mac" implementation. someone wants to write it? i have no access to a mac.
def clean_rogue_characters(string):
    # note: the parameter deliberately shadows the `string` module; only
    # str methods of the argument are used here
    exclude = ['\\', "\'", '"']
    string = ''.join(string.split('\\n'))  # drop literal backslash-n sequences
    string = ''.join(ch for ch in string if ch not in exclude)
    return string
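# Usage sketch (hypothetical input): literal \n pairs are removed first,
# then backslashes and quote characters are stripped.
s = 'say \\"hi\\"\\nplease'           # raw content: say \"hi\"\nplease
print clean_rogue_characters(s)        # -> say hiplease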
def parse(self, parsedom=True, parseseq=True, parsecomplex=True):  ### HPRD Parsing method
    '''HPRD Parsing method. Generates Mappings, HPRD data dictionary, Domain dictionary & Sequences.'''
    try:
        ### ~ Parse HPRD Mappings onto other database IDs from HPRD_ID_MAPPINGS.txt ~~~ ###
        self.dict['HPRD'] = {}
        self.dict['Mapping'] = {}
        hprd = self.loadFromFile('%sHPRD_ID_MAPPINGS.txt' % self.info['HPRDPath'],
                                 v=1, checkpath=True, chomplines=True)
        hx = float(len(hprd))
        while hprd:
            entry = hprd.pop(0)
            px = 100.0 * (hx - len(hprd)) / hx
            self.log.printLog('\r#HPRD', 'Parsing HPRD_ID_MAPPINGS: %.1f%%' % px,
                              newline=False, log=False)
            data = string.split(entry)
            ## Check ##
            if len(data) < 7:
                continue
            if self.dict['HPRD'].has_key(data[0]):
                self.log.errorLog('HPRD ID %s duplicated! Aaargh!' % data[0], printerror=False)
            ## Update ##
            self.dict['HPRD'][data[0].upper()] = {'gene': data[1].upper(), 'gb': data[3],
                                                  'entrez': data[4], 'omim': data[5],
                                                  'sp': data[6].upper(),
                                                  'desc': string.join(data[7:])}
            for i in [1, 3, 6]:
                self.dict['Mapping'][data[i].upper()] = data[0]
        self.log.printLog('\r#HPRD', 'Parsing HPRD_ID_MAPPINGS complete!')

        ### ~ Parse HPRD Domain Mappings from PROTEIN_Architecture.txt ~~~ ###
        self.dict['Domains'] = {}
        self.dict['DomainSource'] = {}
        if parsedom:
            hprd = self.loadFromFile('%sPROTEIN_Architecture.txt' % self.info['HPRDPath'],
                                     v=1, checkpath=True, chomplines=True)
            hx = float(len(hprd))
            while hprd:
                entry = hprd.pop(0)
                px = 100.0 * (hx - len(hprd)) / hx
                self.log.printLog('\r#HPRD', 'Parsing PROTEIN_Architecture: %.1f%%' % px,
                                  newline=False, log=False)
                data = string.split(entry)
                ## Check ##
                if len(data) < 9:
                    continue
                (hid, domain, type, source) = (data[0], data[4], data[5], data[8])
                if type != 'Domain':
                    continue
                ## Update ##
                if domain not in self.dict['Domains']:
                    self.dict['Domains'][domain] = [hid]
                elif hid not in self.dict['Domains'][domain]:
                    self.dict['Domains'][domain].append(hid)
                if domain not in self.dict['DomainSource']:
                    self.dict['DomainSource'][domain] = [source]
                elif source not in self.dict['DomainSource'][domain]:
                    self.dict['DomainSource'][domain].append(source)
            self.log.printLog('\r#HPRD', 'Parsing PROTEIN_Architecture complete!')

        ### ~ Make SeqList from PROTEIN_SEQUENCES.txt ~~~ ###
        if parseseq:
            scmd = self.cmd_list + ['autoload=T', 'gnspacc=F',
                                    'seqin=%sPROTEIN_SEQUENCES.txt' % self.info['HPRDPath'],
                                    'autofilter=F', 'accnr=F', 'seqnr=F']
            self.obj['SeqList'] = rje_seq.SeqList(self.log, scmd)
            self.obj['SeqList'].info['Name'] = self.info['OutDir'] + 'hprd.fas'
            sx = 0.0
            for seq in self.obj['SeqList'].seq[0:]:  # seq.info['ID'] should be the HPRD ID
                ## Initial processing of sequence. Only keep if AllIso or isoform 1 ##
                self.log.printLog('\r#SEQ', 'Processing HPRD Sequences: %.1f%%' %
                                  (sx / self.obj['SeqList'].seqNum()),
                                  newline=False, log=False)
                iso = 'X'
                h = seq.info['ID']
                try:
                    iso = rje.matchExp('^\d+\|\d+_(\d+)\|', seq.info['Name'])[0]
                except:
                    self.deBug(seq.info['Name'])
                try:
                    if h not in self.dict['HPRD']:
                        self.printLog('\r#ERR', 'Missing from HPRD_ID_MAPPINGS?: %s' % seq.info['Name'])
                        data = string.split(seq.info['Name'], '|')
                        self.dict['HPRD'][h] = {'gene': '-', 'gb': data[2], 'entrez': '',
                                                'omim': '', 'sp': '',
                                                'desc': string.join(data[3:], '|')}
                    if not self.opt['AllIso'] and self.dict['HPRD'][h].has_key('Seq') and iso != '1':
                        self.obj['SeqList'].seq.remove(seq)
                        continue
                    sx += 100.0
                    seq.setInfo({'Gene': self.dict['HPRD'][h]['gene'],
                                 'Description': self.dict['HPRD'][h]['desc'] +
                                 ' [Gene:%s HPRD:%s; gb:%s; sp:%s]' %
                                 (self.dict['HPRD'][h]['gene'], h,
                                  self.dict['HPRD'][h]['gb'], self.dict['HPRD'][h]['sp']),
                                 'AccNum': self.dict['HPRD'][h]['sp']})
                    ## AllIso options ##
                    if self.opt['AllIso']:
                        if 'Seq' not in self.dict['HPRD'][h]:
                            self.dict['HPRD'][h]['Seq'] = [seq]
                        else:
                            self.dict['HPRD'][h]['Seq'].append(seq)
                        seq.setInfo({'AccNum': '%s-%s' % (h, iso)})
                    else:
                        self.dict['HPRD'][h]['Seq'] = seq
                    ## Finish formatting ##
                    if seq.info['Gene'] == '-':
                        self.dict['HPRD'][h]['gene'] = seq.info['Gene'] = 'HPRD' + h
                    if seq.info['AccNum'] == '-':
                        seq.info['AccNum'] = self.dict['HPRD'][h]['gb']
                    seq.info['ID'] = '%s_HUMAN' % seq.info['Gene']
                    seq.info['Name'] = '%s__%s %s' % (seq.info['ID'], seq.info['AccNum'],
                                                      seq.info['Description'])
                except:
                    self.errorLog('Protein Parse Error (%s)' % seq.info['Name'])
            self.log.printLog('\r#SEQ', 'Processing HPRD Sequences complete!')

        ### ~ Make PPI Data from BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt ~~~ ###
        missing = []
        self.dict['PPI'] = {}
        ppi = self.loadFromFile('%sBINARY_PROTEIN_PROTEIN_INTERACTIONS.txt' % self.info['HPRDPath'],
                                v=1, checkpath=True, chomplines=True)
        hx = float(len(ppi))
        ix = 0
        while ppi:
            entry = ppi.pop(0)
            px = 100.0 * (hx - len(ppi)) / hx
            self.log.printLog('\r#PPI', 'Parsing BINARY_PROTEIN_PROTEIN_INTERACTIONS: %.1f%%' % px,
                              newline=False, log=False)
            data = string.split(entry, '\t')
            ## Check ##
            if len(data) < 7:
                continue
            types = string.split(data[6], ';')
            if not types:
                types = ['unknown']
            for type in types[0:]:
                if type in self.list['BadType'] or \
                   (self.list['PPIType'] and type not in self.list['PPIType']):
                    types.remove(type)
            if not types:
                continue
            ix += 1
            ## Update ##
            (p1, p2) = (data[1].upper(), data[4].upper())
            if p1 not in self.dict['HPRD']:
                if p1 not in missing:
                    missing.append(p1)
                    self.log.printLog('#ERR', 'HPRD ID "%s" missing from HPRD_ID_MAPPINGS!' % p1,
                                      screen=False)
                continue
            if p2 not in self.dict['HPRD']:
                if p2 not in missing:
                    missing.append(p2)
                    self.log.printLog('#ERR', 'HPRD ID "%s" missing from HPRD_ID_MAPPINGS!' % p2,
                                      screen=False)
                continue
            if not self.dict['PPI'].has_key(p1):
                self.dict['PPI'][p1] = []
            if p2 not in self.dict['PPI'][p1]:
                self.dict['PPI'][p1].append(p2)
            if not self.dict['PPI'].has_key(p2):
                self.dict['PPI'][p2] = []
            if p1 not in self.dict['PPI'][p2]:
                self.dict['PPI'][p2].append(p1)
            if p1 not in self.dict['Evidence']:
                self.dict['Evidence'][p1] = {}
            if p2 not in self.dict['Evidence'][p1]:
                self.dict['Evidence'][p1][p2] = []
            for type in types:
                if type not in self.dict['Evidence'][p1][p2]:
                    self.dict['Evidence'][p1][p2].append(type)
        self.log.printLog('\r#PPI', 'Parsing BINARY_PROTEIN_PROTEIN_INTERACTIONS complete!')

        ### ~ Parse protein Complex data from PROTEIN_COMPLEXES.txt ~~~ ###
        self.dict['Complex'] = {}
        ppi = self.loadFromFile('%sPROTEIN_COMPLEXES.txt' % self.info['HPRDPath'],
                                v=1, checkpath=True, chomplines=True)
        hx = float(len(ppi))
        while ppi:
            entry = ppi.pop(0)
            px = 100.0 * (hx - len(ppi)) / hx
            self.log.printLog('\r#PPI', 'Parsing PROTEIN_COMPLEXES: %.1f%%' % px,
                              newline=False, log=False)
            data = string.split(entry)
            ## Check ##
            if len(data) < 5:
                continue
            ## Update ##
            (complex, hprd) = (data[0], data[1])
            if hprd == 'None':
                continue
            if not self.dict['Complex'].has_key(complex):
                self.dict['Complex'][complex] = []
            if hprd not in self.dict['Complex'][complex]:
                self.dict['Complex'][complex].append(hprd)
        self.log.printLog('\r#PPI', 'Parsing PROTEIN_COMPLEXES complete!')

        ### ~ Update PPI from protein Complex data if appropriate ~~~ ###
        type = 'complex'
        if type not in self.list['BadType'] and \
           (not self.list['PPIType'] or type in self.list['PPIType']):
            cx = 0.0
            for complex in self.dict['Complex']:
                self.log.printLog('\r#PPI', 'Adding protein complex data to PPI: %.1f%%' %
                                  (cx / len(self.dict['Complex'])), newline=False, log=False)
                cx += 100.0
                for p1 in self.dict['Complex'][complex]:
                    for p2 in self.dict['Complex'][complex]:
                        if not self.dict['PPI'].has_key(p1):
                            self.dict['PPI'][p1] = []
                        if p2 not in self.dict['PPI'][p1]:
                            self.dict['PPI'][p1].append(p2)
                        if p1 not in self.dict['Evidence']:
                            self.dict['Evidence'][p1] = {}
                        if p2 not in self.dict['Evidence'][p1]:
                            self.dict['Evidence'][p1][p2] = []
                        if type not in self.dict['Evidence'][p1][p2]:
                            self.dict['Evidence'][p1][p2].append(type)
            self.log.printLog('\r#PPI', 'Added protein complex data to PPI for %s complexes' %
                              rje.integerString(len(self.dict['Complex'])))
        ptxt = '%s proteins; %s interactions' % (rje.integerString(len(self.dict['PPI'])),
                                                 rje.integerString(ix))
        self.log.printLog('\r#PPI', 'Parsing interactions complete: %s.' % ptxt)
        if missing:
            open('HPRD.missing.txt', 'w').write(string.join(missing, '\n'))
    except:
        self.log.errorLog('Error in HPRD.parse()', printerror=True, quitchoice=False)
        raise
import string
__version__ = string.split('$Revision: 1.1 $')[1]
__date__ = string.join(string.split('$Date: 2006/01/20 17:25:15 $')[1:3], ' ')
__author__ = ('C. Donour Sizemore <*****@*****.**>',)

import gnuplot
import arch
import csv

if arch.WXVERSION:
    import wxversion
    wxversion.select("2.6")
import wx
from wxPython.lib.mixins.listctrl import wxListCtrlAutoWidthMixin
#from wxPython.lib.filebrowsebutton import FileBrowseButton
#from wx.lib.intctrl import IntCtrl


def histogram(data, boxsize=10):
    data.sort()
    data = map(lambda x: x - x % boxsize, data)
    points = []
    for i in range(data[0], data[-1], boxsize):
        points.append([i, 0])
    points.append([data[-1], 0])
    for i in data:
        idx = int((i - data[0]) / boxsize)
        points[idx][1] = points[idx][1] + 1
    return points  # list of [bin_start, count] pairs
def addToGeneCards(self, cards, addcards=True):  ### Reconfigures and adds parsed HPRD data to GeneCards
    '''
    Reconfigures and adds parsed HPRD data to GeneCards.
    >> cards:rje_genecards.GeneCards object
    >> addcards:boolean [True] = whether to add genes from HPRD to the GeneCards dictionary
    '''
    ### Add relevant headers for future output ###
    for h in ['HPRD', 'OMIM', 'EntrezCheck', 'Desc']:
        if h not in cards.list['Headers']:
            cards.list['Headers'].append(h)
        for gene in cards.list['Genes']:
            if h not in cards.dict['GeneCard'][gene]:
                cards.dict['GeneCard'][gene][h] = ''
    ### Add to GeneCards ###
    (hx, htot) = (0.0, len(self.dict['HPRD']))
    for hprd in self.dict['HPRD']:
        self.log.printLog('\r#HPRD', 'Adding HPRD to GeneCards: %.1f%%' % (hx / htot),
                          newline=False, log=False)
        hx += 100.0
        self.deBug(self.dict['HPRD'][hprd])
        gene = self.dict['HPRD'][hprd]['gene']
        omim = self.dict['HPRD'][hprd]['omim']
        entrez = self.dict['HPRD'][hprd]['entrez']
        if gene in cards.list['Genes']:
            if cards.dict['GeneCard'][gene]['HPRD'] == '':
                cards.dict['GeneCard'][gene]['HPRD'] = hprd
            elif hprd not in string.split(cards.dict['GeneCard'][gene]['HPRD'], ','):
                cards.dict['GeneCard'][gene]['HPRD'] = string.join(
                    string.split(cards.dict['GeneCard'][gene]['HPRD'], ',') + [hprd], ',')
            if cards.dict['GeneCard'][gene]['OMIM'] == '':
                cards.dict['GeneCard'][gene]['OMIM'] = omim
            elif omim not in string.split(cards.dict['GeneCard'][gene]['OMIM'], ','):
                cards.dict['GeneCard'][gene]['OMIM'] = string.join(
                    string.split(cards.dict['GeneCard'][gene]['OMIM'], ',') + [omim], ',')
            if cards.dict['GeneCard'][gene]['EntrezCheck'] == '':
                cards.dict['GeneCard'][gene]['EntrezCheck'] = entrez
            elif entrez not in string.split(cards.dict['GeneCard'][gene]['EntrezCheck'], ','):
                cards.dict['GeneCard'][gene]['EntrezCheck'] = string.join(
                    string.split(cards.dict['GeneCard'][gene]['EntrezCheck'], ',') + [entrez], ',')
        elif addcards:
            if gene == '-':
                gene = 'HPRD' + hprd
            cards.list['Genes'].append(gene)
            cards.dict['GeneCard'][gene] = {'Symbol': '!FAILED!', 'HPRD': hprd,
                                            'OMIM': omim, 'EntrezCheck': entrez,
                                            'Desc': self.dict['HPRD'][hprd]['desc']}
    self.log.printLog('\r#HPRD', 'Added %s HPRD genes to GeneCards.' %
                      (rje.integerString(htot)))
## You have been given an array A of N integers, all guaranteed to be unique.
## For each position i in A you need to find the position that element would
## occupy if the array were sorted, and print the resulting solution for each i.

n = int(input())
line = input()  # renamed from `string` to avoid shadowing the stdlib module
array = line.split(" ")
num_array = []
num_array.append(int(array[0]))
for i in range(1, n):
    temp = int(array[i])
    free = True
    for j in range(0, i):
        if temp < num_array[j] and free:
            num_array.insert(j, temp)  # insertion sort: place before first larger value
            free = False
        elif j == (i - 1) and free:
            num_array.append(temp)
print(num_array)
for i in range(0, n):
    temp = int(array[i])
    looking = True
    for j in range(0, n):
        if looking and temp == num_array[j]:
            print((j + 1), end=' ')
            # note: the specs give all nums > 0, so zeroing out allows
            # non-unique nums in the array
            num_array[j] = 0
            looking = False
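## A simpler equivalent (a sketch, not the original solution): sort
## (value, original index) pairs once, then emit each element's 1-based
## rank in input order.
def sorted_positions(nums):
    order = sorted(range(len(nums)), key=lambda k: nums[k])  # indices in sorted order
    rank = [0] * len(nums)
    for pos, idx in enumerate(order):
        rank[idx] = pos + 1
    return rank

## sorted_positions([5, 1, 3]) -> [3, 1, 2]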
def constructUserLFNs(jobID, vo, owner, outputFiles, outputPath):
    """ This method is used to supplant the standard job wrapper output data
    policy for ILC. The initial convention adopted for user output files is
    the following:

    If outputpath is not defined:
      * <vo>/user/<initial e.g. s>/<owner e.g. sposs>/<yearMonth e.g. 2010_02>/<subdir>/<fileName>
    Otherwise:
      * <vo>/user/<initial e.g. s>/<owner e.g. sposs>/<outputPath>/<fileName>

    :param int jobID: the jobID
    :param string vo: the vo of the owner's proxy
    :param string owner: the username
    :param list outputFiles: the list of output files found for the job
    :param string outputPath: the output path defined for the job
    :returns: S_OK with list of output file lfns
    """
    initial = owner[:1]
    subdir = str(jobID / 1000)
    timeTup = datetime.date.today().timetuple()
    yearMonth = '%s_%s' % (timeTup[0], string.zfill(str(timeTup[1]), 2))
    outputLFNs = {}
    if not vo:
        res = getVOfromProxyGroup()
        if not res['OK']:
            gLogger.error('Could not get VO from CS, assuming ilc')
            vo = 'ilc'
        else:
            vo = res['Value']
    ops = Operations(vo=vo)
    lfn_prefix = ops.getValue("LFNUserPrefix", "user")
    # Strip out any leading or trailing slashes but allow fine structure.
    if outputPath:
        outputPathList = string.split(outputPath, os.sep)
        newPath = []
        for i in outputPathList:
            if i:
                newPath.append(i)
        outputPath = string.join(newPath, os.sep)
    if not isinstance(outputFiles, list):
        outputFiles = [outputFiles]
    for outputFile in outputFiles:
        # Strip out any fine structure in the output file specified by the user;
        # restrict to output file names. The output path field can be used to
        # describe this.
        outputFile = outputFile.replace('LFN:', '')
        lfn = ''
        if outputPath:
            lfn = os.sep + os.path.join(vo, lfn_prefix, initial, owner,
                                        outputPath + os.sep + os.path.basename(outputFile))
        else:
            lfn = os.sep + os.path.join(vo, lfn_prefix, initial, owner, yearMonth,
                                        subdir, str(jobID)) + os.sep + os.path.basename(outputFile)
        outputLFNs[outputFile] = lfn
    outputData = outputLFNs.values()
    if outputData:
        gLogger.info('Created the following output data LFN(s):\n%s' %
                     (string.join(outputData, '\n')))
    else:
        gLogger.info('No output LFN(s) constructed')
    return S_OK(outputData)
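# Illustrative only (values follow the docstring's own example, assuming the
# default "user" LFN prefix and a February 2010 run date; subdir = 12001/1000):
#   constructUserLFNs(12001, 'ilc', 'sposs', ['histos.root'], '')
#     -> S_OK(['/ilc/user/s/sposs/2010_02/12/12001/histos.root'])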
def build3LevelsTree(self, atomlines):
    """Function to build a 3 levels hierarchy Molecule-substructure-atoms."""
    self.mol = Protein()
    self.mol.allAtoms = AtomSet()
    self.mol.atmNum = {}
    self.mol.parser = self
    if self.mol.name == 'NoName':
        self.mol.name = os.path.basename(os.path.splitext(self.filename)[0])
    self.mol.children = ResidueSet([])
    self.mol.childrenName = 'residues'
    self.mol.childrenSetClass = ResidueSet
    self.mol.elementType = Residue
    self.mol.curRes = Residue()
    self.mol.curRes.hasCA = 0
    self.mol.curRes.hasO = 0
    self.mol.levels = [Protein, Residue, Atom]
    for atmline in atomlines:
        if len(atmline) >= 10:
            status = string.split(atmline[9], '|')
        else:
            status = None
        resName = atmline[7][:3]
        resSeq = atmline[7][3:]
        if resSeq != self.mol.curRes.number or \
           resName != self.mol.curRes.type:
            # check if this residue already exists
            na = string.strip(resName) + string.strip(resSeq)
            res = self.mol.get(na)
            if res:
                self.mol.curRes = res[0]
            else:
                self.mol.curRes = Residue(resName, resSeq, '',
                                          self.mol, top=self.mol)
        name = atmline[1]
        if name == 'CA':
            self.mol.curRes.hasCA = 1
        if name == 'O':
            self.mol.curRes.hasO = 2
        atom = Atom(name, self.mol.curRes, top=self.mol,
                    chemicalElement=string.split(atmline[5], '.')[0])
        #atom.element = atmline[5][0]
        atom.element = atom.chemElem
        atom.number = int(atmline[0])
        self.mol.atmNum[atom.number] = atom
        atom._coords = [[float(atmline[2]), float(atmline[3]), float(atmline[4])]]
        atom._charges['mol2'] = float(atmline[8])
        atom.chargeSet = 'mol2'  # charge-set key, as in build4LevelsTree
        # atom.conformation = 0
        atom.hetatm = 0
        # Add a data member containing a list of strings describing
        # the Sybyl status bits of the atoms.
        atom.status = status
        # add altname so buildBondsByDist doesn't croak
        atom.altname = None
        self.mol.allAtoms.append(atom)
    self.mol.residues = self.mol.children
    assert hasattr(self.mol, 'chains')
    delattr(self.mol, 'chains')
    delattr(self.mol, 'curRes')
def wbem_request(url, data, creds, headers=None, debug=0, x509=None,
                 verify_callback=None, ca_certs=None,
                 no_verification=False):
    """Send request over HTTP.

    Send XML data over HTTP to the specified url. Return the
    response in XML.  Uses Python's built-in http_client.  x509 may be a
    dictionary containing the location of the SSL certificate and key
    files.
    """

    if headers is None:
        headers = []

    host, port, use_ssl = pywbem.cim_http.parse_url(url)
    key_file = None
    cert_file = None
    if use_ssl and x509 is not None:
        cert_file = x509.get('cert_file')
        key_file = x509.get('key_file')

    numTries = 0
    localAuthHeader = None
    tryLimit = 5

    if isinstance(data, six.text_type):
        data = data.encode('utf-8')
    data = '<?xml version="1.0" encoding="utf-8" ?>\n' + data

    if not no_verification and ca_certs is None:
        ca_certs = get_default_ca_certs()
    elif no_verification:
        ca_certs = None

    if use_ssl:
        h = HTTPSConnection(host,
                            port=port,
                            key_file=key_file,
                            cert_file=cert_file,
                            ca_certs=ca_certs,
                            no_verification=no_verification)

    locallogin = None
    while numTries < tryLimit:
        numTries = numTries + 1

        h.putrequest('POST', '/cimom')
        h.putheader('Content-type', 'application/xml; charset="utf-8"')
        h.putheader('Content-length', len(data))
        if localAuthHeader is not None:
            h.putheader(*localAuthHeader)
        elif creds is not None:
            h.putheader(
                'Authorization', 'Basic %s' %
                base64.encodestring('%s:%s' % (creds[0], creds[1]))
                .replace('\n', ''))
        elif locallogin is not None:
            h.putheader('PegasusAuthorization', 'Local "%s"' % locallogin)

        for hdr in headers:
            if isinstance(hdr, six.text_type):
                hdr = hdr.encode('utf-8')
            s = map(lambda x: string.strip(x), string.split(hdr, ":", 1))
            h.putheader(urllib.parse.quote(s[0]), urllib.parse.quote(s[1]))

        try:
            h.endheaders()
            try:
                h.send(data)
            except socket.error as arg:
                if arg[0] != 104 and arg[0] != 32:
                    raise

            response = h.getresponse()
            body = response.read()
            if response.status != 200:
                raise pywbem.cim_http.Error('HTTP error')

        except http_client.BadStatusLine as arg:
            msg = (_("Bad Status line returned: %(arg)s.") % {'arg': arg})
            raise pywbem.cim_http.Error(msg)
        except socket.sslerror as arg:
            msg = (_("SSL error: %(arg)s.") % {'arg': arg})
            raise pywbem.cim_http.Error(msg)
        except socket.error as arg:
            msg = (_("Socket error: %(arg)s.") % {'arg': arg})
            raise pywbem.cim_http.Error(msg)

        break

    return body
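# The per-header split above passes maxsplit=1 so that header values
# containing colons survive intact; a quick illustration with a
# hypothetical header string:
import string

hdr = "CIMMethod: ModifyInstance: extra"
s = map(lambda x: string.strip(x), string.split(hdr, ":", 1))
print s  # ['CIMMethod', 'ModifyInstance: extra']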
# The following two branches are for 1.5.2 compatibility. if sys.platform == 'aix4': # what about AIX 3.x ? # Linker script is in the config directory, not in Modules as the # Makefile says. python_lib = get_python_lib(standard_lib=1) ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') python_exp = os.path.join(python_lib, 'config', 'python.exp') g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp) elif sys.platform == 'beos': # Linker script is in the config directory. In the Makefile it is # relative to the srcdir, which after installation no longer makes # sense. python_lib = get_python_lib(standard_lib=1) linkerscript_path = string.split(g['LDSHARED'])[0] linkerscript_name = os.path.basename(linkerscript_path) linkerscript = os.path.join(python_lib, 'config', linkerscript_name) # XXX this isn't the right place to do this: adding the Python # library to the link, if needed, should be in the "build_ext" # command. (It's also needed for non-MS compilers on Windows, and # it's taken care of for them by the 'build_ext.get_libraries()' # method.) g['LDSHARED'] = ("%s -L%s/lib -lpython%s" % (linkerscript, PREFIX, sys.version[0:3])) global _config_vars _config_vars = g
def build4LevelsTree(self, subst_chain, atomlines):
    """ Function to build a 4-level hierarchy Protein-Chain-Residue-Atom."""
    self.mol = Protein()
    self.mol.allAtoms = AtomSet()
    self.mol.atmNum = {}
    self.mol.parser = self
    if self.mol.name == 'NoName':
        self.mol.name = os.path.basename(
            os.path.splitext(self.filename)[0])
    self.mol.curChain = Chain()
    self.mol.curRes = Residue()
    self.mol.levels = [Protein, Chain, Residue, Atom]
    i = 1
    for atmline in atomlines:
        if len(atmline) >= 10:
            status = string.split(atmline[9], '|')
        else:
            status = None
        if len(atmline) == 8:
            tmp = [atmline[5][:5], atmline[5][5:]]
            atmline[5] = tmp[0]
            atmline.insert(6, tmp[1])

        if status and status[0] == 'WATER':
            chainID = 'W'
            atmline[7] = 'HOH' + str(i)
            subst_chain[atmline[7]] = chainID
            i = i + 1

        if subst_chain == {}:
            chainID = 'default'
        elif atmline[7] not in subst_chain:
            if '****' in subst_chain:
                try:
                    chainID = subst_chain[atmline[7]]
                except:
                    chainID = 'default'
            else:
                chainID = 'default'
        elif type(subst_chain[atmline[7]]) is bytes:
            # that is to say that only one chain has this substructure name.
            chainID = subst_chain[atmline[7]]
        elif type(subst_chain[atmline[7]]) is list:
            # That is to say that several chains have the same substructure.
            chainID = subst_chain[atmline[7]][0]
            # list.remove() works in place and returns None, so the result
            # must not be assigned back to the dictionary entry
            subst_chain[atmline[7]].remove(chainID)

        if chainID != self.mol.curChain.id:
            if not self.mol.chains.id or not chainID in self.mol.chains.id:
                self.mol.curChain = Chain(chainID, self.mol, top=self.mol)
            else:
                self.mol.curChain = self.mol.chains.get(chainID)[0]
        if len(atmline) < 7:
            # test if the atmline has a res name and resseq:
            resName = 'RES'
            resSeq = '1'
        else:
            resName = atmline[7][:3]
            resSeq = atmline[7][3:]
        if resSeq != self.mol.curRes.number or \
           resName != self.mol.curRes.type:
            # check if this residue already exists
            na = string.strip(resName) + string.strip(resSeq)
            res = self.mol.curChain.get(na)
            if res:
                self.mol.curRes = res[0]
            else:
                self.mol.curRes = Residue(resName, resSeq, '',
                                          self.mol.curChain,
                                          top=self.mol)
        name = atmline[1]
        if name == 'CA':
            self.mol.curRes.hasCA = 1
        if name == 'O':
            self.mol.curRes.hasO = 2
        atom = Atom(name, self.mol.curRes, top=self.mol,
                    chemicalElement=string.split(atmline[5], '.')[0])
        #atom.element = atmline[5][0]
        atom.element = atom.chemElem
        atom.number = int(atmline[0])
        self.mol.atmNum[atom.number] = atom
        atom._coords = [[
            float(atmline[2]),
            float(atmline[3]),
            float(atmline[4])
        ]]
        if len(atmline) >= 9:
            atom._charges['mol2'] = float(atmline[8])
            atom.chargeSet = 'mol2'
        # atom.conformation = 0
        atom.hetatm = 0
        # Add a data member containing a list of strings describing
        # the Sybyl status bits of the atoms.
        atom.status = status
        # add altname so buildBondsByDist doesn't croak
        atom.altname = None
        self.mol.allAtoms.append(atom)
    delattr(self.mol, 'curRes')
    delattr(self.mol, 'curChain')
def buildTestCase(testCaseDirectives, testCaseSourceDir, toolsDir, sdkDir, minOsOptionsName, defaultMinOS, archOptions, testCaseDestDirBuild, testCaseDestDirRun): scratchDir = tempfile.mkdtemp() if testCaseDirectives["BUILD_MIN_OS"]: minOS = testCaseDirectives["BUILD_MIN_OS"] else: minOS = defaultMinOS compilerSearchOptions = " -isysroot " + sdkDir + " -I" + sdkDir + "/System/Library/Frameworks/System.framework/PrivateHeaders" if minOsOptionsName == "mmacosx-version-min": taskForPidCommand = "touch " envEnableCommand = "touch " else: taskForPidCommand = "codesign --force --sign - --entitlements " + testCaseSourceDir + "/../../task_for_pid_entitlement.plist " envEnableCommand = "codesign --force --sign - --entitlements " + testCaseSourceDir + "/../../get_task_allow_entitlement.plist " buildSubs = { "CC": toolsDir + "/usr/bin/clang " + archOptions + " -" + minOsOptionsName + "=" + str(minOS) + compilerSearchOptions, "CXX": toolsDir + "/usr/bin/clang++ " + archOptions + " -" + minOsOptionsName + "=" + str(minOS) + compilerSearchOptions, "BUILD_DIR": testCaseDestDirBuild, "RUN_DIR": testCaseDestDirRun, "TEMP_DIR": scratchDir, "TASK_FOR_PID_ENABLE": taskForPidCommand, "DYLD_ENV_VARS_ENABLE": envEnableCommand } os.makedirs(testCaseDestDirBuild) os.chdir(testCaseSourceDir) print >> sys.stderr, "cd " + testCaseSourceDir for line in testCaseDirectives["BUILD"]: cmd = string.Template(line).safe_substitute(buildSubs) print >> sys.stderr, cmd if "&&" in cmd: result = subprocess.call(cmd, shell=True) else: cmdList = [] cmdList = string.split(cmd) result = subprocess.call(cmdList) if result: return result shutil.rmtree(scratchDir, ignore_errors=True) sudoSub = "" if minOsOptionsName == "mmacosx-version-min": sudoSub = "sudo" runSubs = { "RUN_DIR": testCaseDestDirRun, "REQUIRE_CRASH": "nocr -require_crash", "SUDO": sudoSub, } runFilePath = testCaseDestDirBuild + "/run.sh" with open(runFilePath, "a") as runFile: runFile.write("#!/bin/sh\n") runFile.write("cd " + testCaseDestDirRun + "\n") os.chmod(runFilePath, 0755) for runline in testCaseDirectives["RUN"]: runFile.write( string.Template(runline).safe_substitute(runSubs) + "\n") runFile.write("\n") runFile.close() return 0
def finalize_options(self): from distutils import sysconfig self.set_undefined_options( 'build', ('build_lib', 'build_lib'), ('build_temp', 'build_temp'), ('compiler', 'compiler'), ('debug', 'debug'), ('force', 'force'), ('plat_name', 'plat_name'), ) if self.package is None: self.package = self.distribution.ext_package self.extensions = self.distribution.ext_modules # Make sure Python's include directories (for Python.h, pyconfig.h, # etc.) are in the include search path. py_include = sysconfig.get_python_inc() plat_py_include = sysconfig.get_python_inc(plat_specific=1) if self.include_dirs is None: self.include_dirs = self.distribution.include_dirs or [] if isinstance(self.include_dirs, str): self.include_dirs = self.include_dirs.split(os.pathsep) # Put the Python "system" include dir at the end, so that # any local include dirs take precedence. self.include_dirs.append(py_include) if plat_py_include != py_include: self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset if self.libraries is None: self.libraries = [] if self.library_dirs is None: self.library_dirs = [] elif type(self.library_dirs) is StringType: self.library_dirs = string.split(self.library_dirs, os.pathsep) if self.rpath is None: self.rpath = [] elif type(self.rpath) is StringType: self.rpath = string.split(self.rpath, os.pathsep) # for extensions under windows use different directories # for Release and Debug builds. # also Python's library directory must be appended to library_dirs if os.name == 'nt' and not self.plat_name.startswith(('mingw')): # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. 
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: self.build_temp = os.path.join(self.build_temp, "Release") # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC')) if MSVC_VERSION == 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' else: # win-amd64 or win-ia64 suffix = self.plat_name[4:] # We could have been built in one of two places; add both for d in ('PCbuild', ), ('PC', 'VS9.0'): new_lib = os.path.join(sys.exec_prefix, *d) if suffix: new_lib = os.path.join(new_lib, suffix) self.library_dirs.append(new_lib) elif MSVC_VERSION == 8: self.library_dirs.append( os.path.join(sys.exec_prefix, 'PC', 'VS8.0')) elif MSVC_VERSION == 7: self.library_dirs.append( os.path.join(sys.exec_prefix, 'PC', 'VS7.1')) else: self.library_dirs.append( os.path.join(sys.exec_prefix, 'PC', 'VC6')) # OS/2 (EMX) doesn't support Debug vs Release builds, but has the # import libraries in its "Config" subdirectory if os.name == 'os2': self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config')) # for extensions under Cygwin and AtheOS Python's library directory must be # appended to library_dirs if sys.platform[: 6] == 'cygwin' or sys.platform[: 6] == 'atheos' or self.plat_name.startswith( ('mingw')): if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): # building third party extensions config_dir_name = os.path.basename( sysconfig.get_config_var('LIBPL')) self.library_dirs.append( os.path.join(sys.prefix, "lib", "python" + get_python_version(), config_dir_name)) else: # building python standard extensions self.library_dirs.append('.') # For building extensions with a shared Python library, # Python's library directory must be appended to library_dirs # See Issues: #1600860, #4366 if (sysconfig.get_config_var('Py_ENABLE_SHARED')): if not sysconfig.python_build: # building third party extensions self.library_dirs.append(sysconfig.get_config_var('LIBDIR')) else: # building python standard extensions self.library_dirs.append('.') # The argument parsing will result in self.define being a string, but # it has to be a list of 2-tuples. All the preprocessor symbols # specified by the 'define' option will be set to '1'. Multiple # symbols can be separated with commas. if self.define: defines = self.define.split(',') self.define = map(lambda symbol: (symbol, '1'), defines) # The option for macros to undefine is also a string from the # option parsing, but has to be a list. Multiple symbols can also # be separated with commas here. if self.undef: self.undef = self.undef.split(',') if self.swig_opts is None: self.swig_opts = [] else: self.swig_opts = self.swig_opts.split(' ') # Finally add the user include and library directories if requested if self.user: user_include = os.path.join(USER_BASE, "include") user_lib = os.path.join(USER_BASE, "lib") if os.path.isdir(user_include): self.include_dirs.append(user_include) if os.path.isdir(user_lib): self.library_dirs.append(user_lib) self.rpath.append(user_lib)
def _make_spec_file(self): """Generate the text of an RPM spec file and return it as a list of strings (one per line). """ # definitions and headers spec_file = [ '%define name ' + self.distribution.get_name(), '%define version ' + self.distribution.get_version().replace('-', '_'), '%define release ' + self.release.replace('-', '_'), '', 'Summary: ' + self.distribution.get_description(), ] # put locale summaries into spec file # XXX not supported for now (hard to put a dictionary # in a config file -- arg!) #for locale in self.summaries.keys(): # spec_file.append('Summary(%s): %s' % (locale, # self.summaries[locale])) spec_file.extend([ 'Name: %{name}', 'Version: %{version}', 'Release: %{release}', ]) # XXX yuck! this filename is available from the "sdist" command, # but only after it has run: and we create the spec file before # running "sdist", in case of --spec-only. if self.use_bzip2: spec_file.append('Source0: %{name}-%{version}.tar.bz2') else: spec_file.append('Source0: %{name}-%{version}.tar.gz') spec_file.extend([ 'License: ' + self.distribution.get_license(), 'Group: ' + self.group, 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot', 'Prefix: %{_prefix}', ]) if not self.force_arch: # noarch if no extension modules if not self.distribution.has_ext_modules(): spec_file.append('BuildArch: noarch') else: spec_file.append('BuildArch: %s' % self.force_arch) for field in ( 'Vendor', 'Packager', 'Provides', 'Requires', 'Conflicts', 'Obsoletes', ): val = getattr(self, string.lower(field)) if type(val) is ListType: spec_file.append('%s: %s' % (field, string.join(val))) elif val is not None: spec_file.append('%s: %s' % (field, val)) if self.distribution.get_url() != 'UNKNOWN': spec_file.append('Url: ' + self.distribution.get_url()) if self.distribution_name: spec_file.append('Distribution: ' + self.distribution_name) if self.build_requires: spec_file.append('BuildRequires: ' + string.join(self.build_requires)) if self.icon: spec_file.append('Icon: ' + os.path.basename(self.icon)) if self.no_autoreq: spec_file.append('AutoReq: 0') spec_file.extend( ['', '%description', self.distribution.get_long_description()]) # put locale descriptions into spec file # XXX again, suppressed because config file syntax doesn't # easily support this ;-( #for locale in self.descriptions.keys(): # spec_file.extend([ # '', # '%description -l ' + locale, # self.descriptions[locale], # ]) # rpm scripts # figure out default build script def_setup_call = "%s %s" % (self.python, os.path.basename(sys.argv[0])) def_build = "%s build" % def_setup_call if self.use_rpm_opt_flags: def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build # insert contents of files # XXX this is kind of misleading: user-supplied options are files # that we open and interpolate into the spec file, but the defaults # are just text that we drop in as-is. Hmmm. 
script_options = [ ('prep', 'prep_script', "%setup"), ('build', 'build_script', def_build), ('install', 'install_script', ("%s install " "--root=$RPM_BUILD_ROOT " "--record=INSTALLED_FILES") % def_setup_call), ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"), ('verifyscript', 'verify_script', None), ('pre', 'pre_install', None), ('post', 'post_install', None), ('preun', 'pre_uninstall', None), ('postun', 'post_uninstall', None), ] for (rpm_opt, attr, default) in script_options: # Insert contents of file referred to, if no file is referred to # use 'default' as contents of script val = getattr(self, attr) if val or default: spec_file.extend([ '', '%' + rpm_opt, ]) if val: spec_file.extend(string.split(open(val, 'r').read(), '\n')) else: spec_file.append(default) # files section spec_file.extend([ '', '%files -f INSTALLED_FILES', '%defattr(-,root,root)', ]) if self.doc_files: spec_file.append('%doc ' + string.join(self.doc_files)) if self.changelog: spec_file.extend([ '', '%changelog', ]) spec_file.extend(self.changelog) return spec_file
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # // Author: Filippov Ilia import common import sys import os import string print_debug = common.print_debug error = common.error take_lines = common.take_lines exists = [False, False, False, False, False, False, False, False] names = ["m4", "bison", "flex", "sde", "ispc", "clang", "gcc", "icc"] PATH_dir = string.split(os.getenv("PATH"), os.pathsep) for counter in PATH_dir: for i in range(0,8): if os.path.exists(counter + os.sep + names[i]): exists[i] = True print_debug("=== in PATH: ===\n", False, "") print_debug("Tools:\n", False, "") for i in range(0,3): if exists[i]: print_debug(take_lines(names[i] + " --version", "first"), False, "") else: error("you don't have " + names[i], 0) if exists[0] and exists[1] and exists[2]: if common.check_tools(2): print_debug("Tools' versions are ok\n", False, "")
def extractFeatures(species, countsFileDir):
    import export
    ExonsPresent = False
    lastgene = None
    lastend = None
    genes_detected = {}
    count = 0
    first_last_exons = {}  ### Make a fake junction comprised of the first and last exon
    if 'counts.' in countsFileDir:
        ### The feature_file contains only ExonID or Gene IDs and associated coordinates
        feature_file = string.replace(countsFileDir, 'counts.', 'features.')
        fe = export.ExportFile(feature_file)
        firstLine = True
        for line in open(countsFileDir, 'rU').xreadlines():
            if firstLine:
                firstLine = False
            else:
                feature_info = string.split(line, '\t')[0]
                fe.write(feature_info + '\n')
                junction_annotation = string.split(feature_info, '=')[0]
                if '-' in junction_annotation:
                    geneid = string.split(junction_annotation, ':')[0]
                    genes_detected[geneid] = []
                if ExonsPresent == False:
                    exon = string.split(feature_info, '=')[0]
                    if '-' not in exon:
                        ExonsPresent = True

        ### Add exon-info if necessary
        exons_file = unique.filepath('AltDatabase/ensembl/' + species + '/' +
                                     species + '_Ensembl_exon.txt')
        firstLine = True
        for line in open(exons_file, 'rU').xreadlines():
            if firstLine:
                firstLine = False
            else:
                line = line.rstrip('\n')
                t = string.split(line, '\t')
                gene, exon, chr, strand, start, end = t[:6]
                if gene != lastgene:
                    if len(genes_detected) == 0 or gene in genes_detected:  ### restrict to detected genes
                        first_last_exons[gene, strand] = [(chr, start)]
                    if len(genes_detected) == 0 or lastgene in genes_detected:  ### restrict to detected genes
                        try:
                            first_last_exons[lastgene, laststrand].append(lastend)
                        except Exception:
                            pass  ### occurs for the first gene
                if ExonsPresent == False:
                    fe.write(gene + ':' + exon + '=' + chr + ':' + start +
                             '-' + end + '\n')
                lastgene = gene
                lastend = end
                laststrand = strand
        if len(genes_detected) == 0 or lastgene in genes_detected:
            first_last_exons[lastgene, laststrand].append(lastend)

        ### Add a fake junction for the whole gene
        for (gene, strand) in first_last_exons:
            (chr, start), end = first_last_exons[gene, strand]
            if strand == '-':
                start, end = end, start
                # Need to encode strand in this annotation, do this by strand-orienting the positions
            fe.write(gene + ':E1.1-E100.1' + '=' + chr + ':' + start + '-' +
                     end + '\n')
        fe.close()
    return feature_file  ### return the location of the exon and gene coordinates file
sdkDir = "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.Internal.sdk" toolsDir = os.getenv("TOOLCHAIN_DIR", "/") defaultMinOS = "" minVersNum = "10.12" minOSOption = os.getenv("DEPLOYMENT_TARGET_CLANG_FLAG_NAME", "") if minOSOption: minOSVersName = os.getenv("DEPLOYMENT_TARGET_CLANG_ENV_NAME", "") if minOSVersName: minVersNum = os.getenv(minOSVersName, "") else: minOSOption = "mmacosx-version-min" platformName = os.getenv("PLATFORM_NAME", "osx") archOptions = "" archList = os.getenv("RC_ARCHS", "") if archList: for arch in string.split(archList, " "): archOptions = archOptions + " -arch " + arch else: archList = os.getenv("ARCHS_STANDARD_32_64_BIT", "") if platformName == "watchos": archOptions = "-arch armv7k" elif platformName == "appletvos": archOptions = "-arch arm64" else: if archList: for arch in string.split(archList, " "): archOptions = archOptions + " -arch " + arch else: archOptions = "-arch x86_64" allTests = [] for f in os.listdir(testsSrcTopDir):
def exportToGff(incl_junction, excl_junction, feature_positions, gene):
    """ Write the exon and gene coordinates to a gff file for any
    alternatively regulated and flanking exons """
    proceed = False
    if '-' in excl_junction:
        selected_event = excl_junction  ### should be the exclusion junction
    else:
        selected_event = incl_junction
    ### For the first and last exon, determine their positions then later loop through the exons in between
    gene_prefix = gene + '__'
    pds = feature_positions[selected_event]
    e1, e2 = string.split(selected_event, '-')
    e1 = checkForNovelJunctions(e1)
    e2 = checkForNovelJunctions(e2)
    chr = pds.Chr()
    if pds.Start() > pds.End():
        strand = '-'
    else:
        strand = '+'
    try:
        pd1 = feature_positions[e1]
    except Exception:
        e1 = string.split(e1, '.')[0] + '.1'  ### occurs with IDs such as ENSMUSG00000002107__E26.27, where .27 is not in the database (not sure why)
        try:
            pd1 = feature_positions[e1]
        except Exception:
            ### Occurs with gene entry, e.g., ENSMUSG00000028180__E1.1-E100.1
            pd1 = PositionData(chr + '__' + str(pds.Start()) + '-' +
                               str(pds.Start()))  # chr3:157544964-157545122
            selected_event = gene  ### Again, set this for gene entry coordinates
    try:
        pd2 = feature_positions[gene_prefix + e2]
    except Exception:
        e2 = string.split(e2, '.')[0] + '.1'  ### occurs with IDs such as ENSMUSG00000002107__E26.27, where .27 is not in the database (not sure why)
        try:
            pd2 = feature_positions[gene_prefix + e2]
        except Exception:
            ### Occurs with gene entry, e.g., ENSMUSG00000028180__E1.1-E100.1
            pd2 = PositionData(chr + '__' + str(pds.End()) + '-' +
                               str(pds.End()))  # chr3:157544964-157545122
            selected_event = gene  ### Again, set this for gene entry coordinates
    #print pd.Start(),pd.End(), pd1.Start(),pd1.End(), pd2.Start(), pd2.End()
    first_start = pd1.Start()
    last_end = pd2.End()

    ### Now, loop through all gene features and only select the ones that are in between the spliced exons
    for exonID in feature_positions:
        proceed = False
        if '-' not in exonID:  ### Hence an exon rather than a junction
            geneID, block_region = string.split(exonID, '__')
            exon_region = string.replace(block_region, 'E', '')
            if ('I' not in exon_region) and (';' not in exon_region) and (
                    '-' not in exon_region):
                pd = feature_positions[exonID]
                exon_start = pd.Start()
                exon_end = pd.End()
                if (exon_start >= first_start and exon_end <= last_end):
                    proceed = True
                if (exon_start <= first_start and exon_end >= last_end):
                    proceed = True
                if proceed:
                    export_string = (chr + '\t' + 'SE' + '\t' + 'exon' + '\t' +
                                     str(exon_start) + '\t' + str(exon_end) +
                                     '\t' + '.' + '\t' + strand + '\t' + '.' +
                                     '\t' + 'ID=' + exonID + ';' + 'Parent=' +
                                     selected_event + '.STRAND;' + '\n')
                    if 'M' not in chr:
                        key = exonID, selected_event
                        #if key not in added_gff_entries:
                        gff_export_obj.write(export_string)
                        added_gff_entries[key] = []
    if strand == '-':
        junction_exon_start = int(pd2.Start()) - 300  ### increase the window size
        junction_exon_end = int(pd1.End()) + 300  ### increase the window size
    else:
        junction_exon_start = int(pd1.Start()) - 300  ### increase the window size
        junction_exon_end = int(pd2.End()) + 300  ### increase the window size
    return chr, str(junction_exon_start), str(junction_exon_end), strand, selected_event
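# A sketch of the GFF exon line the function writes, using hypothetical
# coordinates and IDs in the same export_string format:
chr, strand = 'chr3', '+'
exonID, selected_event = 'ENSMUSG00000028180__E2.1', 'ENSMUSG00000028180:E1.1-E3.1'
print (chr + '\tSE\texon\t' + str(157544964) + '\t' + str(157545122) +
       '\t.\t' + strand + '\t.\t' + 'ID=' + exonID + ';Parent=' +
       selected_event + '.STRAND;')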
def correct_split(string, splitchar):
    # NB: the 'string' parameter shadows the string module here, so this
    # calls the str.split() method of the argument, not string.split()
    rv = string.split(splitchar)
    if rv == ['']:
        rv = []
    return rv
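# The wrapper exists because str.split with an explicit separator maps an
# empty string to [''] rather than []:
print ''.split(',')              # ['']
print correct_split('', ',')     # []
print correct_split('a,b', ',')  # ['a', 'b']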
def makeStudent(infoStr): name, hours, qpoints = string.split(infoStr, "\t") return Student(name, hours, qpoints)
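# Assuming a minimal Student class (this stub is hypothetical; the real
# class is defined elsewhere), a tab-separated record parses as follows:
import string

class Student:
    def __init__(self, name, hours, qpoints):
        self.name = name
        self.hours = float(hours)
        self.qpoints = float(qpoints)

s = makeStudent("Ada Lovelace\t42\t160")
print s.name, s.hours, s.qpoints  # Ada Lovelace 42.0 160.0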
def __init__(self, position_str): self.chr, interval = string.split(position_str, '__') #chr16__18810389-18807423 self.start, self.end = string.split(interval, '-')
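# Assuming this is the initializer of the PositionData class used in
# exportToGff above (and that the string module is imported), a position
# string parses as:
pd = PositionData('chr16__18810389-18807423')
print pd.chr, pd.start, pd.end  # chr16 18810389 18807423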
def bsd_list_generate():
    print "[+] Generating system call matrix data structure"

    fdin = open(PATH_BSD_TEMP_FILE, "r+b")
    data = fdin.read()
    fdin.close()

    # Split data by lines
    data = string.split(data, '\n')

    # Remove all empty lines (popping from the list while enumerating it
    # skips entries, so build a filtered copy instead)
    data = [line for line in data if line]

    # Remove all asm comments ';'
    tmp = []
    for i, line in enumerate(data):
        if i == 0 or not line.startswith(';'):
            tmp.append(line)
    data = tmp

    # Crafting syscall table (as a 2-dimensional matrix)
    syscall_matrix = []
    for line in data:
        entry = []

        if line.startswith("#"):
            # syscalls.master mixes preprocessor lines into the table;
            # keep them as single-element entries.
            if line.find("#endif") != -1:
                entry.append("#endif")
                syscall_matrix.append(entry)
                continue
            entry.append(line)
            syscall_matrix.append(entry)
            continue

        line = line.replace(" (", "(")
        elems = line.split()

        # Syscall ID
        entry.append(elems[0])
        # Syscall return type
        entry.append(elems[4])
        # Syscall name
        entry.append(elems[5][0:elems[5].find('(')])

        # Enumerating arguments of the syscall
        # This part is tricky, for a couple of reasons: 1) There _are_
        # corner cases (e.g. no args), 2) Unknown # of args per syscall, 3)
        # an argument can be more than a tuple e.g. 'struct name arg'
        #
        i = 5
        argnum = 0  # we will use it to count number of args
        arg = ""  # we're using it to create each arg
        tmp = []  # temporary list of args for each syscall
        while True:
            # Corner case where syscall takes 0 args (denoted "void")
            if (elems[6] == '}' or elems[6] == "NO_SYSCALL_STUB;"):
                entry.append(str(argnum))
                break
            # i == 5 means this is our first iteration
            elif (i == 5):
                arg = elems[i][elems[i].find('(') + 1:]
                i += 1
                continue
            elif (elems[i] != '}' and elems[i] != "NO_SYSCALL_STUB;"):
                if (elems[i].find(')') != -1):
                    arg += ' ' + elems[i][:elems[i].find(')')]
                    tmp.append(arg)
                    argnum += 1
                    entry.append(str(argnum))
                    break
                if (elems[i].find(',') == -1):
                    arg += ' ' + elems[i]
                    i += 1
                    continue
                else:
                    arg += ' ' + elems[i][:elems[i].find(',')]
                    tmp.append(arg)
                    argnum += 1
                    arg = ""
                    i += 1
            else:
                break
        # We're adding args from our temporary list
        for el in tmp:
            entry.append(el)

        # Strip prepended spaces from syscall's args
        for i, elem in enumerate(entry):
            entry[i] = elem.strip()
        syscall_matrix.append(entry)
    '''
    for entry in syscall_matrix:
        print entry
    sys.exit()
    '''
    # Clean-up
    cmd = "rm " + PATH_BSD_TEMP_FILE
    os.system(cmd)

    print "\tdone"
    return syscall_matrix
class ResourceDB: def __init__(self, file=None, string=None, resources=None): self.db = {} self.lock = lock.allocate_lock() if file is not None: self.insert_file(file) if string is not None: self.insert_string(string) if resources is not None: self.insert_resources(resources) def insert_file(self, file): """insert_file(file) Load resources entries from FILE, and insert them into the database. FILE can be a filename (a string)or a file object. """ if type(file) is types.StringType: file = open(file, 'r') self.insert_string(file.read()) def insert_string(self, data): """insert_string(data) Insert the resources entries in the string DATA into the database. """ # First split string into lines lines = string.split(data, '\n') while lines: line = lines[0] del lines[0] # Skip empty line if not line: continue # Skip comments if comment_re.match(line): continue # Handle continued lines while line[-1] == '\\': if lines: line = line[:-1] + lines[0] del lines[0] else: line = line[:-1] break # Split line into resource and value m = resource_spec_re.match(line) # Bad line, just ignore it silently if not m: continue res, value = m.group(1, 2) # Convert all escape sequences in value splits = value_escape_re.split(value) for i in range(1, len(splits), 2): s = splits[i] if len(s) == 3: splits[i] = chr(string.atoi(s, 8)) elif s == 'n': splits[i] = '\n' # strip the last value part to get rid of any # unescaped blanks splits[-1] = string.rstrip(splits[-1]) value = string.join(splits, '') self.insert(res, value) def insert_resources(self, resources): """insert_resources(resources) Insert all resources entries in the list RESOURCES into the database. Each element in RESOURCES should be a tuple: (resource, value) Where RESOURCE is a string and VALUE can be any Python value. """ for res, value in resources: self.insert(res, value) def insert(self, resource, value): """insert(resource, value) Insert a resource entry into the database. RESOURCE is a string and VALUE can be any Python value. """ # Split res into components and bindings parts = resource_parts_re.split(resource) # If the last part is empty, this is an invalid resource # which we simply ignore if parts[-1] == '': return self.lock.acquire() db = self.db for i in range(1, len(parts), 2): # Create a new mapping/value group if not db.has_key(parts[i - 1]): db[parts[i - 1]] = ({}, {}) # Use second mapping if a loose binding, first otherwise if '*' in parts[i]: db = db[parts[i - 1]][1] else: db = db[parts[i - 1]][0] # Insert value into the derived db if db.has_key(parts[-1]): db[parts[-1]] = db[parts[-1]][:2] + (value, ) else: db[parts[-1]] = ({}, {}, value) self.lock.release() def __getitem__(self, (name, cls)): """db[name, class] Return the value matching the resource identified by NAME and CLASS. If no match is found, KeyError is raised. """ # Split name and class into their parts namep = string.split(name, '.') clsp = string.split(cls, '.') # It is an error for name and class to have different number # of parts if len(namep) != len(clsp): raise ValueError( 'Different number of parts in resource name/class: %s/%s' % (name, cls)) complen = len(namep) matches = [] # Lock database and wrap the lookup code in a try-finally # block to make sure that it is unlocked. self.lock.acquire() try: # Precedence order: name -> class -> ? 
if self.db.has_key(namep[0]): bin_insert(matches, _Match((NAME_MATCH, ), self.db[namep[0]])) if self.db.has_key(clsp[0]): bin_insert(matches, _Match((CLASS_MATCH, ), self.db[clsp[0]])) if self.db.has_key('?'): bin_insert(matches, _Match((WILD_MATCH, ), self.db['?'])) # Special case for the unlikely event that the resource # only has one component if complen == 1 and matches: x = matches[0] if x.final(complen): return x.value() else: raise KeyError((name, cls)) # Special case for resources which begins with a loose # binding, e.g. '*foo.bar' if self.db.has_key(''): bin_insert(matches, _Match((), self.db[''][1])) # Now iterate over all components until we find the best match. # For each component, we choose the best partial match among # the mappings by applying these rules in order: # Rule 1: If the current group contains a match for the # name, class or '?', we drop all previously found loose # binding mappings. # Rule 2: A matching name has precedence over a matching # class, which in turn has precedence over '?'. # Rule 3: Tight bindings have precedence over loose # bindings. while matches: # Work on the first element == the best current match x = matches[0] del matches[0] # print 'path: ', x.path # if x.skip: # print 'skip: ', x.db # else: # print 'group: ', x.group # print i = x.match_length() for part, score in ((namep[i], NAME_MATCH), (clsp[i], CLASS_MATCH), ('?', WILD_MATCH)): # Attempt to find a match in x match = x.match(part, score) if match: # Hey, we actually found a value! if match.final(complen): return match.value() # Else just insert the new match else: bin_insert(matches, match) # Generate a new loose match match = x.skip_match(complen) if match: bin_insert(matches, match) # Oh well, nothing matched raise KeyError((name, cls)) finally: self.lock.release()
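# A sketch of lookup behaviour with a tiny in-memory database, assuming the
# module-level regexes and helpers (resource_spec_re, bin_insert, _Match,
# NAME_MATCH, ...) this class relies on are defined as in python-xlib's rdb
# module; the resource strings are hypothetical:
db = ResourceDB(string='*background: black\nmyapp.frame.foreground: white\n')
print db['myapp.frame.foreground', 'Myapp.Frame.Foreground']  # white
print db['myapp.frame.background', 'Myapp.Frame.Background']  # black (loose binding)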
def convert(self, oeb_book, output_path, input_plugin, opts, log):
    from lxml import etree
    from calibre.ebooks.snb.snbfile import SNBFile
    from calibre.ebooks.snb.snbml import SNBMLizer, ProcessFileName

    self.opts = opts
    from calibre.ebooks.oeb.transforms.rasterize import SVGRasterizer, Unavailable
    try:
        rasterizer = SVGRasterizer()
        rasterizer(oeb_book, opts)
    except Unavailable:
        log.warn('SVG rasterizer unavailable, SVG will not be converted')

    # Create temp dir
    with TemporaryDirectory('_snb_output') as tdir:
        # Create stub directories
        snbfDir = os.path.join(tdir, 'snbf')
        snbcDir = os.path.join(tdir, 'snbc')
        snbiDir = os.path.join(tdir, 'snbc/images')
        os.mkdir(snbfDir)
        os.mkdir(snbcDir)
        os.mkdir(snbiDir)

        # Process Meta data
        meta = oeb_book.metadata
        if meta.title:
            title = unicode(meta.title[0])
        else:
            title = ''
        authors = [unicode(x) for x in meta.creator if x.role == 'aut']
        if meta.publisher:
            publishers = unicode(meta.publisher[0])
        else:
            publishers = ''
        if meta.language:
            lang = unicode(meta.language[0]).upper()
        else:
            lang = ''
        if meta.description:
            abstract = unicode(meta.description[0])
        else:
            abstract = ''

        # Process Cover
        g, m, s = oeb_book.guide, oeb_book.manifest, oeb_book.spine
        href = None
        if 'titlepage' not in g:
            if 'cover' in g:
                href = g['cover'].href

        # Output book info file
        bookInfoTree = etree.Element("book-snbf", version="1.0")
        headTree = etree.SubElement(bookInfoTree, "head")
        etree.SubElement(headTree, "name").text = title
        etree.SubElement(headTree, "author").text = ' '.join(authors)
        etree.SubElement(headTree, "language").text = lang
        etree.SubElement(headTree, "rights")
        etree.SubElement(headTree, "publisher").text = publishers
        etree.SubElement(headTree, "generator").text = __appname__ + ' ' + __version__
        etree.SubElement(headTree, "created")
        etree.SubElement(headTree, "abstract").text = abstract
        if href is not None:
            etree.SubElement(headTree, "cover").text = ProcessFileName(href)
        else:
            etree.SubElement(headTree, "cover")
        bookInfoFile = open(os.path.join(snbfDir, 'book.snbf'), 'wb')
        bookInfoFile.write(etree.tostring(bookInfoTree, pretty_print=True,
                                          encoding='utf-8'))
        bookInfoFile.close()

        # Output TOC
        tocInfoTree = etree.Element("toc-snbf")
        tocHead = etree.SubElement(tocInfoTree, "head")
        tocBody = etree.SubElement(tocInfoTree, "body")
        outputFiles = {}
        if oeb_book.toc.count() == 0:
            log.warn('This SNB file has no Table of Contents. '
                     'Creating a default TOC')
            first = iter(oeb_book.spine).next()
            oeb_book.toc.add(_('Start page'), first.href)
        else:
            first = iter(oeb_book.spine).next()
            if oeb_book.toc[0].href != first.href:
                # The pages before the first item in toc will be stored as
                # "Cover Pages".
# oeb_book.toc does not support "insert", so we generate # the tocInfoTree directly instead of modifying the toc ch = etree.SubElement(tocBody, "chapter") ch.set("src", ProcessFileName(first.href) + ".snbc") ch.text = _('Cover pages') outputFiles[first.href] = [] outputFiles[first.href].append(("", _("Cover pages"))) for tocitem in oeb_book.toc: if tocitem.href.find('#') != -1: item = string.split(tocitem.href, '#') if len(item) != 2: log.error('Error in TOC item: %s' % tocitem) else: if item[0] in outputFiles: outputFiles[item[0]].append((item[1], tocitem.title)) else: outputFiles[item[0]] = [] if "" not in outputFiles[item[0]]: outputFiles[item[0]].append(("", tocitem.title + _(" (Preface)"))) ch = etree.SubElement(tocBody, "chapter") ch.set("src", ProcessFileName(item[0]) + ".snbc") ch.text = tocitem.title + _(" (Preface)") outputFiles[item[0]].append((item[1], tocitem.title)) else: if tocitem.href in outputFiles: outputFiles[tocitem.href].append(("", tocitem.title)) else: outputFiles[tocitem.href] = [] outputFiles[tocitem.href].append(("", tocitem.title)) ch = etree.SubElement(tocBody, "chapter") ch.set("src", ProcessFileName(tocitem.href) + ".snbc") ch.text = tocitem.title etree.SubElement(tocHead, "chapters").text = '%d' % len(tocBody) tocInfoFile = open(os.path.join(snbfDir, 'toc.snbf'), 'wb') tocInfoFile.write(etree.tostring(tocInfoTree, pretty_print=True, encoding='utf-8')) tocInfoFile.close() # Output Files oldTree = None mergeLast = False lastName = None for item in s: from calibre.ebooks.oeb.base import OEB_DOCS, OEB_IMAGES if m.hrefs[item.href].media_type in OEB_DOCS: if item.href not in outputFiles: log.debug('File %s is unused in TOC. Continue in last chapter' % item.href) mergeLast = True else: if oldTree is not None and mergeLast: log.debug('Output the modified chapter again: %s' % lastName) outputFile = open(os.path.join(snbcDir, lastName), 'wb') outputFile.write(etree.tostring(oldTree, pretty_print=True, encoding='utf-8')) outputFile.close() mergeLast = False log.debug('Converting %s to snbc...' % item.href) snbwriter = SNBMLizer(log) snbcTrees = None if not mergeLast: snbcTrees = snbwriter.extract_content(oeb_book, item, outputFiles[item.href], opts) for subName in snbcTrees: postfix = '' if subName != '': postfix = '_' + subName lastName = ProcessFileName(item.href + postfix + ".snbc") oldTree = snbcTrees[subName] outputFile = open(os.path.join(snbcDir, lastName), 'wb') outputFile.write(etree.tostring(oldTree, pretty_print=True, encoding='utf-8')) outputFile.close() else: log.debug('Merge %s with last TOC item...' % item.href) snbwriter.merge_content(oldTree, oeb_book, item, [('', _("Start"))], opts) # Output the last one if needed log.debug('Output the last modified chapter again: %s' % lastName) if oldTree is not None and mergeLast: outputFile = open(os.path.join(snbcDir, lastName), 'wb') outputFile.write(etree.tostring(oldTree, pretty_print=True, encoding='utf-8')) outputFile.close() mergeLast = False for item in m: if m.hrefs[item.href].media_type in OEB_IMAGES: log.debug('Converting image: %s ...' % item.href) content = m.hrefs[item.href].data # Convert & Resize image self.HandleImage(content, os.path.join(snbiDir, ProcessFileName(item.href))) # Package as SNB File snbFile = SNBFile() snbFile.FromDir(tdir) snbFile.Output(output_path)
samples['DY'] = {
    'name': [],
    'weight': 'puW*baseW*(1.08683 * (0.95 - 0.0657370*TMath::Erf((gen_ptll-12.5151)/5.51582)))*bPogSF*effTrigW*std_vector_lepton_idisoWcut_WP_Tight80X[0]*std_vector_lepton_idisoWcut_WP_Tight80X[1]*std_vector_lepton_recoW[0]*std_vector_lepton_recoW[1]*GEN_weight_SM/abs(GEN_weight_SM)*std_vector_lepton_genmatched[0]*std_vector_lepton_genmatched[1]',
}

directory = '/pnfs/iihe/cms/store/user/xjanssen/HWW2015/Feb2017_summer16/MCl2looseCut__hadd__bSFL2pTEffCut__l2tight/'
for DataSet in ['latino_DYJetsToLL_M-10to50', 'latino_DYJetsToLL_M-50__part']:
    fileCmd = 'ls ' + directory + DataSet + '*'
    proc = subprocess.Popen(fileCmd,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            shell=True)
    out, err = proc.communicate()
    FileTarget = string.split(out)
    for iFile in FileTarget:
        samples['DY']['name'].append(os.path.basename(iFile))

# ... no SF at all
samples['DY-noSF'] = {
    'name': [],
    'weight': 'puW*baseW*(1.08683 * (0.95 - 0.0657370*TMath::Erf((gen_ptll-12.5151)/5.51582)))*bPogSF*GEN_weight_SM/abs(GEN_weight_SM)*std_vector_lepton_genmatched[0]*std_vector_lepton_genmatched[1]',
}

directory = '/pnfs/iihe/cms/store/user/xjanssen/HWW2015/Feb2017_summer16/MCl2looseCut__hadd__bSFL2pTEffCut__l2tight/'
for DataSet in ['latino_DYJetsToLL_M-10to50', 'latino_DYJetsToLL_M-50__part']:
    fileCmd = 'ls ' + directory + DataSet + '*'
    proc = subprocess.Popen(fileCmd,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            shell=True)