def xmldescription(url):
    """Fetch an XML feed and extract the temperature from its <summary> nodes.

    url -- address of the XML document (opened with the file-level urllib).
    Returns the temperature as a string with ',' converted to '.', or
    "-9999" when no temperature section is present.
    """
    xmlfilename = urllib.urlopen(url)   # open the document over the web
    dom = parse(xmlfilename)            # parse the XML
    Description = dom.getElementsByTagName('summary')  # all <summary> nodes
    nbSummary = len(Description)

    # Scan the summaries for the one containing "Température:".
    # NOTE(review): scanning starts at index 1, skipping the first summary —
    # presumably a feed header; confirm against the actual feed.  The bounds
    # check now happens BEFORE indexing (the original indexed first and
    # checked afterwards, so short feeds raised IndexError).
    TxtNodeValue = "nil"
    NoSummary = 1
    while NoSummary < nbSummary:
        if u"Température:" in Description[NoSummary].firstChild.nodeValue:
            TxtNodeValue = Description[NoSummary].firstChild.nodeValue
            break
        NoSummary += 1

    # Split the HTML summary on '<b>' and extract the value after "Temp".
    # The original reset NombreTemp to "-9999" on every non-matching element,
    # so a match followed by any other element was discarded; fixed here.
    ListeB = TxtNodeValue.split("<b>")
    NombreTemp = "-9999"                # default when no temperature found
    if len(ListeB) > 1:
        for elements in ListeB:
            if "Temp" in elements:
                TextTemp = elements.split("</b>")[1]
                NombreTemp = TextTemp.replace("°C <br/>", "")
    return NombreTemp.replace(",", ".").strip()
def deg2HMS(ra='', dec='', round=False):
    """Convert between decimal degrees and sexagesimal (HMS/DMS) notation.

    ra    -- right ascension: 'HH:MM:SS' string (-> decimal degrees) or
             decimal degrees (-> 'HH:MM:SS'; 1 hour = 15 degrees).
    dec   -- declination: 'DD:MM:SS' string (-> decimal degrees) or
             decimal degrees (-> 'DD:MM:SS').
    round -- kept for interface compatibility; not used.

    Returns (RA, DEC) when both are given, otherwise the single value.
    """
    RA, DEC = '', ''
    if dec:
        if str(dec).count(':') == 2:
            # sexagesimal -> decimal degrees
            d0, d1, d2 = [float(part) for part in dec.split(':')]
            if '-' in str(d0):
                DEC = (-1) * ((d2 / 60. + d1) / 60. + ((-1) * d0))
            else:
                DEC = (d2 / 60. + d1) / 60. + d0
        else:
            # decimal degrees -> 'DD:MM:SS'; each field zero-padded to 2 chars
            if str(dec)[0] == '-':
                dec0 = (-1) * abs(int(dec))
            else:
                dec0 = abs(int(dec))
            dec1 = int((abs(dec) - abs(dec0)) * 60)
            dec2 = (((abs(dec) - abs(dec0)) * 60) - abs(dec1)) * 60
            DEC = ('00'[len(str(dec0)):] + str(dec0) + ':' +
                   '00'[len(str(dec1)):] + str(dec1) + ':' +
                   '00'[len(str(int(dec2))):] + str(dec2))
    if ra:
        if str(ra).count(':') == 2:
            # sexagesimal hours -> decimal degrees
            r0, r1, r2 = [float(part) for part in ra.split(':')]
            RA = ((r2 / 60. + r1) / 60. + r0) * 15.
        else:
            # decimal degrees -> 'HH:MM:SS'
            ra0 = int(ra / 15.)
            ra1 = int(((ra / 15.) - ra0) * 60)
            ra2 = ((((ra / 15.) - ra0) * 60) - ra1) * 60
            RA = ('00'[len(str(ra0)):] + str(ra0) + ':' +
                  '00'[len(str(ra1)):] + str(ra1) + ':' +
                  '00'[len(str(int(ra2))):] + str(ra2))
    if ra and dec:
        return RA, DEC
    else:
        return RA or DEC
def fixcode(block, doctype='UNKNOWN'):
    """Emit a code block as XML, colorizing Python when possible.

    block   -- raw code text.
    doctype -- document type hint; 'PYTHON' forces Python colorizing.
    Relies on module-level helpers LeftMargin, Typography, Detag,
    dw_colorize, code_block and the py_formatter flag.
    """
    # Some XML preparation
    block = LeftMargin(block)
    # Pull out title if available (a '#--- title ---#' first line)
    re_title = re.compile(r'^#\-+ (.+) \-+#$', re.M)
    if_title = re_title.match(block)
    if if_title:
        title = if_title.group(1)
        block = re_title.sub('', block)  # take title out of code
    else:
        title = ''
    # Process the code block with dw_colorize (if possible and appropriate):
    # anything whose title looks Python-ish, or an explicit PYTHON doctype.
    if py_formatter and (title.count('.py') or title.count('Python') or
                         title.count('python') or title.count('py_') or
                         doctype == 'PYTHON'):
        print('<p><heading refname="code1" type="code" toc="yes">%s</heading>'
              % Typography(title))
        print('<code type="section">', dw_colorize.Parser(block.rstrip()).toXML())
        print('</code></p>')
    # elif the-will-and-the-way-is-there-to-format-language-X:
    # elif the-will-and-the-way-is-there-to-format-language-Y:
    else:
        block = Detag(block)
        print(code_block % (Typography(title), block.strip()))
def parseResults(output):
    """Parse tool output lines into {linenumber: [msgtype, message]}.

    output -- iterable of output lines.
    Recognizes 'filename:line:message' diagnostics for the module-level
    inputfilename, classifies them ERROR/WARNING/UNKNOWN, and records a
    SIGSEGV crash under key 0.  Messages matching any regex in the
    module-level ignoreExprList are skipped.
    """
    results = {}
    for line in output:
        print(line, end=' ')            # echo the tool output as we scan it
        if 'SIGSEG' in line:
            results[0] = ["FAULT", line.strip()]
            continue
        # look for something of the form: filename:line:message
        msg = line.split(':', 2)
        if len(msg) < 3:
            continue
        if msg[0] != inputfilename:
            continue
        if len(msg[1]) == 0:
            continue
        if not msg[1][0] in string.digits:
            continue
        # it's in the right form; parse it
        linenumber = int(msg[1])
        msgtype = "UNKNOWN"
        uppermsg = msg[2].upper()
        if 'ERROR' in uppermsg:
            msgtype = "ERROR"
        if 'WARNING' in uppermsg:
            msgtype = "WARNING"
        msgtext = msg[2].strip()
        ignore = 0
        for ignoreExpr in ignoreExprList:
            if re.search(ignoreExpr, msgtext) is not None:
                ignore = 1
        if not ignore:
            results[linenumber] = [msgtype, msg[2].strip()]
    return results
def bitrigramFeature(self):
    """Compute normalized a-z bigram and trigram frequency features.

    Builds a space-joined token stream from self.sentences (each sentence
    is a sequence of tokens; the last 3 tokens are excluded, or the last 2
    when self.output is True), counts every a-z bigram/trigram in that
    stream and normalizes each table by its total.

    Returns (biFeature, triFeature) dicts of n-gram -> relative frequency.
    Raises ZeroDivisionError when the stream contains no n-grams.
    """
    bi_template = []
    tri_template = []
    biFeature = {}
    triFeature = {}
    # string.lowercase was removed in Python 3; ascii_lowercase is equivalent
    for l in string.ascii_lowercase:
        for m in string.ascii_lowercase:
            for n in string.ascii_lowercase:
                tri_template.append(l + m + n)
            bi_template.append(l + m)
    words = ''
    for temp in self.sentences:
        if self.output == False:
            length = len(temp) - 3
        else:
            length = len(temp) - 2
        for i in range(length):
            words += temp[i] + ' '
    for elem in bi_template:
        biFeature[elem] = float(words.count(elem))
    for elem in tri_template:
        triFeature[elem] = float(words.count(elem))
    triSum = sum(triFeature.values())
    biSum = sum(biFeature.values())
    for elem in biFeature:
        biFeature[elem] = biFeature[elem] / biSum
    for elem in triFeature:
        triFeature[elem] = triFeature[elem] / triSum
    return biFeature, triFeature
def remove_polyA(self, at_percentage=.9):
    """Remove patterns which are likely due to polyA tails from the lists.

    Helper to drop patterns that are mostly A/T (polyA tails, but also
    things like ATATAT), which are rarely interesting motifs — use at your
    own discretion.

    Arguments:
    o at_percentage -- fraction of A+T residues in a pattern
      (pattern_info[1]) above which it is removed from self._pattern_list.
    """
    remove_list = []
    # find all of the really AT rich patterns
    for pattern_info in self._pattern_list:
        seq = pattern_info[1]
        pattern_at = float(seq.count('A') + seq.count('T')) / float(len(seq))
        if pattern_at > at_percentage:
            remove_list.append(pattern_info)
    # now remove them from the master list
    for to_remove in remove_list:
        self._pattern_list.remove(to_remove)
def ScriptURL(target, web_page_url=None, absolute=0):
    """target - scriptname only, nothing extra
    web_page_url - the list's configvar of the same name
    absolute - a flag which if set, generates an absolute url
    """
    if web_page_url is None:
        web_page_url = mm_cfg.DEFAULT_URL
    if web_page_url[-1] != '/':          # '<>' was Python 2-only syntax
        web_page_url = web_page_url + '/'
    fullpath = os.environ.get('REQUEST_URI')
    if fullpath is None:
        fullpath = os.environ.get('SCRIPT_NAME', '') + \
                   os.environ.get('PATH_INFO', '')
    baseurl = urlparse.urlparse(web_page_url)[2]
    if not absolute and fullpath[:len(baseurl)] == baseurl:
        # Use relative addressing: one '../' per remaining path component
        fullpath = fullpath[len(baseurl):]
        i = fullpath.find('?')
        if i > 0:
            count = fullpath.count('/', 0, i)  # ignore slashes in the query
        else:
            count = fullpath.count('/')
        path = ('../' * count) + target
    else:
        path = web_page_url + target
    return path + mm_cfg.CGIEXT
def find_module_for_item(modules, item):
    """Find the matching module for a given item."""
    dbg("find_module_for_item: Searching for: %s" % item)
    module = None
    # We need a shortcut here for root level items
    if not ARGUMENTS.standalone and item.count('.') == 1:
        dbg("find_module_for_item: Using root-level shortcut")
        module = "hs"
    # Methods are very easy to shortcut
    if item.count(':') == 1:
        dbg("find_module_for_item: Using method shortcut")
        module = item.split(':')[0]
    if not module:
        # fall back to the longest module name that prefixes the item
        matches = [mod for mod in modules if item.startswith(mod)]
        matches.sort()
        dbg("find_module_for_item: Found options: %s" % matches)
        try:
            module = matches[-1]
        except IndexError:
            err("Unable to find module for: %s" % item)
    dbg("find_module_for_item: Found: %s" % module)
    return module
def extratabs(self, line):
    """Return the indent (in tabs) the NEXT line should get.

    Starts from the current line's leading tabs, adds one after a line
    ending in ':' and two after an unbalanced (, [ or {.  Comments and
    strings are stripped first (via PyFontify) so their brackets and
    colons don't count.
    """
    tabcount = 0
    for c in line:
        if c != "\t":                    # '<>' was Python 2-only syntax
            break
        tabcount = tabcount + 1
    last = 0
    cleanline = ""
    tags = PyFontify.fontify(line)
    # strip comments and strings
    for tag, start, end, sublist in tags:
        if tag in ("string", "comment"):
            cleanline = cleanline + line[last:start]
            last = end
    cleanline = cleanline + line[last:]
    cleanline = cleanline.strip()
    if cleanline and cleanline[-1] == ":":
        tabcount = tabcount + 1
    else:
        # extra indent after unbalanced (, [ or {
        # (loop names avoid shadowing the open() builtin)
        for open_br, close_br in (("(", ")"), ("[", "]"), ("{", "}")):
            count = cleanline.count(open_br)
            if count and count > cleanline.count(close_br):
                tabcount = tabcount + 2
                break
    return tabcount
def load(self, name):
    """Return a 2-D numpy array where a[:,0] is wavelength in Angstroms
    and a[:,1] is flux in counts/sec/angstrom/cm^2.

    Noisy spectra are smoothed with window_len from the .txt file.
    Ergs and AB Mag units are automatically converted to counts.
    """
    fname = self.objects[name]['dataFile']
    fullFileName = os.path.join(self.this_dir, "data", fname[0])
    if "fit" in fullFileName:
        a = self.loadSdssSpecFits(fullFileName)
    else:
        a = numpy.loadtxt(fullFileName)
    # renamed from `len` (shadowed the builtin); // keeps slice bounds ints
    win_len = int(self.objects[name]['window_len'][0])
    if win_len > 1:
        a[:, 1] = smooth.smooth(a[:, 1], window_len=win_len)[win_len // 2:-(win_len // 2)]
    try:
        fluxUnit = self.objects[name]['fluxUnit'][0]
        scale = float(fluxUnit.split()[0])
        a[:, 1] *= scale
    except ValueError:
        # fluxUnit has no numeric scale prefix; leave the flux unscaled
        print("error")
    if "ergs" in self.objects[name]['fluxUnit'][0]:
        a[:, 1] *= (a[:, 0] * self.k)        # ergs -> counts
    if "mag" in self.objects[name]['fluxUnit'][0]:
        # AB magnitude -> counts
        a[:, 1] = (10 ** (-2.406 / 2.5)) * (10 ** (-0.4 * a[:, 1])) / (a[:, 0] ** 2) * (a[:, 0] * self.k)
    return a
def callback(match):
    """re.sub callback: record a Comment object for each matched comment.

    Closes over `text`, `comments`, `self` and `Comment` from the
    enclosing scope; returns the match unchanged so the source text is
    not modified.
    """
    if not any([match.group(0).startswith('/%s' % s) for s in self.start_markers]):
        return match.group(0)
    start, end = match.start(), match.end()
    # line/column (0-based) of the match boundaries
    start_line = text.count('\n', 0, start)
    start_col = start - text.rfind('\n', 0, start) - 1
    if start_col < 0:
        start_col = 0
    end_line = text.count('\n', 0, end)
    end_col = end - text.rfind('\n', 0, end) - 1
    offset_line = start_line + 1
    raw_text = match.group(0)
    # Compute offset column: indentation of the comment body, allowing
    # for a leading '*' continuation marker.
    idx = raw_text.find('\n') + 1
    for offset_col in range(idx, len(raw_text)):  # xrange removed in py3
        if raw_text[offset_col] not in [' ', '\t']:
            break
    if raw_text[offset_col] == '*':
        offset_col += 1
        if raw_text[offset_col] in [' ', '\t']:
            offset_col += 1
    offset_col -= idx
    proc_text = self._stripText(raw_text, offset_col)
    comments.append(Comment(start_line, start_col, start, end_line, end_col,
                            end, offset_line, offset_col, proc_text, raw_text))
    return match.group(0)
def process_request(self, req):
    """Handle a wiki-comment POST: insert a WikiComments block into the
    target wiki page under the parent comment, save, and redirect back.

    Requires WIKI_MODIFY permission; uses Trac's WikiPage and the
    request's target_page/comment/comment_parent arguments.
    """
    req.perm.assert_permission('WIKI_MODIFY')
    page_name = req.args['target_page'][req.args['target_page'].find('wiki') + 5:]
    p = WikiPage(self.env, page_name)
    author_name = req.authname
    comment_text = req.args['comment']
    comment_parent = req.args['comment_parent']
    dt = datetime.now()
    comment_date = dt.strftime("%Y-%m-%d %H:%M:%S")
    comment_id = "%032x" % random.getrandbits(128)
    redirect_url = "%s%s#%s" % (req.base_path, req.args['target_page'], comment_id)
    changeset_comment = "%s..." % comment_text[:20]
    insertion_index = p.text.find("=%s" % comment_parent)
    if insertion_index != -1:
        # nesting level = unclosed WikiComments blocks before the insert point
        heads = p.text.count("{{{#!WikiComments", 0, insertion_index)
        tails = p.text.count("}}}", 0, insertion_index)
        level = heads - tails
        padding = ""
        comment_out = '%s{{{#!WikiComments author="%s" date="%s" id="%s""\n%s%s\n%s=%s\n%s}}}\n' \
            % (padding, author_name, comment_date, comment_id, padding,
               comment_text, padding, comment_id, padding)
        p.text = p.text[:insertion_index] + comment_out + p.text[insertion_index:]
        p.save(author_name, changeset_comment, req.remote_addr)
    req.redirect(redirect_url)
def writeArray(xy, file=None, format='%g ', comments=[], commentChar='#'):
    """Write a numeric array xy to `file` (or stdout if unspecified).

    xy          -- 1-D or 2-D numpy array.
    file        -- filename, an open writable file object, or None (stdout).
    format      -- printf-style format with one, two, or xy.shape[1]
                   specifiers (a line feed is appended if necessary).
    comments    -- comment lines written at the top of the output.
    commentChar -- prefix for comment lines.

    Raises IOError when a passed-in file object is not opened in write
    mode (the original printed an error and then crashed on an undefined
    variable).
    """
    if file:
        if hasattr(file, 'closed'):          # an already-open file object
            if file.mode == 'w':
                out = file
            else:
                raise IOError('IO.writeArray: file "%s" not opened in write mode!' % file)
        else:
            out = open(file, 'w')
        # print command line as first line to output file
        if not (sys.argv[0] == '' or 'ipython' in sys.argv[0]):
            sys.argv[0] = os.path.basename(sys.argv[0])
            out.write(commentChar + ' ' + ' '.join(sys.argv) + '\n' + commentChar + '\n')
    else:
        out = sys.stdout
    for com in comments:
        out.write('%s %s\n' % (commentChar, com.strip()))
    if len(xy.shape) == 1:
        if format.count('\n') == 0:
            format = format.rstrip() + '\n'
        for i in range(xy.shape[0]):
            out.write(format % xy[i])
    elif len(xy.shape) == 2:
        npc = format.count('%')
        if npc == 1:
            # repeat the single specifier for every column
            format = xy.shape[1] * format
        elif npc == 2 and xy.shape[1] > 2:
            # keep the first specifier, repeat the second for the rest
            f2 = format.rfind('%')
            format = format[:f2] + (xy.shape[1] - 1) * (' ' + format[f2:])
        elif npc != xy.shape[1]:
            print("ERROR --- IO.writeArray: check format (number of format specs doesn't match number of columns in data)")
            return
        if format.count('\n') == 0:
            format = format.rstrip() + '\n'
        for i in range(xy.shape[0]):
            out.write(format % tuple(xy[i, :]))
    else:
        print('ERROR --- IO.writeArray: writing arrays with more than 2 dimensions not supported!')
    # never close sys.stdout (the original did)
    if out is not sys.stdout and not out.closed:
        out.close()
def defswarp(namefile, imgname, _combine, gain=''):
    """Write a SWarp configuration file based on the package default.

    namefile -- path of the config file to create (returned).
    imgname  -- output image name (also used for the .weight.fits map).
    _combine -- combine type: median/average/sum (case-insensitive).
    gain     -- optional default gain written as GAIN_DEFAULT.
    """
    import floyds
    import re
    if _combine.lower() in ['median']:
        _combine = 'MEDIAN'
    elif _combine.lower() in ['average']:
        _combine = 'AVERAGE'
    elif _combine.lower() in ['sum']:
        _combine = 'SUM'
    swarpfile = floyds.__path__[0] + '/standard/sex/default.swarp'
    # copy the default config, substituting the output/combine/gain lines
    with open(swarpfile, 'r') as f:
        ss = f.readlines()
    with open(namefile, 'w') as ff:
        for i in ss:
            if i.count('IMAGEOUT_NAME') == 1:
                ff.write('IMAGEOUT_NAME ' + str(imgname) + ' # Output filename \n')
            elif i.count('WEIGHTOUT_NAME') == 1:
                ff.write('WEIGHTOUT_NAME ' + str(
                    re.sub('.fits', '.weight.fits', imgname)) + ' # Output weight-map filename \n')
            elif i.count('COMBINE_TYPE') == 1:
                ff.write('COMBINE_TYPE ' + str(_combine) + ' # MEDIAN,AVERAGE,MIN,MAX,WEIGHTED,CHI2 \n')
            elif i.count('GAIN_DEFAULT') == 1:
                if gain:
                    ff.write('GAIN_DEFAULT ' + str(gain) + ' # Default gain if no FITS keyword found \n')
                else:
                    ff.write(i)
            else:
                ff.write(i)
    return namefile
def main():
    """Report the line-ending style (unix/mac/MSDOS) of a user-chosen file."""
    pathname = EasyDialogs.AskFileForOpen(message="File to check end-of-lines in:")
    if not pathname:
        sys.exit(0)
    fp = open(pathname, "rb")
    try:
        data = fp.read()
    except MemoryError:
        EasyDialogs.Message("Sorry, file is too big.")
        sys.exit(0)
    if len(data) == 0:
        EasyDialogs.Message("File is empty.")
        sys.exit(0)
    # data is bytes (opened 'rb'), so count byte patterns
    number_cr = data.count(b"\r")
    number_lf = data.count(b"\n")
    # elif chain: the original showed "contains no lines" AND "unix-style"
    # for a file with neither CR nor LF
    if number_cr == number_lf == 0:
        EasyDialogs.Message("File contains no lines.")
    elif number_cr == 0:
        EasyDialogs.Message("File has unix-style line endings")
    elif number_lf == 0:
        EasyDialogs.Message("File has mac-style line endings")
    elif number_cr == number_lf:
        EasyDialogs.Message("File probably has MSDOS-style line endings")
    else:
        EasyDialogs.Message("File has no recognizable line endings (binary file?)")
    sys.exit(0)
def scoreParse(self, parse, correct):
    """Compare the algorithm's parse against the correct parse.

    The score sums, for each parenthesis kind, the positional distance
    between the n-th occurrence in `parse` and in `correct`, plus a
    penalty for parentheses present in one parse but not the other.
    Lower is better (0 = positions match exactly).
    """
    score = 0
    # the original duplicated this block for '(' and ')'
    for paren in ("(", ")"):
        compCnt = parse.count(paren)
        realCnt = correct.count(paren)
        compPos = -1
        realPos = -1
        # walk occurrence pairs; str.find returns -1 when one parse runs
        # out, which the abs() difference then penalizes
        for i in range(compCnt):
            compPos = parse.find(paren, compPos + 1, len(parse))
            realPos = correct.find(paren, realPos + 1, len(correct))
            score += abs(compPos - realPos)
        score += abs(compCnt - realCnt)
    return score
def fixcode(block, doctype):
    """Format a code block as HTML, colorizing Python via py2html when possible.

    block   -- raw code text; doctype -- hint, 'PYTHON' forces colorizing.
    Returns the block wrapped in the module-level code_block template.
    Relies on module-level helpers Detag, LeftMargin and py_formatter.
    """
    # Some HTML preparation
    block = Detag(block)
    block = LeftMargin(block)
    # Pull out title if available (a '#--- title ---#' first line)
    re_title = re.compile(r'^#\-+ (.+) \-+#$', re.M)
    if_title = re_title.match(block)
    if if_title:
        title = if_title.group(1)
        block = re_title.sub('', block)  # take title out of code
    else:
        title = ''
    # Process the code block with Py2HTML (if possible and appropriate)
    if py_formatter and (title.count('.py') or title.count('Python') or
                         title.count('python') or title.count('py_') or
                         doctype == 'PYTHON'):
        fh = open('tmp', 'w')
        fh.write(block)
        fh.close()
        py2html.main([None, '-format:rawhtml', 'tmp'])
        block = open('tmp.html').read()
        block = code_block % (title, block)
    # elif the-will-and-the-way-is-there-to-format-language-X:
    # elif the-will-and-the-way-is-there-to-format-language-Y:
    else:
        block = code_block % (title, '<pre>' + block + '</pre>')
    return block
def myFeatures(string):
    """Extract a numeric feature vector from a review text.

    Features (in order): text length; counts of '.', '!', '?';
    non-alphanumeric characters; occurrences of '10'; digits; '<';
    'star(s)'; mean/max/min over any 'N/10' ratings found (-1 each when
    none); number of uppercase letters.
    """
    ratings = re.findall(r"[0-9]0? *?/ *?10", string)
    if ratings:
        print(ratings)
        ratings = [int(r.split("/")[0].strip()) for r in ratings]
        mean, maxim, minim = np.mean(ratings), np.max(ratings), np.min(ratings)
        print(ratings, mean, maxim, minim)
    else:
        mean = maxim = minim = -1
    features = [len(string)]
    features += [string.count(mark) for mark in (".", "!", "?")]
    features.append(len(re.findall(r"[^0-9a-zA-Z_ ]", string)))  # non alphanumeric
    features.append(len(re.findall(r"10", string)))
    features.append(len(re.findall(r"[0-9]", string)))
    features.append(string.count("<"))
    features.append(len(re.findall(r"star(s)?", string)))
    features += [mean, maxim, minim]
    features.append(len(re.findall(r"[A-Z]", string)))
    return features
def readworkingdirfile(fname):
    """Return the contents of file `fname` from the working directory,
    converting unix (LF) or DOS (CRLF) line endings to Mac-style (CR).

    Returns None when the file is empty or too big to read.
    """
    # `with` guarantees the handle is closed (the original leaked it on
    # the early-return paths)
    try:
        with open(getworkingdir() + fname, 'rb') as fp:
            data = fp.read()
    except MemoryError:
        print('Sorry, file is too big.')
        return
    if len(data) == 0:
        print('File is empty.')
        return
    number_cr = data.count(b'\r')
    number_lf = data.count(b'\n')
    if number_cr == 0:
        # unix-style line endings: convert LF -> CR
        data = data.replace(b'\n', b'\r')
    elif number_cr == number_lf:
        # probably MSDOS-style (CRLF): drop the LFs, keeping the CRs
        data = data.replace(b'\n', b'')
    # mac-style and unrecognized files are returned unchanged
    return data
def run(self):
    # Thread body: spawn the ping subprocess (self.cmd) and watch its
    # output, resetting self.retries whenever a reply line (containing
    # 'ttl'/'TTL') is seen and incrementing it otherwise.  Setting
    # self.stop_flag from another thread kills the child and exits.
    # NOTE(review): written for Python 2 — bare count() is string.count
    # and proc.stdout yields str; under Python 3 the output is bytes and
    # would need decoding.
    try:
        if osflag:
            # POSIX: plain pipe, unbuffered
            proc=Popen(self.cmd,shell=False,stdin=None,stdout=PIPE,\
                stderr=STDOUT,bufsize=0)
        else:
            # Windows: hide the child's console window
            from subprocess import STARTUPINFO
            si=STARTUPINFO()
            si.dwFlags|=1          # STARTF_USESHOWWINDOW
            si.wShowWindow=0       # SW_HIDE
            proc=Popen(self.cmd,shell=False,stdin=None,stdout=PIPE,\
                stderr=STDOUT,bufsize=0,startupinfo=si)
        while 1:
            if self.stop_flag:
                # caller asked us to stop: kill the child and exit
                if osflag:
                    proc.send_signal(signal.SIGKILL)
                else:
                    proc.kill()
                break
            if osflag:
                # avoid blocking forever on readline: poll stdout for 1s
                if proc.stdout in select.select([proc.stdout],[],[],1)[0]:
                    line=proc.stdout.readline()
                else:
                    line=' \n'     # timeout: treated as a missed reply below
            else:
                line=proc.stdout.readline()
            if not len(line):
                break              # EOF: the child exited
            else:
                # ping replies contain a TTL field; anything else is a miss
                if count(line,'ttl') or count(line,'TTL'):
                    self.retries=0
                else:
                    self.retries=self.retries+1
                line=' '
            sleep(0.5)
        proc.poll()
    except: pass  # best-effort watcher: never let this thread raise
def action(self, user, cmd_list):
    # Log into self.telnet as `user` (password from self.passwd) and run
    # each command in cmd_list, echoing responses.  Returns 1 on success,
    # 0 when an expected prompt is not seen within the timeout.
    # NOTE(review): this block is corrupted — the literals between the
    # '******' runs were redacted (the login-prompt handling and the
    # username write); it cannot run as-is and must be restored from
    # version history before use.
    t = self.telnet
    t.write("\n")
    login_prompt = "login: "******"Password:"******"%s\n" % user)
    response = t.read_until(password_prompt, 3)
    if string.count(response, password_prompt):
        print response
    else:
        return 0
    t.write("%s\n" % self.passwd[user])
    response = t.read_until(self.command_prompt, 5)
    if not string.count(response, self.command_prompt):
        return 0
    for cmd in cmd_list:
        t.write("%s\n" % cmd)
        response = t.read_until(self.command_prompt, self.timeout)
        if not string.count(response, self.command_prompt):
            return 0
        print response
    return 1
def _cmd_telnet_exec_usr_raw_cmd(host, user, password, cmd):
    """Telnet into a machine, execute a command at the user level, and
    return the raw remote output.

    Args:
        host: Host IP address
        user: User login name
        password: Password
        cmd: Command to execute after successful telnet
    Returns:
        True, response: successful telnet login and response to the
            executed command.  NOTE: the command itself might fail, but
            True still means it was executed on the remote.
        False, response: unsuccessful telnet login and empty response.

    NOTE(review): this block is corrupted — the literals between the
    '******' runs were redacted (the login/password prompt handling);
    it cannot run as-is and must be restored from version history.
    """
    command_prompt = "$ "
    response = " "
    try:
        tn = telnetlib.Telnet(host)
    except:
        return(False, '')
    login_prompt = "login:"******"\n")
    password_prompt = "Password:"******"\n")
    response = tn.read_until(command_prompt, 3)
    if not string.count(response, command_prompt):
        return (False, '')
    tn.write("%s\n" % cmd)
    tn.write(b"exit\n")
    cmdResponse = tn.read_all().decode('ascii')
    tn.close()
    '''
    # Try this ----
    cmdResponse = ''
    while cmdResponse.find('$') == -1:
        cmdResponse = tn.read_all().decode('ascii')
        #cmdResponse = tn.read_very_eager()
        print cmdResponse
    # ends here ----
    '''
    # 'exit' echoed back means the session reached the shell and closed
    if not string.count(cmdResponse, "exit"):
        print cmdResponse
        return (False, None)
    return (True, cmdResponse)  # SUCCESS - Exec cmd after telnet
def calculate_gc(x):
    """Calculate the GC content of DNA sequence x.

    x: a string composed only of A's, T's, G's, and C's (any case).
    Returns the fraction of G+C bases.  An empty sequence returns 1
    (NOTE(review): looks like a division-by-zero guard; 0 might be more
    natural — confirm callers before changing).
    """
    if x == '':
        return 1
    x = x.upper()
    return float(x.count("G") + x.count("C")) / len(x)
def is_configuration_removed(self):
    """Return True when the system configuration shows exactly one reset
    'insecure = [0]' entry and one empty 'proxy_port = []' entry."""
    removed_config = self.get_system_configuration()
    return (removed_config.count('insecure = [0]') == 1 and
            removed_config.count('proxy_port = []') == 1)
def count_assignment(patch):
    """Count assignment operators in the changed lines of a unified diff.

    Only lines starting with '+' or '-' (excluding the '+++'/'---' file
    headers) are scanned.  Operators containing '=' that are not
    assignments ('==', '!=', '>=', '<=', '?=') are discounted — '=='
    twice, since it contributes two '=' characters.
    """
    num_assign = 0
    for patch_line in patch.split("\n"):
        changed = patch_line.startswith(("+", "-"))
        header = patch_line.startswith(("+++", "---"))
        if changed and not header:
            num_assign += (patch_line.count("=")
                           - patch_line.count("==") * 2
                           - patch_line.count("!=")
                           - patch_line.count(">=")
                           - patch_line.count("<=")
                           - patch_line.count("?="))
    return num_assign
def setName(self, fileName):
    """Set self.name and derive self.runNumber from a file name such as
    'gdc<NUMBER>.dat' or '<NUMBER>.000'.

    Fix: the original used string.lstrip/rstrip with multi-character
    arguments, which strip *character sets* rather than a literal
    prefix/suffix; this removes the literal 'gdc' prefix and
    '.dat'/'.000' suffix as intended.
    """
    self.name = fileName
    if "gdc" in self.name and fileName.startswith("gdc"):
        fileName = fileName[3:]
    if "dat" in self.name and fileName.endswith(".dat"):
        fileName = fileName[:-4]
    if ".000" in self.name and fileName.endswith(".000"):
        fileName = fileName[:-4]
    self.runNumber = int(fileName)
def is_configuration_modified(self, output):
    """Return True when the configuration contains exactly one modified
    'insecure' entry and one modified 'proxy_port' entry
    (self.modinsecure / self.modproxyport).

    output -- unused; kept for interface compatibility.
    """
    flag = False
    modified_config = self.get_system_configuration()
    numsec = modified_config.count('insecure = %s' % self.modinsecure)
    numpp = modified_config.count('proxy_port = %s' % self.modproxyport)
    if numsec == 1 and numpp == 1:
        flag = True
        logger.info("It's successful to verify modified configration")
    return flag
def mergeUnbalancedPipes(self, patterns):
    """Merge adjacent patterns whose '[' / ']' brackets are unbalanced.

    A pattern with unmatched brackets is joined to its successor with '|'
    until it balances; all results are whitespace-stripped.

    Fix: the original never removed the consumed successor from the list,
    so an unbalanced pattern was merged with the same neighbour over and
    over (an infinite loop whenever it could never balance).
    """
    ctr = 0
    while ctr < len(patterns) - 1:
        if patterns[ctr].count('[') != patterns[ctr].count(']'):
            patterns[ctr] = patterns[ctr] + '|' + patterns[ctr + 1]
            del patterns[ctr + 1]   # consume the merged neighbour
        else:
            ctr = ctr + 1
    return [p.strip() for p in patterns]
def is_configuration_modified(self):
    """Return True when the configuration contains exactly one modified
    'insecure' entry and one modified 'proxy_port' entry
    (self.modinsecure / self.modproxyport)."""
    modified_config = self.get_system_configuration()
    numsec = modified_config.count('insecure = %s' % self.modinsecure)
    numpp = modified_config.count('proxy_port = %s' % self.modproxyport)
    # an earlier revision required three occurrences; one is current
    return numsec == 1 and numpp == 1
def test(instring):
    """Test to check if instring is a well-formed bibtex field value:
    returns (number of double quotes mod 2,
             number of left braces - number of right braces).
    Escaped quotes (\\") are removed before counting.
    """
    instring = instring.replace('\\"', "")
    quote_count = instring.count('"')
    return divmod(quote_count, 2)[1], instring.count("{") - instring.count("}")
def generate_frequency_table(text, charset):
    """Generate a character frequency table for a given text and charset.

    text    -- sample of plaintext to analyze for frequency data.
    charset -- list of items to count, e.g. ['a',...,'z','aa','ab',...].

    Single characters contribute to the normalizing length; multigraph
    counts are normalized by that same single-character length.
    Returns {item: relative frequency} (all zeros for empty text).
    """
    freq_table = dict.fromkeys(charset, 0)
    text_len = 0
    for char in text:
        if char in charset:
            freq_table[char] += 1
            text_len += 1
    # multigraphs (digraphs etc.) are counted as substrings
    for multigraph in [item for item in charset if len(item) > 1]:
        freq_table[multigraph] = text.count(multigraph)
    # Normalize frequencies with length of text
    for key in freq_table:
        freq_table[key] = freq_table[key] / float(text_len) if text_len else 0
    return freq_table
def checkNameValidity(self, params):
    """Validate self.name as a legal relationship (variable) name.

    Returns None when valid, otherwise an error message string (one
    historical branch returns a (message, graphObject) tuple — preserved
    for compatibility with existing callers).
    """
    if not self.name.isNone:
        vname = self.name.getValue()
        # the name is mandatory
        if (not vname) or (vname == ""):
            return "Relationship name must be specified"
        # no white space allowed anywhere in the name
        if " " in vname:
            return "Invalid relationship name, no white spaces allowed"
        # first character must be a letter or '_'
        # NOTE(review): these raw range tests also admit the ASCII
        # characters between 'Z' and 'a' ('[', '\\', ']', '^', '`');
        # kept as-is to preserve behaviour (str.isidentifier would differ).
        if (vname[0] >= '0') and (vname[0] <= '9'):
            return "Invalid variable name, first character must be a letter or '_'"
        if vname[0] != '_' and (vname[0] < 'A' or vname[0] > 'z'):
            return "Invalid relationship name, first character must be a letter or '_'"
        # remaining characters: letters, digits or underscore
        for c in range(len(vname) - 1):
            if vname[c + 1] < 'A' or vname[c + 1] > 'z':    # not a letter
                if vname[c + 1] < '0' or vname[c + 1] > '9':  # not a number
                    if vname[c + 1] != '_':                   # not underscore
                        return ("Invalid relationship name, character '" +
                                vname[c + 1] + "' is not allowed", self.graphObject_)
    return None
def print_error(input, err, scanner):
    """Print a parse-error message with a caret pointing at the error column.

    input   -- the full source text being parsed.
    err     -- error object with .pos (offset into input) and .msg.
    scanner -- scanner whose nearby tokens are listed after the message.
    """
    p = err.pos
    # Figure out the line number
    line = input[:p].count('\n')
    print(err.msg + " on line " + repr(line + 1) + ":")
    # Now try printing part of the line
    text = input[max(p - 80, 0):p + 80]
    p = p - max(p - 80, 0)
    # Strip to the left
    i = text[:p].rfind('\n')
    j = text[:p].rfind('\r')
    if i < 0 or (0 <= j < i):
        i = j
    if 0 <= i < p:
        p = p - i - 1
        text = text[i + 1:]
    # Strip to the right
    i = text.find('\n', p)
    j = text.find('\r', p)
    if i < 0 or (0 <= j < i):
        i = j
    if i >= 0:
        text = text[:i]
    # Now shorten the text
    while len(text) > 70 and p > 60:
        # Cut off 10 chars
        text = "..." + text[10:]
        p = p - 7
    # Now print the string, along with an indicator
    print('> ', text)
    print('> ', ' ' * p + '^')
    print('List of nearby tokens:', scanner)
def collectRaidStatus(configobj):
    """Check /proc/mdstat for degraded Linux software-RAID devices.

    Returns True when all md devices are healthy, otherwise a string
    describing each degraded device (and rebuild progress when shown).
    Based on:
    https://github.com/jnv/ansible-fedora-infra/blob/master/roles/nagios_client/files/scripts/check_raid.py
    """
    with open('/proc/mdstat') as fh:     # original leaked the file handle
        mdstat = fh.read().split('\n')
    error = ""
    for i, line in enumerate(mdstat):
        if line[0:2] == 'md':
            device = line.split()[0]
            # the status field looks like [UU]/[U_]; '_' marks a dead member
            status = mdstat[i + 1].split()[3]
            if "_" in status:
                # see if we can figure out what's going on
                err = mdstat[i + 2].split()
                msg = "device=%s status=%s" % (device, status)
                if len(err) > 0:
                    msg = msg + " rebuild=%s" % err[0]
                if not error:
                    error = msg
                else:
                    error = error + ", " + msg
    if not error:
        return True
    return error
def _diff_line_based(left_text, right_text, include_deletions=True,
                     include_insertions=True, replace_as_insert=False,
                     replace_as_delete=False, ratio_skip=0.7,
                     line_length=model.Text.LINE_LENGTH):
    """Diff two texts and return a list of HTML lines with <ins>/<del>/
    <span> markup, re-balancing any tag that a line break would split.

    ratio_skip is kept for interface compatibility (unused here).
    """
    from difflib import SequenceMatcher
    left = _decompose(left_text)
    right = _decompose(right_text)
    s = SequenceMatcher(None, left, right)
    html_match = ''
    for op, i1, i2, j1, j2 in s.get_opcodes():
        if op == 'equal':
            html_match += _compose(left[i1:i2])
        elif op == 'delete' and include_deletions:
            html_match += '<del>' + _compose(left[i1:i2]) + '</del>'
        elif op == 'insert' and include_insertions:
            html_match += '<ins>' + _compose(right[j1:j2]) + '</ins>'
        elif op == 'replace':
            if replace_as_delete:
                html_match += '<del>' + _compose(left[i1:i2]) + '</del>'
            elif replace_as_insert:
                html_match += '<ins>' + _compose(right[j1:j2]) + '</ins>'
            else:
                html_match += '<span>' + _compose(right[j1:j2]) + '</span>'
    carry = []
    lines = []
    for line in linify(html_match, line_length):
        # prepend tags left open by the previous line
        for val in reversed(carry):
            line = val + line
        carry = []
        for tag_begin, tag_end in (('<span>', '</span>'),
                                   ('<ins>', '</ins>'),
                                   ('<del>', '</del>')):
            begin_count = line.count(tag_begin)
            end_count = line.count(tag_end)
            if begin_count > end_count:
                # tag opens here but closes on a later line: close it now
                # and re-open on the next line
                line = line + tag_end
                carry.append(tag_begin)
            elif begin_count < end_count:
                line = tag_begin + line
        lines.append(line)
    return lines
def create_tcp_controller(self, tcp_name=None, initial_controller=None, require_type=None):
    """Create new instance of TCPController.

    Arguments:
    tcp_name -- String to reference new TCPController over TCP
    Optional arguments:
    initial_controller -- Initial value of TCPController instance
    require_type -- force this as TCPController instance's return_type
    """
    class Parser:
        def __init__(self, tcp_name, most_recent_command):
            self.tcp_name = tcp_name
            self.most_recent_command = most_recent_command

        def parse_func(self, match):
            # record the last value assigned to this name over TCP
            self.most_recent_command[self.tcp_name] = match.groups()[-1]
            return ""

    if tcp_name is None:
        raise ValueError("Must specify tcp_name")
    if tcp_name in self.names:
        raise ValueError('tcp_name "%s" already in use.' % tcp_name)
    if ' ' in tcp_name:
        raise ValueError('tcp_name "%s" cannot have spaces.' % tcp_name)
    if tcp_name == "quit":
        raise ValueError('tcp_name "%s" conflicts with reserved word.' % tcp_name)
    if initial_controller is None:
        # create default controller
        initial_controller = VisionEgg.FlowControl.ConstantController(
            during_go_value=1.0, between_go_value=0.0)
    elif not isinstance(initial_controller, VisionEgg.FlowControl.Controller):
        print(initial_controller)
        raise ValueError('initial_controller not an instance of VisionEgg.FlowControl.Controller')
    if require_type is None:
        require_type = initial_controller.returns_type()
    # Create initial None value for self.last_command dict
    self.last_command[tcp_name] = None
    # Create values for self.names dict tuple:
    # ( controller, name_re, parser, require_type )
    controller = TCPController(tcp_name=tcp_name, initial_controller=initial_controller)
    name_re_str = re.compile("^" + tcp_name + r"\s*=\s*(.*)\s*\n$", re.MULTILINE)
    parser = Parser(tcp_name, self.last_command).parse_func
    self.names[tcp_name] = (controller, name_re_str, parser, require_type)
    self.socket.send('"%s" controllable with this connection.\n' % tcp_name)
    return controller
def calc_entropy(string):
    """Calculates the Shannon entropy of a string"""
    length = len(string)
    # one probability per distinct character
    distinct = dict.fromkeys(list(string))
    probabilities = [float(string.count(symbol)) / length for symbol in distinct]
    return -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
def test4():
    """Demo of string literals and str methods.

    Ported from Python 2: print statements, the u''/ur'' literal prefixes
    and the removed string-module functions; list(zip(...)) keeps the
    Python 2 printed form of zip's result.
    """
    print(r'/n', u'abc', r'Hello\nWorld!')
    s, t, g = 'fofff1', 'obr2', 'lilang'
    nex = list(zip(s, t, g))
    print(nex)
    print(s.capitalize())
    print(s.center(10), s.count('f'), s.find('f', 5, 5), s.isalnum())
def receive_handover(self):
    """Count handover-related S1AP messages in mintlog.txt.

    Stores the counts in the module-level a/b/c_success_count globals
    and prints them.
    """
    global a_success_count
    global b_success_count
    global c_success_count
    a = "MSG: [S1ap: eNB->MME] ENB STATUS TRFR"
    b = "MSG: [S1ap: eNB<-MME] UE CONTEXT REL CMD"
    c = "MSG: [S1ap: eNB->MME] UE CONTEXT REL CMPL"
    with open('mintlog.txt', 'r') as f:   # `with` guarantees the close
        line = f.read()
    a_success_count = line.count(a)
    b_success_count = line.count(b)
    c_success_count = line.count(c)
    print("a_success_count is :", a_success_count)
    print("b_success_count is :", b_success_count)
    print("c_success_count is :", c_success_count)
def Query(szMutex):
    """Return 1 when the named p4mutex lock is free, 0 when it is HELD.

    Shells out to 'p4mutex query <name>' and scans its output for 'HELD'.
    """
    szLines = os.popen('p4mutex query ' + szMutex).read()
    if szLines.count('HELD') == 0:
        return 1
    print("Lock held")
    return 0
def getOSType(prototypesDir):
    """Return the prototype directory entry matching the current platform.

    The platform name (sys.platform lowercased, any '-suffix' stripped)
    is compared against each entry of prototypesDir; the first entry
    whose lowercase name occurs in the platform string is returned.
    Falls back to the bare platform name when nothing matches.
    """
    ostype = sys.platform.lower()
    if '-' in ostype:
        ostype = ostype[:ostype.find('-')]
    for proto in os.listdir(prototypesDir):
        if proto.lower() in ostype:
            return proto
    return ostype
def equal_string(a, b):
    """Recursively compare two strings by repeatedly removing the first
    four characters of `a` from both; they are 'equal' when both reduce
    to '' together.

    Quirk preserved: two literal 'alpha' inputs compare as NOT equal —
    the first branch looks intentional but is undocumented (confirm
    against callers before changing).
    """
    if a == 'alpha' and b == 'alpha':
        return False
    if a == '' and b == '':
        return True
    if (a == '' and b != '') or (a != '' and b == ''):
        return False
    sub = a[0:4]
    if a.count(sub) != b.count(sub):
        return False
    # remove every occurrence of the probe substring and recurse;
    # `a` always shrinks, so the recursion terminates
    return equal_string(a.replace(sub, ''), b.replace(sub, ''))
def update_pos(self):
    "Updates (line,col)-pos by checking processed blocks."
    # count newlines between the previous update position and the current
    breaks = self.data.count("\n", self.last_upd_pos, self.pos)
    self.last_upd_pos = self.pos
    if breaks > 0:
        self.line = self.line + breaks
        # remember where the current line starts (offset of last newline)
        self.last_break = self.data.rfind("\n", 0, self.pos)
def is_isogram2(string):
    """Return True when `string` contains no repeated characters
    (case-insensitive); empty input counts as an isogram.

    Fix: the original returned from inside the first loop iteration, so
    only the first character's count was ever checked (e.g. 'baa'
    wrongly passed).  Debug prints preserved.
    """
    if string == '':
        return True
    chars = list(string.lower())
    print(chars)
    print(chars.count('o'))
    countList = [chars.count(ch) for ch in chars]
    print(countList)
    return all(occurrences == 1 for occurrences in countList)
def most_repeated_letters(a):
    '''Return the number of times the most frequent letter repeats.'''
    from collections import Counter
    # single O(n) counting pass instead of calling .count() once per
    # character (O(n^2)); default=0 preserves the result for an empty string
    return max(Counter(a).values(), default=0)
def get_rating(self):
    """Find the film's rating. From 0 to 10. Convert if needed when assigning."""
    tmp_rating = gutils.trim(self.page, "ticas por:</B></Center>", "c_critica.pl?id=")
    if tmp_rating:
        # each 'estrela.gif' star image is worth 2 points on the 0-10 scale
        self.rating = str(float(tmp_rating.count('estrela.gif')) * 2)
def strip_disk(path):
    r"""Strip the drive letter from a Windows path.

    Example: ``c:\ot\test`` returns ``\ot\test``.  Paths without a ':' are
    returned unchanged.  The original split on ':\\' which silently dropped
    the leading backslash (returning ``ot\test``), contradicting this
    documented example; splitting on ':' keeps it.
    """
    if ':' not in path:
        return path
    return path.split(':', 1)[1]
def run():
    """Run Django's syncdb and return the CREATE TABLE statements it issued."""
    from django.core.management import call_command
    call_command('syncdb')
    # connection.queries is Django's per-connection SQL log (module-level
    # `connection` is imported elsewhere in this file)
    return [q['sql'] for q in connection.queries
            if q['sql'].count("CREATE TABLE") == 1]
def deja_vu():
    """Read one line from stdin; print "Deja Vu" if any character occurs
    more than once, otherwise "Unique"."""
    text = input()
    # a duplicate exists iff the set of characters is smaller than the text
    # (O(n) instead of calling .count() per character)
    print("Deja Vu" if len(set(text)) != len(text) else "Unique")
def parselist(string, sep):
    """Split *string* on *sep* and strip whitespace from every piece.

    If *sep* does not occur, the original string is returned unstripped
    as a one-element list (preserving the original behavior).
    """
    if sep not in string:
        return [string]
    return [piece.strip() for piece in string.split(sep)]
def Entropy(string, base=2.0):
    """Shannon entropy of the character distribution of *string*.

    Measured in units of log *base* (bits by default).  Returns 0 for the
    empty string.
    """
    import math
    from collections import Counter
    total = len(string)
    # one counting pass instead of calling .count() per distinct character
    probabilities = [freq / float(total) for freq in Counter(string).values()]
    return -sum(p * math.log(p) / math.log(base) for p in probabilities)
def countCharInFile(fileName, char):
    """Print how many times *char* occurs in the file *fileName*.

    Reads the whole file once.  The original loop appended an extra "\\n"
    after every line even though readline() already keeps the newline, so
    counting '\\n' gave twice the true number; it also built the string
    with quadratic += and never closed the file.
    """
    with open(fileName) as fo:
        text = fo.read()
    print(char + ' occurs ' + str(text.count(char)) + ' times in string')
def Compression(self, string, path_save_code, path_save_table, path_save_output): ''' This function compression ''' #print('Do') Tree = [] l = len(string) i = 0 while self.Sum(Tree) < l: #print('Xuong') while string[i] in [row[0] for row in Tree]: i = i + 1 #print('i',i) Tree.append([string[i], string.count(string[i])]) i = i + 1 print(Tree) # Sort Tree.sort(key=lambda Tree: Tree[1], reverse=1) # Build Tree len_l = len(Tree) self.Build_Tree(Tree, 0, len_l - 1) # Convert list to dictionary dict = {} for i in range(len_l): dict[Tree[i][0]] = Tree[i][1:] self.Tree = dict print('Shannon Fano Tree', self.Tree) # Covert code for Shannon Fano for i in range(l): temp_str = '' for char in self.Tree[string[i]][1:]: temp_str = temp_str + str(char) self.encode = self.encode + temp_str pickle.dump(self.encode, open(path_save_code, 'wb')) # Create Table Probability Table = {} for key in self.Tree.keys(): Table[key] = self.Tree[key][0] print('TABLE Probability: ', Table) #pickle.dump(self.Tree,open(path_save_tree,'wb')) pickle.dump(Table, open(path_save_table, 'wb')) self.B1 = math.ceil( len(self.encode) / 8) + os.stat(path_save_table).st_size # Save encode to file pickle.dump((self.encode, self.Tree), open(path_save_output, 'wb')) # Save tree #pickle.dump(self.Tree,open(path_save_tree,'wb')) return self.encode
def run_transmiter(n,nbytes,count,carrier_map,tb): tb.start() # start flow graph #nbytes = int(1e6 * options.megabytes) #n = 0 tx_success=0 pktno = 0 pkt_size = int(options.size) start_time = 0 last_time = 0 #carrier_map="FFFFFE7FFFFFF" new_freq = options.tx_freq count_init=0 print "carrier_map_passed_by_main=",carrier_map # string_pipe=open('string_pipe','r') while n < nbytes: #if count_init >2000: ## carrier_map_new=string_pipe.readline() ## carrier_map_new=carrier_map[0:(len(carrier_map)-1)] #if (pktno%40000)==0: ##carrier_map = os.read(fd,options.occupied_tones/4) #The sensor is a part of this code so no need to read from pipe #if string.count(carrier_map,'0') >= 180: #carrier_map="000000000000" #tx_success=tb.txpath.sink.set_center_freq(new_freq+1e6) #if tx_success: #new_frew=new_freq+1e6 #print "successfully" #print "Freq changed to " #print new_freq if string.count(carrier_map,'0') >= 190: print "Waiting no bandwidth available" return n if options.from_file is None: data = (pkt_size - 2) * chr(pktno & 0xff) else: if pktno < 20: data = (pkt_size - 2) * chr(pktno & 0xff) else: data = source_file.read(pkt_size - 2) if data == '': break; if (count==0): break; count=count-1 count_init=count_init + 1 payload = struct.pack('!H', pktno & 0xffff) + data send_pkt(tb,payload,carrier_map) n += len(payload) sys.stderr.write('.') if options.discontinuous and pktno % 10 == 1: time.sleep(1) pktno += 1 #send_pkt(eof=True) tb.stop() tb.wait() print "n before retunning from trans loop=",n print "nbytes after retunning from trans loop=",nbytes return n
def getForces(names, coords, forces, L, fframe):
    """Fill the preallocated lists *names*, *coords* and *forces* with the
    atom data of frame *fframe* parsed from the output lines *L*.

    Scans for the module-level searchterm1/searchterm2 frame markers
    (defined elsewhere in this file); a searchterm3 match aborts the scan.
    Each frame's '##'-tagged lines are expected to carry
    name x y z fx fy fz columns.  Mutates the lists in place; returns None.
    """
    na = len(names)
    l = len(L) ## no of lines in file
    line_min = 0
    cur_frame = 0
    for line in range(l): ## loop over lines of file1
        if line > line_min:
            # searchterm3 marks the end of useful output -- stop parsing.
            num_matches3 = string.count(L[line], searchterm3)
            if num_matches3:
                return
            num_matches1 = string.count(L[line], searchterm1)
            num_matches2 = string.count(L[line], searchterm2)
            num_matches3 = string.count(L[line], 'Timer:')
            num_matches = num_matches1 + num_matches2
            # A frame header matches term 1 or 2 but must not be a timer line.
            if num_matches and not num_matches3:
                j = 0
                print 'Frame: ', cur_frame, ' Read forces starting at line ', line, ' for ', na, ' atoms'
                # The na atom lines (plus one slack line) follow the header.
                for line2 in range(line + 1, line + na + 2):
                    if string.count(L[line2], '##') > 0:
                        words = string.split(L[line2])
                        # Skip everything up to and including the '##' tag.
                        shift = 0
                        while words[shift] != '##':
                            shift = shift + 1
                        shift = shift + 1
                        # An optional '*' flag may precede the atom name.
                        if words[shift] == '*':
                            shift = shift + 1
                        word = words[shift:]
                        name = word[0]
                        #print name
                        x = word[1]
                        y = word[2]
                        z = word[3]
                        fx = word[4]
                        fy = word[5]
                        fz = word[6]
                        names[j] = name
                        coords[j] = x + '\t' + y + '\t' + z
                        forces[j] = fx + '\t' + fy + '\t' + fz
                        j = j + 1
                # Resume scanning after this frame's block.
                line_min = line + na + 2
                if fframe == cur_frame:
                    print 'break'
                    break
                cur_frame = cur_frame + 1
def testCompile(self):
    """Check we can compile a fractal and the resulting .so looks ok."""
    ff = self.compiler.files["gf4d.frm"]
    # str.find returns -1 when the text is absent; the original used
    # string.index, which raises ValueError instead of ever returning -1,
    # so the assertNotEqual(-1) check could never actually fail cleanly.
    self.assertNotEqual(ff.contents.find("Modified for Gf4D"), -1)
    self.assertNotEqual(ff.get_formula("T03-01-G4"), None)
    self.assertEqual(len(ff.formulas) > 0, 1)
    f = self.compiler.get_formula("gf4d.frm", "T03-01-G4")
    self.assertEqual(f.errors, [])
    commands.getoutput("rm -f test-out.so")
    cg = self.compiler.compile(f)
    self.compiler.generate_code(f, cg, "test-out.so", None)
    # check the output contains the right functions
    (status, output) = commands.getstatusoutput('nm test-out.so')
    self.assertEqual(status, 0)
    for sym in ("pf_new", "pf_calc", "pf_init", "pf_kill"):
        self.assertEqual(output.count(sym), 1)
def isNiceStr2(s):
    """Check the two 'nice string' rules: some two-letter pair appears again
    later without overlapping, and some letter repeats with exactly one
    letter between the two copies.  Returns True only when both hold."""
    n = len(s)
    # a pair s[i:i+2] counts only if it shows up again at index i+2 or later
    pair_repeats = any(s[i:i + 2] in s[i + 2:] for i in range(n - 2))
    # letter sandwich: the same letter two positions apart (e.g. 'xyx')
    sandwich = any(s[i] == s[i + 2] for i in range(n - 2))
    return pair_repeats and sandwich
def count_occ(f_name, str1):
    """Return the total number of occurrences of *str1* across the lines
    of the file *f_name*.

    NOTE: occurrences spanning a line boundary are not counted, matching
    the original line-by-line behavior.
    """
    # 'with' closes the file even if reading fails
    with open(f_name, 'r') as dr:
        return sum(line.count(str1) for line in dr)
def getNumOrbitals(L):
    """Scan the lines *L* for 'Number of states' and return its integer value
    (the fifth whitespace-separated token of the matching line).

    Raises ValueError if no line matches (the original died with an
    UnboundLocalError) or if the token is not an integer.  int() replaces
    the original eval(), which executed arbitrary text from the file.
    """
    searchterm = 'Number of states'
    for line in L:
        if searchterm in line:
            return int(line.split()[4])
    raise ValueError("%r line not found" % searchterm)
def init_plot():
    """Ensure the plot directory exists, then run plot_def() on every
    temperature output directory.

    A directory qualifies when its name contains exactly one 'T' and no 'l'.
    Uses the module-level path_plot / path_out / plot_def defined elsewhere
    in this file.
    """
    if os.path.exists(path_plot):
        print('\n\tpath %s exist!' % path_plot)
    else:
        print('\n\tmaking directory')
        os.makedirs(path_plot)
        print('\t%s created successfully' % path_plot)
    dirs = os.listdir(path_out)
    dirs.sort()  # process runs in a deterministic order
    for d in dirs:
        if d.count('l') == 0 and d.count('T') == 1:
            print('\n\tStart working on ' + d)
            #adjust(path_out, d)
            plot_def(d)
            print('\n\tEnd working on ' + d)
    print('\n\tFinally End\n')