def demo_string_func():
    # Demo of the legacy Python 2 `string` module functions next to the
    # equivalent str methods.  Output-only; no return value.
    stra = "h e l l o w o r l d"
    print string.capwords(stra)  ##H E L L O W O R L D but "hello world" -> "Hello World"
    strFrom = "HeloWd"
    strTo = "WorlHe"
    # NOTE: shadows the builtin `map`.
    map = string.maketrans(strFrom, strTo)
    print "Hello World".translate(map)  ## build map H -> W ;e -> o; l -> r; o -> l; W -> h; d -> e
    strb = "192"
    print string.atof(strb)  # 192.0
    print string.atoi(strb)  # 192 string to int
    print string.atol(strb)  # 192 string to long
    print string.capitalize(stra)  # Return a copy of stra with only its first character capitalized.
    print string.find(stra, 'h e')  ##Return the lowest index in s where the substring sub is found such that sub is wholly contained in s[start:end]. Return -1 on failure. Defaults for start and end and interpretation of negative values is the same as for slices.
    print string.rfind(stra, 'w')  # Like find() but find the highest index.
    print string.index(stra, 'l')  ##Like find() but raise ValueError when the substring is not found.
    print string.rindex(stra, 'l')  # Like rfind() but raise ValueError when the substring is not found.
    print string.count(stra, 'o')  # Return the number of (non-overlapping) occurrences of substring sub in string s[start:end]. Defaults for start and end and interpretation of negative values are the same as for slices.
    print string.lower(stra)  # first char is lower h e l l o w o r l d
    print string.split(stra, 'o')  ##split the stra with condition 'o'
    # Same operations, via str methods.
    print 1, string.capwords(stra)
    print 2, stra.isalpha()
    print 3, stra.replace("hello", "123")
    print 4, stra.strip()
    print 5, len(stra)
def establish_sqlite_connection(address, profile):
    """Establish an SQLite connection.

    Splits `address` into directory part and database file name (the
    separator depends on platform), asks `profile` whether that database
    is already connected, registers the connection with the data handler
    and publishes pub/sub notifications.  Returns True on success, False
    otherwise (and the ValueError *class* on a win32 parse failure --
    NOTE(review): inconsistent with the non-win32 branch).
    """
    import sys
    i = ""
    if sys.platform != "win32":
        try:
            i = string.rindex(address, "/")
        except ValueError:
            # NOTE(review): this prints the exception class and falls
            # through with i == "", so the slicing below raises TypeError.
            print ValueError
    else:
        try:
            i = string.rindex(address, "\\")
        except ValueError:
            return ValueError
    name = address[i:]       # file name, including the leading separator
    address = address[:i+1]  # directory part, including the trailing separator
    #check if already connected to database
    dbID = profile.conn_not_exist('sqlite', name, address)
    if dbID != False:
        # Reuse the existing database id.
        connID = datahandler.ConnectionManager.create_new_data_connection(u"sqlite", address, name, dbID = dbID)
    else:
        connID = datahandler.ConnectionManager.create_new_data_connection(u"sqlite", address, name)
    if connID != False:
        if datahandler.DataHandler.add(connID):
            pub.sendMessage('dataconnection.save', connID = connID, type = "sqlite", address = address, dbName = name)
            pub.sendMessage('database.added', connID)
            return True
        else:
            return False
    else:
        return False
def prosite_to_grouped_re(pattern):
    """convert a valid Prosite pattern into an re with groups for each term"""
    # True when the pattern starts with a "[<" (may-match-at-start) term.
    flg = (pattern[:2] == "[<")
    s = string.replace(pattern, "{", "[^")
    # Don't delete the "-" characters: use them to place the ()s
    s = string.translate(s, _prosite_trans, ".")
    # Get the [< and >] terms correct
    if flg:
        i = string.index(s, "]")
        s = "(?:^|[" + s[2:i] + "])" + s[i+1:]
    if s[-2:] == "$]":
        i = string.rindex(s, "[")
        s = s[:i] + "(?:" + s[i:-2] + "]|$)"
    if s[-3:] == "$]$":
        i = string.rindex(s, "[")
        s = s[:i] + "(?:" + s[i:-3] + "]|$)$"
    # Watch out for unescaped < and > terms
    if s[:1] == "^":
        s = "^(" + s[1:]
    else:
        s = "(" + s
    if s[-1:] == "$":
        s = s[:-1] + ")$"
    else:
        s = s + ")"
    # Each "-"-separated Prosite term becomes its own re group.
    return string.replace(s, "-", ")(")
def uncompressdir(dir, androiddir, uncatdir, undecompdir):
    # Extract every *.tar* and *.zip archive found directly under `dir`
    # into a same-named subdirectory, then hand the result to
    # categorizedir().  Encrypted zips are tried with two passwords;
    # undecryptable ones are moved to `undecompdir`.
    tarpath = dir + '/*.tar*'
    tarfiles = glob.glob(tarpath)
    for onetar in tarfiles:
        periodindex = string.index(onetar, ".tar")
        lastslashindex = string.rindex(onetar, "/")
        tarname = onetar[lastslashindex+1 : periodindex]  # archive base name
        nowtarpath = dir + '/' + tarname
        if not os.path.exists(nowtarpath):
            os.makedirs(nowtarpath)
        tar = tarfile.open(onetar, 'r')
        # NOTE(review): extracting untrusted tar members is vulnerable to
        # "../" path traversal -- confirm inputs are trusted.
        for item in tar:
            tar.extract(item, nowtarpath)
        categorizedir(nowtarpath, androiddir, uncatdir)
    zippath = dir + '/*.zip'
    zipfiles = glob.glob(zippath)
    for onezip in zipfiles:
        periodindex = string.index(onezip, ".zip")
        lastslashindex = string.rindex(onezip, "/")
        zipname = onezip[lastslashindex+1 : periodindex]
        nowzippath = dir + '/' + zipname
        if not os.path.exists(nowzippath):
            os.makedirs(nowzippath)
        fZip = open(onezip, 'rb')
        zip = zipfile.ZipFile(fZip)
        # Detect encryption from the per-member flag bits (bit 0).
        is_encpted = 0
        for zinfo in zip.infolist():
            is_encpted = zinfo.flag_bits & 0x1
            if is_encpted:
                break
        if is_encpted:
            passwd = 'infected666' + zipname[len(zipname) - 1]  # This is default password used, need change for other uses
            for item in zip.namelist():
                try:
                    zip.extract(item, nowzippath, passwd)  # Sometimes password is needed
                except RuntimeError as e:
                    if 'password' in e[0]:
                        # First password failed: retry with the fallback.
                        passwd = 'infected'
                        try:
                            zip.extract(item, nowzippath, passwd)
                        except RuntimeError as e:
                            print 'nowzip',
                            print onezip
                            print 'RuntimeError in second trail e: ',
                            print e[0]
                            # Give up on this archive entirely.
                            os.system("mv " + onezip + " " + undecompdir)
                            os.system("rm -rf " + nowzippath)
                            break
        else:
            for item in zip.namelist():
                zip.extract(item, nowzippath)
        categorizedir(nowzippath, androiddir, uncatdir)
def shortDescription(self):
    # Build the human-readable description for this pdfwrite regression
    # test.  Also checks that the baseline raster for this
    # file/device/dpi/band combination exists; sets self.skip = 1 if not.
    file = "%s.pdf.%s.%d.%d" % (self.file[string.rindex(self.file, '/') + 1:], self.device, self.dpi, self.band)
    rasterfilename = gsconf.rasterdbdir + file + ".gz"
    if self.band:
        banded = "banded"
    else:
        banded = "noband"
    filename_base = os.path.basename(self.file)
    filename_details = "%s (%s/%ddpi/%s)" % (filename_base, self.device, self.dpi, banded)
    message = "pdfwrite testing " + filename_details
    if not os.access(rasterfilename, os.F_OK):
        message = "ERROR \ncannot find " + rasterfilename + " for " + filename_details
        print myself, message
        self.skip = 1
    else:
        # Report when the baseline raster was produced (its mtime).
        ct = time.localtime(os.stat(rasterfilename)[stat.ST_MTIME])
        baseline_date = "%s %d, %4d %02d:%02d" % (
            calendar.month_abbr[ct[1]], ct[2], ct[0], ct[3], ct[4])
        message = "Checking pdfwrite of %s against baseline set on %s" % (
            filename_details, baseline_date)
    return message
def Spider():
    # Scrape the current online-user count from bbs.xuegod.cn and append
    # a timestamped record to test.txt.
    request = urllib2.Request("http://bbs.xuegod.cn")
    response = urllib2.urlopen(request)
    reader = response.read()
    #print reader  # page source
    usernump = re.compile(r'人数<br><em>.*?</em>')  # pattern for the user-count fragment
    usernummatch = usernump.findall(reader)
    #print usernummatch
    if usernummatch:
        currentnum = usernummatch[0]
        #print currentnum
        # Slice the number out: skip past "<em>" (index of first '>' + 5
        # covers "><em>") up to the final '<'.
        currentnum = currentnum[string.index(currentnum, '>') + 5:string.rindex(currentnum, '<')]
        #print currentnum  # current online-user count
        print "当前时间:", time.strftime('%Y年%m月%d日%H时%M分', time.localtime(
            time.time())), '当前论坛在线人数:', currentnum
        result = open('test.txt', 'a')
        result.write(
            '{year: new Date(' + time.strftime('%Y年%m月%d日%H时%M分',
            time.localtime(time.time())) + '),value:' + currentnum + '},\n')
        result.close()
def prosite_to_re(pattern):
    """convert a valid Prosite pattern into an re string"""
    # True when the pattern starts with a "[<" (may-match-at-start) term.
    flg = (pattern[:2] == "[<")
    s = string.replace(pattern, "{", "[^")
    # Map Prosite syntax to re syntax; the "-" and "." separators are
    # deleted outright (unlike the grouped variant).
    s = string.translate(s, _prosite_trans, "-.")
    # special case "[<" and ">]", if they exist
    if flg:
        i = string.index(s, "]")
        s = "(?:^|[" + s[2:i] + "])" + s[i+1:]
    if s[-2:] == "$]":
        i = string.rindex(s, "[")
        s = s[:i] + "(?:" + s[i:-2] + "]|$)"
    elif s[-3:] == "$]$":
        i = string.rindex(s, "[")
        s = s[:i] + "(?:" + s[i:-3] + "]|$)$"
    return s
def umlComment(witness):
    # Build a UML-style comment (list of strings) for one test witness
    # line: test id, term under test, expected result, and actual result.
    # Relies on the witness quoting convention: the interesting pieces
    # sit between double quotes.
    quotedelim = split(witness, '"');
    test = split(witness)[1]+"-"+split(witness)[3][0]
    term = ' '.join(quotedelim[1].split())  # collapse internal whitespace
    if "uML type error" in witness:
        shouldbe = "type error"
    else:
        shouldbe = quotedelim[3]
    if "uncaught exception" in witness:
        # Exception name is the last whitespace-separated token (minus
        # trailing punctuation).
        got = "exception "+witness[rindex(witness, ' ')+1:-3]
    elif "CPU time" in witness:
        got = "CPU timeout"
    elif "signalled a bug in type inference" in witness:
        if "typed-untypeable" in witness:
            got = "signaled bug: "+quotedelim[3]
        else:
            got = "signaled bug: "+quotedelim[5]
    elif "typed-incorrectly" in witness:
        got = quotedelim[5]
    elif "did-not-type" in witness:
        got = "type error: "+quotedelim[5]
    elif "wrote the error message" in witness:
        if "typed-untypeable" in witness:
            got = "error: "+quotedelim[3]
        else:
            got = "error: "+quotedelim[5]
    elif "typed-untypeable" in witness:
        got = quotedelim[3]
    else:
        # Unrecognized witness shape: emit a partial comment.
        stderr.write('Warning: Unable to finish comment for "'+witness+'"\n')
        return ["Term "+term, "Is "+shouldbe]
    # Escape UML-significant characters by wrapping them in '&'.
    term = re.sub(UML_PATTERN, '&\g<0>&', term)
    shouldbe = re.sub(UML_PATTERN, '&\g<0>&', shouldbe)
    got = re.sub(UML_PATTERN, '&\g<0>&', got)
    return ["Test "+test,"Term "+term, "Is "+shouldbe, "Got "+got]
def scrapeHTML(self, htmlText):
    """Extract agent names from anchor tags in htmlText.

    Parses the page with (legacy) BeautifulSoup, collects every
    <a href="/$%"> link, and appends the text between the tag's first
    '>' and last '<' to self.agentList, which is also returned.
    """
    self.agentList = []
    scraper = BeautifulSoup()
    scraper.feed(htmlText)
    # `fetch` is the old BeautifulSoup API for find_all.
    # Fix: the original bound this result to `list`, shadowing the
    # builtin; renamed, and the dead commented-out debug code removed.
    anchors = scraper.fetch('a', {'href': '/$%'})
    for anchor in anchors:
        anchor = str(anchor)
        start = string.index(anchor, ">")
        end = string.rindex(anchor, "<")
        text = anchor[start + 1:end]
        self.agentList.append(str(text))
    return self.agentList
def _dateConvertFromDB(d):
    # Convert a date/time string from the database into a DateTime value
    # (presumably mx.DateTime -- DateTime.strptime is used; confirm).
    # Tries several formats in turn; a trailing "-NN" timezone offset is
    # split off for the time-with-tz case.  Returns None for None input;
    # re-raises when no format matches.
    if d == None:
        return None
    try:
        return DateTime.strptime(d, '%Y-%m-%d')  #just Y/M/D
    except:
        pass
    try:
        return DateTime.strptime(d, '%H:%M:%S')  #just hh:mm:ss
    except:
        pass
    # Split off the timezone suffix after the last dash.
    dashind = string.rindex(d, '-')
    tz = d[dashind:]
    d = d[:dashind]
    try:
        return DateTime.strptime(d, '%H:%M:%S'), tz  # timetz
    except:
        pass
    # NO -- it was already stripped off, above! -- js Thu Aug 9 11:51:23 2001
    #strip off offset from gmt
    #d = d[:string.rindex(d, '-')]
    try:
        return DateTime.strptime(d, '%Y-%m-%d %H:%M:%S')  # full date
    except:
        #print "date passed to convert function: |%s|" % d
        raise
def __canonicalizeURL(self, URL):  # URL to convert
    """Canonicalize a URL by making it absolute and discarding the fragment."""
    (scheme, location, path, parameters, query, fragment) = \
        urlparse.urlparse(URL)
    if scheme == '':
        scheme = self.__url.scheme()
    if scheme != 'http':
        return URL  # Non-HTTP URLs are left unchanged
    if location == '' and path == '':
        return ''  # Turn fragment-only URLs into nothing
    fragment = ''
    if location != '' or path == '':
        #
        # The URL is absolute.  Just return it.
        #
        return urlparse.urlunparse(
            (scheme, location, path, parameters, query, fragment))
    else:
        #
        # The URL is relative.  Concatenate it with the base URL
        # and return it.  In this case it is always local.
        #
        if location == '':
            location = self.__url.site()
        if path[0] == '/':
            #
            # The URL is a reference to the top directory on the
            # current site.  Rebuild it without the leading slash.
            #
            return urlparse.urlunparse(
                (scheme, location, path[1:], parameters, query, fragment))
        else:
            #
            # The URL is relative.  Add the base directory of the URL.
            #
            baseDir = self.__url.path()
            if baseDir == '':
                baseDir = '/'
            elif baseDir[-1] != '/':
                #
                # The base directory is the path, stripped of its
                # last component.
                #
                lastSlash = string.rfind(baseDir, '/')
                if lastSlash > 0:
                    #
                    # Strip the last component from the path,
                    # leaving the slash.  (NOTE(review): this rindex
                    # recomputes the position rfind already found.)
                    #
                    baseDir = baseDir[0:string.rindex(baseDir, '/') + 1]
                else:
                    #
                    # There is no last component.  The base
                    # directory is just "/".
                    #
                    baseDir = '/'
            return urlparse.urlunparse(
                (scheme, location, WebPage.__normPath(baseDir + path),
                 parameters, query, fragment))
def callback_load(self):
    """Prompt for one or more Maestro .Chn spectrum files and load them.

    The loaded Chn objects are stored in self.data; the common base name
    (directory part and trailing "<digits>.Chn" stripped) is kept in
    self.basename.  Progress and a summary of the total collection time
    are drawn on self.canvas.
    """
    files = tkinter.filedialog.askopenfilenames(
        filetypes=[("Mastro Spectrum", ".Chn")],
        title="Select files to load (hint Ctrl-A)")
    files = list(files)
    files.sort()
    data = []
    total_time = 0.0
    basename = files[0]
    # Strip the directory part (either separator style).  Uses the str
    # method instead of the Python-2-only string.rindex, since the rest
    # of this function is already Python 3 (tkinter, print()).
    if basename.count("/") > 0:
        i = basename.rindex("/") + 1
        basename = basename[i:]
    if basename.count("\\") > 0:
        i = basename.rindex("\\") + 1
        basename = basename[i:]
    print("basename = ", basename)
    # Drop the trailing "<digits>.Chn" to get the series base name.
    chunks = re.split("[0-9]+\.[cC][hH][nN]", basename)
    if len(chunks) > 0:
        basename = chunks[0]
    self.basename = basename
    for i in range(len(files)):
        if i > 0:
            self.canvas.delete(t)
        t = self.canvas.create_text(32, 32, text="Reading %s" % (files[i]),
                                    fill="white", anchor=tkinter.W)
        current = Chn(files[i])
        data.append(current)
        total_time = total_time + current.real_time
    self.canvas.delete(t)
    # Human-readable total collection time.
    if total_time < 300:
        tstr = "%.1f seconds" % (total_time)
    elif total_time < 7200:
        tstr = "%.1f minutes" % (total_time / 60)
    elif total_time < 2e5:
        tstr = "%.1f hours" % (total_time / 3600)
    else:
        # BUG FIX: the original was the bare literal "%.1f days" with no
        # "%" arguments, so the placeholder was displayed verbatim.
        tstr = "%.1f days" % (total_time / 86400)
    t = self.canvas.create_text(
        32, 32,
        text="Loaded %d files,\n%s of collection time" % (len(data), tstr),
        fill="white", anchor=tkinter.W)
    self.data = data
def newProcess(self, sensor):
    """Spawn the sensor's main script as a child process.

    Looks for one of Sensor.VALID_MAINS under <tmp>/sonar/<name>/, picks
    an interpreter from the file ending (.py/.sh/.exe), appends any
    configured sensor parameters as key=value command-line arguments and
    returns the Popen object (None/no value on failure).
    """
    # determine the executable
    mainFile = None
    for main in Sensor.VALID_MAINS:
        target = os.path.join(gettempdir(), 'sonar', sensor.name, main)
        if os.path.exists(target):
            mainFile = main
            break
    # break if there is no main file
    if mainFile == None:
        print 'missing main file for sensor %s' % (sensor.name)
        return
    # determine the executable (python, ..)
    executable = None
    main = None
    try:
        index = string.rindex(mainFile, '.')
        ending = mainFile[(index + 1):]
        if ending == 'py':
            executable = 'python'
            main = 'main.py'
        elif ending == 'sh':
            executable = 'bash'
            main = 'main.sh'
        elif ending == 'exe':
            # .exe runs directly, no interpreter.
            executable = None
            main = 'main.exe'
    except ValueError:
        # No '.' in the main file name.
        executable = None
        main = None
    # create a new process
    try:
        path = os.path.join(gettempdir(), 'sonar', sensor.name, main)
        # configure executable and main file
        if executable is None:
            executable = [path, sensor.name]
        else:
            executable = [executable, path, sensor.name]
        # check if the sensor configuration has parameters
        if sensor.settings.parameters is not None:
            paramLen = len(sensor.settings.parameters)
            if paramLen > 0:
                print 'sensor parameter exists, appending same as command line arguments'
                for parameter in sensor.settings.parameters:
                    paramValue = parameter.key + '=' + parameter.value
                    executable.append(paramValue)
        process = Popen(executable, stdout=PIPE, bufsize=1, universal_newlines=True)
        print 'PID %i' % (process.pid)
        return process
    except Exception, e:
        print 'error starting process: %s' % (e)
        return None
def newProcess(self, sensor):
    """Spawn the sensor's main script as a child process.

    Duplicate of the single-quoted variant above: finds one of
    Sensor.VALID_MAINS under <tmp>/sonar/<name>/, chooses an interpreter
    from the ending (.py/.sh/.exe), appends configured parameters as
    key=value arguments and returns the Popen object (None on failure).
    """
    # determine the executable
    mainFile = None
    for main in Sensor.VALID_MAINS:
        target = os.path.join(gettempdir(), "sonar", sensor.name, main)
        if os.path.exists(target):
            mainFile = main
            break
    # break if there is no main file
    if mainFile == None:
        print "missing main file for sensor %s" % (sensor.name)
        return
    # determine the executable (python, ..)
    executable = None
    main = None
    try:
        index = string.rindex(mainFile, ".")
        ending = mainFile[(index + 1) :]
        if ending == "py":
            executable = "python"
            main = "main.py"
        elif ending == "sh":
            executable = "bash"
            main = "main.sh"
        elif ending == "exe":
            # .exe runs directly, no interpreter.
            executable = None
            main = "main.exe"
    except ValueError:
        # No '.' in the main file name.
        executable = None
        main = None
    # create a new process
    try:
        path = os.path.join(gettempdir(), "sonar", sensor.name, main)
        # configure executable and main file
        if executable is None:
            executable = [path, sensor.name]
        else:
            executable = [executable, path, sensor.name]
        # check if the sensor configuration has parameters
        if sensor.settings.parameters is not None:
            paramLen = len(sensor.settings.parameters)
            if paramLen > 0:
                print "sensor parameter exists, appending same as command line arguments"
                for parameter in sensor.settings.parameters:
                    paramValue = parameter.key + "=" + parameter.value
                    executable.append(paramValue)
        process = Popen(executable, stdout=PIPE, bufsize=1, universal_newlines=True)
        print "PID %i" % (process.pid)
        return process
    except Exception, e:
        print "error starting process: %s" % (e)
        return None
def calcOutputFilename(self, inputFilename):
    """Return inputFilename with its extension replaced by self.fileExtension().

    If the name contains no '.', the extension is simply appended.
    """
    try:
        # Cut at the last '.' (extension separator); str.rindex works on
        # both Python 2 and 3, unlike the removed string.rindex.
        cut = inputFilename[:inputFilename.rindex('.')]
    except ValueError:
        # Narrowed from the original bare `except:` -- only "no '.'"
        # should fall back to the whole name.
        cut = inputFilename
    return cut + self.fileExtension()
def split_package(modulename):
    """Splits a module name into package, and module.

    Returns (package, module); package is None when modulename contains
    no dot.
    """
    # str.rpartition splits on the last '.' in one pass and, unlike the
    # Python-2-only string.rindex, needs no exception handling.
    package, sep, module = modulename.rpartition('.')
    if sep:
        return package, module
    return None, modulename
def split_package(modulename):
    """Splits a module name into package, and module"""
    # Use the str method: `from string import rindex` only exists on
    # Python 2 (string.rindex was removed in Python 3).
    try:
        ix = modulename.rindex('.')
    except ValueError:
        # No dot: the whole name is the module, with no package.
        return None, modulename
    return modulename[:ix], modulename[ix+1:]
def wrap(text, width, font):
    """Wrap a line of text, returning a list of lines.

    `font.size(s)` must return a (width, height) pair.  Each output line
    is the longest prefix -- preferably broken at a space -- whose
    rendered width fits `width`.  Raises ValueError when not even a
    single character fits.
    """
    lines = []
    while text:
        if font.size(text)[0] <= width:
            return lines + [text]
        try:
            # Back up over spaces until the head fits.
            i = text.rindex(' ')
            while font.size(text[:i])[0] > width:
                i = text.rindex(' ', 0, i)
        except ValueError:
            # No usable space: hard-break at the widest fitting prefix.
            i = len(text) - 1
            while font.size(text[:i])[0] > width and i:
                i = i - 1
            if not i:
                # Modern raise syntax (valid on Python 2 and 3, unlike
                # the original `raise ValueError, '...'`).
                raise ValueError('width %d too narrow' % width)
        lines.append(text[:i])
        text = text[i:].lstrip()
    return lines
def _create_ob_from_function(c, id, file, path):
    # Instantiate an object via the `createSelf` factory function of the
    # class named by the dotted path `c` ("pkg.mod.Class").  Only used
    # when createSelf takes exactly (id, file).  Any failure is swallowed
    # and None is returned implicitly -- presumably so callers can try
    # the next creation strategy (TODO confirm).
    try:
        i = string.rindex(c, '.')
        m, c = c[:i], c[i+1:]  # module path, class name
        m = __import__(m, globals(), locals(), (c,))
        c = getattr(m, c)
        # im_func / func_code: Python 2 unbound-method introspection.
        f = getattr(c, 'createSelf').im_func
        if f.func_code.co_varnames == ('id', 'file'):
            return _wrap_ob(f(id, file), path)
    except:
        pass
def _create_ob_from_factory(c, id, file, path):
    # Instantiate an object through a factory class named by the dotted
    # path `c`: the class is looked up with getObject, instantiated, and
    # the instance called with (id, file).  The factory is remembered on
    # the wrapped object.  Failures are silently swallowed (None
    # returned implicitly) -- presumably so callers can try another
    # strategy (TODO confirm).
    try:
        i = string.rindex(c, '.')
        m, c = c[:i], c[i+1:]  # module path, class name
        c = getObject(m, c)
        f = c()
        ob = _wrap_ob(f(id, file), path)
        ob.__factory = f
        return ob
    except:
        pass
def dump_hdf(self, directory, owner):
    """Write the documentation gathered for this file's functions to
    <directory>/<basename>.hdf.

    Functions with no info at all are skipped (warning unless QUIET).
    Each HDF node carries name, definition, args, description, output and
    a cross-reference list of all functions in the file.
    """
    global QUIET
    sys.path.insert(0, "../python")
    sys.path.insert(0, "python")
    import neo_cgi, neo_util
    hdf = neo_util.HDF()
    date = time.strftime("%d %B %Y", time.localtime(time.time()))
    if not self._funcs.items():
        return
    for name, f in self._funcs.items():
        if f._title is None and f._desc is None and f._args is None and f._retr is None:
            if not QUIET:
                sys.stderr.write('-W- No info for function "%s()"\n' % name)
            continue
        if f._defn is None:
            if not QUIET:
                sys.stderr.write('-W- No defn for function "%s()"\n' % name)
        hdf.setValue("Code.%s" % name, name)
        obj = hdf.getObj("Code.%s" % name)
        obj.setValue("Name", name)
        obj.setValue("filename", self._filename)
        if f._title:
            obj.setValue("Title", f._title)
        if f._defn:
            obj.setValue("Define", neo_cgi.text2html(f._defn))
        if f._args:
            obj.setValue("Args", neo_cgi.text2html(f._args))
        if f._desc:
            obj.setValue("Desc", neo_cgi.text2html(f._desc))
        if string.strip(f._other):
            obj.setValue("Other", neo_cgi.text2html(string.strip(f._other)))
        if f._output:
            obj.setValue("Output", neo_cgi.text2html(f._output))
        n = 0
        for func in self._funcs.keys():
            obj.setValue("related.%d" % n, func)
            n = n + 1
    # Derive the output name from the source file name.
    # BUG FIX: the original used string.rindex(), which raises ValueError
    # when the separator is missing -- it never returns -1, so the
    # `!= -1` guards below could never take their else path and a plain
    # file name crashed the dump.  rfind() is what these guards expect.
    fname = self._filename
    x = fname.rfind("/")
    if x != -1:
        fname = fname[x + 1:]
    x = fname.rfind('.')
    if x != -1:
        fname = fname[:x]
    hdf.writeFile("%s/%s.hdf" % (directory, fname))
def getMajorMinor(deviceName, dmsetupLs):
    """
    Given output of dmsetup ls this will return the major:minor (block
    name) of the device deviceName, e.g. "253:0" from a line of the form
    "<deviceName>\\t(253:0)".
    """
    # Position just past the (last) occurrence of the device name.
    # str methods replace the Python-2-only string.rindex/string.index;
    # passing `start` to index() avoids slicing a temporary string.
    start = dmsetupLs.rindex(deviceName) + len(deviceName)
    # End of that line.
    end = dmsetupLs.index("\n", start)
    # trim the preceding tab and ()'s
    return dmsetupLs[start + 2:end - 1]
def process_request(self, req): htdocs_dir = self.config.get('docs', 'htdocs_dir') # Handle processing of /pdf/somefilename requests if (string.find(req.path_info, "docs/pdf/") != -1): pdf_file_name = (string.split(req.path_info, '/')[3]) req.send_file(htdocs_dir + '/pdf/' + pdf_file_name) return None elif (string.find(req.path_info, "docs/html/") != -1): html_file_name = req.path_info[string.rindex(req.path_info, 'docs/html/') + 10:] if html_file_name.endswith('.html'): html = codecs.open(htdocs_dir + '/html/' + html_file_name, 'r', 'utf-8', 'replace') content = re.compile(r'^.*<body >\s*<p>\s*(.*)<p>\s*<br>\s*<hr>\s*<address>.*$', re.MULTILINE | re.IGNORECASE | re.DOTALL).match(html.read()) html.close() if content is not None: req.hdf.set_unescaped('content', content.group(1)) else: req.hdf['content'] = 'No content' return 'htmldoc.cs', None else: req.send_file(htdocs_dir + '/html/' + html_file_name) return None elif (string.find(req.path_info, "docs/irc/") != -1): irc_file_name = (string.split(req.path_info, '/')[3]) irc = codecs.open(htdocs_dir + '/irc/' + irc_file_name, 'r', 'utf-8', 'replace') content = '' try: for line in irc: content += wiki_to_html(line, self.env, req) finally: irc.close() if content is not None: req.hdf.set_unescaped('content', content) else: req.hdf['content'] = 'No content' return 'ircdoc.cs', None # Handle the default case else: pdfs_dir = htdocs_dir + '/pdf/' pdfs = filter(os.listdir(pdfs_dir), "*.pdf") pdfs.sort() pdfs = map(lambda x: x.rstrip('.pdf'), pdfs) req.hdf['pdfs'] = pdfs irc_dir = htdocs_dir + '/irc/' irc = filter(os.listdir(irc_dir), "lifecyclemanager-dev.log.*") irc.sort() req.hdf['irc'] = irc add_stylesheet(req, 'hw/css/docs.css') return 'docs.cs', None
def unsubscribe(self, kn_route_location, statushandler = None):
    """
    unsubscribe - Remove an entry from the dispatch table, then enqueue
    "route" request to submit to the server.

    unsubscribe(kn_route_location, [status-handler])

    Inputs: a route URI, a status handler.
    Output: none.

    What it does:
    1. Remove any corresponding dispatch table entries.
    2. Parse the route URI into source topic and route ID.
       If parsing failed, skip steps 3 and 4.
    3. Build a "route" request using the source topic, route ID,
       and empty string as the destination.
    4. Enqueue request.
    """
    parts = urlparse.urlparse(kn_route_location)
    end_of_topic = -1
    try:
        # The URI path must contain the kn_routes marker; its position
        # splits the path into source topic and route ID.
        end_of_topic = string.rindex(parts[2], self._KN_ROUTES_)
    except ValueError:
        if statushandler:
            statushandler.onStatus(
                { "status" : "400 Bad Request",
                  "kn_payload" : "Client will not delete a route without the magic '%s' substring." % self._KN_ROUTES_ })
        return
    # Route ID: everything after the marker, URL-unquoted and decoded.
    kn_id = unicode(
        urllib.unquote(parts[2][end_of_topic + len(self._KN_ROUTES_) : ]),
        "UTF-8", "replace")
    # Rebuild the URL with only the topic part of the path.
    parts = parts[:2] + (parts[2][ : end_of_topic ],) + parts[3:]
    requestMessage = {
        "kn_from" : urlparse.urlunparse(parts),
        "kn_to" : "",
        "do_method" : "route",
        "kn_id" : kn_id,
        "kn_expires" : "+5"
        }
    requestMessage["kn_from"] = canonicalizeTopic(
        self.getServerURL(), requestMessage["kn_from"])
    self.enqueue(requestMessage, statushandler)
def get_filenames(self):
    # Parse the archive listing produced by the base extractor: the file
    # name column starts right after the last space of the first border
    # line; yield that column for every row between the two border lines.
    fn_index = None
    for line in NoPipeExtractor.get_filenames(self):
        if self.border_re.match(line):
            if fn_index is not None:
                break  # second border: end of the listing
            else:
                # First border: remember where the name column starts.
                fn_index = string.rindex(line, ' ') + 1
        elif fn_index is not None:
            yield line[fn_index:]
    self.archive.close()
def readValueFromUUID(self, uuid):
    # Read a characteristic by UUID and return the text after the
    # "value:" marker in the raw tool output ("+ 7" = marker length 6
    # plus the space after the colon).  Exits the process when the
    # marker is missing.
    raw = self.readRawUUID(uuid).strip()
    try:
        value = raw[string.rindex(raw, "value:") + 7:]
    except(ValueError):
        print "Error reading value from UUID: " + str(uuid)
        print "[ERROR] " + raw
        sys.exit()
    return value
def unsubscribe(self, kn_route_location, statushandler=None):
    """
    unsubscribe - Remove an entry from the dispatch table, then enqueue
    "route" request to submit to the server.

    unsubscribe(kn_route_location, [status-handler])

    Inputs: a route URI, a status handler.
    Output: none.

    What it does:
    1. Remove any corresponding dispatch table entries.
    2. Parse the route URI into source topic and route ID.
       If parsing failed, skip steps 3 and 4.
    3. Build a "route" request using the source topic, route ID,
       and empty string as the destination.
    4. Enqueue request.
    """
    parts = urlparse.urlparse(kn_route_location)
    end_of_topic = -1
    try:
        # The URI path must contain the kn_routes marker; its position
        # splits the path into source topic and route ID.
        end_of_topic = string.rindex(parts[2], self._KN_ROUTES_)
    except ValueError:
        if statushandler:
            statushandler.onStatus({
                "status": "400 Bad Request",
                "kn_payload":
                "Client will not delete a route without the magic '%s' substring." % self._KN_ROUTES_
            })
        return
    # Route ID: everything after the marker, URL-unquoted and decoded.
    kn_id = unicode(
        urllib.unquote(parts[2][end_of_topic + len(self._KN_ROUTES_):]),
        "UTF-8", "replace")
    # Rebuild the URL with only the topic part of the path.
    parts = parts[:2] + (parts[2][:end_of_topic], ) + parts[3:]
    requestMessage = {
        "kn_from": urlparse.urlunparse(parts),
        "kn_to": "",
        "do_method": "route",
        "kn_id": kn_id,
        "kn_expires": "+5"
    }
    requestMessage["kn_from"] = canonicalizeTopic(
        self.getServerURL(), requestMessage["kn_from"])
    self.enqueue(requestMessage, statushandler)
def readValueFromHandle(self, handle):
    # Read a characteristic by handle and return the text after the
    # "Characteristic value/descriptor:" marker ("+ 33" = marker length
    # 32 plus the following space).  Exits the process when the marker
    # is missing.
    raw = self.readRawHandle(handle).strip()
    try:
        value = raw[string.rindex(raw, "Characteristic value/descriptor:") + 33:]
    except(ValueError):
        print "Error reading value from Handle: " + handle
        print "[ERROR] " + raw
        sys.exit()
    return value
def pull(self, sources_dir):
    #Download the list of sources.
    package_file_path = self.downloadFile(sources_dir, 'sha1-all')
    with open(package_file_path) as f:
        packages = f.readlines()
    # Each line is "<name>-<sha1>"; split at the *last* '-' so package
    # names may themselves contain dashes.
    for package in packages:
        i = string.rindex(package, '-')
        package_name = string.strip(package[:i])
        package_sha1 = string.strip(package[i+1:])
        package_path = self.downloadFile(sources_dir, package_name, package_sha1)
        # Verify integrity of the downloaded package.
        if package_sha1 != self.getFileHash(package_path):
            raise Exception('Invalid sha1 for package %s' % package_name)
def updatepos(self, i, j):
    """Advance the parser position from rawdata index i to j.

    Updates self.lineno by the number of newlines crossed and
    self.offset (the column within the current line); returns j.
    """
    if i >= j:
        return j
    rawdata = self.rawdata
    # str methods replace the Python-2-only string.count/string.rindex.
    nlines = rawdata.count("\n", i, j)
    if nlines:
        self.lineno = self.lineno + nlines
        pos = rawdata.rindex("\n", i, j)  # Should not fail
        # Column = chars after the last newline before j.
        self.offset = j - (pos + 1)
    else:
        self.offset = self.offset + j - i
    return j
def newProcess(self, name):
    """Spawn the relay process for `name`.

    Looks for one of ProcessLoader.VALID_MAINS under <tmp>/relay/<name>/,
    picks an interpreter from the file ending (.py/.sh/.exe) and returns
    the Popen object, or None when no main file exists or starting fails.
    """
    # determine the executable
    mainFile = None
    for main in ProcessLoader.VALID_MAINS:
        target = os.path.join(tempfile.gettempdir(), 'relay', name, main)
        if os.path.exists(target):
            mainFile = main
            break
    # break if there is no main file
    if mainFile == None:
        print 'missing main file for sensor %s' % (name)
        return None
    # determine the executable (python, ..)
    executable = None
    main = None
    try:
        index = string.rindex(mainFile, '.')
        ending = mainFile[(index + 1):]
        if ending == 'py':
            executable = 'python'
            main = 'main.py'
        elif ending == 'sh':
            executable = 'bash'
            main = 'main.sh'
        elif ending == 'exe':
            # .exe runs directly, no interpreter.
            executable = None
            main = 'main.exe'
    except ValueError:
        # No '.' in the main file name: nothing we can start.
        executable = None
        main = None
        return None
    # create a new process
    try:
        path = os.path.join(tempfile.gettempdir(), 'relay', name, main)
        cwd = os.path.join(tempfile.gettempdir(), 'relay', name)
        # configure executable and main file
        if executable is None:
            executable = [path, name]
        else:
            executable = [executable, path, name]
        process = Popen(executable, stdout=PIPE, bufsize=0, universal_newlines=True, cwd=cwd)
        print 'PID %i' % (process.pid)
        return process
    except Exception as e:
        print 'error starting process %s' % (e)
        return None
def pull(self, sources_dir):
    #Download the list of sources.
    package_file_path = self.downloadFile(sources_dir, 'sha1-all')
    with open(package_file_path) as f:
        packages = f.readlines()
    # Lines look like "<name>-<sha1>"; split at the *last* '-' so the
    # package name itself may contain dashes.
    for package in packages:
        i = string.rindex(package, '-')
        package_name = string.strip(package[:i])
        package_sha1 = string.strip(package[i + 1:])
        package_path = self.downloadFile(sources_dir, package_name, package_sha1)
        # Verify integrity of the downloaded package.
        if package_sha1 != self.getFileHash(package_path):
            raise Exception('Invalid sha1 for package %s' % package_name)
def _findBrandMatches(fdn, index):
    # Return the keys of index['stml'] whose prefix-through-the-dot and
    # extension both match fdn's -- i.e. the "branded" variants of the
    # file name.
    extind = string.rindex(fdn, '.')
    ext = fdn[extind:]       # extension, including the dot
    lenext = -len(ext)
    extind = extind + 1
    root = fdn[:extind]      # name up to and including the dot
    retlist = []
    for l in index['stml'].keys():
        #if root and extension match
        if l[:extind] == root and l[lenext:] == ext:
            retlist.append(l)
    print 'branded matches are:', retlist
    return retlist
def __get_file_type(self, page):
    """Return the file type (extension) of the URL `page`.

    Falls back to "Unknown" for implausibly long extensions (>10 chars)
    and to "html" when the path has no extension.
    """
    urltuple = urlparse(page)
    try:
        # Last '.' in the path component marks the extension.
        # Narrowed from the original bare `except` (which could hide
        # real bugs): only a missing '.' means "no extension".
        idx = urltuple[2].rindex(".")
    except ValueError:
        return "html"
    file_type = urltuple[2][idx + 1:]
    if len(file_type) > 10:
        file_type = "Unknown"
    return file_type
def _findBrandMatches(fdn, index):
    # Return the keys of index["stml"] whose prefix-through-the-dot and
    # extension both match fdn's -- i.e. the "branded" variants of the
    # file name.  (Double-quoted duplicate of the variant above.)
    extind = string.rindex(fdn, ".")
    ext = fdn[extind:]       # extension, including the dot
    lenext = -len(ext)
    extind = extind + 1
    root = fdn[:extind]      # name up to and including the dot
    retlist = []
    for l in index["stml"].keys():
        # if root and extension match
        if l[:extind] == root and l[lenext:] == ext:
            retlist.append(l)
    print "branded matches are:", retlist
    return retlist
def BuildFromFile(self, file):
    """Parse and build my data from a file

    Reads the next line in the file, and matches it as an argument
    description.  If not a valid argument line, an error_not_found
    exception is raised.
    """
    line = file.readline()
    # NOTE(review): `search(line) < 0` and `self.regex.group(...)` follow
    # the old pre-`re` regex-module API (search returning an index and
    # the pattern object holding the last match).  With the modern `re`
    # module search returns a match object/None and patterns have no
    # .group() -- confirm which module self.regex comes from.
    if self.regex.search(line)<0:
        raise error_not_found
    self.name = self.regex.group(3)
    # group(1) is the "[in][out]"-style direction decl.
    self.inout = string.split(self.regex.group(1),'][')
    typ = string.strip(self.regex.group(2))
    self.raw_type = typ
    self.indirectionLevel = 0
    if self.regex.group(4): # Has "[ ]" decl
        self.arrayDecl = 1
    try:
        # A trailing "__RPC_FAR" (without "*") still counts as one level.
        pos = string.rindex(typ, "__RPC_FAR")
        self.indirectionLevel = self.indirectionLevel + 1
        typ = string.strip(typ[:pos])
    except ValueError:
        pass
    while 1:
        # Strip "__RPC_FAR *" suffixes, one indirection level each.
        try:
            pos = string.rindex(typ, "__RPC_FAR *")
            self.indirectionLevel = self.indirectionLevel + 1
            typ = string.strip(typ[:pos])
        except ValueError:
            break
    self.type = typ
    if self.type[:6]=="const ":
        self.unc_type = self.type[6:]
    else:
        self.unc_type = self.type
    if VERBOSE:
        print "    Arg %s of type %s%s (%s)" % (self.name, self.type, "*" * self.indirectionLevel, self.inout)
def dump_hdf (self, directory, owner):
    """Write the documentation gathered for this file's functions to
    <directory>/<basename>.hdf.

    Functions with no info at all are skipped (warning unless QUIET).
    Each HDF node carries name, definition, args, description, output and
    a cross-reference list of all functions in the file.
    """
    global QUIET
    sys.path.insert (0, "../python")
    sys.path.insert (0, "python")
    import neo_cgi, neo_util
    hdf = neo_util.HDF()
    date = time.strftime("%d %B %Y", time.localtime(time.time()))
    if not self._funcs.items():
        return
    for name, f in self._funcs.items():
        if f._title is None and f._desc is None and f._args is None and f._retr is None:
            if not QUIET:
                sys.stderr.write('-W- No info for function "%s()"\n' % name)
            continue
        if f._defn is None:
            if not QUIET:
                sys.stderr.write('-W- No defn for function "%s()"\n' % name)
        hdf.setValue ("Code.%s" % name, name)
        obj = hdf.getObj ("Code.%s" % name)
        obj.setValue ("Name", name)
        obj.setValue ("filename", self._filename)
        if f._title:
            obj.setValue ("Title", f._title)
        if f._defn:
            obj.setValue ("Define", neo_cgi.text2html(f._defn))
        if f._args:
            obj.setValue ("Args", neo_cgi.text2html(f._args))
        if f._desc:
            obj.setValue ("Desc", neo_cgi.text2html(f._desc))
        if string.strip(f._other):
            obj.setValue ("Other", neo_cgi.text2html(string.strip(f._other)))
        if f._output:
            obj.setValue ("Output", neo_cgi.text2html(f._output))
        n = 0
        for func in self._funcs.keys():
            obj.setValue ("related.%d" % n, func)
            n = n + 1
    # Derive the output name from the source file name.
    # BUG FIX: the original used string.rindex (), which raises
    # ValueError when the separator is missing -- it never returns -1,
    # so the `!= -1` guards could never take their else path and a plain
    # file name crashed the dump.  rfind() is what the guards expect.
    fname = self._filename
    x = fname.rfind("/")
    if x != -1:
        fname = fname[x+1:]
    x = fname.rfind('.')
    if x != -1:
        fname = fname[:x]
    hdf.writeFile ("%s/%s.hdf" % (directory, fname))
def parse_dataset_name(name):
    '''
    Check if we have been given the name of the fits file instead of the
    dataset name, and if so try to determine and return the dataset name

    100103 ksl Coded because it is natural in some cases to give the filename
    '''
    xname = name
    if xname.count('.') > 0 or xname.count('/') > 0:
        # This must be a filename.
        # BUG FIX: the original used rindex, which raises ValueError for
        # names like "foo.fits" that contain a '.' but no '/'.  rfind
        # returns -1 in that case, and xname[-1+1:-1+10] is exactly the
        # first 9 characters -- the dataset name -- which is the intent.
        i = xname.rfind('/')
        xname = xname[i + 1:i + 10]
    return xname
def runTest(self):
    # Run ghostscript on self.file for this device/dpi/band combination,
    # checksum the produced raster, optionally record it in the daily
    # checksum database, and compare it against the stored baseline.
    if hasattr(self, "skip") and self.skip == 1:
        self.assert_(True)
        return
    outputfile = "%s.%s.%d.%d" % (self.file[string.rindex(self.file, '/') + 1:], self.device, self.dpi, self.band)
    gs = gstestgs.Ghostscript()
    gs.gsroot = self.gsroot
    gs.device = self.device
    gs.dpi = self.dpi
    gs.band = self.band
    gs.infile = self.file
    gs.outfile = outputfile
    if self.log_stdout:
        gs.log_stdout = self.log_stdout
    if self.log_stderr:
        gs.log_stderr = self.log_stderr
    # Empty sum means ghostscript failed to produce output.
    if gs.process():
        sum = gssum.make_sum(outputfile)
    else:
        sum = ''
    if os.path.exists(outputfile):
        # Keep the raster for the daily archive rather than deleting it.
        shutil.move(outputfile, gsconf.datadir + "/raster.daily")
        # os.unlink(outputfile)
    if sum and self.track_daily:
        # add test result to daily database
        if gsconf.__dict__.has_key("checksumdb") and gsconf.checksumdb:
            dbname = gsconf.dailydir + gsconf.checksumdb  # mhw +".db"
        else:
            dbname = gsconf.get_dailydb_name()
        gssum.add_file(outputfile, dbname=dbname, sum=sum)
    if not sum:
        message = myself + " output file " + outputfile + " was not created for input file: " + self.file
        self.fail(message)
    else:
        if gssum.exists(outputfile, gsconf.baselinedb):
            sum_baseline = gssum.get_sum(outputfile, gsconf.baselinedb)
            message = myself + ' checksum did not match baseline (' + outputfile + ') for input file: ' + self.file
            self.assertEqual(sum, sum_baseline, message)
        else:
            message = myself + " no baseline checksum (" + outputfile + ") for file: " + self.file
            self.fail(message)
def decode(cls, text, shift=None):
    # Caesar-style decode of `text` with a per-character shift list
    # (default: shift of 3 everywhere).  Non-letters are stripped and the
    # input lowercased before decoding.
    # NOTE(review): `shift < len(text)` compares a list against an int
    # (legal only on Python 2) and `shift *= len(text)` repeats the list
    # len(text) times -- presumably just to make it long enough; confirm.
    if shift == None:
        shift = [3] * len(text)
    elif shift < len(text):
        shift *= len(text)
    else:
        pass
    text = (sub('[^a-zA-Z]', '', text)).lower()
    new = ''
    for n in xrange(0, len(text)):
        x = f().get_value(n, text, None)
        # Shift back within a doubled alphabet so negative indexes wrap.
        z = f().get_value((rindex(ascii_lowercase, x)) - shift[n], ascii_lowercase * 2, None)
        new += z
    return new
def tags_thread(self, path):
    # Worker for a /tags/... virtual path: fetch photos matching the
    # colon-separated tag list in the last path component and create a
    # file-with-metadata for each.
    ind = string.rindex(path, '/')
    tagName = path[ind+1:]
    if tagName.strip()=='':
        log.error("the tagName '%s' doesn't contain any tags", tagName)
        return
    log.info("started for %s", tagName)
    # The photo service expects a comma-separated tag list.
    sendtagList = ','.join(tagName.split(':'))
    # Personal tags are scoped to our own user id (NSID).
    if(path.startswith('/tags/personal')):
        user_id = self.NSID
    else:
        user_id = None
    for b in self.transfl.getTaggedPhotos(sendtagList, user_id):
        info = self.transfl.parseInfoFromPhoto(b)
        self._mkfileWithMeta(path, info)
def runTest(self):
    """Run one Ghostscript regression test and compare its raster checksum
    against the baseline database.

    Fails the test when no output file was produced or when the checksum
    differs from (or is missing in) the baseline.
    """
    # Tests flagged skip == 1 pass trivially.
    if hasattr(self, "skip") and self.skip == 1:
        self.assert_(True)
        return

    # Output name: <input basename>.<device>.<dpi>.<band>
    # (str.rindex replaces the py2-only string.rindex)
    outputfile = "%s.%s.%d.%d" % (
        self.file[self.file.rindex('/') + 1:],
        self.device, self.dpi, self.band)

    gs = gstestgs.Ghostscript()
    gs.gsroot = self.gsroot
    gs.device = self.device
    gs.dpi = self.dpi
    gs.band = self.band
    gs.infile = self.file
    gs.outfile = outputfile
    if self.log_stdout:
        gs.log_stdout = self.log_stdout
    if self.log_stderr:
        gs.log_stderr = self.log_stderr

    if gs.process():
        checksum = gssum.make_sum(outputfile)   # renamed: don't shadow builtin sum()
    else:
        checksum = ''

    if os.path.exists(outputfile):
        shutil.move(outputfile, gsconf.datadir + "/raster.daily")

    if checksum and self.track_daily:
        # add test result to daily database
        # ('in' replaces the py2-only dict.has_key)
        if "checksumdb" in gsconf.__dict__ and gsconf.checksumdb:
            dbname = gsconf.dailydir + gsconf.checksumdb  # mhw +".db"
        else:
            dbname = gsconf.get_dailydb_name()
        # NOTE(review): outputfile was already moved above; presumably
        # add_file only records the precomputed sum= value — confirm.
        gssum.add_file(outputfile, dbname=dbname, sum=checksum)

    if not checksum:
        message = myself + " output file " + outputfile + " was not created for input file: " + self.file
        self.fail(message)
    else:
        if gssum.exists(outputfile, gsconf.baselinedb):
            sum_baseline = gssum.get_sum(outputfile, gsconf.baselinedb)
            message = myself + ' checksum did not match baseline (' + outputfile + ') for input file: ' + self.file
            self.assertEqual(checksum, sum_baseline, message)
        else:
            message = myself + " no baseline checksum (" + outputfile + ") for file: " + self.file
            self.fail(message)
def write_File_text(sfile, array):
    """Write each string in *array* to *sfile*, one per line.

    Returns the path actually written.  If the file cannot be opened or
    written, retries once per attempt under "<name>_1.<ext>".
    """
    # Build the line list without mutating the caller's array (the
    # original appended "\n" in place, doubling newlines on retries).
    lines = [s + "\n" for s in array]
    try:
        # 'with' guarantees the handle is closed; the original's
        # 'return' inside 'finally' silently swallowed write errors.
        with open(sfile, "w") as f:
            f.writelines(lines)
        return sfile
    except (IOError, OSError):
        # Fall back to "<name>_1.<ext>" and propagate its result
        # (str.rindex replaces the py2-only string.rindex).
        dot = sfile.rindex(".")
        alt = sfile[:dot] + "_1." + sfile[dot + 1:]
        return write_File_text(alt, array)
def resolve_func(spec):
    """Resolve a function by name.

    Given a function specified by 'module.function', return a callable
    object (ie, the function itself).  A dotless *spec* is looked up in
    this module's globals (KeyError if absent).
    """
    try:
        # str.rindex raises ValueError when there is no '.' — same
        # contract as the py2-only string.rindex it replaces.
        idx = spec.rindex(".")
        mname = spec[:idx]
        fname = spec[idx + 1:]
        # Dont attempt to optimize by looking in sys.modules,
        # as another thread may also be performing the import - this
        # way we take advantage of the built-in import lock.
        module = _import_module(mname)
        return getattr(module, fname)
    except ValueError:
        # No "." in name - assume in this module
        return globals()[spec]
def getTooltipHeadingText(th):
    '''
    BeautifulSoup is being confused by row headings with gobs of
    tooltip text.  Pull what we can and strip the brackety stuff in front
    So blah, (Passes Attempted)">Cmp% ==> Cmp%
    '''
    text = ''.join(th.findAll(text=True))
    try:
        # str.rindex replaces the py2-only string.rindex (which the old
        # bare 'except' silently masked as an AttributeError on py3).
        last = text.rindex('">')
        text = text[last + 2:]
    except ValueError:
        # No tooltip prefix present — keep the text unchanged.
        pass
    return text
def getTooltipHeadingText(th):
    '''
    BeautifulSoup is being confused by row headings with gobs of
    tooltip text.  Pull what we can and strip the brackety stuff in front
    So blah, (Passes Attempted)">Cmp% ==> Cmp%
    '''
    text = ''.join(th.findAll(text=True))
    try:
        # str.rindex replaces the py2-only string.rindex (which the old
        # bare 'except' silently masked as an AttributeError on py3).
        last = text.rindex('">')
        text = text[last + 2:]
    except ValueError:
        # No tooltip prefix present — keep the text unchanged.
        pass
    return text
def FuzzyScanSelection(self, frompos, topos, margin):
    """Expand a [frompos, topos) selection to the nearest enclosing
    '<'...'>' pair, searching at most *margin* chars either side.

    Returns (selected_text, left, right) on success, or '' when no
    bracket pair is found within the margins.
    NOTE(review): the failure path returns a bare string while the
    success path returns a tuple — callers must handle both.
    """
    fulltext = self.GetValue()
    # search left margin, clamped to the start of the text
    start = frompos - margin
    if start < 0:
        start = 0
    if frompos == 0:
        frompos = 1
    # search right margin, clamped to the end of the text
    finish = topos + margin
    if finish > len(fulltext):
        finish = len(fulltext)
    if topos > len(fulltext) - 2:
        topos = len(fulltext) - 2
    try:
        # str.rindex/str.index replace the py2-only string.rindex/index;
        # both raise ValueError when the bracket is absent.
        left = fulltext.rindex('<', start, frompos)
        right = fulltext.index('>', topos, finish) + 1
    except ValueError:
        wx.LogMessage("FuzzyScan went wrong")
        return ''
    return fulltext[left:right], left, right
def execute_command():  # execute the command
    """Scrape the current online-user count from bbs.xuegod.cn and print
    it with a timestamp."""
    # fetch the page
    request = urllib2.Request("http://bbs.xuegod.cn")
    response = urllib2.urlopen(request)
    reader = response.read()
    response.close()
    # pattern matching the online-user counter markup
    usernump = re.compile(r'人数<br><em>.*?</em>')
    usernummatch = usernump.findall(reader)
    if usernummatch:
        currentnum = usernummatch[0]
        # Slice out the digits: index('>') finds the '>' of '<br>',
        # +5 additionally skips '<em>'; rindex('<') stops before '</em>'.
        # (str methods replace the py2-only string.index/string.rindex.)
        currentnum = currentnum[currentnum.index('>') + 5:
                                currentnum.rindex('<')]
        # Single-argument print() behaves the same on py2 and py3.
        print("当前时间: %s 论坛在线人数: %s" % (
            time.strftime('%Y年%m月%d日%H时%M分', time.localtime(time.time())),
            currentnum))
def FuzzyScanSelection(self, frompos, topos, margin):
    """Expand a [frompos, topos) selection to the nearest enclosing
    '<'...'>' pair, searching at most *margin* chars either side.

    Returns (selected_text, left, right) on success, or '' when no
    bracket pair is found within the margins.
    NOTE(review): the failure path returns a bare string while the
    success path returns a tuple — callers must handle both.
    """
    fulltext = self.GetValue()
    # search left margin, clamped to the start of the text
    start = frompos - margin
    if start < 0:
        start = 0
    if frompos == 0:
        frompos = 1
    # search right margin, clamped to the end of the text
    finish = topos + margin
    if finish > len(fulltext):
        finish = len(fulltext)
    if topos > len(fulltext) - 2:
        topos = len(fulltext) - 2
    try:
        # str.rindex/str.index replace the py2-only string.rindex/index;
        # both raise ValueError when the bracket is absent.
        left = fulltext.rindex('<', start, frompos)
        right = fulltext.index('>', topos, finish) + 1
    except ValueError:
        wx.LogMessage("FuzzyScan went wrong")
        return ''
    return fulltext[left:right], left, right
def __init__(self, path):
    """Build a track record from the MP3 file at *path*.

    ID3 tags that are absent are simply skipped (KeyError swallowed) so
    partial metadata still loads.
    """
    audio = EasyID3(path)
    f = MP3(path)
    try:
        self.name = audio['title'][0]
    except KeyError:
        # No title tag: fall back to the file name without its extension.
        self.name = os.path.basename(os.path.splitext(audio.filename)[0])
    try:
        self.pub_year = audio['date'][0]
    except KeyError:
        pass
    try:
        self.artist = Tools.get_or_create(db.session, Artist,
                                          name=audio['artist'][0])
    except KeyError:
        pass
    # Plain assignments can't raise KeyError — the original try/excepts
    # around these two were dead code.
    self.path = path
    # kbps; '//' keeps integer semantics on both py2 and py3
    self.bitrate = f.info.bitrate // 1000
    try:
        self.album = Tools.get_or_create(db.session, Album,
                                         name=audio['album'][0])
        self.album.artist = self.artist
    except KeyError:
        pass
    try:
        self.genre = audio['genre'][0]
    except KeyError:
        pass
    self.duration = f.info.length
    # Static URL for the suffix of *path* starting at 'library/'
    # (str.rindex replaces the py2-only string.rindex).
    self.file_url = url_for(
        'static', filename=path[path.rindex('library/'):])
def process_line(line, width):
    """Greedy word-wrap helper: split *line* into chunks of at most
    *width* characters, breaking at the last space before the limit.

    Returns the list of chunks; the final chunk is stripped.
    Raises ValueError if a width-sized segment contains no space.
    """
    lst = []
    i = 0
    step = int(width)
    length = len(line)
    while i < length:
        if i + step < length:
            if line[i + step] != ' ':
                # Break at the last space before the width limit
                # (str.rindex replaces the py2-only string.rindex).
                end = line[:i + step].rindex(' ')
                lst.append(line[i:end])
                i = end + 1
            else:
                # NOTE(review): appending line[:i] (the prefix from 0)
                # looks wrong — line[i:i + step] was probably intended,
                # and i advances by only 1.  Preserved as-is; confirm.
                lst.append(line[:i])
                i = i + 1
        else:
            lst.append(line[i:].strip())
            break
    return lst
def process_line(line, width):
    """Greedy word-wrap helper: split *line* into chunks of at most
    *width* characters, breaking at the last space before the limit.

    Returns the list of chunks; the final chunk is stripped.
    Raises ValueError if a width-sized segment contains no space.
    """
    lst = []
    i = 0
    step = int(width)
    length = len(line)
    while i < length:
        if i + step < length:
            if line[i + step] != ' ':
                # Break at the last space before the width limit
                # (str.rindex replaces the py2-only string.rindex).
                end = line[:i + step].rindex(' ')
                lst.append(line[i:end])
                i = end + 1
            else:
                # NOTE(review): appending line[:i] (the prefix from 0)
                # looks wrong — line[i:i + step] was probably intended,
                # and i advances by only 1.  Preserved as-is; confirm.
                lst.append(line[:i])
                i = i + 1
        else:
            lst.append(line[i:].strip())
            break
    return lst
def umlErrors(witness):
    """Map a benchmark *witness* string to an (Error, comment) pair.

    Returns None (after a warning on stderr) for unrecognized witnesses.
    """
    comment = umlComment(witness)
    if "uncaught exception" in witness:
        # Exception name is the last space-separated token minus a
        # 3-character trailer (str.rindex replaces the py2-only bare
        # rindex imported from the string module).
        return Error(witness[witness.rindex(' ') + 1:-3], "Exception"), comment
    elif "CPU time" in witness:
        return Error("CPU Time", "Runtime"), comment
    elif "wrote the error message" in witness:
        return Error("Wrote Error", "Runtime"), comment
    elif "signalled a bug in type inference" in witness:
        return Error("Signalled Bug", "Type Inference"), comment
    elif "typed-incorrectly" in witness:
        return Error("Wrong Type", "Type Inference"), comment
    elif "did-not-type" in witness:
        return Error("Did Not Type", "Type Inference"), comment
    elif "typed-untypeable" in witness:
        return Error("Typed Untypeable", "Type Inference"), comment
    else:
        stderr.write('Warning: Unrecognized witness "' + witness + '"\n')
        return None
def execute_command():
    """Scrape the current online-user count from bbs.xuegod.cn, print it
    with a timestamp and append the same line to word.txt."""
    request = urllib2.Request("http://bbs.xuegod.cn")
    response = urllib2.urlopen(request)
    reader = response.read()
    response.close()
    # pattern matching the online-user counter markup
    usernump = re.compile(r'人数<br><em>.*?</em>')
    usernummatch = usernump.findall(reader)
    if usernummatch:
        currentnum = usernummatch[0]
        # Slice out the digits: index('>') finds the '>' of '<br>',
        # +5 additionally skips '<em>'; rindex('<') stops before '</em>'.
        # (str methods replace the py2-only string.index/string.rindex.)
        currentnum = currentnum[currentnum.index('>') + 5:
                                currentnum.rindex('<')]
        lit = "当前时间:" + time.strftime(
            "%y年%m月%d日%H时%M分", time.localtime(
                time.time())) + "论坛在线人数:" + currentnum + "\n"
        # Single-argument print() behaves the same on py2 and py3.
        print(lit)
        # 'with' guarantees the handle is closed (the original opened
        # word.txt and never closed it).
        with open('word.txt', 'a+') as f:
            f.write(lit)
def umlErrors(witness):
    """Map a benchmark *witness* string to an (Error, comment) pair.

    Returns None (after a warning on stderr) for unrecognized witnesses.
    """
    comment = umlComment(witness)
    if "uncaught exception" in witness:
        # Exception name is the last space-separated token minus a
        # 3-character trailer (str.rindex replaces the py2-only bare
        # rindex imported from the string module).
        return Error(witness[witness.rindex(' ') + 1:-3], "Exception"), comment
    elif "CPU time" in witness:
        return Error("CPU Time", "Runtime"), comment
    elif "wrote the error message" in witness:
        return Error("Wrote Error", "Runtime"), comment
    elif "signalled a bug in type inference" in witness:
        return Error("Signalled Bug", "Type Inference"), comment
    elif "typed-incorrectly" in witness:
        return Error("Wrong Type", "Type Inference"), comment
    elif "did-not-type" in witness:
        return Error("Did Not Type", "Type Inference"), comment
    elif "typed-untypeable" in witness:
        return Error("Typed Untypeable", "Type Inference"), comment
    else:
        stderr.write('Warning: Unrecognized witness "' + witness + '"\n')
        return None