def dump_inter_dep(pathF, pathP):
    """Extract P-frame motion-estimation dependencies from a codec log.

    Reads the frame log at pathF and, for every '&&&' (motion estimation)
    record that belongs to a P-frame ('65' marker), writes one line
    "<pframe>:<dependency>" to pathP.  Records attached to I-frames are
    skipped.
    """
    # 'with' guarantees both files are closed even if parsing raises
    with open(pathF, "r") as logF, open(pathP, "w") as logP:
        pFrame = 0
        for aLine in logF:
            if "+" in aLine:
                pFrame = 0
                tokens = re.split(":", aLine)
                if "65" in tokens[1]:  # it's a P frame
                    # frame number appears as "+NNN" in the first token
                    pFrame = int(tokens[0].lstrip("+"))
            if pFrame == 0:  # skip everything attached to I-frames
                continue
            if "&&&" in aLine:  # the motion estimation dependency
                logP.write(str(pFrame) + ":" + aLine.lstrip("&"))
def instantiateFilter(self, d, filter_str):
    """Return a copy of this query with ``filter_str`` appended to the
    nested filter, keeping only arguments not already bound in ``d``.

    NOTE(review): the bound-variable test uses ``self.subject.name`` for
    every argument instead of ``a.name`` — preserved as-is, but this looks
    like a copy/paste slip; confirm against Triple.instantiate.
    """
    # variable names carry a leading '?' or '$' sigil
    subject_var = self.subject.name.lstrip("?").lstrip("$")
    new_args = [a for a in self.args if subject_var not in d]
    return Query(self.prefs, new_args, self.body, self.distinct,
                 self.filter_nested + ' ' + filter_str)
def search_filename(filename, languages):
    """Resolve a subtitle search from a media file name.

    Movies (a clean title plus a plausible year) search as "title+year";
    otherwise an SxxEyy pattern is extracted and searched as
    "show SnnEnn"; as a last resort the raw filename is used.
    """
    title, year = xbmc.getCleanMovieTitle(filename)
    log(__name__, "clean title: \"%s\" (%s)" % (title, year))
    try:
        yearval = int(year)
    except ValueError:
        yearval = 0
    if title and yearval > 1900:
        search_string = title + "+" + year
        search_argenteam_api(search_string)
    else:
        match = re.search(r'\WS(?P<season>\d\d)E(?P<episode>\d\d)',
                          title, flags=re.IGNORECASE)
        if match is not None:
            tvshow = title[:match.start('season') - 1].strip()
            # drop leading zeros before the int() conversion below
            # NOTE(review): an all-zero group ('00') would make int('') fail —
            # preserved from the original; confirm inputs never contain E00/S00
            season = match.group('season').lstrip('0')
            episode = match.group('episode').lstrip('0')
            search_string = "%s S%#02dE%#02d" % (
                tvshow, int(season), int(episode))
            search_argenteam_api(search_string)
        else:
            search_argenteam_api(filename)
def getInfoIO(self, query):
    """Build the (endpoint, prefixed subquery, variable-name set) triple
    used to contact this SERVICE, projecting only the variables the
    surrounding plan needs."""
    subquery = self.service.getTriples()
    vars_order_by = [x for v in query.order_by for x in v.getVars()]
    vs = list(set(self.service.getVars()))
    predictVar = set(self.service.getPredVars())
    # variable names without their leading '?' / '$' sigil
    variables = [v.lstrip("?").lstrip("$") for v in vs]
    if query.args == []:
        projvars = vs
    else:
        projvars = list(set([v.name for v in query.args if not v.constant]))
    subvars = list((query.join_vars | set(projvars)) & set(vs))
    if subvars == []:
        subvars = vs
    subvars = list(set(subvars) | predictVar | set(vars_order_by))
    # When the subquery is the same as the original query, project the
    # original query's variables instead of the join variables.
    if query.body.show(" ").count("SERVICE") == 1:
        subvars = list(set(projvars) | set(vars_order_by))
    subvars = " ".join(subvars)
    # DISTINCT is pushed down to the source when the query asks for it
    d = "DISTINCT " if query.distinct else ""
    subquery = ("SELECT " + d + subvars + " WHERE {" + subquery + "\n"
                + query.filter_nested + "\n}")
    return (self.service.endpoint, query.getPrefixes() + subquery,
            set(variables))
def constructmenuitemstring(self, items, keybindingsdict):
    """Format a menu row: the label padded to column 41 followed by its
    key binding (if any).  Also records the label -> command mapping in
    the module-level ``commands`` dict."""
    if len(items) == 1:
        return items[0]
    keykey = items[1].strip()
    if keykey not in keybindingsdict:
        ret = items[0].rstrip()
        commands[ret.lstrip()] = items[1].lstrip()
        return ret
    key = keybindingsdict[keykey]
    qualifier = ""
    for item in key[1]:
        qualifier += get_key_name(item) + " + "
    stripped0 = items[0].rstrip()
    # pad the label so all bindings line up in one column
    ret = (stripped0 + " " * max(0, 41 - len(stripped0))
           + qualifier + get_key_name(key[0][0]))
    commands[ret.lstrip()] = items[1].lstrip()
    return ret
def LookForNext(newlines, i):
    """Return the entry that follows ``newlines[i]`` at the same outline
    level (dashes stripped), wrapping to the parent's first child when
    the level run ends."""
    currLevel = GetLevel(newlines[i])
    if currLevel == 1:
        return newlines[i].lstrip('-')
    # last element of the input list: wrap to the parent's first child
    if i == len(newlines) - 1:
        parent = LookForParent(newlines, i)
        firstChild = newlines.index('-' * (currLevel - 1) + parent) + 1
        return newlines[firstChild].lstrip('-')
    # scan from the next entry to the end
    for j in range(i + 1, len(newlines)):
        level = GetLevel(newlines[j])
        if level < currLevel:
            # the outline "popped up" a level: wrap to parent's first child
            parent = LookForParent(newlines, i)
            firstChild = newlines.index('-' * (currLevel - 1) + parent) + 1
            return newlines[firstChild].lstrip('-')
        if level == currLevel:
            return newlines[j].lstrip('-')
    # fell off the end: wrap to the first entry anywhere at this level
    for j in range(0, len(newlines)):
        if GetLevel(newlines[j]) == currLevel:
            return newlines[j].lstrip('-')
def logProject(self, gitdir, projectname, year=None, projectdesc=None, county=None):
    """
    Figures out the commit string and the tag.  Subclass should figure
    out the rest.  Logs one summary row and returns the SHA1 hash ID of
    the git commit of the project applied.
    """
    commitstr = self.getCommit(gitdir)
    tag = self.getTags(gitdir, commitstr)
    yearstr = "%4d" % year if year else " "
    WranglerLogger.info("%-4s | %-5s | %-40s | %-40s | %-10s | %s" %
                        (yearstr,
                         tag if tag else "notag",
                         commitstr if commitstr else "",
                         projectname.lstrip() if projectname else "",
                         county.lstrip() if county else "",
                         projectdesc.lstrip() if projectdesc else ""))
    # remember what was applied so callers can look the tag up later
    self.appliedProjects[projectname] = tag if tag else commitstr
    return commitstr
def start_logging(self): # Register Ctrl+C handler so that we can gracefully logout signal.signal(signal.SIGINT, self.stop_logging) while(1): data = self.IRCsock.recv(2048) print data msg = string.split(data) #print msg # Handle Alive state if msg[0] == "PING": self.irc_send_command("PONG %s" % msg[1]) # Handle private messages to bot if ('PRIVMSG' == msg[1] and self.NICKNAME == msg[2] ) or ('PRIVMSG' == msg[1] and self.LOGCHANNEL == msg[2] and self.NICKNAME == string.strip(msg[3], ':,')): self.nick_name = string.lstrip(msg[0][:string.find(msg[0],"!")], ':') self.privmsg = ":Heya there! I'm LoggerBot, Do Not Disturb Me!" self.irc_send_command("PRIVMSG %s %s" % (self.nick_name, self.privmsg)) # Actual logging of channel if msg[1] == "PRIVMSG" and msg[2] == self.LOGCHANNEL: self.logfile = open("/tmp/channel.log", "a+") self.nick_name = msg[0][:string.find(msg[0],"!")] timenow = self.convert_utc2local(strftime("%Y-%m-%d %H:%M:%S", gmtime()), 'Asia/Kolkata') message = ' '.join(msg[3:]) self.logfile.write(timenow + " " + string.lstrip(self.nick_name, ':') + ' -> ' + string.lstrip(message, ':') + '\n') self.logfile.flush()
def strip_opening_quotes(s):
    """Remove leading whitespace, then any leading double quotes, then
    any leading single quotes (with whitespace re-stripped after each)."""
    s = s.lstrip()
    s = s.lstrip('"').lstrip()
    s = s.lstrip("'").lstrip()
    return s
def execute(self, operation, parameters=None):
    """Execute a query or command.

    SELECT statements go through a data reader; EXEC is currently a
    no-op stub; anything else runs as a non-query command.
    """
    # TODO:
    # Support parameters. See the todo under callproc
    sqlCom = self.con.CreateCommand()
    sqlCom.Transaction = self.tran
    sqlCom.CommandText = operation
    self._disposedatareader()  # ( Needed even for ExecuteNonQuery().
    # classify the statement once instead of re-stripping per branch
    verb = operation.lstrip().upper()
    if verb[:6] == 'SELECT':
        sqlCom.CommandType = System.Data.CommandType.Text  # ( Just in case
        self._executereader(sqlCom)
    elif verb[:4] == 'EXEC':
        # TODO: Include this command:
        #   sqlCom.CommandType = System.Data.CommandType.StoredProcedure
        # TODO:
        # This isn't working for some reason with this command from the
        # calling program:
        #   cur2.execute('exec Test 1, "test2", 3, 4')
        #   self._executereader(sqlCom)
        pass
    else:
        sqlCom.CommandType = System.Data.CommandType.Text
        self.datareader = sqlCom.ExecuteNonQuery()
    sqlCom.Dispose()
def getCount(self, query, vars, endpointType):
    """Build a COUNT query over this service's triples.

    endpointType == "V" uses Virtuoso's ``SELECT COUNT`` form; anything
    else uses the standard SPARQL ``(COUNT(...) AS ?cnt)`` form.
    Returns (endpoint, prefixed count query).
    """
    subquery = self.service.getTriples()
    if len(vars) == 0:
        vars_str = "*"
    else:
        service_vars = self.service.getVars()
        vars2 = []
        for v1 in vars:
            for v2 in service_vars:
                # match the sigil-free name against the service variable
                if v1 == v2[1:]:
                    vars2.append(v2)
                    break
        vars_str = " ".join(vars2) if vars2 else "*"
    d = "DISTINCT "
    if endpointType == "V":
        subquery = ("SELECT COUNT " + d + vars_str + " WHERE {" + subquery
                    + "\n" + query.filter_nested + "}")
    else:
        subquery = ("SELECT ( COUNT (" + d + vars_str + ") AS ?cnt) WHERE {"
                    + subquery + "\n" + query.filter_nested + "}")
    return (self.service.endpoint, query.getPrefixes() + subquery)
def location(self):
    """Return the etcd path '/services/<prefix>/<container-name>' derived
    from the container's ETCD_SERVICES environment entry, or '' when the
    variable is absent."""
    for env_variable in self.container_info['Config']['Env']:
        if re.search('ETCD_SERVICES', env_variable):
            prefix = env_variable.split('=')[1].strip().lstrip('/')
            name = self.container_info['Name'].lstrip('/')
            return os.path.join('/services', prefix, name)
    return ''
def getctrldir(readin, jobid):
    """Build the two directory-name components for a job from the '@'
    separated settings text in ``readin``.

    NOTE(review): exec() of the settings text defines atom, pz, fsmom,
    alat, discenter, rstar, eh and eh2 in the local scope (Python 2
    semantics) — only call this with trusted input.
    """
    setting = {}
    molset = re.sub("@ ?", '\n', readin)
    exec(molset)
    setting['ATOM___'] = atom
    setting['PZ___'] = pz
    setting['FSMOM___'] = '%d' % fsmom
    setting['ALAT___'] = '%9.4f' % alat
    rmt = discenter / 2.0 * rstar
    rsmh = rmt / 2.0
    eh_c = ' %3.1f' % eh    # converted to char
    eh2_c = ' %3.1f' % eh2
    rsmh_c = ' %3.3f' % max(rsmh, 0.5)
    setting['RMT___'] = ' %3.3f' % rmt
    setting['EH___'] = ' EH=' + 4 * eh_c
    setting['EH2___'] = 'EH2=' + 4 * eh2_c
    dname1 = (dirhead + setout(setting, 'ATOM___')
              + ',fsmom=' + setout(setting, 'FSMOM___')
              + ',alat=' + setout(setting, 'ALAT___'))
    dname2 = ('rmt=' + setout(setting, 'RMT___')
              + ',EH=' + eh_c.lstrip() + ',EH2=' + eh2_c.lstrip()
              + ',' + setout(setting, 'PZ___') + ',')
    return dname1, dname2
def autoindent(self, command):
    """Insert leading whitespace matching ``command``'s indentation, plus
    one extra step when the line ends with ':'.  Extremely basic
    autoindenting; needs more work here."""
    indent = len(command) - len(command.lstrip())
    if command.lstrip():
        self.text.insert("insert", command[:indent])
    # NOTE(review): raises IndexError on all-whitespace input — preserved.
    if command.rstrip()[-1] == ":":
        self.text.insert("insert", " ")
def emgr_params(self):
    """
    Reads EMGR_SNAP and returns LIST with info about installed ifixes
    (one dict per ifix: label, status, abstract, install date), or None
    when no snapshot is available.
    """
    if not self.EMGR_SNAP:
        return None
    EMGR = []
    conffile = self.EMGR_SNAP
    while True:
        line = conffile.readline()
        if 'LABEL:' in line:
            emgr_label = line.split(':')[1].rstrip('\r\n').lstrip()
            # fixed snapshot layout: skip filler lines between fields
            conffile.readline()
            conffile.readline()
            line = conffile.readline()
            emgr_status = line.split(':')[1].rstrip('\r\n').lstrip()
            conffile.readline()
            line = conffile.readline()
            emgr_abstruct = line.split(':')[1].rstrip('\r\n').lstrip()
            conffile.readline()
            conffile.readline()
            line = conffile.readline()
            emgr_instdate = line.split(':')[1].split()[0].rstrip('\r\n').lstrip()
            EMGR.append({'emgr_label': emgr_label,
                         'emgr_status': emgr_status,
                         'emgr_abstruct': emgr_abstruct,
                         'emgr_instdate': emgr_instdate})
        if not line:
            break
    return EMGR
def dump_gop_rec(pathF, pathP):
    """Write GOP (group-of-pictures) records extracted from a frame log.

    Each I-frame ('1' marker) opens a record "<start>:"; the next I-frame
    closes the previous record with "<end>:\\n".  The file ends with the
    total frame count seen.
    """
    with open(pathF, "r") as logF, open(pathP, "w") as logP:
        gopStarted = 0
        totalFrameNum = 0
        for aLine in logF:
            if "+" not in aLine:
                continue
            tokens = re.split(":", aLine)
            # NOTE: substring tests on the type token ('1' also matches
            # '16', '31', ...) — preserved from the original.
            if "1" in tokens[1]:  # I-frame marker
                iFrame = int(tokens[0].lstrip("+"))
                totalFrameNum += 1
                if gopStarted == 0:
                    logP.write(str(iFrame) + ":")
                    gopStarted = 1
                else:
                    # close the previous GOP and open the next one
                    logP.write(str(iFrame - 1) + ":\n")
                    logP.write(str(iFrame) + ":")
            if "65" in tokens[1]:  # P-frame marker
                totalFrameNum += 1
        logP.write(str(totalFrameNum) + ":\n")
def instantiate(self, d):
    """Return a copy of this query with the body instantiated from the
    binding dict ``d``, keeping only arguments not already bound.

    NOTE(review): the bound test uses ``self.subject.name`` for every
    argument rather than ``a.name`` — kept as-is; verify intent.
    """
    # variable names carry a leading '?' or '$' sigil
    subject_var = self.subject.name.lstrip("?").lstrip("$")
    new_args = [a for a in self.args if subject_var not in d]
    return Query(self.prefs, new_args, self.body.instantiate(d), self.distinct)
def get_dir(self, path, dest='', env='base', gzip=None):
    '''
    Get a directory recursively from the salt-master
    '''
    ret = []
    # Strip trailing slash
    path = self._check_proto(path).rstrip('/')
    # Break up the path into a list containing the bottom-level directory
    # (the one being recursively copied) and the directories preceding it
    separated = path.rsplit('/', 1)
    if len(separated) != 2:
        # No slashes in path. (This means all files in env will be copied)
        prefix = ''
    else:
        prefix = separated[0]

    # Copy files from master
    for fn_ in self.file_list(env):
        if fn_.startswith(path):
            # Prevent files in "salt://foobar/" (or salt://foo.sh) from
            # matching a path of "salt://foo"
            try:
                if fn_[len(path)] != '/':
                    continue
            except IndexError:
                continue
            # Remove the leading directories from path to derive
            # the relative path on the minion.
            minion_relpath = fn_[len(prefix):].lstrip('/')
            ret.append(
                self.get_file(
                    'salt://{0}'.format(fn_),
                    '{0}/{1}'.format(dest, minion_relpath),
                    True, env, gzip
                )
            )
    # Replicate empty dirs from master
    try:
        for fn_ in self.file_list_emptydirs(env):
            if fn_.startswith(path):
                # Prevent an empty dir "salt://foobar/" from matching a
                # path of "salt://foo"
                try:
                    if fn_[len(path)] != '/':
                        continue
                except IndexError:
                    continue
                # Remove the leading directories from path to derive
                # the relative path on the minion.
                minion_relpath = fn_[len(prefix):].lstrip('/')
                minion_mkdir = '{0}/{1}'.format(dest, minion_relpath)
                if not os.path.isdir(minion_mkdir):
                    os.makedirs(minion_mkdir)
                ret.append(minion_mkdir)
    except TypeError:
        pass
    ret.sort()
    return ret
def assembleNode(fout, nodeLines):
    """Assemble one node's instruction lines and write exactly 15 packed
    instruction records to ``fout`` (missing lines pad as null opcodes).

    Labels ("name:") are recorded as addresses and stripped before
    assembly; operands may be integers, labels, or named registers.
    """
    labelToAddr = {}
    # scan for labels, removing them
    for addr in range(len(nodeLines)):
        line = nodeLines[addr].lstrip()
        while 1:
            m = re.match(r'(\w+):.*', line)
            if not m:
                break
            label = m.group(1)
            labelToAddr[label] = addr
            print("set %s to address %d" % (label, addr))
            line = line[len(label) + 1:].lstrip()
        nodeLines[addr] = line
    # add empty lines so that 15 instructions are assembled
    if len(nodeLines) > 15:
        raise Exception("node has more than 15 instructions!")
    nodeLines = nodeLines + ([""] * (15 - len(nodeLines)))
    # now assemble
    for line in nodeLines:
        m = re.match(r'^(\w+) (.+?)(?:, (.+))?$', line)
        if m:
            mnem = m.group(1)
            opers = list(m.group(2, 3))
            if mnem in defs.opcStrToId:
                opcode = defs.opcStrToId[mnem]
            else:
                raise Exception("unknown mnemonic \"%s\"" % mnem)
            # map text operand to id
            for i in range(len(opers)):
                if opers[i] is None:
                    opers[i] = 0
                elif re.match(r'-?\d+', opers[i]):
                    opers[i] = int(opers[i])
                elif opers[i] in labelToAddr:
                    opers[i] = labelToAddr[opers[i]]
                elif opers[i] in defs.operToId:
                    opers[i] = defs.operToId[opers[i]]
                else:
                    print("illegal operand \"%s\"" % opers[i])
            # write 'em
            data = struct.pack('<Bhh', opcode, opers[0], opers[1])
            fout.write(data)
        elif re.match(r'^\s*$', line):
            # blank slot: emit a null instruction
            data = struct.pack('<BHH', OPCODE_NULL, 0, 0)
            fout.write(data)
        else:
            print("syntax error for instruction \"%s\"" % line)
            sys.exit(-1)
def LookForParent(newlines, i):
    """Return the nearest preceding entry whose outline level is
    shallower than ``newlines[i]`` (its parent), dashes stripped.

    Level-1 entries are their own parent; implicitly returns None when
    no shallower entry precedes position ``i``.
    """
    currLevel = GetLevel(newlines[i])
    if currLevel == 1:
        return newlines[i].lstrip('-')
    for j in range(i - 1, -1, -1):
        if GetLevel(newlines[j]) < currLevel:
            return newlines[j].lstrip('-')
def StripContMark(self, line, line_num, col_num, next):
    """Strip a continuation mark from ``line`` (recording it as a
    ContMarker attribute at the given position) and return the remainder
    of the line; lines without a mark pass through unchanged."""
    # continuation mark only if next line is a continued one
    contmatch = self.re_contmark.match(line)
    if contmatch:
        text = contmatch.group().lstrip()
        self.AppendAttribute(ContMarker(text, (line_num, col_num)))
        # drop the single mark character after leading whitespace
        line = line.lstrip()[1:]
    return line
def downloadToFileFromHTTPFormSubmit(self, url, method, paramDict, outputFile):
    """Submit an HTTP form (GET or POST) and stream the response body to
    ``outputFile``.

    Returns 1 on '200 OK', 0 on other HTTP results (output file removed),
    -1 on transfer error, -2 when the output file cannot be created.
    """
    self.httpLock.acquire()
    self.setupHandle()
    self.httpHeaders = StringIO.StringIO()
    oFile = None
    try:
        oFile = open(outputFile, 'wb')
    except:
        # unable to create a file for storing content
        self.httpLock.release()
        return -2
    if method == 'get':
        url += '?' + urllib.urlencode(paramDict)
    # BUG FIX: the original used string.lstrip(url, 'http://'), which
    # strips any leading run of the characters h/t/p/:// — eating the
    # first letters of hosts such as 'test.com'.  Remove the scheme
    # prefix exactly once instead.
    if url.startswith('http://'):
        strippedURL = url[len('http://'):]
    else:
        strippedURL = url
    url = 'http://' + urllib.quote(strippedURL)
    self.httpHandle.setopt(pycurl.URL, url)
    if method != 'get':
        self.httpHandle.setopt(pycurl.POST, 1)
        self.httpHandle.setopt(pycurl.POSTFIELDS, urllib.urlencode(paramDict))
    self.httpHandle.setopt(pycurl.WRITEDATA, oFile)
    try:
        self.httpHandle.perform()
    except:
        # exception in performing HTTP action
        # NOTE(review): oFile is left open on this path — preserved.
        self.httpLock.release()
        return -1
    self.httpHeaders.flush()
    headerData = self.httpHeaders.getvalue()
    self.httpHeaders.close()
    del self.httpHeaders
    self.httpHeaders = None
    oFile.flush()
    oFile.close()
    if headerData.find('200 OK') > 0:
        self.httpLock.release()
        return 1
    os.remove(outputFile)
    self.setupHandle()
    self.httpLock.release()
    return 0
def read(file):
    """Read in a file and create a data structure that is a hash with
    members 'header' and 'data'.  The 'header' is a hash of header
    keywords, the data is a hash of columns.  To get to the nth element
    of column NAME use hdu['data'][NAME][n].  To get the header
    information use hdu['header'][KEYWORD].

    (Note: the parameter name shadows the Python 2 builtin ``file``;
    kept for interface compatibility.)
    """
    f = open(file, 'r')
    lines = f.readlines()
    f.close()
    import re
    keywords = []
    values = []
    header = {}
    cdata = {}
    for line in lines:
        if re.match(r'^##', line):
            # '##' line declares the keyword names for the next '#' line
            m = line[2:].lstrip().split()
            if not m:
                # BUG FIX: the original wrote a literal " \ n " instead
                # of a newline in this message.
                sys.stderr.write("Ill formed header line in %s\n" % (file,))
                sys.stderr.write(line)
                continue
            keywords = m
            continue
        if re.match(r'^# ', line):
            # '# ' line: keyword values matching the preceding '##' line
            m = line[1:].lstrip().split()
            values = m
            if len(values) != len(keywords):
                sys.stderr.write("Ill formed header, keyword/value mismatch\n")
            for index in range(len(values)):
                header[keywords[index]] = values[index]
            keywords = []
            values = []
            continue
        # must be the actual data;
        # expect a previous header line to define column headers
        if not keywords:
            sys.stderr.write("No keywords for data columns, assuming x,y,mag,msky\n")
            keywords = ('X', 'Y', 'MAG', 'MSKY')
        values = line.lstrip().split()
        if len(values) != len(keywords):
            sys.stderr.write("Keyword and value index have different length?\n")
        if not cdata:
            for keyword in keywords:
                cdata[keyword] = []
        for index in range(len(values)):
            cdata[keywords[index]].append(values[index])
    # the hdu is a dictionary with the header and data
    hdu = {'data': cdata, 'header': header}
    return hdu
def handle_rating(request):
    """AJAX endpoint for the star-rating widget.

    A POST containing 'fetch' returns the current vote count/average for
    the record encoded in widget_id ("rec_<id>"); any other POST records
    or updates the requesting user's rating and returns the recomputed
    average.  The response is a JSON object for the widget.
    """
    import json
    # widget ids look like "rec_<record id>" (char-set strip is safe for
    # numeric ids since no digit is in {r,e,c,_})
    record_id = int(request.POST['widget_id'].lstrip("rec_"))
    rating_obj = Rating.objects.filter(record__id=record_id)
    if 'fetch' in request.POST:
        if rating_obj.count() < 1:
            resp_data = {'widget_id': "rec_" + str(record_id),
                         'number_votes': "0", "total_points": "0",
                         'dec_avg': '0', 'whole_avg': '0'}
        else:
            resp_data = {'widget_id': "rec_" + str(record_id),
                         'number_votes': rating_obj[0].count,
                         "total_points": "0",
                         'dec_avg': rating_obj[0].avg_rating,
                         'whole_avg': int(round(rating_obj[0].avg_rating))}
        return HttpResponse(json.dumps(resp_data))
    # the clicked star arrives as "star_<n> ..."
    clicked_rating = int(request.POST['clicked_on'].split(' ')[0].lstrip("star_"))
    user_rating = Userrating.objects.filter(rated_by__id=request.user.id,
                                            rated_for__id=record_id)
    if rating_obj.count() < 1:
        # first-ever rating for this record
        # NOTE(review): if a Userrating exists without a Rating row,
        # resp_data is never assigned below and this view raises — the
        # original had the same gap; preserved.
        if user_rating.count() < 1:
            Userrating.objects.create(rated_by_id=request.user.id,
                                      rated_for_id=record_id,
                                      rating=clicked_rating,
                                      rated_on=datetime.now())
            Rating.objects.create(record_id=record_id, count=1,
                                  avg_rating=clicked_rating)
            resp_data = {'widget_id': "rec_" + str(record_id),
                         'number_votes': "1", "total_points": "0",
                         'dec_avg': clicked_rating,
                         'whole_avg': clicked_rating}
    else:
        if user_rating.count() < 1:
            # new voter on an already-rated record: fold the vote in
            Userrating.objects.create(rated_by_id=request.user.id,
                                      rated_for_id=record_id,
                                      rating=clicked_rating,
                                      rated_on=datetime.now())
            existing_score = rating_obj[0].avg_rating * rating_obj[0].count
            # NOTE(review): this division truncates under Python 2 when
            # both operands are ints — preserved as-is.
            new_avg = (existing_score + clicked_rating) / (rating_obj[0].count + 1)
            Rating.objects.filter(id=rating_obj[0].id).update(
                avg_rating=new_avg, count=rating_obj[0].count + 1)
            resp_data = {'widget_id': "rec_" + str(record_id),
                         'number_votes': rating_obj[0].count,
                         "total_points": "0",
                         'dec_avg': rating_obj[0].avg_rating,
                         'whole_avg': round(rating_obj[0].avg_rating)}
        else:
            # the user re-voted: replace their old score in the average
            existing_avg = rating_obj[0].avg_rating
            existing_count = rating_obj[0].count
            existing_user_rating = user_rating[0].rating
            Userrating.objects.filter(id=user_rating[0].id).update(
                rating=clicked_rating)
            new_avg = ((existing_avg * existing_count) - existing_user_rating
                       + clicked_rating) / existing_count
            Rating.objects.filter(id=rating_obj[0].id).update(avg_rating=new_avg)
            resp_data = {'widget_id': "rec_" + str(record_id),
                         'number_votes': existing_count,
                         "total_points": "0",
                         'dec_avg': rating_obj[0].avg_rating,
                         'whole_avg': round(rating_obj[0].avg_rating)}
    return HttpResponse(json.dumps(resp_data))
def StrToNum(string_num):
    """Convert a decimal digit string (1-4 digits, i.e. 0-9999) to its
    English words, e.g. '305' -> 'three hundred and five'."""
    STRING_NUM = {
        '0': 'zero', '1': 'one', '2': 'two', '3': 'three', '4': 'four',
        '5': 'five', '6': 'six', '7': 'seven', '8': 'eight', '9': 'nine',
        '10': 'ten', '11': 'eleven', '12': 'twelve', '13': 'thirteen',
        '14': 'fourteen', '15': 'fifteen', '16': 'sixteen',
        '17': 'seventeen', '18': 'eighteen', '19': 'nineteen',
    }
    MAPPING = {
        '2': 'twenty', '3': 'thirty', '4': 'forty', '5': 'fifty',
        '6': 'sixty', '7': 'seventy',
        '8': 'eighty',  # BUG FIX: was 'eight'
        '9': 'ninety',
    }
    number = int(string_num)
    if number < 20:
        return STRING_NUM[string_num]
    elif 20 <= number < 100:
        if number % 10 == 0:
            return MAPPING[string_num[0]]
        return MAPPING[string_num[0]] + ' ' + STRING_NUM[string_num[1]]
    elif 100 <= number < 1000:
        if number % 100 == 0:
            return STRING_NUM[string_num[0]] + ' hundred'
        return (STRING_NUM[string_num[0]] + ' hundred and '
                + StrToNum(string_num[1:].lstrip('0')))
    elif 1000 <= number < 10000:
        if number % 1000 == 0:
            return STRING_NUM[string_num[0]] + ' thousand'
        return (STRING_NUM[string_num[0]] + ' thousand '
                + StrToNum(string_num[1:].lstrip('0')))
def run_filelog(self, *args):
    """Run 'p4 filelog' and convert tagged output into DepotFile objects
    with one revision entry per file revision, plus any integration
    records."""
    raw = self.run('filelog', args)
    if (not self.tagged):
        # untagged mode returns simple strings, which breaks the code below
        return raw
    result = []
    for h in raw:
        r = None
        if isinstance(h, dict):
            df = DepotFile(h["depotFile"])
            for n, rev in enumerate(h["rev"]):
                # Create a new revision of this file ready for populating
                r = df.new_revision()
                # Populate the base attributes of each revision
                r.rev = int(rev)
                r.change = int(h["change"][n])
                r.action = h["action"][n]
                r.type = h["type"][n]
                r.time = datetime.datetime.utcfromtimestamp(int(h["time"][n]))
                r.user = h["user"][n]
                r.client = h["client"][n]
                r.desc = h["desc"][n]
                if "digest" in h:
                    r.digest = h["digest"][n]
                if "fileSize" in h:
                    r.fileSize = h["fileSize"][n]
                # Now if there are any integration records for this
                # revision, add them in too
                if (not "how" in h) or (n >= len(h["how"]) or h["how"][n] == None):
                    continue
                for m, how in enumerate(h["how"][n]):
                    file = h["file"][n][m]
                    # revision specs arrive as '#N' or 'none'
                    srev = h["srev"][n][m].lstrip('#')
                    erev = h["erev"][n][m].lstrip('#')
                    srev = 0 if srev == "none" else int(srev)
                    erev = 0 if erev == "none" else int(erev)
                    r.integration(how, file, srev, erev)
        else:
            r = h
        # NOTE(review): df (not r) is appended even for non-dict entries,
        # and before any dict entry exists this raises NameError —
        # preserved from the original; confirm intent.
        result.append(df)
    return result
def check_versions(str_filename,ffdoc_filename): f = open(str_filename, 'r') for line in f.readlines(): if line.startswith("* For use with CGenFF version"): entry = re.split('\s+', string.lstrip(line)) print "--Version of CGenFF detected in ",str_filename,":",entry[6] f.close() f = open(ffdoc_filename, 'r') for line in f.readlines(): if line.startswith("Parameters taken from CHARMM36 and CGenFF"): entry = re.split('\s+', string.lstrip(line)) print "--Version of CGenFF detected in ",ffdoc_filename,":",entry[6] f.close()
def __init__(self, block_line_list=[], source_line_list=[]):
    """Parse a documentation block: lines beginning '<word>' or '@word:'
    start a new (marker, contents) item; all other lines are content for
    the current marker.

    NOTE(review): the mutable default arguments are preserved from the
    original; they are never mutated here, so the sharing is harmless.
    """
    self.items = []            # current ( marker, contents ) list
    self.section = None        # section this block belongs to
    self.filename = "unknown"  # filename defining this block
    self.lineno = 0            # line number in filename
    marker = None              # current marker
    content = []               # current content lines list
    # ascii_letters replaces the removed-in-Py3 string.letters
    alphanum = string.ascii_letters + string.digits + "_"
    self.name = None

    for line in block_line_list:
        line2 = line.lstrip()
        l = len(line2)
        margin = len(line) - l
        if l > 3:
            # a marker looks like '<word>' or '@word:'
            ender = None
            if line2[0] == '<':
                ender = '>'
            elif line2[0] == '@':
                ender = ':'
            if ender:
                i = 1
                while i < l and line2[i] in alphanum:
                    i = i + 1
                if i < l and line2[i] == ender:
                    # flush the previous item and start a new one
                    if marker and content:
                        self.add(marker, content)
                    marker = line2[1:i]
                    content = []
                    line2 = line2[i + 1:].lstrip()
                    l = len(line2)
                    line = " " * margin + line2
        content.append(line)
    if marker and content:
        self.add(marker, content)

    self.source = []
    if self.items:
        self.source = source_line_list

    # now retrieve block name when possible
    if self.items:
        first = self.items[0]
        self.name = first[1].get_identifier()
def _write(self, f, level=0):
    """Internal method used to write the XML representation of each node.

    Note: the Python-2-only '<>' operators in the original are replaced
    by the equivalent '!='.
    """
    if self._name == "":
        return
    indnt = indent[level]
    # handle comments
    if self._name == "_comment_":
        f.write("\n" + indnt + "<!--")
        if len(self._value) > 0:
            # pad the comment text with single spaces on both sides
            if self._value[0] != " ":
                self._value = " " + self._value
            if self._value[-1] != " ":
                self._value = self._value + " "
        f.write(self._value + "-->")
        return
    # write the opening tag and attributes
    f.write(indnt + "<" + self._name)
    for a in self._attribs.keys():
        f.write(" " + a + '="' + self._attribs[a] + '"')
    if self._value == "" and self.nChildren() == 0:
        f.write("/>")
    else:
        f.write(">")
        if self._value != "":
            vv = self._value.lstrip()
            ieol = vv.find("\n")
            if ieol >= 0:
                # multi-line value: re-emit each line re-indented
                while True:
                    ieol = vv.find("\n")
                    if ieol >= 0:
                        f.write("\n " + indnt + vv[:ieol])
                        vv = vv[ieol + 1:].lstrip()
                    else:
                        f.write("\n " + indnt + vv)
                        break
            else:
                f.write(self._value)
        for c in self._children:
            f.write("\n")
            c._write(f, level + 2)
        if self.nChildren() > 0:
            f.write("\n" + indnt)
        f.write("</" + self._name + ">")
def fetch_data(self, plugindict): # # # XXX rename plugindict to something else apirooturl = 0 apikey = 0 apiuri = "/projects/" permalink = 0 print "Fetching data" if plugindict is not None: print plugindict apirooturl = plugindict.get('datasource.freshmeat.api.root.uri') apikey = plugindict.get('datasource.freshmeat.apikey') permalink = plugindict.get('datasource.freshmeat.permalink') # # field1 - votescore # # Use Freshmeat.net Data API - Project, fetch as JSON # print "calling Freshmeat.net api: " + apirooturl + apiuri + permalink + '.json' + '?auth_code=' + apikey apiReq = urllib2.urlopen(apirooturl + apiuri + permalink + '.json' + '?auth_code=' + apikey) # # XXX This sucks. I need a real JSON parser # apiResp = [string.strip(elem) for elem in string.rsplit(apiReq.read(), ',')] print apiResp # XXX Debug output self.dataDict["votescore"] = [string.lstrip(string.split(elem, ':')[1]) for elem in apiResp if elem.find("vote_score") > -1][0] # # field2 - popularity # self.dataDict["popularity"] = [string.lstrip(string.split(elem, ':')[1]) for elem in apiResp if elem.find("popularity") > -1][0] # # field3 - vitality # self.dataDict["vitality"] = [string.lstrip(string.split(elem, ':')[1]) for elem in apiResp if elem.find("vitality") > -1][0] # # field4 - subscribers # self.dataDict["subscribers"] = [string.lstrip(string.split(elem, ':')[1]) for elem in apiResp if elem.find("subscriptions_count") > -1][0] print self.dataDict return self.dataDict
def instantiate(self, d):
    """Return a copy of this triple with every non-constant term whose
    (sigil-free) name is bound in ``d`` replaced by a constant
    Argument."""
    def _bind(term):
        # variable names are stored with a leading '?' or '$' sigil
        name = term.name.lstrip("?").lstrip("$")
        if (not term.constant) and (name in d):
            return Argument(d[name], True)
        return term
    return Triple(_bind(self.subject), _bind(self.predicate),
                  _bind(self.theobject))
def parse(fp):
    """Parse a Lisp-style s-expression stream into nested Python tuples.

    Numbers become floats, double-quoted strings become str, bare tokens
    become str symbols; ';' starts a comment running to end of line.
    Raises IOError on unbalanced parentheses.
    """
    stack = [()]
    line = fp.readline()
    while line:
        while line:
            line = line.lstrip()
            if not line:
                break
            elif line[0] == '(':
                stack.append(())
                line = line[1:]
            elif line[0] == ')':
                closed = stack[-1]
                del stack[-1]
                stack[-1] = stack[-1] + (closed,)
                line = line[1:]
            elif line[0] == '"':
                pos = line[1:].index('"')
                # NOTE(review): eval() of the quoted literal is preserved
                # from the original; do not feed this untrusted input.
                stack[-1] = stack[-1] + (eval(line[:pos + 2]),)
                line = line[pos + 2:]
            elif line[0] in string.digits:
                token = ""
                while line and line[0] in "0123456789+-.":
                    token = token + line[0]
                    line = line[1:]
                stack[-1] = stack[-1] + (float(token),)
            elif line[0] == ';':
                # comment: skip the rest of the line
                break
            else:
                token = ""
                while line and line[0] not in " \t();\n":
                    token = token + line[0]
                    line = line[1:]
                stack[-1] = stack[-1] + (token,)
        line = fp.readline()
    if len(stack) != 1:
        raise IOError("parentheses don't match")
    return stack[0]
def readLine(self, line):
    """Inner-loop line dispatcher: blank lines end paragraphs (in PLAIN
    mode), '.cmd args' lines invoke parser methods or set a paragraph
    style, anything else accumulates into the current paragraph."""
    # this is the inner loop
    self._lineNo = self._lineNo + 1
    stripped = line.lstrip()
    if len(stripped) == 0:
        if self._mode == PLAIN:
            self.endPara()
        else:
            # preformatted mode: keep the blank line verbatim
            self._buf.append(line)
    elif line[0] == '.':
        # we have a command of some kind
        self.endPara()
        words = stripped[1:].split()
        cmd, args = words[0], words[1:]
        # is it a parser method?
        if hasattr(self.__class__, cmd):
            # BUG FIX: the original wrote 'except TypeError(err):', which
            # evaluates TypeError(err) (raising NameError) instead of
            # catching; bind the exception with 'as' so the type error in
            # the method is reported instead of hidden.
            try:
                getattr(self, cmd)(*args)
            except TypeError as err:
                sys.stderr.write("Parser method: %s(*%s) %s at line %d\n" %
                                 (cmd, args, err, self._lineNo))
                raise
        else:
            # assume it is a paragraph style -
            # becomes the formatter's problem
            self.endPara()  # end the last one
            words = stripped.split(' ', 1)
            assert len(words) == 2, "Style %s but no data at line %d" % (
                words[0], self._lineNo)
            (styletag, data) = words
            self._style = styletag[1:]
            self._buf.append(data)
    else:
        # we have data, add to para
        self._buf.append(line)
def split_quoted(s):
    """Split ``s`` on whitespace the way a shell would: quotes group
    words together and backslash escapes the next character.  Returns
    the list of words."""
    if _wordchars_re is None:
        _init_regex()
    s = s.strip()
    words = []
    pos = 0
    while s:
        m = _wordchars_re.match(s, pos)
        end = m.end()
        if end == len(s):
            words.append(s[:end])
            break
        if s[end] in string.whitespace:
            # end of word: collapse any following whitespace
            words.append(s[:end])
            s = s[end:].lstrip()
            pos = 0
        elif s[end] == '\\':
            # drop the backslash, keep the escaped character literal
            s = s[:end] + s[end + 1:]
            pos = end + 1
        else:
            if s[end] == "'":
                m = _squote_re.match(s, end)
            elif s[end] == '"':
                m = _dquote_re.match(s, end)
            else:
                raise RuntimeError("this can't happen (bad char '%c')" % s[end])
            if m is None:
                raise ValueError("bad string (mismatched %s quotes?)" % s[end])
            (beg, end) = m.span()
            # remove the surrounding quote characters
            s = s[:beg] + s[beg + 1:end - 1] + s[end:]
            pos = m.end() - 2
            if pos >= len(s):
                words.append(s)
                break
        continue
    return words
def dedent(text):
    """dedent(text : string) -> string

    Remove any whitespace that can be uniformly removed from the left of
    every line in `text`.  Tabs are expanded first, and entirely blank
    lines are ignored when computing the common margin.  This can be
    used e.g. to make triple-quoted strings line up with the left edge
    of the screen while still presenting them indented in source.
    """
    lines = text.expandtabs().split('\n')
    margin = None
    for line in lines:
        content = line.lstrip()
        if not content:
            continue  # blank lines don't constrain the margin
        indent = len(line) - len(content)
        margin = indent if margin is None else min(margin, indent)
    if margin:
        lines = [line[margin:] for line in lines]
    return '\n'.join(lines)
def _partition_date(date):
    """
    Partition a date string into three sub-strings year, month, day

    The following styles are understood:
    (1) 22-AUG-1993 or 22-Aug-03 (2000+yy if yy<50)
    (2) 20010131
    (3) mm/dd/yy or mm/dd/yyyy
    (4) 10Aug2004 or 10Aug04
    (5) yyyy-mm-dd

    Raises Exception when no known pattern matches.
    """
    date = date.strip()
    for reg, idx in date_re_list:
        mo = reg.match(date)
        if mo is not None:
            return (mo.group(idx[0]), mo.group(idx[1]), mo.group(idx[2]))
    raise Exception("couldn't partition date: %s" % date)
def runFile(self, fd, isRethrow=0):
    """
    Read and execute commands from open file object.
    If script fails, then sends the exception info to debug (with the
    current verbosity level) and returns non-zero.  Otherwise returns 0.
    """
    line_no = 0
    while 1:
        line = fd.readline()
        line_no += 1
        if not line:
            return 0
        line = line.strip()
        # line is already stripped, so its first char suffices
        if line and line[0] == "#":
            continue  # Ignore comments
        res = self.onecmd(line, 1, isRethrow)
        self.postcmd(1, line)
        if res:
            print >> sys.stderr, "Stop processing at line: ", line_no
            return res
def generate_target_file(src, target, release):
    '''Generate file(s) from the template ``src``.

    A Dockerfile template is written (with app.sh copied alongside) into
    <target>/spoke-builder using ``release`` and into ./spoke-builder
    using 'master'; any other template becomes frontpage.yml with the
    'v' prefix stripped from ``release`` ('latest' for the ./ copy).
    '''
    with open(src, "r") as reader:
        cont = reader.read()
    src_name = os.path.basename(src)
    dir_name = os.path.dirname(src)
    if src_name == "Dockerfile":
        with open(os.path.join(target, "spoke-builder", "Dockerfile"), "w") as out:
            out.write(cont.replace('$release', release))
        with open(os.path.join(os.getcwd(), "spoke-builder", "Dockerfile"), "w") as out:
            out.write(cont.replace('$release', 'master'))
        shutil.copy(os.path.join(dir_name, "app.sh"),
                    os.path.join(target, "spoke-builder"))
        shutil.copy(os.path.join(dir_name, "app.sh"),
                    os.path.join(os.getcwd(), "spoke-builder"))
    else:
        with open(os.path.join(target, "frontpage.yml"), "w") as out:
            out.write(cont.replace('$release', release.lstrip('v')))
        with open(os.path.join(os.getcwd(), "frontpage.yml"), "w") as out:
            out.write(cont.replace('$release', 'latest'))
def _convert_time_zone_offset_str_to_int(text): offset_sec = 0 text = string.lstrip(text) try: i = string.find(text,':') if i > -1: # Eg "-5:30" offset_sec = int(text[:i]) * 3600 offset_sec_from_minutes = int(text[i+1:]) * 60 if offset_sec < 0: offset_sec_from_minutes = -offset_sec_from_minutes offset_sec += offset_sec_from_minutes else: # Eg "-0530" west = 0 i = 0 if text[0] == '-': west = 1 i = 1 offset_sec = int(text[i:i+2]) * 3600 + int(text[i+2:i+4]) * 60 if west: offset_sec = -offset_sec except: msglog.exception() return 0 return offset_sec
def precmd(self, line):
    """Handle alias expansion and ';;' separator.

    Aliases from self.aliases are expanded repeatedly; "%1", "%2", ...
    substitute positional arguments and "%*" substitutes all of them.
    For non-'alias' commands, everything after the first ';;' is queued
    on self.cmdqueue and the line before it is returned.
    """
    if not line:
        return line
    args = line.split()
    # Expand aliases until the first word is no longer one.
    while args[0] in self.aliases:
        line = self.aliases[args[0]]
        ii = 1
        for tmpArg in args[1:]:
            line = line.replace("%" + str(ii), tmpArg)
            ii = ii + 1
        line = line.replace("%*", ' '.join(args[1:]))
        args = line.split()
    # Split into ';;' separated commands, unless it's an alias command.
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            # Queue up everything after the marker for later execution.
            next = line[marker + 2:].lstrip()
            self.cmdqueue.append(next)
            line = line[:marker].rstrip()
    return line
def GetBiRuns(runtype=False):
    """Read the UCNA run log and return a list of run identifiers.

    Lines starting with '*' mark runs.  With runtype False the run
    number (first 5 characters after the '*') is returned; with runtype
    True the run-type field (characters 6-9, right-stripped) is
    returned instead.  The log path comes from sys.argv[1] when given,
    otherwise a default relative path is used.
    """
    if len(sys.argv) < 2:
        # No run list on the command line: fall back to the default
        # 2012 run log.
        runstring = "../../Aux/UCNA Run Log 2012.txt"
    else:
        runstring = sys.argv[1]
    runlist = list()
    with open(runstring, "r") as runfile:
        for line in runfile:
            if line[0] == "*":
                # Drop the leading '*' marker(s).
                splitted = line.lstrip("*")
                if runtype == False:
                    runlist.append(splitted[0:5])
                if runtype == True:
                    runlist.append(splitted[6:9].rstrip())
    return runlist
def get_charmm_rtp_lines(filename, molname):
    """Extract the residue-topology lines for ``molname`` from a CHARMM
    topology file.

    Returns the lines starting at the matching "RESI <molname>" record
    and ending just before the next "RESI" or "END" line.
    """
    rtplines = []
    store = 0
    with open(filename, 'r') as f:
        for line in f:
            # A new RESI record terminates the one being collected.
            if store == 1 and line.startswith("RESI"):
                store = 0
            if line.startswith("RESI"):
                # Second whitespace-separated token is the residue name.
                entry = re.split(r'\s+', line.lstrip())
                rtfmolname = entry[1]
                if rtfmolname == molname:
                    store = 1
            if line.startswith("END"):
                store = 0
            if store == 1:
                rtplines.append(line)
    return rtplines
def load_pictures(num):
    """Load up to ``num`` image/building-mask pairs.

    Scans "imagery" for files named image_city_<digits>.tif, loads each
    together with buildings/bldg_city_<digits>.tif, and returns two
    lists: images resized to (127, 127, 4) and masks resized to
    (127, 127).  Pairs whose files cannot be read are skipped.
    """
    prefix = "image_city_"
    array_X = []
    array_Y = []
    count = 0
    for filename in os.listdir("imagery"):
        # BUGFIX: the original used lstrip(prefix), which strips a
        # *character set* rather than the literal prefix, so oddly
        # named files (e.g. "city_image_1.tif") could alias real ones.
        if not filename.startswith(prefix):
            continue
        file_id = filename[len(prefix):]
        if re.match(r'\d+\.tif\Z', file_id) is None:
            continue
        img_file = "imagery/" + prefix + file_id
        bldg_file = "buildings/bldg_city_" + file_id
        try:
            I = plt.imread(img_file)
            J = plt.imread(bldg_file)
        except IOError:
            # Skip pairs with a missing/unreadable counterpart.
            continue
        I = np.resize(I, (127, 127, 4))
        J = np.resize(J, (127, 127))
        array_X.append(I)
        array_Y.append(J)
        count += 1
        if count == num:
            break
    return array_X, array_Y
def getdoc(object):
    """Return the cleaned-up docstring of `object`, or None when there
    is no usable string docstring.

    Tabs are expanded and the common indentation of all lines after the
    first is removed; leading/trailing blank lines are dropped.
    """
    # NOTE(review): Python 2 only -- types.StringTypes and sys.maxint
    # do not exist in Python 3.
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    if not isinstance(doc, types.StringTypes):
        return None
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    # Minimum indentation of any non-blank line after the first.
    margin = sys.maxint
    for line in lines[1:]:
        content = len(string.lstrip(line))
        if content:
            indent = len(line) - content
            margin = min(margin, indent)
            continue
    # Remove indentation; the first line only loses its own leading
    # whitespace.
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxint:
        for i in range(1, len(lines)):
            lines[i] = lines[i][margin:]
    # Drop trailing, then leading, blank lines.
    while lines and not lines[-1]:
        lines.pop()
    while lines and not lines[0]:
        lines.pop(0)
    return string.join(lines, '\n')
def add_word(word, glove_dict, words_set, not_found_set, word_origin):
    """Add glove-known variants of ``word`` to ``words_set``.

    The word is lower-cased and tried alongside variants with a
    trailing '?', a possessive "'s" suffix, and surrounding punctuation
    removed.  When no variant is in ``glove_dict`` the word is recorded
    in ``not_found_set`` (and reported when ``verbose`` is set).
    """
    possessive_suffix = "'s"

    def remove_suffix(w):
        # Drop a trailing question mark, then a possessive "'s".
        if len(w) > 0 and w[-1] == '?':
            w = w[:-1]
        if w.endswith(possessive_suffix):
            w = w[:-len(possessive_suffix)]
        return w

    def add_list(original_word, lst):
        found = False
        if original_word not in lst:
            lst.append(original_word)
        for w in lst:
            if w in glove_dict:
                words_set.add(w)
                found = True
        if not found:
            # NOTE(review): `raw_word` and `verbose` come from an
            # enclosing scope; `raw_word` looks like it should be
            # `original_word` -- confirm against the caller.
            if raw_word not in not_found_set and verbose:
                print('{}:{} is not in glove model'.format(
                    word_origin, raw_word))
            not_found_set.add(raw_word)

    word = word.strip().lower()
    word_lst = [word]
    # Variant without '?' / possessive suffix.
    word_lst.append(remove_suffix(word))
    # Variant with surrounding punctuation stripped.
    word_stripped = word.rstrip(punctuation).lstrip(punctuation)
    word_lst.append(word_stripped)
    word_lst.append(remove_suffix(word_stripped))
    add_list(word, word_lst)
def list_stations():
    # Render the station-list page: read radiopi_cfg.stationlist (one
    # "name|url" entry per line, lines starting with '#' are comments)
    # and emit one directory row per station via print_page().
    body = ""
    dir_title = "Station list"
    btn_sel = "<img class=\"navbutton\" src=\"/images/btn-play.svg\"/>"
    btn_back = "<a href=\"javascript:rp_back()\"><img class=\"navbutton\" src=\"/images/btn-back.svg\"/></a>"
    body += """
<div class="directorylisting">
<div class="directoryentry">
<div class="directorybutton">""" + btn_back + """</div>
<div class="directorytext">""" + dir_title + """</div>
</div>
<div class="directoryentry"><hr/></div>
"""
    sl = open(radiopi_cfg.stationlist, "r")
    for line in sl:
        if line[0] != "#":
            line = string.rstrip(
                string.lstrip(line)
            )  # Remove leading and trailing whitespace (incl. newline)
            # Only lines with a '|' separator (name|url) are stations.
            if string.find(line, "|") > 0:
                name, url = string.split(line, "|")
                station_link = "<a href=\"javascript:rp_station('" + url + "')\">"
                station_link += btn_sel + "</a>"
                body += """
<div class="directoryentry">
<div class="directorybutton">""" + station_link + """</div>
<div class="directorytext">""" + name + """</div>
</div>"""
    sl.close()
    body += """
</div>
"""
    print_page("WebRadioPi", body, "webradiopi")
class TextBase(BaseField):
    ''' Virtual class common to Text and Entry '''

    # NOTE(review): Python 2 only ("except X, msg" syntax).

    def setup(self, entry):
        # A leading '@' marks a native-format string; pad user content
        # with a space so it is not mistaken for one.
        BaseField.setup(self, entry)
        if self.string and self.string[0] == '@':
            self.string = ' ' + self.string
        return

    def update_content(self, entry, text):
        # Store `text` into the entry; returns 1 on success, -1 when
        # native-string parsing fails.
        if text[0] == '@' and hasattr(entry, 'set_native'):
            # '@'-prefixed text is parsed in the entry's native format.
            try:
                entry.set_native(self.field, string.lstrip(text[1:]))
            except Exceptions.ParserError, msg:
                Utils.error_dialog(_("Error in native string parsing"),
                                   str(msg))
                return -1
            return 1
        text = string.lstrip(text)
        entry[self.field] = Fields.Text(text)
        return 1
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed."""
    # NOTE(review): Python 2 only -- types.StringTypes and sys.maxint
    # do not exist in Python 3.
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    if not isinstance(doc, types.StringTypes):
        return None
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find minimum indentation of any non-blank lines after first line.
        margin = sys.maxint
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation.
        if lines:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxint:
            for i in range(1, len(lines)):
                lines[i] = lines[i][margin:]
        # Remove any trailing or leading blank lines.
        while lines and not lines[-1]:
            lines.pop()
        while lines and not lines[0]:
            lines.pop(0)
        return string.join(lines, '\n')
def GenTrajTranString(self):
    """Concatenate the trajtran strings along the path from the tree
    root to the most recently added vertex, separated by newlines.

    For a forward tree (self.treetype == FW) the collected segments are
    reversed so the result runs root-to-leaf.
    """
    trajtranlist = []
    if self.treetype == FW:
        vertex = self.verticeslist[-1]
        parent = vertex.parent
        while vertex.parent is not None:
            trajtranlist.append(vertex.trajtran)
            vertex = parent
            if vertex.parent is not None:
                parent = vertex.parent
        # Collected leaf-to-root; restore root-to-leaf order.
        trajtranlist = trajtranlist[::-1]
    else:
        vertex = self.verticeslist[-1]
        while vertex.parent is not None:
            trajtranlist.append(vertex.trajtran)
            vertex = vertex.parent
    # Single join instead of repeated '+=' concatenation; the lstrip()
    # reproduces the original removal of the leading "\n" (and, as
    # before, any leading whitespace of the first segment).
    return "\n".join(trajtranlist).lstrip()
def parse(self, args):
    # Parse a command line of the form
    #   <ename> [-<opt> <value> | <responsefile>] ...
    # Option values are dispatched to the handlers in self.map; a bare
    # argument is treated as a file of further options (parseFile).
    # NOTE(review): Python 2 only (iterator .next(), dict.has_key);
    # `lstrip(v, '-')` relies on a bare lstrip imported elsewhere.
    a = iter(args)
    self.ename = args[0]
    a.next()  # skip the executable name already stored in ename
    # NOTE(review): `while a:` is always true for an iterator; the loop
    # actually terminates via the StopIteration handler below.
    while a:
        try:
            v = a.next()
            if( v.startswith('-') ):
                key = lstrip(v, '-')
                if( key == 'help' ):
                    self.help(self.name)
                # Consume the option's value (may raise StopIteration
                # for a trailing option with no value).
                val = a.next()
                if( self.map.has_key(key) ):
                    o = self.map[key]
                    o.parse(val)
            else:
                # Not an option: treat as a response file of options.
                try:
                    f = open(v, 'r')
                    self.parseFile(f)
                except IOError:
                    f = None
        except StopIteration:
            break
def readAscii(fpath, iskp):
    """Read whitespace-separated numbers from an ASCII file.

    fpath -- full path of the file
    iskp  -- number of leading lines to skip (e.g. a legend/header)

    Returns a flat list of every token that parses as a float, in file
    order; non-numeric tokens are collected (and discarded) as time
    strings, matching the original behaviour.

    usage: onedim = readAscii(fpath, iskp)
    """
    onedim = []
    timestr = []
    linesplit = []
    with open(fpath) as f:
        # Skip the first `iskp` lines (e.g. the legend line).
        for line in f.readlines()[iskp:]:
            line = line.lstrip()
            # [:-1] drops the trailing newline; splitting on single
            # spaces yields empty strings for space runs, filtered out
            # below.
            linesplit.append(line[:-1].split(' '))
    for lnstrs in linesplit:
        for strs in lnstrs:
            if strs != '':
                try:
                    onedim.append(float(strs))
                except ValueError:
                    # Non-numeric token (e.g. a timestamp).
                    timestr.append(strs)
    return onedim
def FillList(self):
    """Populate the list control: create one column per entry in
    self.colHeadings (widths from self.colWidths, or 20% for the first
    column and the rest split evenly) and insert one row per entry in
    self.items."""
    index = 0
    size = self.GetWindowRect()
    width = size[2] - size[0] - (10) - win32api.GetSystemMetrics(
        win32con.SM_CXVSCROLL)
    numCols = len(self.colHeadings)
    # Column width fractions.
    if self.colWidths:
        colw = self.colWidths
    else:
        colw = [0.20]
        for i in range(1, numCols):
            colw.append((1 - colw[0]) / (numCols - 1))
    for col in self.colHeadings:
        itemDetails = (commctrl.LVCFMT_LEFT, width * colw[index], col, 0)
        self.itemsControl.InsertColumn(index, itemDetails)
        index = index + 1
    index = 0
    for items in self.items:
        # Preceding spaces are stripped to facilitate list completion.
        itemText = str(items[0]).lstrip()
        index = self.itemsControl.InsertItem(index + 1, itemText, 0)
        for itemno in range(1, numCols):
            item = items[itemno]
            self.itemsControl.SetItemText(index, itemno, str(item))
def parse_playlist(self, xmlstr):
    """Parse a programme playlist XML document.

    Fills self.meta (title, summary, updated, and optionally the
    'noitems' reason), self._items, and self._related.
    """
    tree = ET.XML(xmlstr)
    xml_strip_namespace(tree)

    self.meta = {}
    self._items = []
    self._related = []

    logging.info('Found programme: %s', tree.find('title').text)
    self.meta['title'] = tree.find('title').text
    self.meta['summary'] = tree.find('summary').text
    # Live radio feeds have no text node in the summary node
    if self.meta['summary']:
        self.meta['summary'] = self.meta['summary'].lstrip(' ')
    self.meta['updated'] = tree.find('updated').text

    # BUGFIX: an Element with no children is falsy, so the original
    # `if tree.find('noitems'):` missed <noitems reason="..."/>;
    # test for presence explicitly.
    noitems = tree.find('noitems')
    if noitems is not None:
        logging.info('No playlist items: %s', noitems.get('reason'))
        self.meta['reason'] = noitems.get('reason')

    self._items = [item(self, i) for i in tree.findall('item')]

    rId = re.compile('concept_pid:([a-z0-9]{8})')
    for link in tree.findall('relatedlink'):
        i = {}
        i['title'] = link.find('title').text
        i['pid'] = (rId.findall(link.find('id').text) or [None])[0]
        i['programme'] = programme(i['pid'])
        self._related.append(i)
def draw_circle(self, center, rng): retstr = "" steps = 30 #so we're going to do this by computing a bearing angle based on the steps, and then compute the coordinate of a line extended from the center point to that range. [center_lat, center_lon] = center esquared = (1 / 298.257223563) * (2 - (1 / 298.257223563)) earth_radius_mi = 3963.19059 #here we figure out the circumference of the latitude ring #which tells us how wide one line of longitude is at our latitude lat_circ = earth_radius_mi * math.cos(center_lat) #the circumference of the longitude ring will be equal to the circumference of the earth lat_rad = math.radians(center_lat) lon_rad = math.radians(center_lon) tmp0 = rng / earth_radius_mi for i in range(0, steps + 1): bearing = i * (2 * math.pi / steps) #in radians lat_out = math.degrees( math.asin( math.sin(lat_rad) * math.cos(tmp0) + math.cos(lat_rad) * math.sin(tmp0) * math.cos(bearing))) lon_out = center_lon + math.degrees( math.atan2( math.sin(bearing) * math.sin(tmp0) * math.cos(lat_rad), math.cos(tmp0) - math.sin(lat_rad) * math.sin(math.radians(lat_out)))) retstr += " %.8f,%.8f, 0" % ( lon_out, lat_out, ) retstr = string.lstrip(retstr) return retstr
def robot_description(self):
    """
    there is always:

    - start date
    - start time

    "Monday, 1 January, 7pm"

    sometimes:

    - end date, if different from start date
      "Monday, 1 January to Wednesday, 3 January, 7pm"
    - rule
      As above, but starting with "Weekly from..."
    - repeat until
      As above but ending with "...until 5 January"
    """
    # "Monday 1 January 2021" -- lstrip('0') drops a leading zero from
    # the day number.
    result = "%s %s" % (
        datetime.datetime.strftime(self.first_start_date, "%A"),
        datetime.datetime.strftime(self.first_start_date,
                                   "%d %B %Y").lstrip('0'))
    if self.first_end_date and (self.first_start_date != self.first_end_date):
        result += " to %s" % datetime.datetime.strftime(
            self.first_end_date, "%A %d %B %Y")
    # "7PM" / "7:30PM": strip the leading zero and an on-the-hour ":00".
    result += ", %s" % datetime.time.strftime(
        self.first_start_time, "%I:%M%p").lstrip('0').replace(':00', '')
    if self.rule:
        result = "%s from %s" % (self.rule, result)
    if self.repeat_until:
        result += " until %s" % datetime.datetime.strftime(
            self.repeat_until, "%A %d %B %Y")
    return result
def process(self, line):
    """Pre-process one input line: run the de-indented text through
    preParseArgs() and re-attach the original leading whitespace when
    the line was rewritten.

    Returns the (possibly rewritten) line for normal Python processing.
    """
    stripped = line.lstrip()
    # Leading whitespace to re-attach afterwards.
    pad_len = len(line) - len(stripped)
    padding = line[:pad_len]
    t_line = preParseArgs(stripped, self.commands,
                          self.o_ss_str_subopts, self.o_ss_num_subopts)
    # preParseArgs returns the same object when nothing matched;
    # identity (not equality) signals a rewrite, as in the original.
    if t_line is not stripped:
        line = t_line
    if pad_len:
        # Prepend the original indentation (a no-op when t_line is the
        # unmodified stripped text, matching the original behaviour).
        line = padding + t_line
    return line
def winGetCPUinfo():
    """Retrieves Machine information from the registry"""
    # Walks HKLM\HARDWARE\DESCRIPTION\SYSTEM\CentralProcessor and
    # returns a list of "<index>.<name>.<MHz>" strings, one per CPU.
    # Returns None (after printing) when the registry is unreadable.
    # NOTE(review): Python 2 only (_winreg module, print statement).
    cpuInfo = []
    try:
        hHardwareReg = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "HARDWARE")
        hDescriptionReg = _winreg.OpenKey(hHardwareReg, "DESCRIPTION")
        hSystemReg = _winreg.OpenKey(hDescriptionReg, "SYSTEM")
        hCentralProcessorReg = _winreg.OpenKey(hSystemReg, "CentralProcessor")
        # Subkey count == number of processors.
        nbProcessors = _winreg.QueryInfoKey(hCentralProcessorReg)[0]
        for idxKey in range(nbProcessors):
            hProcessorIDReg = _winreg.OpenKey(hCentralProcessorReg,
                                              str(idxKey))
            processorDescription = _winreg.QueryValueEx(
                hProcessorIDReg, "ProcessorNameString")[0]
            mhz = _winreg.QueryValueEx(hProcessorIDReg, "~MHz")[0]
            cpuInfo.append(
                str(idxKey) + '.' + string.lstrip(processorDescription) +
                '.' + str(mhz))
        return cpuInfo
    except WindowsError:
        print "Cannot retrieve processor information from registry!"
def reindent(s, numSpaces):
    """Re-indent every line of ``s`` to exactly ``numSpaces`` spaces,
    discarding each line's existing leading whitespace."""
    lines = [(numSpaces * ' ') + line.lstrip() for line in s.split('\n')]
    return "\n".join(lines)
                % host)

# Open the file for processing
try:
    infile = open(host, 'r')
except:
    sys.stderr.write('host failed to open\n')
    sys.exit(2)

# Create a dictionary of dictionaries to format the installed time, name
# of the software, and the version number from the raw output in host.
#
apps = {}
version = ""
# Matches a version-number token such as " 1.2.3" or " (10.4-1)".
verPattern = re.compile(" \(?[0-9\.-]{2,}\)?")
line = string.lstrip(infile.readline())
while (line):
    if (re.match(".*:$", line)):
        # we have a software name line
        # is the version number in the name?
        if (re.search(verPattern, line)):
            version = re.search(verPattern, line).group(0)
            swName = re.sub(verPattern, '', line)
        else:
            swName = line
    if (re.match("Version", line)):
        # we have a version line
        version = line.split(": ")[1]
        version = version[:-1]  # drop the trailing newline
    # NOTE(review): fragment truncated at this chunk boundary -- the
    # statements that advance `line` are outside this view.
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # Expand tabs first so the answer is measured in spaces.
    expline = line.expandtabs()
    return len(expline) - len(expline.lstrip())