def index2es():
    """Bulk-index Sougou news documents into a local Elasticsearch instance.

    Reads document blocks from the gb18030-encoded Sohu news dump, skips
    blocks without a docno, normalizes full-width characters in the title
    and content, resolves the document type from its URL, and indexes each
    block under its docno.
    """
    es = Elasticsearch(hosts=[{'host': 'localhost', 'port': '9200'}])
    indexName = 'sougou_news'
    typeName = 'sougou_news'
    docTypeDict = loadDocTypeDict()
    with SougouXmlFile(newsSohusiteXmlDat, mode='r', encoding='gb18030') as f:
        blockNo = 1
        while True:
            block = f.readDocBlock()
            if block is None:
                break
            # Renamed from `id` to avoid shadowing the builtin.
            doc_id = block['docno']
            if len(doc_id) == 0:
                continue  # skip blocks without a document number
            data = dict(doc_id=block['docno'],
                        doc_url=block['url'],
                        doc_title=StringUtils.strQ2B(block['contenttitle']),
                        doc_content=StringUtils.strQ2B(block['content']),
                        doc_type=getDocTypeByUrl(block['url'], docTypeDict))
            es.index(index=indexName, doc_type=typeName, id=doc_id, body=data)
            print('No-%d: type=%s, title=%s' % (blockNo, data['doc_type'], data['doc_title']))
            blockNo = blockNo + 1
def __init__(self, callbackFunc, hostname, messagetosend, endchar, timeout, attempts=1):
    """Prepare the client communication thread; does not start it."""
    Thread.__init__(self)
    self._name = "ClientCommThread"
    self._portname = None
    # Route GUI updates through a Communicate signal when a callback is given.
    if callbackFunc is not None:
        self._callback = Communicate()
        self._callback.myGUI_signal.connect(callbackFunc)
    if hostname is not None:
        self._hostname = hostname
    if timeout is not None:
        self._timeout = timeout
    if not StringUtils.isNoneOrEmpty(messagetosend):
        self._messagetosend = messagetosend
    if not StringUtils.isNoneOrEmpty(endchar):
        self._end_char = endchar
    if attempts is not None:
        # NOTE(review): attribute name looks like a typo of "_attempts" — kept for compatibility.
        self._attemps = attempts
    # Stop-request and stop-acknowledged events, both initially cleared.
    self._stopevent = Event()
    self._stopevent.clear()
    self._wasStopped = Event()
    self._wasStopped.clear()
def split_plus_test(): separator_array_ = ['ab', ', ', ' ', 'e', '中文', ':', '|'] str_ = 'abc,de fgh, ijk,lmn:op|qr中文st' separator_array_ = TypeUtils.convert_to_type(separator_array_, unicode) str_ = TypeUtils.convert_to_type(str_, unicode) print StringUtils.split_plus(str_, separator_array_)
def LCS(file1, file2):
    '''returns longest common substring between the files

    Concatenates both files around a sentinel character, builds a suffix
    array over the joined text, and scans adjacent suffixes: a pair where
    exactly one suffix crosses the sentinel corresponds to a substring
    shared between the two files. Prints the count dict and index set.
    '''
    with open(file1) as f1:
        s = f1.read()
    with open(file2) as f2:
        t = f2.read()
    sentinel = chr(24)
    st = s + sentinel + t  # chr(24) is ascii CAN (Cancel)
    sa = SuffixArray(st)
    maxlen = 0
    indices = set()  # indexes into original string
    counts = {}      # maps each longest substring found so far -> occurrence count
    for i in xrange(2, len(st)):
        # Adjacent suffixes in sorted order share the longest prefixes.
        str1 = sa.select(i-1)
        str2 = sa.select(i)
        b1 = sentinel in set(str1)
        b2 = sentinel in set(str2)
        # Exactly one suffix contains the sentinel => the suffixes start on
        # opposite sides of it, i.e. one in each file.
        if (b1 or b2) and (not (b1 and b2)):
            if b1:
                # get index of sentinel in string1; truncate before comparing
                j = str1.find(sentinel)
                length = StringUtils.lcp(str1[:j], str2)
                index1 = sa.index(i - 1)
                index2 = sa.index(i) - len(s) - 1  # rebase into the second file
            else:
                j = str2.find(sentinel)
                length = StringUtils.lcp(str1, str2[:j])
                index1 = sa.index(i - 1) - len(s) - 1
                index2 = sa.index(i)
            # process length
            if length > maxlen:
                # found first lrs: reset all bookkeeping for the new maximum
                maxlen = length
                indices.clear()
                counts.clear()
                lrs = sa.select(i)[:maxlen]
                counts[lrs] = 2
                indices.add(index1)
                indices.add(index2)
            elif (length == maxlen):
                # found another lrs with same length as current lrs
                lrs = sa.select(i)[:maxlen]
                if lrs in counts:
                    # another repeat of current lrs
                    counts[lrs] = counts[lrs] + 1
                    indices.add(index1)
                    indices.add(index2)
                else:
                    # add lrs to dict
                    counts[lrs] = 2
                    indices.add(index1)
                    indices.add(index2)
    print counts
    print indices
def runTraceChecklist(self, traceName, fileName = None):
    """
    Run a GLES trace through a checklist of common performance issues.

    @param traceName: Name of trace to check
    @param fileName:  File to which the checklist is saved
    """
    if not traceName in self.analyzer.traces:
        self.analyzer.fail("Trace not found: %s" % traceName)
    trace = self.analyzer.traces[traceName]
    # Open the optional report file up front so per-item output can stream to it.
    if fileName:
        output = open(fileName, "w")
        print >>output, "GLES Checklist Report"
        print >>output, "====================="
        print >>output, ""
    else:
        output = None
    checklistItems = Checklist.compileChecklist(self.analyzer.project, trace, GlesChecklist.checklistItems)
    for item in checklistItems:
        # Passing items are informational; failing ones are reported as errors.
        if item.verdict:
            self.analyzer.reportInfo(str(item))
        else:
            self.analyzer.reportError(str(item))
        if output:
            title = "%s [%s]" % (item.name, item.verdict and "PASS" or "FAIL")
            print >>output, title
            print >>output, "-" * len(title)
            print >>output, ""
            print >>output, StringUtils.wordWrap(item.description)
            print >>output, ""
            # Print unique comments only: sort by comment text so repeats are
            # adjacent, then emit each distinct comment once with a summary of
            # how many events repeated it. The trailing (None, None) sentinel
            # flushes the final group.
            comments = sorted(item.comments, key = lambda c: c[1])
            lastComment = None
            repeatedEvents = []
            for event, comment in comments + [(None, None)]:
                if comment != lastComment:
                    if lastComment and repeatedEvents:
                        # List the repeating events individually unless there
                        # are too many to be readable.
                        if len(repeatedEvents) < 100:
                            print >>output, StringUtils.wordWrap("* Last comment repeated for %s" % ", ".join(["%s (%d)" % (e.name, e.seq) for e in repeatedEvents]), indent = " " * 4, extraIndent = " ")
                        else:
                            print >>output, StringUtils.wordWrap("* Last comment repeated for %d events." % len(repeatedEvents), indent = " " * 4, extraIndent = " ")
                    if comment:
                        if event:
                            print >>output, StringUtils.wordWrap("* %s (%d): %s" % (event.name, event.seq, comment), indent = " " * 4, extraIndent = " ")
                        else:
                            print >>output, StringUtils.wordWrap("* %s" % (comment), indent = " " * 4, extraIndent = " ")
                    lastComment = comment
                    repeatedEvents = []
                elif event:
                    repeatedEvents.append(event)
            print >>output, ""
def __init__(self, callbackFunc, serialport, baudrate, messagetosend, endchar, timeout, attempts=2):
    '''Constructor: configure the serial port and thread state without opening communication.'''
    Thread.__init__(self)
    self._name = "SerialCommThread"
    self._portname = None
    # Route GUI updates through a Communicate signal when a callback is given.
    if callbackFunc != None:
        self._callback = Communicate()
        self._callback.myGUI_signal.connect(callbackFunc)
    if baudrate != None:
        self._baudrate = baudrate
    if timeout != None:
        self._timeout = timeout
    if serialport != None and baudrate != None:
        # Configure the port: 8N1 framing, software flow control (XON/XOFF),
        # no hardware handshake lines.
        self._serialport = serialport
        self._serialport.baudrate = self._baudrate
        self._serialport.rtscts = False
        self._serialport.dsrdtr = False
        self._serialport.timeout = 10
        self._serialport.writeTimeout = 1
        self._serialport.stopbits = serial.STOPBITS_ONE
        self._serialport.parity = serial.PARITY_NONE
        self._serialport.bytesize = serial.EIGHTBITS
        self._serialport.xonxoff = True
        self._serialport.setDTR(True)
        self._serialport.setRTS(False)
        self._portname = self._serialport.name
        # A positive caller timeout switches reads to short 250 ms polls.
        if timeout > 0:
            self._serialport.timeout = 0.25
    if not StringUtils.isNoneOrEmpty(messagetosend):
        self._messagetosend = messagetosend
    if not StringUtils.isNoneOrEmpty(endchar):
        self._end_char = endchar
    if attempts != None:
        # NOTE(review): attribute name looks like a typo of "_attempts" — kept as-is.
        self._attemps = attempts
    # Stop-request and stop-acknowledged events, both initially cleared.
    self._stopevent = Event()
    self._stopevent.clear()
    self._wasStopped = Event()
    self._wasStopped.clear()
def getEventDescription(self, event):
    """Render an event as a human-readable 'name(arg = value, ...)' string."""
    func = self.library.functions[event.name]
    parts = []
    for argName, argValue in event.values.items():
        if not argName:
            continue  # skip the unnamed (return) value
        pretty = StringUtils.ellipsis(
            StringUtils.decorateValue(self.library, func, argName, argValue))
        parts.append("%s = %s" % (argName, pretty))
    return "%s(%s)" % (event.name, ", ".join(parts))
def process_message(self, message):
    """Handle one parsed IRC message: answer PINGs and dispatch PRIVMSG commands."""
    # todo move and return only meaningful data
    if message[0] == 'PING':
        self.irc.pong('PONGERONI BACK')
        print('PONGERONI')
    if message[1] == 'PRIVMSG':
        author = StringUtils.get_sender(message)
        body = StringUtils.get_message(message)
        print(author + ":" + body)
        self.do_command(body, author)
def getEventDescription(self, event):
    """Build a readable call-style description of an event's named values."""
    func = self.library.functions[event.name]
    # Decorate every named value; the unnamed (return) slot is skipped.
    args = [(name,
             StringUtils.ellipsis(
                 StringUtils.decorateValue(self.library, func, name, value)))
            for name, value in event.values.items() if name]
    rendered = ", ".join(["%s = %s" % (k, v) for k, v in args])
    return "%s(%s)" % (event.name, rendered)
def showTraceInfo(self, traceName=None):
    """
    Show information about a trace.

    @param traceName: Trace to examine or all opened traces by default.
    """
    if traceName is not None:
        if not traceName in self.analyzer.traces:
            self.analyzer.fail("Trace not found: %s" % traceName)
        traces = {traceName: self.analyzer.traces[traceName]}
    else:
        traces = self.analyzer.traces
    # Per-element byte sizes used to estimate total array payload.
    arrayElementSizes = {
        Trace.ByteArrayValue: 1,
        Trace.ShortArrayValue: 2,
        Trace.IntegerArrayValue: 4,
        Trace.LongArrayValue: 8,
        Trace.FloatArrayValue: 4,
        Trace.DoubleArrayValue: 8,
        Trace.ObjectArrayValue: 4,
    }
    for name, trace in traces.items():
        self.reportInfo("Trace %s:" % name)
        frames = 0
        renderCalls = 0
        arrayBytes = 0
        for event in trace.events:
            # Accumulate the estimated byte size of every array value.
            for valueName, value in event.values.items():
                if isinstance(value, Trace.Array):
                    arrayBytes += len(value) * arrayElementSizes[
                        value.__class__]
            # Events whose function is unknown simply aren't counted.
            try:
                if self.analyzer.lookupFunction(event).isFrameMarker:
                    frames += 1
                if self.analyzer.lookupFunction(event).isRenderCall:
                    renderCalls += 1
            except AttributeError:
                pass
        self.reportInfo(" Events: %d" % len(trace.events))
        self.reportInfo(" Frames: %d" % frames)
        self.reportInfo(" Render calls: %d" % renderCalls)
        self.reportInfo(
            " Array data: %s" % StringUtils.normalizeSiValue(arrayBytes, "B", 1024.0))
        if len(trace.events):
            # Event timestamps appear to be microseconds (divided by 1e6 for seconds).
            self.reportInfo(
                " Duration: %s" % StringUtils.normalizeTimeValue(
                    (trace.events[-1].time - trace.events[0].time) / 1e6))
        if name in self.analyzer.traceFiles:
            self.reportInfo(" Filename: %s" % self.analyzer.traceFiles[name])
def main () :
    """Compute the free energy profile along a string by integrating the
    tangential force component over the normalized arc length, and save it
    as a two-column (alpha, energy) file in the string folder."""
    parser = argparse.ArgumentParser(
        description="*** Compute the free energy along a string. ***")
    parser.add_argument('-s', '--string-folder',
                        help='The folder of the string to be computed.')
    parser.add_argument('-o', '--output', default = "energy.out",
                        help='The output energy in string folder.')
    args = parser.parse_args()
    # NOTE(review): args.output is never used below — the output path is
    # hard-coded to <string-folder>/energy.out. Confirm intended behavior.
    string_file = args.string_folder + "/string.out"
    string_force = args.string_folder + "/force.out"
    if not os.path.exists (string_file) :
        raise RuntimeError ("cannot find string file " + string_file)
    if not os.path.exists (string_force) :
        raise RuntimeError ("cannot find string force file " + string_force + ". maybe the simulation is not finished")
    string = np.loadtxt (string_file)
    force = np.loadtxt (string_force)
    numb_node = string.shape[0]
    dim = string.shape[1]
    # compute arc (alpha): per-segment lengths and normalized arc positions
    alpha_seg = StringUtils.arc_seg (string)
    alpha = StringUtils.arc_norm (string)
    # integrate the energy with the trapezoidal rule along the string
    energy = np.zeros (alpha.shape)
    tagent = compute_string_tegent (alpha, string)
    for ii in range (1, numb_node) :
        # Tangential force components at the two segment endpoints.
        v0 = np.dot (tagent[ii-1], force[ii-1])
        v1 = np.dot (tagent[ii] , force[ii] )
        vn = force[ii] - v1 * tagent[ii]
        # print (str(v1) +
        #        " t: " + str(v1 * tagent[ii]) +
        #        " |t|: " + str(np.sqrt(np.dot(v1 * tagent[ii], v1 * tagent[ii]))) +
        #        " n: " + str(vn) +
        #        " |n|: " + str(np.sqrt(np.dot(vn,vn))) +
        #        " t.n: " + str(np.dot(v1 * tagent[ii], vn))
        #        )
        de = 0.5 * (v0 + v1) * alpha_seg[ii]
        # Energy decreases along the direction of the force.
        energy[ii] = energy[ii-1] - de
    new_shape = np.array ([alpha.shape[0], 2])
    result = np.zeros (new_shape)
    result[:,0] = alpha
    result[:,1] = energy
    string_energy = args.string_folder + "/energy.out"
    np.savetxt (string_energy, result)
def accionar_con_directorio(self, ruta, accionArchivo, accionDirectorio):
    """Walk `ruta`, apply `accionArchivo` to every audio file and
    `accionDirectorio` to every subdirectory.

    Filenames are expected to follow "title<sep>artist" (or the inverse
    when self.es_inverso is set); the enclosing folder name is taken as
    the style.
    """
    for dirName, subdirList, fileList in os.walk(ruta):
        # Parse the style name from the current directory's folder name.
        estilo = dirName[dirName.rindex("/") + 1:]
        for fname in fileList:
            arch = fname
            # Extract and normalize the file extension.
            extension = StringUtils.convertir_a_estandard(fname[fname.rindex('.') + 1:])
            # Strip the extension from the filename.
            fname = fname[-len(fname):fname.rindex('.') - len(fname)]
            # Verify that it is an audio file.
            if not extension in self.EXTENSIONES_AUDIO:
                print "El archivo", arch, "No es un archivo de audio."
                continue
            # Parse the song name, format: "title - artist".
            # The slicing drops the space before and after the separator.
            tema = fname[:fname.rindex(self.separador) - 1]
            interprete = fname[fname.rindex(self.separador) + 2:]
            # If the format is "artist - title" (inverted), swap them.
            if (self.es_inverso):
                aux = tema
                tema = interprete
                interprete = aux
            accionArchivo(tema, interprete, estilo, dirName, arch)
        # Subdirectories.
        for subdirName in subdirList:
            accionDirectorio(subdirName)
def showTraceInfo(self, traceName = None):
    """
    Show information about a trace.

    @param traceName: Trace to examine or all opened traces by default.
    """
    if traceName is not None:
        if not traceName in self.analyzer.traces:
            self.analyzer.fail("Trace not found: %s" % traceName)
        traces = {traceName: self.analyzer.traces[traceName]}
    else:
        traces = self.analyzer.traces
    # Per-element byte sizes used to estimate total array payload.
    arrayElementSizes = {
        Trace.ByteArrayValue: 1,
        Trace.ShortArrayValue: 2,
        Trace.IntegerArrayValue: 4,
        Trace.LongArrayValue: 8,
        Trace.FloatArrayValue: 4,
        Trace.DoubleArrayValue: 8,
        Trace.ObjectArrayValue: 4,
    }
    for name, trace in traces.items():
        self.reportInfo("Trace %s:" % name)
        frames = 0
        renderCalls = 0
        arrayBytes = 0
        for event in trace.events:
            # Accumulate the estimated byte size of every array value.
            for valueName, value in event.values.items():
                if isinstance(value, Trace.Array):
                    arrayBytes += len(value) * arrayElementSizes[value.__class__]
            # Events whose function is unknown simply aren't counted.
            try:
                if self.analyzer.lookupFunction(event).isFrameMarker:
                    frames += 1
                if self.analyzer.lookupFunction(event).isRenderCall:
                    renderCalls += 1
            except AttributeError:
                pass
        self.reportInfo(" Events: %d" % len(trace.events))
        self.reportInfo(" Frames: %d" % frames)
        self.reportInfo(" Render calls: %d" % renderCalls)
        self.reportInfo(" Array data: %s" % StringUtils.normalizeSiValue(arrayBytes, "B", 1024.0))
        if len(trace.events):
            # Event timestamps appear to be microseconds (divided by 1e6 for seconds).
            self.reportInfo(" Duration: %s" % StringUtils.normalizeTimeValue((trace.events[-1].time - trace.events[0].time) / 1e6))
        if name in self.analyzer.traceFiles:
            self.reportInfo(" Filename: %s" % self.analyzer.traceFiles[name])
def help(self, command=None):
    """
    Show available commands or help on a specific command.
    """
    if command:
        # Expand abbreviated command names (e.g. "h" -> "help").
        command = self.analyzer.completeCommand(command)
        if command in self.analyzer.commands:
            args, _, _, defaults = inspect.getargspec(
                self.analyzer.commands[command])
            if defaults is None:
                defaults = []
            # Build a usage line: defaulted parameters render as [name = default].
            helpString = "Usage: %s " % command
            for i, arg in enumerate(args):
                if arg == "self":
                    continue
                if i >= len(args) - len(defaults):
                    helpString += "[%s = %s] " % (
                        arg, defaults[i - (len(args) - len(defaults))])
                else:
                    helpString += "<%s> " % arg
            self.reportInfo(helpString)
            # Re-flow the docstring, turning @param lines into aligned columns.
            if self.analyzer.commands[command].__doc__:
                for line in self.analyzer.commands[command].__doc__.split(
                        "\n"):
                    line = line.strip()
                    if line.startswith("@param"):
                        _, arg, text = line.split(" ", 2)
                        line = "%-16s %s" % (arg.strip(), text.strip())
                    self.reportInfo(line)
            # Extra per-command help callback, when registered.
            if self.analyzer.commandHelp.get(command):
                self.reportInfo(self.analyzer.commandHelp[command]())
        return
    # No command given: list every command with the first sentence of its doc.
    self.reportInfo("Available commands:")
    for command, func in sorted(self.analyzer.commands.items()):
        doc = func.__doc__ or ""
        if doc and "." in doc:
            doc = doc.strip().split(".", 1)[0] + "."
        doc = re.sub(" +", " ", doc)
        doc = re.sub("\n", " ", doc)
        doc = StringUtils.wordWrap(doc, columns=55)
        doc = doc.replace("\n", "\n" + " " * 21)
        self.reportInfo("%-19s- %s" % (command, doc))
    self.reportInfo("""
Tips:
* You may use abbreviated names for commands, e.g. "h" for "help" or "s-s" for "show-state".
* Multiple commands may be separated with a semicolon ';'.
* Whenever an event range is required, you may pass a single event (e.g. 123), a range of events (10:20 or 10:+10), a single frame (#512) e.g. or a range of frames (#15:#20 or #15:+#5).
* Named arguments can also be given for commands, e.g. list traceName=t0.
""")
def start(self):
    """Receive one request from the connection, serve it, and always shut down."""
    try:
        payload = self.__conn.recv(PACKET_SIZE)
        self.__logger.debug('Recv:\n%s', payload)
        parsed_args = StringUtils.get_args(payload)
        self.sending(parsed_args)
    finally:
        # Close the connection regardless of success or failure.
        self.stop()
def consume(self, message):
    """Scrape the InfoMoney Ibovespa high/low tables, visit each stock's page
    to collect company details, and push the result to the API stub.

    @param message: incoming queue message (unused in the body — presumably a
                    trigger; TODO confirm against the caller)
    """
    # initializes the chrome driver with the requires window size and position
    driver = webdriver.Chrome(executable_path='chromedriver.exe')
    driver.set_window_size(1080, 2500)
    driver.set_window_position(100, 0)
    driver.get('https://www.infomoney.com.br/cotacoes/ibovespa/')
    tbbody_low_xpath = '//*[@id="low"]/tbody'
    tbbody_high_xpath = '//*[@id="high"]/tbody'
    # Collect rows from both the "high" and "low" tables.
    data_collected = []
    data_collected.extend(
        self._find_companies_data(driver, tbbody_high_xpath))
    data_collected.extend(
        self._find_companies_data(driver, tbbody_low_xpath))
    company_data = []
    # XPaths into each individual company page (site-layout dependent).
    stock_title_xpath = '/html/body/div[4]/div/div[1]/div[1]/div/div[1]/h1'
    company_type_xpath = '//*[@id="header-quotes"]/div[2]/h3[2]/strong'
    initials_xpath = '//*[@id="header-quotes"]/div[2]/h3[1]/strong'
    sector_xpath = '//*[@id="header-quotes"]/div[2]/h3[3]/strong'
    description_xpath = '//*[@id="header-quotes"]/div[2]/div'
    for stock_data in data_collected:
        try:
            driver.get(stock_data['link'])
            initials = driver.find_element_by_xpath(initials_xpath).text
            # Company name is the title text before the "(TICKER)" suffix.
            name = re.compile('\\s*\\(').split(
                driver.find_element_by_xpath(stock_title_xpath).text)[0]
            company_type = driver.find_element_by_xpath(
                company_type_xpath).text
            sector = driver.find_element_by_xpath(sector_xpath).text
            description = su.remove_html(
                driver.find_element_by_xpath(description_xpath).text)
            company_data.append({
                'initials': initials,
                'name': name,
                'type': company_type,
                'sector': sector,
                'description': description,
            })
        except Exception as e:
            # Best-effort: skip companies whose page fails to load or parse.
            print(
                f'An exception occoured when trying to fetch company data for {stock_data}. {e}'
            )
    driver.quit()
    self.api_stub.infomoney_ibovespa_company_data(
        {'companyData': company_data})
def help(self, command = None):
    """
    Show available commands or help on a specific command.
    """
    if command:
        # Expand abbreviated command names (e.g. "h" -> "help").
        command = self.analyzer.completeCommand(command)
        if command in self.analyzer.commands:
            args, _, _, defaults = inspect.getargspec(self.analyzer.commands[command])
            if defaults is None:
                defaults = []
            # Build a usage line: defaulted parameters render as [name = default].
            helpString = "Usage: %s " % command
            for i, arg in enumerate(args):
                if arg == "self":
                    continue
                if i >= len(args) - len(defaults):
                    helpString += "[%s = %s] " % (arg, defaults[i - (len(args) - len(defaults))])
                else:
                    helpString += "<%s> " % arg
            self.reportInfo(helpString)
            # Re-flow the docstring, turning @param lines into aligned columns.
            if self.analyzer.commands[command].__doc__:
                for line in self.analyzer.commands[command].__doc__.split("\n"):
                    line = line.strip()
                    if line.startswith("@param"):
                        _, arg, text = line.split(" ", 2)
                        line = "%-16s %s" % (arg.strip(), text.strip())
                    self.reportInfo(line)
            # Extra per-command help callback, when registered.
            if self.analyzer.commandHelp.get(command):
                self.reportInfo(self.analyzer.commandHelp[command]())
        return
    # No command given: list every command with the first sentence of its doc.
    self.reportInfo("Available commands:")
    for command, func in sorted(self.analyzer.commands.items()):
        doc = func.__doc__ or ""
        if doc and "." in doc:
            doc = doc.strip().split(".", 1)[0] + "."
        doc = re.sub(" +", " ", doc)
        doc = re.sub("\n", " ", doc)
        doc = StringUtils.wordWrap(doc, columns = 55)
        doc = doc.replace("\n", "\n" + " " * 21)
        self.reportInfo("%-19s- %s" % (command, doc))
    self.reportInfo("""
Tips:
* You may use abbreviated names for commands, e.g. "h" for "help" or "s-s" for "show-state".
* Multiple commands may be separated with a semicolon ';'.
* Whenever an event range is required, you may pass a single event (e.g. 123), a range of events (10:20 or 10:+10), a single frame (#512) e.g. or a range of frames (#15:#20 or #15:+#5).
* Named arguments can also be given for commands, e.g. list traceName=t0.
""")
def char_data(self, text):
    """SAX character-data handler: filter, transform and store tag text on the current node."""
    if not self.__node:
        return
    # Strip every configured ignore token from the raw text.
    for token in self.__ignore:
        text = Str.replacestr(text, token, '')
    # Apply the optional value-transformation hook.
    if self.__valueHandler:
        text = self.__valueHandler(text)
    # Only keep non-empty results.
    if text != '':
        self.__node.add_value(text)
def __init__(self, address):
    """Create the output directory and derive the report file name from `address`."""
    # Best-effort directory creation; an existing directory also raises OSError
    # here and is reported as a failure message.
    try:
        os.makedirs(self.file_path)
    except OSError:
        print("Creation of the directory %s failed" % self.file_path)
    else:
        print("Successfully created the directory %s" % self.file_path)
    if not StringUtils.isNoneOrEmpty(address):
        self.address_number = address
    # NOTE(review): if `address` is empty and self.address_number was never set
    # elsewhere, the next line raises AttributeError — confirm callers.
    self.file_name = self.file_path + self.address_number + '.xls'  # '.txt'
    self.text_to_print = ""
def agregar_interprete(self, interprete):
    """Register an artist if not already present.

    Returns the artist's id: the existing one when already registered,
    otherwise the id it was just assigned.
    """
    clave = StringUtils.convertir_a_estandard(interprete)
    # Already known: return the stored id.
    if clave in self.idInterpretesDict:
        return self.idInterpretesDict[clave]
    # New artist: assign the next sequential id.
    nuevo_id = self.idInterpretesCount
    self.idInterpretesDict[clave] = nuevo_id
    self.idInterpretesCount += 1
    return nuevo_id
def processEvent(self, event, implicitStateValue = None):
    """Apply an event's state relations to the tracked state.

    Returns the list of value names that actually modified state.
    @param implicitStateValue: exact value for 'modify' relations when known;
                               otherwise a placeholder is stored.
    """
    assert event.name in self.library.functions, "Function not found in library: %s" % event.name
    function = self.library.functions[event.name]
    stateModifiers = []
    def getStatePathForValue(name):
        # The return value (name is None) uses the function's return relation;
        # parameters use their own declared relation.
        if name is None:
            relation = function.retStateRelation
        else:
            relation = function.parameters[name].stateRelation
        if relation:
            try:
                return relation.path
            except AttributeError:
                # The copy relation does it this way
                return relation.destPath
    # Sort the values by their state paths (shortest first)
    values = event.values.items()
    values.sort(cmp = lambda v1, v2: cmp(getStatePathForValue(v1[0]), getStatePathForValue(v2[0])))
    for name, value in values:
        # Get the state path
        if name is None:
            relation = function.retStateRelation
        else:
            relation = function.parameters[name].stateRelation
        if self.decorateValues:
            value = StringUtils.decorateValue(self.library, function, name, value)
        # Write the state value
        if relation and self._updateState(event, relation, value):
            stateModifiers.append(name)
    # Apply the function-level state relations.
    for relation in function.stateRelations:
        if isinstance(relation, Library.StateRelationModify):
            if implicitStateValue is None:
                # Use a special placeholder value for completely unknown state values
                self._updateState(event, relation, ImplicitStateValue())
            else:
                # When we know the exact value, this relation becomes a "set" relation
                self._updateState(event, Library.StateRelationSet(relation.path), implicitStateValue)
        elif relation.__class__ in (Library.StateRelationSet, Library.StateRelationGet, Library.StateRelationCopy):
            self._updateState(event, relation, None)
        else:
            raise RuntimeError("Unknown state relation: %s" % relation)
    # Track events that touch state when effective-event collection is on.
    if self.collectEffectiveEvents and self.isStateAccessingEvent(event):
        self.effectiveEvents.add(event)
    return stateModifiers
def appendWithTimeStampUsingFile(self, text, file):
    """Append a timestamped line to the log file named by `file` and return it.

    A non-empty `file` retargets the output path first; a missing file is
    initialized via self.begin().
    """
    if not StringUtils.isNoneOrEmpty(file):
        # Retarget output to the requested file.
        self.address_number = file
        self.file_name = self.file_path + self.address_number + '.xls'  # '.txt'
    if not os.path.exists(self.file_name):
        self.begin()
    stamped = str(self.create_timestamp()) + " " + text + "\r\n"
    self.append(stamped)
    self.text_to_print += stamped
    return stamped
def send_data(self, path, http, args, host, agent):
    """Serve a file or directory listing for `path`.

    Directories either redirect to the start page or return a generated HTML
    listing (when enabled). Files are served with a content type looked up by
    extension. Returns an open file-like object on success or the result of
    send_error on failure.
    """
    file_name = path
    print file_name
    if file_name.endswith('/'):
        if not os.path.exists(self.__root_path+file_name):
            return self.send_error(http, ER_NOT_FOUND, file_name)
        # No start page in the directory: serve a listing if allowed.
        if not os.path.exists(self.__root_path+file_name+self.__start_page):
            if not self.__dir_list:
                return self.send_error(http, ER_FORBIDDEN, file_name)
            ctype = 'text/html'
            response = 200
            f = self.gen_directory_list(host, file_name)
            fsize = len(f.getvalue())
            self.send_headers(http, response, ctype, fsize)
            return f
        else:
            # Directory with a start page: serve that page instead.
            file_name += self.__start_page
    # Resolve the content type from the file extension.
    type = StringUtils.type_split(file_name)
    if type in self.encodings_map:
        type.lower()  # NOTE(review): result is discarded — looks like a no-op; confirm intent
        ctype = self.encodings_map[type]
    else:
        ctype = 'text/plain'
    mode = 'rb'
    relative_path = file_name
    # Map the URL path to a filesystem path (cp866-decoded).
    file_name = StringUtils.urlstoc(file_name)
    file_name = self.__root_path + file_name
    file_name = unicode(file_name, 'cp866')
    print file_name
    try:
        f = open(file_name, mode)
        response = 200
        fsize = os.stat(file_name)[stat.ST_SIZE]
        self.send_headers(http, response, ctype, fsize)
    except IOError, (errno, strerror):
        return self.send_error(http, errno, relative_path)
def agregar_cancion(self, titulo, interprete, estilo, dirName, arch): idInterprete = self.agregar_interprete(interprete) idEstilo = self.idEstilosDict[estilo] titulo = StringUtils.convertir_a_estandard(titulo) if (titulo, idInterprete) in self.idCancionesDict: print "La canción", titulo, "de", interprete, "ya se encontraba en la bdd" return self.idCancionesDict[(titulo, idInterprete)] = (self.idCancionesCount, idEstilo) self.agregar_huella_digital(dirName, arch) self.idCancionesCount += 1 print "Se agregó la canción", titulo, "del interprete", interprete, "con estilo", estilo
def showResultRows(buf, titles, recs, isShowDetail=True):
    """Format a query result set as an ASCII table and append the lines to `buf`.

    @param buf:          output list; one formatted line per table row is appended
    @param titles:       sequence of column descriptors; tit[0] is the column title
    @param recs:         sequence of records (each a sequence of column values)
    @param isShowDetail: when False, fields longer than 30 chars are truncated with "..."
    """
    rows = []      # formatted rows, each a list of cell strings
    maxcSize = []  # running maximum display width per column
    # Title row, with a leading row-number column.
    tl = ['RowNum']
    rows.append(tl)
    maxcSize.append(6)  # initial width of the row-number column
    for tit in titles:
        title = str(tit[0])
        tl.append(title)
        maxcSize.append(len(title))
    # Record rows (enumerate replaces the original manual recIdx counter;
    # the unused `ccount` local was dropped).
    for recIdx, rec in enumerate(recs):
        rowNum = str(recIdx + 1)
        row = [rowNum]
        if maxcSize[0] < len(rowNum):
            maxcSize[0] = len(rowNum)
        for c in range(len(rec)):
            col = str(rec[c])
            # Escape control characters so each cell stays on one line.
            col = col.replace('\t', '\\t')
            col = col.replace('\n', '\\n')
            # only show init 30 char of the field when need not detail
            if (isShowDetail == False) and (len(col) > 30):
                col = col[0:27] + "..."
            row.append(col)
            if maxcSize[c + 1] < len(col):
                maxcSize[c + 1] = len(col)
        rows.append(row)
    # Format rows to strings, padding each cell to its column width
    # (CJK-aware padding via justCJK).
    for row in rows:
        line = "| "
        for c in range(len(row)):
            line = line + StringUtils.justCJK(row[c], maxcSize[c], 'l') + ' | '
        buf.append(line)
def getData(fromIndex, toIndex):
    """Scrape JD search result pages [fromIndex, toIndex) for `keyword` and
    write name/price/link/comment-count rows to jd.xlsx.

    Pages that fail to download or parse are logged and skipped.
    (Removed the unused `exits = os.path.exists('example.xls')` check — the
    workbook is always created fresh and saved as jd.xlsx.)
    """
    workbook = Workbook()
    sheet = workbook.create_sheet("jd-fs", 0)
    # Header row.
    sheet.cell(row=1, column=1).value = "名称"
    sheet.cell(row=1, column=2).value = "价格"
    sheet.cell(row=1, column=3).value = "链接"
    sheet.cell(row=1, column=4).value = "评论数"
    line = 2
    for index in range(fromIndex, toIndex):
        print("页数:", index)
        # JD paginates in half-pages: page=2*index-1, s = first item offset.
        baseurl = "https://search.jd.com/Search?keyword=" + parse.quote(
            keyword
        ) + "&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&suggest=1.his.0.0&psort=3&wtype=1&click=1&page=" + str(
            2 * index - 1) + "&s=" + str(30 * (index - 1) + 1)
        req = request.Request(baseurl)
        # Spoof a desktop browser UA to avoid being served the bot page.
        req.add_header(
            'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
        )
        try:
            with request.urlopen(req) as f:
                returnData = f.read().decode('utf-8')
                soup = BeautifulSoup(returnData, "lxml")
                for product in soup.find_all("li", class_="gl-item"):
                    price = product.find(
                        "div", class_="p-price").find('i').get_text()
                    name = product.find("div", class_="p-name").find('em').get_text()
                    # Strip the JD-supermarket tag before normalizing the name.
                    if "【京东超市】" in name:
                        name = name.replace("【京东超市】", " ")
                    name = StringUtils.dealProductName(name)
                    url = product.find("div", class_="p-name").find('a').get('href')
                    comments = product.find(
                        "div", class_="p-commit").find('a').get_text()
                    lineDatas = [name, price, url, comments]
                    for l in range(len(lineDatas)):
                        sheet.cell(row=line, column=l + 1).value = lineDatas[l]
                    line = line + 1
        except Exception as err:
            # Best-effort scraping: log the failing page and move on.
            print(err)
            logging.debug("fail 页数: %d %s", index, err)
    workbook.save('jd.xlsx')
def compute_string (compute_force,   # function for computing the force
                    string,          # the input string
                    dt = 0.05,       # artificial time step for updating the string
                    max_iter = 200,  # maximum allowed number of iterations
                    start_iter = 0,  # iteration index to resume from
                    weighting = [[0,1],[1,1]]  # weighting of string discretization
                    ):
    """ compute the string

    Iterates Euler updates followed by arc-length resampling, writing the
    max-norm displacement per iteration to conv.out as a convergence measure.
    NOTE(review): the mutable default `weighting` appears to be read-only
    here (passed through to resample_string) — confirm it is never mutated.
    """
    factor_Q = 1.1
    numb_node = string.shape[0]
    dim = np.size(string[0])
    # check validity of the inputs
    if dim != np.size(string[-1]):
        raise NameError ('The dimention of starting and ending nodes of the string should match!')
    if numb_node <= 2:
        raise NameError ('The number of nodes on string should be larger than 2')
    # initialize
    alpha_eq = np.linspace (0, 1, numb_node)
    incr_hist = [[]]
    conv_file = open ("conv.out", "w")
    # starts the main loop
    for ii in range (start_iter, max_iter):
        # update the string
        string = update_string_Euler (compute_force, dt, ii, string)
        # string = update_string_RK4 (compute_force, dt, string)
        # discretize the string
        string = StringUtils.resample_string (string, numb_node, weighting)
        # compute the max norm force as measure of convergence
        if ii != start_iter :
            norm_string = string
            diff_string = norm_string - norm_string_old
            norm_string_old = np.copy (norm_string)
            # Per-node Euclidean displacement, then its maximum over nodes.
            diff = np.sqrt (np.sum (np.multiply (diff_string, diff_string), axis=1))
            diff_inf = np.max( diff )
            new_item = np.array([ii, diff_inf])
            new_item = new_item[np.newaxis,:]
            if np.size (incr_hist) == 0:
                incr_hist = new_item
            else:
                incr_hist = np.append (incr_hist, new_item, axis=0)
            logging.info ("string %06d: updated with timestep %e . String difference is %e", ii+1, dt, diff_inf)
            conv_file.write (str(ii) + " " + str(diff_inf) + "\n")
        else :
            # First iteration: nothing to diff against yet.
            # NOTE(review): this aliases the array rather than copying it —
            # confirm resample_string always returns a fresh array.
            norm_string_old = string
            logging.info ("string %06d: updated with timestep %e .", ii+1, dt)
    # print incr_hist
    conv_file.close ()
    return string
def _getBullets(self, eachHazard, argDict, areaDictionary):
    """Build the formatted bullet section for one hazard segment.

    Uses the forecaster-selected RFW types when present (plus an "Impacts"
    bullet), otherwise the locally configured generic bullets; duplicates are
    removed before formatting. Returns the assembled bullet text.
    """
    ###
    ### set up the bullet list
    ###
    bList = []
    ### get the list from the GUI if the forecaster entered anything
    if len(self._rfwType) > 0:
        for b in self._rfwType:
            # Renamed from `dict` to avoid shadowing the builtin.
            bulletDict = self._bulletDict()
            bList = bList + bulletDict.get(b)[1]
        bList.append("Impacts")
    ### get the default configured list
    else:
        ### Use GenericBullets defined locally to throw in some generic bullets
        bList = self._GenericBullets
    ### remove any duplicate entries in the bList
    ### removeDups is in CommonUtils
    bList = self.removeDups(bList)
    ### initialize the bullet output
    bullets = ""
    ### loop through the bullets and format the output
    for b in bList:
        b = b.upper()
        if b == self._locationsBullet.upper():
            locations = self._getLocationsList(areaDictionary, argDict, eachHazard['seg'])
            bullets = bullets + StringUtils.StringUtils().indentText(b+"..."+locations, \
                indentFirstString="* ", indentNextString=" ", \
                maxWidth=65, breakStrings=[" ","..."]) + "\n\n"
        elif b == "Extreme grassland fire danger".upper():
            bullets = bullets + "* " + b + "...is forecast.\n\n"
        elif b == "Highest threat".upper():
            bullets = bullets + "|* * " + b + "...is located (optional bullet)*|\n\n"
        elif b == "Impacts".upper():
            bullets = bullets + "* " + b + "...any fires that develop will likely spread rapidly."
            bullets = bullets + " Outdoor burning is not recommended.\n\n"
        else:
            # Unrecognized bullets become fill-in placeholders for the forecaster.
            bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
    return bullets
def createGeneralStatisticsTable(self):
    """Build a two-column (Property, Value) summary table for the loaded trace."""
    table = Report.Table(["Property", "Value"])
    duration = self.frames[-1].endTime - self.frames[0].startTime
    table.addRow("Events", len(self.trace.events))
    table.addRow("Frames", len(self.frames))
    table.addRow("Duration", StringUtils.normalizeTimeValue(duration))
    renderCallCount = sum([len(f.renderEvents) for f in self.frames])
    table.addRow("Render calls", renderCallCount)
    # Averages over the whole capture.
    fps = len(self.frames) / duration
    callsPerFrame = sum([len(f.events) for f in self.frames]) / float(len(self.frames))
    table.addRow("Average FPS", "%.02f" % fps)
    table.addRow("Average calls per frame", "%.02f" % callsPerFrame)
    return table
def init_source_string(string_dir, numb_node_tgt):
    """ init a string from existing nodes

    Loads <string_dir>/string.out and linearly resamples it to
    `numb_node_tgt` nodes equally spaced in normalized arc length.
    """
    if not os.path.isdir(string_dir):
        raise RuntimeError("Dir " + string_dir + " not found")
    file_name = string_dir + "/string.out"
    if not os.path.exists(file_name):
        raise RuntimeError("cannot find file " + file_name)
    nodes = np.loadtxt(file_name)
    # Normalized arc-length parameterization of the loaded string.
    arc = StringUtils.arc_norm(nodes)
    interp = interp1d(arc, nodes, axis=0, kind="linear")
    # Evaluate at uniformly spaced arc positions.
    return interp(np.linspace(0, 1, numb_node_tgt))
def doesPortExistsBySerialNumber(serialnumber):
    """Return True if a serial port with the given serial number is present.

    Returns False when `serialnumber` is empty/None, no matching port exists,
    or port enumeration fails.
    """
    result = False
    try:
        # Nothing to match against: hoisted out of the loop (the original
        # re-checked this invariant for every port).
        if StringUtils.isNoneOrEmpty(serialnumber):
            return result
        for port in serial.tools.list_ports.comports():
            if port.serial_number == serialnumber:
                result = True
                break
        return result
    except Exception:
        # Narrowed from a bare `except:`; enumeration can fail on some
        # platforms/drivers — treat as "not found".
        return result
def init_source_string(string_dir, numb_node_tgt):
    """ init a string from existing nodes

    Reads the string nodes saved in <string_dir>/string.out and returns them
    resampled onto `numb_node_tgt` uniformly spaced arc-length positions.
    """
    if not os.path.isdir(string_dir):
        raise RuntimeError("Dir " + string_dir + " not found")
    string_file = string_dir + "/string.out"
    if not os.path.exists(string_file):
        raise RuntimeError("cannot find file " + string_file)
    raw_nodes = np.loadtxt(string_file)
    # Parameterize by normalized arc length, then interpolate node-wise.
    alpha = StringUtils.arc_norm(raw_nodes)
    resample = interp1d(alpha, raw_nodes, axis=0, kind="linear")
    uniform_alpha = np.linspace(0, 1, numb_node_tgt)
    return resample(uniform_alpha)
def getPortBySerialNumber(serialnumber):
    """Open and return a 9600-baud serial connection to the port whose serial
    number matches `serialnumber`.

    Returns None when `serialnumber` is empty/None, no port matches, the port
    fails to open, or enumeration fails. (The original relied on a NameError
    from an unbound `ser` — caught by a bare `except:` — to signal "no match";
    that path is now explicit.)
    """
    try:
        if StringUtils.isNoneOrEmpty(serialnumber):
            return None
        for port in serial.tools.list_ports.comports():
            if port.serial_number == serialnumber:
                ser = serial.Serial(port.device, 9600, timeout=1)
                # Serial() opens on construction; keep the defensive check.
                if not ser.isOpen():
                    return None
                return ser
        return None  # no port with that serial number
    except Exception:
        # Enumeration or open failure: report "not available".
        return None
def listParse2Str(lst, deftp, r_split):
    """Convert each tuple in `lst` to a joined string via tupleParse2Strs/pAstr.

    Entries that fail to parse are logged (traceback) and skipped.
    Returns [] when either `lst` or `deftp` is None.
    (Removed the unused `i` counter, the unused `now1` timestamp, the dead
    commented-out debug lines, and the redundant `continue`.)
    """
    ret = []
    if lst is None or deftp is None:
        return ret
    for rtp in lst:
        try:
            ss = StringUtils.pAstr(tupleParse2Strs(rtp, deftp), r_split)
            ret.append(ss)
        except Exception:
            # Narrowed from a bare `except:`; best-effort — log and skip the entry.
            wlog.doTraceBack()
    return ret
def listParse2Str(lst, deftp, r_split):
    """Parse every tuple of *lst* to a string joined with *r_split*.

    Entries whose conversion raises are traced through wlog.doTraceBack()
    and omitted from the result. A None *lst* or *deftp* yields [].
    """
    ret = []
    if lst is None or deftp is None:
        return ret
    now1 = datetime.datetime.now()  # referenced by the commented debug lines
    #if(len(lst)>0):wlog.getLogger().debug(str(len(lst))+"listParse2Str():"+str(now1))
    for rtp in lst:
        try:
            ret.append(StringUtils.pAstr(tupleParse2Strs(rtp, deftp), r_split))
        except Exception:
            # fixed: bare `except:` previously caught even KeyboardInterrupt
            wlog.doTraceBack()
    #if(len(lst)>0):wlog.getLogger().debug(str(len(lst))+"end_listParse2Str():"+str(now1))
    return ret
def gen_directory_list(self, host, path):
    """Render an HTML directory listing for *path* under the server root.

    Returns a cStringIO buffer rewound to offset 0, ready to stream to
    the client.
    """
    f = cStringIO.StringIO()
    # renamed from `list`: don't shadow the builtin
    entries = os.listdir(self.__root_path + path)
    name_len = 32  # truncate long names to keep the table readable
    f.write('<html><head>\n')
    f.write('<title>Directory listing for %s</title>\n' % (path,))
    f.write('</head>\n')
    f.write('<body style = "font-family:verdana,sans-serif">\n')
    f.write('<h2>Directory listing for %s%s</h2>\n' % (host, path,))
    f.write('<pre><table><tr>\n')
    f.write('<td colspan="3"><a href="..">Parent Directory</a></td>\n')
    f.write('</tr><tr align="left">\n')
    f.write('<th width="320px">Name</th>\n')
    f.write('<th width="100px">Size</th>\n')
    f.write('<th width="200px">Last Modified</th>\n')
    f.write('</tr>\n')
    for name in entries:
        rel_path = path + name
        abs_path = self.__root_path + path + name
        stats = os.stat(abs_path)
        if len(name) > name_len:
            name = name[:name_len] + '>'
        if os.path.isdir(abs_path):
            rel_path += '/'
            fsize = 'Directory'
        else:
            fsize = StringUtils.size_to_maxbyte(stats[stat.ST_SIZE])
        modif_time = time.strftime(DATE_FORMAT_LITE,
                                   time.localtime(stats[stat.ST_MTIME]))
        f.write('<tr>\n')
        # BUG FIX: was '<td><a href=%s>%s</div></a></td>' -- emitted a stray
        # closing </div> with no opening tag, and left href unquoted (breaks
        # on names containing spaces)
        f.write('<td><a href="%s">%s</a></td>\n' % (rel_path, name,))
        f.write('<td>%s</td>\n' % (fsize,))
        f.write('<td>%s</td>\n' % (modif_time,))
        f.write('</tr>\n')
    f.write('</table></pre>\n')
    f.write('<hr><address>%s</address>\n' % (SERVER_NAME,))
    f.write('</body></html>')
    f.seek(0)
    return f
def ConvertToBedViaR(cel_file, bpmapfile): '''function for t calling R to get Bed-like values''' # original script - minus redundant variables. # library(rMAT) # library(Biobase) # # expName <- "2012-07-12_SIR2_IP"; # cell file name # bpmapFile <- "Sc03b_MR_v04.bpmap"; # the mapping! # seqHeader <- ReadBPMAPAllSeqHeader(bpmapFile); # arrayFile1 <- c("2012-07-12_SIR2_IP.CEL"); # ScSet <- BPMAPCelParser(bpmapFile, arrayFile1, verbose = FALSE, groupName = "Sc", seqName="chr"); # data <- list(chrNo = ScSet1@featureChromosome, probePos = ScSet1@featurePosition, MATScore = exprs(ScSet1)); # last parameter is the raw data. # write.table(data, file = paste(expName,"_exp1_AllData.txt",sep=''), append = FALSE, row.names = FALSE, sep = "\t"); bedfile = StringUtils.rreplace(cel_file, 'CEL', 'BED', 2) bedfile += "like" print "input file: %s" % (cel_file) print "Will be writing out to %s" % (bedfile) print "importing rMat and Biobase libraries" importr('rMAT') importr('Biobase') # print "creating seqHeader" # robjects.r('seqHeader <- ReadBPMAPAllSeqHeader(\"' + bpmapfile + '\")') print "creating scSet" robjects.r('ScSet <- BPMAPCelParser(\"' + bpmapfile + '\", c(\"' + cel_file + '\"), verbose = FALSE, groupName = "Sc", seqName="chr")') print "creating data" robjects.r('data <- list(chrNo = ScSet@featureChromosome, probePos = ScSet@featurePosition, MATScore = exprs(ScSet))') # last parameter is the raw data. print "writing table" robjects.r('write.table(data, file = \"' + bedfile + '\", append = FALSE, quote = FALSE, row.names = FALSE, sep = "\t")')
def _sort(self, lo, hi, d):
    '''3-way radix quicksort of self.alist[lo..hi] on character position d.

    Strings are compared starting at character index *d*; sub-ranges of at
    most StringUtils.CUTOFF extra elements fall back to insertion sort.
    '''
    # small subarray: insertion sort comparing from character d onward
    if (hi <= lo + StringUtils.CUTOFF):
        StringUtils.insertion(self.alist, lo, hi, d)
        return;
    lt = lo
    gt = hi
    i = lo + 1
    # set partition character (pivot) from the first string in the range
    v = self._charAt(self.alist[lo], d)
    # make 3 partitions: [lo..lt-1] < v, [lt..gt] == v, [gt+1..hi] > v
    while(i <= gt):
        curr = self.alist[i]
        t = self._charAt(curr, d)
        if t < v:
            StringUtils.exch(self.alist, lt, i)
            lt += 1
            i += 1
        elif t > v:
            StringUtils.exch(self.alist, i, gt)
            gt -= 1
            # do not advance i: the swapped-in element is still unexamined
        else:
            i += 1
    # sort the lower partition
    self._sort(lo, lt - 1, d)
    # sort the middle partition on the next character
    # (v < 0 presumably marks end-of-string, so those strings are done --
    #  depends on self._charAt's sentinel convention; confirm there)
    if v >= 0:
        self._sort(lt, gt, d + 1)
    # sort the upper partition
    self._sort(gt + 1, hi, d)
def generate_from_source (source_string_dir_, string) :
    """Create the working directory for string 0 and generate its nodes
    from an existing (source) string directory.

    Each new node is initialized from the source node whose normalized
    arc-length parameter is the largest one not exceeding the new node's
    parameter.  Raises RuntimeError on any setup or generation failure.
    """
    # check the dir and string file
    if not os.path.isdir (source_string_dir_) :
        raise RuntimeError ("cannot find dir " + source_string_dir_)
    # resolve the source dir to an absolute path via chdir round-trip
    cwd = os.getcwd()
    os.chdir (source_string_dir_)
    source_string_dir = os.getcwd ()
    os.chdir (cwd)
    file_name = source_string_dir + "/string.out"
    if not os.path.exists (file_name) :
        raise RuntimeError ("cannot find file " + file_name)
    # load source string
    source_string = np.loadtxt (file_name)
    if source_string.shape[1] != string.shape[1] :
        raise RuntimeError ("size of the string nodes does not match")
    # mk dir of the string: copy the template if it does not exist yet
    str_force = StringForce ()
    string_name = str_force.mk_string_name (0)
    if not os.path.isdir(string_name) :
        ret = Popen(["cp", '-a', str_force.string_template, string_name],
                    stdout=PIPE, stderr=PIPE)
        stdout, stderr = ret.communicate()
        if ret.returncode != 0 :
            raise RuntimeError ("cannot copy template dir to " + string_name)
    # generate nodes
    cwd = os.getcwd()
    cmd_gen_dir = "tools/gen.dir.absdep.sh"
    os.chdir (string_name)
    # normalized arc-length parameters of the new and the source string
    my_alpha = StringUtils.arc_norm (string)
    source_alpha = StringUtils.arc_norm (source_string)
    for ii in range (string.shape[0]) :
        # (previous nearest-neighbor matching, kept for reference)
        # min_val = 1e10
        # min_posi = 0
        # for jj in range(source_string.shape[0]) :
        #     norm = np.linalg.norm (string[ii] - source_string[jj])
        #     # print ("id " + str(jj) +
        #     #        " diff " + str(string[ii]) + " " + str(source_string[jj]) + " norm " + str(norm))
        #     if norm < min_val :
        #         min_val = norm
        #         min_posi = jj
        # pick the last source node whose parameter does not exceed ours
        for jj in range(source_string.shape[0]) :
            if (my_alpha[ii] >= source_alpha[jj]) :
                min_posi = jj
        this_node = str_force.mk_node_name (ii)
        str_force.mk_node_param (string[ii])
        source_node = str_force.mk_node_name (min_posi)
        # generate the node dir from the matched source node
        ret = Popen ([cmd_gen_dir, this_node, source_string_dir+"/"+source_node],
                     stdout=PIPE, stderr=PIPE)
        stdout, stderr = ret.communicate()
        if ret.returncode == 1 :
            raise RuntimeError ("cannot generate node. LOCATION: string: " + string_name +
                                " node index: " + str(this_node) +
                                ". error info: " + str(stderr, encoding='ascii') )
        if ret.returncode == 10 :
            # exit code 10: the node already existed
            print ("# detected node: " + string_name + "/" + this_node)
        else :
            print ("# generated node: " + string_name + "/" + this_node)
    np.savetxt ("string.out", string)
    os.chdir (cwd)
def main ():
    """Command-line driver: initialize a string by the chosen method.

    Methods: "linear" interpolates between --begin and --end;
    "resample" resamples --source and reuses its node data;
    "resample-reinit" resamples --source but regenerates nodes from the
    template.  The string is then submitted and its statistics collected.
    """
    parser = argparse.ArgumentParser(
        description="*** Initialize a string. ***")
    parser.add_argument('-m', '--method', default = "",
                        help='Method for generating the string. Available methods are: ' + __available_method())
    parser.add_argument('-n', '--numb-nodes', type=int, default = 20,
                        help='Number of nodes on a string.')
    parser.add_argument('-t','--md-time', type=int, default=20,
                        help='Physical time of MD simulation in unit of ps.')
    parser.add_argument('-d','--dep-size', type=int, default=1,
                        help='Number of nodes that depends on one node.')
    lg = parser.add_argument_group ("Linear string")
    lg.add_argument('-b', '--begin', type=float, nargs = '*',
                    help='Start of the string.')
    lg.add_argument('-e', '--end', type=float, nargs = '*',
                    help='End of the string.')
    sg = parser.add_argument_group ("Source string")
    sg.add_argument('-s', '--source',
                    help='Generate a new string from this string.')
    sg.add_argument('-w', '--weighting',
                    help='The weighting applied to the string.')
    args = parser.parse_args()

    str_force = StringForce ("template.string", args.dep_size)
    # propagate the requested MD time into the template's parameter file
    str_force.replace ("template.string/parameters.sh", "md_time=.*",
                       "md_time=" + str(args.md_time))
    if args.method == "resample" or args.method == "resample-reinit" :
        check_source_string (args.source)
        source_string = np.loadtxt (args.source + "/string.out")
        if args.weighting is not None :
            check_weighting (args.weighting)
            weighting = np.loadtxt (args.weighting)
        else :
            # default: uniform weighting over the whole string
            weighting = [[0, 1], [1, 1]]
    if args.method == "linear" :
        if args.begin == None :
            raise RuntimeError ("Begin of the string is empty")
        if args.end == None :
            raise RuntimeError ("End of the string is empty")
        string = init_linear_string (args.begin, args.end, args.numb_nodes)
        print (str(string))
        str_force.generate_string (0, string)
        job = str_force.submit_string (0, False)
    elif args.method == "resample" :
        if args.source == None :
            raise RuntimeError ("No source string")
        # string = init_source_string (args.source, args.numb_nodes)
        string = StringUtils.resample_string (source_string, args.numb_nodes, weighting)
        generate_from_source (args.source, string)
        job = str_force.submit_string (0, True)
    elif args.method == "resample-reinit" :
        if args.source == None :
            raise RuntimeError ("No source string")
        # string = init_source_string (args.source, args.numb_nodes)
        string = StringUtils.resample_string (source_string, args.numb_nodes, weighting)
        str_force.generate_string (0, string)
        job = str_force.submit_string (0, False)
    else :
        raise RuntimeError ("unknow method to generate the string!")
    # wait for the submitted job, tag the string and collect statistics
    str_force.wait_string (job)
    str_force.write_tag (0)
    force = str_force.statistic_string (0)
def part_channel(self, _channel: str) -> None:
    """Leave the IRC channel *_channel* by sending a PART command."""
    command = 'PART ' + '#' + _channel + StringUtils.endl
    self.irc_socket.send(StringUtils.str_to_byte(command))
#!/usr/bin/env python3 import numpy as np import StringUtils if __name__ == "__main__": string = np.loadtxt ("string.out") weight = np.loadtxt ("weight.out") print (string.shape[0] + 10) new_string = StringUtils.resample_string (string, string.shape[0] + 16, weight) new1_string = StringUtils.resample_string (new_string, new_string.shape[0], weight) # new1_string = resample_string (new_string, weight, 1 + 2 * (new_string.shape[0] - 1)) np.savetxt ("new.out", new_string) np.savetxt ("new1.out", new1_string)
def pong(self, _message: str) -> None:
    """Answer a server PING by sending the matching PONG *_message*."""
    payload = StringUtils.str_to_byte('PONG ' + _message + StringUtils.endl)
    self.irc_socket.send(payload)
def subtract_blank_test(): print StringUtils.subtract_blank('''ab \t\n gda\d''') print StringUtils.subtract_blank(None) print StringUtils.subtract_blank('') print StringUtils.subtract_blank(' a ')
def __init__(self):
    # base paragraph style used for all generated text
    self.style = getSampleStyleSheet()['Normal']
    # NOTE(review): resource_path presumably resolves the bundled PDF to an
    # absolute path (e.g. for frozen builds) -- confirm in StringUtils
    self.file_path = StringUtils.resource_path('el_schema.pdf')
    print self.file_path
    # accumulated rows/flowables for the document
    self.data = []
def main():
    """CLI entry point: build and submit an initial string.

    "linear" interpolates between --begin/--end; "resample" resamples an
    existing --source string reusing its node data; "resample-reinit"
    resamples --source but regenerates the nodes from the template.
    """
    parser = argparse.ArgumentParser(
        description="*** Initialize a string. ***")
    parser.add_argument(
        '-m', '--method', default="",
        help='Method for generating the string. Available methods are: ' +
        __available_method())
    parser.add_argument('-n', '--numb-nodes', type=int, default=20,
                        help='Number of nodes on a string.')
    parser.add_argument('-t', '--md-time', type=int, default=20,
                        help='Physical time of MD simulation in unit of ps.')
    parser.add_argument('-d', '--dep-size', type=int, default=1,
                        help='Number of nodes that depends on one node.')
    lg = parser.add_argument_group("Linear string")
    lg.add_argument('-b', '--begin', type=float, nargs='*',
                    help='Start of the string.')
    lg.add_argument('-e', '--end', type=float, nargs='*',
                    help='End of the string.')
    sg = parser.add_argument_group("Source string")
    sg.add_argument('-s', '--source',
                    help='Generate a new string from this string.')
    sg.add_argument('-w', '--weighting',
                    help='The weighting applied to the string.')
    args = parser.parse_args()

    str_force = StringForce("template.string", args.dep_size)
    # push the requested MD time into the template's parameter file
    str_force.replace("template.string/parameters.sh", "md_time=.*",
                      "md_time=" + str(args.md_time))
    if args.method == "resample" or args.method == "resample-reinit":
        check_source_string(args.source)
        source_string = np.loadtxt(args.source + "/string.out")
        if args.weighting is not None:
            check_weighting(args.weighting)
            weighting = np.loadtxt(args.weighting)
        else:
            # default: uniform weighting over the whole string
            weighting = [[0, 1], [1, 1]]
    if args.method == "linear":
        if args.begin == None:
            raise RuntimeError("Begin of the string is empty")
        if args.end == None:
            raise RuntimeError("End of the string is empty")
        string = init_linear_string(args.begin, args.end, args.numb_nodes)
        print(str(string))
        str_force.generate_string(0, string)
        job = str_force.submit_string(0, False)
    elif args.method == "resample":
        if args.source == None:
            raise RuntimeError("No source string")
        # string = init_source_string (args.source, args.numb_nodes)
        string = StringUtils.resample_string(source_string,
                                             args.numb_nodes, weighting)
        generate_from_source(args.source, string)
        job = str_force.submit_string(0, True)
    elif args.method == "resample-reinit":
        if args.source == None:
            raise RuntimeError("No source string")
        # string = init_source_string (args.source, args.numb_nodes)
        string = StringUtils.resample_string(source_string, args.numb_nodes,
                                             weighting)
        str_force.generate_string(0, string)
        job = str_force.submit_string(0, False)
    else:
        raise RuntimeError("unknow method to generate the string!")
    # wait for the submitted job, tag the string, collect statistics
    str_force.wait_string(job)
    str_force.write_tag(0)
    force = str_force.statistic_string(0)
def saveTrace(self, trace, traceFile, dataFileName=None, dataFileFormat="rvct",
              frameMarkers=[], initFuncName="init", uninitFuncName="uninit",
              playFuncName="play", playFrameFuncName="playFrame",
              frameFuncName="frame", arrayPrefix="array",
              playerArgument="context", insertCopyright=True):
    """Export *trace* as a standalone C source file written to *traceFile*.

    The generated C defines init/uninit functions for API objects, one
    function per frame replaying the traced calls, and play/playFrame
    drivers.  Array contents are emitted inline, or into a separate
    assembler file (*dataFileName*, format "gcc" or "rvct") when given.

    NOTE(review): frameMarkers=[] is a mutable default; it is only read
    here, never mutated, so it is harmless as written.
    """
    try:
        library = self.analyzer.project.targets["code"].library
        config = self.analyzer.project.config
    except (AttributeError, KeyError):
        raise RuntimeError("API configuration not found.")

    def arrayId(array):
        # identity key for a trace array: (concrete class, trace id)
        assert isinstance(array, Trace.Array)
        return (array.__class__, array.id)

    def objectId(obj):
        # C identifier for a trace object, unique per (class, ns, id)
        assert isinstance(obj, Trace.Object)
        return "%s_%x_%x" % (obj.cls.name.lower(), obj.ns, obj.id)

    task = Task.startTask("c-export", "Formatting source", len(trace.events))
    indent = " " * 3  # indentation unit for the generated C code

    # Collect all values for all events
    task = Task.startTask("c-export", "Collecting data", len(trace.events))
    values = []
    [values.extend(e.values.values()) for e in trace.events]

    # Collect arrays (reversed so the first occurrence wins per id)
    arrays = OrderedDict([(arrayId(v), v) for v in reversed(values)
                          if isinstance(v, Trace.Array)])

    # Check that the external data format is supported
    if dataFileName:
        assert dataFileName.endswith(".s"), "Unsupported external data file type. Use one of: s"
        assert dataFileFormat in ("gcc", "rvct"), "Unsupported external data format. Use one of: gcc, rvct"
        dataFile = open(dataFileName, "w")
    else:
        dataFile = None

    # Calculate maximum sizes of arrays
    arraySizes = dict([(a, 0) for a in arrays.keys()])
    for value in values:
        if isinstance(value, Trace.Array):
            a = arrayId(value)
            arraySizes[a] = max(len(value), arraySizes[a])

    # Figure out the C types of arrays and objects
    classes = {}
    objectTypes = {}
    arrayTypes = {}
    outValueObjects = set()  # Objects that are acquired through a function
    arrayVariants = DefaultDict(list)
    # Use arrays whose contents must be kept up to date
    # even after passing them to the API
    usePersistentArrays = False

    def registerObject(event, name, value):
        # record an object and resolve its C type from the library type map
        if not value.cls in classes:
            classes[value.cls] = {}
        function = self.analyzer.lookupFunction(event)
        if not name or function.parameters[name].isOut:
            outValueObjects.add(value)
        # Object has already been registered
        if objectId(value) in classes[value.cls]:
            return
        classes[value.cls][objectId(value)] = value
        for cType, nativeTypeName in library.typeMap.items():
            if cType.name == value.cls.name:
                objectTypes[value] = cType
                break
        else:
            self.analyzer.reportWarning("Unknown class: <%s>" % value.cls.name)
            # Create a fake type name for this class
            objectTypes[value] = value.cls.name

    def registerArray(event, name, value):
        # record an array's element type (and any objects it contains)
        function = self.analyzer.lookupFunction(event)
        if name:
            cType = function.parameters[name].type
        else:
            cType = function.type
        # Extract an essential type for the array discarding all qualifiers and modifiers
        cType = Library.Type(cType.name)
        # Get the real, non-typedef'd type as well
        realType = library.resolveType(cType)
        # If this is a void type, use the original type instead
        if realType.name == "void":
            cType = arrayTypeMap[value.__class__]
        arrayTypes[arrayId(value)] = cType
        # If this is an object array, register the objects too
        if isinstance(value, Trace.ObjectArrayValue):
            for obj in value:
                registerObject(event, name, obj)

    for event in trace.events:
        # OpenGL needs persistent arrays
        if event.name.startswith("gl"):
            usePersistentArrays = True
        for name, value in event.values.items():
            if isinstance(value, Trace.Object):
                registerObject(event, name, value)
            elif isinstance(value, Trace.Array):
                registerArray(event, name, value)
        # Collect the modified arrays for this event
        for array in event.modifiedArrays:
            a = arrayId(array)
            # Only consider the arrays we know about
            if a in arrayTypes:
                arrayVariants[a].append(array)
        task.step()

    # Count the number of frames (+3 for init/uninit bracketing)
    if frameMarkers:
        frameCount = len(frameMarkers) + 3
    else:
        frameCount = len([1 for event in trace.events
                          if self.analyzer.lookupFunction(event).isFrameMarker]) + 3

    # Add the header
    print >> traceFile, "/**"
    print >> traceFile, " * C source generated from %d events (%d frames). " % (len(trace.events), frameCount)
    if insertCopyright:
        print >> traceFile, copyrightText
    print >> traceFile, " */"
    print >> traceFile, ""
    print >> traceFile, "/** A macro for copying data into an array */"
    print >> traceFile, "#define LOAD_ARRAY(TO, FROM, LENGTH) \\"
    print >> traceFile, indent, "{ \\"
    print >> traceFile, indent * 2, "int i; \\"
    print >> traceFile, indent * 2, "for (i = 0; i < (LENGTH); i++) \\"
    print >> traceFile, indent * 3, "(TO)[i] = (FROM)[i]; \\"
    print >> traceFile, indent, "}"
    print >> traceFile, ""

    # Insert any additional code specified in the configuration
    if "c_player_code" in config:
        for fileName in config["c_player_code"]:
            f = open(config.getRelativePath(fileName))
            print >> traceFile, f.read()
            f.close()

    # Add the header to the data file if we have one
    if dataFile:
        if dataFileFormat == "gcc":
            print >> dataFile, "#"
            print >> dataFile, "# GNU Assembler data file generated from %d events (%d frames). " % (len(trace.events), frameCount)
            print >> dataFile, "#"
            print >> dataFile, ""
            print >> dataFile, ".section .data"
        else:  # rvct
            print >> dataFile, ";"
            print >> dataFile, "; RVCT Assembler data file generated from %d events (%d frames). " % (len(trace.events), frameCount)
            print >> dataFile, ";"
            print >> dataFile, ""
            print >> dataFile, " AREA ||.constdata||, DATA, READONLY, ALIGN=2"

    # Introduce objects
    print >> traceFile, "/* Objects */ "
    for objects in classes.values():
        for obj in objects.values():
            print >> traceFile, "static %s %s = (%s)0x%x;" % (objectTypes[obj], objectId(obj), objectTypes[obj], obj.id)
    print >> traceFile, ""
    task.step()

    # Introduce arrays
    print >> traceFile, "/* %d arrays */ " % len(arrays)
    for i, array in enumerate(arrays.values()):
        a = arrayId(array)
        if usePersistentArrays:
            # persistent arrays are statically sized to their max length
            l = arraySizes[a]
            if not l:
                self.analyzer.reportWarning("Empty array %s" % str(a))
                l = 1
            print >> traceFile, "static %s %s_%s%d[%d];" % (arrayTypes[a], str(arrayTypes[a]).lower(), arrayPrefix, i, l)
        else:
            print >> traceFile, "static %s* %s_%s%d;" % (arrayTypes[a], str(arrayTypes[a]).lower(), arrayPrefix, i)
    print >> traceFile, ""

    # Introduce unique array data (deduplicate equal variants)
    print >> traceFile, "/* Array data */ "
    arrayData = []
    arrayMap = {}
    for variants in arrayVariants.values():
        for array in variants:
            # See if an equivalent array is already created
            for j, existingArray in enumerate(arrayData):
                if existingArray == array and \
                   existingArray.__class__ == array.__class__:
                    arrayMap[id(array)] = j
                    break
            else:
                arrayMap[id(array)] = len(arrayData)
                arrayData.append(array)

    if not dataFile:
        # Inline data
        for i, array in enumerate(arrayData):
            if not len(array):
                continue
            # Object arrays can't be initialized inline
            if isinstance(array, Trace.ObjectArrayValue):
                print >> traceFile, "static %s %sData%d[%d];" % (arrayTypes[arrayId(array)], arrayPrefix, i, len(array))
                print >> traceFile, ""
                continue
            elif usePersistentArrays:
                print >> traceFile, "static const %s %sData%d[%d] = {" % (arrayTypes[arrayId(array)], arrayPrefix, i, len(array))
            else:
                print >> traceFile, "static %s %sData%d[%d] = {" % (arrayTypes[arrayId(array)], arrayPrefix, i, len(array))
            print >> traceFile, indent,
            # Figure out the proper qualifier for the array elements
            qualifier = ""
            format = "s"
            if len(array):
                if isinstance(array, Trace.FloatArrayValue):
                    format = qualifier = "f"
                elif isinstance(array, Trace.DoubleArrayValue):
                    format = qualifier = "d"
                elif isinstance(array, Trace.LongArrayValue):
                    format = qualifier = "l"
            for k, value in enumerate(array):
                value = ("%%%s%s" % (format, qualifier)) % value
                if k != len(array) - 1:
                    print >> traceFile, "%s," % value,
                    # wrap the initializer every 8 elements
                    if not (k + 1) % 8:
                        print >> traceFile, ""
                        print >> traceFile, indent,
                else:
                    print >> traceFile, value
            print >> traceFile, "};"
            print >> traceFile, ""
    else:
        # External data: declare extern in C, emit values into the asm file
        for i, array in enumerate(arrayData):
            if not len(array):
                continue
            if usePersistentArrays and not isinstance(array, Trace.ObjectArrayValue):
                print >> traceFile, "extern const %s %sData%d[%d];" % (arrayTypes[arrayId(array)], arrayPrefix, i, len(array))
            else:
                print >> traceFile, "extern %s %sData%d[%d];" % (arrayTypes[arrayId(array)], arrayPrefix, i, len(array))
            # Object arrays can't be initialized inline
            if isinstance(array, Trace.ObjectArrayValue):
                continue
            # Figure out the proper type code for the array elements
            if dataFileFormat == "gcc":
                print >> dataFile, ".global %sData%d" % (arrayPrefix, i)
                print >> dataFile, "%sData%d:" % (arrayPrefix, i)
                if isinstance(array, Trace.FloatArrayValue):
                    typeCode = ".float"
                elif isinstance(array, Trace.DoubleArrayValue):
                    typeCode = ".double"
                elif isinstance(array, Trace.LongArrayValue):
                    typeCode = ".quad"
                elif isinstance(array, Trace.ShortArrayValue):
                    typeCode = ".short"
                elif isinstance(array, Trace.ByteArrayValue):
                    typeCode = ".byte"
                elif isinstance(array, Trace.IntegerArrayValue):
                    typeCode = ".int"
                else:
                    raise RuntimeError("Unknown array type")
                # Write out the data
                print >> dataFile, "%s %s" % (typeCode, ", ".join(map(str, array)))
            else:  # rvct
                print >> dataFile, "GLOBAL %sData%d" % (arrayPrefix, i)
                print >> dataFile, "%sData%d" % (arrayPrefix, i)
                if isinstance(array, Trace.FloatArrayValue):
                    typeCode = "DCFS"
                elif isinstance(array, Trace.DoubleArrayValue):
                    typeCode = "DCFD"
                elif isinstance(array, Trace.LongArrayValue):
                    typeCode = "DCQ"
                elif isinstance(array, Trace.ShortArrayValue):
                    typeCode = "DCW"
                elif isinstance(array, Trace.ByteArrayValue):
                    typeCode = "DCB"
                elif isinstance(array, Trace.IntegerArrayValue):
                    typeCode = "DCD"
                else:
                    raise RuntimeError("Unknown array type")
                # Write out the data, 8 values per line
                prefix = " %s " % typeCode
                for j in xrange(0, len(array), 8):
                    values = array[j:j + 8]
                    print >> dataFile, prefix, ",".join(map(str, values))

    # Initialize the objects
    print >> traceFile, "static void %s(void* %s)" % (initFuncName, playerArgument)
    print >> traceFile, "{"

    def getObjectAttributeValue(attr):
        # render an object attribute as a C literal
        if isinstance(attr, Trace.Array):
            # Only strings are supported so far
            assert isinstance(attr, Trace.ByteArrayValue)
            s = "".join((chr(c) for c in attr))
            s = s.replace("\r", "\\r")
            s = s.replace("\t", "\\t")
            s = s.rstrip("\x00")
            lines = s.split("\n")
            return "\n".join(('"%s\\n"' % l for l in lines))
        return str(attr)

    for objects in classes.values():
        for obj in objects.values():
            # If the object has attributes or it wasn't created from a return value, ask the user to create it
            cClass = library.classes.get(obj.cls.name)
            if obj.attrs or (not obj in outValueObjects and cClass and cClass.overridable):
                print >> traceFile, indent, "/* %s attributes: %s */" % (obj.cls.name, ", ".join(obj.attrs.keys()))
                if obj.attrs:
                    attrs = ", ".join(map(getObjectAttributeValue, obj.attrs.values()))
                    print >> traceFile, indent, "%s = create%s%d(%s, %s);" % (objectId(obj), obj.cls.name, len(obj.attrs) + 1, playerArgument, attrs)
                else:
                    print >> traceFile, indent, "%s = create%s1(%s);" % (objectId(obj), obj.cls.name, playerArgument)
    print >> traceFile, "}"
    print >> traceFile, ""

    # Uninitialize the objects
    print >> traceFile, "static void %s(void* %s)" % (uninitFuncName, playerArgument)
    print >> traceFile, "{"
    for objects in classes.values():
        for obj in objects.values():
            # If the object has attributes or it wasn't created from a return value, ask the user to destroy it
            cClass = library.classes.get(obj.cls.name)
            if obj.attrs or (not obj in outValueObjects and cClass and cClass.overridable):
                print >> traceFile, indent, "destroy%s2(%s, %s);" % (obj.cls.name, playerArgument, objectId(obj))
    print >> traceFile, "}"
    print >> traceFile, ""

    # Add the events
    task.finish()
    task = Task.startTask("c-export", "Generating source", len(trace.events))
    frameNumber = 0
    frameFunctions = ["%s0" % frameFuncName]
    activeArrays = dict([(a, None) for a in arrays.keys()])

    # Open the frame function
    print >> traceFile, "static void %s0(void* %s)" % (frameFuncName, playerArgument)
    print >> traceFile, "{"

    for event in trace.events:
        function = self.analyzer.lookupFunction(event)

        # Modify objects
        for obj in event.modifiedObjects:
            # Check the the object was really modified
            if obj.attrs and obj.attrs != classes[obj.cls][objectId(obj)].attrs:
                attrs = ", ".join(map(getObjectAttributeValue, obj.attrs.values()))
                print >> traceFile, indent, "/* %s attributes: %s */" % (obj.cls.name, ", ".join(obj.attrs.keys()))
                print >> traceFile, indent, "%s = modify%s%d(%s, %s, %s);" % \
                    (objectId(obj), obj.cls.name, len(obj.attrs) + 2, playerArgument, objectId(obj), attrs)
                classes[obj.cls][objectId(obj)].attrs = obj.attrs

        # Modify arrays
        for array in event.modifiedArrays:
            # Load the correct data into the array
            a = arrayId(array)
            # If this array is not used anywhere, skip it
            if not id(array) in arrayMap:
                continue
            # NOTE(review): arrays is an OrderedDict with .index() -- this is
            # presumably a project-specific OrderedDict; confirm its import
            toArray = arrays.index(a)
            fromArray = arrayMap[id(array)]
            # Don't reload the same data
            if activeArrays[a] == fromArray:
                continue
            # Ignore empty arrays
            if not len(array):
                continue
            activeArrays[a] = fromArray
            # Insert new objects directly into the array
            if isinstance(array, Trace.ObjectArrayValue):
                for i, obj in enumerate(array):
                    print >> traceFile, indent, "%s_%s%d[%d] = %s;" % \
                        (str(arrayTypes[a]).lower(), arrayPrefix, toArray, i, objectId(obj))
            elif usePersistentArrays:
                print >> traceFile, indent, "LOAD_ARRAY(%s_%s%d, %sData%d, %d);" % \
                    (str(arrayTypes[a]).lower(), arrayPrefix, toArray, arrayPrefix, fromArray, len(array))
            else:
                print >> traceFile, indent, "%s_%s%d = %sData%d;" % \
                    (str(arrayTypes[a]).lower(), arrayPrefix, toArray, arrayPrefix, fromArray)

        # Collect the arguments
        args = []
        returnValue = None
        for name, value in event.values.items():
            # name is None for the return value; pick the matching C type
            valueType = name and function.parameters[name].type or function.type
            if value is None:
                value = "(%s)0" % valueType
            elif isinstance(value, Trace.Array):
                # If this array can be modified by the function, mark it as lost
                if not valueType.isConstant() and value in event.modifiedArrays:
                    a = arrayId(value)
                    activeArrays[a] = None
                if not value.id:
                    value = "(%s)0" % valueType
                else:
                    a = arrayId(value)
                    value = "(%s)%s_%s%d" % (valueType, str(arrayTypes[a]).lower(), arrayPrefix, arrays.index(a))
            elif isinstance(value, Trace.Object):
                value = str(objectId(value))
            elif isinstance(value, Trace.UnknownPhrase):
                value = "(%s)NULL" % valueType
            else:
                value = StringUtils.decorateValue(library, function, name, value)
                if isinstance(value, Trace.FloatValue):
                    value = str(value) + "f"
                elif isinstance(value, Trace.DoubleValue):
                    value = str(value) + "d"
                elif isinstance(value, Trace.LongValue):
                    value = str(value) + "l"
                # Do a cast if this is actually a pointer parameter (e.g. 'ptr' in glVertexAttribPointer)
                if name and library.isPointerType(function.parameters[name].type):
                    value = "(%s)%s" % (valueType, value)
                # If the real C type is unsigned and we have a negative value, do a cast
                try:
                    if name and "unsigned" in str(library.resolveType(function.parameters[name].type)) and int(value) < 0:
                        value = "(%s)%s" % (function.parameters[name].type, value)
                except ValueError:
                    # Not an integer
                    pass
            # HACK: eglGetDisplay(0) -> eglGetDisplay(EGL_DEFAULT_DISPLAY)
            if event.name == "eglGetDisplay" and name and str(value) == "0":
                value = "EGL_DEFAULT_DISPLAY"
            # Make sure we have a meaningful parameter value
            assert len(str(value))
            if name:
                args.append(str(value))
            else:
                returnValue = value

        # Truncated event stream?
        if not len(args) == len(function.parameters):
            self.analyzer.reportWarning("Truncated call to %s(%s)" % (event.name, ", ".join(args)))
            print >> traceFile, indent, "/* truncated call to %s(%s) */" % (event.name, ", ".join(args))
            continue

        # Save the return value if needed
        returnObject = event.values.get(None, None)
        if isinstance(returnObject, Trace.Object):
            print >> traceFile, indent, "%s =" % objectId(returnObject),
        else:
            print >> traceFile, indent,

        args = ", ".join(args)
        print >> traceFile, "%s(%s);" % (event.name, args)

        # Apply modifications to object arrays
        for array in event.modifiedArrays:
            if isinstance(array, Trace.ObjectArrayValue):
                for i, obj in enumerate(array):
                    a = arrayId(array)
                    fromArray = arrays.index(a)
                    print >> traceFile, indent, "%s = %s_%s%d[%d];" % \
                        (objectId(obj), str(arrayTypes[a]).lower(), arrayPrefix, fromArray, i)

        # Close the current frame function and open the next one at each marker
        if (not frameMarkers and function.isFrameMarker) or event in frameMarkers:
            frameNumber += 1
            name = "%s%d" % (frameFuncName, frameNumber)
            print >> traceFile, "}"
            print >> traceFile, ""
            print >> traceFile, "/**"
            print >> traceFile, " * Frame #%d" % frameNumber
            print >> traceFile, " */"
            print >> traceFile, "static void %s(void* %s)" % (name, playerArgument)
            print >> traceFile, "{"
            frameFunctions.append(name)
        task.step()

    print >> traceFile, "}"
    print >> traceFile, ""

    # Create the playback function
    print >> traceFile, "/**"
    print >> traceFile, " * Play back all trace frames."
    print >> traceFile, " * @param %s Optional user data pointer" % (playerArgument)
    print >> traceFile, " */"
    print >> traceFile, "static void %s(void* %s)" % (playFuncName, playerArgument)
    print >> traceFile, "{"
    print >> traceFile, indent, "%s(%s);" % (initFuncName, playerArgument)
    for name in frameFunctions:
        print >> traceFile, indent, "%s(%s);" % (name, playerArgument)
    print >> traceFile, indent, "%s(%s);" % (uninitFuncName, playerArgument)
    print >> traceFile, "}"
    print >> traceFile, ""

    # Create the playback function for single frame playback
    print >> traceFile, "/**"
    print >> traceFile, " * Play back a single frame of the trace."
    print >> traceFile, " * @param %s Optional user data pointer" % (playerArgument)
    print >> traceFile, " * @param frame Zero-based number of frame to play"
    print >> traceFile, " * @returns 1 if the frame number was valid, 0 otherwise"
    print >> traceFile, " */"
    print >> traceFile, "static int %s(void* %s, int frame)" % (playFrameFuncName, playerArgument)
    print >> traceFile, "{"
    print >> traceFile, indent, "switch (frame)"
    print >> traceFile, indent, "{"
    print >> traceFile, indent * 2, "case %6d: %s(%s); break;" % (0, initFuncName, playerArgument)
    for i, name in enumerate(frameFunctions):
        print >> traceFile, indent * 2, "case %6d: %s(%s); break;" % (i + 1, name, playerArgument)
    print >> traceFile, indent * 2, "case %6d: %s(%s); break;" % (len(frameFunctions) + 1, uninitFuncName, playerArgument)
    print >> traceFile, indent * 2, "default: return 0;"
    print >> traceFile, indent, "}"
    print >> traceFile, indent, "return 1;"
    print >> traceFile, "}"

    # Close the data file
    if dataFile:
        dataFile.close()

    # All done
    task.finish()
def agregar_estilo(self, estilo):
    """Register *estilo* (in standardized form) with the next sequential id,
    unless it is already known."""
    clave = StringUtils.convertir_a_estandard(estilo)
    if clave in self.idEstilosDict:
        return
    self.idEstilosDict[clave] = self.idEstilosCount
    self.idEstilosCount += 1
def generate_from_source(source_string_dir_, string):
    """Generate a new string of nodes by adapting an existing "source" string.

    source_string_dir_ -- directory holding a previously computed string;
                          must contain a ``string.out`` file (numpy text
                          matrix, one row per node)
    string             -- numpy array of the new string's nodes, one row per
                          node; must have the same number of columns as the
                          source string

    Side effects: creates/copies the string directory, changes the working
    directory (restored on normal exit), runs external shell commands, and
    writes ``string.out`` into the new string directory.

    Raises RuntimeError when the source dir/file is missing, the node sizes
    do not match, or an external command fails.
    """
    # check the dir and string file
    if not os.path.isdir(source_string_dir_):
        raise RuntimeError("cannot find dir " + source_string_dir_)
    # Resolve source_string_dir_ to an absolute path via a chdir round-trip.
    cwd = os.getcwd()
    os.chdir(source_string_dir_)
    source_string_dir = os.getcwd()
    os.chdir(cwd)
    file_name = source_string_dir + "/string.out"
    if not os.path.exists(file_name):
        raise RuntimeError("cannot find file " + file_name)
    # load source string
    source_string = np.loadtxt(file_name)
    if source_string.shape[1] != string.shape[1]:
        raise RuntimeError("size of the string nodes does not match")
    # mk dir of the string
    str_force = StringForce()
    string_name = str_force.mk_string_name(0)
    if not os.path.isdir(string_name):
        # Create the new string dir by copying the template directory.
        ret = Popen(["cp", '-a', str_force.string_template, string_name],
                    stdout=PIPE, stderr=PIPE)
        stdout, stderr = ret.communicate()
        if ret.returncode != 0:
            raise RuntimeError("cannot copy template dir to " + string_name)
    # generate nodes
    cwd = os.getcwd()
    cmd_gen_dir = "tools/gen.dir.absdep.sh"
    os.chdir(string_name)
    # Normalized arc-length parameters of both strings; used to match each
    # new node to a source node.
    my_alpha = StringUtils.arc_norm(string)
    source_alpha = StringUtils.arc_norm(source_string)
    for ii in range(string.shape[0]):
        # Earlier nearest-neighbour matching, kept for reference:
        # min_val = 1e10
        # min_posi = 0
        # for jj in range(source_string.shape[0]) :
        #     norm = np.linalg.norm (string[ii] - source_string[jj])
        #     # print ("id " + str(jj) +
        #     #        " diff " + str(string[ii]) + " " + str(source_string[jj]) + " norm " + str(norm))
        #     if norm < min_val :
        #         min_val = norm
        #         min_posi = jj
        # Pick the LAST source node whose arc parameter does not exceed this
        # node's parameter.
        # NOTE(review): if my_alpha[0] < source_alpha[0], min_posi is never
        # assigned on the first iteration — presumably arc_norm starts at 0
        # so this cannot happen; confirm against StringUtils.arc_norm.
        for jj in range(source_string.shape[0]):
            if (my_alpha[ii] >= source_alpha[jj]):
                min_posi = jj
        this_node = str_force.mk_node_name(ii)
        str_force.mk_node_param(string[ii])
        source_node = str_force.mk_node_name(min_posi)
        # Generate the node dir from the matching source node.
        ret = Popen(
            [cmd_gen_dir, this_node, source_string_dir + "/" + source_node],
            stdout=PIPE, stderr=PIPE)
        stdout, stderr = ret.communicate()
        if ret.returncode == 1:
            raise RuntimeError("cannot generate node. LOCATION: string: " +
                               string_name + " node index: " +
                               str(this_node) + ". error info: " +
                               str(stderr, encoding='ascii'))
        # Exit code 10 means the node already existed ("detected");
        # anything else (0) means it was freshly generated.
        if ret.returncode == 10:
            print("# detected node: " + string_name + "/" + this_node)
        else:
            print("# generated node: " + string_name + "/" + this_node)
    np.savetxt("string.out", string)
    os.chdir(cwd)
def renombrar_archivo(self, tema, interprete, estilo, dirName, arch):
    """Rename *arch* inside *dirName* to '<interprete> <sep> <tema><ext>'.

    The title and artist are normalized with
    StringUtils.convertir_a_estandard; the original file extension is kept.
    (*estilo* is accepted for interface compatibility but not used here.)
    """
    # Keep everything from the last dot onwards as the extension.
    extension = arch[arch.rindex('.'):]
    tema_norm = StringUtils.convertir_a_estandard(tema)
    interprete_norm = StringUtils.convertir_a_estandard(interprete)
    nombre_nuevo = (interprete_norm + " " + self.separador + " " +
                    tema_norm + extension)
    IOManager.renombrar_archivo(dirName, arch, nombre_nuevo)
    # NOTE(review): these four prints are the tail of subtract_blank_test(),
    # whose `def` line lies above this chunk; they exercise subtract_blank
    # with a normal string, None, empty, and padded input.
    print StringUtils.subtract_blank('''ab \t\n gda\d''')
    print StringUtils.subtract_blank(None)
    print StringUtils.subtract_blank('')
    print StringUtils.subtract_blank(' a ')


def subtract_unprintable_test():
    # Smoke test: strip unprintable characters from a sample string.
    str_test = '''ab \t\n gda\d'''
    print StringUtils.subtract_unprintable(str_test)


def split_plus_test():
    # Smoke test: split one string on several separators at once.
    # Inputs are converted to `unicode` first (this is Python 2 code).
    separator_array_ = ['ab', ', ', ' ', 'e', '中文', ':', '|']
    str_ = 'abc,de fgh, ijk,lmn:op|qr中文st'
    separator_array_ = TypeUtils.convert_to_type(separator_array_, unicode)
    str_ = TypeUtils.convert_to_type(str_, unicode)
    print StringUtils.split_plus(str_, separator_array_)


if __name__ == '__main__':
    # Run all the ad-hoc tests in order, then one extra split_plus case
    # with plain (byte) strings.
    subtract_blank_test()
    subtract_unprintable_test()
    split_plus_test()
    print StringUtils.split_plus('abc,d e,f.g.h', [',', ' ', '.', 'bc'])
# Normalize every BEDlike file in args.BEDlikefiles against a baseline file.
# NOTE(review): flat Python 2 script body; `args`, `ReadBaseline`,
# `FindAverageProbeIntensity`, `ApplyBaseline` and `StringUtils` are defined
# elsewhere in the file.
files = os.listdir(args.BEDlikefiles)
starttime = time.time()
# Fixed per-file probe count — presumably the array platform size;
# TODO confirm against the data source.
num_probes = 2635714
print "initializing array..."
# Two parallel rows of per-probe values (filled in by ReadBaseline).
probeset = [[0.0 for y in xrange(num_probes)] for x in xrange(0, 2)]
print "reading baseline from %s" % (args.Baselinefile)
print "probeset[%i][%i]" % (len(probeset), len(probeset[0]))
ReadBaseline(args.Baselinefile, probeset)
blaverage = FindAverageProbeIntensity(probeset)
print "baseline average probe intensity is:", blaverage
print "done."
# op = StringUtils.rreplace(sys.argv[1], 'BED', 'NORMAL', 1)
for i, f in enumerate(files):
    filetime = time.time()
    # Output name: replace only the last '.BEDlike' occurrence.
    of = StringUtils.rreplace(f, '.BEDlike', '.normalized.BEDlike', 1)
    bedprobes = [[0.0 for y in xrange(num_probes)] for x in xrange(0, 2)]
    print "determining average probe intensity from %s%s" % (args.BEDlikefiles, f)
    print "bedprobes[%i][%i]" % (len(bedprobes), len(bedprobes[0]))
    ReadBaseline("%s%s" % (args.BEDlikefiles, f), bedprobes)
    bedaverage = FindAverageProbeIntensity(bedprobes)
    print "average probe intensity is:", bedaverage
    # Scale the baseline UP to this file's average; never scale it down.
    scaling = float(bedaverage) / blaverage
    if scaling < 1:
        scaling = 1
    print "baseline needs to be scaled by a factor of", scaling
    ApplyBaseline(i + 1, "%s%s" % (args.BEDlikefiles, f),
                  "%s%s" % (args.output_path, of), probeset, scaling)
    # first file is first file, use zero for chr/pos
    print "File %i - %s processed in %f seconds" % (i + 1, f, time.time() - filetime)
# ProduceStats(len(files) + 1, probeset)
print 'Completed in %s seconds' % int((time.time() - starttime))
""" # 检查参数合法性 if len(sys.argv) < 2: print ARGS_HELPER os._exit(0) ROOT_PATH = sys.argv[1] PERCENT = 0.0 SAVE_TO_FILES = False if len(sys.argv) > 2: PERCENT = float(sys.argv[2]) if len(sys.argv) > 3: SAVE_TO_FILES = bool(sys.argv[3]) if not os.path.isdir(ROOT_PATH): ERROR_STR = Str.chsstr("只能输入目录作为第一个参数,您输入的是 %s" % ROOT_PATH) raise StandardError(ERROR_STR) AnalysisResult1 = {} AnalysisResult2 = {} AnalysisFinalResult = {} def DoAnalysis1(value, params): """resource => prefab""" path = params[0].replace(ROOT_PATH + '\\', '') filename = params[1] if value in AnalysisResult1: result = AnalysisResult1[value] result[0] = result[0] + 1 if path in result[1]:
def saveTrace(self, trace, traceFile, truncateValues=True, includeSensorData=False):
    """Write a human-readable text dump of *trace* to *traceFile*.

    Each event becomes one line of the form
        <seq> <time> <name> (<arg>=<value>, ...) [-> <return>] +<duration>
    optionally preceded by "@inst" (sensor data) and "@array"
    (modified-array) lines.

    trace             -- trace whose .events / .sensors are dumped
    traceFile         -- writable file-like object (Python 2 `print >>` target)
    truncateValues    -- clip long values to 1 MB; parsed via parseBoolean so
                         string flags also work
    includeSensorData -- also emit instrumentation sensor descriptions/values
    """
    try:
        library = self.analyzer.project.targets["code"].library
    except (AttributeError, KeyError):
        # No code target configured: skip value decoration below.
        library = None
    task = Task.startTask("text-export", "Formatting text", len(trace.events))
    truncateValues = self.analyzer.parseBoolean(truncateValues)
    includeSensorData = self.analyzer.parseBoolean(includeSensorData)
    maxValueLength = 1024 * 1024

    # Describe instrumentation sensors
    if includeSensorData:
        for name, sensor in sorted(trace.sensors.items()):
            print >> traceFile, "%010d %010d @inst %s: %s" % (
                0, 0, name, sensor.description)
    for event in trace.events:
        try:
            function = self.analyzer.lookupFunction(event)
        except:
            function = None

        # Print out any associated instrumentation data
        if includeSensorData:
            for key, value in sorted(event.sensorData.items()):
                if value:
                    print >> traceFile, "%010d %010d @inst %s = %s" % (
                        event.seq, event.time, key, value)

        # Print out any modified arrays that are not actual parameters for the event.
        for array in event.modifiedArrays:
            for value in event.values.values():
                if isinstance(value, Trace.Array) and value.id == array.id:
                    break
            else:
                # No parameter references this array: emit it separately.
                if truncateValues:
                    text = StringUtils.ellipsis(array, maxLength=maxValueLength)
                else:
                    text = array
                print >> traceFile, "%010d %010d @array 0x%x = %s" % (
                    event.seq, event.time, array.id, text)
        args = []

        # Print out the parameters (the None key holds the return value).
        for name, value in event.values.items():
            if not name:
                continue
            if function and library:
                value = StringUtils.decorateValue(library, function, name, value)
            if truncateValues:
                value = StringUtils.ellipsis(value, maxLength=maxValueLength)
            args += ["%s=%s" % (name, value)]
        # Trailing comma: keep the return value / duration on the same line.
        print >> traceFile, "%010d %010d %s (%s)" % (
            event.seq, event.time, event.name, ", ".join(args)),
        if None in event.values:
            print >> traceFile, "-> %s" % event.values[
                None], "+%d" % event.duration
        else:
            print >> traceFile, "+%d" % event.duration
        task.step()
def saveTrace(self, trace, traceFile, truncateValues = True, includeSensorData = False):
    """Write a human-readable text dump of *trace* to *traceFile*.

    Each event becomes one line of the form
        <seq> <time> <name> (<arg>=<value>, ...) [-> <return>] +<duration>
    optionally preceded by "@inst" (sensor data) and "@array"
    (modified-array) lines.

    trace             -- trace whose .events / .sensors are dumped
    traceFile         -- writable file-like object (Python 2 `print >>` target)
    truncateValues    -- clip long values to 1 MB; parsed via parseBoolean so
                         string flags also work
    includeSensorData -- also emit instrumentation sensor descriptions/values
    """
    try:
        library = self.analyzer.project.targets["code"].library
    except (AttributeError, KeyError):
        # No code target configured: skip value decoration below.
        library = None
    task = Task.startTask("text-export", "Formatting text", len(trace.events))
    truncateValues = self.analyzer.parseBoolean(truncateValues)
    includeSensorData = self.analyzer.parseBoolean(includeSensorData)
    maxValueLength = 1024 * 1024

    # Describe instrumentation sensors
    if includeSensorData:
        for name, sensor in sorted(trace.sensors.items()):
            print >>traceFile, "%010d %010d @inst %s: %s" % (0, 0, name, sensor.description)
    for event in trace.events:
        try:
            function = self.analyzer.lookupFunction(event)
        except:
            function = None

        # Print out any associated instrumentation data
        if includeSensorData:
            for key, value in sorted(event.sensorData.items()):
                if value:
                    print >>traceFile, "%010d %010d @inst %s = %s" % (event.seq, event.time, key, value)

        # Print out any modified arrays that are not actual parameters for the event.
        for array in event.modifiedArrays:
            for value in event.values.values():
                if isinstance(value, Trace.Array) and value.id == array.id:
                    break
            else:
                # No parameter references this array: emit it separately.
                if truncateValues:
                    text = StringUtils.ellipsis(array, maxLength = maxValueLength)
                else:
                    text = array
                print >>traceFile, "%010d %010d @array 0x%x = %s" % (event.seq, event.time, array.id, text)
        args = []

        # Print out the parameters (the None key holds the return value).
        for name, value in event.values.items():
            if not name:
                continue
            if function and library:
                value = StringUtils.decorateValue(library, function, name, value)
            if truncateValues:
                value = StringUtils.ellipsis(value, maxLength = maxValueLength)
            args += ["%s=%s" % (name, value)]
        # Trailing comma: keep the return value / duration on the same line.
        print >>traceFile, "%010d %010d %s (%s)" % (event.seq, event.time, event.name, ", ".join(args)),
        if None in event.values:
            print >>traceFile, "-> %s" % event.values[None], "+%d" % event.duration
        else:
            print >>traceFile, "+%d" % event.duration
        task.step()
def subtract_unprintable_test(): str_test = '''ab \t\n gda\d''' print StringUtils.subtract_unprintable(str_test)