def prettify(obj):
    if hasattr(obj, 'getSource'):
        return obj.getSource()
    else:
        t = type(obj)
        if t in _SIMPLE_BUILTINS:
            return repr(obj)
        elif t is types.DictType:
            out = ['{']
            for k, v in obj.items():
                out.append('\n\0%s: %s,' % (prettify(k), prettify(v)))
            out.append(len(obj) and '\n\0}' or '}')
            return string.join(out, '')
        elif t is types.ListType:
            out = ["["]
            for x in obj:
                out.append('\n\0%s,' % prettify(x))
            out.append(len(obj) and '\n\0]' or ']')
            return string.join(out, '')
        elif t is types.TupleType:
            out = ["("]
            for x in obj:
                out.append('\n\0%s,' % prettify(x))
            out.append(len(obj) and '\n\0)' or ')')
            return string.join(out, '')
        else:
            raise TypeError("Unsupported type %s when trying to prettify %s."
                            % (t, obj))
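# Hedged usage sketch for prettify(); _SIMPLE_BUILTINS is not defined in
# this excerpt, so the demo supplies an illustrative value (assumes the
# `types` and `string` modules are imported). The '\0' bytes in the output
# act as indentation placeholders for a later formatting pass.
if __name__ == '__main__':
    _SIMPLE_BUILTINS = [types.IntType, types.FloatType,
                        types.StringType, types.NoneType]
    print prettify({'a': [1, 2], 'b': (3,)})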
def writedoc(key, top=False):
    """Write HTML documentation to a file in the current directory."""
    if type(key) == str and (key == "modules" or key == "/."):
        heading = pydoc.html.heading(
            '<br><big><big><strong> '
            'Python: Index of Modules'
            '</strong></big></big>',
            '#ffffff', '#7799ee')
        builtins = []
        for name in sys.builtin_module_names:
            builtins.append('<a href="%s">%s</a>' %
                            (cgi.escape(name, quote=True), cgi.escape(name)))
        indices = ['<p>Built-in modules: ' + cgi.escape(join(builtins, ', '))]
        seen = {}
        for dir in pydoc.pathdirs():
            indices.append(pydoc.html.index(dir, seen))
        print cleanlinks(heading + join(indices))
        return

    if type(key) != types.ModuleType:
        object = pydoc.locate(key)
        if object is None and top:
            print "could not locate module/object for key " + \
                  cgi.escape(key) + "<br><a href=\"pydoc:modules\">go to index</a>"
    else:
        object = key

    if object:
        print cleanlinks(pydoc.html.page(pydoc.describe(object),
                                         pydoc.html.document(object)))
def smb4_grant_user_rights(user):
    args = ["/usr/local/bin/net", "-d 0", "sam", "rights", "grant"]
    rights = [
        "SeTakeOwnershipPrivilege",
        "SeBackupPrivilege",
        "SeRestorePrivilege"
    ]

    net_cmd = "%s %s %s" % (
        string.join(args, ' '),
        user,
        string.join(rights, ' ')
    )

    p = pipeopen(net_cmd)
    net_out = p.communicate()
    if net_out and net_out[0]:
        for line in net_out[0].split('\n'):
            if not line:
                continue
            print line

    if p.returncode != 0:
        return False

    return True
def _writeLifecycle(self, fileOut):
    """
    Write default constructor and destructor.
    """
    # Constructor
    fileOut.write("%s::%s::%s(void)\n" %
                  (string.join(self.namespace, "::"),
                   self.objname, self.objname))
    fileOut.write("{ // constructor\n")
    for scalar in self.scalars:
        n = scalar['name']
        if "char*" != scalar['type']:
            fileOut.write("  %s = %s;\n" % (n[1:], n))
        else:
            fileOut.write("  %s = const_cast<char*>(%s);\n" % (n[1:], n))
    for array in self.arrays:
        n = array['name']
        fileOut.write("  %s = const_cast<%s*>(%s);\n" %
                      (n[1:], array['type'], n))
    fileOut.write("} // constructor\n\n")

    # Destructor
    fileOut.write("%s::%s::~%s(void)\n" %
                  (string.join(self.namespace, "::"),
                   self.objname, self.objname))
    fileOut.write("{}\n\n")
    return
def _format_data(self, start_time, timestamp, name, units, values):
    fields = _fields[:]
    file_timestamp = time.strftime('%Y%m%d%H%M', time.gmtime(start_time))
    value_timestamp = time.strftime('%Y%m%d%H%M', time.gmtime(timestamp))
    fields[_field_index['units']] = units
    fields[_field_index['commodity']] = self.commodity
    meter_id = name + '|1'
    if units:
        meter_id += '/%s' % units
    fields[_field_index['meter_id']] = meter_id
    fields[_field_index['receiver_id']] = ''
    fields[_field_index['receiver_customer_id']] = \
        self.customer_name + '|' + self.account_name
    fields[_field_index['timestamp']] = file_timestamp
    # interval put into "MMDDHHMM" with MMDD = 0000
    fields[_field_index['interval']] = '0000%02d%02d' % (
        self.period / 3600, (self.period % 3600) / 60)
    fields[_field_index['count']] = str(len(values))
    value_sets = []
    for value in values:
        try:
            value = '%f' % value
            protocol_text = ''
        except ValueError:
            value = ''
            protocol_text = 'N'
        value_set = (value_timestamp, protocol_text, value)
        value_sets.append(string.join(value_set, ','))
        value_timestamp = ''
    fields[_field_index['interval_data']] = string.join(value_sets, ',')
    return string.join(fields, ',')
def main(text, key):
    result = list()
    for i in range(0, len(text)):
        result.append(chr(ord(text[i]) ^ ord(key[i % len(key)])))
    print string.join(result, "").encode("hex")
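# Hedged demo of the repeating-key XOR above; the plaintext and key are
# invented. Each output byte is text[i] ^ key[i % len(key)], printed as a
# hex string via the Python 2 "hex" codec.
if __name__ == '__main__':
    import string
    main("hello world", "key")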
def checkVersion():
    import sys, string
    if sys.version_info < requiredPythonVersion:
        raise Exception("%s requires at least Python %s, found %s instead." % (
            name,
            string.join(map(str, requiredPythonVersion), "."),
            string.join(map(str, sys.version_info), ".")))
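# Hedged sketch of the module-level globals checkVersion() expects; the
# package name and version tuple below are illustrative stand-ins for
# values the surrounding module is assumed to define.
name = "example-package"             # hypothetical
requiredPythonVersion = (2, 4, 0)    # hypothetical
if __name__ == '__main__':
    checkVersion()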
def endElement(self, name):
    if name in self.atags:
        aux = string.join(self.interestData)
        if not self.atags[name]:
            # delta_ts could be overwritten by delta_ts of attachment
            self.atags[name] = unicode(aux)
        self.tag_name = None
    elif name in self.long_desc_tags:
        aux = string.join(self.interestData)
        self.long_desc_tags[name] = unicode(aux)
        self.tag_name = None
    elif name in self.btags:
        aux = string.join(self.interestData)
        self.btags[name].append(unicode(aux))
        self.tag_name = None
    elif name in self.ctags:
        if name == 'long_desc':
            aux = self._copy_long_desc_tags()
            self.ctags['long_desc'].append(aux)
            self._init_long_desc_tags()
        elif name == 'attachment':
            pass
        self.tag_name = None
    elif name == "bug":
        #self.issues_data[self.atags["bug_id"]] = self.get_issue()
        self.issues_data.append(self.get_issue())
def join_header_words(lists):
    """Do the inverse of the conversion done by split_header_words.

    Takes a list of lists of (key, value) pairs and produces a single header
    value.  Attribute values are quoted if needed.

    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
    'text/plain; charset="iso-8859/1"'
    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
    'text/plain, charset="iso-8859/1"'

    """
    headers = []
    for pairs in lists:
        attr = []
        for k, v in pairs:
            if v is not None:
                if not re.search(r"^\w+$", v):
                    v = join_escape_re.sub(r"\\\1", v)  # escape " and \
                    v = '"%s"' % v
                if k is None:  # Netscape cookies may have no name
                    k = v
                else:
                    k = "%s=%s" % (k, v)
            attr.append(k)
        if attr:
            headers.append(string.join(attr, "; "))
    return string.join(headers, ", ")
def __init__(self, entityTypeModule):
    self.entTypeModule = entityTypeModule

    hv = HashVal()
    import EntityTypes
    reload(EntityTypes)
    reload(self.entTypeModule)

    def getPyExtVersion(filename):
        base, ext = os.path.splitext(filename)
        if ext == '.pyc' or ext == '.pyo':
            filename = base + '.py'
        return filename

    fileLines = file(getPyExtVersion(EntityTypes.__file__)).readlines()
    hv.hashString(string.join(fileLines, ''))
    s = str(hv.asHex())
    s += '.'
    fileLines = file(getPyExtVersion(self.entTypeModule.__file__)).readlines()
    hv.hashString(string.join(fileLines, ''))
    s += str(hv.asHex())
    self.hashStr = s
    getPyExtVersion = None

    classes = []
    for key, value in entityTypeModule.__dict__.items():
        if type(value) is types.ClassType:
            if issubclass(value, EntityTypeDesc.EntityTypeDesc):
                classes.append(value)

    self.entTypeName2typeDesc = {}
    mostDerivedLast(classes)
    for c in classes:
        if c.__dict__.has_key('type'):
            if self.entTypeName2typeDesc.has_key(c.type):
                EntityTypeRegistry.notify.debug(
                    "replacing %s with %s for entity type '%s'" %
                    (self.entTypeName2typeDesc[c.type].__class__, c, c.type))
            self.entTypeName2typeDesc[c.type] = c()

    self.output2typeNames = {}
    for typename, typeDesc in self.entTypeName2typeDesc.items():
        if typeDesc.isConcrete():
            if hasattr(typeDesc, 'output'):
                outputType = typeDesc.output
                self.output2typeNames.setdefault(outputType, [])
                self.output2typeNames[outputType].append(typename)

    self.permanentTypeNames = []
    for typename, typeDesc in self.entTypeName2typeDesc.items():
        if typeDesc.isPermanent():
            self.permanentTypeNames.append(typename)

    self.typeName2derivedTypeNames = {}
    for typename, typeDesc in self.entTypeName2typeDesc.items():
        typenames = []
        for tn, td in self.entTypeName2typeDesc.items():
            if td.isConcrete():
                if issubclass(td.__class__, typeDesc.__class__):
                    typenames.append(tn)
        self.typeName2derivedTypeNames[typename] = typenames
    return
def makePWCSV(pathwayconnections):
    curStr = '"Pathway1 KEGG","Pathway2 KEGG","Connecting reactions","Connection metabolites"\n'
    for pathway in sorted(pathwayconnections.keys()):
        cPathways = pathwayconnections[pathway]
        #print pathway
        for cPathway in cPathways:
            if cPathway[:5] != "path:":
                ctPathway = "path:" + cPathway
            else:
                ctPathway = cPathway
            cReactionsStr = ""
            # initialize with the same name used below (was "cMeabolitesStr",
            # which left cMetabolitesStr undefined when no match was found)
            cMetabolitesStr = ""
            if pathway[:5] != "path:":
                tPathway = "path:" + pathway
            else:
                tPathway = pathway
            if ((tPathway in pathwayReact.keys()) and
                    (ctPathway in pathwayReact.keys())):
                cReactions = [reaction[3:] for reaction in pathwayReact[tPathway]
                              if reaction in pathwayReact[ctPathway]]
                cReactionsStr = string.join(cReactions, "|")
                cMetabolites = []
                for reaction in cReactions:
                    tReaction = "rn:" + reaction
                    if tReaction in reactionsD.keys():
                        for metabolite in reactionsD[tReaction]["METABOLITES"]:
                            if metabolite not in cMetabolites:
                                cMetabolites.append(metabolite)
                cMetabolitesStr = string.join(cMetabolites, "|")
                #[metabolite for metabolite in rDictionary[reaction]["METABOLITES"] if metaolite in cReactions]
            curStr += '"%s","%s","%s","%s"\n' % (pathway[5:], cPathway,
                                                 cReactionsStr, cMetabolitesStr)
    return curStr
def parse_quoted_string(self, unfold=False):
    """Parses a quoted-string.

    Parses a single instance of the production quoted-string.  The
    return value is the entire matching string (including the quotes
    and any quoted pairs) or None if no quoted-string was found.

    If *unfold* is True then any folding LWS is replaced with a single
    SP.  It defaults to False."""
    if not self.parse(DQUOTE):
        return None
    qs = [DQUOTE]
    while self.the_char is not None:
        if self.parse(DQUOTE):
            qs.append(DQUOTE)
            break
        elif self.match("\\"):
            qp = self.parse_quoted_pair()
            if qp:
                qs.append(qp)
            else:
                raise ValueError(
                    "Expected quoted pair: %s..." % self.peek(5))
        else:
            qdtext = self.parse_qdtext(unfold)
            if qdtext:
                qs.append(qdtext)
            else:
                raise BadSyntax("Expected closing <\">: %s%s..." %
                                (string.join(qs, ''), self.peek(5)))
    return string.join(qs, '')
def create_columns(self, table_id):
    """
    Create the columns for the table in question.  We call fix_datatype
    to clean up anything PGSQL or MYSQL specific.  The function is to be
    overridden by the database-specific subclass in question.
    """
    col_list = self.tables[table_id].get_columns()
    table_name = self.tables[table_id].name
    num_cols = len(col_list)
    if verbose:
        print "Found %s columns" % num_cols

    for index in range(num_cols):
        colname = col_list[index]
        coltype = self.tables[table_id].get_col_type(colname)
        coltype = self.fix_datatype(coltype)
        colvalue = self.tables[table_id].get_col_value(colname)
        colcomm = self.tables[table_id].get_col_comm(colname)
        if colcomm:
            # strip() returns a new string; the original discarded it
            colcomm = colcomm.strip()
            self.sql_create = string.join((self.sql_create, "\n\t/* ",
                                           colcomm, " */"), "")
        col_statement = self.fix_value(table_name, colname, coltype, colvalue)
        if index < num_cols - 1:
            self.sql_create = string.join((self.sql_create, "\n\t",
                                           string.upper(col_statement) + ","), "")
        else:
            self.sql_create = string.join((self.sql_create, "\n\t",
                                           string.upper(col_statement)), "")
def getBulletin(self, includeError=False, useFinalLineSeparator=True):
    """getBulletin([includeError]) -> bulletin

       bulletin:               String
       includeError:           Bool - If True, include error in bulletin body.
       useFinalLineSeparator:  Bool - If True, use finalLineSeparator

       Returns the bulletin text.
    """
    if useFinalLineSeparator:
        marqueur = self.finalLineSeparator
    else:
        marqueur = self.lineSeparator

    if self.errorBulletin is None:
        return string.join(self.bulletin, marqueur)
    else:
        if includeError:
            return ("### " + self.errorBulletin[0] + marqueur +
                    "PROBLEM BULLETIN" + marqueur) + \
                   string.join(self.bulletin, marqueur)
        else:
            return string.join(self.bulletin, marqueur)
def UpdateIcon(self, force=False, info=None):
    try:
        isOnline = self.connection.IsOnline(force)
    except UpdateStatusException:
        return

    if isOnline:
        status = u"在线"   # "online"
        icon = 'online'
    else:
        status = u"离线"   # "offline"
        icon = 'offline'
    if info is None:
        tooltip = string.join(("pNJU", status), " - ")
    else:
        tooltip = string.join(("pNJU", status, info), " - ")
    self.SetIcon(self.MakeIcon(icon), tooltip)

    if force and isOnline:
        newVersion = self.connection.CheckNewVersion()
        if newVersion is not None:
            # "Found new version {0}. View the release notes now?"
            confirm = wx.MessageBox(
                u"发现新版本:{0}。是否立即查看更新信息?".format(newVersion),
                u"pNJU 发现新版本",  # "pNJU found a new version"
                wx.YES_NO | wx.YES_DEFAULT
            )
            if confirm == wx.YES:
                import webbrowser
                webbrowser.open(config.WEBSITE)
def EdgesBetweenSequences(self):
    """Write edges between sequences, if they share a common domain.
    """
    if self.mLogLevel >= 1:
        print "# instance of <" + str(self.__class__) + "> on " + \
              time.asctime(time.localtime(time.time()))
        print "# source: %s" % (self.mTableNameDomains)
        sys.stdout.flush()

    class_ids = self.mTableFamilies.GetAllClasses()
    class_name = self.mTableFamilies.GetFieldNameClass()

    for class_id in class_ids:
        statement = """
        SELECT a.nid, COUNT(*) AS counts
        FROM %s AS a
        WHERE %s = %s
        GROUP BY a.nid
        """ % (self.mTableNameDomains, class_name, class_id)
        result = self.dbhandle.Execute(statement).fetchall()

        for x in range(0, len(result) - 1):
            nid1, counts1 = result[x]
            for y in range(x + 1, len(result)):
                nid2, counts2 = result[y]
                print string.join(map(str, (nid1, nid2, counts1 * counts2,
                                             class_id)), "\t")
# bare clock() and join() below need these imports to resolve
from time import clock
from string import join


def main():
    t1 = clock()
    filename = "HashInt.txt"
    inputfile = open(filename, 'r')
    numberDict = dict()
    for line in inputfile:
        number = eval(line)
        if not (number in numberDict):
            numberDict[number] = None
    inputfile.close()

    resultList = [None] * 9
    testfile = open("test.txt", 'r')
    index = 0
    for line in testfile:
        dest = eval(line)
        for number in numberDict.keys():
            if (dest - number) in numberDict:
                resultList[index] = '1'
                break
        if resultList[index] == None:
            resultList[index] = '0'
        index = index + 1
    print join(resultList, "")
    testfile.close()
    print clock() - t1
def iter_some(self, depth, available, extra):
    self.writeout("recur: [%s] %s %d/%d\n" % (
        string.join(self.selected[:self.selected_committed], ","),
        string.join(self.selected[self.selected_committed:], ","),
        len(available), len(extra)))
    if depth == 0:
        extra.extend(available)
        return self.iter_end(extra)

    nuninst = None
    while len(available) > depth:
        x = available.pop(0)
        self.doop_source(x)
        self.selected.append(x)
        res = self.iter_some(depth - 1, available[:], extra[:])
        if res[0]:
            nuninst = res[0]
            available = filter(lambda x, y=res[1]: x in y, available + extra)
            # reset nuninst_orig too
            self.nuninst_orig = nuninst
            extra = []
            continue
        self.srcsn.undo_change()
        self.selected.pop()
        extra.append(x)
    return (nuninst, extra)
def NumAssignments(self):
    """Write number of assignments
    """
    if self.mLogLevel >= 1:
        print "# instance of <" + str(self.__class__) + "> on " + \
              time.asctime(time.localtime(time.time()))
        print "# source: %s" % (self.mTableNameDomains)
        print "# hubs between domains"
        print string.join(("nid", "nassignments", "nclasses"), "\t")
        sys.stdout.flush()

    result = self.mTableDomains.GetNumAssignments()
    for r in result:
        print string.join(map(str, r), "\t")

    data = map(lambda x: x[1], result)
    h = Histogram.Calculate(data)
    print "# histogram of number of domains per sequence"
    Histogram.Print(h)

    data = map(lambda x: x[2], result)
    h = Histogram.Calculate(data)
    print "# histogram of number of different domains per sequence"
    Histogram.Print(h)
def extract_key(self):
    text = string.strip(self.text)
    key = string.join(string.split(text))
    words = string.split(key)
    key = string.join(words[:2])
    self.text = ''
    try:
        self.open_tag = self.open_tag_stack.pop()
    except:
        self.open_tag = 'open_html'

    if self.open_tag == 'open_table_data':
        if self.context == 'general_info':
            if self.key_waiting == '':
                self.key_waiting = key
                self.text = ''
        elif self.context == 'seq_info':
            if text == 'Key to Symbols':
                self.context = 'legend'
                self.master_key = key
    elif self.context == 'general_info':
        self.master_key = key
        if string.find(key, 'SEQUENCE') != -1:
            self.context = 'seq_info'
            self.queue[key] = UserDict.UserDict()
    elif self.context == 'seq_info':
        self.queue[key] = UserDict.UserDict()
        self.master_key = key
def output(self, seq, outfile, occdata=[]):  ### Output to file
    '''Output to file.'''
    try:
        ### ~ [1] ~ Basic Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
        if self.opt['OccOnly'] and not occdata:
            return
        odata = ['Name\t%s' % (seq.shortName()),
                 'Sequence\t%s' % (seq.getSequence(gaps=False)),
                 'Output\t%s' % (string.join(string.split(outfile, '.')[:-1], '.')),
                 'RE\t%s' % (string.join(self.list['PlotRE'], ',')),
                 'TrueELMs\tY',
                 'Description\t%s' % (seq.info['Description']),
                 '']
        ### ~ [2] ~ PlotStats ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
        for plot in rje.sortKeys(seq.dict['PlotStat']):
            odata.append('Plot\t%s\t%s' % (plot,
                         string.join(seq.dict['PlotStat'][plot], ', ')))
        if seq.dict['PlotStat']:
            odata.append('')
        ### ~ [3] ~ PlotFT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
        if seq.obj['Entry']:
            for ft in seq.obj['Entry'].list['Feature']:
                if ft['Type'] in self.list['PlotFT']:
                    odata.append('Region\t%s %s\t%s:%s' % (ft['Type'], ft['Desc'],
                                                           ft['Start'], ft['End']))
            odata.append('')
        ### ~ [4] ~ MotifOcc ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
        if occdata:
            for occ in occdata:
                odata.append('Motif\t%s\t%s:%s' % (occ['Pattern'],
                                                   occ['Start_Pos'], occ['End_Pos']))
        ### ~ [5] ~ Output ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
        open(outfile, 'w').write(string.join(odata, '\n'))
        self.printLog('#PLOT', 'SeqPlot output saved as %s' % (outfile))
    except:
        self.errorLog(rje_zen.Zen().wisdom())
def PrintRecords(labels, s4, fmtHead, fmtTail="", printHex=True, printNorm=True):
    fmt = fmtHead
    szHead = struct.calcsize(fmtHead)
    szTail = struct.calcsize(fmtTail)
    printableHead = string.join([x for x in fmtHead if fmtmap.has_key(x)], "")
    printableTail = string.join([x for x in fmtTail if fmtmap.has_key(x)], "")
    if fmtTail != "":
        gap = len(s4[0]) - (struct.calcsize(fmtHead) + struct.calcsize(fmtTail))
        fmt = fmtHead + ("x" * gap) + fmtTail
        labels = ["LINE"] + labels[:len(printableHead)] + \
                 labels[len(labels) - len(printableTail):]

    PrintMultiLineLabels(labels, 6)
    sys.stdout.write(6 * " ")
    PrintByteLabels(fmt, len(s4))
    for i in range(0, len(s4)):
        if printNorm:
            sys.stdout.write("%5i:%s\n" % (i, StructToString(s4[i], fmt, 6)))
        if printHex:
            sys.stdout.write("\33[0m")
            sys.stdout.write(" %s\n" %
                             (StructToString(s4[i], fmt, 6,
                                             color=False, hexonly=True)))
        if not ((i + 1) % 40) or (i == len(s4) - 1):
            PrintMultiLineLabels(labels, 6)
            sys.stdout.write(6 * " ")
            PrintByteLabels(fmt, len(s4))
            #HexPrintMod(s4[i][:szHead].tostring() + s4[i][len(s4[i]) - szTail:].tostring(), szHead + szTail)
    PrintByteStats(s4, fmt)
def readKit(self, parent):
    """
    Reads the data from an XML file (auxiliary recursive function).
    """
    for node in parent.childNodes:
        if node.nodeType == Node.ELEMENT_NODE:
            attrs = node.attributes
            for attrName in attrs.keys():
                attrNode = attrs.get(attrName)
                attrValue = attrNode.nodeValue
                self._Kit[attrName] = attrValue
            content = []
            for child in node.childNodes:
                if child.nodeType == Node.TEXT_NODE:
                    content.append(child.nodeValue)
            if content and (node.nodeName != "control" and
                            node.nodeName != "kitPackage"):
                strContent = string.join(content)
                tmpContent = strContent.replace("\n ", "")
                self._Kit[node.nodeName] = tmpContent.replace("\n ", "")
            if content and node.nodeName == "kitPackage":
                strContent = string.join(content)
                tmpContent = strContent.replace("\n ", "")
                self._kitPackages.append(tmpContent.replace("\n ", ""))
            self.readKit(node)
def gettags(comment):
    tags = []
    tag = None
    tag_lineno = lineno = 0
    tag_text = []
    for line in comment:
        if line[:1] == "@":
            tags.append((tag_lineno, tag, string.join(tag_text, "\n")))
            line = string.split(line, " ", 1)
            tag = line[0][1:]
            if len(line) > 1:
                tag_text = [line[1]]
            else:
                tag_text = []
            tag_lineno = lineno
        else:
            tag_text.append(line)
        lineno = lineno + 1
    tags.append((tag_lineno, tag, string.join(tag_text, "\n")))
    return tags
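# Hedged demo of gettags() on an invented javadoc-style comment; the
# leading (None-tagged) entry collects any text before the first @tag.
comment = ["Adds two numbers.", "@param a first addend", "@return the sum"]
for tag_lineno, tag, text in gettags(comment):
    print tag_lineno, tag, repr(text)
# -> 0 None 'Adds two numbers.'
#    1 'param' 'a first addend'
#    2 'return' 'the sum'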
def Table(self, t, has_head=1, headcolor="#ffc0c0", bgcolor="#ffffff"):
    if has_head:
        head = []
        for th in t.pop(0):
            head.append("<th><font size=\"-1\" face=\"arial,sans-serif\">"
                        "%s</font></th>" % th)
        # endfor
        # close the header row with </tr> (the original emitted </th>)
        table = ["<tr bgcolor=\"%s\">\n%s</tr>\n" % (
            headcolor, string.join(head, "\n"))]
    else:
        table = []
    # endif
    for tr in t:
        row = []
        for td in tr:
            row.append("<td><font size=\"-1\" face=\"arial,sans-serif\">"
                       "%s</font></td>" % td)
        # endfor
        table.append("<tr bgcolor=\"%s\">\n%s</tr>\n" % (
            bgcolor, string.join(row, "\n")))
    # endfor
    return "<p><table cellpadding=5 width=100%%>\n%s\n</table></p>\n" % (
        string.join(table, "\n"))
def build_post_policy(self, expiration_time, conditions):
    # string exceptions are invalid on Python >= 2.6; raise real
    # exception types instead
    if type(expiration_time) != time.struct_time:
        raise ValueError('Policy document must include a valid expiration Time object')
    if type(conditions) != types.DictionaryType:
        raise ValueError('Policy document must include a valid conditions Hash object')

    # Convert conditions object mappings to condition statements
    conds = []
    for name in conditions:
        test = conditions[name]
        if not test:
            # A nil condition value means allow anything.
            conds.append('["starts-with", "$%s", ""]' % name)
        elif type(test) == types.StringType:
            conds.append('{"%s": "%s"}' % (name, test))
        elif type(test) == types.ListType:
            conds.append('{"%s": "%s"}' % (name, string.join(test, ',')))
        elif type(test) == types.DictionaryType:
            operation = test['op']
            value = test['value']
            conds.append('["%s", "$%s", "%s"]' % (operation, name, value))
        elif type(test) == types.SliceType:
            conds.append('["%s", %i, %i]' % (name, test.start, test.stop))
        else:
            raise TypeError('Unexpected value type for condition "%s": %s'
                            % (name, type(test)))

    # use the expiration_time parameter (the original referenced an
    # undefined name `expiration`)
    return '{"expiration": "%s",\n"conditions": [%s]}' % \
           (time.strftime(self.ISO8601, expiration_time),
            string.join(conds, ','))
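# Hedged usage sketch; `conn` stands in for an instance of the class
# above, the bucket/ACL values are invented, and self.ISO8601 is assumed
# to be a strftime format such as '%Y-%m-%dT%H:%M:%SZ'.
expiration = time.gmtime(time.time() + 3600)    # valid for one hour
policy = conn.build_post_policy(expiration, {
    'bucket': 'my-bucket',    # exact-match condition
    'key': None,              # None -> allow any key (starts-with "")
    'acl': 'private',
})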
def toXML(self, writer, ttFont):
    if hasattr(ttFont, "disassembleInstructions") and ttFont.disassembleInstructions:
        assembly = self.getAssembly()
        writer.begintag("assembly")
        writer.newline()
        i = 0
        nInstr = len(assembly)
        while i < nInstr:
            instr = assembly[i]
            writer.write(instr)
            writer.newline()
            m = _pushCountPat.match(instr)
            i = i + 1
            if m:
                nValues = int(m.group(1))
                line = []
                j = 0
                for j in range(nValues):
                    if j and not (j % 25):
                        writer.write(string.join(line, " "))
                        writer.newline()
                        line = []
                    line.append(assembly[i + j])
                writer.write(string.join(line, " "))
                writer.newline()
                i = i + j + 1
        writer.endtag("assembly")
    else:
        writer.begintag("bytecode")
        writer.newline()
        writer.dumphex(self.getBytecode())
        writer.endtag("bytecode")
def compact_traceback():
    t, v, tb = sys.exc_info()
    tbinfo = []
    if tb is None:
        # this should never happen, but then again, lots of things
        # should never happen but do.
        return (('', '', ''), str(t), str(v), 'traceback is None!!!')
    while 1:
        # append a single tuple (the original passed three arguments,
        # a Python 1.x idiom that raises TypeError on modern Python 2)
        tbinfo.append((
            tb.tb_frame.f_code.co_filename,
            tb.tb_frame.f_code.co_name,
            str(tb.tb_lineno)
        ))
        tb = tb.tb_next
        if not tb:
            break

    # just to be safe
    del tb

    file, function, line = tbinfo[-1]
    info = '[' + string.join(
        map(lambda x: string.join(x, '|'), tbinfo),
        '] ['
    ) + ']'
    return (file, function, line), str(t), str(v), info
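# Hedged demo of compact_traceback(); it must be called from inside an
# except block so sys.exc_info() has a traceback to walk.
try:
    1 / 0
except:
    (file, function, line), t, v, info = compact_traceback()
    print 'error in %s at line %s: %s' % (file, line, info)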
def MoveFolder(self, From, To):
    OPList = SymbolTools.SplitPath(From)
    OName = OPList[-1]
    SPList = OPList[:-1]
    Src = "/" + string.join(SPList, "/")
    NPList = SymbolTools.SplitPath(To)
    NName = NPList[-1]
    DPList = NPList[:-1]
    Dest = "/" + string.join(DPList, "/")
    # check if source exists:
    if not self.CheckFolder(From):
        raise Exceptions.ItemNotFound(
            "Can't move folder %s to %s: source doesn't exist!" % (From, To))
    # check if destination is a symbol:
    if self.CheckSymbol(To):
        raise Exceptions.SymbolError(
            "Can't move folder %s to %s: destination already exists!" % (From, To))
    # remove folder from source:
    folder_obj = self.__GetElementByPath(OPList)
    src_obj = self.__GetElementByPath(SPList)
    src_obj.RemoveFolder(OName)
    # check that the dest-folder isn't inside the src-folder:
    if not self.CheckFolder(Dest):
        src_obj.AddFolder(OName, folder_obj)
        raise Exceptions.SymbolError(
            "Can't move folder %s to %s: destination is part of the source!" % (From, To))
    # move folder
    dest_obj = self.__GetElementByPath(DPList)
    folder_obj.Rename(NName)
    dest_obj.AddFolder(NName, folder_obj)
def generate(tmpl, name, table):
    w_arg_names = [a[1] for a in table[name] if a[0] == 'fop-arg']
    w_arg_types = [a[2] for a in table[name] if a[0] == 'fop-arg']
    u_arg_names = [a[1] for a in table[name] if a[0] == 'cbk-arg']
    u_arg_types = [a[2] for a in table[name] if a[0] == 'cbk-arg']
    fn_arg_names = [a[1] for a in table[name] if a[0] == 'fn-arg']
    fn_arg_types = [a[2] for a in table[name] if a[0] == 'fn-arg']
    ret_type = [a[1] for a in table[name] if a[0] == 'ret-val']
    ret_var = [a[2] for a in table[name] if a[0] == 'ret-val']

    sdict = {}
    # Parameters are (t1, var1), (t2, var2)...
    # Args are (var1, var2,...)
    sdict["@WIND_ARGS@"] = string.join(w_arg_names, ", ")
    sdict["@UNWIND_ARGS@"] = string.join(u_arg_names, ", ")
    sdict["@ERROR_ARGS@"] = string.join(map(get_error_arg, u_arg_types), ", ")
    sdict["@WIND_PARAMS@"] = get_param(w_arg_names, w_arg_types)
    sdict["@UNWIND_PARAMS@"] = get_param(u_arg_names, u_arg_types)
    sdict["@FUNC_PARAMS@"] = get_param(fn_arg_names, fn_arg_types)
    sdict["@NAME@"] = name
    sdict["@FOP_PREFIX@"] = fop_prefix
    sdict["@RET_TYPE@"] = string.join(ret_type, "")
    sdict["@RET_VAR@"] = string.join(ret_var, "")

    for old, new in sdict.iteritems():
        tmpl = tmpl.replace(old, new)

    # TBD: reindent/reformat the result for maximum readability.
    return tmpl
def replace(file, searchkey, replacekey):
    regex = re.compile(searchkey)
    str_file = str(file)
    str_rep = regex.sub(replacekey, string.join(str_file, ''))
    return posixpath(str_rep)
def build_reply_header(self):
    return string.join(
        [self.response(self.reply_code)] +
        map(lambda x: '%s: %s' % x, self.reply_headers.items()),
        '\r\n') + '\r\n\r\n'
def nice_bytes(n):
    return string.join(status_handler.english_bytes(n))
class http_request:

    # default reply code
    reply_code = 200

    request_counter = counter()

    # Whether to automatically use chunked encoding when
    #
    #   HTTP version is 1.1
    #   Content-Length is not set
    #   Chunked encoding is not already in effect
    #
    # If your clients are having trouble, you might want to disable this.
    use_chunked = 1

    # by default, this request object ignores user data.
    collector = None

    def __init__(self, *args):
        # unpack information about the request
        (self.channel, self.request,
         self.command, self.uri, self.version,
         self.header) = args

        self.outgoing = []
        self.reply_headers = {
            'Server': 'Medusa/%s' % VERSION_STRING,
            'Date': http_date.build_http_date(time.time())
        }
        self.request_number = http_request.request_counter.increment()
        self._split_uri = None
        self._header_cache = {}

    # --------------------------------------------------
    # reply header management
    # --------------------------------------------------
    def __setitem__(self, key, value):
        self.reply_headers[key] = value

    def __getitem__(self, key):
        return self.reply_headers[key]

    def has_key(self, key):
        return self.reply_headers.has_key(key)

    def build_reply_header(self):
        return string.join(
            [self.response(self.reply_code)] +
            map(lambda x: '%s: %s' % x, self.reply_headers.items()),
            '\r\n') + '\r\n\r\n'

    # --------------------------------------------------
    # split a uri
    # --------------------------------------------------

    # <path>;<params>?<query>#<fragment>
    path_regex = re.compile(
        #       path       params       query      fragment
        r'([^;?#]*)(;[^?#]*)?(\?[^#]*)?(#.*)?')

    def split_uri(self):
        if self._split_uri is None:
            m = self.path_regex.match(self.uri)
            if m.end() != len(self.uri):
                raise ValueError, "Broken URI"
            else:
                self._split_uri = m.groups()
        return self._split_uri

    def get_header_with_regex(self, head_reg, group):
        for line in self.header:
            m = head_reg.match(line)
            if m.end() == len(line):
                return m.group(group)
        return ''

    def get_header(self, header):
        header = string.lower(header)
        hc = self._header_cache
        if not hc.has_key(header):
            h = header + ': '
            hl = len(h)
            for line in self.header:
                if string.lower(line[:hl]) == h:
                    r = line[hl:]
                    hc[header] = r
                    return r
            hc[header] = None
            return None
        else:
            return hc[header]

    # --------------------------------------------------
    # user data
    # --------------------------------------------------

    def collect_incoming_data(self, data):
        if self.collector:
            self.collector.collect_incoming_data(data)
        else:
            self.log_info(
                'Dropping %d bytes of incoming request data' % len(data),
                'warning')

    def found_terminator(self):
        if self.collector:
            self.collector.found_terminator()
        else:
            self.log_info('Unexpected end-of-record for incoming request',
                          'warning')

    def push(self, thing):
        if type(thing) == type(''):
            self.outgoing.append(producers.simple_producer(thing))
        else:
            self.outgoing.append(thing)

    def response(self, code=200):
        message = self.responses[code]
        self.reply_code = code
        return 'HTTP/%s %d %s' % (self.version, code, message)

    def error(self, code):
        self.reply_code = code
        message = self.responses[code]
        s = self.DEFAULT_ERROR_MESSAGE % {
            'code': code,
            'message': message,
        }
        self['Content-Length'] = len(s)
        self['Content-Type'] = 'text/html'
        # make an error reply
        self.push(s)
        self.done()

    # can also be used for empty replies
    reply_now = error

    def done(self):
        "finalize this transaction - send output to the http channel"

        # ----------------------------------------
        # persistent connection management
        # ----------------------------------------

        #  --- BUCKLE UP! ----

        connection = string.lower(get_header(CONNECTION, self.header))

        close_it = 0
        wrap_in_chunking = 0

        if self.version == '1.0':
            if connection == 'keep-alive':
                if not self.has_key('Content-Length'):
                    close_it = 1
                else:
                    self['Connection'] = 'Keep-Alive'
            else:
                close_it = 1
        elif self.version == '1.1':
            if connection == 'close':
                close_it = 1
            elif not self.has_key('Content-Length'):
                if self.has_key('Transfer-Encoding'):
                    if not self['Transfer-Encoding'] == 'chunked':
                        close_it = 1
                elif self.use_chunked:
                    self['Transfer-Encoding'] = 'chunked'
                    wrap_in_chunking = 1
                else:
                    close_it = 1
        elif self.version is None:
            # Although we don't *really* support http/0.9 (because we'd
            # have to use \r\n as a terminator, and it would just yuck
            # up a lot of stuff) it's very common for developers to not
            # want to type a version number when using telnet to debug
            # a server.
            close_it = 1

        outgoing_header = producers.simple_producer(self.build_reply_header())

        if close_it:
            self['Connection'] = 'close'

        if wrap_in_chunking:
            outgoing_producer = producers.chunked_producer(
                producers.composite_producer(self.outgoing))
            # prepend the header
            outgoing_producer = producers.composite_producer(
                [outgoing_header, outgoing_producer])
        else:
            # prepend the header
            self.outgoing.insert(0, outgoing_header)
            outgoing_producer = producers.composite_producer(self.outgoing)

        # apply a few final transformations to the output
        self.channel.push_with_producer(
            # globbing gives us large packets
            producers.globbing_producer(
                # hooking lets us log the number of bytes sent
                producers.hooked_producer(outgoing_producer, self.log)))

        self.channel.current_request = None

        if close_it:
            self.channel.close_when_done()

    def log_date_string(self, when):
        gmt = time.gmtime(when)
        if time.daylight and gmt[8]:
            tz = time.altzone
        else:
            tz = time.timezone
        if tz > 0:
            neg = 1
        else:
            neg = 0
            tz = -tz
        h, rem = divmod(tz, 3600)
        m, rem = divmod(rem, 60)
        if neg:
            offset = '-%02d%02d' % (h, m)
        else:
            offset = '+%02d%02d' % (h, m)
        return time.strftime('%d/%b/%Y:%H:%M:%S ', gmt) + offset

    def log(self, bytes):
        self.channel.server.logger.log(
            self.channel.addr[0],
            '%d - - [%s] "%s" %d %d\n' % (
                self.channel.addr[1],
                self.log_date_string(time.time()),
                self.request,
                self.reply_code,
                bytes))

    responses = {
        100: "Continue",
        101: "Switching Protocols",
        200: "OK",
        201: "Created",
        202: "Accepted",
        203: "Non-Authoritative Information",
        204: "No Content",
        205: "Reset Content",
        206: "Partial Content",
        300: "Multiple Choices",
        301: "Moved Permanently",
        302: "Moved Temporarily",
        303: "See Other",
        304: "Not Modified",
        305: "Use Proxy",
        400: "Bad Request",
        401: "Unauthorized",
        402: "Payment Required",
        403: "Forbidden",
        404: "Not Found",
        405: "Method Not Allowed",
        406: "Not Acceptable",
        407: "Proxy Authentication Required",
        408: "Request Time-out",
        409: "Conflict",
        410: "Gone",
        411: "Length Required",
        412: "Precondition Failed",
        413: "Request Entity Too Large",
        414: "Request-URI Too Large",
        415: "Unsupported Media Type",
        500: "Internal Server Error",
        501: "Not Implemented",
        502: "Bad Gateway",
        503: "Service Unavailable",
        504: "Gateway Time-out",
        505: "HTTP Version not supported"
    }

    # Default error message
    DEFAULT_ERROR_MESSAGE = string.join(
        ['<head>',
         '<title>Error response</title>',
         '</head>',
         '<body>',
         '<h1>Error response</h1>',
         '<p>Error code %(code)d.',
         '<p>Message: %(message)s.',
         '</body>',
         ''],
        '\r\n')
def getCode(self):
    "pack onto one line; used internally"
    self._code.append('ET')
    return string.join(self._code, ' ')
def getCode(self):
    "pack onto one line; used internally"
    return string.join(self._code, ' ')
def setPageTransition(self, effectname=None, duration=1,
                      direction=0, dimension='H', motion='I'):
    """PDF allows page transition effects for use when giving
    presentations.  There are six possible effects.  You can just give
    the effect name, or supply more advanced options to refine the way
    it works.  There are three types of extra argument permitted, and
    here are the allowed values:

        direction_arg = [0, 90, 180, 270]
        dimension_arg = ['H', 'V']
        motion_arg = ['I', 'O']  (start at inside or outside)

    This table says which ones take which arguments:

        PageTransitionEffects = {
            'Split': [direction_arg, motion_arg],
            'Blinds': [dimension_arg],
            'Box': [motion_arg],
            'Wipe': [direction_arg],
            'Dissolve': [],
            'Glitter': [direction_arg]
            }

    Have fun!
    """
    if not effectname:
        self._pageTransitionString = ''
        return

    # first check each optional argument has an allowed value
    # (string exceptions are invalid on Python >= 2.6, so the checks
    # below raise ValueError instead of the original 'PDFError' strings)
    if direction in [0, 90, 180, 270]:
        direction_arg = '/Di /%d' % direction
    else:
        raise ValueError('directions allowed are 0, 90, 180, 270')

    if dimension in ['H', 'V']:
        dimension_arg = '/Dm /%s' % dimension
    else:
        raise ValueError('dimension values allowed are H and V')

    if motion in ['I', 'O']:
        motion_arg = '/M /%s' % motion
    else:
        raise ValueError('motion values allowed are I and O')

    # this says which effects require which argument types from above
    PageTransitionEffects = {
        'Split': [direction_arg, motion_arg],
        'Blinds': [dimension_arg],
        'Box': [motion_arg],
        'Wipe': [direction_arg],
        'Dissolve': [],
        'Glitter': [direction_arg]
        }

    try:
        args = PageTransitionEffects[effectname]
    except KeyError:
        raise ValueError('Unknown Effect Name "%s"' % effectname)

    self._pageTransitionString = (('/Trans <</D %d /S /%s ' %
                                   (duration, effectname)) +
                                  string.join(args, ' ') + ' >>')
def explain(self):
    if not self.exists():
        return "building `%s' because it doesn't exist\n" % self

    if self.always_build:
        return "rebuilding `%s' because AlwaysBuild() is specified\n" % self

    old = self.get_stored_info()
    if old is None:
        return None

    old = old.binfo
    old.prepare_dependencies()

    try:
        old_bkids = old.bsources + old.bdepends + old.bimplicit
        old_bkidsigs = old.bsourcesigs + old.bdependsigs + old.bimplicitsigs
    except AttributeError:
        return "Cannot explain why `%s' is being rebuilt: No previous build information found\n" % self

    new = self.get_binfo()

    new_bkids = new.bsources + new.bdepends + new.bimplicit
    new_bkidsigs = new.bsourcesigs + new.bdependsigs + new.bimplicitsigs

    osig = dict(izip(old_bkids, old_bkidsigs))
    nsig = dict(izip(new_bkids, new_bkidsigs))

    # The sources and dependencies we'll want to report are all stored
    # as relative paths to this target's directory, but we want to
    # report them relative to the top-level SConstruct directory, so we
    # only print them after running them through this lambda to turn
    # them into the right relative Node and then return its string.
    def stringify(s, E=self.dir.Entry):
        if hasattr(s, 'dir'):
            return str(E(s))
        return str(s)

    lines = []

    removed = filter(lambda x, nk=new_bkids: not x in nk, old_bkids)
    if removed:
        removed = map(stringify, removed)
        fmt = "`%s' is no longer a dependency\n"
        lines.extend(map(lambda s, fmt=fmt: fmt % s, removed))

    for k in new_bkids:
        if not k in old_bkids:
            lines.append("`%s' is a new dependency\n" % stringify(k))
        elif k.changed_since_last_build(self, osig[k]):
            lines.append("`%s' changed\n" % stringify(k))

    if len(lines) == 0 and old_bkids != new_bkids:
        lines.append("the dependency order changed:\n" +
                     "%sold: %s\n" % (' '*15, map(stringify, old_bkids)) +
                     "%snew: %s\n" % (' '*15, map(stringify, new_bkids)))

    if len(lines) == 0:
        def fmt_with_title(title, strlines):
            lines = string.split(strlines, '\n')
            sep = '\n' + ' '*(15 + len(title))
            return ' '*15 + title + string.join(lines, sep) + '\n'
        if old.bactsig != new.bactsig:
            if old.bact == new.bact:
                lines.append("the contents of the build action changed\n" +
                             fmt_with_title('action: ', new.bact))
            else:
                lines.append("the build action changed:\n" +
                             fmt_with_title('old: ', old.bact) +
                             fmt_with_title('new: ', new.bact))

    if len(lines) == 0:
        return "rebuilding `%s' for unknown reasons\n" % self

    preamble = "rebuilding `%s' because" % self
    if len(lines) == 1:
        return "%s %s" % (preamble, lines[0])
    else:
        lines = ["%s:\n" % preamble] + lines
        return string.join(lines, ' '*11)
def gen_handler_imp(addon, md, handler):
    __check_kv = lambda key, val, handler: \
        (key in handler) and (val == handler[key])
    __request_body_action = lambda action: lambda handler: \
        __check_kv('action_for_request_body', action, handler)
    __read_request_body = __request_body_action('read')
    __discard_request_body = __request_body_action('discard')
    __upstream_has_key = lambda key: lambda handler: \
        use_upstream(handler) and (key in handler['upstream'] and
                                   handler['upstream'][key])
    __use_parallel = __upstream_has_key('parallel_subrequests')
    __use_sequential = __upstream_has_key('sequential_subrequests')

    __gen_args_decoder = lambda md, handler: tpls['decode_args'].substitute({
        'var_fields': merge(['    %s %s;' % (type_dict[arg[TYPE]], arg[NAME])
                             for arg in handler['args']]),
        'var_flags': merge(['    ngx_bool_t has_%s;' % arg[NAME]
                            for arg in handler['args']]),
        'var_arg_t': get_args(md, handler),
        'var_decoder': get_decoder(md, handler),
        'var_reset': merge(['    args->%s = %s;' % (arg[NAME], arg[VAL])
                            if len(arg) > VAL else FILTER
                            for arg in handler['args']]),
        'var_impl': merge([tpls['decode_arg'].substitute({
            'var_key': arg[NAME].upper(),
            'var_field': arg[NAME],
            'var_val': val_dict[arg[TYPE]]
        }) for arg in handler['args']]),
    }) if 'args' in handler else FILTER

    __gen_ctx = lambda md, handler: \
        tpls['parallel_ctx'].substitute({'var_ctx_t': get_ctx(md, handler)}) \
        if __use_parallel(handler) else substitute_tpls(tpls['upstream_ctx'], {
            'var_peer': '    %s;' % consts['peer_def']
                        if __use_sequential(handler) else FILTER,
            'var_ctx_t': get_ctx(md, handler)
        }) if use_upstream(handler) else FILTER

    __gen_check_parameter = lambda: tpls['check'].substitute(
        {'var_args': consts['request_args']})

    __gen_post_subrequest_handler = lambda md, handler: \
        FILTER if (__use_parallel(handler) or not use_upstream(handler)) \
        else tpls['post_subrequest'].substitute(
            {'var_ctx_t': get_ctx(md, handler)})

    __gen_sr_peer = lambda handler: \
        'ctx->peer' if __use_sequential(handler) else 'peer'

    __gen_sr = lambda prefix, backend_uri, handler: merge([
        merge(['    // TODO: initialize the peer here',
               '    %s = NULL;' % consts['peer_def']])
        if not __use_sequential(handler) else FILTER,
        '    %sngx_http_gen_subrequest(%s, r, %s,' % (
            prefix, backend_uri, __gen_sr_peer(handler)),
        '        &ctx->base, __post_subrequest_handler);'
    ])

    __gen_post_body_impl = lambda md, handler: \
        tpls['parallel_post_body'].substitute({
            'var_parallel_call': gen_parallel_call(
                __use_parallel, handler, '    // TODO')
        }) if __use_parallel(handler) else \
        tpls['sequential_post_body'].substitute({
            'var_ctx_t': get_ctx(md, handler),
            'var_sr': __gen_sr('', 'ctx->base.backend_uri', handler)
        }) if use_upstream(handler) else \
        '    ngx_http_post_body_handler(r, __post_body_cb);'

    __gen_post_body_handler = lambda md, handler: merge([
        FILTER if use_upstream(handler) else tpls['post_body_cb'].template,
        tpls['post_body_handler'].substitute(
            {'var_impl': __gen_post_body_impl(md, handler)})
    ]) if __read_request_body(handler) else FILTER

    __gen_methods_filter = lambda handler: \
        tpls['http_not_allowed'].substitute({
            'var_cond': string.join(
                ['!(r->method & NGX_HTTP_%s)' % method.upper()
                 for method in handler['methods']], ' && ')
        }) if 'methods' in handler and len(handler['methods']) > 0 else FILTER

    __gen_discard_body = lambda handler: \
        tpls['discard_body'].template if __discard_request_body(handler) else FILTER

    __gen_init_peer = lambda handler: \
        tpls['init_peer'].template if __use_sequential(handler) else FILTER

    __gen_init_ctx_base = lambda handler: \
        '    ctx->base.backend_uri = backend_uri;' \
        if __read_request_body(handler) else FILTER

    __gen_init_ctx = lambda handler: \
        '    ctx->peer = ngx_http_first_peer(peers->peer);\n' \
        if __use_sequential(handler) else FILTER

    __gen_read_body = lambda handler: tpls['read_body'].substitute({
        'var_rc': ('rc' if __discard_request_body(handler) else 'ngx_int_t rc')
    })

    __gen_first_handler = lambda md, handler: \
        FILTER if __use_parallel(handler) else merge([
            'static ngx_int_t __first_%s_handler(%s)' % (
                get_uri(handler), consts['request_args']),
            '{',
            __gen_methods_filter(handler),
            __gen_discard_body(handler),
            tpls['call_check'].template,
            __gen_init_peer(handler),
            gen_create_ctx(md, handler),
            __gen_init_ctx_base(handler),
            __gen_init_ctx(handler),
            __gen_read_body(handler) if __read_request_body(handler)
            else __gen_sr('return ', 'backend_uri', handler),
            '}',
            ''
        ]) if use_upstream(handler) else FILTER

    __gen_first_loop = lambda md, handler: tpls['first_loop'].substitute({
        'var_ctx_t': get_ctx(md, handler),
        'var_first_handler': '__first_%s_handler' % get_uri(handler)
    })

    __gen_next_loop = lambda handler: tpls['next_loop'].substitute(
        {'var_sr_peer': __gen_sr_peer(handler)}) \
        if __use_sequential(handler) else FILTER

    __gen_request_handler = lambda md, handler: merge([
        'ngx_int_t %s_%s_handler(%s)' % (
            md, get_uri(handler), consts['request_args']),
        '{',
        merge([
            __gen_methods_filter(handler),
            __gen_discard_body(handler),
            tpls['call_check'].template,
            merge(['    %s' % consts['set_ctx'], __gen_read_body(handler)])
            if __read_request_body(handler) else
            gen_parallel_call(__use_parallel, handler, '    return NGX_DONE;'),
        ]) if __use_parallel(handler) else merge([
            __gen_first_loop(md, handler),
            __gen_next_loop(handler),
            tpls['final_loop'].template
        ]) if use_upstream(handler) else merge([
            __gen_methods_filter(handler),
            __gen_discard_body(handler),
            tpls['call_check'].template,
            __gen_read_body(handler) if __read_request_body(handler)
            else tpls['default_handler'].template
        ]),
        '}'
    ])

    write_file(
        '%s/%s_%s_handler.c' % (addon, md, get_uri(handler)),
        merge([
            '#include "%s_handler.h"' % md,
            '',
            __gen_args_decoder(md, handler),
            __gen_ctx(md, handler),
            __gen_check_parameter(),
            gen_parallel_imp(__use_parallel, md, handler),
            __gen_post_subrequest_handler(md, handler),
            __gen_post_body_handler(md, handler),
            __gen_first_handler(md, handler),
            __gen_request_handler(md, handler)
        ]))
def fmt_with_title(title, strlines):
    lines = string.split(strlines, '\n')
    sep = '\n' + ' ' * (15 + len(title))
    return ' ' * 15 + title + string.join(lines, sep) + '\n'
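# Hedged demo of fmt_with_title(); the command strings are invented.
# Continuation lines are indented 15 + len(title) columns so they line
# up under the first line of the value.
print fmt_with_title('action: ', 'gcc -c foo.c\ngcc -o foo foo.o')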
def __updatewithQueryStatus(self):
    """ query device to update information """
    try:
        fwv = self.drobo.GetSubPageFirmware()
    except:
        self.statusBar().showMessage(
            'bad poll: %d.. need to restart' % self.updates)
        return

    settings = self.drobo.GetSubPageSettings()
    self.Device.id.setText(self.drobo.GetCharDev() + ' ' + settings[2] +
                           ' firmware: ' + fwv[7])
    self.Device.id.setToolTip("Firmware build: " + str(fwv[0]) + '.' +
                              str(fwv[1]) + '.' + str(fwv[2]) +
                              "\n Features: " + string.join(fwv[8], "\n"))

    self.s = self.drobo.GetSubPageSlotInfo()
    luninfo = self.drobo.GetSubPageLUNs()
    luntooltip = "luns, count: " + str(len(luninfo)) + "\n"
    for l in luninfo:
        luntooltip = luntooltip + "lun id: " + str(l[0]) + " used: " + \
            _toGB(l[2]) + " total: " + _toGB(l[1])
        if 'SUPPORTS_NEW_LUNINFO2' in self.drobo.features:
            luntooltip = luntooltip + " scheme: " + str(l[3]) + \
                " type: " + str(l[4])
        luntooltip = luntooltip + "\n"

    i = 0
    mnw = 0
    while i < self.drobo.SlotCount():
        self.Device.slot[i][0].setText(_setDiskLabel(self.s[i][5], self.s[i][1]))
        w = self.Device.slot[i][0].width()
        if w > mnw:
            mnw = w
        self.Device.slot[i][0].setToolTip(luntooltip)
        i = i + 1

    c = self.drobo.GetSubPageConfig()
    self.Format.lunsize = _toTiB(c[2])

    c = self.drobo.GetSubPageCapacity()
    if c[2] > 0:
        self.Device.fullbar.setValue(c[1] * 100 / c[2])
    self.Device.fullbar.setToolTip(
        string.join(self.drobo.DiscoverMounts(), ',') +
        "\nused: " + _toGB(c[1]) + ' free: ' + _toGB(c[0]) +
        ' Total: ' + _toGB(c[2]) + ' GB, update# ' + str(self.updates))

    #print self.statusmsg
    #self.__StatusBar_space()
    self.statusBar().showMessage(self.statusmsg)
    ss = self.drobo.GetSubPageStatus()
    self.statusmsg = 'Status: ' + str(ss[0]) + ' update: ' + str(self.updates)

    if self.Format.inProgress and (self.fmt_process.poll() != None):
        # reset to normal state...
        print('it took: %d updates to run' % (self.updates - self.Format.startupdate))
        self.Format.inProgress = 0
        normal = self.Tools.Updatebutton.palette().color(QtGui.QPalette.Button)
        self.Format.Formatbutton.palette().setColor(QtGui.QPalette.Button,
                                                    QtCore.Qt.blue)
        self.Format.ext3.setChecked(0)
        self.Format.ntfs.setChecked(0)
        self.Format.msdos.setChecked(0)
        self.Format.Formatbutton.setText('Format Done!')
        self.Format.connect(self.Format.Formatbutton,
                            QtCore.SIGNAL('clicked()'), self.FormatLUN)
def escape_entities(m):
    out = []
    for char in m.group():
        out.append("&#%d;" % ord(char))
    return string.join(out, "")
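# Hedged usage sketch: escape_entities() is shaped as an re.sub callback,
# so pair it with a pattern matching the characters to encode; the
# pattern and sample string below are invented for illustration.
import re
print re.sub(u'[^\x00-\x7f]', escape_entities, u'caf\xe9')   # -> caf&#233;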
def mapQuotaEntries(old):
    new = []
    for i in old:
        new.append(string.join(i, ' '))
    return new
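# Hedged demo; each quota entry is assumed to be a sequence of string
# fields, which the helper flattens into one space-separated line.
print mapQuotaEntries([['alice', '10', '20'], ['bob', '5', '10']])
# -> ['alice 10 20', 'bob 5 10']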
#!/usr/bin/python
# email: [email protected]

import sys
import platform
import os.path
import re
import datetime
import string
import json

TYPE = 0
NAME = 1
VAL = 2

FILTER = '@null_string_place_holder'
join_lines = lambda lines, dim: string.join(
    filter(lambda item: -1 == item.find(FILTER), lines), dim)
merge = lambda lines: join_lines(lines, '\n')


def manual():
    print """
usage:  python ngx_wizard.py [conf]
sample: python ngx_wizard.py ngx_wizard.json
"""


def write_file(path, data):
    with open(path, 'w') as f:
        f.writelines(data)
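# Hedged demo of the FILTER sentinel defined above: join_lines()/merge()
# silently drop any line containing the placeholder, so optional code
# fragments can be spliced into generated output without leaving gaps.
if __name__ == '__main__':
    print merge(['line 1', FILTER, 'line 2'])   # -> "line 1\nline 2"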
def loadnetCDF_data(filename, mult=False, *args, **kwargs):
    '''
    Open one or many netCDF data files and load them into a python numpy
    array.

    Input:
    filename = netCDF filename or a string with wildcards that would
               include all desired netCDF files if used for an 'ls'
               command.  NetCDF4 files are not supported.
               Ex: see__L3_*.ncdf
    mult     = True for multiple files, False for single (default is False)

    Output:
    out   = a dict containing the data in np.arrays, the dict keys are
            specified by the header data line
    units = a dict containing the units for each data key
    desc  = a dict containing the descriptions for each data key
    '''
    import netCDF4 as cdf

    func_name = string.join([module_name, "loadnetCDF_data"], " ")

    # Open the file
    f = list()
    if mult:
        try:
            temp = cdf.MFDataset(filename)
            f.append(temp)
        except:
            # Probably has the wrong file format.  You'll need to cycle
            # through these individually now.
            import subprocess
            command = "ls %s" % (filename)
            pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
            names = pipe.stdout.readlines()
            for name in names:
                print "Opening:", name[0:-1]
                f.append(cdf.Dataset(name[0:-1]))
    else:
        f.append(cdf.Dataset(filename))

    # Load the variables into a standard numpy array and save the
    # description as attributes
    out = dict()
    units = dict()
    desc = dict()
    unit_warning = False
    desc_warning = False

    for fdat in f:
        slist = []
        for fkeys in fdat.variables.keys():
            s = fdat.variables[fkeys].shape
            if(len(s) > 0):
                slist.append(s[0])
        dim1 = np.max(slist)

        for fkeys in fdat.variables.keys():
            # Ensure the data will be stored as an array
            if len(fdat.variables[fkeys].shape) < 1:
                farray = [fdat.variables[fkeys][0] for i in range(dim1)]
            else:
                farray = fdat.variables[fkeys][:]

            if out.has_key(fkeys):
                out[fkeys] = np.append(out[fkeys], farray)
            else:
                out[fkeys] = np.array(farray)

            try:
                if units.has_key(fkeys):
                    units[fkeys] = np.append(units[fkeys],
                                             fdat.variables[fkeys].Units)
                else:
                    units[fkeys] = fdat.variables[fkeys].Units
            except AttributeError:
                if not unit_warning:
                    print module_name, "ADVISEMENT: no unit attribute"
                    unit_warning = True

            try:
                if desc.has_key(fkeys):
                    desc[fkeys] = np.append(desc[fkeys],
                                            fdat.variables[fkeys].Description)
                else:
                    desc[fkeys] = fdat.variables[fkeys].Description
            except AttributeError:
                if not desc_warning:
                    print module_name, "ADVISEMENT: no description attribute"
                    desc_warning = True

        fdat.close()

    return(out, units, desc)
def __init__(self, reference, resolution="daily", client=None,
             drive_data_id="1etaxbcHUa8YKiNw0tEkIyMtnC8DShRxQ"):
    if resolution not in self.RESOLUTIONS:
        raise ValueError, "{} not in resolution valid values: {}".format(
            resolution, string.join(self.RESOLUTIONS, ", "))

    # authenticate user & start Google Drive client
    if not client:
        self.client = get_client()
    else:
        self.client = client

    # initialize object attributes
    self.ref_symbol = reference
    self.resolution = resolution
    self.DATA_ID = drive_data_id
    self.PICKLE = "gdrive-{}.p".format(self.DATA_ID)

    # initialize files list from Google Drive
    self.files = {}
    for file in self.client.ListFile(
            {"q": "'{}' in parents".format(self.DATA_ID)}).GetList():
        filename = file.get("title")
        category, resolution, symbol = self._parse_filename(filename)
        if resolution != self.resolution:
            continue
        self.files[symbol] = dict(category=category, id=file.get("id"))
    if self.ref_symbol not in self.files:
        raise ValueError, "Symbol '{}' not found.".format(self.ref_symbol)

    # download tables
    if exist(self.PICKLE):
        self.all_dataframes = pk.load(open(self.PICKLE, "rb"))
    else:
        self.all_dataframes = self._download_tables()
        pk.dump((self.all_dataframes), open(self.PICKLE, "wb"))

    # initialize joint dataframe
    self.joint_dataframe = None
def loadASCII_index_profile(filename, miss=None, fill=np.nan, *args, **kwargs):
    '''
    Open an ascii data file and load it into a python numpy array.  Assumes
    this file is separated into index blocks, which should be maintained.

    Input:
    filename = CINDI data file name
    miss     = string or list denoting missing value options (default=None)
    fill     = fill value (default = NaN)

    Output:
    header  = a list containing the header strings without the '#'
    out     = a dict containing the data in np.arrays, the dict keys are
              specified by the header data line
    nblocks = number of indexed data blocks
    '''
    # use this function's own name (the original copied
    # "loadASCII_data_header" here)
    func_name = string.join([module_name, "loadASCII_index_profile"])

    #-----------------------------------------------------------------------
    # Test to ensure the file is small enough to read in.  Python can only
    # allocate 2GB of data.  If you load something larger, python will crash
    fsize = path.getsize(filename)
    header = list()
    out = dict()
    nblocks = 0

    if(fsize > 2.0e9):
        print func_name, "WARNING: File size [", (fsize * 1e-9), "GB > 2 GB]"
        return(header, out, nblocks)
    elif(fsize == 0):
        print func_name, "WARNING: empty file [", filename, "]"
        return(header, out, nblocks)

    #----------------------------------------------
    # Open the datafile and read the header rows
    f = open(filename, "r")

    if not f:
        print func_name, "ERROR: unable to open input file [", filename, "]"
        return header, out, nblocks

    line = f.readline()
    check = 0

    while line.find("#") >= 0:
        hline = string.strip(line.replace("#", ""))
        line = f.readline()
        check += 1

        if(len(hline) > 0):
            header.append(hline)

    if(check > 0):
        hline = hline.split()
    else:
        print func_name, "ERROR: no header in this file [", filename, "]"
        return(header, out, nblocks)

    #-------------------------------------------------
    # Cycle through the data rows, identifying blocks
    while len(line) > 0:
        if (len(line) == 1 and line.find("\n") == 0):
            # A new block has been found.  Only increment if data has
            # been read
            if len(out) > 0:
                nblocks += 1
                print "TEST: at block", nblocks, len(line)

            # Cycle to new dataline
            while len(line) == 1 and line.find("\n") == 0:
                line = f.readline()

        # Load the dataline into the output structure.  Append to the
        # current block's list if it exists, otherwise start a new one
        # (the original compared with "<", which would index past the end)
        dline = line.split()
        for num, name in enumerate(hline):
            if out.has_key(name):
                if len(out[name]) > nblocks:
                    out[name][nblocks].append(dline[num])
                else:
                    out[name].append([dline[num]])
            else:
                out[name] = [[dline[num]]]
        # advance to the next data line (missing in the original,
        # which would loop forever)
        line = f.readline()

    return(header, out, nblocks)
import sys
import os
import string

file = open("summary.txt")
lines = file.readlines()
f = open("temp1.txt", "w")

for line in lines:
    #line.split('\t')
    term = line.split('\t')
    t = term[0].split('_')
    if sys.argv[2] in t:
        term[0] = t[1] + " " + t[2] + " " + t[3]
    f.write(string.join(term))

f = open("temp1.txt")
filename = "result_" + sys.argv[2] + "_" + sys.argv[3] + ".dat"
f2 = open(filename, "w+")
lines = f.readlines()
g_counter = 1
g_newline = ""

f2.write("type noop deadline cfq\n")
for line in lines:
    term = line.split(" ")
    if sys.argv[3] in term:
        if g_counter == 1:
            g_newline += term[0]
            g_newline += " "
            g_newline += term[4].split("=")[1].split("K")[0]
            g_counter += 1
            continue
def initialize(self):
    self.__paths = []
    if os.environ.has_key("DISTUTILS_USE_SDK") and \
       os.environ.has_key("MSSdk") and \
       self.find_exe("cl.exe"):
        # Assume that the SDK set up everything alright; don't try to be
        # smarter
        self.cc = "cl.exe"
        self.linker = "link.exe"
        self.lib = "lib.exe"
        self.rc = "rc.exe"
        self.mc = "mc.exe"
    else:
        self.__paths = self.get_msvc_paths("path")

        if len(self.__paths) == 0:
            raise DistutilsPlatformError, \
                  ("Python was built with %s, "
                   "and extensions need to be built with the same "
                   "version of the compiler, but it isn't installed."
                   % self.__product)

        self.cc = self.find_exe("cl.exe")
        self.linker = self.find_exe("link.exe")
        self.lib = self.find_exe("lib.exe")
        self.rc = self.find_exe("rc.exe")    # resource compiler
        self.mc = self.find_exe("mc.exe")    # message compiler
        self.set_path_env_var('lib')
        self.set_path_env_var('include')

        # extend the MSVC path with the current path
        try:
            for p in string.split(os.environ['path'], ';'):
                self.__paths.append(p)
        except KeyError:
            pass
        self.__paths = normalize_and_reduce_paths(self.__paths)
        os.environ['path'] = string.join(self.__paths, ';')

    self.preprocess_options = None
    if self.__arch == "Intel":
        self.compile_options = ['/nologo', '/Ox', '/MD', '/W3', '/GX',
                                '/DNDEBUG']
        self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
                                      '/Z7', '/D_DEBUG']
    else:
        # Win64
        self.compile_options = ['/nologo', '/Ox', '/MD', '/W3', '/GS-',
                                '/DNDEBUG']
        self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
                                      '/Z7', '/D_DEBUG']

    self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
    if self.__version >= 7:
        self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no',
                                     '/DEBUG']
    else:
        self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no',
                                     '/pdb:None', '/DEBUG']
    self.ldflags_static = ['/nologo']

    self.initialized = True
def loadASCII_data_header(filename, miss=None, fill=np.nan, *args, **kwargs):
    '''
    Open an ascii data file and load it into a python numpy array.

    Input:
    filename = CINDI data file name
    miss     = string or list denoting missing value options (default=None)
    fill     = fill value (default = NaN)

    Output:
    header = a list containing the header strings without the '#'
    out    = a dict containing the data in np.arrays, the dict keys are
             specified by the header data line
    '''
    func_name = string.join([module_name, "loadASCII_data_header"], " ")

    #-----------------------------------------------------------------------
    # Test to ensure the file is small enough to read in.  Python can only
    # allocate 2GB of data.  If you load something larger, python will crash
    fsize = path.getsize(filename)
    header = list()
    out = dict()

    if(fsize > 2.0e9):
        print func_name, "WARNING: File size [", (fsize * 1e-9), "GB > 2 GB]"
        return(header, out)
    elif(fsize == 0):
        print func_name, "WARNING: empty file [", filename, "]"
        return(header, out)

    #----------------------------------------------
    # Open the datafile and read the header rows
    f = open(filename, "r")

    if not f:
        print func_name, "ERROR: unable to open input file [", filename, "]"
        return header, out

    line = f.readline()
    check = 0

    while line.find("#") >= 0:
        hline = string.strip(line.replace("#", ""))
        line = f.readline()
        check += 1

        if(len(hline) > 0):
            header.append(hline)

    if(check > 0):
        hline = hline.split()
    else:
        print func_name, "ERROR: no header in this file [", filename, "]"
        return(header, out)

    #-------------------------------------------
    # Open the datafile and read the data rows
    temp = np.genfromtxt(filename, comments="#", missing_values=miss,
                         filling_values=fill)

    if len(temp) > 0:
        #------------------------------------------
        # Create the output dictionary
        for num, name in enumerate(hline):
            out[name] = temp[:, num]

        del temp
    return(header, out)
def elf_format(dict):
    return struct.pack(string.join(['<'] + [e[1] for e in dict[FMT]], ''),
                       *tuple([dict[e[0]] for e in dict[FMT]]))
def loadASCII_data_hline(filename, hlines, miss=None, fill=np.nan,
                         *args, **kwargs):
    '''
    Open an ascii data file and load it into a python numpy array.  File
    header may be any number of lines and does not have to be preceded by a
    particular character.

    Input:
    filename = CINDI data file name
    hlines   = number of lines in header
    miss     = string or list denoting missing value options (default=None)
    fill     = fill value (default = NaN)

    Output:
    header = a list containing all specified header lines
    out    = a dict containing the data in np.arrays, the dict keys are
             specified by the header data line
    '''
    func_name = string.join([module_name, "loadASCII_data_hline"])

    #-----------------------------------------------------------------------
    # Test to ensure the file is small enough to read in.  Python can only
    # allocate 2GB of data.  If you load something larger, python will crash
    fsize = path.getsize(filename)
    header = list()
    out = dict()

    if(fsize > 2.0e9):
        print func_name, "WARNING: File size [", (fsize * 1e-9), "GB > 2 GB]"
        # fixed the "hearder" typo that would have raised NameError here
        return header, out
    elif(fsize == 0):
        print func_name, "WARNING: empty file [", filename, "]"
        return(header, out)

    #----------------------------------------------
    # Open the datafile and read the header rows
    f = open(filename, "r")

    if not f:
        print func_name, "ERROR: unable to open input file [", filename, "]"
        return out

    for h in range(hlines):
        header.append(f.readline())

    #-------------------------------------------
    # Open the datafile and read the data rows
    temp = np.genfromtxt(filename, skip_header=hlines, missing_values=miss,
                         filling_values=fill)

    if len(temp) > 0:
        #---------------------------------------------------------------------
        # Create the output dictionary, removing the point sign from any keys
        for num, name in enumerate(header[-1].split()):
            name = name.replace("#", "")
            if len(name) > 0:
                out[name] = temp[:, num]

        del temp
    return header, out
def execute(self, target, source, env, executor=None): """Execute a command action. This will handle lists of commands as well as individual commands, because construction variable substitution may turn a single "command" into a list. This means that this class can actually handle lists of commands, even though that's not how we use it externally. """ escape_list = SCons.Subst.escape_list flatten_sequence = SCons.Util.flatten_sequence try: shell = env['SHELL'] except KeyError: raise SCons.Errors.UserError( 'Missing SHELL construction variable.') try: spawn = env['SPAWN'] except KeyError: raise SCons.Errors.UserError( 'Missing SPAWN construction variable.') else: if is_String(spawn): spawn = env.subst(spawn, raw=1, conv=lambda x: x) escape = env.get('ESCAPE', lambda x: x) ENV = get_default_ENV(env) # Ensure that the ENV values are all strings: for key, value in ENV.items(): if not is_String(value): if is_List(value): # If the value is a list, then we assume it is a # path list, because that's a pretty common list-like # value to stick in an environment variable: value = flatten_sequence(value) ENV[key] = string.join(map(str, value), os.pathsep) else: # If it isn't a string or a list, then we just coerce # it to a string, which is the proper way to handle # Dir and File instances and will produce something # reasonable for just about everything else: ENV[key] = str(value) if executor: target = executor.get_all_targets() source = executor.get_all_sources() cmd_list, ignore, silent = self.process(target, map(rfile, source), env, executor) # Use len() to filter out any "command" that's zero-length. for cmd_line in filter(len, cmd_list): # Escape the command line for the interpreter we are using. cmd_line = escape_list(cmd_line, escape) result = spawn(shell, escape, cmd_line[0], cmd_line, ENV) if not ignore and result: msg = "Error %s" % result return SCons.Errors.BuildError(errstr=msg, status=result, action=self, command=cmd_line) return 0
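# Hedged illustration of the ENV stringification rule used in execute()
# above: list values are treated as path lists and joined with os.pathsep,
# anything else is coerced with str().  The sample ENV is hypothetical.
def _example_stringify_env():
    ENV = {'PATH': ['/usr/bin', '/bin'], 'JOBS': 4}
    for key, value in ENV.items():
        if not is_String(value):
            if is_List(value):
                value = SCons.Util.flatten_sequence(value)
                ENV[key] = string.join(map(str, value), os.pathsep)
            else:
                ENV[key] = str(value)
    return ENV  # {'PATH': '/usr/bin:/bin', 'JOBS': '4'} on POSIX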
def parse_generic(data, fmt):
    # Unpack little-endian binary data into a dict keyed by field name,
    # keeping the format table itself under FMT for later re-packing
    tmp = struct.unpack(string.join(['<'] + [e[1] for e in fmt], ''), data)
    tmp = dict((fmt[j][0], tmp[j]) for j in range(len(fmt)))
    tmp[FMT] = fmt
    return tmp
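# Sketch: parse_generic is the inverse of elf_format above; reusing the
# illustrative _EX_FMT table, it unpacks raw bytes into a field dict that
# carries its own format table under FMT.
def _example_unpack():
    rec = parse_generic(_example_pack(), _EX_FMT)
    assert rec['e_machine'] == 3
    return rec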
def _subproc(env, cmd, error='ignore', **kw):
    """Do common setup for a subprocess.Popen() call"""
    # allow std{in,out,err} to be the string 'devnull'
    io = kw.get('stdin')
    if is_String(io) and io == 'devnull':
        kw['stdin'] = open(os.devnull)
    io = kw.get('stdout')
    if is_String(io) and io == 'devnull':
        kw['stdout'] = open(os.devnull, 'w')
    io = kw.get('stderr')
    if is_String(io) and io == 'devnull':
        kw['stderr'] = open(os.devnull, 'w')

    # Figure out what shell environment to use
    ENV = kw.get('env', None)
    if ENV is None: ENV = get_default_ENV(env)

    # Ensure that the ENV values are all strings:
    new_env = {}
    for key, value in ENV.items():
        if is_List(value):
            # If the value is a list, then we assume it is a path list,
            # because that's a pretty common list-like value to stick
            # in an environment variable:
            value = SCons.Util.flatten_sequence(value)
            new_env[key] = string.join(map(str, value), os.pathsep)
        else:
            # It's either a string or something else.  If it's a string,
            # we still want to call str() because it might be a *Unicode*
            # string, which makes subprocess.Popen() gag.  If it isn't a
            # string or a list, then we just coerce it to a string, which
            # is the proper way to handle Dir and File instances and will
            # produce something reasonable for just about everything else:
            new_env[key] = str(value)
    kw['env'] = new_env

    try:
        #FUTURE return subprocess.Popen(cmd, **kw)
        return subprocess.Popen(*(cmd, ), **kw)
    except EnvironmentError as e:
        if error == 'raise': raise
        # Return a dummy Popen instance that only reports the error
        class dummyPopen:
            def __init__(self, e): self.exception = e
            def communicate(self): return ('', '')
            def wait(self): return -self.exception.errno
            stdin = None
            class f:
                def read(self): return ''
                def readline(self): return ''
            stdout = stderr = f()
        return dummyPopen(e)
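# Hedged usage sketch: spawning a command with its output discarded via the
# 'devnull' string convention handled above.  env is assumed to be a
# construction environment; 'true' is an arbitrary command.
def _example_subproc(env):
    p = _subproc(env, ['true'], stdout='devnull', stderr='devnull')
    return p.wait()  # 0 on success; -errno via dummyPopen if spawn failed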
def repr_generic(d):
    # Render each field of d as "name: value", formatting the value through
    # the per-field printf spec (e[2]) from the d[FMT] table
    return string.join([('%s: %s' % (e[0], e[2])) % (d[e[0]])
                        for e in d[FMT]], ' ')
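# Sketch: with the illustrative record from _example_unpack, repr_generic
# yields "e_type: 2 e_machine: 3 e_version: 1".
def _example_show():
    print repr_generic(_example_unpack())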
def __str__(self): return string.join(map(str, self.list), '\n')
def __str__(self): if is_List(self.cmd_list): return string.join(map(str, self.cmd_list), ' ') return str(self.cmd_list)
def genstring(self, target, source, env): return string.join( map(lambda a, t=target, s=source, e=env: a.genstring(t, s, e), self.list), '\n')
def __call__(self, target, source, env, exitstatfunc=_null, presub=_null, show=_null, execute=_null, chdir=_null, executor=None): if not is_List(target): target = [target] if not is_List(source): source = [source] if presub is _null: presub = self.presub if presub is _null: presub = print_actions_presub if exitstatfunc is _null: exitstatfunc = self.exitstatfunc if show is _null: show = print_actions if execute is _null: execute = execute_actions if chdir is _null: chdir = self.chdir save_cwd = None if chdir: save_cwd = os.getcwd() try: chdir = str(chdir.abspath) except AttributeError: if not is_String(chdir): if executor: chdir = str(executor.batches[0].targets[0].dir) else: chdir = str(target[0].dir) if presub: if executor: target = executor.get_all_targets() source = executor.get_all_sources() t = string.join(map(str, target), ' and ') l = string.join(self.presub_lines(env), '\n ') out = "Building %s with action:\n %s\n" % (t, l) sys.stdout.write(out) cmd = None if show and self.strfunction: if executor: target = executor.get_all_targets() source = executor.get_all_sources() try: cmd = self.strfunction(target, source, env, executor) except TypeError: cmd = self.strfunction(target, source, env) if cmd: if chdir: cmd = ('os.chdir(%s)\n' % repr(chdir)) + cmd try: get = env.get except AttributeError: print_func = self.print_cmd_line else: print_func = get('PRINT_CMD_LINE_FUNC') if not print_func: print_func = self.print_cmd_line print_func(cmd, target, source, env) stat = 0 if execute: if chdir: os.chdir(chdir) try: stat = self.execute(target, source, env, executor=executor) if isinstance(stat, SCons.Errors.BuildError): s = exitstatfunc(stat.status) if s: stat.status = s else: stat = s else: stat = exitstatfunc(stat) finally: if save_cwd: os.chdir(save_cwd) if cmd and save_cwd: print_func('os.chdir(%s)' % repr(save_cwd), target, source, env) return stat
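# Hedged usage sketch of the __call__ protocol above: a command Action built
# with the SCons.Action.Action factory, invoked against target/source nodes
# supplied by a real build (the node arguments and env here are assumptions,
# not objects this module constructs).
def _example_call_action(target_node, source_node, env):
    act = SCons.Action.Action('$CC -c $SOURCE -o $TARGET')
    return act([target_node], [source_node], env)  # 0 on success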