def __init__(self, master, title, dict=None):
    # Build a titled, vertically scrollable canvas that will hold one row per
    # dictionary entry (rows are created by load_dict() at the end).
    width = 0
    height = 40  # minimum height when there is nothing to show
    if dict:
        height = 20 * len(dict)  # ~20px per row -- presumably the Entry widget height; TODO confirm
    self.master = master
    self.title = title
    import repr
    # Truncating repr keeps each displayed value to one short line.
    self.repr = repr.Repr()
    self.repr.maxstring = 60
    self.repr.maxother = 60
    self.frame = frame = Frame(master)
    self.frame.pack(expand=1, fill='both')
    self.label = Label(frame, text=title, borderwidth=2, relief='groove')
    self.label.pack(fill='x')
    self.vbar = vbar = Scrollbar(frame, name='vbar')
    vbar.pack(side='right', fill='y')
    # Visible canvas height is clamped to [40, 300]; the scrollregion spans
    # the full (possibly larger) content height.
    self.canvas = canvas = Canvas(frame,
                                  height=min(300, max(40, height)),
                                  scrollregion=(0, 0, width, height))
    canvas.pack(side='left', fill='both', expand=1)
    # Wire the scrollbar and canvas to each other.
    vbar['command'] = canvas.yview
    canvas['yscrollcommand'] = vbar.set
    # The subframe lives inside the canvas so its contents can scroll.
    self.subframe = subframe = Frame(canvas)
    self.sfid = canvas.create_window(0, 0, window=subframe, anchor='nw')
    self.load_dict(dict)
def __init__(self, master, title, dict=None):
    """Create a titled, scrollable canvas viewer for *dict*.

    The canvas gets one ~20px row per entry (filled in by load_dict());
    its visible height is clamped between 40 and 300 pixels.
    """
    self.master = master
    self.title = title
    import repr
    # Truncating repr keeps each rendered value short.
    self.repr = repr.Repr()
    self.repr.maxstring = 60
    self.repr.maxother = 60
    width = 0
    # XXX 20 == observed height of Entry widget
    height = 20 * len(dict) if dict else 40
    self.frame = outer = Frame(master)
    outer.pack(expand=1, fill="both")
    self.label = Label(outer, text=title, borderwidth=2, relief="groove")
    self.label.pack(fill="x")
    self.vbar = scroll = Scrollbar(outer, name="vbar")
    scroll.pack(side="right", fill="y")
    self.canvas = body = Canvas(outer,
                                height=min(300, max(40, height)),
                                scrollregion=(0, 0, width, height))
    body.pack(side="left", fill="both", expand=1)
    # Hook scrollbar and canvas together.
    scroll["command"] = body.yview
    body["yscrollcommand"] = scroll.set
    # Content frame embedded in the canvas so it scrolls.
    self.subframe = inner = Frame(body)
    self.sfid = body.create_window(0, 0, window=inner, anchor="nw")
    self.load_dict(dict)
def select(self, minid=None, maxid=None, **attrs):
    """Return a new slice containing the objects with ids in [minid, maxid]
    that also match the given attribute constraints.

    :param minid: lower bound; may be an id or a Job (a subjob resolves to
        its master's id)
    :param maxid: upper bound; defaults to ``minid`` when a single Job is
        passed, i.e. "select exactly this job"
    :param attrs: extra attribute constraints forwarded to ``do_select``
    :return: a new slice of the same class as ``self``
    """
    import repr
    from Ganga.GPIDev.Lib.Job.Job import Job
    # Accept Job objects in place of plain ids.
    if isType(minid, Job):
        if minid.master:
            minid = minid.master.id
        else:
            minid = minid.id
        if maxid is None:
            maxid = minid
    if isType(maxid, Job):
        if maxid.master:
            maxid = maxid.master.id
        else:
            maxid = maxid.id
    logger = getLogger()
    this_repr = repr.Repr()
    from Ganga.GPIDev.Base.Proxy import GPIProxyObjectFactory
    attrs_str = ""
    # Loop through all possible input combinations to construct a string
    # representation of the attrs from possible inputs.
    # Required to flatten the additional arguments into a flat string in attrs_str.
    for a in attrs:
        from inspect import isclass
        if isclass(attrs[a]):
            this_attr = GPIProxyObjectFactory(attrs[a]())
        else:
            from Ganga.GPIDev.Base.Objects import GangaObject
            if isType(attrs[a], GangaObject):
                this_attr = GPIProxyObjectFactory(attrs[a])
            else:
                if type(attrs[a]) is str:
                    from Ganga.GPIDev.Base.Proxy import getRuntimeGPIObject
                    this_attr = getRuntimeGPIObject(attrs[a], True)
                else:
                    this_attr = attrs[a]
        full_str = str(this_attr)
        # BUGFIX: the original stripped each line into a throwaway loop
        # variable ("for line in split_str: line = line.strip()") and then
        # joined the *unstripped* lines.  Strip while joining instead, which
        # is what the flattening comment above clearly intends.
        flat_str = ''.join(line.strip() for line in full_str.split('\n'))
        attrs_str += ", %s=\"%s\"" % (str(a), flat_str)
    logger.debug("Attrs_Str: %s" % str(attrs_str))
    slice_name = "%s.select(minid='%s', maxid='%s'%s)" % (
        self.name, this_repr.repr(minid), this_repr.repr(maxid), attrs_str)
    logger.debug("Constructing slice: %s" % slice_name)
    this_slice = self.__class__(slice_name)

    def append(id, obj):
        # Collector callback: store each selected object under its id.
        this_slice.objects[id] = obj

    self.do_select(append, minid, maxid, **attrs)
    return this_slice
def log_exception(name, **kwargs):
    """Logs an exception, along with relevant information such as message,
    traceback, and anything provided pertinent to the situation.

    This function does nothing unless called while an exception is being
    handled.

    :param name: ``name`` as passed in to :py:func:`logging.getLogger()`.
    :param kwargs: Other keywords may be passed in and will be included in
        the produced log line.
    """
    # Avoid shadowing the builtins `type` and `repr` (the original did both).
    exc_type, exc_value, exc_tb = sys.exc_info()
    if not exc_value:
        # No exception is being handled -- nothing to log.
        return
    # The `repr` module was renamed `reprlib` in Python 3; support both.
    try:
        import reprlib
    except ImportError:  # Python 2
        import repr as reprlib
    exc_repr = reprlib.Repr()
    exc_repr.maxstring = 1000  # allow long messages/paths before truncation
    logger = logging.getLogger(name)
    data = kwargs.copy()
    data['message'] = str(exc_value)
    data['traceback'] = traceback.format_exception(exc_type, exc_value, exc_tb)
    data['args'] = exc_value.args
    # `items()` works on Python 2 and 3; the original `iteritems()` is
    # Python-2-only and crashed under Python 3.
    data_str = ' '.join(
        '='.join((key, exc_repr.repr(val)))
        for key, val in sorted(data.items()))
    logger.error('exception:{0}:unhandled {1}'.format(exc_type.__name__,
                                                      data_str))
def create_head_Variable(self, docobj):
    """Build the head element for a documented variable.

    For (old-style, Python 2) class instances the rendered value is
    "<class path> instance", hyperlinked to the class's documentation;
    for everything else a truncated repr() of the value is used.
    """
    from doctree import Value
    import repr
    # Truncating repr so huge values do not blow up the generated document.
    myrepr = repr.Repr()
    myrepr.maxstring = REPR_MAXSTRING
    myrepr.maxlong = REPR_MAXLONG
    head = self.create_head_Object(docobj)
    if type(docobj.object()) == types.InstanceType:
        # Instance: show "<class path> instance" and link to the class page.
        clsobject = docobjects.create_docobject(docobj.object().__class__)
        value = clsobject.path() + " instance"
        head.add_attribute('HREF', clsobject.path())
    else:
        value = myrepr.repr(docobj.object())
        # Unwrap the bare value when the repr matches the "dumb value"
        # pattern (presumably strips decoration; confirm against the regex).
        m = self.dumb_value_re.match(value)
        if m:
            value = m.group('value')
    head.add_child(Value(value))
    return head
def _testCaches(self, live):
    """Issue the same request four times against a fresh processor and
    return the sizes of its caches after each request, as a list of
    (action, stylesheet, query, exp, file) cache nodeSize tuples.

    :param live: value bound to the LIVE_ENVIRONMENT app variable.
    """
    argsForConfig = ['--rhizomedir', os.path.abspath(RHIZOMEDIR)]
    root = raccoon.HTTPRequestProcessor(a='test-links.py',
                                        argsForConfig=argsForConfig,
                                        appVars={'LIVE_ENVIRONMENT': live})
    #root.styleSheetCache.debug = 1
    # NOTE(review): repr1 is configured but never used afterwards.
    repr1 = Repr.Repr()
    repr1.maxtuple = 100
    #import sets
    #comparesets = []
    cacheSizes = []
    for i in range(4):
        # NOTE(review): timing is captured but only reported by the
        # commented-out print below.
        start = time.time()
        self.doHTTPRequest(root, {}, 'http://www.foo.com/page1')
        #print time.time() - start
        cacheSizes.append(
            (root.actionCache.nodeSize, root.styleSheetCache.nodeSize,
             root.queryCache.nodeSize, root.expCache.nodeSize,
             raccoon.fileCache.nodeSize))
        #comparesets.append( sets.Set(root.queryCache.nodeDict.keys() ) )
    return cacheSizes
TODO: Define a VM that doesn't use exceptions for control flow!
"""

import operator  # for + - * / etc.
import os
import sys
import repr as repr_lib

from .pyvm2 import debug1
from ..lib import dis

# Create a repr that won't overflow.
repr_obj = repr_lib.Repr()
repr_obj.maxother = 120
repper = repr_obj.repr


# Different than log
def debug(msg, *args):
    # Verbose-only debug output; forwards to pyvm2's debug1 when enabled.
    if not VERBOSE:
        return
    debug1(msg, *args)


VERBOSE = False
#VERBOSE = True
if self.filesBucket is not None: for upload in self.filesBucket.list_multipart_uploads(): upload.cancel_upload() if self.__getBucketVersioning(self.filesBucket) in (True, None): for key in list(self.filesBucket.list_versions()): self.filesBucket.delete_key(key.name, version_id=key.version_id) else: for key in list(self.filesBucket.list()): key.delete() self.filesBucket.delete() for domain in (self.filesDomain, self.jobsDomain): if domain is not None: domain.delete() aRepr = reprlib.Repr() aRepr.maxstring = 38 # so UUIDs don't get truncated (36 for UUID plus 2 for quotes) custom_repr = aRepr.repr class AWSJob(JobWrapper, SDBHelper): """ A Job that can be converted to and from an SDB item. """ @classmethod def fromItem(cls, item): """ :type item: Item :rtype: AWSJob """
def main():
    """Entry point for the whereis utility: parse the command line, walk the
    source directories printing matching files (with optional sizes, line
    counts, attributes, backups and per-directory/grand totals), then, when
    requested (-y), compare equally-sized files byte-for-byte and report
    duplicates.
    """
    global currentDir
    global currentDirCount
    global currentFileCount
    global blankLine
    global lineLength
    global fileCountLength
    global fileSizeLength
    global lineCountLength
    # BUGFIX: `global lineLength` was declared twice; duplicate removed.
    global fileCountFormat
    global lineCountFormat
    global fileSizeFormat
    global quiet
    global statusLineDirty

    blankLine = ' ' * (defaultLineLength - 1)

    parser = argparse.ArgumentParser(
        prog=PROGRAM_NAME,
        description=PROGRAM_NAME + ' - ' + VERSION + ' - ' + COPYRIGHT_MESSAGE,
        prefix_chars=prefixList,
        add_help=False)

    parser.add_argument(argumentPrefix + 'a', '--file_attributes', action='store_true')
    parser.add_argument(argumentPrefix + '1', '--find_one', action='store_true')
    parser.add_argument(argumentPrefix + 'b', '--backup', action='store', default='')
    parser.add_argument(argumentPrefix + 'c', '--execute_command', action='store', default='')
    parser.add_argument(argumentPrefix + 'd', '--output_timestamp', action='store_const', const='m')
    parser.add_argument(
        argumentPrefix + 'D', choices='acm', default='m',
        help='output timestamp, a = last accessed, c = created, m = last modified')
    parser.add_argument(argumentPrefix + 'e', '--output_dir_totals', action='store_true')
    parser.add_argument(argumentPrefix + 'E', '--output_dir_totals_only', action='store_true')
    parser.add_argument(argumentPrefix + 'f', '--folders-only', action='store_true')
    parser.add_argument(argumentPrefix + 'g', '--filename_truncation', action='store_true')
    parser.add_argument(argumentPrefix + 'h', '--print_help2', action='store_true')
    parser.add_argument(argumentPrefix + 'i', '--include_filespec', action='append', nargs='+')
    parser.add_argument(argumentPrefix + 'l', '--count_lines', action='store_true')
    parser.add_argument(argumentPrefix + 'Lf', '--file_count_length', type=int, default=defaultFileCountLength)
    parser.add_argument(argumentPrefix + 'Ll', '--line_length', type=int, default=defaultLineLength)
    parser.add_argument(argumentPrefix + 'Ln', '--line_count_length', type=int, default=defaultLineCountLength)
    parser.add_argument(argumentPrefix + 'Lz', '--file_size_length', type=int, default=defaultFileSizeLength)
    parser.add_argument(argumentPrefix + 'm', '--no_commas', action='store_true')
    parser.add_argument(argumentPrefix + 'n', '--max_depth', type=int, const=1, default=0, nargs='?')
    parser.add_argument(argumentPrefix + 'q', '--quiet', action='store_true')
    parser.add_argument(argumentPrefix + 'r', '--output_relative_path', action='store_true')
    # parser.add_argument( argumentPrefix + 'R', '--rename', choices='dmnsu' )
    parser.add_argument(argumentPrefix + 's', '--output_file_size', action='store_true')
    parser.add_argument(argumentPrefix + 't', '--output_totals', action='store_true')
    parser.add_argument(argumentPrefix + 'u', '--hide_command_output', action='store_true')
    parser.add_argument(argumentPrefix + 'w', '--extra_target', action='append', nargs='+')
    parser.add_argument(argumentPrefix + 'v', '--version', action='version', version='%(prog)s ' + VERSION)
    parser.add_argument(argumentPrefix + 'vv', '--version_history', action='store_true')
    parser.add_argument(argumentPrefix + 'x', '--exclude_filespec', action='append', nargs='+')
    parser.add_argument(argumentPrefix + 'y', '--find_dupes', action='store_true')
    parser.add_argument(argumentPrefix + 'z', '--print_command_only', action='store_true')
    parser.add_argument(argumentPrefix + '?', '--print_help', action='store_true')
    parser.add_argument(argumentPrefix + '!', '--print_bang_help', action='store_true')

    # let's do a little preprocessing of the argument list because argparse is
    # missing a few pieces of functionality the original whereis provided...
    # specifically the ability to determine the order in which arguments occur
    new_argv = list()

    # grab the fileSpec and SourceDir and stick everything else in the list for argparse
    prog = ''
    fileSpec = ''
    sourceDir = ''
    extraSourceDirs = []
    copyNextOne = False

    for arg in sys.argv:
        if arg[0] not in prefixList:
            if copyNextOne:
                # This bare argument is the parameter of the previous option.
                new_argv.append(arg)
                if sourceDir == '':
                    copyNextOne = False
            elif prog == '':
                prog = arg
            elif fileSpec == '':
                fileSpec = arg
            elif sourceDir == '':
                sourceDir = arg
            else:
                print('ignoring extra arg: ' + arg)
        else:
            new_argv.append(arg)
            copyNextOne = arg[1] in 'bciLwx'  # these are args that can have parameters

            # build the output order list
            if arg[1] == 'a':
                outputOrder.append(outputAttributes)
            elif arg[1] == 'l':
                outputOrder.append(outputLineCount)
            elif arg[1] == 'd':
                outputOrder.append(outputModified)
            elif arg[1] == 's':
                outputOrder.append(outputSize)
            elif arg[1] == 'D':
                if arg[2] == 'a':
                    outputOrder.append(outputAccessed)
                elif arg[2] == 'c':
                    outputOrder.append(outputCreated)
                else:
                    outputOrder.append(outputModified)

    # set defaults if necessary
    if fileSpec == '':
        fileSpec = '*'
    if sourceDir == '':
        sourceDir = '.'

    # let argparse handle the rest
    args = parser.parse_args(new_argv)

    quiet = args.quiet

    if args.print_help or args.print_help2:
        printHelp()
        return

    if args.print_bang_help:
        printBangHelp()
        return

    if args.version_history:
        printRevisionHistory()
        return

    # let's handle all the flags and values parsed off the command-line
    if args.include_filespec:
        includeFileSpecs = list(itertools.chain(*args.include_filespec))
    else:
        includeFileSpecs = list()

    if args.exclude_filespec:
        excludeFileSpecs = list(itertools.chain(*args.exclude_filespec))
    else:
        excludeFileSpecs = list()

    if args.extra_target:
        extraSourceDirs = list(itertools.chain(*args.extra_target))

    fileCountLength = args.file_count_length
    fileSizeLength = args.file_size_length
    lineCountLength = args.line_count_length
    lineLength = args.line_length

    if lineLength != defaultLineLength:
        blankLine = ' ' * (lineLength - 1)

    countLines = args.count_lines
    outputDirTotalsOnly = args.output_dir_totals_only
    outputRelativePath = args.output_relative_path
    outputTotals = args.output_totals
    # NOTE(review): outputTimestamp and foldersOnly are captured but never
    # used below -- presumably unimplemented features.
    outputTimestamp = args.output_timestamp
    outputDirTotals = args.output_dir_totals
    executeCommand = args.execute_command
    backupLocation = args.backup
    findOne = args.find_one
    hideCommandOutput = args.hide_command_output
    fileAttributes = args.file_attributes
    foldersOnly = args.folders_only
    fileNameTruncation = args.filename_truncation
    printCommandOnly = args.print_command_only
    maxDepth = args.max_depth

    if args.no_commas:
        formatString = 'd'
    else:
        formatString = ',d'

    fileCountFormat = str(fileCountLength) + formatString
    lineCountFormat = str(lineCountLength) + formatString
    fileSizeFormat = str(fileSizeLength) + formatString

    fileNameRepr = reprlib.Repr()
    fileNameRepr.maxstring = lineLength - 1  # sets max string length of repr

    findDupes = args.find_dupes

    #redirected = not sys.stdout.isatty( )

    # try to identify source dir and filespec intelligently...
    # I don't want order to matter if it's obvious what the user meant
    if all( ( c in './\\' ) for c in fileSpec ) or any( ( c in '*?' ) for c in sourceDir ) or \
       any( ( c in '/\\' ) for c in fileSpec ) or ( os.path.isdir( fileSpec ) ):
        fileSpec, sourceDir = sourceDir, fileSpec

    if all((c in './\\') for c in fileSpec):
        fileSpec = '*'

    fileSpec = fileSpec.replace('*.*', '*')  # *.* and * mean the same thing on Windows

    # a little validation before we start
    if not os.path.isdir(sourceDir):
        print("whereis: source directory '" + sourceDir +
              "' does not exist or cannot be accessed", file=sys.stderr)
        return

    if (backupLocation != '') and (not os.path.isdir(backupLocation)):
        try:
            os.makedirs(backupLocation)
        except:
            print("whereis: backup location '" + backupLocation +
                  "' cannot be created", file=sys.stderr)
            return

    # start status thread
    if not quiet:
        threading.Thread(target=statusProcess).start()

    fileCount = 0
    lineTotal = 0
    grandDirTotal = 0
    grandLineTotal = 0

    # initialize currentDir because the status thread might need it before we set it below
    currentDir = os.path.abspath(sourceDir)

    foundOne = False
    attributeFlags = 0

    sourceDirs = [sourceDir]
    sourceDirs.extend(extraSourceDirs)

    # We'll use this if we want to find duplicates.
    filesAndSizes = {}

    # walk the tree
    for currentSourceDir in sourceDirs:
        for top, dirs, files in os.walk(currentSourceDir):
            top = os.path.normpath(top)

            # performance note: We're still going to walk all the directories
            # even if we are ignoring them.  I haven't figured out how to
            # avoid that.
            if maxDepth > 0:
                depth = top.count(os.sep) + 1
                if top != '' and top[0] != os.sep and top[0] != '.':
                    depth += 1
                if depth > maxDepth:
                    continue

            currentAbsoluteDir = os.path.abspath(top)
            currentRelativeDir = os.path.relpath(top, currentSourceDir)

            if outputRelativePath:
                currentDir = currentRelativeDir
            else:
                currentDir = currentAbsoluteDir

            currentDirCount += 1
            currentFileCount = 0

            dirTotal = 0
            lineTotal = 0

            # build the set of files that match our criteria
            fileSet = set(fnmatch.filter(files, fileSpec))

            for includeFileSpec in includeFileSpecs:
                fileSet = fileSet.union(set(fnmatch.filter(files, includeFileSpec)))

            for excludeFileSpec in excludeFileSpecs:
                fileSet = fileSet.difference(set(fnmatch.filter(files, excludeFileSpec)))

            createdBackupDir = (top == '.')

            # now we have the list of files, so let's sort them and handle them
            for fileName in sorted(fileSet, key=str.lower):
                currentFileCount += 1

                absoluteFileName = os.path.join(currentAbsoluteDir, fileName)
                relativeFileName = os.path.join(currentRelativeDir, fileName)

                try:
                    fileSize = os.stat(absoluteFileName).st_size
                except (PermissionError, FileNotFoundError, OSError):
                    # unreadable/vanished file: count it with size 0
                    fileSize = 0

                if findDupes:
                    filesAndSizes[absoluteFileName] = fileSize

                dirTotal = dirTotal + fileSize
                fileCount += 1

                if os.name == 'nt' and fileAttributes:
                    attributeFlags = win32file.GetFileAttributes(absoluteFileName)

                if executeCommand != '' and os.name == 'nt':
                    base, extension = os.path.splitext(fileName)
                    extension = extension.strip()  # unix puts in a newline supposedly

                    translatedCommand = translateCommand(
                        executeCommand, base, extension, currentAbsoluteDir,
                        absoluteFileName, currentRelativeDir, relativeFileName)

                    if hideCommandOutput:
                        translatedCommand += ' > ' + os.devnull

                    if printCommandOnly:
                        print(blankLine, end='\r', file=sys.stderr)
                        print(translatedCommand)
                    else:
                        subprocess.Popen(shlex.split(translatedCommand), shell=True)

                lineCount = 0

                if countLines:
                    for line in codecs.open(absoluteFileName, 'rU', 'ascii', 'replace'):
                        lineCount += 1
                    lineTotal = lineTotal + lineCount

                if backupLocation != '':
                    if not createdBackupDir:
                        backupTargetDir = os.path.join(backupLocation, currentRelativeDir)
                        if not os.path.exists(backupTargetDir):
                            os.makedirs(backupTargetDir)
                        createdBackupDir = True

                    backupTargetFileName = os.path.join(backupLocation, relativeFileName)

                    try:
                        shutil.copy2(absoluteFileName, backupTargetDir)
                    except:
                        print('error copying ' + absoluteFileName + ' to ' + backupTargetDir)

                if not outputDirTotalsOnly:
                    with outputLock:
                        # this will clear the console line for output, if necessary
                        if not quiet and statusLineDirty:
                            print(blankLine, end='\r', file=sys.stderr)
                            statusLineDirty = False

                        outputFileStats(absoluteFileName, fileSize, lineCount, attributeFlags)

                        if outputRelativePath:
                            if fileNameTruncation:
                                outputText = fileNameRepr.repr(relativeFileName).replace('\\\\', '\\')[1:-1]
                            else:
                                outputText = relativeFileName.replace('\\\\', '\\')
                        else:
                            if fileNameTruncation:
                                outputText = fileNameRepr.repr(absoluteFileName).replace('\\\\', '\\')[1:-1]
                            else:
                                outputText = absoluteFileName.replace('\\\\', '\\')

                        try:
                            print(outputText)
                        except:
                            print("whereis: unicode filename found ('" +
                                  str(outputText.encode('ascii', 'backslashreplace')) + "')",
                                  file=sys.stderr)

                foundOne = True

                if findOne:
                    break

            if outputDirTotals or outputDirTotalsOnly:
                if outputDirTotalsOnly or dirTotal > 0:
                    with outputLock:
                        if not quiet and statusLineDirty:
                            print(blankLine, end='\r', file=sys.stderr)
                            statusLineDirty = False

                        if not outputDirTotalsOnly:
                            print()

                        outputTotalStats(dirTotal, lineTotal)
                        print(currentDir.encode(sys.stdout.encoding, errors='replace'))

                        if not outputDirTotalsOnly:
                            print()

            if outputTotals:
                grandDirTotal += dirTotal
                grandLineTotal += lineTotal

            currentDirCount += 1

        if foundOne and findOne:
            break

    if outputTotals:
        with outputLock:
            if not quiet and statusLineDirty:
                print(blankLine, end='\r', file=sys.stderr)
                statusLineDirty = False

            outputTotalStats(separator=True)
            print('-' * fileCountLength)
            outputTotalStats(grandDirTotal, grandLineTotal)

            if outputDirTotalsOnly:
                print(format(currentDirCount, fileCountFormat))
            else:
                print(format(fileCount, fileCountFormat))

    # hey, we might not be done yet...
    if not findDupes:
        return

    sizesAndFiles = {}

    # flip the dictionary into a reverse multidict
    for key, value in filesAndSizes.items():
        sizesAndFiles.setdefault(value, set()).add(key)

    # now for any key that has multiple values, those values are files
    # we need to actually compare, so let's make a list of those files
    fileSetsToCompare = list(values for key, values in sizesAndFiles.items()
                             if len(values) > 1)

    print()

    matchResults = []

    for fileSet in fileSetsToCompare:
        if len(fileSet) == 1:
            continue

        # flavor 0 = not yet known to match anything; flavors 1..n group
        # files proven identical to each other
        fileResults = {}

        for file in fileSet:
            fileResults[file] = 0

        fileFlavor = 1

        for firstFile, secondFile in itertools.combinations(fileSet, 2):
            if fileResults[firstFile] == 0 or fileResults[secondFile] == 0:
                print(f"Comparing '{firstFile}' and '{secondFile}'...")

                if filecmp.cmp(firstFile, secondFile, shallow=False):
                    if fileResults[firstFile] == 0:
                        if fileResults[secondFile] != 0:
                            fileResults[firstFile] = fileResults[secondFile]
                        else:
                            fileResults[firstFile] = fileFlavor
                            fileResults[secondFile] = fileFlavor
                            fileFlavor += 1
                    else:
                        fileResults[secondFile] = fileResults[firstFile]

        fileFlavors = {}

        # do the reverse multidict thing _again_
        for key, value in fileResults.items():
            fileFlavors.setdefault(value, set()).add(key)

        #print( 'fileFlavors', fileFlavors )

        # extracts sets of files that match so we can print them out
        fileSetsThatMatch = []

        for key, value in fileFlavors.items():
            #print( 'value', value )
            if key > 0 and len(value) > 1:
                fileSetsThatMatch.append(list(value))

        #print( 'fileSetsThatMatch', fileSetsThatMatch )

        for fileSet in fileSetsThatMatch:
            matchResults.append(fileSet)

    print()
    print('Match results...')

    # first pass: compute the widest name in each column so the output lines up
    maxSize = [0, 0]

    for matchResult in matchResults:
        matches = sorted(matchResult)

        if len(maxSize) < len(matches):
            maxSize.extend([0] * (len(matches) - len(maxSize)))

        for i, match in enumerate(matches):
            if maxSize[i] < len(match):
                maxSize[i] = len(match)

    # second pass: print each match group padded to the column widths
    for matchResult in matchResults:
        matches = sorted(matchResult)
        output = ''

        # BUGFIX: the original iterated `for match in matches:` and indexed
        # maxSize with the stale `i` left over from the sizing loop above,
        # so every column was padded with the wrong width.
        for i, match in enumerate(matches):
            output += '"' + match + '",' + ' ' * (maxSize[i] - len(match) + 5)

        print(output)
def traceback(req, html=0):
    """Report the exception currently being handled to the request log via
    req.error(), and, when ``html`` is true, also render a cgitb-style HTML
    error page to ``req``.

    :param req: request object providing write(), error() and header methods
    :param html: when true, emit the HTML page in addition to the log text
    """
    import traceback, time, types, linecache, inspect, repr
    # NOTE: the local name `repr` is deliberately rebound twice -- first to a
    # truncating Repr instance, then to its bound repr() method used below.
    repr = repr.Repr()
    repr.maxdict = 10
    repr.maxlist = 10
    repr.maxtuple = 10
    repr.maxother = 200
    repr.maxstring = 200
    repr = repr.repr
    (etype, evalue, etb) = sys.exc_info()
    if type(etype) is types.ClassType:
        # Old-style (Python 2) exception class: report just its name.
        etype = etype.__name__
    if html:
        try:
            # Try to start a fresh response for the error page.
            req.clear_headers()
            req.clear_output()
            req.set_header("Content-Type", "text/html; charset=iso-8859-1")
        except SequencingError:
            # Output already started: close any tags left open so the
            # error page still renders.
            req.write(
                "</font></font></font></script></object></blockquote></pre>"
                "</table></table></table></table></table></table></font></font>"
            )
        req.write("""\
<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>jonpy traceback: %s</title>
<style type="text/css"><!--
BODY { background-color: #f0f0f8; font-family: helveta, arial, sans-serif }
.tb_head { background-color: #6622aa; color: #ffffff }
.tb_title { font-size: x-large }
.tb_frame { background-color: #d8bbff }
.tb_lineno { font-size: smaller }
.tb_codehigh { background-color: #ffccee }
.tb_code { color: #909090 }
.tb_dump { color: #909090; font-size: smaller }
--></style>
</head><body>
<table width="100%%" cellspacing="0" cellpadding="0" border="0">
<tr class="tb_head">
<td valign="bottom" class="tb_title"> <br /><strong>%s</strong></td>
<td align="right" valign="bottom">%s<br />%s</td></tr></table>
<p>A problem occurred in a Python script.
Here is the sequence of function calls leading up to the error, with the most recent first.</p>
""" % (_tb_encode(etype), _tb_encode(etype),
       "Python %s: %s" % (sys.version.split()[0], sys.executable),
       time.ctime(time.time())))
    req.error("jonpy error: %s at %s\n" % (etype, time.ctime(time.time())))
    # this code adapted from the standard cgitb module
    # unfortunately we cannot use that module directly,
    # mainly because it won't allow us to output to the log
    if html:
        req.write("<p><strong>%s</strong>: %s" %
                  (_tb_encode(etype), _tb_encode(evalue)))
    req.error("%s: %s\n" % (etype, evalue))
    #if type(evalue) is types.InstanceType:
    #    for name in dir(evalue):
    #        if html:
    #            req.write("\n<br /><tt> </tt>%s = %s" %
    #                      (_tb_encode(name), _tb_encode(repr(getattr(evalue, name)))))
    #        req.error(" %s = %s\n" % (name, repr(getattr(evalue, name))))
    if html:
        req.write("</p>\n")
    # NOTE(review): `frames` is never used -- appears to be cgitb leftover.
    frames = []
    records = inspect.getinnerframes(etb, 7)
    records.reverse()  # most recent call first
    for frame, fn, lnum, func, lines, index in records:
        if html:
            req.write("""\
<table width="100%" cellspacing="0" cellpadding="0" border="0">""")
        fn = fn and os.path.abspath(fn) or "?"
        args, varargs, varkw, locls = inspect.getargvalues(frame)
        if func != "?":
            fav = inspect.formatargvalues(args, varargs, varkw, locls)
            if html:
                req.write(
                    "<tr><td class=\"tb_frame\">%s in <strong>%s</strong>%s</td>"
                    "</tr>\n" % (_tb_encode(fn), _tb_encode(func), _tb_encode(fav)))
            req.error("%s in %s%s\n" % (fn, func, fav))
        else:
            if html:
                req.write("<tr><td class=\"tb_head\">%s</td></tr>\n" %
                          (_tb_encode(fn), ))
            req.error("%s\n" % (fn, ))
        highlight = {}

        def reader(lnum=[lnum]):
            # Return successive source lines, recording each line number
            # handed out so it can be highlighted in the listing below.
            highlight[lnum[0]] = 1
            try:
                return linecache.getline(fn, lnum[0])
            finally:
                lnum[0] += 1

        vrs = _scanvars(reader, frame, locls)
        if index is not None:
            # Emit the source context around the failing line.
            i = lnum - index
            for line in lines:
                if html:
                    if i in highlight:
                        style = "tb_codehigh"
                    else:
                        style = "tb_code"
                    req.write(
                        "<tr><td class=\"%s\"><code><span class=\"tb_lineno\">"
                        "%s</span> %s</code></td></tr>\n" %
                        (style, " " * (5 - len(str(i))) + str(i), _tb_encode(line)))
                req.error("%s %s" % (" " * (5 - len(str(i))) + str(i), line))
                i += 1
        # Dump the variables referenced by this frame, de-duplicated.
        done = {}
        dump = []
        htdump = []
        for name, where, value in vrs:
            if name in done:
                continue
            done[name] = 1
            if value is not __UNDEF__:
                if where == "global":
                    dump.append("global %s = %s" % (name, repr(value)))
                    htdump.append(
                        "<em>global</em> <strong>%s</strong> = %s" %
                        (_tb_encode(name), _tb_encode(repr(value))))
                elif where == "builtin":
                    dump.append("builtin %s = %s" % (name, repr(value)))
                    htdump.append(
                        "<em>builtin</em> <strong>%s</strong> = %s" %
                        (_tb_encode(name), _tb_encode(repr(value))))
                elif where == "local":
                    dump.append("%s = %s" % (name, repr(value)))
                    htdump.append("<strong>%s</strong> = %s" %
                                  (_tb_encode(name), _tb_encode(repr(value))))
                else:
                    dump.append("%s%s = %s" %
                                (where, name.split(".")[-1], repr(value)))
                    htdump.append(
                        "%s<strong>%s</strong> = %s" %
                        (_tb_encode(where), _tb_encode(name.split(".")[-1]),
                         _tb_encode(repr(value))))
            else:
                dump.append("%s undefined" % (name, ))
                htdump.append("%s <em>undefined</em>" % (_tb_encode(name, )))
        if html:
            req.write("<tr><td class=\"tb_dump\">%s</td></tr>\n" %
                      (", ".join(htdump), ))
        req.error(", ".join(dump) + "\n")
        if html:
            req.write("</table>\n")
    if html:
        req.write("</body></html>\n")
    linecache.clearcache()
# NOTE(review): this line originally began with an orphaned duplicate of the
# tail of log_exception() ("return exc_repr = repr.Repr() ..."), which is not
# valid Python anywhere; the duplicated fragment has been removed.

# The `repr` module was renamed `reprlib` in Python 3; support both.
try:
    import reprlib
except ImportError:  # Python 2
    import repr as reprlib

# Truncating repr used when rendering logged values, to keep log lines short.
log_repr = reprlib.Repr()
log_repr.maxstring = 100


def logline(log, type, typeid, operation, **data):
    """Emit one structured log line of the form "type:typeid:operation"
    optionally followed by space-separated key=value pairs (values rendered
    with the truncating log_repr, keys sorted for determinism).

    :param log: callable that accepts the finished log line (e.g. logger.info)
    :param type: entity type tag
    :param typeid: entity identifier
    :param operation: operation name
    :param data: extra key/value context appended to the line
    """
    if not data:
        log('{0}:{1}:{2}'.format(type, typeid, operation))
    else:
        # `items()` works on Python 2 and 3; the original `iteritems()` is
        # Python-2-only and crashed under Python 3.
        data_str = ' '.join(
            '='.join((key, log_repr.repr(val)))
            for key, val in sorted(data.items()))
        log('{0}:{1}:{2} {3}'.format(type, typeid, operation, data_str))


# Parses lines produced by logline(): "type:typeid:operation [data]".
parseline_pattern = re.compile(r'^([^:]+):([^:]+):(\S+) ?(.*)$')