def _load(self):
    """Load categories.json and build the in-memory category tree.

    Populates self._categories (raw JSON), self._root (tree of _Node),
    and the lookup maps self._name_category_map /
    self._short_name_category_map keyed by display name and short name.
    """
    # Context manager guarantees the file is closed even if json.load raises.
    with open('categories.json') as json_data:
        self._categories = json.load(json_data)
    self._root = _Node(None, None)
    # Walk the JSON tree and the node tree in lockstep. Traversal order is
    # irrelevant for building the tree, so one stack of (node, json) pairs
    # replaces the original pair of parallel queues.
    pending = deque([(self._root, self._categories)])
    while pending:
        node, category = pending.pop()
        if node.data is not None:
            self._name_category_map[node.data['name']] = node
            self._short_name_category_map[node.data['shortName']] = node
        children = category.get('categories')
        if children is not None:
            for child in children:
                # NOTE(review): an earlier comment suggested using 'name'
                # instead of 'pluralName' for non-plural categories; kept
                # 'pluralName' to preserve behavior — confirm.
                child_node = _Node(node, {
                    'name': child['pluralName'],
                    'shortName': child['shortName'],
                })
                node.children.append(child_node)
                pending.append((child_node, child))
def uninformedSearch(queue,goalstate,limit,numRuns,choice,p1):
    # Generic uninformed search over states using a FIFO frontier (BFS).
    #   queue     : initial list of frontier states
    #   goalstate : state tested for via testProcedure
    #   limit     : maximum number of node expansions before giving up
    #   numRuns   : running expansion counter, forwarded to outputProcedure
    #   choice,p1 : opaque values forwarded to outputProcedure
    # Prints a diagnostic and returns None in every case; results are
    # reported through outputProcedure (defined elsewhere).
    # List to keep track of visited nodes
    visited = []
    # Get first list of states in queue
    path = deque([queue])
    # cloning path
    temp_path = [queue]
    # If no more states available then return false
    if queue == []:
        print "No Solution Exists"
        return
    elif testProcedure(queue[0],goalstate):
        # Check state is goal state and print output
        outputProcedure(numRuns, queue[0],choice,p1)
        return
    elif limit == 0:
        print "Limit reached"
        return
    q = deque(queue)
    while len(q) > 0:
        # Get first element in queue
        n = q.popleft()
        temp_path = path.popleft()
        if n not in visited:
            # add node to visited nodes
            visited.append(n)
            limit -= 1
            numRuns += 1
            if queue == []:
                # check for elements in queue
                print "No Solution Exists"
                return
            elif testProcedure(n,goalstate):
                # check if reached goal state
                outputProcedure(numRuns,temp_path,choice,p1)
                return
            elif limit == 0:
                print "Limit reached"
                return
            # find successors of current state and extend their paths
            successors = expandProcedure(n)
            for succ in successors:
                new_path = temp_path + [succ]
                path.append(new_path)
            # Add successors in queue
            q.extend(successors)
    print "No Solution Exists"
    return
def uninformedSearch(queue, goalstate, limit, numRuns, choice, p1):
    # Duplicate of the uninformedSearch above (reformatted spacing only):
    # BFS over states with an expansion limit; results reported through
    # outputProcedure, diagnostics printed, always returns None.
    # List to keep track of visited nodes
    visited = []
    # Get first list of states in queue
    path = deque([queue])
    # cloning path
    temp_path = [queue]
    # If no more states available then return false
    if queue == []:
        print "No Solution Exists"
        return
    elif testProcedure(queue[0], goalstate):
        # Check state is goal state and print output
        outputProcedure(numRuns, queue[0], choice, p1)
        return
    elif limit == 0:
        print "Limit reached"
        return
    q = deque(queue)
    while len(q) > 0:
        # Get first element in queue
        n = q.popleft()
        temp_path = path.popleft()
        if n not in visited:
            # add node to visited nodes
            visited.append(n)
            limit -= 1
            numRuns += 1
            if queue == []:
                # check for elements in queue
                print "No Solution Exists"
                return
            elif testProcedure(n, goalstate):
                # check if reached goal state
                outputProcedure(numRuns, temp_path, choice, p1)
                return
            elif limit == 0:
                print "Limit reached"
                return
            # find successors of current state
            successors = expandProcedure(n)
            for succ in successors:
                new_path = temp_path + [succ]
                path.append(new_path)
            # Add successors in queue
            q.extend(successors)
    print "No Solution Exists"
    return
def get_items(string_to_search):
    '''Scrape the Discogs marketplace results table for string_to_search
    and return a list of Item objects with image, title, link, source
    page and price filled in where present.'''
    data = get_data_from_server(string_to_search)
    soup = BeautifulSoup(''.join(data))
    list = []  # NOTE(review): shadows the builtin 'list'
    table = soup.find('table',{'class':'mpitems'})
    rows = deque(table.findAll('tr'))
    # drop the header row
    rows.popleft()
    for theItem in rows:
        # direct <td> children only, to avoid nested tables
        cols = theItem.findAll('td',recursive=False)
        newItem = Item()
        image = cols[0].find('img')
        if image:
            newItem.image=(image['src'])
        titleSpan = cols[1].find('span',{'class':'br_item_title'})
        if titleSpan:
            newItem.title = unicode(titleSpan.a.string)
            # the href is site-relative, so prefix the host
            newItem.link='http://www.discogs.com'+titleSpan.a['href']
            newItem.fromPage='Discogs'
        priceSpan = cols[4].find('span',{'class':'price'})
        if priceSpan:
            newItem.price=unicode(priceSpan.string)
        list.append(newItem)
    return list
def getCaminoOptimo(self, vInicial, vFinal):
    """Dijkstra shortest path from vInicial to vFinal.

    Returns (distance, path) where path is the list of vertex keys from
    vInicial to vFinal. The weight of the edge u->x is read from
    self.vertices[x].getPesoArista(u). If vFinal is unreachable the
    distance is float('inf') and the path is just [vFinal].
    """
    # Initialise every distance to infinity (instead of the original
    # magic 999999999 sentinel) and every parent to None.
    distancias = {v: float('inf') for v in self.vertices}
    padres = {v: None for v in self.vertices}
    distancias[vInicial] = 0  # the distance to itself is zero
    cola = deque()
    cola.append((vInicial, 0))
    while cola:
        # Extract the pending vertex with the smallest tentative distance.
        u = min(cola, key=lambda entry: entry[1])
        cola.remove(u)
        for x in self.vertices[str(u[0])].getVecinos():
            peso = self.vertices[x].getPesoArista(str(u[0]))
            if distancias[x] > distancias[u[0]] + peso:
                # Relax the edge and remember the parent (vertex, dist) pair.
                distancias[x] = distancias[u[0]] + peso
                padres[x] = u
                cola.append((x, distancias[x]))
    # Rebuild the path by walking the parent chain backwards.
    res = [vFinal]
    padre = padres[vFinal]
    if padre is None:
        return distancias[vFinal], res
    while padre[0] != vInicial:
        res.append(padre[0])
        padre = padres[padre[0]]
    if vInicial not in res:
        res.append(vInicial)
    res.reverse()
    return distancias[vFinal], res
def __read_data(self):
    '''reads (multi)labeled data in "target feat:val feat:val ..." format
    from self.__file and stores it as a DataSet in self.__dataSet'''
    dataList = []
    max_f_index = 0  # highest feature index seen across all lines
    for line in self.__file:
        try:
            # strip comments, whitespaces and line breaks
            # (split instead of find: line.find('#') == -1 on comment-free
            # lines made line[:-1] silently drop the last character)
            line = line.split('#', 1)[0]
            line = line.strip('\n')
            line = line.strip()
            if line == '':
                continue
            # something left? go!
            data_ = {}
            tokens = deque(line.split(' '))
            data_['target'] = float(tokens.popleft())
            for token in tokens:
                t = token.split(':')
                feature = int(t[0])
                if feature > max_f_index:
                    max_f_index = feature
                # keep integer-looking values as ints, others as floats
                data_[feature] = float(t[1]) if '.' in t[1] else int(t[1])
            dataList.append(data_)
        except Exception as e:
            # best-effort parsing: report and skip malformed lines
            # (print() works on both Python 2 and 3)
            print(e)
    self.__dataSet = DataSet(dataList, max_f_index)
def getCaminoOptimo(self, vInicial, vFinal):
    # Dijkstra shortest path from vInicial to vFinal over self.vertices.
    # Returns (distance, path-as-list-of-vertex-keys). Edge weight u->x is
    # read from self.vertices[x].getPesoArista(u).
    distancias = dict()
    padres = dict()
    cola = deque()
    # mark all distances as infinity (999999999 sentinel) and parents None
    for v in self.vertices.keys():
        distancias[v] = 999999999
        padres[v] = None
    distancias[vInicial] = 0  # the distance to itself is zero
    cola.append((vInicial, distancias[vInicial]))
    while cola:
        # take the pending (vertex, distance) pair with smallest distance
        u = min(cola, key=lambda x: x[1])
        cola.remove(u)
        for x in self.vertices[str(u[0])].getVecinos():  # for x in neighbours
            if (distancias[x] >
                    distancias[u[0]] + self.vertices[x].getPesoArista(str(u[0]))):
                # relax the edge and record the parent pair
                distancias[x] = distancias[
                    u[0]] + self.vertices[x].getPesoArista(str(u[0]))
                padres[x] = u
                cola.append((x, distancias[x]))
    # rebuild the path backwards along the parent chain
    padre = padres[vFinal]
    res = list()
    res.append(vFinal)
    ###
    if padre == None:
        # vFinal has no parent: unreachable (distance stays 999999999)
        return distancias[vFinal], res
    ###
    while padre[0] != vInicial:
        res.append(padre[0])
        padre = padres[padre[0]]
    if not res.__contains__(vInicial):
        res.append(vInicial)
    res.reverse()
    return distancias[vFinal], res
def opendocx(file):
    '''Open a docx file, return a document XML tree'''
    archive = zipfile.ZipFile(file)
    document = etree.fromstring(archive.read('word/document.xml'))
    # Collect embedded media entries in a deterministic (sorted) order.
    media = [entry for entry in archive.namelist() if "word/media" in entry]
    return {"media": deque(sorted(media)), "doc": document, "file": file}
def add_transition_to_memory_old(self, input_symbol, state, startNewTransitionQueue, action):
    """
    We use startNewTransitionQueue when we have finished with pushing all
    the actions. Then we start new parallel branch. Thus we create a new
    FSA for that branch and put it in the list of FSAs for the state
    """
    # A state we have never recorded always opens its own branch.
    begin_new_branch = startNewTransitionQueue or state not in self.memory
    branches = self.memory.setdefault(state, [])
    if begin_new_branch:
        branches.append(deque([input_symbol]))
    else:
        branches[-1].append(input_symbol)
def opendocx(file):
    """Open a docx file, return a document XML tree.

    The returned dict holds the parsed document element ("doc"), the
    sorted archive names of embedded media ("media", a deque) and the
    original file argument ("file").
    """
    # Removed leftover debug code that wrote a scratch "test.tmp" file:
    # opening a document must not create files as a side effect.
    mydoc = zipfile.ZipFile(file)
    xmlcontent = mydoc.read("word/document.xml")
    document = etree.fromstring(xmlcontent)
    # Embedded media entries, sorted for a stable order.
    media = [item for item in mydoc.namelist() if "word/media" in item]
    doc = {"media": deque(sorted(media)), "doc": document, "file": file}
    return doc
def uninformedSearch(queue,goalState,limit,numRuns): visited = []# List to keep track of visited nodes path = deque([queue])# Get first list of states in queue temp_path = [queue]# cloning path if queue == []:# If no more states available then return false print "No Solution Exists" return 0 elif testProcedure(queue[0],goalState):# Check state is goal state and return info=[numRuns,queue] return info elif limit == 0: print "Limit reached" return 0 q = deque(queue) while len(q) > 0:# Get first element in queue n = q.popleft() temp_path = path.popleft() if n not in visited:# add node to visited nodes visited.append(n) limit -= 1 numRuns += 1 if queue == []: # check for elements in queue print "No Solution Exists" return numRuns elif testProcedure(n,goalState): # check if reached goal state info=[numRuns,temp_path] return info elif limit == 0: print "Limit reached" return numRuns successors = expandProcedure(n) for succ in successors: new_path = temp_path + [succ] path.append(new_path) q.extend(successors) print "No Solution Exists" return numRuns
def opendocx(file):
    '''Open a docx file, return a document XML tree'''
    package = zipfile.ZipFile(file)
    document = etree.fromstring(package.read('word/document.xml'))
    # Media entries under word/media, kept sorted for stable ordering.
    media = deque(sorted(name for name in package.namelist()
                         if "word/media" in name))
    return {"media": media, "doc": document, "file": file}
def load(logindex_reader):
    """Rebuild a SerializedLogIndex from the reader, keeping only the
    single contiguous run of entries still flagged needs_commit.

    Raises CorruptLogIndexException when a second needs_commit run
    appears after the first one has already ended.
    """
    entries = list(logindex_reader.iterentries())
    first_pending = None   # index of the first needs_commit entry
    after_pending = None   # index just past the end of the run
    for pos, entry in enumerate(entries):
        if entry.needs_commit:
            if first_pending is None:
                first_pending = pos
            elif after_pending is not None:
                # needs_commit set again after the run closed: corrupt index
                raise CorruptLogIndexException("needs_commit fields are incoherent")
        elif first_pending is not None and after_pending is None:
            after_pending = pos
        # otherwise just advance
    if after_pending is None:
        # run (if any) extends to the last scanned entry
        after_pending = pos
    return SerializedLogIndex(logindex_reader,
                              deque(entries[first_pending:after_pending]),
                              start_pos=0)
def __init__(self, codebase, project_key, opt_input):
    '''
    :param project_key: The name of the project in the Eclipse workspace.
    :param codebase: The codebase instance to which the CodeElement will be
                     associated with.
    :param opt_input: Optional input. Not used by this parser.
    '''
    self.project_name = project_key
    # Py4J bridge to the running Eclipse/JVM side.
    self.gateway = JavaGateway()
    # list of tuples. [(parent, child, child)]
    self.hierarchies = deque()
    self.codebase = codebase
    # work queue of elements still to be processed
    self.queue = Queue()
    self.package_kind = CodeElementKind.objects.get(kind='package')
    # Eclipse JDT AST entry points resolved through the gateway.
    self.ASTParser = self.gateway.jvm.org.eclipse.jdt.core.dom.ASTParser
    self.JLS3 = self.gateway.jvm.org.eclipse.jdt.core.dom.AST.JLS3
def __read_binary_data(self):
    '''reads data and checks for binary classes'''
    targetList = []   # the (at most two) distinct raw target values seen
    dataList = []
    max_f_index = 0   # highest feature index seen across all lines
    for line in self.__file:
        # strip comments, whitespaces and line breaks
        # (split instead of find: line.find('#') == -1 on comment-free
        # lines made line[:-1] silently drop the last character)
        line = line.split('#', 1)[0]
        line = line.strip('\n')
        line = line.strip()
        if line == '':
            continue
        # something left? go!
        data_ = {}
        tokens = deque(line.split(' '))
        data_['target'] = float(tokens.popleft())
        if data_['target'] not in targetList:
            # Reject a third class immediately. The original appended it
            # and only raised on the NEXT line, so a third class appearing
            # on the final line went undetected.
            if len(targetList) >= 2:
                raise TypeError('Not a binary class file')
            targetList.append(data_['target'])
        for token in tokens:
            t = token.split(':')
            feature = int(t[0])
            if feature > max_f_index:
                max_f_index = feature
            data_[feature] = float(t[1]) if '.' in t[1] else int(t[1])
        dataList.append(data_)
    # normalization of targets: e.g.'0 & 1 ==> -1 & +1'
    # bonus: handling of ordinal values like 'green & blue'
    try:
        a = int(targetList[0])
        b = int(targetList[1])
        # larger value becomes '+1'
        if a > b:
            for i in dataList:
                i['target'] = +1 if int(i['target']) == a else -1
        else:
            for i in dataList:
                i['target'] = -1 if int(i['target']) == a else +1
    except ValueError:
        # value is not int - set classes
        # (fixed: the original indexed dataList with its own dict items,
        # which raised TypeError: list indices must be integers)
        for i in dataList:
            i['target'] = +1 if i['target'] == targetList[0] else -1
    self.__dataSet = DataSet(dataList, max_f_index)
def load(logindex_reader):
    # Scan all index entries and locate the single contiguous run whose
    # needs_commit flag is set; that run becomes the pending tocommit
    # deque of the returned SerializedLogIndex. A second separate run
    # means the index is corrupt.
    start_pos, end_pos = None, None
    entries = list(logindex_reader.iterentries())
    for i, idx in enumerate(entries):
        if idx.needs_commit:
            if start_pos is None:
                start_pos = i
            elif end_pos is not None:
                # needs_commit seen again after the run already ended
                raise CorruptLogIndexException(
                    "needs_commit fields are incoherent")
        else:
            if start_pos is not None:
                if end_pos is None:
                    end_pos = i
                #else just advance
    # NOTE(review): 'i' is undefined here when the reader yields no
    # entries, which would raise NameError — confirm callers never load
    # an empty index.
    end_pos = end_pos if end_pos is not None else i
    return SerializedLogIndex(logindex_reader,
                              deque(entries[start_pos:end_pos]),
                              start_pos=0)
def test_SerializedLogIndex_recover_ErasesLastUnfinishedTransaction(self):
    # recover() must keep the first, complete transaction in tocommit and
    # drop the trailing BEGIN_TX that never saw its END_TX.
    COUNT = 10
    io = IoHandle.using_stringio(COUNT)
    reader = LogIndexReader(io, COUNT)
    logindex = SerializedLogIndex.new(reader)
    # one complete transaction...
    logindex.log(LogIndexEntry(LogIndexEntry.BEGIN_TX))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 1))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 2))
    logindex.log(LogIndexEntry(LogIndexEntry.END_TX))
    # ...followed by an unfinished one
    logindex.log(LogIndexEntry(LogIndexEntry.BEGIN_TX))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 3))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 4))
    # reload from the same reader and recover
    logindex2 = SerializedLogIndex.load(reader)
    logindex2.recover()
    self.assertEquals(logindex2.tocommit, deque([
        LogIndexEntry(LogIndexEntry.BEGIN_TX),
        LogIndexEntry(LogIndexEntry.WRITE, 1),
        LogIndexEntry(LogIndexEntry.WRITE, 2),
        LogIndexEntry(LogIndexEntry.END_TX)]
    ))
def test_SerializedLogIndex_recover_ErasesLastUnfinishedTransaction(self):
    """recover() must drop the trailing BEGIN_TX that never saw its
    END_TX and keep only the first, complete transaction in tocommit."""
    COUNT = 10
    io = IoHandle.using_stringio(COUNT)
    reader = LogIndexReader(io, COUNT)
    logindex = SerializedLogIndex.new(reader)
    # one complete transaction...
    logindex.log(LogIndexEntry(LogIndexEntry.BEGIN_TX))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 1))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 2))
    logindex.log(LogIndexEntry(LogIndexEntry.END_TX))
    # ...followed by an unfinished one
    logindex.log(LogIndexEntry(LogIndexEntry.BEGIN_TX))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 3))
    logindex.log(LogIndexEntry(LogIndexEntry.WRITE, 4))
    # reload from the same reader and recover
    logindex2 = SerializedLogIndex.load(reader)
    logindex2.recover()
    # assertEqual: assertEquals is a deprecated alias (removed in 3.12)
    self.assertEqual(
        logindex2.tocommit,
        deque([
            LogIndexEntry(LogIndexEntry.BEGIN_TX),
            LogIndexEntry(LogIndexEntry.WRITE, 1),
            LogIndexEntry(LogIndexEntry.WRITE, 2),
            LogIndexEntry(LogIndexEntry.END_TX)
        ]))
def __init__(self, tocommit=None):
    """
    :param tocommit: optional deque of pending log entries; a new empty
                     deque is created when omitted.
    """
    # 'is None' rather than 'tocommit or deque()': an explicitly passed
    # empty deque is falsy and would otherwise be silently replaced by a
    # fresh object, detaching the caller's reference.
    self.tocommit = deque() if tocommit is None else tocommit
    # lifecycle event hooks
    self.ON_COMMITING = Event()
    self.ON_LOG = Event()
    self.ON_REVERTLOG = Event()
    self.ON_COMMITED = Event()
def __init__(self, webpage):
    # NOTE(review): the 'webpage' argument is ignored and a fresh
    # WebPage() is stored instead — possibly intentional, but it looks
    # like a bug; confirm whether self.webpage should be the passed-in
    # instance.
    self.webpage=WebPage()
    # criteria to evaluate (deque; consumers elsewhere decide the order)
    self.criteria_list=deque()
    # accumulated evaluation result, starts at zero
    self.result=0
def docclass( self, object, name=None, mod=None ):
    """Produce tex documentation for a given class object."""
    realname = object.__name__
    name = name or realname
    bases = object.__bases__

    def makename( c, m=object.__module__ ):
        # qualify a base class name relative to this class's module
        return classname( c, m )

    # Title: "class Real" or "Alias = class Real(Base, ...)" in texttt.
    if name == realname:
        title = self.texttt( 'class ' + realname )
    else:
        title = self.texttt( name + ' = class ' + realname )
    if bases:
        parents = map( makename, bases )
        title = title + '(%s)' % join( parents, ', ' )

    doc = getdoc( object )
    contents = doc and [doc + '\n'] or []
    push = contents.append

    # List the mro, if non-trivial.
    mro = deque( inspect.getmro( object ) )
    # if len(mro) > 2:
    #     push("Method resolution order:")
    #     for base in mro:
    #         push(' ' + makename(base))
    #     push('')

    # class to pump out a horizontal rule between sections.
    class HorizontalRule:
        def __init__( self ):
            self.needone = 0
        def maybe( self ):
            if self.needone:
                push( '%' * 40 )
            self.needone = 1
    hr = HorizontalRule()

    def spill( msg, attrs, predicate ):
        # Document every attr matched by predicate (skipping dunder
        # names) and return the attrs that did not match.
        ok, attrs = _split_list( attrs, predicate )
        if ok:
            hr.maybe()
            #push(msg)
            docstr = []
            for name, kind, homecls, value in ok:
                if name.startswith( '__' ) and name.endswith( '__' ):
                    pass
                else:
                    docstr.append( self.document( getattr( object, name ), name, mod, object ) )
            push( "\n".join( docstr ) )
        return attrs

    def spillproperties( msg, attrs, predicate ):
        ok, attrs = _split_list( attrs, predicate )
        if ok:
            hr.maybe()
            push( msg )
            for name, kind, homecls, value in ok:
                push( self._docproperty( name, value, mod ) )
        return attrs

    def spilldata( msg, attrs, predicate ):
        ok, attrs = _split_list( attrs, predicate )
        if ok:
            hr.maybe()
            push( msg )
            for name, kind, homecls, value in ok:
                if callable( value ) or inspect.isdatadescriptor( value ):
                    doc = getdoc( value )
                else:
                    doc = None
                push( self.docother( getattr( object, name ), name, mod, 70, doc ) + '\n' )
        return attrs

    # keep only the visible attributes (py2 tuple-parameter lambda)
    attrs = filter( lambda ( name, kind, cls, value ): visiblename( name ),
                    inspect.classify_class_attrs( object ) )

    # Walk the mro, emitting each class's own attrs segregated by kind.
    while attrs:
        if mro:
            thisclass = mro.popleft()
        else:
            thisclass = attrs[0][2]
        attrs, inherited = _split_list( attrs, lambda t: t[2] is thisclass )
        if thisclass is __builtin__.object:
            attrs = inherited
            continue
        elif thisclass is object:
            tag = "defined here"
        else:
            tag = "inherited from %s" % classname( thisclass, object.__module__ )
        # NOTE(review): this filter() result is discarded — as written it
        # is a no-op; presumably 'attrs = filter(...)' was intended.
        filter( lambda t: not t[0].startswith( '_' ), attrs )
        # Sort attrs by name.
        attrs.sort()
        # Pump out the attrs, segregated by kind.
        attrs = spill( "Methods %s:\n" % tag, attrs, lambda t: t[1] == 'method' )
        attrs = spill( "Class methods %s:\n" % tag, attrs, lambda t: t[1] == 'class method' )
        attrs = spill( "Static methods %s:\n" % tag, attrs, lambda t: t[1] == 'static method' )
        # attrs = spillproperties("Properties %s:\n" % tag, attrs,
        #                         lambda t: t[1] == 'property')
        # attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
        #                   lambda t: t[1] == 'data')
        # assert attrs == []
        # attrs = inherited
        # NOTE(review): with 'attrs = inherited' commented out, leftover
        # attrs of other kinds may keep this loop spinning once mro is
        # exhausted — confirm intended.

    contents = '\n'.join( contents )
    if not contents:
        return title + '\n'
    return self.classdesc( realname, '\n' + self.indent( rstrip( contents ), ' ' ) )
def docclass(self, object, name=None, mod=None, *ignored):
    """Produce Markdown documentation for a given class object."""
    realname = object.__name__
    name = name or realname
    bases = object.__bases__

    def makename(c, m=object.__module__):
        # qualify a base class name relative to this class's module
        return pydoc.classname(c, m)

    # Build the title, then render it as a Markdown header and bump the
    # header level for everything documented inside the class.
    if name == realname:
        title = 'class ' + realname
    else:
        title = name + ' = class ' + realname
    if bases:
        parents = map(makename, bases)
        title = title + '(%s)' % ', '.join(parents)
    title = self.header(title, self.hlevel, name, 'class')
    self.hlevel += 1

    doc = pydoc.getdoc(object)
    contents = doc and [doc + '\n'] or []
    push = contents.append

    # List the mro, if non-trivial.
    mro = pydoc.deque(inspect.getmro(object))
    if len(mro) > 2:
        push("Method resolution order:\n")
        for base in mro:
            push('* ' + makename(base))
        push('')

    # Cute little class to pump out a horizontal rule between sections.
    class HorizontalRule:
        def __init__(self):
            self.needone = 0
        def maybe(self):
            if self.needone:
                push('-' * 70 + '\n')
            self.needone = 1
    hr = HorizontalRule()

    def spill(msg, attrs, predicate):
        # Document every attr matched by predicate; return the rest.
        ok, attrs = pydoc._split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for name, kind, homecls, value in ok:
                try:
                    value = getattr(object, name)
                except Exception:
                    # Some descriptors may meet a failure in their __get__.
                    # (bug #1785)
                    push(self._docdescriptor(name, value, mod))
                else:
                    push(self.document(value, name, mod, object))
        return attrs

    def spilldescriptors(msg, attrs, predicate):
        ok, attrs = pydoc._split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for name, kind, homecls, value in ok:
                push(self._docdescriptor(name, value, mod))
        return attrs

    def spilldata(msg, attrs, predicate):
        ok, attrs = pydoc._split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for name, kind, homecls, value in ok:
                if (hasattr(value, '__call__') or
                        inspect.isdatadescriptor(value)):
                    doc = pydoc.getdoc(value)
                else:
                    doc = None
                push(
                    self.docother(getattr(object, name), name, mod,
                                  maxlen=70, doc=doc) + '\n')
        return attrs

    # keep only the visible attributes of the class
    attrs = filter(lambda data: pydoc.visiblename(data[0], obj=object),
                   pydoc.classify_class_attrs(object))

    # Walk the mro, emitting each class's own attrs segregated by kind.
    while attrs:
        if mro:
            thisclass = mro.popleft()
        else:
            thisclass = attrs[0][2]
        attrs, inherited = pydoc._split_list(attrs,
                                             lambda t: t[2] is thisclass)
        if thisclass is pydoc.__builtin__.object:
            attrs = inherited
            continue
        elif thisclass is object:
            tag = "defined here"
        else:
            tag = "inherited from %s" % pydoc.classname(
                thisclass, object.__module__)
        # Sort attrs by name.
        attrs.sort()
        # Pump out the attrs, segregated by kind.
        attrs = spill("Methods %s:\n" % tag, attrs,
                      lambda t: t[1] == 'method')
        attrs = spill("Class methods %s:\n" % tag, attrs,
                      lambda t: t[1] == 'class method')
        attrs = spill("Static methods %s:\n" % tag, attrs,
                      lambda t: t[1] == 'static method')
        attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                 lambda t: t[1] == 'data descriptor')
        attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                          lambda t: t[1] == 'data')
        assert attrs == []
        attrs = inherited

    # restore the header level and assemble the final Markdown
    self.hlevel -= 1
    contents = '\n'.join(contents)
    if not contents:
        return title
    return title + contents.rstrip() + '\n'
def docclass(self, object, name=None, mod=None, *ignored):
    """Produce text documentation for a given class object."""
    realname = object.__name__
    name = name or realname
    bases = object.__bases__

    def makename(c, m=object.__module__):
        # qualify a base class name relative to this class's module
        return classname(c, m)

    # Title: "class Real" (bold) or "Alias = class Real(Base, ...)".
    if name == realname:
        title = 'class ' + self.bold(realname)
    else:
        title = self.bold(name) + ' = class ' + realname
    if bases:
        parents = map(makename, bases)
        title = title + '(%s)' % ', '.join(parents)

    contents = []
    push = contents.append

    # Show the constructor/callable signature right under the title.
    try:
        signature = inspect.signature(object)
    except (ValueError, TypeError):
        signature = None
    if signature:
        argspec = str(signature)
        if argspec and argspec != '()':
            push(name + argspec)

    doc = getdoc(object)
    if doc:
        push(self.indent(doc.splitlines()[0]))

    # List the mro, if non-trivial.
    mro = deque(inspect.getmro(object))
    if len(mro) > 2:
        push("Method resolution order:")
        for base in mro:
            push(' ' + makename(base))

    # List the built-in subclasses, if any:
    subclasses = sorted(
        (str(cls.__name__) for cls in type.__subclasses__(object)
         if not cls.__name__.startswith("_")
         and cls.__module__ == "builtins"),
        key=str.lower)
    no_of_subclasses = len(subclasses)
    MAX_SUBCLASSES_TO_DISPLAY = 4
    if subclasses:
        push("Built-in subclasses:")
        for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]:
            push(' ' + subclassname)
        if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY:
            push(' ... and ' +
                 str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) +
                 ' other subclasses')
        push('')

    def header(msg):
        # underlined section heading within the class body
        push(f"\n{msg}\n" + ("-" * len(msg)))

    def spill(msg, attrs, predicate):
        # Document every attr matched by predicate; return the rest.
        ok, attrs = _split_list(attrs, predicate)
        if ok:
            header(msg)
            for name, kind, homecls, value in ok:
                try:
                    value = getattr(object, name)
                except Exception:
                    # Some descriptors may meet a failure in their __get__.
                    # (bug #1785)
                    push(self.docdata(value, name, mod))
                else:
                    push(self.document(value, name, mod, object))
        return attrs

    def spilldescriptors(msg, attrs, predicate):
        ok, attrs = _split_list(attrs, predicate)
        if ok:
            header(msg)
            for name, kind, homecls, value in ok:
                push(self.docdata(value, name, mod))
        return attrs

    def spilldata(msg, attrs, predicate):
        ok, attrs = _split_list(attrs, predicate)
        if ok:
            header(msg)
            for name, kind, homecls, value in ok:
                if callable(value) or inspect.isdatadescriptor(value):
                    doc = getdoc(value)
                else:
                    doc = None
                try:
                    obj = getattr(object, name)
                except AttributeError:
                    obj = homecls.__dict__[name]
                push(self.docother(obj, name, mod, maxlen=70, doc=doc))
        return attrs

    # keep only the visible attributes of the class
    attrs = [(name, kind, cls, value)
             for name, kind, cls, value in classify_class_attrs(object)
             if visiblename(name, obj=object)]

    # Walk the mro, emitting each class's own attrs segregated by kind.
    while attrs:
        if mro:
            thisclass = mro.popleft()
        else:
            thisclass = attrs[0][2]
        attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

        if object is not builtins.object and thisclass is builtins.object:
            attrs = inherited
            continue
        elif thisclass is object:
            tag = "defined here"
        else:
            tag = "inherited from %s" % classname(thisclass,
                                                  object.__module__)

        sort_attributes(attrs, object)

        # Pump out the attrs, segregated by kind.
        attrs = spill("Methods %s" % tag, attrs,
                      lambda t: t[1] == 'method')
        attrs = spill("Class methods %s" % tag, attrs,
                      lambda t: t[1] == 'class method')
        attrs = spill("Static methods %s" % tag, attrs,
                      lambda t: t[1] == 'static method')
        attrs = spilldescriptors("Readonly properties %s" % tag, attrs,
                                 lambda t: t[1] == 'readonly property')
        attrs = spilldescriptors("Data descriptors %s" % tag, attrs,
                                 lambda t: t[1] == 'data descriptor')
        attrs = spilldata("Data and other attributes %s" % tag, attrs,
                          lambda t: t[1] == 'data')

        assert attrs == []
        attrs = inherited

    contents = '\n'.join(contents)
    if not contents:
        return title + '\n'
    return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
             *ignored):
    """Produce HTML documentation for a class object."""
    realname = object.__name__
    name = name or realname
    bases = object.__bases__

    contents = []
    push = contents.append

    # Cute little class to pump out a horizontal rule between sections.
    class HorizontalRule:
        def __init__(self):
            self.needone = 0
        def maybe(self):
            if self.needone:
                push('<hr>\n')
            self.needone = 1
    hr = HorizontalRule()

    # List the mro, if non-trivial.
    mro = deque(inspect.getmro(object))
    if len(mro) > 2:
        hr.maybe()
        push('<dl><dt>Method resolution order:</dt>\n')
        for base in mro:
            push('<dd>%s</dd>\n' % self.classlink(base,
                                                  object.__module__))
        push('</dl>\n')

    def spill(msg, attrs, predicate):
        # Document every attr matched by predicate; return the rest.
        ok, attrs = _split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for name, kind, homecls, value in ok:
                try:
                    value = getattr(object, name)
                except Exception:
                    # Some descriptors may meet a failure
                    # in their __get__.
                    # (bug aroberge/mod_pydoc#1785)
                    push(self._docdescriptor(name, value, mod))
                else:
                    push(
                        self.document(value, name, mod, funcs, classes,
                                      mdict, object))
                push('\n')
        return attrs

    def spilldescriptors(msg, attrs, predicate):
        ok, attrs = _split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for name, kind, homecls, value in ok:
                push(self._docdescriptor(name, value, mod))
        return attrs

    def spilldata(msg, attrs, predicate):
        ok, attrs = _split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for name, kind, homecls, value in ok:
                base = self.docother(getattr(object, name), name, mod)
                if callable(value) or inspect.isdatadescriptor(value):
                    doc = getattr(value, "__doc__", None)
                else:
                    doc = None
                if doc is None:
                    push('<dl><dt>%s</dt><dd></dd></dl>\n' % base)
                else:
                    doc = self.markup(getdoc(value), self.preformat,
                                      funcs, classes, mdict)
                    doc = '<dd><code>%s</code></dd>' % doc
                    push('<dl><dt>%s%s</dt></dl>\n' % (base, doc))
            push('\n')
        return attrs

    # keep only the visible attributes of the class
    attrs = [(name, kind, cls, value)
             for name, kind, cls, value in classify_class_attrs(object)
             if visiblename(name, obj=object)]

    # Anchor map: attribute name (and, when hashable, its value) to the
    # in-page anchor used when cross-linking documentation.
    mdict = {}
    for key, kind, homecls, value in attrs:
        mdict[key] = anchor = '#' + name + '-' + key
        try:
            # NOTE(review): looks up 'name' (the class name), not 'key'
            # (the attribute) — suspect this should be
            # getattr(object, key); kept as-is, confirm against upstream.
            value = getattr(object, name)
        except Exception:
            # Some descriptors may meet a failure in their __get__.
            # (bug #1785)
            pass
        try:
            # The value may not be hashable (e.g., a data attr with
            # a dict or list value).
            mdict[value] = anchor
        except TypeError:
            pass

    # Walk the mro, emitting each class's own attrs segregated by kind.
    while attrs:
        if mro:
            thisclass = mro.popleft()
        else:
            thisclass = attrs[0][2]
        attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

        if thisclass is builtins.object:
            attrs = inherited
            continue
        elif thisclass is object:
            tag = 'defined here'
        else:
            tag = 'inherited from %s' % self.classlink(
                thisclass, object.__module__)
        tag += ':<br>\n'

        # Sort attrs by name.
        attrs.sort(key=lambda t: t[0])

        # Pump out the attrs, segregated by kind.
        attrs = spill('Methods %s' % tag, attrs,
                      lambda t: t[1] == 'method')
        attrs = spill('Class methods %s' % tag, attrs,
                      lambda t: t[1] == 'class method')
        attrs = spill('Static methods %s' % tag, attrs,
                      lambda t: t[1] == 'static method')
        attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
                                 lambda t: t[1] == 'data descriptor')
        attrs = spilldata('Data and other attributes %s' % tag, attrs,
                          lambda t: t[1] == 'data')
        assert attrs == []
        attrs = inherited

    contents = ''.join(contents)

    # Assemble the heading (with base-class links) and the class
    # docstring, then wrap everything in an HTML section.
    if name == realname:
        title = '<span id="%s" class="signature"> class %s</span>' % (
            name, realname)
    else:
        title = (
            '%s = <span id="%s" class="signature">class %s</span>' %
            (name, name, realname))
    if bases:
        parents = []
        for base in bases:
            parents.append(self.classlink(base, object.__module__))
        title = title + '(%s)' % ', '.join(parents)

    doc = self.markup(getdoc(object), self.preformat, funcs, classes,
                      mdict)
    doc = doc and '<code>%s<br> </code>' % doc
    return self.html_section(title, contents, 3, doc,
                             css_class="docclass")