def _Commands(component, depth=3):
  """Yields tuples representing commands.

  To use the command from Python, insert '.' between each element of the
  tuple. To use the command from the command line, insert ' ' between each
  element of the tuple.

  Args:
    component: The component considered to be the root of the yielded
        commands.
    depth: The maximum depth with which to traverse the member DAG for
        commands.

  Yields:
    Tuples, each tuple representing one possible command for this CLI.
    Only traverses the member DAG up to a depth of depth.
  """
  component_is_routine = inspect.isroutine(component)
  if component_is_routine or inspect.isclass(component):
    for arg_completion in Completions(component):
      yield (arg_completion,)
  if component_is_routine:
    return  # Routines have no members to descend into.
  if depth < 1:
    return
  for raw_name, member in _Members(component):
    # TODO: Also skip components we've already seen.
    command_name = _FormatForCommand(raw_name)
    yield (command_name,)
    for subcommand in _Commands(member, depth - 1):
      yield (command_name,) + subcommand
def add_symbols(self, symtab):
    """Add this module's component-decorated callables to a symbol table.

    Module-level routines that pass ``is_component`` are registered as
    ``SymbolType.libraryfunction`` symbols whose ref is the function object
    itself.  For each class in the module, component-decorated methods are
    registered as ``SymbolType.librarymethod`` symbols whose ref is the
    method (not the class).

    Parameters
    ----------
    symtab : symbol table
        The table to which the symbols are added via ``addsym``.

    Returns
    -------
    The same ``symtab``, with the new symbols included.
    """
    # A module must have been loaded before its members can be inspected.
    assert self.mod is not None, 'No module specified or loaded'
    for (name,obj) in inspect.getmembers(self.mod):
        if inspect.isroutine(obj) and is_component(obj):
            # Top-level component function.
            symtab.addsym( Symbol(name, SymbolType.libraryfunction, obj) )
        elif inspect.isclass(obj):
            # Scan the class body for component-decorated methods.
            for (methodname,method) in inspect.getmembers(obj):
                if inspect.isroutine(method) and is_component(method):
                    symtab.addsym( Symbol(methodname, SymbolType.librarymethod, method))
    return symtab
def printExtensions():
    '''
    Echoes all entities in our extension module.
    Useful to create documentation.
    '''
    # Collect the members once; each section below applies its own filter.
    members = inspect.getmembers(frepple)
    print("  Types:")
    for name, o in members:
        # Plain types: classes that are neither exceptions nor iterable.
        if inspect.isclass(o) and not issubclass(o, Exception) and not hasattr(o, "__iter__"):
            print("    %s: %s" % (o.__name__, inspect.getdoc(o)))
    print("  Methods:")
    for name, o in members:
        if inspect.isroutine(o):
            print("    %s: %s" % (o.__name__, inspect.getdoc(o)))
    print("  Exceptions:")
    for name, o in members:
        if inspect.isclass(o) and issubclass(o, Exception):
            print("    %s" % (o.__name__))
    print("  Iterators:")
    for name, o in members:
        if inspect.isclass(o) and hasattr(o, "__iter__"):
            print("    %s: %s" % (o.__name__, inspect.getdoc(o)))
    print("  Other:")
    for name, o in members:
        # Everything not already printed above: the previous filters cover
        # every class (type, exception, or iterator) and every routine.
        if not inspect.isclass(o) and not inspect.isroutine(o):
            print("    %s: %s" % (name, o))
def add_symbols(self, symtab):
    """
    Adds any component-decorated functions in the current module to a
    symbol table

    Parameters
    ----------
    symtab : SymbolTable
        The table to which the functions should be added

    Returns
    -------
    SymbolTable
        The same table with the module functions included
    """
    assert self.mod is not None, 'No module specified or loaded'
    for member_name, member in inspect.getmembers(self.mod):
        if inspect.isroutine(member) and is_component(member):
            # Top-level component function -> library function symbol.
            symtab.addsym(Symbol(member_name, SymbolType.libraryfunction, member))
        elif inspect.isclass(member):
            # Component methods inside classes -> library method symbols.
            for meth_name, meth in inspect.getmembers(member):
                if inspect.isroutine(meth) and is_component(meth):
                    symtab.addsym(Symbol(meth_name, SymbolType.librarymethod, meth))
    return symtab
def _cmp_member(name, given, expected): if hasattr(given, name) is False: return False given = getattr(given, name) expected = getattr(expected, name) is_routine = inspect.isroutine(expected) if inspect.isroutine(given): return is_routine and _cmp_routine(given, expected) return not is_routine
def get_keyword_names(self):
    """Return the wrapped library's keyword names plus the server's
    built-in 'stop_remote_server' keyword.

    Prefers an explicit get_keyword_names/getKeywordNames method on the
    library; otherwise falls back to every public routine attribute.
    """
    explicit_getter = (getattr(self._library, 'get_keyword_names', None)
                       or getattr(self._library, 'getKeywordNames', None))
    if inspect.isroutine(explicit_getter):
        names = explicit_getter()
    else:
        names = [attr for attr in dir(self._library)
                 if not attr.startswith('_')
                 and inspect.isroutine(getattr(self._library, attr))]
    return names + ['stop_remote_server']
def _get_keyword_names(library): kw_names = getattr(library, 'get_keyword_names', None) or getattr(library, 'getKeywordNames', None) if inspect.isroutine(kw_names): names = kw_names() else: names = [name for name in dir(library) if name[0] != '_' and inspect.isroutine(getattr(library, name))] return names
def add_symbols(self, symtab):
    """Register this module's component functions and class methods in
    *symtab* and return the (mutated) table.

    Note: the table is mutated in place; it is also returned for
    convenience in chained calls.
    """
    assert self.mod is not None, 'No module specified or loaded'

    def _register(sym_name, sym_type, ref):
        # One-line helper: wrap and insert a symbol.
        symtab.addsym(Symbol(sym_name, sym_type, ref))

    for name, obj in inspect.getmembers(self.mod):
        if inspect.isroutine(obj):
            if is_component(obj):
                _register(name, SymbolType.libraryfunction, obj)
        elif inspect.isclass(obj):
            for method_name, method in inspect.getmembers(obj):
                if inspect.isroutine(method) and is_component(method):
                    _register(method_name, SymbolType.librarymethod, method)
    return symtab
def init(self):
    """Discover the module's session hooks and test functions.

    Binds optional open/close_test_session hooks, then wraps every
    module-level function matching one of the configured test prefixes
    (honouring include/exclude patterns) in a _TestFunction.
    """
    self.open_function = getattr(self.module, "open_test_session", None)
    self.close_function = getattr(self.module, "close_test_session", None)
    # Session hooks, when present, must be callables.
    if self.open_function is not None:
        assert _inspect.isroutine(self.open_function), self.open_function
    if self.close_function is not None:
        assert _inspect.isroutine(self.close_function), self.close_function
    # dict(...) de-duplicates members that appear under several names.
    functions = dict(_inspect.getmembers(self.module, _inspect.isroutine)).values()
    def line_number(function):
        # Sort key: source order; 0 for callables without __code__ (builtins).
        try:
            return function.__code__.co_firstlineno
        except AttributeError:
            return 0
    def is_test_function(name):
        # Returns True or (implicitly) None; callers treat None as falsy.
        for prefix in self.command.test_prefixes:
            if name.startswith(prefix):
                return True
    def included(names):
        # An empty include list means "include everything".
        if len(self.command.include_patterns) == 0:
            return True
        for pattern in self.command.include_patterns:
            if _fnmatch.filter(names, pattern):
                return True
    def excluded(names):
        for pattern in self.command.exclude_patterns:
            if _fnmatch.filter(names, pattern):
                return True
    for function in sorted(functions, key=lambda x: line_number(x)):
        name = function.__name__
        full_name = "{0}:{1}".format(self.name, name)
        # NOTE(review): name[5:] assumes a 5-character prefix (e.g. "test_");
        # confirm against the configured test_prefixes.
        short_name = name[5:]
        names = [name, full_name, short_name]
        if not is_test_function(name):
            continue
        if not included(names):
            self.command.info("Skipping test '{0}' (not included)", full_name)
            continue
        if excluded(names):
            self.command.info("Skipping test '{0}' (excluded)", full_name)
            continue
        # Registration happens in the constructor; the instance is not kept.
        _TestFunction(self, function)
def build_from_dict(self, attr_dict):
    """Populate this attribute description from *attr_dict*, consuming
    (popping) every recognised key, and return self.

    Keys left in attr_dict afterwards are treated as user-default attribute
    properties.
    """
    self.attr_type = attr_dict.pop('dtype', CmdArgType.DevDouble)
    self.attr_format = attr_dict.pop('dformat', AttrDataFormat.SCALAR)
    self.dim_x = attr_dict.pop('max_dim_x', 1)
    self.dim_y = attr_dict.pop('max_dim_y', 0)
    self.display_level = attr_dict.pop('display_level', DispLevel.OPERATOR)
    self.polling_period = attr_dict.pop('polling_period', -1)
    self.memorized = attr_dict.pop('memorized', False)
    self.hw_memorized = attr_dict.pop('hw_memorized', False)
    is_access_explicit = "access" in attr_dict
    if is_access_explicit:
        self.attr_write = attr_dict.pop('access')
    else:
        # access is defined by which methods were defined
        r_explicit = "fread" in attr_dict or "fget" in attr_dict
        w_explicit = "fwrite" in attr_dict or "fset" in attr_dict
        if r_explicit and w_explicit:
            self.attr_write = AttrWriteType.READ_WRITE
        elif r_explicit:
            self.attr_write = AttrWriteType.READ
        elif w_explicit:
            self.attr_write = AttrWriteType.WRITE
        else:
            # No accessor hints at all: default to a read-only attribute.
            self.attr_write = AttrWriteType.READ
    # NOTE: the inner pop runs unconditionally, so 'fread' is always removed
    # from attr_dict even when 'fget' is present — presumably intentional
    # de-aliasing of the two spellings; confirm before changing.
    fread = attr_dict.pop('fget', attr_dict.pop('fread', None))
    if fread is not None:
        # Accept either a method name or the method object itself.
        if is_pure_str(fread):
            self.read_method_name = fread
        elif inspect.isroutine(fread):
            self.read_method_name = fread.__name__
    fwrite = attr_dict.pop('fset', attr_dict.pop('fwrite', None))
    if fwrite is not None:
        if is_pure_str(fwrite):
            self.write_method_name = fwrite
        elif inspect.isroutine(fwrite):
            self.write_method_name = fwrite.__name__
    fisallowed = attr_dict.pop('fisallowed', None)
    if fisallowed is not None:
        if is_pure_str(fisallowed):
            self.is_allowed_name = fisallowed
        elif inspect.isroutine(fisallowed):
            self.is_allowed_name = fisallowed.__name__
    self.attr_class = attr_dict.pop("klass", self.DftAttrClassMap[self.attr_format])
    self.attr_args.extend((self.attr_name, self.attr_type, self.attr_write))
    # SPECTRUM and IMAGE formats need dim_x; only IMAGE needs dim_y.
    if not self.attr_format == AttrDataFormat.SCALAR:
        self.attr_args.append(self.dim_x)
        if not self.attr_format == AttrDataFormat.SPECTRUM:
            self.attr_args.append(self.dim_y)
    # Whatever remains becomes user-default attribute properties.
    if len(attr_dict):
        self.att_prop = self.__create_user_default_attr_prop(attr_dict)
    return self
def compare(self, other):
    """Compare this object's public data attributes with *other*'s.

    Args:
        other: The object to compare against.

    Returns:
        A list of (name, value) tuples taken from *other* for every public
        (non-dunder, non-routine) attribute whose value differs from ours,
        or is absent on our side.
    """
    def _public_attrs(obj):
        # Non-routine members, excluding __dunder__ names.
        members = inspect.getmembers(obj, lambda a: not inspect.isroutine(a))
        return [a for a in members
                if not (a[0].startswith('__') and a[0].endswith('__'))]

    # Fix: the original assigned `ourAttributes = ourAttributes = [...]`
    # (a duplicated target) and paired attributes positionally, which
    # misaligns or raises IndexError when the two objects expose different
    # attribute sets.  Keying by name is robust to that.
    ours = dict(_public_attrs(self))
    _missing = object()  # sentinel: attribute absent on our side counts as a change
    changes = []
    for name, their_value in _public_attrs(other):
        if ours.get(name, _missing) != their_value:
            changes.append((name, their_value))
    return changes
def compare(self, other):
    """Compare this item with another.

    Args:
        other: The item to compare against.

    Returns:
        A list of the other item's (name, value) tuples for every public
        attribute whose value differs from this item's attribute at the
        same position.
    """
    changes = []
    ourAttributes = inspect.getmembers(self, lambda a: not inspect.isroutine(a))
    ourAttributes = [a for a in ourAttributes
                     if not (a[0].startswith('__') and a[0].endswith('__'))]
    theirAttributes = inspect.getmembers(other, lambda a: not inspect.isroutine(a))
    # BUG FIX: the original filtered on `abs[0].endswith('__')` — subscripting
    # the builtin `abs`, which raises TypeError on every call. It must test
    # the member tuple `a`, mirroring the filter three lines above.
    theirAttributes = [a for a in theirAttributes
                       if not (a[0].startswith('__') and a[0].endswith('__'))]
    # NOTE: positional pairing assumes both objects expose the same
    # attribute names in the same (sorted) order, as siblings of one class do.
    for i in range(len(ourAttributes)):
        if ourAttributes[i][1] != theirAttributes[i][1]:
            changes.append(theirAttributes[i])
    return changes
def map_object(source, destination, routines=False):
    """Copy public (non-dunder) data attributes — and, when *routines* is
    True, routine attributes too — from *source* onto *destination*.

    Returns the mutated *destination*.
    """
    def _copy_members(predicate):
        for name, _value in inspect.getmembers(source, predicate):
            if name.startswith('__') and name.endswith('__'):
                continue  # leave dunders alone
            setattr(destination, name, getattr(source, name))

    _copy_members(lambda a: not inspect.isroutine(a))
    if routines:
        _copy_members(lambda a: inspect.isroutine(a))
    return destination
def _variables(cls, include_special=False): """Return class variables.""" vars = inspect.getmembers(cls, lambda m: not inspect.isroutine(m)) if not include_special: vars = [v for v in vars if not (v[0].startswith('__') and v[0].endswith('__'))] return vars
def Completions(component, verbose=False):
  """Gives possible Fire command completions for the component.

  A completion is a string that can be appended to a command to continue that
  command. These are used for TAB-completions in Bash for Fire CLIs.

  Args:
    component: The component whose completions to list.
    verbose: Whether to include all completions, even private members.

  Returns:
    A list of completions for a command that would so far return the
    component.
  """
  if inspect.isclass(component) or inspect.isroutine(component):
    # Callables complete with their argument names.
    arg_spec = inspectutils.GetFullArgSpec(component)
    return _CompletionsFromArgs(arg_spec.args + arg_spec.kwonlyargs)

  if isinstance(component, (tuple, list)):
    # Sequences complete with their indices.
    return [str(i) for i in range(len(component))]

  if inspect.isgenerator(component):
    # TODO: There are currently no commands available for generators.
    return []

  # Everything else completes with its member names, formatted for the CLI.
  completions = []
  for member_name, _ in _Members(component, verbose):
    completions.append(_FormatForCommand(member_name))
  return completions
def update(self, v):
    """Add `v` to the hash, recursively if needed."""
    # Always fold the type tag in first so e.g. 1 and "1" hash differently.
    self.md5.update(to_bytes(str(type(v))))
    if isinstance(v, string_class):
        self.md5.update(to_bytes(v))
    elif v is None:
        # The type tag alone identifies None.
        pass
    elif isinstance(v, (int, float)):
        self.md5.update(to_bytes(str(v)))
    elif isinstance(v, (tuple, list)):
        for e in v:
            self.update(e)
    elif isinstance(v, dict):
        # Sort the keys so the digest is independent of insertion order.
        keys = v.keys()
        for k in sorted(keys):
            self.update(k)
            self.update(v[k])
    else:
        # Arbitrary object: hash its public, non-callable attributes.
        for k in dir(v):
            if k.startswith('__'):
                continue
            a = getattr(v, k)
            if inspect.isroutine(a):
                continue
            self.update(k)
            self.update(a)
def extract_data_doc(state: State, parent, path: List[str], data):
    """Build the documentation entry for a module- or class-level data member.

    :param state: global extraction state (holds external ``data_docs``)
    :param parent: the object that owns the member (checked for annotations)
    :param path: full dotted path of the member; ``path[-1]`` is its name
    :param data: the member's value
    :return: an ``Empty`` record with name/summary/type/value filled in
    """
    # Data members are everything that is *not* a module/class/routine/frame/
    # traceback/code object — guard against misclassification upstream.
    assert not inspect.ismodule(data) and not inspect.isclass(data) and not inspect.isroutine(data) and not inspect.isframe(data) and not inspect.istraceback(data) and not inspect.iscode(data)
    out = Empty()
    out.name = path[-1]
    # Welp. https://stackoverflow.com/questions/8820276/docstring-for-variable
    out.summary = ''
    out.has_details = False
    if hasattr(parent, '__annotations__') and out.name in parent.__annotations__:
        # Prefer the declared annotation for the member's type, when present.
        out.type = extract_annotation(state, parent.__annotations__[out.name])
    else:
        out.type = None
    # The autogenerated <foo.bar at 0xbadbeef> is useless, so provide the value
    # only if __repr__ is implemented for given type
    if '__repr__' in type(data).__dict__:
        out.value = html.escape(repr(data))
    else:
        out.value = None
    # External data summary, if provided
    path_str = '.'.join(path)
    if path_str in state.data_docs:
        # TODO: use also the contents
        out.summary = render_inline_rst(state, state.data_docs[path_str]['summary'])
        # Consume the external doc so leftovers can be reported later.
        del state.data_docs[path_str]
    return out
def weave_module(module, aspect, methods=NORMAL_METHODS, lazy=False, bag=BrokenBag, **options):
    """
    Low-level weaver for "whole module weaving".

    .. warning:: You should not use this directly.

    :returns: An :obj:`aspectlib.Rollback` object.
    """
    # A module already tracked in ``bag`` is not woven a second time.
    if bag.has(module):
        return Nothing
    entanglement = Rollback()
    method_matches = make_method_matcher(methods)
    logdebug("weave_module (module=%r, aspect=%s, methods=%s, lazy=%s, **options=%s)",
             module, aspect, methods, lazy, options)
    for attr in dir(module):
        func = getattr(module, attr)
        if method_matches(attr):
            if isroutine(func):
                # Plain module-level function: patch it in place.
                entanglement.merge(patch_module_function(module, func, aspect, force_name=attr, **options))
            elif isclass(func):
                # Classes are woven recursively, method by method.
                entanglement.merge(
                    weave_class(func, aspect, owner=module, name=attr, methods=methods, lazy=lazy, bag=bag,
                                **options),
                    # it's not consistent with the other ways of weaving a class (it's never weaved as a routine).
                    # therefore it's disabled until it's considered useful.
                    # #patch_module_function(module, getattr(module, attr), aspect, force_name=attr, **options),
                )
    return entanglement
def set_handlers(self, *args, **kwargs):
    '''Attach one or more event handlers to the top level of the handler
    stack.

    See `push_handlers` for the accepted argument types.
    '''
    # Lazily replace the (immutable) placeholder stack with a real one.
    if type(self._event_stack) is tuple:
        self._event_stack = [{}]
    for positional in args:
        if not inspect.isroutine(positional):
            # An instance: attach every method whose name matches an event.
            for name, handler in inspect.getmembers(positional):
                if name in self.event_types:
                    self.set_handler(name, handler)
            continue
        # A single magically named function.
        name = positional.__name__
        if name not in self.event_types:
            raise EventException('Unknown event "%s"' % name)
        self.set_handler(name, positional)
    for name, handler in kwargs.items():
        # Function for handling given event (no magic)
        if name not in self.event_types:
            raise EventException('Unknown event "%s"' % name)
        self.set_handler(name, handler)
def parse_hookimpl_opts(self, plugin, name):
    """Return hook-implementation options for *name* on *plugin*, or None
    when the attribute cannot be a pytest hook."""
    # pytest hooks are always prefixed with pytest_
    # so we avoid accessing possibly non-readable attributes
    # (see issue #1073)
    if not name.startswith("pytest_"):
        return
    # ignore names which can not be hooks
    if name == "pytest_plugins":
        return
    method = getattr(plugin, name)
    opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
    # consider only actual functions for hooks (#3775)
    if not inspect.isroutine(method):
        return
    # collect unmarked hooks as long as they have the `pytest_' prefix
    if opts is None and name.startswith("pytest_"):
        opts = {}
    if opts is not None:
        # TODO: DeprecationWarning, people should use hookimpl
        # https://github.com/pytest-dev/pytest/issues/4562
        known_marks = {m.name for m in getattr(method, "pytestmark", [])}
        for opt_name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
            opts.setdefault(opt_name,
                            hasattr(method, opt_name) or opt_name in known_marks)
    return opts
def _class_attributes(self): """ If called before init, just class attributes. Otherwise, instance attributes are included as well. Returns GENERATOR! """ attributes = inspect.getmembers(self, lambda a:not(inspect.isroutine(a))) return (a for a in attributes if not(a[0].startswith('__') and a[0].endswith('__')))
def run_example(self, func, images):
    """Execute every doc example attached to *func* and collect results.

    Each element of ``func.doc_examples`` is either a callable (run with the
    image set) or a sequence of arguments for ``func`` itself.

    Returns a list of (result, src_image, pixel_type, display_args) tuples.
    """
    results = []
    for doc_example in func.doc_examples:
        src_image = None
        pixel_type = None
        arguments = None
        display_arguments = None
        if inspect.isroutine(doc_example):
            # A callable example runs itself against the supplied images.
            result = doc_example(images)
        else:
            if len(doc_example):
                if isinstance(func.self_type, args.ImageType):
                    # First element names the pixel type; a copy of that
                    # image becomes the implicit `self` argument.
                    pixel_type = doc_example[0]
                    src_image = images[pixel_type].image_copy()
                    arguments = [src_image] + list(doc_example[1:])
                    display_arguments = doc_example[1:]
                else:
                    display_arguments = arguments = doc_example
            else:
                display_arguments = arguments = []
            result = func.__call__(*tuple(arguments))
        # add quotes around string args for display
        displayargs = None
        if display_arguments != None:
            displayargs = []
            for da in display_arguments:
                if isinstance(da,str):
                    displayargs.append("\"" + da + "\"")
                else:
                    displayargs.append(da)
        results.append((result, src_image, pixel_type, displayargs))
    return results
def get_func_args(func, stripself=False):
    """Return the argument name list of a callable.

    Args:
        func: A function, method, class, ``functools.partial``, or callable
            object.
        stripself: When True, drop the first argument (``self``) from the
            result.

    Returns:
        A list of positional argument names; ``[]`` for builtins and
        method descriptors.

    Raises:
        TypeError: If *func* is not callable.
    """
    if inspect.isfunction(func):
        # Fix: inspect.getargspec was deprecated since 3.0 and removed in
        # Python 3.11; getfullargspec is the drop-in replacement (its .args
        # matches the old first tuple element) and also tolerates
        # annotations and keyword-only arguments.
        func_args = inspect.getfullargspec(func).args
    elif inspect.isclass(func):
        return get_func_args(func.__init__, True)
    elif inspect.ismethod(func):
        return get_func_args(func.__func__, True)
    elif inspect.ismethoddescriptor(func):
        return []
    elif isinstance(func, partial):
        # Drop args already bound positionally, then those bound by keyword.
        return [x for x in get_func_args(func.func)[len(func.args):]
                if not (func.keywords and x in func.keywords)]
    elif hasattr(func, '__call__'):
        if inspect.isroutine(func):
            return []  # builtin: signature not introspectable this way
        elif getattr(func, '__name__', None) == '__call__':
            return []  # avoid infinite recursion on bound __call__
        else:
            return get_func_args(func.__call__, True)
    else:
        raise TypeError('%s is not callable' % type(func))
    if stripself:
        func_args.pop(0)
    return func_args
def analyse(obj):
    """Recursively build a documentation tree for *obj* (module or class).

    Returns a dict with keys name/doc/routines/classes/data/bases.

    NOTE(review): this is Python 2 code — ``types.IntType``/``types.StringType``
    do not exist on Python 3; port to ``int``/``str`` when migrating.
    """
    members = obj.__dict__
    if inspect.isclass(obj):
        main_doc = preprocess_routine(obj.__name__, get_doc(members))
        bases = [x.__name__ for x in obj.__bases__]
    else:
        # Module: no base classes, plain paragraph-split docstring.
        main_doc = split_para(get_doc(members))
        bases = []
    routines = {}
    classes = {}
    data = {}
    for name, m in members.items():
        if name.startswith('__'):
            continue
        try:
            # Skip members whose MRO head is not themselves — i.e. members
            # re-exported from elsewhere rather than defined here.
            mro = list(inspect.getmro(m))
            if mro[0] != m:
                continue
        except AttributeError:
            pass
        if inspect.isroutine(m):
            try:
                doc = m.__doc__
            # NOTE(review): attribute access raises AttributeError, not
            # KeyError — if __doc__ ever did raise, `doc` would be unbound
            # below; confirm the intended guard.
            except KeyError:
                pass
            if not doc:
                doc = 'FIXME'
            routines[name] = preprocess_routine(name, doc)
            continue
        if inspect.isclass(m):
            classes[name] = analyse(m)
            continue
        t = type(m)
        # Plain ints/strings are shown by value; other data by its docstring.
        if t == types.IntType or t == types.StringType:
            data[name] = repr(m)
        else:
            data[name] = m.__doc__
    return {'name': obj.__name__, 'doc': main_doc, 'routines': routines,
            'classes': classes, 'data': data, 'bases': bases}
def _plot_graph_plot(ax, plot_data, **kwargs): plot_args = [] plot_kwargs = {} thumb = kwargs.get('thumb', False) plot_args = plot_data['data'] xaxis, yaxis = plot_args color = plot_data['color'] for prop in [ 'label', 'linewidth', 'zorder']: if prop not in plot_data: continue value = plot_data[prop] if isroutine(value): value = value(thumb) plot_kwargs[prop] = value ax.plot(xaxis, yaxis, color, **plot_kwargs) if plot_data.get('fill', False): where = [ True for x in xaxis ] alpha = plot_data.get('fillalpha', 1.0) plt.fill_between(xaxis, yaxis, where=where, interpolate=True, color=color, alpha=alpha)
def to_dict(self):
    """Serialise this Indexer's state to a plain dict.

    Walks every non-routine attribute and stores those with an
    ``_indxr_``/``_fp_`` prefix, with special handling for the helper,
    experiment list and imagesets.
    """
    obj = {}
    obj['__id__'] = 'Indexer'
    obj['__module__'] = self.__class__.__module__
    obj['__name__'] = self.__class__.__name__
    import inspect
    attributes = inspect.getmembers(self, lambda m:not(inspect.isroutine(m)))
    for a in attributes:
        if a[0] == '_indxr_helper' and a[1] is not None:
            # Flatten the helper's (lattice, cell) pairs into a mapping.
            lattice_cell_dict = {}
            lattice_list = a[1].get_all()
            for l, c in lattice_list:
                lattice_cell_dict[l] = c
            obj[a[0]] = lattice_cell_dict
        elif a[0] == '_indxr_experiment_list' and a[1] is not None:
            # Delegates to the experiment list's own serialiser.
            obj[a[0]] = a[1].to_dict()
        elif a[0] == '_indxr_imagesets':
            from dxtbx.serialize.imageset import imageset_to_dict
            obj[a[0]] = [imageset_to_dict(imgset) for imgset in a[1]]
        elif a[0] == '_indxr_sweeps':
            # XXX I guess we probably want this?
            continue
        elif (a[0].startswith('_indxr_') or a[0].startswith('_fp_')):
            # All other private indexer/fp state is stored verbatim.
            obj[a[0]] = a[1]
    return obj
def __init__(self, o):
    """Wrap *o* as a workflow node.

    *o* may be a plain function, a GenericPE instance, or a nested
    WorkflowGraph; anything else raises.  ``inputs``/``outputs`` hold one
    (currently empty) dict per connection — only the index matters.
    """
    self.obj = o
    self.outputs = []
    self.inputs = []
    # TODO: This may not be accurate - check.
    if inspect.isroutine(o):
        # it's a 'function'
        self.nodeType = self.WORKFLOW_NODE_FN
    elif isinstance(o, GenericPE):
        # TODO Perhaps we should have a similar arrangement for annotating PEs and their ins/outs
        # Give the PE a unique id from the class-level counter (side effect).
        o.id = o.name + str(WorkflowNode.node_counter)
        WorkflowNode.node_counter += 1
        self.nodeType = self.WORKFLOW_NODE_PE
        for i in o.inputconnections.values():
            self.inputs.append({})  # empty for the time being - only the index matters
        for i in o.outputconnections.values():
            self.outputs.append({})
    elif isinstance(o, WorkflowGraph):
        # Composite node: a nested graph; mappings are optional attributes.
        self.nodeType = self.WORKFLOW_NODE_CP
        try:
            for i in o.inputmappings:
                self.inputs.append({})
        except AttributeError:
            pass
        try:
            for i in o.outputmappings:
                self.outputs.append({})
        except AttributeError:
            pass
    else:
        sys.stderr.write('Error: Unknown type of object passed as a Workflow Node: %s\n' % type(o))
        raise Exception("Unknown type of object passed as a Workflow Node: %s" % type(o))
def get_member_info(member):
    """Return a small info dict describing *member* for doc generation.

    NOTE(review): Python 2 code — uses ``print`` statements and ``long``.
    """
    info = {
        'type': str(type(member)),
    }
    if isinstance(member, property):
        info['getter'] = (member.fget.__name__, getdoc(member.fget))
    elif isroutine(member):
        # NOTE(review): key is spelled 'rutine' (sic). It is a runtime dict
        # key that consumers may depend on, so fixing the spelling requires
        # auditing callers first.
        info['rutine'] = (member.__name__, getdoc(member))
    elif 'freeOrionAIInterface' in info['type']:
        # Engine-exposed objects only stringify cleanly.
        info['value'] = str(member)
    elif isinstance(member, int):
        if type(member) == int:
            info['value'] = member
        else:
            # bool (and int subclasses) pass isinstance(int); stringify them.
            info['value'] = str(member)
    elif isinstance(member, (str, long, bool, float)):
        info['value'] = member
    elif isinstance(member, (list, tuple, dict, set, frozenset)):
        if not len(member):
            info['value'] = member
        else:
            # Non-empty containers are only reported to stdout; no 'value'
            # key is set for them.
            print '>>>', type(member), "of", member
            print
    return info
def event(self, *args):
    '''Function decorator for an event handler.

    Usage::

        win = window.Window()

        @win.event
        def on_resize(self, width, height):
            # ...

    or::

        @win.event('on_resize')
        def foo(self, width, height):
            # ...
    '''
    if len(args) == 0:
        # Bare parenthesised form: @window.event()
        def decorator(func):
            self.set_handler(func.__name__, func)
            return func
        return decorator
    head = args[0]
    if inspect.isroutine(head):
        # Direct form: @window.event — register under the function's name.
        self.set_handler(head.__name__, head)
        return head
    if type(head) in (str, unicode):
        # Named form: @window.event('on_resize')
        def decorator(func):
            self.set_handler(head, func)
            return func
        return decorator
def register_task(_id=None, force=False):
    """
    Decorator which registers function in registered tasks with function _id
    """
    if isroutine(_id):
        # Used without parentheses: _id is actually the decorated routine.
        task = _id
        task_id = path(task)  # task id is its python path
        register_tasks(force=force, **{task_id: task})
        return task
    # Used with parameters: _id is an explicit str id, or None (derive it
    # from the function's python path at decoration time).
    def _decorator(function, _id=_id):
        """Register input function as a task"""
        if _id is None:
            _id = path(function)
        register_tasks(force=force, **{_id: function})
        return function
    return _decorator
def __init_fixture_methods(self):
    """Initialize and populate the lists of fixture methods for this TestCase.

    Fixture methods are identified by the fixture_decorator_factory when the
    methods are created. This means in order to figure out all the fixtures
    this particular TestCase will need, we have to test all of its attributes
    for 'fixture-ness'.

    See __fixture_decorator_factory for more info.

    NOTE(review): Python 2 code (``iteritems``, ``instancemethod``).
    """
    # init our self.(class_setup|setup|teardown|class_teardown)_fixtures lists
    for fixture_type in FIXTURE_TYPES:
        setattr(self, "%s_fixtures" % fixture_type, [])
    # the list of classes in our heirarchy, starting with the highest class
    # (object), and ending with our class
    reverse_mro_list = [x for x in reversed(type(self).mro())]
    # discover which fixures are on this class, including mixed-in ones
    self._fixture_methods = defaultdict(list)
    # we want to know everything on this class (including stuff inherited
    # from bases), but we don't want to trigger any lazily loaded
    # attributes, so dir() isn't an option; this traverses __bases__/__dict__
    # correctly for us.
    for classified_attr in inspect.classify_class_attrs(type(self)):
        # have to index here for Python 2.5 compatibility
        attr_name = classified_attr[0]
        unbound_method = classified_attr[3]
        defining_class = classified_attr[2]
        # skip everything that's not a function/method
        if not inspect.isroutine(unbound_method):
            continue
        # if this is an old setUp/tearDown/etc, tag it as a fixture
        if attr_name in DEPRECATED_FIXTURE_TYPE_MAP:
            fixture_type = DEPRECATED_FIXTURE_TYPE_MAP[attr_name]
            # The decorator lives at module level under the fixture type name.
            fixture_decorator = globals()[fixture_type]
            unbound_method = fixture_decorator(unbound_method)
        # collect all of our fixtures in appropriate buckets
        if inspection.is_fixture_method(unbound_method):
            # where in our MRO this fixture was defined
            defining_class_depth = reverse_mro_list.index(defining_class)
            inspection.callable_setattr(
                unbound_method,
                '_defining_class_depth',
                defining_class_depth,
            )
            # we grabbed this from the class and need to bind it to us
            instance_method = instancemethod(unbound_method, self)
            self._fixture_methods[instance_method._fixture_type].append(
                instance_method)
    # arrange our fixture buckets appropriately
    for fixture_type, fixture_methods in self._fixture_methods.iteritems():
        # sort our fixtures in order of oldest (smaller id) to newest, but
        # also grouped by class to correctly place deprecated fixtures
        fixture_methods.sort(
            key=lambda x: (x._defining_class_depth, x._fixture_id))
        # for setup methods, we want methods defined further back in the
        # class hierarchy to execute first.  for teardown methods though,
        # we want the opposite while still maintaining the class-level
        # definition order, so we reverse only on class depth.
        if fixture_type in REVERSED_FIXTURE_TYPES:
            # Stable sort preserves within-class definition order.
            fixture_methods.sort(key=lambda x: x._defining_class_depth,
                                 reverse=True)
        fixture_list_name = "%s_fixtures" % fixture_type
        setattr(self, fixture_list_name, fixture_methods)
def traced(*args, **keywords): """Add call and return tracing to an unbound function or to the methods of a class. The arguments to ``traced`` differ depending on whether it is being used to trace an unbound function or the methods of a class: .. rubric:: Trace an unbound function using the default logger :arg func: the unbound function to be traced By default, a logger named for the function's module is used: >>> import sys >>> logging.basicConfig( ... level=TRACE, stream=sys.stdout, ... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s") >>> @traced ... def func(x, y): ... return x + y ... >>> func(7, 9) TRACE:autologging:func:CALL *(7, 9) **{} TRACE:autologging:func:RETURN 16 16 .. rubric:: Trace an unbound function using a named logger :arg logging.Logger logger: the parent logger used to trace the unbound function >>> import sys >>> logging.basicConfig( ... level=TRACE, stream=sys.stdout, ... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s") >>> @traced(logging.getLogger("my.channel")) ... def func(x, y): ... return x + y ... >>> func(7, 9) TRACE:my.channel:func:CALL *(7, 9) **{} TRACE:my.channel:func:RETURN 16 16 .. rubric:: Trace default methods using the default logger :arg class_: the class whose methods will be traced By default, all "public", "_nonpublic", and "__internal" methods, as well as the special "__init__" and "__call__" methods, will be traced. Tracing log entries will be written to a logger named for the module and class: >>> import sys >>> logging.basicConfig( ... level=TRACE, stream=sys.stdout, ... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s") >>> @traced ... class Class: ... def __init__(self, x): ... self._x = x ... def public(self, y): ... return self._x + y ... def _nonpublic(self, y): ... return self._x - y ... def __internal(self, y=2): ... return self._x ** y ... def __repr__(self): ... return "Class(%r)" % self._x ... def __call__(self): ... return self._x ... 
>>> obj = Class(7) TRACE:autologging.Class:__init__:CALL *(7,) **{} >>> obj.public(9) TRACE:autologging.Class:public:CALL *(9,) **{} TRACE:autologging.Class:public:RETURN 16 16 >>> obj._nonpublic(5) TRACE:autologging.Class:_nonpublic:CALL *(5,) **{} TRACE:autologging.Class:_nonpublic:RETURN 2 2 >>> obj._Class__internal(y=3) TRACE:autologging.Class:__internal:CALL *() **{'y': 3} TRACE:autologging.Class:__internal:RETURN 343 343 >>> repr(obj) # not traced by default 'Class(7)' >>> obj() TRACE:autologging.Class:__call__:CALL *() **{} TRACE:autologging.Class:__call__:RETURN 7 7 .. note:: When the runtime Python version is >= 3.3, the *qualified* class name will be used to name the tracing logger (i.e. a nested class will write tracing log entries to a logger named "module.Parent.Nested"). .. rubric:: Trace default methods using a named logger :arg logging.Logger logger: the parent logger used to trace the methods of the class By default, all "public", "_nonpublic", and "__internal" methods, as well as the special "__init__" method, will be traced. Tracing log entries will be written to the specified logger: >>> import sys >>> logging.basicConfig( ... level=TRACE, stream=sys.stdout, ... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s") >>> @traced(logging.getLogger("my.channel")) ... class Class: ... def __init__(self, x): ... self._x = x ... def public(self, y): ... return self._x + y ... def _nonpublic(self, y): ... return self._x - y ... def __internal(self, y=2): ... return self._x ** y ... def __repr__(self): ... return "Class(%r)" % self._x ... def __call__(self): ... return self._x ... 
>>> obj = Class(7) TRACE:my.channel.Class:__init__:CALL *(7,) **{} >>> obj.public(9) TRACE:my.channel.Class:public:CALL *(9,) **{} TRACE:my.channel.Class:public:RETURN 16 16 >>> obj._nonpublic(5) TRACE:my.channel.Class:_nonpublic:CALL *(5,) **{} TRACE:my.channel.Class:_nonpublic:RETURN 2 2 >>> obj._Class__internal(y=3) TRACE:my.channel.Class:__internal:CALL *() **{'y': 3} TRACE:my.channel.Class:__internal:RETURN 343 343 >>> repr(obj) # not traced by default 'Class(7)' >>> obj() TRACE:my.channel.Class:__call__:CALL *() **{} TRACE:my.channel.Class:__call__:RETURN 7 7 .. rubric:: Trace specified methods using the default logger :arg tuple method_names: the names of the methods that will be traced Tracing log entries will be written to a logger named for the module and class: >>> import sys >>> logging.basicConfig( ... level=TRACE, stream=sys.stdout, ... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s") >>> @traced("public", "__internal") ... class Class: ... def __init__(self, x): ... self._x = x ... def public(self, y): ... return self._x + y ... def _nonpublic(self, y): ... return self._x - y ... def __internal(self, y=2): ... return self._x ** y ... def __repr__(self): ... return "Class(%r)" % self._x ... def __call__(self): ... return self._x ... >>> obj = Class(7) >>> obj.public(9) TRACE:autologging.Class:public:CALL *(9,) **{} TRACE:autologging.Class:public:RETURN 16 16 >>> obj._nonpublic(5) 2 >>> obj._Class__internal(y=3) TRACE:autologging.Class:__internal:CALL *() **{'y': 3} TRACE:autologging.Class:__internal:RETURN 343 343 >>> repr(obj) 'Class(7)' >>> obj() 7 .. warning:: When method names are specified explicitly via *args*, Autologging ensures that each method is actually defined in the body of the class being traced. (This means that inherited methods that are not overridden are **never** traced, even if they are named explicitly in *args*.) 
If a defintion for any named method is not found in the class body, either because the method is inherited or because the name is misspelled, Autologging will issue a :exc:`UserWarning`. If you wish to trace a method from a super class, you have two options: 1. Use ``traced`` to decorate the super class. 2. Override the method and trace it in the subclass. .. note:: When the runtime Python version is >= 3.3, the *qualified* class name will be used to name the tracing logger (i.e. a nested class will write tracing log entries to a logger named "module.Parent.Nested"). .. rubric:: Trace specified methods using a named logger :arg logging.Logger logger: the parent logger used to trace the methods of the class :arg tuple method_names: the names of the methods that will be traced >>> import sys >>> logging.basicConfig( ... level=TRACE, stream=sys.stdout, ... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s") >>> @traced(logging.getLogger("my.channel"), "public", "__internal") ... class Class: ... def __init__(self, x): ... self._x = x ... def public(self, y): ... return self._x + y ... def _nonpublic(self, y): ... return self._x - y ... def __internal(self, y=2): ... return self._x ** y ... def __repr__(self): ... return "Class(%r)" % self._x ... def __call__(self): ... return self._x ... >>> obj = Class(7) >>> obj.public(9) TRACE:my.channel.Class:public:CALL *(9,) **{} TRACE:my.channel.Class:public:RETURN 16 16 >>> obj._nonpublic(5) 2 >>> obj._Class__internal(y=3) TRACE:my.channel.Class:__internal:CALL *() **{'y': 3} TRACE:my.channel.Class:__internal:RETURN 343 343 >>> repr(obj) # not traced by default 'Class(7)' >>> obj() 7 .. warning:: When method names are specified explicitly via *args*, Autologging ensures that each method is actually defined in the body of the class being traced. (This means that inherited methods that are not overridden are **never** traced, even if they are named explicitly in *args*.) 
If a defintion for any named method is not found in the class body, either because the method is inherited or because the name is misspelled, Autologging will issue a :exc:`UserWarning`. If you wish to trace a method from a super class, you have two options: 1. Use ``traced`` to decorate the super class. 2. Override the method and trace it in the subclass. .. rubric:: Exclude specified methods from tracing .. versionadded:: 1.3.0 :arg tuple method_names: the names of the methods that will be excluded from tracing :keyword bool exclude: ``True`` to cause the method names list to be interpreted as an exclusion list (``False`` is the default, and causes the named methods to be **included** as described above) The example below demonstrates exclusions using the default logger. >>> import sys >>> logging.basicConfig( ... level=TRACE, stream=sys.stdout, ... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s") >>> @traced("_nonpublic", "__internal", exclude=True) ... class Class: ... def __init__(self, x): ... self._x = x ... def public(self, y): ... return self._x + y ... def _nonpublic(self, y): ... return self._x - y ... def __internal(self, y=2): ... return self._x ** y ... def __repr__(self): ... return "Class(%r)" % self._x ... def __call__(self): ... return self._x ... >>> obj = Class(7) >>> obj.public(9) TRACE:autologging.Class:public:CALL *(9,) **{} TRACE:autologging.Class:public:RETURN 16 16 >>> obj._nonpublic(5) 2 >>> obj._Class__internal(y=3) 343 >>> repr(obj) 'Class(7)' >>> obj() TRACE:autologging.Class:__call__:CALL *() **{} TRACE:autologging.Class:__call__:RETURN 7 7 When method names are excluded via *args* and the *exclude* keyword, Autologging **ignores** methods that are not actually defined in the body of the class being traced. .. warning:: If an exclusion list causes the list of traceable methods to resolve empty, then Autologging will issue a :exc:`UserWarning`. .. 
note:: When the runtime Python version is >= 3.3, the *qualified* class name will be used to name the tracing logger (i.e. a nested class will write tracing log entries to a logger named "module.Parent.Nested"). .. note:: When tracing a class, if the default (class-named) logger is used **and** the runtime Python version is >= 3.3, then the *qualified* class name will be used to name the tracing logger (i.e. a nested class will write tracing log entries to a logger named "module.Parent.Nested"). .. note:: If method names are specified when decorating a function, a :exc:`UserWarning` is issued, but the methods names are ignored and the function is traced as though the method names had not been specified. .. note:: Both `Jython <http://www.jython.org/>`_ and `IronPython <http://ironpython.net/>`_ report an "internal" class name using its mangled form, which will be reflected in the default tracing logger name. For example, in the sample code below, both Jython and IronPython will use the default tracing logger name "autologging._Outer__Nested" (whereas CPython/PyPy/Stackless would use "autologging.__Nested" under Python 2 or "autologging.Outer.__Nested" under Python 3.3+):: class Outer: @traced class __Nested: pass .. warning:: Neither `Jython <http://www.jython.org/>`_ nor `IronPython <http://ironpython.net/>`_ currently implement the ``function.__code__.co_lnotab`` attribute, so the last line number of a function cannot be determined by Autologging. .. versionchanged:: 1.3.1 Due to unavoidable inconsistencies in line number tracking across Python variants (see `issues/6 <https://github.com/mzipay/Autologging/issues/6>`_, as of version 1.3.1 and until further notice Autologging will only record the first line number of the function being traced in all tracing CALL and RETURN records. (Note that YIELD tracing records for generator iterators will continue to record the correct line number on variants other than IronPython.) 
""" obj = args[0] if args else None if obj is None: # treat `@traced()' as equivalent to `@traced' return traced if isclass(obj): # `@traced' class if hasattr(obj, _mangle_name("__log", obj.__name__)): return _install_traceable_methods(obj, logger=obj.__log, exclude=keywords.get( "exclude", False)) else: return _install_traceable_methods(obj, exclude=keywords.get( "exclude", False)) elif isroutine(obj): # `@traced' function if hasattr(obj, "_log"): return _make_traceable_function(obj, logger=obj._log) else: return _make_traceable_function( obj, logging.getLogger(_generate_logger_name(obj))) elif isinstance(obj, logging.Logger): # may be decorating a class OR a function method_names = args[1:] exclude = keywords.get("exclude", False) def traced_decorator(class_or_fn): if isclass(class_or_fn): # `@traced(logger)' or `@traced(logger, "method", ..)' class return _install_traceable_methods( class_or_fn, *method_names, exclude=exclude, logger=logging.getLogger( _generate_logger_name(class_or_fn, parent_name=obj.name))) else: # `@traced(logger)' function if method_names: warnings.warn( "ignoring method names for @traced function %s.%s" % (class_or_fn.__module__, class_or_fn.__name__)) return _make_traceable_function(class_or_fn, obj) return traced_decorator else: # `@traced("method_name1", ..)' class method_names = args[:] exclude = keywords.get("exclude", False) return lambda class_: _install_traceable_methods( class_, *method_names, exclude=exclude)
def load_pretrained_weights(self, context):
    """Load pretrained weights (and, when present, episodic replay
    memories) from ``self._weights_dir_`` into every network.

    The model directory is wiped first so stale checkpoints cannot
    shadow the pretrained weights.  For each network, the newest
    ``<prefix>_<i>-<epoch>.params`` file in the weights directory is
    selected; for networks with ``episodic_sub_nets`` the newest memory
    checkpoint per sub-net is selected as well.
    """
    if os.path.isdir(self._model_dir_):
        shutil.rmtree(self._model_dir_)
    if self._weights_dir_ is not None:
        for i, network in self.networks.items():
            # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params"
            param_file = None
            if hasattr(network, 'episodic_sub_nets'):
                num_episodic_sub_nets = len(network.episodic_sub_nets)
                lastMemEpoch = [0] * num_episodic_sub_nets
                mem_files = [None] * num_episodic_sub_nets

            if os.path.isdir(self._weights_dir_):
                lastEpoch = 0

                for file in os.listdir(self._weights_dir_):
                    if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file:
                        # Checkpoint name: "<prefix>_<i>-<epoch>.params" -> keep the newest epoch.
                        epochStr = file.replace(".params", "").replace(
                            self._model_prefix_ + "_" + str(i) + "-", "")
                        epoch = int(epochStr)
                        if epoch >= lastEpoch:
                            lastEpoch = epoch
                            param_file = file
                    elif hasattr(network, 'episodic_sub_nets') and \
                            self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file:
                        # BUGFIX: str.replace() was called with a single argument
                        # (TypeError at runtime); the marker must be replaced by "".
                        relMemPathInfo = file.replace(
                            self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-")
                        memSubNet = int(relMemPathInfo[0])
                        memEpochStr = relMemPathInfo[1]
                        memEpoch = int(memEpochStr)
                        if memEpoch >= lastMemEpoch[memSubNet - 1]:
                            lastMemEpoch[memSubNet - 1] = memEpoch
                            mem_files[memSubNet - 1] = file

                if param_file is None:
                    # BUGFIX: previously "weights_dir + None" raised TypeError
                    # when no matching checkpoint was found.
                    logging.info("No pretrained weights found in: " + self._weights_dir_)
                else:
                    logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file)
                    network.load_parameters(self._weights_dir_ + param_file,
                                            allow_missing=True, ignore_extra=True)

                    if hasattr(network, 'episodic_sub_nets'):
                        # BUGFIX: the old `assert lastEpoch == lastMemEpoch` compared
                        # an int to a list and therefore always failed; the apparent
                        # intent is that every memory checkpoint matches the weight
                        # checkpoint epoch.
                        assert all(memEpoch == lastEpoch for memEpoch in lastMemEpoch)
                        for j, sub_net in enumerate(network.episodic_sub_nets):
                            if mem_files[j] != None:
                                logging.info("Loading pretrained Replay Memory: " + mem_files[j])
                                # The memory layer is the first non-routine member whose
                                # name starts with "memory".
                                mem_layer = \
                                    [param for param in inspect.getmembers(
                                        sub_net, lambda x: not (inspect.isroutine(x)))
                                     if param[0].startswith("memory")][0][1]
                                # BUGFIX: the memory file was discovered in _weights_dir_
                                # but loaded from _model_dir_ (deleted at the top of this
                                # method); load it from where it was found.
                                mem_layer.load_memory(self._weights_dir_ + mem_files[j])
            else:
                # str() guards against param_file being None (it always is here).
                logging.info("No pretrained weights available at: " +
                             self._weights_dir_ + str(param_file))
def GenerateImportsTipForModule(obj_to_complete, dirComps=None, getattr=getattr, filter=lambda name: True):
    '''
    Build completion entries for every interesting member of an object/module.

    @param obj_to_complete: the object from where we should get the completions
    @param dirComps: if passed, we should not 'dir' the object and should just
        iterate those passed as a parameter
    @param getattr: the way to get a given object from the obj_to_complete
        (used for the completer)
    @param filter: a callable that receives the name and decides if it should
        be appended or not to the results
    @return: list of tuples, so that each tuple represents a completion with:
        name, doc, args, type (from the TYPE_* constants)
    '''
    ret = []

    if dirComps is None:
        dirComps = dir(obj_to_complete)
        if hasattr(obj_to_complete, '__dict__'):
            dirComps.append('__dict__')
        if hasattr(obj_to_complete, '__class__'):
            dirComps.append('__class__')

    getCompleteInfo = True
    if len(dirComps) > 1000:
        # ok, we don't want to let our users wait forever...
        # no complete info for you...
        getCompleteInfo = False

    # Instances of these types are typically constants; fetching their docs
    # is wasted effort (and time on the completion socket).
    dontGetDocsOn = (float, int, str, tuple, list)
    for d in dirComps:
        if d is None:
            continue

        if not filter(d):
            continue

        args = ''

        try:
            obj = getattr(obj_to_complete, d)
        except:
            # just ignore and get it without additional info
            ret.append((d, '', args, TYPE_BUILTIN))
        else:
            if getCompleteInfo:
                retType = TYPE_BUILTIN

                # check if we have to get docs
                getDoc = True
                for class_ in dontGetDocsOn:
                    if isinstance(obj, class_):
                        getDoc = False
                        break

                doc = ''
                if getDoc:
                    # no need to get this info... too many constants are defined
                    # and it makes things much slower (passing all that through
                    # sockets takes quite some time)
                    try:
                        doc = inspect.getdoc(obj)
                        if doc is None:
                            doc = ''
                    except:
                        # may happen on jython when checking java classes (so, just ignore it)
                        doc = ''

                if inspect.ismethod(obj) or inspect.isbuiltin(obj) or \
                        inspect.isfunction(obj) or inspect.isroutine(obj):
                    try:
                        # BUGFIX: inspect.getargspec was removed in Python 3.11;
                        # prefer getfullargspec when available so real signatures
                        # keep working instead of silently degrading to ('self',).
                        if hasattr(inspect, 'getfullargspec'):
                            spec = inspect.getfullargspec(obj)
                            args, vargs, kwargs, defaults = \
                                spec.args, spec.varargs, spec.varkw, spec.defaults
                        else:
                            args, vargs, kwargs, defaults = inspect.getargspec(obj)
                    except:
                        # many builtins expose no introspectable signature at all
                        args, vargs, kwargs, defaults = (('self', ), None, None, None)

                    if defaults is not None:
                        start_defaults_at = len(args) - len(defaults)

                    r = ''
                    for i, a in enumerate(args):
                        if len(r) > 0:
                            r = r + ', '
                        r = r + str(a)
                        if defaults is not None and i >= start_defaults_at:
                            default = defaults[i - start_defaults_at]
                            r += '=' + str(default)

                    others = ''
                    if vargs:
                        others += '*' + vargs

                    if kwargs:
                        if others:
                            others += ', '
                        others += '**' + kwargs

                    if others:
                        r += ', '

                    args = '(%s%s)' % (r, others)
                    retType = TYPE_FUNCTION

                elif inspect.isclass(obj):
                    retType = TYPE_CLASS

                elif inspect.ismodule(obj):
                    retType = TYPE_IMPORT

                else:
                    retType = TYPE_ATTR

                # add token and doc to return - assure only strings.
                ret.append((d, doc, args, retType))

            else:  # getCompleteInfo == False
                if inspect.ismethod(obj) or inspect.isbuiltin(obj) or \
                        inspect.isfunction(obj) or inspect.isroutine(obj):
                    retType = TYPE_FUNCTION
                elif inspect.isclass(obj):
                    retType = TYPE_CLASS
                elif inspect.ismodule(obj):
                    retType = TYPE_IMPORT
                else:
                    retType = TYPE_ATTR
                # ok, no complete info, let's try to do this as fast and clean
                # as possible; so, no docs for this kind of information, only
                # the signatures
                ret.append((d, '', str(args), retType))

    return ret
def _is_routine(self, handler): return inspect.isroutine(handler) or is_java_method(handler)
def __getattr__(self, attribute_name):
    # Delegates attribute access on this wrapper to the selected backend(s).
    # When deciding how to handle attribute accesses, we have three
    # different possible outcomes:
    # 1. If this is defined as a method on the base implementation, we are
    #    able delegate it to the backends based on the selector function.
    # 2. If this is defined as an attribute on the base implementation, we
    #    are able to (immediately) return that as the value. (This also
    #    mirrors the behavior of ``LazyServiceWrapper``, which will cache
    #    any attribute access during ``expose``, so we can't delegate
    #    attribute access anyway.)
    # 3. If this isn't defined at all on the base implementation, we let
    #    the ``AttributeError`` raised by ``getattr`` propagate (mirroring
    #    normal attribute access behavior for a missing/invalid name.)
    base_value = getattr(self.__backend_base, attribute_name)
    if not inspect.isroutine(base_value):
        return base_value

    def execute(*args, **kwargs):
        # Replacement callable returned to the caller: fans the call out to
        # the backend(s) chosen by the selector function.
        context = type(self).__state.context

        # If there is no context object already set in the thread local
        # state, we are entering the delegator for the first time and need
        # to create a new context.
        if context is None:
            from sentry.app import env  # avoids a circular import

            context = Context(env.request, {})

        # If this thread already has an active backend for this base class,
        # we can safely call that backend synchronously without delegating.
        if self.__backend_base in context.backends:
            backend = context.backends[self.__backend_base]
            return getattr(backend, attribute_name)(*args, **kwargs)

        # Binding the call arguments to named arguments has two benefits:
        # 1. These values always be passed in the same form to the selector
        #    function and callback, regardless of how they were passed to
        #    the method itself (as positional arguments, keyword arguments,
        #    etc.)
        # 2. This ensures that the given arguments are those supported by
        #    the base backend itself, which should be a common subset of
        #    arguments that are supported by all backends.
        # NOTE(review): the ``None`` fills the unbound ``self`` slot of the
        # base method -- confirm against the backend base's signatures.
        callargs = inspect.getcallargs(base_value, None, *args, **kwargs)

        selected_backend_names = list(
            self.__selector_func(context, attribute_name, callargs))
        if not len(selected_backend_names) > 0:
            raise self.InvalidBackend("No backends returned by selector!")

        # Ensure that the primary backend is actually registered -- we
        # don't want to schedule any work on the secondaries if the primary
        # request is going to fail anyway.
        if selected_backend_names[0] not in self.__backends:
            raise self.InvalidBackend(
                f"{selected_backend_names[0]!r} is not a registered backend."
            )

        def call_backend_method(context, backend, is_primary):
            # Runs inside an executor thread: installs the context, marks
            # the backend active, performs the call, restores the state.
            # Update the thread local state in the executor to the provided
            # context object. This allows the context to be propagated
            # across different threads.
            assert type(self).__state.context is None
            type(self).__state.context = context

            # Ensure that we haven't somehow accidentally entered a context
            # where the backend we're calling has already been marked as
            # active (or worse, some other backend is already active.)
            base = self.__backend_base
            assert base not in context.backends

            # Mark the backend as active.
            context.backends[base] = backend
            try:
                return getattr(backend, attribute_name)(*args, **kwargs)
            except Exception as e:
                # If this isn't the primary backend, we log any unexpected
                # exceptions so that they don't pass by unnoticed. (Any
                # exceptions raised by the primary backend aren't logged
                # here, since it's assumed that the caller will log them
                # from the calling thread.)
                if not is_primary:
                    expected_raises = getattr(base_value, "__raises__", [])
                    if not expected_raises or not isinstance(
                            e, tuple(expected_raises)):
                        logger.warning(
                            "%s caught in executor while calling %r on %s.",
                            type(e).__name__,
                            attribute_name,
                            type(backend).__name__,
                            exc_info=True,
                        )
                raise
            finally:
                type(self).__state.context = None

        # Enqueue all of the secondary backend requests first since these
        # are non-blocking queue insertions. (Since the primary backend
        # executor queue insertion can block, if that queue was full the
        # secondary requests would have to wait unnecessarily to be queued
        # until the after the primary request can be enqueued.)
        # NOTE: If the same backend is both the primary backend *and* in
        # the secondary backend list -- this is unlikely, but possible --
        # this means that one of the secondary requests will be queued and
        # executed before the primary request is queued. This is such a
        # strange usage pattern that I don't think it's worth optimizing
        # for.)
        results = [None] * len(selected_backend_names)
        for i, backend_name in enumerate(selected_backend_names[1:], 1):
            try:
                backend, executor = self.__backends[backend_name]
            except KeyError:
                logger.warning(
                    "%r is not a registered backend and will be ignored.",
                    backend_name,
                    exc_info=True,
                )
            else:
                results[i] = executor.submit(
                    functools.partial(call_backend_method,
                                      context.copy(),
                                      backend,
                                      is_primary=False),
                    priority=1,
                    block=False,
                )

        # The primary backend is scheduled last since it may block the
        # calling thread. (We don't have to protect this from ``KeyError``
        # since we already ensured that the primary backend exists.)
        backend, executor = self.__backends[selected_backend_names[0]]
        results[0] = executor.submit(
            functools.partial(call_backend_method,
                              context.copy(),
                              backend,
                              is_primary=True),
            priority=0,
            block=True,
        )

        if self.__callback_func is not None:
            FutureSet([
                _f for _f in results if _f
            ]).add_done_callback(lambda *a, **k: self.__callback_func(
                context, attribute_name, callargs, selected_backend_names,
                results))

        # Block on (only) the primary backend's result.
        return results[0].result()

    return execute
__author__ = 'ipetrash'


def foo():
    return 1


class Foo:
    def __call__(self):
        return 1


import inspect

# Plain functions and lambdas count as routines.
for candidate in (lambda: 1, foo):
    print(inspect.isroutine(candidate))
print()

# A functor (callable instance) is NOT a routine, although it is callable.
foo = Foo()
for outcome in (inspect.isroutine(foo), hasattr(foo, '__call__'), callable(foo)):
    print(outcome)
print()

import math

# Built-in routines qualify as well.
print(inspect.isroutine(math.sin))
print(inspect.isroutine(print))
def filter_attr(member):
    """getmembers() predicate that rejects routines (functions/methods)."""
    if inspect.isroutine(member):
        return False
    return True
def __match_attributes(attribute):
    """Keep only (name, value) pairs that are neither routines nor dunders."""
    name, value = attribute
    if inspect.isroutine(value):
        return False
    if name.startswith('__') and name.endswith('__'):
        return False
    return True
def isroutine(obj: Any) -> bool:
    """Check is any kind of function or method (after unwrapping decorators)."""
    unwrapped = unwrap_all(obj)
    return inspect.isroutine(unwrapped)
def _get__class__(obj): if hasattr(obj, "__class__") and not inspect.isroutine(obj): return obj.__class__ # instance class
def load(self, context):
    # Remove all "newest" checkpoint aliases, then scan the model directory
    # for the newest numbered checkpoint of each network (and, for episodic
    # networks, the newest replay-memory checkpoint per sub-net) and load it.
    # Returns the epoch to resume from: the smallest (lastEpoch + 1) across
    # all networks, or 0 when any network had no checkpoint at all.
    earliestLastEpoch = None
    for i, network in self.networks.items():
        lastEpoch = 0
        param_file = None
        if hasattr(network, 'episodic_sub_nets'):
            # Per-sub-net trackers for the newest memory checkpoint.
            num_episodic_sub_nets = len(network.episodic_sub_nets)
            lastMemEpoch = [0] * num_episodic_sub_nets
            mem_files = [None] * num_episodic_sub_nets

        # Drop the "newest" aliases so the numbered checkpoints below are
        # authoritative; every removal is best-effort (file may not exist).
        try:
            os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params")
        except OSError:
            pass
        try:
            os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-symbol.json")
        except OSError:
            pass

        if hasattr(network, 'episodic_sub_nets'):
            # Sub-net 0 files, then one set per remaining sub-net (j + 1).
            try:
                os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params")
            except OSError:
                pass
            try:
                os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json")
            except OSError:
                pass
            for j in range(len(network.episodic_sub_nets)):
                try:
                    os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j + 1) + "-0000.params")
                except OSError:
                    pass
                try:
                    os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j + 1) + "-symbol.json")
                except OSError:
                    pass
                try:
                    os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j + 1) + "-0000.params")
                except OSError:
                    pass
                try:
                    os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j + 1) + "-symbol.json")
                except OSError:
                    pass
                # NOTE(review): the loss-file removals below sit inside the
                # per-sub-net loop, so they run redundantly once per sub-net --
                # harmless, but looks like generated-code duplication; confirm
                # against the code generator template.
                try:
                    os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params")
                except OSError:
                    pass
                try:
                    os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json")
                except OSError:
                    pass
                try:
                    os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000")
                except OSError:
                    pass

        if os.path.isdir(self._model_dir_):
            for file in os.listdir(self._model_dir_):
                if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file:
                    # Checkpoint name: "<prefix>_<i>-<epoch>.params" -> newest epoch wins.
                    epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "")
                    epoch = int(epochStr)
                    if epoch >= lastEpoch:
                        lastEpoch = epoch
                        param_file = file
                elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file:
                    # Memory checkpoint name: "...memory_sub_net_<n>-<epoch>".
                    relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-")
                    memSubNet = int(relMemPathInfo[0])
                    memEpochStr = relMemPathInfo[1]
                    memEpoch = int(memEpochStr)
                    if memEpoch >= lastMemEpoch[memSubNet - 1]:
                        lastMemEpoch[memSubNet - 1] = memEpoch
                        mem_files[memSubNet - 1] = file

        if param_file is None:
            # No checkpoint for this network -> force training from epoch 0.
            earliestLastEpoch = 0
        else:
            logging.info("Loading checkpoint: " + param_file)
            network.load_parameters(self._model_dir_ + param_file)
            if hasattr(network, 'episodic_sub_nets'):
                for j, sub_net in enumerate(network.episodic_sub_nets):
                    if mem_files[j] != None:
                        logging.info("Loading Replay Memory: " + mem_files[j])
                        # The memory layer is the first non-routine member whose
                        # name starts with "memory".
                        mem_layer = [
                            param for param in inspect.getmembers(
                                sub_net, lambda x: not (inspect.isroutine(x)))
                            if param[0].startswith("memory")
                        ][0][1]
                        mem_layer.load_memory(self._model_dir_ + mem_files[j])
            # Track the smallest resume epoch over all networks.
            if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch:
                earliestLastEpoch = lastEpoch + 1
    return earliestLastEpoch
def merge(self, other):
    """Fill this object's empty (None or '') non-dunder data attributes
    from the matching attributes of *other*; returns self."""
    non_routine = lambda candidate: not inspect.isroutine(candidate)
    for name, current in inspect.getmembers(self, non_routine):
        if name.startswith('__'):
            continue
        if current in (None, '') and hasattr(other, name):
            setattr(self, name, getattr(other, name))
    return self
def testMethodInspectRoutine(self): self.assertTrue(inspect.isroutine(self.cls.substring)) self.assertTrue(inspect.isroutine(self.obj.substring))
def find_mod_objs(modname: str, app: Sphinx = None) -> Dict[str, Dict[str, Any]]:
    """
    Inspect the module ``modname`` for all the contained objects, sort for
    the object type (module, function, class, etc.), and return a dictionary
    containing object names, fully qualified names, and instances.

    Parameters
    ----------
    modname : str
        Name of the module (e.g. ``"plasmapy_sphinx.utils"``) to be
        inspected.

    app : `~sphinx.application.Sphinx`
        Instance of the `Sphinx` application.

    Returns
    -------
    mod_objs : Dict[str, Dict[str, List[Any]]]
        A dictionary containing names, qualified names, and object instances
        of all the objects in ``modname`` sorted by their respective group
        (module, class, function, etc.)

        The first key of the dictionary represents the object type (modules,
        classes, functions, etc.).  The second key is either ``"names"``
        (list of all object short names), ``"qualnames"`` (list of all
        object qualified names), and ``"objs"`` (list of object instances).

    Examples
    --------
    >>> find_mod_objs("plasmapy_sphinx.utils")
    {
        'functions': {
            'names': ['find_mod_objs', 'get_custom_grouping_info'],
            'qualnames': [
                'plasmapy_sphinx.utils.find_mod_objs',
                'plasmapy_sphinx.utils.get_custom_grouping_info',
            ],
            'objs': [
                <function plasmapy_sphinx.utils.find_mod_objs>,
                <function plasmapy_sphinx.utils.get_custom_grouping_info>,
            ]
        },
        'variables': {
            'names': ['default_grouping_info', 'package_dir', 'templates_dir'],
            'qualnames': [
                'plasmapy_sphinx.utils.default_grouping_info',
                'plasmapy_sphinx.utils.package_dir',
                'plasmapy_sphinx.utils.templates_dir',
            ],
            'objs': [
                OrderedDict(...),
                "/.../plasmapy_sphinx",
                "/.../plasmapy_sphinx/templates",
            ]
        }
    }

    Notes
    -----
    If the module contains the ``__all__`` dunder, then the routine groups
    the objects specified in the dunder; otherwise, it will search the
    module's `globals`, minus any private or special members.  The routine
    will then group the module objects in the following order...

    1. Group any imported modules or packages.

       - Regardless of if ``__all__`` is defined, the routine will first
         search the module's `globals` for any imported modules or packages.
       - Any 3rd party modules are excluded unless specified in ``__all__``.
       - Any non-direct sub-modules are excluded unless specified in
         ``__all__``.

    2. Custom groups defined by :confval:`automodapi_custom_groups` are then
       collected.

    3. The remaining objects are grouped into the default groups defined by
       :attr:`default_grouping_info`.
    """
    if app is not None:
        if isinstance(app, Sphinx):
            cgroups_def = get_custom_grouping_info(app)
        else:
            # assuming dict for testing
            cgroups_def = app
        cgroups = set(cgroups_def)
    else:
        cgroups_def = {}
        cgroups = set()

    mod = import_module(modname)
    pkg_name = modname.split(".")[0]

    # define what to search
    pkg_names = {
        name
        for name in mod.__dict__.keys() if not name.startswith("_")
    }
    if hasattr(mod, "__all__"):
        no_all = False
        names_to_search = set(mod.__all__)
    else:
        no_all = True
        names_to_search = pkg_names

    # filter pkg_names: drop tests, 3rd-party objects, and non-direct
    # sub-modules, unless explicitly listed in __all__
    for name in pkg_names.copy():
        obj = getattr(mod, name)

        if not no_all and name in names_to_search:
            continue

        ismod = inspect.ismodule(obj)
        ispkg = ismod and obj.__package__ == obj.__name__

        # remove test folders
        if ispkg and obj.__package__.split(".")[-1] == "tests":
            pkg_names.remove(name)
            continue

        # remove 3rd party objects
        if ismod and obj.__package__.split(".")[0] != pkg_name:
            pkg_names.remove(name)
            continue
        elif (not ismod and hasattr(obj, "__module__")
              and obj.__module__.split(".")[0] != pkg_name):
            # Note: this will miss ufuncs like numpy.sqrt since they do not
            # have a __module__ property
            pkg_names.remove(name)
            continue

        # remove non direct sub-pkgs and mods of modname
        if ismod:
            if not obj.__name__.startswith(modname):
                pkg_names.remove(name)
                continue
            else:
                nm = obj.__name__[len(modname):].split(".")
                nm.remove("")
                if len(nm) != 1:
                    pkg_names.remove(name)
                    continue

    # find local modules first
    names_of_modules = set()
    for name in pkg_names.copy():
        obj = getattr(mod, name)
        if inspect.ismodule(obj):
            names_of_modules.add(name)

    mod_objs = {"modules": {"names": []}}
    if len(names_of_modules) > 0:
        # NOTE(review): this self-assignment is a no-op as written -- it may
        # be mangled from a sorted()/copy() call; confirm against upstream.
        names_of_modules = names_of_modules
        mod_objs["modules"]["names"] = list(names_of_modules)
        names_to_search = names_to_search - names_of_modules

    # find and filter custom groups
    for name in cgroups:
        dunder = cgroups_def[name]["dunder"]
        if hasattr(mod, dunder):
            custom_names = set(getattr(mod, dunder))
        else:
            continue
        if len(custom_names) > 0:
            mod_objs.update({name: {"names": list(custom_names)}})
            names_to_search = names_to_search - custom_names

    # gather all remaining groups
    mod_objs.update({
        "classes": {
            "names": []
        },
        "exceptions": {
            "names": []
        },
        "warnings": {
            "names": []
        },
        "functions": {
            "names": []
        },
        "variables": {
            "names": []
        },
    })  # type: Dict[str, Dict[str, Any]]
    for name in names_to_search:
        obj = getattr(mod, name)
        if inspect.isroutine(obj):
            # is a user-defined or built-in function
            mod_objs["functions"]["names"].append(name)
        elif inspect.isclass(obj):
            if issubclass(obj, Warning):
                mod_objs["warnings"]["names"].append(name)
            elif issubclass(obj, BaseException):
                mod_objs["exceptions"]["names"].append(name)
            else:
                mod_objs["classes"]["names"].append(name)
        else:
            mod_objs["variables"]["names"].append(name)

    # retrieve and define qualnames and objs
    for obj_type in list(mod_objs):
        if len(mod_objs[obj_type]["names"]) == 0:
            del mod_objs[obj_type]
            continue
        mod_objs[obj_type].update({"qualnames": [], "objs": []})
        for name in list(mod_objs[obj_type]["names"]):
            # Note: The 'qualname' is always constructed with 'name' so when
            # something like
            #
            #   def func(...):
            #       ...
            #
            #   f2 = func
            #
            # is done, then the 'qualname' ends with 'f2' and not 'func'.
            obj = getattr(mod, name)

            ismod = inspect.ismodule(obj)
            # ispkg = ismod and obj.__package__ == obj.__name__

            if not ismod and no_all:
                # only allow local objects to be collected
                # - at this point modules & pkgs should have already been
                #   filtered for direct sub-modules and pkgs
                if not hasattr(obj, "__module__"):
                    # this would be a locally defined variable like
                    # plasmapy.__citation__
                    pass
                elif not obj.__module__.startswith(pkg_name):
                    # object not from package being documented
                    mod_objs[obj_type]["names"].remove(name)
                    continue

            if ismod:
                obj_renamed = obj.__name__.split(".")[-1] != name
            elif not hasattr(obj, "__name__"):
                obj_renamed = False
            else:
                obj_renamed = obj.__name__ != name

            if ismod and obj_renamed:
                qualname = f"{obj.__package__}.{name}"
            elif ismod and not obj_renamed:
                qualname = obj.__name__
            elif obj_renamed or not hasattr(obj, "__module__"):
                # can not tell if the object was renamed in modname or in
                # obj.__module__, so assumed it happened in modname
                qualname = f"{modname}.{name}"
            elif obj.__module__.split(".")[0] != pkg_name:
                # this will catch scenarios like typing alias definitions
                # where __module__ == typing even when defined locally
                qualname = f"{modname}.{name}"
            else:
                qualname = f"{obj.__module__}.{name}"
            mod_objs[obj_type]["qualnames"].append(qualname)
            mod_objs[obj_type]["objs"].append(obj)

        # sort lists (names alphabetically, qualnames/objs kept in step)
        names = sorted(mod_objs[obj_type]["names"].copy())
        qualnames = []
        objs = []
        for name in names:
            index = mod_objs[obj_type]["names"].index(name)
            qualnames.append(mod_objs[obj_type]["qualnames"][index])
            objs.append(mod_objs[obj_type]["objs"][index])

        mod_objs[obj_type] = {
            "names": names,
            "qualnames": qualnames,
            "objs": objs
        }

    return mod_objs
def receive_game_update_message(self, action, round_state): ''' pp = pprint.PrettyPrinter(indent=2) print("------------ACTION/receive_game_update_message--------") pp.pprint(action) print("------------ROUND_STATE----------") pp.pprint(round_state) print("-------------------------------") ''' p_action_in_str = action['action'].upper() p_action = None p_uuid = action['player_uuid'] p_amount = action['amount'] # https://stackoverflow.com/questions/9058305/getting-attributes-of-a-class all_attributes = inspect.getmembers( PokerConstants.Action, lambda a: not (inspect.isroutine(a))) attributes = [ a for a in all_attributes if not (a[0].startswith('__') and a[0].endswith('__')) ] for a in attributes: if a[0] == p_action_in_str: p_action = a[1] for p in self.players: if p.uuid == p_uuid: p.add_action_history(p_action, p_amount) if not self.big_blind_has_spoken and p_uuid == round_state['big_blind_pos'] and \ p_action != 'BIG_BLIND': self.big_blind_has_spoken = True if self.player_pos == None: self.missed_action = p_action self.missed_uuid = p_uuid if self.players[0].uuid == p_uuid: if p_action_in_str == "RAISE": self.commitments[ 0] = self.commitments[1] + 2 * self.small_blind_amount self.num_raises = self.num_raises + 1 elif p_action_in_str == "CALL": self.commitments[0] = self.commitments[1] else: if p_action_in_str == "RAISE": self.commitments[ 1] = self.commitments[0] + self.small_blind_amount * 2 self.num_raises = self.num_raises + 1 elif p_action_in_str == "CALL": self.commitments[1] = self.commitments[0] if self.player_pos != None: if p_uuid == self.players[abs( self.player_pos - 1)].uuid and p_action_in_str == "RAISE": self.oppo_committed_amt = self.player_committed_amt + 2 * self.small_blind_amount self.raise_freq[self.num_raises][2] = self.raise_freq[ self.num_raises][2] + 1 self.num_raises = self.num_raises + 1 elif p_uuid == self.players[abs( self.player_pos - 1)].uuid and p_action_in_str == "CALL": self.oppo_committed_amt = self.player_committed_amt 
self.raise_freq[self.num_raises][1] = self.raise_freq[ self.num_raises][1] + 1 elif p_uuid == self.players[abs( self.player_pos - 1)].uuid and p_action_in_str == "FOLD": self.raise_freq[self.num_raises][0] = self.raise_freq[ self.num_raises][0] + 1 else: pass '''
def _epydoc_isroutine_override(object): # yes, import in function is evil. # so is this function... import inspect return isinstance(object, partial) or inspect.isroutine(object)
def isroutine(object):  # pylint: disable=redefined-builtin
  """TFDecorator-aware replacement for inspect.isroutine."""
  unwrapped_target = tf_decorator.unwrap(object)[1]
  return _inspect.isroutine(unwrapped_target)
def check_signature(object_name, reference_object, other_object):
    """
    Given a reference class or function check if an other class or function
    could be substituted without causing any instantiation/usage issues.

    @param object_name: the name of the object being checked.
    @type object_name: string
    @param reference_object: the reference class or function.
    @type reference_object: class/function
    @param other_object: the other class or function to be checked.
    @type other_object: class/function
    @raise InvenioPluginContainerError: in case the other object is not
        compatible with the reference object.
    """
    ## NOTE(review): this is Python 2 only code: it uses the
    ## `except Exc, err` syntax and inspect.getargspec (removed in 3.11).
    try:
        if inspect.isclass(reference_object):
            ## if the reference_object is a class
            if inspect.isclass(other_object):
                ## if the other_object is a class
                if issubclass(other_object, reference_object):
                    ## if the other_object is derived from the reference we
                    ## should check for all the method in the former that
                    ## exists in the the latter, wethever they recursively have
                    ## the same signature.
                    reference_object_map = dict(
                        inspect.getmembers(reference_object,
                                           inspect.isroutine))
                    for other_method_name, other_method_code in \
                            inspect.getmembers(other_object,
                                               inspect.isroutine):
                        if other_method_name in reference_object_map:
                            ## Recurse: compare each overriding method with
                            ## the method it overrides.
                            check_signature(
                                object_name,
                                reference_object_map[other_method_name],
                                other_method_code)
                else:
                    ## if the other_object is not derived from the
                    ## reference_object then all the method declared in the
                    ## latter should exist in the former and they should
                    ## recursively have the same signature.
                    other_object_map = dict(
                        inspect.getmembers(other_object, inspect.isroutine))
                    for reference_method_name, reference_method_code in \
                            inspect.getmembers(
                                reference_object, inspect.isroutine):
                        if reference_method_name in other_object_map:
                            check_signature(
                                object_name, reference_method_code,
                                other_object_map[reference_method_name])
                        else:
                            raise InvenioPluginContainerError(
                                '"%s", which'
                                ' exists in the reference class, does not'
                                ' exist in the other class, and the reference'
                                ' class is not an anchestor of the other' %
                                reference_method_name)
            else:
                ## We are comparing apples and oranges!
                raise InvenioPluginContainerError(
                    "%s (the reference object)"
                    " is a class while %s (the other object) is not a class" %
                    (reference_object, other_object))
        elif inspect.isroutine(reference_object):
            ## if the reference_object is a function
            if inspect.isroutine(other_object):
                ## if the other_object is a function we will compare the
                ## reference_object and other_object function signautre i.e.
                ## their parameters.
                reference_args, reference_varargs, reference_varkw, \
                    reference_defaults = inspect.getargspec(reference_object)
                other_args, other_varargs, other_varkw, \
                    other_defaults = inspect.getargspec(other_object)
                ## We normalize the reference_defaults to be a list
                if reference_defaults is not None:
                    reference_defaults = list(reference_defaults)
                else:
                    reference_defaults = []
                ## We normalize the other_defaults to be a list
                if other_defaults is not None:
                    other_defaults = list(other_defaults)
                else:
                    other_defaults = []
                ## Check for presence of missing parameters in other function
                ## (a *args/**kwargs in the other function absorbs anything).
                if not (other_varargs or other_varkw):
                    for reference_arg in reference_args:
                        if reference_arg not in other_args:
                            raise InvenioPluginContainerError(
                                'Argument "%s"'
                                ' in reference function %s does not exist in'
                                ' the other function %s' %
                                (reference_arg, reference_object,
                                 other_object))
                ## Check for presence of additional parameters in other
                ## function
                if not (reference_varargs or reference_varkw):
                    for other_arg in other_args:
                        if other_arg not in reference_args:
                            raise InvenioPluginContainerError(
                                'Argument "%s"'
                                ' in other function %s does not exist in the'
                                ' reference function %s' %
                                (other_arg, other_object, reference_object))
                ## Check sorting of arguments
                ## (Python 2 idiom: map(None, a, b) zips with None padding,
                ## like itertools.izip_longest).
                for reference_arg, other_arg in map(None, reference_args,
                                                    other_args):
                    ## NOTE(review): the last clause reads
                    ## `(other_args or other_varargs)`; by symmetry with the
                    ## reference clause above it looks like it was meant to be
                    ## `(other_varargs or other_varkw)` — confirm upstream.
                    if not ((reference_arg == other_arg) or
                            (reference_arg is None and
                             (reference_varargs or reference_varkw)) or
                            (other_arg is None and
                             (other_args or other_varargs))):
                        raise InvenioPluginContainerError(
                            'Argument "%s" in'
                            ' the other function is in the position of'
                            ' argument "%s" in the reference function, i.e.'
                            ' the order of arguments is not respected' %
                            (other_arg, reference_arg))
                ## NOTE(review): this default-count check only fires when
                ## BOTH signatures are completely empty of positional/vararg
                ## parameters — verify the `not (...)` guard is intended.
                if len(reference_defaults) != len(other_defaults) and \
                        not (reference_args or reference_varargs or
                             other_args or other_varargs):
                    raise InvenioPluginContainerError(
                        "Default parameters in"
                        " the other function are not corresponding to the"
                        " default of parameters of the reference function")
            else:
                ## We are comparing apples and oranges!
                raise InvenioPluginContainerError(
                    '%s (the reference object)'
                    ' is a function while %s (the other object) is not a'
                    ' function' % (reference_object, other_object))
    except InvenioPluginContainerError, err:
        ## Re-raise with the source location of the offending object attached.
        try:
            sourcefile = inspect.getsourcefile(other_object)
            sourceline = inspect.getsourcelines(other_object)[1]
        except IOError:
            ## other_object is not loaded from a real file
            sourcefile = 'N/A'
            sourceline = 'N/A'
        raise InvenioPluginContainerError(
            'Error in checking signature for'
            ' "%s" as defined at "%s" (line %s): %s' %
            (object_name, sourcefile, sourceline, err))
"""Initilization procedure for `Photosphere` modules.""" import inspect import os import sys path = os.path.dirname(os.path.abspath(__file__)) __all__ = [] for py in [ f[:-3] for f in os.listdir(path) if f.endswith('.py') and f != '__init__.py' ]: mod = __import__('.'.join([__name__, py]), fromlist=[py]) classes = [ x[1] for x in inspect.getmembers(mod) if (inspect.isroutine(x[1]) or inspect.isclass(x[1])) and inspect.getmodule(x[1]) == mod ] for cls in classes: __all__.append(cls.__name__) setattr(sys.modules[__name__], cls.__name__, cls)
def _get_handler_method(self, libcode, name): method = getattr(libcode, name) if not inspect.isroutine(method): raise DataError('Not a method or function') return method
def can_document_member(cls, member, membername, isattr, parent): return inspect.isroutine(member) and \ not isinstance(parent, ModuleDocumenter)
def _Fire(component, args, context, name=None):
  """Execute a Fire command on a target component using the args supplied.

  Arguments that come after a final isolated '--' are treated as Flags, eg for
  interactive mode or completion script generation.

  Other arguments are consumed by the execution of the Fire command, eg in the
  traversal of the members of the component, or in calling a function or
  instantiating a class found during the traversal.

  The steps performed by this method are:

  1. Parse any Flag args (the args after the final --)

  2. Start with component as the current component.
  2a. If the current component is a class, instantiate it using args from args.
  2b. If the current component is a routine, call it using args from args.
  2c. Otherwise access a member from component using an arg from args.
  2d. Repeat 2a-2c until no args remain.

  3a. Embed into ipython REPL if interactive mode is selected.
  3b. Generate a completion script if that flag is provided.

  In step 2, arguments will only ever be consumed up to a separator; a single
  step will never consume arguments from both sides of a separator.
  The separator defaults to a hyphen (-), and can be overwritten with the
  --separator Fire argument.

  Args:
    component: The target component for Fire.
    args: A list of args to consume in Firing on the component, usually from
        the command line.
    context: A dict with the local and global variables available at the call
        to Fire.
    name: Optional. The name of the command. Used in interactive mode and in
        the tab completion script.
  Returns:
    FireTrace of components starting with component, tracing Fire's execution
        path as it consumes args.

  Raises:
    ValueError: If there are arguments that cannot be consumed.
    ValueError: If --completion is specified but no name available.
  """
  # Split off the Fire flags (after the final isolated '--') from the args
  # consumed by traversal.
  args, flag_args = parser.SeparateFlagArgs(args)

  argparser = parser.CreateParser()
  # unused_args is intentionally discarded: unknown flags are ignored here.
  parsed_flag_args, unused_args = argparser.parse_known_args(flag_args)
  verbose = parsed_flag_args.verbose
  interactive = parsed_flag_args.interactive
  separator = parsed_flag_args.separator
  show_completion = parsed_flag_args.completion
  show_help = parsed_flag_args.help
  show_trace = parsed_flag_args.trace

  # component can be a module, class, routine, object, etc.
  if component is None:
    component = context

  initial_component = component
  component_trace = trace.FireTrace(
      initial_component=initial_component, name=name, separator=separator,
      verbose=verbose, show_help=show_help, show_trace=show_trace)

  instance = None
  remaining_args = args
  # Main traversal loop: each iteration consumes some args to step from the
  # current component to the next one (call/instantiate/member access).
  while True:
    last_component = component
    initial_args = remaining_args

    if not remaining_args and (show_help or interactive or show_trace
                               or show_completion is not None):
      # Don't initialize the final class or call the final function unless
      # there's a separator after it, and instead process the current component.
      break

    saved_args = []
    used_separator = False
    if separator in remaining_args:
      # For the current component, only use arguments up to the separator.
      separator_index = remaining_args.index(separator)
      saved_args = remaining_args[separator_index + 1:]
      remaining_args = remaining_args[:separator_index]
      used_separator = True
    assert separator not in remaining_args

    if inspect.isclass(component) or inspect.isroutine(component):
      # The component is a class or a routine; we'll try to initialize it or
      # call it.
      isclass = inspect.isclass(component)

      try:
        target = component.__name__
        filename, lineno = inspectutils.GetFileAndLine(component)

        component, consumed_args, remaining_args, capacity = _CallCallable(
            component, remaining_args)

        # Update the trace.
        if isclass:
          component_trace.AddInstantiatedClass(
              component, target, consumed_args, filename, lineno, capacity)
        else:
          component_trace.AddCalledRoutine(
              component, target, consumed_args, filename, lineno, capacity)
      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

      if last_component is initial_component:
        # If the initial component is a class, keep an instance for use with -i.
        instance = component

    elif isinstance(component, (list, tuple)) and remaining_args:
      # The component is a tuple or list; we'll try to access a member.
      arg = remaining_args[0]
      try:
        index = int(arg)
        component = component[index]
      except (ValueError, IndexError):
        error = FireError(
            'Unable to index into component with argument:', arg)
        component_trace.AddError(error, initial_args)
        return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, index, [arg], filename, lineno)

    elif isinstance(component, dict) and remaining_args:
      # The component is a dict; we'll try to access a member.
      target = remaining_args[0]
      if target in component:
        component = component[target]
      elif target.replace('-', '_') in component:
        # Allow CLI-style hyphens to match underscore keys.
        component = component[target.replace('-', '_')]
      else:
        # The target isn't present in the dict as a string, but maybe it is as
        # another type.
        # TODO: Consider alternatives for accessing non-string keys.
        found_target = False
        for key, value in component.items():
          if target == str(key):
            component = value
            found_target = True
            break
        if not found_target:
          error = FireError(
              'Cannot find target in dict:', target, component)
          component_trace.AddError(error, initial_args)
          return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, target, [target], filename, lineno)

    elif remaining_args:
      # We'll try to access a member of the component.
      try:
        target = remaining_args[0]

        component, consumed_args, remaining_args = _GetMember(
            component, remaining_args)

        filename, lineno = inspectutils.GetFileAndLine(component)

        component_trace.AddAccessedProperty(
            component, target, consumed_args, filename, lineno)

      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

    if used_separator:
      # Add back in the arguments from after the separator.
      if remaining_args:
        remaining_args = remaining_args + [separator] + saved_args
      elif (inspect.isclass(last_component)
            or inspect.isroutine(last_component)):
        remaining_args = saved_args
        component_trace.AddSeparator()
      elif component is not last_component:
        remaining_args = [separator] + saved_args
      else:
        # It was an unnecessary separator.
        remaining_args = saved_args

    if component is last_component and remaining_args == initial_args:
      # We're making no progress.
      break

  if remaining_args:
    component_trace.AddError(
        FireError('Could not consume arguments:', remaining_args),
        initial_args)
    return component_trace

  if show_completion is not None:
    if name is None:
      raise ValueError('Cannot make completion script without command name')
    script = CompletionScript(name, initial_component, shell=show_completion)
    component_trace.AddCompletionScript(script)

  if interactive:
    # Expose the traversal results in the embedded REPL's namespace.
    variables = context.copy()

    if name is not None:
      variables[name] = initial_component
    variables['component'] = initial_component
    variables['result'] = component
    variables['trace'] = component_trace

    if instance is not None:
      variables['self'] = instance

    interact.Embed(variables, verbose)

    component_trace.AddInteractiveMode()

  return component_trace
def generate_imports_tip_for_module(
        obj_to_complete, dirComps=None, getattr=getattr,
        filter=lambda name: True):
    '''
    @param obj_to_complete: the object from where we should get the completions
    @param dirComps: if passed, we should not 'dir' the object and should just
        iterate those passed as a parameter
    @param getattr: the way to get a given object from the obj_to_complete
        (used for the completer)
    @param filter: a callable that receives the name and decides if it should
        be appended or not to the results
    @return: list of tuples, so that each tuple represents a completion with:
        name, doc, args, type (from the TYPE_* constants)
    '''
    # NOTE(review): Python 2 era code (xrange, bare excepts); the nesting here
    # is reconstructed from a whitespace-mangled paste — verify against the
    # PyDev sources.
    ret = []

    if dirComps is None:
        dirComps = dir(obj_to_complete)
        if hasattr(obj_to_complete, '__dict__'):
            dirComps.append('__dict__')
        if hasattr(obj_to_complete, '__class__'):
            dirComps.append('__class__')

    getCompleteInfo = True

    if len(dirComps) > 1000:
        #ok, we don't want to let our users wait forever...
        #no complete info for you...
        getCompleteInfo = False

    # Instances of these basic types never carry useful per-member docs.
    dontGetDocsOn = (float, int, str, tuple, list)
    for d in dirComps:

        if d is None:
            continue

        if not filter(d):
            continue

        args = ''

        try:
            # Prefer the class attribute (gives the unbound descriptor);
            # fall back to the instance attribute.
            try:
                obj = getattr(obj_to_complete.__class__, d)
            except:
                obj = getattr(obj_to_complete, d)
        except:  #just ignore and get it without additional info
            ret.append((d, '', args, TYPE_BUILTIN))
        else:

            if getCompleteInfo:
                try:
                    retType = TYPE_BUILTIN

                    #check if we have to get docs
                    getDoc = True
                    for class_ in dontGetDocsOn:

                        if isinstance(obj, class_):
                            getDoc = False
                            break

                    doc = ''
                    if getDoc:
                        #no need to get this info... too many constants are defined and
                        #makes things much slower (passing all that through sockets takes quite some time)
                        try:
                            doc = inspect.getdoc(obj)
                            if doc is None:
                                doc = ''
                        except:  #may happen on jython when checking java classes (so, just ignore it)
                            doc = ''

                    if inspect.ismethod(obj) or inspect.isbuiltin(
                            obj) or inspect.isfunction(
                            obj) or inspect.isroutine(obj):
                        try:
                            args, vargs, kwargs, defaults = inspect.getargspec(
                                obj)

                            # Render '(a, b, c)' from the positional args.
                            r = ''
                            for a in (args):
                                if len(r) > 0:
                                    r = r + ', '
                                r = r + str(a)
                            args = '(%s)' % (r)
                        except TypeError:
                            #ok, let's see if we can get the arguments from the doc
                            args = '()'
                            try:
                                found = False
                                if len(doc) > 0:
                                    if IS_IPY:
                                        #Handle case where we have the situation below
                                        #sort(self, object cmp, object key)
                                        #sort(self, object cmp, object key, bool reverse)
                                        #sort(self)
                                        #sort(self, object cmp)

                                        #Or: sort(self: list, cmp: object, key: object)
                                        #sort(self: list, cmp: object, key: object, reverse: bool)
                                        #sort(self: list)
                                        #sort(self: list, cmp: object)
                                        if hasattr(obj, '__name__'):
                                            name = obj.__name__ + '('

                                            #Fix issue where it was appearing sort(aa)sort(bb)sort(cc) in the same line.
                                            lines = doc.splitlines()
                                            if len(lines) == 1:
                                                c = doc.count(name)
                                                if c > 1:
                                                    doc = ('\n' + name).join(
                                                        doc.split(name))

                                            # Keep the longest overload line.
                                            major = ''
                                            for line in doc.splitlines():
                                                if line.startswith(
                                                        name
                                                ) and line.endswith(')'):
                                                    if len(line) > len(major):
                                                        major = line
                                            if major:
                                                args = major[major.index('('):]
                                                found = True

                                    if not found:
                                        # Scan the first doc line for a
                                        # '(...)' signature.
                                        i = doc.find('->')
                                        if i < 0:
                                            i = doc.find('--')
                                            if i < 0:
                                                i = doc.find('\n')
                                                if i < 0:
                                                    i = doc.find('\r')

                                        if i > 0:
                                            s = doc[0:i]
                                            s = s.strip()

                                            #let's see if we have a docstring in the first line
                                            if s[-1] == ')':
                                                start = s.find('(')
                                                if start >= 0:
                                                    end = s.find('[')
                                                    if end <= 0:
                                                        end = s.find(')')
                                                        if end <= 0:
                                                            end = len(s)

                                                    args = s[start:end]
                                                    if not args[-1] == ')':
                                                        args = args + ')'

                                                    #now, get rid of unwanted chars
                                                    l = len(args) - 1
                                                    r = []
                                                    for i in xrange(len(args)):
                                                        if i == 0 or i == l:
                                                            r.append(args[i])
                                                        else:
                                                            r.append(
                                                                check_char(
                                                                    args[i]))

                                                    args = ''.join(r)

                                if IS_IPY:
                                    # IronPython signatures carry typed
                                    # 'self: T' — collapse back to 'self'.
                                    if args.startswith('(self:'):
                                        i = args.find(',')
                                        if i >= 0:
                                            args = '(self' + args[i:]
                                        else:
                                            args = '(self)'
                                    i = args.find(')')
                                    if i > 0:
                                        args = args[:i + 1]

                            except:
                                pass

                        retType = TYPE_FUNCTION

                    elif inspect.isclass(obj):
                        retType = TYPE_CLASS

                    elif inspect.ismodule(obj):
                        retType = TYPE_IMPORT

                    else:
                        retType = TYPE_ATTR

                    #add token and doc to return - assure only strings.
                    ret.append((d, doc, args, retType))

                except:  #just ignore and get it without aditional info
                    ret.append((d, '', args, TYPE_BUILTIN))

            else:  #getCompleteInfo == False
                if inspect.ismethod(obj) or inspect.isbuiltin(
                        obj) or inspect.isfunction(obj) or inspect.isroutine(
                        obj):
                    retType = TYPE_FUNCTION
                elif inspect.isclass(obj):
                    retType = TYPE_CLASS
                elif inspect.ismodule(obj):
                    retType = TYPE_IMPORT
                else:
                    retType = TYPE_ATTR

                #ok, no complete info, let's try to do this as fast and clean as possible
                #so, no docs for this kind of information, only the signatures
                ret.append((d, '', str(args), retType))

    return ret
def all_permissions(cls): return [ val for perm, val in inspect.getmembers( cls, lambda memb: not inspect.isroutine(memb)) if not perm.startswith('_') ]
def all_scopes(cls): return [ val for scope, val in inspect.getmembers( cls, lambda memb: not inspect.isroutine(memb)) if not scope.startswith('_') ]
def to_json(self): attr = inspect.getmembers(self, lambda a: not (inspect.isroutine(a))) js = {k: v for (k, v) in attr if not k.startswith('_')} json_rules = [r.to_json() for r in self.rules] js['rules'] = json_rules return js
def _is_function(obj): return inspect.isroutine(obj) and callable(obj)
def vals(self): vars = dict(inspect.getmembers(self, lambda a: not (inspect.isroutine(a)))) return {key.lower(): vars[key] for key in vars}
def add_dynamic_field(self, fn): if not inspect.isroutine(fn): raise TypeError("add_dynamic_field requires function argument") self._dyn_fields.add(fn)