def _get_full_data(node):
    """Collect ``Returns`` and ``Raises`` field text from a field-list node.

    Walks the fields of *node*, and for every field whose type is
    ``Returns`` or ``Raises`` appends the text of each content child to
    ``data[uid][fieldtype]``, where *uid* identifies the enclosing
    description (``node.parent.parent``).

    :param node: a docutils field-list node whose grandparent is the
        object description it documents.
    :returns: ``{uid: {'Returns': [str, ...], 'Raises': [str, ...]}}``,
        or ``None`` when the enclosing description has no name.
    """
    data = {}
    parent_desc = node.parent.parent
    name, uid = _get_desc_data(parent_desc)
    if not name:
        # anonymous description: nothing to attach the data to
        return
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, _ = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype = fieldname.astext()
        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        data.setdefault(uid, {})
        # 'Returns' and 'Raises' were handled by two identical copies of
        # this loop; a single membership test keeps behavior and drops
        # the duplication.
        if fieldtype in ('Returns', 'Raises'):
            for child in content:
                data[uid].setdefault(fieldtype, []).append(child.astext())
    return data
def _patched__DocFieldTransformer__transform(self, node):
    """Transform a single field list *node*.

    Patched copy of Sphinx's ``DocFieldTransformer.transform``.  The one
    change from the original: the type-field content is filtered with a
    list comprehension instead of ``filter()``.  On Python 3 ``filter()``
    returns a lazy iterator, so ``if content:`` was always true (even for
    an empty result) and the iterator stored into ``types`` could only be
    consumed once.  The comprehension matches ``_hacked_transform`` in
    this file.
    """
    typemap = self.typemap
    entries = []
    groupindices = {}
    types = {}
    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))
        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
            if fieldarg:
                new_fieldname = new_fieldname + ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue
        typename = typedesc.name
        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out.  List comprehension (not filter())
            # so the emptiness test below works on Python 3.
            content = [n for n in content
                       if isinstance(n, nodes.Inline) or
                       isinstance(n, nodes.Text)]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue
        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content
        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, translatable_content)
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, translatable_content)
            entries.append([typedesc, entry])
    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            new_list += fieldtype.make_field(fieldtypes, self.domain, content)
    node.replace_self(new_list)
def _hacked_transform(typemap, node):
    """
    Taken from docfields.py from sphinx. This does all the steps around
    gathering data, but doesn't actually do the node transformations.

    :param typemap: mapping of field-type name to
        ``(typedesc, is_typefield)`` pairs, as built by Sphinx's
        ``DocFieldTransformer``.
    :param node: a docutils field-list node.  NOTE: unknown fields are
        still mutated in place (their ``fieldname[0]`` text is
        re-capitalized) even though no node replacement happens here.
    :returns: ``(entries, types)`` — the collected entry list and the
        per-typename type dictionaries, exactly as step 1 of the
        original ``transform`` would produce them.
    """
    entries = []
    groupindices = {}
    types = {}
    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))
        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
            if fieldarg:
                new_fieldname += ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue
        typename = typedesc.name
        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [n for n in content
                       if isinstance(n, nodes.Inline) or
                       isinstance(n, nodes.Text)]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue
        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                # no separate type given; leave fieldarg untouched
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname
        # wrap the body content in a translatable inline node carrying
        # the source location of the original field body
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content
        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            entries.append([typedesc, entry])
    return (entries, types)
def transform(self, node):
    """Transform a single field list *node*.

    Variant of Sphinx's ``DocFieldTransformer.transform`` that also
    collects per-argument *shape* and *attrs* information (via
    ``self.scan_fieldarg``) and forwards them, together with
    ``self.modname`` / ``self.typename``, to ``make_field``.
    """
    typemap = self.typemap
    fmodname = self.modname
    ftypename = self.typename
    entries = []
    groupindices = {}
    types = {}
    shapes = {}
    attrs = {}
    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))
        # sort out unknown fields
        if typedesc is None:  #or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            # NOTE(review): when fieldarg is empty this produces a
            # trailing space ("Fieldname "); upstream sphinx only appends
            # the argument when present — confirm whether intentional.
            new_fieldname = fieldtype.capitalize() + ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue
        typename = typedesc.name
        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [
                n for n in content
                if isinstance(n, nodes.Inline) or isinstance(n, nodes.Text)
            ]
            if content:
                # NOTE(review): here `is_typefield` is evaluated as the
                # *name* of one of the local dicts above (presumably
                # 'types', 'shapes' or 'attrs', set in the typemap).
                # eval() resolves against local scope, so renaming those
                # locals would silently break this; an explicit dict
                # dispatch would be safer — confirm the typemap values.
                eval(is_typefield).setdefault(typename, {})[fieldarg] = content
            continue
        # also support syntax like ``:param type name [attrs]:``
        if typedesc.is_typed == 2 and len(fieldarg.strip()):
            # extended syntax: scan_fieldarg splits the argument into
            # name, shape, type and attribute annotations
            argname, argshape, argtype, argattrs = self.scan_fieldarg(
                fieldarg)
            if argtype:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
            if argshape:
                shapes.setdefault(typename, {})[argname] = \
                    [nodes.Text(argshape)]
            if argattrs:
                attrs.setdefault(typename, {})[argname] = \
                    [nodes.emphasis(argattrs, argattrs)]
            fieldarg = argname
        elif typedesc.is_typed:
            # classic sphinx syntax ``:param type name:``
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname
        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            group[1].append(typedesc.make_entry(fieldarg, content))
        else:
            entries.append(
                [typedesc, typedesc.make_entry(fieldarg, content)])
    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            fieldshapes = shapes.get(fieldtype.name, {})
            fieldattrs = attrs.get(fieldtype.name, {})
            new_list += fieldtype.make_field(fieldtypes, self.domain,
                                             content,
                                             shapes=fieldshapes,
                                             attrs=fieldattrs,
                                             modname=fmodname,
                                             typename=ftypename)
    node.replace_self(new_list)
def transform(self, node):
    """Transform a single field list *node*.

    Second variant of the shapes/attrs-aware transform.  Fixed here: the
    type-field content was filtered with ``filter()`` and then tested
    with ``if content:`` — on Python 3 ``filter()`` returns a lazy
    iterator, which is always truthy (so empty results were still
    stored) and single-use (so the stored "content" rendered wrong).
    The list comprehension matches the sibling ``transform`` variant in
    this file.
    """
    typemap = self.typemap
    fmodname = self.modname
    ftypename = self.typename
    entries = []
    groupindices = {}
    types = {}
    shapes = {}
    attrs = {}
    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))
        # sort out unknown fields
        if typedesc is None:  #or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype.capitalize() + ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue
        typename = typedesc.name
        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out.  A list comprehension (not
            # filter()) so the emptiness test below is meaningful on
            # Python 3 and the stored content is a reusable list.
            content = [n for n in content
                       if isinstance(n, nodes.Inline) or
                       isinstance(n, nodes.Text)]
            if content:
                # NOTE(review): `is_typefield` is eval()'d as the name of
                # one of the local dicts ('types'/'shapes'/'attrs',
                # presumably) — fragile, left as-is; see sibling variant.
                eval(is_typefield).setdefault(typename, {})[fieldarg] = content
            continue
        # also support syntax like ``:param type name [attrs]:``
        if typedesc.is_typed == 2:
            argname, argshape, argtype, argattrs = self.scan_fieldarg(fieldarg)
            if argtype:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
            if argshape:
                shapes.setdefault(typename, {})[argname] = \
                    [nodes.Text(argshape)]
            if argattrs:
                attrs.setdefault(typename, {})[argname] = \
                    [nodes.emphasis(argattrs, argattrs)]
            fieldarg = argname
        elif typedesc.is_typed:
            # classic sphinx syntax ``:param type name:``
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname
    # grouped entries need to be collected in one entry, while others
    # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            group[1].append(typedesc.make_entry(fieldarg, content))
        else:
            entries.append([typedesc, typedesc.make_entry(fieldarg, content)])
    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            fieldshapes = shapes.get(fieldtype.name, {})
            fieldattrs = attrs.get(fieldtype.name, {})
            new_list += fieldtype.make_field(fieldtypes, self.domain,
                                             content,
                                             shapes=fieldshapes,
                                             attrs=fieldattrs,
                                             modname=fmodname,
                                             typename=ftypename)
    node.replace_self(new_list)
def _patched__DocFieldTransformer__transform(self, node):
    """Transform a single field list *node*.

    Patched copy of Sphinx's ``DocFieldTransformer.transform``.  Fixed
    here: the type-field content was filtered with ``filter()`` and then
    tested with ``if content:`` — on Python 3 ``filter()`` returns a
    lazy iterator that is always truthy and single-use, so empty results
    were stored and the stored iterator could not be rendered twice.
    Replaced with a list comprehension, as in ``_hacked_transform``.
    """
    typemap = self.typemap
    entries = []
    groupindices = {}
    types = {}
    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))
        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
            if fieldarg:
                new_fieldname = new_fieldname + ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue
        typename = typedesc.name
        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out.  List comprehension (not filter())
            # so the emptiness test below works on Python 3.
            content = [n for n in content
                       if isinstance(n, nodes.Inline) or
                       isinstance(n, nodes.Text)]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue
        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content
        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, translatable_content)
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, translatable_content)
            entries.append([typedesc, entry])
    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            new_list += fieldtype.make_field(fieldtypes, self.domain, content)
    node.replace_self(new_list)
def transform(self, node):
    # type: (nodes.Node) -> None
    """Transform a single field list *node*.

    Variant of ``DocFieldTransformer.transform`` that builds
    cross-reference nodes for argument types via ``typedesc.make_xrefs``
    and records a ``json:name`` attribute (via ``strip_json_array``) on
    the first xref.  Unknown fields are silently dropped (``continue``)
    instead of being passed through like in stock sphinx.
    """
    typemap = self.typemap
    entries = []
    groupindices = {}  # type: Dict[unicode, int]
    types = {}  # type: Dict[unicode, Dict]
    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))
        # collect the content, trying not to keep unnecessary paragraphs
        if docfields._is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # unknown field: dropped entirely (not passed through)
            continue
        typename = typedesc.name
        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [
                n for n in content
                if isinstance(n, nodes.Inline) or isinstance(n, nodes.Text)
            ]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue
        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                pass
            else:
                xrefs = typedesc.make_xrefs(
                    nodes.Text(argtype),
                    self.directive.domain,
                    nodes.Text(argtype),
                )
                # NOTE(review): assumes make_xrefs returns a non-empty
                # sequence; an empty result would raise IndexError here
                # — confirm against the domain implementation.
                xrefs[0].attributes['json:name'] = strip_json_array(
                    nodes.Text(argtype))
                types.setdefault(typename, {})[argname] = xrefs
                fieldarg = argname
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.document = fieldbody.parent.document
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content
        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            entries.append([typedesc, entry])
    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            env = self.directive.state.document.settings.env
            new_list += fieldtype.make_field(fieldtypes,
                                             self.directive.domain,
                                             content, env=env)
    node.replace_self(new_list)
def override_transform(self, other_self, node):
    """
    Transform a single field list *node*.
    Overwrite function `transform
    <https://github.com/sphinx-doc/sphinx/blob/
    master/sphinx/util/docfields.py#L271>`_.
    It only adds extra verification and returns results
    from the replaced function.

    @param      other_self      the builder
    @param      node            node the replaced function changes or replace

    The function parses the original function and checks that the list
    of arguments declared by the function is the same the list of
    documented arguments.

    NOTE(review): ``fieldbody``, ``parameters``, ``function_name`` and
    ``funckind`` are read *after* the field loop, so they carry the
    values from the last processed field; if *node* contains no
    ``Parameters``/``param`` field at all this raises ``NameError`` on
    ``fieldbody`` in step 2 — confirm callers guarantee at least one
    such field.
    """
    typemap = other_self.typemap
    entries = []
    groupindices = {}
    types = {}
    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        # only parameter documentation is verified; everything else is
        # skipped (it is re-handled by the replaced function at the end)
        if fieldtype == "Parameters":
            # numpydoc style
            keyfieldtype = 'parameter'
        elif fieldtype == "param":
            keyfieldtype = 'param'
        else:
            continue
        typedesc, is_typefield = typemap.get(keyfieldtype, (None, None))
        # sort out unknown fields
        extracted = []
        if keyfieldtype == 'parameter':
            # numpydoc: parameters come as one definition list per field
            for child in fieldbody.children:
                if isinstance(child, nodes.definition_list):
                    for child2 in child.children:
                        extracted.append(child2)
        elif typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
            if fieldarg:
                new_fieldname += ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue
        typename = typedesc.name
        # collect the content, trying not to keep unnecessary paragraphs
        if extracted:
            content = extracted
        elif _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children
        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [
                n for n in content
                if isinstance(n, (nodes.Inline, nodes.Text))
            ]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue
        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                pass
            else:
                types.setdefault(typename, {})[argname] = [nodes.Text(argtype)]
                fieldarg = argname
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.document = fieldbody.parent.document
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content
        # Import object, get the list of parameters
        docs = fieldbody.parent.source.split(":docstring of")[-1].strip()
        myfunc = None
        funckind = None
        function_name = None
        excs = []
        try:
            myfunc, function_name, funckind = import_any_object(docs)
        except ImportError as e:
            excs.append(e)
        if myfunc is None:
            if len(excs) > 0:
                reasons = "\n".join(" {0}".format(e) for e in excs)
            else:
                reasons = "unknown"
            logger = logging.getLogger("docassert")
            logger.warning(
                "[docassert] unable to import object '{0}', reasons:\n{1}".
                format(docs, reasons))
            myfunc = None
        if myfunc is None:
            # import failed: the check will be skipped for this object
            signature = None
            parameters = None
        else:
            try:
                signature = inspect.signature(myfunc)
                parameters = signature.parameters
            except (TypeError, ValueError):
                # builtins and some C-implemented callables have no
                # introspectable signature
                logger = logging.getLogger("docassert")
                logger.warning(
                    "[docassert] unable to get signature of '{0}'.".format(
                        docs))
                signature = None
                parameters = None
        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if extracted:
            # numpydoc: the parameter name is the first word of the first
            # line of the extracted definition item
            group_entries = []
            for ext in extracted:
                name = ext.astext().split('\n')[0].split()[0]
                group_entries.append((name, ext))
            entries.append([typedesc, group_entries])
        elif typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            entries.append([typedesc, entry])
    # step 2: all entries are collected, check the parameters list.
    try:
        env = other_self.directive.state.document.settings.env
    except AttributeError as e:
        logger = logging.getLogger("docassert")
        logger.warning("[docassert] {0}".format(e))
        env = None
    # NOTE(review): uses the last loop iteration's fieldbody (see
    # docstring note above)
    docname = fieldbody.parent.source.split(':docstring')[0]
    for entry in entries:
        if isinstance(entry, nodes.field):
            logger = logging.getLogger("docassert")
            logger.warning(
                "[docassert] unable to check [nodes.field] {0}".format(
                    entry))
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            check_typed_make_field(other_self, fieldtypes,
                                   other_self.directive.domain,
                                   content, env=env,
                                   parameters=parameters,
                                   function_name=function_name,
                                   docname=docname, kind=funckind)
    # the actual node transformation is delegated to the function this
    # one replaced
    return self.replaced(other_self, node)