def refPageShell(pageName, pageDesc, fp, sections=None, tail_content=None, man_section=3):
    printCopyrightSourceComments(fp)

    print(':data-uri:',
          ':icons: font',
          conventions.extra_refpage_headers,
          '',
          sep='\n', file=fp)

    s = '{}({})'.format(pageName, man_section)
    print('= ' + s,
          '',
          sep='\n', file=fp)

    if pageDesc.strip() == '':
        pageDesc = 'NO SHORT DESCRIPTION PROVIDED'
        logWarn('refPageHead: no short description provided for', pageName)

    print('== Name',
          '{} - {}'.format(pageName, pageDesc),
          '',
          sep='\n', file=fp)

    if sections:
        for title, content in sections.items():
            print('== {}'.format(title),
                  '',
                  content,
                  '',
                  sep='\n', file=fp)

    if tail_content:
        print(tail_content,
              '',
              sep='\n', file=fp)
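# Illustrative sketch (not part of the original script): tracing the print
# calls above, a hypothetical call such as
#   refPageShell('vkCreateInstance', 'create a new instance', fp)
# emits a page skeleton roughly like:
#
#   :data-uri:
#   :icons: font
#   <conventions.extra_refpage_headers>
#
#   = vkCreateInstance(3)
#
#   == Name
#   vkCreateInstance - create a new instance
#
# followed by one '== <title>' section per entry in `sections`, then
# `tail_content`. The name and description here are example inputs only.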
def genRef(specFile, baseDir):
    file = loadFile(specFile)
    if file is None:
        return

    # Save the path to this file for later use in rewriting relative includes
    specDir = os.path.dirname(os.path.abspath(specFile))

    pageMap = findRefs(file, specFile)
    logDiag(specFile + ': found', len(pageMap.keys()), 'potential pages')

    sys.stderr.flush()

    # Fix up references in pageMap
    fixupRefs(pageMap, specFile, file)

    # Create each page, if possible
    for name in sorted(pageMap):
        pi = pageMap[name]

        printPageInfo(pi, file)

        if pi.Warning:
            logDiag('genRef:', pi.name + ':', pi.Warning)

        if pi.extractPage:
            emitPage(baseDir, specDir, pi, file)
        elif pi.type == 'enums':
            autoGenEnumsPage(baseDir, pi, file)
        elif pi.type == 'flags':
            autoGenFlagsPage(baseDir, pi.name)
        else:
            # Don't extract this page
            logWarn('genRef: Cannot extract or autogenerate:', pi.name)
def refPageHead(pageName, pageDesc, specText, fieldName, fieldText, descText, fp):
    sections = OrderedDict()
    sections['C Specification'] = specText
    if fieldName is not None:
        sections[fieldName] = fieldText

    if descText is None or descText.strip() == '':
        logWarn('refPageHead: no description provided for', pageName)

    if descText is not None:
        sections['Description'] = descText

    refPageShell(pageName, pageDesc, fp, sections=sections)
def genExtension(baseDir, name, info):
    # Add a dictionary entry for this page
    global genDict
    genDict[name] = None
    declares = []
    elem = info.elem

    ext_type = elem.get('type')

    for required in elem.find('require'):
        req_name = required.get('name')
        if not req_name:
            # This isn't what we're looking for
            continue
        if req_name.endswith('_SPEC_VERSION') or req_name.endswith('_EXTENSION_NAME'):
            # Don't link to spec version or extension name - those ref pages aren't created.
            continue

        if required.get('extends'):
            # These are either extensions of enums,
            # or enum values: neither of which get a ref page.
            continue

        if req_name not in genDict:
            logWarn('ERROR: {} (in extension {}) does not have a ref page.'.format(
                req_name, name))

        declares.append(req_name)

    pageName = baseDir + '/' + name + '.txt'
    logDiag('genExtension:', pageName)
    fp = open(pageName, 'w', encoding='utf-8')

    sections = OrderedDict()
    sections['Specification'] = 'See link:{html_spec_relative}#%s[ %s] in the main specification for complete information.' % (
        name, name)

    refPageShell(name,
                 '{} extension'.format(ext_type),
                 fp,
                 sections=sections,
                 tail_content=makeExtensionInclude(name))
    refPageTail(name, seeAlsoList(name, declares), fp, auto=True)
    fp.close()
def autoGenFlagsPage(baseDir, flagName):
    pageName = baseDir + '/' + flagName + '.txt'
    fp = open(pageName, 'w', encoding='utf-8')

    # Add a dictionary entry for this page
    global genDict
    genDict[flagName] = None
    logDiag('autoGenFlagsPage:', pageName)

    # Short description
    matches = flagNamePat.search(flagName)
    if matches is not None:
        name = matches.group('name')
        author = matches.group('author')
        logDiag('autoGenFlagsPage: split name into', name, 'Flags', author)
        flagBits = name + 'FlagBits' + author
        desc = 'Bitmask of ' + flagBits
    else:
        logWarn('autoGenFlagsPage:', pageName,
                'does not end in "Flags{author ID}". Cannot infer FlagBits type.')
        flagBits = None
        desc = 'Unknown ' + apiName + ' flags type'

    # Description text
    if flagBits is not None:
        txt = ''.join((
            'etext:' + flagName,
            ' is a mask of zero or more elink:' + flagBits + '.\n',
            'It is used as a member and/or parameter of the structures and commands\n',
            'in the See Also section below.\n'))
    else:
        txt = ''.join((
            'etext:' + flagName,
            ' is an unknown ' + apiName + ' type, assumed to be a bitmask.\n'))

    refPageHead(flagName,
                desc,
                'include::../api/flags/' + flagName + '.txt[]\n',
                None, None,
                txt,
                fp)
    refPageTail(flagName, seeAlsoList(flagName), fp, auto=True)
    fp.close()
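# Illustrative sketch (not part of the original script): assuming flagNamePat
# (defined elsewhere) captures the text before 'Flags' as group 'name' and a
# trailing vendor suffix as group 'author', a hypothetical input like
# 'VkAccessFlagsKHR' splits into name='VkAccess', author='KHR', giving
# flagBits='VkAccessFlagBitsKHR' and the short description
# 'Bitmask of VkAccessFlagBitsKHR'.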
def genSinglePageRef(baseDir):
    """Generate baseDir/apispec.txt, the single-page version of the ref pages.

    This assumes there's a page for everything in the api module dictionaries.
    Extensions (KHR, EXT, etc.) are currently skipped"""
    # Accumulate head of page
    head = io.StringIO()

    printCopyrightSourceComments(head)

    print('= ' + apiName + ' API Reference Pages',
          ':data-uri:',
          ':icons: font',
          ':doctype: book',
          ':numbered!:',
          ':max-width: 200',
          ':toc2:',
          ':toclevels: 2',
          '',
          sep='\n', file=head)

    print('== Copyright', file=head)
    print('', file=head)
    print('include::{config}/copyright-ccby.txt[]', file=head)
    print('', file=head)

    # Inject the table of contents. Asciidoc really ought to be generating
    # this for us.
    sections = [
        [api.protos, 'protos', apiName + ' Commands'],
        [api.handles, 'handles', 'Object Handles'],
        [api.structs, 'structs', 'Structures'],
        [api.enums, 'enums', 'Enumerations'],
        [api.flags, 'flags', 'Flags'],
        [api.funcpointers, 'funcpointers', 'Function Pointer Types'],
        [api.basetypes, 'basetypes', apiName + ' Scalar types'],
        [api.defines, 'defines', 'C Macro Definitions'],
        [extensions, 'extensions', apiName + ' Extensions']]

    # Accumulate body of page
    body = io.StringIO()
    for (apiDict, label, title) in sections:
        # Add section title/anchor header to body
        anchor = '[[' + label + ',' + title + ']]'
        print(anchor,
              '== ' + title,
              '',
              ':leveloffset: 2',
              '',
              sep='\n', file=body)

        if label == 'extensions':
            # preserve order of extensions since we already sorted the way we want.
            keys = apiDict.keys()
        else:
            keys = sorted(apiDict.keys())

        for refPage in keys:
            # Don't generate links for aliases, which are included with the
            # aliased page
            if refPage not in api.alias:
                # Add page to body
                if 'FlagBits' in refPage and conventions.unified_flag_refpages:
                    # OpenXR does not create separate ref pages for FlagBits:
                    # the FlagBits includes go in the Flags refpage.
                    # Previously the Vulkan script would only emit non-empty
                    # Vk*Flags pages, via the logic
                    #   if refPage not in api.flags or api.flags[refPage] is not None
                    #       emit page
                    # Now, all are emitted.
                    continue
                else:
                    print('include::' + refPage + '.txt[]', file=body)
            else:
                # Alternatively, we could (probably should) link to the
                # aliased refpage
                logWarn('(Benign) Not including', refPage,
                        'in single-page reference',
                        'because it is an alias of', api.alias[refPage])

        print('\n' + ':leveloffset: 0' + '\n', file=body)

    # Write head and body to the output file
    pageName = baseDir + '/apispec.txt'
    fp = open(pageName, 'w', encoding='utf-8')

    print(head.getvalue(), file=fp, end='')
    print(body.getvalue(), file=fp, end='')

    head.close()
    body.close()
    fp.close()
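# Illustrative sketch (not part of the original script): tracing the body
# accumulation above, the 'protos' section of apispec.txt comes out roughly as
#
#   [[protos,<apiName> Commands]]
#   == <apiName> Commands
#
#   :leveloffset: 2
#
#   include::<refPage>.txt[]
#   ...
#
#   :leveloffset: 0
#
# with one include:: line per non-aliased ref page in api.protos.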
def emitPage(baseDir, specDir, pi, file):
    """Extract a single reference page into baseDir.

    - baseDir - base directory to emit page into
    - specDir - directory extracted page source came from
    - pi - pageInfo for this page relative to file
    - file - list of strings making up the file, indexed by pi"""
    pageName = baseDir + '/' + pi.name + '.txt'

    # Add a dictionary entry for this page
    global genDict
    genDict[pi.name] = None
    logDiag('emitPage:', pageName)

    # Short description
    if pi.desc is None:
        pi.desc = '(no short description available)'

    # Member/parameter section label and text, if there is one
    field = None
    fieldText = None

    if pi.type != 'freeform':
        # Not sure how this happens yet
        if pi.include is None:
            logWarn('emitPage:', pageName, 'INCLUDE is None, no page generated')
            return

        # Specification text
        lines = remapIncludes(file[pi.begin:pi.include + 1], baseDir, specDir)
        specText = ''.join(lines)

        if pi.param is not None:
            if pi.type == 'structs':
                field = 'Members'
            elif pi.type in ['protos', 'funcpointers']:
                field = 'Parameters'
            else:
                logWarn('emitPage: unknown field type:', pi.type, 'for', pi.name)
            lines = remapIncludes(file[pi.param:pi.body], baseDir, specDir)
            fieldText = ''.join(lines)

        # Description text
        if pi.body != pi.include:
            lines = remapIncludes(file[pi.body:pi.end + 1], baseDir, specDir)
            descText = ''.join(lines)
        else:
            descText = None
            logWarn('emitPage: INCLUDE == BODY, so description will be empty for',
                    pi.name)
            if pi.begin != pi.include:
                logWarn('emitPage: Note: BEGIN != INCLUDE, so the description might be incorrectly located before the API include!')
    else:
        specText = None
        descText = ''.join(file[pi.begin:pi.end + 1])

    # Rewrite asciidoctor xrefs to resolve properly in refpages
    specURL = conventions.specURL(pi.spec)

    specText = xrefRewrite(specText, specURL)
    fieldText = xrefRewrite(fieldText, specURL)
    descText = xrefRewrite(descText, specURL)

    fp = open(pageName, 'w', encoding='utf-8')
    refPageHead(pi.name,
                pi.desc,
                specText,
                field, fieldText,
                descText,
                fp)
    refPageTail(pageName=pi.name,
                specType=pi.spec,
                specAnchor=pi.anchor,
                seeAlso=seeAlsoList(pi.name, pi.refs),
                fp=fp,
                auto=False)
    fp.close()
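# Note (not part of the original script): xrefRewrite is defined elsewhere.
# Assuming it performs a substitution like the inline specLinksPattern version
# in the older emitPage later in this file, an asciidoc xref such as
#   <<fundamentals-objectmodel, object model>>
# becomes a link into the main specification, roughly
#   link:{html_spec_relative}#fundamentals-objectmodel[object model]
# The 'fundamentals-objectmodel' anchor is a hypothetical example.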
sections = [
    (api.flags, 'Flag Types'),
    (api.enums, 'Enumerated Types'),
    (api.structs, 'Structures'),
    (api.protos, 'Prototypes'),
    (api.funcpointers, 'Function Pointers'),
    (api.basetypes, apiName + ' Scalar Types'),
    (extensions, apiName + ' Extensions'),
]

# Summarize pages that weren't generated, for good or bad reasons
for (apiDict, title) in sections:
    # OpenXR was keeping a 'flagged' state which only printed out a
    # warning for the first non-generated page, but was otherwise
    # unused. This doesn't seem helpful.
    for page in apiDict:
        if page not in genDict:
            # Page was not generated - why not?
            if page in api.alias:
                logWarn('(Benign, is an alias) Ref page for', title, page,
                        'is aliased into', api.alias[page])
            elif page in api.flags and api.flags[page] is None:
                logWarn('(Benign, no FlagBits defined) No ref page generated for',
                        title, page)
            else:
                # Could introduce additional logic to detect
                # external types and not emit them.
                logWarn('No ref page generated for', title, page)

genSinglePageRef(baseDir)

if results.rewrite:
    # Generate Apache rewrite directives for refpage aliases
    fp = open(results.rewrite, 'w', encoding='utf-8')
def reflowFile(filename, args):
    logDiag('reflow: filename', filename)

    lines = loadFile(filename)
    if lines is None:
        return

    # Output file handle and reflow object for this file. There are no race
    # conditions on overwriting the input, but it's not recommended unless
    # you have backing store such as git.
    if args.overwrite:
        outFilename = filename
    else:
        outFilename = args.outDir + '/' + os.path.basename(filename) + args.suffix

    try:
        fp = open(outFilename, 'w', encoding='utf8')
    except:
        logWarn('Cannot open output file', outFilename, ':', sys.exc_info()[0])
        return

    state = ReflowState(filename,
                        file=fp,
                        reflow=not args.noflow,
                        nextvu=args.nextvu)

    for line in lines:
        state.incrLineNumber()

        # Is this a title line (leading '= ' followed by text)?
        thisTitle = False

        # The logic here is broken. If we're in a non-reflowable block and
        # this line *doesn't* end the block, it should always be
        # accumulated.

        if endPara.match(line):
            # Ending a paragraph. Emit the current paragraph, if any, and
            # prepare to begin a new paragraph.
            state.endPara(line)

            # If this is an include:: line starting the definition of a
            # structure or command, track that for use in VUID generation.
            matches = includePat.search(line)
            if matches is not None:
                include_type = matches.group('type')
                if include_type in ('protos', 'structs'):
                    state.apiName = matches.group('name')

        elif endParaContinue.match(line):
            # For now, always just end the paragraph.
            # Could check to see if len(para) > 0 to accumulate.
            state.endParaContinue(line)

            # If it's a title line, track that
            if line[0:2] == '= ':
                thisTitle = True

        elif blockReflow.match(line):
            # Starting or ending a block whose contents may be reflowed.
            # Blocks cannot be nested.

            # First see if this is an explicit Valid Usage block
            vuBlock = (state.lineNumber > 1 and
                       lines[state.lineNumber - 2] == '.Valid Usage\n')

            state.endParaBlockReflow(line, vuBlock)

        elif blockPassthrough.match(line):
            # Starting or ending a block whose contents must not be reflowed.
            # These are tables, etc. Blocks cannot be nested.
            state.endParaBlockPassthrough(line)

        elif state.lastTitle:
            # The previous line was a document title line. This line
            # is the author / credits line and must not be reflowed.
            state.endPara(line)

        else:
            # Just accumulate a line to the current paragraph. Watch out for
            # hanging indents / bullet-points and track that indent level.
            state.addLine(line)

        state.lastTitle = thisTitle

    # Cleanup at end of file
    state.endPara(None)

    # Sanity check on block nesting
    if len(state.blockStack) > 1:
        logWarn('file', filename,
                'mismatched asciidoc block delimiters at EOF:',
                state.blockStack[-1])

    fp.close()

    # Update the 'nextvu' value
    if args.nextvu != state.nextvu:
        logWarn('Updated nextvu to', state.nextvu, 'after file', filename)
        args.nextvu = state.nextvu
def emitPara(self):
    """Emit a paragraph, possibly reflowing it depending on the block context.

    Resets the paragraph accumulator."""
    if self.para != []:
        if self.vuStack[-1] and self.nextvu is not None:
            # If:
            #   - this paragraph is in a Valid Usage block,
            #   - VUID tags are being assigned,
            # Try to assign VUIDs

            if nestedVuPat.search(self.para[0]):
                # Check for nested bullet points. These should not be
                # assigned VUIDs, nor present at all, because they break
                # the VU extractor.
                logWarn(self.filename + ': Invalid nested bullet point in VU block:',
                        self.para[0])
            elif self.vuPrefix not in self.para[0]:
                # If:
                #   - a tag is not already present, and
                #   - the paragraph is a properly marked-up list item
                # Then add a VUID tag starting with the next free ID.

                # Split the first line after the bullet point
                matches = vuPat.search(self.para[0])
                if matches is not None:
                    logDiag('emitPara: Matched vuPat on line:', self.para[0], end='')
                    head = matches.group('head')
                    tail = matches.group('tail')

                    # Use the first pname: statement in the paragraph as
                    # the parameter name in the VUID tag. This won't always
                    # be correct, but should be highly reliable.
                    for vuLine in self.para:
                        matches = pnamePat.search(vuLine)
                        if matches is not None:
                            break

                    if matches is not None:
                        paramName = matches.group('param')
                    else:
                        paramName = 'None'
                        logWarn(self.filename,
                                'No param name found for VUID tag on line:',
                                self.para[0])

                    newline = (head + ' [[' +
                               self.vuFormat.format(self.vuPrefix,
                                                    self.apiName,
                                                    paramName,
                                                    self.nextvu) + ']] ' + tail)

                    logDiag('Assigning', self.vuPrefix, self.apiName, self.nextvu,
                            ' on line:', self.para[0], '->', newline, 'END')

                    self.para[0] = newline
                    self.nextvu = self.nextvu + 1
            # else:
            #     There are only a few cases of this, and they're all
            #     legitimate. Leave detecting this case to another tool
            #     or hand inspection.
            #     logWarn(self.filename + ': Unexpected non-bullet item in VU block (harmless if following an ifdef):',
            #             self.para[0])

        if self.reflowStack[-1]:
            self.printLines(self.reflowPara())
        else:
            self.printLines(self.para)

    # Reset the paragraph, including its indentation level
    self.para = []
    self.leadIndent = 0
    self.hangIndent = 0
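# Illustrative sketch (not part of the original script): vuPrefix and vuFormat
# are attributes defined elsewhere on ReflowState. Assuming vuPrefix is 'VUID'
# and vuFormat is something like '{0}-{1}-{2}-{3:0>5d}', a Valid Usage list
# item inside a vkCmdDraw block, such as
#   * pname:commandBuffer must: be in the recording state
# would be rewritten to
#   * [[VUID-vkCmdDraw-commandBuffer-00123]] pname:commandBuffer must: be in the recording state
# where 00123 stands in for the current nextvu value.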
def emitPage(baseDir, specDir, pi, file):
    pageName = baseDir + '/' + pi.name + '.txt'

    # Add a dictionary entry for this page
    global genDict
    genDict[pi.name] = None
    logDiag('emitPage:', pageName)

    # Short description
    if pi.desc is None:
        pi.desc = '(no short description available)'

    # Not sure how this happens yet
    if pi.include is None:
        logWarn('emitPage:', pageName, 'INCLUDE is None, no page generated')
        return

    # Specification text
    lines = remapIncludes(file[pi.begin:pi.include + 1], baseDir, specDir)
    specText = ''.join(lines)

    # Member/parameter list, if there is one
    field = None
    fieldText = None
    if pi.param is not None:
        if pi.type == 'structs':
            field = 'Members'
        elif pi.type in ['protos', 'funcpointers']:
            field = 'Parameters'
        else:
            logWarn('emitPage: unknown field type:', pi.type, 'for', pi.name)
        lines = remapIncludes(file[pi.param:pi.body], baseDir, specDir)
        fieldText = ''.join(lines)

    # Description text
    if pi.body != pi.include:
        lines = remapIncludes(file[pi.body:pi.end + 1], baseDir, specDir)
        descText = ''.join(lines)
    else:
        descText = None
        logWarn('emitPage: INCLUDE == BODY, so description will be empty for',
                pi.name)
        if pi.begin != pi.include:
            logWarn('emitPage: Note: BEGIN != INCLUDE, so the description might be incorrectly located before the API include!')

    # Substitute xrefs to point at the main spec
    specLinksPattern = re.compile(r'<<([^>,]+)[,]?[ \t\n]*([^>,]*)>>')
    specLinksSubstitute = r'link:{html_spec_relative}#\1[\2]'
    specText, _ = specLinksPattern.subn(specLinksSubstitute, specText)
    if fieldText is not None:
        fieldText, _ = specLinksPattern.subn(specLinksSubstitute, fieldText)
    if descText is not None:
        descText, _ = specLinksPattern.subn(specLinksSubstitute, descText)

    fp = open(pageName, 'w', encoding='utf-8')
    refPageHead(pi.name,
                pi.desc,
                specText,
                field, fieldText,
                descText,
                fp)
    refPageTail(pi.name, seeAlsoList(pi.name, pi.refs), fp, auto=False)
    fp.close()
def genExtension(baseDir, extpath, name, info):
    """Generate refpage, and add dictionary entry for an extension

    - baseDir - output directory to generate page in
    - extpath - None, or path to per-extension specification sources if
      those are to be included in extension refpages
    - name - extension name
    - info - <extension> Element from XML"""
    # Add a dictionary entry for this page
    global genDict
    genDict[name] = None
    declares = []
    elem = info.elem

    # Type of extension (instance, device, etc.)
    ext_type = elem.get('type')

    # Autogenerate interfaces from <extension> entry
    for required in elem.find('require'):
        req_name = required.get('name')
        if not req_name:
            # This isn't what we're looking for
            continue
        if req_name.endswith('_SPEC_VERSION') or req_name.endswith('_EXTENSION_NAME'):
            # Don't link to spec version or extension name - those ref pages aren't created.
            continue

        if required.get('extends'):
            # These are either extensions of enumerated types, or const enum
            # values: neither of which get a ref page - although we could
            # include the enumerated types in the See Also list.
            continue

        if req_name not in genDict:
            logWarn('ERROR: {} (in extension {}) does not have a ref page.'.format(
                req_name, name))

        declares.append(req_name)

    appbody = None
    if extpath is not None:
        appfp = open('{}/{}.txt'.format(extpath, name), 'r', encoding='utf-8')
        if appfp is not None:
            appbody = appfp.read()

            # Transform internal links to crosslinks
            specURL = conventions.specURL()
            appbody = xrefRewrite(appbody, specURL)
        else:
            logWarn('Cannot find extension appendix for', name)

            # Fall through to autogenerated page
            extpath = None
            appbody = None

        appfp.close()

    # Include the extension appendix without an extra title
    # head_content = 'include::{{appendices}}/{}.txt[]'.format(name)

    # Write the extension refpage
    pageName = baseDir + '/' + name + '.txt'
    logDiag('genExtension:', pageName)
    fp = open(pageName, 'w', encoding='utf-8')

    # There are no generated titled sections
    sections = None

    refPageShell(name,
                 '{} extension'.format(ext_type),
                 fp,
                 appbody,
                 sections=sections)
    refPageTail(pageName=name,
                specType=None,
                specAnchor=name,
                seeAlso=seeAlsoList(name, declares),
                fp=fp,
                auto=True)
    fp.close()
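# Illustrative sketch (not part of the original script): for a hypothetical
# instance extension 'VK_KHR_example' with extpath set, the generated page is
# the shell (title '= VK_KHR_example(3)', a Name section reading
# 'VK_KHR_example - instance extension') with the appendix text passed through
# as the page body, followed by a See Also list built from `declares` and
# whatever closing boilerplate refPageTail (defined elsewhere) emits for
# auto=True pages.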
                    help='Do not reflow text. Other actions may apply.')
parser.add_argument('-suffix', action='store', dest='suffix',
                    default='',
                    help='Set the suffix added to updated file names (default: none)')
parser.add_argument('files', metavar='filename', nargs='*',
                    help='a filename to reflow text in')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')

args = parser.parse_args()

setLogFile(True, True, args.logFile)
setLogFile(True, False, args.diagFile)
setLogFile(False, True, args.warnFile)

if args.overwrite:
    logWarn('reflow.py: will overwrite all input files')

if args.tagvu and args.nextvu is None:
    args.nextvu = startVUID

if args.nextvu is not None:
    logWarn('Tagging untagged Valid Usage statements starting at', args.nextvu)

# If no files are specified, reflow the entire specification chapters folder
if not args.files:
    folder_to_reflow = os.getcwd()
    folder_to_reflow += '/' + conventions.spec_reflow_path
    reflowAllAdocFiles(folder_to_reflow, args)
else:
    for file in args.files:
        reflowFile(file, args)
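# Usage sketch (not part of the original script): given the options visible in
# this fragment, typical invocations might look like
#   python reflow.py -overwrite chapters/fundamentals.txt
#   python reflow.py -tagvu -overwrite
# where the second form assigns VUIDs starting at startVUID. The -overwrite
# and -tagvu spellings are assumptions, inferred from args.overwrite and
# args.tagvu; their add_argument calls precede this fragment.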
                    default='',
                    help='Set the suffix added to updated file names (default: none)')
parser.add_argument('files', metavar='filename', nargs='*',
                    help='a filename to reflow text in')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')

args = parser.parse_args()

setLogFile(True, True, args.logFile)
setLogFile(True, False, args.diagFile)
setLogFile(False, True, args.warnFile)

if args.overwrite:
    logWarn('reflow.py: will overwrite all input files')

if args.tagvu and args.nextvu is None:
    args.nextvu = startVUID

if args.nextvu is not None:
    logWarn('Tagging untagged Valid Usage statements starting at', args.nextvu)

# If no files are specified, reflow the entire specification chapters folder
if not args.files:
    folder_to_reflow = conventions.spec_reflow_path
    logWarn('Reflowing all asciidoc files under', folder_to_reflow)
    reflowAllAdocFiles(folder_to_reflow, args)
else:
    for file in args.files:
        reflowFile(file, args)
def reflowFile(filename, args):
    logDiag('reflow: filename', filename)

    lines = loadFile(filename)
    if lines is None:
        return

    # Output file handle and reflow object for this file. There are no race
    # conditions on overwriting the input, but it's not recommended unless
    # you have backing store such as git.
    if args.overwrite:
        outFilename = filename
    else:
        outFilename = args.outDir + '/' + os.path.basename(filename) + args.suffix

    try:
        fp = open(outFilename, 'w', encoding='utf8')
    except:
        logWarn('Cannot open output file', outFilename, ':', sys.exc_info()[0])
        return

    state = ReflowState(filename,
                        file=fp,
                        reflow=not args.noflow,
                        nextvu=args.nextvu)

    for line in lines:
        state.incrLineNumber()

        # Is this a title line (leading '= ' followed by text)?
        thisTitle = False

        # The logic here is broken. If we're in a non-reflowable block and
        # this line *doesn't* end the block, it should always be
        # accumulated.

        # Test for a blockCommonReflow delimiter comment first, to avoid
        # treating it solely as an end-paragraph marker comment.
        if line == blockCommonReflow:
            # Starting or ending a pseudo-block for "common" VU statements.

            # Common VU statements use an Asciidoc variable as the apiName,
            # instead of inferring it from the most recent API include.
            state.apiName = '{refpage}'
            state.endParaBlockReflow(line, vuBlock=True)

        elif blockReflow.match(line):
            # Starting or ending a block whose contents may be reflowed.
            # Blocks cannot be nested.

            # Is this an explicit Valid Usage block?
            vuBlock = (state.lineNumber > 1 and
                       lines[state.lineNumber - 2] == '.Valid Usage\n')

            state.endParaBlockReflow(line, vuBlock)

        elif endPara.match(line):
            # Ending a paragraph. Emit the current paragraph, if any, and
            # prepare to begin a new paragraph.
            state.endPara(line)

            # If this is an include:: line starting the definition of a
            # structure or command, track that for use in VUID generation.
            matches = includePat.search(line)
            if matches is not None:
                generated_type = matches.group('generated_type')
                include_type = matches.group('category')
                if generated_type == 'api' and include_type in ('protos', 'structs'):
                    apiName = matches.group('entity_name')
                    if state.apiName != '':
                        # This happens when there are multiple API include
                        # lines in a single block. The style guideline is to
                        # always place the API which others are promoted to
                        # first. In virtually all cases, the promoted API
                        # will differ solely in the vendor suffix (or
                        # absence of it), which is benign.
                        if not apiMatch(state.apiName, apiName):
                            logWarn('Promoted API name mismatch at line',
                                    state.lineNumber,
                                    ':', 'apiName:', apiName,
                                    'does not match state.apiName:',
                                    state.apiName)
                    else:
                        state.apiName = apiName

        elif endParaContinue.match(line):
            # For now, always just end the paragraph.
            # Could check to see if len(para) > 0 to accumulate.
            state.endParaContinue(line)

            # If it's a title line, track that
            if line[0:2] == '= ':
                thisTitle = True

        elif blockPassthrough.match(line):
            # Starting or ending a block whose contents must not be reflowed.
            # These are tables, etc. Blocks cannot be nested.
            state.endParaBlockPassthrough(line)

        elif state.lastTitle:
            # The previous line was a document title line. This line
            # is the author / credits line and must not be reflowed.
            state.endPara(line)

        else:
            # Just accumulate a line to the current paragraph. Watch out for
            # hanging indents / bullet-points and track that indent level.
            state.addLine(line)

        state.lastTitle = thisTitle

    # Cleanup at end of file
    state.endPara(None)

    # Check block nesting
    if len(state.blockStack) > 1:
        logWarn('file', filename,
                'mismatched asciidoc block delimiters at EOF:',
                state.blockStack[-1])

    fp.close()

    # Update the 'nextvu' value
    if args.nextvu != state.nextvu:
        logWarn('Updated nextvu to', state.nextvu, 'after file', filename)
        args.nextvu = state.nextvu
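# Illustrative sketch (not part of the original script): includePat is defined
# elsewhere; the group names used above imply it matches include lines such as
#   include::{generated}/api/protos/vkCreateInstance.txt[]
# yielding generated_type='api', category='protos',
# entity_name='vkCreateInstance'. The exact path layout is an assumption.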