def parse(self, line):
    """Render an ``Argument(s):`` block of the HAProxy documentation.

    When *line* carries an "Argument :"/"Arguments :" label, the following
    more-indented lines are consumed from the parsing context and rendered
    through the ``parser/arguments.tpl`` template.  Any other line is
    returned unchanged.

    Cleanup vs. previous revision: removed two large commented-out dead
    code blocks, the unused locals ``content``/``desc_indent``/
    ``new_arglines`` and an unreachable ``return line`` that followed the
    ``return template.render(...)`` statement.
    """
    pctxt = self.pctxt

    result = re.search(r'(Arguments? *:)', line)
    if not result:
        return line

    label = result.group(0)
    # Text that follows the label on the same line, e.g. "Arguments : none".
    desc = re.sub(r'.*Arguments? *:', '', line).strip()
    indent = parser.get_indent(line)
    pctxt.next()
    pctxt.eat_empty_lines()

    arglines = []
    if desc != "none":
        # Collect every following line indented deeper than the label,
        # re-inserting the blank lines swallowed by eat_empty_lines() so
        # the original layout is preserved in the output.
        add_empty_lines = 0
        while pctxt.has_more_lines() and (parser.get_indent(pctxt.get_line()) > indent):
            for j in range(0, add_empty_lines):
                arglines.append("")
            arglines.append(pctxt.get_line())
            pctxt.next()
            add_empty_lines = pctxt.eat_empty_lines()

    if arglines:
        parser.remove_indent(arglines)

    # Tell the main loop this parser fully handled the line.
    pctxt.stop = True

    template = pctxt.templates.get_template("parser/arguments.tpl")
    return template.render(
        pctxt=pctxt,
        label=label,
        desc=desc,
        content=arglines
    )
def convert(infile, outfile, base=''): global document, keywords, keywordsCount, chapters, keyword_conflicts if len(base) > 0 and base[:-1] != '/': base += '/' hasSummary = False data = [] fd = file(infile,"r") for line in fd: line.replace("\t", " " * 8) line = line.rstrip() data.append(line) fd.close() pctxt = PContext( TemplateLookup( directories=[ 'templates' ] ) ) parsers = init_parsers(pctxt) pctxt.context = { 'headers': {}, 'document': "" } sections = [] currentSection = { "details": getTitleDetails(""), "content": "", } chapters = {} keywords = {} keywordsCount = {} specialSections = { "default": { "hasKeywords": True, }, "4.1": { "hasKeywords": True, }, } pctxt.keywords = keywords pctxt.keywordsCount = keywordsCount pctxt.chapters = chapters print >> sys.stderr, "Importing %s..." % infile nblines = len(data) i = j = 0 while i < nblines: line = data[i].rstrip() if i < nblines - 1: next = data[i + 1].rstrip() else: next = "" if (line == "Summary" or re.match("^[0-9].*", line)) and (len(next) > 0) and (next[0] == '-') \ and ("-" * len(line)).startswith(next): # Fuzzy underline length detection sections.append(currentSection) currentSection = { "details": getTitleDetails(line), "content": "", } j = 0 i += 1 # Skip underline while not data[i + 1].rstrip(): i += 1 # Skip empty lines else: if len(line) > 80: print >> sys.stderr, "Line `%i' exceeds 80 columns" % (i + 1) currentSection["content"] = currentSection["content"] + line + "\n" j += 1 if currentSection["details"]["title"] == "Summary" and line != "": hasSummary = True # Learn chapters from the summary details = getTitleDetails(line) if details["chapter"]: chapters[details["chapter"]] = details i += 1 sections.append(currentSection) chapterIndexes = sorted(chapters.keys()) document = "" # Complete the summary for section in sections: details = section["details"] title = details["title"] if title: fulltitle = title if details["chapter"]: #documentAppend("<a name=\"%s\"></a>" % details["chapter"]) fulltitle = 
details["chapter"] + ". " + title if not details["chapter"] in chapters: print >> sys.stderr, "Adding '%s' to the summary" % details["title"] chapters[details["chapter"]] = details chapterIndexes = sorted(chapters.keys()) for section in sections: details = section["details"] pctxt.details = details level = details["level"] title = details["title"] content = section["content"].rstrip() print >> sys.stderr, "Parsing chapter %s..." % title if (title == "Summary") or (title and not hasSummary): summaryTemplate = pctxt.templates.get_template('summary.html') documentAppend(summaryTemplate.render( chapters = chapters, chapterIndexes = chapterIndexes, )) if title and not hasSummary: hasSummary = True else: continue if title: documentAppend('<a class="anchor" id="%s" name="%s"></a>' % (details["chapter"], details["chapter"])) if level == 1: documentAppend("<div class=\"page-header\">", False) documentAppend('<h%d id="chapter-%s" data-target="%s"><small><a class="small" href="#%s">%s.</a></small> %s</h%d>' % (level, details["chapter"], details["chapter"], details["chapter"], details["chapter"], cgi.escape(title, True), level)) if level == 1: documentAppend("</div>", False) if content: if False and title: # Display a navigation bar documentAppend('<ul class="well pager">') documentAppend('<li><a href="#top">Top</a></li>', False) index = chapterIndexes.index(details["chapter"]) if index > 0: documentAppend('<li class="previous"><a href="#%s">Previous</a></li>' % chapterIndexes[index - 1], False) if index < len(chapterIndexes) - 1: documentAppend('<li class="next"><a href="#%s">Next</a></li>' % chapterIndexes[index + 1], False) documentAppend('</ul>', False) content = cgi.escape(content, True) content = re.sub(r'section ([0-9]+(.[0-9]+)*)', r'<a href="#\1">section \1</a>', content) pctxt.set_content(content) if not title: lines = pctxt.get_lines() pctxt.context['headers'] = { 'title': lines[1].strip(), 'subtitle': lines[2].strip(), 'version': lines[4].strip(), 'author': 
lines[5].strip(), 'date': lines[6].strip() } if HAPROXY_GIT_VERSION: pctxt.context['headers']['version'] = 'version ' + HAPROXY_GIT_VERSION # Skip header lines pctxt.eat_lines() pctxt.eat_empty_lines() documentAppend('<div>', False) delay = [] while pctxt.has_more_lines(): try: specialSection = specialSections[details["chapter"]] except: specialSection = specialSections["default"] line = pctxt.get_line() if i < nblines - 1: nextline = pctxt.get_line(1) else: nextline = "" oldline = line pctxt.stop = False for parser in parsers: line = parser.parse(line) if pctxt.stop: break if oldline == line: # nothing has changed, # delays the rendering if delay or line != "": delay.append(line) pctxt.next() elif pctxt.stop: while delay and delay[-1].strip() == "": del delay[-1] if delay: remove_indent(delay) documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False) delay = [] documentAppend(line, False) else: while delay and delay[-1].strip() == "": del delay[-1] if delay: remove_indent(delay) documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False) delay = [] documentAppend(line, True) pctxt.next() while delay and delay[-1].strip() == "": del delay[-1] if delay: remove_indent(delay) documentAppend('<pre class="text">%s\n</pre>' % "\n".join(delay), False) delay = [] documentAppend('</div>') if not hasSummary: summaryTemplate = pctxt.templates.get_template('summary.html') print chapters document = summaryTemplate.render( chapters = chapters, chapterIndexes = chapterIndexes, ) + document # Log warnings for keywords defined in several chapters keyword_conflicts = {} for keyword in keywords: keyword_chapters = list(keywords[keyword]) keyword_chapters.sort() if len(keyword_chapters) > 1: print >> sys.stderr, 'Multi section keyword : "%s" in chapters %s' % (keyword, list(keyword_chapters)) keyword_conflicts[keyword] = keyword_chapters keywords = list(keywords) keywords.sort() createLinks() # Add the keywords conflicts to the keywords list to make them 
available in the search form # And remove the original keyword which is now useless for keyword in keyword_conflicts: sections = keyword_conflicts[keyword] offset = keywords.index(keyword) for section in sections: keywords.insert(offset, "%s (%s)" % (keyword, chapters[section]['title'])) offset += 1 keywords.remove(keyword) print >> sys.stderr, "Exporting to %s..." % outfile template = pctxt.templates.get_template('template.html') try: footerTemplate = pctxt.templates.get_template('footer.html') footer = footerTemplate.render( headers = pctxt.context['headers'], document = document, chapters = chapters, chapterIndexes = chapterIndexes, keywords = keywords, keywordsCount = keywordsCount, keyword_conflicts = keyword_conflicts, version = VERSION, date = datetime.datetime.now().strftime("%Y/%m/%d"), ) except TopLevelLookupException: footer = "" fd = open(outfile,'w') print >> fd, template.render( headers = pctxt.context['headers'], base = base, document = document, chapters = chapters, chapterIndexes = chapterIndexes, keywords = keywords, keywordsCount = keywordsCount, keyword_conflicts = keyword_conflicts, version = VERSION, date = datetime.datetime.now().strftime("%Y/%m/%d"), footer = footer ) fd.close()
def parse(self, line):
    """Render an ``Example(s):`` block of the HAProxy documentation.

    When *line* carries an "Example :"/"Examples :" label, the optional
    description and the indented example lines that follow are consumed
    from the parsing context and rendered through the
    ``parser/example.tpl`` template.  Any other line is returned
    unchanged.
    """
    pctxt = self.pctxt

    result = re.search(r'(Examples? *:)', line)
    if result:
        label = result.group(0)

        desc_indent = False
        # Text that follows the label on the same line
        desc = re.sub(r'.*Examples? *:', '', line).strip()

        # Some examples have a description
        if desc:
            desc_indent = len(line) - len(desc)

        indent = parser.get_indent(line)

        if desc:
            # And some description are on multiple lines
            while pctxt.get_line(1) and parser.get_indent(pctxt.get_line(1)) == desc_indent:
                desc += " " + pctxt.get_line(1).strip()
                pctxt.next()

        pctxt.next()
        add_empty_line = pctxt.eat_empty_lines()

        content = []

        if parser.get_indent(pctxt.get_line()) > indent:
            # Multi-line example, indented deeper than the label
            if desc:
                # Capitalize the first letter of the description
                desc = desc[0].upper() + desc[1:]
            add_empty_line = 0
            while pctxt.has_more_lines() and ((not pctxt.get_line()) or (parser.get_indent(pctxt.get_line()) > indent)):
                if pctxt.get_line():
                    # Re-insert the blank lines swallowed above, then
                    # highlight any trailing '#' comment on the line
                    for j in xrange(0, add_empty_line):
                        content.append("")
                    content.append(re.sub(r'(#.*)$', self.comment, pctxt.get_line()))
                    add_empty_line = 0
                else:
                    add_empty_line += 1
                pctxt.next()
        elif parser.get_indent(pctxt.get_line()) == indent:
            # Simple example that can't have empty lines
            if add_empty_line:
                # This means that the example was on the same line as the 'Example' tag
                content.append(" " * indent + desc)
                desc = False
            else:
                while pctxt.has_more_lines() and (parser.get_indent(pctxt.get_line()) == indent):
                    content.append(pctxt.get_line())
                    pctxt.next()
                pctxt.eat_empty_lines() # Skip empty remaining lines

        # Tell the main loop this parser fully handled the line
        pctxt.stop = True

        parser.remove_indent(content)

        template = pctxt.templates.get_template("parser/example.tpl")
        return template.render(
            label=label,
            desc=desc,
            content=content
        )
    return line
def parse(self, line):
    """Render an ``Argument(s):`` block of the HAProxy documentation.

    When *line* carries an "Argument :"/"Arguments :" label, the
    more-indented lines that follow are consumed from the parsing
    context and rendered through the ``parser/arguments.tpl`` template.
    Any other line is returned unchanged.
    """
    #return re.sub(r'(Arguments *:)', self.replace, line)
    pctxt = self.pctxt

    result = re.search(r'(Arguments? *:)', line)
    if result:
        label = result.group(0)
        content = []

        desc_indent = False
        # Text that follows the label on the same line,
        # e.g. "Arguments : none"
        desc = re.sub(r'.*Arguments? *:', '', line).strip()

        indent = parser.get_indent(line)

        pctxt.next()
        pctxt.eat_empty_lines()

        arglines = []
        if desc != "none":
            # Collect every following line indented deeper than the label,
            # re-inserting the blank lines swallowed by eat_empty_lines()
            add_empty_lines = 0
            while pctxt.has_more_lines() and (parser.get_indent(
                    pctxt.get_line()) > indent):
                for j in range(0, add_empty_lines):
                    arglines.append("")
                arglines.append(pctxt.get_line())
                pctxt.next()
                add_empty_lines = pctxt.eat_empty_lines()
            '''
            print line
            if parser.get_indent(line) == arg_indent:
                argument = re.sub(r' *([^ ]+).*', r'\1', line)
                if argument:
                    #content.append("<b>%s</b>" % argument)
                    arg_desc = [line.replace(argument, " " * len(self.unescape(argument)), 1)]
                    #arg_desc = re.sub(r'( *)([^ ]+)(.*)', r'\1<b>\2</b>\3', line)
                    arg_desc_indent = parser.get_indent(arg_desc[0])
                    arg_desc[0] = arg_desc[0][arg_indent:]
                    pctxt.next()
                    add_empty_lines = 0
                    while pctxt.has_more_lines and parser.get_indent(pctxt.get_line()) >= arg_indent:
                        for i in xrange(0, add_empty_lines):
                            arg_desc.append("")
                        arg_desc.append(pctxt.get_line()[arg_indent:])
                        pctxt.next()
                        add_empty_lines = pctxt.eat_empty_lines()
                    # TODO : reduce space at the beginnning
                    content.append({
                        'name': argument,
                        'desc': arg_desc
                    })
            '''

        if arglines:
            new_arglines = []
            #content = self.parse_args(arglines)
            parser.remove_indent(arglines)
            '''
            pctxt2 = parser.PContext(pctxt.templates)
            pctxt2.set_content_list(arglines)
            while pctxt2.has_more_lines():
                new_arglines.append(parser.example.Parser(pctxt2).parse(pctxt2.get_line()))
                pctxt2.next()
            arglines = new_arglines
            '''

        # Tell the main loop this parser fully handled the line
        pctxt.stop = True

        template = pctxt.templates.get_template("parser/arguments.tpl")
        return template.render(pctxt=pctxt,
                               label=label,
                               desc=desc,
                               content=arglines
                               #content=content
                               )
        # NOTE(review): unreachable — kept from a previous revision
        return line
    return line
def convert(pctxt, infile, outfile, base='', version='', haproxy_version=''):
    """Convert the HAProxy text documentation *infile* into HTML data.

    Python 3 variant: parses the raw documentation, splits it into
    chapters/sections, runs every registered line parser over the content
    and returns a dict with everything needed to render the final page
    (document body, chapter index, keyword lists, footer, ...).

    Parameters
    ----------
    pctxt : PContext
        Parsing context (templates, keyword registries, ...); mutated.
    infile : str
        Path of the text documentation to read.
    outfile : str
        Kept for interface compatibility; this variant does not write the
        output itself, it returns the rendering data instead.
    base : str
        Base URL prefix; a trailing '/' is appended when missing.
    version : str
        Converter version string passed to the templates.
    haproxy_version : str
        When set, overrides the version found in the document header.

    Fixes vs. previous revision:
      - ``base[:-1] != '/'`` compared everything *but* the last character,
        appending a second '/' to bases already ending with one;
      - the chapter index re-sort inside the summary-completion loop
        dropped the numeric sort key (so "10" sorted before "2");
      - the bare ``except:`` around the chapter lookup narrowed to
        ``KeyError``;
      - leftover debug ``print(chapters)`` routed to stderr;
      - the empty-line skip after an underline is now bounds-checked.
    """
    global document, keywords, keywordsCount, chapters, keyword_conflicts

    if base and base[-1] != '/':
        base += '/'

    hasSummary = False

    # read data from the input file,
    # store everything as a list of string
    # after replacing tabulation characters
    # with 8 spaces
    with open(infile) as fd:
        data = [line.replace("\t", " " * 8).rstrip() for line in fd]

    parsers = init_parsers(pctxt)

    pctxt.context = {
        'headers': {},
        'document': "",
        'base': base,
    }

    sections = []
    currentSection = {
        "details": getTitleDetails(""),
        "content": "",
    }

    chapters = {}
    keywords = {}
    keywordsCount = {}

    specialSections = {
        "default": {
            "hasKeywords": True,
        },
        "4.1": {
            "hasKeywords": True,
        },
    }

    pctxt.keywords = keywords
    pctxt.keywordsCount = keywordsCount
    pctxt.chapters = chapters

    print("Importing %s..." % infile, file=sys.stderr)

    # Chapter numbers ("1", "2.4", "10.1.2", ...) must sort numerically,
    # not lexicographically.
    def chapter_key(chapter):
        return [int(part) for part in chapter.split('.')]

    # Split the document into sections: a title is "Summary" or starts
    # with a digit and is underlined with dashes on the next line.
    nblines = len(data)
    i = j = 0
    while i < nblines:
        line = data[i].rstrip()
        if i < nblines - 1:
            next_line = data[i + 1].rstrip()
        else:
            next_line = ""

        if (line == "Summary" or re.match(
                "^[0-9].*",
                line)) and (len(next_line) > 0) and (next_line[0] == '-'):
            # and ("-" * len(line)).startswith(next_line):  # Fuzzy underline length detection
            sections.append(currentSection)
            currentSection = {
                "details": getTitleDetails(line),
                "content": "",
            }
            j = 0
            i += 1  # Skip underline
            # Bounds-checked so a title at end-of-file cannot raise IndexError
            while i + 1 < nblines and not data[i + 1].rstrip():
                i += 1  # Skip empty lines
        else:
            if len(line) > 80:
                print("Line `%i' exceeds 80 columns" % (i + 1),
                      file=sys.stderr)

            currentSection["content"] = currentSection["content"] + line + "\n"
            j += 1
            if currentSection["details"]["title"] == "Summary" and line != "":
                hasSummary = True
                # Learn chapters from the summary
                details = getTitleDetails(line)
                if details["chapter"]:
                    chapters[details["chapter"]] = details
        i += 1
    sections.append(currentSection)

    chapterIndexes = sorted(chapters.keys(), key=chapter_key)

    document = ""

    # Complete the summary with chapters that were not listed in it
    for section in sections:
        details = section["details"]
        title = details["title"]
        if title:
            fulltitle = title
            if details["chapter"]:
                fulltitle = details["chapter"] + ". " + title
                if not details["chapter"] in chapters:
                    print("Adding '%s' to the summary" % details["title"],
                          file=sys.stderr)
                    chapters[details["chapter"]] = details
                    chapterIndexes = sorted(chapters.keys(), key=chapter_key)

    for section in sections:
        details = section["details"]
        pctxt.details = details
        level = details["level"]
        title = details["title"]
        content = section["content"].rstrip()

        print("Parsing chapter %s..." % title, file=sys.stderr)

        if (title == "Summary") or (title and not hasSummary):
            summaryTemplate = pctxt.templates.get_template('summary.html')
            documentAppend(
                summaryTemplate.render(
                    pctxt=pctxt,
                    chapters=chapters,
                    chapterIndexes=chapterIndexes,
                ))
            if title and not hasSummary:
                # No summary in the source document: the generated one
                # replaces this chapter, which is still parsed below.
                hasSummary = True
            else:
                continue

        if title:
            documentAppend('<a class="anchor" id="%s" name="%s"></a>' %
                           (details["chapter"], details["chapter"]))
            if level == 1:
                documentAppend("<div class=\"page-header\">", False)
            documentAppend(
                '<h%d id="chapter-%s" data-target="%s"><small><a class="small" href="#%s">%s.</a></small> %s</h%d>'
                % (level, details["chapter"], details["chapter"],
                   details["chapter"], details["chapter"],
                   cgi.escape(title, True), level))
            if level == 1:
                documentAppend("</div>", False)

        if content:
            if False and title:
                # Display a navigation bar (currently disabled)
                documentAppend('<ul class="well pager">')
                documentAppend('<li><a href="#top">Top</a></li>', False)
                index = chapterIndexes.index(details["chapter"])
                if index > 0:
                    documentAppend(
                        '<li class="previous"><a href="#%s">Previous</a></li>'
                        % chapterIndexes[index - 1], False)
                if index < len(chapterIndexes) - 1:
                    documentAppend(
                        '<li class="next"><a href="#%s">Next</a></li>' %
                        chapterIndexes[index + 1], False)
                documentAppend('</ul>', False)

            content = cgi.escape(content, True)
            content = re.sub(r'section ([0-9]+(.[0-9]+)*)',
                             r'<a href="#\1">section \1</a>', content)

            pctxt.set_content(content)

            if not title:
                # Untitled leading block: extract the document headers.
                lines = pctxt.get_lines()
                pctxt.context['headers'] = {
                    'title': '',
                    'subtitle': '',
                    'version': '',
                    'author': '',
                    'date': ''
                }
                if re.match("^-+$", pctxt.get_line().strip()):
                    # Try to analyze the header of the file, assuming it follows
                    # those rules :
                    # - it begins with a "separator line" (several '-' chars)
                    # - then the document title
                    # - an optional subtitle
                    # - a new separator line
                    # - the version
                    # - the author
                    # - the date
                    pctxt.next()
                    pctxt.context['headers']['title'] = pctxt.get_line().strip()
                    pctxt.next()
                    subtitle = ""
                    while not re.match("^-+$", pctxt.get_line().strip()):
                        subtitle += " " + pctxt.get_line().strip()
                        pctxt.next()
                    pctxt.context['headers']['subtitle'] += subtitle.strip()
                    if not pctxt.context['headers']['subtitle']:
                        # No subtitle, try to guess one from the title if it
                        # starts with the word "HAProxy"
                        if pctxt.context['headers']['title'].startswith(
                                'HAProxy '):
                            pctxt.context['headers']['subtitle'] = \
                                pctxt.context['headers']['title'][8:]
                            pctxt.context['headers']['title'] = 'HAProxy'
                    pctxt.next()
                    pctxt.context['headers']['version'] = \
                        pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['author'] = \
                        pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['date'] = pctxt.get_line().strip()
                    pctxt.next()

                if haproxy_version:
                    # The caller-provided version takes precedence
                    pctxt.context['headers']['version'] = \
                        'version ' + haproxy_version

                # Skip header lines
                pctxt.eat_lines()
                pctxt.eat_empty_lines()

            documentAppend('<div>', False)

            # Lines no parser transformed are delayed and flushed together
            # inside a single <pre> block.
            delay = []
            while pctxt.has_more_lines():
                try:
                    specialSection = specialSections[details["chapter"]]
                except KeyError:
                    specialSection = specialSections["default"]

                line = pctxt.get_line()
                if i < nblines - 1:
                    nextline = pctxt.get_line(1)
                else:
                    nextline = ""

                oldline = line
                pctxt.stop = False
                for one_parser in parsers:
                    line = one_parser.parse(line)
                    if pctxt.stop:
                        break
                if oldline == line:
                    # nothing has changed,
                    # delays the rendering
                    if delay or line != "":
                        delay.append(line)
                    pctxt.next()
                elif pctxt.stop:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        parser.remove_indent(delay)
                        documentAppend(
                            '<pre class="text">%s\n</pre>' % "\n".join(delay),
                            False)
                    delay = []
                    documentAppend(line, False)
                else:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        parser.remove_indent(delay)
                        documentAppend(
                            '<pre class="text">%s\n</pre>' % "\n".join(delay),
                            False)
                    delay = []
                    documentAppend(line, True)
                    pctxt.next()

            # Flush the remaining delayed lines
            while delay and delay[-1].strip() == "":
                del delay[-1]
            if delay:
                parser.remove_indent(delay)
                documentAppend(
                    "<pre class=\"text\">{}\n</pre>".format("\n".join(delay)),
                    False)
            delay = []
            documentAppend('</div>')

    if not hasSummary:
        summaryTemplate = pctxt.templates.get_template('summary.html')
        # Debug trace, routed to stderr so stdout stays clean
        print(chapters, file=sys.stderr)
        document = summaryTemplate.render(
            pctxt=pctxt,
            chapters=chapters,
            chapterIndexes=chapterIndexes,
        ) + document

    # Log warnings for keywords defined in several chapters
    keyword_conflicts = {}
    for keyword in keywords:
        keyword_chapters = list(keywords[keyword])
        keyword_chapters.sort()
        if len(keyword_chapters) > 1:
            print("Multi section keyword : \"{}\" in chapters {}".format(
                keyword, list(keyword_chapters)), file=sys.stderr)
            keyword_conflicts[keyword] = keyword_chapters

    keywords = list(keywords)
    keywords.sort()

    createLinks()

    # Add the keywords conflicts to the keywords list to make
    # them available in the search form and remove the original
    # keyword which is now useless
    for keyword in keyword_conflicts:
        sections = keyword_conflicts[keyword]
        offset = keywords.index(keyword)
        for section in sections:
            keywords.insert(
                offset,
                "{} ({})".format(keyword, chapters[section]['title']))
            offset += 1
        keywords.remove(keyword)

    try:
        footerTemplate = pctxt.templates.get_template('footer.html')
        footer = footerTemplate.render(
            pctxt=pctxt,
            headers=pctxt.context['headers'],
            document=document,
            chapters=chapters,
            chapterIndexes=chapterIndexes,
            keywords=keywords,
            keywordsCount=keywordsCount,
            keyword_conflicts=keyword_conflicts,
            version=version,
            date=datetime.datetime.now().strftime("%Y/%m/%d"),
        )
    except TopLevelLookupException:
        # No footer template available: render without a footer
        footer = ""

    return {
        'pctxt': pctxt,
        'headers': pctxt.context['headers'],
        'base': base,
        'document': document,
        'chapters': chapters,
        'chapterIndexes': chapterIndexes,
        'keywords': keywords,
        'keywordsCount': keywordsCount,
        'keyword_conflicts': keyword_conflicts,
        'version': version,
        'date': datetime.datetime.now().strftime("%Y/%m/%d"),
        'footer': footer
    }
def convert(infile, outfile, base=''):
    """Convert the HAProxy text documentation *infile* into an HTML page.

    Python 2 variant: reads the raw documentation, splits it into
    chapters/sections, runs every registered line parser over the content,
    then renders ``template.html`` (plus an optional ``footer.html``) into
    *outfile*.  *base* is the base URL prefix used by the templates.
    """
    global document, keywords, keywordsCount, chapters, keyword_conflicts

    # NOTE(review): base[:-1] compares all-but-last character; a base
    # already ending with '/' gets a second one appended — confirm intent.
    if len(base) > 0 and base[:-1] != '/':
        base += '/'

    hasSummary = False

    data = []
    fd = file(infile, "r")
    for line in fd:
        # NOTE(review): the result of replace() is discarded here, so
        # tabulations are never actually expanded to spaces.
        line.replace("\t", " " * 8)
        line = line.rstrip()
        data.append(line)
    fd.close()

    pctxt = PContext(TemplateLookup(directories=['templates']))

    parsers = init_parsers(pctxt)

    pctxt.context = {'headers': {}, 'document': ""}

    sections = []
    currentSection = {
        "details": getTitleDetails(""),
        "content": "",
    }

    chapters = {}

    keywords = {}
    keywordsCount = {}

    specialSections = {
        "default": {
            "hasKeywords": True,
        },
        "4.1": {
            "hasKeywords": True,
        },
    }

    pctxt.keywords = keywords
    pctxt.keywordsCount = keywordsCount
    pctxt.chapters = chapters

    print >> sys.stderr, "Importing %s..." % infile

    # Split the document into sections: a title is "Summary" or starts
    # with a digit and is underlined with dashes on the next line.
    nblines = len(data)
    i = j = 0
    while i < nblines:
        line = data[i].rstrip()
        if i < nblines - 1:
            next = data[i + 1].rstrip()
        else:
            next = ""

        if (line == "Summary" or re.match("^[0-9].*", line)) and (len(next) > 0) and (next[0] == '-') \
                and ("-" * len(line)).startswith(next):  # Fuzzy underline length detection
            sections.append(currentSection)
            currentSection = {
                "details": getTitleDetails(line),
                "content": "",
            }
            j = 0
            i += 1  # Skip underline
            while not data[i + 1].rstrip():
                i += 1  # Skip empty lines
        else:
            if len(line) > 80:
                print >> sys.stderr, "Line `%i' exceeds 80 columns" % (i + 1)

            currentSection["content"] = currentSection["content"] + line + "\n"
            j += 1
            if currentSection["details"]["title"] == "Summary" and line != "":
                hasSummary = True
                # Learn chapters from the summary
                details = getTitleDetails(line)
                if details["chapter"]:
                    chapters[details["chapter"]] = details
        i += 1
    sections.append(currentSection)

    chapterIndexes = sorted(chapters.keys())

    document = ""

    # Complete the summary with chapters that were not listed in it
    for section in sections:
        details = section["details"]
        title = details["title"]
        if title:
            fulltitle = title
            if details["chapter"]:
                #documentAppend("<a name=\"%s\"></a>" % details["chapter"])
                fulltitle = details["chapter"] + ". " + title
                if not details["chapter"] in chapters:
                    print >> sys.stderr, "Adding '%s' to the summary" % details[
                        "title"]
                    chapters[details["chapter"]] = details
                    chapterIndexes = sorted(chapters.keys())

    for section in sections:
        details = section["details"]
        pctxt.details = details
        level = details["level"]
        title = details["title"]
        content = section["content"].rstrip()

        print >> sys.stderr, "Parsing chapter %s..." % title

        if (title == "Summary") or (title and not hasSummary):
            summaryTemplate = pctxt.templates.get_template('summary.html')
            documentAppend(
                summaryTemplate.render(
                    chapters=chapters,
                    chapterIndexes=chapterIndexes,
                ))
            if title and not hasSummary:
                # No summary in the source document: the generated one
                # replaces this chapter, which is still parsed below.
                hasSummary = True
            else:
                continue

        if title:
            documentAppend('<a class="anchor" id="%s" name="%s"></a>' %
                           (details["chapter"], details["chapter"]))
            if level == 1:
                documentAppend("<div class=\"page-header\">", False)
            documentAppend(
                '<h%d id="chapter-%s" data-target="%s"><small><a class="small" href="#%s">%s.</a></small> %s</h%d>'
                % (level, details["chapter"], details["chapter"],
                   details["chapter"], details["chapter"],
                   cgi.escape(title, True), level))
            if level == 1:
                documentAppend("</div>", False)

        if content:
            if False and title:
                # Display a navigation bar (currently disabled)
                documentAppend('<ul class="well pager">')
                documentAppend('<li><a href="#top">Top</a></li>', False)
                index = chapterIndexes.index(details["chapter"])
                if index > 0:
                    documentAppend(
                        '<li class="previous"><a href="#%s">Previous</a></li>'
                        % chapterIndexes[index - 1], False)
                if index < len(chapterIndexes) - 1:
                    documentAppend(
                        '<li class="next"><a href="#%s">Next</a></li>' %
                        chapterIndexes[index + 1], False)
                documentAppend('</ul>', False)

            content = cgi.escape(content, True)
            content = re.sub(r'section ([0-9]+(.[0-9]+)*)',
                             r'<a href="#\1">section \1</a>', content)

            pctxt.set_content(content)

            if not title:
                # Untitled leading block: extract the document headers.
                # Assumes a fixed header layout (title on line 2, subtitle
                # on line 3, version on line 5, author/date on lines 6-7).
                lines = pctxt.get_lines()
                pctxt.context['headers'] = {
                    'title': lines[1].strip(),
                    'subtitle': lines[2].strip(),
                    'version': lines[4].strip(),
                    'author': lines[5].strip(),
                    'date': lines[6].strip()
                }
                if HAPROXY_GIT_VERSION:
                    pctxt.context['headers'][
                        'version'] = 'version ' + HAPROXY_GIT_VERSION

                # Skip header lines
                pctxt.eat_lines()
                pctxt.eat_empty_lines()

            documentAppend('<div>', False)

            # Lines no parser transformed are delayed and flushed together
            # inside a single <pre> block.
            delay = []
            while pctxt.has_more_lines():
                try:
                    specialSection = specialSections[details["chapter"]]
                except:
                    specialSection = specialSections["default"]

                line = pctxt.get_line()
                if i < nblines - 1:
                    nextline = pctxt.get_line(1)
                else:
                    nextline = ""

                oldline = line
                pctxt.stop = False
                for parser in parsers:
                    line = parser.parse(line)
                    if pctxt.stop:
                        break
                if oldline == line:
                    # nothing has changed,
                    # delays the rendering
                    if delay or line != "":
                        delay.append(line)
                    pctxt.next()
                elif pctxt.stop:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        remove_indent(delay)
                        documentAppend(
                            '<pre class="text">%s\n</pre>' % "\n".join(delay),
                            False)
                    delay = []
                    documentAppend(line, False)
                else:
                    while delay and delay[-1].strip() == "":
                        del delay[-1]
                    if delay:
                        remove_indent(delay)
                        documentAppend(
                            '<pre class="text">%s\n</pre>' % "\n".join(delay),
                            False)
                    delay = []
                    documentAppend(line, True)
                    pctxt.next()

            # Flush the remaining delayed lines
            while delay and delay[-1].strip() == "":
                del delay[-1]
            if delay:
                remove_indent(delay)
                documentAppend(
                    '<pre class="text">%s\n</pre>' % "\n".join(delay), False)
            delay = []
            documentAppend('</div>')

    if not hasSummary:
        summaryTemplate = pctxt.templates.get_template('summary.html')
        # NOTE(review): debug print to stdout — likely a leftover
        print chapters
        document = summaryTemplate.render(
            chapters=chapters,
            chapterIndexes=chapterIndexes,
        ) + document

    # Log warnings for keywords defined in several chapters
    keyword_conflicts = {}
    for keyword in keywords:
        keyword_chapters = list(keywords[keyword])
        keyword_chapters.sort()
        if len(keyword_chapters) > 1:
            print >> sys.stderr, 'Multi section keyword : "%s" in chapters %s' % (
                keyword, list(keyword_chapters))
            keyword_conflicts[keyword] = keyword_chapters

    keywords = list(keywords)
    keywords.sort()

    createLinks()

    # Add the keywords conflicts to the keywords list to make them available in the search form
    # And remove the original keyword which is now useless
    for keyword in keyword_conflicts:
        sections = keyword_conflicts[keyword]
        offset = keywords.index(keyword)
        for section in sections:
            keywords.insert(offset,
                            "%s (%s)" % (keyword,
                                         chapters[section]['title']))
            offset += 1
        keywords.remove(keyword)

    print >> sys.stderr, "Exporting to %s..." % outfile

    template = pctxt.templates.get_template('template.html')
    try:
        footerTemplate = pctxt.templates.get_template('footer.html')
        footer = footerTemplate.render(
            headers=pctxt.context['headers'],
            document=document,
            chapters=chapters,
            chapterIndexes=chapterIndexes,
            keywords=keywords,
            keywordsCount=keywordsCount,
            keyword_conflicts=keyword_conflicts,
            version=VERSION,
            date=datetime.datetime.now().strftime("%Y/%m/%d"),
        )
    except TopLevelLookupException:
        # No footer template available: render without a footer
        footer = ""

    fd = open(outfile, 'w')
    print >> fd, template.render(
        headers=pctxt.context['headers'],
        base=base,
        document=document,
        chapters=chapters,
        chapterIndexes=chapterIndexes,
        keywords=keywords,
        keywordsCount=keywordsCount,
        keyword_conflicts=keyword_conflicts,
        version=VERSION,
        date=datetime.datetime.now().strftime("%Y/%m/%d"),
        footer=footer)
    fd.close()
def parse(self, line):
    """Render an ``Example(s):`` block of the HAProxy documentation.

    When *line* starts with an "Example :"/"Examples :" label, the
    optional description and the indented example lines that follow are
    consumed from the parsing context and rendered through the
    ``parser/example.tpl`` template.  Any other line is returned
    unchanged.
    """
    pctxt = self.pctxt

    result = re.search(r'^ *(Examples? *:)(.*)', line)
    if result:
        label = result.group(1)

        desc_indent = False
        # Text that follows the label on the same line
        desc = result.group(2).strip()

        # Some examples have a description
        if desc:
            desc_indent = len(line) - len(desc)

        indent = parser.get_indent(line)

        if desc:
            # And some description are on multiple lines
            while pctxt.get_line(1) and parser.get_indent(
                    pctxt.get_line(1)) == desc_indent:
                desc += " " + pctxt.get_line(1).strip()
                pctxt.next()

        pctxt.next()
        add_empty_line = pctxt.eat_empty_lines()

        content = []

        if parser.get_indent(pctxt.get_line()) > indent:
            # Multi-line example, indented deeper than the label
            if desc:
                # Capitalize the first letter of the description
                desc = desc[0].upper() + desc[1:]
            add_empty_line = 0
            while pctxt.has_more_lines() and (
                    (not pctxt.get_line()) or
                    (parser.get_indent(pctxt.get_line()) > indent)):
                if pctxt.get_line():
                    # Re-insert the blank lines swallowed above, then
                    # highlight any trailing '#' comment on the line
                    for j in range(0, add_empty_line):
                        content.append("")
                    content.append(
                        re.sub(r'(#.*)$', self.comment, pctxt.get_line()))
                    add_empty_line = 0
                else:
                    add_empty_line += 1
                pctxt.next()
        elif parser.get_indent(pctxt.get_line()) == indent:
            # Simple example that can't have empty lines
            if add_empty_line and desc:
                # This means that the example was on the same line as the
                # 'Example' tag and was not a description
                content.append(" " * indent + desc)
                desc = False
            else:
                while pctxt.has_more_lines() and (parser.get_indent(
                        pctxt.get_line()) >= indent):
                    content.append(pctxt.get_line())
                    pctxt.next()
                pctxt.eat_empty_lines()  # Skip empty remaining lines

        # Tell the main loop this parser fully handled the line
        pctxt.stop = True

        parser.remove_indent(content)

        template = pctxt.templates.get_template("parser/example.tpl")
        return template.render(pctxt=pctxt,
                               label=label,
                               desc=desc,
                               content=content)
    return line
def convert(pctxt, infile, outfile, base='', version='', haproxy_version=''):
    """Parse a plain-text HAProxy documentation file and render it to HTML.

    The file is split into sections delimited by a title followed by an
    underline of '-' characters, each section's content is fed through the
    registered line parsers, and the resulting HTML is accumulated into the
    module-global ``document``.

    :param pctxt: parser context (templates, line cursor, keywords, ...)
    :param infile: path of the plain-text documentation file to read
    :param outfile: kept for interface compatibility; this function does
                    not write it — the caller renders the returned dict
    :param base: base URL path for generated links; a trailing '/' is
                 appended when missing
    :param version: converter version string passed to the templates
    :param haproxy_version: when set, overrides the version found in the
                            document header
    :returns: dict with everything the page template needs (headers,
              document, chapters, keywords, footer, ...)
    """
    global document, keywords, keywordsCount, chapters, keyword_conflicts

    # Ensure the base path ends with exactly one '/'.
    # Fixed: the previous test `base[:-1] != '/'` compared everything but
    # the last character, so "static/" became "static//".
    if base and base[-1] != '/':
        base += '/'

    hasSummary = False

    # Read the whole input as a list of lines, with tabulations expanded
    # to 8 spaces and trailing whitespace removed.
    with open(infile) as fd:
        data = [line.replace("\t", " " * 8).rstrip() for line in fd]

    parsers = init_parsers(pctxt)

    pctxt.context = {
        'headers': {},
        'document': "",
        'base': base,
    }

    sections = []
    currentSection = {
        "details": getTitleDetails(""),
        "content": "",
    }

    chapters = {}

    keywords = {}
    keywordsCount = {}

    # NOTE(review): per-chapter flags; every entry is currently identical
    # and nothing reads the lookup result anymore, kept for future use.
    specialSections = {
        "default": {
            "hasKeywords": True,
        },
        "4.1": {
            "hasKeywords": True,
        },
    }

    pctxt.keywords = keywords
    pctxt.keywordsCount = keywordsCount
    pctxt.chapters = chapters

    print("Importing %s..." % infile, file=sys.stderr)

    def chapter_key(chapter):
        # Numeric sort key so that chapter "1.10" sorts after "1.9"
        return [int(part) for part in chapter.split('.')]

    def flush_delay(buffered):
        # Emit pending unparsed lines as a single preformatted block,
        # dropping trailing empty lines first
        while buffered and buffered[-1].strip() == "":
            del buffered[-1]
        if buffered:
            parser.remove_indent(buffered)
            documentAppend('<pre class="text">%s\n</pre>' % "\n".join(buffered), False)

    # First pass: split the document into sections, delimited by a title
    # followed by an underline made of '-' characters
    nblines = len(data)
    i = 0
    while i < nblines:
        line = data[i].rstrip()
        if i < nblines - 1:
            next_line = data[i + 1].rstrip()
        else:
            next_line = ""
        if (line == "Summary" or re.match("^[0-9].*", line)) and (len(next_line) > 0) and (next_line[0] == '-') \
                and ("-" * len(line)).startswith(next_line):  # Fuzzy underline length detection
            sections.append(currentSection)
            currentSection = {
                "details": getTitleDetails(line),
                "content": "",
            }
            i += 1  # Skip underline
            # Skip empty lines (bounds-checked so an underline at the very
            # end of the file cannot index past the last line)
            while i + 1 < nblines and not data[i + 1].rstrip():
                i += 1
        else:
            if len(line) > 80:
                print("Line `%i' exceeds 80 columns" % (i + 1), file=sys.stderr)

            currentSection["content"] = currentSection["content"] + line + "\n"
            if currentSection["details"]["title"] == "Summary" and line != "":
                hasSummary = True
                # Learn chapters from the summary
                details = getTitleDetails(line)
                if details["chapter"]:
                    chapters[details["chapter"]] = details
        i += 1
    sections.append(currentSection)

    chapterIndexes = sorted(chapters.keys(), key=chapter_key)

    document = ""

    # Complete the summary with chapters found in the body but missing
    # from the summary itself
    for section in sections:
        details = section["details"]
        if details["title"] and details["chapter"] and details["chapter"] not in chapters:
            print("Adding '%s' to the summary" % details["title"], file=sys.stderr)
            chapters[details["chapter"]] = details
            # Fixed: re-sort with the same numeric key as above (the old
            # code fell back to a lexicographic sort, putting "10" < "2")
            chapterIndexes = sorted(chapters.keys(), key=chapter_key)

    # Second pass: render each section
    for section in sections:
        details = section["details"]
        pctxt.details = details
        level = details["level"]
        title = details["title"]
        content = section["content"].rstrip()

        print("Parsing chapter %s..." % title, file=sys.stderr)

        if (title == "Summary") or (title and not hasSummary):
            # Render the summary here, or before the first titled section
            # when the document does not provide one
            summaryTemplate = pctxt.templates.get_template('summary.html')
            documentAppend(summaryTemplate.render(
                pctxt=pctxt,
                chapters=chapters,
                chapterIndexes=chapterIndexes,
            ))
            if title and not hasSummary:
                hasSummary = True
            else:
                continue

        if title:
            documentAppend('<a class="anchor" id="%s" name="%s"></a>' % (details["chapter"], details["chapter"]))
            if level == 1:
                documentAppend("<div class=\"page-header\">", False)
            # NOTE(review): cgi.escape was removed in Python 3.8;
            # html.escape(..., quote=True) is the drop-in replacement.
            documentAppend('<h%d id="chapter-%s" data-target="%s"><small><a class="small" href="#%s">%s.</a></small> %s</h%d>' % (level, details["chapter"], details["chapter"], details["chapter"], details["chapter"], cgi.escape(title, True), level))
            if level == 1:
                documentAppend("</div>", False)

        if content:
            content = cgi.escape(content, True)
            # Turn "section X.Y.Z" references into internal links.
            # Fixed: the dot was previously unescaped and matched any
            # character ("section 1-2" would have been linked too).
            content = re.sub(r'section ([0-9]+(\.[0-9]+)*)', r'<a href="#\1">section \1</a>', content)

            pctxt.set_content(content)

            if not title:
                # The untitled leading section carries the document header
                pctxt.context['headers'] = {
                    'title': '',
                    'subtitle': '',
                    'version': '',
                    'author': '',
                    'date': ''
                }
                if re.match("^-+$", pctxt.get_line().strip()):
                    # Try to analyze the header of the file, assuming it follows
                    # those rules :
                    # - it begins with a "separator line" (several '-' chars)
                    # - then the document title
                    # - an optional subtitle
                    # - a new separator line
                    # - the version
                    # - the author
                    # - the date
                    pctxt.next()
                    pctxt.context['headers']['title'] = pctxt.get_line().strip()
                    pctxt.next()
                    subtitle = ""
                    while not re.match("^-+$", pctxt.get_line().strip()):
                        subtitle += " " + pctxt.get_line().strip()
                        pctxt.next()
                    pctxt.context['headers']['subtitle'] += subtitle.strip()
                    if not pctxt.context['headers']['subtitle']:
                        # No subtitle, try to guess one from the title if it
                        # starts with the word "HAProxy"
                        if pctxt.context['headers']['title'].startswith('HAProxy '):
                            pctxt.context['headers']['subtitle'] = pctxt.context['headers']['title'][8:]
                            pctxt.context['headers']['title'] = 'HAProxy'
                    pctxt.next()
                    pctxt.context['headers']['version'] = pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['author'] = pctxt.get_line().strip()
                    pctxt.next()
                    pctxt.context['headers']['date'] = pctxt.get_line().strip()
                    pctxt.next()
                    if haproxy_version:
                        # The caller-provided version takes precedence
                        pctxt.context['headers']['version'] = 'version ' + haproxy_version

                    # Skip header lines
                    pctxt.eat_lines()
                    pctxt.eat_empty_lines()

            documentAppend('<div>', False)

            # Feed each line through the chain of parsers; lines left
            # untouched are buffered and flushed as one <pre> block
            delay = []
            while pctxt.has_more_lines():
                line = pctxt.get_line()

                oldline = line
                pctxt.stop = False
                for one_parser in parsers:
                    line = one_parser.parse(line)
                    if pctxt.stop:
                        break
                if oldline == line:
                    # Nothing has changed, delay the rendering (but never
                    # start the buffer with an empty line)
                    if delay or line != "":
                        delay.append(line)
                    pctxt.next()
                else:
                    flush_delay(delay)
                    delay = []
                    if pctxt.stop:
                        # The winning parser already consumed its input
                        # lines, so do not advance the cursor here
                        documentAppend(line, False)
                    else:
                        documentAppend(line, True)
                        pctxt.next()

            flush_delay(delay)
            delay = []
            documentAppend('</div>')

    if not hasSummary:
        # Document without a summary and without any titled section:
        # prepend a generated summary
        summaryTemplate = pctxt.templates.get_template('summary.html')
        document = summaryTemplate.render(
            pctxt=pctxt,
            chapters=chapters,
            chapterIndexes=chapterIndexes,
        ) + document

    # Log warnings for keywords defined in several chapters
    keyword_conflicts = {}
    for keyword in keywords:
        keyword_chapters = sorted(keywords[keyword])
        if len(keyword_chapters) > 1:
            print("Multi section keyword : \"{}\" in chapters {}".format(keyword, list(keyword_chapters)), file=sys.stderr)
            keyword_conflicts[keyword] = keyword_chapters

    # From here on, the global ``keywords`` becomes a sorted list of names
    keywords = sorted(keywords)

    createLinks()

    # Add the keywords conflicts to the keywords list to make
    # them available in the search form and remove the original
    # keyword which is now useless
    for keyword in keyword_conflicts:
        # Renamed from ``sections`` which shadowed the outer section list
        conflict_chapters = keyword_conflicts[keyword]
        offset = keywords.index(keyword)
        for chapter in conflict_chapters:
            keywords.insert(offset, "{} ({})".format(keyword, chapters[chapter]['title']))
            offset += 1
        keywords.remove(keyword)

    # The footer template is optional
    try:
        footerTemplate = pctxt.templates.get_template('footer.html')
        footer = footerTemplate.render(
            pctxt=pctxt,
            headers=pctxt.context['headers'],
            document=document,
            chapters=chapters,
            chapterIndexes=chapterIndexes,
            keywords=keywords,
            keywordsCount=keywordsCount,
            keyword_conflicts=keyword_conflicts,
            version=version,
            date=datetime.datetime.now().strftime("%Y/%m/%d"),
        )
    except TopLevelLookupException:
        footer = ""

    return {
        'pctxt': pctxt,
        'headers': pctxt.context['headers'],
        'base': base,
        'document': document,
        'chapters': chapters,
        'chapterIndexes': chapterIndexes,
        'keywords': keywords,
        'keywordsCount': keywordsCount,
        'keyword_conflicts': keyword_conflicts,
        'version': version,
        'date': datetime.datetime.now().strftime("%Y/%m/%d"),
        'footer': footer
    }