Example #1
def test_quotes_129():
    #pf https://github.com/sergiocorreia/panflute/issues/129
    text = [pf.Str("Some"), pf.Space(), pf.Str("quoted text")]
    quoted_text = pf.Quoted(*text)
    para = pf.Para(quoted_text)
    output = pf.stringify(para, False)
    assert output == '"Some quoted text"'
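A quick way to reproduce this check outside the test suite (plain panflute; the second argument to stringify is newlines, and False drops the trailing newline a Para would otherwise add):

import panflute as pf

para = pf.Para(pf.Quoted(pf.Str("Some"), pf.Space(), pf.Str("quoted text")))
print(pf.stringify(para, False))  # -> "Some quoted text"

Example #2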
def handleStringPattern2(elem, doc):
    splt = recomp2.split(elem.text)
    logging.debug("Pattern2 text: " + elem.text + " \t " + str(splt))
    if len(splt) == 3 and isinstance(elem.next, pf.Space):
        logging.debug("Replacing " + elem.text + " to " + splt[1] +
                      "(Halfspace)/ at recomp2")
        return [pf.Str(splt[1]), getInline(doc), pf.Str("/")]
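handleStringPattern2 is a bare element action that relies on recomp2 (a compiled regex) and getInline() defined elsewhere in the original filter; assuming those two names exist, a minimal wiring sketch looks like this:

import panflute as pf

def main(doc=None):
    return pf.run_filter(handleStringPattern2, doc=doc)

if __name__ == "__main__":
    main()

Example #3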
def filter_hatena_footnote(elem, doc):
  """
  Replace footnotes with Hatena notation. <code> elements break this, so they are converted to plain text.
  """
  if isinstance(elem, pf.Note):
    content_without_code = [pf.Str(f'`{pf.stringify(x)}`') if isinstance(x, pf.Code) else x for x in elem.content[0].content]
    return [pf.Str('((')] + content_without_code + [pf.Str('))')]
Example #4
 def action(self, elem, doc):
     if isinstance(elem, pf.RawBlock):
         if elem.text == r"\newpage":
             if (doc.format == "docx"):
                 pf.debug("Page Break")
                 elem = self.pagebreak
         elif elem.text == r"\newsection":
             if (doc.format == "docx"):
                 pf.debug("Section Break")
                 elem = self.sectionbreak
             else:
                 elem = []
         elif elem.text == r"\toc":
             if (doc.format == "docx"):
                 pf.debug("Table of Contents")
                 para = [
                     pf.Para(pf.Str("Table"), pf.Space(), pf.Str("of"),
                             pf.Space(), pf.Str("Contents"))
                 ]
                 div = pf.Div(*para,
                              attributes={"custom-style": "TOC Heading"})
                 elem = [div, self.toc]
             else:
                 elem = []
     return elem
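The method above belongs to a filter class whose self.pagebreak, self.sectionbreak and self.toc are presumably raw docx (OOXML) blocks built elsewhere; with a hypothetical class name, wiring it up is the same as for a plain function:

import panflute as pf

def main(doc=None):
    filt = DocxBreaks()  # hypothetical name for the class that defines action()
    return pf.run_filter(filt.action, doc=doc)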
Example #5
 def test_cancel_emph(self):
     ast = deepcopy(self.ast_double_type)
     ast.walk(self.cancel_repeated_type)
     res = convert_text(ast, input_format="panflute", output_format="native")
     ref = pf.Para(pf.Str("a"), pf.Space(), self.ElementType(pf.Str("b")))
     ref_native = convert_text(ref, input_format="panflute", output_format="native")
     assert res == ref_native
Example #6
 def _nonnormative(name):
     _wrap(
         pf.Span(pf.Str('[ '), pf.Emph(pf.Str('{}:'.format(name.title()))),
                 pf.Space()),
         pf.Span(pf.Str(' — '),
                 pf.Emph(pf.Str('end {}'.format(name.lower()))),
                 pf.Str(' ]')))
Example #7
 def test_to_emph_simple(self):
     ast = deepcopy(self.simple)
     ast.walk(self.to_type)
     res = convert_text(ast, input_format="panflute", output_format="native")
     ref = pf.Para(self.ElementType(pf.Str("a")), pf.Space(), self.ElementType(pf.Str("b")))
     ref_native = convert_text(ref, input_format="panflute", output_format="native")
     assert res == ref_native
Example #8
 def test_merge_emph2(self):
     ast = deepcopy(self.ast2)
     ast.walk(self.merge_consecutive_type)
     res = convert_text(ast, input_format="panflute", output_format="native")
     ref = pf.Para(self.ElementType(pf.Str("a"), pf.Space(), pf.Str("b")))
     ref_native = convert_text(ref, input_format="panflute", output_format="native")
     assert res == ref_native
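Example #9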
def exercise_filter(elem, doc):
    if isinstance(elem, pf.Para) and len(elem.content) == 0:
        return []  ## Remove empty paragraphs ...
    elif isinstance(elem, pf.Header) and (
            "exercise"
            in elem.classes):  # No need to use level 3 and elem.level==3:
        if "reset" in elem.classes:
            doc.exercisecount = 1
        else:
            doc.exercisecount += 1
        #print(sys.stderr,"Exercise detected",file=sys.stderr)
        doc.inside_exercise = True
        return []
    elif isinstance(elem, pf.Header) and (
            "question"
            in elem.classes):  # No need to use level 3 and elem.level==3:
        if "reset" in elem.classes:
            doc.questioncount = 1
        else:
            doc.questioncount += 1
        #print(sys.stderr,"Question detected",file=sys.stderr)
        doc.inside_question = True
        return []
    elif doc.inside_exercise:
        if isinstance(elem, pf.Para) and len(elem.content) > 0:
            cogollo = pf.Str(str(doc.exercisecount) + ".-")
            elem.content = [pf.Strong(cogollo), pf.Space()] + list(elem.content)
            doc.inside_exercise = False
            return elem
    elif doc.inside_question:
        if isinstance(elem, pf.Para) and len(elem.content) > 0:
            cogollo = pf.Str(str(doc.questioncount) + ".-")
            elem.content = [pf.Strong(cogollo), pf.Space()] + list(elem.content)
            doc.inside_question = False
            return elem
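exercise_filter assumes the counters and flags already exist on doc; a prepare() along these lines (a sketch, not the original project's code) sets them up before the walk:

def prepare(doc):
    doc.exercisecount = 0
    doc.questioncount = 0
    doc.inside_exercise = False
    doc.inside_question = False

# pf.run_filter(exercise_filter, prepare=prepare)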
Example #10
def render_citations(elem, doc, string=False):
    if isinstance(elem, pf.Cite):
        if doc.format == "latex" and not doc.get_metadata(
            "doit_citeproc_for_latex", True
        ):

            latex_commands = []
            latex_command = "\\autocite{{{ids}}}"

            if hasattr(elem, "latex_command") and elem.latex_command:
                for command in elem.latex_command:
                    head = "" if command.startswith("\\") else "\\cite"
                    latex_command = "{head}{command}{{{{{{ids}}}}}}".format(
                        head=head, command=command
                    )

                    latex_commands.append(latex_command)
            else:
                latex_commands.append(latex_command)

            citations = ",".join([c.id for c in elem.citations])

            raw = "".join(lc.format(ids=citations) for lc in latex_commands)

            if string:
                return raw
            else:
                return pf.RawInline(raw, format="latex")
        else:
            if hasattr(elem, "latex_command") and "author" in elem.latex_command:

                names = []
                amount_citations = len(elem.citations)

                for i in range(1, amount_citations + 1):
                    citation = elem.citations[i - 1]
                    citation = doc.bibliography.get(citation.id, False)

                    if citation:
                        names_list = citation.get(
                            "author", citation.get("editor", False)
                        )

                        if names_list:
                            names.extend(utils.format_names(names_list))

                            if i != amount_citations:
                                names.extend([pf.Str(", "), pf.Space()])

                if names:
                    if elem.next:
                        if pf.stringify(names[-1]).endswith(".") and pf.stringify(
                            elem.next
                        ).startswith("."):
                            names[-1] = pf.Str(pf.stringify(names[-1])[:-1])

                            return pf.Span(*names)

            return pf.Cite(citations=elem.citations)
Example #11
def finalize(doc):
    reader_options = pf.load_reader_options()
    definitions = []
    for k, v in reader_options.items():
        term = [pf.Str(k)]
        definition = pf.Definition(pf.Para(pf.Str(repr(v))))
        definitions.append(pf.DefinitionItem(term, [definition]))
    doc.content.append(pf.DefinitionList(*definitions))
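A finalize hook like this runs once, after the element walk; the wiring only needs a no-op per-element action (sketch):

import panflute as pf

def main(doc=None):
    return pf.run_filter(lambda elem, doc: None, finalize=finalize, doc=doc)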
Example #12
def str_to_metainline(raw_str):
	tokens = raw_str.split(" ")
	mi = pf.MetaInlines(pf.Str(tokens[0]))
	for i in range(1, len(tokens)):
		mi.content.append(pf.Space())
		mi.content.append(pf.Str(tokens[i]))
	return mi
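A typical use is building metadata values, for example (the metadata key is hypothetical):

doc.metadata["subtitle"] = str_to_metainline("A longer subtitle with several words")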
Example #13
 def test_valid_content_strs_html(self):
     doc = common.MockDoc('html')
     span = pf.Span(pf.Str('a'),
                    pf.Str('b'),
                    pf.Str('c'),
                    classes=['gloss'])
     gloss.parse(span, doc)
     self.assertEqual(pf.stringify(span), 'a<br/>b<br/>c')
Example #14
 def test_all_emph_together(self):
     ast = deepcopy(self.combined)
     ast.walk(self.to_type)
     ast.walk(self.cancel_repeated_type)
     ast.walk(self.merge_consecutive_type)
     res = convert_text(ast, input_format="panflute", output_format="native")
     ref = pf.Para(pf.Str("a"), pf.Str("b"), pf.Space(), self.ElementType(pf.Str("c"), pf.Str("d")))
     ref_native = convert_text(ref, input_format="panflute", output_format="native")
     assert res == ref_native
Example #15
def action2(elem, doc):
  pattern = re.compile(r"\[!(.*?)\]")
  if isinstance(elem, pf.Str):
    for m in pattern.finditer(elem.text):
      span = m.span()
      i = m.group(1)
      heq = [x for x in doc.backmatter if x.i == i]
      if len(heq) != 0:
          return [pf.Str(elem.text[0:span[0]]), heq[0].toLink(), pf.Str(elem.text[span[1]:])]
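For reference, the pattern matches [!id] markers inside a Str, and m.span() gives the slice boundaries used to cut the text (standalone check):

import re

pattern = re.compile(r"\[!(.*?)\]")
m = pattern.search("see [!fig1] for details")
print(m.group(1))  # fig1
print(m.span())    # (4, 11)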
Example #16
 def test_valid_content_strs(self):
     doc = common.MockDoc('html')
     span = pf.Span(pf.Str('a'),
                    pf.Str('b'),
                    pf.Str('c'),
                    pf.Str('d'),
                    classes=['phonrule'])
     phonrule.parse(span, doc)
     self.assertEqual(pf.stringify(span), 'a -&gt; b/c_d')
Example #17
 def test_valid_content_strs_and_spans_latex(self):
     doc = common.MockDoc('latex')
     span1 = pf.Span(pf.Str('a'))
     str1 = pf.Str('b')
     span2 = pf.Span(pf.Str('c'))
     str2 = pf.Str('d')
     span = pf.Span(span1, str1, span2, str2, classes=['phonrule'])
     phonrule.parse(span, doc)
     self.assertEqual(pf.stringify(span), '\\phonb{a}{b}{c}{d}')
Example #18
 def test_valid_content_strs_and_spans_latex(self):
     doc = common.MockDoc('latex')
     span1 = pf.Span(pf.Str('a'))
     str1 = pf.Str('b')
     span2 = pf.Span(pf.Str('c'))
     span = pf.Span(span1, str1, span2, classes=['gloss'])
     gloss.parse(span, doc)
     self.assertEqual(
         pf.stringify(span),
         '\\begin{exe}\n\\ex\n\\gll a\\\\\nb\\\\\n\\trans c\n\\end{exe}')
Example #19
def finalize(doc):
	c1 = pf.TableCell(pf.Plain(pf.Str("Element")))
	c2 = pf.TableCell(pf.Plain(pf.Str("Frequency")))
	header = pf.TableRow(c1, c2)
	rows = []
	for tag in doc.counter:
		c1 = pf.TableCell(pf.Plain(pf.Str(tag)))
		c2 = pf.TableCell(pf.Plain(pf.Str(str(doc.counter[tag]))))
		rows.append(pf.TableRow(c1, c2))
	table = pf.Table(*rows, header=header)
	doc.content = [table] # bugbug?
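Note that this uses the pre-2.0 panflute table API (TableRow/TableCell passed straight to Table with header=). Under panflute 2.x, which follows pandoc's newer table model, roughly the same table would be built like this (a sketch, not verified against the original filter):

head = pf.TableHead(pf.TableRow(
    pf.TableCell(pf.Plain(pf.Str("Element"))),
    pf.TableCell(pf.Plain(pf.Str("Frequency"))),
))
table = pf.Table(pf.TableBody(*rows), head=head)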
Example #20
def finalize(doc: pf.Doc):
    #raise Exception("input file %s header_level %s" % (doc.input_file, doc.header_level))
    header = pf.Header(pf.Str(doc.meta_title), level=doc.header_level)
    doc.content.insert(0, header)
    doc.content.insert(1, pf.Para(pf.Str(doc.description)))
    del doc.header_level
    del doc.input_file
    del doc.meta_title
    del doc.description
    del doc.images_path
    del doc.out_meta
Example #21
def format_names(names):
    # Emphasised family name of the first author.
    first = pf.Emph(pf.Str(split_name(names[0])[0]))

    if len(names) == 1:
        return [first]
    elif len(names) == 2:
        second = pf.Emph(pf.Str(split_name(names[1])[0]))

        return [first, pf.Space(), pf.Str("and"), pf.Space(), second]
    else:
        return [first, pf.Space(), pf.Str("et al.")]
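Example #22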
def autounderlined(elem, doc):
    if doc.autounderlined and isinstance(elem, pf.Link):
        ## Create a span with throwaway content but class "underline"
        span = pf.Span(pf.Str('More'),
                       pf.Space(),
                       pf.Str('words.'),
                       classes=["underline"])
        ## Force link's content to become the span's content
        span.content = elem.content
        ## Put the span inside the link
        elem.content = [span]
        #return the modified link
        return elem
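The doc.autounderlined flag checked above has to be set before the walk; reading it from metadata in a prepare() is one option (the metadata key name is an assumption):

def prepare(doc):
    doc.autounderlined = doc.get_metadata("autounderlined", default=True)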
Example #23
 def test_merge_root_multiple_nodes(self):
     doc = common.MockDoc('html')
     filename = os.path.join('linguafilter', 'test_lexicon.csv')
     attributes = {'file': filename, 'merge_root': 'foo'}
     div = pf.Div(pf.Para(pf.Str('{field1}')),
                  pf.Para(pf.Str('{field2}')),
                  attributes=attributes,
                  classes=['lexicon'])
     with self.assertRaisesRegex(
             Exception,
             'if merge_root is specified, there can be only one node under the lexicon div'
     ):
         lexicon.parse(div, doc)
Example #24
def fenced_action(options, data, element, doc):
	modalid  = options.get('id', 'modal1')
	title    = options.get('title')
	closebtn = options.get('closebtn', True)
	size     = options.get('size', 'default')
	size2class = {
		'default': None,
		'small'  : 'modal-sm',
		'sm'     : 'modal-sm',
		'large'  : 'modal-lg',
		'lg'     : 'modal-lg',
		'xlarge' : 'modal-xl',
		'xl'     : 'modal-xl',
	}

	components = []
	if title:
		modal_header1 = pf.Header(pf.Str(title), classes=['modal-title'], level=5, identifier=modalid + 'Title')
		modal_header2 = pf.Div(
			pf.Div(pf.Para(pf.Str('x')), attributes = {'aria-hidden': "true"}),
			classes = ['close', 'button'],
			attributes = {
				'type': 'button',
				'data-dismiss': 'modal',
				'aria-label': 'Close'
			})
		components.append(pf.Div(modal_header1, modal_header2, classes = ['modal-header']))
	components.append(pf.Div(*data, classes = ['modal-body']))
	if closebtn:
		components.append(pf.Div(
			pf.Div(pf.Para(pf.Str('Close')), classes = ['button', 'btn', 'btn-secondary'],
			attributes = {
				'type': 'button',
				'data-dismiss': 'modal',
			}),
			classes = ['modal-footer']
		))
	modal_content = pf.Div(*components, classes = ['modal-content'])
	mainclasses = ['modal-dialog', 'modal-dialog-centered', 'modal-dialog-scrollable']
	sizeclass = size2class.get(size)
	if sizeclass:
		mainclasses.append(sizeclass)
	model_dialog = pf.Div(modal_content, classes = mainclasses, attributes = {'role': 'document'})

	return pf.Div(model_dialog, classes = ['modal', 'fade'], identifier = modalid, attributes = {
		'tabindex'       : '-1',
		'role'           : 'dialog',
		'aria-labelledby': modalid + 'Title',
		'aria-hidden'    : "true"
	})
Example #25
def action(elem, doc):
    global collect_tags, tag_sequence
    if isinstance(
            elem,
            pf.Header) and "course-title" in elem.classes and collect_tags:
        # this is a course separator, we should reset state
        build_course_barcode(doc)
        collect_tags = False
        tag_sequence = []
    if isinstance(elem,
                  pf.Header) and pf.stringify(elem) == "Learning objectives":
        collect_tags = elem.index + 1
    if isinstance(elem, pf.Span) and "used-in" in elem.attributes:
        courses = ", ".join(elem.attributes['used-in'].split())
        used_in = pf.Span(pf.Str(f"(Depended on by {courses})"))
        elem.content.append(pf.Space())
        elem.content.append(used_in)
    if isinstance(elem, pf.Span) and "outcomes" in elem.attributes:
        outcomes = elem.attributes["outcomes"].split()
        outcome_spans = []
        for outcome in outcomes:
            # only include outcomes in the sequence if there's an ID
            if collect_tags:
                tag_sequence.append(outcome)

            outcome_spans.append(pf.Space())

            if outcome not in outcome_colours:
                outcome_colours[outcome] = colours.popleft()

            colour = outcome_colours[outcome]
            if doc.format in ('html', 'html5'):
                outcome_spans.append(
                    pf.Span(
                        pf.Str(outcome),
                        attributes={
                            'style':
                            f"color:{colour[0]};background-color:{colour[1]};border:1px solid black;"
                        }))
            elif doc.format == 'latex':
                outcome_spans.append(
                    pf.Span(
                        pf.RawInline(f"""
                    \\tcbox[on line,arc=0pt, outer arc=0pt,boxsep=0pt,boxrule=1pt,top=2pt,bottom=2pt,left=1pt,right=1pt,colback={colour[1]}]{{
                        \\color{{{colour[0]}}}{outcome}
                    }}
                    """,
                                     format='latex')))

        elem.content.extend(outcome_spans)
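Example #26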
 def test_remove_empty_paragraphs(self):
     """It should remove empty paras in document"""
     doc = pf.Doc(
         pf.Para(pf.Str("Foo"), pf.Space(), pf.Str("Bar")),
         pf.Para(),
         pf.Para(pf.Str("Bar"), pf.Space(), pf.Str("Baz")),
     )
     remove_empty_paragraphs(doc)
     self.assertEqual(len(doc.content), 2)
     para1 = doc.content[0]
     self.assertEqual(para1.content[0].text, "Foo")
     self.assertEqual(para1.content[2].text, "Bar")
     para2 = doc.content[1]
     self.assertEqual(para2.content[0].text, "Bar")
     self.assertEqual(para2.content[2].text, "Baz")
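The remove_empty_paragraphs helper under test is not shown here; a minimal sketch that would satisfy this test (an assumption, not the project's actual implementation):

import panflute as pf

def remove_empty_paragraphs(doc):
    def action(elem, doc):
        if isinstance(elem, pf.Para) and len(elem.content) == 0:
            return []  # returning [] removes the element
    doc.walk(action)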
Example #27
    def listingtable(self, filename, idn, caption, options):
        if self.doc.format in ["latex"]:
            for c in caption:
                if isinstance(c, (pf.Str)):
                    c.text = c.text.replace("_", r"\textunderscore ")
                    # pf.debug(c.text)
        basename = os.path.basename(filename)  # /path/to/file.txt -> file.txt
        file_type = options.get("type", "plain")
        types = [file_type, "numberLines"]
        startFrom = options.get("startFrom", "1")
        numbers = options.get("numbers", "left")
        attr = {"startFrom": startFrom, "numbers": numbers}
        linefrom = options.get("from")
        lineto = options.get("to")
        linefrom = None if not linefrom else (int(linefrom) - 1)
        lineto = None if not lineto else (int(lineto))

        if self.doc.format in ["latex"]:
            file_title = basename.replace("_", r"\textunderscore")
        else:
            file_title = basename

        temp_caption = [pf.Str("%s" % (file_title))]
        caption = temp_caption if not len(caption) else caption

        with open(filename, "r", encoding="utf-8") as f:
            lines = list(f)
        if (not linefrom) and (not lineto):
            raw = "".join(lines)
        elif linefrom and (not lineto):
            raw = "".join(lines[linefrom:])
        elif not linefrom and lineto:
            raw = "".join(lines[:lineto])
        else:
            raw = "".join(lines[linefrom:lineto])

        # pf.debug(linefrom, lineto, raw)
        label = basename.lower().replace(".", "_").replace("/", "_") + str(
            self.counter)
        idn = idn if idn else "lst:{label:s}".format(label=label)

        read = pf.CodeBlock(raw,
                            classes=types,
                            identifier=idn,
                            attributes=attr)

        ret = [pf.Para(pf.Str("Listing:"), pf.Space(), *caption), read]
        return ret
Example #28
def fenced_action(options, data, element, doc):
    # We'll only run this for CodeBlock elements of class 'csv'
    title = options.get('title', 'Untitled Table')
    title = [pf.Str(title)]
    has_header = options.get('has-header', False)

    with io.StringIO(data) as f:
        reader = csv.reader(f)
        body = []
        for row in reader:
            cells = [pf.TableCell(pf.Plain(pf.Str(x))) for x in row]
            body.append(pf.TableRow(*cells))

    header = body.pop(0) if has_header else None
    table = pf.Table(*body, header=header, caption=title)
    return table
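This is the pattern used with panflute's yaml_filter helper, which dispatches fenced code blocks of a given class to the function above; wiring sketch:

def main(doc=None):
    return pf.run_filter(pf.yaml_filter, tag='csv', function=fenced_action, doc=doc)

if __name__ == '__main__':
    main()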
Example #29
def finalize(doc):
    objectives = materialize_unused(doc, all_ids)
    unused_section = pfp.find_by_id('missing-objectives', doc)

    spot = itertools.count(start=unused_section.index + 1)

    for course in sorted(objectives):
        identifier = course.lower().replace(" ", "") + "-missing"
        doc.content.insert(
            next(spot),
            pf.Header(pf.Str(course),
                      level=2,
                      identifier=identifier,
                      classes=["unnumbered"]))
        for header in objectives[course]:
            unit_list = pf.OrderedList()
            for objective in objectives[course][header]:
                if objective.identifier not in referenced_ids:
                    # we've got a handle on the original `pf.Span`, so ask
                    # for its grandparent to get the `pf.ListItem`.
                    unit_list.content.append(objective.ancestor(2))

            # Only put the header into the document *if* the unit actually
            # has learning objectives that were not referenced.
            if len(unit_list.content):
                doc.content.insert(next(spot), header)
                doc.content.insert(next(spot), unit_list)

    # Any appendices (anything *after* the "missing objectives" section) should
    # be unnumbered, because numbering gets difficult after that.
    for elem in doc.content[next(spot):]:
        if isinstance(elem, pf.Header):
            elem.classes.append("unnumbered")
Example #30
def action_clean_link(elem, doc, job, context):
    """Cleans some nodes in the syntax tree:
    - removing "wikilink" title from links
    - remove "fig:" prefix from title in links
    - remove linebreaks if first in paragraph

    Args:
        elem (Element): current element in the Panflute syntax tree
        doc (Doc): representing full document
        job (Job): a job object holding the batch job context

    Returns:
        [type]: [description]
    """
    if isinstance(elem, pf.Link):
        # Read at end for explanation of why wikilink https://github.com/jgm/pandoc/issues/5414
        if elem.title == "wikilink":
            elem.title = ""
        if len(elem.content) == 0:
            s = elem.title or elem.url
            elem.content = [pf.Str(s)]
    elif isinstance(elem, pf.Image):
        if elem.title.startswith("fig:"):
            elem.title = elem.title[4:]
        elem.attributes.clear()
    elif isinstance(elem, pf.LineBreak):
        if elem.index == 0:  # No need for a LineBreak at beginning of a paragraph
            job.debug("Removed hard line break at start of paragraph")
            return []
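Because action_clean_link takes two extra arguments, they have to be bound before it can be used as a panflute action; a sketch with functools.partial (job and context come from the surrounding project and are assumed here):

import functools
import panflute as pf

def main(doc=None, job=None, context=None):
    action = functools.partial(action_clean_link, job=job, context=context)
    return pf.run_filter(action, doc=doc)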