Example #1
def parse_amendment(data, parent):
    subject = data['sujet']
    text = clean_html(data['texte'])

    tokens = lexer.tokenize(subject + '\n' + text)
    node = create_node(parent, {
        'type': 'amendment',
        'id': data['numero'],
        'content': text,
        'status': AMENDMENT_STATUS[data['sort'].lower()],
        'description': clean_html(data['expose']),
        'signatories': [
            {'name': s.strip()}
            for s in data['signataires'].split(', ')
        ],
        'url': data['source'],
    })

    # The "subject" declares the target bill article reference for this admendment.
    # That reference will be referenced later on using syntaxes such as "cet article" ("this article").
    parse_subject(tokens, 0, node)
    parse_alineas(node['content'], node)
    # If the amendment content actually needs that bill article reference, it has already been
    # copied by now, so we simply remove it.
    remove_node(node, node['children'][0])

    return node
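For reference, a minimal sketch of the input shape implied by the keys accessed above (sujet, texte, numero, sort, expose, signataires, source). The field values, the 'sort' value expected by AMENDMENT_STATUS, and the parent node root_node are hypothetical placeholders, not data from the project.

amendment_data = {
    'sujet': "ARTICLE PREMIER",                      # target bill article
    'texte': "<p>Supprimer cet article.</p>",        # amendment text (HTML)
    'numero': "42",                                  # amendment number
    'sort': "Adopté",                                # outcome, lower-cased into AMENDMENT_STATUS
    'expose': "<p>Exposé sommaire.</p>",             # explanatory statement (HTML)
    'signataires': "M. Dupont, Mme Martin",          # comma-separated signatories
    'source': "https://example.org/amendements/42",  # source URL (placeholder)
}
node = parse_amendment(amendment_data, root_node)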
Example #2
def call_parse_func(self, fn, data, ast=None):
    # Tokenize the input, run the given parse function against a fresh AST root
    # (unless one is supplied), and return the resulting tree.
    if not ast:
        ast = {'children': []}
    fn(lexer.tokenize(data), 0, ast)
    # default_visitors(ast)
    return ast
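A hedged usage sketch, assuming this helper lives on a test-style class and reusing parse_subject from Example #1; the input string is illustrative only.

ast = self.call_parse_func(parse_subject, u"ARTICLE PREMIER")
print(ast['children'])  # nodes attached by parse_subject, if any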
Example #3
def call_parse_func(self, fn, data, tree=None):
    # Same helper, but the root is seeded with a duralex tree node instead of a bare dict.
    if not tree:
        tree = duralex.tree.create_node(None, {})
    fn(lexer.tokenize(data), 0, tree)
    return tree
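The two variants differ only in how the root is seeded: Example #2 starts from a bare {'children': []} dict, while this one delegates to duralex.tree.create_node(None, {}), so the root carries whatever defaults that constructor provides. A hedged call, under the same assumptions as above:

tree = self.call_parse_func(parse_subject, u"ARTICLE PREMIER")
print(tree)  # a duralex tree node with any parsed children attached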