Code example #1
 def test_str(self):
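     # Escaped quotes and other backslash sequences inside single- and double-quoted strings are kept verbatim in the STRING token, followed by an empty NL token.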
     self.assertEqual(tokenize('"a\\"b\\_c"'),
                      [Token('STRING', '"a\\"b\\_c"'),
                       Token('NL', '')])
     self.assertEqual(tokenize("'a\\'b\\_c'"),
                      [Token('STRING', "'a\\'b\\_c'"),
                       Token('NL', '')])
Code example #2
 def test_intermediate_dedent_to_zero(self):
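     # Dropping from four nested levels straight back to column zero emits four consecutive DEDENTs before "abc"; the trailing "  a" then re-indents once.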
     tokens = tokenize('\n a\n  b\n   c\n    d\nabc\n  a')
     token_types = [t.type for t in tokens]
     expected = [
         'NL',
         'INDENT',
         'ID',
         'NL',
         'INDENT',
         'ID',
         'NL',
         'INDENT',
         'ID',
         'NL',
         'INDENT',
         'ID',
         'NL',
         'DEDENT',
         'DEDENT',
         'DEDENT',
         'DEDENT',
         'ID',
         'NL',
         'INDENT',
         'ID',
         'NL',
         'DEDENT',
     ]
     self.assertEqual(token_types, expected)
Code example #3
 def test_comment_misindent(self):
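     # Comment-only lines at inconsistent indentation ("#b", "# d") are ignored by the indent tracker, so a, c and e all stay at the same level.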
     tokens = tokenize('\n  a\n #b\n  c\n   # d\n  e')
     token_types = [t.type for t in tokens]
     expected = [
         'NL', 'INDENT', 'ID', 'NL', 'ID', 'NL', 'ID', 'NL', 'DEDENT'
     ]
     self.assertEqual(token_types, expected)
Code example #4
 def test_multiple_dedent(self):
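     # Whitespace-only lines still drive the indent stack: stepping back from four levels to two emits two DEDENTs, and end of input closes the remaining two.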
     tokens = tokenize('\n \n  \n   \n    \n  \n')
     token_types = [t.type for t in tokens]
     expected = [
         'NL', 'INDENT', 'NL', 'INDENT', 'NL', 'INDENT', 'NL', 'INDENT',
         'NL', 'DEDENT', 'DEDENT', 'NL', 'DEDENT', 'DEDENT'
     ]
     self.assertEqual(token_types, expected)
Code example #5
 def test_entrypoints5(self):
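     # Two consecutive entrypoint headers share a single INDENT/DEDENT pair; the comment-only lines between them produce no tokens.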
     tokens = tokenize(
         '\nentrypoint c:\n # f\nentrypoint d: \n\t\t# f\n\td')
     token_types = [t.type for t in tokens]
     expected = [
         'NL', 'INDENT', 'ID', 'ID', 'COLON', 'NL', 'ID', 'ID', 'COLON',
         'NL', 'ID', 'NL', 'DEDENT'
     ]
     self.assertEqual(token_types, expected)
Code example #6
def process_file(filename, output_dir, output_name, actor_gen, optimizer_flags,
                 **kwargs):
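    # Compile one evfl source file to a .bfevfl binary: lex, parse, run the requested optimizer passes, validate subflow calls, then serialize.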
    init_logger(filename)

    if_ = Path(filename)
    of = output_dir / output_name if output_name else output_dir / if_.with_suffix(
        '.bfevfl').name
    name = if_.with_suffix('').name
    if not if_.exists():
        emit_error('file not found, skipping')
        raise LogError()

    with if_.open('rt') as f:
        evfl = f.read()
        setup_logger(evfl)

    tokens = tokenize(evfl)
    roots, actors = parse(tokens,
                          actor_gen,
                          exported_tco=optimizer_flags['exported_tco'],
                          **kwargs)
    if optimizer_flags['merge_duplicate']:
        optimize_merge_identical(roots)
    if optimizer_flags['short_event_names']:
        optimize_names(roots, make_compact_renamer)
    else:
        optimize_names(roots, make_counter_renamer)
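    # Visit every node reachable from the roots exactly once, marking used actions and queries and checking that subflow calls with an empty namespace target a known flow or entrypoint.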
    nodes: Set[Node] = set()
    entrypoints = set(r.name for r in roots)
    for root in roots:
        for node in find_postorder(root):
            if node in nodes:
                continue
            if isinstance(node, ActionNode):
                node.action.mark_used()
            elif isinstance(node, SwitchNode):
                node.query.mark_used()
            elif isinstance(node, SubflowNode):
                if node.ns == '':
                    if node.called_root_name not in entrypoints:
                        emit_error(
                            f'subflow call for {node.called_root_name} but matching flow/entrypoint not found'
                        )
                        raise LogError()

            nodes.add(node)

    bfevfl = File(name, actors, list(nodes))
    with of.open('wb') as f:
        f.write(bfevfl.prepare_bitstream().bytes)
Code example #7
 def test_entrypoints2(self):
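     # An entrypoint header and its body get their own INDENT/DEDENT pair even though the header itself starts at column zero.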
     tokens = tokenize('\na\nentrypoint b:\n\tc')
     token_types = [t.type for t in tokens]
     expected = [
         'NL',
         'ID',
         'NL',
         'INDENT',
         'ID',
         'ID',
         'COLON',
         'NL',
         'ID',
         'NL',
         'DEDENT',
     ]
     self.assertEqual(token_types, expected)
Code example #8
    def test_files(self):
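        # Tokenize and parse every .evfl fixture: cases paired with an error class must raise it, the rest must match the node dump in the corresponding .out file.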
        self.maxDiff = None

        set_log_level(level=logging.ERROR)
        for c, err in TestParser.CASES:
            with self.subTest(msg=f'test_{c}'):
                with open(f'{TestParser.TEST_DIR}/{c}.evfl', 'rt') as ef:
                    evfl = ef.read()

                tokens = tokenize(evfl)

                if err is not None:
                    with self.assertRaises(err):
                        parse(tokens, self.generate_actor)
                else:
                    rn, actors = parse(tokens, self.generate_actor)
                    actual = [str(x) for x in extract_and_sort_nodes(rn)]
                    expected = []
                    with open(f'{TestParser.TEST_DIR}/{c}.out', 'rt') as ef:
                        for line in ef:
                            line = line.strip()
                            if line:
                                expected.append(line)
                    self.assertEqual(actual, expected)
Code example #9
 def test_bad_dedent(self):
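     # A dedent to an indentation level that was never pushed on the indent stack raises LexerError.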
     with self.assertRaises(LexerError):
         tokenize('\n\t\n\t \n ')
Code example #10
 def test_paren_unopened2(self):
     with self.assertRaises(LexerError):
         tokenize(']')
Code example #11
 def test_comment_indent_1(self):
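     # Comment-only lines never open or close an indentation block, and a trailing comment after code is dropped.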
     tokens = tokenize('\n  # \na # b\n # c')
     token_types = [t.type for t in tokens]
     expected = ['NL', 'ID', 'NL']
     self.assertEqual(token_types, expected)
Code example #12
 def test_paren_unclosed2(self):
     with self.assertRaises(LexerError):
         tokenize('[')
Code example #13
 def test_id(self):
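     # Identifiers may contain '-' and '_' but may not start or end with either; backtick-quoted identifiers (QUOTE_ID) may contain spaces, dots, and escaped backticks.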
     with self.assertRaises(LexerError):
         tokenize('-abc')
     with self.assertRaises(LexerError):
         tokenize('_abc')
     with self.assertRaises(LexerError):
         tokenize('abc-')
     with self.assertRaises(LexerError):
         tokenize('abc_')
     self.assertEqual(
         tokenize('abc_d0-3'),
         [Token('ID', 'abc_d0-3'), Token('NL', '')])
     self.assertEqual(
         tokenize('`1.233 444\\` ee`'),
         [Token('QUOTE_ID', '`1.233 444\\` ee`'),
          Token('NL', '')])
     with self.assertRaises(LexerError):
         tokenize('01-')
Code example #14
    def test_example(self):
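        # Full token stream for a small flow: nested if/else/fork/branch blocks produce matching INDENT/DEDENT pairs, and comments are dropped.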
        src = ''' \
            flow Test(a: int = 5, b: float = 3.0): # comment
                if SubflowResults[+5] in (1, 2):
                    System.EventFlags['a'] = 3
                else:
                    fork:
                        branch:
                            System.EventFlags["b"] = 7
                        branch:
                            System.EventFlags["c"] = 7
                        branch:
                            pass'''
        expected = [
            'ID',
            'ID',
            'LPAREN',
            'ID',
            'COLON',
            'ID',
            'ASSIGN',
            'INT',
            'COMMA',
            'ID',
            'COLON',
            'ID',
            'ASSIGN',
            'FLOAT',
            'RPAREN',
            'COLON',
            'NL',
            'INDENT',
            'ID',
            'ID',
            'LSQUARE',
            'INT',
            'RSQUARE',
            'ID',
            'LPAREN',
            'INT',
            'COMMA',
            'INT',
            'RPAREN',
            'COLON',
            'NL',
            'INDENT',
            'ID',
            'DOT',
            'ID',
            'LSQUARE',
            'STRING',
            'RSQUARE',
            'ASSIGN',
            'INT',
            'NL',
            'DEDENT',
            'ID',
            'COLON',
            'NL',
            'INDENT',
            'ID',
            'COLON',
            'NL',
            'INDENT',
            'ID',
            'COLON',
            'NL',
            'INDENT',
            'ID',
            'DOT',
            'ID',
            'LSQUARE',
            'STRING',
            'RSQUARE',
            'ASSIGN',
            'INT',
            'NL',
            'DEDENT',
            'ID',
            'COLON',
            'NL',
            'INDENT',
            'ID',
            'DOT',
            'ID',
            'LSQUARE',
            'STRING',
            'RSQUARE',
            'ASSIGN',
            'INT',
            'NL',
            'DEDENT',
            'ID',
            'COLON',
            'NL',
            'INDENT',
            'ID',
            'NL',
            'DEDENT',
            'DEDENT',
            'DEDENT',
            'DEDENT',
        ]

        tokens = tokenize(src)
        token_types = [t.type for t in tokens]
        self.assertEqual(token_types, expected)
Code example #15
 def test_paren_mismatch_nested2(self):
     with self.assertRaises(LexerError):
         tokenize('([[()])]')
Code example #16
 def test_paren_mismatch2(self):
     with self.assertRaises(LexerError):
         tokenize('[)')
Code example #17
 def test_entrypoints4(self):
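     # An entrypoint header with no body following it is a lexer error.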
     with self.assertRaises(LexerError):
         tokenize('\nentrypoint e:\n')
Code example #18
 def test_indent_dedent_simple(self):
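     # A single whitespace-only indented line still opens and closes one indentation level.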
     tokens = tokenize('\n \n')
     token_types = [t.type for t in tokens]
     self.assertEqual(token_types, ['NL', 'INDENT', 'NL', 'DEDENT'])
Code example #19
 def test_paren_mismatch_nested1(self):
     with self.assertRaises(LexerError):
         tokenize('[(([])])')
Code example #20
 def test_keyword_start_id(self):
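     # An identifier that merely begins with "entrypoint" ("entrypoint_e") is lexed as an ordinary ID and gets no special entrypoint handling (no INDENT is inserted).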
     tokens = tokenize('\nentrypoint_e:\n')
     token_types = [t.type for t in tokens]
     expected = ['NL', 'ID', 'COLON', 'NL']
     self.assertEqual(token_types, expected)