Code example #1
def test_iter_compat(self):
    u = Untokenizer()
    token = (NAME, 'Hello')
    u.compat(token, iter([]))
    self.assertEqual(u.tokens, ["Hello "])
    u = Untokenizer()
    self.assertEqual(u.untokenize(iter([token])), 'Hello ')
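This test appears to come from CPython's test_tokenize.py (compare example #3's metadata), so the imports and test class around it are elided. Run standalone, the same calls look roughly like this (a sketch; Untokenizer is an undocumented internal of the tokenize module and may differ between Python versions):

from tokenize import Untokenizer
from token import NAME

u = Untokenizer()
token = (NAME, 'Hello')
u.compat(token, iter([]))             # NAME tokens are emitted with a trailing space
print(u.tokens)                       # ['Hello ']

u = Untokenizer()
# 2-tuples carry no position info, so untokenize() falls back to compat()
print(u.untokenize(iter([token])))    # 'Hello '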
Code example #2
File: views.py Project: hadi-f90/futurecoder
    def get_solution(self, page_index, step_index: int):
        page = pages[page_slugs_list[page_index]]
        step = getattr(page, page.step_names[step_index])
        if issubclass(step, ExerciseStep):
            program = clean_program(step.solution)
        else:
            program = step.program

        untokenizer = Untokenizer()
        tokens = generate_tokens(StringIO(program).readline)
        untokenizer.untokenize(tokens)
        tokens = untokenizer.tokens

        # Flag every non-whitespace token as maskable, then shuffle the
        # order of the candidate indices
        masked_indices = []
        mask = [False] * len(tokens)
        for i, token in enumerate(tokens):
            if not token.isspace():
                masked_indices.append(i)
                mask[i] = True
        shuffle(masked_indices)

        return dict(
            tokens=tokens,
            maskedIndices=masked_indices,
            mask=mask,
        )
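After the round trip above, untokenizer.tokens is a flat list of source substrings (names, operators, and the whitespace runs between them) whose concatenation reproduces the program exactly; the loop then flags every non-whitespace entry as maskable. A standard-library-only sketch of what that list contains:

from io import StringIO
from tokenize import Untokenizer, generate_tokens

program = "x = 1\nprint(x)\n"
untokenizer = Untokenizer()
untokenizer.untokenize(generate_tokens(StringIO(program).readline))

print(untokenizer.tokens)
# e.g. ['x', ' ', '=', ' ', '1', '\n', 'print', '(', 'x', ')', '\n']
assert "".join(untokenizer.tokens) == program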
Code example #3
File: test_tokenize.py Project: bobfrank/cpython
def test_bad_input_order(self):
    u = Untokenizer()
    u.prev_row = 2
    u.prev_col = 2
    with self.assertRaises(ValueError) as cm:
        u.add_whitespace((1, 3))
    self.assertEqual(cm.exception.args[0],
                     'start (1,3) precedes previous end (2,2)')
    self.assertRaises(ValueError, u.add_whitespace, (2, 1))
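add_whitespace((row, col)) pads u.tokens with spaces (and, as the next example shows, backslash continuations) to advance from (prev_row, prev_col) to the requested start position; a start that lies behind the previous end cannot be reached by padding, hence the ValueError. The failing call, standalone (a sketch):

from tokenize import Untokenizer

u = Untokenizer()
u.prev_row, u.prev_col = 2, 2
try:
    u.add_whitespace((1, 3))       # row 1 lies behind row 2
except ValueError as e:
    print(e)                       # start (1,3) precedes previous end (2,2)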
Code example #4
def test_backslash_continuation(self):
    # The problem is that <whitespace>\<newline> leaves no token
    u = Untokenizer()
    u.prev_row = 1
    u.prev_col = 1
    u.tokens = []
    u.add_whitespace((2, 0))
    self.assertEqual(u.tokens, ['\\\n'])
    u.prev_row = 2
    u.add_whitespace((4, 4))
    self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', '    '])
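The comment points at a real loss: a backslash-newline, and any whitespace in front of it, produce no token, so when add_whitespace() has to skip rows it re-emits a bare backslash-newline per row and the original spacing before the backslash is gone. A round-trip sketch of the effect (traced against CPython's tokenize; output may vary by version):

from io import StringIO
from tokenize import Untokenizer, generate_tokens

program = "x = 1 + \\\n    2\n"
u = Untokenizer()
out = u.untokenize(generate_tokens(StringIO(program).readline))
print(repr(out))   # 'x = 1 +\\\n    2\n' (the space before the backslash is gone)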
Code example #5
def test_iter_compat(self):
    u = Untokenizer()
    token = (NAME, 'Hello')
    tokens = [(ENCODING, 'utf-8'), token]
    u.compat(token, iter([]))
    self.assertEqual(u.tokens, ["Hello "])
    u = Untokenizer()
    self.assertEqual(u.untokenize(iter([token])), 'Hello ')
    u = Untokenizer()
    self.assertEqual(u.untokenize(iter(tokens)), 'Hello ')
    self.assertEqual(u.encoding, 'utf-8')
    self.assertEqual(untokenize(iter(tokens)), b'Hello ')
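An ENCODING pseudo-token contributes no text: compat() only records it on u.encoding, and the module-level untokenize() then encodes its result, which is why the last assertion compares against bytes. The same thing happens with real tokens when tokenizing bytes input (a sketch):

from io import BytesIO
from tokenize import tokenize, untokenize

source = b"x = 1\n"
toks = list(tokenize(BytesIO(source).readline))
print(toks[0].string)      # 'utf-8' -- the ENCODING token comes first
print(untokenize(toks))    # b'x = 1\n' -- bytes, because an encoding was seen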
Code example #6
def get_solution(step):
    if issubclass(step, ExerciseStep):
        if step.solution.__name__ == "solution":
            program, _ = clean_program(step.solution, None)  # noqa
        else:
            program = clean_solution_function(step.solution, dedent(inspect.getsource(step.solution)))
    else:
        program = step.program

    untokenizer = Untokenizer()
    tokens = generate_tokens(StringIO(program).readline)
    untokenizer.untokenize(tokens)
    tokens = untokenizer.tokens

    masked_indices = []
    mask = [False] * len(tokens)
    for i, token in enumerate(tokens):
        if not token.isspace():
            masked_indices.append(i)
            mask[i] = True
    shuffle(masked_indices)

    if step.parsons_solution:
        lines = shuffled_well([
            dict(
                id=str(i),
                content=line,
            )
            for i, line in enumerate(
                pygments.highlight(program, lexer, html_formatter)
                    .splitlines()
            )
            if line.strip()
        ])
    else:
        lines = None

    return dict(
        tokens=tokens,
        maskedIndices=masked_indices,
        mask=mask,
        lines=lines,
    )
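Here lexer, html_formatter, and shuffled_well are futurecoder module-level objects not shown in the excerpt; shuffled_well, going by its name, shuffles the line dicts. A rough stand-in for the highlighting part (a sketch: PythonLexer and HtmlFormatter(nowrap=True) are assumptions, not necessarily the project's actual configuration):

import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

lexer = PythonLexer()                        # assumption
html_formatter = HtmlFormatter(nowrap=True)  # assumption: bare spans, one HTML line per source line

program = "x = 1\nprint(x)\n"
lines = [
    dict(id=str(i), content=line)
    for i, line in enumerate(
        pygments.highlight(program, lexer, html_formatter).splitlines()
    )
    if line.strip()
]
print(lines)   # [{'id': '0', 'content': '<span ...>x</span>...'}, ...]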
Code example #7
File: text.py Project: alexmojaki/futurecoder
def get_solution(step):
    program = step.show_solution_program
    untokenizer = Untokenizer()
    tokens = generate_tokens(StringIO(program).readline)
    untokenizer.untokenize(tokens)
    tokens = untokenizer.tokens

    masked_indices = []
    mask = [False] * len(tokens)
    for i, token in enumerate(tokens):
        if not token.isspace():
            masked_indices.append(i)
            mask[i] = True
    shuffle(masked_indices)

    if step.parsons_solution:
        lines = shuffled_well([
            dict(
                id=str(i),
                content=line,
            )
            for i, line in enumerate(
                pygments.highlight(program, lexer, html_formatter)
                    .splitlines()
            )
            if line.strip()
        ])
    else:
        lines = None

    return dict(
        tokens=tokens,
        maskedIndices=masked_indices,
        mask=mask,
        lines=lines,
    )
Code example #8
def test_bad_input_order(self):
    # raise if previous row
    u = Untokenizer()
    u.prev_row = 2
    u.prev_col = 2
    with self.assertRaises(ValueError) as cm:
        u.add_whitespace((1, 3))
    self.assertEqual(cm.exception.args[0],
                     'start (1,3) precedes previous end (2,2)')
    # raise if previous column in row
    self.assertRaises(ValueError, u.add_whitespace, (2, 1))