Example #1
    # Stdlib dependencies: io.StringIO, random.shuffle, and
    # tokenize.Untokenizer / tokenize.generate_tokens; pages, page_slugs_list,
    # ExerciseStep and clean_program come from the surrounding project.
    def get_solution(self, page_index, step_index: int):
        page = pages[page_slugs_list[page_index]]
        step = getattr(page, page.step_names[step_index])
        if issubclass(step, ExerciseStep):
            program = clean_program(step.solution)
        else:
            program = step.program

        # Round-trip the program through tokenize: after untokenize() runs,
        # untokenizer.tokens holds the source split into a flat list of
        # code and whitespace strings.
        untokenizer = Untokenizer()
        tokens = generate_tokens(StringIO(program).readline)
        untokenizer.untokenize(tokens)
        tokens = untokenizer.tokens

        # Mark every non-whitespace token as maskable and shuffle the order
        # in which the masked tokens will be revealed.
        masked_indices = []
        mask = [False] * len(tokens)
        for i, token in enumerate(tokens):
            if not token.isspace():
                masked_indices.append(i)
                mask[i] = True
        shuffle(masked_indices)

        return dict(
            tokens=tokens,
            maskedIndices=masked_indices,
            mask=mask,
        )
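
The key trick above is the tokenize round-trip: feeding the stream from
generate_tokens() through Untokenizer.untokenize() leaves the untokenizer's
tokens attribute holding the program split into alternating code and
whitespace strings. A minimal, self-contained sketch (standard library only;
the sample program is made up):

from io import StringIO
from tokenize import Untokenizer, generate_tokens

program = "total = 1 + 2\nprint(total)\n"

untokenizer = Untokenizer()
untokenizer.untokenize(generate_tokens(StringIO(program).readline))

# A flat list of strings that joins back into the original source exactly.
print(untokenizer.tokens)
assert "".join(untokenizer.tokens) == program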
Example #3
    # Needs: from tokenize import ENCODING, NAME, Untokenizer, untokenize.
    def test_iter_compat(self):
        u = Untokenizer()
        token = (NAME, 'Hello')
        tokens = [(ENCODING, 'utf-8'), token]
        u.compat(token, iter([]))
        self.assertEqual(u.tokens, ["Hello "])
        u = Untokenizer()
        self.assertEqual(u.untokenize(iter([token])), 'Hello ')
        u = Untokenizer()
        self.assertEqual(u.untokenize(iter(tokens)), 'Hello ')
        self.assertEqual(u.encoding, 'utf-8')
        self.assertEqual(untokenize(iter(tokens)), b'Hello ')
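
What this test exercises is tokenize's "compat" path: when tokens are plain
2-tuples of (type, string) instead of full 5-tuples, untokenize() falls back
to a looser mode that just re-spaces the tokens, and the result is encoded to
bytes only when an ENCODING token leads the stream. The same behaviour,
reproduced outside the test harness:

from tokenize import ENCODING, NAME, untokenize

print(untokenize([(NAME, 'Hello')]))                       # 'Hello '  (str)
print(untokenize([(ENCODING, 'utf-8'), (NAME, 'Hello')]))  # b'Hello ' (bytes)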
Example #4
    def test_iter_compat(self):
        u = Untokenizer()
        token = (NAME, 'Hello')
        u.compat(token, iter([]))
        self.assertEqual(u.tokens, ["Hello "])
        u = Untokenizer()
        self.assertEqual(u.untokenize(iter([token])), 'Hello ')
Example #6
def get_solution(step):
    if issubclass(step, ExerciseStep):
        if step.solution.__name__ == "solution":
            program, _ = clean_program(step.solution, None)  # noqa
        else:
            program = clean_solution_function(step.solution, dedent(inspect.getsource(step.solution)))
    else:
        program = step.program

    untokenizer = Untokenizer()
    tokens = generate_tokens(StringIO(program).readline)
    untokenizer.untokenize(tokens)
    tokens = untokenizer.tokens

    masked_indices = []
    mask = [False] * len(tokens)
    for i, token in enumerate(tokens):
        if not token.isspace():
            masked_indices.append(i)
            mask[i] = True
    shuffle(masked_indices)

    if step.parsons_solution:
        lines = shuffled_well([
            dict(
                id=str(i),
                content=line,
            )
            for i, line in enumerate(
                pygments.highlight(program, lexer, html_formatter)
                    .splitlines()
            )
            if line.strip()
        ])
    else:
        lines = None

    return dict(
        tokens=tokens,
        maskedIndices=masked_indices,
        mask=mask,
        lines=lines,
    )
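
The new piece in this example is the Parsons-style branch: the solution is
syntax-highlighted with Pygments and split into one draggable line per
non-blank source line (shuffled_well is a project helper; random.shuffle
would stand in). A sketch of just that step, assuming the example's
module-level lexer and html_formatter are a Python lexer and an HTML
formatter with wrapping disabled, so each source line maps to one line of
markup:

import pygments
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer

lexer = PythonLexer()
html_formatter = HtmlFormatter(nowrap=True)  # assumed configuration

program = "x = 1\nprint(x)\n"
lines = [
    dict(id=str(i), content=line)
    for i, line in enumerate(
        pygments.highlight(program, lexer, html_formatter).splitlines()
    )
    if line.strip()
]
print(lines)  # one dict of highlighted HTML per non-blank source line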
Example #7
def get_solution(step):
    program = step.show_solution_program
    untokenizer = Untokenizer()
    tokens = generate_tokens(StringIO(program).readline)
    untokenizer.untokenize(tokens)
    tokens = untokenizer.tokens

    masked_indices = []
    mask = [False] * len(tokens)
    for i, token in enumerate(tokens):
        if not token.isspace():
            masked_indices.append(i)
            mask[i] = True
    shuffle(masked_indices)

    if step.parsons_solution:
        lines = shuffled_well([
            dict(
                id=str(i),
                content=line,
            )
            for i, line in enumerate(
                pygments.highlight(program, lexer, html_formatter)
                    .splitlines()
            )
            if line.strip()
        ])
    else:
        lines = None

    return dict(
        tokens=tokens,
        maskedIndices=masked_indices,
        mask=mask,
        lines=lines,
    )
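
For reference, a hypothetical driver showing the shape of the payload this
version returns, using a made-up stub step with Parsons mode switched off so
the Pygments branch is skipped:

class StubStep:  # hypothetical stand-in for a real step class
    show_solution_program = "x = 1\nprint(x)\n"
    parsons_solution = False

result = get_solution(StubStep)
print(result["tokens"])         # the program split into code/whitespace strings
print(result["mask"])           # True wherever the token is not whitespace
print(result["maskedIndices"])  # the True positions, in shuffled order
print(result["lines"])          # None, since parsons_solution is False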