from lucina import tokenize_files
from lucina.tokenizer import Token
# tokenize_file is assumed to live alongside Token in lucina.tokenizer;
# the original import block of this test module is not shown.
from lucina.tokenizer import tokenize_file


def test_tokenize_file_code():
    content = [
        'abc\n',
        '\n',
        '```python\n',
        'def random():\n',
        '    return 4\n',
        '```\n',
        '\n',
        '```python skip',
        'print(random())\n',
        '```\n',
    ]
    assert list(tokenize_file(content)) == [
        Token.LINE('abc\n'),
        Token.LINE('\n'),
        Token.START_CODE('```python\n', language='python', skip=False),
        Token.LINE('def random():\n'),
        Token.LINE('    return 4\n'),
        Token.END_CODE('```\n'),
        Token.LINE('\n'),
        Token.START_CODE('```python skip', language='python', skip=True),
        Token.LINE('print(random())\n'),
        Token.END_CODE('```\n'),
    ]
def test_tokenize_file_simple():
    content = [
        'abc\n',
        'def\n',
        '\n',
    ]
    assert list(tokenize_file(content)) == [
        Token.LINE('abc\n'),
        Token.LINE('def\n'),
        Token.LINE('\n'),
    ]
def test_tokenize_file_split():
    content = [
        'abc\n',
        '----------\n',
        'def\n',
    ]
    assert list(tokenize_file(content)) == [
        Token.LINE('abc\n'),
        Token.SPLIT('----------\n'),
        Token.LINE('def\n'),
    ]
def test_parse_cells_horizontal_split():
    split_rules = {SlideType.SUBSLIDE: [Token.SPLIT()]}
    cells = list(parse_cells(TOKENS, split_rules))
    assert cells == [
        Cell('markdown', 'slide', [
            '# 111\n',
            '\n',
            '* bullet point\n',
            '* bullet point 2\n',
            '\n',
            '```python\n',
            'def random():\n',
            '    return 4\n',
            '```',
        ]),
        Cell('markdown', 'subslide', [
            '```python\n',
            'print(random())\n',
            '```',
        ]),
        Cell('markdown', 'subslide', [
            'Text.\n',
            '## 222\n',
            '\n',
            '```python skip\n',
            'import itertools\n',
            '```\n',
            '\n',
            'Hello world.',
        ]),
    ]
def test_tokenize_files():
    contents = [
        ['# First file\n', 'Hey\n'],
        ['# Second file\n', 'Ho\n'],
    ]
    assert list(tokenize_files(contents)) == [
        Token.FILE(),
        Token.TITLE('# First file\n', level=1),
        Token.AFTER_TITLE(level=1),
        Token.LINE('Hey\n'),
        Token.AFTER_FILE(),
        Token.FILE(),
        Token.TITLE('# Second file\n', level=1),
        Token.AFTER_TITLE(level=1),
        Token.LINE('Ho\n'),
        Token.AFTER_FILE(),
    ]
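# The assertion above implies that tokenize_files simply brackets each
# file's token stream with FILE/AFTER_FILE markers. A minimal sketch,
# assuming tokenize_file is the per-file tokenizer; illustrative only,
# not lucina's actual implementation.
def _tokenize_files_sketch(contents):
    for content in contents:
        yield Token.FILE()  # marks the start of a file
        yield from tokenize_file(content)
        yield Token.AFTER_FILE()  # marks the end of a file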
def test_token():
    token = Token.SPLIT()
    assert token.type is Token.SPLIT
    assert token.content is None
    assert token.params == {}
    assert repr(token) == "Token.SPLIT()"

    token = Token.LINE('foobar')
    assert token.type is Token.LINE
    assert token.content == 'foobar'
    assert token.params == {'content': 'foobar'}
    assert repr(token) == "Token.LINE('foobar')"

    token = Token.TITLE('# foobar', level=1)
    assert token.type is Token.TITLE
    assert token.content == '# foobar'
    assert token.level == 1
    assert token.params == {'level': 1, 'content': '# foobar'}
    assert repr(token) == "Token.TITLE('# foobar', level=1)"
def test_parse_cells_code_split():
    split_rules = {
        SlideType.CONTINUE: [Token.START_CODE(), Token.END_CODE()],
        SlideType.SKIP: [Token.START_CODE(skip=True)],
    }
    cells = list(parse_cells(TOKENS, split_rules))
    assert cells == [
        Cell('markdown', 'slide', [
            '# 111\n',
            '\n',
            '* bullet point\n',
            '* bullet point 2',
        ]),
        Cell('code', '-', ['def random():\n', '    return 4']),
        Cell('markdown', '-', ['---']),
        Cell('code', '-', ['print(random())']),
        Cell('markdown', '-', ['---\n', '\n', 'Text.\n', '## 222']),
        Cell('code', 'skip', ['import itertools']),
        Cell('markdown', '-', ['Hello world.']),
    ]
def test_tokenize_file_title():
    content = [
        '# Document\n',
        '## First\n',
        '* ok\n',
        '* well\n',
        '## Then\n',
        'content\n',
    ]
    assert list(tokenize_file(content)) == [
        Token.TITLE('# Document\n', level=1),
        Token.AFTER_TITLE(level=1),
        Token.TITLE('## First\n', level=2),
        Token.AFTER_TITLE(level=2),
        Token.LINE('* ok\n'),
        Token.LINE('* well\n'),
        Token.TITLE('## Then\n', level=2),
        Token.AFTER_TITLE(level=2),
        Token.LINE('content\n'),
    ]
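# Taken together, the tokenize_file tests in this file pin down the line
# classification: ``` fences become START_CODE/END_CODE (with an optional
# `skip` word), `#`-prefixed lines become TITLE followed by AFTER_TITLE,
# dash-only rules become SPLIT, and everything else is LINE. A rough
# sketch with approximate heuristics; not lucina's implementation.
def _tokenize_file_sketch(lines):
    in_code = False
    for line in lines:
        stripped = line.rstrip('\n')
        if stripped.startswith('```'):
            if in_code:
                yield Token.END_CODE(line)
            else:
                fence = stripped.split()
                yield Token.START_CODE(
                    line,
                    language=fence[0][3:],
                    skip='skip' in fence[1:],
                )
            in_code = not in_code
        elif in_code:
            yield Token.LINE(line)
        elif stripped.startswith('#') and ' ' in stripped:
            level = len(stripped) - len(stripped.lstrip('#'))
            yield Token.TITLE(line, level=level)
            yield Token.AFTER_TITLE(level=level)
        elif len(stripped) >= 3 and set(stripped) == {'-'}:
            yield Token.SPLIT(line)
        else:
            yield Token.LINE(line)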
def test_split_rules():
    rules = SplitRules({
        SlideType.SLIDE: [Token.TITLE(level=1)],
        SlideType.FRAGMENT: [Token.TITLE(), Token.SPLIT()],
        SlideType.SUBSLIDE: [Token.TITLE(level=2)],
    })

    match = rules.match(Token.TITLE('# foo', level=1))
    assert match == (True, SlideType.SLIDE)

    match = rules.match(Token.TITLE('## foo', level=2))
    assert match == (True, SlideType.SUBSLIDE)

    match = rules.match(Token.TITLE('### foo', level=3))
    assert match == (True, SlideType.FRAGMENT)

    match = rules.match(Token.SPLIT())
    assert match == (True, SlideType.FRAGMENT)

    match = rules.match(Token.FILE())
    assert match == (False, None)
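# The assertions above pin down a subtle point: Token.TITLE(level=2)
# matches both the bare Token.TITLE() rule (FRAGMENT) and the more
# specific Token.TITLE(level=2) rule (SUBSLIDE), and the specific rule
# wins. A minimal sketch of matching semantics consistent with this
# test, assuming rule params act as filters on token params; it is
# illustrative only, not lucina's actual SplitRules implementation.
def _match_sketch(rules, token):
    best = None
    for slide_type, rule_tokens in rules.items():
        for rule in rule_tokens:
            if rule.type is not token.type:
                continue
            if any(token.params.get(k) != v for k, v in rule.params.items()):
                continue
            # Prefer the rule that constrains the most parameters;
            # ties break on rule order.
            if best is None or len(rule.params) > best[0]:
                best = (len(rule.params), slide_type)
    return (True, best[1]) if best is not None else (False, None)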
def test_parse_cells_combine_split():
    split_rules = {
        SlideType.SLIDE: [Token.TITLE(level=1)],
        SlideType.SUBSLIDE: [Token.AFTER_TITLE(level=1), Token.TITLE(level=2)],
        SlideType.FRAGMENT: [Token.SPLIT()],
        SlideType.CONTINUE: [Token.START_CODE(), Token.END_CODE()],
        SlideType.SKIP: [Token.START_CODE(skip=True)],
    }
    cells = list(parse_cells(TOKENS, split_rules))
    assert cells == [
        Cell('markdown', 'slide', ['# 111']),
        Cell('markdown', 'subslide', ['* bullet point\n', '* bullet point 2']),
        Cell('code', '-', ['def random():\n', '    return 4']),
        Cell('code', 'fragment', ['print(random())']),
        Cell('markdown', 'fragment', ['Text.']),
        Cell('markdown', 'subslide', ['## 222']),
        Cell('code', 'skip', ['import itertools']),
        Cell('markdown', '-', ['Hello world.']),
    ]
import argparse
import json
import sys

from lucina import parse_cells
from lucina import tokenize_files
from lucina.cell import SlideType
from lucina.tokenizer import Token
from lucina.utils import open_files
# format_doc is used in run() below; assumed to be exported from the
# package like parse_cells, since the original import block omits it.
from lucina import format_doc

parser = argparse.ArgumentParser()
parser.add_argument(
    'files', metavar='file', nargs='+',
    help='Files to compute',
)
parser.add_argument('-o', '--output', default=None)
parser.add_argument('--no-autolaunch', dest='autolaunch', action='store_false')

split_rules = {
    SlideType.SLIDE: [Token.TITLE(level=1)],
    SlideType.SUBSLIDE: [Token.TITLE(level=2)],
    SlideType.CONTINUE: [Token.START_CODE(), Token.END_CODE()],
    SlideType.FRAGMENT: [Token.SPLIT()],
    SlideType.SKIP: [Token.START_CODE(skip=True)],
}


def run(args):
    with open_files(args.files, 'r') as files:
        # tokenize_files and parse_cells are lazy, so the document must be
        # built while the input files are still open.
        tokens = tokenize_files(files)
        cells = parse_cells(tokens, split_rules)
        doc = format_doc(cells, args.autolaunch)
    if args.output:
        f = open(args.output, 'w')
    else:
        f = sys.stdout  # assumed fallback: without --output, write to stdout
    json.dump(doc, f)  # assumed: the formatted document is notebook JSON
    if f is not sys.stdout:
        f.close()
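# Hypothetical entry-point wiring: the module defines run() and a parser
# but never invokes them in the text shown, so this glue is an assumption.
def main():
    run(parser.parse_args())


if __name__ == '__main__':
    main()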
def test_parse_cells_slide_hierarchy():
    tokens = [
        Token.TITLE('# 111\n', level=1),
        Token.AFTER_TITLE(level=1),
        Token.SPLIT('---\n'),
        Token.LINE('foo\n'),
        Token.SPLIT('---\n'),
        Token.LINE('bar\n'),
        Token.TITLE('## secret\n', level=2),
        Token.AFTER_TITLE(level=2),
        Token.LINE('baz\n'),
        Token.TITLE('## secret\n', level=2),
        Token.AFTER_TITLE(level=2),
        Token.START_CODE(language='python'),
        Token.LINE('x = 0\n'),
        Token.END_CODE(),
    ]

    split_rules = {
        SlideType.SLIDE: [Token.TITLE(level=1)],
        SlideType.SUBSLIDE: [Token.AFTER_TITLE(level=1)],
        SlideType.FRAGMENT: [Token.SPLIT()],
        SlideType.CONTINUE: [
            Token.TITLE(level=2),
            Token.START_CODE(),
            Token.END_CODE(),
        ],
        SlideType.SKIP: [Token.AFTER_TITLE(level=2)],
    }
    cells = list(parse_cells(tokens, split_rules))
    assert cells == [
        Cell('markdown', 'slide', ['# 111']),
        Cell('markdown', 'subslide', ['foo']),  # Ignore Token.SPLIT
        Cell('markdown', 'fragment', ['bar']),
        Cell('markdown', '-', ['## secret']),
        Cell('markdown', 'skip', ['baz']),
        Cell('markdown', '-', ['## secret']),  # Go back to normal slide
        Cell('code', '-', ['x = 0']),  # START_CODE resets slide type
    ]

    split_rules = {
        SlideType.SLIDE: [Token.TITLE(level=1)],
        SlideType.FRAGMENT: [Token.SPLIT()],
        SlideType.CONTINUE: [Token.START_CODE(), Token.END_CODE()],
        SlideType.SKIP: [Token.AFTER_TITLE(level=2)],
    }
    cells = list(parse_cells(tokens, split_rules))
    assert cells == [
        Cell('markdown', 'slide', ['# 111']),
        Cell('markdown', 'fragment', ['foo']),
        Cell('markdown', 'fragment', ['bar\n', '## secret']),
        Cell('markdown', 'skip', ['baz\n', '## secret']),
        Cell('code', '-', ['x = 0']),
    ]
def test_parse_cells_title_splits():
    split_rules = {
        SlideType.SLIDE: [Token.TITLE(level=1)],
        SlideType.SUBSLIDE: [Token.TITLE(level=2)],
    }
    cells = list(parse_cells(TOKENS, split_rules))
    assert cells == [
        Cell('markdown', 'slide', [
            '# 111\n',
            '\n',
            '* bullet point\n',
            '* bullet point 2\n',
            '\n',
            '```python\n',
            'def random():\n',
            '    return 4\n',
            '```\n',
            '\n',
            '---\n',
            '\n',
            '```python\n',
            'print(random())\n',
            '```\n',
            '\n',
            '---\n',
            '\n',
            'Text.',
        ]),
        Cell('markdown', 'subslide', [
            '## 222\n',
            '\n',
            '```python skip\n',
            'import itertools\n',
            '```\n',
            '\n',
            'Hello world.',
        ]),
    ]

    split_rules = {
        SlideType.SLIDE: [Token.TITLE(level=1)],
        SlideType.SUBSLIDE: [Token.AFTER_TITLE(level=1), Token.TITLE(level=2)],
    }
    cells = list(parse_cells(TOKENS, split_rules))
    assert cells == [
        Cell('markdown', 'slide', ['# 111']),
        Cell('markdown', 'subslide', [
            '* bullet point\n',
            '* bullet point 2\n',
            '\n',
            '```python\n',
            'def random():\n',
            '    return 4\n',
            '```\n',
            '\n',
            '---\n',
            '\n',
            '```python\n',
            'print(random())\n',
            '```\n',
            '\n',
            '---\n',
            '\n',
            'Text.',
        ]),
        Cell('markdown', 'subslide', [
            '## 222\n',
            '\n',
            '```python skip\n',
            'import itertools\n',
            '```\n',
            '\n',
            'Hello world.',
        ]),
    ]
from lucina.cell import Cell
from lucina.cell import SlideType
from lucina.parser import SplitRules
from lucina.parser import clean_source
from lucina.parser import parse_cells
from lucina.tokenizer import Token

TOKENS = [
    Token.FILE(),
    Token.TITLE('# 111\n', level=1),
    Token.AFTER_TITLE(level=1),
    Token.LINE('\n'),
    Token.LINE('* bullet point\n'),
    Token.LINE('* bullet point 2\n'),
    Token.LINE('\n'),
    Token.START_CODE('```python\n', language='python', skip=False),
    Token.LINE('def random():\n'),
    Token.LINE('    return 4\n'),
    Token.END_CODE('```\n'),
    Token.LINE('\n'),
    Token.SPLIT('---\n'),
    Token.LINE('\n'),
    Token.START_CODE('```python\n', language='python', skip=False),
    Token.LINE('print(random())\n'),
    Token.END_CODE('```\n'),
    Token.LINE('\n'),
    Token.SPLIT('---\n'),
    Token.LINE('\n'),
    Token.LINE('Text.\n'),
    Token.AFTER_FILE(),
    Token.FILE(),
    # Second file (reconstructed from the '## 222' cells the parse_cells
    # tests above expect; the fixture is truncated in the source).
    Token.TITLE('## 222\n', level=2),
    Token.AFTER_TITLE(level=2),
    Token.LINE('\n'),
    Token.START_CODE('```python skip\n', language='python', skip=True),
    Token.LINE('import itertools\n'),
    Token.END_CODE('```\n'),
    Token.LINE('\n'),
    Token.LINE('Hello world.\n'),
    Token.AFTER_FILE(),
]
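# For reference, TOKENS is what tokenizing two Markdown files like the
# following would produce (reconstructed from the tokens above):
#
# First file:
#
#     # 111
#
#     * bullet point
#     * bullet point 2
#
#     ```python
#     def random():
#         return 4
#     ```
#
#     ---
#
#     ```python
#     print(random())
#     ```
#
#     ---
#
#     Text.
#
# Second file:
#
#     ## 222
#
#     ```python skip
#     import itertools
#     ```
#
#     Hello world.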