Example #1
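    # Excerpt from a unittest.TestCase method; assumes Token and Type are
    # imported from task_forge.ql.tokens, as shown by the imports in Example #3.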
    def test_token_nlike(self):
        self.assertEqual(Token('!~').token_type, Type.NLIKE)
Example #2
    def test_token_lparen(self):
        self.assertEqual(Token('(').token_type, Type.LPAREN)
Example #3
# pylint: disable=missing-docstring

import pytest

from task_forge.ql.lexer import Lexer
from task_forge.ql.tokens import Token, Type


@pytest.mark.parametrize("query,expected", [(
    "milk and cookies",
    [
        Token('milk'),
        Token('and'),
        Token('cookies'),
    ],
), (
    "completed = false",
    [
        Token('completed'),
        Token('='),
        Token('false'),
    ],
), (
    "(priority > 0)",
    [
        Token('('),
        Token('priority'),
        Token('>'),
        Token('0'),
        Token(')'),
    ],
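)])
# NOTE: the example above is truncated in the original listing; the closing
# brackets and the test below are a hedged sketch, assuming the Lexer is
# iterable and yields Token objects that compare equal with ==.
def test_lexer_tokenizes_query(query, expected):
    assert list(Lexer(query)) == expected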
Example #4
    def test_token_upper_or(self):
        self.assertEqual(Token('OR').token_type, Type.OR)
Example #5
    def test_token_or(self):
        self.assertEqual(Token('or').token_type, Type.OR)
Example #6
    def test_token_date_24hr(self):
        self.assertEqual(Token('2018-01-01 10:00').token_type, Type.DATE)
Example #7
    def test_token_lte(self):
        self.assertEqual(Token('<=').token_type, Type.LTE)
Example #8
    def test_token_float(self):
        self.assertEqual(Token('1.00').token_type, Type.NUMBER)
Example #9
    def test_token_upper_true(self):
        self.assertEqual(Token('True').token_type, Type.BOOLEAN)
Example #10
    def test_token_gte(self):
        self.assertEqual(Token('>=').token_type, Type.GTE)
Example #11
    def test_token_upper_false(self):
        self.assertEqual(Token('False').token_type, Type.BOOLEAN)
Example #12
    def test_token_upper_and(self):
        self.assertEqual(Token('AND').token_type, Type.AND)
Example #13
    def test_token_and(self):
        self.assertEqual(Token('and').token_type, Type.AND)
Example #14
    def test_token_rparen(self):
        self.assertEqual(Token(')').token_type, Type.RPAREN)
Example #15
    def test_token_eq(self):
        self.assertEqual(Token('=').token_type, Type.EQ)
Example #16
    def test_token_num(self):
        self.assertEqual(Token('100').token_type, Type.NUMBER)
Example #17
    def test_token_ne_shell(self):
        self.assertEqual(Token('^=').token_type, Type.NE)
Example #18
    def test_token_string(self):
        self.assertEqual(Token('hello world').token_type, Type.STRING)
Example #19
    def test_token_ne(self):
        self.assertEqual(Token('!=').token_type, Type.NE)
Example #20
    def test_token_date_upper_am(self):
        self.assertEqual(Token('2018-01-01 10:00 AM').token_type, Type.DATE)
Example #21
    def test_token_like(self):
        self.assertEqual(Token('~').token_type, Type.LIKE)
Example #22
    def test_token_date(self):
        self.assertEqual(Token('2018-01-01').token_type, Type.DATE)
Example #23
    def test_token_nlike_shell(self):
        self.assertEqual(Token('^^').token_type, Type.NLIKE)
Example #24
# pylint: disable=missing-docstring

import pytest

from task_forge.ql.ast import AST, Expression
from task_forge.ql.parser import Parser
from task_forge.ql.tokens import Token


@pytest.mark.parametrize("query,ast", [
    (
        'milk and cookies',
        AST(
            Expression(Token('and'),
                       left=Expression(Token('milk')),
                       right=Expression(Token('cookies'))), ),
    ),
    (
        'completed = false',
        AST(
            Expression(Token('='),
                       left=Expression(Token('completed')),
                       right=Expression(Token('false'))), ),
    ), (
        'milk -and cookies',
        AST(Expression(Token('milk and cookies'))),
    ),
    (
        '(priority > 5 and title ^ \'take out the trash\') or '
        '(context = "work" and (priority >= 2 or ("my little pony")))',
        AST(
Example #25
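    # Excerpt from a benchmark test class: assumes pytest-benchmark's
    # `benchmark` fixture, a project-defined `task_list` fixture, and imports
    # of Task, AST, Expression, and Token from the surrounding test module.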
    def test_query_benchmark(self, task_list, benchmark):
        # Hand-crafted artisanal Abstract Syntax Tree
        ast = AST(
            Expression(
                Token('or'),
                right=Expression(
                    Token('and'),
                    left=Expression(Token('='),
                                    left=Expression(Token('context')),
                                    right=Expression(Token('work'))),
                    right=Expression(
                        Token('or'),
                        left=Expression(Token('>='),
                                        left=Expression(Token('priority')),
                                        right=Expression(Token('2'))),
                        right=Expression(Token('my little pony'))),
                ),
                left=Expression(
                    Token('and'),
                    right=Expression(Token('~'),
                                     right=Expression(
                                         Token('take out the trash')),
                                     left=Expression(Token('title'))),
                    left=Expression(Token('>'),
                                    left=Expression(Token('priority')),
                                    right=Expression(Token('5'))),
                ),
            ), )

        tasks = [
            Task("my little pony"),
            Task("this task won't match anything"),
            Task("a priority 2 task", priority=2.0),
            Task("take out the trash", priority=5.0),
            Task("work task 1", context="work"),
            Task("work task 2", context="work"),
            Task("task 1"),
            Task("task 2"),
            Task("task 3"),
            Task("task 4"),
        ]

        task_list.add_multiple(tasks)
        benchmark(task_list.search, ast=ast)