def test_token_nlike(self):
    """The '!~' operator is classified as a negated-LIKE token."""
    token = Token('!~')
    self.assertEqual(token.token_type, Type.NLIKE)
def test_token_lparen(self):
    """An opening parenthesis is classified as an LPAREN token."""
    token = Token('(')
    self.assertEqual(token.token_type, Type.LPAREN)
# pylint: disable=missing-docstring import pytest from task_forge.ql.lexer import Lexer from task_forge.ql.tokens import Token, Type @pytest.mark.parametrize("query,expected", [( "milk and cookies", [ Token('milk'), Token('and'), Token('cookies'), ], ), ( "completed = false", [ Token('completed'), Token('='), Token('false'), ], ), ( "(priority > 0)", [ Token('('), Token('priority'), Token('>'), Token('0'), Token(')'), ],
def test_token_upper_or(self):
    """Uppercase 'OR' is classified as an OR token."""
    token = Token('OR')
    self.assertEqual(token.token_type, Type.OR)
def test_token_or(self):
    """Lowercase 'or' is classified as an OR token."""
    token = Token('or')
    self.assertEqual(token.token_type, Type.OR)
def test_token_date_24hr(self):
    """A date with a 24-hour time component is classified as a DATE token."""
    token = Token('2018-01-01 10:00')
    self.assertEqual(token.token_type, Type.DATE)
def test_token_lte(self):
    """The '<=' operator is classified as an LTE token."""
    token = Token('<=')
    self.assertEqual(token.token_type, Type.LTE)
def test_token_float(self):
    """A floating-point literal is classified as a NUMBER token."""
    token = Token('1.00')
    self.assertEqual(token.token_type, Type.NUMBER)
def test_token_upper_true(self):
    """Capitalized 'True' is classified as a BOOLEAN token."""
    token = Token('True')
    self.assertEqual(token.token_type, Type.BOOLEAN)
def test_token_gte(self):
    """The '>=' operator is classified as a GTE token."""
    token = Token('>=')
    self.assertEqual(token.token_type, Type.GTE)
def test_token_upper_false(self):
    """Capitalized 'False' is classified as a BOOLEAN token."""
    token = Token('False')
    self.assertEqual(token.token_type, Type.BOOLEAN)
def test_token_upper_and(self):
    """Uppercase 'AND' is classified as an AND token."""
    token = Token('AND')
    self.assertEqual(token.token_type, Type.AND)
def test_token_and(self):
    """Lowercase 'and' is classified as an AND token."""
    token = Token('and')
    self.assertEqual(token.token_type, Type.AND)
def test_token_rparen(self):
    """A closing parenthesis is classified as an RPAREN token."""
    token = Token(')')
    self.assertEqual(token.token_type, Type.RPAREN)
def test_token_eq(self):
    """The '=' operator is classified as an EQ token."""
    token = Token('=')
    self.assertEqual(token.token_type, Type.EQ)
def test_token_num(self):
    """An integer literal is classified as a NUMBER token."""
    token = Token('100')
    self.assertEqual(token.token_type, Type.NUMBER)
def test_token_ne_shell(self):
    """The shell-friendly '^=' spelling is classified as an NE token."""
    token = Token('^=')
    self.assertEqual(token.token_type, Type.NE)
def test_token_string(self):
    """Arbitrary text is classified as a STRING token."""
    token = Token('hello world')
    self.assertEqual(token.token_type, Type.STRING)
def test_token_ne(self):
    """The '!=' operator is classified as an NE token."""
    token = Token('!=')
    self.assertEqual(token.token_type, Type.NE)
def test_token_date_upper_am(self):
    """A date with an uppercase AM time suffix is classified as a DATE token."""
    token = Token('2018-01-01 10:00 AM')
    self.assertEqual(token.token_type, Type.DATE)
def test_token_like(self):
    """The '~' operator is classified as a LIKE token."""
    token = Token('~')
    self.assertEqual(token.token_type, Type.LIKE)
def test_token_date(self):
    """A bare ISO date is classified as a DATE token."""
    token = Token('2018-01-01')
    self.assertEqual(token.token_type, Type.DATE)
def test_token_nlike_shell(self):
    """The shell-friendly '^^' spelling is classified as an NLIKE token."""
    token = Token('^^')
    self.assertEqual(token.token_type, Type.NLIKE)
# pylint: disable=missing-docstring import pytest from task_forge.ql.ast import AST, Expression from task_forge.ql.parser import Parser from task_forge.ql.tokens import Token @pytest.mark.parametrize("query,ast", [ ( 'milk and cookies', AST( Expression(Token('and'), left=Expression(Token('milk')), right=Expression(Token('cookies'))), ), ), ( 'completed = false', AST( Expression(Token('='), left=Expression(Token('completed')), right=Expression(Token('false'))), ), ), ( 'milk -and cookies', AST(Expression(Token('milk and cookies'))), ), ( '(priority > 5 and title ^ \'take out the trash\') or ' '(context = "work" and (priority >= 2 or ("my little pony")))', AST(
def test_query_benchmark(self, task_list, benchmark):
    """Benchmark task_list.search against a pre-built query AST.

    Builds the AST for:
        (priority > 5 and title ~ 'take out the trash') or
        (context = 'work' and (priority >= 2 or 'my little pony'))
    by hand (bypassing lexer/parser so only search itself is timed),
    loads ten tasks — some matching, some not — and hands the search
    call to the ``benchmark`` fixture.

    NOTE(review): ``task_list`` and ``benchmark`` appear to be pytest
    fixtures (the latter from pytest-benchmark) — confirm in conftest.
    """
    # Hand-crafted artisanal Abstract Syntax Tree
    ast = AST(
        # Top level: <left-clause> or <right-clause>
        Expression(
            Token('or'),
            # Right clause: context = work and (priority >= 2 or "my little pony")
            right=Expression(
                Token('and'),
                left=Expression(Token('='),
                                left=Expression(Token('context')),
                                right=Expression(Token('work'))),
                right=Expression(
                    Token('or'),
                    left=Expression(Token('>='),
                                    left=Expression(Token('priority')),
                                    right=Expression(Token('2'))),
                    # Bare string expression: a free-text match
                    right=Expression(Token('my little pony'))),
            ),
            # Left clause: priority > 5 and title ~ "take out the trash"
            left=Expression(
                Token('and'),
                right=Expression(Token('~'),
                                 right=Expression(
                                     Token('take out the trash')),
                                 left=Expression(Token('title'))),
                left=Expression(Token('>'),
                                left=Expression(Token('priority')),
                                right=Expression(Token('5'))),
            ),
        ),
    )
    # Mixed fixture set: several tasks match a clause of the query,
    # the rest are filler so the search has non-matches to reject.
    tasks = [
        Task("my little pony"),
        Task("this task won't match anything"),
        Task("a priority 2 task", priority=2.0),
        Task("take out the trash", priority=5.0),
        Task("work task 1", context="work"),
        Task("work task 2", context="work"),
        Task("task 1"),
        Task("task 2"),
        Task("task 3"),
        Task("task 4"),
    ]
    task_list.add_multiple(tasks)
    # Time only the search call itself.
    benchmark(task_list.search, ast=ast)