def basic_schema():
    """Yield a simple two-string-field schema for use in tests."""
    schema_def = [
        {'type': 'string', 'label': 'name'},
        {'type': 'string', 'label': 'description'},
    ]
    yield core.Schema(schema_def)
def test_parse(schema, strict, data, expected_output, expected_errors):
    """
    Tests Schema.parse()
    """
    result = core.Schema(schema).parse(data, strict=strict)
    assert result.parsed == expected_output
    assert str(result.errors) == expected_errors
def test_schema_magic_methods():
    """
    Tests primary magic methods on Schema that make it behave like a dict
    """
    schema = core.Schema([{'label': 'hello'}, {'label': 'world'}])
    # __len__, __contains__, and both integer/label __getitem__ access.
    assert len(schema) == 2
    assert 'hello' in schema
    assert schema[0]['label'] == 'hello'
    assert schema['hello']['label'] == 'hello'
def test_schema_init(schema, clean_schema):
    """
    Verifies initializing a schema
    """
    initialized = core.Schema(schema)
    assert initialized._schema == clean_schema
    # Any truthy 'condition' entry must have been compiled into a kmatch.K.
    for entry in initialized:
        condition = entry['condition']
        if condition:
            assert isinstance(condition, kmatch.K)
def test_prompt(schema, defaults, prompt_return, expected_prompts, mocker):
    """
    Tests Schema.prompt().

    NOTE (@wesleykendall) - It is very difficult to patch out user input and
    also run prompt toolkit's prompting within pytest. We test that python
    prompt toolkit is being called correctly and assume prompt toolkit is
    working as intended.
    """
    patched_prompt = mocker.patch(
        'prompt_toolkit.prompt', autospec=True, side_effect=prompt_return
    )
    core.Schema(schema).prompt(defaults=defaults)
    assert patched_prompt.call_args_list == expected_prompts
def test_get_prompt_text(schema, expected_prompt_text):
    """Tests Schema._get_prompt_text"""
    assert core.Schema(schema)._get_prompt_text('a') == expected_prompt_text
def test_parse_entry(schema, input, expected_output):
    """
    Tests Schema._parse_entry()
    """
    s = core.Schema(schema)
    assert s._parse_entry('hello', input) == expected_output
def test_parse_datetime(input, expected_output):
    """
    Tests Schema.parse_datetime()
    """
    s = core.Schema([{'type': 'datetime', 'label': 'hello'}])
    assert s.parse_datetime('hello', input) == expected_output