def test(self):
    """Empty or all-blank current lines SHOULD parse to zero elements."""
    cases = [
        Case('empty line with no following lines',
             source=remaining_source(''),
             expectation=Expectation(
                 elements=[],
                 source=asrt_source.is_at_end_of_line(1)),
             ),
        Case('only white space on current line, with no following lines',
             source=remaining_source(' '),
             expectation=Expectation(
                 elements=[],
                 source=asrt_source.source_is_not_at_end(
                     current_line_number=asrt.equals(1),
                     remaining_part_of_current_line=asrt.equals(' '))),
             ),
        Case('empty line, with following lines',
             source=remaining_source('', ['contents of following line']),
             expectation=Expectation(
                 elements=[],
                 source=asrt_source.is_at_end_of_line(1),
             )
             ),
    ]
    # ACT & ASSERT #
    _test_cases(self, cases)
def test_parse(self):
    """A name-glob-pattern argument SHOULD resolve to the corresponding matcher,
    consuming only the matcher tokens of the current line."""
    pattern = 'include*'
    space = ' '
    cases = [
        SourceCase('single name argument',
                   remaining_source(name_glob_pattern_matcher_of(pattern)),
                   assert_source(is_at_eof=asrt.is_true),
                   ),
        SourceCase('single name argument followed by space, and following lines',
                   remaining_source(name_glob_pattern_matcher_of(pattern) + space,
                                    ['following line']),
                   assert_source(current_line_number=asrt.equals(1),
                                 remaining_part_of_current_line=asrt.equals(space[1:])),
                   ),
        SourceCase('single name argument followed by arguments',
                   remaining_source(name_glob_pattern_matcher_of(pattern) + space + 'following argument',
                                    ['following line']),
                   assert_source(current_line_number=asrt.equals(1),
                                 remaining_part_of_current_line=asrt.equals(space[1:] + 'following argument')),
                   ),
    ]
    for case in cases:
        with self.subTest(case=case.name):
            self._check_parse(
                case.source,
                Expectation(
                    resolved_value_equals_file_matcher(
                        file_matchers.FileMatcherNameGlobPattern(pattern)),
                    source=case.source_assertion,
                )
            )
def test_fail(self):
    """A prefix operator without a valid operand expression SHOULD fail to parse."""
    for grammar_description, grammar in GRAMMARS:
        for prefix_operator, mk_prefix_expr in self.PREFIX_OPERATORS:
            cases = [
                NameAndValue(
                    'no source after operator',
                    remaining_source(prefix_operator),
                ),
                NameAndValue(
                    'operator followed by non-expression',
                    remaining_source('{op} {non_expr}'.format(
                        op=prefix_operator,
                        non_expr=str(surrounded_by_soft_quotes(ast.PRIMITIVE_SANS_ARG)))),
                ),
            ]
            for case in cases:
                with self.subTest(grammar=grammar_description,
                                  prefix_operator=prefix_operator,
                                  case_name=case.name):
                    parser = self.parser_maker.make(grammar,
                                                    must_be_on_current_line=True)
                    with self.assertRaises(SingleInstructionInvalidArgumentException):
                        parser.parse(case.value)
def test_fail(self):
    """Quoted symbol names, and symbol-reference syntax wrapping invalid names,
    SHOULD make parsing fail."""
    symbol_name = 'the_symbol_name'
    for grammar_description, grammar in GRAMMARS:
        cases = [
            NameAndValue(
                'symbol name is quoted',
                remaining_source(str(surrounded_by_hard_quotes(symbol_name))),
            ),
            NameAndValue(
                'symbol reference syntax with invalid symbol name character: space',
                remaining_source(symbol_reference_syntax_for_name('the symbol')),
            ),
            NameAndValue(
                'symbol reference syntax with invalid symbol name character: &',
                remaining_source(symbol_reference_syntax_for_name('the&symbol')),
            ),
        ]
        for case in cases:
            with self.subTest(grammar=grammar_description,
                              case_name=case.name):
                parser = self.parser_maker.make(grammar,
                                                must_be_on_current_line=True)
                with self.assertRaises(SingleInstructionInvalidArgumentException):
                    parser.parse(case.value)
def test(self):
    """Blank input, quoted first tokens, and a missing ')' SHOULD all fail,
    regardless of the must-be-on-current-line setting."""
    for must_be_on_current_line in [False, True]:
        for grammar in GRAMMARS:
            parser = self.parser_maker.make(grammar.value, must_be_on_current_line)
            cases = [
                NameAndValue(
                    'source is just space',
                    remaining_source(' '),
                ),
                NameAndValue(
                    'first token quoted/soft',
                    remaining_source(str(surrounded_by_soft_quotes('token'))),
                ),
                NameAndValue(
                    'first token quoted/hard',
                    remaining_source(str(surrounded_by_hard_quotes('token'))),
                ),
                NameAndValue(
                    'missing )',
                    remaining_source('( {primitive} '.format(
                        primitive=ast.PRIMITIVE_SANS_ARG)),
                ),
            ]
            for case in cases:
                with self.subTest(grammar=grammar.name,
                                  must_be_on_current_line=must_be_on_current_line,
                                  case_name=case.name):
                    with self.assertRaises(SingleInstructionInvalidArgumentException):
                        parser.parse(case.value)
def test_fail(self):
    """A prefix operator lacking an operand on the current line SHOULD fail to parse."""
    for grammar_description, grammar in self.grammars:
        for prefix_operator, mk_prefix_expr in self.prefix_operators:
            cases = [
                ('no source after operator',
                 remaining_source(prefix_operator),
                 ),
                ('no source after operator, but expr on following line',
                 remaining_source(prefix_operator, [ast.SIMPLE_SANS_ARG]),
                 ),
                ('operator followed by non-expression',
                 remaining_source('{op} {non_expr}'.format(
                     op=prefix_operator,
                     non_expr=str(surrounded_by_soft_quotes(ast.SIMPLE_SANS_ARG)))),
                 ),
            ]
            for case_name, source in cases:
                with self.subTest(grammar=grammar_description,
                                  prefix_operator=prefix_operator,
                                  case_name=case_name):
                    with self.assertRaises(SingleInstructionInvalidArgumentException):
                        sut.parse_from_parse_source(grammar, source)
def test_fail(self):
    """Tokens that are not plain, current-line simple-expression names SHOULD fail."""
    for grammar_description, grammar in self.grammars:
        cases = [
            ('token is not the name of a simple expression',
             remaining_source(ast.NOT_A_SIMPLE_EXPR_NAME_AND_NOT_A_VALID_SYMBOL_NAME),
             ),
            ('token is the name of a simple expression, but it is quoted/soft',
             remaining_source(str(surrounded_by_soft_quotes(ast.SIMPLE_SANS_ARG))),
             ),
            ('token is the name of a simple expression, but it is quoted/hard',
             remaining_source(str(surrounded_by_hard_quotes(ast.SIMPLE_SANS_ARG))),
             ),
            ('token is the name of a simple expression, but it is on the next line',
             remaining_source('', [ast.SIMPLE_SANS_ARG]),
             ),
        ]
        for case_name, source in cases:
            with self.subTest(grammar=grammar_description,
                              case_name=case_name):
                with self.assertRaises(SingleInstructionInvalidArgumentException):
                    sut.parse_from_parse_source(grammar, source)
def test_fail_parse_of_complex_expression(self):
    """Complex expressions with missing operands, non-expression operands,
    or unbalanced '(' SHOULD fail to parse."""
    valid_simple_expressions = [
        '{simple_expression}'.format(simple_expression=ast.SIMPLE_SANS_ARG),
        '{simple_expression_name} {argument}'.format(
            simple_expression_name=ast.SIMPLE_WITH_ARG,
            argument='simple-expr-argument'),
    ]
    operators = [ast.COMPLEX_A,
                 ast.COMPLEX_B_THAT_IS_NOT_A_VALID_SYMBOL_NAME]
    for valid_simple_expr in valid_simple_expressions:
        for operator in operators:
            cases = [
                ('operator not followed by expression',
                 remaining_source('{simple_expr} {operator}'.format(
                     simple_expr=valid_simple_expr,
                     operator=operator,
                 )),
                 ),
                ('operator followed by non-expression',
                 remaining_source('{simple_expr} {operator} {non_expr}'.format(
                     simple_expr=valid_simple_expr,
                     operator=operator,
                     non_expr=ast.NOT_A_SIMPLE_EXPR_NAME_AND_NOT_A_VALID_SYMBOL_NAME,
                 )),
                 ),
                ('operator followed by non-expression/two operators',
                 remaining_source('{simple_expr} {operator} {simple_expr} {operator} {non_expr}'.format(
                     simple_expr=valid_simple_expr,
                     operator=operator,
                     non_expr=ast.NOT_A_SIMPLE_EXPR_NAME_AND_NOT_A_VALID_SYMBOL_NAME,
                 )),
                 ),
                ('( at start of expr: missing )',
                 remaining_source('( {simple_expr} {operator} {simple_expr} '.format(
                     simple_expr=valid_simple_expr,
                     operator=operator),
                     []),
                 ),
                ('( in middle of expr: missing )',
                 remaining_source('( {simple_expr} {operator} ( {simple_expr} '.format(
                     simple_expr=valid_simple_expr,
                     operator=operator),
                     []),
                 ),
            ]
            for case_name, source in cases:
                with self.subTest(case_name=case_name,
                                  operator=operator,
                                  valid_simple_expr=valid_simple_expr):
                    with self.assertRaises(SingleInstructionInvalidArgumentException):
                        sut.parse_from_parse_source(ast.GRAMMAR_WITH_ALL_COMPONENTS,
                                                    source)
def test_success_of_expression_within_parentheses(self):
    """Parentheses SHOULD override the default sequencing of operator arguments."""
    s = ast.SimpleSansArg()
    cases = [
        ('parentheses around first expr to make nested expr instead of "linear" args to op',
         Arrangement(
             grammar=ast.GRAMMAR_WITH_ALL_COMPONENTS,
             source=remaining_source('( {s} {op} {s} ) {op} {s}'.format(
                 s=ast.SIMPLE_SANS_ARG,
                 op=ast.COMPLEX_A,
             )),
         ),
         Expectation(
             expression=ComplexA([ComplexA([s, s]), s]),
             source=asrt_source.is_at_end_of_line(1),
         ),
         ),
        ('parentheses around final (second) expr to make first op have precedence',
         Arrangement(
             grammar=ast.GRAMMAR_WITH_ALL_COMPONENTS,
             source=remaining_source('{s} {op} ( {s} {op} {s} )'.format(
                 s=ast.SIMPLE_SANS_ARG,
                 op=ast.COMPLEX_A,
             )),
         ),
         Expectation(
             expression=ComplexA([s, ComplexA([s, s])]),
             source=asrt_source.is_at_end_of_line(1),
         ),
         ),
        ('"linear" (sequence) of OPA, by embedding OPB inside parentheses',
         Arrangement(
             grammar=ast.GRAMMAR_WITH_ALL_COMPONENTS,
             source=remaining_source('{s} {op_a} ( {s} {op_b} {s} ) {op_a} {s}'.format(
                 s=ast.SIMPLE_SANS_ARG,
                 op_a=ast.COMPLEX_A,
                 op_b=ast.COMPLEX_B_THAT_IS_NOT_A_VALID_SYMBOL_NAME,
             )),
         ),
         Expectation(
             expression=ComplexA([s, ComplexB([s, s]), s]),
             source=asrt_source.is_at_end_of_line(1),
         ),
         ),
    ]
    for case_name, arrangement, expectation in cases:
        with self.subTest(name=case_name):
            _check(self, arrangement, expectation)
def test_successful_parse(self):
    """A plain symbol name SHOULD parse as a reference expression,
    leaving any trailing input unconsumed."""
    symbol_name = 'the_symbol_name'
    space_after = ' '
    token_after = str(surrounded_by_hard_quotes('not an expression'))
    for grammar_description, grammar in self.grammars:
        cases = [
            SourceCase(
                'first line is only simple expr',
                source=remaining_source('{symbol_name}'.format(
                    symbol_name=symbol_name,
                )),
                source_assertion=asrt_source.is_at_end_of_line(1)
            ),
            SourceCase(
                'first line is simple expr with space around',
                source=remaining_source(' {symbol_name}{space_after}'.format(
                    symbol_name=symbol_name,
                    space_after=space_after)),
                source_assertion=asrt_source.source_is_not_at_end(
                    current_line_number=asrt.equals(1),
                    remaining_part_of_current_line=asrt.equals(space_after[1:]))
            ),
            SourceCase(
                'expression is followed by non-expression',
                source=remaining_source('{symbol_name} {token_after}'.format(
                    symbol_name=symbol_name,
                    token_after=token_after)),
                source_assertion=asrt_source.source_is_not_at_end(
                    current_line_number=asrt.equals(1),
                    remaining_part_of_current_line=asrt.equals(token_after))
            ),
        ]
        for case in cases:
            with self.subTest(grammar=grammar_description,
                              name=case.name):
                self._check(
                    Arrangement(
                        grammar=grammar,
                        source=case.source),
                    Expectation(
                        expression=ast.RefExpr(symbol_name),
                        source=case.source_assertion,
                    )
                )
def test_failing_validation(self):
    """Operands that are not integers (or have bad syntax) SHOULD fail pre-SDS validation."""
    # ARRANGE #
    symbol_not_an_int = StringSymbolContext.of_constant('SYMBOL_NOT_AN_INT', 'notAnInt')
    cases = [
        ValidationCase(comparators.EQ.name + ' not a number',
                       remaining_source(comparators.EQ.name + ' notANumber'),
                       source_assertion=assert_source(is_at_eol=asrt.is_true),
                       ),
        ValidationCase(comparators.EQ.name + ' not an int',
                       remaining_source(comparators.EQ.name + ' 0.5'),
                       source_assertion=assert_source(is_at_eol=asrt.is_true),
                       ),
        ValidationCase(comparators.EQ.name + ' invalid expression syntax',
                       remaining_source(comparators.EQ.name + ' (1'),
                       source_assertion=assert_source(is_at_eol=asrt.is_true),
                       ),
        ValidationCase(comparators.EQ.name + ' with symbol references',
                       remaining_source(
                           '== {}'.format(symbol_reference_syntax_for_name(symbol_not_an_int.name))
                       ),
                       source_assertion=assert_source(is_at_eol=asrt.is_true),
                       references=asrt.matches_singleton_sequence(
                           symbol_not_an_int.reference_assertion__string__w_all_indirect_refs_are_strings),
                       symbols=symbol_not_an_int.symbol_table
                       ),
    ]
    for case in cases:
        with self.subTest(case.name):
            integration_check.CHECKER__PARSE_SIMPLE.check(
                self,
                case.source,
                input_=integration_check.ARBITRARY_MODEL,
                arrangement=arrangement_wo_tcds(
                    symbols=case.symbols,
                ),
                expectation=Expectation(
                    ParseExpectation(
                        source=case.source_assertion,
                        symbol_references=case.references,
                    ),
                    ExecutionExpectation(
                        validation=validation.ValidationAssertions.pre_sds_fails__w_any_msg(),
                    ),
                )
            )
def test_failing_parse(self):
    """A missing or unrecognized transformer SHOULD make the full parser raise."""
    cases = [
        NameAndValue(
            'missing transformer',
            parse_source.remaining_source(''),
        ),
        NameAndValue(
            'neither a symbol, nor a transformer',
            parse_source.remaining_source(NOT_A_PRIMITIVE_EXPR_NAME_AND_NOT_A_VALID_SYMBOL_NAME),
        ),
    ]
    for case in cases:
        with self.subTest(case_name=case.name):
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parsers(True).full.parse(case.value)
def test_combined_expression_with_single_primitive_expr(self):
    """Mixed infix operators SHOULD combine as: [ [ [ s A s ] B s B s ] A s ]."""
    s = ast.PrimitiveSansArg()
    op_sequence_1 = ast.InfixOpA([s, s])
    op_sequence_2 = ast.InfixOpB([op_sequence_1, s, s])
    expected = ast.InfixOpA([op_sequence_2, s])
    arguments = '{s} {op_a} {s} {op_b} {s} {op_b} {s} {op_a} {s}'.format(
        op_a=ast.INFIX_OP_A,
        op_b=ast.INFIX_OP_B_THAT_IS_NOT_A_VALID_SYMBOL_NAME,
        s=ast.PRIMITIVE_SANS_ARG,
    )
    check(
        self,
        PARSER_MAKER_OF_FULL_EXPR_PARSER,
        Arrangement(
            grammar=ast.GRAMMAR_WITH_ALL_COMPONENTS,
            source=remaining_source(arguments)),
        Expectation(
            expression=expected,
            source=asrt_source.is_at_end_of_line(1),
        )
    )
def test_valid_token(self):
    """The tokenizer SHOULD accept plain and quoted tokens, with or without
    surrounding space, and leave trailing input unconsumed."""
    test_cases = [
        ('token',
         assert_plain('token'),
         assert_source(is_at_eol=asrt.is_true,
                       has_current_line=asrt.is_true,
                       current_line_number=asrt.equals(1))),
        (' token_preceded_by_space',
         assert_plain('token_preceded_by_space'),
         assert_source(is_at_eol=asrt.is_true,
                       has_current_line=asrt.is_true,
                       current_line_number=asrt.equals(1))),
        ('token_followed_by_space ',
         assert_plain('token_followed_by_space'),
         source_is_not_at_end(
             remaining_part_of_current_line=asrt.equals(' '),
             current_line_number=asrt.equals(1))),
        ('token_followed_by_other_token other_token',
         assert_plain('token_followed_by_other_token'),
         source_is_not_at_end(
             remaining_part_of_current_line=asrt.equals(' other_token'),
             current_line_number=asrt.equals(1))),
        ('<<->> other_token',
         assert_plain('<<->>'),
         source_is_not_at_end(
             remaining_part_of_current_line=asrt.equals(' other_token'),
             current_line_number=asrt.equals(1))),
        ('\'single quoted\'',
         assert_quoted('single quoted', '\'single quoted\''),
         assert_source(is_at_eol=asrt.is_true,
                       has_current_line=asrt.is_true,
                       current_line_number=asrt.equals(1))),
        ('\"double quoted\"',
         assert_quoted('double quoted', '\"double quoted\"'),
         assert_source(is_at_eol=asrt.is_true,
                       has_current_line=asrt.is_true,
                       current_line_number=asrt.equals(1))),
        (' \'quoted preceded by space\'',
         assert_quoted('quoted preceded by space', '\'quoted preceded by space\''),
         assert_source(is_at_eol=asrt.is_true,
                       has_current_line=asrt.is_true,
                       current_line_number=asrt.equals(1))),
        (' \'quoted followed by space\' ',
         assert_quoted('quoted followed by space', '\'quoted followed by space\''),
         source_is_not_at_end(
             remaining_part_of_current_line=asrt.equals(' '),
             current_line_number=asrt.equals(1))),
        (' \'quoted token followed by other token\' \'other_token\'',
         assert_quoted('quoted token followed by other token',
                       '\'quoted token followed by other token\''),
         source_is_not_at_end(
             remaining_part_of_current_line=asrt.equals(' \'other_token\''),
             current_line_number=asrt.equals(1))),
    ]
    for first_line, token_assertion, source_assertion in test_cases:
        with self.subTest(msg=repr(first_line)):
            source = remaining_source(first_line)
            actual = sut.parse_token_on_current_line(source)
            token_assertion.apply_with_message(self, actual, 'token')
            source_assertion.apply_with_message(self, source, 'source')
def test__primitive_recursive_followed_by_binary_op_on_same_line(self):
    """A recursive primitive SHOULD be usable as the first operand of an infix op."""
    s = ast.PrimitiveSansArg()
    expected = ast.InfixOpA([ast.PrimitiveRecursive(s), s])
    arguments = '{r} {s} {op_a} {s}'.format(
        r=ast.PRIMITIVE_RECURSIVE,
        s=ast.PRIMITIVE_SANS_ARG,
        op_a=ast.INFIX_OP_A,
    )
    check(
        self,
        PARSER_MAKER_OF_FULL_EXPR_PARSER,
        Arrangement(
            grammar=ast.GRAMMAR_WITH_ALL_COMPONENTS,
            source=remaining_source(arguments)),
        Expectation(
            expression=expected,
            source=asrt_source.is_at_end_of_line(1),
        )
    )
def runTest(self):
    """A line-num matcher whose operand is a symbol reference SHOULD match
    and report the reference."""
    actual_line_num = 3
    int_string_symbol = NameAndValue(
        'int_string_symbol_name',
        string_resolvers.str_constant(str(actual_line_num))
    )
    arguments = arg.LineNum(
        int_condition__expr(comparators.EQ,
                            symbol_reference_syntax_for_name(int_string_symbol.name)))
    model_that_matches = (actual_line_num, 'the line')
    integration_check.check(
        self,
        source=remaining_source(str(arguments)),
        model=model_that_matches,
        arrangement=integration_check.Arrangement(
            symbols=symbol_utils.symbol_table_from_name_and_resolvers([int_string_symbol])
        ),
        expectation=integration_check.Expectation(
            symbol_references=asrt.matches_sequence([
                is_reference_to_symbol_in_expression(int_string_symbol.name)
            ]),
        )
    )
def runTest(self):
    """Filtering by a matcher that matches no line SHOULD produce empty output."""
    # ARRANGE #
    matcher = LineMatcherSymbolContext.of_primitive_constant(
        'line_matcher_symbol',
        False,
    )
    line_matcher_arg = lm_args.SymbolReference(matcher.name)
    arguments = st_args.syntax_for_filter_transformer(str(line_matcher_arg))
    # ACT & ASSERT #
    for may_depend_on_external_resources in [False, True]:
        with self.subTest(may_depend_on_external_resources=may_depend_on_external_resources):
            integration_check.CHECKER__PARSE_SIMPLE.check(
                self,
                remaining_source(arguments),
                model_constructor.empty(
                    self,
                    may_depend_on_external_resources=may_depend_on_external_resources),
                arrangement_w_tcds(
                    symbols=matcher.symbol_table
                ),
                expectation_of_successful_filter_execution(
                    symbol_references=asrt.matches_singleton_sequence(
                        matcher.reference_assertion
                    ),
                    output_lines=[],
                )
            )
def test_rel_result_should_not_be_available_pre_act_phase(self):
    """REL_RESULT relativity SHOULD be rejected by a parser configured
    for before the act phase."""
    arguments = path_syntax.REL_RESULT_OPTION
    with self.assertRaises(SingleInstructionInvalidArgumentException):
        parser = sut.EmbryoParser(is_after_act_phase=False)
        # ACT #
        parser.parse(ARBITRARY_FS_LOCATION_INFO, remaining_source(arguments))
def test_parse_SHOULD_fail_WHEN_initial_token_is_neither_valid_sym_ref_nor_primitive(self):
    """An initial token that is neither a symbol reference nor a primitive
    SHOULD make the full parser raise."""
    source = remaining_source(
        symbol_syntax.NOT_A_VALID_SYMBOL_NAME_NOR_PRIMITIVE_GRAMMAR_ELEMENT_NAME)
    with self.assertRaises(SingleInstructionInvalidArgumentException):
        sut.parsers().full.parse(source)
def test_raise_exception_WHEN_quoting_of_second_token_is_invalid(self):
    """A list whose second token has invalid quoting SHOULD be rejected."""
    for case in TOKENS_WITH_INVALID_SYNTAX:
        source = remaining_source('valid' + ' ' + case.value)
        with self.subTest(name=case.name, source=case.value):
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parse_list(source)
def runTest(self):
    """Every argument case SHOULD produce the expected output lines,
    with and without external-resource dependence."""
    # ARRANGE #
    for arguments in self.argument_cases():
        for may_depend_on_external_resources in [False, True]:
            with self.subTest(may_depend_on_external_resources=may_depend_on_external_resources,
                              arguments=repr(arguments)):
                # ACT & ASSERT #
                integration_check.CHECKER__PARSE_SIMPLE.check(
                    self,
                    remaining_source(arguments),
                    self.model(may_depend_on_external_resources),
                    arrangement_w_tcds(),
                    Expectation(
                        ParseExpectation(
                            symbol_references=asrt.is_empty_sequence
                        ),
                        ExecutionExpectation(
                            main_result=asrt_string_source.pre_post_freeze__matches_lines__any_frozen_ext_deps(
                                self.expected_output_lines_for_model(),
                                may_depend_on_external_resources=asrt.equals(may_depend_on_external_resources),
                            ),
                        ),
                        prim_asrt__constant(
                            asrt_string_transformer.is_identity_transformer(
                                self.is_identity_transformer()
                            )
                        )
                    ),
                )
def runTest(self):
    """Parsing src-file-rel-result arguments SHOULD succeed (no exception raised)."""
    # ARRANGE #
    instruction_arguments = instruction_arguments_for_src_file_rel_result()
    source = remaining_source(instruction_arguments)
    # ACT #
    self.conf.parser().parse(ARBITRARY_FS_LOCATION_INFO, source)
def test_assignment_of_list_with_symbol_references(self):
    """Assigning a list containing a symbol reference SHOULD define a symbol
    whose resolver references the referred symbol."""
    symbol_name = 'the_symbol_name'
    referred_symbol = SymbolWithReferenceSyntax('referred_symbol')
    source = remaining_source(
        src('{list_type} {symbol_name} = {symbol_reference} ',
            symbol_name=symbol_name,
            symbol_reference=referred_symbol,
            ),
        ['following line'],
    )
    expected_symbol_reference = references.reference_to_any_data_type_value(referred_symbol.name)
    expected_resolver = lrs.from_elements([lrs.symbol_element(expected_symbol_reference)])
    expected_resolver_container = container(expected_resolver)
    expectation = Expectation(
        symbol_usages=asrt.matches_sequence([
            vs_asrt.equals_symbol(
                SymbolDefinition(symbol_name, expected_resolver_container),
                ignore_source_line=True)
        ]),
        symbols_after_main=assert_symbol_table_is_singleton(
            symbol_name,
            equals_container(expected_resolver_container),
        ),
        source=asrt_source.is_at_beginning_of_line(2),
    )
    self._check(source, ArrangementWithSds(), expectation)
def test_without_reference_to_existing_file(self):
    """A program with a non-file argument SHOULD validate successfully."""
    # ARRANGE #
    arguments = ab.sequence(['program_name',
                             'argument-that-is-not-a-file']).as_str
    source = remaining_source(arguments)
    arrangement = validation_check.Arrangement(
        dir_contents=home_and_sds_populators.empty()
    )
    expectation = validation_check.is_success()
    # ACT #
    program_resolver = sut.program_parser().parse(source)
    # ASSERT #
    validation_check.check(
        self,
        program_resolver.validator,
        arrangement,
        expectation,
    )
def test_with_reference_to_existing_file(self):
    """Validation SHOULD follow the existence of the referenced file,
    for both home- and non-home relativities."""
    referenced_file = 'referenced-file.txt'
    relativity_cases = [
        relativity_options.conf_rel_home(RelHomeOptionType.REL_HOME_CASE),
        relativity_options.conf_rel_non_home(RelNonHomeOptionType.REL_ACT),
    ]
    for file_existence_case in FILE_EXISTENCE_CASES:
        for relativity_conf in relativity_cases:
            arguments = ab.sequence(['program_name',
                                     ab.option(syntax_elements.EXISTING_FILE_OPTION_NAME),
                                     relativity_conf.option_argument,
                                     referenced_file]).as_str
            source = remaining_source(arguments)
            arrangement = validation_check.Arrangement(
                dir_contents=relativity_conf.populator_for_relativity_option_root(
                    DirContents(file_existence_case.files_for_name(referenced_file))
                ))
            expectation = file_existence_case.expectation_for(
                relativity_conf.directory_structure_partition)
            with self.subTest(relativity=relativity_conf.option_string,
                              file_do_existence_case=file_existence_case.name):
                program_resolver = sut.program_parser().parse(source)
                validation_check.check(
                    self,
                    program_resolver.validator,
                    arrangement,
                    expectation,
                )
def runTest(self):
    """Every invalid-syntax case SHOULD make the instruction parser raise."""
    parser = self.conf.parser()
    for source_str, case_name in INVALID_SYNTAX_CASES:
        source = remaining_source(source_str)
        with self.subTest(msg=case_name):
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                parser.parse(ARBITRARY_FS_LOCATION_INFO, source)
def test_exception_SHOULD_be_raised_WHEN_invalid_syntax_of_second_element(self):
    """A second list element with invalid token syntax SHOULD make parsing fail."""
    for case in TOKENS_WITH_INVALID_SYNTAX:
        source = remaining_source('valid' + ' ' + case.value)
        with self.subTest(name=case.name, source=case.value):
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parse(source)
def _doTest(self, maybe_not: ExpectationTypeConfigForNoneIsSuccess):
    """A matcher parsed from transformer+matcher syntax, referenced via symbol,
    SHOULD apply the transformer before matching."""
    # ARRANGE #
    string_to_prepend = '.'
    initial_model_contents = '\n'
    model_after_2_transformations = ''.join([string_to_prepend,
                                             string_to_prepend,
                                             initial_model_contents])
    initial_model = model_construction.model_of(initial_model_contents)
    equals_expected_matcher = NameAndValue(
        'EQUALS_EXPECTED',
        StringMatcherResolverConstantTestImpl(
            EqualsMatcher(model_after_2_transformations)
        ))
    prepend_transformer_symbol = NameAndValue(
        'PREPEND_TRANSFORMER',
        StringTransformerResolverConstantTestImpl(
            PrependStringToLinesTransformer(string_to_prepend))
    )
    prepend_trans_arg = str_trans_syntax.syntax_for_transformer_option(
        prepend_transformer_symbol.name)
    trans_and_eq_expected_matcher_source = remaining_source(
        '{prepend_trans_arg} {equals_expected_matcher}'.format(
            prepend_trans_arg=prepend_trans_arg,
            equals_expected_matcher=equals_expected_matcher.name,
        ))
    # ACT & ASSERT #
    parser = sut.string_matcher_parser()
    prepend_and_equals_expected_matcher_resolver = parser.parse(
        trans_and_eq_expected_matcher_source)
    prepend_and_equals_expected_matcher = NameAndValue(
        'PREPEND_AND_EQUALS_EXPECTED',
        prepend_and_equals_expected_matcher_resolver)
    symbols = symbol_table_from_name_and_resolvers([
        equals_expected_matcher,
        prepend_transformer_symbol,
        prepend_and_equals_expected_matcher,
    ])
    expected_symbol_references = asrt.matches_sequence([
        is_reference_to_string_transformer(prepend_transformer_symbol.name),
        is_reference_to_string_matcher(prepend_and_equals_expected_matcher.name),
    ])
    self._check_with_source_variants(
        self.configuration.arguments_for(
            args('{prepend_trans_arg} {maybe_not} {prepend_and_equals_expected_matcher}',
                 prepend_trans_arg=prepend_trans_arg,
                 maybe_not=maybe_not.nothing__if_positive__not_option__if_negative,
                 prepend_and_equals_expected_matcher=prepend_and_equals_expected_matcher.name)),
        initial_model,
        self.configuration.arrangement_for_contents(
            symbols=symbols),
        Expectation(
            main_result=maybe_not.pass__if_positive__fail__if_negative,
            symbol_usages=expected_symbol_references),
    )
def test_fail_when_end_marker_not_found(self):
    """Here-document parsing SHOULD fail when no following line equals
    the end marker exactly (whitespace and spelling matter)."""
    first_line = '<<MARKER'
    following_lines_cases = [
        ['not marker',
         syntax.section_header('section-name'),
         ],
        ['not marker',
         ],
        [],
        [' MARKER',
         ],
        ['MARKER ',
         ],
        ['NOT_MARKER',
         ],
    ]
    for following_lines in following_lines_cases:
        for is_mandatory in [False, True]:
            parser = sut.HereDocParser(is_mandatory)
            with self.subTest(msg=repr((is_mandatory, following_lines))):
                with self.assertRaises(sut.HereDocumentContentsParsingException):
                    parser.parse(remaining_source(first_line, following_lines))
def test_symbols_from_comparison_SHOULD_be_reported(self):
    """A symbol used as comparison operand SHOULD appear in the matcher's references."""
    # ARRANGE #
    operand_sym_ref = SymbolReference('operand_symbol_name',
                                      string_made_up_by_just_strings())
    condition_str = '{operator} {symbol_reference}'.format(
        operator=comparators.EQ.name,
        symbol_reference=symbol_reference_syntax_for_name(operand_sym_ref.name)
    )
    arguments_constructor = args.complete_arguments_constructor(
        NumFilesAssertionVariant(condition_str))
    argument = arguments_constructor.apply(
        expectation_type_config__non_is_success(ExpectationType.NEGATIVE))
    source = remaining_source(argument)
    # ACT #
    actual_matcher = sut.files_matcher_parser().parse(source)
    assert isinstance(actual_matcher, FilesMatcherResolver)
    actual_symbol_references = actual_matcher.references
    # ASSERT #
    expected_symbol_references = [
        operand_sym_ref,
    ]
    assertion = equals_symbol_references(expected_symbol_references)
    assertion.apply_without_message(self, actual_symbol_references)
def test_raise_exception_WHEN_invalid_assertion_variant(self):
    """Invalid assertion-variant syntax SHOULD be rejected for every expectation type."""
    parser = sut.parsers(must_be_on_current_line=True).full
    cases = [
        NameAndValue(
            'Matcher is missing',
            args.complete_arguments_constructor(
                InvalidAssertionVariantArgumentsConstructor('')),
        ),
        NameAndValue(
            'Matcher has invalid syntax',
            args.complete_arguments_constructor(
                InvalidAssertionVariantArgumentsConstructor(
                    NOT_A_VALID_SYMBOL_NAME_NOR_PRIMITIVE_GRAMMAR_ELEMENT_NAME
                )),
        ),
    ]
    for case in cases:
        for expectation_type in ExpectationType:
            etc = pfh_expectation_type_config(expectation_type)
            instruction_arguments = case.value.apply(etc)
            source = remaining_source(instruction_arguments)
            with self.subTest(case_name=case.name,
                              expectation_type=str(expectation_type)):
                with self.assertRaises(SingleInstructionInvalidArgumentException):
                    parser.parse(source)
def runTest(self):
    """References to the selector and files-matcher symbols SHOULD be reported, in order."""
    # ARRANGE #
    name_of_referenced_selector = 'SELECTOR'
    name_of_referenced_files_matcher = 'FILES_MATCHER'
    expected_symbol_usages = asrt.matches_sequence([
        is_reference_to_file_matcher(name_of_referenced_selector),
        is_reference_to_files_matcher(name_of_referenced_files_matcher),
    ])
    arguments = fsm_args.argument_constructor_for_symbol_reference(
        files_matcher_symbol_name=name_of_referenced_files_matcher,
        named_matcher=name_of_referenced_selector).apply(
        expectation_type_config__non_is_success(ExpectationType.POSITIVE))
    source = remaining_source(arguments)
    # ACT #
    sdv = sut.parsers().full.parse(source)
    # ASSERT #
    expected_symbol_usages.apply_without_message(self, sdv.references)
def runTest(self):
    """Validation failure of a referenced file matcher SHOULD propagate to the instruction."""
    # ARRANGE #
    for failing_file_matcher_case in failing_validation_cases__svh():
        failing_symbol_context = failing_file_matcher_case.value.symbol_context
        argument = args.CompleteInstructionArg(
            ExpectationType.POSITIVE,
            args.PathArg(file_ref_argument('ignored-file')),
            fm_args.SymbolReference(failing_symbol_context.name))
        with self.subTest(failing_file_matcher_case.name):
            # ACT & ASSERT #
            instruction_check.check(
                self,
                sut.Parser(),
                remaining_source(str(argument)),
                ArrangementPostAct(
                    symbols=failing_symbol_context.symbol_table,
                ),
                instruction_check.expectation(
                    validation=failing_file_matcher_case.value.expectation,
                    symbol_usages=failing_symbol_context.references_assertion
                ))
def runTest(self):
    """References to the selector and files-matcher symbols SHOULD be reported, in order."""
    # ARRANGE #
    name_of_referenced_selector = 'SELECTOR'
    name_of_referenced_files_matcher = 'FILES_MATCHER'
    expected_symbol_usages = asrt.matches_sequence([
        is_file_matcher_reference_to__ref(name_of_referenced_selector),
        is_reference_to_files_matcher__ref(name_of_referenced_files_matcher),
    ])
    arguments = fsm_args.argument_constructor_for_symbol_reference(
        files_matcher_symbol_name=name_of_referenced_files_matcher,
        named_matcher=name_of_referenced_selector
    ).apply(expectation_type_config__non_is_success(ExpectationType.POSITIVE))
    source = remaining_source(arguments)
    # ACT #
    resolver = sut.files_matcher_parser().parse(source)
    # ASSERT #
    expected_symbol_usages.apply_without_message(self, resolver.references)
def equivalent_source_variants(
        put: unittest.TestCase,
        instruction_argument: str) -> Iterator[ParseSource]:
    """Yield the instruction argument combined with each source variant's
    following lines, each wrapped in a sub test of ``put``."""
    for following_lines, source_assertion in _SOURCE_VARIANT_TEST_CASES:
        with put.subTest(following_lines=repr(following_lines)):
            source = remaining_source(instruction_argument, following_lines)
            yield source
def test(self):
    """A quantified matcher over a symbol with failing validation SHOULD report
    that validation failure, for every quantifier."""
    symbol_name = 'the_string_matcher'
    for quantifier in Quantifier:
        arguments = args.Quantification(quantifier,
                                        fm_args.SymbolReference(symbol_name))
        for case in validation_cases.failing_validation_cases(symbol_name):
            symbol_context = case.value.symbol_context
            with self.subTest(quantifier=quantifier,
                              validation_case=case.name):
                integration_check.CHECKER__PARSE_FULL.check(
                    self,
                    source=remaining_source(str(arguments)),
                    input_=arbitrary_model(),
                    arrangement=Arrangement(
                        symbols=symbol_context.symbol_table
                    ),
                    expectation=Expectation(
                        ParseExpectation(
                            symbol_references=symbol_context.references_assertion,
                        ),
                        ExecutionExpectation(
                            validation=case.value.expectation,
                        ),
                    ),
                )
def check_rel_opt_variants_with_same_result_for_every_expectation_type(
        self,
        make_instruction_arguments: InstructionArgumentsVariantConstructor,
        main_result: ValueAssertion,
        contents_of_relativity_option_root: DirContents = empty_dir_contents(),
        following_symbols_setup: SymbolsArrAndExpectSetup = SymbolsArrAndExpectSetup.empty()):
    """Check each accepted relativity option, for every expectation type,
    against the same expected main result."""
    for rel_opt_config in self.accepted_rel_opt_configurations:
        for expectation_type_of_test_case in ExpectationType:
            etc = pfh_expectation_type_config(expectation_type_of_test_case)
            instruction_arguments = make_instruction_arguments.apply(etc, rel_opt_config)
            instruction_source = remaining_source(instruction_arguments)
            with self.put.subTest(expectation_type=etc.expectation_type.name,
                                  arguments=instruction_arguments):
                instruction_check.check(
                    self.put,
                    self.parser,
                    instruction_source,
                    ArrangementPostAct(
                        pre_contents_population_action=MAKE_CWD_OUTSIDE_OF_EVERY_REL_OPT_DIR,
                        home_or_sds_contents=rel_opt_config.populator_for_relativity_option_root(
                            contents_of_relativity_option_root
                        ),
                        symbols=_symbol_table_of(rel_opt_config.symbols,
                                                 following_symbols_setup),
                    ),
                    Expectation(
                        main_result=main_result,
                        symbol_usages=asrt.matches_sequence(
                            rel_opt_config.symbols.usage_expectation_assertions() +
                            following_symbols_setup.expected_references_list
                        )
                    ))
def test_file_matcher_reference_is_reported(self):
    """The reference to the named file matcher SHOULD be among the parsed
    matcher's references, together with the variant's own references."""
    name_of_file_matcher = 'a_file_matcher_symbol'
    arguments_constructor = args.complete_arguments_constructor(
        self.assertion_variant.arguments,
        file_matcher=name_of_file_matcher
    )
    arguments = arguments_constructor.apply(
        pfh_expectation_type_config(ExpectationType.NEGATIVE))
    source = remaining_source(arguments)
    # ACT #
    matcher = sut.files_matcher_parser().parse(source)
    assert isinstance(matcher, FilesMatcherResolver)
    actual = matcher.references
    # ASSERT #
    expected_references = asrt.matches_sequence(
        [is_file_matcher_reference_to(name_of_file_matcher)] +
        list(self.assertion_variant.expected_references)
    )
    expected_references.apply_without_message(self, actual)
    # FIX: the original ended with a bare `asrt_source.is_at_end_of_line(1)`
    # expression whose result was discarded - a never-checked assertion.
    # Apply it to the parsed source, as was evidently intended.
    asrt_source.is_at_end_of_line(1).apply_without_message(self, source)
def test_combined_expression_with_single_simple_expr(self):
    """Mixed complex operators SHOULD combine as: [ [ [ s A s ] B s B s ] A s ]."""
    s = ast.SimpleSansArg()
    op_sequence_1 = ast.ComplexA([s, s])
    op_sequence_2 = ast.ComplexB([op_sequence_1, s, s])
    expected = ast.ComplexA([op_sequence_2, s])
    arguments = '{s} {op_a} {s} {op_b} {s} {op_b} {s} {op_a} {s}'.format(
        op_a=ast.COMPLEX_A,
        op_b=ast.COMPLEX_B_THAT_IS_NOT_A_VALID_SYMBOL_NAME,
        s=ast.SIMPLE_SANS_ARG,
    )
    self._check(
        Arrangement(
            grammar=ast.GRAMMAR_WITH_ALL_COMPONENTS,
            source=remaining_source(arguments)),
        Expectation(
            expression=expected,
            source=asrt_source.is_at_end_of_line(1),
        )
    )
def test_single_file_in_existing_sub_dir(self):
    """Creating a file inside an existing sub dir SHOULD succeed for every
    allowed destination relativity."""
    for rel_opt_conf in ALLOWED_DST_FILE_RELATIVITIES:
        with self.subTest(relativity_option_string=rel_opt_conf.option_argument):
            sub_dir_name = 'sub-dir'
            expected_file = fs.empty_file('file-name.txt')
            self._check(
                remaining_source('{relativity_option} {sub_dir}/{file_name}'.format(
                    relativity_option=rel_opt_conf.option_argument,
                    sub_dir=sub_dir_name,
                    file_name=expected_file.file_name)),
                ArrangementWithSds(
                    pre_contents_population_action=SETUP_CWD_INSIDE_SDS_BUT_NOT_A_SDS_DIR,
                    non_home_contents=rel_opt_conf.populator_for_relativity_option_root__non_home(
                        fs.DirContents([fs.empty_dir(sub_dir_name)])
                    )
                ),
                Expectation(
                    main_result=IS_SUCCESS,
                    side_effects_on_home=f_asrt.dir_is_empty(),
                    symbol_usages=asrt.is_empty_sequence,
                    main_side_effects_on_sds=non_home_dir_contains_exactly(
                        rel_opt_conf.root_dir__non_home,
                        fs.DirContents([fs.Dir(sub_dir_name, [expected_file])])),
                ))
def test_file_matcher_reference_is_reported(self):
    """The reference to the file-matcher symbol must appear last in `references`.

    Fix: the final source-position assertion was constructed but never
    applied (a dead statement); it is now applied to ``source``.
    """
    # ARRANGE #
    name_of_file_matcher = 'a_file_matcher_symbol'
    arguments_constructor = args.complete_arguments_constructor(
        self.assertion_variant.arguments,
        file_matcher=name_of_file_matcher)
    arguments = arguments_constructor.apply(
        pfh_expectation_type_config(ExpectationType.NEGATIVE))
    source = remaining_source(arguments)
    # ACT #
    matcher = parsers().full.parse(source)
    assert isinstance(matcher, MatcherSdv)
    actual = matcher.references
    # ASSERT #
    # The variant's own references come first, the file-matcher reference last.
    expected_references = asrt.matches_sequence(
        list(self.assertion_variant.expected_references) +
        [is_reference_to_file_matcher(name_of_file_matcher)])
    expected_references.apply_without_message(self, actual)
    # Was previously a no-op: the assertion object was created but not applied.
    asrt_source.is_at_end_of_line(1).apply_without_message(self, source)
def runTest(self):
    """A sequence of transformer symbols must be reported as references
    in the order the symbols occur in the source.
    """
    # ARRANGE #
    symbol_names = ['st1', 'st2', 'st3']
    expected_references = asrt.matches_sequence(
        [is_reference_to_string_transformer__ref(name)
         for name in symbol_names]
    )
    arguments = st_args.syntax_for_sequence_of_transformers(list(symbol_names))
    parser = parse_string_transformer.parser()
    # ACT #
    actual = parser.parse(remaining_source(arguments))
    # ASSERT #
    self.assertIsInstance(actual, StringTransformerResolver)
    expected_references.apply(self, actual.references)
def test_parse_SHOULD_fail_WHEN_initial_token_is_neither_valid_sym_ref_nor_primitive(
        self):
    """An initial token that is neither a symbol reference nor a primitive
    must be rejected with a parse error.
    """
    invalid_token = self.configuration.not_a_valid_symbol_name_nor_valid_primitive_or_operator()
    parser = self.configuration.parsers_for_expr_on_any_line().full
    with self.assertRaises(SingleInstructionInvalidArgumentException):
        parser.parse(remaining_source(invalid_token))
def test_fail_when_program_name_has_invalid_quoting(self):
    """A program name with an unterminated hard quote must be rejected."""
    # ARRANGE #
    parser = sut.embryo_parser('instruction-name')
    # ACT & ASSERT #
    with self.assertRaises(SingleInstructionInvalidArgumentException):
        parser.parse(ARBITRARY_FS_LOCATION_INFO,
                     remaining_source(HARD_QUOTE_CHAR))
def _expect_parse_exception(
        self,
        source: str,
        must_be_on_current_line: bool,
):
    """Assert that parsing `source` raises a parse error, for the given
    current-line requirement.
    """
    parser = sut.parsers(must_be_on_current_line).full
    with self.assertRaises(SingleInstructionInvalidArgumentException):
        parser.parse(remaining_source(source))
def test_fail_when_argument_has_invalid_quoting(self):
    """An argument with an unterminated hard quote must be rejected."""
    # ARRANGE #
    parser = sut.embryo_parser('instruction-name')
    program_and_arguments = 'program-name arg {}un-ended'.format(HARD_QUOTE_CHAR)
    # ACT & ASSERT #
    with self.assertRaises(SingleInstructionInvalidArgumentException):
        parser.parse(ARBITRARY_FS_LOCATION_INFO,
                     remaining_source(program_and_arguments))
def test_fail_when_superfluous_arguments(self):
    """Extra arguments after the expected one must be rejected,
    both before and after the act phase.
    """
    arguments = 'expected-argument superfluous-argument'
    for is_after_act_phase in (False, True):
        with self.subTest(is_after_act_phase=is_after_act_phase):
            parser = sut.EmbryoParser(is_after_act_phase=is_after_act_phase)
            # ACT & ASSERT #
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                parser.parse(ARBITRARY_FS_LOCATION_INFO,
                             remaining_source(arguments))
def test_rel_tmp_with_superfluous_argument(self):
    """A superfluous argument after the rel-tmp path must be rejected,
    both before and after the act phase.

    Fix: the parser was constructed *inside* the ``assertRaises`` block,
    so a constructor failure raising the expected exception would make the
    test pass spuriously. Construction is hoisted out, matching the style
    of the sibling superfluous-arguments test.
    """
    arguments = format_rel_options('{rel_tmp} subdir superfluous')
    for is_after_act_phase in [False, True]:
        with self.subTest(is_after_act_phase=is_after_act_phase):
            parser = sut.EmbryoParser(is_after_act_phase=is_after_act_phase)
            # ACT #
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                parser.parse(ARBITRARY_FS_LOCATION_INFO,
                             remaining_source(arguments))
def _test_symbol_reference_in_dst_file_and_contents(
        self,
        symbol_ref_syntax_2_contents_arguments: Callable[[str], ArgumentElements],
        symbol_value_2_expected_contents: Callable[[str], str]
):
    """Check that symbol references in both the destination path and the
    contents are resolved and reported.

    :param symbol_ref_syntax_2_contents_arguments: builds the contents part
        of the instruction from the symbol-reference syntax string
    :param symbol_value_2_expected_contents: maps the contents symbol's raw
        value to the file contents expected after execution
    """
    sub_dir_name = 'sub-dir'
    relativity = RelOptionType.REL_ACT
    # Path symbol: resolves to <REL_ACT>/sub-dir.
    file_symbol = NameAndValue('file_symbol_name',
                               file_refs.of_rel_option(relativity,
                                                       file_refs.constant_path_part(sub_dir_name)))
    # String symbol used inside the contents.
    contents_symbol = NameAndValue('contents_symbol_name',
                                   'contents symbol value')
    expected_file_symbol_reference = SymbolReference(
        file_symbol.name,
        parse_file_ref.path_or_string_reference_restrictions(
            ACCEPTED_RELATIVITY_VARIANTS))
    expected_contents_symbol_reference = SymbolReference(
        contents_symbol.name,
        is_any_data_type())
    expected_file_contents = symbol_value_2_expected_contents(contents_symbol.value)
    expected_file = fs.File('a-file-name.txt', expected_file_contents)
    # Order matters: path reference is expected before the contents reference.
    expected_symbol_references = [expected_file_symbol_reference,
                                  expected_contents_symbol_reference]
    symbol_table = data_symbol_utils.SymbolTable({
        file_symbol.name: data_symbol_utils.file_ref_constant_container(file_symbol.value),
        contents_symbol.name: data_symbol_utils.string_constant_container(contents_symbol.value),
    })
    contents_arguments = symbol_ref_syntax_2_contents_arguments(
        symbol_reference_syntax_for_name(contents_symbol.name)).as_arguments
    assert isinstance(contents_arguments, Arguments)
    self._check(
        # Source: <sym-ref>/<file-name> <contents>, possibly spanning lines.
        remaining_source(
            '{symbol_ref}/{file_name} {contents}'.format(
                symbol_ref=symbol_reference_syntax_for_name(file_symbol.name),
                file_name=expected_file.file_name,
                contents=contents_arguments.first_line
            ),
            contents_arguments.following_lines),
        ArrangementWithSds(
            pre_contents_population_action=SETUP_CWD_INSIDE_SDS_BUT_NOT_A_SDS_DIR,
            symbols=symbol_table,
        ),
        Expectation(
            main_result=IS_SUCCESS,
            symbol_usages=equals_symbol_references(expected_symbol_references),
            main_side_effects_on_sds=dir_contains_exactly(
                relativity,
                fs.DirContents([
                    fs.Dir(sub_dir_name, [expected_file])])),
        ))
def test_invalid_token(self):
    """A token with an unterminated single quote must be rejected."""
    for first_line in ['\'missing end single quote']:
        with self.subTest(msg=repr(first_line)):
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parse_token_or_none_on_current_line(
                    remaining_source(first_line))
def runTest(self):
    """A src file relative to the result dir must be a parse error."""
    # ARRANGE #
    source = remaining_source(
        instruction_arguments_for_src_file_rel_result())
    # ACT & ASSERT #
    with self.assertRaises(SingleInstructionInvalidArgumentException):
        self.conf.parser().parse(ARBITRARY_FS_LOCATION_INFO, source)
def test_exception_SHOULD_be_raised_WHEN_second_element_is_reserved_word(
        self):
    """Any reserved token (other than the closing paren) as second element
    must be rejected.
    """
    illegal_second_elements = set(reserved_words.RESERVED_TOKENS).difference(
        reserved_words.PAREN_END)
    for reserved_word in illegal_second_elements:
        with self.subTest(reserved_word):
            source = remaining_source('valid ' + reserved_word)
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parser().parse(source)
def test_fail_when_superfluous_arguments(self):
    """A superfluous argument after the contents must be rejected,
    for every arguments case.
    """
    for argument_elements in ARGUMENTS_CASES:
        arguments = argument_elements.as_arguments
        with self.subTest(arguments.first_line):
            first_line = 'expected-argument = {contents} superfluous_argument'.format(
                contents=arguments.first_line)
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                just_parse(remaining_source(first_line,
                                            arguments.following_lines))
def test_raise_exception_WHEN_element_is_a_reserved_word_that_is_not_r_paren(
        self):
    """Any reserved token (other than the closing paren) as a list element
    must be rejected.
    """
    illegal_elements = set(reserved_words.RESERVED_TOKENS).difference(
        reserved_words.PAREN_END)
    for illegal_string in illegal_elements:
        with self.subTest(illegal_string):
            source = remaining_source(illegal_string)
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parse_list(source)
def test_invalid_token(self):
    """A token with an unterminated single quote must be rejected."""
    invalid_tokens = [
        '\'missing end single quote',
    ]
    for first_line in invalid_tokens:
        with self.subTest(msg=repr(first_line)):
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parse_token_or_none_on_current_line(
                    remaining_source(first_line))
def test_no_token_on_remaining_part_of_current_line(self):
    """An empty or whitespace-only current line has no token, which must
    be reported as a parse error.
    """
    for first_line in ('', '   '):
        with self.subTest(msg=repr(first_line)):
            with self.assertRaises(SingleInstructionInvalidArgumentException):
                sut.parse_token_on_current_line(
                    remaining_source(first_line))
def check__abs_stx(
        self,
        put: unittest.TestCase,
        source: AbstractSyntax,
        arrangement: Arrangement,
        expectation_: Expectation[T],
        layout: LayoutSpec = LayoutSpec.of_default(),
):
    """Render the abstract syntax with the given layout, then parse and
    execute it, checking `arrangement` against `expectation_`.
    """
    rendered_source = remaining_source(
        source.tokenization().layout(layout))
    checker = _ParseAndExecutionChecker(put, arrangement, expectation_)
    checker.execute(self.parser, rendered_source)
def test_paren_end(self):
    """A lone closing paren yields no elements and is left unconsumed
    on the current line.
    """
    source_assertion = asrt_source.source_is_not_at_end(
        current_line_number=asrt.equals(1),
        remaining_source=asrt.equals(reserved_words.PAREN_END))
    expectation = Expectation(
        elements=[],
        validators=asrt.is_empty_sequence,
        references=asrt.is_empty_sequence,
    )
    _test_case(
        self,
        ARRANGEMENT__NEUTRAL,
        expectation,
        remaining_source(reserved_words.PAREN_END),
        source_assertion)
def parse_should_fail_when_syntax_is_invalid(
        self) -> Sequence[NameAndValue[ParseSource]]:
    """Cases of malformed parenthesized expressions that must fail to parse."""
    valid_symbol = self.valid_symbol_name_and_not_valid_primitive_or_operator
    invalid_expr = self.not_a_valid_symbol_name_nor_valid_primitive_or_operator
    return [
        NameAndValue('eof after (',
                     remaining_source('( ')),
        NameAndValue('missing )',
                     remaining_source('( {symbol_name}'.format(
                         symbol_name=valid_symbol))),
        NameAndValue('missing space after (',
                     remaining_source('({symbol_name} )'.format(
                         symbol_name=valid_symbol))),
        NameAndValue('invalid expression inside ()',
                     remaining_source('( {invalid} )'.format(
                         invalid=invalid_expr))),
    ]
def test_no_token_on_remaining_part_of_current_line(self):
    """An empty or whitespace-only current line yields None (no token).

    Fix: the trailing ``assert_source(is_at_eol=asrt.is_true)`` built an
    assertion but never applied it (a dead statement); it is now applied
    to ``source``.
    """
    test_cases = [
        '',
        '   ',
    ]
    for first_line in test_cases:
        with self.subTest(msg=repr(first_line)):
            source = remaining_source(first_line)
            actual = sut.parse_token_or_none_on_current_line(source)
            self.assertIsNone(actual)
            # Was previously a no-op: the assertion object was created
            # but not applied.
            assert_source(is_at_eol=asrt.is_true).apply_without_message(self, source)