Example #1
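 # Negation tokens: empty for a positive expectation, otherwise the NOT operator followed by an optional new line.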
 def _expectation_type_tokens(self) -> TokenSequence:
     return (TokenSequence.empty()
             if self._expectation_type is ExpectationType.POSITIVE else
             TokenSequence.sequence([
                 logic.NOT_OPERATOR_NAME,
                 layout.OPTIONAL_NEW_LINE,
             ]))
Example #2
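 # Empty when the wrapped syntax has no tokens; otherwise its tokens preceded by an optional new line.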
 def tokenization(self) -> TokenSequence:
     element_tokens = self._syntax.tokenization()
     return (TokenSequence.empty()
             if not element_tokens else TokenSequence.concat([
                 TokenSequence.optional_new_line(),
                 element_tokens,
             ]))
Example #3
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.optional_new_line(),
         self._expectation_type_tokens(),
         self._path.tokenization(),
         self._file_matcher_tokens()
     ])
Example #4
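 # Parenthesized element: begin-paren token, optional new line, the element, then the end-paren tokens.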
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.singleton(reserved_words.PAREN_BEGIN),
         TokenSequence.optional_new_line(),
         self._element.tokenization(),
         self._end_paren()
     ])
Example #5
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.singleton(
             instruction_arguments.ASSIGNMENT_OPERATOR,
         ),
         self.value.tokenization(),
     ])
Example #6
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.singleton(file_matcher.PROGRAM_MATCHER_NAME),
         TokenSequence.optional_new_line(),
         abstract_syntaxes.OptionallyOnNewLine(self._path_argument_position).tokenization(),
         self._program.tokenization(),
     ])
Example #7
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.singleton(self._file_type),
         TokenSequence.optional_new_line(),
         self._file_name.tokenization(),
         self._contents.tokenization(),
     ])
Example #8
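 # Path and matcher separated by ":", with an optional new line on each side of the separator.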
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         self._path.tokenization(),
         TokenSequence.optional_new_line(),
         TokenSequence.singleton(reserved_words.COLON),
         TokenSequence.optional_new_line(),
         self._matcher.tokenization(),
     ])
Example #9
def symbol_reference_followed_by_superfluous_string_on_same_line(
    symbol_name: str = 'STRING_MATCHER_SYMBOL_NAME', ) -> StringMatcherAbsStx:
    return CustomStringMatcherAbsStx(
        TokenSequence.concat([
            symbol_tok_seq.SymbolReferenceAsEitherPlainNameOrReferenceSyntax(
                symbol_name),
            TokenSequence.singleton('superfluous')
        ]))
Example #10
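 # Puts the transformation option (with its mandatory transformer value) after a new line.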
 def _transformation_tokenizer(transformer: TokenSequence) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.new_line(),
         token_sequences.OptionWMandatoryValue.of_option_name(
             syntax_elements.WITH_TRANSFORMED_CONTENTS_OPTION_NAME,
             transformer,
         ),
     ])
Example #11
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.sequence([
             instruction_arguments.ASSIGNMENT_OPERATOR,
             layout.OPTIONAL_NEW_LINE,
         ]),
         self.value.tokenization(),
     ])
Example #12
 def _stdin_tokenizer(string_source: TokenSequence) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.new_line(),
         token_sequences.OptionWMandatoryValue.of_option_name(
             syntax_elements.STDIN_OPTION_NAME,
             string_source,
         ),
     ])
Example #13
 def _variant_tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         self.var_name.tokenization(),
         TokenSequence.optional_new_line(),
         TokenSequence.singleton(defs.ASSIGNMENT_IDENTIFIER),
         TokenSequence.optional_new_line(),
         self.value.tokenization(),
     ])
Example #14
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         self.transformed.tokenization(),
         TokenSequence.optional_new_line(),
         token_sequences.OptionWMandatoryValue.of_option_name(
             string_transformer.WITH_TRANSFORMED_CONTENTS_OPTION_NAME,
             self.transformer.tokenization(),
         ),
     ])
Example #15
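 # No tokens when there is no file matcher; otherwise ":" followed by an optional new line and the matcher's tokens.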
 def _file_matcher_tokens(self) -> TokenSequence:
     if not self._file_matcher:
         return TokenSequence.empty()
     else:
         return TokenSequence.concat([
             TokenSequence.singleton(':'),
             TokenSequence.optional_new_line(),
             self._file_matcher.tokenization(),
         ])
Example #16
 def of_modification(
     modification: TokenSequence,
     contents: AbstractSyntax,
 ) -> TokenSequence:
     return TokenSequence.concat([
         modification,
         TokenSequence.optional_new_line(),
         contents.tokenization()
     ])
Example #17
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.optional_new_line(),
         OptionWMandatoryValue.of_option_name(
             OUTPUT_FROM_PROGRAM_OPTION_NAME,
             self._program.tokenization(),
         ),
         TokenSequence.new_line(),
         self._expectation.tokenization(),
     ])
Example #18
 def test_fail_when_missing_end_paren(self):
     # ARRANGE #
     valid_string = str_abs_stx.StringLiteralAbsStx('contents')
     missing_end_paren = CustomAbsStx(
         TokenSequence.concat([
             TokenSequence.singleton('('),
             valid_string.tokenization(),
         ]))
     # ACT & ASSERT #
     parse_check.checker().check_invalid_syntax__abs_stx(
         self, OptionallyOnNewLine(missing_end_paren))
Example #19
 def test_fail_when_missing_end_paren(self):
     # ARRANGE #
     valid_program = ProgramOfSymbolReferenceAbsStx('PROGRAM_SYMBOL')
     missing_end_paren = CustomAbsStx(
         TokenSequence.concat([
             TokenSequence.singleton('('),
             valid_program.tokenization(),
         ])
     )
     # ACT & ASSERT #
     PARSE_CHECKER.check_invalid_syntax__abs_stx(
         self,
         OptionallyOnNewLine(missing_end_paren)
     )
Example #20
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.singleton(names.REPLACE_TRANSFORMER_NAME),
         TokenSequence.optional_new_line(),
         token_sequences.FollowedByOptionalNewLineIfNonEmpty(
             self.lines_filter.tokenization()
         ),
         token_sequences.FollowedByOptionalNewLineIfNonEmpty(
             token_sequences.OptionalOption.of_option_name(
                 names.PRESERVE_NEW_LINES_OPTION_NAME,
                 self.preserve_new_lines,
             )
         ),
         self.regex_token.tokenization(),
         TokenSequence.optional_new_line(),
         self.replacement_token.tokenization(),
     ])
Example #21
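 # Program and arguments, followed by the optional stdin and transformation parts.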
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         self._pgm_and_args.tokenization(),
         OptionalAbsStx(self._stdin,
                        self._stdin_tokenizer).tokenization(),
         OptionalAbsStx(self._transformation,
                        self._transformation_tokenizer).tokenization(),
     ])
Example #22
 def tokenization(self) -> TokenSequence:
     return TokenSequence.sequence([
         layout.OPTIONAL_NEW_LINE,
         reserved_words.PAREN_BEGIN,
         self._plain_expt,
         layout.OPTIONAL_NEW_LINE,
         reserved_words.PAREN_END,
     ])
Example #23
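 # concat: the resulting tokens are the constituent sequences' tokens, in order.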
 def runTest(self):
     tokens_s = ['S1', 'S2']
     tokens_t = ['T1', 'T2']
     concat = TokenSequence.concat([
         _ConstTokSeqTestImpl(tokens_s),
         _ConstTokSeqTestImpl(tokens_t),
     ])
     actual = concat.tokens
     self.assertEqual(tokens_s + tokens_t, actual)
Example #24
    def runTest(self):
        # ARRANGE #
        program_w_superfluous_stx = CustomPgmAndArgsAbsStx(
            TokenSequence.empty())

        define_symbol_syntax = DefineSymbolWMandatoryValue(
            'the_symbol',
            ValueType.PROGRAM,
            program_w_superfluous_stx,
        )

        PARSE_CHECKER.check_invalid_syntax__abs_stx(self, define_symbol_syntax)
Example #25
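 # File specs separated by new lines, enclosed by the begin and end delimiters.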
 def tokenization(self) -> TokenSequence:
     file_specs = collection.intersperse_list(
         TokenSequence.new_line(),
         [fs.tokenization() for fs in self._files])
     return TokenSequence.concat([
         TokenSequence.singleton(self.delimiter__begin),
         TokenSequence.preceded_by_optional_new_line_if_non_empty(
             TokenSequence.concat(file_specs), ),
         TokenSequence.optional_new_line(),
         TokenSequence.singleton(self.delimiter__end),
     ])
Example #26
 def test_invalid_syntax(self):
     syntax = CustomAbsStx(
         TokenSequence.sequence([
             here_doc.here_doc_start_token('marker'),
             '\n',
             'contents',
             '\n',
             'non_marker',
         ]))
     CHECKER.check_invalid_syntax(
         self,
         equivalent_source_variants__for_full_line_expr_parse__s__nsc,
         syntax,
     )
Example #27
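    # new_line renders as "\n" under both layout specs; the optional-new-line setting does not matter.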
    def runTest(self):
        # ARRANGE #
        cases = [
            NameAndValue(
                'layout spec w optional-new line as empty',
                LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_EMPTY,
            ),
            NameAndValue(
                'layout spec w optional new-line as new-line',
                LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_NEW_LINE,
            ),
        ]

        tok_seq = TokenSequence.new_line()
        for case in cases:
            with self.subTest(case.name):
                # ACT #
                actual = tok_seq.layout(case.value)
                # ASSERT #
                self.assertEqual('\n', actual)
Example #28
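    # optional_new_line renders as "" or "\n" depending on the layout spec.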
    def runTest(self):
        # ARRANGE #
        cases = [
            NArrEx(
                'layout spec w optional-new line as empty',
                LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_EMPTY,
                '',
            ),
            NArrEx(
                'layout spec w optional new-line as new-line',
                LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_NEW_LINE,
                '\n',
            ),
        ]

        tok_seq = TokenSequence.optional_new_line()
        for case in cases:
            with self.subTest(case.name):
                # ACT #
                actual = tok_seq.layout(case.arrangement)
                # ASSERT #
                self.assertEqual(case.expectation, actual)
Example #29
    def runTest(self):
        # ARRANGE #
        invalid_modification_types = [':', 2 * syntax.EXPLICIT_CREATE, 'text']

        for invalid_modification_type in invalid_modification_types:
            for file_type in abs_stx.FileType:
                missing_contents_file_spec = abs_stx.FileSpecAbsStx.of_file_type(
                    file_type, StringLiteralAbsStx('valid_file_name'),
                    abs_stx.CustomContentsAbsStx(
                        abs_stx.ContentsAbsStx.of_modification(
                            TokenSequence.singleton(invalid_modification_type),
                            CustomAbsStx.singleton(A_VALID_SYMBOL_NAME),
                        ), ))
                literal_syntax = abs_stx.LiteralFilesSourceAbsStx(
                    [missing_contents_file_spec])
                # ACT & ASSERT #
                integration_check.PARSE_CHECKER__FULL.check_invalid_syntax__abs_stx(
                    self,
                    literal_syntax,
                    sub_test_identifiers={
                        'file_type': file_type,
                        'invalid_modification_type': invalid_modification_type,
                    })
Example #30
 def tokenization(self) -> TokenSequence:
     return TokenSequence.concat([
         TokenSequence.optional_new_line(),
         self._expectation.tokenization(),
     ])