def tokenization(self) -> TokenSequence:
    # Each file spec goes on its own line; the whole list is wrapped in
    # begin/end delimiters.
    file_specs = collection.intersperse_list(
        TokenSequence.new_line(),
        [fs.tokenization() for fs in self._files])
    return TokenSequence.concat([
        TokenSequence.singleton(self.delimiter__begin),
        TokenSequence.preceded_by_optional_new_line_if_non_empty(
            TokenSequence.concat(file_specs),
        ),
        TokenSequence.optional_new_line(),
        TokenSequence.singleton(self.delimiter__end),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(file_matcher.PROGRAM_MATCHER_NAME),
        TokenSequence.optional_new_line(),
        abstract_syntaxes.OptionallyOnNewLine(
            self._path_argument_position).tokenization(),
        self._program.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    # Relies on an empty TokenSequence being falsy: an empty element
    # contributes no tokens at all, otherwise it may start on a new line.
    element_tokens = self._syntax.tokenization()
    return (TokenSequence.empty()
            if not element_tokens
            else TokenSequence.concat([
                TokenSequence.optional_new_line(),
                element_tokens,
            ]))

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(reserved_words.PAREN_BEGIN),
        TokenSequence.optional_new_line(),
        self._element.tokenization(),
        self._end_paren(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.optional_new_line(),
        self._expectation_type_tokens(),
        self._path.tokenization(),
        self._file_matcher_tokens(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(
            instruction_arguments.ASSIGNMENT_OPERATOR,
        ),
        self.value.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(self._file_type),
        TokenSequence.optional_new_line(),
        self._file_name.tokenization(),
        self._contents.tokenization(),
    ])

def _transformation_tokenizer(transformer: TokenSequence) -> TokenSequence:
    # The transformation option always starts on a new line.
    return TokenSequence.concat([
        TokenSequence.new_line(),
        token_sequences.OptionWMandatoryValue.of_option_name(
            syntax_elements.WITH_TRANSFORMED_CONTENTS_OPTION_NAME,
            transformer,
        ),
    ])

def _stdin_tokenizer(string_source: TokenSequence) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.new_line(),
        token_sequences.OptionWMandatoryValue.of_option_name(
            syntax_elements.STDIN_OPTION_NAME,
            string_source,
        ),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        self._pgm_and_args.tokenization(),
        OptionalAbsStx(self._stdin,
                       self._stdin_tokenizer).tokenization(),
        OptionalAbsStx(self._transformation,
                       self._transformation_tokenizer).tokenization(),
    ])

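# A minimal sketch (an assumption for illustration, not the project's actual
# implementation) of the OptionalAbsStx helper used above: it contributes no
# tokens when the optional element is absent, and otherwise feeds the
# element's tokens through the given tokenizer (which adds the option name
# and line breaks, as in _stdin_tokenizer / _transformation_tokenizer).
from typing import Callable, Optional

class OptionalAbsStx(AbstractSyntax):
    def __init__(self,
                 optional_element: Optional[AbstractSyntax],
                 tokenizer: Callable[[TokenSequence], TokenSequence],
                 ):
        self._optional_element = optional_element
        self._tokenizer = tokenizer

    def tokenization(self) -> TokenSequence:
        if self._optional_element is None:
            return TokenSequence.empty()
        return self._tokenizer(self._optional_element.tokenization())
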
def _variant_tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        self.var_name.tokenization(),
        TokenSequence.optional_new_line(),
        TokenSequence.singleton(defs.ASSIGNMENT_IDENTIFIER),
        TokenSequence.optional_new_line(),
        self.value.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        self._path.tokenization(),
        TokenSequence.optional_new_line(),
        TokenSequence.singleton(reserved_words.COLON),
        TokenSequence.optional_new_line(),
        self._matcher.tokenization(),
    ])

def symbol_reference_followed_by_superfluous_string_on_same_line(
        symbol_name: str = 'STRING_MATCHER_SYMBOL_NAME',
) -> StringMatcherAbsStx:
    return CustomStringMatcherAbsStx(
        TokenSequence.concat([
            symbol_tok_seq.SymbolReferenceAsEitherPlainNameOrReferenceSyntax(
                symbol_name),
            TokenSequence.singleton('superfluous'),
        ]))

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.sequence([
            instruction_arguments.ASSIGNMENT_OPERATOR,
            layout.OPTIONAL_NEW_LINE,
        ]),
        self.value.tokenization(),
    ])

def _file_matcher_tokens(self) -> TokenSequence:
    if not self._file_matcher:
        return TokenSequence.empty()
    else:
        return TokenSequence.concat([
            TokenSequence.singleton(':'),
            TokenSequence.optional_new_line(),
            self._file_matcher.tokenization(),
        ])

def of_modification(
        modification: TokenSequence,
        contents: AbstractSyntax,
) -> TokenSequence:
    return TokenSequence.concat([
        modification,
        TokenSequence.optional_new_line(),
        contents.tokenization(),
    ])

def runTest(self):
    tokens_s = ['S1', 'S2']
    tokens_t = ['T1', 'T2']
    concat = TokenSequence.concat([
        _ConstTokSeqTestImpl(tokens_s),
        _ConstTokSeqTestImpl(tokens_t),
    ])
    actual = concat.tokens
    self.assertEqual(tokens_s + tokens_t, actual)

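# A minimal sketch (an assumption; the real helper is defined elsewhere in
# the test suite) of the _ConstTokSeqTestImpl used above: a TokenSequence
# whose tokens are just the constant list it was constructed with, so that
# concat() can be checked against plain list concatenation. The token type
# is assumed to be str here.
from typing import Sequence

class _ConstTokSeqTestImpl(TokenSequence):
    def __init__(self, tokens: Sequence[str]):
        self._tokens = tokens

    @property
    def tokens(self) -> Sequence[str]:
        return self._tokens
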
def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        self.transformed.tokenization(),
        TokenSequence.optional_new_line(),
        token_sequences.OptionWMandatoryValue.of_option_name(
            string_transformer.WITH_TRANSFORMED_CONTENTS_OPTION_NAME,
            self.transformer.tokenization(),
        ),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.optional_new_line(),
        OptionWMandatoryValue.of_option_name(
            OUTPUT_FROM_PROGRAM_OPTION_NAME,
            self._program.tokenization(),
        ),
        TokenSequence.new_line(),
        self._expectation.tokenization(),
    ])

def test_fail_when_missing_end_end_paren(self):
    # ARRANGE #
    valid_string = str_abs_stx.StringLiteralAbsStx('contents')
    missing_end_paren = CustomAbsStx(
        TokenSequence.concat([
            TokenSequence.singleton('('),
            valid_string.tokenization(),
        ]))
    # ACT & ASSERT #
    parse_check.checker().check_invalid_syntax__abs_stx(
        self,
        OptionallyOnNewLine(missing_end_paren))

def test_fail_when_missing_end_end_paren(self):
    # ARRANGE #
    valid_program = ProgramOfSymbolReferenceAbsStx('PROGRAM_SYMBOL')
    missing_end_paren = CustomAbsStx(
        TokenSequence.concat([
            TokenSequence.singleton('('),
            valid_program.tokenization(),
        ])
    )
    # ACT & ASSERT #
    PARSE_CHECKER.check_invalid_syntax__abs_stx(
        self,
        OptionallyOnNewLine(missing_end_paren),
    )

def tokenization(self) -> TokenSequence:
    # An optional lines filter and an optional preserve-new-lines flag
    # precede the regex/replacement pair.
    return TokenSequence.concat([
        TokenSequence.singleton(names.REPLACE_TRANSFORMER_NAME),
        TokenSequence.optional_new_line(),
        token_sequences.FollowedByOptionalNewLineIfNonEmpty(
            self.lines_filter.tokenization()
        ),
        token_sequences.FollowedByOptionalNewLineIfNonEmpty(
            token_sequences.OptionalOption.of_option_name(
                names.PRESERVE_NEW_LINES_OPTION_NAME,
                self.preserve_new_lines,
            )
        ),
        self.regex_token.tokenization(),
        TokenSequence.optional_new_line(),
        self.replacement_token.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(matcher_options.EMPTY_ARGUMENT),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.optional_new_line(),
        self._expectation.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat(
        [argument.tokenization() for argument in self._arguments])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(defs.CONTINUATION_TOKEN),
        TokenSequence.new_line(),
        self._argument_on_next_line.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(matcher.RUN_PROGRAM),
        TokenSequence.optional_new_line(),
        self._program.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(matcher_options.EQUALS_ARGUMENT),
        TokenSequence.optional_new_line(),
        self._expected.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        TokenSequence.singleton(names.RUN_PROGRAM_TRANSFORMER_NAME),
        TokenSequence.optional_new_line(),
        self._program.tokenization(),
    ])

def tokenization(self) -> TokenSequence:
    return TokenSequence.concat([
        self._type_and_sym_name_tokens(),
        abstract_syntaxes.AssignmentOfOptionalValue(
            self.value).tokenization(),
    ])