def _transformation_tokenizer(transformer: TokenSequence) -> TokenSequence:
    # Renders the transformation option on a new line, with
    # `transformer` as the option's mandatory argument.
    return TokenSequence.concat([
        TokenSequence.new_line(),
        token_sequences.OptionWMandatoryValue.of_option_name(
            syntax_elements.WITH_TRANSFORMED_CONTENTS_OPTION_NAME,
            transformer,
        ),
    ])
def _stdin_tokenizer(string_source: TokenSequence) -> TokenSequence:
    # Renders the stdin option on a new line, with `string_source`
    # as the option's mandatory argument.
    return TokenSequence.concat([
        TokenSequence.new_line(),
        token_sequences.OptionWMandatoryValue.of_option_name(
            syntax_elements.STDIN_OPTION_NAME,
            string_source,
        ),
    ])
def tokenization(self) -> TokenSequence:
    # Renders the output-from-program option (with the program as its
    # mandatory argument), followed by the expectation on a new line.
    return TokenSequence.concat([
        TokenSequence.optional_new_line(),
        OptionWMandatoryValue.of_option_name(
            OUTPUT_FROM_PROGRAM_OPTION_NAME,
            self._program.tokenization(),
        ),
        TokenSequence.new_line(),
        self._expectation.tokenization(),
    ])
def tokenization(self) -> TokenSequence:
    # Renders the begin delimiter, the file specs separated by new
    # lines, and the end delimiter.
    file_specs = collection.intersperse_list(
        TokenSequence.new_line(),
        [fs.tokenization() for fs in self._files],
    )
    return TokenSequence.concat([
        TokenSequence.singleton(self.delimiter__begin),
        TokenSequence.preceded_by_optional_new_line_if_non_empty(
            TokenSequence.concat(file_specs),
        ),
        TokenSequence.optional_new_line(),
        TokenSequence.singleton(self.delimiter__end),
    ])
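`collection.intersperse_list` is not shown here; judging from the call site above, it presumably places the given separator between consecutive elements of the list. A minimal sketch of those presumed semantics (an assumption, with hypothetical parameter names, not the library's actual implementation):

from typing import List, TypeVar

T = TypeVar('T')

def intersperse_list(separator: T, items: List[T]) -> List[T]:
    # Presumed semantics (assumption, not from the source): the
    # elements of `items` with `separator` inserted between each pair.
    result: List[T] = []
    for i, item in enumerate(items):
        if i > 0:
            result.append(separator)
        result.append(item)
    return result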
def runTest(self):
    # ARRANGE #
    cases = [
        NameAndValue(
            'layout spec w optional new-line as empty',
            LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_EMPTY,
        ),
        NameAndValue(
            'layout spec w optional new-line as new-line',
            LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_NEW_LINE,
        ),
    ]
    tok_seq = TokenSequence.new_line()
    for case in cases:
        with self.subTest(case.name):
            # ACT #
            actual = tok_seq.layout(case.value)
            # ASSERT #
            self.assertEqual('\n', actual)
def tokenization(self) -> TokenSequence:
    # Renders a line continuation: the continuation token, a new
    # line, and then the argument that continues on the next line.
    return TokenSequence.concat([
        TokenSequence.singleton(defs.CONTINUATION_TOKEN),
        TokenSequence.new_line(),
        self._argument_on_next_line.tokenization(),
    ])
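For orientation, a minimal composition sketch using only operations that appear above (`concat`, `singleton`, `new_line`, `layout`). It assumes, without confirmation from these snippets, that singleton tokens may be plain strings; `LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_NEW_LINE` is the layout spec exercised by the test above, and the function name is hypothetical:

def _continuation_layout_example() -> str:
    # Hypothetical example (not from the source): build a continuation
    # sequence like the tokenizer above and render it to source text.
    tok_seq = TokenSequence.concat([
        TokenSequence.singleton('\\'),  # assumed: tokens may be plain strings
        TokenSequence.new_line(),
        TokenSequence.singleton('arg-on-next-line'),
    ])
    return tok_seq.layout(LAYOUT_SPEC__OPTIONAL_NEW_LINE_AS_NEW_LINE)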