Example 1
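A CR LF pair inside a chunk of user data collapses to a single "\n", emitted as its own event between the surrounding text: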
def test_stream_user_data_crlf(stream_parser):
    events = stream_parser.stream_updates([Tokenizer.StreamData(b"Hello,\r\nworld!")])
    assert events == [
        StreamParser.UserData("Hello,"),
        StreamParser.UserData("\n"),
        StreamParser.UserData("world!"),
    ]
Example 2
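A Telnet command may arrive between the CR and the LF of a line ending; the parser emits the command and still collapses the split CR LF into a single "\n":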
def test_stream_command_in_crlf(stream_parser):
    events = stream_parser.stream_updates(
        [
            Tokenizer.StreamData(b"abc\r"),
            Tokenizer.Command(B.NOP, B.NOP.value),
            Tokenizer.StreamData(b"\ndef"),
        ]
    )
    assert events == [
        StreamParser.UserData("abc"),
        StreamParser.Command(B.NOP, B.NOP.value),
        StreamParser.UserData("\n"),
        StreamParser.UserData("def"),
    ]
Example 3
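_write accepts either a plain str (wrapped into a StreamParser.UserData event) or an existing event, stuffs it into wire bytes through the encoder, and hands the result to the transport: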
    def _write(self, text):
        if isinstance(text, str):
            text = StreamParser.UserData(text)

        out_data = self.encoder.stuff(text)
        logger.debug("writing", extra={"data": out_data})
        self.writer.write(out_data)
Example 4
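A byte outside the ASCII range (0xab here) is dropped from the decoded user data rather than raising: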
def test_stream_user_data_nonascii(stream_parser):
    events = stream_parser.stream_updates([Tokenizer.StreamData(b"abc\xabdef")])
    assert events == [StreamParser.UserData("abcdef")]
Example 5
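Plain ASCII user data passes through as a single event: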
def test_stream_user_data(stream_parser):
    events = stream_parser.stream_updates([Tokenizer.StreamData(b"Hello, world!")])
    assert events == [StreamParser.UserData("Hello, world!")]
Example 6
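An end-to-end check: the byte stream below mixes user data with a NOP command, a subnegotiation for option 42 containing an escaped IAC, and a DO negotiation, and is fed through the tokenizer and parser one byte at a time: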
def test_integration(tokenizer, stream_parser):
    data = (
        b"Hel"
        + B.IAC.byte
        + B.NOP.byte
        + b"lo,\r"
        +
        # start a subneg
        B.IAC.byte
        + B.SB.byte
        + bytes([42])
        + b"abc"
        +
        # literal IAC SE as subneg data
        B.IAC.byte
        + B.IAC.byte
        + B.SE.byte
        + b"def"
        +
        # finish the subneg
        B.IAC.byte
        + B.SE.byte
        + b"\0wor"
        + B.IAC.byte
        + B.DO.byte
        + bytes([42])
        + b"ld!"
    )
    atomized = [bytes([b]) for b in data]  # process it one byte at a time

    toks = sum([tokenizer.tokens(b) for b in atomized], [])
    events = sum([stream_parser.stream_updates([tok]) for tok in toks], [])

    assert events == [
        StreamParser.UserData("H"),
        StreamParser.UserData("e"),
        StreamParser.UserData("l"),
        StreamParser.Command(B.NOP, B.NOP.value),
        StreamParser.UserData("l"),
        StreamParser.UserData("o"),
        StreamParser.UserData(","),
        StreamParser.OptionSubnegotiation(None, 42),
        StreamParser.UserData("\r"),
        StreamParser.UserData("w"),
        StreamParser.UserData("o"),
        StreamParser.UserData("r"),
        StreamParser.OptionNegotiation(None, 42, StreamParser.Host.LOCAL, True),
        StreamParser.UserData("l"),
        StreamParser.UserData("d"),
        StreamParser.UserData("!"),
    ]
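Note that UserData("\r") only surfaces after the subnegotiation event: the parser appears to hold a bare CR until the next data byte (the NUL in b"\0wor") reveals whether it belongs to a CR LF or a CR NUL sequence.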
Example 7
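Outgoing line endings are rewritten for the wire: "\n" becomes CR LF, and a bare "\r" becomes CR NUL so it is not mistaken for a line ending: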
def test_stuffer_crlf(stuffer):
    stuffed = stuffer.stuff(StreamParser.UserData("abc\ndef\rghi"))
    assert stuffed == b"abc\r\ndef\r\0ghi"
Example 8
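Text that has no ASCII encoding is rejected outright: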
def test_stuffer_nonascii(stuffer):
    # Non-ASCII text cannot be encoded for the wire and must raise.
    with pytest.raises(UnicodeEncodeError):
        stuffer.stuff(StreamParser.UserData("abcdéf"))
Example 9
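Plain ASCII text is stuffed unchanged: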
def test_stuffer_text(stuffer):
    stuffed = stuffer.stuff(StreamParser.UserData("abc"))
    assert stuffed == b"abc"
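Taken together, Examples 7-9 pin down how the stuffer treats user data. A minimal sketch consistent with those tests (a standalone function written for illustration, not the project's actual implementation):

def stuff_user_data(text: str) -> bytes:
    # Non-ASCII input raises UnicodeEncodeError, as Example 8 expects.
    encoded = text.encode("ascii")
    # Escape bare CRs first, so the CRs introduced for LFs below are
    # not themselves escaped.
    encoded = encoded.replace(b"\r", b"\r\0")  # bare CR -> CR NUL
    encoded = encoded.replace(b"\n", b"\r\n")  # LF -> CR LF
    return encoded

A real Telnet encoder would also double any IAC (0xff) bytes appearing in user data, but these examples do not exercise that path.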