def test_2StringIO(self):
    """
    Both of Python 2's in-memory string buffer modules, L{StringIO} and
    L{cStringIO}, are byte-oriented I/O.
    """
    from StringIO import StringIO
    from cStringIO import StringIO as cStringIO
    # Same assertion for both implementations, in the same order as the
    # original pair of checks: pure-Python first, then the C version.
    for bufferFactory in (StringIO, cStringIO):
        self.assertEqual(ioType(bufferFactory()), bytes)
def extReceived(self, t, data):
    """
    Handle extended-channel data: bytes arriving on the STDERR extended
    stream are logged and copied to this process's own stderr; any other
    extended-data type is ignored.
    """
    if t != connection.EXTENDED_DATA_STDERR:
        return
    log.msg('got {} stderr data'.format(len(data)))
    # A text-mode stderr cannot accept bytes directly; write to its
    # underlying binary buffer instead.
    if ioType(sys.stderr) == unicode:
        target = sys.stderr.buffer
    else:
        target = sys.stderr
    target.write(data)
def test_codecsOpenBytes(self):
    """
    Opening a file via L{codecs.open} without an 'encoding' argument,
    oddly, yields a file-like object with bytes semantics.
    """
    path = self.mktemp()
    with codecs.open(path, 'wb') as stream:
        self.assertEqual(ioType(stream), bytes)
def test_codecsOpenText(self):
    """
    When passed an encoding, however, the L{codecs} module returns
    unicode.
    """
    # Use a context manager so the file is closed (the original leaked
    # the handle), and the non-deprecated assertEqual alias, matching
    # the sibling codecs tests.
    with codecs.open(self.mktemp(), 'wb', encoding='utf-8') as f:
        self.assertEqual(ioType(f), unicodeCompat)
def extReceived(self, t, data):
    """
    Handle extended-channel data: bytes arriving on the STDERR extended
    stream are logged and copied to this process's own stderr; any other
    extended-data type is ignored.
    """
    if t != connection.EXTENDED_DATA_STDERR:
        return
    log.msg(f"got {len(data)} stderr data")
    # A text-mode stderr cannot accept bytes; route them through its
    # underlying binary buffer instead.
    out = sys.stderr.buffer if ioType(sys.stderr) == str else sys.stderr
    out.write(data)
def test_codecsOpenText(self):
    """
    When passed an encoding, however, the L{codecs} module returns
    unicode.
    """
    # Use a context manager so the file is closed (the original leaked
    # the handle), and the non-deprecated assertEqual alias, matching
    # the sibling codecs tests.
    with codecs.open(self.mktemp(), 'wb', encoding='utf-8') as f:
        self.assertEqual(ioType(f), unicodeCompat)
def __init__(self, outFile, formatEvent):
    """
    Remember the output file and event formatter; when the file does not
    accept unicode, arrange for formatted text to be UTF-8 encoded.
    """
    # None means "write text straight through"; otherwise encode first.
    self._encoding = None if ioType(outFile) is unicode else "utf-8"
    self._outFile = outFile
    self.formatEvent = formatEvent
def _streamWriteWrapper(stream):
    """
    Return a one-argument writer for C{stream} that coerces its argument
    to the stream's native I/O type, translating via UTF-8 when needed.
    """
    wantsBytes = ioType(stream) == bytes
    if wantsBytes:
        def write(chunk):
            # Byte-oriented stream: encode any text before writing.
            if isinstance(chunk, unicode):
                chunk = chunk.encode("utf-8")
            stream.write(chunk)
    else:
        def write(chunk):
            # Text-oriented stream: decode any bytes before writing.
            if isinstance(chunk, bytes):
                chunk = chunk.decode("utf-8")
            stream.write(chunk)
    return write
def __init__(self, outFile: IO[Any], formatEvent: Callable[[LogEvent], Optional[str]]) -> None:
    """
    @param outFile: A file-like object.  Ideally one should be passed which
        accepts text data.  Otherwise, UTF-8 L{bytes} will be used.
    @param formatEvent: A callable that formats an event.
    """
    # None means the file accepts text directly; otherwise formatted
    # output will be UTF-8 encoded before writing.
    self._encoding: Optional[str] = None if ioType(outFile) is str else "utf-8"
    self._outFile = outFile
    self.formatEvent = formatEvent
def __init__(self, outFile, formatEvent):
    """
    @param outFile: A file-like object.  Ideally one accepting L{unicode}
        data; otherwise UTF-8 L{bytes} will be written to it.
    @type outFile: L{io.IOBase}

    @param formatEvent: A callable that formats an event.
    @type formatEvent: L{callable} taking an C{event} argument and
        returning the formatted event as L{unicode}.
    """
    # A text-capable file needs no encoding step; anything else gets
    # UTF-8-encoded bytes.
    if ioType(outFile) is unicode:
        self._encoding = None
    else:
        self._encoding = "utf-8"
    self._outFile = outFile
    self.formatEvent = formatEvent
def test_2openTextMode(self):
    """
    The special built-in console file in Python 2, which carries an
    'encoding' attribute, qualifies as a special type since it accepts
    both bytes and text faithfully.
    """
    class FakeConsoleFile(file):
        """
        Stand-in for the console file: CPython does not let Python code
        set 'encoding' on the built-in 'file' type (it is only writable
        from C via PyFile_SetEncoding), so fake it with a subclass.
        """
        encoding = 'utf-8'

    handle = FakeConsoleFile(self.mktemp(), "wb")
    self.assertEqual(ioType(handle), basestring)
def test_2openBinaryMode(self):
    """
    On Python 2 the plain 'open' builtin always produces byte-oriented
    I/O, even when a text mode is requested.
    """
    path = self.mktemp()
    with open(path, "w") as stream:
        self.assertEqual(ioType(stream), bytes)
def test_3openBinaryMode(self):
    """
    A file opened via 'io.open' in binary mode accepts and returns bytes.
    """
    path = self.mktemp()
    with io.open(path, "wb") as stream:
        self.assertEqual(ioType(stream), bytes)
def test_3openTextMode(self):
    """
    A file opened via 'io.open' in text mode accepts and returns text.
    """
    path = self.mktemp()
    with io.open(path, "w") as stream:
        self.assertEqual(ioType(stream), unicodeCompat)
def test_3BytesIO(self):
    """
    An L{io.BytesIO} accepts and returns bytes.
    """
    buffer = io.BytesIO()
    self.assertEqual(ioType(buffer), bytes)
def test_3StringIO(self):
    """
    An L{io.StringIO} accepts and returns text.
    """
    buffer = io.StringIO()
    self.assertEqual(ioType(buffer), unicodeCompat)
def test_defaultToText(self):
    """
    For an object about which no sensible I/O-type decision can be made,
    L{ioType} errs on the side of unicode.
    """
    opaque = object()
    self.assertEqual(ioType(opaque), unicodeCompat)
def shellComplete(config, cmdName, words, shellCompFile): """ Perform shell completion. A completion function (shell script) is generated for the requested shell and written to C{shellCompFile}, typically C{stdout}. The result is then eval'd by the shell to produce the desired completions. @type config: L{twisted.python.usage.Options} @param config: The L{twisted.python.usage.Options} instance to generate completions for. @type cmdName: C{str} @param cmdName: The name of the command we're generating completions for. In the case of zsh, this is used to print an appropriate "#compdef $CMD" line at the top of the output. This is not necessary for the functionality of the system, but it helps in debugging, since the output we produce is properly formed and may be saved in a file and used as a stand-alone completion function. @type words: C{list} of C{str} @param words: The raw command-line words passed to use by the shell stub function. argv[0] has already been stripped off. @type shellCompFile: C{file} @param shellCompFile: The file to write completion data to. """ # If given a file with unicode semantics, such as sys.stdout on Python 3, # we must get at the the underlying buffer which has bytes semantics. if shellCompFile and ioType(shellCompFile) == str: shellCompFile = shellCompFile.buffer # shellName is provided for forward-compatibility. It is not used, # since we currently only support zsh. shellName, position = words[-1].split(":") position = int(position) # zsh gives the completion position ($CURRENT) as a 1-based index, # and argv[0] has already been stripped off, so we subtract 2 to # get the real 0-based index. position -= 2 cWord = words[position] # since the user may hit TAB at any time, we may have been called with an # incomplete command-line that would generate getopt errors if parsed # verbatim. However, we must do *some* parsing in order to determine if # there is a specific subcommand that we need to provide completion for. 
# So, to make the command-line more sane we work backwards from the # current completion position and strip off all words until we find one # that "looks" like a subcommand. It may in fact be the argument to a # normal command-line option, but that won't matter for our purposes. while position >= 1: if words[position - 1].startswith("-"): position -= 1 else: break words = words[:position] subCommands = getattr(config, "subCommands", None) if subCommands: # OK, this command supports sub-commands, so lets see if we have been # given one. # If the command-line arguments are not valid then we won't be able to # sanely detect the sub-command, so just generate completions as if no # sub-command was found. args = None try: opts, args = getopt.getopt(words, config.shortOpt, config.longOpt) except getopt.error: pass if args: # yes, we have a subcommand. Try to find it. for (cmd, short, parser, doc) in config.subCommands: if args[0] == cmd or args[0] == short: subOptions = parser() subOptions.parent = config gen = ZshSubcommandBuilder(subOptions, config, cmdName, shellCompFile) gen.write() return # sub-command not given, or did not match any knowns sub-command names genSubs = True if cWord.startswith("-"): # optimization: if the current word being completed starts # with a hyphen then it can't be a sub-command, so skip # the expensive generation of the sub-command list genSubs = False gen = ZshBuilder(config, cmdName, shellCompFile) gen.write(genSubs=genSubs) else: gen = ZshBuilder(config, cmdName, shellCompFile) gen.write()
def test_codecsOpenText(self):
    """
    L{codecs.open} given an explicit encoding, however, returns a
    text-mode (unicode) file-like object.
    """
    path = self.mktemp()
    with codecs.open(path, "wb", encoding="utf-8") as stream:
        self.assertEqual(ioType(stream), str)