def test_log_output_with_filter(capfd, tmpdir):
    """Check that filter_fn rewrites echoed text but never the log file."""
    # First pass: no echo.  The file keeps the raw lines and stdout is empty.
    with tmpdir.as_cwd():
        with log_output('foo.txt', filter_fn=_log_filter_fn):
            print('foo blah')
            print('blah foo')
            print('foo foo')

        # foo.txt output is not filtered
        with open('foo.txt') as f:
            assert f.read() == 'foo blah\nblah foo\nfoo foo\n'

        # output is not echoed
        assert capfd.readouterr()[0] == ''

    # Second pass: echo enabled.  The file is still raw, but what reaches
    # the terminal has been run through the filter.
    with tmpdir.as_cwd():
        with log_output('foo.txt', echo=True, filter_fn=_log_filter_fn):
            print('foo blah')
            print('blah foo')
            print('foo foo')

        # foo.txt output is still not filtered
        with open('foo.txt') as f:
            assert f.read() == 'foo blah\nblah foo\nfoo foo\n'

        # echoed output is filtered.
        assert capfd.readouterr()[0] == 'bar blah\nblah bar\nbar bar\n'
def synchronized_logger(**kwargs):
    """Mock logger (minion) process for testing log.keyboard_input.

    This logger synchronizes with the parent process to test that 'v' can
    toggle output. It is used in ``test_foreground_background_output`` below.
    """
    running = [True]

    def handler(signum, frame):
        # SIGUSR1 from the parent is the shutdown signal.
        running[0] = False

    signal.signal(signal.SIGUSR1, handler)

    log_path = kwargs["log_path"]
    write_lock = kwargs["write_lock"]
    v_lock = kwargs["v_lock"]

    sys.stderr.write(os.getcwd() + "\n")
    with log_output(log_path) as logger:
        with logger.force_echo():
            print("forced output")

        while running[0]:
            with write_lock:
                # A non-blocking acquire tells us whether the parent is
                # holding v_lock, i.e. whether 'v' is currently toggled on.
                if v_lock.acquire(False):
                    print("off")
                    v_lock.release()
                else:
                    print("on")  # lock held; v is toggled on
            time.sleep(1e-2)
def test_log_python_output_with_fd_stream(capfd, tmpdir):
    """Output printed under log_output goes to the file, not the terminal."""
    # Bug fix: the ``tmpdir`` fixture was requested but never used, so
    # 'foo.txt' was written into the test runner's current directory,
    # polluting it and racing with other tests.  Run inside tmpdir instead
    # (consistent with the other tests in this file).
    with tmpdir.as_cwd():
        with log_output('foo.txt'):
            print('logged')

        # the print was captured in the log file
        with open('foo.txt') as f:
            assert f.read() == 'logged\n'

        # nothing was echoed to stdout/stderr
        assert capfd.readouterr() == ('', '')
def test_log_python_output_with_fd_stream(capfd, tmpdir):
    """Printed output lands in the log file and nothing is echoed."""
    with tmpdir.as_cwd():
        with log_output('foo.txt'):
            print('logged')

        # the log file caught the print
        with open('foo.txt') as f:
            assert f.read() == 'logged\n'

        # stdout and stderr both stayed empty
        assert capfd.readouterr() == ('', '')
def test_log_python_output_and_echo_output(capfd, tmpdir):
    """force_echo() output is echoed; everything is logged to the file."""
    # Bug fix: the ``tmpdir`` fixture was requested but never used, so
    # 'foo.txt' leaked into the runner's cwd.  Wrap in tmpdir.as_cwd(),
    # matching the other variants of this test in the file.
    with tmpdir.as_cwd():
        with log_output('foo.txt') as logger:
            with logger.force_echo():
                print('echo')
            print('logged')

        # only the force-echoed line reached stdout
        assert capfd.readouterr() == ('echo\n', '')

        # the log file holds both lines
        with open('foo.txt') as f:
            assert f.read() == 'echo\nlogged\n'
def test_log_python_output_with_fd_stream(capfd, tmpdir):
    """A print under log_output is written to the file and not echoed."""
    with tmpdir.as_cwd():
        with log_output('foo.txt'):
            print('logged')

        with open('foo.txt') as f:
            assert f.read() == 'logged\n'

        # Coverage is cluttering stderr during tests
        assert capfd.readouterr()[0] == ''
def test_log_python_output_with_echo(capfd, tmpdir):
    """With echo=True the output is both logged and shown on stdout."""
    with tmpdir.as_cwd():
        with log_output('foo.txt', echo=True):
            print('logged')

        # foo.txt has output
        with open('foo.txt') as f:
            assert f.read() == 'logged\n'

        # output is also echoed.
        assert capfd.readouterr()[0] == 'logged\n'
def test_log_python_output_without_echo(capfd, tmpdir):
    """Without echo, output goes only to the log file."""
    with tmpdir.as_cwd():
        with log_output('foo.txt'):
            print('logged')

        # foo.txt has output
        with open('foo.txt') as f:
            assert f.read() == 'logged\n'

        # nothing on stdout or stderr
        assert capfd.readouterr()[0] == ''
def test_log_python_output_and_echo_output(capfd, tmpdir):
    """force_echo() echoes its block; the log file records everything."""
    with tmpdir.as_cwd():
        with log_output('foo.txt') as logger:
            with logger.force_echo():
                print('echo')
            print('logged')

        # only the forced line was echoed
        assert capfd.readouterr() == ('echo\n', '')

        # both lines made it into the file
        with open('foo.txt') as f:
            assert f.read() == 'echo\nlogged\n'
def test_log_python_output_with_python_stream(capsys, tmpdir):
    """Logging works when pytest's Python-level capture is disabled."""
    # Bug fix: the ``tmpdir`` fixture was requested but never used, so
    # 'foo.txt' was created in the runner's cwd.  Enter tmpdir first,
    # matching the other tests in this file.
    with tmpdir.as_cwd():
        # pytest's DontReadFromInput object does not like what we do here, so
        # disable capsys or things hang.
        with capsys.disabled():
            with log_output('foo.txt'):
                print('logged')

            with open('foo.txt') as f:
                assert f.read() == 'logged\n'

        assert capsys.readouterr() == ('', '')
def __call__(self, *argv, **kwargs):
    """Invoke this SpackCommand.

    Args:
        argv (list): command line arguments.

    Keyword Args:
        fail_on_error (optional bool): Don't raise an exception on error
        global_args (optional list): List of global spack arguments:
            simulates ``spack [global_args] [command] [*argv]``

    Returns:
        (str): combined output and error as a string

    On return, if ``fail_on_error`` is False, return value of command
    is set in ``returncode`` property, and the error is set in the
    ``error`` property.  Otherwise, raise an error.
    """
    # set these before every call to clear them out
    self.returncode = None
    self.error = None

    # Idiom fix: dict.get() reads the optional keyword in one lookup
    # instead of the membership-test-plus-index conditional.
    prepend = kwargs.get('global_args', [])

    args, unknown = self.parser.parse_known_args(
        prepend + [self.command_name] + list(argv))

    fail_on_error = kwargs.get('fail_on_error', True)

    out = StringIO()
    try:
        # capture everything the command writes into ``out``
        with log_output(out):
            self.returncode = _invoke_command(
                self.command, self.parser, args, unknown)

    except SystemExit as e:
        # argparse and some commands exit; record the code instead
        self.returncode = e.code

    except BaseException as e:
        tty.debug(e)
        self.error = e
        if fail_on_error:
            self._log_command_output(out)
            raise

    if fail_on_error and self.returncode not in (None, 0):
        self._log_command_output(out)
        raise SpackCommandError(
            "Command exited with code %d: %s(%s)" %
            (self.returncode, self.command_name,
             ', '.join("'%s'" % a for a in argv)))

    return out.getvalue()
def simple_logger(**kwargs):
    """Mock logger (minion) process for testing log.keyboard_input."""
    running = [True]

    def handler(signum, frame):
        # parent sends SIGUSR1 to ask us to stop
        running[0] = False

    signal.signal(signal.SIGUSR1, handler)

    log_path = kwargs["log_path"]
    with log_output(log_path):
        while running[0]:
            print("line")
            time.sleep(1e-3)
def test_log_python_output_and_echo_output(capfd, tmpdir):
    """Only force_echo()'d lines are echoed; the file logs everything."""
    with tmpdir.as_cwd():
        with log_output('foo.txt') as logger:
            with logger.force_echo():
                print('echo')
            print('logged')

        # Coverage is cluttering stderr during tests
        assert capfd.readouterr()[0] == 'echo\n'

        with open('foo.txt') as f:
            assert f.read() == 'echo\nlogged\n'
def test_log_python_output_with_python_stream(capsys, tmpdir):
    """Logging to a file works with pytest's Python-level capture off."""
    # pytest's DontReadFromInput object does not like what we do here, so
    # disable capsys or things hang.
    with tmpdir.as_cwd():
        with capsys.disabled():
            with log_output('foo.txt'):
                print('logged')

            with open('foo.txt') as f:
                assert f.read() == 'logged\n'

        assert capsys.readouterr() == ('', '')
def test_log_subproc_and_echo_output(capfd, tmpdir):
    """Subprocess output inside force_echo() is echoed; prints are logged."""
    echo = which('echo')

    # Bug fix: the ``tmpdir`` fixture was requested but never used, so
    # 'foo.txt' was written into the runner's cwd.  Enter tmpdir first,
    # as the other subprocess tests in this file do.
    with tmpdir.as_cwd():
        with log_output('foo.txt') as logger:
            with logger.force_echo():
                echo('echo')
            print('logged')

        # the subprocess line was echoed
        assert capfd.readouterr() == ('echo\n', '')
        # capfd interferes with subprocess data reaching the log file,
        # so only the Python print shows up here
        with open('foo.txt') as f:
            assert f.read() == 'logged\n'
def test_log_subproc_output(capsys, tmpdir):
    """Subprocess output is captured into the log file."""
    echo = which('echo')

    # Bug fix: the ``tmpdir`` fixture was requested but never used, so
    # 'foo.txt' leaked into the runner's cwd.  Wrap in tmpdir.as_cwd(),
    # consistent with the rest of this file.
    with tmpdir.as_cwd():
        # pytest seems to interfere here, so we need to use capsys.disabled()
        # TODO: figure out why this is and whether it means we're doing
        # sometihng wrong with OUR redirects.  Seems like it should work even
        # with capsys enabled.
        with capsys.disabled():
            with log_output('foo.txt'):
                echo('logged')

            with open('foo.txt') as f:
                assert f.read() == 'logged\n'
def test_log_subproc_and_echo_output_capfd(capfd, tmpdir):
    """Verify what is echoed for a force_echo()'d subprocess."""
    echo = which('echo')

    # This tests *only* what is echoed when using a subprocess, as capfd
    # interferes with the logged data. See
    # test_log_subproc_and_echo_output_no_capfd for tests on the logfile.
    with tmpdir.as_cwd():
        with log_output('foo.txt') as logger:
            with logger.force_echo():
                echo('echo')
            print('logged')

        assert capfd.readouterr()[0] == "echo\n"
def test_log_python_output_and_echo_output(capfd, tmpdir):
    """The file logs all lines; stdout shows only the force-echoed one."""
    with tmpdir.as_cwd():
        # echo two lines
        with log_output('foo.txt') as logger:
            with logger.force_echo():
                print('force echo')
            print('logged')

        # log file contains everything
        with open('foo.txt') as f:
            assert f.read() == 'force echo\nlogged\n'

        # only force-echo'd stuff is in output
        assert capfd.readouterr()[0] == 'force echo\n'
def test_log_subproc_output(capsys, tmpdir):
    """A subprocess's stdout is captured into the log file."""
    echo = which('echo')

    # pytest seems to interfere here, so we need to use capsys.disabled()
    # TODO: figure out why this is and whether it means we're doing
    # sometihng wrong with OUR redirects.  Seems like it should work even
    # with capsys enabled.
    with tmpdir.as_cwd():
        with capsys.disabled():
            with log_output('foo.txt'):
                echo('logged')

            with open('foo.txt') as f:
                assert f.read() == 'logged\n'
def test_log_subproc_and_echo_output_no_capfd(capfd, tmpdir):
    """Verify the logfile contents for a force_echo()'d subprocess."""
    echo = which('echo')

    # this is split into two tests because capfd interferes with the
    # output logged to file when using a subprocess.  We test the file
    # here, and echoing in test_log_subproc_and_echo_output_capfd below.
    with capfd.disabled():
        with tmpdir.as_cwd():
            with log_output('foo.txt') as logger:
                with logger.force_echo():
                    echo('echo')
                print('logged')

            with open('foo.txt') as f:
                assert f.read() == 'echo\nlogged\n'
def test_log_python_output_with_invalid_utf8(capfd, tmpdir):
    """Invalid UTF-8 written to stdout is handled without crashing."""
    with tmpdir.as_cwd():
        with log_output('foo.txt'):
            # b'\xc3\x28' is a malformed two-byte UTF-8 sequence
            sys.stdout.buffer.write(b'\xc3\x28\n')

        # python2 and 3 treat invalid UTF-8 differently
        # NOTE(review): sys.stdout has no ``buffer`` attribute on Python 2,
        # so the py2 branch looks unreachable here — confirm before removing.
        if sys.version_info.major == 2:
            expected = b'\xc3(\n'
        else:
            expected = b'<line lost: output was not encoded as UTF-8>\n'

        with open('foo.txt', 'rb') as f:
            written = f.read()
            assert written == expected

        # nothing on stdout or stderr
        assert capfd.readouterr()[0] == ''
def __call__(self, *argv, **kwargs):
    """Invoke this PymodCommand.

    Args:
        argv (list of str): command line arguments.

    Keyword Args:
        fail_on_error (optional bool): Don't raise an exception on error

    Returns:
        (str): combined output and error as a string

    On return, if ``fail_on_error`` is False, return value of command
    is set in ``returncode`` property, and the error is set in the
    ``error`` property.  Otherwise, raise an error.
    """
    # clear results of any previous invocation
    self.returncode = None
    self.error = None

    parsed, extra = self.parser.parse_known_args(
        [self.command_name] + list(argv))

    fail_on_error = kwargs.get("fail_on_error", True)

    captured = StringIO()
    try:
        # everything the command prints is collected in ``captured``
        with log_output(captured):
            self.returncode = _invoke_command(
                self.command, self.parser, parsed, extra)
    except SystemExit as exc:
        # record the exit code instead of letting the test process die
        self.returncode = exc.code
    except BaseException as exc:
        self.error = exc
        if fail_on_error:
            raise

    if fail_on_error and self.returncode not in (None, 0):
        raise PymodCommandError("Command exited with code %d: %s(%s)" % (
            self.returncode,
            self.command_name,
            ", ".join("'%s'" % a for a in argv),
        ))

    return captured.getvalue()
def __call__(self, *argv, **kwargs):
    """Invoke this SpackCommand.

    Args:
        argv (list of str): command line arguments.

    Keyword Args:
        fail_on_error (optional bool): Don't raise an exception on error

    Returns:
        (str): combined output and error as a string

    On return, if ``fail_on_error`` is False, return value of command
    is set in ``returncode`` property, and the error is set in the
    ``error`` property.  Otherwise, raise an error.
    """
    # wipe the results of any earlier call
    self.returncode = None
    self.error = None

    parsed, extra = self.parser.parse_known_args(
        [self.command_name] + list(argv))

    fail_on_error = kwargs.get('fail_on_error', True)

    captured = StringIO()
    try:
        # route everything the command prints into ``captured``
        with log_output(captured):
            self.returncode = _invoke_command(
                self.command, self.parser, parsed, extra)
    except SystemExit as exc:
        # commands may sys.exit(); keep the code rather than exiting
        self.returncode = exc.code
    except BaseException as exc:
        self.error = exc
        if fail_on_error:
            raise

    if fail_on_error and self.returncode not in (None, 0):
        raise SpackCommandError(
            "Command exited with code %d: %s(%s)" % (
                self.returncode,
                self.command_name,
                ', '.join("'%s'" % a for a in argv)))

    return captured.getvalue()