def test_json_encoded_argument_processing_string_input(self):  # pylint: disable=invalid-name
    """Verify json_encoded (src/params.py) handles raw json string input.

    - Malformed json prints the ".txt file" usage hint.
    - Malformed json raises ValueError.
    - Valid json is deserialized into the matching Python object.
    """
    bad_inputs = ('', '{3.14 : "pie"}')

    # Each malformed input should print the usage hint about @-prefixed files.
    for bad in bad_inputs:
        str_io = StringIO()
        with redirect_stdout(str_io):
            try:
                json_encoded(bad)
            except Exception:  # pylint: disable=broad-except
                pass
        printed_output = str_io.getvalue()
        self.assertIn('You can also pass the json argument in a .txt file', printed_output)
        self.assertIn('To do so, set argument value to the '
                      'absolute path of the text file prefixed by "@".', printed_output)

    # Capture output to keep the test run clean while asserting that
    # incorrectly formatted json raises ValueError.
    str_io = StringIO()
    with redirect_stdout(str_io):
        for bad in bad_inputs:
            with self.assertRaises(ValueError):
                json_encoded(bad)

    # Correct json is serialized correctly.
    self.assertEqual({'k': 23}, json_encoded('{"k": 23}'))
def test_profile_parser(self):
    """Verify that parse_profile yields the expected header and rows."""
    # Profile a trivial statement and feed cProfile's report to the parser.
    with contextlib.closing(StringIO()) as stream:
        with contextlib.redirect_stdout(stream):
            cProfile.run('print()')
        stream.seek(0)
        actual = list(parse_profile(stream))

    header = ['ncalls', 'tottime', 'percall', 'cumtime', 'percall',
              'filename:lineno(function)']
    zeros = ['1', '0.000', '0.000', '0.000', '0.000']
    expected = [
        header,
        zeros + ['<string>:1(<module>)'],
        zeros + ['{built-in method builtins.exec}'],
        zeros + ['{built-in method builtins.print}'],
        zeros + ["{method 'disable' of '_lsprof.Profiler' objects}"],
    ]
    self.assertListEqual(actual, expected)
def __call__(self, command: str, echo: Optional[bool] = None) -> CommandResult:
    """
    Run an application command through the wrapped cmd2 app, e.g. app('help').

    :param command: command line being run
    :param echo: if True, output is echoed to stdout/stderr while the command
                 runs; this temporarily overrides the value of self.cmd_echo
    """
    do_echo = self.cmd_echo if echo is None else echo

    # One sim captures _cmd2_app.stdout and sys.stdout, the other sys.stderr.
    stdout_sim = StdSim(self._cmd2_app.stdout, do_echo)
    stderr_sim = StdSim(sys.stderr, do_echo)

    self._cmd2_app._last_result = None
    try:
        self._cmd2_app.stdout = stdout_sim
        with redirect_stdout(stdout_sim), redirect_stderr(stderr_sim):
            # Include a newline in case it's a multiline command
            self._cmd2_app.onecmd_plus_hooks(command + '\n')
    finally:
        self._cmd2_app.stdout = stdout_sim.inner_stream

    # Save the output. If stderr is empty, set it to None.
    captured_err = stderr_sim.getvalue()
    return CommandResult(stdout=stdout_sim.getvalue(),
                         stderr=captured_err if captured_err else None,
                         data=self._cmd2_app._last_result)
def _exec_cmd(thgcmd_app, command: str, echo: bool) -> CommandResult:
    """
    Run a command on a thgcmd app and capture the results.

    :param thgcmd_app: thgcmd app that will run the command
    :param command: command line being run
    :param echo: if True, output will be echoed to stdout/stderr while the command runs
    :return: result of the command
    """
    sim_stdout = StdSim(sys.stdout, echo)
    sim_stderr = StdSim(sys.stderr, echo)
    sim_cmd_stdout = StdSim(thgcmd_app.stdout, echo)

    thgcmd_app._last_result = None
    try:
        thgcmd_app.stdout = sim_cmd_stdout
        with redirect_stdout(sim_stdout), redirect_stderr(sim_stderr):
            # Include a newline in case it's a multiline command
            thgcmd_app.onecmd_plus_hooks(command + "\n")
    finally:
        thgcmd_app.stdout = sim_cmd_stdout.inner_stream

    # If stderr is empty, report it as None; prefer the app's own stdout
    # capture and fall back to the sys.stdout capture.
    err = sim_stderr.getvalue() or None
    out = sim_cmd_stdout.getvalue() or sim_stdout.getvalue()
    return CommandResult(stdout=out, stderr=err, data=thgcmd_app._last_result)
def _exec_cmd(cmd2_app, func: Callable, echo: bool):
    """Run func() against a cmd2 app, capturing output into a CommandResult."""
    mirror_stdout = CopyStream(sys.stdout, echo)
    mirror_stderr = CopyStream(sys.stderr, echo)
    mirror_cmd_stdout = CopyStream(cmd2_app.stdout, echo)

    cmd2_app._last_result = None
    try:
        cmd2_app.stdout = mirror_cmd_stdout
        with redirect_stdout(mirror_stdout), redirect_stderr(mirror_stderr):
            func()
    finally:
        cmd2_app.stdout = mirror_cmd_stdout.inner_stream

    # If stderr is empty, report it as None; prefer the app's own stdout
    # buffer and fall back to the sys.stdout buffer.
    err = mirror_stderr.buffer or None
    out = mirror_cmd_stdout.buffer or mirror_stdout.buffer
    return CommandResult(stdout=out, stderr=err, data=cmd2_app._last_result)
def test_cli(self):
    """`ssh_config -v` prints a version banner starting with the tool name."""
    buf = StringIO()
    with redirect_stdout(buf):
        try:
            cli.main(["ssh_config", "-v"])
        except SystemExit:
            # argparse version/help actions exit; that's expected.
            pass
    self.assertTrue(buf.getvalue().strip().startswith("ssh_config"))
def _get_console_out(to_eval, namespace): fake_out, fake_err = StringIO(), StringIO() console = code.InteractiveConsole(locals=namespace) with redirect_stdout(fake_out), redirect_stderr(fake_err): for function in namespace.get("functions", []): for statement in function.split("\\n"): console.push(statement) for statement in to_eval.split("\n"): if statement: console.push(statement) else: console.push('\n') return fake_out.getvalue(), fake_err.getvalue()
def test_cli_help(self):
    """The command line script is self-documenting via --help."""
    captured = six.StringIO()
    # --help makes argparse exit after printing usage.
    with self.assertRaises(SystemExit):
        with contextlib.redirect_stdout(captured):
            pythonprojectstructure.main(args=["--help"])
    self.assertIn(
        pythonprojectstructure.__doc__.strip(),
        captured.getvalue(),
        "The console script name missing from --help output",
    )
def test_read_sigproc(self):
    """Capture print output, assert it is a long string"""
    gulp_nframe = 101
    captured = io.BytesIO()
    with ExitStack() as stack:
        pipeline = stack.enter_context(bfp.Pipeline())
        stack.enter_context(redirect_stdout(captured))
        raw = blocks.sigproc.read_sigproc([self.fil_file], gulp_nframe)
        blocks.print_header(raw)
        pipeline.run()
    # The header dump should be substantially longer than a trivial string.
    self.assertGreater(len(captured.getvalue()), 10)
def test_cli_help(self):
    """The command line script is self-documenting via --help."""
    captured = six.StringIO()
    # --help makes argparse exit after printing usage.
    with self.assertRaises(SystemExit):
        with contextlib.redirect_stdout(captured):
            mainwrapper.main(args=["--help"])
    # Compare only the first 55 chars of the docstring's first line, since
    # help output wraps long lines.
    first_line_prefix = mainwrapper.__doc__.strip().split("\n")[0][:55]
    self.assertIn(
        first_line_prefix,
        captured.getvalue(),
        "The console script name missing from --help output",
    )
def test_ls_with_pattern(self):
    """`ls` with a glob pattern lists only the matching hosts as a table."""
    # NOTE(review): the exact column alignment (run-lengths of spaces) inside
    # this expected table appears to have been lost in reformatting — the
    # widths below must match the CLI's table output exactly; verify against
    # a live run of `ssh_config -f <sample> ls "server_*"`.
    expect = u"""\
Host HostName User Port IdentityFile
========================================================
server_cmd_1 203.0.113.76 None 2202 None
server_cmd_2 203.0.113.76 user 22 None
server_cmd_3 203.0.113.76 user 2202 None
"""
    f = StringIO()
    with redirect_stdout(f):
        cli.main(["ssh_config", "-f", sample, "ls", "server_*"])
    output = f.getvalue()
    # Show full diffs on mismatch — the table is wide.
    self.maxDiff = None
    self.assertEqual(expect, output)
def test__parse_data__too_short_error(mocker):
    """
    Test "_parse_data" method must exit with too short response error.
    """
    captured = StringIO()
    mocker.patch("sys.argv",
                 ["check_hddtemp.py", "-s", "127.0.0.1", "-p", "7634"])
    checker = CheckHDDTemp()

    with pytest.raises(SystemExit):
        with contextlib2.redirect_stdout(captured):
            checker._parse_data(data="")

    assert "ERROR: Server response too short" in captured.getvalue().strip()  # nosec: B101
def test__parse_data__parsing_error(mocker):
    """
    Test "_parse_data" method must exit with parsing error.
    """
    captured = StringIO()
    mocker.patch("sys.argv",
                 ["check_hddtemp.py", "-s", "127.0.0.1", "-p", "7634"])
    checker = CheckHDDTemp()

    # Response is missing the temperature field, so parsing must fail.
    with pytest.raises(SystemExit):
        with contextlib2.redirect_stdout(captured):
            checker._parse_data(data="|/dev/sda|HARD DRIVE|C|")  # noqa: E501

    assert "ERROR: Server response for device" in captured.getvalue().strip()  # nosec: B101
def test__get_data__network_error(mocker):
    """
    Test "_get_data" method must exit with network error.
    """
    out = StringIO()
    mocker.patch("sys.argv",
                 ["check_hddtemp.py", "-s", "127.0.0.1", "-p", "7634"])
    # BUG FIX: the same target was patched twice in a row
    # (side_effect=socket.error, immediately overwritten by EOFError),
    # leaving the first patch as dead code. A single patch is sufficient;
    # EOFError exercises the network-failure path.
    mocker.patch("telnetlib.Telnet.read_all", side_effect=EOFError)
    checker = CheckHDDTemp()

    with pytest.raises(SystemExit):
        with contextlib2.redirect_stdout(out):
            checker._get_data()

    assert (  # nosec: B101
        "ERROR: Server communication problem" in out.getvalue().strip())
def test_main(mocker):
    """
    Test "main" function must print Nagios and human readable HDD's statuses.
    """
    captured = StringIO()
    expected = "OK: device /dev/sda is functional and stable 27C\n"  # noqa: E501
    mocker.patch("sys.argv",
                 ["check_hddtemp.py", "-s", "127.0.0.1", "-p", "7634"])
    mocker.patch("telnetlib.Telnet.open")
    # read_all is replaced wholesale, so the lambda's single argument is the
    # Telnet instance itself and is ignored.
    mocker.patch(
        "telnetlib.Telnet.read_all",
        lambda self: b"|/dev/sda|HARD DRIVE|27|C|",
    )

    with pytest.raises(SystemExit) as excinfo:
        with contextlib2.redirect_stdout(captured):
            main()

    assert captured.getvalue() == expected  # nosec: B101
    assert excinfo.value.args == (0, )  # nosec: B101
def test_profile_parser(self):
    """
    Verify that the function parse_profile produces the expected output.

    Rather than pinning exact rows (which vary across interpreter versions
    and implementations), the header is compared exactly and every data row
    is matched against per-column regexes.
    """
    with contextlib.closing(io.StringIO()) as stream:
        with contextlib.redirect_stdout(stream):
            cProfile.run('print()')
        stream.seek(0)
        actual = list(parse_profile(stream))

    # Expected format for the profiling output on cPython implementations (PyPy differs)
    # actual = [
    #     ["ncalls", "tottime", "percall", "cumtime", "percall", "filename:lineno(function)"],
    #     ["1", "0.000", "0.000", "0.000", "0.000", "<string>:1(<module>)"],
    #     ...
    # ]
    exc_header = [
        "ncalls", "tottime", "percall", "cumtime", "percall",
        "filename:lineno(function)"
    ]
    self.assertEqual(actual[0], exc_header)

    exc_number = re.compile(r"\d(.\d+)?")
    exc_module = re.compile(
        r"({method.*})|({built-in.*})|(<.+>:\d+\(<.+>\))")
    exc_row = [
        exc_number, exc_number, exc_number, exc_number, exc_number, exc_module
    ]
    for row in actual[1:]:
        for text, expected_regex in zip(row, exc_row):
            # BUG FIX: the failure message previously contained literal,
            # unfilled "{}" placeholders; format in the actual values so a
            # failing assertion is informative.
            self.assertRegex(
                text,
                expected_regex,
                msg="Expected something like {} but found {}".format(
                    expected_regex.pattern, text))
def test_main(mocker):
    """
    Test "check" method must print Nagios and human readable statuses.

    :param mocker: mock
    :type mocker: MockerFixture
    """
    captured = StringIO()
    expected = "OK: 'example': OK"
    # Canned supervisord process-info payload for a healthy RUNNING process.
    data = [{
        "description": "pid 666, uptime 0 days, 0:00:00",
        "pid": 666,
        "stderr_logfile": "",
        "stop": 0,
        "logfile": "/var/log/example.log",
        "exitstatus": 0,
        "spawnerr": "",
        "now": 0,
        "group": "example",
        "name": "example",
        "statename": "RUNNING",
        "start": 0,
        "state": 20,
        "stdout_logfile": "/var/log/example.log",
    }]
    mocker.patch("sys.argv",
                 ["check_supervisord.py", "-s", "127.0.0.1", "-p", "9001"])
    mocker.patch(
        "{name}._Method.__call__".format(name=xmlrpclib.__name__),
        return_value=data,
    )

    with pytest.raises(SystemExit) as excinfo:
        with contextlib2.redirect_stdout(captured):
            main()

    assert captured.getvalue().strip() == expected  # nosec: B101
    assert excinfo.value.args == (0, )  # nosec: B101
def _page_output(skip):
    """Context-manager generator that pages stdout through ``less``.

    If *skip* is true or stdout is not a TTY, yields with stdout untouched.
    Otherwise everything written to stdout inside the ``with`` block is piped
    to ``less -F -R -S -X -K``; terminal settings are saved via ``stty -g``
    beforehand and restored afterwards.
    """
    if skip or not sys.stdout.isatty():
        yield
        return
    # Save current terminal settings so they can be restored after less exits.
    terminal_settings = subprocess.check_output(['stty', '-g']).strip()
    try:
        pipe_rd, pipe_wr = os.pipe()
        # less flags: -F quit if one screen, -R raw control chars,
        # -S chop long lines, -X no init/deinit, -K quit on Ctrl-C.
        pager = subprocess.Popen(['less', '-F', '-R', '-S', '-X', '-K'],
                                 stdin=os.fdopen(pipe_rd, 'r'),
                                 close_fds=True)
        # NOTE(review): mode 'a' with buffering=0 is a Python 2 idiom;
        # Python 3 only allows unbuffered mode for binary files — confirm
        # the target interpreter version.
        pipe_output = os.fdopen(pipe_wr, 'a', 0)
        try:
            with contextlib.redirect_stdout(pipe_output):
                yield
        except KeyboardInterrupt:
            # let less handle this, -K will exit cleanly
            pass
        finally:
            # Close the write end so less sees EOF, then wait for it.
            pipe_output.close()
            pager.wait()
    finally:
        subprocess.check_call(['stty', terminal_settings])
def test_profile_parser(self):
    """
    Verify that the function parse_profile produces the expected output.
    """
    # Profile a trivial statement, capturing cProfile's report into a buffer
    # that parse_profile then reads back.
    with contextlib.closing(StringIO()) as stream:
        with contextlib.redirect_stdout(stream):
            cProfile.run('print()')
        stream.seek(0)
        actual = list(parse_profile(stream))
    # cProfile's reported rows differ by interpreter version, so the expected
    # table is chosen per-version.
    if PY3:
        if sys.version_info < (3, 5):
            # Python 3.0-3.4 reports built-ins without the "builtins." prefix.
            expected = [
                ['ncalls', 'tottime', 'percall', 'cumtime', 'percall', 'filename:lineno(function)'],
                ['1', '0.000', '0.000', '0.000', '0.000', '<string>:1(<module>)'],
                ['1', '0.000', '0.000', '0.000', '0.000', '{built-in method exec}'],
                ['1', '0.000', '0.000', '0.000', '0.000', '{built-in method print}'],
                ['1', '0.000', '0.000', '0.000', '0.000', "{method 'disable' of '_lsprof.Profiler' objects}"],
            ]
        else:
            # Python 3.5+ reports built-ins with the "builtins." prefix.
            expected = [
                ['ncalls', 'tottime', 'percall', 'cumtime', 'percall', 'filename:lineno(function)'],
                ['1', '0.000', '0.000', '0.000', '0.000', '<string>:1(<module>)'],
                ['1', '0.000', '0.000', '0.000', '0.000', '{built-in method builtins.exec}'],
                ['1', '0.000', '0.000', '0.000', '0.000', '{built-in method builtins.print}'],
                ['1', '0.000', '0.000', '0.000', '0.000', "{method 'disable' of '_lsprof.Profiler' objects}"],
            ]
    else:
        # Python 2: the print output passes through StringIO.write, so extra
        # rows for StringIO internals appear in the profile.
        expected = [
            ['ncalls', 'tottime', 'percall', 'cumtime', 'percall', 'filename:lineno(function)'],
            ['1', '0.000', '0.000', '0.000', '0.000', '<string>:1(<module>)'],
            ['2', '0.000', '0.000', '0.000', '0.000', 'StringIO.py:208(write)'],
            ['2', '0.000', '0.000', '0.000', '0.000', 'StringIO.py:38(_complain_ifclosed)'],
            ['2', '0.000', '0.000', '0.000', '0.000', '{isinstance}'],
            ['2', '0.000', '0.000', '0.000', '0.000', '{len}'],
            ['2', '0.000', '0.000', '0.000', '0.000', "{method 'append' of 'list' objects}"],
            ['1', '0.000', '0.000', '0.000', '0.000', "{method 'disable' of '_lsprof.Profiler' objects}"]
        ]
    self.assertListEqual(actual, expected)
def run_cmd(app, cmd):
    """
    Run cmd through the app with stdout/stderr captured and return the
    normalized out and err text.
    """
    original_stdout = sys.stdout
    sys.stdout = app.stdout

    # One sim captures app.stdout and sys.stdout, the other sys.stderr.
    cmd_stdout_sim = StdSim(app.stdout)
    stderr_sim = StdSim(sys.stderr)

    try:
        app.stdout = cmd_stdout_sim
        with redirect_stdout(cmd_stdout_sim), redirect_stderr(stderr_sim):
            app.onecmd_plus_hooks(cmd)
    finally:
        app.stdout = cmd_stdout_sim.inner_stream
        sys.stdout = original_stdout

    return normalize(cmd_stdout_sim.getvalue()), normalize(stderr_sim.getvalue())
def test__get_data__network_error(mocker):
    """
    Test "_get_data" method must exit with server error.

    :param mocker: mock
    :type mocker: MockerFixture
    """
    captured = StringIO()
    mocker.patch("sys.argv",
                 ["check_supervisord.py", "-s", "127.0.0.1", "-p", "9001"])
    mocker.patch(
        "{name}._Method.__call__".format(name=xmlrpclib.__name__),
        side_effect=OSError,
    )
    checker = CheckSupervisord()

    with pytest.raises(SystemExit) as excinfo:
        with contextlib2.redirect_stdout(captured):
            checker._get_data()

    assert (  # nosec: B101
        "ERROR: Server communication problem" in captured.getvalue().strip())
    assert excinfo.value.args == (3, )  # nosec: B101
def test_json_encoded_argument_processing_file_input(self):  # pylint: disable=invalid-name
    """Make sure that method json_encoded in src/params.py correctly:
    - Reads the .txt files
    - If input is not a file, reads and serializes the input as json
    - Returns correct error messages
    """
    # --------------------------------------
    # Pass in json as a file
    # --------------------------------------
    # Create object that contains the correct object that should be loaded
    # from reading the file
    pets_dictionary = dict()
    pets_dictionary['Coco'] = 'Golden Retriever'
    pets_dictionary['Lily'] = 'Ragdoll Cat'
    pets_dictionary['Poofy'] = 'Golden Doodle'

    dictionary = dict()
    dictionary['name'] = 'John'
    dictionary['last_name'] = 'Smith'
    dictionary['pets'] = pets_dictionary

    # Test .txt files containing json live in the same folder as this file.
    # Get their full paths. The "@" prefix marks the value as a file path
    # rather than a raw json string.
    file_path_correct_json = \
        '@' + path.join(path.dirname(__file__), 'sample_json', 'correct_json.txt')
    file_path_incorrect_json = \
        '@' + path.join(path.dirname(__file__), 'sample_json', 'incorrect_json.txt')
    file_path_empty_file = \
        '@' + path.join(path.dirname(__file__), 'sample_json', 'empty_file.txt')
    file_path_nonexistent = \
        '@' + path.join(path.dirname(__file__), 'sample_json', 'non_existent.txt')

    # Use str_io capture here to avoid the printed clutter when running the tests.
    # Using ValueError instead of json.decoder.JSONDecodeError because that is not
    # supported in python 2.7.
    # Test that incorrect or empty file paths return error.
    str_io = StringIO()
    with redirect_stdout(str_io):
        with self.assertRaises(ValueError):
            json_encoded(file_path_empty_file)
        with self.assertRaises(ValueError):
            json_encoded(file_path_incorrect_json)

    # Test that correct file path returns correct serialized object
    self.assertEqual(dictionary, json_encoded(file_path_correct_json))

    # Test that appropriate error messages are printed out on error
    str_io = StringIO()
    with redirect_stdout(str_io):
        try:
            json_encoded(file_path_empty_file)
        except Exception:  # pylint: disable=broad-except
            pass
    printed_output = str_io.getvalue()
    self.assertIn(
        'Decoding JSON value from file {0} failed'.format(file_path_empty_file.lstrip('@')),
        printed_output)
    # Decoder message text differs between python 3 and python 2.
    self.assertTrue('Expecting value: line 1 column 1 (char 0)' in printed_output
                    or 'No JSON object could be decoded' in printed_output)
    self.assertNotIn('You can also pass the json argument in a .txt file', printed_output)

    str_io = StringIO()
    with redirect_stdout(str_io):
        try:
            json_encoded(file_path_incorrect_json)
        except Exception:  # pylint: disable=broad-except
            pass
    printed_output = str_io.getvalue()
    self.assertIn(
        'Decoding JSON value from file {0} failed'.format(file_path_incorrect_json.lstrip('@')),
        printed_output)
    # Decoder message text differs between python 3 and python 2.
    self.assertTrue('Expecting property name enclosed in double quotes: line 1 column 2 (char 1)' in printed_output  # pylint: disable=line-too-long
                    or 'Expecting property name: line 1 column 2 (char 1)' in printed_output)
    self.assertNotIn('You can also pass the json argument in a .txt file', printed_output)

    str_io = StringIO()
    with redirect_stdout(str_io):
        try:
            json_encoded(file_path_nonexistent)
        except Exception:  # pylint: disable=broad-except
            pass
    printed_output = str_io.getvalue()
    self.assertIn(
        'File not found at {0}'.format(file_path_nonexistent.lstrip('@')),
        printed_output)
def html_visit_altair_plot(self, node):
    """Sphinx HTML visitor for altair-plot nodes.

    Executes the node's code (capturing stdout), then appends the requested
    output form — nothing, captured stdout, the chart's repr, or a rendered
    vega-lite plot — to the HTML body.
    """
    # Execute the code, saving output and namespace
    namespace = node['namespace']
    try:
        f = io.StringIO()
        with contextlib2.redirect_stdout(f):
            chart = eval_block(node['code'], namespace)
        stdout = f.getvalue()
    except Exception as e:
        warnings.warn("altair-plot: {}:{} Code Execution failed:"
                      "{}: {}".format(node['rst_source'], node['rst_lineno'],
                                      e.__class__.__name__, str(e)))
        raise nodes.SkipNode

    # An explicit chart-var-name overrides the value of the final expression.
    chart_name = node['chart-var-name']
    if chart_name is not None:
        if chart_name not in namespace:
            raise ValueError("chart-var-name='{}' not present in namespace"
                             "".format(chart_name))
        chart = namespace[chart_name]

    output = node['output']

    if output == 'none':
        raise nodes.SkipNode
    elif output == 'stdout':
        if not stdout:
            raise nodes.SkipNode
        else:
            output_literal = nodes.literal_block(stdout, stdout)
            output_literal['language'] = 'none'
            node.extend([output_literal])
    elif output == 'repr':
        if chart is None:
            raise nodes.SkipNode
        else:
            # Indent every line of the repr so it renders as a literal block.
            # NOTE(review): the exact indent width in these literals may have
            # been lost in reformatting — confirm against rendered output.
            rep = ' ' + repr(chart).replace('\n', '\n ')
            repr_literal = nodes.literal_block(rep, rep)
            repr_literal['language'] = 'none'
            node.extend([repr_literal])
    elif output == 'plot':
        if isinstance(chart, alt.TopLevelMixin):
            # Last line should be a chart; convert to spec dict
            spec = chart.to_dict()
            actions = node['links']

            # TODO: add an option to save spects to file & load from there.
            # TODO: add renderer option

            # Write spec to a *.vl.json file
            # dest_dir = os.path.join(self.builder.outdir, node['relpath'])
            # if not os.path.exists(dest_dir):
            #     os.makedirs(dest_dir)
            # filename = "{0}.vl.json".format(node['target_id'])
            # dest_path = os.path.join(dest_dir, filename)
            # with open(dest_path, 'w') as f:
            #     json.dump(spec, f)

            # Pass relevant info into the template and append to the output
            html = VGL_TEMPLATE.render(div_id=node['div_id'],
                                       spec=json.dumps(spec),
                                       mode='vega-lite',
                                       renderer='canvas',
                                       actions=json.dumps(actions))
            self.body.append(html)
        else:
            warnings.warn('altair-plot: {}:{} Malformed block. Last line of '
                          'code block should define a valid altair Chart object.'
                          ''.format(node['rst_source'], node['rst_lineno']))
            raise nodes.SkipNode
pssepath.add_pssepath()

# import all psspy functions
from psspy import *

# redirect PSS/E alerts/messages to python command line
import redirect
redirect.psse2py()

# declare 'with mute:' block to silence PSS/E alerts
import os
from contextlib2 import redirect_stdout
mute = redirect_stdout(open(os.devnull, 'w'))


class PssError(BaseException):
    pass


#############################################################################
# Bus

class Bus:
    def __init__(self, id):
        """Wrap a PSS/E bus identified by its integer bus number.

        :param id: bus number
        :raises PssError: if the bus number does not exist in the loaded case
        """
        # BUG FIXES:
        # - `if id is int:` compared the value to the type object and was
        #   always False; isinstance() is the correct check.
        # - `busexs(ibus)` referenced the undefined name `ibus`; the
        #   parameter is `id`.
        # - `<>` is Python-2-only syntax; `!=` works on both 2 and 3.
        if isinstance(id, int):
            if busexs(id) != 0:
                raise PssError('Bus number {} not found'.format(id))
def test_profile_parser(self):
    """
    Verify that the function parse_profile produces the expected output.
    """
    # Profile a trivial statement, capturing cProfile's report into a buffer
    # that parse_profile then reads back.
    with contextlib.closing(StringIO()) as stream:
        with contextlib.redirect_stdout(stream):
            cProfile.run('print()')
        stream.seek(0)
        actual = list(parse_profile(stream))
    # cProfile's reported rows differ by interpreter version, so the expected
    # table is chosen per-version.
    if PY3:
        if sys.version_info < (3, 5):
            # Python 3.0-3.4 reports built-ins without the "builtins." prefix.
            expected = [
                [
                    'ncalls', 'tottime', 'percall', 'cumtime', 'percall',
                    'filename:lineno(function)'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    '<string>:1(<module>)'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    '{built-in method exec}'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    '{built-in method print}'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    "{method 'disable' of '_lsprof.Profiler' objects}"
                ],
            ]
        else:
            # Python 3.5+ reports built-ins with the "builtins." prefix.
            expected = [
                [
                    'ncalls', 'tottime', 'percall', 'cumtime', 'percall',
                    'filename:lineno(function)'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    '<string>:1(<module>)'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    '{built-in method builtins.exec}'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    '{built-in method builtins.print}'
                ],
                [
                    '1', '0.000', '0.000', '0.000', '0.000',
                    "{method 'disable' of '_lsprof.Profiler' objects}"
                ],
            ]
    else:
        # Python 2: the print output passes through StringIO.write, so extra
        # rows for StringIO internals appear in the profile.
        expected = [
            [
                'ncalls', 'tottime', 'percall', 'cumtime', 'percall',
                'filename:lineno(function)'
            ],
            [
                '1', '0.000', '0.000', '0.000', '0.000',
                '<string>:1(<module>)'
            ],
            [
                '2', '0.000', '0.000', '0.000', '0.000',
                'StringIO.py:208(write)'
            ],
            [
                '2', '0.000', '0.000', '0.000', '0.000',
                'StringIO.py:38(_complain_ifclosed)'
            ],
            ['2', '0.000', '0.000', '0.000', '0.000', '{isinstance}'],
            ['2', '0.000', '0.000', '0.000', '0.000', '{len}'],
            [
                '2', '0.000', '0.000', '0.000', '0.000',
                "{method 'append' of 'list' objects}"
            ],
            [
                '1', '0.000', '0.000', '0.000', '0.000',
                "{method 'disable' of '_lsprof.Profiler' objects}"
            ]
        ]
    self.assertListEqual(actual, expected)
def captured_stdout():
    """Yield a StringIO buffer that receives everything written to stdout."""
    buffer = StringIO()
    with contextlib.redirect_stdout(buffer):
        yield buffer