def test_molecule_get_structure(self):
    """Exercise the ``molecule get-structure`` subcommand for by-name and by-id lookups."""
    # Lookup by name: the last output line should describe water's structure.
    with App(argv=['molecule', 'get-structure', '--by-name', 'water']) as app:
        with CaptureOutput(termination_delay=0.1) as captured:
            app.run()
            final_line = captured.get_text().split('\n')[-1]
            self.assertEqual(
                re.split(' +', final_line),
                ['water', 'pubchem.compound', '962', 'InChI=1S/H2O/h1H2'])

    # Lookup by id within the ChEBI namespace.
    with App(argv=['molecule', 'get-structure', '--by-id',
                   '--namespace', 'chebi', '15377']) as app:
        with CaptureOutput(termination_delay=0.1) as captured:
            app.run()
            final_line = captured.get_text().split('\n')[-1]
            self.assertEqual(
                re.split(' +', final_line),
                ['', 'chebi', '15377', 'InChI=1S/H2O/h1H2'])

    # An unknown id should yield an error message on stderr.
    with App(argv=['molecule', 'get-structure', '--by-id',
                   '--namespace', 'chebi', '0']) as app:
        with CaptureOutput(merged=False) as captured:
            app.run()
            self.assertEqual(captured.stderr.get_text(),
                             'Unable to find structure')
def test_prompt_for_confirmation(self):
    """Test :func:`humanfriendly.prompts.prompt_for_confirmation()`."""
    question = "Are you sure?"
    # Replies that should be interpreted as agreement.
    for answer in ('yes', 'Yes', 'YES', 'y', 'Y'):
        with PatchedAttribute(prompts, 'interactive_prompt', lambda p, a=answer: a):
            assert prompt_for_confirmation(question) is True
    # Replies that should be interpreted as disagreement.
    for answer in ('no', 'No', 'NO', 'n', 'N'):
        with PatchedAttribute(prompts, 'interactive_prompt', lambda p, a=answer: a):
            assert prompt_for_confirmation(question) is False
    # An empty reply should select whichever default was supplied.
    for default in (True, False):
        with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
            assert prompt_for_confirmation(question, default=default) is default
    # With no input and no default a warning should be shown; the second
    # (valid) reply then resolves the prompt.
    pending_replies = ['', 'y']
    with PatchedAttribute(prompts, 'interactive_prompt', lambda p: pending_replies.pop(0)):
        with CaptureOutput() as capturer:
            assert prompt_for_confirmation(question) is True
            assert "there's no default choice" in capturer.get_text()
    # The default reply should be rendered in uppercase.
    with PatchedAttribute(prompts, 'interactive_prompt', lambda p: 'y'):
        for default, expected_hint in ((True, 'Y/n'), (False, 'y/N'), (None, 'y/n')):
            with CaptureOutput() as capturer:
                assert prompt_for_confirmation(question, default=default) is True
                assert expected_hint in capturer.get_text()
    # Persistently invalid (empty) replies should eventually give up.
    with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
        self.assertRaises(TooManyInvalidReplies, prompt_for_confirmation, question)
def test_difference(self):
    """Exercise the ``difference`` CLI command on identical and differing models."""
    timestamp = datetime.datetime.now().replace(microsecond=0)

    def write_model(basename, wc_lang_version):
        # Persist one minimal model and return its workbook path.
        model = Model(id='model', name='test model', version='0.0.1a',
                      wc_lang_version=wc_lang_version,
                      created=timestamp, updated=timestamp)
        filename = path.join(self.tempdir, basename)
        Writer().run(filename, model, data_repo_metadata=False)
        return filename

    filename1 = write_model('model1.xlsx', '0.0.0')
    filename2 = write_model('model2.xlsx', '0.0.0')
    filename3 = write_model('model3.xlsx', '0.0.1')

    def run_difference(*extra_argv):
        # Invoke the ``difference`` command and return the captured output.
        with CaptureOutput(relay=False) as capturer:
            with __main__.App(argv=['difference'] + list(extra_argv)) as app:
                app.run()
            return capturer.get_text()

    # Identical models, compared semantically and file-by-file.
    self.assertEqual(run_difference(filename1, filename2),
                     'Models are identical')
    self.assertEqual(run_difference(filename1, filename2, '--compare-files'),
                     'Models are identical')

    # Differing wc_lang_version, compared semantically.
    expected_diff = (
        'Objects (Model: "model", Model: "model") have different attribute values:\n '
        '`wc_lang_version` are not equal:\n 0.0.0 != 0.0.1')
    self.assertEqual(run_difference(filename1, filename3), expected_diff)

    # Differing wc_lang_version, compared file-by-file.
    expected_diff = 'Sheet !!Model:\n Row 7:\n Cell B: 0.0.0 != 0.0.1'
    self.assertEqual(run_difference(filename1, filename3, '--compare-files'),
                     expected_diff)
def __init__(self):
    """Initialize output capturing when running under ``cron`` with the correct configuration."""
    content_type = os.environ.get('CONTENT_TYPE', 'text/plain')
    # HTML conversion is opted into via CONTENT_TYPE="text/html".
    self.is_enabled = 'text/html' in content_type
    self.is_silent = False
    if self.is_enabled:
        # Imported lazily so the colouredlogs[cron] extra isn't required
        # to use the other functions in this module.
        from capturer import CaptureOutput
        self.capturer = CaptureOutput(merged=True, relay=False)
class ColoredCronMailer(object):
    """
    Easy to use integration between :mod:`coloredlogs` and the UNIX ``cron`` daemon.

    Use :class:`ColoredCronMailer` as a context manager in the command line
    interface of your Python program and users can opt in to HTML output under
    ``cron`` simply by setting ``CONTENT_TYPE="text/html"`` in their crontab!

    Under the hood this requires quite a bit of magic and I must admit that I
    developed this code simply because I was curious whether it could even be
    done :-). It requires my :mod:`capturer` package which you can install
    using ``pip install 'coloredlogs[cron]'``. The ``[cron]`` extra will pull
    in :mod:`capturer` 2.4 or newer which is required to capture the output
    while silencing it - otherwise you'd get duplicate output in the emails
    sent by ``cron``.
    """

    def __init__(self):
        """Initialize output capturing when running under ``cron`` with the correct configuration."""
        content_type = os.environ.get('CONTENT_TYPE', 'text/plain')
        self.is_enabled = 'text/html' in content_type
        self.is_silent = False
        if self.is_enabled:
            # Imported lazily so the coloredlogs[cron] extra isn't required
            # to use the other functions in this module.
            from capturer import CaptureOutput
            self.capturer = CaptureOutput(merged=True, relay=False)

    def __enter__(self):
        """Start capturing output (when applicable)."""
        if self.is_enabled:
            self.capturer.__enter__()
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Stop capturing output and convert the output to HTML (when applicable)."""
        if not self.is_enabled:
            return
        if not self.is_silent:
            # Only call output() when we captured something useful.
            captured = self.capturer.get_text()
            if captured and not captured.isspace():
                output(convert(captured))
        self.capturer.__exit__(exc_type, exc_value, traceback)

    def silence(self):
        """
        Tell :func:`__exit__()` to swallow all output (things will be silent).

        This can be useful when a Python program is written in such a way that
        it has already produced output by the time it becomes apparent that
        nothing useful can be done (say in a cron job that runs every few
        minutes :-p). By calling :func:`silence()` the output can be swallowed
        retroactively, avoiding useless emails from ``cron``.
        """
        self.is_silent = True
class ColouredCronMailer(object):
    """
    Easy to use integration between :mod:`colouredlogs` and the UNIX ``cron`` daemon.

    Use :class:`ColouredCronMailer` as a context manager in the command line
    interface of your Python program and users can opt in to HTML output under
    ``cron`` simply by setting ``CONTENT_TYPE="text/html"`` in their crontab!

    Under the hood this requires quite a bit of magic and I must admit that I
    developed this code simply because I was curious whether it could even be
    done :-). It requires my :mod:`capturer` package which you can install
    using ``pip install 'colouredlogs[cron]'``. The ``[cron]`` extra will pull
    in :mod:`capturer` 2.4 or newer which is required to capture the output
    while silencing it - otherwise you'd get duplicate output in the emails
    sent by ``cron``.
    """

    def __init__(self):
        """Initialize output capturing when running under ``cron`` with the correct configuration."""
        declared_type = os.environ.get('CONTENT_TYPE', 'text/plain')
        self.is_enabled = 'text/html' in declared_type
        self.is_silent = False
        if self.is_enabled:
            # Deferred import so the colouredlogs[cron] extra isn't required
            # to use the other functions in this module.
            from capturer import CaptureOutput
            self.capturer = CaptureOutput(merged=True, relay=False)

    def __enter__(self):
        """Start capturing output (when applicable)."""
        if self.is_enabled:
            self.capturer.__enter__()
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Stop capturing output and convert the output to HTML (when applicable)."""
        if not self.is_enabled:
            return
        if not self.is_silent:
            # Only call output() when we captured something useful.
            text = self.capturer.get_text()
            if text and not text.isspace():
                output(convert(text))
        self.capturer.__exit__(exc_type, exc_value, traceback)

    def silence(self):
        """
        Tell :func:`__exit__()` to swallow all output (things will be silent).

        This can be useful when a Python program is written in such a way that
        it has already produced output by the time it becomes apparent that
        nothing useful can be done (say in a cron job that runs every few
        minutes :-p). By calling :func:`silence()` the output can be swallowed
        retroactively, avoiding useless emails from ``cron``.
        """
        self.is_silent = True
def test_raw_cli(self):
    """Test the raw ``wc-lang`` command line entry point.

    ``--help`` exits via :exc:`SystemExit` after printing usage; running
    without arguments should also print usage.
    """
    with mock.patch('sys.argv', ['wc-lang', '--help']):
        with CaptureOutput(relay=False) as capturer:
            with self.assertRaises(SystemExit):
                __main__.main()
            # Bug fix: the original asserted on ``context.Exception``, which
            # doesn't exist (the context attribute is ``exception``), and the
            # str() of a SystemExit is its exit code, not the usage text.
            # The usage text is on the captured output instead.
            self.assertRegex(capturer.get_text(), 'usage: wc-lang')

    with mock.patch('sys.argv', ['wc-lang']):
        with CaptureOutput(relay=False) as capturer:
            __main__.main()
            self.assertRegex(capturer.get_text(), 'usage: wc-lang')
def test_fast_log(self):
    """Check FastLogger output with a non-matching and a matching log level."""
    # At level 'info' this fixture is expected to produce no output.
    with CaptureOutput(relay=True) as capturer:
        quiet_logger = FastLogger(self.fixture_logger, 'info')
        quiet_logger.fast_log('msg')
        self.assertFalse(capturer.get_text())
    # At the fixture's own level the message should be emitted.
    with CaptureOutput(relay=False) as capturer:
        active_logger = FastLogger(self.fixture_logger, self.fixture_level.name)
        message = 'hi mom'
        active_logger.fast_log(message)
        self.assertTrue(capturer.get_text().endswith(message))
def test_get_version(self):
    """Both ``-v`` and ``--version`` should print the package version and exit."""
    for version_flag in ('-v', '--version'):
        with CaptureOutput() as capturer:
            with __main__.App(argv=[version_flag]) as app:
                with self.assertRaises(SystemExit):
                    app.run()
            self.assertEqual(capturer.get_text(), wc_kb.__version__)
def test_error_handling(self):
    """Nested capture and redirect calls should be rejected with TypeError."""
    capturer = CaptureOutput()
    capturer.start_capture()
    try:
        # A second start_capture() on an active capturer must fail.
        self.assertRaises(TypeError, capturer.start_capture)
    finally:
        # Make sure not to start swallowing output here ;-).
        capturer.finish_capture()
    # The same restriction applies to nested Stream.redirect() calls.
    stdout_stream = Stream(sys.stdout.fileno())
    stdout_stream.redirect(sys.stderr.fileno())
    self.assertRaises(TypeError, stdout_stream.redirect, sys.stderr.fileno())
def test_get_version(self):
    """Both ``-v`` and ``--version`` should print the package version and exit."""
    for version_flag in ('-v', '--version'):
        with CaptureOutput() as capture_output:
            with App(argv=[version_flag]) as app:
                with self.assertRaises(SystemExit):
                    app.run()
            self.assertEqual(capture_output.get_text(), datanator.__version__)
def test_taxonomy_get_common_ancestor(self):
    """``taxonomy get-common-ancestor`` resolves two taxa to their shared ancestor."""
    argv = ['taxonomy', 'get-common-ancestor',
            'Mycoplasma genitalium', 'Mycoplasma pneumoniae']
    with App(argv=argv) as app:
        with CaptureOutput(termination_delay=0.1) as capturer:
            app.run()
            self.assertEqual(capturer.get_text(), 'Mycoplasma')

    # An unknown taxon name should make the command exit.
    argv = ['taxonomy', 'get-common-ancestor', 'Mycoplasma genitalium', 'XXX']
    with App(argv=argv) as app:
        with CaptureOutput(termination_delay=0.1) as capturer:
            self.assertRaises(SystemExit, app.run)
def test_taxonomy_get_rank(self):
    """``taxonomy get-rank`` accepts either a taxon id or a taxon name."""
    for taxon in ('2097', 'Mycoplasma genitalium'):
        with App(argv=['taxonomy', 'get-rank', taxon]) as app:
            with CaptureOutput(termination_delay=0.1) as capturer:
                app.run()
                self.assertEqual(capturer.get_text(), "species")
    # An unknown taxon should make the command exit.
    with App(argv=['taxonomy', 'get-rank', 'Mycoplasma genitalium XXX']) as app:
        self.assertRaises(SystemExit, app.run)
def test_progress_bar(self):
    """Run a short simulation with the progress bar enabled and check its output."""
    simulator = SimulationEngine()
    simulator.add_object(PeriodicSimulationObject('name', 1))
    simulator.initialize()
    # Announce the test on stderr, where the progress bar also writes.
    print('\nTesting progress bar:', file=sys.stderr)
    sys.stderr.flush()
    with CaptureOutput(relay=True) as capturer:
        try:
            time_max = 10
            config_dict = dict(time_max=time_max, progress=True)
            # A run of time_max periods is expected to produce time_max + 1 events.
            self.assertEqual(
                simulator.simulate(config_dict=config_dict).num_events,
                time_max + 1)
            # The progress bar output should mention the end time and 'time_max'.
            self.assertTrue(f"/{time_max}" in capturer.get_text())
            self.assertTrue("time_max" in capturer.get_text())
        except ValueError as e:
            if str(e) == 'I/O operation on closed file':
                pass
                # This ValueError is raised because progressbar expects sys.stderr to remain open
                # for an extended time period but karr_lab_build_utils run-tests has closed it.
                # Since SimulationProgressBar works and passes tests under naked pytest, and
                # progressbar does not want to address the conflict over sys.stderr
                # (see https://github.com/WoLpH/python-progressbar/issues/202) we let this
                # test fail under karr_lab_build_utils.
            else:
                self.fail('test_progress failed for unknown reason')
def test_download_uniprot(self):
    """Downloading 'uniprot' should create the Uniprot sqlite database."""
    argv = ['download', '--path=' + self.dirname, 'uniprot']
    with App(argv=argv) as app:
        with CaptureOutput(termination_delay=0.1) as capturer:
            app.run()
            self.assertTrue(os.path.exists(self.dirname + '/Uniprot.sqlite'))
def test_download_array_express(self):
    """Downloading 'array-express' should create the ArrayExpress sqlite database."""
    argv = ['download', '--path=' + self.dirname, 'array-express']
    with App(argv=argv) as app:
        with CaptureOutput(termination_delay=0.1) as capturer:
            app.run()
            self.assertTrue(os.path.exists(self.dirname + '/ArrayExpress.sqlite'))
def test_profiling(self):
    """Check that profiled simulations produce pstats output on disk and on stdout."""
    existing_levels = self.suspend_logging(self.log_names)
    simulation_engine = SimulationEngine()
    num_sim_objs = 20
    self.prep_simulation(simulation_engine, num_sim_objs)
    end_sim_time = 200
    # Fragments that any pstats report is expected to contain.
    expected_text = ['function calls', 'Ordered by: internal time',
                     'filename:lineno(function)']

    # With an output directory the profile is written to the measurements file.
    sim_config_dict = dict(time_max=end_sim_time, output_dir=self.out_dir,
                           profile=True)
    stats = simulation_engine.simulate(config_dict=sim_config_dict).profile_stats
    self.assertTrue(isinstance(stats, pstats.Stats))
    # Bug fix: read the measurements via a context manager; the original
    # ''.join(open(...).readlines()) leaked the open file handle.
    with open(self.measurements_pathname, 'r') as measurements_file:
        measurements = measurements_file.read()
    for text in expected_text:
        self.assertIn(text, measurements)

    # Without an output directory the profile goes to stdout.
    sim_config_dict = dict(time_max=end_sim_time, profile=True)
    with CaptureOutput(relay=False) as capturer:
        stats = simulation_engine.simulate(config_dict=sim_config_dict).profile_stats
        for text in expected_text:
            self.assertIn(text, capturer.get_text())
    self.assertTrue(isinstance(stats, pstats.Stats))
    self.restore_logging_levels(self.log_names, existing_levels)
def parallel_command(command, n=2, wordir=os.getcwd() + '/', name='log.txt'):
    """
    Run a list of shell commands in parallel, capturing their output into a log file.

    At most ``n`` commands run concurrently; captured output is appended to
    ``wordir + name`` when all commands have finished.

    :param command: List (Required). List of Linux commands that will be running.
    :param n: Integer (Optional). Number of processors will be used. Default to 2.
    :param wordir: String (Optional). Full pathname of the place where the log file
        will be saved to. Default to the current working directory. Should always
        finish with an '/'.  NOTE(review): this default is evaluated once at import
        time, not per call — confirm that is intended.
    :param name: String (Optional). Name of the output log file. Default to log.txt
    """
    # NOTE: commands run through the shell (shell=True); callers must not
    # pass untrusted input.
    processes = set()
    max_processes = n
    with CaptureOutput() as capturer:
        # Launch each command, throttling to at most max_processes children.
        for cmd in command:
            processes.add(subprocess.Popen(cmd, shell=True))
            if len(processes) >= max_processes:
                os.wait()
                processes.difference_update(
                    [p for p in processes if p.poll() is not None])
        # Wait for any remaining children to finish.
        for p in processes:
            if p.poll() is None:
                p.wait()
        # Bug fix: append to the log through a context manager; the original
        # relied on garbage collection to close the handle it opened.
        with open(wordir + name, "a+") as log_file:
            log_file.write("\n" + capturer.get_text())
def _zen_call(func, script, conf, keys, data, verbosity):
    """Run ``func`` via ``_execute`` in a child process and return its queued result.

    Raises :class:`Error` carrying the captured output lines when the child
    put nothing on the result queue (presumably because it failed — the
    captured output should then contain the diagnostics).
    """
    # Normalize all text arguments to bytes before handing them to the child.
    script = script.encode() if isinstance(script, str) else script
    conf = conf.encode() if isinstance(conf, str) else conf
    keys = keys.encode() if isinstance(keys, str) else keys
    data = data.encode() if isinstance(data, str) else data
    with CaptureOutput() as capturer:
        # The child is expected to deposit its result on this queue.
        result = Queue()
        args = (
            func,
            result,
            script,
            conf,
            keys,
            data,
            verbosity,
        )
        p = Process(target=_execute, args=args)
        p.start()
        p.join()
        p.terminate()
        if result.empty():
            # Stop capturing first so the captured lines can be read and
            # attached to the error.
            capturer.finish_capture()
            raise Error(capturer.get_lines())
        return result.get_nowait()
def test_call_with_url(mocker, url, response, return_code, exp_output):
    """ Call monitoring plugin with args """
    def stub_parser():
        """ Fake argument passer """
        stub_args = mocker.MagicMock()
        stub_args.url = 'http://foo.bar'
        return stub_args

    # Replace the plugin's argument parser with the stub above.
    mocker.patch(
        'monitoring_plugins.web.e2e.check_protractor_html.parse_args',
        stub_parser)
    # Run main() with a mocked HTTP response, catching the exit and output.
    with requests_mock.mock() as mocked_requests, \
            pytest.raises(SystemExit) as exit_info, \
            CaptureOutput() as captured:
        mocked_requests.get(url, text=response)
        check_protractor_html.main()
    assert exit_info.value.code == return_code
    assert exp_output in captured.get_text()
def test_combined_current_and_subprocess(self):
    """Test combined standard output and error capturing from the same process and subprocesses."""
    # Some unique strings that are not substrings of each other.
    cur_stdout_1 = "Some output from Python's print statement"
    cur_stdout_2 = "Output from Python's sys.stdout.write() method"
    cur_stdout_3 = "More output from Python's print statement"
    cur_stderr = "Output from Python's sys.stderr.write() method"
    sub_stderr = "Output from subprocess stderr stream"
    sub_stdout = "Output from subprocess stdout stream"
    expected = (cur_stdout_1, cur_stderr, sub_stderr, sub_stdout,
                cur_stdout_2, cur_stdout_3)
    with CaptureOutput() as capturer:
        # Emit multiple lines on both streams from current process and subprocess.
        print(cur_stdout_1)
        sys.stderr.write("%s\n" % cur_stderr)
        subprocess.call(["sh", "-c", "echo %s 1>&2" % sub_stderr])
        subprocess.call(["echo", sub_stdout])
        sys.stdout.write("%s\n" % cur_stdout_2)
        print(cur_stdout_3)
        # Verify that all of the expected lines were captured.
        captured_lines = capturer.get_lines()
        for line in expected:
            assert line in captured_lines
def type_output(self, f, block, with_ansi=False): """ Execute input command "block", capture and add results to the screencast. """ # execute and time input command line = '\n'.join(block) line = expanduser(expandvars(line)) t0 = time.time() if not with_ansi: s = subprocess.getoutput(line) self.typist.ts += (time.time() - t0) if s: for out_line in s.split('\n'): ac_line = [self.typist.ts, 'o', '{}\r\n'.format(out_line)] # ac_line = [float_to_limited_str(self.typist.ts), 'o', '{}\r\n'.format(out_line)] f.write('{}\n'.format(json.dumps(ac_line))) self.typist.ts += 0.01 # print output, without ANSI escape sequences else: with CaptureOutput() as capturer: subprocess.call(line.split()) # trouble ahead with splitting self.typist.ts += (time.time() - t0) for out_line in capturer.get_lines(): # + s.errors(raw=True): ac_line = [self.typist.ts, 'o', '{}\r\n'.format(out_line)] # ac_line = [float_to_limited_str(self.typist.ts), 'o', '{}\r\n'.format(out_line)] f.write('{}\n'.format(json.dumps(ac_line))) self.typist.ts += 0.01
def test_username_filter(self):
    """Make sure :func:`install()` integrates with :class:`~coloredlogs.UserNameFilter()`."""
    install(fmt='%(username)s')
    with CaptureOutput() as capturer:
        logging.info("A truly insignificant message ..")
        captured_text = capturer.get_text()
    # The formatted record should contain the current user's name.
    assert find_username() in captured_text
def test_auto_disable(self):
    """
    Make sure ANSI escape sequences are not emitted when logging output is being redirected.

    This is a regression test for https://github.com/xolox/python-coloredlogs/issues/100.

    It works as follows:

    1. We mock an interactive terminal using 'capturer' to ensure that this
       test works inside test drivers that capture output (like pytest).
    2. We launch a subprocess (to ensure a clean process state) where stderr
       is captured but stdout is not, emulating issue #100.
    3. The output captured on stderr contained ANSI escape sequences after
       this test was written and before the issue was fixed, so now this
       serves as a regression test for issue #100.
    """
    child_program = ";".join([
        "import coloredlogs, logging",
        "coloredlogs.install()",
        "logging.info('Hello world')",
    ])
    with CaptureOutput():
        interpreter = subprocess.Popen(
            [sys.executable, "-c", child_program],
            stderr=subprocess.PIPE)
        stdout, stderr = interpreter.communicate()
        assert ANSI_CSI not in stderr.decode('UTF-8')
def test_call_with_params(mocker, script_args, exp_code, exp_output):
    """ Call monitoring plugin with args """
    fake_args = ['check_nginx_stub_status.py'] + script_args
    request_mock_response = """
Active connections: 300
server accepts handled requests
 390 390 390
Reading: 290 Writing: 4 Waiting: 6"""

    def fake_getitem(index):
        """ Fake lambda to return needed arguments for tests """
        return fake_args[index]

    # Mock arguments list used with monitoring script parser
    sys_argv = mocker.MagicMock()
    sys_argv.__getitem__.side_effect = fake_getitem
    # Execute monitoring script main function
    with requests_mock.mock() as mocked_requests, \
            pytest.raises(SystemExit) as exit_info, \
            CaptureOutput() as captured, \
            mocker.patch('sys.argv', sys_argv):
        mocked_requests.get('http://foo.bar', text=request_mock_response)
        check_nginx_stub_status.main()
    # Check returns
    assert exp_output in captured.get_text()
    assert exit_info.value.code == exp_code
def run_P_minor_outbreak_test(self, sir_class):
    """Estimate P[minor outbreak] over an ensemble of SIR runs.

    Allen (2017) estimates P[minor outbreak] for the SIR model shown in
    Fig. 1 as 0.25; check that the empirical estimate lands within a
    factor of two of that value.
    """
    ensemble_size = 50
    num_minor_outbreaks = 0
    with CaptureOutput(relay=False):
        for _ in range(ensemble_size):
            sir_args = dict(name='sir', s=98, i=2, N=100, beta=0.3,
                            gamma=0.15, recording_period=10)
            # Bug fix: random.randrange() requires integer arguments; the
            # float literal 1E6 is deprecated since Python 3.10 and a
            # TypeError in 3.12.
            seed = random.randrange(10 ** 6)
            sir = RunSIRs.main(sir_class, time_max=60, seed=seed, **sir_args)
            # consider an outbreak to be minor if no infections remain and fewer than 10 people were infected
            if sir.history[-1]['i'] == 0 and 90 < sir.history[-1]['s']:
                num_minor_outbreaks += 1
    p_minor_outbreak = num_minor_outbreaks / ensemble_size
    expected_p_minor_outbreak = 0.25
    self.assertGreater(p_minor_outbreak, 0.5 * expected_p_minor_outbreak)
    self.assertLess(p_minor_outbreak, 2 * expected_p_minor_outbreak)
def test_progress(self):
    """Check SimulationProgressBar when inactive and when enabled."""
    unused_bar = SimulationProgressBar()
    # An inactive bar's methods are no-ops that return None.
    self.assertEqual(unused_bar.start(1), None)
    self.assertEqual(unused_bar.progress(2), None)
    self.assertEqual(unused_bar.end(), None)

    used_bar = SimulationProgressBar(True)
    with CaptureOutput(relay=True) as capturer:
        try:
            duration = 20
            self.assertEqual(used_bar.start(duration), None)
            self.assertEqual(used_bar.progress(10), None)
            # view intermediate progress
            print('', file=sys.stderr)
            self.assertEqual(used_bar.progress(20), None)
            self.assertEqual(used_bar.end(), None)
            self.assertTrue("/{}".format(duration) in capturer.get_text())
            # Bug fix: the original called "time_max".format(duration), a
            # no-op format on a literal without placeholders; use the plain
            # string.
            self.assertTrue("time_max" in capturer.get_text())
        except ValueError as e:
            if str(e) == 'I/O operation on closed file':
                pass
                # This ValueError is raised because progressbar expects sys.stderr to remain open
                # for an extended time period but karr_lab_build_utils run-tests has closed it.
                # Since SimulationProgressBar works and passes tests under naked pytest, and
                # progressbar does not want to address the conflict over sys.stderr
                # (see https://github.com/WoLpH/python-progressbar/issues/202) we let this
                # test fail under karr_lab_build_utils.
            else:
                self.fail('test_progress failed for unknown reason')
def test_log_with_context_static(self):
    """A static context value should be prepended to the formatted log line."""
    with CaptureOutput() as captured:
        self.logger.info('Hello', timestamp='now')
        logged_text = captured.get_text()
    # The line should lead with the context value, not the basic format.
    assert not BASIC_OUTPUT_REGEX.match(logged_text)
    assert logged_text.startswith('now')
def test_taxonomy_get_distance_to_root(self):
    """``taxonomy get-distance-to-root`` prints a numeric distance."""
    with App(argv=['taxonomy', 'get-distance-to-root', 'bacteria']) as app:
        with CaptureOutput(termination_delay=0.1) as capturer:
            app.run()
            self.assertEqual(float(capturer.get_text()), 2.)
    # An unknown taxon should make the command exit.
    with App(argv=['taxonomy', 'get-distance-to-root', 'XXX']) as app:
        self.assertRaises(SystemExit, app.run)
def test_cli_demo(self):
    """Test the command line colored logging demonstration."""
    with CaptureOutput() as capturer:
        main('coloredlogs', '--demo')
        demo_output = capturer.get_text()
    # Every logging level name should appear, uppercased, in the output.
    for level_name in ('debug', 'info', 'warning', 'error', 'critical'):
        assert level_name.upper() in demo_output
def test_auto_install(self):
    """Test :func:`coloredlogs.auto_install()`."""
    needle = random_string()
    command_line = [sys.executable, '-c', 'import logging; logging.info(%r)' % needle]
    # Bug fix: remember the pre-test value so the environment variable can be
    # restored afterwards instead of leaking into other tests.
    original_value = os.environ.get('COLOREDLOGS_AUTO_INSTALL')
    try:
        # Sanity check that log messages aren't enabled by default.
        with CaptureOutput() as capturer:
            os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'false'
            subprocess.check_call(command_line)
            output = capturer.get_text()
        assert needle not in output
        # Test that the $COLOREDLOGS_AUTO_INSTALL environment variable can be
        # used to automatically call coloredlogs.install() during initialization.
        with CaptureOutput() as capturer:
            os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'true'
            subprocess.check_call(command_line)
            output = capturer.get_text()
        assert needle in output
    finally:
        if original_value is None:
            os.environ.pop('COLOREDLOGS_AUTO_INSTALL', None)
        else:
            os.environ['COLOREDLOGS_AUTO_INSTALL'] = original_value
def test_build_ecmdb(self):
    """Building 'ecmdb' should create the Ecmdb sqlite database."""
    argv = ['build', '--path=' + self.dirname, '--max-entries=1',
            '--verbose=True', 'ecmdb']
    with App(argv=argv) as app:
        with CaptureOutput(termination_delay=0.1) as capturer:
            app.run()
            self.assertTrue(os.path.exists(self.dirname + '/Ecmdb.sqlite'))
def __init__(self):
    """Initialize output capturing when running under ``cron`` with the correct configuration."""
    # Capturing is opted into by setting CONTENT_TYPE="text/html" (e.g. in a
    # crontab); any other value leaves it disabled.
    self.is_enabled = 'text/html' in os.environ.get('CONTENT_TYPE', 'text/plain')
    # Presumably consulted elsewhere to suppress the captured output — confirm.
    self.is_silent = False
    if self.is_enabled:
        # We import capturer here so that the coloredlogs[cron] extra
        # isn't required to use the other functions in this module.
        from capturer import CaptureOutput
        self.capturer = CaptureOutput(merged=True, relay=False)