Example #1
    def test_parse_value(self):
        ret = cli._parse_value("foobar")
        self.assertEqual(ret, "foobar")

        ret = cli._parse_value(True)
        self.assertEqual(ret, True)

        ret = cli._parse_value(1)
        self.assertEqual(ret, 1)

        ret = cli._parse_value(None)
        self.assertEqual(ret, None)

        fd, temp_path = tempfile.mkstemp()
        os.write(fd, b"content")
        os.close(fd)
        ret = cli._parse_value("@%s" % temp_path)
        self.assertEqual(ret, "content")
        os.unlink(temp_path)

        fl = six.StringIO()
        with redirect_stderr(fl):
            with self.assertRaises(SystemExit) as exc:
                cli._parse_value("@/thisfileprobablydoesntexist")
            self.assertEqual(
                fl.getvalue(),
                "[Errno 2] No such file or directory:"
                " '/thisfileprobablydoesntexist'\n",
            )
            self.assertEqual(exc.exception.code, 1)
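For reference, a minimal `_parse_value` consistent with these assertions might look like the sketch below; the real cli module may differ, and the error formatting here is inferred from the test, not taken from the project.

    import sys

    def _parse_value(value):
        # Non-string values (bool, int, None) pass through unchanged.
        if not isinstance(value, str):
            return value
        # A leading '@' means "read the value from this file path".
        if value.startswith("@"):
            try:
                with open(value[1:]) as fp:
                    return fp.read()
            except OSError as exc:
                # str(exc) renders as "[Errno 2] No such file or directory: '...'"
                sys.stderr.write("%s\n" % exc)
                sys.exit(1)
        return value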
Example #2
    def test_die(self):
        fl = six.StringIO()
        with redirect_stderr(fl):
            with self.assertRaises(SystemExit) as test:
                cli.die("foobar")
        self.assertEqual(fl.getvalue(), "foobar\n")
        self.assertEqual(test.exception.code, 1)
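Similarly, a `die` consistent with this test writes the message to stderr and exits with status 1; this is an inferred sketch, not the project's actual code:

    import sys

    def die(msg):
        sys.stderr.write(msg + "\n")
        sys.exit(1)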
Example #3
    def test_process_streams_redirect(self):
        # This won't work for asyncio implementation of subprocess

        async def test():
            prog = bR'''
import sys
print('out', flush=True)
print('err', file=sys.stderr, flush=True)
            '''

            proc = await asyncio.create_subprocess_exec(
                sys.executable, '-c', prog,
                loop=self.loop)

            out, err = await proc.communicate()
            self.assertIsNone(out)
            self.assertIsNone(err)

        with tempfile.NamedTemporaryFile('w') as stdout:
            with tempfile.NamedTemporaryFile('w') as stderr:
                with contextlib.redirect_stdout(stdout):
                    with contextlib.redirect_stderr(stderr):
                        self.loop.run_until_complete(test())

                stdout.flush()
                stderr.flush()

                with open(stdout.name, 'rb') as so:
                    self.assertEqual(so.read(), b'out\n')

                with open(stderr.name, 'rb') as se:
                    self.assertEqual(se.read(), b'err\n')
Example #4
    def test_invalid_config(self):
        """Test an existing configuration file but invalid"""

        c = Config(base_dir + '/static/config_invalid.yml')
        self.assertFalse(c.read())

        self.assertGreater(len(c.error), 0)
        self.assertRegex(c.error, '.*Additional properties are not allowed.*')

        # The rest doesn't work, for an unknown reason
        pytest.skip('Error trying to capture stderr')
        return

        # Don't go further with python < 3.5
        try:
            from contextlib import redirect_stderr
        except Exception:
            return

        f = io.StringIO()
        with redirect_stderr(f):
            c.display_errors()
        err = f.getvalue()

        regex = re.compile('.*config_invalid.yml.*', re.MULTILINE)
        self.assertRegex(err, regex)

        regex = re.compile('.*Failed validating main config or plugin configs.*', re.MULTILINE)
        self.assertRegex(err, regex)

        regex = re.compile('Additional properties are not allowed.*', re.MULTILINE)
        self.assertRegex(err, regex)
Example #5
    def on_generate(self, event):
        """Generate button was pressed."""
        event.Skip()
        s = StringIO()
        sys.argv = [sys.argv[0]]
        for argument, control in self.advanced_controls.items():
            name = argument.option_strings[-1]
            if isinstance(argument, _StoreTrueAction):
                if control.GetValue():
                    sys.argv.append(name)
            elif isinstance(argument, _StoreFalseAction):
                if not control.GetValue():
                    sys.argv.append(name)
            elif isinstance(argument, _StoreAction):
                sys.argv += [
                    name,
                    control.GetValue()
                ]
            else:
                error('No clue what to do with argument: %s.' % argument.help)
        sys.argv += self._markings
        with redirect_stdout(s), redirect_stderr(s):
            main()
        s.seek(0)
        self.output.SetValue(s.read())
        self.output.SetFocus()
Example #6
    def test_with_misspelled_argument(self):
        stderr = self._get_fake_stream()
        with contextlib.redirect_stderr(stderr), self.assertRaises(SystemExit) as ctx:
            self.parser.parse_args(["--optionz", "value"])
        self.assertEqual(2, ctx.exception.code)
        expected = "error: unrecognized arguments: --optionz value\ndid you mean: --option (was: --optionz)"
        self.assertIn(expected, stderr.getvalue())
Example #7
    def test_invalid_config(self):
        """Test an existing configuration file but invalid"""
        c = Config(base_dir + '/static/config_invalid.ini')
        self.assertFalse(c.read())
        self.assertGreater(len(c.errors), 0)
        self.assertTrue('project_name' in c.errors)
        self.assertEqual('Missing', c.errors['project_name'])
        self.assertTrue('php.version' in c.errors)
        self.assertEqual('the value "8.0" is unacceptable.', c.errors['php.version'])

        # Don't go further with python < 3.5
        try:
            from contextlib import redirect_stderr
        except Exception:
            return

        f = io.StringIO()
        with redirect_stderr(f):
            c.display_errors()
        res = f.getvalue()

        regex = re.compile('Failed validating .*config_invalid.ini', re.MULTILINE)
        self.assertRegex(res, regex)

        regex = re.compile('the value ".*8.0.*" is unacceptable', re.MULTILINE)
        self.assertRegex(res, regex)
Example #8
def run_tests(src, test, fail):
    stderr = io.StringIO()
    stdout = io.StringIO()
    with contextlib.redirect_stderr(stderr):
        with contextlib.redirect_stdout(stdout):
            e = pytest.main([
                '-qq',
                '--disable-pytest-warnings',
                '--no-faulthandler',
                '--cov', src.replace('.py', '').replace('/', '.'),
                '--cov-fail-under', '100',
                '--cov-report', 'term-missing:skip-covered',
                test
            ])

    if e == 0:
        if fail:
            print("UNEXPECTED SUCCESS:", src, "Please remove this file from setup.cfg tool:individual_coverage/exclude.")
            e = 42
        else:
            print("SUCCESS:           ", src)
    else:
        if fail:
            print("IGNORING FAIL:     ", src)
            e = 0
        else:
            cov = [l for l in stdout.getvalue().split("\n") if (src in l) or ("was never imported" in l)]
            if len(cov) == 1:
                print("FAIL:              ", cov[0])
            else:
                print("FAIL:              ", src, test, stdout.getvalue(), stdout.getvalue())
                print(stderr.getvalue())
                print(stdout.getvalue())

    sys.exit(e)
Example #9
    def _get_requirements(package, fname, url, digest, python_version, build_config):
        """Call into conda_build.skeletons.pypi to handle the ugly mess of extracting
        requirements from python packages.

        Note: It is not safe to call into conda multiple times in parallel, and
        thus this function must not be called in parallel.
        """
        from conda_build.skeletons.pypi import get_pkginfo, get_requirements

        with open("/dev/null", "w") as devnull:
            with redirect_stdout(devnull), redirect_stderr(devnull):
                try:
                    pkg_info = get_pkginfo(package, fname, url, digest, python_version,
                                           [], build_config, [])
                    requirements = get_requirements(package, pkg_info)
                except SystemExit as exc:
                    raise Exception(exc) from None
                except Exception as exc:
                    raise Exception(exc) from None

        if len(requirements) == 1 and isinstance(requirements[0], list):
            requirements = requirements[0]
        requirements_fixed = []
        for req in requirements:
            if '\n' in req:
                requirements_fixed.extend(req.split('\n'))
            else:
                requirements_fixed.append(req)

        return pkg_info, requirements_fixed
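A common way to honor the "must not be called in parallel" note is to serialize callers with a module-level lock; a sketch, where the wrapper and lock names are hypothetical:

    import threading

    _conda_lock = threading.Lock()

    def get_requirements_serialized(*args, **kwargs):
        # Only one caller at a time may enter conda_build.
        with _conda_lock:
            return _get_requirements(*args, **kwargs)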
Example #10
File: test.py Project: me-and/mdf
    def run_standard_test(self, *,
                          src_dir_struct,
                          start_dst_dir_struct=None,
                          goal_dst_dir_struct=None,
                          manifest,
                          manifest_path=None,
                          args=None,
                          add_dst_dir_args=True,
                          stderr=sys.stderr):
        self.create_test_env(
                src_dir_struct=src_dir_struct,
                dst_dir_struct=start_dst_dir_struct,
                manifest=manifest, manifest_path=manifest_path)

        if args is None:
            args = []
        if add_dst_dir_args:
            args.extend(['-d', self.dst_dir_name])

        os.chdir(self.src_dir_name)
        with redirect_stderr(stderr):
            mdf.main(args)

        if goal_dst_dir_struct is None:
            goal_dst_dir_struct = self.src_to_dst_dir_struct(src_dir_struct)
        self.assert_dir_structure(self.dst_dir_name, goal_dst_dir_struct)
Example #11
    def cmd_run(self, version: str, args: Sequence[str]) -> Dict[str, object]:
        """Check a list of files, triggering a restart if needed."""
        try:
            # Process options can exit on improper arguments, so we need to catch that and
            # capture stderr so the client can report it
            stderr = io.StringIO()
            stdout = io.StringIO()
            with redirect_stderr(stderr):
                with redirect_stdout(stdout):
                    sources, options = mypy.main.process_options(
                        ['-i'] + list(args),
                        require_targets=True,
                        server_options=True,
                        fscache=self.fscache,
                        program='mypy-daemon',
                        header=argparse.SUPPRESS)
            # Signal that we need to restart if the options have changed
            if self.options_snapshot != options.snapshot():
                return {'restart': 'configuration changed'}
            if __version__ != version:
                return {'restart': 'mypy version changed'}
            if self.fine_grained_manager:
                manager = self.fine_grained_manager.manager
                start_plugins_snapshot = manager.plugins_snapshot
                _, current_plugins_snapshot = mypy.build.load_plugins(options, manager.errors)
                if current_plugins_snapshot != start_plugins_snapshot:
                    return {'restart': 'plugins changed'}
        except InvalidSourceList as err:
            return {'out': '', 'err': str(err), 'status': 2}
        except SystemExit as e:
            return {'out': stdout.getvalue(), 'err': stderr.getvalue(), 'status': e.code}
        return self.check(sources)
Example #12
    def test_nowindows(self):
        for opt in '-n', '--nowindows':
            with self.subTest(opt=opt):
                with contextlib.redirect_stderr(io.StringIO()) as stderr:
                    ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.nowindows)
                err = stderr.getvalue()
                self.assertIn('the --nowindows (-n) option is deprecated', err)
Example #13
    def test_too_many_args(self):
        parser = get_arg_handler()
        args = ['one', 'two']
        error_message = 'unrecognized arguments: two'
        with self.assertRaisesRegex(CLIError, error_message):
            with redirect_stderr(StringIO()):
                argns = parser.parse_args(args)
                print(argns)
Example #14
def test_err(capfd):
    msg = "Something that should not show up in log"
    stream = StringIO()
    with redirect_stderr(stream):
        m.raw_err(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == msg
    assert stream.getvalue() == ''

    stream = StringIO()
    with redirect_stderr(stream):
        m.captured_err(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == ''
    assert stream.getvalue() == msg
Example #15
    def test_no_args(self):
        parser = get_arg_handler()
        args = []
        error_message = ('the following arguments are required: '
                         'config_file_path')
        with self.assertRaisesRegex(CLIError, error_message):
            with redirect_stderr(StringIO()):
                argns = parser.parse_args(args)
Example #16
    def _run(self):
        argv = ['', '--with-cosmic_ray']
        argv += list(self.test_args)
        collector = NoseResultsCollector()

        with open(os.devnull, 'w') as devnull, \
                redirect_stdout(devnull), redirect_stderr(devnull):
            nose.run(argv=argv, plugins=[collector])
        return (collector.result.wasSuccessful(),
                [r[1] for r in collector.result.errors + collector.result.failures])
Example #17
    def wrapped_runner(*args, catch_errors=False):
        with redirect_stdout(io.StringIO()) as stdout, redirect_stderr(io.StringIO()) as stderr:
            try:
                f(list(args))
            except BaseException as exc:
                if not catch_errors:
                    raise
                elif isinstance(catch_errors, BaseException) and not isinstance(exc, catch_errors):
                    raise
                return stdout.getvalue(), stderr.getvalue(), exc
        return stdout.getvalue(), stderr.getvalue()
Example #18
    def test_parse_error(self):
        """Check that a reasonable error message is returned from a JSON file that
        contains badly-formed XML."""
        md = {'short_error': True}
        with io.StringIO() as buf, redirect_stderr(buf):
            badresult = from_sc(self.brokendata, md)
            errormsg = buf.getvalue()
        self.assertRegex(errormsg, 'Parsing error in the JSON')
        errorlines = errormsg.splitlines()[1:]
        self.assertEqual(len(errorlines), 55)
        self.assertRegex(errorlines[0], r'Affected portion of XML is 493: \<pb')
Example #19
    def test_copy(self):
        opt1 = Options.make([('foo', 'bar')], hej='med', dig={'action': 'store_true'})
        opt2 = opt1.copy()

        #TODO copies should copy actions, not values
        with FailAssert(SystemExit):
            with redirect_stderr(StringIO()):
                opt2.parseargs('-d')

        opt2.parseargs('-d', True)
        assert opt2.dig is True
        assert opt1.dig is False
Example #20
def test_redirect_err(capfd):
    msg = "StdOut"
    msg2 = "StdErr"

    stream = StringIO()
    with redirect_stderr(stream):
        with m.ostream_redirect(stdout=False):
            m.raw_output(msg)
            m.raw_err(msg2)
    stdout, stderr = capfd.readouterr()
    assert stdout == msg
    assert stderr == ''
    assert stream.getvalue() == msg2
Example #21
    def install_lib(self, *options):
        """Installs packages using pip.main; redirects output with the
        redirect_stderr and redirect_stdout context managers."""
        with open("pipimport.log", "a") as f:
            with redirect_stdout(f), redirect_stderr(f):
                for option in options:
                    print("Trying option: ", option)  # logged
                    try:
                        r = pip.main(["install", "-q", option])
                        if not r:
                            return
                    except Exception as e:
                        print(e.args[0])  # logged
Example #22
    def test_log(self):
        # Traceback and exception should be written to stderr.
        exc_type = NotImplementedError
        exc_message = 'hello'
        try:
            raise exc_type(exc_message)
        except Exception as caught:
            exc = caught
        stderr = io.StringIO()
        with contextlib.redirect_stderr(stderr):
            self.server.log(exc)
        logged = stderr.getvalue()
        self.assertIn(exc_type.__name__, logged)
        self.assertIn(exc_message, logged)
        self.assertIn('Traceback', logged)
Example #23
    def setup(self, event):
        # TODO this won't work if one instance is registered with more than one context.
        # Two options:
        # - move state to context
        # - forbid registering more than once
        self.prefix = ''
        self.counter = 0
        self._append_cache = ''

        self.stdout = IOBuffer()
        self.redirect_stdout = redirect_stdout(self._stdout if self.iswindows else self.stdout)
        self.redirect_stdout.__enter__()

        self.stderr = IOBuffer()
        self.redirect_stderr = redirect_stderr(self._stderr if self.iswindows else self.stderr)
        self.redirect_stderr.__enter__()
Example #24
    def test_reap_children(self):
        # Make sure that there is no other pending child process
        support.reap_children()

        # Create a child process
        pid = os.fork()
        if pid == 0:
            # child process: do nothing, just exit
            os._exit(0)

        t0 = time.monotonic()
        deadline = time.monotonic() + 60.0

        was_altered = support.environment_altered
        try:
            support.environment_altered = False
            stderr = io.StringIO()

            while True:
                if time.monotonic() > deadline:
                    self.fail("timeout")

                with contextlib.redirect_stderr(stderr):
                    support.reap_children()

                # Use environment_altered to check if reap_children() found
                # the child process
                if support.environment_altered:
                    break

                # loop until the child process completed
                time.sleep(0.100)

            msg = "Warning -- reap_children() reaped child process %s" % pid
            self.assertIn(msg, stderr.getvalue())
            self.assertTrue(support.environment_altered)
        finally:
            support.environment_altered = was_altered

        # Just in case, check again that there is no other
        # pending child process
        support.reap_children()
Example #25
def configure(project=LOGGING_PROJECT):
    """Configures cloud logging

    This is called for all main calls. If a $LOGGING_PROJECT environment
    variable is configured, then STDERR and STDOUT are redirected to cloud
    logging.
    """
    if not project:
        sys.stderr.write('!! Error: The $LOGGING_PROJECT environment '
                         'variable is required in order to set up cloud logging. '
                         'Cloud logging is disabled.\n')
        return

    logging.basicConfig(level=logging.INFO)
    try:
        # if this fails, redirect stderr to /dev/null so no startup spam.
        with contextlib.redirect_stderr(io.StringIO()):
            client = glog.Client(project)
            client.setup_logging(logging.INFO)
    except:
        sys.stderr.write('!! Cloud logging disabled\n')
Example #26
    def test_command_without_stderr_and_stdout_err(self):
        # TODO make it work under windows
        if os.name == 'nt':
            return

        f = io.StringIO()
        with redirect_stdout(f):
            launch_cmd_displays_output(self.cmd_nook, False, False)
        res = f.getvalue()
        self.assertEqual('\n', res)

        try:
            from contextlib import redirect_stderr
        except Exception:
            return

        f = io.StringIO()
        with redirect_stderr(f):
            launch_cmd_displays_output(self.cmd_nook, False, False)
        res = f.getvalue()
        self.assertEqual('', res)
Example #27
    def test_command_with_stderr_no_stdout_err_loop(self):
        # TODO make it work under windows
        if os.name == 'nt':
            return

        f = io.StringIO()
        with redirect_stdout(f):
            launch_cmd_displays_output(['wget', '--debug', '--tries', '3', 'http://doesnotexist'], False, True)
        res = f.getvalue()
        expected = re.compile(r'.*\.\.\. and more.*', re.MULTILINE)
        self.assertRegex(res, expected)

        try:
            from contextlib import redirect_stderr
        except Exception:
            return

        f = io.StringIO()
        with redirect_stderr(f):
            launch_cmd_displays_output(self.cmd_nook, False, True)
        res = f.getvalue()
        self.assertEqual('', res)
Example #28
    def test_command_with_stderr_no_stdout_err(self):
        # TODO make it work under windows
        if os.name == 'nt':
            return

        f = io.StringIO()
        with redirect_stdout(f):
            launch_cmd_displays_output(self.cmd_nook, False, True)
        res = f.getvalue()
        expected = re.compile('.*No such file or directory.*', re.MULTILINE)
        self.assertRegex(res, expected)

        try:
            from contextlib import redirect_stderr
        except Exception:
            return

        f = io.StringIO()
        with redirect_stderr(f):
            launch_cmd_displays_output(self.cmd_nook, False, True)
        res = f.getvalue()
        self.assertEqual('', res)
Example #29
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2015 Doug Hellmann.  All rights reserved.
#
"""
"""

#end_pymotw_header
from contextlib import redirect_stdout, redirect_stderr
import io
import sys


def misbehaving_function(a):
    sys.stdout.write('(stdout) A: {!r}\n'.format(a))
    sys.stderr.write('(stderr) A: {!r}\n'.format(a))


capture = io.StringIO()
with redirect_stdout(capture), redirect_stderr(capture):
    misbehaving_function(5)

print(capture.getvalue())
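Because both streams share one buffer, the final print shows the two lines in write order:

    (stdout) A: 5
    (stderr) A: 5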
Example #30
grid.set(0, 3, ALIVE)
grid.set(1, 4, ALIVE)
grid.set(2, 2, ALIVE)
grid.set(2, 3, ALIVE)
grid.set(2, 4, ALIVE)

columns = ColumnPrinter()
for i in range(5):
    columns.append(str(grid))
    grid = simulate_threaded(grid)  # Changed

print(columns)


# Example 4
def game_logic(state, neighbors):
    raise OSError('Problem with I/O')


# Example 5
import contextlib
import io

fake_stderr = io.StringIO()
with contextlib.redirect_stderr(fake_stderr):
    thread = Thread(target=game_logic, args=(ALIVE, 3))
    thread.start()
    thread.join()

print(fake_stderr.getvalue())
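This capture works because the threading module writes a worker's unhandled-exception traceback to sys.stderr, and redirect_stderr rebinds sys.stderr for the whole process rather than per thread; since the thread starts and joins inside the block, its traceback ends up in fake_stderr.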
Example #31
def test_integration(test_file):
    with (INTEGRATION_TESTS_ROOT / test_file).open("r") as f:
        test_config = yaml.safe_load(f)

    config = load_config(io.StringIO(TEST_CONFIG))
    config = unittest.mock.Mock(wraps=config)
    config.directory = DummyDirectory(users=(
        UserEntry(
            id="1",
            join_date=datetime.datetime(2017, 1, 1,
                                        tzinfo=dateutil.tz.tzutc()),
            tags=(),
        ),
        UserEntry(
            id="2",
            join_date=datetime.datetime(2017, 1, 2,
                                        tzinfo=dateutil.tz.tzutc()),
            tags=("tag1", "tag2"),
        ),
    ))

    wsgi = get_wsgi_app(config)
    test_client = werkzeug.test.Client(wsgi)

    for step in test_config:
        if "command" in step:
            stdout = io.StringIO()
            stderr = io.StringIO()

            args = shlex.split(step["command"])

            try:
                with contextlib.redirect_stdout(stdout):
                    with contextlib.redirect_stderr(stderr):
                        with _temporary_working_directory(JACQUARD_ROOT):
                            main(args, config=config)
            except SystemExit:
                pass

            output = stdout.getvalue()

            if "expect_error" in step:
                error_message = stderr.getvalue()
            else:
                assert not stderr.getvalue()

        elif "get" in step:
            path = step["get"]

            data, status, headers = test_client.get(path)

            assert status == "200 OK"

            output = b"".join(data).decode("utf-8")

        if "expect" in step:
            expected_output = textwrap.dedent(step["expect"]).strip()
            actual_output = textwrap.dedent(output).strip()

            assert actual_output == expected_output

        if "expect_yaml" in step:
            expected_output = step["expect_yaml"]
            actual_output = yaml.safe_load(output)

            assert actual_output == expected_output

        if "expect_yaml_keys" in step:
            expected_keys = step["expect_yaml_keys"]
            actual_output = yaml.safe_load(output)

            assert set(actual_output.keys()) == set(expected_keys)

        if "expect_error" in step:
            expected_error = textwrap.dedent(step["expect_error"].strip())
            actual_error = textwrap.dedent(error_message).strip()

            assert actual_error == expected_error
Example #32
def stderr():
    """Patch stderr and return stringio"""
    return contextlib.redirect_stderr(io.StringIO())
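The decorator is not shown in the excerpt, but this reads like a pytest fixture. Since `redirect_stderr.__enter__` returns its replacement target, the StringIO is what the `with` statement yields; a usage sketch, assuming `import sys`:

    def test_warns(stderr):
        with stderr as buf:
            print("oops", file=sys.stderr)
        assert buf.getvalue() == "oops\n"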
Example #33
    def __enter__(self):
        super().__enter__()
        self.enter_context(redirect_stdout(StdoutWriter()))
        self.enter_context(redirect_stderr(StderrWriter()))
        return self
Example #34
    def run_and_capture_output(self, command):
        out = io.StringIO()
        err = io.StringIO()
        with contextlib.redirect_stdout(out), contextlib.redirect_stderr(err):
            self.main.run_command(command)
        return out.getvalue(), err.getvalue()
Example #35
def extract_csv(args):
    buf = StringIO()
    with redirect_stdout(buf), redirect_stderr(buf):
        pdf_id, total_pages, engine_string2, pdf_files_folder_string, csv_tables_folder_string = args
        pdf_files_folder2 = Path(pdf_files_folder_string)
        csv_tables_folder2 = Path(csv_tables_folder_string)
        engine2 = create_engine(engine_string2)
        start_time = time.time()

        def save_tables(csv_tables, csv_page, method):
            for index, table in enumerate(csv_tables):
                table_number = index + 1
                csv_id = f"{pdf_id}_{csv_page}_{method}_{table_number}"
                csv_file_name = f"{csv_id}.csv"
                csv_full_path = str(csv_tables_folder2.joinpath(csv_file_name))
                csv_rows, csv_columns = table.shape
                accuracy = table.accuracy
                whitespace = table.whitespace
                top_row_json = json.dumps(table.df.iloc[0].tolist())
                csv_text = table.df.to_json(None, orient='values')
                table.to_csv(csv_full_path,
                             index=False,
                             header=False,
                             encoding="utf-8-sig")
                has_content = 0 if is_empty(json.dumps(csv_text)) else 1

                with engine2.connect() as conn2:
                    statement2 = text(
                        "INSERT INTO csvs (csvId, csvFileName, csvFullPath, pdfId, page, tableNumber,"
                        "topRowJson, csvRows, csvColumns, method, accuracy, whitespace, csvText, hasContent) "
                        "VALUE (:csvId, :csvFileName, :csvFullPath, :pdfId, :page, :tableNumber, "
                        ":topRowJson, :csvRows, :csvColumns, :method, :accuracy, :whitespace, :csvText, :hasContent);"
                    )
                    conn2.execute(
                        statement2, {
                            "csvId": csv_id,
                            "csvFileName": csv_file_name,
                            "csvFullPath": csv_full_path,
                            "pdfId": pdf_id,
                            "page": csv_page,
                            "tableNumber": table_number,
                            "topRowJson": top_row_json,
                            "csvRows": csv_rows,
                            "csvColumns": csv_columns,
                            "method": method,
                            "accuracy": accuracy,
                            "whitespace": whitespace,
                            "csvText": csv_text,
                            "hasContent": has_content
                        })

        try:
            pdf_file_path = pdf_files_folder2.joinpath(f"{pdf_id}.pdf")

            for page in range(1, total_pages + 1):
                try:
                    tables = camelot.read_pdf(
                        str(pdf_file_path),
                        pages=str(page),
                        strip_text='\n',
                        line_scale=40,
                        flag_size=True,
                        copy_text=['v'],
                    )
                    save_tables(tables, page, "lattice-v")
                except Exception as e:
                    print(f'Error processing {pdf_id} on page {page}:')
                    print(e)
                    traceback.print_tb(e.__traceback__)

            with engine2.connect() as conn:
                statement = text(
                    "UPDATE pdfs SET csvsExtracted = :csvsExtracted WHERE pdfId = :pdfId;"
                )
                conn.execute(statement, {
                    "csvsExtracted": 'true',
                    "pdfId": pdf_id
                })
            duration = round(time.time() - start_time)
            mins = round(duration / 60, 2)
            hrs = round(duration / 3600, 2)
            print(
                f"{pdf_id}: done {total_pages} pages in {duration} seconds ({mins} min or {hrs} hours)"
            )
        except Exception as e:
            print(f'Error processing {pdf_id}:')
            print(e)
            traceback.print_tb(e.__traceback__)
        finally:
            return buf.getvalue()
Example #36
def is_valid_feedstock_token(user, project, feedstock_token, token_repo):
    """Test if the input feedstock_token is valid.

    All exceptions are swallowed, and stdout/stderr from this function are
    redirected to `/dev/null`. Sanitized error messages are displayed at
    the end.

    If you need to debug this function, define `DEBUG_FEEDSTOCK_TOKENS` in
    your environment before calling this function.
    """
    from .github import gh_token

    github_token = gh_token()

    failed = False
    err_msg = None
    valid = False

    # capture stdout, stderr and suppress all exceptions so we don't
    # spill tokens
    with tempfile.TemporaryDirectory() as tmpdir, open(
            os.devnull, "w") as fp, redirect_stdout(fp), redirect_stderr(fp):
        try:
            # clone the repo
            _token_repo = (token_repo.replace(
                "$GITHUB_TOKEN",
                github_token).replace("${GITHUB_TOKEN}", github_token).replace(
                    "$GH_TOKEN", github_token).replace("${GH_TOKEN}",
                                                       github_token))
            git.Repo.clone_from(_token_repo, tmpdir, depth=1)
            token_file = os.path.join(
                tmpdir,
                "tokens",
                project + ".json",
            )

            if os.path.exists(token_file):
                with open(token_file, "r") as fp:
                    token_data = json.load(fp)

                salted_token = scrypt.hash(
                    feedstock_token,
                    bytes.fromhex(token_data["salt"]),
                    buflen=256,
                )

                valid = hmac.compare_digest(
                    salted_token,
                    bytes.fromhex(token_data["hashed_token"]),
                )
        except Exception as e:
            if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
                raise e
            failed = True
    if failed:
        if err_msg:
            raise RuntimeError(err_msg)
        else:
            raise RuntimeError(
                ("Validating the feedstock token for %s/%s failed!"
                 " Try the command locally with DEBUG_FEEDSTOCK_TOKENS"
                 " defined in the environment to investigate!") %
                (user, project))

    return valid
Example #37
def disable_console_output():
    with contextlib.ExitStack() as stack, open(os.devnull, "w") as devnull:
        stack.enter_context(contextlib.redirect_stdout(devnull))
        stack.enter_context(contextlib.redirect_stderr(devnull))
        yield
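The generator presumably carries a `@contextlib.contextmanager` decorator in the original module (not shown in the excerpt), giving usage like the following; `run_noisy_step` is a hypothetical stand-in:

    with disable_console_output():
        run_noisy_step()  # both streams go to os.devnull inside the block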
Example #38
def execute_notebook(nb, secret='secret', initial_env=None, ignore_errors=False):
    """
    Execute notebook & return the global environment that results from execution.

    TODO: write a note about the injection of check_results

    If ignore_errors is True, exceptions are swallowed.

    secret contains random digits so check_results and check are not easily modifiable

    nb is passed in as a dictionary that's a parsed ipynb file
    """
    with hide_outputs():
        if initial_env:
            global_env = initial_env.copy()
        else:
            global_env = {}
        source = ""

        # Before rewriting AST, find cells of code that generate errors.
        # One round of execution is done beforehand to mimic the Jupyter notebook style of running
        # (e.g. code runs up to the point of execution).
        # This workaround is needed because once the source code is parsed
        # into an AST, there is no notion of individual cells.

        for cell in nb['cells']:
            if cell['cell_type'] == 'code':
                # transform the input to executable Python
                # FIXME: use appropriate IPython functions here
                isp = IPythonInputSplitter(line_input_checker=False)
                try:
                    code_lines = []
                    cell_source_lines = cell['source']
                    source_is_str_bool = False
                    if isinstance(cell_source_lines, str):
                        source_is_str_bool = True
                        cell_source_lines = cell_source_lines.split('\n')

                    for line in cell_source_lines:
                        # Filter out ipython magic commands
                        # Filter out interact widget
                        if not line.startswith('%'):
                            if "interact(" not in line:
                                code_lines.append(line)
                                if source_is_str_bool:
                                    code_lines.append('\n')
                    cell_source = isp.transform_cell(''.join(code_lines))
                    exec(cell_source, global_env)
                    source += cell_source
                except:
                    if not ignore_errors:
                        raise

        tree = ast.parse(source)
        if find_check_assignment(tree) or find_check_definition(tree):
            # an empty global_env will fail all the tests
            return global_env

        # wrap check(..) calls into a check_results_X.append(check(..))
        transformer = CheckCallWrapper(secret)
        tree = transformer.visit(tree)
        ast.fix_missing_locations(tree)

        cleaned_source = compile(tree, filename="nb-ast", mode="exec")
        try:
            with open(os.devnull, 'w') as f, redirect_stdout(f), redirect_stderr(f):
                exec(cleaned_source, global_env)
        except:
            if not ignore_errors:
                raise
        return global_env
Example #39
def suppress_stdout_stderr():
    """A context manager that redirects stdout and stderr to devnull"""
    with open(os.devnull, 'w') as fnull:
        with redirect_stderr(fnull) as err, redirect_stdout(fnull) as out:
            yield err, out
Example #40
    def syntaxError(self, *args, **kwargs):
        captured = io.StringIO()
        with redirect_stderr(captured):
            super().syntaxError(*args, **kwargs)
        raise ParseError(captured.getvalue())
Example #41
def execute(context, is_interactive):
    sc = context.space_data

    try:
        line_object = sc.history[-1]
    except:
        return {'CANCELLED'}

    console, stdout, stderr = get_console(hash(context.region))

    if _BPY_MAIN_OWN:
        main_mod_back = sys.modules["__main__"]
        sys.modules["__main__"] = console._bpy_main_mod

    # redirect output
    from contextlib import (
        redirect_stdout,
        redirect_stderr,
    )

    # not included with Python
    class redirect_stdin(redirect_stdout.__base__):
        _stream = "stdin"

    # don't allow the stdin to be used, can lock blender.
    with redirect_stdout(stdout), \
            redirect_stderr(stderr), \
            redirect_stdin(None):

        # in case exception happens
        line = ""  # in case of encoding error
        is_multiline = False

        try:
            line = line_object.body

            # run the console, "\n" executes a multi line statement
            line_exec = line if line.strip() else "\n"

            is_multiline = console.push(line_exec)
        except:
            # unlikely, but this can happen with unicode errors for example.
            import traceback
            stderr.write(traceback.format_exc())

    if _BPY_MAIN_OWN:
        sys.modules["__main__"] = main_mod_back

    stdout.seek(0)
    stderr.seek(0)

    output = stdout.read()
    output_err = stderr.read()

    # cleanup
    sys.last_traceback = None

    # So we can reuse, clear all data
    stdout.truncate(0)
    stderr.truncate(0)

    # Special exception: it's possible the command loaded a new user interface.
    if hash(sc) != hash(context.space_data):
        return {'FINISHED'}

    bpy.ops.console.scrollback_append(text=sc.prompt + line, type='INPUT')

    if is_multiline:
        sc.prompt = PROMPT_MULTI
        if is_interactive:
            indent = line[:len(line) - len(line.lstrip())]
            if line.rstrip().endswith(":"):
                indent += "    "
        else:
            indent = ""
    else:
        sc.prompt = PROMPT
        indent = ""

    # insert a new blank line
    bpy.ops.console.history_append(text=indent,
                                   current_character=0,
                                   remove_duplicates=True)
    sc.history[-1].current_character = len(indent)

    # Insert the output into the editor
    # not quite correct because the order might have changed,
    # but ok 99% of the time.
    if output:
        add_scrollback(output, 'OUTPUT')
    if output_err:
        add_scrollback(output_err, 'ERROR')

    # execute any hooks
    for func, args in execute.hooks:
        func(*args)

    return {'FINISHED'}
Example #42
# see https://github.com/keras-team/keras/issues/1406

from contextlib import redirect_stderr
import os

with redirect_stderr(open(os.devnull, "w")):
    import keras  # noqa pylint: disable=unused-import
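One caveat: the devnull handle opened inline above is never closed. An equivalent form that releases it, assuming the import is the only thing expected to write to stderr:

from contextlib import redirect_stderr
import os

with open(os.devnull, "w") as devnull, redirect_stderr(devnull):
    import keras  # noqa pylint: disable=unused-import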
Example #43
    def verbosity_manager(new_target):
        stack = contextlib.ExitStack()
        stack.enter_context(contextlib.redirect_stdout(new_target))
        stack.enter_context(contextlib.redirect_stderr(new_target))
        return stack
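Note that redirection starts as soon as `verbosity_manager` is called, because `enter_context` enters each manager immediately; the returned ExitStack only handles restoration. A usage sketch:

    import io

    buf = io.StringIO()
    with verbosity_manager(buf):  # capture is already active here
        print("hidden")
    assert buf.getvalue() == "hidden\n"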
Example #44
def register_feedstock_token(user, project, token_repo):
    """Register the feedstock token with the token repo.

    This function uses a random salt and scrypt to hash the feedstock
    token before writing it to the token repo. NEVER STORE THESE TOKENS
    IN PLAIN TEXT!

    All exceptions are swallowed, and stdout/stderr from this function are
    redirected to `/dev/null`. Sanitized error messages are displayed at
    the end.

    If you need to debug this function, define `DEBUG_FEEDSTOCK_TOKENS` in
    your environment before calling this function.
    """
    from .github import gh_token

    github_token = gh_token()

    failed = False
    err_msg = None

    # capture stdout, stderr and suppress all exceptions so we don't
    # spill tokens
    with tempfile.TemporaryDirectory() as tmpdir, open(
            os.devnull, "w") as fp, redirect_stdout(fp), redirect_stderr(fp):
        try:
            feedstock_token, err_msg = read_feedstock_token(user, project)
            if err_msg:
                failed = True
                raise RuntimeError(err_msg)

            # clone the repo
            _token_repo = (token_repo.replace(
                "$GITHUB_TOKEN",
                github_token).replace("${GITHUB_TOKEN}", github_token).replace(
                    "$GH_TOKEN", github_token).replace("${GH_TOKEN}",
                                                       github_token))
            repo = git.Repo.clone_from(_token_repo, tmpdir, depth=1)
            token_file = os.path.join(
                tmpdir,
                "tokens",
                project + ".json",
            )

            # don't overwrite existing tokens
            # check again since there might be a race condition
            if os.path.exists(token_file):
                failed = True
                err_msg = "Token for repo %s/%s already exists!" % (
                    user,
                    project,
                )
                raise RuntimeError(err_msg)

            # salt, hash and write
            salt = os.urandom(64)
            salted_token = scrypt.hash(feedstock_token, salt, buflen=256)
            data = {
                "salt": salt.hex(),
                "hashed_token": salted_token.hex(),
            }
            with open(token_file, "w") as fp:
                json.dump(data, fp)

            # push
            repo.index.add(token_file)
            repo.index.commit(
                "[ci skip] [skip ci] [cf admin skip] ***NO_CI*** added token for %s/%s"
                % (user, project))
            repo.remote().pull(rebase=True)
            repo.remote().push()
        except Exception as e:
            if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
                raise e
            failed = True
    if failed:
        if err_msg:
            raise RuntimeError(err_msg)
        else:
            raise RuntimeError(
                ("Registering the feedstock token for %s/%s failed!"
                 " Try the command locally with DEBUG_FEEDSTOCK_TOKENS"
                 " defined in the environment to investigate!") %
                (user, project))

    return failed
Example #45
def suppress_output():
    """Redirects stdout and stderr to devnull."""
    with open(devnull, "w") as null:
        with redirect_stderr(null) as err, redirect_stdout(null) as out:
            yield (err, out)
Example #46
def register_feedstock_token_with_providers(
        user,
        project,
        drone=True,
        circle=True,
        travis=True,
        azure=True,
        clobber=True,
        drone_endpoints=(),
):
    """Register the feedstock token with provider CI services.

    Note that if a feedstock token is already registered and `clobber=True`
    this function will overwrite existing tokens.

    All exceptions are swallowed, and stdout/stderr from this function are
    redirected to `/dev/null`. Sanitized error messages are displayed at
    the end.

    If you need to debug this function, define `DEBUG_FEEDSTOCK_TOKENS` in
    your environment before calling this function.
    """
    # we are swallowing all of the logs below, so we do a test import here
    # to generate the proper errors for missing tokens
    from .ci_register import travis_endpoint  # noqa
    from .azure_ci_utils import default_config  # noqa

    # capture stdout, stderr and suppress all exceptions so we don't
    # spill tokens
    failed = False
    err_msg = None
    with open(os.devnull, "w") as fp:
        if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
            fpo = sys.stdout
            fpe = sys.stdout
        else:
            fpo = fp
            fpe = fp

        with redirect_stdout(fpo), redirect_stderr(fpe):
            try:
                feedstock_token, err_msg = read_feedstock_token(user, project)
                if err_msg:
                    failed = True
                    raise RuntimeError(err_msg)

                if circle:
                    try:
                        add_feedstock_token_to_circle(user, project,
                                                      feedstock_token, clobber)
                    except Exception as e:
                        if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
                            raise e
                        else:
                            err_msg = (
                                "Failed to register feedstock token for %s/%s"
                                " on circle!") % (user, project)
                            failed = True
                            raise RuntimeError(err_msg)

                if drone:
                    for drone_endpoint in drone_endpoints:
                        try:
                            add_feedstock_token_to_drone(
                                user,
                                project,
                                feedstock_token,
                                clobber,
                                drone_endpoint,
                            )
                        except Exception as e:
                            if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
                                raise e
                            else:
                                err_msg = (
                                    "Failed to register feedstock token for %s/%s"
                                    " on drone endpoint %s!") % (
                                        user, project, drone_endpoint)
                                failed = True
                                raise RuntimeError(err_msg)

                if travis:
                    try:
                        add_feedstock_token_to_travis(user, project,
                                                      feedstock_token, clobber)
                    except Exception as e:
                        if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
                            raise e
                        else:
                            err_msg = (
                                "Failed to register feedstock token for %s/%s"
                                " on travis!") % (user, project)
                            failed = True
                            raise RuntimeError(err_msg)

                if azure:
                    try:
                        add_feedstock_token_to_azure(user, project,
                                                     feedstock_token, clobber)
                    except Exception as e:
                        if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
                            raise e
                        else:
                            err_msg = (
                                "Failed to register feedstock token for %s/%s"
                                " on azure!") % (user, project)
                            failed = True
                            raise RuntimeError(err_msg)

            except Exception as e:
                if "DEBUG_FEEDSTOCK_TOKENS" in os.environ:
                    raise e
                failed = True
    if failed:
        if err_msg:
            raise RuntimeError(err_msg)
        else:
            raise RuntimeError((
                "Registering the feedstock token with proviers for %s/%s failed!"
                " Try the command locally with DEBUG_FEEDSTOCK_TOKENS"
                " defined in the environment to investigate!") %
                               (user, project))
Example #47
    def test_multipart_copy(self):
        size = 100 * 1024 * 1024 * 1024

        # size / 8MB would give us 12800 chunks - but the maximum allowed is 10000,
        # so we should end with 16MB chunks instead.
        chunksize = 8 * 1024 * 1024
        assert size / chunksize > 10000
        chunksize *= 2

        chunks = -(-size // chunksize)
        assert chunks <= 10000

        self.s3_stubber.add_response(method='create_multipart_upload',
                                     service_response={'UploadId': '123'},
                                     expected_params={
                                         'Bucket': 'example2',
                                         'Key': 'large_file2.npy',
                                     })

        for part_num in range(1, chunks + 1):
            self.s3_stubber.add_response(
                method='upload_part_copy',
                service_response={
                    'CopyPartResult': {
                        'ETag': 'etag%d' % part_num
                    }
                },
                expected_params={
                    'Bucket': 'example2',
                    'Key': 'large_file2.npy',
                    'UploadId': '123',
                    'PartNumber': part_num,
                    'CopySource': {
                        'Bucket': 'example1',
                        'Key': 'large_file1.npy'
                    },
                    'CopySourceRange': 'bytes=%d-%d' % (
                        (part_num - 1) * chunksize,
                        min(part_num * chunksize, size) - 1)
                })

        self.s3_stubber.add_response(method='complete_multipart_upload',
                                     service_response={},
                                     expected_params={
                                         'Bucket': 'example2',
                                         'Key': 'large_file2.npy',
                                         'UploadId': '123',
                                         'MultipartUpload': {
                                             'Parts': [{
                                                 'ETag': 'etag%d' % i,
                                                 'PartNumber': i
                                             } for i in range(1, chunks + 1)]
                                         }
                                     })

        with mock.patch('quilt3.data_transfer.MAX_CONCURRENCY', 1):
            stderr = io.StringIO()

            with redirect_stderr(stderr), mock.patch(
                    'quilt3.data_transfer.DISABLE_TQDM', False):
                data_transfer.copy_file_list([
                    (PhysicalKey.from_url('s3://example1/large_file1.npy'),
                     PhysicalKey.from_url('s3://example2/large_file2.npy'),
                     size),
                ])
            assert stderr.getvalue()
Example #48
def silenced(no_stdout: bool = True, no_stderr: bool = True):
    with contextlib.redirect_stdout(
            DevNull()) if no_stdout else null_context():
        with contextlib.redirect_stderr(
                DevNull()) if no_stderr else null_context():
            yield
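`DevNull` and `null_context` come from elsewhere in this project, and `silenced` is presumably wrapped with `@contextlib.contextmanager` like the other generator helpers in this collection. Minimal stand-ins consistent with this usage might be:

    import contextlib

    class DevNull:
        """A write-only stream that discards everything written to it."""
        def write(self, data):
            return len(data)

        def flush(self):
            pass

    def null_context():
        # A no-op context manager (contextlib.nullcontext on Python 3.7+).
        return contextlib.nullcontext()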
Example #49
    def test_add_person(self):
        ''' Uses open_persons_db '''
        # self.p1 = 'Rich', 'Thompson', '5/21', 'rm@g'
        if os.path.exists('persons.db'):
            os.remove('persons.db')
        db = open_persons_db()
        with io.StringIO() as buf, redirect_stderr(buf):
            self.assertFalse(add_person(db, self.p1, friend=False))
            self.assertEqual(
                'Warning: rm@g not added - must be friend or colleague\n',
                buf.getvalue())
        self.assertTrue(add_person(db, self.p1, colleague=True))
        c = db.execute('SELECT * FROM friends;')
        row = c.fetchone()
        self.assertEqual('Rich', row['first'])
        self.assertEqual('Thompson', row['last'])
        self.assertEqual('5/21', row['bday'])
        self.assertEqual('rm@g', row['email'])
        self.assertIsNone(c.fetchone())
        c = db.execute('SELECT * FROM colleagues;')
        row = c.fetchone()
        self.assertEqual('Rich', row['first'])
        self.assertEqual('Thompson', row['last'])
        self.assertEqual('5/21', row['bday'])
        self.assertEqual('rm@g', row['email'])
        self.assertIsNone(c.fetchone())

        # W/o placeholders, will inject executescript, break execute:
        # ------------ attack colleagues -----------------
        p = Person('a', 'b', 'c', "d');DROP TABLE colleagues;--")
        add_person(db, p)
        c = db.execute('SELECT * FROM friends;')
        row = c.fetchone()
        row = c.fetchone()
        self.assertEqual("d');DROP TABLE colleagues;--", row['email'])

        c.fetchone()  # without this, something breaks badly

        p = Person('a', 'b', 'c', 'd");DROP TABLE colleagues;--')
        add_person(db, p)

        c = db.execute('SELECT * FROM friends;')
        row = c.fetchone()
        row = c.fetchone()
        row = c.fetchone()
        self.assertEqual('d");DROP TABLE colleagues;--', row['email'])

        # ----------------- Now attack friends ----------------

        p = Person('a', 'b', 'c', "d');DROP TABLE friends;--")
        add_person(db, p, friend=False, colleague=True)
        c = db.execute('SELECT * FROM colleagues;')
        row = c.fetchone()
        row = c.fetchone()
        self.assertEqual("d');DROP TABLE friends;--", row['email'])

        c.fetchone()  # without this, something breaks badly

        p = Person('a', 'b', 'c', 'd");DROP TABLE friends;--')
        add_person(db, p, friend=False, colleague=True)

        c = db.execute('SELECT * FROM colleagues;')
        row = c.fetchone()
        row = c.fetchone()
        row = c.fetchone()
        self.assertEqual('d");DROP TABLE friends;--', row['email'])

        db.close()
Example #51
def _tractor_coadds(sample,
                    targetwcs,
                    tims,
                    mods,
                    version_header,
                    objid=None,
                    brickname=None,
                    survey=None,
                    mp=None,
                    log=None,
                    bands=['g', 'r', 'z']):
    """Generate individual-band FITS and color coadds for each central using
    Tractor.

    """
    from legacypipe.coadds import make_coadds, write_coadd_images
    #from legacypipe.runbrick import rgbkwargs, rgbkwargs_resid
    from legacypipe.survey import get_rgb, imsave_jpeg

    if brickname is None:
        brickname = sample['brickname']

    print('Producing coadds...', flush=True, file=log)
    if log:
        with redirect_stdout(log), redirect_stderr(log):
            C = make_coadds(tims,
                            bands,
                            targetwcs,
                            mods=mods,
                            mp=mp,
                            callback=write_coadd_images,
                            callback_args=(survey, brickname, version_header,
                                           tims, targetwcs))
    else:
        C = make_coadds(tims,
                        bands,
                        targetwcs,
                        mods=mods,
                        mp=mp,
                        callback=write_coadd_images,
                        callback_args=(survey, brickname, version_header, tims,
                                       targetwcs))

    # Move (rename) the coadds into the desired output directory.
    for suffix in np.atleast_1d('model'):
        for band in bands:
            shutil.copy(
                os.path.join(
                    survey.output_dir, 'coadd', brickname[:3], brickname,
                    'legacysurvey-{}-{}-{}.fits.fz'.format(
                        brickname, suffix, band)),
                os.path.join(
                    survey.output_dir,
                    '{}-{}-nocentral-{}.fits.fz'.format(objid, suffix, band)))

    shutil.rmtree(os.path.join(survey.output_dir, 'coadd'))

    # Build png postage stamps of the coadds.
    rgbkwargs = dict(mnmx=(-1, 100), arcsinh=1)
    #rgbkwargs_resid = dict(mnmx=(0.1, 2), arcsinh=1)
    rgbkwargs_resid = dict(mnmx=(-1, 100), arcsinh=1)

    #coadd_list = [('image-central', C.coimgs,   rgbkwargs),
    #              ('model-central', C.comods,   rgbkwargs),
    #              ('resid-central', C.coresids, rgbkwargs_resid)]
    coadd_list = [('model-nocentral', C.comods, rgbkwargs),
                  ('image-central', C.coresids, rgbkwargs_resid)]

    for name, ims, rgbkw in coadd_list:
        rgb = get_rgb(ims, bands, **rgbkw)
        kwa = {}
        outfn = os.path.join(survey.output_dir,
                             '{}-{}.jpg'.format(objid, name))
        print('Writing {}'.format(outfn), flush=True, file=log)
        imsave_jpeg(outfn, rgb, origin='lower', **kwa)
        del rgb
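
The if log/else branches above duplicate the make_coadds call just to toggle the redirection. A sketch of one way to collapse that with contextlib.ExitStack, entering the redirects only when needed (generic names, not the original code):

import contextlib
import io

def noisy_task():
    print("working...")

def run(log=None):
    # Enter the redirects only when a log stream is supplied;
    # the call site below stays single.
    with contextlib.ExitStack() as stack:
        if log is not None:
            stack.enter_context(contextlib.redirect_stdout(log))
            stack.enter_context(contextlib.redirect_stderr(log))
        noisy_task()

run()                  # prints to the console
buf = io.StringIO()
run(buf)               # captured in buf instead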
Ejemplo n.º 52
0
    def load_mechanism(self, path, silent=False):
        def loader(self, path):
            # path is assumed to be the path dictionary
            surfaces = []
            if path['mech'].suffix in ['.yaml', '.yml']:  # already a YAML Cantera file
                mech_path = str(path['mech'])
            else:  # otherwise convert it into a YAML Cantera file
                mech_path = str(path['Cantera_Mech'])

                if path['mech'].suffix == '.cti':
                    cti2yaml.convert(path['mech'], path['Cantera_Mech'])
                elif path['mech'].suffix in ['.ctml', '.xml']:
                    raise Exception('not implemented')
                    #ctml2yaml.convert(path['mech'], path['Cantera_Mech'])
                else:  # if not a Cantera file, assume Chemkin format
                    surfaces = self.chemkin2cantera(path)

            print('Validating mechanism...', end='')
            try:  # This test is taken from ck2cti
                # Storing the full text could be costly if the file is large
                self.yaml_txt = path['Cantera_Mech'].read_text()
                self.gas = ct.Solution(yaml=self.yaml_txt)
                for surfname in surfaces:
                    # 'outName' in the original is undefined; mech_path appears to be intended
                    phase = ct.Interface(mech_path, surfname, [self.gas])
                print('PASSED.')
            except RuntimeError as e:
                print('FAILED.')
                print(e)

        output = {'success': False, 'message': []}
        # Initialize and report any problems to the log, not to the console window
        stdout = io.StringIO()
        stderr = io.StringIO()
        with contextlib.redirect_stderr(stderr):
            with contextlib.redirect_stdout(stdout):
                try:
                    loader(self, path)
                    output['success'] = True
                except Exception as e:
                    output['message'].append(
                        'Error in loading mech\n{:s}'.format(str(e)))
                except:
                    pass
                    # output['message'].append('Error when loading mech:\n')

        ct_out = stdout.getvalue()
        ct_err = stderr.getvalue().replace('INFO:root:', 'Warning: ')

        if 'FAILED' in ct_out:
            output['success'] = False
            self.isLoaded = False
        elif 'PASSED' in ct_out:
            output['success'] = True
            self.isLoaded = True

        for log_str in [ct_out, ct_err]:
            if log_str != '' and not silent:
                output['message'].append(log_str)
                output['message'].append('\n')

        if self.isLoaded:
            self.set_rate_expression_coeffs()  # set copy of coeffs
            self.set_thermo_expression_coeffs()  # set copy of thermo coeffs

        return output
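
One caveat behind the 'INFO:root:' clean-up above: redirect_stderr swaps the sys.stderr attribute, but a logging handler created beforehand keeps a reference to the original stream object, so its output escapes the redirect. The capture in this example presumably works because the handler is created (or the output written) inside the with-block. A small sketch of the pitfall:

import contextlib
import io
import logging
import sys

logging.basicConfig(stream=sys.stderr)  # the handler binds the stream object now

buf = io.StringIO()
with contextlib.redirect_stderr(buf):
    logging.warning("this bypasses the redirect")

print(repr(buf.getvalue()))  # '' (nothing was captured)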
Ejemplo n.º 53
0
def _parse_meta_yaml_impl(
    text: str,
    for_pinning=False,
    platform=None,
    arch=None,
    recipe_dir=None,
    cbc_path=None,
    log_debug=False,
    **kwargs: Any,
) -> "MetaYamlTypedDict":
    from conda_build.config import Config
    from conda_build.metadata import parse, MetaData
    import conda_build.api
    import conda_build.environ
    from conda_build.variants import explode_variants

    if (recipe_dir is not None and cbc_path is not None and arch is not None
            and platform is not None):
        with tempfile.TemporaryDirectory() as tmpdir:
            with open(os.path.join(tmpdir, "meta.yaml"), "w") as fp:
                fp.write(text)

            def _run_parsing():
                config = conda_build.config.get_or_merge_config(
                    None,
                    platform=platform,
                    arch=arch,
                    exclusive_config_file=cbc_path,
                )
                _cbc, _ = conda_build.variants.get_package_combined_spec(
                    tmpdir,
                    config=config,
                )
                return config, _cbc

            if not log_debug:
                fout = io.StringIO()
                ferr = io.StringIO()
                with sys_pipes(), \
                        contextlib.redirect_stdout(fout), \
                        contextlib.redirect_stderr(ferr):
                    config, _cbc = _run_parsing()
            else:
                config, _cbc = _run_parsing()

            cfg_as_dict = {}
            for var in explode_variants(_cbc):
                try:
                    m = MetaData(tmpdir, config=config, variant=var)
                except SystemExit as e:
                    raise RuntimeError(str(e))
                cfg_as_dict.update(conda_build.environ.get_dict(m=m))

            logger.debug("jinja2 environmment:\n%s",
                         pprint.pformat(cfg_as_dict))

        cbc = Config(
            platform=platform,
            arch=arch,
            variant=cfg_as_dict,
            **kwargs,
        )
    else:
        _cfg = {}
        _cfg.update(kwargs)
        if platform is not None:
            _cfg["platform"] = platform
        if arch is not None:
            _cfg["arch"] = arch
        cbc = Config(**_cfg)
        cfg_as_dict = {}

    if for_pinning:
        content = render_meta_yaml(text,
                                   for_pinning=for_pinning,
                                   **cfg_as_dict)
    else:
        content = render_meta_yaml(text, **cfg_as_dict)

    try:
        return parse(content, cbc)
    except Exception:
        logger.debug("template: %s", text)
        logger.debug("context:\n%s", pprint.pformat(cfg_as_dict))
        raise
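
Note the pairing of sys_pipes() (presumably a wurlitzer-style helper) with the redirects: contextlib.redirect_stdout swaps only the Python-level sys.stdout object, so anything written straight to file descriptor 1 by C extensions or subprocesses bypasses it. A minimal dup2-based sketch of fd-level capture, offered as an illustration rather than the helper actually used here:

import os
import sys
import tempfile

def capture_fd1(fn):
    sys.stdout.flush()  # don't let buffered Python output leak late
    saved = os.dup(1)   # keep the real stdout descriptor
    try:
        with tempfile.TemporaryFile(mode="w+b") as tmp:
            os.dup2(tmp.fileno(), 1)  # point fd 1 at the temp file
            try:
                fn()
                sys.stdout.flush()
            finally:
                os.dup2(saved, 1)     # restore the real stdout
            tmp.seek(0)
            return tmp.read()
    finally:
        os.close(saved)

print(capture_fd1(lambda: os.write(1, b"raw fd write\n")))  # b'raw fd write\n'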
Ejemplo n.º 54
0
def main():
    parser = argparse.ArgumentParser(prog="check50")

    parser.add_argument("slug", help=_("prescribed identifier of work to check"))
    parser.add_argument("-d", "--dev",
                        action="store_true",
                        help=_("run check50 in development mode (implies --offline and --verbose).\n"
                               "causes SLUG to be interpreted as a literal path to a checks package"))
    parser.add_argument("--offline",
                        action="store_true",
                        help=_("run checks completely offline (implies --local)"))
    parser.add_argument("-l", "--local",
                        action="store_true",
                        help=_("run checks locally instead of uploading to cs50"))
    parser.add_argument("--log",
                        action="store_true",
                        help=_("display more detailed information about check results"))
    parser.add_argument("-o", "--output",
                        action="store",
                        nargs="+",
                        default=["ansi", "html"],
                        choices=["ansi", "json", "html"],
                        help=_("format of check results"))
    parser.add_argument("--target",
                        action="store",
                        nargs="+",
                        help=_("target specific checks to run"))
    parser.add_argument("--output-file",
                        action="store",
                        metavar="FILE",
                        help=_("file to write output to"))
    parser.add_argument("-v", "--verbose",
                        action="store_true",
                        help=_("display the full tracebacks of any errors (also implies --log)"))
    parser.add_argument("-V", "--version",
                        action="version",
                        version=f"%(prog)s {__version__}")
    parser.add_argument("--logout", action=LogoutAction)

    args = parser.parse_args()

    global SLUG
    SLUG = args.slug


    if args.dev:
        args.offline = True
        args.verbose = True

    if args.offline:
        args.local = True

    if args.verbose:
        # Show lib50 commands being run in verbose mode
        logging.basicConfig(level=os.environ.get("CHECK50_LOGLEVEL", "INFO"))
        lib50.ProgressBar.DISABLED = True
        args.log = True

    # Filter out duplicates from args.output while preserving order
    args.output = list(dict.fromkeys(args.output))

    # Set excepthook
    excepthook.verbose = args.verbose
    excepthook.outputs = args.output
    excepthook.output_file = args.output_file

    if not args.local:
        commit_hash = lib50.push("check50", SLUG, internal.CONFIG_LOADER, data={"check50": True})[1]
        with lib50.ProgressBar("Waiting for results") if "ansi" in args.output else nullcontext():
            tag_hash, results = await_results(commit_hash, SLUG)
    else:
        with lib50.ProgressBar("Checking") if not args.verbose and "ansi" in args.output else nullcontext():
            # If developing, assume slug is a path to check_dir
            if args.dev:
                internal.check_dir = Path(SLUG).expanduser().resolve()
                if not internal.check_dir.is_dir():
                    raise internal.Error(_("{} is not a directory").format(internal.check_dir))
            else:
                # Otherwise have lib50 create a local copy of slug
                try:
                    internal.check_dir = lib50.local(SLUG, offline=args.offline)
                except lib50.ConnectionError:
                    raise internal.Error(_("check50 could not retrieve checks from GitHub. Try running check50 again with --offline.").format(SLUG))
                except lib50.InvalidSlugError:
                    raise_invalid_slug(SLUG, offline=args.offline)

            # Load config
            config = internal.load_config(internal.check_dir)
            # Compile local checks if necessary
            if isinstance(config["checks"], dict):
                config["checks"] = internal.compile_checks(config["checks"], prompt=args.dev)

            install_translations(config["translations"])

            if not args.offline:
                install_dependencies(config["dependencies"], verbose=args.verbose)

            checks_file = (internal.check_dir / config["checks"]).resolve()

            # Have lib50 decide which files to include
            included = lib50.files(config.get("files"))[0]

            # Only open devnull conditionally
            ctxmanager = open(os.devnull, "w") if not args.verbose else nullcontext()
            with ctxmanager as devnull:
                if args.verbose:
                    stdout = sys.stdout
                    stderr = sys.stderr
                else:
                    stdout = stderr = devnull

                # Create a working_area (temp dir) with all included student files named -
                with lib50.working_area(included, name='-') as working_area, \
                        contextlib.redirect_stdout(stdout), \
                        contextlib.redirect_stderr(stderr):

                    runner = CheckRunner(checks_file)

                    # Run checks
                    if args.target:
                        check_results = runner.run(args.target, included, working_area)
                    else:
                        check_results = runner.run_all(included, working_area)

                    results = {
                        "slug": SLUG,
                        "results": [attr.asdict(result) for result in check_results],
                        "version": __version__
                    }


    # Render output
    file_manager = open(args.output_file, "w") if args.output_file else nullcontext(sys.stdout)
    with file_manager as output_file:
        for output in args.output:
            if output == "json":
                output_file.write(renderer.to_json(**results))
                output_file.write("\n")
            elif output == "ansi":
                output_file.write(renderer.to_ansi(**results, log=args.log))
                output_file.write("\n")
            elif output == "html":
                if os.environ.get("CS50_IDE_TYPE") and args.local:
                    html = renderer.to_html(**results)
                    subprocess.check_call(["c9", "exec", "renderresults", "check50", html])
                else:
                    if args.local:
                        html = renderer.to_html(**results)
                        with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".html") as html_file:
                            html_file.write(html)
                        url = f"file://{html_file.name}"
                    else:
                        url = f"https://submit.cs50.io/check50/{tag_hash}"

                    termcolor.cprint(_("To see the results in your browser go to {}").format(url), "white", attrs=["bold"])
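
The open(...) if args.output_file else nullcontext(sys.stdout) expression above is a tidy pattern: nullcontext hands back the wrapped object from __enter__ and closes nothing, so one with-block serves both a real file and stdout. A self-contained sketch (the emit helper is hypothetical):

import sys
from contextlib import nullcontext  # Python 3.7+

def emit(text, output_file=None):
    # One code path: a real file is closed on exit, sys.stdout is not.
    manager = open(output_file, "w") if output_file else nullcontext(sys.stdout)
    with manager as fh:
        fh.write(text + "\n")

emit("hello")             # written to stdout, which stays open
emit("hello", "out.txt")  # written to the file, which is closed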
Ejemplo n.º 55
0
    def _run_file_processor(
        result_channel: MultiprocessingConnection,
        parent_channel: MultiprocessingConnection,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        thread_name: str,
        callback_requests: List[CallbackRequest],
    ) -> None:
        """
        Process the given file.

        :param result_channel: the connection to use for passing back the result
        :type result_channel: multiprocessing.Connection
        :param parent_channel: the parent end of the channel to close in the child
        :type parent_channel: multiprocessing.Connection
        :param file_path: the file to process
        :type file_path: str
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :type pickle_dags: bool
        :param dag_ids: if specified, only examine DAG ID's that are
            in this list
        :type dag_ids: list[str]
        :param thread_name: the name to use for the process that is launched
        :type thread_name: str
        :param callback_requests: failure callback to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :return: None
        """
        # This helper runs in the newly created process
        log: logging.Logger = logging.getLogger("airflow.processor")

        # Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
        # the child, else it won't get closed properly until we exit.
        log.info("Closing parent pipe")

        parent_channel.close()
        del parent_channel

        set_context(log, file_path)
        setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")

        try:
            # redirect stdout/stderr to log
            with redirect_stdout(StreamLogWriter(log, logging.INFO)), \
                    redirect_stderr(StreamLogWriter(log, logging.WARN)), \
                    Stats.timer() as timer:
                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name

                log.info("Started process (PID=%s) to work on %s", os.getpid(),
                         file_path)
                dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
                result: Tuple[int, int] = dag_file_processor.process_file(
                    file_path=file_path,
                    pickle_dags=pickle_dags,
                    callback_requests=callback_requests,
                )
                result_channel.send(result)
            log.info("Processing %s took %.3f seconds", file_path,
                     timer.duration)
        except Exception:
            # Log exceptions through the logging framework.
            log.exception("Got an exception! Propagating...")
            raise
        finally:
            # We re-initialized the ORM within this Process above so we need to
            # tear it down manually here
            settings.dispose_orm()

            result_channel.close()
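
Airflow's StreamLogWriter, used above, adapts a logger to the file protocol so that print output and tracebacks land in the logging framework. A minimal stand-in showing the idea (not the real class):

import logging
from contextlib import redirect_stdout

class LogWriter:
    """File-like shim that forwards writes to a logger."""
    def __init__(self, logger, level):
        self.logger = logger
        self.level = level

    def write(self, message):
        if message.strip():  # skip the bare newlines print() emits
            self.logger.log(self.level, message.rstrip())

    def flush(self):  # the file protocol expects flush() to exist
        pass

logging.basicConfig(level=logging.INFO)
with redirect_stdout(LogWriter(logging.getLogger("demo"), logging.INFO)):
    print("captured into the logging framework")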
Ejemplo n.º 56
0
def task_run(args, dag=None):
    """Runs a single task instance"""
    # Load custom airflow config
    if args.cfg_path:
        with open(args.cfg_path, 'r') as conf_file:
            conf_dict = json.load(conf_file)

        if os.path.exists(args.cfg_path):
            os.remove(args.cfg_path)

        conf.read_dict(conf_dict, source=args.cfg_path)
        settings.configure_vars()

    # IMPORTANT, have to use the NullPool, otherwise, each "run" command may leave
    # behind multiple open sleeping connections while heartbeating, which could
    # easily exceed the database connection limit when
    # processing hundreds of simultaneous tasks.
    settings.configure_orm(disable_connection_pool=True)

    if dag and args.pickle:
        raise AirflowException(
            "You cannot use the --pickle option when using DAG.cli() method.")
    elif args.pickle:
        print(f'Loading pickle id: {args.pickle}')
        dag = get_dag_by_pickle(args.pickle)
    elif not dag:
        dag = get_dag(args.subdir, args.dag_id)
    else:
        # Use DAG from parameter
        pass

    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    ti.refresh_from_db()
    ti.init_run_context(raw=args.raw)

    hostname = get_hostname()

    print(f"Running {ti} on host {hostname}")

    if args.interactive:
        _run_task_by_selected_method(args, dag, ti)
    else:
        if settings.DONOT_MODIFY_HANDLERS:
            with redirect_stdout(StreamLogWriter(ti.log, logging.INFO)), \
                    redirect_stderr(StreamLogWriter(ti.log, logging.WARN)):
                _run_task_by_selected_method(args, dag, ti)
        else:
            # Get all the Handlers from 'airflow.task' logger
            # Add these handlers to the root logger so that we can get logs from
            # any custom loggers defined in the DAG
            airflow_logger_handlers = logging.getLogger(
                'airflow.task').handlers
            root_logger = logging.getLogger()
            root_logger_handlers = root_logger.handlers

            # Remove all handlers from Root Logger to avoid duplicate logs
            for handler in root_logger_handlers:
                root_logger.removeHandler(handler)

            for handler in airflow_logger_handlers:
                root_logger.addHandler(handler)
            root_logger.setLevel(logging.getLogger('airflow.task').level)

            with redirect_stdout(StreamLogWriter(ti.log, logging.INFO)), \
                    redirect_stderr(StreamLogWriter(ti.log, logging.WARN)):
                _run_task_by_selected_method(args, dag, ti)

            # We need to restore the handlers to the loggers as celery worker process
            # can call this command multiple times,
            # so if we don't reset this then logs from next task would go to the wrong place
            for handler in airflow_logger_handlers:
                root_logger.removeHandler(handler)
            for handler in root_logger_handlers:
                root_logger.addHandler(handler)

    logging.shutdown()
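
The handler bookkeeping in the else-branch above (save the root handlers, borrow those of 'airflow.task', restore afterwards) is a reusable pattern. A sketch wrapping it in a context manager; this is a hypothetical helper, not part of Airflow:

import logging
from contextlib import contextmanager

@contextmanager
def borrowed_handlers(source_logger_name):
    source = logging.getLogger(source_logger_name)
    root = logging.getLogger()
    saved_handlers = list(root.handlers)
    saved_level = root.level
    # Swap in the source logger's handlers so its destinations apply globally.
    for handler in saved_handlers:
        root.removeHandler(handler)
    for handler in source.handlers:
        root.addHandler(handler)
    root.setLevel(source.level)
    try:
        yield
    finally:
        # Restore, so later work in the same process logs to the right place.
        for handler in source.handlers:
            root.removeHandler(handler)
        for handler in saved_handlers:
            root.addHandler(handler)
        root.setLevel(saved_level)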
Ejemplo n.º 57
0
print("=" * 5, "closing", "=" * 5)
print("1 - Avant d'obtenir la ressource")
with contextlib.closing(Ressource()):
    print("2 - Avec la ressource!")
print("3 - Après l'avoir relâchée")

print("=" * 5, "suppress", "=" * 5)
with contextlib.suppress(ArithmeticError):
    print("À l'intérieur")
    raise ArithmeticError
    print("On se rend jamais ici")
print("Rien a brisé!")

print("=" * 5, "redirect_stdout", "=" * 5)
sortie = io.StringIO()
with contextlib.redirect_stdout(sortie):
    print("Bonjour")
sortie.seek(0)
print("On a print: `{}`".format(sortie.read()))

print("=" * 5, "plusieurs contextes", "=" * 5)
sortie = io.StringIO()
with contextlib.redirect_stdout(sortie), contextlib.redirect_stderr(sortie):
    print("Print to stdout")
    print("Print to stderr", file=sys.stderr)
sortie.seek(0)
print("On a print: `{}`".format(sortie.read()))

# Pour plus d'information: https://docs.python.org/3/library/contextlib.html
Ejemplo n.º 58
0
with cfg_component("web") as CFG:
    uyuni_roster_config.update({
        "ssh_push_port_https":
        CFG.SSH_PUSH_PORT_HTTPS,
        "ssh_pre_flight_script":
        CFG.SSH_SALT_PRE_FLIGHT_SCRIPT,
        "ssh_use_salt_thin":
        CFG.SSH_USE_SALT_THIN == "true",
    })
    if CFG.SSH_PUSH_SUDO_USER:
        uyuni_roster_config.update({
            "ssh_push_sudo_user":
            CFG.SSH_PUSH_SUDO_USER,
        })

with redirect_stderr(io.StringIO()) as f, cfg_component("cobbler") as CFG:
    try:
        uyuni_roster_config.update({
            "host": CFG.HOST,
        })
    except AttributeError:
        pass

with cfg_component("server") as CFG:
    secret_hash = hashlib.sha512(CFG.secret_key.encode('ascii')).hexdigest()

os.umask(0o66)

with open("/etc/salt/master.d/susemanager_engine.conf", "w") as f:
    f.write(
        yaml.safe_dump(mgr_events_config))
Ejemplo n.º 59
0
# contextlib_redirect.py

from contextlib import redirect_stdout, redirect_stderr
import io
import sys


def misbehaving_function(a):
    sys.stdout.write('(stdout) A: {!r}\n'.format(a))
    sys.stderr.write('(stderr) A: {!r}\n'.format(a))


capture = io.StringIO()
with redirect_stdout(capture), redirect_stderr(capture):
    misbehaving_function(5)

print(capture.getvalue())
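
Funneling both streams into one buffer, as above, loses the distinction between them. Capturing into two buffers keeps it, at the cost of the interleaving order:

import io
import sys
from contextlib import redirect_stdout, redirect_stderr

out, err = io.StringIO(), io.StringIO()
with redirect_stdout(out), redirect_stderr(err):
    print("to stdout")
    print("to stderr", file=sys.stderr)

print("stdout was:", out.getvalue(), end="")
print("stderr was:", err.getvalue(), end="")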
Ejemplo n.º 60
0
def update_team(org_name, repo_name, commit=None):
    if not repo_name.endswith("-feedstock"):
        return

    gh = github.Github(os.environ['GH_TOKEN'])
    org = gh.get_organization(org_name)
    gh_repo = org.get_repo(repo_name)

    tmp_dir = None
    try:
        tmp_dir = tempfile.mkdtemp('_recipe')

        Repo.clone_from(gh_repo.clone_url, tmp_dir, depth=1)
        with open(os.path.join(tmp_dir, "recipe", "meta.yaml"), "r") as fp:
            keep_lines = []
            skip = True
            for line in fp.readlines():
                if line.startswith("extra:"):
                    skip = False
                if not skip:
                    keep_lines.append(line)
        meta = DummyMeta("".join(keep_lines))

        with io.StringIO() as buf, redirect_stdout(buf), redirect_stderr(buf):
            (
                current_maintainers,
                prev_maintainers,
                new_conda_forge_members,
            ) = configure_github_team(
                meta,
                gh_repo,
                org,
                repo_name.replace("-feedstock", ""),
            )

            LOGGER.info(buf.getvalue())

        if commit:
            message = textwrap.dedent("""
                Hi! This is the friendly automated conda-forge-webservice.

                I updated the Github team because of this commit.
                """)
            newm = get_handles(new_conda_forge_members)
            if newm:
                message += textwrap.dedent("""
                    - {} {} added to conda-forge. Welcome to conda-forge!
                      Go to https://github.com/orgs/conda-forge/invitation to see your invitation.
                """.format(newm,
                           "were" if newm.count(",") >= 1 else "was"))  # noqa

            addm = get_handles(current_maintainers - prev_maintainers -
                               new_conda_forge_members)
            if addm:
                message += textwrap.dedent("""
                    - {} {} added to this feedstock maintenance team.
                """.format(addm, "were" if addm.count(",") >= 1 else "was"))

            if addm or newm:
                message += textwrap.dedent("""
                    You should get push access to this feedstock and CI services.

                    Feel free to join the community [chat room](https://gitter.im/conda-forge/conda-forge.github.io).

                    NOTE: Please make sure to not push to the repository directly.
                          Use branches in your fork for any changes and send a PR.
                          More details [here](https://conda-forge.org/docs/maintainer/updating_pkgs.html#forking-and-pull-requests)
                """)  # noqa

                c = gh_repo.get_commit(commit)
                c.create_comment(message)
    finally:
        if tmp_dir is not None:
            shutil.rmtree(tmp_dir)
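
One detail in the "with io.StringIO() as buf, ..." block above is worth calling out: a StringIO used as a context manager is closed on exit, after which getvalue() raises ValueError. That is presumably why the example logs buf.getvalue() inside the block. A tiny sketch:

import io
from contextlib import redirect_stdout

with io.StringIO() as buf, redirect_stdout(buf):
    print("hello")
    captured = buf.getvalue()  # read before the StringIO is closed

print(captured)  # hello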