Ejemplo n.º 1
0
def register_parallel_pytest_cov():
    """Arm pytest-cov's SIGTERM cleanup hook when the plugin is installed.

    Ensures subprocesses terminated during parallel test runs still flush
    their coverage data; a missing pytest-cov is silently tolerated.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        return
    cleanup_on_sigterm()
Ejemplo n.º 2
0
def manager_proc(ipc_dir):
    """Generator fixture: boot the manager process and yield its endpoints.

    Yields the dict of ipc:// addresses the manager listens on; on resume,
    terminates the process, waits up to one second, and returns its exit
    code.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        # flush subprocess coverage data if the manager is SIGTERMed
        cleanup_on_sigterm()

    addrs = {
        'results': 'ipc://%s/manager_results' % ipc_dir,
        'comm': 'ipc://%s/manager_comm' % ipc_dir,
        'graph': 'ipc://%s/manager_graph' % ipc_dir,
        'msg': 'ipc://%s/manager_msg' % ipc_dir,
        'info': 'ipc://%s/manager_info' % ipc_dir,
        'export': 'ipc://%s/manager_export' % ipc_dir,
        'view': 'ipc://%s/manager_view' % ipc_dir,
    }

    # launch the manager with one worker / one collector (1, 1)
    manager = mp.Process(
        name='manager',
        target=run_manager,
        args=(1, 1, addrs['results'], addrs['graph'], addrs['comm'],
              addrs['msg'], addrs['info'], addrs['export'], addrs['view'],
              None, None, None),
    )
    manager.daemon = False
    manager.start()

    yield addrs

    # tear the manager down and report how it exited
    manager.terminate()
    manager.join(1)
    return manager.exitcode
Ejemplo n.º 3
0
    def run(self, q):
        """Serve gRPC on an ephemeral localhost port until killed by signal.

        Reports the bound port to the parent via queue *q* (None when
        startup fails), then sleeps until a signal arrives.
        """
        with ExitStack() as stack:
            try:
                # Translate SIGTERM into SystemExit so `finally` clauses and
                # context managers still run (required to stop buildbox-casd).
                terminate = lambda signalnum, frame: sys.exit(0)
                signal.signal(signal.SIGTERM, terminate)

                try:
                    from pytest_cov.embed import cleanup_on_sigterm
                except ImportError:
                    pass
                else:
                    cleanup_on_sigterm()

                server = stack.enter_context(self._create_server())
                port = server.add_insecure_port("localhost:0")
                server.start()
            except Exception:
                # The parent is blocked reading the queue; unblock it
                # before propagating the failure.
                q.put(None)
                raise

            # Hand the chosen port back to the parent.
            q.put(port)

            # Block until a signal; SIGTERM (above) exits via SystemExit.
            signal.pause()
Ejemplo n.º 4
0
def broker(ipc_dir, graphmgr_addr):
    """Generator fixture: run the broker helper in a child process.

    Yields a BrokerProxy connected over a pipe; on resume requests a
    polite shutdown, escalates to terminate() if the child lingers, and
    returns the child's exit code.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    parent_end, child_end = mp.Pipe()
    # run BrokerHelper in its own process, talking over the pipe
    helper = mp.Process(name='broker',
                        target=BrokerHelper.execute,
                        args=(graphmgr_addr, ipc_dir, child_end),
                        daemon=False)
    helper.start()

    proxy = BrokerProxy(parent_end)
    yield proxy

    # ask nicely first, then force the shutdown
    proxy.exit()
    helper.join(2)
    if helper.is_alive():
        helper.terminate()
        helper.join()
    return helper.exitcode
Ejemplo n.º 5
0
    def _start_sub_pool(cls, actor_config: ActorPoolConfig, process_index: int,
                        started: multiprocessing.Event):
        """Entry point of a sub-pool child process.

        Applies the coverage hook, signal policy, logging setup and event
        loop choice from the pool config for *process_index*, then runs
        the sub-pool coroutine to completion.
        """
        if not _is_windows:
            try:
                # register coverage hooks on SIGTERM
                from pytest_cov.embed import cleanup_on_sigterm
            except ImportError:  # pragma: no cover
                pass
            else:
                if 'COV_CORE_SOURCE' in os.environ:  # pragma: no branch
                    cleanup_on_sigterm()

        conf = actor_config.get_pool_config(process_index)
        if conf['suspend_sigint']:
            # swallow Ctrl-C in the child; the parent coordinates shutdown
            signal.signal(signal.SIGINT, lambda *_: None)

        logging_conf = conf['logging_conf'] or {}
        if logging_conf.get('file'):
            logging.config.fileConfig(logging_conf['file'])
        elif logging_conf.get('level'):
            logging.basicConfig(level=logging_conf['level'],
                                format=logging_conf.get('format'))

        # each child gets a fresh event loop, uvloop-backed if requested
        if conf['use_uvloop']:
            import uvloop
            asyncio.set_event_loop(uvloop.new_event_loop())
        else:
            asyncio.set_event_loop(asyncio.new_event_loop())

        asyncio.run(cls._create_sub_pool(actor_config, process_index, started))
Ejemplo n.º 6
0
 def setUp(self):
     """Prepare disk fixtures and launch the BackupScheduler child process."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     self.barrier = Barrier(2)
     MockNodeHandler.BARRIER = self.barrier
     # swap the real node handler class for the mock (restored in teardown)
     self.node_handler = node_handler_process.NodeHandlerProcess
     node_handler_process.NodeHandlerProcess = MockNodeHandler
     from src.backup_scheduler.backup_scheduler import BackupScheduler
     shutil.rmtree('/tmp/disk_db_concus', ignore_errors=True)
     os.mkdir('/tmp/disk_db_concus')
     shutil.rmtree('/tmp/data_for_backup', ignore_errors=True)
     os.mkdir('/tmp/data_for_backup')
     with open('/tmp/data_for_backup/data', 'w') as data_file:
         data_file.write("adasdsa")
     database = DiskDatabase('/tmp/disk_db_concus')
     shutil.rmtree('/tmp/backup_scheduler_path', ignore_errors=True)
     os.mkdir('/tmp/backup_scheduler_path')
     with open('/tmp/backup_scheduler_path/trash', 'w') as trash_file:
         trash_file.write("trash")
     # each side keeps one end of each one-way pipe
     backup_scheduler_recv, self.client_listener_send = Pipe(False)
     self.client_listener_recv, backup_scheduler_send = Pipe(False)
     scheduler = BackupScheduler('/tmp/backup_scheduler_path', database,
                                 backup_scheduler_recv, backup_scheduler_send, 10)
     self.p = Process(target=scheduler)
     self.p.start()
Ejemplo n.º 7
0
 def __new__(cls, *args, **kwargs):
     """Create the instance, first arming pytest-cov's SIGTERM hook.

     The hook is registered only while coverage is collecting
     (COV_CORE_SOURCE set) and pytest-cov is importable; instance
     creation itself is untouched.
     """
     if 'COV_CORE_SOURCE' in os.environ:  # pragma: no branch
         try:
             from pytest_cov.embed import cleanup_on_sigterm
         except ImportError:  # pragma: no cover
             pass
         else:
             cleanup_on_sigterm()
     return super().__new__(cls, *args, **kwargs)
Ejemplo n.º 8
0
def enable_code_coverage():
    """Ensure code coverage works as expected for `multiprocesses` tests.

    Harmless for other kinds of test, so it is always run: registers
    pytest-cov's SIGTERM cleanup hook whenever the plugin is available.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        return
    cleanup_on_sigterm()
Ejemplo n.º 9
0
def setup_module(module):
    """Pytest module-level hook: make coverage multiprocessing-safe.

    See https://pytest-cov.readthedocs.io/en/latest/subprocess-support.html#if-you-use-multiprocessing-pool
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        return
    cleanup_on_sigterm()
Ejemplo n.º 10
0
 def setUp(self) -> None:
     """Arm the coverage SIGTERM hook and reset per-test state."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     # barrier synchronizes the test with its worker process
     self.barrier = Barrier(2)
     # child process; created by the individual test bodies
     self.p = None
Ejemplo n.º 11
0
 def _inner():
     """Run the Flask app for the live-server fixture (child process).

     Registers pytest-cov's SIGTERM hook first so terminating this
     process still writes its coverage data.
     """
     from pytest_cov.embed import cleanup_on_sigterm
     cleanup_on_sigterm()
     # single-threaded, no reloader: predictable lifecycle under test
     app.run(
         host='localhost',
         port=LIVE_SERVER_PORT,
         use_reloader=False,
         threaded=False,
         debug=False
     )
Ejemplo n.º 12
0
def sync_proc(request, ipc_dir):
    """Generator fixture: run a syncer process plus REQ sockets for clients.

    request.param is either ``(nclients, start)`` or just ``nclients``.
    Yields ``(sync_sockets, start)``; on resume asks the syncer to exit,
    force-kills it if needed, closes every zmq socket, and returns the
    syncer's reply to the exit request.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    interval = 10
    addr = "ipc://%s/sync" % ipc_dir
    comm_addr = "ipc://%s/sync-comm" % ipc_dir
    if isinstance(request.param, tuple):
        nclients, start = request.param
    else:
        nclients, start = request.param, 0

    syncer = mp.Process(target=run_syncer,
                        args=(addr, comm_addr, start, interval),
                        name='syncer',
                        daemon=True)
    syncer.start()

    ctx = zmq.Context()
    # control channel to the syncer
    comm = ctx.socket(zmq.REQ)
    comm.connect(comm_addr)

    # one REQ socket per simulated client
    syncs = [ctx.socket(zmq.REQ) for _ in range(nclients)]
    for sock in syncs:
        sock.connect(addr)
    yield syncs, start

    # ask the syncer to exit and collect its reply
    comm.send_string('exit')
    ret = comm.recv_pyobj()
    syncer.join(2)
    if syncer.is_alive():
        print("try killing sync_proc")
        syncer.terminate()
        syncer.join(1)

    # release all zmq resources
    for sock in syncs:
        sock.close()
    comm.close()
    ctx.term()

    return ret
Ejemplo n.º 13
0
def dbus_service(request):
    """Generator fixture: expose a DBusObject under the requested bus name."""
    cleanup_on_sigterm()
    obj = DBusObject()
    try:
        obj.request_name(request.param)
    except RuntimeError:
        # name unavailable (already owned / no bus) — skip, don't fail
        pytest.skip("Can't get the requested name")

    try:
        yield obj
    finally:
        # always release the service, even if the test blew up
        obj.stop()
Ejemplo n.º 14
0
Archivo: pool.py Proyecto: haijohn/mars
    def _start_sub_pool(cls, actor_config: ActorPoolConfig, process_index: int,
                        started: multiprocessing.Event):
        """Child-process entry point: run sub-pool *process_index* to completion."""
        try:
            # register coverage hooks on SIGTERM
            from pytest_cov.embed import cleanup_on_sigterm
        except ImportError:  # pragma: no cover
            pass
        else:
            if 'COV_CORE_SOURCE' in os.environ:  # pragma: no branch
                cleanup_on_sigterm()

        # a fresh event loop for this child, then run the pool on it
        asyncio.set_event_loop(asyncio.new_event_loop())
        asyncio.run(cls._create_sub_pool(actor_config, process_index, started))
Ejemplo n.º 15
0
def dbus_service():
    """Generator fixture: DBusObject registered as com.example.object."""
    cleanup_on_sigterm()
    obj = DBusObject()
    try:
        obj.request_name('com.example.object')
    except RuntimeError:
        # name unavailable (already owned / no bus) — skip, don't fail
        pytest.skip("Can't get the requested name")

    try:
        yield obj
    finally:
        # always release the service, even if the test blew up
        obj.stop()
Ejemplo n.º 16
0
def test_concurrency():
    """Exercise concurrent_pool across its backends.

    Serial ("s"), coroutine ("c") and thread ("t") pools run everywhere;
    the process pool ("p") is skipped on Windows, where instead a SIGBREAK
    handler plus pytest-cov's cleanup hook are installed so coverage data
    survives Ctrl-Break termination.
    """
    ret = []
    with concurrent_pool("s", ret=ret) as c:
        for i in range(4):
            c.submit(fn, i, bias=1)

    assert sorted(ret) == [1, 2, 3, 4]

    ret = []
    with concurrent_pool("c", ret=ret) as c:
        for i in range(4):
            c.submit(async_fn, i, bias=1)

    assert sorted(ret) == [1, 2, 3, 4]

    ret = []
    with concurrent_pool("t", ret=ret) as c:
        for i in range(4):
            c.submit(fn, i, bias=1)

    assert sorted(ret) == [1, 2, 3, 4]

    # platform.architecture()[1] is the linkage string, e.g. "WindowsPE".
    if "Windows" in platform.architecture()[1]:
        # Fix: signal handlers are invoked as handler(signum, frame); the
        # previous stub declared its parameters in reverse order, which
        # would mislead anyone fleshing it out.
        def shutdown(signum, frame):
            # your app's shutdown or whatever
            pass

        signal.signal(signal.SIGBREAK, shutdown)

        try:
            from pytest_cov.embed import cleanup_on_signal
        except ImportError:
            pass
        else:
            cleanup_on_signal(signal.SIGBREAK)
        # no process pools on Windows — stop here
        return

    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    ret = []
    with concurrent_pool("p", ret=ret) as c:
        for i in range(4):
            c.submit(fn, i, bias=1)

    assert sorted(ret) == [1, 2, 3, 4]
Ejemplo n.º 17
0
def flowchart(request, workerjson, broker, ipc_dir, graphmgr_addr,
              qevent_loop):
    """Generator fixture: run AMI headless and yield a connected Flowchart.

    Yields ``(flowchart, broker)`` once AMI reports at least one source,
    or ``None`` when setup fails; teardown shuts AMI down (terminating if
    necessary) and returns 0 for a clean exit, 1 otherwise.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    args = build_parser().parse_args([
        "-n", "1", '-i',
        str(ipc_dir), '--headless',
        '%s://%s' % (request.param, workerjson)
    ])

    queue = mp.Queue()
    ami = mp.Process(name='ami', target=run_ami, args=(args, queue))
    ami.start()

    try:
        # block until AMI's graph manager is up and reporting sources
        with GraphCommHandler(graphmgr_addr.name, graphmgr_addr.comm) as comm:
            while not comm.sources:
                time.sleep(0.1)

        with Flowchart(broker_addr=broker.broker_sub_addr,
                       graphmgr_addr=graphmgr_addr,
                       checkpoint_addr=broker.checkpoint_pub_addr) as fc:
            qevent_loop.run_until_complete(fc.updateSources(init=True))
            yield (fc, broker)

    except Exception as e:
        # let the fixture exit 'gracefully' if it fails
        print("error setting up flowchart fixture:", e)
        yield None
    finally:
        queue.put(None)
        ami.join(2)
        if ami.is_alive():
            # AMI ignored the shutdown message; force it
            ami.terminate()
            ami.join()

        if ami.exitcode in (0, -signal.SIGTERM):
            return 0
        print('AMI exited with non-zero status code: %d' % ami.exitcode)
        return 1
Ejemplo n.º 18
0
 def setUp(self):
     """Wire up the pipes and start the client-listener child process."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     self.barrier = Barrier(2)
     self.client_listener = None
     # one-way pipes: this side keeps one end, the child gets the other
     self.backup_scheduler_recv, child_send = Pipe(False)
     child_recv, self.backup_scheduler_send = Pipe(False)
     self.p = Process(target=self._launch_process,
                      args=(child_send, child_recv))
     self.p.start()
Ejemplo n.º 19
0
def deploy_server(tmpdir):
    """Generator fixture: run the HTTP server in a child process.

    Points the app at a per-test storage DB, starts the server, yields,
    then terminates and reaps the server process.
    """
    server.app.config['STORAGE'] = Storage(str(tmpdir.join("test.worky")))
    server_process = Process(target=server.app.run, args=(host_ip, ))

    # flush coverage data when the server child is terminated below
    cleanup_on_sigterm()

    server_process.start()

    yield

    server_process.terminate()
    server_process.join()
Ejemplo n.º 20
0
def graphmgr_addr(ipc_dir):
    """Generator fixture: yield the GraphMgrAddress for the test graph."""
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    comm_addr = "ipc://%s/comm" % ipc_dir
    view_addr = "ipc://%s/view" % ipc_dir
    graphinfo_addr = "ipc://%s/info" % ipc_dir

    yield GraphMgrAddress("graph", comm_addr, view_addr, graphinfo_addr)
Ejemplo n.º 21
0
 def setUp(self) -> None:
     """Open a RabbitMQ channel and declare the pipeline_start queue."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     # resources registered here get cleaned up in tearDown
     self.processes_to_join = []
     self.queues_to_purge = []
     self.dirs_to_delete = []
     self.recv_pipe, self.write_pipe = Pipe(False)
     self.connection = pika.BlockingConnection(
         pika.ConnectionParameters(host="localhost"))
     self.channel = self.connection.channel()
     self._setup_queue('pipeline_start')
Ejemplo n.º 22
0
def run_until_timeout(target, args, timeout=1):
    """Run ``target(*args)`` in a child process for at most *timeout* seconds.

    The child is always terminated on the way out; the pytest-cov SIGTERM
    hook (registered here when available) keeps subprocess coverage intact.
    https://pytest-cov.readthedocs.io/en/v2.10.1_a/subprocess-support.html
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    child = Process(target=target, args=args)
    try:
        child.start()
        child.join(timeout=timeout)
    finally:
        # kill the child whether it finished, hung, or start() raised
        child.terminate()
Ejemplo n.º 23
0
 def setUp(self) -> None:
     """Create a backup-file fixture and start the sidecar process."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     shutil.rmtree('/tmp/backup_output', ignore_errors=True)
     os.mkdir('/tmp/backup_output')
     with open('/tmp/example', 'w') as example_file:
         example_file.write("asd")
     BackupFile.create_from_path('/tmp/example', "/tmp/backup_output/out2")
     # silence sidecar logging during the tests
     SidecarProcess.logger = logging.getLogger("dummy")
     self.sidecar_process = SidecarProcess(TestSidecar.PORT, 3)
     self.p = Process(target=self.sidecar_process)
     self.p.start()
Ejemplo n.º 24
0
 def setUp(self) -> None:
     """Reset the consume queue and start the reader child process."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     self.recv_pipe, self.write_pipe = Pipe(False)
     self.connection = pika.BlockingConnection(
         pika.ConnectionParameters(host="localhost"))
     self.channel = self.connection.channel()
     # make sure the queue exists and starts empty
     self.channel.queue_declare(queue=CONSUME_QUEUE)
     self.channel.queue_purge(CONSUME_QUEUE)
     self.test_process = None
     self.consume_process = Process(target=self._read_process,
                                    args=(self.write_pipe, ))
     self.consume_process.start()
Ejemplo n.º 25
0
def _serve(host="", port=5000, quiet=False, use_coverage=False):
    """
    Wrapper function for :func:`jsonrpcserver.serve` and :func:`.quiet_serve`. Can be forked into background.

    Sets up SIGTERM hook using :py:func:`pytest_cov.embed.cleanup_on_sigterm` so coverage data is correctly
    saved when the subprocess is terminated.
    """
    if use_coverage:
        # Called from a unit test: hook SIGTERM so this subprocess still
        # writes coverage data when it is terminated.
        try:
            from pytest_cov.embed import cleanup_on_sigterm
        except ImportError:
            warnings.warn(
                "Could not import coverage module in child process...")
        else:
            cleanup_on_sigterm()
    (quiet_serve if quiet else serve)(host, port)
Ejemplo n.º 26
0
def run_service_datajoint_server():
    """Run a datajoint MySQL server in Docker (blocking).

    Intended to run in its own process, which the test harness terminates;
    the unconditional pytest-cov hook makes sure coverage is still written.
    See: https://pytest-cov.readthedocs.io/en/latest/subprocess-support.html
    """
    from pytest_cov.embed import cleanup_on_sigterm
    cleanup_on_sigterm()

    os.environ['RUNNING_PYTEST'] = 'TRUE'

    with hi.ConsoleCapture(label='[datajoint-server]'):
        script = hi.ShellScript(f"""
        #!/bin/bash
        set -ex

        docker kill datajoint-server-fixture > /dev/null 2>&1 || true
        docker rm datajoint-server-fixture > /dev/null 2>&1 || true
        exec docker run --name datajoint-server-fixture -e MYSQL_ROOT_PASSWORD=tutorial -p {DATAJOINT_SERVER_PORT}:3306 datajoint/mysql
        """, redirect_output_to_stdout=True)
        script.start()
        script.wait()
 def setUp(self) -> None:
     """Recreate the on-disk message set and start the reader process."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     shutil.rmtree('/tmp/message_set', ignore_errors=True)
     os.mkdir('/tmp/message_set')
     self.message_set = DiskMessageSet('/tmp/message_set', recover_state_on_init=True)
     self.recv_pipe, self.write_pipe = Pipe(False)
     self.connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
     self.channel = self.connection.channel()
     # make sure both queues exist and start empty
     for queue in (CONSUME_QUEUE, RESPONSE_QUEUE):
         self.channel.queue_declare(queue=queue)
     for queue in (CONSUME_QUEUE, RESPONSE_QUEUE):
         self.channel.queue_purge(queue)
     self.test_process = None
     self.consume_process = Process(target=self._read_process, args=(self.write_pipe,))
     self.consume_process.start()
Ejemplo n.º 28
0
 def setUp(self) -> None:
     """Declare every pipeline queue and register temp dirs for cleanup."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
         cleanup_on_sigterm()
     except ImportError:
         pass
     # resources registered here get cleaned up in tearDown
     self.processes_to_join = []
     self.queues_to_purge = []
     self.dirs_to_delete = []
     self.recv_pipe, self.write_pipe = Pipe(False)
     self.connection = pika.BlockingConnection(
         pika.ConnectionParameters(host="localhost"))
     self.channel = self.connection.channel()
     for queue in ('yelp_businesses_news', 'pipeline_result',
                   'notify_business_load_end', 'queue_to_join'):
         self._setup_queue(queue)
     self._self_register_dir('/tmp/downloader_data')
     self._self_register_dir('/tmp/joiner_data')
Ejemplo n.º 29
0
def start_ami(request, workerjson):
    """Generator fixture: run AMI headless over TCP and yield a comm handler.

    Yields a connected GraphCommHandler (or ``None`` on failure); teardown
    shuts AMI down and returns 0 for a clean exit, 1 otherwise.
    """
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    args = build_parser().parse_args([
        "-n", "1", '--headless', '--tcp',
        '%s://%s' % (request.param, workerjson)
    ])

    queue = mp.Queue()
    ami = mp.Process(name='ami', target=run_ami, args=(args, queue))
    ami.start()

    try:
        comm_addr = "tcp://%s:%d" % ("127.0.0.1", BasePort + Ports.Comm)
        with GraphCommHandler(args.graph_name, comm_addr) as comm_handler:
            yield comm_handler
    except Exception as e:
        # let the fixture exit 'gracefully' if it fails
        print(e)
        yield None
    finally:
        queue.put(None)
        ami.join(1)
        if ami.is_alive():
            # AMI did not shut down in time; force it
            ami.terminate()
            ami.join(1)

        if ami.exitcode in (0, -signal.SIGTERM):
            return 0
        print('AMI exited with non-zero status code: %d' % ami.exitcode)
        return 1
Ejemplo n.º 30
0
def test_2d_bc(tmp_path):
    """Generate a small 2-D layout dataset with two BCs and sanity-check it."""
    try:
        from pytest_cov.embed import cleanup_on_sigterm
    except ImportError:
        pass
    else:
        cleanup_on_sigterm()

    sample_n = 10
    worker = 4
    path = tmp_path / "test"
    # drive the CLI entry point directly through argv
    sys.argv = [
        "layout_generator",
        "generate",
        "--data_dir",
        str(path),
        "--bcs",
        "[[0.01, 0], [0.02, 0]]",
        "--bcs",
        "[[0.08, 0], [0.09, 0]]",
        "--sample_n",
        str(sample_n),
        "--worker",
        str(worker),
    ]
    options = main(options_flag=True)

    data_dir = path
    data_path_list = list(data_dir.glob(f"*.{options.file_format}"))
    assert data_dir.exists()
    assert len(data_path_list) == options.sample_n

    # spot-check one generated sample
    r = io.load_mat(data_path_list[0])
    assert set(["u", "F", "list", "xs", "ys"]).issubset(set(r.keys()))
    u = r["u"]
    assert u.shape == (options.nx,) * options.ndim
    assert u.min() >= options.u_D
Ejemplo n.º 31
0
import os
import platform
import sys
from pathlib import Path
from subprocess import PIPE, run

import pytest

pytest_plugins = ['helpers_namespace']

# Register pytest-cov's SIGTERM cleanup hook (when the plugin is present)
# so subprocesses spawned by the tests still flush their coverage data.
try:
    from pytest_cov.embed import cleanup_on_sigterm
    cleanup_on_sigterm()
except ImportError:
    pass

# pylint: disable=E1101
# pytest.helpers is dynamic so it confuses pylint

# Enforce the minimum version the message promises: the previous check
# only rejected Python 2, silently accepting 3.0-3.3.
if sys.version_info < (3, 4):
    print("Requires Python 3.4+")
    sys.exit(1)


@pytest.helpers.register
def is_linux():
    """Return True when the test host is running Linux."""
    return platform.system() == 'Linux'


@pytest.helpers.register