Code example #1
    def __init__(
        self,
        conf_path,
        channel_conf_path,
        server_address,
        datasource,
        conf_override=None,
        channel_conf_override=None,
    ):
        """format of args should match what you set in conf_mamanger"""
        super().__init__(name="DETestWorker")
        self.server_address = server_address
        self.conf_path = conf_path
        self.channel_conf_path = channel_conf_path

        self.global_config, self.channel_config_loader = _get_de_conf_manager(
            conf_path, channel_conf_path, parse_program_options([])
        )

        # Because multiple DETestWorkers can be in use concurrently,
        # we assign different database numbers (0 through 15, the
        # Redis default) where possible to avoid kombu messaging
        # collisions whenever routing keys are the same.

        # If pytest-xdist is in use with 16 or fewer workers, we can
        # use the number in the worker ID as the database ID.
        # Otherwise, we assign a random number between 0 and 15
        # (inclusive).
        xdist_worker_count = os.environ.get("PYTEST_XDIST_WORKER_COUNT")
        if xdist_worker_count and int(xdist_worker_count) <= 16:
            worker_id = os.environ["PYTEST_XDIST_WORKER"]
            db_number = int(re.sub(r"^gw(\d{1,2})", r"\1", worker_id))  # strip the "gw" prefix
            assert db_number < 16
        else:
            db_number = random.randrange(0, 16)

        # Override global configuration for testing
        self.global_config["broker_url"] = f"redis://localhost:6379/{db_number}"
        self.global_config["shutdown_timeout"] = 1
        self.global_config["server_address"] = self.server_address
        self.global_config["dataspace"]["datasource"] = datasource
        self.global_config["no_webserver"] = False
        self.global_config["webserver"] = {}
        self.global_config["webserver"]["port"] = get_random_port()

        self.de_server = _create_de_server(self.global_config, self.channel_config_loader)
        self.stdout_at_setup = None
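
The database-number selection above can be exercised on its own. Below is a minimal standalone sketch of the same logic; the helper name pick_redis_db_number is ours, not part of decisionengine (pytest-xdist worker IDs take the form "gw0", "gw1", ...):

import os
import random
import re


def pick_redis_db_number():
    """Return a Redis database number (0-15), unique per xdist worker where possible."""
    xdist_worker_count = os.environ.get("PYTEST_XDIST_WORKER_COUNT")
    if xdist_worker_count and int(xdist_worker_count) <= 16:
        worker_id = os.environ["PYTEST_XDIST_WORKER"]  # e.g. "gw3"
        db_number = int(re.sub(r"^gw(\d{1,2})", r"\1", worker_id))
        assert db_number < 16
        return db_number
    return random.randrange(0, 16)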
Code example #2
File: fixtures.py Project: goodenou/decisionengine
    def de_server_factory(request):
        '''
        This parameterized fixture will mock up various datasources.

        Add datasource objects to DATABASES_TO_TEST once they've got
        our basic schema loaded.  Pytest should take it from there and
        automatically run it through all the tests below.
        '''
        logger = logging.getLogger()
        if port:
            host_port = (host, port)
        else:
            host_port = (host, get_random_port())

        conn_fixture = request.getfixturevalue(request.param)

        datasource = {}
        try:
            # SQL Alchemy
            datasource["config"] = {}
            datasource["module"] = "decisionengine.framework.dataspace.datasources.sqlalchemy_ds"
            datasource["name"] = "SQLAlchemyDS"
            datasource["config"]["url"] = conn_fixture["url"]
            datasource["config"]["echo"] = True
        except TypeError:
            datasource["module"] = "decisionengine.framework.dataspace.datasources.postgresql"
            datasource["name"] = "Postgresql"
            datasource["config"] = {}
            try:
                # psycopg2
                datasource["config"]['host'] = conn_fixture.info.host
                datasource["config"]['port'] = conn_fixture.info.port
                datasource["config"]['user'] = conn_fixture.info.user
                datasource["config"]['password'] = conn_fixture.info.password
                datasource["config"]['database'] = conn_fixture.info.dbname
            except AttributeError:
                # psycopg2cffi
                for element in conn_fixture.dsn.split():
                    (key, value) = element.split('=')
                    if value != "''" and value != '""':
                        datasource["config"][key] = value

        logger.debug(f"DE Fixture has datasource config: {datasource}")

        server_proc = DETestWorker(
            conf_path,
            channel_conf_path,
            host_port,
            datasource,
            conf_override,
            channel_conf_override,
        )
        logger.debug("Starting DE Fixture")
        server_proc.start()

        # Ensure the channels have started
        logger.debug(
            f"DE Fixture: Waiting on startup state is_set={server_proc.de_server.startup_complete.is_set()}"
        )
        server_proc.de_server.startup_complete.wait(timeout=3)
        logger.debug(
            f"DE Fixture: startup state is_set={server_proc.de_server.startup_complete.is_set()}"
        )

        # The following block only works if there are active workers;
        # if it is called before any workers exist, it will return
        # immediately instead of blocking as requested.  So long as
        # your config contains at least one worker, this will behave
        # as you'd expect.
        logger.debug("DE Fixture: Waiting on channels to start, timeout=3")
        server_proc.de_server.block_while(State.BOOT, timeout=3)

        if not server_proc.is_alive():
            raise RuntimeError('Could not start PrivateDEServer fixture')

        yield server_proc

        # rpc_stop() does not error out even if the server is already
        # stopped, so it is safe to call under all conditions.
        server_proc.de_server.rpc_stop()

        server_proc.join()

        del server_proc
        gc.collect()
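
The psycopg2cffi fallback above splits a libpq-style DSN string ("key=value key=value ...") on whitespace. A minimal sketch of that parsing as a standalone helper, with a hypothetical DSN value:

def dsn_to_config(dsn):
    """Parse a libpq-style DSN into a dict, skipping empty quoted
    values just as the fixture above does."""
    config = {}
    for element in dsn.split():
        key, value = element.split('=')
        if value not in ("''", '""'):
            config[key] = value
    return config


# Hypothetical usage:
# dsn_to_config("host=localhost port=5432 user=postgres password=''")
# -> {'host': 'localhost', 'port': '5432', 'user': 'postgres'}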
Code example #3
File: fixtures.py Project: vitodb/decisionengine
    def de_server_factory(request):
        '''
        Build and yield a running DE server fixture.
        '''
        if port:
            host_port = (host, port)
        else:
            host_port = (host, get_random_port())

        db_info = {}

        proc_fixture = request.getfixturevalue(pg_prog_name)
        db_info['host'] = proc_fixture.host
        db_info['port'] = proc_fixture.port
        db_info['user'] = proc_fixture.user
        db_info['password'] = proc_fixture.password

        # used to determine the PostgreSQL server version
        conn_fixture = request.getfixturevalue(pg_db_conn_name)

        # pseudo-random database name for testing
        db_info['database'] = DE_DB_NAME + '_test_' + ''.join(
            random.choices(string.ascii_uppercase + string.digits, k=5))

        # Due to the multi-threaded/connection-pooled nature
        # of the DE Server, it is cleaner to build out an
        # unscoped database.  The one created by the `DE_DB`
        # fixture is private to a single socket/connection
        # and cannot be shared cleanly.
        #
        # Even if we could share it, we wouldn't be testing
        # the production data path or connection pooling.

        # DatabaseJanitor will create and drop the tablespace for us
        with DatabaseJanitor(user=db_info['user'],
                             password=db_info['password'],
                             host=db_info['host'],
                             port=db_info['port'],
                             db_name=db_info['database'],
                             version=conn_fixture.server_version):
            # if you swap this for the `DE_DB` fixture, it will
            # block and changes will not be visible to the connection
            # fired up within the DE Server thread.
            with psycopg2.connect(**db_info) as connection:
                for filename in DE_SCHEMA:  # noqa: F405
                    with open(filename, 'r') as _fd, \
                         connection.cursor() as cur:
                        cur.execute(_fd.read())

            server_proc = DETestWorker(conf_path, channel_conf_path, host_port,
                                       db_info, conf_override,
                                       channel_conf_override)
            server_proc.start()
            # The following block only works if there are active workers;
            # if it is called before any workers exist, it will return
            # immediately instead of blocking as requested.  So long as
            # your config contains at least one worker, this will behave
            # as you'd expect.
            server_proc.de_server.block_while(State.BOOT)

            if not server_proc.is_alive():
                raise RuntimeError('Could not start PrivateDEServer fixture')

            yield server_proc

            if server_proc.is_alive():
                server_proc.de_server.rpc_stop()

            server_proc.join()
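
The pseudo-random database name guards against collisions when several test sessions share one PostgreSQL instance. A quick standalone illustration (DE_DB_NAME is assumed to be a module-level constant, e.g. "decisionengine"):

import random
import string

DE_DB_NAME = 'decisionengine'  # assumption: matches the constant used above


def random_test_db_name():
    suffix = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))
    return f'{DE_DB_NAME}_test_{suffix}'  # e.g. "decisionengine_test_7KQ2X"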
Code example #4
import os
import pathlib
import re

import pytest

from decisionengine.framework.dataspace.datasources.tests.fixtures import mock_data_block  # noqa: F401
from decisionengine.framework.engine.DecisionEngine import _get_de_conf_manager
from decisionengine.framework.engine.DecisionEngine import _create_de_server
from decisionengine.framework.engine.DecisionEngine import parse_program_options
from decisionengine.framework.taskmanager.TaskManager import State
from decisionengine.framework.util.sockets import get_random_port

_this_dir = pathlib.Path(__file__).parent.resolve()
_CONFIG_PATH = os.path.join(_this_dir, "etc/decisionengine")
_CHANNEL_CONFIG_PATH = os.path.join(_CONFIG_PATH, 'config.d')

_port = get_random_port()


@pytest.fixture
def deserver_mock_data_block(mock_data_block):  # noqa: F811
    global_config, channel_config_handler = _get_de_conf_manager(_CONFIG_PATH,
                                                                 _CHANNEL_CONFIG_PATH,
                                                                 parse_program_options([f'--port={_port}']))
    server = _create_de_server(global_config, channel_config_handler)
    server.start_channels()
    server.block_while(State.BOOT)
    yield server
    server.stop_channels()


# Because pytest will reorder the tests based on the method name,
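
A hypothetical test using this fixture might look like the following; the test name and body are illustrative, not taken from the project:

def test_deserver_starts(deserver_mock_data_block):
    # By the time the fixture yields, start_channels() has run and
    # block_while(State.BOOT) has returned, so the server is past BOOT
    # and the test body can issue calls against it.
    server = deserver_mock_data_block
    assert server is not None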
Code example #5
    def de_server_factory(tmp_path, request, capsys, monkeypatch):
        """
        This parameterized fixture will mock up various datasources.

        Add datasource objects to DATABASES_TO_TEST once they've got
        our basic schema loaded.  Pytest should take it from there and
        automatically run it throught all the below tests
        """
        logger = logging.getLogger()
        if port:
            host_port = (host, port)
        else:
            host_port = (host, get_random_port())

        conn_fixture = request.getfixturevalue(request.param)

        datasource = {}

        # SQL Alchemy
        datasource["config"] = {}
        datasource["module"] = "decisionengine.framework.dataspace.datasources.sqlalchemy_ds"
        datasource["name"] = "SQLAlchemyDS"
        datasource["config"]["url"] = conn_fixture["url"]
        datasource["config"]["echo"] = True

        logger.debug(f"DE Fixture has datasource config: {datasource}")

        # make it easy to give each fixture a unique private config path
        # for more flexible startup options
        with tempfile.TemporaryDirectory(dir=tmp_path) as tmppath:
            nonlocal conf_path
            nonlocal channel_conf_path
            if conf_path is None:
                conf_path = os.path.join(tmppath, "conf.d")
            if channel_conf_path is None:
                channel_conf_path = os.path.join(tmppath, "channel.conf.d")
            prometheus_multiproc_dir = str(os.path.join(tmppath, "PROMETHEUS_MULTIPROC_DIR"))

            if make_conf_dirs_if_missing and not os.path.exists(conf_path):
                logger.debug(f"DE Fixture making {conf_path}")
                os.makedirs(conf_path)
            if make_conf_dirs_if_missing and not os.path.exists(channel_conf_path):
                logger.debug(f"DE Fixture making {channel_conf_path}")
                os.makedirs(channel_conf_path)
            if not os.path.exists(prometheus_multiproc_dir):
                logger.debug(f"DE Fixture making {prometheus_multiproc_dir}")
                os.makedirs(prometheus_multiproc_dir)

            monkeypatch.setenv("PROMETHEUS_MULTIPROC_DIR", prometheus_multiproc_dir)
            server_proc = DETestWorker(
                conf_path,
                channel_conf_path,
                host_port,
                datasource,
                conf_override,
                channel_conf_override,
            )
            logger.debug("Starting DE Fixture")
            server_proc.start()

            # Ensure the channels have started
            logger.debug(f"DE Fixture: Wait on startup state: is_set={server_proc.de_server.startup_complete.is_set()}")
            server_proc.de_server.startup_complete.wait()
            server_proc.stdout_at_setup = capsys.readouterr().out

            logger.debug(
                f"DE Fixture: Done waiting for startup state: is_set={server_proc.de_server.startup_complete.is_set()}"
            )

            if not server_proc.is_alive():
                raise RuntimeError("Could not start PrivateDEServer fixture")

            yield server_proc

        logger.debug("DE Fixture: beginning cleanup")

        monkeypatch.delenv("PROMETHEUS_MULTIPROC_DIR")

        # rpc_stop() does not error out even if the server is already
        # stopped, so it is safe to call under all conditions.
        logger.debug("DE Fixture: running rpc_stop()")
        server_proc.de_server.rpc_stop()

        logger.debug("DE Fixture: waiting for server_proc.join()")
        server_proc.join()

        del server_proc
        gc.collect()
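
Examples #2 and #5 rely on pytest's parameterized fixtures: request.param carries the name of one of the datasource fixtures, and request.getfixturevalue resolves it at run time. A minimal sketch of that wiring, with hypothetical fixture and variable names:

import pytest

DATABASES_TO_TEST = ("postgres_conn_fixture", "sqlalchemy_conn_fixture")  # hypothetical names


@pytest.fixture(params=DATABASES_TO_TEST)
def de_server_factory(request):
    # request.param is one entry of DATABASES_TO_TEST; resolve it to the
    # actual connection fixture, then build the server around it.
    conn_fixture = request.getfixturevalue(request.param)
    yield conn_fixture  # the real factory yields a started DETestWorker instead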