def execute_before_any_test():
    print "Initializing tests"
    platform = pytest.config.getoption("--platform")
    if not platform:
        message = ["\n",
                   "Error running tests. Please specify a platform with a --platform flag, for example py.test tests --platform=browser",
                   "List of available platforms:",
                   "browser: run tests in the browser (with Peerio.AutomationSocket === true)",
                   "ios: run tests in the simulator",
                   "android: run tests in the GenyMotion simulator (please launch it before start)",
                   ""]
        pytest.exit('\n'.join(message))
        return

    method = getattr(common.helper, 'platform_' + platform, None)
    if not method:
        pytest.exit('platform not found: ' + platform)
        return

    platform_options = method()
    set_platform(platform_options)

    if 'appium' in platform_options and platform_options['appium']:
        restartAppium()

    if 'browserautomation' in platform_options and platform_options['browserautomation']:
        restartBrowserAutomation()

    if 'chromedriver' in platform_options and platform_options['chromedriver']:
        restartChromedriver()
Example #2
    def stop(self):
        """Stop ManagedSubprocess instance and perform a cleanup

        This method makes sure that there are no child processes left after
        the object destruction finalizes. If a process cannot stop on its
        own, it is forced to stop using SIGTERM/SIGKILL.
        """
        self._process.poll()
        if self._process.returncode is not None:
            msg_fmt = "`%s` process has already terminated with code `%s`"
            pytest.exit(msg_fmt % (self.id, self._process.returncode))
            return

        log.info("Send SIGINT to `%s` session leader", self.id)
        self._process.send_signal(signal.SIGINT)
        try:
            self._process.wait(self._EXIT_TIMEOUT / 2.0)
        except subprocess.TimeoutExpired:
            log.info("Send SIGTERM to `%s` session leader", self.id)
            self._process.send_signal(signal.SIGTERM)
            try:
                self._process.wait(self._EXIT_TIMEOUT / 2.0)
            except subprocess.TimeoutExpired:
                log.info("Send SIGKILL to all `%s` processess", self.id)
                os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
                log.info("wait() for `%s` session leader to die", self.id)
                self._process.wait()

        log.info("`%s` session leader has terminated", self.id)
Example #3
def pytest_configure(config):
    """ Session-wide test configuration.

    Executes before any tests do and can be used to configure the test
    environment.

    To guard against accidental data loss ``pytest_configure`` will bail
    instantly if the test db contains any data. It also configures its own
    logging, which will override any previously user-configured logging. This
    is to ensure the correct messages reach the user's console during the test run.

    """
    db = neo4j.Graph(DEFAULT_DB)
    rels = next(db.match(), None)
    if rels:
        logging.warning(
            'Test Runner will only operate on an empty graph. '
            'Either clear the DB with `neotool clear` or provide '
            'a different DB URI.')

        pytest.exit(1)

    handler = ColorizingStreamHandler()
    handler.level_map = PY2NEO_LOGGING_LEVEL_COLOUR_MAP
    logger.addHandler(handler)

    logger.info('- all logging captured at >= DEBUG and piped '
                'to stdout for test failures.                 ')
    logger.info('- tests running on Neo4J %s', db.neo4j_version)
Example #4
def pytest_sessionstart(session):
    from .core import LibJulia, JuliaInfo, Julia, enable_debug

    options = JuliaOptions()
    for desc in JuliaOptions.supported_options():
        cli_option = "--julia-{}".format(desc.cli_argument_name().lstrip("-"))
        desc.__set__(options, session.config.getoption(cli_option))

    julia_runtime = session.config.getoption("julia_runtime")

    global _USING_DEFAULT_SETUP
    _USING_DEFAULT_SETUP = not (julia_runtime != "julia" or options.as_args())

    if not session.config.getoption("julia"):
        return

    enable_debug()
    global _JULIA_INFO
    _JULIA_INFO = info = JuliaInfo.load(julia=julia_runtime)

    if not info.is_pycall_built():
        print(
            """
PyCall is not installed or built.  Run the following code in Python REPL:

    >>> import julia
    >>> julia.install()

See:
    https://pyjulia.readthedocs.io/en/latest/installation.html
            """,
            file=sys.stderr,
        )
        pytest.exit("PyCall not built", returncode=1)

    if (
        options.compiled_modules != "no"
        and not info.is_compatible_python()
        and info.version_info >= (0, 7)
    ):
        print(
            """
PyJulia does not fully support this combination of Julia and Python.
Try:

    * Pass `--julia-compiled-modules=no` option to disable
      precompilation cache.

    * Use `--julia-runtime` option to specify different Julia
      executable.

    * Pass `--no-julia` to run tests that do not rely on Julia
      runtime.
            """,
            file=sys.stderr,
        )
        pytest.exit("incompatible runtimes", returncode=1)

    api = LibJulia.from_juliainfo(info)
    api.init_julia(options)
Example #5
def check_options(config):
    """Process options to manipulate (produce other options) important for pytest-cloud."""
    if getattr(config, 'slaveinput', {}).get('slaveid', 'local') == 'local' and config.option.cloud_nodes:
        patches.apply_patches()
        mem_per_process = config.option.cloud_mem_per_process
        if mem_per_process:
            mem_per_process = mem_per_process * 1024 * 1024
        virtualenv_path = config.option.cloud_virtualenv_path
        chdir = config.option.cloud_chdir
        python = config.option.cloud_python
        node_specs = get_nodes_specs(
            config.option.cloud_nodes,
            chdir=chdir,
            python=python,
            virtualenv_path=virtualenv_path,
            rsync_max_processes=config.option.cloud_rsync_max_processes,
            rsync_bandwidth_limit=config.option.cloud_rsync_bandwidth_limit,
            max_processes=config.option.cloud_max_processes,
            mem_per_process=mem_per_process,
            config=config)
        if node_specs:
            print('Scheduling with {0} parallel test sessions'.format(len(node_specs)))
        if not node_specs:
            pytest.exit('None of the given test nodes are able to serve as a test node due to capabilities')
        config.option.tx += node_specs
        config.option.dist = 'load'
Example #6
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.docker = docker.from_env()
        if len(self.docker.images(name=self.image_name)) == 0:
            self._fetch_image()

        host_config = self.docker.create_host_config(
            binds=self.volumes_bindings,
            port_bindings=self.port_bindings
        )

        try:
            # following avoids conflict between different builds
            container_name_prefixed = '{}_{}'.format(os.path.basename(os.path.realpath('{}/../..'.format(__file__))),
                                                     self.container_name)
            self.container_id = self.docker.create_container(self.image_name, name=container_name_prefixed,
                                                             ports=self.ports, command=self.command,
                                                             environment=self.env_vars,
                                                             volumes=self.volumes, host_config=host_config).get('Id')
            self.logger.info("docker id is {}".format(self.container_id))
            self.logger.info("starting the temporary docker for image {}".format(self.image_name))
            self.docker.start(self.container_id)
            self.ip_addr = self.docker.inspect_container(self.container_id).get('NetworkSettings', {}).get('IPAddress')
            if not self.ip_addr:
                self.logger.error("temporary docker {} not started".format(self.container_id))
                assert False
            self.logger.info("IP addr is {}".format(self.ip_addr))
            self.wait_until_available()
        except APIError as e:
            pytest.exit(
                "error during setup of docker container {}, aborting. Details:\n{}".format(container_name_prefixed,
                                                                                           str(e)))
Example #7
def pytest_cmdline_main(config):
    """
    Enforce test environment before loading fixtures and running tests
    This helps prevent accidents like creating/dropping the db in a stage/prod
    environment when running the test suite
    """
    if os.environ.get('CONFIG_ENV') != 'test':
        pytest.exit('Please set CONFIG_ENV=test before running test suite')
Example #8
def exit_if_switch_used_with_xdist(session, switch_list):
    if hasattr(session.config, 'slaveinput'):
        for switch_str in switch_list:
            if session.config.getoption(switch_str):
                pytest.exit(
                    'Cannot use {} when running in parallel under pytest-xdist '
                    '(e.g., -n --dist --tx)'.format(switch_str)
                )
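
A hypothetical call site for the helper above, wired into a `pytest_sessionstart` hook (the switch names are invented for illustration):

def pytest_sessionstart(session):
    # Sketch only: refuse to combine these hypothetical switches with pytest-xdist.
    exit_if_switch_used_with_xdist(session, ['--create-db', '--reuse-db'])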
Example #9
	def prepare_test(dirname, testfile):
		if not os.path.exists(nserv_datadir + '/' + dirname + '.nzb'):
			os.makedirs(nserv_datadir + '/' + dirname)
			shutil.copyfile(testdata_dir + '/rarrenamer/' + testfile + '.part01.rar', nserv_datadir + '/' + dirname + '/abc.21')
			shutil.copyfile(testdata_dir + '/rarrenamer/' + testfile + '.part02.rar', nserv_datadir + '/' + dirname + '/abc.02')
			shutil.copyfile(testdata_dir + '/rarrenamer/' + testfile + '.part03.rar', nserv_datadir + '/' + dirname + '/abc.15')
			os.chdir(nserv_datadir + '/' + dirname)
			if 0 != subprocess.call([par2_bin, 'c', '-b20', 'parrename.par2', '*']):
				pytest.exit('Test file generation failed')
Example #10
def pytest_generate_tests(metafunc):
    if 'engine' in metafunc.fixturenames:
        engines = []
        for e in metafunc.config.option.engine.split(','):
            if e in ['theano_engine', 'dict_engine']:
                engines.append(e)
        if not engines:
            pytest.exit("Unknown engine.")
        metafunc.parametrize("engine", engines, scope="session")
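
The hook above reads `metafunc.config.option.engine`, which assumes a matching `--engine` option is registered elsewhere; a minimal sketch of that registration (the default value here is an assumption) might be:

def pytest_addoption(parser):
    # Hypothetical registration of the --engine option consumed by
    # pytest_generate_tests above: a comma-separated list of engine names.
    parser.addoption("--engine", action="store",
                     default="theano_engine,dict_engine",
                     help="comma-separated list of engines to parametrize tests with")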
Example #11
def validate_pytest_config():
  """
  Validate that pytest command line options make sense.
  """
  if pytest.config.option.testing_remote_cluster:
    local_prefixes = ('localhost', '127.', '0.0.0.0')
    if any(pytest.config.option.impalad.startswith(loc) for loc in local_prefixes):
      logging.error("--testing_remote_cluster can not be used with a local impalad")
      pytest.exit("Invalid pytest config option: --testing_remote_cluster")
Example #12
def verify_cleanup():
    """Verify that the test has cleaned up resources correctly."""
    yield

    if len(INSTANCES) >= 2:
        count = len(INSTANCES)
        for inst in INSTANCES:
            inst.stop()
        pytest.exit("Detected non stopped instances "
                    "({}), aborting test run".format(count))
Example #13
def pytest_configure(config):
    "Assert that the environment is correctly configured."

    global topology_only

    if not diagnose_env():
        pytest.exit('environment has errors, please read the logs')

    if config.getoption('--topology-only'):
        topology_only = True
Example #14
def atomic_data_fname(tardis_ref_path):
    atomic_data_fname = os.path.join(
        tardis_ref_path, 'atom_data', 'kurucz_cd23_chianti_H_He.h5')

    atom_data_missing_str = ("{0} atomic data file "
                             "does not seem to exist".format(atomic_data_fname))

    if not os.path.exists(atomic_data_fname):
        pytest.exit(atom_data_missing_str)

    return atomic_data_fname
Example #15
    def check_fullfilled(self):
        try:
            result = self.request_check()
        except SproutException as e:
            # TODO: ensure we only exit this way on sprout usage
            self.destroy_pool()
            log.error("sprout pool could not be fulfilled\n%s", str(e))
            pytest.exit(1)

        log.debug("fulfilled at %f %%", result['progress'])
        return result["fulfilled"]
Example #16
def handle_existing_list(redis_connection, do_force_deletion, list_key):
    """Clear a list in a redis instance or warns the user about deletion."""
    if redis_connection.llen(list_key):
        if not do_force_deletion:
            exit_string = ("redis list '{}' exists in redis instance. "
                           "pass --force to ignore this and delete "
                           "list.").format(list_key)
            pytest.exit(exit_string)
        else:
            print "Deleting pre-exiting redis list '{}'".format(
                list_key)
            clean_list(redis_connection, list_key)
Example #17
def pytest_configure():

    # Make sure we are in the correct folder for the test.
    dirname = os.path.split(os.getcwd())[1]
    if dirname != 'Tests':
        pytest.exit("Not running in the Tests folder!")


    # Make sure the data is downloaded
    from ..Scripts import get_data_files

    return
Example #18
def secrets(request):
    spath = request.config.getoption("--secrets") or "test_options.json"
    fstring = "{}"
    if os.path.exists(spath):
        with open(spath) as f:
            fstring = f.read()

    marshalled, errors = SecretsContainer().loads(fstring)

    if errors:
        pytest.exit("Errors with the secrets container unmarshal")

    return marshalled
Example #19
    def start(self):
        """Start a subprocess

        This method makes python actually spawn the subprocess and wait for it
        to finish initializing.
        """
        self._start_subprocess()

        self._register_stdout_stderr_to_logcatcher()

        if not self._wait_for_subprocess_to_finish_init():
            self.stop()
            pytest.exit("Failed to start `{}` process".format(self.id))
def check_required_loopback_interfaces_available():
    """
    We need at least 9 loopback interfaces configured to run almost all dtests. On Linux, loopback
    interfaces are automatically created as they are used, but on Mac they need to be explicitly
    created. Check if we're running on Mac (Darwin), and if so check that we have at least 9 loopback
    interfaces available; otherwise bail out so we don't run the tests in a known bad config and
    give the user some helpful advice on how to get their machine into a good known config.
    """
    if platform.system() == "Darwin":
        if len(ni.ifaddresses('lo0')[AF_INET]) < 9:
            pytest.exit("At least 9 loopback interfaces are required to run dtests. "
                            "On Mac you can create the required loopback interfaces by running "
                            "'for i in {1..9}; do sudo ifconfig lo0 alias 127.0.0.$i up; done;'")
Example #21
def pytest_configure(config):
    msgs = []

    if not os.path.exists(_testdata):
        msg = "testdata not available! "
        if os.path.exists(os.path.join(_root, ".git")):
            msg += ("Please run git submodule update --init --recursive " +
                    "and then run tests again.")
        else:
            msg += ("The testdata doesn't appear to be included with this package, " +
                    "so finding the right version will be hard. :(")
        msgs.append(msg)

    if config.option.update_xfail:
        # Check for optional requirements
        req_file = os.path.join(_root, "requirements-optional.txt")
        if os.path.exists(req_file):
            with open(req_file, "r") as fp:
                for line in fp:
                    if (line.strip() and
                        not (line.startswith("-r") or
                             line.startswith("#"))):
                        if ";" in line:
                            spec, marker = line.strip().split(";", 1)
                        else:
                            spec, marker = line.strip(), None
                        req = pkg_resources.Requirement.parse(spec)
                        if marker and not pkg_resources.evaluate_marker(marker):
                            msgs.append("%s not available in this environment" % spec)
                        else:
                            try:
                                installed = pkg_resources.working_set.find(req)
                            except pkg_resources.VersionConflict:
                                msgs.append("Outdated version of %s installed, need %s" % (req.name, spec))
                            else:
                                if not installed:
                                    msgs.append("Need %s" % spec)

        # Check cElementTree
        import xml.etree.ElementTree as ElementTree

        try:
            import xml.etree.cElementTree as cElementTree
        except ImportError:
            msgs.append("cElementTree unable to be imported")
        else:
            if cElementTree.Element is ElementTree.Element:
                msgs.append("cElementTree is just an alias for ElementTree")

    if msgs:
        pytest.exit("\n".join(msgs))
Example #22
def pytest_runtest_call():
    """
    This function must be run after setup_module(); it performs standardized
    post-setup routines. It is only used for the 'topology-only' option.
    """
    global topology_only

    if topology_only:
        tgen = get_topogen()
        if tgen is not None:
            # Allow user to play with the setup.
            tgen.mininet_cli()

        pytest.exit('the topology executed successfully')
def dtest_config(request):
    dtest_config = DTestConfig()
    dtest_config.setup(request)

    # if we're on mac, check that we have the required loopback interfaces before doing anything!
    check_required_loopback_interfaces_available()

    try:
        if dtest_config.cassandra_dir is not None:
            validate_install_dir(dtest_config.cassandra_dir)
    except Exception as e:
        pytest.exit("{}. Did you remember to build C*? ('ant clean jar')".format(e))

    yield dtest_config
def pytest_sessionstart(session):
    if pytest.store.parallelizer_role == 'master':
        return
    if session.config.getoption("dev_repo") is None:
        return
    if pytest.store.current_appliance.is_downstream:
        pytest.store.write_line("Cannot git update downstream appliances ...")
        pytest.exit('Failed to git update this appliance, because it is downstream')
    dev_repo = session.config.getoption("dev_repo")
    dev_branch = session.config.getoption("dev_branch")
    pytest.store.write_line(
        "Changing the upstream appliance {} to {}#{} ...".format(
            pytest.store.current_appliance.hostname, dev_repo, dev_branch))
    pytest.store.current_appliance.use_dev_branch(dev_repo, dev_branch)
    pytest.store.write_line("Appliance change finished ...")
Example #25
def app(request):
    """Starts and stops the server for each app in APPS"""

    _app = APPS[request.param]
    _app.name = request.param

    try:
        # Run the live server.
        _app.live(kill_port=True)
    except Exception as e:
        # Skip test if not started.
        pytest.exit(e.message)

    request.addfinalizer(lambda: _app.die())
    return _app
Example #26
def create_test_file(bigdir, sevenzip_bin, sizemb, partmb):
	print('Preparing test file (' + str(sizemb) + 'MB)')

	if not os.path.exists(bigdir):
		os.makedirs(bigdir)

	f = open(bigdir + '/' + str(sizemb) + 'mb.dat', 'wb')
	for n in xrange(sizemb // partmb):
		print('Writing block %i from %i' % (n + 1, sizemb // partmb))
		f.write(os.urandom(partmb * 1024 * 1024))
	f.close()

	if 0 != subprocess.call([sevenzip_bin, 'a', bigdir + '/' + str(sizemb) + 'mb.7z', '-mx=0', '-v' + str(partmb) + 'm', bigdir + '/' + str(sizemb) + 'mb.dat']):
		pytest.exit('Test file generation failed')

	os.remove(bigdir + '/' + str(sizemb) + 'mb.dat')
def pytest_collection_finish(session):
    """ This function modified names of test cases "on the fly"
        when we are using --collect-only parameter for pytest
        (to get the full list of all existing test cases).
    """

    if session.config.option.collectonly is True:
        for item in session.items:
            # If a test case has a doc string we need to modify its name to
            # its doc string to show human-readable reports and to
            # automatically import test cases into the test management system.
            if item._obj.__doc__:
                full_name = get_test_case_docstring(item)
                print(full_name)

    pytest.exit('Done!')
Example #28
def setup_config(request):
    config.clear()
    config.update(DEFAULT_CONFIG.copy())
    tempdir = tempfile.mkdtemp()
    config["LOG_FILE"] = os.path.join(tempdir, 'oar.log')

    db_type = os.environ.get('DB_TYPE', 'memory')
    os.environ.setdefault('DB_TYPE', db_type)

    if db_type not in ('memory', 'sqlite', 'postgresql'):
        pytest.exit("Unsupported database '%s'" % db_type)

    if db_type == "sqlite":
        config['DB_BASE_FILE'] = os.path.join(tempdir, 'db.sqlite')
        config['DB_TYPE'] = 'sqlite'
    elif db_type == "memory":
        config['DB_TYPE'] = 'sqlite'
        config['DB_BASE_FILE'] = ':memory:'
    else:
        config['DB_TYPE'] = 'Pg'
        config['DB_PORT'] = '5432'
        config['DB_BASE_NAME'] = 'oar'
        config['DB_BASE_PASSWD'] = 'oar'
        config['DB_BASE_LOGIN'] = '******'
        config['DB_BASE_PASSWD_RO'] = 'oar_ro'
        config['DB_BASE_LOGIN_RO'] = 'oar_ro'
        config['DB_HOSTNAME'] = 'localhost'

    def dump_configuration(filename):
        with open(filename, 'w', encoding='utf-8') as fd:
            for key, value in six.iteritems(config):
                if not key.startswith('SQLALCHEMY_'):
                    fd.write("%s=%s\n" % (key, str(value)))

    dump_configuration('/etc/oar/oar.conf')
    db.metadata.drop_all(bind=db.engine)
    db.create_all(bind=db.engine)
    kw = {"nullable": True}
    db.op.add_column('resources', db.Column('core', db.Integer, **kw))
    db.op.add_column('resources', db.Column('cpu', db.Integer, **kw))
    db.op.add_column('resources', db.Column('host', db.String(255), **kw))
    db.op.add_column('resources', db.Column('mem', db.Integer, **kw))
    db.reflect()
    yield
    db.close()
    shutil.rmtree(tempdir)
def pytest_collection_modifyitems(session, config, items):
    """Add marker to test name, if test marked with `testrail_id` marker

    If optional kwargs are passed, the test parameters must be a
    superset of these kwargs for the mark to be applied.
    The kwargs can also be passed via the `params` argument.
    """
    ids = defaultdict(list)
    for item in items:
        markers = item.get_marker('testrail_id') or []
        suffix_string = ''
        for marker in markers:
            test_id = marker.args[0]
            params = marker.kwargs.get('params', marker.kwargs)
            if len(params) > 0:
                if not hasattr(item, 'callspec'):
                    raise Exception("testrail_id decorator with filter "
                                    "parameters requires parametrizing "
                                    "of test method")
                params_in_callspec = all(param in item.callspec.params.items()
                                         for param in params.items())
                if not params_in_callspec:
                    continue
            suffix_string = '[({})]'.format(test_id)
            ids[test_id].append(item)
            break
        else:
            ids[None].append(item)
        item.name += suffix_string
        if item.cls is not None and issubclass(item.cls, unittest.TestCase):
            setattr(item.cls, item.name, item.function)

    if config.getoption("--check-testrail-id"):
        errors = []
        without_id = ids.pop(None, [])
        if without_id:
            errors += ["Tests without testrail_id:"]
            errors += ['  ' + x.nodeid for x in without_id]
        for test_id, items in ids.items():
            if len(items) > 1:
                errors += ["Single testrail_id for many cases:"]
                errors += ['  ' + x.nodeid for x in items]
        if errors:
            print('')
            print('\n'.join(errors))
            pytest.exit('Errors with testrail_id')
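
For illustration, a hypothetical parametrized test carrying the `testrail_id` marker handled by the hook above could look like this (the IDs and parameter names are invented):

import pytest

@pytest.mark.testrail_id('123456', params={'backend': 'lvm'})
@pytest.mark.testrail_id('123457', params={'backend': 'ceph'})
@pytest.mark.parametrize('backend', ['lvm', 'ceph'])
def test_create_volume(backend):
    # The hook appends "[(123456)]" or "[(123457)]" to the test name,
    # depending on which marker's params match the callspec parameters.
    assert backend in ('lvm', 'ceph')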
Example #30
            def detailed_check():
                try:
                    result = self.sprout_client.request_check(self.sprout_pool)
                except SproutException as e:
                    # TODO: ensure we only exit this way on sprout usage
                    try:
                        self.sprout_client.destroy_pool(pool_id)
                    except Exception:
                        pass
                    self.println(
                        "sprout pool could not be fulfilled\n{}".format(e))
                    pytest.exit(1)

                self.println("[{now:%H:%M}] fulfilled at {progress:2}%".format(
                    now=datetime.now(),
                    progress=result['progress']
                ))
                return result["fulfilled"]
Example #31
# that, we start getting scary-looking warning messages, saying that this
# makes HTTPS insecure. The following silences those warnings:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Test that the Boto libraries are new enough. These tests want to test a
# large variety of DynamoDB API features, and to do this we need a new-enough
# version of the Boto libraries (boto3 and botocore) so that they can
# access all these API features.
# In particular, the BillingMode feature was added in botocore 1.12.54.
import botocore
import sys
from distutils.version import LooseVersion
if (LooseVersion(botocore.__version__) < LooseVersion('1.12.54')):
    pytest.exit(
        "Your Boto library is too old. Please upgrade it,\ne.g. using:\n    sudo pip{} install --upgrade boto3"
        .format(sys.version_info[0]))


# By default, tests run against a local Scylla installation on localhost:8080/.
# The "--aws" option can be used to run against Amazon DynamoDB in the us-east-1
# region.
def pytest_addoption(parser):
    parser.addoption(
        "--aws",
        action="store_true",
        help="run against AWS instead of a local Scylla installation")
    parser.addoption(
        "--https",
        action="store_true",
        help="communicate via HTTPS protocol on port 8043 instead of HTTP when"
Example #32
def prepare(request):
    if not os.getcwd().endswith('GeenuFF/geenuff'):
        pytest.exit('Tests need to be run from GeenuFF/geenuff directory')
Example #33
def _smooth_shutdown(*args):
    cov = pytest.config.coverage
    if cov:
        cov.stop()
    pytest.exit("Smooth shutdown requested")
Example #34
    def setUp(self):
        if os.environ.get('TILDEMUSH_ENV') != 'test':
            pytest.exit('Run tildemush tests with TILDEMUSH_ENV=test')

        reset_db()
        GameWorld.reset()
Example #35
def dd_environment_runner(request):
    testing_plugin = os.getenv(TESTING_PLUGIN) == 'true'

    # Do nothing if no e2e action is triggered and continue with tests
    if not testing_plugin and not e2e_active():  # no cov
        return
    # If e2e tests are being run it means the environment has
    # already been spun up so we prevent another invocation
    elif e2e_testing():  # no cov
        # Since the scope is `session` there should only ever be one definition
        fixture_def = request._fixturemanager._arg2fixturedefs[
            E2E_FIXTURE_NAME][0]

        # Make the underlying function a no-op
        fixture_def.func = lambda: None
        return

    try:
        config = request.getfixturevalue(E2E_FIXTURE_NAME)
    except Exception as e:
        # pytest doesn't export this exception class so we have to do some introspection
        if e.__class__.__name__ == 'FixtureLookupError':
            # Make it explicit for our command
            pytest.exit('NO E2E FIXTURE AVAILABLE')

        raise

    metadata = {}

    # Environment fixture also returned some metadata
    if isinstance(config, tuple):
        config, possible_metadata = config

        # Support only defining the env_type for ease-of-use
        if isinstance(possible_metadata, str):
            metadata['env_type'] = possible_metadata
        else:
            metadata.update(possible_metadata)

    # Default to Docker as that is the most common
    metadata.setdefault('env_type', 'docker')

    # Save any environment variables
    metadata.setdefault('env_vars', {})
    metadata['env_vars'].update(get_env_vars(raw=True))

    data = {'config': config, 'metadata': metadata}

    # Serialize to json
    data = json.dumps(data, separators=(',', ':'))

    # Using base64 ensures:
    # 1. Printing to stdout won't fail
    # 2. Easy parsing since there are no spaces
    message = urlsafe_b64encode(data.encode('utf-8'))

    message = 'DDEV_E2E_START_MESSAGE {} DDEV_E2E_END_MESSAGE'.format(
        message.decode('utf-8'))

    if testing_plugin:
        return message
    else:  # no cov
        # Exit testing and pass data back up to command
        pytest.exit(message)
Example #36
    def post_manifest(self):
        if not DEV:
            return

        run("docker", "kill", self.path.k8s)
        run("docker", "rm", self.path.k8s)

        image = os.environ["AMBASSADOR_DOCKER_IMAGE"]

        if not AmbassadorTest.IMAGE_BUILT:
            AmbassadorTest.IMAGE_BUILT = True
            context = os.path.dirname(
                os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
            print("Starting docker build...", end="")
            sys.stdout.flush()
            result = run("docker", "build", context, "-t", image)
            try:
                result.check_returncode()
                print("done.")
            except Exception as e:
                print((result.stdout + b"\n" + result.stderr).decode("utf8"))
                pytest.exit("container failed to build")

        fname = "/tmp/k8s-%s.yaml" % self.path.k8s
        if os.path.exists(fname):
            with open(fname) as fd:
                content = fd.read()
        else:
            nsp = getattr(self, 'namespace', None) or 'default'

            result = run("kubectl", "get", "-n", nsp, "-o", "yaml", "secret",
                         self.path.k8s)
            result.check_returncode()
            with open(fname, "wb") as fd:
                fd.write(result.stdout)
            content = result.stdout
        try:
            secret = yaml.load(content)
        except Exception as e:
            print("could not parse YAML:\n%s" % content)
            raise e

        data = secret['data']
        # secret_dir = tempfile.mkdtemp(prefix=self.path.k8s, suffix="secret")
        secret_dir = "/tmp/%s-ambassadormixin-%s" % (self.path.k8s, 'secret')

        shutil.rmtree(secret_dir, ignore_errors=True)
        os.mkdir(secret_dir, 0o777)

        for k, v in data.items():
            with open(os.path.join(secret_dir, k), "wb") as f:
                f.write(base64.decodebytes(bytes(v, "utf8")))
        print("Launching %s container." % self.path.k8s)
        command = ["docker", "run", "-d", "--name", self.path.k8s]

        envs = [
            "KUBERNETES_SERVICE_HOST=kubernetes",
            "KUBERNETES_SERVICE_PORT=443",
            "AMBASSADOR_ID=%s" % self.ambassador_id
        ]

        if self.namespace:
            envs.append("AMBASSADOR_NAMESPACE=%s" % self.namespace)

        if self.single_namespace:
            envs.append("AMBASSADOR_SINGLE_NAMESPACE=yes")

        envs.extend(self.env)
        [command.extend(["-e", env]) for env in envs]

        ports = [
            "%s:8877" % (8877 + self.index),
            "%s:80" % (8080 + self.index),
            "%s:443" % (8443 + self.index)
        ]
        [command.extend(["-p", port]) for port in ports]

        volumes = [
            "%s:/var/run/secrets/kubernetes.io/serviceaccount" % secret_dir
        ]
        [command.extend(["-v", volume]) for volume in volumes]

        command.append(image)

        result = run(*command)
        result.check_returncode()
Example #37
def pytest_collection_modifyitems(session, config, items):
    if len(items) == 0:
        pytest.exit("No tests found; Matplotlib was likely installed without "
                    "test data.")
    knownfail_message = "Test known to fail with mplcairo."
    irrelevant_message = "Test irrelevant for mplcairo."
    textfail_message = ("Test failure with large diff due to different text "
                        "rendering by mplcairo.")
    xfail_modules = {
        "matplotlib.tests.test_compare_images": irrelevant_message,
        "matplotlib.tests.test_mathtext": textfail_message,
        "matplotlib.tests.test_constrainedlayout": textfail_message,
        "matplotlib.tests.test_tightlayout": textfail_message,
    }
    xfail_nodeids = {
        "matplotlib/tests/" + nodeid: message
        for message, nodeids in [
            (knownfail_message, [
                "test_image.py::test_jpeg_alpha",
                "test_image.py::test_figimage[pdf-False]",
                "test_image.py::test_figimage[pdf-True]",
                "test_image.py::test_figimage0[pdf]",
                "test_image.py::test_figimage1[pdf]",
            ]),
            (irrelevant_message, [
                "test_agg.py::test_repeated_save_with_alpha",
                "test_artist.py::test_cull_markers",
                "test_axes.py::test_log_scales[png]",
                "test_axes.py::test_get_tightbbox_polar",
                "test_axes.py::test_normal_axes",
                "test_backend_bases.py::test_non_gui_warning",
                "test_backend_pdf.py::test_composite_image",
                "test_backend_pdf.py::test_multipage_keep_empty",
                "test_backend_pdf.py::test_multipage_pagecount",
                "test_backend_pdf.py::test_multipage_properfinalize",
                "test_backend_ps.py::test_partial_usetex",
                "test_backend_ps.py::test_savefig_to_stringio[ps-landscape]",
                "test_backend_ps.py::test_savefig_to_stringio[ps-portrait]",
                "test_backend_ps.py::test_savefig_to_stringio[eps-landscape]",
                "test_backend_ps.py::test_savefig_to_stringio[eps-portrait]",
                "test_backend_ps.py::test_savefig_to_stringio[eps afm-landscape]",
                "test_backend_ps.py::test_savefig_to_stringio[eps afm-portrait]",
                "test_backend_ps.py::test_source_date_epoch",
                "test_backend_svg.py::test_gid",
                "test_backend_svg.py::test_svg_clear_all_metadata",
                "test_backend_svg.py::test_svg_clear_default_metadata",
                "test_backend_svg.py::test_svg_default_metadata",
                "test_backend_svg.py::test_svg_metadata",
                "test_backend_svg.py::test_svgnone_with_data_coordinates",
                "test_backend_svg.py::test_text_urls",
                "test_backend_svg.py::test_url",
                "test_backend_svg.py::test_url_tick",
                "test_bbox_tight.py::test_bbox_inches_tight_suptile_legend[",
                "test_figure.py::test_savefig_warns",
                "test_image.py::test_composite[",
                "test_polar.py::test_get_tightbbox_polar",
                "test_scale.py::test_logscale_mask[png]",
                "test_simplification.py::test_throw_rendering_complexity_exceeded",
            ]),
            (textfail_message, [
                "test_axes.py::test_gettightbbox_ignoreNaN",
                "test_figure.py::test_align_labels[",
                "test_figure.py::test_tightbbox",
            ])
        ]
        for nodeid in nodeids
    }
    if LooseVersion(mpl.__version__) < "3.0":
        xfail_modules.update({
            "matplotlib.sphinxext.test_tinypages": irrelevant_message,  # matplotlib#11360.
        })
        xfail_nodeids.update({
            "matplotlib/tests" + nodeid: message
            for message, nodeids in [
                (irrelevant_message, [
                    "test_backend_pdf.py::test_empty_rasterised",
                ])
            ]
            for nodeid in nodeids
        })
    xfails = []
    for item in items:
        reason = (xfail_modules.get(item.module.__name__)
                  or xfail_nodeids.get(item.nodeid)
                  or (xfail_nodeids.get(item.nodeid.split("[")[0] + "[")
                      if "[" in item.nodeid else None))
        if reason:
            xfails.append(item)
            item.add_marker(pytest.mark.xfail(reason=reason))
    if config.getoption("file_or_dir") == ["matplotlib"]:
        invalid_xfails = (
            ({*xfail_modules} - {item.module.__name__ for item in xfails})
            | ({*xfail_nodeids}
               - {item.nodeid for item in xfails}
               - {item.nodeid.split("[")[0] + "[" for item in xfails
                  if "[" in item.nodeid}))
        if invalid_xfails:
            warnings.warn("Unused xfails:\n    {}"
                          .format("\n    ".join(sorted(invalid_xfails))))
Example #38
def process_cluster_cli_params(config):
    """
    Process cluster related cli parameters

    Args:
        config (pytest.config): Pytest config object

    Raises:
        ClusterPathNotProvidedError: If a cluster path is missing
        ClusterNameNotProvidedError: If a cluster name is missing
        ClusterNameLengthError: If a cluster name is too short or too long
    """
    cluster_path = get_cli_param(config, "cluster_path")
    if not cluster_path:
        raise ClusterPathNotProvidedError()
    cluster_path = os.path.expanduser(cluster_path)
    if not os.path.exists(cluster_path):
        os.makedirs(cluster_path)
    # Importing here cause once the function is invoked we have already config
    # loaded, so this is OK to import once you sure that config is loaded.
    from ocs_ci.ocs.openshift_ops import OCP

    OCP.set_kubeconfig(
        os.path.join(cluster_path, ocsci_config.RUN["kubeconfig_location"])
    )
    cluster_name = get_cli_param(config, "cluster_name")
    ocsci_config.RUN["cli_params"]["teardown"] = get_cli_param(
        config, "teardown", default=False
    )
    ocsci_config.RUN["cli_params"]["deploy"] = get_cli_param(
        config, "deploy", default=False
    )
    live_deployment = get_cli_param(config, "live_deploy", default=False)
    ocsci_config.DEPLOYMENT["live_deployment"] = live_deployment or (
        ocsci_config.DEPLOYMENT.get("live_deployment", False)
    )
    io_in_bg = get_cli_param(config, "io_in_bg")
    if io_in_bg:
        ocsci_config.RUN["io_in_bg"] = True
        io_load = get_cli_param(config, "io_load")
        if io_load:
            ocsci_config.RUN["io_load"] = io_load
    log_utilization = get_cli_param(config, "log_cluster_utilization")
    if log_utilization:
        ocsci_config.RUN["log_utilization"] = True
    upgrade_ocs_version = get_cli_param(config, "upgrade_ocs_version")
    if upgrade_ocs_version:
        ocsci_config.UPGRADE["upgrade_ocs_version"] = upgrade_ocs_version
    ocs_registry_image = get_cli_param(config, "ocs_registry_image")
    if ocs_registry_image:
        ocsci_config.DEPLOYMENT["ocs_registry_image"] = ocs_registry_image
    upgrade_ocs_registry_image = get_cli_param(config, "upgrade_ocs_registry_image")
    if upgrade_ocs_registry_image:
        ocsci_config.UPGRADE["upgrade_ocs_registry_image"] = upgrade_ocs_registry_image
    ocsci_config.ENV_DATA["cluster_name"] = cluster_name
    ocsci_config.ENV_DATA["cluster_path"] = cluster_path
    get_cli_param(config, "collect-logs")
    if ocsci_config.RUN.get("cli_params").get("deploy"):
        if not cluster_name:
            raise ClusterNameNotProvidedError()
        if (
            len(cluster_name) < CLUSTER_NAME_MIN_CHARACTERS
            or len(cluster_name) > CLUSTER_NAME_MAX_CHARACTERS
        ):
            raise ClusterNameLengthError(cluster_name)
    elif not cluster_name:
        try:
            ocsci_config.ENV_DATA["cluster_name"] = get_cluster_name(cluster_path)
        except FileNotFoundError:
            raise ClusterNameNotProvidedError()
    if get_cli_param(config, "email") and not get_cli_param(config, "--html"):
        pytest.exit("--html option must be provided to send email reports")
    get_cli_param(config, "squad_analysis")
    get_cli_param(config, "-m")
    osd_size = get_cli_param(config, "--osd-size")
    if osd_size:
        ocsci_config.ENV_DATA["device_size"] = osd_size
    ocp_version = get_cli_param(config, "--ocp-version")
    if ocp_version:
        version_config_file = f"ocp-{ocp_version}-config.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_version = get_cli_param(config, "--upgrade-ocp-version")
    if upgrade_ocp_version:
        version_config_file = f"ocp-{upgrade_ocp_version}-upgrade.yaml"
        version_config_file_path = os.path.join(
            OCP_VERSION_CONF_DIR, version_config_file
        )
        load_config_file(version_config_file_path)
    upgrade_ocp_image = get_cli_param(config, "--upgrade-ocp-image")
    if upgrade_ocp_image:
        ocp_image = upgrade_ocp_image.rsplit(":", 1)
        ocsci_config.UPGRADE["ocp_upgrade_path"] = ocp_image[0]
        ocsci_config.UPGRADE["ocp_upgrade_version"] = ocp_image[1]
    ocp_installer_version = get_cli_param(config, "--ocp-installer-version")
    if ocp_installer_version:
        ocsci_config.DEPLOYMENT["installer_version"] = ocp_installer_version
        ocsci_config.RUN["client_version"] = ocp_installer_version
    csv_change = get_cli_param(config, "--csv-change")
    if csv_change:
        csv_change = csv_change.split("::")
        ocsci_config.DEPLOYMENT["csv_change_from"] = csv_change[0]
        ocsci_config.DEPLOYMENT["csv_change_to"] = csv_change[1]
    collect_logs_on_success_run = get_cli_param(config, "collect_logs_on_success_run")
    if collect_logs_on_success_run:
        ocsci_config.REPORTING["collect_logs_on_success_run"] = True
    get_cli_param(config, "dev_mode")
    ceph_debug = get_cli_param(config, "ceph_debug")
    if ceph_debug:
        ocsci_config.DEPLOYMENT["ceph_debug"] = True
Example #39
def test_pytest_exit():
    try:
        pytest.exit("hello")
    except pytest.exit.Exception:
        excinfo = _pytest._code.ExceptionInfo()
        assert excinfo.errisinstance(KeyboardInterrupt)
Example #40
 def teardown_class(cls):
     log.info("TestMethod--------END")
     pytest.exit("测试完成")
Example #41
def _smooth_shutdown(*args):
    if COVERAGE_INSTANCE[0]:
        COVERAGE_INSTANCE[0].stop()
    pytest.exit("Smooth shutdown requested")
Example #42
def test_pytest_exit():
    with pytest.raises(pytest.exit.Exception) as excinfo:
        pytest.exit("hello")
    assert excinfo.errisinstance(pytest.exit.Exception)
Example #43
def pytest_configure(config):
    "Assert that the environment is correctly configured."
    if not diagnose_env():
        pytest.exit('environment has errors, please read the logs')
Example #44
# Py.test `incremental` marker, see http://stackoverflow.com/a/12579625/107049
def pytest_runtest_makereport(item, call):
    if "incremental" in item.keywords:
        if call.excinfo is not None:
            parent = item.parent
            parent._previousfailed = item


def pytest_runtest_setup(item):
    previousfailed = getattr(item.parent, "_previousfailed", None)
    if previousfailed is not None:
        pytest.xfail("previous test failed (%s)" % previousfailed.name)
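
# For illustration only (hypothetical, not part of the original conftest): a test
# class that opts into the two hooks above via the `incremental` marker.
import pytest

@pytest.mark.incremental
class TestSignupFlow:
    def test_register(self):
        assert True

    def test_login(self):
        # xfails automatically if test_register failed, via pytest_runtest_setup above
        assert True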


###############################################################################
#
# Check requirements
#
###############################################################################

try:
    docker_client.images.get('nginxproxy/nginx-proxy:test')
except docker.errors.ImageNotFound:
    pytest.exit("The docker image 'nginxproxy/nginx-proxy:test' is missing")

if docker.__version__ != "2.1.0":
    pytest.exit(
        "This test suite is meant to work with the python docker module v2.1.0"
    )
    def test_multi_hard_roam_6g_to_6g_802dot1x_sha256_wpa3(
            self, get_configuration, lf_test, lf_reports, lf_tools, run_lf,
            add_env_properties, instantiate_profile, get_controller_logs,
            get_ap_config_slots, get_lf_logs, roaming_delay, iteration, client,
            duration, radius_info):
        ttls_passwd = radius_info["password"]
        identity = radius_info['user']
        instantiate_profile_obj = instantiate_profile(
            controller_data=get_configuration['controller'],
            timeout="10",
            ap_data=get_configuration['access_point'],
            type=0)
        print("shut down 2g  band")
        instantiate_profile_obj.ap_2ghz_shutdown()
        print("enable only 5g and 6g")
        instantiate_profile_obj.no_ap_5ghz_shutdown()
        instantiate_profile_obj.no_ap_6ghz_shutdown()

        profile_data = setup_params_general["ssid_modes"]["wpa3_personal"][0]
        ssid_name = profile_data["ssid_name"]
        security_key = profile_data["security_key"]
        security = "wpa3"
        mode = "BRIDGE"
        band = "sixg"
        vlan = 1
        print("disable wlan ")
        instantiate_profile_obj.disable_wlan(
            wlan=setup_params_general["ssid_modes"]["wpa2_personal"][0]
            ["ssid_name"])
        instantiate_profile_obj.disable_wlan(
            wlan=setup_params_general["ssid_modes"]["wpa2_personal"][1]
            ["ssid_name"])
        dut_name = []
        for i in range(len(get_configuration["access_point"])):
            dut_name.append(get_configuration["access_point"][i]["ap_name"])

        print("dut names", dut_name)

        # check channel
        lf_test.create_n_clients(sta_prefix="wlan1",
                                 num_sta=1,
                                 dut_ssid=ssid_name,
                                 dut_security=security,
                                 dut_passwd=security_key,
                                 band="sixg",
                                 lf_tools=lf_tools,
                                 type="11r-sae-802.1x")
        sta_list = lf_tools.get_station_list()
        print(sta_list)
        val = lf_test.wait_for_ip(station=sta_list)
        ch = ""
        if val:
            for sta_name in sta_list:
                sta = sta_name.split(".")[2]
                time.sleep(5)
                ch = lf_tools.station_data_query(station_name=str(sta),
                                                 query="channel")
            print(ch)
            lf_test.Client_disconnect(station_name=sta_list)

        else:
            pytest.exit("station failed to get ip")
            assert False

        lf_test.hard_roam(run_lf=run_lf,
                          get_configuration=get_configuration,
                          lf_tools=lf_tools,
                          lf_reports=lf_reports,
                          instantiate_profile=instantiate_profile,
                          ssid_name=ssid_name,
                          security=security,
                          security_key=security_key,
                          band=band,
                          test="6g",
                          iteration=int(iteration),
                          num_sta=int(client),
                          roaming_delay=roaming_delay,
                          option="ota",
                          channel=ch,
                          duration=duration,
                          iteration_based=True,
                          duration_based=False,
                          dut_name=dut_name,
                          identity=identity,
                          ttls_passwd=ttls_passwd)
def output_hdul(set_inandout_filenames, config):
    # determine if the pipeline is to be run in full, per steps, or skipped
    run_calwebb_spec3 = config.get("calwebb_spec3", "run_calwebb_spec3")
    print("run_calwebb_spec3 = ", run_calwebb_spec3)
    if run_calwebb_spec3 == "skip":
        print(
            '\n * PTT finished processing run_calwebb_spec3 is set to skip. \n'
        )
        pytest.exit(
            "Finished processing file, run_calwebb_spec3 is set to skip in configuration file."
        )
    else:
        run_calwebb_spec3 = bool(run_calwebb_spec3)

    # get the general info
    step, step_input_filename, output_file, in_file_suffix, outstep_file_suffix, True_steps_suffix_map = set_inandout_filenames
    output_directory = config.get("calwebb_spec2_input_file",
                                  "output_directory")
    txt_name = os.path.join(output_directory, True_steps_suffix_map)
    step_input_file = os.path.join(output_directory, step_input_filename)
    step_output_file = os.path.join(output_directory, output_file)
    mode_used = config.get("calwebb_spec2_input_file", "mode_used").lower()

    # start the timer to compute the step running time of NPTT
    nptt_start_time = time.time()

    # determine which steps are to be run, if not run in full
    run_pipe_step = config.getboolean("run_pipe_steps", step)

    # determine which tests are to be run
    master_background_completion_tests = config.getboolean(
        "run_pytest", "_".join((step, "completion", "tests")))
    master_background_reffile_tests = config.getboolean(
        "run_pytest", "_".join((step, "reffile", "tests")))
    master_background_validation_tests = config.getboolean(
        "run_pytest", "_".join((step, "validation", "tests")))
    run_pytests = [
        master_background_completion_tests, master_background_reffile_tests,
        master_background_validation_tests
    ]

    # Get the detector used
    detector = fits.getval(step_input_file, "DETECTOR", 0)

    # get main header from input file
    inhdu = core_utils.read_hdrfits(step_input_file,
                                    info=False,
                                    show_hdr=False)

    # if run_calwebb_spec3 is True, calwebb_spec3 will be called, else individual steps will be run
    step_completed = False
    end_time = '0.0'

    # get the shutter configuration file for MOS data only
    msa_shutter_conf = "No shutter configuration file will be used."
    if core_utils.check_MOS_true(inhdu):
        msa_shutter_conf = config.get("esa_intermediary_products",
                                      "msa_conf_name")

        # check if the configuration shutter file name is in the header of the fits file and if not add it
        msametfl = fits.getval(step_input_file, "MSAMETFL", 0)
        if os.path.basename(msa_shutter_conf) != msametfl:
            msametfl = os.path.basename(msa_shutter_conf)
            fits.setval(step_input_file, "MSAMETFL", 0, value=msametfl)

        # copy the MSA shutter configuration file to this directory if not the working directory
        if os.getcwd() != os.path.dirname(msa_shutter_conf):
            # copy the MSA shutter configuration file into the pytest directory
            print("Removing MSA config file from: ", os.getcwd())
            subprocess.run(["cp", msa_shutter_conf, "."])

    # check if processing an image, then set proper variables
    if mode_used in ('image', 'confirm', 'taconfirm', 'wata', 'msata', 'bota',
                     'focus', 'mimf'):
        run_calwebb_spec3 = True
        imaging_mode = True
        print('\n * Image processing stops after calwebb_spec2. ')
        # end script for imaging case
        if imaging_mode:
            print('\n * NPTT finished processing imaging mode. \n')
            pytest.exit(
                "Imaging does not get processed through calwebb_spec3.")

    # get the name of the configuration file and run the pipeline
    calwebb_spec3_cfg = config.get("calwebb_spec3", "calwebb_spec3_cfg")

    # copy the configuration file to create the pipeline log
    if not os.path.isfile(os.path.join(output_directory, "stpipe-log.cfg")):
        stpipelogcfg = calwebb_spec3_cfg.replace("calwebb_spec3.cfg",
                                                 "stpipe-log.cfg")
        subprocess.run(["cp", stpipelogcfg, os.getcwd()])

    # run the pipeline
    if run_calwebb_spec3:

        # Create the logfile for NPTT, but remove the previous log file
        npttcalspec3_log = os.path.join(output_directory,
                                        'NPTT_calspec3_' + detector + '.log')
        if os.path.isfile(npttcalspec3_log):
            os.remove(npttcalspec3_log)
        print(
            "Spec3 screen information output from NPTT will be logged in file: ",
            npttcalspec3_log)
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(filename=npttcalspec3_log, level=logging.INFO)
        logging.info(pipeline_version)

        run_calwebb_spec3_msg = " *** Will run pipeline in full ... "
        print(run_calwebb_spec3_msg)
        logging.info(run_calwebb_spec3_msg)

        # create the map
        txt_name = "spec3_full_run_map_" + detector + ".txt"
        if os.path.isfile(txt_name):
            os.remove(txt_name)
        master_background_utils.create_completed_steps_txtfile(
            txt_name, step_input_file)

        # start the timer to compute the step running time of NPTT
        core_utils.start_end_PTT_time(txt_name,
                                      start_time=nptt_start_time,
                                      end_time=None)

        if mode_used == "bots":
            calwebb_spec3_cfg = calwebb_spec3_cfg.replace(
                "calwebb_spec3.cfg", "calwebb_tso-spec3.cfg")
            print(
                '\nUsing the following configuration file to run TSO pipeline:'
            )
        else:
            print(
                '\nUsing the following configuration file to run spectroscopic pipeline:'
            )
        print(calwebb_spec3_cfg, '\n')

        # start the timer to compute the step running time
        start_time = time.time()

        # run the pipeline
        print('Running pipeline... \n')
        Spec3Pipeline.call(step_input_file, config_file=calwebb_spec3_cfg)

        # end the timer to compute calwebb_spec3 running time
        end_time = repr(time.time() - start_time)  # this is in seconds
        calspec3_time = " * Pipeline took " + end_time + " seconds to finish.\n"
        print(calspec3_time)
        logging.info(calspec3_time)

        # add the detector string to the name of the files and move them to the working directory
        core_utils.add_detector2filename(output_directory, step_input_file)

        # state name of the final spec3 _cal file
        if "spec2" in step_input_file:
            final_output_name = step_input_file.replace("spec2", "spec3")
        else:
            final_output_name = "final_output_spec3_" + detector + "_cal.fits"
        final_output_name_msg = "\nThe final pipeline product was saved in: " + final_output_name
        print(final_output_name_msg)
        logging.info(final_output_name_msg)

        # read the assign wcs fits file
        hdul = core_utils.read_hdrfits(step_output_file,
                                       info=False,
                                       show_hdr=False)
        # scihdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False, ext=1)

        if core_utils.check_MOS_true(inhdu):
            if os.getcwd() != os.path.dirname(msa_shutter_conf):
                # remove the copy of the MSA shutter configuration file
                print("Removing MSA config file from: ", os.getcwd())
                subprocess.run(["rm", msametfl])

        # rename and move the pipeline log file
        calspec3_pipelog = "calspec3_pipeline_" + detector + ".log"
        try:
            path_where_pipeline_was_run = os.getcwd()
            logfile = glob(path_where_pipeline_was_run + "/pipeline.log")[0]
            print(logfile)
            os.rename(logfile, os.path.join(output_directory,
                                            calspec3_pipelog))
        except IndexError:
            print(
                "\nWARNING: Something went wrong. Could not find a pipeline.log file \n"
            )

        # make sure we are able to find calspec3_pipelog either in the calwebb_spec3 directory or in the working dir
        if not os.path.isfile(calspec3_pipelog):
            calspec3_pipelog = os.path.join(output_directory, calspec3_pipelog)

        # add the running time for all steps
        step_running_times = core_utils.calculate_step_run_time(
            calspec3_pipelog)
        end_time_list = []
        for stp in core_utils.step_string_dict:
            if stp in step_running_times:
                step_completed = True
                step_time = step_running_times[stp]["run_time"]
                out_suffix = core_utils.step_string_dict[stp]["suffix"]
                core_utils.add_completed_steps(txt_name, stp, out_suffix,
                                               step_completed, step_time)
                end_time_list.append(step_time)

        # print total running time in the text file and move it to the indicated directory
        string2print = "pipeline_total_time"
        if float(end_time) <= sum(end_time_list):
            tot_time = repr(sum(end_time_list))
        else:
            tot_time = end_time
        master_background_utils.print_time2file(txt_name, tot_time,
                                                string2print)
        nptt_runtimes_msg = "Pipeline and NPTT run times written in file: " + os.path.basename(
            txt_name) + " in working directory. \n"
        print(nptt_runtimes_msg)
        logging.info(nptt_runtimes_msg)

        # move the final reporting text files to the working directory
        if os.getcwd() != output_directory:
            core_utils.move_txt_files_2workdir(config, detector)

        return hdul, step_output_file, msa_shutter_conf, run_pytests, mode_used

    else:

        # create the map but remove a previous one if it exists
        if os.path.isfile(txt_name):
            os.remove(txt_name)
        master_background_utils.create_completed_steps_txtfile(
            txt_name, step_input_file)

        # start the timer to compute the step running time of NPTT
        core_utils.start_end_PTT_time(txt_name,
                                      start_time=nptt_start_time,
                                      end_time=None)
        msg = "\n Pipeline and NPTT run times will be written in file: " + os.path.basename(
            txt_name) + " in working directory. \n"
        print(msg)
        logging.info(msg)

        if run_pipe_step:

            # Create the logfile for NPTT, but erase the previous one if it exists
            npttcalspec3_log = os.path.join(
                output_directory,
                'NPTT_calspec3_' + detector + '_' + step + '.log')
            if os.path.isfile(npttcalspec3_log):
                os.remove(npttcalspec3_log)
            print(
                "Output information on screen from NPTT will be logged in file: ",
                npttcalspec3_log)
            for handler in logging.root.handlers[:]:
                logging.root.removeHandler(handler)
            logging.basicConfig(filename=npttcalspec3_log, level=logging.INFO)
            logging.info(pipeline_version)

            # check that previous pipeline steps were run up to this point
            core_utils.check_completed_steps(step, step_input_file)

            if os.path.isfile(step_input_file):
                msg = " *** Step " + step + " set to True"
                print(msg)
                logging.info(msg)
                stp = MasterBackgroundStep()

                # get the right configuration files to run the step
                local_pipe_cfg_path = config.get("calwebb_spec2_input_file",
                                                 "local_pipe_cfg_path")

                # start the timer to compute the step running time
                print("running pipeline...")
                start_time = time.time()
                if local_pipe_cfg_path == "pipe_source_tree_code":
                    result = stp.call(step_input_file)
                else:
                    result = stp.call(step_input_file,
                                      config_file=local_pipe_cfg_path +
                                      '/master_background.cfg')
                result.save(step_output_file)

                # end the timer to compute the step running time
                end_time = repr(time.time() - start_time)  # this is in seconds
                msg = "Step " + step + " took " + end_time + " seconds to finish"
                print(msg)
                logging.info(msg)

                if core_utils.check_MOS_true(inhdu):
                    # remove the copy of the MSA shutter configuration file
                    if os.getcwd() != os.path.dirname(msa_shutter_conf):
                        print("Removing MSA config file from: ", os.getcwd())
                        subprocess.run(["rm", msametfl])

                # rename and move the pipeline log file
                pipelog = "pipeline_" + detector + ".log"
                try:
                    calspec3_pipelog = "calspec3_pipeline_" + step + "_" + detector + ".log"
                    pytest_workdir = TESTSDIR
                    logfile = glob(pytest_workdir + "/" + pipelog)[0]
                    os.rename(logfile,
                              os.path.join(output_directory, calspec3_pipelog))
                except IndexError:
                    print(
                        "\n* WARNING: Something went wrong. Could not find a ",
                        pipelog, " file \n")

            else:
                msg = "Skipping step. Input file " + step_input_file + " does not exit."
                print(msg)
                logging.info(msg)
                core_utils.add_completed_steps(txt_name, step,
                                               outstep_file_suffix,
                                               step_completed, end_time)
                pytest.skip("Skipping " + step +
                            " because the input file does not exist.")

        else:
            print("Skipping running pipeline step ", step)
            # add the running time for this step
            end_time = core_utils.get_stp_run_time_from_screenfile(
                step, detector, output_directory)

        if os.path.isfile(step_output_file):
            hdul = core_utils.read_hdrfits(step_output_file,
                                           info=False,
                                           show_hdr=False)
            step_completed = True
            # add the running time for this step
            core_utils.add_completed_steps(txt_name, step, outstep_file_suffix,
                                           step_completed, end_time)
            return hdul, step_output_file, msa_shutter_conf, run_pytests, mode_used
        else:
            step_completed = False
            # add the running time for this step
            core_utils.add_completed_steps(txt_name, step, outstep_file_suffix,
                                           step_completed, end_time)
            pytest.skip("Test skipped because input file " + step_output_file +
                        " does not exist.")
Beispiel #47
0
def pytest_collection_modifyitems(session, config, items):
    if len(items) == 0:
        pytest.exit("No tests found; Matplotlib was likely installed without "
                    "test data.")
    knownfail_message = "Test known to fail with mplcairo."
    irrelevant_message = "Test irrelevant for mplcairo."
    textfail_message = ("Test failure with large diff due to different text "
                        "rendering by mplcairo.")
    xfail_modules = {
        "matplotlib.tests.test_compare_images": irrelevant_message,
        "matplotlib.tests.test_mathtext": textfail_message,
        "matplotlib.tests.test_constrainedlayout": textfail_message,
        "matplotlib.tests.test_tightlayout": textfail_message,
    }
    xfail_nodeids = {
        "matplotlib/tests/" + nodeid: message
        for message, nodeids in [
            (knownfail_message, [
                "test_image.py::test_jpeg_alpha",
                "test_image.py::test_figimage0[pdf]",
                "test_image.py::test_figimage1[pdf]",
            ]),
            (irrelevant_message, [
                "test_agg.py::test_repeated_save_with_alpha",
                "test_artist.py::test_cull_markers",
                "test_axes.py::test_log_scales[png]",
                "test_backend_bases.py::test_non_gui_warning",
                "test_backend_pdf.py::test_composite_image",
                "test_backend_pdf.py::test_multipage_keep_empty",
                "test_backend_pdf.py::test_multipage_pagecount",
                "test_backend_pdf.py::test_multipage_properfinalize",
                "test_backend_ps.py::test_savefig_to_stringio[eps afm]",
                "test_backend_ps.py::test_savefig_to_stringio[eps with usetex]",
                "test_backend_ps.py::test_savefig_to_stringio[eps]",
                "test_backend_ps.py::test_savefig_to_stringio[ps with distiller]",
                "test_backend_ps.py::test_savefig_to_stringio[ps with usetex]",
                "test_backend_ps.py::test_savefig_to_stringio[ps]",
                "test_backend_ps.py::test_source_date_epoch",
                "test_backend_svg.py::test_text_urls",
                "test_bbox_tight.py::test_bbox_inches_tight_suptile_legend[pdf]",
                "test_bbox_tight.py::test_bbox_inches_tight_suptile_legend[png]",
                "test_bbox_tight.py::test_bbox_inches_tight_suptile_legend[svg]",
                "test_image.py::test_composite[True-1-ps- colorimage]",
                "test_image.py::test_composite[False-2-ps- colorimage]",
                "test_scale.py::test_logscale_mask[png]",
                "test_simplification.py::test_throw_rendering_complexity_exceeded",
            ]),
            (textfail_message, [
                "test_axes.py::test_gettightbbox_ignoreNaN",
                "test_figure.py::test_align_labels[pdf]",
                "test_figure.py::test_align_labels[png]",
                "test_figure.py::test_align_labels[svg]",
                "test_figure.py::test_tightbbox",
            ])
        ]
        for nodeid in nodeids
    }
    xfails = []
    for item in items:
        reason = (xfail_modules.get(item.module.__name__)
                  or xfail_nodeids.get(item.nodeid))
        if reason:
            xfails.append(item)
            item.add_marker(pytest.mark.xfail(reason=reason))
    invalid_xfails = (  # Py3.4 compat.
        (set(xfail_modules) - {item.module.__name__ for item in xfails})
        | (set(xfail_nodeids) - {item.nodeid for item in xfails}))
    if invalid_xfails:
        warnings.warn("Unused xfails:\n    {}"
                      .format("\n    ".join(sorted(invalid_xfails))))
Beispiel #48
0
    def kafka_server(request, docker, docker_ip_address, unused_port,
                     session_id, ssl_folder):
        image = request.config.getoption('--docker-image')
        if not image:
            pytest.skip(
                "Skipping functional test as `--docker-image` not provided")
            return
        if not request.config.getoption('--no-pull'):
            docker.images.pull(image)
        kafka_host = docker_ip_address
        kafka_port = unused_port()
        kafka_ssl_port = unused_port()
        kafka_sasl_plain_port = unused_port()
        kafka_sasl_ssl_port = unused_port()
        environment = {
            'ADVERTISED_HOST': kafka_host,
            'ADVERTISED_PORT': kafka_port,
            'ADVERTISED_SSL_PORT': kafka_ssl_port,
            'ADVERTISED_SASL_PLAINTEXT_PORT': kafka_sasl_plain_port,
            'ADVERTISED_SASL_SSL_PORT': kafka_sasl_ssl_port,
            'NUM_PARTITIONS': 2
        }
        kafka_version = image.split(":")[-1].split("_")[-1]
        kafka_version = tuple(int(x) for x in kafka_version.split('.'))
        if kafka_version >= (0, 10, 2):
            environment['SASL_MECHANISMS'] = (
                "PLAIN,GSSAPI,SCRAM-SHA-256,SCRAM-SHA-512")
            environment['SASL_JAAS_FILE'] = "kafka_server_jaas.conf"
        elif kafka_version >= (0, 10, 1):
            environment['SASL_MECHANISMS'] = "PLAIN,GSSAPI"
            environment['SASL_JAAS_FILE'] = "kafka_server_jaas_no_scram.conf"
        else:
            environment['SASL_MECHANISMS'] = "GSSAPI"
            environment['SASL_JAAS_FILE'] = "kafka_server_gssapi_jaas.conf"

        container = docker.containers.run(image=image,
                                          name='aiokafka-tests',
                                          ports={
                                              2181: 2181,
                                              "88/udp": 88,
                                              kafka_port: kafka_port,
                                              kafka_ssl_port: kafka_ssl_port,
                                              kafka_sasl_plain_port: kafka_sasl_plain_port,
                                              kafka_sasl_ssl_port: kafka_sasl_ssl_port
                                          },
                                          volumes={
                                              str(ssl_folder.resolve()): {
                                                  "bind": "/ssl_cert",
                                                  "mode": "ro"
                                              }
                                          },
                                          environment=environment,
                                          tty=True,
                                          detach=True,
                                          remove=True)

        try:
            if not wait_kafka(kafka_host, kafka_port):
                exit_code, output = container.exec_run(
                    ["supervisorctl", "tail", "kafka"])
                print("Kafka failed to start. \n--- STDOUT:")
                print(output.decode(), file=sys.stdout)
                exit_code, output = container.exec_run(
                    ["supervisorctl", "tail", "kafka", "stderr"])
                print("--- STDERR:")
                print(output.decode(), file=sys.stderr)
                pytest.exit("Could not start Kafka Server")

            yield KafkaServer(kafka_host, kafka_port, kafka_ssl_port,
                              kafka_sasl_plain_port, kafka_sasl_ssl_port,
                              container)
        finally:
            container.remove(force=True)
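
# The fixture above relies on a `wait_kafka(host, port)` helper that is not shown
# here; its real implementation is unknown, so the following is only a minimal
# sketch under the assumption that it simply polls the broker port until it
# accepts TCP connections (or gives up after a timeout).
import socket
import time


def wait_kafka(host, port, timeout=120.0, interval=1.0):
    """Return True once `host:port` accepts connections, False on timeout."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with socket.create_connection((host, port), timeout=interval):
                return True
        except OSError:
            time.sleep(interval)
    return False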
Beispiel #49
0
def sanity_check(request) -> None:
    """Sanity checks before running the tests."""
    log_dir = request.config.getoption("--log-dir")
    if not (log_dir is None or os.path.isdir(log_dir)):
        pytest.exit(f"{log_dir} doesn't exist")
Beispiel #50
0
def pytest_keyboard_interrupt(excinfo):
    docker_client = docker_py.from_env()
    docker_client.containers.prune()
    docker_client.volumes.prune()
    docker_client.networks.prune()
    pytest.exit("Keyboard Interrupt occurred, So stopping the execution of tests.")
def output_hdul(set_inandout_filenames, config):
    # determine if the pipeline is to be run in full, per steps, or skipped
    run_calwebb_spec2 = config.get("run_calwebb_spec2_in_full",
                                   "run_calwebb_spec2")
    if run_calwebb_spec2 == "skip":
        print(
            '\n * PTT finished processing: run_calwebb_spec2 is set to skip. \n'
        )
        pytest.exit(
            "Skipping pipeline run and tests for spec2, run_calwebb_spec2 is set to skip in PTT_config file."
        )
    elif "T" in run_calwebb_spec2:
        run_calwebb_spec2 = True
    else:
        run_calwebb_spec2 = False

    # get the general info
    set_inandout_filenames_info = core_utils.read_info4outputhdul(
        config, set_inandout_filenames)
    step, txt_name, step_input_file, step_output_file, outstep_file_suffix = set_inandout_filenames_info
    run_pipe_step = config.getboolean("run_pipe_steps", step)
    # determine which tests are to be run
    resample_spec_completion_tests = config.getboolean(
        "run_pytest", "_".join((step, "completion", "tests")))
    #resample_spec_reffile_tests = config.getboolean("run_pytest", "_".join((step, "reffile", "tests")))
    #resample_spec_validation_tests = config.getboolean("run_pytest", "_".join((step, "validation", "tests")))
    run_pytests = [
        resample_spec_completion_tests
    ]  #, resample_spec_reffile_tests, resample_spec_validation_tests]

    end_time = '0.0'
    # Only run step if data is not IFU or BOTS
    mode_used = config.get("calwebb_spec2_input_file", "mode_used").lower()
    output_directory = config.get("calwebb_spec2_input_file",
                                  "output_directory")
    initial_input_file = config.get("calwebb_spec2_input_file", "input_file")
    initial_input_file = os.path.join(output_directory, initial_input_file)
    if not os.path.isfile(initial_input_file):
        pytest.skip(
            "Skipping " + step +
            " because the initial input file given in PTT_config.cfg does not exist."
        )
    detector = fits.getval(initial_input_file, "DETECTOR", 0)

    if mode_used != "bots" and mode_used != "ifu":
        # if run_calwebb_spec2 is True calwebb_spec2 will be called, else individual steps will be ran
        step_completed = False

        # check if the filter is to be changed
        change_filter_opaque = config.getboolean("calwebb_spec2_input_file",
                                                 "change_filter_opaque")
        if change_filter_opaque:
            is_filter_opaque, step_input_filename = change_filter_opaque2science.change_filter_opaque(
                step_input_file, step=step)
            if is_filter_opaque:
                filter_opaque_msg = "With FILTER=OPAQUE, the calwebb_spec2 will run up to the extract_2d step. " \
                                    "Resample pytest now set to Skip."
                print(filter_opaque_msg)
                core_utils.add_completed_steps(txt_name, step,
                                               outstep_file_suffix,
                                               step_completed, end_time)
                pytest.skip("Skipping " + step +
                            " because the input file does not exist.")

        if run_calwebb_spec2:
            hdul = core_utils.read_hdrfits(step_output_file,
                                           info=False,
                                           show_hdr=False)
            return hdul, step_output_file, run_pytests
        else:

            if run_pipe_step:
                # Create the logfile for PTT, but erase the previous one if it exists
                PTTcalspec2_log = os.path.join(
                    output_directory,
                    'PTT_calspec2_' + detector + '_' + step + '.log')
                if os.path.isfile(PTTcalspec2_log):
                    os.remove(PTTcalspec2_log)
                print(
                    "Information outputed to screen from PTT will be logged in file: ",
                    PTTcalspec2_log)
                for handler in logging.root.handlers[:]:
                    logging.root.removeHandler(handler)
                logging.basicConfig(filename=PTTcalspec2_log,
                                    level=logging.INFO)
                # print pipeline version
                import jwst
                pipeline_version = "\n *** Using jwst pipeline version: " + jwst.__version__ + " *** \n"
                print(pipeline_version)
                logging.info(pipeline_version)
                if change_filter_opaque:
                    logging.info(filter_opaque_msg)

                if os.path.isfile(step_input_file):

                    msg = " *** Step " + step + " set to True"
                    print(msg)
                    logging.info(msg)
                    stp = ResampleSpecStep()

                    # check that previous pipeline steps were run up to this point
                    core_utils.check_completed_steps(step, step_input_file)

                    # get the right configuration files to run the step
                    local_pipe_cfg_path = config.get(
                        "calwebb_spec2_input_file", "local_pipe_cfg_path")
                    # start the timer to compute the step running time
                    start_time = time.time()
                    if local_pipe_cfg_path == "pipe_source_tree_code":
                        result = stp.call(step_input_file)
                    else:
                        result = stp.call(step_input_file,
                                          config_file=local_pipe_cfg_path +
                                          '/resample_spec.cfg')
                    result.save(step_output_file)
                    # end the timer to compute the step running time
                    end_time = repr(time.time() -
                                    start_time)  # this is in seconds
                    msg = "Step " + step + " took " + end_time + " seconds to finish"
                    print(msg)
                    logging.info(msg)
                    step_completed = True
                    hdul = core_utils.read_hdrfits(step_output_file,
                                                   info=False,
                                                   show_hdr=False)

                    # rename and move the pipeline log file
                    pipelog = "pipeline_" + detector + ".log"
                    try:
                        calspec2_pipelog = "calspec2_pipeline_" + step + "_" + detector + ".log"
                        pytest_workdir = TESTSDIR
                        logfile = glob(pytest_workdir + "/" + pipelog)[0]
                        os.rename(
                            logfile,
                            os.path.join(output_directory, calspec2_pipelog))
                    except IndexError:
                        print(
                            "\n* WARNING: Something went wrong. Could not find a ",
                            pipelog, " file \n")

                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step,
                                                   outstep_file_suffix,
                                                   step_completed, end_time)
                    return hdul, step_output_file, run_pytests

                else:
                    msg = " The input file does not exist. Skipping step."
                    print(msg)
                    logging.info(msg)
                    core_utils.add_completed_steps(txt_name, step,
                                                   outstep_file_suffix,
                                                   step_completed, end_time)
                    pytest.skip("Skipping " + step +
                                " because the input file does not exist.")

            else:
                msg = "Skipping running pipeline step " + step
                print(msg)
                logging.info(msg)
                end_time = core_utils.get_stp_run_time_from_screenfile(
                    step, detector, output_directory)
                if os.path.isfile(step_output_file):
                    hdul = core_utils.read_hdrfits(step_output_file,
                                                   info=False,
                                                   show_hdr=False)
                    step_completed = True
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step,
                                                   outstep_file_suffix,
                                                   step_completed, end_time)
                    return hdul, step_output_file, run_pytests
                else:
                    step_completed = False
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step,
                                                   outstep_file_suffix,
                                                   step_completed, end_time)
                    pytest.skip("Test skipped because input file " +
                                step_output_file + " does not exist.")

    else:
        pytest.skip("Skipping " + step +
                    " because data is either IFU or BOTS.")
Beispiel #52
0
def tf_dir(request):
    """Return Path of directory where Terraform files are located."""
    terraform_dir = request.config.getoption("--tf-dir")
    if not Path(terraform_dir).exists():
        pytest.exit(msg=f"'{terraform_dir}' is a non-existent directory")
    return Path(terraform_dir).resolve()
Beispiel #53
0
def filecheck(filepath):
    """Check if file is ulg file."""
    base, ext = os.path.splitext(filepath)
    if ext.lower() not in (".ulg") or not filepath:
        pytest.exit("passed file is not a .ulg file.")
Beispiel #54
0
def dd_environment_runner(request):
    # Skip the runner if the skip environment variable is specified
    do_skip = os.getenv(SKIP_ENVIRONMENT) == 'true'

    testing_plugin = os.getenv(TESTING_PLUGIN) == 'true'

    # Do nothing if no e2e action is triggered and continue with tests
    if not testing_plugin and not e2e_active() and not do_skip:  # no cov
        return
    # If e2e tests are being run it means the environment has
    # already been spun up so we prevent another invocation
    elif e2e_testing() or do_skip:  # no cov
        # Since the scope is `session` there should only ever be one definition
        fixture_def = request._fixturemanager._arg2fixturedefs[
            E2E_FIXTURE_NAME][0]

        # Make the underlying function a no-op
        fixture_def.func = lambda *args, **kwargs: None
        return

    try:
        config = request.getfixturevalue(E2E_FIXTURE_NAME)
    except Exception as e:
        # pytest doesn't export this exception class so we have to do some introspection
        if e.__class__.__name__ == 'FixtureLookupError':
            # Make it explicit for our command
            pytest.exit('NO E2E FIXTURE AVAILABLE')

        raise

    metadata = {}

    # Environment fixture also returned some metadata
    if isinstance(config, tuple):
        config, possible_metadata = config

        # Support only defining the env_type for ease-of-use
        if isinstance(possible_metadata, str):
            metadata['env_type'] = possible_metadata
        else:
            metadata.update(possible_metadata)

    # Default to Docker as that is the most common
    metadata.setdefault('env_type', 'docker')

    # Save any environment variables
    metadata.setdefault('env_vars', {})
    metadata['env_vars'].update(get_env_vars(raw=True))

    # Inject any log configuration
    logs_config = get_state('logs_config', [])
    if logs_config:
        config = format_config(config)
        config['logs'] = logs_config

    # Mount any volumes for Docker
    if metadata['env_type'] == 'docker':
        docker_volumes = get_state('docker_volumes', [])
        if docker_volumes:
            metadata.setdefault('docker_volumes', []).extend(docker_volumes)

    data = {'config': config, 'metadata': metadata}

    message = serialize_data(data)

    message = 'DDEV_E2E_START_MESSAGE {} DDEV_E2E_END_MESSAGE'.format(message)

    if testing_plugin:
        return message
    else:  # no cov
        # Exit testing and pass data back up to command
        pytest.exit(message)
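
# The environment config is handed back to the invoking command through the
# pytest.exit() message, wrapped in the DDEV_E2E_START_MESSAGE /
# DDEV_E2E_END_MESSAGE delimiters seen above. How the caller unpacks it is not
# shown; a hypothetical sketch that extracts the serialized payload from the
# captured pytest output (deserialization is left to the caller):
import re


def extract_e2e_config_message(pytest_output):
    """Return the serialized config embedded in the pytest output, or None."""
    match = re.search(
        r"DDEV_E2E_START_MESSAGE (.*?) DDEV_E2E_END_MESSAGE",
        pytest_output,
        re.DOTALL,
    )
    return match.group(1) if match else None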
Beispiel #55
0
import os
from rbc import errors
import numpy as np
import pytest


rbc_omnisci = pytest.importorskip('rbc.omniscidb')
available_version, reason = rbc_omnisci.is_available()
# Throw an error on Travis CI if the server is not available
if "TRAVIS" in os.environ and not available_version:
    pytest.exit(msg=reason, returncode=1)
pytestmark = pytest.mark.skipif(not available_version, reason=reason)


def test_get_client_config(tmpdir):
    d = tmpdir.mkdir("omnisci")
    fh = d.join("client.conf")
    fh.write("""
    [user]
name  =  foo
password = secret

[server]
port: 1234
host: example.com

[rbc]
debug: False
use_host_target: False
# server: Server [NOT IMPL]
# target_info: TargetInfo
Beispiel #56
0
def pytest_sessionstart(session):
    if not os.getenv('OMDB_API_KEY'):
        pytest.exit("ERROR: OMDB_API_KEY environment variable wasn't found,"
                    " please specify one and retry")
Beispiel #57
0
def process_cluster_cli_params(config):
    """
    Process cluster related cli parameters

    Args:
        config (pytest.config): Pytest config object

    Raises:
        ClusterPathNotProvidedError: If a cluster path is missing
        ClusterNameNotProvidedError: If a cluster name is missing
        ClusterNameLengthError: If a cluster name is too short or too long
    """
    cluster_path = get_cli_param(config, 'cluster_path')
    if not cluster_path:
        raise ClusterPathNotProvidedError()
    cluster_path = os.path.expanduser(cluster_path)
    if not os.path.exists(cluster_path):
        os.makedirs(cluster_path)
    # Importing here because by the time this function is invoked the config
    # has already been loaded, so it is safe to import OCP at this point.
    from ocs_ci.ocs.openshift_ops import OCP
    OCP.set_kubeconfig(
        os.path.join(cluster_path, ocsci_config.RUN['kubeconfig_location']))
    cluster_name = get_cli_param(config, 'cluster_name')
    ocsci_config.RUN['cli_params']['teardown'] = get_cli_param(config,
                                                               "teardown",
                                                               default=False)
    ocsci_config.RUN['cli_params']['deploy'] = get_cli_param(config,
                                                             "deploy",
                                                             default=False)
    live_deployment = get_cli_param(config, "live_deploy", default=False)
    ocsci_config.DEPLOYMENT['live_deployment'] = live_deployment or (
        ocsci_config.DEPLOYMENT.get('live_deployment', False))
    ocsci_config.RUN['cli_params']['io_in_bg'] = get_cli_param(config,
                                                               "io_in_bg",
                                                               default=False)
    upgrade_ocs_version = get_cli_param(config, "upgrade_ocs_version")
    if upgrade_ocs_version:
        ocsci_config.UPGRADE['upgrade_ocs_version'] = upgrade_ocs_version
    ocs_registry_image = get_cli_param(config, "ocs_registry_image")
    if ocs_registry_image:
        ocsci_config.DEPLOYMENT['ocs_registry_image'] = ocs_registry_image
    upgrade_ocs_registry_image = get_cli_param(config,
                                               "upgrade_ocs_registry_image")
    if upgrade_ocs_registry_image:
        ocsci_config.UPGRADE[
            'upgrade_ocs_registry_image'] = upgrade_ocs_registry_image
    ocsci_config.ENV_DATA['cluster_name'] = cluster_name
    ocsci_config.ENV_DATA['cluster_path'] = cluster_path
    get_cli_param(config, 'collect-logs')
    if ocsci_config.RUN.get("cli_params").get("deploy"):
        if not cluster_name:
            raise ClusterNameNotProvidedError()
        if (len(cluster_name) < CLUSTER_NAME_MIN_CHARACTERS
                or len(cluster_name) > CLUSTER_NAME_MAX_CHARACTERS):
            raise ClusterNameLengthError(cluster_name)
    if get_cli_param(config, 'email') and not get_cli_param(config, '--html'):
        pytest.exit("--html option must be provided to send email reports")
    get_cli_param(config, '-m')
    osd_size = get_cli_param(config, '--osd-size')
    if osd_size:
        ocsci_config.ENV_DATA['device_size'] = osd_size
    ocp_version = get_cli_param(config, '--ocp-version')
    if ocp_version:
        version_config_file = f"ocp-{ocp_version}-config.yaml"
        version_config_file_path = os.path.join(OCP_VERSION_CONF_DIR,
                                                version_config_file)
        load_config_file(version_config_file_path)
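
# `get_cli_param` is an ocs_ci helper that is not shown above and its exact
# behavior is an assumption. A minimal sketch consistent with how it is used in
# process_cluster_cli_params (read a pytest CLI option, remember it in
# ocsci_config.RUN['cli_params'], and fall back to a default when absent):
def get_cli_param(config, name_of_param, default=None):
    cli_param = config.getoption(name_of_param, default=default)
    ocsci_config.RUN['cli_params'][name_of_param] = cli_param
    return cli_param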
Beispiel #58
0
# find a suitable font to test with
SUITABLE_FONTS = ["Times New Roman", "Liberation Serif", "Impact", "FreeSans"]
TEST_FONT = None
import pytest
from threading import Event
from tkinter import Tk, font
root = Tk()
available_fonts = font.families()
root.destroy()

for suitable_font in SUITABLE_FONTS:
    if suitable_font in available_fonts:
        TEST_FONT = suitable_font
        break

if TEST_FONT is None:
    pytest.exit("A suitable test font could not be found.")

def schedule_after_test(app, widget):
    callback_event = Event()
    def callback():
        callback_event.set()
    assert not callback_event.is_set()
    widget.after(0, callback)
    # call tk to update the app
    app.tk.update()
    assert callback_event.is_set()
    #widget.cancel(callback)

def schedule_repeat_test(app, widget):
    callback_event = Event()
    def callback():
        callback_event.set()
Beispiel #59
0
def data_access_role(iam_client):
    try:
        return iam_client.get_role(RoleName="S3F2DataAccessRole")["Role"]
    except ClientError as e:
        logger.warning(str(e))
        pytest.exit("Abandoning test run due to missing data access role", 1)
Beispiel #60
0
def omnisci_fixture(caller_globals,
                    minimal_version=(0, 0),
                    suffices=['', '10', 'null', 'array', 'arraynull'],
                    load_columnar=True,
                    debug=False):
    """Usage from a rbc/tests/test_xyz.py file:

      import pytest
      from rbc.tests import omnisci_fixture
      @pytest.fixture(scope='module')
      def omnisci():
          for o in omnisci_fixture(globals()):
              # do some customization here
              yield o

    This fixture creates the following tables:

    f'{omnisci.table_name}' - contains columns f8, f4, i8, i4, i2, i1,
                              b with row size 5.

    f'{omnisci.table_name}10' - contains columns f8, f4, i8, i4, i2,
                                i1, b with row size 10.

    f'{omnisci.table_name}null' - contains columns f8, f4, i8, i4, i2,
                                  i1, b with row size 5, contains null
                                  values.

    f'{omnisci.table_name}array' - contains arrays f8, f4, i8, i4, i2,
                                   i1, b with row size 5

    f'{omnisci.table_name}arraynull' - contains arrays f8, f4, i8, i4, i2,
                                       i1, b with row size 5, contains null
                                       values.
    """
    rbc_omnisci = pytest.importorskip('rbc.omniscidb')
    available_version, reason = rbc_omnisci.is_available()

    def require_version(version, message=None, date=None):
        if not available_version:
            pytest.skip(reason)
        assert isinstance(version, tuple)
        if available_version < version:
            _reason = f'test requires version {version} or newer, got {available_version}'
            if message is not None:
                _reason += f': {message}'
            pytest.skip(_reason)
        if date is not None:
            assert isinstance(date, int)
            available_date = version_date(available_version)
            if not available_date:
                warnings.warn(
                    f'could not determine date of {available_version}')
                return
            if available_date < date:
                _reason = (
                    f'test requires version {version} with date {date} or newer,'
                    f' got {available_version} with date {available_date}')
                if message is not None:
                    _reason += f': {message}'
                pytest.skip(_reason)

    # Throw an error on Travis CI if the server is not available
    if "TRAVIS" in os.environ and not available_version:
        pytest.exit(msg=reason, returncode=1)

    require_version(minimal_version)

    filename = caller_globals['__file__']
    table_name = os.path.splitext(os.path.basename(filename))[0]

    config = rbc_omnisci.get_client_config(debug=debug)
    m = rbc_omnisci.RemoteOmnisci(**config)

    sqltypes = [
        'FLOAT', 'DOUBLE', 'TINYINT', 'SMALLINT', 'INT', 'BIGINT', 'BOOLEAN'
    ]
    arrsqltypes = [t + '[]' for t in sqltypes]
    # todo: TEXT ENCODING DICT, TEXT ENCODING NONE, TIMESTAMP, TIME,
    # DATE, DECIMAL/NUMERIC, GEOMETRY: POINT, LINESTRING, POLYGON,
    # MULTIPOLYGON, See
    # https://www.omnisci.com/docs/latest/5_datatypes.html
    colnames = ['f4', 'f8', 'i1', 'i2', 'i4', 'i8', 'b']
    table_defn = ',\n'.join('%s %s' % (n, t)
                            for t, n in zip(sqltypes, colnames))
    arrtable_defn = ',\n'.join('%s %s' % (n, t)
                               for t, n in zip(arrsqltypes, colnames))

    for suffix in suffices:
        m.sql_execute(f'DROP TABLE IF EXISTS {table_name}{suffix}')
        if 'array' in suffix:
            m.sql_execute(
                f'CREATE TABLE IF NOT EXISTS {table_name}{suffix} ({arrtable_defn});'
            )
        else:
            m.sql_execute(
                f'CREATE TABLE IF NOT EXISTS {table_name}{suffix} ({table_defn});'
            )

    if load_columnar:
        # fast method using load_table_columnar thrift endpoint, use for large tables
        def row_value(row, col, colname, null=False, arr=False):
            if arr:
                if null and (0 == (row + col) % 2):
                    return None
                a = [
                    row_value(row + i, col, colname, null=null, arr=False)
                    for i in range(row)
                ]
                return a
            if null and (0 == (row + col) % 3):
                return None
            if colname == 'b':
                return row % 2 == 0
            return row

        for suffix in suffices:
            columns = defaultdict(list)
            for j, n in enumerate(colnames):
                for i in range(10 if '10' in suffix else 5):
                    v = row_value(i,
                                  j,
                                  n,
                                  null=('null' in suffix),
                                  arr=('array' in suffix))
                    columns[n].append(v)
            m.load_table_columnar(f'{table_name}{suffix}', **columns)

    else:
        # slow method using SQL query statements
        def row_value(row, col, colname, null=False, arr=False):
            if arr:
                if null and (0 == (row + col) % 2):
                    return 'NULL'
                a = [
                    row_value(row + i, col, colname, null=null, arr=False)
                    for i in range(row)
                ]
                return '{' + ', '.join(map(str, a)) + '}'
            if null and (0 == (row + col) % 3):
                return 'NULL'
            if colname == 'b':
                return ("'true'" if row % 2 == 0 else "'false'")
            return row

        for i in range(10):
            if i < 5:
                for suffix in suffices:
                    if suffix == '':
                        table_row = ', '.join(
                            str(row_value(i, j, n))
                            for j, n in enumerate(colnames))
                    elif suffix == 'null':
                        table_row = ', '.join(
                            str(row_value(i, j, n, null=True))
                            for j, n in enumerate(colnames))
                    elif suffix == 'array':
                        table_row = ', '.join(
                            str(row_value(i, j, n, arr=True))
                            for j, n in enumerate(colnames))
                    elif suffix == 'arraynull':
                        table_row = ', '.join(
                            str(row_value(i, j, n, null=True, arr=True))
                            for j, n in enumerate(colnames))
                    else:
                        continue
                    m.sql_execute(
                        f'INSERT INTO {table_name}{suffix} VALUES ({table_row})'
                    )
            if i < 10 and '10' in suffices:
                table_row = ', '.join(
                    str(row_value(i, j, n)) for j, n in enumerate(colnames))
                m.sql_execute(
                    f'INSERT INTO {table_name}10 VALUES ({table_row})')

    m.table_name = table_name
    m.require_version = require_version
    yield m
    for suffix in suffices:
        m.sql_execute(f'DROP TABLE IF EXISTS {table_name}{suffix}')
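
# Example use of the fixture above in a rbc/tests/test_xyz.py module, following
# the pattern given in its docstring. The fixture and `require_version` come
# from the code above; the query and assertion in the test body are only an
# illustrative assumption (the base table has 5 rows per the docstring).
import pytest
from rbc.tests import omnisci_fixture


@pytest.fixture(scope='module')
def omnisci():
    for o in omnisci_fixture(globals()):
        yield o


def test_row_count(omnisci):
    omnisci.require_version((5, 0), 'test requires omniscidb 5.0 or newer')
    _, result = omnisci.sql_execute(f'SELECT i4 FROM {omnisci.table_name}')
    assert len(list(result)) == 5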