Example #1
import pytest
from _pytest.mark.structures import Mark


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    if ('test_no_super_user_cannot_get_access_to_any_member_of_rte' in rep.nodeid
            and rep.when == "setup" and rep.skipped):
        # The gating test was skipped during setup: dynamically mark every
        # test collected from test_multi.py so it gets skipped as well.
        for session_item in item.session.items:
            if 'test_multi.py' in session_item.nodeid:
                session_item.own_markers.append(
                    Mark(name='skip', args=(), kwargs={}))
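The same effect is available without appending to the internal own_markers list: pytest's public Item.add_marker API accepts a MarkDecorator and stores the underlying Mark for you. A minimal sketch under the same gating condition:

import pytest


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    if rep.when == "setup" and rep.skipped:
        for session_item in item.session.items:
            if 'test_multi.py' in session_item.nodeid:
                # add_marker is the supported way to attach a mark at runtime
                session_item.add_marker(
                    pytest.mark.skip(reason="gating test was skipped"))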
Example #2
from typing import Sequence, Tuple

from _pytest.mark.structures import Mark, MarkDecorator


def markdecorators_to_markinfos(marks  # type: Sequence[MarkDecorator]
                                ):
    # type: (...) -> Tuple[Mark, ...]
    """Convert a sequence of MarkDecorator objects into a tuple of Marks."""
    # PYTEST3_OR_GREATER is assumed to be a module-level pytest version flag.
    if PYTEST3_OR_GREATER:
        # pytest >= 3 exposes the underlying Mark on the decorator itself.
        return tuple(m.mark for m in marks)
    elif len(marks) == 0:
        return ()
    else:
        # Older pytest: rebuild each Mark from the decorator's attributes.
        return tuple(Mark(m.name, m.args, m.kwargs) for m in marks)
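A quick usage sketch (the printed output assumes a modern pytest, where MarkDecorator.mark exists):

import pytest

decorators = (pytest.mark.slow, pytest.mark.xfail(reason='flaky'))
for mark in markdecorators_to_markinfos(decorators):
    print(mark.name, mark.args, mark.kwargs)
# slow () {}
# xfail () {'reason': 'flaky'}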
Example #3
import builtins
import os

import pytest
import yaml
from _pytest.mark.structures import Mark, MarkDecorator


def _load_up_the_tests():
    """Read the YAML files from the samples directory and parametrize the tests."""
    tests = []
    for i in os.scandir(
            os.path.abspath(os.curdir) + '/tests/integration/sqcmds/samples'):
        if not i.path.endswith('.yml'):
            continue
        with open(i, 'r') as f:
            out = yaml.load(f.read(), Loader=yaml.BaseLoader)
            # The format of the YAML file assumed is as follows:
            # description: <string>
            # tests:
            #   - command: <sqcmd to execute in non-modal format>
            #     data-directory: <where the data is present>, not used yet
            #     marks: <space separated string of marks to mark the test>
            #     output: |
            #       <json_output>
            #
            #   - command:
            #     ....
            if out and 'tests' in out:
                for t in out['tests']:
                    # We use tags to dynamically mark the parametrized test
                    # the marks MUST be registered in pytest.ini
                    markers = []
                    if 'marks' in t:
                        markers = [
                            MarkDecorator(Mark(x, [], {}))
                            for x in t['marks'].split()
                        ]
                    if 'xfail' in t:
                        except_err = None
                        if 'raises' in t['xfail']:
                            # Resolve the exception class by name from the
                            # builtins module; globals()['__builtins__'] is a
                            # dict in some contexts and a module in others,
                            # so getattr on the module is more robust.
                            except_err = getattr(
                                builtins, t['xfail']['raises'], None)

                        if except_err:
                            markers += [
                                pytest.mark.xfail(reason=t['xfail']['reason'],
                                                  raises=except_err)
                            ]
                        else:
                            if 'reason' in t['xfail']:
                                markers += [
                                    pytest.mark.xfail(
                                        reason=t['xfail']['reason'])
                                ]
                            else:
                                markers += [pytest.mark.xfail()]
                    if markers:
                        tests += [
                            pytest.param(t, marks=markers, id=t['command'])
                        ]
                    else:
                        tests += [pytest.param(t, id=t['command'])]

    return tests
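The returned list of pytest.param objects is meant to feed @pytest.mark.parametrize; a minimal sketch of a consuming test (the parameter name testvar is illustrative):

@pytest.mark.parametrize('testvar', _load_up_the_tests())
def test_sqcmds(testvar):
    # Each testvar is one entry from a samples YAML file's 'tests' list,
    # already carrying any dynamic marks declared under 'marks'.
    assert 'command' in testvar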
Example #4
import pytest
from _pytest.mark.structures import Mark, MarkDecorator

from tests.conftest import DATADIR
from suzieq.sqobjects import get_tables, get_sqobject


@pytest.mark.schema
@pytest.mark.parametrize('table', [
    pytest.param(x, marks=MarkDecorator(Mark(x, [], {})))
    for x in get_tables()
])
@pytest.mark.parametrize('datadir', DATADIR)
@pytest.mark.parametrize('columns', [['*'], ['default']])
def test_schema_data_consistency(table, datadir, columns, get_table_data_cols):
    '''Test that all fields in the dataframe and the schema are consistent.

    Only applies to the show command for now. It tests that columns='*' is
    consistent with all fields in the schema, and that columns='default'
    matches all display fields in the schema.
    '''

    if table in [
            'path', 'tables', 'ospfIf', 'ospfNbr', 'topcpu', 'topmem',
            'ifCounters', 'time'
    ]:
        # These tables don't follow the schema conventions checked here
        return

    df = get_table_data_cols

    # We have to get rid of false assertions. A bunch of data sets don't
    # have valid values for one or more tables.
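Incidentally, the MarkDecorator(Mark(x, [], {})) construction above reaches into pytest internals; a mark with a dynamic name can also be built through the public API with getattr(pytest.mark, x), which yields an equivalent decorator:

@pytest.mark.parametrize('table', [
    pytest.param(x, marks=getattr(pytest.mark, x))
    for x in get_tables()
])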
Example #5
import pytest
from _pytest.mark.structures import Mark, MarkDecorator

from tests.conftest import (load_up_the_tests, tables, DATADIR, setup_sqcmds,
                            cli_commands, create_dummy_config_file)
from suzieq.sqobjects import get_sqobject, get_tables
from suzieq.cli.sqcmds import *  # noqa
from suzieq.version import SUZIEQ_VERSION
from suzieq.shared.utils import get_sq_install_dir

from tests.integration.utils import assert_df_equal

verbs = ['show', 'summarize', 'describe', 'help']


@pytest.mark.sqcmds
@pytest.mark.slow
@pytest.mark.parametrize("command", [
    pytest.param(cmd, marks=MarkDecorator(Mark(cmd, [], {})))
    for cmd in cli_commands
])
@pytest.mark.parametrize("verb", [
    pytest.param(verb, marks=MarkDecorator(Mark(verb, [], {})))
    for verb in verbs
])
def test_commands(setup_nubia, get_cmd_object_dict, command, verb):
    """ runs through all of the commands for each of the sqcmds
    command: one of the sqcmds
    verbs: for each command, the list of verbs
    args: arguments
    """

    _test_command(get_cmd_object_dict[command], verb, None)
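Every command and verb name becomes a marker here, and unregistered marks trigger warnings (or errors under --strict-markers). As an alternative to listing them all in pytest.ini, a conftest.py sketch could register them programmatically:

def pytest_configure(config):
    # Register the dynamically generated markers so pytest accepts
    # selections like: pytest -m 'show'
    for name in list(cli_commands) + verbs:
        config.addinivalue_line(
            'markers', '{}: dynamically generated marker'.format(name))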
Example #6
import re
from time import time

import pytest
from _pytest.mark.structures import Mark

# Method of a reporting-plugin class; XLogPluginException, self._results and
# self._always_report are defined elsewhere in the plugin.
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(self, item, call):
    # execute all other hooks to obtain the report object
    outcome = yield
    # Full path to the called test function
    call_path = '{}::{}'.format(str(item.fspath),
                                item.name).replace('\\', '/')
    # Get @pytest.mark.case decorator data
    tr_decorator_data = item.get_closest_marker('case')
    # Get @pytest.mark.parametrize data
    tr_param_data = item.get_closest_marker('parametrize')
    # Get skip decorator data
    tr_skip_data = item.get_closest_marker('skip')
    # Build a fake decorator so tests without @pytest.mark.case are reported too
    if not tr_decorator_data:
        tr_decorator_data = Mark(
            'case',
            [re.sub('[^0-9a-zA-Z]+', '_', 'no_case_{}'.format(call_path))],
            {})
    # Get the case id
    case_data = tr_decorator_data.args[0]
    if isinstance(case_data, dict):
        if not tr_param_data:
            raise XLogPluginException(
                '@pytest.mark.case argument is a dictionary but '
                '@pytest.mark.parametrize was not found')
        # Match the current parameter values against the case-id mapping;
        # note that case_id keeps the last key if nothing matches.
        for case_id, params in case_data.items():
            if params == list(item.funcargs.values()):
                break
    else:
        case_id = case_data
    # Prepare the record for the test
    if call.when == 'setup':
        if not self._results.get(case_id):
            self._results[case_id] = []
        self._results[case_id].append({
            'started': time(),
            'setup_status': None,
            'call_status': None,
            'teardown_status': None,
            'call_path': call_path
        })
        # Add static options passed via the command line: --xopt key=value
        self._results[case_id][-1].update(self._always_report)
        # Add case decorator options
        for name, value in tr_decorator_data.kwargs.items():
            self._results[case_id][-1][name] = value
        if tr_skip_data:
            if len(tr_skip_data.args) > 0:
                self._results[case_id][-1]['skip'] = tr_skip_data.args[0]
            elif tr_skip_data.kwargs.get('reason'):
                self._results[case_id][-1]['skip'] = tr_skip_data.kwargs[
                    'reason']
            else:
                raise XLogPluginException(
                    'Skip message not found in args or kwargs')
    elif call.when == 'teardown':
        self._results[case_id][-1]['finished'] = time()
    # Record the outcome for the current phase (setup/call/teardown)
    self._results[case_id][-1]['{}_status'.format(
        call.when)] = outcome.get_result().outcome
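For reference, a sketch of the two @pytest.mark.case shapes this hook understands (the case ids are made up, and the case marker would need to be registered to avoid unknown-mark warnings):

import pytest

# Plain form: the single positional argument is the case id.
@pytest.mark.case('TC-1001')
def test_login():
    assert True

# Dictionary form: maps each case id to the parameter values of one
# parametrize instance; requires @pytest.mark.parametrize.
@pytest.mark.case({'TC-2001': [1, 2], 'TC-2002': [3, 4]})
@pytest.mark.parametrize('a, b', [(1, 2), (3, 4)])
def test_add(a, b):
    assert a < b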