Example #1
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._v1,
                                                      cls._v2,
                                                      cls._topics,
                                                      cls._priority,
                                                      cls._payload,
                                                      cls._context)
class DisabledTogglesTestCase(gallery.GalleryTestCase):

    toggles_scenarios = [('checkbox disabled unchecked',
                          dict(object_name='checkbox_disabled_unchecked',
                               initial_state=False)),
                         ('checkbox disabled checked',
                          dict(object_name='checkbox_disabled_checked',
                               initial_state=True)),
                         ('switch disabled unchecked',
                          dict(object_name='switch_disabled_unchecked',
                               initial_state=False)),
                         ('switch disabled checked',
                          dict(object_name='switch_disabled_checked',
                               initial_state=True))]

    scenarios = testscenarios.multiply_scenarios(
        ubuntu_scenarios.get_device_simulation_scenarios(), toggles_scenarios)

    def setUp(self):
        super().setUp()
        self.open_page('togglesElement')

    def test_change_toggles_state(self):
        toggle = self.main_view.select_single(emulators.CheckBox,
                                              objectName=self.object_name)
        self.assertThat(toggle.enabled, Equals(False))
        self.assertThat(toggle.checked, Equals(self.initial_state))

        self.assertRaises(AssertionError, toggle.change_state, timeout=1)
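A minimal, standalone sketch of the mechanism the test above relies on (class and scenario names here are invented, and the testscenarios package is assumed to be installed): testscenarios copies every key of the chosen scenario dict onto the test instance, which is why the test can read self.object_name and self.initial_state directly.

import testscenarios


class ToggleScenarioSketchTest(testscenarios.TestWithScenarios):

    scenarios = [
        ('checkbox disabled unchecked',
         dict(object_name='checkbox_disabled_unchecked', initial_state=False)),
        ('switch disabled checked',
         dict(object_name='switch_disabled_checked', initial_state=True)),
    ]

    def test_scenario_attributes_are_set(self):
        # Each generated test sees exactly one scenario's values.
        self.assertTrue(self.object_name)
        self.assertIsInstance(self.initial_state, bool)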
Example #3
class SendToSentryIsNoTest(SendToSentryBaseTest):

    no_scenarios = [
        (answer, dict(answer=answer)) for answer in ["n", "N", "NO", "no", "No"]
    ]

    tty_scenarios = [("tty yes", dict(tty=True)), ("tty no", dict(tty=False))]

    scenarios = multiply_scenarios(no_scenarios, tty_scenarios)

    def test_no_send(self):
        self.prompt_mock.return_value = self.answer
        self.mock_isatty.return_value = self.tty

        try:
            self.call_handler(RuntimeError("not a SnapcraftError"), False)
        except Exception:
            self.fail("Exception unexpectedly raised")

        self.raven_client_mock.assert_not_called()

        # Whether or not we have a tty, the trace should be saved to a
        # temporary file. If we don't have a tty, the trace should also be
        # printed to stdout.
        self.error_mock.assert_called_once_with("not a SnapcraftError")
        self.exit_mock.assert_called_once_with(1)

        if self.tty:
            self.assert_print_exception_called_only_tracefile(RuntimeError)
        else:
            self.assert_print_exception_called_both_stdout_and_tempfile(RuntimeError)
Example #4
class TestBwLimitQoSOvs(_TestBwLimitQoS, base.BaseFullStackTestCase):
    l2_agent_type = constants.AGENT_TYPE_OVS
    direction_scenarios = [('ingress', {
        'direction': constants.INGRESS_DIRECTION
    }), ('egress', {
        'direction': constants.EGRESS_DIRECTION
    })]
    scenarios = testscenarios.multiply_scenarios(
        direction_scenarios, fullstack_utils.get_ovs_interface_scenarios())

    @staticmethod
    def _get_expected_burst_value(limit, direction):
        # For egress bandwidth limit this value should be calculated as
        # bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
        if direction == constants.EGRESS_DIRECTION:
            return TestBwLimitQoSOvs._get_expected_egress_burst_value(limit)
        else:
            return 0

    def _wait_for_bw_rule_applied(self, vm, limit, burst, direction):
        if direction == constants.EGRESS_DIRECTION:
            utils.wait_until_true(
                lambda: vm.bridge.get_egress_bw_limit_for_port(vm.port.name) ==
                (limit, burst))
        elif direction == constants.INGRESS_DIRECTION:
            utils.wait_until_true(
                lambda: vm.bridge.get_ingress_bw_limit_for_port(
                    vm.port.name) == (limit, burst))
Example #5
class TestControllerHandlerScenarios(MAASServerTestCase):

    scenarios_controllers = (
        ("rack", dict(make_controller=factory.make_RackController)),
        ("region", dict(make_controller=factory.make_RegionController)),
        ("region+rack",
         dict(make_controller=factory.make_RegionRackController)),
    )

    scenarios_fetch_types = (
        ("in-full", dict(for_list=False)),
        ("for-list", dict(for_list=True)),
    )

    scenarios = multiply_scenarios(scenarios_controllers,
                                   scenarios_fetch_types)

    def test_fully_dehydrated_controller_contains_essential_fields(self):
        user = factory.make_User()
        controller = self.make_controller()
        handler = ControllerHandler(user, {}, None)
        data = handler.full_dehydrate(controller, for_list=False)
        self.assertThat(
            data,
            ContainsDict({
                handler._meta.pk:
                Equals(getattr(controller, handler._meta.pk)),
                handler._meta.batch_key:
                Equals(getattr(controller, handler._meta.batch_key)),
            }))
Example #6
def load_tests_input_scenario_utils(*args):
    """Wrapper for testscenarios to set the scenarios

    The purpose is to avoid running a getattr on the CONF object at import.
    """

    if getattr(args[0], 'suiteClass', None) is not None:
        loader, standard_tests, pattern = args
    else:
        standard_tests, module, loader = args
    output = None
    scenario_utils = None
    try:
        scenario_utils = InputScenarioUtils()
        scenario_flavor = scenario_utils.scenario_flavors
        scenario_image = scenario_utils.scenario_images
    except (exc_lib.InvalidCredentials, TypeError):
        output = standard_tests
    finally:
        if scenario_utils:
            scenario_utils.clear_creds()
    if output is not None:
        return output
    for test in testtools.iterate_tests(standard_tests):
        setattr(test, 'scenarios', testscenarios.multiply_scenarios(
            scenario_image,
            scenario_flavor))
    return testscenarios.load_tests_apply_scenarios(*args)
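Hypothetical wiring, not shown in the snippet above: a test module would normally hand this wrapper to the standard unittest load_tests hook so the scenarios get applied at load time.

load_tests = load_tests_input_scenario_utils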
Example #7
class TestMinBwQoSOvs(_TestMinBwQoS, base.BaseFullStackTestCase):
    l2_agent_type = constants.AGENT_TYPE_OVS
    direction_scenarios = [('egress', {
        'direction': constants.EGRESS_DIRECTION
    })]
    scenarios = testscenarios.multiply_scenarios(
        direction_scenarios, fullstack_utils.get_ovs_interface_scenarios())

    def _wait_for_min_bw_rule_applied(self, vm, min_bw, direction):
        if direction == constants.EGRESS_DIRECTION:
            utils.wait_until_true(lambda: vm.bridge.get_egress_min_bw_for_port(
                vm.neutron_port['id']) == min_bw)
        elif direction == constants.INGRESS_DIRECTION:
            self.fail('"%s" direction not implemented' %
                      constants.INGRESS_DIRECTION)

    @tests_base.unstable_test('bug 1819125')
    def test_bw_limit_qos_port_removed(self):
        """Test if rate limit config is properly removed when whole port is
        removed.
        """
        # Create port with qos policy attached
        vm, qos_policy = self._prepare_vm_with_qos_policy([
            functools.partial(self._add_min_bw_rule, MIN_BANDWIDTH,
                              self.direction)
        ])
        self._wait_for_min_bw_rule_applied(vm, MIN_BANDWIDTH, self.direction)

        # Delete port with qos policy attached
        vm.destroy()
        self._wait_for_min_bw_rule_removed(vm, self.direction)
        self.assertIsNone(vm.bridge.find_qos(vm.port.name))
        self.assertIsNone(
            vm.bridge.find_queue(vm.port.name, ovs_lib.QOS_DEFAULT_QUEUE))
Example #8
    def generate_scenarios(cls):
        attr = [
            ('exchange', dict(attr='exchange')),
            ('topic', dict(attr='topic')),
            ('namespace', dict(attr='namespace')),
            ('version', dict(attr='version')),
            ('server', dict(attr='server')),
            ('fanout', dict(attr='fanout')),
        ]
        a = [
            ('a_notset', dict(a_value=_notset)),
            ('a_none', dict(a_value=None)),
            ('a_empty', dict(a_value='')),
            ('a_foo', dict(a_value='foo')),
            ('a_bar', dict(a_value='bar')),
        ]
        b = [
            ('b_notset', dict(b_value=_notset)),
            ('b_none', dict(b_value=None)),
            ('b_empty', dict(b_value='')),
            ('b_foo', dict(b_value='foo')),
            ('b_bar', dict(b_value='bar')),
        ]

        cls.scenarios = testscenarios.multiply_scenarios(attr, a, b)
        for s in cls.scenarios:
            s[1]['equals'] = (s[1]['a_value'] == s[1]['b_value'])
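A short runnable sketch of the post-processing step above, using two invented axes: after multiply_scenarios each scenario is a (name, params) tuple, so a derived value such as 'equals' can simply be written back into the params dict.

import testscenarios

a = [('a_foo', dict(a_value='foo')), ('a_bar', dict(a_value='bar'))]
b = [('b_foo', dict(b_value='foo')), ('b_bar', dict(b_value='bar'))]

scenarios = testscenarios.multiply_scenarios(a, b)
for _name, params in scenarios:
    params['equals'] = params['a_value'] == params['b_value']

# 2 x 2 = 4 combinations, of which exactly two compare equal.
assert len(scenarios) == 4
assert sum(params['equals'] for _name, params in scenarios) == 2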
Example #9
class TestOvsConnectivitySameNetwork(BaseConnectivitySameNetworkTest):

    l2_agent_type = constants.AGENT_TYPE_OVS
    network_scenarios = [('VXLAN', {
        'network_type': 'vxlan',
        'l2_pop': False
    }), ('GRE and l2pop', {
        'network_type': 'gre',
        'l2_pop': True
    }), ('VLANs', {
        'network_type': 'vlan',
        'l2_pop': False
    })]
    interface_scenarios = [('openflow-cli_ovsdb-cli', {
        'of_interface': 'ovs-ofctl',
        'ovsdb_interface': 'vsctl'
    }),
                           ('openflow-native_ovsdb-cli', {
                               'of_interface': 'native',
                               'ovsdb_interface': 'vsctl'
                           }),
                           ('openflow-cli_ovsdb-native', {
                               'of_interface': 'ovs-ofctl',
                               'ovsdb_interface': 'native'
                           }),
                           ('openflow-native_ovsdb-native', {
                               'of_interface': 'native',
                               'ovsdb_interface': 'native'
                           })]
    scenarios = testscenarios.multiply_scenarios(network_scenarios,
                                                 interface_scenarios)

    def test_connectivity(self):
        self._test_connectivity()
Example #10
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._unit_system,
                                                      cls._sign,
                                                      cls._magnitude,
                                                      cls._unit_prefix,
                                                      cls._unit_suffix,
                                                      cls._return_int)
Example #11
 def generate_scenarios(cls):
     workflow_scenarios = []
     for name, wf_change in cls.workflow_scenarios:
         wf = cls.default_workflow.copy()
         wf.update(wf_change)
         workflow_scenarios.append((name, wf))
     cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, workflow_scenarios)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders,
                                                      cls._context,
                                                      cls._reply,
                                                      cls._reply_fail,
                                                      cls._failure,
                                                      cls._timeout)
Example #13
def load_tests_input_scenario_utils(*args):
    """
    Wrapper for testscenarios to set the scenarios to avoid running a getattr
    on the CONF object at import.
    """
    if getattr(args[0], "suiteClass", None) is not None:
        loader, standard_tests, pattern = args
    else:
        standard_tests, module, loader = args
    output = None
    scenario_utils = None
    try:
        scenario_utils = InputScenarioUtils()
        scenario_flavor = scenario_utils.scenario_flavors
        scenario_image = scenario_utils.scenario_images
    except (exc_lib.InvalidCredentials, TypeError):
        output = standard_tests
    finally:
        if scenario_utils:
            scenario_utils.clear_creds()
    if output is not None:
        return output
    for test in testtools.iterate_tests(standard_tests):
        setattr(test, "scenarios", testscenarios.multiply_scenarios(scenario_image, scenario_flavor))
    return testscenarios.load_tests_apply_scenarios(*args)
Example #14
    def generate_scenarios(cls):
        attr = [
            ('exchange', dict(attr='exchange')),
            ('topic', dict(attr='topic')),
            ('namespace', dict(attr='namespace')),
            ('version', dict(attr='version')),
            ('server', dict(attr='server')),
            ('fanout', dict(attr='fanout')),
        ]
        a = [
            ('a_notset', dict(a_value=_notset)),
            ('a_none', dict(a_value=None)),
            ('a_empty', dict(a_value='')),
            ('a_foo', dict(a_value='foo')),
            ('a_bar', dict(a_value='bar')),
        ]
        b = [
            ('b_notset', dict(b_value=_notset)),
            ('b_none', dict(b_value=None)),
            ('b_empty', dict(b_value='')),
            ('b_foo', dict(b_value='foo')),
            ('b_bar', dict(b_value='bar')),
        ]

        cls.scenarios = testscenarios.multiply_scenarios(attr, a, b)
        for s in cls.scenarios:
            s[1]['equals'] = (s[1]['a_value'] == s[1]['b_value'])
Example #15
class MetadataFromSourceWithIconFileTestCase(
        CreateMetadataFromSourceBaseTestCase):

    scenarios = testscenarios.multiply_scenarios(
        (('setup/gui', dict(directory=os.path.join('setup', 'gui'))),
         ('snap/gui', dict(directory=os.path.join('snap', 'gui')))),
        (('icon.png', dict(file_name='icon.png')),
         ('icon.svg', dict(file_name='icon.svg')))
    )

    def test_metadata_doesnt_overwrite_icon_file(self):
        os.makedirs(self.directory)
        icon_content = 'setup icon'
        _create_file(
            os.path.join(self.directory, self.file_name),
            content=icon_content)

        def _fake_extractor(file_path):
            return extractors.ExtractedMetadata(
                icon='test/extracted/unexistent/icon/path')

        self.useFixture(fixture_setup.FakeMetadataExtractor(
            'fake', _fake_extractor))

        self.generate_meta_yaml(build=True)

        expected_icon = os.path.join(self.meta_dir, 'gui', self.file_name)
        self.assertThat(expected_icon, FileContains(icon_content))
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._unit_system,
                                                      cls._sign,
                                                      cls._magnitude,
                                                      cls._unit_prefix,
                                                      cls._unit_suffix,
                                                      cls._return_int)
class TestUninterruptedConnectivityOnL2AgentRestart(
        BaseConnectivitySameNetworkTest):

    num_hosts = 2

    ovs_agent_scenario = [('OVS', {'l2_agent_type': constants.AGENT_TYPE_OVS})]
    lb_agent_scenario = [('LB', {
        'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE
    })]

    network_scenarios = [
        ('Flat network', {
            'network_type': 'flat',
            'l2_pop': False
        }),
        ('VLANs', {
            'network_type': 'vlan',
            'l2_pop': False
        }),
        ('VXLAN', {
            'network_type': 'vxlan',
            'l2_pop': False
        }),
    ]
    scenarios = (
        testscenarios.multiply_scenarios(ovs_agent_scenario, network_scenarios,
                                         utils.get_ovs_interface_scenarios()) +
        testscenarios.multiply_scenarios(lb_agent_scenario, network_scenarios))

    def test_l2_agent_restart(self, agent_restart_timeout=20):
        # Environment preparation is effectively the same as connectivity test
        vms = self._prepare_vms_in_single_network()
        vms.ping_all()

        ns0 = vms[0].namespace
        ip1 = vms[1].ip
        agents = [host.l2_agent for host in self.environment.hosts]

        # Restart agents on all nodes simultaneously while pinging across
        # the hosts. The ping has to cross int and phys bridges and travels
        # via central bridge as the vms are on separate hosts.
        self._assert_ping_during_agents_restart(
            agents,
            ns0, [ip1],
            restart_timeout=agent_restart_timeout,
            ping_timeout=2,
            count=agent_restart_timeout)
Example #18
class SendToSentryIsAlwaysTest(SendToSentryBaseTest):

    always_scenarios = [
        (answer, dict(answer=answer))
        for answer in ["a", "A", "ALWAYS", "always", "Always"]
    ]

    tty_scenarios = [("tty yes", dict(tty=True)), ("tty no", dict(tty=False))]

    scenarios = multiply_scenarios(always_scenarios, tty_scenarios)

    def test_send_and_set_to_always(self):
        self.prompt_mock.return_value = self.answer
        self.mock_isatty.return_value = self.tty

        try:
            self.call_handler(RuntimeError("not a SnapcraftError"), True)
        except Exception:
            self.fail("Exception unexpectedly raised")

        self.raven_client_mock.assert_called_once_with(
            mock.ANY,
            transport=self.raven_request_mock,
            name="snapcraft",
            processors=mock.ANY,
            release=mock.ANY,
            auto_log_stacks=False,
        )
        config_path = os.path.join(
            xdg.BaseDirectory.save_config_path("snapcraft"), "cli.cfg"
        )
        self.assertThat(
            config_path,
            FileContains(
                dedent(
                    """\
            [Sentry]
            always_send = true

            """
                )
            ),
        )

        # If we have a tty, then the trace should be saved to a file and sent to sentry.
        # If we don't have a tty, then the same should happen, but the trace should
        # also be printed.
        self.error_mock.assert_not_called()
        self.exit_mock.assert_called_once_with(1)

        expected_calls = [
            mock.call(RuntimeError, mock.ANY, mock.ANY, file=_Tracefile(self))
        ]
        if not self.tty:
            expected_calls.append(
                mock.call(RuntimeError, mock.ANY, mock.ANY, file=sys.stdout)
            )

        self.print_exception_mock.assert_has_calls(expected_calls, any_order=True)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._v1,
                                                      cls._v2,
                                                      cls._publisher_id,
                                                      cls._topics,
                                                      cls._priority,
                                                      cls._payload,
                                                      cls._context)
 def generate_scenarios(cls):
     impl = [('blocking', dict(executor=impl_blocking.BlockingExecutor,
                               stop_before_return=True))]
     if impl_eventlet is not None:
         impl.append(
             ('eventlet', dict(executor=impl_eventlet.EventletExecutor,
                               stop_before_return=False)))
     cls.scenarios = testscenarios.multiply_scenarios(impl)
Example #21
 def generate_scenarios(cls):
     workflow_scenarios = []
     for name, wf_change in cls.workflow_scenarios:
         wf = cls.default_workflow.copy()
         wf.update(wf_change)
         workflow_scenarios.append((name, wf))
     cls.scenarios = testscenarios.multiply_scenarios(
         cls.sample_scenarios, workflow_scenarios)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(
         cls._image_name,
         cls._file_format,
         cls._virtual_size,
         cls._disk_size,
         cls._garbage_before_snapshot,
         cls._snapshot_count)
Example #23
 def generate_scenarios(cls):
     impl = [('blocking', dict(executor=impl_blocking.BlockingExecutor,
                               stop_before_return=True))]
     if impl_eventlet is not None:
         impl.append(
             ('eventlet', dict(executor=impl_eventlet.EventletExecutor,
                               stop_before_return=False)))
     cls.scenarios = testscenarios.multiply_scenarios(impl)
Example #24
class ExtensionCombinationTest(ProjectLoaderBaseTest):
    scenarios = multiply_scenarios(*(
        [[
            (
                "{} extension".format(extension_name),
                {
                    "extension_{}".format(extension_name): extension_name
                },
            ),
            (
                "no {} extension".format(extension_name),
                {
                    "extension_{}".format(extension_name): None
                },
            ),
        ] for extension_name in supported_extension_names()] or
        [[]]  # This guard can be removed once at least one extension is added
    ))

    def test_extensions_all_combinations_validate(self):
        common_bases = set(["core18", "core20"])

        # Determine extension list given scenarios
        extension_names = list()
        for member_pair in inspect.getmembers(self):
            name = member_pair[0]
            if name.startswith("extension_"):
                value = getattr(self, name)
                if value:
                    extension_names.append(value)
                    ext = find_extension(value)
                    common_bases &= set(ext.get_supported_bases())

        # This shouldn't have any validation issues
        if common_bases:
            base = common_bases.pop()
            self.make_snapcraft_project(
                textwrap.dedent("""\
                    name: test
                    version: "1"
                    summary: test
                    description: test
                    base: {base}
                    grade: stable
                    confinement: strict

                    apps:
                        test-app:
                            command: test-command
                            adapter: full
                            extensions: {extensions}

                    parts:
                        part1:
                            plugin: nil
                    """).format(base=base, extensions=extension_names))
        else:
            self.skipTest(f"no common base: {extension_names!r}")
 def generate_scenarios(cls):
     impl = [
         ('blocking', dict(executor=impl_blocking.BlockingExecutor)),
         ('threaded', dict(executor=impl_thread.ThreadExecutor)),
     ]
     if impl_eventlet is not None:
         impl.append(
             ('eventlet', dict(executor=impl_eventlet.EventletExecutor)))
     cls.scenarios = testscenarios.multiply_scenarios(impl)
Example #26
class TestOvsConnectivitySameNetworkOnOvsBridgeControllerStop(
        BaseConnectivitySameNetworkTest):

    num_hosts = 2

    l2_agent_type = constants.AGENT_TYPE_OVS
    network_scenarios = [('VXLAN', {
        'network_type': 'vxlan',
        'l2_pop': False
    }), ('GRE and l2pop', {
        'network_type': 'gre',
        'l2_pop': True
    }), ('VLANs', {
        'network_type': 'vlan',
        'l2_pop': False
    })]

    # Do not test for CLI ofctl interface as controller is irrelevant for CLI
    scenarios = testscenarios.multiply_scenarios(
        network_scenarios, [(m, v)
                            for (m, v) in utils.get_ovs_interface_scenarios()
                            if v['of_interface'] != 'ovs-ofctl'])

    def _test_controller_timeout_does_not_break_connectivity(
            self, kill_signal=None):
        # Environment preparation is effectively the same as connectivity test
        vms = self._prepare_vms_in_single_network()
        vms.ping_all()

        ns0 = vms[0].namespace
        ip1 = vms[1].ip

        LOG.debug("Stopping agents (hence also OVS bridge controllers)")
        for host in self.environment.hosts:
            if kill_signal is not None:
                host.l2_agent.stop(kill_signal=kill_signal)
            else:
                host.l2_agent.stop()

        # Ping long enough to cover 3 x 5 seconds even under high load. The
        # time was chosen to match three times the inactivity_probe time,
        # which is the time after which ovs-vswitchd treats the controller as
        # dead and starts managing the bridge by itself when the fail mode is
        # not set to secure (see the ovs-vsctl man page for further details).
        with net_helpers.async_ping(ns0, [ip1], timeout=2, count=25) as done:
            common_utils.wait_until_true(done,
                                         exception=RuntimeError(
                                             "Networking interrupted after "
                                             "controllers have vanished"))

    def test_controller_timeout_does_not_break_connectivity_sigterm(self):
        self._test_controller_timeout_does_not_break_connectivity()

    def test_controller_timeout_does_not_break_connectivity_sigkill(self):
        self._test_controller_timeout_does_not_break_connectivity(
            signal.SIGKILL)
Example #27
 def generate_scenarios(cls):
     impl = [
         ('blocking', dict(executor=impl_blocking.BlockingExecutor)),
         ('threaded', dict(executor=impl_thread.ThreadExecutor)),
     ]
     if impl_eventlet is not None:
         impl.append(
             ('eventlet', dict(executor=impl_eventlet.EventletExecutor)))
     cls.scenarios = testscenarios.multiply_scenarios(impl)
Example #28
class TestNotificationHandlerListeningScenarios(MAASServerTestCase):
    """Tests for `NotificationHandler` listening to database messages."""

    scenarios_users = (
        ("user", dict(make_user=factory.make_User)),
        ("admin", dict(make_user=factory.make_admin)),
    )

    scenarios_notifications = (
        (
            "to-user=%s;to-users=%s;to-admins=%s" % scenario,
            dict(zip(("to_user", "to_users", "to_admins"), scenario)),
        )
        for scenario in product(
            (False, True, "Other"),  # To specific user.
            (False, True),  # To all users.
            (False, True),  # To all admins.
        )
    )

    scenarios = multiply_scenarios(scenarios_users, scenarios_notifications)

    def test_on_listen(self):
        user = self.make_user()

        if self.to_user is False:
            to_user = None
        elif self.to_user is True:
            to_user = user
        else:
            to_user = factory.make_User()

        notification = factory.make_Notification(
            user=to_user, users=self.to_users, admins=self.to_admins
        )

        if notification.is_relevant_to(user):
            expected = MatchesAll(
                IsInstance(tuple),
                MatchesListwise(
                    (
                        Equals("notification"),
                        Equals("create"),
                        MatchesRenderedNotification(notification),
                    )
                ),
                first_only=True,
            )
        else:
            expected = Is(None)

        handler = NotificationHandler(user, {}, None)
        self.assertThat(
            handler.on_listen("notification", "create", notification.id),
            expected,
        )
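A standalone sketch of the generator-built axis above (flag names invented): each element of itertools.product becomes one named scenario whose dict zips the flag names with the combination, and multiply_scenarios then crosses it with the other axis.

from itertools import product

import testscenarios

flag_axis = [
    ('to-user=%s;to-admin=%s' % combo,
     dict(zip(('to_user', 'to_admin'), combo)))
    for combo in product((False, True), (False, True))
]
role_axis = [('user', dict(role='user')), ('admin', dict(role='admin'))]

scenarios = testscenarios.multiply_scenarios(role_axis, flag_axis)
assert len(scenarios) == 8
assert scenarios[0][1] == {'role': 'user', 'to_user': False, 'to_admin': False}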
Example #29
 def generate_scenarios(cls):
     impl = [
         ("blocking", dict(executor=impl_blocking.BlockingExecutor)),
         ("threaded", dict(executor=impl_thread.ThreadExecutor)),
     ]
     if impl_eventlet is not None:
         impl.append(("eventlet", dict(executor=impl_eventlet.EventletExecutor)))
     if impl_aioeventlet is not None:
         impl.append(("aioeventlet", dict(executor=impl_aioeventlet.AsyncioEventletExecutor)))
     cls.scenarios = testscenarios.multiply_scenarios(impl)
Example #30
class ParseRequirementsTestScenarios(base.BaseTestCase):

    versioned_scenarios = [
        ('non-versioned', {'versioned': False, 'expected': ['bar']}),
        ('versioned', {'versioned': True, 'expected': ['bar>=1.2.3']})
    ]

    scenarios = [
        ('normal', {'url': "foo\nbar", 'expected': ['foo', 'bar']}),
        ('normal_with_comments', {
            'url': "# this is a comment\nfoo\n# and another one\nbar",
            'expected': ['foo', 'bar']}),
        ('removes_index_lines', {'url': '-f foobar', 'expected': []}),
    ]

    scenarios = scenarios + testscenarios.multiply_scenarios([
        ('ssh_egg_url', {'url': 'git+ssh://foo.com/zipball#egg=bar'}),
        ('git_https_egg_url', {'url': 'git+https://foo.com/zipball#egg=bar'}),
        ('http_egg_url', {'url': 'https://foo.com/zipball#egg=bar'}),
    ], versioned_scenarios)

    scenarios = scenarios + testscenarios.multiply_scenarios(
        [
            ('git_egg_url',
                {'url': 'git://foo.com/zipball#egg=bar', 'name': 'bar'})
        ], [
            ('non-editable', {'editable': False}),
            ('editable', {'editable': True}),
        ],
        versioned_scenarios)

    def test_parse_requirements(self):
        tmp_file = tempfile.NamedTemporaryFile()
        req_string = self.url
        if hasattr(self, 'editable') and self.editable:
            req_string = ("-e %s" % req_string)
        if hasattr(self, 'versioned') and self.versioned:
            req_string = ("%s-1.2.3" % req_string)
        with open(tmp_file.name, 'w') as fh:
            fh.write(req_string)
        self.assertEqual(self.expected,
                         packaging.parse_requirements([tmp_file.name]))
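A quick sketch of the concatenation pattern above, with made-up requirement data: multiply_scenarios returns an ordinary list of (name, dict) tuples, so hand-written scenarios and multiplied ones can be joined with a plain +.

import testscenarios

plain = [('normal', {'url': 'foo', 'expected': ['foo']})]
egg_urls = [('git_egg_url', {'url': 'git://example.com/x#egg=bar'})]
versioned = [('non-versioned', {'versioned': False, 'expected': ['bar']}),
             ('versioned', {'versioned': True, 'expected': ['bar>=1.2.3']})]

scenarios = plain + testscenarios.multiply_scenarios(egg_urls, versioned)
assert len(scenarios) == 3
assert all(isinstance(s, tuple) and len(s) == 2 for s in scenarios)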
Example #31
class TestUninterruptedConnectivityOnL2AgentRestartOvs(
        _TestUninterruptedConnectivityOnL2AgentRestart):

    scenario = [('OVS', {'l2_agent_type': constants.AGENT_TYPE_OVS})]

    scenarios = (testscenarios.multiply_scenarios(
        scenario,
        _TestUninterruptedConnectivityOnL2AgentRestart.network_scenarios))

    def test_l2_agent_restart(self, agent_restart_timeout=20):
        self._test_l2_agent_restart(agent_restart_timeout)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(
         cls._image_name,
         cls._file_format,
         cls._virtual_size,
         cls._disk_size,
         cls._garbage_before_snapshot,
         cls._snapshot_count,
         cls._qcow2_cluster_size,
         cls._qcow2_encrypted,
         cls._qcow2_backing_file)
class TestConnectivitySameNetwork(base.BaseFullStackTestCase):

    network_scenarios = [('VXLAN', {
        'network_type': 'vxlan',
        'l2_pop': False
    }), ('GRE and l2pop', {
        'network_type': 'gre',
        'l2_pop': True
    }), ('VLANs', {
        'network_type': 'vlan',
        'l2_pop': False
    })]
    interface_scenarios = [('Ofctl', {
        'of_interface': 'ovs-ofctl'
    }), ('Native', {
        'of_interface': 'native'
    })]
    scenarios = testscenarios.multiply_scenarios(network_scenarios,
                                                 interface_scenarios)

    def setUp(self):
        host_descriptions = [
            # There's value in enabling L3 agents registration when l2pop
            # is enabled, because l2pop code makes assumptions about the
            # agent types present on machines.
            environment.HostDescription(l3_agent=self.l2_pop,
                                        of_interface=self.of_interface)
            for _ in range(2)
        ]
        env = environment.Environment(
            environment.EnvironmentDescription(network_type=self.network_type,
                                               l2_pop=self.l2_pop),
            host_descriptions)
        super(TestConnectivitySameNetwork, self).setUp(env)

    def test_connectivity(self):
        tenant_uuid = uuidutils.generate_uuid()

        network = self.safe_client.create_network(tenant_uuid)
        self.safe_client.create_subnet(tenant_uuid, network['id'],
                                       '20.0.0.0/24')

        vms = [
            self.useFixture(
                machine.FakeFullstackMachine(self.environment.hosts[i],
                                             network['id'], tenant_uuid,
                                             self.safe_client))
            for i in range(2)
        ]

        for vm in vms:
            vm.block_until_boot()

        vms[0].block_until_ping(vms[1].ip)
Example #34
class TestUninterruptedConnectivityOnL2AgentRestartLB(
        _TestUninterruptedConnectivityOnL2AgentRestart):

    scenario = [('LB', {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})]

    scenarios = (testscenarios.multiply_scenarios(
        scenario,
        _TestUninterruptedConnectivityOnL2AgentRestart.network_scenarios))

    @tests_base.unstable_test("bug 1928764")
    def test_l2_agent_restart(self, agent_restart_timeout=20):
        self._test_l2_agent_restart(agent_restart_timeout)
Example #35
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges,
                                                         cls._topics,
                                                         cls._server,
                                                         cls._fanout,
                                                         cls._method,
                                                         cls._endpoints)

        # fanout call not supported
        def filter_fanout_call(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            call = params['call1'] or params['call2']
            return not (call and fanout)

        # listening multiple times on same topic/server pair not supported
        def filter_same_topic_and_server(scenario):
            params = scenario[1]
            single_topic = params['topic1'] == params['topic2']
            single_server = params['server1'] == params['server2']
            return not (single_topic and single_server)

        # fanout to multiple servers on same topic and exchange
        # each endpoint will receive both messages
        def fanout_to_servers(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            multi_servers = params['server1'] != params['server2']
            if fanout and single_exchange and single_topic and multi_servers:
                params['expect1'] = params['expect1'][:] + params['expect1']
                params['expect2'] = params['expect2'][:] + params['expect2']
            return scenario

        # multiple endpoints on same topic and exchange
        # either endpoint can get either message
        def single_topic_multi_endpoints(scenario):
            params = scenario[1]
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            if single_topic and single_exchange and params['multi_endpoints']:
                params['expect_either'] = (params['expect1'] +
                                           params['expect2'])
                params['expect1'] = params['expect2'] = []
            else:
                params['expect_either'] = []
            return scenario

        for f in [filter_fanout_call, filter_same_topic_and_server]:
            cls.scenarios = [i for i in cls.scenarios if f(i)]
        for m in [fanout_to_servers, single_topic_multi_endpoints]:
            cls.scenarios = [m(i) for i in cls.scenarios]
Example #36
    def generate_scenarios(cls):
        cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges,
                                                         cls._topics,
                                                         cls._server,
                                                         cls._fanout,
                                                         cls._method,
                                                         cls._endpoints)

        # fanout call not supported
        def filter_fanout_call(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            call = params['call1'] or params['call2']
            return not (call and fanout)

        # listening multiple times on same topic/server pair not supported
        def filter_same_topic_and_server(scenario):
            params = scenario[1]
            single_topic = params['topic1'] == params['topic2']
            single_server = params['server1'] == params['server2']
            return not (single_topic and single_server)

        # fanout to multiple servers on same topic and exchange
        # each endpoint will receive both messages
        def fanout_to_servers(scenario):
            params = scenario[1]
            fanout = params['fanout1'] or params['fanout2']
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            multi_servers = params['server1'] != params['server2']
            if fanout and single_exchange and single_topic and multi_servers:
                params['expect1'] = params['expect1'][:] + params['expect1']
                params['expect2'] = params['expect2'][:] + params['expect2']
            return scenario

        # multiple endpoints on same topic and exchange
        # either endpoint can get either message
        def single_topic_multi_endpoints(scenario):
            params = scenario[1]
            single_exchange = params['exchange1'] == params['exchange2']
            single_topic = params['topic1'] == params['topic2']
            if single_topic and single_exchange and params['multi_endpoints']:
                params['expect_either'] = (params['expect1'] +
                                           params['expect2'])
                params['expect1'] = params['expect2'] = []
            else:
                params['expect_either'] = []
            return scenario

        # Keep scenarios as a list under Python 3 (filter/map return iterators).
        for f in [filter_fanout_call, filter_same_topic_and_server]:
            cls.scenarios = [s for s in cls.scenarios if f(s)]
        for m in [fanout_to_servers, single_topic_multi_endpoints]:
            cls.scenarios = [m(s) for s in cls.scenarios]
class WriteAndClearTextInputTestCase(GalleryTestCase):

    def text_to_write_string():
        return 'Hello World'

    def text_to_write_number():
        return locale.format('%.2f', -1001.23)

    # text_to_write is a function to ensure
    # that locale is evaluated after setUp
    text_input_scenarios = [
        ('standard textfield', dict(
            objectName='textfield_standard',
            text_to_write=text_to_write_string,
            expected_text=text_to_write_string())),
        ('password textfield', dict(
            objectName='textfield_password',
            text_to_write=text_to_write_string,
            expected_text=text_to_write_string())),
        # The text_to_write contains a decimal separator based on locale
        # e.g. -1001.23 or -1001,23 or -۱۰۰۱٫۲۳
        # The test expects integers, TextField rejects that character
        ('only integers textfield', dict(
            objectName='textfield_numbers',
            text_to_write=text_to_write_number,
            expected_text='-100123'))
    ]

    scenarios = testscenarios.multiply_scenarios(
        ubuntu_scenarios.get_device_simulation_scenarios(),
        text_input_scenarios)

    def setUp(self):
        super().setUp()
        # Apply the user locale from the environment
        # The UITK does the same, so the test must be localized
        locale.setlocale(locale.LC_ALL, "")
        self.open_page('textinputsElement')

    def test_write_on_textfield_must_update_text(self):
        textfield = self.main_view.select_single(
            emulators.TextField, objectName=self.objectName)

        textfield.write(self.text_to_write())
        self.assertEqual(self.expected_text, textfield.text)

    def test_clear_textfield_must_remove_text(self):
        textfield = self.main_view.select_single(
            emulators.TextField, objectName=self.objectName)
        textfield.write(self.text_to_write())

        textfield.clear()
        self.assertEqual('', textfield.text)
Example #38
def merge_scenarios(*scenario_lists):
    """Multiply `scenarios` together but ignoring empty or undefined ones."""
    scenario_lists = [
        scenarios for scenarios in scenario_lists
        if scenarios is not None and len(scenarios) != 0
    ]
    if len(scenario_lists) == 0:
        return None  # Ensure that testscenarios does not expand.
    elif len(scenario_lists) == 1:
        return scenario_lists[0]  # No need to multiply up.
    else:
        return multiply_scenarios(*scenario_lists)
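A standalone illustration of what merge_scenarios guards against, using invented axes and assuming the definition above plus the testscenarios package: an empty axis makes the cross product empty, which would silently expand every test into nothing.

from testscenarios import multiply_scenarios

colours = [('red', dict(colour='red')), ('blue', dict(colour='blue'))]
sizes = [('small', dict(size=1)), ('large', dict(size=10))]

assert len(multiply_scenarios(colours, sizes)) == 4
assert multiply_scenarios(colours, []) == []      # tests would be dropped
assert merge_scenarios(colours, []) == colours    # guard keeps the usable axis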
Example #39
class TestDeploy(TestParser):
    cmd = 'deploy'
    scenarios = testscenarios.multiply_scenarios(TestBuild.scenarios, [
        ('no_add', {
            'add_argv': [],
            'action_vals': {
                'dry_run': False,
                'export_dir': None
            }
        }),
        ('dry_run', {
            'add_argv': ['--dry-run'],
            'action_vals': {
                'dry_run': True,
                'export_dir': None
            }
        }),
        ('dry_run_export_dir', {
            'add_argv': ['--dry-run', '--export-dir', 'test'],
            'action_vals': {
                'dry_run': True,
                'export_dir': 'test'
            }
        }),
    ])

    add_argv = None
    components = None
    action_vals = None

    def test_parser(self):
        fixture = fixtures.MockPatch('fuel_ccp.deploy.deploy_components')
        dc_mock = self.useFixture(fixture).mock
        fixture = fixtures.MockPatch(
            'fuel_ccp.validation.service.validate_service_definitions')
        self.useFixture(fixture)
        self.useFixture(
            fixtures.MockPatch(
                'fuel_ccp.common.utils.get_deploy_components_info',
                return_value={}))
        self.useFixture(
            fixtures.MockPatch(
                'fuel_ccp.validation.service.validate_service_versions'))
        self.argv += self.add_argv
        self._run_app()
        if self.components is None:
            components = None
        else:
            components = set(self.components)
        dc_mock.assert_called_once_with({}, components)
        for k, v in self.action_vals.items():
            self.assertEqual(config.CONF.action[k], v)
Example #40
class MetadataFromSourceWithIconFileTestCase(CreateMetadataFromSourceBaseTestCase):

    scenarios = testscenarios.multiply_scenarios(
        (
            (
                "setup/gui",
                dict(
                    snapcraft_assets_dir="snap", directory=os.path.join("setup", "gui")
                ),
            ),
            (
                "snap/gui",
                dict(
                    snapcraft_assets_dir="snap", directory=os.path.join("snap", "gui")
                ),
            ),
            (
                "build-aux/snap/gui",
                dict(
                    snapcraft_assets_dir=os.path.join("build-aux", "snap"),
                    directory=os.path.join("build-aux", "snap", "gui"),
                ),
            ),
        ),
        (
            ("icon.png", dict(file_name="icon.png")),
            ("icon.svg", dict(file_name="icon.svg")),
        ),
    )

    def test_metadata_doesnt_overwrite_icon_file(self):
        os.makedirs(self.directory)
        icon_content = "setup icon"
        _create_file(os.path.join(self.directory, self.file_name), content=icon_content)

        def _fake_extractor(file_path, workdir):
            return extractors.ExtractedMetadata(
                icon="test/extracted/unexistent/icon/path"
            )

        self.useFixture(fixture_setup.FakeMetadataExtractor("fake", _fake_extractor))

        self.generate_meta_yaml(
            build=True,
            snapcraft_yaml_file_path=os.path.join(
                self.snapcraft_assets_dir, "snapcraft.yaml"
            ),
        )

        expected_icon = os.path.join(self.meta_dir, "gui", self.file_name)
        self.assertThat(expected_icon, FileContains(icon_content))
Example #41
class AppstreamTestCase(unit.TestCase):

    scenarios = testscenarios.multiply_scenarios(
        [('summary', {
            'key': 'summary',
            'attributes': {},
            'value': 'test-summary',
            'param_name': 'summary'
        }),
         ('description', {
             'key': 'description',
             'attributes': {},
             'value': 'test-description',
             'param_name': 'description',
         }),
         ('local icon', {
            'key': 'icon',
            'attributes': {'type': 'local'},
            'param_name': 'icon',
            'value': '/test/path',
         }),
         ('common id', {
             'key': 'id',
             'attributes': {},
             'param_name': 'common_id',
             'value': 'test-id'
         })],
        [('metainfo', {'file_extension': 'metainfo.xml'}),
         ('appdata', {'file_extension': 'appdata.xml'})]
    )

    def test_appstream(self):
        file_name = 'foo.{}'.format(self.file_extension)
        attributes = ' '.join(
            '{attribute_name}="{attribute_value}"'.format(
                attribute_name=attribute,
                attribute_value=self.attributes[attribute])
            for attribute in self.attributes)
        with open(file_name, 'w') as f:
            f.write(textwrap.dedent("""\
                <?xml version="1.0" encoding="UTF-8"?>
                <component>
                  <{key} {attributes}>{value}</{key}>
                </component>""".format(
                    key=self.key, value=self.value, attributes=attributes)))

        kwargs = {self.param_name: self.value}
        expected = ExtractedMetadata(**kwargs)

        self.assertThat(
            appstream.extract(file_name), Equals(expected))
Example #42
class TestMinBwQoSOvs(_TestMinBwQoS, base.BaseFullStackTestCase):
    l2_agent_type = constants.AGENT_TYPE_OVS
    direction_scenarios = [('egress', {
        'direction': constants.EGRESS_DIRECTION
    })]
    scenarios = testscenarios.multiply_scenarios(
        direction_scenarios, fullstack_utils.get_ovs_interface_scenarios())

    def _wait_for_min_bw_rule_applied(self, vm, min_bw, direction):
        if direction == constants.EGRESS_DIRECTION:
            utils.wait_until_true(lambda: vm.bridge.get_egress_min_bw_for_port(
                vm.neutron_port['id']) == min_bw)
        elif direction == constants.INGRESS_DIRECTION:
            self.fail('"%s" direction not implemented' %
                      constants.INGRESS_DIRECTION)

    def _find_agent_qos_and_queue(self, vm):
        # NOTE(ralonsoh): the "_min_bw_qos_id" in vm.bridge is not the same as
        # the ID in the agent br_int instance. We need first to find the QoS
        # register and the Queue assigned to vm.neutron_port['id']
        queue = vm.bridge._find_queue(vm.neutron_port['id'])
        queue_num = int(queue['external_ids']['queue-num'])
        qoses = vm.bridge._list_qos()
        for qos in qoses:
            qos_queue = qos['queues'].get(queue_num)
            if qos_queue and qos_queue.uuid == queue['_uuid']:
                return qos, qos_queue

        self.fail('QoS register not found with queue-num %s' % queue_num)

    def test_min_bw_qos_port_removed(self):
        """Test if min BW limit config is properly removed when port removed"""
        # Create port with qos policy attached
        vm, qos_policy = self._prepare_vm_with_qos_policy([
            functools.partial(self._add_min_bw_rule, MIN_BANDWIDTH,
                              self.direction)
        ])
        self._wait_for_min_bw_rule_applied(vm, MIN_BANDWIDTH, self.direction)

        qos, queue = self._find_agent_qos_and_queue(vm)
        self.assertEqual({'min-rate': str(MIN_BANDWIDTH * 1000)},
                         queue.other_config)
        queues = vm.bridge._list_queues(port=vm.neutron_port['id'])
        self.assertEqual(1, len(queues))
        self.assertEqual(queue.uuid, queues[0]['_uuid'])

        # Delete port with qos policy attached
        vm.destroy()
        self._wait_for_min_bw_rule_removed(vm, self.direction)
        self.assertEqual([],
                         vm.bridge._list_queues(port=vm.neutron_port['id']))
Example #43
def load_tests_input_scenario_utils(*args):
    """
    Wrapper for testscenarios to set the scenarios to avoid running a getattr
    on the CONF object at import.
    """
    if getattr(args[0], 'suiteClass', None) is not None:
        loader, standard_tests, pattern = args
    else:
        standard_tests, module, loader = args
    scenario_utils = InputScenarioUtils()
    scenario_flavor = scenario_utils.scenario_flavors
    scenario_image = scenario_utils.scenario_images
    for test in testtools.iterate_tests(standard_tests):
        setattr(test, 'scenarios', testscenarios.multiply_scenarios(
            scenario_image,
            scenario_flavor))
    return testscenarios.load_tests_apply_scenarios(*args)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._impl)
Example #45
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(
         cls.content_scenarios,
         cls.output_file_scenarios)
Example #46
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._n_qpid_topology,
                                                      cls._n_msgs,
                                                      cls._n_senders,
                                                      cls._n_receivers,
                                                      cls._exchange_class)
Example #47
                    lambda x: x.as_text(),
                    traceback_matcher,
                )
            })
        ]),
        Equals(('stopTest', case)),
    ])


"""
A list that can be used with testscenarios to test every deterministic sample
case that we have.
"""
deterministic_sample_cases_scenarios = multiply_scenarios(
    _make_behavior_scenarios('set_up'),
    _make_behavior_scenarios('body'),
    _make_behavior_scenarios('tear_down'),
    _make_behavior_scenarios('cleanup'),
) + [
    ('tear_down_fails_after_upcall', {
        'post_tear_down_behavior': _error,
    }),
]


"""
A list that can be used with testscenarios to test every non-deterministic
sample case that we have.
"""
nondeterministic_sample_cases_scenarios = [
    ('setup-fails-global-state', _SetUpFailsOnGlobalState.make_scenario()),
]
Example #48
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(
         cls.scenarios,
         cls.storage_scenarios)
 def generate_scenarios(cls):
     cls.scenarios = (
         testscenarios.multiply_scenarios(cls._call_vs_cast,
                                          cls._cap_scenarios))
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._base,
                                                      cls._prepare)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios,
                                                      cls.worflow_scenarios)
scenario_listener_T = ("listener_T", {"listener_flag": True})
scenario_listener_F = ("listener_F", {"listener_flag": False})

scenario_pool_T = ("pool_T", {"pool_flag": True})
scenario_pool_F = ("pool_F", {"pool_flag": False})

scenario_healthmonitor_T = ("healthmonitor_T", {"healthmonitor_flag": True})
scenario_healthmonitor_F = ("healthmonitor_F", {"healthmonitor_flag": False})

scenario_healthmonitor_to_flag_T = ("healthmonitor_to_flag_T", {"healthmonitor_to_flag": True})
scenario_healthmonitor_to_flag_F = ("healthmonitor_to_flag_F", {"healthmonitor_to_flag": False})

# The following command creates 16 unique scenarios
scenario_create_health_monitor = testscenarios.multiply_scenarios(
    [scenario_lb_T, scenario_lb_F],
    [scenario_listener_T, scenario_listener_F],
    [scenario_pool_T, scenario_pool_F],
    [scenario_healthmonitor_T, scenario_healthmonitor_F],
)

# The following command creates 32 unique scenarios
scenario_update_health_monitor = testscenarios.multiply_scenarios(
    [scenario_healthmonitor_to_flag_T, scenario_healthmonitor_to_flag_F], scenario_create_health_monitor
)


class BaseHealthMonitorAdminStateTest(base_ddt.AdminStateTests):
    @classmethod
    def resource_setup(cls):
        super(BaseHealthMonitorAdminStateTest, cls).resource_setup()

    @classmethod
Example #53
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._max_size,
                                                      cls._create_error)
scenario_listener_F = ('listener_F', {'listener_flag': False})

scenario_pool_T = ('pool_T', {'pool_flag': True})
scenario_pool_F = ('pool_F', {'pool_flag': False})

scenario_member_T = ('member_T', {'member_flag': True})
scenario_member_F = ('member_F', {'member_flag': False})


scenario_mem_to_flag_T = ('member_to_flag_T', {'member_to_flag': True})
scenario_mem_to_flag_F = ('member_to_flag_F', {'member_to_flag': False})

# The following command creates 16 unique scenarios
scenario_create_member = testscenarios.multiply_scenarios(
        [scenario_lb_T, scenario_lb_F],
        [scenario_listener_T, scenario_listener_F],
        [scenario_pool_T, scenario_pool_F],
        [scenario_member_T, scenario_member_F])

# The following command creates 32 unique scenarios
scenario_update_member = testscenarios.multiply_scenarios(
    [scenario_mem_to_flag_T, scenario_mem_to_flag_F],
    scenario_create_member)


class CreateMemberAdminStateTests(base_ddt.AdminStateTests):

    scenarios = scenario_create_member

    @classmethod
    def resource_setup(cls):
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._log_failure,
                                                      cls._add_remote,
                                                      cls._exception_types)
Example #56
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._priority,
                                                      cls._driver)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
                                                      cls._context,
                                                      cls._target)
Example #58
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(cls._n_qpid_topology,
                                                      cls._n_msgs)
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(
         cls.content_scenarios)
Example #60
 def generate_scenarios(cls):
     cls.scenarios = testscenarios.multiply_scenarios(
         cls.scenarios,
         cls.resource_scenarios)
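A hedged sketch of the incremental pattern in this last snippet, with invented axes: re-multiplying an already multiplied scenario list by a new axis yields the same combinations as multiplying all the axes in one call.

import testscenarios

base = [('v1', dict(version=1)), ('v2', dict(version=2))]
storage = [('file', dict(store='file')), ('swift', dict(store='swift'))]
resources = [('image', dict(resource='image'))]

step_wise = testscenarios.multiply_scenarios(
    testscenarios.multiply_scenarios(base, storage), resources)
all_at_once = testscenarios.multiply_scenarios(base, storage, resources)
assert step_wise == all_at_once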