Code example #1
    @classmethod
    def resource_setup(cls):
        """Setup resources needed by the tests."""
        super(IPv6TrafficOperationsScenarioTest, cls).resource_setup()

        lb_name = data_utils.rand_name("lb_member_lb1_ipv6_ops")
        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                     const.NAME: lb_name}

        ip_version = 6
        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)

        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
        cls.lb_id = lb[const.ID]
        cls.addClassResourceCleanup(
            cls.mem_lb_client.cleanup_loadbalancer,
            cls.lb_id)

        cls.lb_vip_address = lb[const.VIP_ADDRESS]

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                cls.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        listener_name = data_utils.rand_name("lb_member_listener1_ipv6_ops")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: const.HTTP,
            const.PROTOCOL_PORT: '80',
            const.LOADBALANCER_ID: cls.lb_id,
        }
        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
        cls.listener_id = listener[const.ID]
        cls.addClassResourceCleanup(
            cls.mem_listener_client.cleanup_listener,
            cls.listener_id,
            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                cls.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        pool_name = data_utils.rand_name("lb_member_pool1_ipv6_ops")
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LISTENER_ID: cls.listener_id,
        }
        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
        cls.pool_id = pool[const.ID]
        cls.addClassResourceCleanup(
            cls.mem_pool_client.cleanup_pool,
            cls.pool_id,
            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                cls.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
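A note on the pattern above: every create call is followed by waiters.wait_for_status because the Octavia API returns immediately while provisioning continues asynchronously. The sketch below illustrates the polling idea behind such a waiter; it is a simplified illustration, not the plugin's actual implementation.

import time


def wait_for_status(show_func, object_id, status_key, expected_status,
                    check_interval, check_timeout):
    # Simplified sketch: poll the show call until the requested status key
    # reaches the expected value or the timeout expires. The real waiter
    # also handles ERROR states and root-tag unwrapping.
    deadline = time.time() + check_timeout
    while time.time() < deadline:
        obj = show_func(object_id)
        if obj[status_key] == expected_status:
            return obj
        time.sleep(check_interval)
    raise TimeoutError('%s never reached %s=%s within %s seconds' %
                       (object_id, status_key, expected_status,
                        check_timeout))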
Code example #2
    def test_amphora_list_and_show(self):
        """Tests amphora show API.

        * Show amphora details.
        * Validate the show reflects the requested values.
        * Validates that other accounts cannot see the amphora.
        """
        lb_name = data_utils.rand_name("lb_member_lb2_amphora-list")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name,
            provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        lb_id = lb[const.ID]
        self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        # Test that a user, without the load balancer member role, cannot
        # list amphorae
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.amphora_client.list_amphorae)

        # Get an actual list of the amphorae
        amphorae = self.lb_admin_amphora_client.list_amphorae()

        # Two load balancers exist at this point (the class-level one and the
        # one created above), so expect at least twice the per-LB amphora
        # count; there may be more depending on the configured topology
        self.assertGreaterEqual(len(amphorae),
                                2 * self._expected_amp_count(amphorae))

        # Test filtering by loadbalancer_id
        amphorae = self.lb_admin_amphora_client.list_amphorae(
            query_params='{loadbalancer_id}={lb_id}'.format(
                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
        self.assertEqual(self._expected_amp_count(amphorae), len(amphorae))
        self.assertEqual(self.lb_id, amphorae[0][const.LOADBALANCER_ID])

        # Test that a different user, with load balancer member role, cannot
        # see this amphora
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.amphora_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.show_amphora,
                              amphora_id=amphorae[0][const.ID])

        # Copy the constant list so the appends below do not mutate it
        show_amphora_response_fields = list(const.SHOW_AMPHORA_RESPONSE_FIELDS)
        if self.lb_admin_amphora_client.is_version_supported(
                self.api_version, '2.1'):
            show_amphora_response_fields.append('created_at')
            show_amphora_response_fields.append('updated_at')
            show_amphora_response_fields.append('image_id')

        for amp in amphorae:

            # Make sure all of the fields exist on the amp list records
            for field in show_amphora_response_fields:
                self.assertIn(field, amp)

            # Verify a few of the fields are the right type
            if self.lb_admin_amphora_client.is_version_supported(
                    self.api_version, '2.1'):
                parser.parse(amp[const.CREATED_AT])
                parser.parse(amp[const.UPDATED_AT])

            UUID(amp[const.ID])
            UUID(amp[const.HA_PORT_ID])
            UUID(amp[const.LOADBALANCER_ID])
            UUID(amp[const.COMPUTE_ID])
            UUID(amp[const.VRRP_PORT_ID])
            self.assertEqual(amp[const.STATUS], const.STATUS_ALLOCATED)
            self.assertIn(amp[const.ROLE], const.AMPHORA_ROLES)

            # Test that all of the fields from the amp list match those
            # from a show for the LB we created.
            amp_obj = self.lb_admin_amphora_client.show_amphora(
                amphora_id=amp[const.ID])
            for field in show_amphora_response_fields:
                self.assertEqual(amp[field], amp_obj[field])
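Note that the bare UUID(...) and parser.parse(...) calls in this test are themselves assertions: both raise an exception on malformed input, which fails the test. A small standalone illustration of the UUID case:

from uuid import UUID

UUID('2b0ed8b5-8f24-4c51-ae41-fd9ed2b6b030')  # well-formed: no exception

try:
    UUID('not-a-uuid')
except ValueError:
    print('malformed IDs raise ValueError, failing the test')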
Code example #3
    def test_healthmonitor_show(self):
        """Tests healthmonitor show API.

        * Create a clean pool to use for the healthmonitor.
        * Create a fully populated healthmonitor.
        * Show healthmonitor details.
        * Validate the show reflects the requested values.
        * Validates that other accounts cannot see the healthmonitor.
        """
        pool_name = data_utils.rand_name("lb_member_pool1_hm-show")
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }

        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        hm_name = data_utils.rand_name("lb_member_hm1-show")
        hm_kwargs = {
            const.POOL_ID: pool[const.ID],
            const.NAME: hm_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 3,
            const.MAX_RETRIES: 4,
            const.MAX_RETRIES_DOWN: 5,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/',
            const.EXPECTED_CODES: '200-204',
            const.ADMIN_STATE_UP: True,
        }

        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        parser.parse(hm[const.CREATED_AT])
        parser.parse(hm[const.UPDATED_AT])
        UUID(hm[const.ID])

        # Healthmonitors are always ONLINE
        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])

        equal_items = [
            const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
            const.MAX_RETRIES, const.MAX_RETRIES_DOWN, const.HTTP_METHOD,
            const.URL_PATH, const.EXPECTED_CODES, const.ADMIN_STATE_UP
        ]

        for item in equal_items:
            self.assertEqual(hm_kwargs[item], hm[item])

        # Test that a user with lb_admin role can see the healthmonitor
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            healthmonitor_client = self.os_roles_lb_admin.healthmonitor_client
            hm_adm = healthmonitor_client.show_healthmonitor(hm[const.ID])
            self.assertEqual(hm_name, hm_adm[const.NAME])

        # Test that a user with cloud admin role can see the healthmonitor
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            adm = self.os_admin.healthmonitor_client.show_healthmonitor(
                hm[const.ID])
            self.assertEqual(hm_name, adm[const.NAME])

        # Test that a different user, with loadbalancer member role, cannot
        # see this healthmonitor
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.healthmonitor_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.show_healthmonitor, hm[const.ID])

        # Test that a user, without the loadbalancer member role, cannot
        # show healthmonitors
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.healthmonitor_client.show_healthmonitor,
                hm[const.ID])
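The RBAC branches in this test (and in the similar show tests that follow) reduce to a small decision table. The sketch below is derived only from the if-conditions above; the literal 'advanced' and 'none' strings are assumptions standing in for const.ADVANCED and const.NONE.

def rbac_checks_that_run(rbac_test_type):
    # Which of the example's RBAC assertions execute for a given setting.
    checks = []
    if rbac_test_type == 'advanced':
        checks += ['lb_admin client can show the resource',
                   'os_primary client gets Forbidden']
    if rbac_test_type != 'none':
        checks += ['cloud admin client can show the resource',
                   'member2 client gets Forbidden']
    return checks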
Code example #4
    def test_l7policy_show(self):
        """Tests l7policy show API.

        * Create a fully populated l7policy.
        * Show l7policy details.
        * Validate the show reflects the requested values.
        * Validates that other accounts cannot see the l7policy.
        """
        listener_name = data_utils.rand_name(
            "lb_member_listener4_l7policy-show")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: const.HTTP,
            const.PROTOCOL_PORT: '81',
            const.LOADBALANCER_ID: self.lb_id,
        }
        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        listener_id = listener[const.ID]
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        l7policy_name = data_utils.rand_name("lb_member_l7policy1-show")
        l7policy_description = data_utils.arbitrary_string(size=255)
        l7policy_kwargs = {
            const.LISTENER_ID: listener_id,
            const.NAME: l7policy_name,
            const.DESCRIPTION: l7policy_description,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 1,
            const.ACTION: const.REJECT,
        }

        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
        self.addClassResourceCleanup(self.mem_l7policy_client.cleanup_l7policy,
                                     l7policy[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        l7policy = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        if not CONF.load_balancer.test_with_noop:
            l7policy = waiters.wait_for_status(
                self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        self.assertEqual(l7policy_name, l7policy[const.NAME])
        self.assertEqual(l7policy_description, l7policy[const.DESCRIPTION])
        self.assertTrue(l7policy[const.ADMIN_STATE_UP])
        parser.parse(l7policy[const.CREATED_AT])
        parser.parse(l7policy[const.UPDATED_AT])
        UUID(l7policy[const.ID])
        # Operating status for a l7policy will be ONLINE if it is enabled:
        if l7policy[const.ADMIN_STATE_UP]:
            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
        self.assertEqual(listener_id, l7policy[const.LISTENER_ID])
        self.assertEqual(1, l7policy[const.POSITION])
        self.assertEqual(const.REJECT, l7policy[const.ACTION])
        self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
        self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))

        # Test that a user with lb_admin role can see the l7policy
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            l7policy_client = self.os_roles_lb_admin.l7policy_client
            l7policy_adm = l7policy_client.show_l7policy(l7policy[const.ID])
            self.assertEqual(l7policy_name, l7policy_adm[const.NAME])

        # Test that a user with cloud admin role can see the l7policy
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            adm = self.os_admin.l7policy_client.show_l7policy(
                l7policy[const.ID])
            self.assertEqual(l7policy_name, adm[const.NAME])

        # Test that a different user, with load balancer member role, cannot
        # see this l7policy
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.l7policy_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.show_l7policy, l7policy[const.ID])

        # Test that a user, without the load balancer member role, cannot
        # show l7policies
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.l7policy_client.show_l7policy,
                              l7policy[const.ID])
Code example #5
    def test_healthmonitor_CRUD(self):
        """Tests healthmonitor create, read, update, delete, and member status

        * Create a fully populated healthmonitor.
        * Show healthmonitor details.
        * Update the healthmonitor.
        * Delete the healthmonitor.
        """
        # Healthmonitor create
        hm_name = data_utils.rand_name("lb_member_hm1-CRUD")
        hm_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: hm_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 2,
            const.MAX_RETRIES: 2,
            const.MAX_RETRIES_DOWN: 2,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/',
            const.EXPECTED_CODES: '200',
            const.ADMIN_STATE_UP: True,
        }

        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        parser.parse(hm[const.CREATED_AT])
        parser.parse(hm[const.UPDATED_AT])
        UUID(hm[const.ID])
        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])

        equal_items = [
            const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
            const.MAX_RETRIES, const.MAX_RETRIES_DOWN, const.HTTP_METHOD,
            const.URL_PATH, const.EXPECTED_CODES, const.ADMIN_STATE_UP
        ]

        for item in equal_items:
            self.assertEqual(hm_kwargs[item], hm[item])

        # Healthmonitor update
        new_name = data_utils.rand_name("lb_member_hm1-update")
        hm_update_kwargs = {
            const.NAME: new_name,
            const.DELAY: hm_kwargs[const.DELAY] + 1,
            const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
            const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
            const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
            const.HTTP_METHOD: const.POST,
            const.URL_PATH: '/test',
            const.EXPECTED_CODES: '201,202',
            const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
        }
        hm = self.mem_healthmonitor_client.update_healthmonitor(
            hm[const.ID], **hm_update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # Test changed items
        equal_items = [
            const.NAME, const.DELAY, const.TIMEOUT, const.MAX_RETRIES,
            const.MAX_RETRIES_DOWN, const.HTTP_METHOD, const.URL_PATH,
            const.EXPECTED_CODES, const.ADMIN_STATE_UP
        ]

        for item in equal_items:
            self.assertEqual(hm_update_kwargs[item], hm[item])

        # Test unchanged items
        equal_items = [const.TYPE]
        for item in equal_items:
            self.assertEqual(hm_kwargs[item], hm[item])

        # Healthmonitor delete
        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
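The delete is confirmed with waiters.wait_for_deleted_status_or_not_found, which accepts either a DELETED provisioning status or a missing record as success. A simplified sketch of that idea, assuming tempest's NotFound exception; this is an illustration, not the plugin's actual helper.

import time

from tempest.lib import exceptions


def wait_for_deleted_or_not_found(show_func, object_id, status_key,
                                  check_interval, check_timeout):
    # Treat either a DELETED status or a 404 from the show call as success.
    deadline = time.time() + check_timeout
    while time.time() < deadline:
        try:
            obj = show_func(object_id)
        except exceptions.NotFound:
            return
        if obj[status_key] == 'DELETED':
            return
        time.sleep(check_interval)
    raise TimeoutError('%s was not deleted within %s seconds' %
                       (object_id, check_timeout))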
Code example #6
    def test_l7policies_and_l7rules(self):
        """Tests sending traffic through a loadbalancer with l7rules

        * Create an extra pool.
        * Put one member on the default pool, and one on the second pool.
        * Create a policy/rule to redirect to the second pool.
        * Create a policy/rule to redirect to the identity URI.
        * Create a policy/rule to reject connections.
        * Test traffic to ensure it goes to the correct place.
        """
        # Create a second pool
        pool_name = data_utils.rand_name("lb_member_pool2_l7redirect")
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }
        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        pool_id = pool[const.ID]
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Set up Member 1 for Webserver 1 on the default pool
        member1_name = data_utils.rand_name("lb_member_member1-l7redirect")
        member1_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: member1_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver1_ip,
            const.PROTOCOL_PORT: 80,
        }
        if self.lb_member_1_subnet:
            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]

        member1 = self.mem_member_client.create_member(**member1_kwargs)
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member1[const.ID],
                        pool_id=self.pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Set up Member 2 for Webserver 2 on the alternate pool
        member2_name = data_utils.rand_name("lb_member_member2-l7redirect")
        member2_kwargs = {
            const.POOL_ID: pool_id,
            const.NAME: member2_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver2_ip,
            const.PROTOCOL_PORT: 80,
        }
        if self.lb_member_2_subnet:
            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]

        member2 = self.mem_member_client.create_member(**member2_kwargs)
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member2[const.ID],
                        pool_id=pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Create the l7policy to redirect to the alternate pool
        l7policy1_name = data_utils.rand_name("lb_member_l7policy1-l7redirect")
        l7policy1_description = data_utils.arbitrary_string(size=255)
        l7policy1_kwargs = {
            const.LISTENER_ID: self.listener_id,
            const.NAME: l7policy1_name,
            const.DESCRIPTION: l7policy1_description,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 1,
            const.ACTION: const.REDIRECT_TO_POOL,
            const.REDIRECT_POOL_ID: pool_id,
        }
        l7policy1 = self.mem_l7policy_client.create_l7policy(
            **l7policy1_kwargs)
        self.addCleanup(self.mem_l7policy_client.cleanup_l7policy,
                        l7policy1[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Redirect slow queries to the alternate pool
        l7rule1_kwargs = {
            const.L7POLICY_ID: l7policy1[const.ID],
            const.ADMIN_STATE_UP: True,
            const.TYPE: const.PATH,
            const.VALUE: '/slow',
            const.COMPARE_TYPE: const.STARTS_WITH,
            const.INVERT: False,
        }

        l7rule1 = self.mem_l7rule_client.create_l7rule(**l7rule1_kwargs)
        self.addCleanup(self.mem_l7rule_client.cleanup_l7rule,
                        l7rule1[const.ID],
                        l7policy_id=l7rule1_kwargs[const.L7POLICY_ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Create the l7policy to redirect to the identity URI
        l7policy2_name = data_utils.rand_name("lb_member_l7policy2-l7redirect")
        l7policy2_description = data_utils.arbitrary_string(size=255)
        l7policy2_kwargs = {
            const.LISTENER_ID: self.listener_id,
            const.NAME: l7policy2_name,
            const.DESCRIPTION: l7policy2_description,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 1,
            const.ACTION: const.REDIRECT_TO_URL,
            const.REDIRECT_URL: CONF.identity.uri_v3,
        }
        l7policy2 = self.mem_l7policy_client.create_l7policy(
            **l7policy2_kwargs)
        self.addCleanup(self.mem_l7policy_client.cleanup_l7policy,
                        l7policy2[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Redirect queries for 'turtles' to identity
        l7rule2_kwargs = {
            const.L7POLICY_ID: l7policy2[const.ID],
            const.ADMIN_STATE_UP: True,
            const.TYPE: const.PATH,
            const.VALUE: '/turtles',
            const.COMPARE_TYPE: const.EQUAL_TO,
            const.INVERT: False,
        }

        l7rule2 = self.mem_l7rule_client.create_l7rule(**l7rule2_kwargs)
        self.addCleanup(self.mem_l7rule_client.cleanup_l7rule,
                        l7rule2[const.ID],
                        l7policy_id=l7rule2_kwargs[const.L7POLICY_ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Create the l7policy to reject requests
        l7policy3_name = data_utils.rand_name("lb_member_l7policy3-l7redirect")
        l7policy3_description = data_utils.arbitrary_string(size=255)
        l7policy3_kwargs = {
            const.LISTENER_ID: self.listener_id,
            const.NAME: l7policy3_name,
            const.DESCRIPTION: l7policy3_description,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 1,
            const.ACTION: const.REJECT,
        }
        l7policy3 = self.mem_l7policy_client.create_l7policy(
            **l7policy3_kwargs)
        self.addCleanup(self.mem_l7policy_client.cleanup_l7policy,
                        l7policy3[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Reject requests that include the header data 'reject=true'
        l7rule3_kwargs = {
            const.L7POLICY_ID: l7policy3[const.ID],
            const.ADMIN_STATE_UP: True,
            const.TYPE: const.HEADER,
            const.KEY: 'reject',
            const.VALUE: 'true',
            const.COMPARE_TYPE: const.EQUAL_TO,
            const.INVERT: False,
        }

        l7rule3 = self.mem_l7rule_client.create_l7rule(**l7rule3_kwargs)
        self.addCleanup(self.mem_l7rule_client.cleanup_l7rule,
                        l7rule3[const.ID],
                        l7policy_id=l7rule3_kwargs[const.L7POLICY_ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Assert that normal traffic goes to pool1->member1
        url_for_member1 = 'http://{}/'.format(self.lb_vip_address)
        self.assertConsistentResponse((200, self.webserver1_response),
                                      url_for_member1)

        # Assert that slow traffic goes to pool2->member2
        url_for_member2 = 'http://{}/slow?delay=1s'.format(self.lb_vip_address)
        self.assertConsistentResponse((200, self.webserver2_response),
                                      url_for_member2)

        # Assert that /turtles is redirected to identity
        url_for_identity = 'http://{}/turtles'.format(self.lb_vip_address)
        self.assertConsistentResponse((302, CONF.identity.uri_v3),
                                      url_for_identity,
                                      redirect=True)

        # Assert that traffic with header 'reject=true' is rejected
        self.assertConsistentResponse((403, None),
                                      url_for_member1,
                                      headers={'reject': 'true'})
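assertConsistentResponse is a plugin helper that, roughly, sends the same request several times and requires every response to match the expected (status, body) pair, so a backend that only answers correctly some of the time still fails. The standalone approximation below uses requests; the repeat count is arbitrary, and the real helper additionally understands redirect targets.

import requests


def assert_consistent_response(expected_status, expected_body, url,
                               repeat=10, headers=None):
    # Every attempt must return the expected status code, and the expected
    # body fragment when one is given.
    for _ in range(repeat):
        resp = requests.get(url, headers=headers,
                            allow_redirects=False, timeout=10)
        assert resp.status_code == expected_status
        if expected_body is not None:
            assert expected_body in resp.text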
Code example #7
    def _test_l7policy_create(self, url=None, pool_id=None):
        """Tests l7policy create and basic show APIs.

        * Tests that users without the loadbalancer member role cannot
          create l7policies.
        * Create a fully populated l7policy.
        * Show l7policy details.
        * Validate the show reflects the requested values.
        """
        l7policy_name = data_utils.rand_name("lb_member_l7policy1-create")
        l7policy_description = data_utils.arbitrary_string(size=255)
        l7policy_kwargs = {
            const.LISTENER_ID: self.listener_id,
            const.NAME: l7policy_name,
            const.DESCRIPTION: l7policy_description,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 1,
        }
        if url:
            l7policy_kwargs[const.ACTION] = const.REDIRECT_TO_URL
            l7policy_kwargs[const.REDIRECT_URL] = url
        elif pool_id:
            l7policy_kwargs[const.ACTION] = const.REDIRECT_TO_POOL
            l7policy_kwargs[const.REDIRECT_POOL_ID] = pool_id
        else:
            l7policy_kwargs[const.ACTION] = const.REJECT

        # Test that a user without the load balancer member role cannot
        # create an l7policy
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.l7policy_client.create_l7policy,
                              **l7policy_kwargs)

        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
        self.addClassResourceCleanup(self.mem_l7policy_client.cleanup_l7policy,
                                     l7policy[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        l7policy = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        if not CONF.load_balancer.test_with_noop:
            l7policy = waiters.wait_for_status(
                self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        self.assertEqual(l7policy_name, l7policy[const.NAME])
        self.assertEqual(l7policy_description, l7policy[const.DESCRIPTION])
        self.assertTrue(l7policy[const.ADMIN_STATE_UP])
        parser.parse(l7policy[const.CREATED_AT])
        parser.parse(l7policy[const.UPDATED_AT])
        UUID(l7policy[const.ID])
        # Operating status for a l7policy will be ONLINE if it is enabled:
        if l7policy[const.ADMIN_STATE_UP]:
            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
        self.assertEqual(self.listener_id, l7policy[const.LISTENER_ID])
        self.assertEqual(1, l7policy[const.POSITION])
        if url:
            self.assertEqual(const.REDIRECT_TO_URL, l7policy[const.ACTION])
            self.assertEqual(url, l7policy[const.REDIRECT_URL])
            self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))
        elif pool_id:
            self.assertEqual(const.REDIRECT_TO_POOL, l7policy[const.ACTION])
            self.assertEqual(pool_id, l7policy[const.REDIRECT_POOL_ID])
            self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
        else:
            self.assertEqual(const.REJECT, l7policy[const.ACTION])
            self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
            self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))
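Because _test_l7policy_create branches on its url and pool_id arguments, the concrete tests presumably call it once per action type, along these lines. The wrapper method names, URL, and pool_id attribute below are illustrative only, not taken from the source.

    def test_l7policy_create_reject(self):
        self._test_l7policy_create()

    def test_l7policy_create_redirect_to_url(self):
        self._test_l7policy_create(url='http://localhost')

    def test_l7policy_create_redirect_to_pool(self):
        self._test_l7policy_create(pool_id=self.pool_id)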
Code example #8
    def test_pool_list(self):
        """Tests pool list API and field filtering.

        * Create a clean loadbalancer.
        * Create three pools.
        * Validates that other accounts cannot list the pools.
        * List the pools using the default sort order.
        * List the pools using descending sort order.
        * List the pools using ascending sort order.
        * List the pools returning one field at a time.
        * List the pools returning two fields.
        * List the pools filtering to one of the three.
        * List the pools filtered, one field, and sorted.
        """
        lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name,
            provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        lb_id = lb[const.ID]
        self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        pool1_name = data_utils.rand_name("lb_member_pool2-list")
        pool1_desc = 'B'
        pool1_sp_cookie_name = 'my_cookie1'
        pool1_kwargs = {
            const.NAME: pool1_name,
            const.DESCRIPTION: pool1_desc,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: lb_id,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool1_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool1_sp_cookie_name,
            }
        pool1 = self.mem_pool_client.create_pool(**pool1_kwargs)
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool1[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=lb_id)
        pool1 = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                        pool1[const.ID],
                                        const.PROVISIONING_STATUS,
                                        const.ACTIVE,
                                        CONF.load_balancer.build_interval,
                                        CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        pool2_name = data_utils.rand_name("lb_member_pool1-list")
        pool2_desc = 'A'
        pool2_sp_cookie_name = 'my_cookie2'
        pool2_kwargs = {
            const.NAME: pool2_name,
            const.DESCRIPTION: pool2_desc,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: lb_id,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool2_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool2_sp_cookie_name,
            }
        pool2 = self.mem_pool_client.create_pool(**pool2_kwargs)
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool2[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=lb_id)
        pool2 = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                        pool2[const.ID],
                                        const.PROVISIONING_STATUS,
                                        const.ACTIVE,
                                        CONF.load_balancer.build_interval,
                                        CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        pool3_name = data_utils.rand_name("lb_member_pool3-list")
        pool3_desc = 'C'
        pool3_kwargs = {
            const.NAME: pool3_name,
            const.DESCRIPTION: pool3_desc,
            const.ADMIN_STATE_UP: False,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            # No session persistence, just so there's one test for that
            const.LOADBALANCER_ID: lb_id,
        }
        pool3 = self.mem_pool_client.create_pool(**pool3_kwargs)
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool3[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=lb_id)
        pool3 = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                        pool3[const.ID],
                                        const.PROVISIONING_STATUS,
                                        const.ACTIVE,
                                        CONF.load_balancer.build_interval,
                                        CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Test that a different user cannot list pools
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.pool_client
            primary = member2_client.list_pools(
                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
            self.assertEqual(0, len(primary))

        # Test that a user without the lb member role cannot list pools
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.pool_client.list_pools)

        # Check the default sort order, created_at
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
        self.assertEqual(pool1[const.DESCRIPTION], pools[0][const.DESCRIPTION])
        self.assertEqual(pool2[const.DESCRIPTION], pools[1][const.DESCRIPTION])
        self.assertEqual(pool3[const.DESCRIPTION], pools[2][const.DESCRIPTION])

        # Test sort descending by description
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{desc}'.
            format(lb_id=lb_id,
                   sort=const.SORT,
                   descr=const.DESCRIPTION,
                   desc=const.DESC))
        self.assertEqual(pool1[const.DESCRIPTION], pools[1][const.DESCRIPTION])
        self.assertEqual(pool2[const.DESCRIPTION], pools[2][const.DESCRIPTION])
        self.assertEqual(pool3[const.DESCRIPTION], pools[0][const.DESCRIPTION])

        # Test sort ascending by description
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{asc}'.format(
                lb_id=lb_id,
                sort=const.SORT,
                descr=const.DESCRIPTION,
                asc=const.ASC))
        self.assertEqual(pool1[const.DESCRIPTION], pools[1][const.DESCRIPTION])
        self.assertEqual(pool2[const.DESCRIPTION], pools[0][const.DESCRIPTION])
        self.assertEqual(pool3[const.DESCRIPTION], pools[2][const.DESCRIPTION])

        # Test fields
        for field in const.SHOW_POOL_RESPONSE_FIELDS:
            pools = self.mem_pool_client.list_pools(
                query_params='loadbalancer_id={lb_id}&{fields}={field}'.format(
                    lb_id=lb_id, fields=const.FIELDS, field=field))
            self.assertEqual(1, len(pools[0]))
            self.assertEqual(pool1[field], pools[0][field])
            self.assertEqual(pool2[field], pools[1][field])
            self.assertEqual(pool3[field], pools[2][field])

        # Test multiple fields at the same time
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
            '{fields}={created}'.format(lb_id=lb_id,
                                        fields=const.FIELDS,
                                        admin=const.ADMIN_STATE_UP,
                                        created=const.CREATED_AT))
        self.assertEqual(2, len(pools[0]))
        self.assertTrue(pools[0][const.ADMIN_STATE_UP])
        parser.parse(pools[0][const.CREATED_AT])
        self.assertTrue(pools[1][const.ADMIN_STATE_UP])
        parser.parse(pools[1][const.CREATED_AT])
        self.assertFalse(pools[2][const.ADMIN_STATE_UP])
        parser.parse(pools[2][const.CREATED_AT])

        # Test filtering
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{desc}={lb_desc}'.format(
                lb_id=lb_id,
                desc=const.DESCRIPTION,
                lb_desc=pool2[const.DESCRIPTION]))
        self.assertEqual(1, len(pools))
        self.assertEqual(pool2[const.DESCRIPTION], pools[0][const.DESCRIPTION])

        # Test combined params
        pools = self.mem_pool_client.list_pools(
            query_params='loadbalancer_id={lb_id}&{admin}={true}&'
            '{fields}={descr}&{fields}={id}&'
            '{sort}={descr}:{desc}'.format(lb_id=lb_id,
                                           admin=const.ADMIN_STATE_UP,
                                           true=const.ADMIN_STATE_UP_TRUE,
                                           fields=const.FIELDS,
                                           descr=const.DESCRIPTION,
                                           id=const.ID,
                                           sort=const.SORT,
                                           desc=const.DESC))
        # Should get two pools
        self.assertEqual(2, len(pools))
        # pools should have two fields
        self.assertEqual(2, len(pools[0]))
        # Should be in descending order
        self.assertEqual(pool2[const.DESCRIPTION], pools[1][const.DESCRIPTION])
        self.assertEqual(pool1[const.DESCRIPTION], pools[0][const.DESCRIPTION])
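The query_params strings are passed through to the list request as-is, so once the constants are resolved the combined call above amounts to a query along these lines (assuming const.ADMIN_STATE_UP_TRUE resolves to the string 'true'; the UUID placeholder is illustrative):

# Roughly: GET .../lbaas/pools?<query> with
query = ('loadbalancer_id=<lb uuid>&admin_state_up=true'
         '&fields=description&fields=id&sort=description:desc')
# i.e. only admin-up pools on this load balancer, only two fields per
# record, sorted by description in descending order.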
Code example #9
    def test_pool_show(self):
        """Tests pool show API.

        * Create a fully populated pool.
        * Show pool details.
        * Validate the show reflects the requested values.
        * Validates that other accounts cannot see the pool.
        """
        pool_name = data_utils.rand_name("lb_member_pool1-show")
        pool_description = data_utils.arbitrary_string(size=255)
        pool_sp_cookie_name = 'my_cookie'
        pool_kwargs = {
            const.NAME: pool_name,
            const.DESCRIPTION: pool_description,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool_sp_cookie_name,
            }

        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.addClassResourceCleanup(self.mem_pool_client.cleanup_pool,
                                     pool[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                       pool[const.ID],
                                       const.PROVISIONING_STATUS, const.ACTIVE,
                                       CONF.load_balancer.build_interval,
                                       CONF.load_balancer.build_timeout)

        self.assertEqual(pool_name, pool[const.NAME])
        self.assertEqual(pool_description, pool[const.DESCRIPTION])
        self.assertTrue(pool[const.ADMIN_STATE_UP])
        parser.parse(pool[const.CREATED_AT])
        parser.parse(pool[const.UPDATED_AT])
        UUID(pool[const.ID])
        # Operating status for pools will always be offline without members
        self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, pool[const.PROTOCOL])
        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
        self.assertEmpty(pool[const.LISTENERS])
        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
                         pool[const.LB_ALGORITHM])
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
            self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
                             pool[const.SESSION_PERSISTENCE][const.TYPE])
            self.assertEqual(
                pool_sp_cookie_name,
                pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])

        # Test that a user with lb_admin role can see the pool
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            pool_client = self.os_roles_lb_admin.pool_client
            pool_adm = pool_client.show_pool(pool[const.ID])
            self.assertEqual(pool_name, pool_adm[const.NAME])

        # Test that a user with cloud admin role can see the pool
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            adm = self.os_admin.pool_client.show_pool(pool[const.ID])
            self.assertEqual(pool_name, adm[const.NAME])

        # Test that a different user, with load balancer member role, cannot
        # see this pool
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.pool_client
            self.assertRaises(exceptions.Forbidden, member2_client.show_pool,
                              pool[const.ID])

        # Test that a user, without the load balancer member role, cannot
        # show pools
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.pool_client.show_pool,
                              pool[const.ID])
Code example #10
    def test_load_balancer_show_status(self):
        """Tests load balancer show status tree API.

        * Create a load balancer.
        * Validates that other accounts cannot see the status for the
          load balancer.
        * Show load balancer status tree.
        * Validate the show reflects the expected values.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-status")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name, provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        self.addClassResourceCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)
        if not CONF.load_balancer.test_with_noop:
            lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                         lb[const.ID], const.OPERATING_STATUS,
                                         const.ONLINE,
                                         CONF.load_balancer.check_interval,
                                         CONF.load_balancer.check_timeout)

        # Test that a user, without the load balancer member role, cannot
        # use this method
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.get_loadbalancer_status,
                lb[const.ID])

        # Test that a different user, with load balancer role, cannot see
        # the load balancer status
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.loadbalancer_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.get_loadbalancer_status,
                              lb[const.ID])

        status = self.mem_lb_client.get_loadbalancer_status(lb[const.ID])

        self.assertEqual(1, len(status))
        lb_status = status[const.LOADBALANCER]
        self.assertEqual(5, len(lb_status))
        self.assertEqual(lb[const.ID], lb_status[const.ID])
        self.assertEqual([], lb_status[const.LISTENERS])
        self.assertEqual(lb_name, lb_status[const.NAME])
        # Operating status is a measured status, so no-op will not go online
        if CONF.load_balancer.test_with_noop:
            self.assertEqual(const.OFFLINE, lb_status[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.ONLINE, lb_status[const.OPERATING_STATUS])
        self.assertEqual(const.ACTIVE, lb_status[const.PROVISIONING_STATUS])

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
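Pieced together from the assertions above, the status tree returned by get_loadbalancer_status for a listener-less load balancer has roughly the shape sketched below (assuming const.LOADBALANCER resolves to 'loadbalancer'; the field values are illustrative):

status = {
    'loadbalancer': {
        'id': '<lb uuid>',
        'name': 'lb_member_lb1-status-<random suffix>',
        'listeners': [],                   # nothing attached yet
        'operating_status': 'ONLINE',      # 'OFFLINE' when test_with_noop
        'provisioning_status': 'ACTIVE',
    }
}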
Code example #11
    def test_load_balancer_failover(self):
        """Tests load balancer failover API.

        * Create a load balancer.
        * Validates that other accounts cannot failover the load balancer.
        * Wait for the load balancer to go ACTIVE.
        * Failover the load balancer.
        * Wait for the load balancer to go ACTIVE.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-failover")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name, provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        self.addClassResourceCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        # Test RBAC not authorized for non-admin role
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            self.assertRaises(exceptions.Forbidden,
                              self.mem_lb_client.failover_loadbalancer,
                              lb[const.ID])

        # Assert we didn't go into PENDING_*
        lb = self.mem_lb_client.show_loadbalancer(lb[const.ID])
        self.assertEqual(const.ACTIVE, lb[const.PROVISIONING_STATUS])

        if CONF.load_balancer.provider in const.AMPHORA_PROVIDERS:
            before_amphorae = self.lb_admin_amphora_client.list_amphorae(
                query_params='{loadbalancer_id}={lb_id}'.format(
                    loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))

        self.os_roles_lb_admin.loadbalancer_client.failover_loadbalancer(
            lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        if CONF.load_balancer.provider in const.AMPHORA_PROVIDERS:
            after_amphorae = self.lb_admin_amphora_client.list_amphorae(
                query_params='{loadbalancer_id}={lb_id}'.format(
                    loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))

            # Make sure all of the amphora on the load balancer have
            # failed over
            for amphora in before_amphorae:
                for new_amp in after_amphorae:
                    self.assertNotEqual(amphora[const.ID], new_amp[const.ID])

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
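
The nested loops above verify failover by comparing every pre-failover amphora ID against every post-failover ID. Below is a self-contained, set-based sketch of the same check; it uses plain dicts in place of the amphora API responses, and the function name is ours, not the plugin's.

# Self-contained sketch of the "all amphorae were replaced" check above,
# using plain dicts instead of Octavia amphora API responses.
def assert_all_amphorae_replaced(before, after):
    """Fail if any amphora ID seen before the failover is still present."""
    before_ids = {amp['id'] for amp in before}
    after_ids = {amp['id'] for amp in after}
    survivors = before_ids & after_ids
    assert not survivors, 'amphorae not replaced: %s' % sorted(survivors)

# Example: both pre-failover amphorae were replaced by new ones.
assert_all_amphorae_replaced(
    before=[{'id': 'amp-old-1'}, {'id': 'amp-old-2'}],
    after=[{'id': 'amp-new-1'}, {'id': 'amp-new-2'}])
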
Code example #12
    def test_load_balancer_update(self):
        """Tests load balancer update and show APIs.

        * Create a fully populated load balancer.
        * Show load balancer details.
        * Validate the show reflects the initial values.
        * Validates that other accounts cannot update the load balancer.
        * Update the load balancer details.
        * Show load balancer details.
        * Validate the show reflects the updated values.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-update")
        lb_description = data_utils.arbitrary_string(size=255)

        lb_kwargs = {const.ADMIN_STATE_UP: False,
                     const.DESCRIPTION: lb_description,
                     const.PROVIDER: CONF.load_balancer.provider,
                     # TODO(johnsom) Fix test to use a real flavor
                     # flavor=lb_flavor,
                     # TODO(johnsom) Add QoS
                     # vip_qos_policy_id=lb_qos_policy_id)
                     const.NAME: lb_name}

        self._setup_lb_network_kwargs(lb_kwargs, 4, use_fixed_ip=True)

        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)

        self.addClassResourceCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        self.assertFalse(lb[const.ADMIN_STATE_UP])
        parser.parse(lb[const.CREATED_AT])
        parser.parse(lb[const.UPDATED_AT])
        self.assertEqual(lb_description, lb[const.DESCRIPTION])
        UUID(lb[const.ID])
        self.assertEqual(lb_name, lb[const.NAME])
        self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
        self.assertEqual(self.os_roles_lb_member.credentials.project_id,
                         lb[const.PROJECT_ID])
        self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
        self.assertEqual(self.lb_member_vip_net[const.ID],
                         lb[const.VIP_NETWORK_ID])
        self.assertIsNotNone(lb[const.VIP_PORT_ID])
        if lb_kwargs[const.VIP_SUBNET_ID]:
            self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
                             lb[const.VIP_ADDRESS])
            self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                             lb[const.VIP_SUBNET_ID])

        new_name = data_utils.rand_name("lb_member_lb1-update")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')

        # Test that a user, without the load balancer member role, cannot
        # use this command
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.update_loadbalancer,
                lb[const.ID], admin_state_up=True)

        # Assert we didn't go into PENDING_*
        lb_check = self.mem_lb_client.show_loadbalancer(lb[const.ID])
        self.assertEqual(const.ACTIVE, lb_check[const.PROVISIONING_STATUS])
        self.assertFalse(lb_check[const.ADMIN_STATE_UP])

        # Test that a different user, with the load balancer member role,
        # cannot update this load balancer
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.loadbalancer_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.update_loadbalancer,
                              lb[const.ID], admin_state_up=True)

        # Assert we didn't go into PENDING_*
        lb_check = self.mem_lb_client.show_loadbalancer(lb[const.ID])
        self.assertEqual(const.ACTIVE, lb_check[const.PROVISIONING_STATUS])
        self.assertFalse(lb_check[const.ADMIN_STATE_UP])

        lb = self.mem_lb_client.update_loadbalancer(
            lb[const.ID],
            admin_state_up=True,
            description=new_description,
            # TODO(johnsom) Add QoS
            # vip_qos_policy_id=lb_qos_policy_id)
            name=new_name)

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        self.assertTrue(lb[const.ADMIN_STATE_UP])
        self.assertEqual(new_description, lb[const.DESCRIPTION])
        self.assertEqual(new_name, lb[const.NAME])
        # TODO(johnsom) Add QoS

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
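
Nearly every step in these examples blocks on waiters.wait_for_status. The sketch below is a simplified, illustrative poller in that spirit, not the octavia-tempest-plugin implementation (the real waiter also handles error statuses, response unwrapping and other details): call the show function until the watched key reaches the expected value, or give up after a timeout.

import time

# Illustrative only; see the lead-in above.
def wait_for_status_sketch(show_func, object_id, status_key, expected,
                           check_interval, check_timeout, **kwargs):
    """Poll show_func(object_id) until status_key equals expected."""
    deadline = time.time() + check_timeout
    while time.time() < deadline:
        obj = show_func(object_id, **kwargs)
        if obj[status_key] == expected:
            return obj
        time.sleep(check_interval)
    raise TimeoutError('%s never reached %s=%s within %s seconds' %
                       (object_id, status_key, expected, check_timeout))
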
Code example #13
    def _test_load_balancer_create(self, ip_version):
        """Tests load balancer create and basic show APIs.

        * Tests that users without the load balancer member role cannot
          create load balancers.
        * Create a fully populated load balancer.
        * Show load balancer details.
        * Validate the show reflects the requested values.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-create-"
                                       "ipv{}".format(ip_version))
        lb_description = data_utils.arbitrary_string(size=255)

        lb_kwargs = {const.ADMIN_STATE_UP: True,
                     const.DESCRIPTION: lb_description,
                     const.PROVIDER: CONF.load_balancer.provider,
                     # TODO(johnsom) Fix test to use a real flavor
                     # flavor=lb_flavor,
                     # TODO(johnsom) Add QoS
                     # vip_qos_policy_id=lb_qos_policy_id)
                     const.NAME: lb_name}

        self._setup_lb_network_kwargs(lb_kwargs, ip_version, use_fixed_ip=True)

        # Test that a user without the load balancer role cannot
        # create a load balancer
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.create_loadbalancer,
                **lb_kwargs)

        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)

        self.addClassResourceCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)
        if not CONF.load_balancer.test_with_noop:
            lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                         lb[const.ID], const.OPERATING_STATUS,
                                         const.ONLINE,
                                         CONF.load_balancer.check_interval,
                                         CONF.load_balancer.check_timeout)

        self.assertTrue(lb[const.ADMIN_STATE_UP])
        parser.parse(lb[const.CREATED_AT])
        parser.parse(lb[const.UPDATED_AT])
        self.assertEqual(lb_description, lb[const.DESCRIPTION])
        UUID(lb[const.ID])
        self.assertEqual(lb_name, lb[const.NAME])
        # Operating status is a measured status, so no-op will not go online
        if CONF.load_balancer.test_with_noop:
            self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.ONLINE, lb[const.OPERATING_STATUS])
        if ip_version == 4:
            self.assertEqual(self.lb_member_vip_net[const.ID],
                             lb[const.VIP_NETWORK_ID])
        else:
            self.assertEqual(self.lb_member_vip_ipv6_net[const.ID],
                             lb[const.VIP_NETWORK_ID])

        self.assertEqual(self.os_roles_lb_member.credentials.project_id,
                         lb[const.PROJECT_ID])
        self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
        self.assertIsNotNone(lb[const.VIP_PORT_ID])
        if lb_kwargs[const.VIP_SUBNET_ID]:
            if ip_version == 4 or self.lb_member_vip_ipv6_subnet_stateful:
                self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
                                 lb[const.VIP_ADDRESS])
            self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                             lb[const.VIP_SUBNET_ID])

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
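
For context, _setup_lb_network_kwargs is what fills in the VIP networking fields asserted above (vip_network_id, vip_subnet_id and, with use_fixed_ip=True, vip_address). The dict below is an illustrative sketch of what the create payload might contain for the IPv4 case; every value is a placeholder and the exact keys depend on the member network configuration.

# Illustrative only: plausible lb_kwargs after
# _setup_lb_network_kwargs(lb_kwargs, 4, use_fixed_ip=True).
lb_kwargs_example = {
    'admin_state_up': True,
    'name': 'lb_member_lb1-create-ipv4-<random suffix>',
    'description': '<255-character random string>',
    'provider': '<CONF.load_balancer.provider>',
    'vip_network_id': '<lb_member_vip_net UUID>',
    'vip_subnet_id': '<lb_member_vip_subnet UUID>',
    'vip_address': '<fixed IP from the VIP subnet>',
}
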
Code example #14
    def test_load_balancer_list(self):
        """Tests load balancer list API and field filtering.

        * Create three load balancers.
        * Validates that other accounts cannot list the load balancers.
        * List the load balancers using the default sort order.
        * List the load balancers using descending sort order.
        * List the load balancers using ascending sort order.
        * List the load balancers returning one field at a time.
        * List the load balancers returning two fields.
        * List the load balancers filtering to one of the three.
        * List the load balancers filtered, one field, and sorted.
        """
        # Get a list of pre-existing LBs to filter from test data
        pretest_lbs = self.mem_lb_client.list_loadbalancers()
        # Store their IDs for easy access
        pretest_lb_ids = [lb['id'] for lb in pretest_lbs]

        lb_name = data_utils.rand_name("lb_member_lb2-list")
        lb_description = data_utils.rand_name('B')

        lb = self.mem_lb_client.create_loadbalancer(
            admin_state_up=True,
            description=lb_description,
            # TODO(johnsom) Fix test to use a real flavor
            # flavor=lb_flavor,
            provider=CONF.load_balancer.provider,
            name=lb_name,
            # TODO(johnsom) Add QoS
            # vip_qos_policy_id=lb_qos_policy_id)
            vip_network_id=self.lb_member_vip_net[const.ID])
        self.addCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb[const.ID])

        lb1 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                      lb[const.ID],
                                      const.PROVISIONING_STATUS,
                                      const.ACTIVE,
                                      CONF.load_balancer.lb_build_interval,
                                      CONF.load_balancer.lb_build_timeout)
        if not CONF.load_balancer.test_with_noop:
            lb1 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                          lb[const.ID], const.OPERATING_STATUS,
                                          const.ONLINE,
                                          CONF.load_balancer.check_interval,
                                          CONF.load_balancer.check_timeout)

        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        lb_name = data_utils.rand_name("lb_member_lb1-list")
        lb_description = data_utils.rand_name('A')

        lb = self.mem_lb_client.create_loadbalancer(
            admin_state_up=True,
            description=lb_description,
            provider=CONF.load_balancer.provider,
            name=lb_name,
            vip_network_id=self.lb_member_vip_net[const.ID])
        self.addCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb[const.ID])

        lb2 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                      lb[const.ID],
                                      const.PROVISIONING_STATUS,
                                      const.ACTIVE,
                                      CONF.load_balancer.lb_build_interval,
                                      CONF.load_balancer.lb_build_timeout)
        if not CONF.load_balancer.test_with_noop:
            lb2 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                          lb[const.ID], const.OPERATING_STATUS,
                                          const.ONLINE,
                                          CONF.load_balancer.check_interval,
                                          CONF.load_balancer.check_timeout)

        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        lb_name = data_utils.rand_name("lb_member_lb3-list")
        lb_description = data_utils.rand_name('C')

        lb = self.mem_lb_client.create_loadbalancer(
            admin_state_up=False,
            description=lb_description,
            provider=CONF.load_balancer.provider,
            name=lb_name,
            vip_network_id=self.lb_member_vip_net[const.ID])
        self.addCleanup(
            self.mem_lb_client.cleanup_loadbalancer,
            lb[const.ID])

        lb3 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                      lb[const.ID],
                                      const.PROVISIONING_STATUS,
                                      const.ACTIVE,
                                      CONF.load_balancer.lb_build_interval,
                                      CONF.load_balancer.lb_build_timeout)

        # Test that a different user cannot list load balancers
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.loadbalancer_client
            primary = member2_client.list_loadbalancers()
            self.assertEqual(0, len(primary))

        # Test that a user without the lb member role cannot list load
        # balancers
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.list_loadbalancers)

        # Check the default sort order, created_at
        lbs = self.mem_lb_client.list_loadbalancers()
        lbs = self._filter_lbs_by_id(lbs, pretest_lb_ids)
        self.assertEqual(lb1[const.DESCRIPTION], lbs[0][const.DESCRIPTION])
        self.assertEqual(lb2[const.DESCRIPTION], lbs[1][const.DESCRIPTION])
        self.assertEqual(lb3[const.DESCRIPTION], lbs[2][const.DESCRIPTION])

        # Test sort descending by description
        lbs = self.mem_lb_client.list_loadbalancers(
            query_params='{sort}={descr}:{desc}'.format(
                sort=const.SORT, descr=const.DESCRIPTION, desc=const.DESC))
        lbs = self._filter_lbs_by_id(lbs, pretest_lb_ids)
        self.assertEqual(lb1[const.DESCRIPTION], lbs[1][const.DESCRIPTION])
        self.assertEqual(lb2[const.DESCRIPTION], lbs[2][const.DESCRIPTION])
        self.assertEqual(lb3[const.DESCRIPTION], lbs[0][const.DESCRIPTION])

        # Test sort ascending by description
        lbs = self.mem_lb_client.list_loadbalancers(
            query_params='{sort}={descr}:{asc}'.format(sort=const.SORT,
                                                       descr=const.DESCRIPTION,
                                                       asc=const.ASC))
        lbs = self._filter_lbs_by_id(lbs, pretest_lb_ids)
        self.assertEqual(lb1[const.DESCRIPTION], lbs[1][const.DESCRIPTION])
        self.assertEqual(lb2[const.DESCRIPTION], lbs[0][const.DESCRIPTION])
        self.assertEqual(lb3[const.DESCRIPTION], lbs[2][const.DESCRIPTION])

        # Determine indexes of pretest LBs in default sort
        pretest_lb_indexes = []
        lbs = self.mem_lb_client.list_loadbalancers()
        for i, lb in enumerate(lbs):
            if lb['id'] in pretest_lb_ids:
                pretest_lb_indexes.append(i)

        # Test fields
        for field in const.SHOW_LOAD_BALANCER_RESPONSE_FIELDS:
            lbs = self.mem_lb_client.list_loadbalancers(
                query_params='{fields}={field}'.format(fields=const.FIELDS,
                                                       field=field))
            lbs = self._filter_lbs_by_index(lbs, pretest_lb_indexes)
            self.assertEqual(1, len(lbs[0]))
            self.assertEqual(lb1[field], lbs[0][field])
            self.assertEqual(lb2[field], lbs[1][field])
            self.assertEqual(lb3[field], lbs[2][field])

        # Test multiple fields at the same time
        lbs = self.mem_lb_client.list_loadbalancers(
            query_params='{fields}={admin}&{fields}={created}'.format(
                fields=const.FIELDS, admin=const.ADMIN_STATE_UP,
                created=const.CREATED_AT))
        lbs = self._filter_lbs_by_index(lbs, pretest_lb_indexes)
        self.assertEqual(2, len(lbs[0]))
        self.assertTrue(lbs[0][const.ADMIN_STATE_UP])
        parser.parse(lbs[0][const.CREATED_AT])
        self.assertTrue(lbs[1][const.ADMIN_STATE_UP])
        parser.parse(lbs[1][const.CREATED_AT])
        self.assertFalse(lbs[2][const.ADMIN_STATE_UP])
        parser.parse(lbs[2][const.CREATED_AT])

        # Test filtering
        lbs = self.mem_lb_client.list_loadbalancers(
            query_params='{desc}={lb_desc}'.format(
                desc=const.DESCRIPTION, lb_desc=lb2[const.DESCRIPTION]))
        self.assertEqual(1, len(lbs))
        self.assertEqual(lb2[const.DESCRIPTION], lbs[0][const.DESCRIPTION])

        # Test combined params
        lbs = self.mem_lb_client.list_loadbalancers(
            query_params='{admin}={true}&{fields}={descr}&{fields}={id}&'
                         '{sort}={descr}:{desc}'.format(
                             admin=const.ADMIN_STATE_UP,
                             true=const.ADMIN_STATE_UP_TRUE,
                             fields=const.FIELDS, descr=const.DESCRIPTION,
                             id=const.ID, sort=const.SORT, desc=const.DESC))
        lbs = self._filter_lbs_by_id(lbs, pretest_lb_ids)
        # Should get two load balancers
        self.assertEqual(2, len(lbs))
        # Load balancers should have two fields
        self.assertEqual(2, len(lbs[0]))
        # Should be in descending order
        self.assertEqual(lb2[const.DESCRIPTION], lbs[1][const.DESCRIPTION])
        self.assertEqual(lb1[const.DESCRIPTION], lbs[0][const.DESCRIPTION])
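
The list assertions above lean on two small helpers that are not shown in this snippet (_filter_lbs_by_id and _filter_lbs_by_index live on the test base class). The sketches below are plausible implementations inferred purely from their call sites here, not copies of the plugin source; the rendered query strings are shown as comments for orientation.

# Inferred from usage above; not copied from octavia-tempest-plugin.
def _filter_lbs_by_id(lbs, exclude_ids):
    """Drop pre-existing load balancers from a list response."""
    return [lb for lb in lbs if lb.get('id') not in exclude_ids]

def _filter_lbs_by_index(lbs, exclude_indexes):
    """Drop entries by position, for responses limited to non-ID fields."""
    return [lb for i, lb in enumerate(lbs) if i not in exclude_indexes]

# The query strings built above render roughly as:
#   'sort=description:desc'
#   'fields=admin_state_up&fields=created_at'
#   'admin_state_up=true&fields=description&fields=id&sort=description:desc'
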
Code example #15
    def test_healthmonitor_traffic(self):
        """Tests traffic is correctly routed based on healthmonitor status

        * Create three members:
          * One should be working, and ONLINE with a healthmonitor (passing)
          * One should be working, and ERROR with a healthmonitor (failing)
          * One should be disabled, and OFFLINE with a healthmonitor
        * Verify members are in their correct respective operating statuses.
        * Verify that traffic is balanced evenly between the working members.
        * Create a fully populated healthmonitor.
        * Verify members are in their correct respective operating statuses.
        * Verify that traffic is balanced *unevenly*.
        * Delete the healthmonitor.
        * Verify members are in their correct respective operating statuses.
        * Verify that traffic is balanced evenly between the working members.
        """
        member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
        member1_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: member1_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver1_ip,
            const.PROTOCOL_PORT: 80,
        }
        if self.lb_member_1_subnet:
            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]

        member1 = self.mem_member_client.create_member(**member1_kwargs)
        member1_id = member1[const.ID]
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member1_id,
                        pool_id=self.pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Set up Member 2 for Webserver 2
        member2_name = data_utils.rand_name("lb_member_member2-hm-traffic")
        member2_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: member2_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver2_ip,
            const.PROTOCOL_PORT: 80,
            # Bogus monitor port: this member should go to ERROR once a
            # health monitor exists
            const.MONITOR_PORT: 9999,
        }
        if self.lb_member_2_subnet:
            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]

        member2 = self.mem_member_client.create_member(**member2_kwargs)
        member2_id = member2[const.ID]
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member2_id,
                        pool_id=self.pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Set up Member 3 as a non-existent disabled node
        member3_name = data_utils.rand_name("lb_member_member3-hm-traffic")
        member3_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: member3_name,
            const.ADMIN_STATE_UP: False,
            const.ADDRESS: '192.0.2.1',
            const.PROTOCOL_PORT: 80,
        }

        member3 = self.mem_member_client.create_member(**member3_kwargs)
        member3_id = member3[const.ID]
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member3_id,
                        pool_id=self.pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Wait for members to adjust to the correct OPERATING_STATUS
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member1_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member2_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member3_id,
                                const.OPERATING_STATUS,
                                const.OFFLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)

        # Send some traffic and verify it is balanced
        self.check_members_balanced(self.lb_vip_address,
                                    traffic_member_count=2)

        # Create the healthmonitor
        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
        hm_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: hm_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 2,
            const.MAX_RETRIES: 2,
            const.MAX_RETRIES_DOWN: 2,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/',
            const.EXPECTED_CODES: '200',
            const.ADMIN_STATE_UP: True,
        }

        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # Wait for members to adjust to the correct OPERATING_STATUS
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member1_id,
                                const.OPERATING_STATUS,
                                const.ONLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member2_id,
                                const.OPERATING_STATUS,
                                const.ERROR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member3_id,
                                const.OPERATING_STATUS,
                                const.OFFLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)

        # Send some traffic and verify it is *unbalanced*, as expected
        self.check_members_balanced(self.lb_vip_address,
                                    traffic_member_count=1)

        # Delete the healthmonitor
        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        # Wait for members to adjust to the correct OPERATING_STATUS
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member1_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member2_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member3_id,
                                const.OPERATING_STATUS,
                                const.OFFLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_id)

        # Send some traffic and verify it is balanced again
        self.check_members_balanced(self.lb_vip_address)
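
check_members_balanced drives real HTTP traffic through the VIP and checks how many distinct backends respond. The sketch below shows that idea in a self-contained form; the plugin helper is more thorough (retries, UDP support, distribution checks), the URL is a placeholder, and it assumes the test webservers reply with a body that identifies the member.

import collections
import urllib.request

def count_distinct_backends(vip_url, requests=20):
    """Hit the VIP repeatedly and count distinct response bodies."""
    seen = collections.Counter()
    for _ in range(requests):
        with urllib.request.urlopen(vip_url, timeout=5) as resp:
            seen[resp.read()] += 1
    return seen

# Usage sketch (placeholder VIP): with member 2 failing its health check,
# only one backend should answer; with no health monitor, two should.
# counts = count_distinct_backends('http://203.0.113.10/')
# assert len(counts) == 1
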
Code example #16
    def test_pool_update(self):
        """Tests pool update and show APIs.

        * Create a fully populated pool.
        * Show pool details.
        * Validate the show reflects the initial values.
        * Validates that other accounts cannot update the pool.
        * Update the pool details.
        * Show pool details.
        * Validate the show reflects the updated values.
        """
        pool_name = data_utils.rand_name("lb_member_pool1-update")
        pool_description = data_utils.arbitrary_string(size=255)
        pool_sp_cookie_name = 'my_cookie'
        pool_kwargs = {
            const.NAME: pool_name,
            const.DESCRIPTION: pool_description,
            const.ADMIN_STATE_UP: False,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool_sp_cookie_name,
            }
        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.addClassResourceCleanup(self.mem_pool_client.cleanup_pool,
                                     pool[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                       pool[const.ID],
                                       const.PROVISIONING_STATUS, const.ACTIVE,
                                       CONF.load_balancer.build_interval,
                                       CONF.load_balancer.build_timeout)

        self.assertEqual(pool_name, pool[const.NAME])
        self.assertEqual(pool_description, pool[const.DESCRIPTION])
        self.assertFalse(pool[const.ADMIN_STATE_UP])
        parser.parse(pool[const.CREATED_AT])
        parser.parse(pool[const.UPDATED_AT])
        UUID(pool[const.ID])
        # Operating status for pools will always be offline without members
        self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, pool[const.PROTOCOL])
        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
        self.assertEmpty(pool[const.LISTENERS])
        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
                         pool[const.LB_ALGORITHM])
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
            self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
                             pool[const.SESSION_PERSISTENCE][const.TYPE])
            self.assertEqual(
                pool_sp_cookie_name,
                pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])

        # Test that a user, without the load balancer member role, cannot
        # use this command
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.pool_client.update_pool,
                              pool[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        pool_check = self.mem_pool_client.show_pool(pool[const.ID])
        self.assertEqual(const.ACTIVE, pool_check[const.PROVISIONING_STATUS])
        self.assertFalse(pool_check[const.ADMIN_STATE_UP])

        # Test that a different user, with the load balancer member role,
        # cannot update this pool
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.pool_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.update_pool,
                              pool[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        pool_check = self.mem_pool_client.show_pool(pool[const.ID])
        self.assertEqual(const.ACTIVE, pool_check[const.PROVISIONING_STATUS])
        self.assertFalse(pool_check[const.ADMIN_STATE_UP])

        new_name = data_utils.rand_name("lb_member_pool1-UPDATED")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        pool_update_kwargs = {
            const.NAME: new_name,
            const.DESCRIPTION: new_description,
            const.ADMIN_STATE_UP: True,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_update_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE,
            }
        pool = self.mem_pool_client.update_pool(pool[const.ID],
                                                **pool_update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                       pool[const.ID],
                                       const.PROVISIONING_STATUS, const.ACTIVE,
                                       CONF.load_balancer.build_interval,
                                       CONF.load_balancer.build_timeout)

        self.assertEqual(new_name, pool[const.NAME])
        self.assertEqual(new_description, pool[const.DESCRIPTION])
        self.assertTrue(pool[const.ADMIN_STATE_UP])
        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
                         pool[const.LB_ALGORITHM])
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
            self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
                             pool[const.SESSION_PERSISTENCE][const.TYPE])
            self.assertIsNone(pool[const.SESSION_PERSISTENCE].get(
                const.COOKIE_NAME))

        # Also test removing a Session Persistence
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_update_kwargs = {
                const.SESSION_PERSISTENCE: None,
            }
        pool = self.mem_pool_client.update_pool(pool[const.ID],
                                                **pool_update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                       pool[const.ID],
                                       const.PROVISIONING_STATUS, const.ACTIVE,
                                       CONF.load_balancer.build_interval,
                                       CONF.load_balancer.build_timeout)
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNone(pool.get(const.SESSION_PERSISTENCE))
Code example #17
    @classmethod
    def resource_setup(cls):
        """Setup resources needed by the tests."""
        super(TrafficOperationsScenarioTest, cls).resource_setup()

        lb_name = data_utils.rand_name("lb_member_lb1_operations")
        lb_kwargs = {
            const.PROVIDER: CONF.load_balancer.provider,
            const.NAME: lb_name
        }

        # TODO(rm_work): Make this work with ipv6 and split this test for both
        ip_version = 4
        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)

        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
        cls.lb_id = lb[const.ID]
        cls.addClassResourceCleanup(cls.mem_lb_client.cleanup_loadbalancer,
                                    cls.lb_id)

        if CONF.validation.connect_method == 'floating':
            port_id = lb[const.VIP_PORT_ID]
            result = cls.lb_mem_float_ip_client.create_floatingip(
                floating_network_id=CONF.network.public_network_id,
                port_id=port_id)
            floating_ip = result['floatingip']
            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
            cls.addClassResourceCleanup(
                waiters.wait_for_not_found,
                cls.lb_mem_float_ip_client.delete_floatingip,
                cls.lb_mem_float_ip_client.show_floatingip,
                floatingip_id=floating_ip['id'])
            cls.lb_vip_address = floating_ip['floating_ip_address']
        else:
            cls.lb_vip_address = lb[const.VIP_ADDRESS]

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, cls.lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        listener_name = data_utils.rand_name("lb_member_listener1_operations")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: const.HTTP,
            const.PROTOCOL_PORT: '80',
            const.LOADBALANCER_ID: cls.lb_id,
        }
        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
        cls.listener_id = listener[const.ID]
        cls.addClassResourceCleanup(cls.mem_listener_client.cleanup_listener,
                                    cls.listener_id,
                                    lb_client=cls.mem_lb_client,
                                    lb_id=cls.lb_id)

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, cls.lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        pool_name = data_utils.rand_name("lb_member_pool1_operations")
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LISTENER_ID: cls.listener_id,
        }
        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
        cls.pool_id = pool[const.ID]
        cls.addClassResourceCleanup(cls.mem_pool_client.cleanup_pool,
                                    cls.pool_id,
                                    lb_client=cls.mem_lb_client,
                                    lb_id=cls.lb_id)

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, cls.lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
Code example #18
    def _test_pool_create(self, has_listener):
        """Tests pool create and basic show APIs.

        * Tests that users without the loadbalancer member role cannot
          create pools.
        * Create a fully populated pool.
        * Show pool details.
        * Validate the show reflects the requested values.
        """
        pool_name = data_utils.rand_name("lb_member_pool1-create")
        pool_description = data_utils.arbitrary_string(size=255)
        pool_sp_cookie_name = 'my_cookie'
        pool_kwargs = {
            const.NAME: pool_name,
            const.DESCRIPTION: pool_description,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
        }
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool_sp_cookie_name,
            }

        if has_listener:
            pool_kwargs[const.LISTENER_ID] = self.listener_id
        else:
            pool_kwargs[const.LOADBALANCER_ID] = self.lb_id

        # Test that a user without the load balancer role cannot
        # create a pool
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.pool_client.create_pool,
                              **pool_kwargs)

        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.addClassResourceCleanup(self.mem_pool_client.cleanup_pool,
                                     pool[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                       pool[const.ID],
                                       const.PROVISIONING_STATUS, const.ACTIVE,
                                       CONF.load_balancer.build_interval,
                                       CONF.load_balancer.build_timeout)
        if has_listener and not CONF.load_balancer.test_with_noop:
            pool = waiters.wait_for_status(self.mem_pool_client.show_pool,
                                           pool[const.ID],
                                           const.OPERATING_STATUS,
                                           const.ONLINE,
                                           CONF.load_balancer.build_interval,
                                           CONF.load_balancer.build_timeout)

        self.assertEqual(pool_name, pool[const.NAME])
        self.assertEqual(pool_description, pool[const.DESCRIPTION])
        self.assertTrue(pool[const.ADMIN_STATE_UP])
        parser.parse(pool[const.CREATED_AT])
        parser.parse(pool[const.UPDATED_AT])
        UUID(pool[const.ID])
        # Operating status for a pool without members will be:
        if has_listener and not CONF.load_balancer.test_with_noop:
            # ONLINE if it is attached to a listener and is a live test
            self.assertEqual(const.ONLINE, pool[const.OPERATING_STATUS])
        else:
            # OFFLINE if it is just on the LB directly or is in noop mode
            self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, pool[const.PROTOCOL])
        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
        if has_listener:
            self.assertEqual(1, len(pool[const.LISTENERS]))
            self.assertEqual(self.listener_id,
                             pool[const.LISTENERS][0][const.ID])
        else:
            self.assertEmpty(pool[const.LISTENERS])
        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
                         pool[const.LB_ALGORITHM])
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
            self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
                             pool[const.SESSION_PERSISTENCE][const.TYPE])
            self.assertEqual(
                pool_sp_cookie_name,
                pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
Code example #19
    def _test_load_balancer_CRUD(self, ip_version):
        """Tests load balancer create, read, update, delete

        * Create a fully populated load balancer.
        * Show load balancer details.
        * Update the load balancer.
        * Delete the load balancer.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-CRUD")
        lb_description = data_utils.arbitrary_string(size=255)

        lb_kwargs = {
            const.ADMIN_STATE_UP: False,
            const.DESCRIPTION: lb_description,
            const.PROVIDER: CONF.load_balancer.provider,
            const.NAME: lb_name
        }

        if self.lb_admin_flavor_profile_client.is_version_supported(
                self.api_version, '2.6') and self.flavor_id:
            lb_kwargs[const.FLAVOR_ID] = self.flavor_id

        self._setup_lb_network_kwargs(lb_kwargs, ip_version)

        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
        self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        self.assertFalse(lb[const.ADMIN_STATE_UP])
        parser.parse(lb[const.CREATED_AT])
        parser.parse(lb[const.UPDATED_AT])
        self.assertEqual(lb_description, lb[const.DESCRIPTION])
        UUID(lb[const.ID])
        self.assertEqual(lb_name, lb[const.NAME])
        self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
        self.assertEqual(self.os_roles_lb_member.credentials.project_id,
                         lb[const.PROJECT_ID])
        self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
        if ip_version == 4:
            self.assertEqual(self.lb_member_vip_net[const.ID],
                             lb[const.VIP_NETWORK_ID])
        else:
            self.assertEqual(self.lb_member_vip_ipv6_net[const.ID],
                             lb[const.VIP_NETWORK_ID])
        self.assertIsNotNone(lb[const.VIP_PORT_ID])
        if lb_kwargs[const.VIP_SUBNET_ID]:
            self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                             lb[const.VIP_SUBNET_ID])

        # Load balancer update
        new_name = data_utils.rand_name("lb_member_lb1-update")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        lb = self.mem_lb_client.update_loadbalancer(
            lb[const.ID],
            admin_state_up=True,
            description=new_description,
            name=new_name)

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        self.assertTrue(lb[const.ADMIN_STATE_UP])
        self.assertEqual(new_description, lb[const.DESCRIPTION])
        self.assertEqual(new_name, lb[const.NAME])

        # Load balancer delete
        self.mem_lb_client.delete_loadbalancer(lb[const.ID], cascade=True)

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_lb_client.show_loadbalancer, lb[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
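
The delete at the end blocks on waiters.wait_for_deleted_status_or_not_found. Below is a simplified, illustrative equivalent (not the plugin implementation): poll the show call until it raises a "not found" error or the object reports a DELETED provisioning status, failing after a timeout.

import time

# Illustrative only; see the lead-in above.
def wait_for_deleted_sketch(show_func, object_id, status_key,
                            check_interval, check_timeout,
                            not_found_exc=Exception):
    """Return once show_func(object_id) 404s or reports DELETED."""
    deadline = time.time() + check_timeout
    while time.time() < deadline:
        try:
            obj = show_func(object_id)
        except not_found_exc:
            return
        if obj.get(status_key) == 'DELETED':
            return
        time.sleep(check_interval)
    raise TimeoutError('%s was not deleted within %s seconds' %
                       (object_id, check_timeout))
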
Code example #20
    def test_listener_create_on_same_port(self):
        """Tests listener creation on same port number.

        * Create a first listener.
        * Create a new listener on an existing port, but with a different
          protocol.
        * Create a second listener with the same parameters and ensure that
          an error is triggered.
        * Create a third listener with another protocol over TCP, and ensure
          that it fails.
        """

        # Using listeners on the same port for TCP and UDP was not supported
        # before Train. Use 2.11 API version as reference to detect previous
        # releases and skip the test.
        if not self.mem_listener_client.is_version_supported(
                self.api_version, '2.11'):
            raise self.skipException('TCP and UDP listeners on same port fix '
                                     'is only available on Octavia API '
                                     'version 2.11 or newer.')

        listener_name = data_utils.rand_name("lb_member_listener1-create")

        listener_kwargs = {
            const.NAME: listener_name,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.PROTOCOL_PORT: 8080,
            const.LOADBALANCER_ID: self.lb_id,
            const.CONNECTION_LIMIT: 200
        }

        try:
            listener = self.mem_listener_client.create_listener(
                **listener_kwargs)
        except exceptions.BadRequest as e:
            faultstring = e.resp_body.get('faultstring', '')
            if ("Invalid input for field/attribute protocol." in faultstring
                    and "Value should be one of:" in faultstring):
                raise self.skipException("Skipping unsupported protocol")
            raise e

        self.addClassResourceCleanup(self.mem_listener_client.cleanup_listener,
                                     listener[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        if self.protocol == const.UDP:
            protocol = const.TCP
        else:
            protocol = const.UDP

        # Create a listener on the same port, but with a different protocol
        listener2_name = data_utils.rand_name("lb_member_listener2-create")

        listener2_kwargs = {
            const.NAME: listener2_name,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: protocol,
            const.PROTOCOL_PORT: 8080,
            const.LOADBALANCER_ID: self.lb_id,
            const.CONNECTION_LIMIT: 200,
        }

        try:
            listener2 = self.mem_listener_client.create_listener(
                **listener2_kwargs)
        except exceptions.BadRequest as e:
            faultstring = e.resp_body.get('faultstring', '')
            if ("Invalid input for field/attribute protocol." in faultstring
                    and "Value should be one of:" in faultstring):
                raise self.skipException("Skipping unsupported protocol")
            raise e

        self.addClassResourceCleanup(self.mem_listener_client.cleanup_listener,
                                     listener2[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Create a listener on the same port, with an already used protocol
        listener3_name = data_utils.rand_name("lb_member_listener3-create")

        listener3_kwargs = {
            const.NAME: listener3_name,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: protocol,
            const.PROTOCOL_PORT: 8080,
            const.LOADBALANCER_ID: self.lb_id,
            const.CONNECTION_LIMIT: 200,
        }

        self.assertRaises(exceptions.Conflict,
                          self.mem_listener_client.create_listener,
                          **listener3_kwargs)

        # Create a listener on the same port, with another protocol over TCP,
        # only if layer-7 protocols are enabled
        lb_feature_enabled = CONF.loadbalancer_feature_enabled
        if lb_feature_enabled.l7_protocol_enabled:
            if self.protocol == const.HTTP:
                protocol = const.HTTPS
            else:
                protocol = const.HTTP

            listener4_name = data_utils.rand_name("lb_member_listener4-create")

            listener4_kwargs = {
                const.NAME: listener4_name,
                const.ADMIN_STATE_UP: True,
                const.PROTOCOL: protocol,
                const.PROTOCOL_PORT: 8080,
                const.LOADBALANCER_ID: self.lb_id,
                const.CONNECTION_LIMIT: 200,
            }

            self.assertRaises(exceptions.Conflict,
                              self.mem_listener_client.create_listener,
                              **listener4_kwargs)
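
The unsupported-protocol handling above is repeated verbatim for each create call. A hypothetical helper method (not part of the plugin; the name is illustrative) could factor it out so each attempt reduces to a single call:

    def _create_listener_or_skip(self, **listener_kwargs):
        """Hypothetical helper mirroring the BadRequest handling above."""
        try:
            return self.mem_listener_client.create_listener(**listener_kwargs)
        except exceptions.BadRequest as e:
            faultstring = e.resp_body.get('faultstring', '')
            if ("Invalid input for field/attribute protocol." in faultstring
                    and "Value should be one of:" in faultstring):
                raise self.skipException("Skipping unsupported protocol")
            raise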
Code example #21
    def test_l7policy_list(self):
        """Tests l7policy list API and field filtering.

        * Create a clean listener.
        * Create three l7policies.
        * Validates that other accounts cannot list the l7policies.
        * List the l7policies using the default sort order.
        * List the l7policies using descending sort order.
        * List the l7policies using ascending sort order.
        * List the l7policies returning one field at a time.
        * List the l7policies returning two fields.
        * List the l7policies filtering to one of the three.
        * List the l7policies filtered, one field, and sorted.
        """
        listener_name = data_utils.rand_name(
            "lb_member_listener2_l7policy-list")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: const.HTTP,
            const.PROTOCOL_PORT: '81',
            const.LOADBALANCER_ID: self.lb_id,
        }
        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        listener_id = listener[const.ID]
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        l7policy1_name = data_utils.rand_name("lb_member_l7policy2-list")
        l7policy1_desc = 'B'
        l7policy1_kwargs = {
            const.LISTENER_ID: listener_id,
            const.NAME: l7policy1_name,
            const.DESCRIPTION: l7policy1_desc,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 1,
            const.ACTION: const.REJECT
        }
        l7policy1 = self.mem_l7policy_client.create_l7policy(
            **l7policy1_kwargs)
        self.addCleanup(self.mem_l7policy_client.cleanup_l7policy,
                        l7policy1[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        l7policy1 = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, l7policy1[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        l7policy2_name = data_utils.rand_name("lb_member_l7policy1-list")
        l7policy2_desc = 'A'
        l7policy2_kwargs = {
            const.LISTENER_ID: listener_id,
            const.NAME: l7policy2_name,
            const.DESCRIPTION: l7policy2_desc,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 1,
            const.ACTION: const.REDIRECT_TO_POOL,
            const.REDIRECT_POOL_ID: self.pool_id
        }
        l7policy2 = self.mem_l7policy_client.create_l7policy(
            **l7policy2_kwargs)
        self.addCleanup(self.mem_l7policy_client.cleanup_l7policy,
                        l7policy2[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        l7policy2 = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, l7policy2[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        l7policy3_name = data_utils.rand_name("lb_member_l7policy3-list")
        l7policy3_desc = 'C'
        l7_redirect_url = 'http://localhost'
        l7policy3_kwargs = {
            const.LISTENER_ID: listener_id,
            const.NAME: l7policy3_name,
            const.DESCRIPTION: l7policy3_desc,
            const.ADMIN_STATE_UP: False,
            const.POSITION: 1,
            const.ACTION: const.REDIRECT_TO_URL,
            const.REDIRECT_URL: l7_redirect_url
        }
        l7policy3 = self.mem_l7policy_client.create_l7policy(
            **l7policy3_kwargs)
        self.addCleanup(self.mem_l7policy_client.cleanup_l7policy,
                        l7policy3[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        l7policy3 = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, l7policy3[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Test that a different user cannot list l7policies
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.l7policy_client
            primary = member2_client.list_l7policies(
                query_params='listener_id={listener_id}'.format(
                    listener_id=listener_id))
            self.assertEqual(0, len(primary))

        # Test that a user without the lb member role cannot list l7policies
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.l7policy_client.list_l7policies)

        # Check the default sort order, created_at
        l7policies = self.mem_l7policy_client.list_l7policies(
            query_params='listener_id={listener_id}'.format(
                listener_id=listener_id))
        self.assertEqual(l7policy1[const.DESCRIPTION],
                         l7policies[0][const.DESCRIPTION])
        self.assertEqual(l7policy2[const.DESCRIPTION],
                         l7policies[1][const.DESCRIPTION])
        self.assertEqual(l7policy3[const.DESCRIPTION],
                         l7policies[2][const.DESCRIPTION])

        # Test sort descending by description
        l7policies = self.mem_l7policy_client.list_l7policies(
            query_params='listener_id={listener_id}&{sort}={descr}:{desc}'.
            format(listener_id=listener_id,
                   sort=const.SORT,
                   descr=const.DESCRIPTION,
                   desc=const.DESC))
        self.assertEqual(l7policy1[const.DESCRIPTION],
                         l7policies[1][const.DESCRIPTION])
        self.assertEqual(l7policy2[const.DESCRIPTION],
                         l7policies[2][const.DESCRIPTION])
        self.assertEqual(l7policy3[const.DESCRIPTION],
                         l7policies[0][const.DESCRIPTION])

        # Test sort ascending by description
        l7policies = self.mem_l7policy_client.list_l7policies(
            query_params='listener_id={listener_id}&{sort}={descr}:{asc}'.
            format(listener_id=listener_id,
                   sort=const.SORT,
                   descr=const.DESCRIPTION,
                   asc=const.ASC))
        self.assertEqual(l7policy1[const.DESCRIPTION],
                         l7policies[1][const.DESCRIPTION])
        self.assertEqual(l7policy2[const.DESCRIPTION],
                         l7policies[0][const.DESCRIPTION])
        self.assertEqual(l7policy3[const.DESCRIPTION],
                         l7policies[2][const.DESCRIPTION])

        # Use this opportunity to verify the position insertion is working
        l7policies = self.mem_l7policy_client.list_l7policies(
            query_params='listener_id={listener_id}&{sort}={position}:{asc}'.
            format(listener_id=listener_id,
                   sort=const.SORT,
                   position=const.POSITION,
                   asc=const.ASC))
        self.assertEqual(1, l7policies[0][const.POSITION])
        self.assertEqual(2, l7policies[1][const.POSITION])
        self.assertEqual(3, l7policies[2][const.POSITION])
        self.assertEqual(l7policy1[const.NAME], l7policies[2][const.NAME])
        self.assertEqual(l7policy2[const.NAME], l7policies[1][const.NAME])
        self.assertEqual(l7policy3[const.NAME], l7policies[0][const.NAME])

        # Test fields
        for field in const.SHOW_L7POLICY_RESPONSE_FIELDS:
            # Test position / updated_at separately: position is renumbered
            # on insert and updated_at is unpredictable
            if field not in (const.POSITION, const.UPDATED_AT):
                l7policies = self.mem_l7policy_client.list_l7policies(
                    query_params='listener_id={listener_id}&{fields}={field}'.
                    format(listener_id=listener_id,
                           fields=const.FIELDS,
                           field=field))
                self.assertEqual(1, len(l7policies[0]))
                self.assertEqual(l7policy1[field], l7policies[0][field])
                self.assertEqual(l7policy2[field], l7policies[1][field])
                self.assertEqual(l7policy3[field], l7policies[2][field])
            elif field == const.POSITION:
                l7policies = self.mem_l7policy_client.list_l7policies(
                    query_params='listener_id={listener_id}&{fields}={field}'.
                    format(listener_id=listener_id,
                           fields=const.FIELDS,
                           field=field))
                self.assertEqual(1, len(l7policies[0]))
                # Positions won't match the request due to insertion reordering
                self.assertEqual(3, l7policies[0][field])
                self.assertEqual(2, l7policies[1][field])
                self.assertEqual(1, l7policies[2][field])
            elif field == const.UPDATED_AT:
                l7policies = self.mem_l7policy_client.list_l7policies(
                    query_params='listener_id={listener_id}&{fields}={field}'.
                    format(listener_id=listener_id,
                           fields=const.FIELDS,
                           field=field))
                # Just test that we get it -- the actual value is unpredictable
                self.assertEqual(1, len(l7policies[0]))

        # Test multiple fields at the same time
        l7policies = self.mem_l7policy_client.list_l7policies(
            query_params='listener_id={listener_id}&{fields}={admin}&'
            '{fields}={created}'.format(listener_id=listener_id,
                                        fields=const.FIELDS,
                                        admin=const.ADMIN_STATE_UP,
                                        created=const.CREATED_AT))
        self.assertEqual(2, len(l7policies[0]))
        self.assertTrue(l7policies[0][const.ADMIN_STATE_UP])
        parser.parse(l7policies[0][const.CREATED_AT])
        self.assertTrue(l7policies[1][const.ADMIN_STATE_UP])
        parser.parse(l7policies[1][const.CREATED_AT])
        self.assertFalse(l7policies[2][const.ADMIN_STATE_UP])
        parser.parse(l7policies[2][const.CREATED_AT])

        # Test filtering
        l7policies = self.mem_l7policy_client.list_l7policies(
            query_params='listener_id={listener_id}&'
            '{desc}={l7policy_desc}'.format(listener_id=listener_id,
                                            desc=const.DESCRIPTION,
                                            l7policy_desc=l7policy2[
                                                const.DESCRIPTION]))
        self.assertEqual(1, len(l7policies))
        self.assertEqual(l7policy2[const.DESCRIPTION],
                         l7policies[0][const.DESCRIPTION])

        # Test combined params
        l7policies = self.mem_l7policy_client.list_l7policies(
            query_params='listener_id={listener_id}&{admin}={true}&'
            '{fields}={descr}&{fields}={id}&'
            '{sort}={descr}:{desc}'.format(listener_id=listener_id,
                                           admin=const.ADMIN_STATE_UP,
                                           true=const.ADMIN_STATE_UP_TRUE,
                                           fields=const.FIELDS,
                                           descr=const.DESCRIPTION,
                                           id=const.ID,
                                           sort=const.SORT,
                                           desc=const.DESC))
        # Should get two l7policies
        self.assertEqual(2, len(l7policies))
        # l7policies should have two fields
        self.assertEqual(2, len(l7policies[0]))
        # Should be in descending order
        self.assertEqual(l7policy2[const.DESCRIPTION],
                         l7policies[1][const.DESCRIPTION])
        self.assertEqual(l7policy1[const.DESCRIPTION],
                         l7policies[0][const.DESCRIPTION])
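
The list assertions above exercise Octavia's query-string conventions: plain attribute filters (e.g. description=...), repeated fields parameters, and sort=<key>:<direction>. A standalone sketch of how such a string can be assembled (the helper is illustrative, not part of the plugin):

def build_query_params(filters=None, fields=None, sort=None):
    """Illustrative only: compose a query string like the ones used above."""
    parts = []
    for key, value in (filters or {}).items():
        parts.append('{key}={value}'.format(key=key, value=value))
    for field in (fields or []):
        parts.append('fields={field}'.format(field=field))
    if sort is not None:
        sort_key, direction = sort
        parts.append('sort={key}:{dir}'.format(key=sort_key, dir=direction))
    return '&'.join(parts)

# The "combined params" request above corresponds roughly to:
# build_query_params(filters={'listener_id': listener_id,
#                             'admin_state_up': 'true'},
#                    fields=['description', 'id'],
#                    sort=('description', 'desc'))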
Code example #22
    def test_listener_list(self):
        """Tests listener list API and field filtering.

        * Create a clean loadbalancer.
        * Create three listeners.
        * Validates that other accounts cannot list the listeners.
        * List the listeners using the default sort order.
        * List the listeners using descending sort order.
        * List the listeners using ascending sort order.
        * List the listeners returning one field at a time.
        * List the listeners returning two fields.
        * List the listeners filtering to one of the three.
        * List the listeners filtered, one field, and sorted.
        """
        lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name,
            provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        lb_id = lb[const.ID]
        self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        listener1_name = data_utils.rand_name("lb_member_listener2-list")
        listener1_desc = 'B'
        listener1_kwargs = {
            const.NAME: listener1_name,
            const.DESCRIPTION: listener1_desc,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.PROTOCOL_PORT: 80,
            const.LOADBALANCER_ID: lb_id,
        }
        listener1 = self.mem_listener_client.create_listener(
            **listener1_kwargs)
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener1[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=lb_id)
        listener1 = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener1[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        listener2_name = data_utils.rand_name("lb_member_listener1-list")
        listener2_desc = 'A'
        listener2_kwargs = {
            const.NAME: listener2_name,
            const.DESCRIPTION: listener2_desc,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.PROTOCOL_PORT: 81,
            const.LOADBALANCER_ID: lb_id,
        }
        listener2 = self.mem_listener_client.create_listener(
            **listener2_kwargs)
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener2[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=lb_id)
        listener2 = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener2[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        listener3_name = data_utils.rand_name("lb_member_listener3-list")
        listener3_desc = 'C'
        listener3_kwargs = {
            const.NAME: listener3_name,
            const.DESCRIPTION: listener3_desc,
            const.ADMIN_STATE_UP: False,
            const.PROTOCOL: self.protocol,
            const.PROTOCOL_PORT: 82,
            const.LOADBALANCER_ID: lb_id,
        }
        listener3 = self.mem_listener_client.create_listener(
            **listener3_kwargs)
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener3[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=lb_id)
        listener3 = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener3[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer, lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        if not CONF.load_balancer.test_with_noop:
            # Wait for the enabled listeners to come ONLINE
            listener1 = waiters.wait_for_status(
                self.mem_listener_client.show_listener, listener1[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)
            listener2 = waiters.wait_for_status(
                self.mem_listener_client.show_listener, listener2[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        # Test that a different user cannot list listeners
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.listener_client
            primary = member2_client.list_listeners(
                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
            self.assertEqual(0, len(primary))

        # Test that a user without the lb member role cannot list listeners
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.listener_client.list_listeners)

        # Check the default sort order, created_at
        listeners = self.mem_listener_client.list_listeners(
            query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
        self.assertEqual(listener1[const.DESCRIPTION],
                         listeners[0][const.DESCRIPTION])
        self.assertEqual(listener2[const.DESCRIPTION],
                         listeners[1][const.DESCRIPTION])
        self.assertEqual(listener3[const.DESCRIPTION],
                         listeners[2][const.DESCRIPTION])

        # Test sort descending by description
        listeners = self.mem_listener_client.list_listeners(
            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{desc}'.
            format(lb_id=lb_id,
                   sort=const.SORT,
                   descr=const.DESCRIPTION,
                   desc=const.DESC))
        self.assertEqual(listener1[const.DESCRIPTION],
                         listeners[1][const.DESCRIPTION])
        self.assertEqual(listener2[const.DESCRIPTION],
                         listeners[2][const.DESCRIPTION])
        self.assertEqual(listener3[const.DESCRIPTION],
                         listeners[0][const.DESCRIPTION])

        # Test sort ascending by description
        listeners = self.mem_listener_client.list_listeners(
            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{asc}'.format(
                lb_id=lb_id,
                sort=const.SORT,
                descr=const.DESCRIPTION,
                asc=const.ASC))
        self.assertEqual(listener1[const.DESCRIPTION],
                         listeners[1][const.DESCRIPTION])
        self.assertEqual(listener2[const.DESCRIPTION],
                         listeners[0][const.DESCRIPTION])
        self.assertEqual(listener3[const.DESCRIPTION],
                         listeners[2][const.DESCRIPTION])

        # Test fields
        # Copy the constant so that appending the 2.1 timeout fields does
        # not mutate the shared SHOW_LISTENER_RESPONSE_FIELDS list.
        show_listener_response_fields = list(
            const.SHOW_LISTENER_RESPONSE_FIELDS)
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            show_listener_response_fields += ['timeout_client_data',
                                              'timeout_member_connect',
                                              'timeout_member_data',
                                              'timeout_tcp_inspect']
        for field in show_listener_response_fields:
            if field in (const.DEFAULT_POOL_ID, const.L7_POLICIES):
                continue
            listeners = self.mem_listener_client.list_listeners(
                query_params='loadbalancer_id={lb_id}&{fields}={field}'.format(
                    lb_id=lb_id, fields=const.FIELDS, field=field))
            self.assertEqual(1, len(listeners[0]))
            self.assertEqual(listener1[field], listeners[0][field])
            self.assertEqual(listener2[field], listeners[1][field])
            self.assertEqual(listener3[field], listeners[2][field])

        # Test multiple fields at the same time
        listeners = self.mem_listener_client.list_listeners(
            query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
            '{fields}={created}'.format(lb_id=lb_id,
                                        fields=const.FIELDS,
                                        admin=const.ADMIN_STATE_UP,
                                        created=const.CREATED_AT))
        self.assertEqual(2, len(listeners[0]))
        self.assertTrue(listeners[0][const.ADMIN_STATE_UP])
        parser.parse(listeners[0][const.CREATED_AT])
        self.assertTrue(listeners[1][const.ADMIN_STATE_UP])
        parser.parse(listeners[1][const.CREATED_AT])
        self.assertFalse(listeners[2][const.ADMIN_STATE_UP])
        parser.parse(listeners[2][const.CREATED_AT])

        # Test filtering
        listeners = self.mem_listener_client.list_listeners(
            query_params='loadbalancer_id={lb_id}&{desc}={lb_desc}'.format(
                lb_id=lb_id,
                desc=const.DESCRIPTION,
                lb_desc=listener2[const.DESCRIPTION]))
        self.assertEqual(1, len(listeners))
        self.assertEqual(listener2[const.DESCRIPTION],
                         listeners[0][const.DESCRIPTION])

        # Test combined params
        listeners = self.mem_listener_client.list_listeners(
            query_params='loadbalancer_id={lb_id}&{admin}={true}&'
            '{fields}={descr}&{fields}={id}&'
            '{sort}={descr}:{desc}'.format(lb_id=lb_id,
                                           admin=const.ADMIN_STATE_UP,
                                           true=const.ADMIN_STATE_UP_TRUE,
                                           fields=const.FIELDS,
                                           descr=const.DESCRIPTION,
                                           id=const.ID,
                                           sort=const.SORT,
                                           desc=const.DESC))
        # Should get two listeners
        self.assertEqual(2, len(listeners))
        # listeners should have two fields
        self.assertEqual(2, len(listeners[0]))
        # Should be in descending order
        self.assertEqual(listener2[const.DESCRIPTION],
                         listeners[1][const.DESCRIPTION])
        self.assertEqual(listener1[const.DESCRIPTION],
                         listeners[0][const.DESCRIPTION])
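
The ONLINE waits above are guarded by test_with_noop because operating status is measured by the driver and never converges under the no-op drivers. A hypothetical wrapper (illustrative name) for that recurring guard:

    def _wait_online_unless_noop(self, show_func, object_id):
        """Hypothetical helper: skip the ONLINE wait under no-op drivers."""
        if CONF.load_balancer.test_with_noop:
            return None
        return waiters.wait_for_status(show_func, object_id,
                                       const.OPERATING_STATUS, const.ONLINE,
                                       CONF.load_balancer.build_interval,
                                       CONF.load_balancer.build_timeout)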
Code example #23
    def test_l7policy_update(self):
        """Tests l7policy update and show APIs.

        * Create a clean listener.
        * Create a fully populated l7policy.
        * Show l7policy details.
        * Validate the show reflects the initial values.
        * Validates that other accounts cannot update the l7policy.
        * Update the l7policy details.
        * Show l7policy details.
        * Validate the show reflects the updated values.
        """
        listener_name = data_utils.rand_name(
            "lb_member_listener3_l7policy-update")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: const.HTTP,
            const.PROTOCOL_PORT: '81',
            const.LOADBALANCER_ID: self.lb_id,
        }
        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        listener_id = listener[const.ID]
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        l7policy_name = data_utils.rand_name("lb_member_l7policy1-update")
        l7policy_description = data_utils.arbitrary_string(size=255)
        l7_redirect_url = 'http://localhost'
        l7policy_kwargs = {
            const.LISTENER_ID: listener_id,
            const.NAME: l7policy_name,
            const.DESCRIPTION: l7policy_description,
            const.ADMIN_STATE_UP: False,
            const.POSITION: 1,
            const.ACTION: const.REDIRECT_TO_URL,
            const.REDIRECT_URL: l7_redirect_url,
        }

        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
        self.addClassResourceCleanup(self.mem_l7policy_client.cleanup_l7policy,
                                     l7policy[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        l7policy = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        self.assertEqual(l7policy_name, l7policy[const.NAME])
        self.assertEqual(l7policy_description, l7policy[const.DESCRIPTION])
        self.assertFalse(l7policy[const.ADMIN_STATE_UP])
        parser.parse(l7policy[const.CREATED_AT])
        parser.parse(l7policy[const.UPDATED_AT])
        UUID(l7policy[const.ID])
        # Operating status for an l7policy will be ONLINE if it is enabled:
        if l7policy[const.ADMIN_STATE_UP]:
            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
        self.assertEqual(listener_id, l7policy[const.LISTENER_ID])
        self.assertEqual(1, l7policy[const.POSITION])
        self.assertEqual(const.REDIRECT_TO_URL, l7policy[const.ACTION])
        self.assertEqual(l7_redirect_url, l7policy[const.REDIRECT_URL])
        self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))

        # Test that a user, without the load balancer member role, cannot
        # use this command
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.l7policy_client.update_l7policy,
                              l7policy[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        l7policy_check = self.mem_l7policy_client.show_l7policy(
            l7policy[const.ID])
        self.assertEqual(const.ACTIVE,
                         l7policy_check[const.PROVISIONING_STATUS])
        self.assertFalse(l7policy_check[const.ADMIN_STATE_UP])

        # Test that a different user, with the load balancer member role,
        # cannot update this l7policy
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.l7policy_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.update_l7policy,
                              l7policy[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        l7policy_check = self.mem_l7policy_client.show_l7policy(
            l7policy[const.ID])
        self.assertEqual(const.ACTIVE,
                         l7policy_check[const.PROVISIONING_STATUS])
        self.assertFalse(l7policy_check[const.ADMIN_STATE_UP])

        new_name = data_utils.rand_name("lb_member_l7policy1-UPDATED")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        l7policy_update_kwargs = {
            const.NAME: new_name,
            const.DESCRIPTION: new_description,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 2,
            const.ACTION: const.REDIRECT_TO_POOL,
            const.REDIRECT_POOL_ID: self.pool_id,
        }
        l7policy = self.mem_l7policy_client.update_l7policy(
            l7policy[const.ID], **l7policy_update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        l7policy = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        if not CONF.load_balancer.test_with_noop:
            l7policy = waiters.wait_for_status(
                self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        self.assertEqual(new_name, l7policy[const.NAME])
        self.assertEqual(new_description, l7policy[const.DESCRIPTION])
        self.assertTrue(l7policy[const.ADMIN_STATE_UP])
        parser.parse(l7policy[const.CREATED_AT])
        parser.parse(l7policy[const.UPDATED_AT])
        UUID(l7policy[const.ID])
        # Operating status for an l7policy will be ONLINE if it is enabled:
        if l7policy[const.ADMIN_STATE_UP]:
            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
        self.assertEqual(listener_id, l7policy[const.LISTENER_ID])
        self.assertEqual(1, l7policy[const.POSITION])
        self.assertEqual(const.REDIRECT_TO_POOL, l7policy[const.ACTION])
        self.assertEqual(self.pool_id, l7policy[const.REDIRECT_POOL_ID])
        self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
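
The negative RBAC checks above always follow the same pattern: attempt the update with an unauthorized client, then confirm the object never left ACTIVE (i.e. no PENDING_* transition was triggered). A hypothetical helper capturing that pattern:

    def _assert_update_forbidden(self, update_func, show_func, object_id,
                                 **update_kwargs):
        """Hypothetical helper for the repeated Forbidden + ACTIVE check."""
        self.assertRaises(exceptions.Forbidden, update_func, object_id,
                          **update_kwargs)
        obj = show_func(object_id)
        self.assertEqual(const.ACTIVE, obj[const.PROVISIONING_STATUS])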
Code example #24
    def test_listener_create(self):
        """Tests listener create and basic show APIs.

        * Tests that users without the loadbalancer member role cannot
          create listeners.
        * Create a fully populated listener.
        * Show listener details.
        * Validate the show reflects the requested values.
        """
        listener_name = data_utils.rand_name("lb_member_listener1-create")
        listener_description = data_utils.arbitrary_string(size=255)

        listener_kwargs = {
            const.NAME: listener_name,
            const.DESCRIPTION: listener_description,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: self.protocol,
            const.PROTOCOL_PORT: 80,
            const.LOADBALANCER_ID: self.lb_id,
            const.CONNECTION_LIMIT: 200,
            const.INSERT_HEADERS: {
                const.X_FORWARDED_FOR: "true",
                const.X_FORWARDED_PORT: "true"
            },
            # Don't test with a default pool -- we'll do that in the scenario
            # tests. This also confirms the field isn't mandatory and avoids
            # conflating pool failures with listener test failures.
            # const.DEFAULT_POOL_ID: self.pool_id,

            # TODO(rm_work): need to add TLS related stuff
            # const.DEFAULT_TLS_CONTAINER_REF: '',
            # const.SNI_CONTAINER_REFS: [],
        }
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            listener_kwargs.update({
                const.TIMEOUT_CLIENT_DATA: 1000,
                const.TIMEOUT_MEMBER_CONNECT: 1000,
                const.TIMEOUT_MEMBER_DATA: 1000,
                const.TIMEOUT_TCP_INSPECT: 50,
            })

        # Test that a user without the load balancer member role cannot
        # create a listener
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.listener_client.create_listener,
                              **listener_kwargs)

        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        self.addClassResourceCleanup(self.mem_listener_client.cleanup_listener,
                                     listener[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        listener = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        if not CONF.load_balancer.test_with_noop:
            listener = waiters.wait_for_status(
                self.mem_listener_client.show_listener, listener[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        self.assertEqual(listener_name, listener[const.NAME])
        self.assertEqual(listener_description, listener[const.DESCRIPTION])
        self.assertTrue(listener[const.ADMIN_STATE_UP])
        parser.parse(listener[const.CREATED_AT])
        parser.parse(listener[const.UPDATED_AT])
        UUID(listener[const.ID])
        # Operating status is a measured status, so no-op will not go online
        if CONF.load_balancer.test_with_noop:
            self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, listener[const.PROTOCOL])
        self.assertEqual(80, listener[const.PROTOCOL_PORT])
        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
        insert_headers = listener[const.INSERT_HEADERS]
        self.assertTrue(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
        self.assertTrue(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
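
The insert_headers assertions above go through strutils.bool_from_string because the API returns the header flags as strings rather than booleans. A minimal standalone illustration (the literal header names are an assumption; the tests use the const.X_FORWARDED_* constants):

from oslo_utils import strutils

# Values come back as strings such as "true"/"false", so they are parsed
# instead of being compared directly against Python booleans.
insert_headers = {'X-Forwarded-For': 'true', 'X-Forwarded-Port': 'true'}
assert strutils.bool_from_string(insert_headers['X-Forwarded-For'])
assert strutils.bool_from_string(insert_headers['X-Forwarded-Port'])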
Code example #25
    def test_member_CRUD(self):
        """Tests member create, read, update, delete

        * Create a fully populated member.
        * Show member details.
        * Update the member.
        * Delete the member.
        """
        # Member create
        member_name = data_utils.rand_name("lb_member_member1-CRUD")
        member_kwargs = {
            const.NAME: member_name,
            const.ADMIN_STATE_UP: True,
            const.POOL_ID: self.pool_id,
            const.ADDRESS: '192.0.2.1',
            const.PROTOCOL_PORT: 80,
            const.WEIGHT: 50,
            const.MONITOR_ADDRESS: '192.0.2.2',
            const.MONITOR_PORT: 8080,
        }
        if self.mem_member_client.is_version_supported(self.api_version,
                                                       '2.1'):
            member_kwargs.update({
                const.BACKUP: False,
            })

        if self.lb_member_vip_subnet:
            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                const.ID]

        member = self.mem_member_client.create_member(**member_kwargs)
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member[const.ID],
                        pool_id=self.pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        member = waiters.wait_for_status(self.mem_member_client.show_member,
                                         member[const.ID],
                                         const.PROVISIONING_STATUS,
                                         const.ACTIVE,
                                         CONF.load_balancer.build_interval,
                                         CONF.load_balancer.build_timeout,
                                         pool_id=self.pool_id)

        parser.parse(member[const.CREATED_AT])
        parser.parse(member[const.UPDATED_AT])
        UUID(member[const.ID])

        # Members may start in a transitional operating status such as DOWN
        # or MAINT; give the member some time to stabilize on NO_MONITOR,
        # the expected status when no health monitor is attached.
        member = waiters.wait_for_status(self.mem_member_client.show_member,
                                         member[const.ID],
                                         const.OPERATING_STATUS,
                                         const.NO_MONITOR,
                                         CONF.load_balancer.check_interval,
                                         CONF.load_balancer.check_timeout,
                                         pool_id=self.pool_id)

        equal_items = [
            const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
            const.PROTOCOL_PORT, const.WEIGHT, const.MONITOR_ADDRESS,
            const.MONITOR_PORT
        ]
        if self.mem_member_client.is_version_supported(self.api_version,
                                                       '2.1'):
            equal_items.append(const.BACKUP)

        if const.SUBNET_ID in member_kwargs:
            equal_items.append(const.SUBNET_ID)
        else:
            self.assertIsNone(member.get(const.SUBNET_ID))

        for item in equal_items:
            self.assertEqual(member_kwargs[item], member[item])

        # Member update
        new_name = data_utils.rand_name("lb_member_member1-update")
        member_update_kwargs = {
            const.POOL_ID: member_kwargs[const.POOL_ID],
            const.NAME: new_name,
            const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
            const.WEIGHT: member[const.WEIGHT] + 1,
            const.MONITOR_ADDRESS: '192.0.2.3',
            const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
        }
        if self.mem_member_client.is_version_supported(self.api_version,
                                                       '2.1'):
            member_update_kwargs.update({
                const.BACKUP: not member[const.BACKUP],
            })

        member = self.mem_member_client.update_member(member[const.ID],
                                                      **member_update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        member = waiters.wait_for_status(self.mem_member_client.show_member,
                                         member[const.ID],
                                         const.PROVISIONING_STATUS,
                                         const.ACTIVE,
                                         CONF.load_balancer.build_interval,
                                         CONF.load_balancer.build_timeout,
                                         pool_id=self.pool_id)

        # Test changed items
        equal_items = [
            const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
            const.MONITOR_ADDRESS, const.MONITOR_PORT
        ]
        if self.mem_member_client.is_version_supported(self.api_version,
                                                       '2.1'):
            equal_items.append(const.BACKUP)

        for item in equal_items:
            self.assertEqual(member_update_kwargs[item], member[item])

        # Test unchanged items
        equal_items = [const.ADDRESS, const.PROTOCOL_PORT]
        if const.SUBNET_ID in member_kwargs:
            equal_items.append(const.SUBNET_ID)
        else:
            self.assertIsNone(member.get(const.SUBNET_ID))

        for item in equal_items:
            self.assertEqual(member_kwargs[item], member[item])

        # Member delete
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
        self.mem_member_client.delete_member(member[const.ID],
                                             pool_id=self.pool_id)

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_member_client.show_member,
            member[const.ID],
            const.PROVISIONING_STATUS,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout,
            pool_id=self.pool_id)
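
Every mutation in the CRUD sequence above waits for the parent load balancer to return to ACTIVE and then re-reads the member until it reports ACTIVE as well. A hypothetical convenience wrapper over those two waits (extra show kwargs such as pool_id pass straight through):

    def _wait_for_active(self, show_func, object_id, **show_kwargs):
        """Hypothetical helper: LB back to ACTIVE, then the child resource."""
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        return waiters.wait_for_status(show_func, object_id,
                                       const.PROVISIONING_STATUS,
                                       const.ACTIVE,
                                       CONF.load_balancer.build_interval,
                                       CONF.load_balancer.build_timeout,
                                       **show_kwargs)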
Code example #26
    def test_listener_update(self):
        """Tests listener update and show APIs.

        * Create a fully populated listener.
        * Show listener details.
        * Validate the show reflects the initial values.
        * Validates that other accounts cannot update the listener.
        * Update the listener details.
        * Show listener details.
        * Validate the show reflects the updated values.
        """
        listener_name = data_utils.rand_name("lb_member_listener1-update")
        listener_description = data_utils.arbitrary_string(size=255)

        listener_kwargs = {
            const.NAME: listener_name,
            const.DESCRIPTION: listener_description,
            const.ADMIN_STATE_UP: False,
            const.PROTOCOL: self.protocol,
            const.PROTOCOL_PORT: 82,
            const.LOADBALANCER_ID: self.lb_id,
            const.CONNECTION_LIMIT: 200,
            const.INSERT_HEADERS: {
                const.X_FORWARDED_FOR: "true",
                const.X_FORWARDED_PORT: "true"
            },
            # TODO(rm_work): need to finish the rest of this stuff
            # const.DEFAULT_POOL_ID: '',
            # const.DEFAULT_TLS_CONTAINER_REF: '',
            # const.SNI_CONTAINER_REFS: [],
        }
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            listener_kwargs.update({
                const.TIMEOUT_CLIENT_DATA: 1000,
                const.TIMEOUT_MEMBER_CONNECT: 1000,
                const.TIMEOUT_MEMBER_DATA: 1000,
                const.TIMEOUT_TCP_INSPECT: 50,
            })

        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        self.addClassResourceCleanup(self.mem_listener_client.cleanup_listener,
                                     listener[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        listener = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        self.assertEqual(listener_name, listener[const.NAME])
        self.assertEqual(listener_description, listener[const.DESCRIPTION])
        self.assertFalse(listener[const.ADMIN_STATE_UP])
        parser.parse(listener[const.CREATED_AT])
        parser.parse(listener[const.UPDATED_AT])
        UUID(listener[const.ID])
        # Operating status will be OFFLINE while admin_state_up = False
        self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, listener[const.PROTOCOL])
        self.assertEqual(82, listener[const.PROTOCOL_PORT])
        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
        insert_headers = listener[const.INSERT_HEADERS]
        self.assertTrue(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
        self.assertTrue(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])

        # Test that a user, without the load balancer member role, cannot
        # use this command
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.listener_client.update_listener,
                              listener[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        listener_check = self.mem_listener_client.show_listener(
            listener[const.ID])
        self.assertEqual(const.ACTIVE,
                         listener_check[const.PROVISIONING_STATUS])
        self.assertFalse(listener_check[const.ADMIN_STATE_UP])

        # Test that a different user, with the load balancer member role,
        # cannot update this listener
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.listener_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.update_listener,
                              listener[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        listener_check = self.mem_listener_client.show_listener(
            listener[const.ID])
        self.assertEqual(const.ACTIVE,
                         listener_check[const.PROVISIONING_STATUS])
        self.assertFalse(listener_check[const.ADMIN_STATE_UP])

        new_name = data_utils.rand_name("lb_member_listener1-UPDATED")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        listener_update_kwargs = {
            const.NAME: new_name,
            const.DESCRIPTION: new_description,
            const.ADMIN_STATE_UP: True,
            const.CONNECTION_LIMIT: 400,
            const.INSERT_HEADERS: {
                const.X_FORWARDED_FOR: "false",
                const.X_FORWARDED_PORT: "false"
            },
            # TODO(rm_work): need to finish the rest of this stuff
            # const.DEFAULT_POOL_ID: '',
            # const.DEFAULT_TLS_CONTAINER_REF: '',
            # const.SNI_CONTAINER_REFS: [],
        }
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            listener_update_kwargs.update({
                const.TIMEOUT_CLIENT_DATA: 2000,
                const.TIMEOUT_MEMBER_CONNECT: 2000,
                const.TIMEOUT_MEMBER_DATA: 2000,
                const.TIMEOUT_TCP_INSPECT: 100,
            })

        listener = self.mem_listener_client.update_listener(
            listener[const.ID], **listener_update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        listener = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        if not CONF.load_balancer.test_with_noop:
            listener = waiters.wait_for_status(
                self.mem_listener_client.show_listener, listener[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        self.assertEqual(new_name, listener[const.NAME])
        self.assertEqual(new_description, listener[const.DESCRIPTION])
        self.assertTrue(listener[const.ADMIN_STATE_UP])
        # Operating status is a measured status, so no-op will not go online
        if CONF.load_balancer.test_with_noop:
            self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
        self.assertEqual(400, listener[const.CONNECTION_LIMIT])
        insert_headers = listener[const.INSERT_HEADERS]
        self.assertFalse(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
        self.assertFalse(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
            self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
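
The timeout fields above are only sent and asserted when API microversion 2.1 is available. A hypothetical helper (illustrative, not part of the plugin) for building that version-gated portion of the kwargs:

    def _timeout_kwargs(self, client_data, member_connect, member_data,
                        tcp_inspect):
        """Hypothetical helper: only emit timeouts when 2.1 is supported."""
        if not self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            return {}
        return {
            const.TIMEOUT_CLIENT_DATA: client_data,
            const.TIMEOUT_MEMBER_CONNECT: member_connect,
            const.TIMEOUT_MEMBER_DATA: member_data,
            const.TIMEOUT_TCP_INSPECT: tcp_inspect,
        }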
Code example #27
    def test_healthmonitor_list(self):
        """Tests healthmonitor list API and field filtering.

        * Create three clean pools to use for the healthmonitors.
        * Create three healthmonitors.
        * Validates that other accounts cannot list the healthmonitors.
        * List the healthmonitors using the default sort order.
        * List the healthmonitors using descending sort order.
        * List the healthmonitors using ascending sort order.
        * List the healthmonitors returning one field at a time.
        * List the healthmonitors returning two fields.
        * List the healthmonitors filtering to one of the three.
        * List the healthmonitors filtered, one field, and sorted.
        """
        # Get a list of pre-existing HMs so they can be filtered out of the
        # list results
        pretest_hms = self.mem_healthmonitor_client.list_healthmonitors()
        # Store their IDs for easy access
        pretest_hm_ids = [hm['id'] for hm in pretest_hms]

        pool1_name = data_utils.rand_name("lb_member_pool1_hm-list")
        pool1_kwargs = {
            const.NAME: pool1_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }

        pool1 = self.mem_pool_client.create_pool(**pool1_kwargs)
        pool1_id = pool1[const.ID]
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool1_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        pool2_name = data_utils.rand_name("lb_member_pool2_hm-list")
        pool2_kwargs = {
            const.NAME: pool2_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }

        pool2 = self.mem_pool_client.create_pool(**pool2_kwargs)
        pool2_id = pool2[const.ID]
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool2_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        pool3_name = data_utils.rand_name("lb_member_pool3_hm-list")
        pool3_kwargs = {
            const.NAME: pool3_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }

        pool3 = self.mem_pool_client.create_pool(**pool3_kwargs)
        pool3_id = pool3[const.ID]
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool3_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        hm1_name = data_utils.rand_name("lb_member_hm2-list")
        hm1_kwargs = {
            const.POOL_ID: pool1_id,
            const.NAME: hm1_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 3,
            const.MAX_RETRIES: 4,
            const.MAX_RETRIES_DOWN: 5,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/B',
            const.EXPECTED_CODES: '200-204',
            const.ADMIN_STATE_UP: True,
        }
        hm1 = self.mem_healthmonitor_client.create_healthmonitor(**hm1_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm1[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        hm1 = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm1[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        hm2_name = data_utils.rand_name("lb_member_hm1-list")
        hm2_kwargs = {
            const.POOL_ID: pool2_id,
            const.NAME: hm2_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 3,
            const.MAX_RETRIES: 4,
            const.MAX_RETRIES_DOWN: 5,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/A',
            const.EXPECTED_CODES: '200-204',
            const.ADMIN_STATE_UP: True,
        }
        hm2 = self.mem_healthmonitor_client.create_healthmonitor(**hm2_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm2[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        hm2 = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm2[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        # Time resolution for created_at is only to the second, and we need to
        # ensure that each object has a distinct creation time. Delaying one
        # second is both a simple and a reliable way to accomplish this.
        time.sleep(1)

        hm3_name = data_utils.rand_name("lb_member_hm3-list")
        hm3_kwargs = {
            const.POOL_ID: pool3_id,
            const.NAME: hm3_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 3,
            const.MAX_RETRIES: 4,
            const.MAX_RETRIES_DOWN: 5,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/C',
            const.EXPECTED_CODES: '200-204',
            const.ADMIN_STATE_UP: False,
        }
        hm3 = self.mem_healthmonitor_client.create_healthmonitor(**hm3_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm3[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        hm3 = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm3[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Test that a different user cannot list healthmonitors
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            member2_client = self.os_roles_lb_member2.healthmonitor_client
            primary = member2_client.list_healthmonitors(
                query_params='pool_id={pool_id}'.format(pool_id=pool1_id))
            self.assertEqual(0, len(primary))

        # Test that users without the lb member role cannot list healthmonitors
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.healthmonitor_client.list_healthmonitors)

        # Check the default sort order, created_at
        hms = self.mem_healthmonitor_client.list_healthmonitors()
        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
        self.assertEqual(hm1[const.URL_PATH], hms[0][const.URL_PATH])
        self.assertEqual(hm2[const.URL_PATH], hms[1][const.URL_PATH])
        self.assertEqual(hm3[const.URL_PATH], hms[2][const.URL_PATH])

        # Test sort descending by URL path
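        # With the plugin's usual constant values this builds a query string
        # like 'sort=url_path:desc' (and ':asc' in the ascending case below);
        # the exact strings are an assumption based on the constants module.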
        hms = self.mem_healthmonitor_client.list_healthmonitors(
            query_params='{sort}={url_path}:{desc}'.format(
                sort=const.SORT, url_path=const.URL_PATH, desc=const.DESC))
        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
        self.assertEqual(hm1[const.URL_PATH], hms[1][const.URL_PATH])
        self.assertEqual(hm2[const.URL_PATH], hms[2][const.URL_PATH])
        self.assertEqual(hm3[const.URL_PATH], hms[0][const.URL_PATH])

        # Test sort ascending by URL path
        hms = self.mem_healthmonitor_client.list_healthmonitors(
            query_params='{sort}={url_path}:{asc}'.format(
                sort=const.SORT, url_path=const.URL_PATH, asc=const.ASC))
        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
        self.assertEqual(hm1[const.URL_PATH], hms[1][const.URL_PATH])
        self.assertEqual(hm2[const.URL_PATH], hms[0][const.URL_PATH])
        self.assertEqual(hm3[const.URL_PATH], hms[2][const.URL_PATH])

        # Determine indexes of pretest HMs in default sort
        pretest_hm_indexes = []
        hms = self.mem_healthmonitor_client.list_healthmonitors()
        for i, hm in enumerate(hms):
            if hm['id'] in pretest_hm_ids:
                pretest_hm_indexes.append(i)

        # Test fields
        for field in const.SHOW_HEALTHMONITOR_RESPONSE_FIELDS:
            hms = self.mem_healthmonitor_client.list_healthmonitors(
                query_params='{fields}={field}'.format(fields=const.FIELDS,
                                                       field=field))
            hms = self._filter_hms_by_index(hms, pretest_hm_indexes)
            self.assertEqual(1, len(hms[0]))
            self.assertEqual(hm1[field], hms[0][field])
            self.assertEqual(hm2[field], hms[1][field])
            self.assertEqual(hm3[field], hms[2][field])

        # Test multiple fields at the same time
        hms = self.mem_healthmonitor_client.list_healthmonitors(
            query_params='{fields}={admin}&'
            '{fields}={created}&'
            '{fields}={pools}'.format(fields=const.FIELDS,
                                      admin=const.ADMIN_STATE_UP,
                                      created=const.CREATED_AT,
                                      pools=const.POOLS))
        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
        self.assertEqual(3, len(hms[0]))
        self.assertTrue(hms[0][const.ADMIN_STATE_UP])
        parser.parse(hms[0][const.CREATED_AT])
        self.assertTrue(hms[1][const.ADMIN_STATE_UP])
        parser.parse(hms[1][const.CREATED_AT])
        self.assertFalse(hms[2][const.ADMIN_STATE_UP])
        parser.parse(hms[2][const.CREATED_AT])

        # Test filtering
        hms = self.mem_healthmonitor_client.list_healthmonitors(
            query_params='{name}={hm_name}'.format(name=const.NAME,
                                                   hm_name=hm2[const.NAME]))
        self.assertEqual(1, len(hms))
        self.assertEqual(hm2[const.NAME], hms[0][const.NAME])

        # Test combined params
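        # Assuming the usual constant values, the combined query is roughly
        # 'admin_state_up=true&fields=name&fields=pools&sort=name:desc',
        # i.e. a filter, a field selection and a sort in a single request.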
        hms = self.mem_healthmonitor_client.list_healthmonitors(
            query_params='{admin}={true}&'
            '{fields}={name}&{fields}={pools}&'
            '{sort}={name}:{desc}'.format(admin=const.ADMIN_STATE_UP,
                                          true=const.ADMIN_STATE_UP_TRUE,
                                          fields=const.FIELDS,
                                          name=const.NAME,
                                          pools=const.POOLS,
                                          sort=const.SORT,
                                          desc=const.DESC))
        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
        # Should get two healthmonitors
        self.assertEqual(2, len(hms))
        # healthmonitors should have two fields
        self.assertEqual(2, len(hms[0]))
        # Should be in descending order
        self.assertEqual(hm2[const.NAME], hms[1][const.NAME])
        self.assertEqual(hm1[const.NAME], hms[0][const.NAME])
Code Example #28
    def test_listener_show_stats(self):
        """Tests listener show statistics API.

        * Create a listener.
        * Validates that other accounts cannot see the stats for the
        *   listener.
        * Show listener statistics.
        * Validate the show reflects the expected values.
        """
        listener_name = data_utils.rand_name("lb_member_listener1-stats")
        listener_description = data_utils.arbitrary_string(size=255)

        listener_kwargs = {
            const.NAME: listener_name,
            const.DESCRIPTION: listener_description,
            const.ADMIN_STATE_UP: True,
            const.PROTOCOL: const.HTTP,
            const.PROTOCOL_PORT: 84,
            const.LOADBALANCER_ID: self.lb_id,
            const.CONNECTION_LIMIT: 200,
        }

        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        listener = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
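        # The no-op drivers never report a measured operating status, so only
        # wait for ONLINE when a real backend is configured.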
        if not CONF.load_balancer.test_with_noop:
            listener = waiters.wait_for_status(
                self.mem_listener_client.show_listener, listener[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        # Test that a user, without the load balancer member role, cannot
        # use this command
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.listener_client.get_listener_stats,
                listener[const.ID])

        # Test that a different user, with the load balancer member role,
        # cannot see the listener stats
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            member2_client = self.os_roles_lb_member2.listener_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.get_listener_stats,
                              listener[const.ID])

        stats = self.mem_listener_client.get_listener_stats(listener[const.ID])

        self.assertEqual(5, len(stats))
        self.assertEqual(0, stats[const.ACTIVE_CONNECTIONS])
        self.assertEqual(0, stats[const.BYTES_IN])
        self.assertEqual(0, stats[const.BYTES_OUT])
        self.assertEqual(0, stats[const.REQUEST_ERRORS])
        self.assertEqual(0, stats[const.TOTAL_CONNECTIONS])
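
A freshly created listener with no traffic reports all-zero counters, which is exactly what the assertions above verify. As a hedged illustration (the helper name, timeout defaults, and loop are hypothetical, built only on the get_listener_stats() call already used here), a scenario-style test could wait for the counters to move like this:

    def _wait_for_listener_traffic(self, listener_id,
                                   timeout=60, interval=2):
        # Hypothetical polling helper, not part of octavia-tempest-plugin;
        # it only reuses the get_listener_stats() call shown above.
        deadline = time.time() + timeout
        while time.time() < deadline:
            stats = self.mem_listener_client.get_listener_stats(listener_id)
            if stats[const.TOTAL_CONNECTIONS] > 0:
                return stats
            time.sleep(interval)
        self.fail('Listener %s reported no traffic within %s seconds' %
                  (listener_id, timeout))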
Code Example #29
    def test_healthmonitor_update(self):
        """Tests healthmonitor update and show APIs.

        * Create a clean pool to use for the healthmonitor.
        * Create a fully populated healthmonitor.
        * Show healthmonitor details.
        * Validate the show reflects the initial values.
        * Validates that other accounts cannot update the healthmonitor.
        * Update the healthmonitor details.
        * Show healthmonitor details.
        * Validate the show reflects the updated values.
        """
        pool_name = data_utils.rand_name("lb_member_pool1_hm-update")
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }

        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.addCleanup(self.mem_pool_client.cleanup_pool,
                        pool[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        hm_name = data_utils.rand_name("lb_member_hm1-update")
        hm_kwargs = {
            const.POOL_ID: pool[const.ID],
            const.NAME: hm_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 3,
            const.MAX_RETRIES: 4,
            const.MAX_RETRIES_DOWN: 5,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/',
            const.EXPECTED_CODES: '200-204',
            const.ADMIN_STATE_UP: False,
        }

        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        parser.parse(hm[const.CREATED_AT])
        parser.parse(hm[const.UPDATED_AT])
        UUID(hm[const.ID])

        # Healthmonitors are ONLINE if admin_state_up = True, else OFFLINE
        if hm_kwargs[const.ADMIN_STATE_UP]:
            self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.OFFLINE, hm[const.OPERATING_STATUS])

        equal_items = [
            const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
            const.MAX_RETRIES, const.MAX_RETRIES_DOWN, const.HTTP_METHOD,
            const.URL_PATH, const.EXPECTED_CODES, const.ADMIN_STATE_UP
        ]

        for item in equal_items:
            self.assertEqual(hm_kwargs[item], hm[item])

        # Test that a user, without the loadbalancer member role, cannot
        # use this command
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.healthmonitor_client.update_healthmonitor,
                hm[const.ID],
                admin_state_up=True)

        # Assert we didn't go into PENDING_*
        hm_check = self.mem_healthmonitor_client.show_healthmonitor(
            hm[const.ID])
        self.assertEqual(const.ACTIVE, hm_check[const.PROVISIONING_STATUS])
        self.assertFalse(hm_check[const.ADMIN_STATE_UP])

        # Test that a different user, with the load balancer member role,
        # cannot update this healthmonitor
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            member2_client = self.os_roles_lb_member2.healthmonitor_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.update_healthmonitor,
                              hm[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        hm_check = self.mem_healthmonitor_client.show_healthmonitor(
            hm[const.ID])
        self.assertEqual(const.ACTIVE, hm_check[const.PROVISIONING_STATUS])
        self.assertFalse(hm_check[const.ADMIN_STATE_UP])

        new_name = data_utils.rand_name("lb_member_hm1-UPDATED")
        hm_update_kwargs = {
            const.NAME: new_name,
            const.DELAY: hm_kwargs[const.DELAY] + 1,
            const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
            const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
            const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
            const.HTTP_METHOD: const.POST,
            const.URL_PATH: '/test',
            const.EXPECTED_CODES: '201,202',
            const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
        }
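        # TYPE is intentionally left out of the update kwargs: the monitor
        # type is not an updatable field, and the "unchanged items" check
        # below confirms it keeps its original value.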
        hm = self.mem_healthmonitor_client.update_healthmonitor(
            hm[const.ID], **hm_update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # Healthmonitors are ONLINE if admin_state_up = True, else OFFLINE
        if hm_update_kwargs[const.ADMIN_STATE_UP]:
            self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.OFFLINE, hm[const.OPERATING_STATUS])

        # Test changed items
        equal_items = [
            const.NAME, const.DELAY, const.TIMEOUT, const.MAX_RETRIES,
            const.MAX_RETRIES_DOWN, const.HTTP_METHOD, const.URL_PATH,
            const.EXPECTED_CODES, const.ADMIN_STATE_UP
        ]

        for item in equal_items:
            self.assertEqual(hm_update_kwargs[item], hm[item])

        # Test unchanged items
        equal_items = [const.TYPE]
        for item in equal_items:
            self.assertEqual(hm_kwargs[item], hm[item])
Code Example #30
    @classmethod
    def resource_setup(cls):
        """Setup resources needed by the tests."""
        super(TLSWithBarbicanTest, cls).resource_setup()

        # Create a CA self-signed cert and key
        cls.ca_cert, ca_key = cert_utils.generate_ca_cert_and_key()

        LOG.debug('CA Cert: %s' %
                  cls.ca_cert.public_bytes(serialization.Encoding.PEM))
        LOG.debug('CA private Key: %s' % ca_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()))
        LOG.debug('CA public Key: %s' % ca_key.public_key().public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo))

        # Create a server cert and key
        cls.server_uuid = uuidutils.generate_uuid()
        server_cert, server_key = cert_utils.generate_server_cert_and_key(
            cls.ca_cert, ca_key, cls.server_uuid)

        LOG.debug('Server Cert: %s' %
                  server_cert.public_bytes(serialization.Encoding.PEM))
        LOG.debug('Server private Key: %s' % server_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()))
        server_public_key = server_key.public_key()
        LOG.debug('Server public Key: %s' % server_public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo))

        # Create the pkcs12 bundle
        pkcs12 = cert_utils.generate_pkcs12_bundle(server_cert, server_key)
        LOG.debug('Server PKCS12 bundle: %s' % base64.b64encode(pkcs12))

        # Load the secret into the barbican service under the
        # os_roles_lb_member tenant
        barbican_mgr = barbican_client_mgr.BarbicanClientManager(
            cls.os_roles_lb_member)

        cls.secret_ref = barbican_mgr.store_secret(pkcs12)
        cls.addClassResourceCleanup(barbican_mgr.delete_secret, cls.secret_ref)

        # Set the barbican ACL if the Octavia API version doesn't do it
        # automatically.
        if not cls.mem_lb_client.is_version_supported(cls.api_version, '2.1'):
            user_list = cls.os_admin.users_v3_client.list_users(
                name=CONF.load_balancer.octavia_svc_username)
            msg = 'Only one user named "{0}" should exist, {1} found.'.format(
                CONF.load_balancer.octavia_svc_username,
                len(user_list['users']))
            assert 1 == len(user_list['users']), msg
            barbican_mgr.add_acl(cls.secret_ref, user_list['users'][0]['id'])

        # Setup a load balancer for the tests to use
        lb_name = data_utils.rand_name("lb_member_lb1-tls")
        lb_kwargs = {
            const.PROVIDER: CONF.load_balancer.provider,
            const.NAME: lb_name
        }

        # TODO(johnsom) Update for IPv6
        cls._setup_lb_network_kwargs(lb_kwargs, 4)

        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
        cls.lb_id = lb[const.ID]
        cls.addClassResourceCleanup(cls.mem_lb_client.cleanup_loadbalancer,
                                    cls.lb_id)

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, cls.lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        if CONF.validation.connect_method == 'floating':
            port_id = lb[const.VIP_PORT_ID]
            result = cls.lb_mem_float_ip_client.create_floatingip(
                floating_network_id=CONF.network.public_network_id,
                port_id=port_id)
            floating_ip = result['floatingip']
            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
            cls.addClassResourceCleanup(
                waiters.wait_for_not_found,
                cls.lb_mem_float_ip_client.delete_floatingip,
                cls.lb_mem_float_ip_client.show_floatingip,
                floatingip_id=floating_ip['id'])
            cls.lb_vip_address = floating_ip['floating_ip_address']
        else:
            cls.lb_vip_address = lb[const.VIP_ADDRESS]

        pool_name = data_utils.rand_name("lb_member_pool1-tls")
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: cls.lb_id,
        }
        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
        cls.pool_id = pool[const.ID]
        cls.addClassResourceCleanup(cls.mem_pool_client.cleanup_pool,
                                    cls.pool_id,
                                    lb_client=cls.mem_lb_client,
                                    lb_id=cls.lb_id)

        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, cls.lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Set up Member 1 for Webserver 1
        member1_name = data_utils.rand_name("lb_member_member1-tls")
        member1_kwargs = {
            const.POOL_ID: cls.pool_id,
            const.NAME: member1_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: cls.webserver1_ip,
            const.PROTOCOL_PORT: 80,
        }
        if cls.lb_member_1_subnet:
            member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]

        member1 = cls.mem_member_client.create_member(**member1_kwargs)
        cls.addClassResourceCleanup(cls.mem_member_client.cleanup_member,
                                    member1[const.ID],
                                    pool_id=cls.pool_id,
                                    lb_client=cls.mem_lb_client,
                                    lb_id=cls.lb_id)
        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, cls.lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Set up Member 2 for Webserver 2
        member2_name = data_utils.rand_name("lb_member_member2-tls")
        member2_kwargs = {
            const.POOL_ID: cls.pool_id,
            const.NAME: member2_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: cls.webserver2_ip,
            const.PROTOCOL_PORT: 80,
        }
        if cls.lb_member_2_subnet:
            member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]

        member2 = cls.mem_member_client.create_member(**member2_kwargs)
        cls.addClassResourceCleanup(cls.mem_member_client.cleanup_member,
                                    member2[const.ID],
                                    pool_id=cls.pool_id,
                                    lb_client=cls.mem_lb_client,
                                    lb_id=cls.lb_id)
        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer, cls.lb_id,
                                const.PROVISIONING_STATUS, const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
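
The setup above stores the PKCS12 bundle in Barbican and builds a pool with two members, but a TLS-terminated listener that uses the stored secret is not shown in this excerpt. A hedged sketch of that step (the method name and the TERMINATED_HTTPS, DEFAULT_POOL_ID and DEFAULT_TLS_CONTAINER_REF constants are assumptions about the plugin's constants module) could look roughly like this:

    def test_basic_tls_traffic(self):  # hypothetical test method name
        listener_name = data_utils.rand_name("lb_member_listener1-tls")
        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: const.TERMINATED_HTTPS,
            const.PROTOCOL_PORT: 443,
            const.LOADBALANCER_ID: self.lb_id,
            const.DEFAULT_POOL_ID: self.pool_id,
            # Reference to the PKCS12 secret stored in resource_setup()
            const.DEFAULT_TLS_CONTAINER_REF: self.secret_ref,
        }
        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener[const.ID],
                        lb_client=self.mem_lb_client, lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)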