def test_load_balancer_show_stats(self):
        """Tests the load balancer statistics show API.

        * Create a load balancer and wait for it to go ACTIVE.
        * Verify that other accounts are forbidden from reading its stats.
        * Fetch the statistics and check every counter starts at zero.
        * Best-effort delete so the amphora is released early.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-show_stats")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name,
            provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        lb_id = lb[const.ID]
        self.addClassResourceCleanup(
            self.mem_lb_client.cleanup_loadbalancer, lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        # Under the advanced RBAC policy, a user without the load balancer
        # member role must not be able to read the stats.
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.get_loadbalancer_stats,
                lb_id)

        # A second member-role user must not see stats for a load balancer
        # it does not own.
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            member2_client = self.os_roles_lb_member2.loadbalancer_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.get_loadbalancer_stats,
                              lb_id)

        lb_stats = self.mem_lb_client.get_loadbalancer_stats(lb_id)

        # A freshly created LB has seen no traffic, so exactly these five
        # counters should be present and all should be zero.
        self.assertEqual(5, len(lb_stats))
        for counter in (const.ACTIVE_CONNECTIONS, const.BYTES_IN,
                        const.BYTES_OUT, const.REQUEST_ERRORS,
                        const.TOTAL_CONNECTIONS):
            self.assertEqual(0, lb_stats[counter])

        # Best-effort early delete so one full test run doesn't pile up
        # amphorae before the class cleanup phase fires.
        try:
            self.mem_lb_client.delete_loadbalancer(lb_id)

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb_id,
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
    def test_pool_delete(self):
        """Tests pool create and delete APIs.

        * Creates a pool.
        * Validates that other accounts cannot delete the pool
        * Deletes the pool.
        * Validates the pool is in the DELETED state.
        """
        pool_name = data_utils.rand_name("lb_member_pool1-delete")
        pool_sp_cookie_name = 'my_cookie'
        pool_kwargs = {
            const.NAME: pool_name,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }
        # Only request session persistence when the provider supports it.
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool_sp_cookie_name,
            }
        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        self.addClassResourceCleanup(self.mem_pool_client.cleanup_pool,
                                     pool[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        # Wait for the parent LB to be mutable before exercising deletes.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Test that a user without the load balancer role cannot
        # delete this pool
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.pool_client.delete_pool,
                              pool[const.ID])

        # Test that a different user, with the load balancer member role,
        # cannot delete this pool
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            member2_client = self.os_roles_lb_member2.pool_client
            self.assertRaises(exceptions.Forbidden, member2_client.delete_pool,
                              pool[const.ID])

        self.mem_pool_client.delete_pool(pool[const.ID])

        # The pool should transition to DELETED (or start returning 404)...
        waiters.wait_for_deleted_status_or_not_found(
            self.mem_pool_client.show_pool, pool[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        # ...and the parent load balancer should return to ACTIVE.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
    def test_listener_delete(self):
        """Tests listener create and delete APIs.

        * Creates a listener.
        * Validates that other accounts cannot delete the listener
        * Deletes the listener.
        * Validates the listener is in the DELETED state.
        """
        listener_name = data_utils.rand_name("lb_member_listener1-delete")

        listener_kwargs = {
            const.NAME: listener_name,
            const.PROTOCOL: self.protocol,
            # Hard-coded port; presumably chosen to avoid colliding with
            # listeners created by other tests on the shared LB — confirm.
            const.PROTOCOL_PORT: 83,
            const.LOADBALANCER_ID: self.lb_id,
        }
        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        self.addClassResourceCleanup(self.mem_listener_client.cleanup_listener,
                                     listener[const.ID],
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        # Wait for the parent LB to be mutable before exercising deletes.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Test that a user without the load balancer role cannot
        # delete this listener
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.listener_client.delete_listener,
                              listener[const.ID])

        # Test that a different user, with the load balancer member role,
        # cannot delete this listener
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            member2_client = self.os_roles_lb_member2.listener_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.delete_listener,
                              listener[const.ID])

        self.mem_listener_client.delete_listener(listener[const.ID])

        # The listener should transition to DELETED (or start 404ing)...
        waiters.wait_for_deleted_status_or_not_found(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        # ...and the parent load balancer should return to ACTIVE.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
    def test_load_balancer_failover(self):
        """Tests load balancer failover API.

        * Create a load balancer.
        * Validates that other accounts cannot failover the load balancer
        * Wait for the load balancer to go ACTIVE.
        * Failover the load balancer.
        * Wait for the load balancer to go ACTIVE.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-failover")
        # NOTE(review): unlike the other LB tests in this file, no
        # provider= is passed here, so the deployment's default provider
        # is used — presumably intentional; confirm.
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
        self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
                                     lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        # Test RBAC not authorized for non-admin role
        # Even the owning member-role client is expected to get Forbidden;
        # the successful failover below is done with admin credentials.
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            self.assertRaises(exceptions.Forbidden,
                              self.mem_lb_client.failover_loadbalancer,
                              lb[const.ID])

        # Assert we didn't go into PENDING_*
        lb = self.mem_lb_client.show_loadbalancer(lb[const.ID])
        self.assertEqual(const.ACTIVE, lb[const.PROVISIONING_STATUS])

        # Trigger the failover as admin, then wait for the LB to be
        # rebuilt and return to ACTIVE.
        self.os_roles_lb_admin.loadbalancer_client.failover_loadbalancer(
            lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)
        # TODO(johnsom) Assert the amphora ID has changed when amp client
        #               is available.

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            # Deliberate best-effort: the class-level cleanup registered
            # above will retry the delete if this fails.
            pass
    def test_l7policy_delete(self):
        """Tests l7policy create and delete APIs.

        * Creates an l7policy.
        * Verifies that other accounts cannot delete the l7policy.
        * Deletes the l7policy and waits for it to reach DELETED.
        * Waits for the parent load balancer to return to ACTIVE.
        """
        l7policy_name = data_utils.rand_name("lb_member_l7policy1-delete")
        l7policy = self.mem_l7policy_client.create_l7policy(
            **{const.LISTENER_ID: self.listener_id,
               const.NAME: l7policy_name,
               const.ACTION: const.REJECT})
        l7policy_id = l7policy[const.ID]
        self.addClassResourceCleanup(self.mem_l7policy_client.cleanup_l7policy,
                                     l7policy_id,
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Under advanced RBAC, a user without the load balancer role is
        # refused.
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.l7policy_client.delete_l7policy,
                              l7policy_id)

        # A different user holding the member role must not be able to
        # delete it either.
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_roles_lb_member2.l7policy_client.delete_l7policy,
                l7policy_id)

        self.mem_l7policy_client.delete_l7policy(l7policy_id)

        # The policy should reach DELETED (or 404), after which the parent
        # load balancer should go back to ACTIVE.
        waiters.wait_for_deleted_status_or_not_found(
            self.mem_l7policy_client.show_l7policy, l7policy_id,
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
    def test_load_balancer_delete_cascade(self):
        """Tests load balancer create and cascade delete APIs.

        * Creates a load balancer.
        * Verifies that other accounts cannot cascade-delete it.
        * Cascade-deletes the load balancer.
        * Waits until it reaches the DELETED state.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-cascade_delete")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name,
            provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        lb_id = lb[const.ID]
        self.addClassResourceCleanup(
            self.mem_lb_client.cleanup_loadbalancer, lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.lb_build_interval,
                                CONF.load_balancer.lb_build_timeout)

        # TODO(johnsom) Add other objects when we have clients for them

        # Under advanced RBAC, a user without the load balancer role must
        # be refused.
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.delete_loadbalancer,
                lb_id,
                cascade=True)

        # A different user with the member role must also be refused.
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            member2_client = self.os_roles_lb_member2.loadbalancer_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.delete_loadbalancer,
                              lb_id,
                              cascade=True)

        self.mem_lb_client.delete_loadbalancer(lb_id, cascade=True)

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_lb_client.show_loadbalancer, lb_id,
            const.PROVISIONING_STATUS, CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)
    def test_l7rule_CRUD(self):
        """Tests l7rule create, read, update, delete

        * Create a fully populated l7rule.
        * Show l7rule details.
        * Update the l7rule.
        * Delete the l7rule.
        """

        # L7Rule create
        # The rule starts with admin_state_up=False so the OFFLINE
        # operating status can be checked before the update enables it.
        l7rule_kwargs = {
            const.ADMIN_STATE_UP: False,
            const.L7POLICY_ID: self.l7policy_id,
            const.TYPE: const.HEADER,
            const.VALUE: 'myvalue-create',
            const.COMPARE_TYPE: const.EQUAL_TO,
            const.KEY: 'mykey-create',
            const.INVERT: False,
        }

        l7rule = self.mem_l7rule_client.create_l7rule(**l7rule_kwargs)
        self.addCleanup(self.mem_l7rule_client.cleanup_l7rule,
                        l7rule[const.ID],
                        l7policy_id=self.l7policy_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        # Wait for the parent LB, then the rule itself, to go ACTIVE
        # before inspecting the rule.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        l7rule = waiters.wait_for_status(self.mem_l7rule_client.show_l7rule,
                                         l7rule[const.ID],
                                         const.PROVISIONING_STATUS,
                                         const.ACTIVE,
                                         CONF.load_balancer.build_interval,
                                         CONF.load_balancer.build_timeout,
                                         l7policy_id=self.l7policy_id)

        # Sanity checks: timestamps must parse and the ID must be a UUID.
        parser.parse(l7rule[const.CREATED_AT])
        parser.parse(l7rule[const.UPDATED_AT])
        UUID(l7rule[const.ID])
        # Operating status will be OFFLINE while admin_state_up = False
        self.assertEqual(const.OFFLINE, l7rule[const.OPERATING_STATUS])

        equal_items = [
            const.ADMIN_STATE_UP, const.TYPE, const.VALUE, const.COMPARE_TYPE,
            const.KEY, const.INVERT
        ]

        # Every field set at create time should be reflected back.
        for item in equal_items:
            self.assertEqual(l7rule_kwargs[item], l7rule[item])

        # L7Rule update
        # Flip every updatable field, including enabling the rule.
        l7rule_update_kwargs = {
            const.L7POLICY_ID: self.l7policy_id,
            const.ADMIN_STATE_UP: True,
            const.TYPE: const.COOKIE,
            const.VALUE: 'myvalue-UPDATED',
            const.COMPARE_TYPE: const.CONTAINS,
            const.KEY: 'mykey-UPDATED',
            const.INVERT: True,
        }
        l7rule = self.mem_l7rule_client.update_l7rule(l7rule[const.ID],
                                                      **l7rule_update_kwargs)

        # Same LB-then-rule wait pattern as after the create.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        l7rule = waiters.wait_for_status(self.mem_l7rule_client.show_l7rule,
                                         l7rule[const.ID],
                                         const.PROVISIONING_STATUS,
                                         const.ACTIVE,
                                         CONF.load_balancer.build_interval,
                                         CONF.load_balancer.build_timeout,
                                         l7policy_id=self.l7policy_id)

        # Operating status for a l7rule will be ONLINE if it is enabled:
        self.assertEqual(const.ONLINE, l7rule[const.OPERATING_STATUS])

        # Test changed items (which is all of them, for l7rules)
        equal_items = [
            const.ADMIN_STATE_UP, const.TYPE, const.VALUE, const.COMPARE_TYPE,
            const.KEY, const.INVERT
        ]
        for item in equal_items:
            self.assertEqual(l7rule_update_kwargs[item], l7rule[item])

        # L7Rule delete
        # Wait for the LB to be mutable before issuing the delete, then
        # confirm the rule reaches DELETED (or starts returning 404).
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
        self.mem_l7rule_client.delete_l7rule(l7rule[const.ID],
                                             l7policy_id=self.l7policy_id)

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_l7rule_client.show_l7rule,
            l7rule[const.ID],
            const.PROVISIONING_STATUS,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout,
            l7policy_id=self.l7policy_id)
    def _test_healthmonitor_traffic(self, protocol, protocol_port):
        """Tests traffic is correctly routed based on healthmonitor status

        * Create three members:
          * One should be working, and ONLINE with a healthmonitor (passing)
          * One should be working, and ERROR with a healthmonitor (failing)
          * One should be disabled, and OFFLINE with a healthmonitor
        * Verify members are in their correct respective operating statuses.
        * Verify that traffic is balanced evenly between the working members.
        * Create a fully populated healthmonitor.
        * Verify members are in their correct respective operating statuses.
        * Verify that traffic is balanced *unevenly*.
        * Delete the healthmonitor.
        * Verify members are in their correct respective operating statuses.
        * Verify that traffic is balanced evenly between the working members.

        :param protocol: Pool protocol to exercise (HTTP, TCP or UDP).
        :param protocol_port: Port to send the verification traffic to.
        :raises ValueError: If protocol is not HTTP, TCP or UDP.
        """

        # Set up Member 1 for Webserver 1 (working; will pass the monitor)
        member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
        member1_kwargs = {
            const.POOL_ID: self.pool_ids[protocol],
            const.NAME: member1_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver1_ip,
            const.PROTOCOL_PORT: 80,
        }
        if self.lb_member_1_subnet:
            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]

        member1 = self.mem_member_client.create_member(**member1_kwargs)
        member1_id = member1[const.ID]
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member1_id,
                        pool_id=self.pool_ids[protocol],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Set up Member 2 for Webserver 2
        member2_name = data_utils.rand_name("lb_member_member2-hm-traffic")
        member2_kwargs = {
            const.POOL_ID: self.pool_ids[protocol],
            const.NAME: member2_name,
            const.ADMIN_STATE_UP: True,
            const.ADDRESS: self.webserver2_ip,
            const.PROTOCOL_PORT: 80,
            const.MONITOR_PORT: 9999,  # We want this to go offline with a HM
        }
        if self.lb_member_2_subnet:
            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]

        member2 = self.mem_member_client.create_member(**member2_kwargs)
        member2_id = member2[const.ID]
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member2_id,
                        pool_id=self.pool_ids[protocol],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Set up Member 3 as a non-existent disabled node
        member3_name = data_utils.rand_name("lb_member_member3-hm-traffic")
        member3_kwargs = {
            const.POOL_ID: self.pool_ids[protocol],
            const.NAME: member3_name,
            const.ADMIN_STATE_UP: False,
            const.ADDRESS: '192.0.2.1',
            const.PROTOCOL_PORT: 80,
        }

        member3 = self.mem_member_client.create_member(**member3_kwargs)
        member3_id = member3[const.ID]
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member3_id,
                        pool_id=self.pool_ids[protocol],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)

        # Wait for members to adjust to the correct OPERATING_STATUS
        # (NO_MONITOR for the enabled members since no HM exists yet)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member1_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member2_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member3_id,
                                const.OPERATING_STATUS,
                                const.OFFLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])

        # Send some traffic and verify it is balanced
        self.check_members_balanced(self.lb_vip_address,
                                    protocol_port=protocol_port,
                                    protocol=protocol,
                                    traffic_member_count=2)

        # Create the healthmonitor
        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
        if protocol == const.HTTP:
            hm_kwargs = {
                const.POOL_ID: self.pool_ids[protocol],
                const.NAME: hm_name,
                const.TYPE: const.HEALTH_MONITOR_HTTP,
                const.DELAY: 2,
                const.TIMEOUT: 2,
                const.MAX_RETRIES: 2,
                const.MAX_RETRIES_DOWN: 2,
                const.HTTP_METHOD: const.GET,
                const.URL_PATH: '/',
                const.EXPECTED_CODES: '200',
                const.ADMIN_STATE_UP: True,
            }
        else:
            if protocol == const.UDP:
                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
            elif protocol == const.TCP:
                hm_type = const.HEALTH_MONITOR_TCP
            else:
                # The original code left hm_type unbound for any other
                # protocol and crashed with a confusing UnboundLocalError.
                # Fail fast with a clear message instead.
                raise ValueError(
                    'Unsupported protocol for the healthmonitor traffic '
                    'test: %s' % protocol)

            hm_kwargs = {
                const.POOL_ID: self.pool_ids[protocol],
                const.NAME: hm_name,
                const.TYPE: hm_type,
                const.DELAY: 3,
                const.TIMEOUT: 2,
                const.MAX_RETRIES: 2,
                const.MAX_RETRIES_DOWN: 2,
                const.ADMIN_STATE_UP: True,
            }

        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # Wait for members to adjust to the correct OPERATING_STATUS
        # (member1 passes the HM, member2's monitor port fails it)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member1_id,
                                const.OPERATING_STATUS,
                                const.ONLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                error_ok=True,
                                pool_id=self.pool_ids[protocol])
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member2_id,
                                const.OPERATING_STATUS,
                                const.ERROR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member3_id,
                                const.OPERATING_STATUS,
                                const.OFFLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])

        # Send some traffic and verify it is *unbalanced*, as expected
        self.check_members_balanced(self.lb_vip_address,
                                    protocol_port=protocol_port,
                                    protocol=protocol,
                                    traffic_member_count=1)

        # Delete the healthmonitor
        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        # Wait for members to adjust to the correct OPERATING_STATUS
        # (back to NO_MONITOR now that the HM is gone)
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member1_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member2_id,
                                const.OPERATING_STATUS,
                                const.NO_MONITOR,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])
        waiters.wait_for_status(self.mem_member_client.show_member,
                                member3_id,
                                const.OPERATING_STATUS,
                                const.OFFLINE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout,
                                pool_id=self.pool_ids[protocol])

        # Send some traffic and verify it is balanced again
        self.check_members_balanced(self.lb_vip_address,
                                    protocol_port=protocol_port,
                                    protocol=protocol)
    def _cleanup_obj(self, obj_id, lb_client=None, lb_id=None, parent_id=None):
        """Clean up an object (for use in tempest addClassResourceCleanup).

        We always need to wait for the parent LB to be in a mutable state
        before deleting the child object, and the cleanups will not guarantee
        this if we just pass the delete function to tempest cleanup.
        For example, if we add multiple listeners on the same LB to cleanup,
        tempest will delete the first one and then immediately try to delete
        the second one, which will fail because the LB will be immutable.

        We also need to wait to return until the parent LB is back in a mutable
        state so future tests don't break right at the start.

        This function:
        * Waits until the parent LB is ACTIVE
        * Deletes the object
        * Waits until the parent LB is ACTIVE

        :param obj_id: The object ID to clean up.
        :param lb_client: (Optional) The loadbalancer client, if this isn't the
                          loadbalancer client already.
        :param lb_id: (Optional) The ID of the parent loadbalancer, if the main
                      obj_id is for a sub-object and not a loadbalancer.
        :param parent_id: (Optional) The ID of this object's direct parent,
                          substituted into this client's URI template when the
                          resource lives under another object (e.g. a member
                          under a pool).
        :return: The result of the delete call, or None if the object was
                 already gone before a delete was attempted.
        """
        # Some resource URIs embed their parent's ID (e.g. pool members).
        if parent_id:
            uri = self.uri.format(parent=parent_id)
        else:
            uri = self.uri

        # Decide what to poll while waiting for a mutable state: the parent
        # load balancer (for sub-objects) or the object itself.
        if lb_client and lb_id:
            wait_id = lb_id
            wait_client = lb_client
            wait_func = lb_client.show_loadbalancer
        else:
            wait_id = obj_id
            wait_client = self
            wait_func = self._show_object

        LOG.info("Starting cleanup for %s %s...", self.root_tag, obj_id)

        # Short-circuit if the object is already gone (404) or already
        # marked DELETED -- nothing left to clean up.
        try:
            request_uri = '{0}/{1}'.format(uri, obj_id)
            response, body = self.get(request_uri)
            resp_obj = jsonutils.loads(body.decode('utf-8'))[self.root_tag]
            if (response.status == 404
                    or resp_obj['provisioning_status'] == const.DELETED):
                raise exceptions.NotFound()
        except exceptions.NotFound:
            # Already gone, cleanup complete
            LOG.info("%s %s is already gone. Cleanup considered complete.",
                     self.root_tag, obj_id)
            return

        LOG.info("Waiting for %s %s to be ACTIVE...", wait_client.root_tag,
                 wait_id)
        try:
            waiters.wait_for_status(wait_func, wait_id,
                                    const.PROVISIONING_STATUS, const.ACTIVE,
                                    self.build_interval, self.timeout)
        except exceptions.UnexpectedResponseCode:
            # Status is ERROR, go ahead with deletion
            LOG.debug("Found %s %s in ERROR status, proceeding with cleanup.",
                      wait_client.root_tag, wait_id)
        except exceptions.TimeoutException:
            # Timed out, nothing to be done, let errors happen
            LOG.error("Timeout exceeded waiting to clean up %s %s.",
                      self.root_tag, obj_id)
        except exceptions.NotFound:
            # Already gone, cleanup complete
            LOG.info("%s %s is already gone. Cleanup considered complete.",
                     wait_client.root_tag.capitalize(), wait_id)
            return
        except Exception as e:
            # Log that something weird happens, then let the chips fall
            LOG.error(
                "Cleanup encountered an unknown exception while waiting "
                "for %s %s: %s", wait_client.root_tag, wait_id, e)

        uri = '{0}/{1}'.format(uri, obj_id)
        LOG.info("Cleaning up %s %s...", self.root_tag, obj_id)
        # A 404 on delete is fine -- something else may have removed it.
        return_status = test_utils.call_and_ignore_notfound_exc(
            self.delete, uri)

        # For sub-objects, wait for the parent LB to be mutable again so the
        # next queued cleanup doesn't hit an immutable LB; for top-level
        # objects, wait for the object itself to finish deleting.
        if lb_id and lb_client:
            LOG.info("Waiting for %s %s to be ACTIVE...", wait_client.root_tag,
                     wait_id)
            waiters.wait_for_status(wait_func, wait_id,
                                    const.PROVISIONING_STATUS, const.ACTIVE,
                                    self.build_interval, self.timeout)
        else:
            LOG.info("Waiting for %s %s to be DELETED...",
                     wait_client.root_tag, wait_id)
            waiters.wait_for_deleted_status_or_not_found(
                wait_func, wait_id, const.PROVISIONING_STATUS,
                CONF.load_balancer.check_interval,
                CONF.load_balancer.check_timeout)

        LOG.info("Cleanup complete for %s %s...", self.root_tag, obj_id)
        return return_status
    def test_load_balancer_show_status(self):
        """Tests load balancer show status tree API.

        * Create a load balancer.
        * Verify accounts other than the owner are denied the status tree.
        * Fetch the load balancer status tree.
        * Check the tree contains the expected values.
        """
        name = data_utils.rand_name("lb_member_lb1-status")
        balancer = self.mem_lb_client.create_loadbalancer(
            name=name, vip_network_id=self.lb_member_vip_net[const.ID])
        self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
                                     balancer[const.ID])

        balancer = waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer, balancer[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.lb_build_interval,
            CONF.load_balancer.lb_build_timeout)
        if not CONF.load_balancer.test_with_noop:
            balancer = waiters.wait_for_status(
                self.mem_lb_client.show_loadbalancer, balancer[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.check_interval,
                CONF.load_balancer.check_timeout)

        # A user without the load balancer member role must be rejected
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.get_loadbalancer_status,
                balancer[const.ID])

        # A different user holding the load balancer role must not be able
        # to read this load balancer's status
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            other_client = self.os_roles_lb_member2.loadbalancer_client
            self.assertRaises(exceptions.Forbidden,
                              other_client.get_loadbalancer_status,
                              balancer[const.ID])

        status = self.mem_lb_client.get_loadbalancer_status(
            balancer[const.ID])

        self.assertEqual(1, len(status))
        tree = status[const.LOADBALANCER]
        self.assertEqual(5, len(tree))
        self.assertEqual(balancer[const.ID], tree[const.ID])
        self.assertEqual([], tree[const.LISTENERS])
        self.assertEqual(name, tree[const.NAME])
        # Operating status is a measured status, so the no-op driver will
        # never report ONLINE
        expected_op_status = (const.OFFLINE
                              if CONF.load_balancer.test_with_noop
                              else const.ONLINE)
        self.assertEqual(expected_op_status, tree[const.OPERATING_STATUS])
        self.assertEqual(const.ACTIVE, tree[const.PROVISIONING_STATUS])

        # Best-effort early delete so one full test run doesn't pile up 10+
        # amphorae before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(balancer[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, balancer[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
    def _test_load_balancer_create(self, ip_version):
        """Tests load balancer create and basic show APIs.

        * Tests that users without the load balancer member role cannot
          create load balancers.
        * Create a fully populated load balancer.
        * Show load balancer details.
        * Validate the show reflects the requested values.

        :param ip_version: IP version (4 or 6) selecting which member VIP
                           network the assertions check against.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-create-"
                                       "ipv{}".format(ip_version))
        lb_description = data_utils.arbitrary_string(size=255)

        lb_kwargs = {
            const.ADMIN_STATE_UP:
            True,
            const.DESCRIPTION:
            lb_description,
            const.PROVIDER:
            CONF.load_balancer.provider,
            # TODO(johnsom) Fix test to use a real flavor
            # flavor=lb_flavor,
            # TODO(johnsom) Add QoS
            # vip_qos_policy_id=lb_qos_policy_id)
            const.NAME:
            lb_name
        }

        self._setup_lb_network_kwargs(lb_kwargs, ip_version, use_fixed_ip=True)

        # Test that a user without the load balancer role cannot
        # create a load balancer
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.create_loadbalancer,
                **lb_kwargs)

        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)

        self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
                                     lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)
        if not CONF.load_balancer.test_with_noop:
            lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                         lb[const.ID], const.OPERATING_STATUS,
                                         const.ONLINE,
                                         CONF.load_balancer.check_interval,
                                         CONF.load_balancer.check_timeout)

        self.assertTrue(lb[const.ADMIN_STATE_UP])
        parser.parse(lb[const.CREATED_AT])
        parser.parse(lb[const.UPDATED_AT])
        self.assertEqual(lb_description, lb[const.DESCRIPTION])
        UUID(lb[const.ID])
        self.assertEqual(lb_name, lb[const.NAME])
        # Operating status is a measured status, so no-op will not go online
        if CONF.load_balancer.test_with_noop:
            self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.ONLINE, lb[const.OPERATING_STATUS])
        # The VIP network checks apply in all modes. (They were previously
        # mis-indented under the non-noop branch above and silently skipped
        # when test_with_noop was enabled.)
        if ip_version == 4:
            self.assertEqual(self.lb_member_vip_net[const.ID],
                             lb[const.VIP_NETWORK_ID])
        else:
            self.assertEqual(self.lb_member_vip_ipv6_net[const.ID],
                             lb[const.VIP_NETWORK_ID])

        self.assertEqual(self.os_roles_lb_member.credentials.project_id,
                         lb[const.PROJECT_ID])
        self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
        self.assertIsNotNone(lb[const.VIP_PORT_ID])
        if lb_kwargs[const.VIP_SUBNET_ID]:
            self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
                             lb[const.VIP_ADDRESS])
            self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                             lb[const.VIP_SUBNET_ID])

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
    def test_load_balancer_update(self):
        """Tests load balancer update and show APIs.

        * Create a fully populated load balancer.
        * Show load balancer details.
        * Validate the show reflects the initial values.
        * Validates that other accounts cannot update the load balancer.
        * Update the load balancer details.
        * Show load balancer details.
        * Validate the show reflects the updated values.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-update")
        lb_description = data_utils.arbitrary_string(size=255)

        # Created disabled (admin_state_up=False) so the update below can
        # flip it on and the change is observable.
        lb_kwargs = {
            const.ADMIN_STATE_UP:
            False,
            const.DESCRIPTION:
            lb_description,
            const.PROVIDER:
            CONF.load_balancer.provider,
            # TODO(johnsom) Fix test to use a real flavor
            # flavor=lb_flavor,
            # TODO(johnsom) Add QoS
            # vip_qos_policy_id=lb_qos_policy_id)
            const.NAME:
            lb_name
        }

        self._setup_lb_network_kwargs(lb_kwargs, 4, use_fixed_ip=True)

        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)

        self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
                                     lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        self.assertFalse(lb[const.ADMIN_STATE_UP])
        parser.parse(lb[const.CREATED_AT])
        parser.parse(lb[const.UPDATED_AT])
        self.assertEqual(lb_description, lb[const.DESCRIPTION])
        UUID(lb[const.ID])
        self.assertEqual(lb_name, lb[const.NAME])
        # Disabled load balancers report OFFLINE
        self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
        self.assertEqual(self.os_roles_lb_member.credentials.project_id,
                         lb[const.PROJECT_ID])
        self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
        self.assertEqual(self.lb_member_vip_net[const.ID],
                         lb[const.VIP_NETWORK_ID])
        self.assertIsNotNone(lb[const.VIP_PORT_ID])
        if lb_kwargs[const.VIP_SUBNET_ID]:
            self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
                             lb[const.VIP_ADDRESS])
            self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                             lb[const.VIP_SUBNET_ID])

        new_name = data_utils.rand_name("lb_member_lb1-update")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')

        # Test that a user, without the load balancer member role, cannot
        # use this command
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.loadbalancer_client.update_loadbalancer,
                lb[const.ID],
                admin_state_up=True)

        # Assert we didn't go into PENDING_*
        lb_check = self.mem_lb_client.show_loadbalancer(lb[const.ID])
        self.assertEqual(const.ACTIVE, lb_check[const.PROVISIONING_STATUS])
        self.assertFalse(lb_check[const.ADMIN_STATE_UP])

        # Test that a different user, with the load balancer member role,
        # cannot update this load balancer
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            member2_client = self.os_roles_lb_member2.loadbalancer_client
            self.assertRaises(exceptions.Forbidden,
                              member2_client.update_loadbalancer,
                              lb[const.ID],
                              admin_state_up=True)

        # Assert we didn't go into PENDING_*
        lb_check = self.mem_lb_client.show_loadbalancer(lb[const.ID])
        self.assertEqual(const.ACTIVE, lb_check[const.PROVISIONING_STATUS])
        self.assertFalse(lb_check[const.ADMIN_STATE_UP])

        lb = self.mem_lb_client.update_loadbalancer(
            lb[const.ID],
            admin_state_up=True,
            description=new_description,
            # TODO(johnsom) Add QoS
            # vip_qos_policy_id=lb_qos_policy_id)
            name=new_name)

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        self.assertTrue(lb[const.ADMIN_STATE_UP])
        self.assertEqual(new_description, lb[const.DESCRIPTION])
        self.assertEqual(new_name, lb[const.NAME])
        # TODO(johnsom) Add QoS

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
# Esempio n. 13 (example separator from the source aggregator; score: 0)
    def test_member_CRUD(self):
        """Tests member create, read, update, delete

        * Create a fully populated member.
        * Show member details.
        * Update the member.
        * Delete the member.
        """
        # Compute the feature/version gates once up front
        supports_2_1 = self.mem_member_client.is_version_supported(
            self.api_version, '2.1')
        hm_enabled = CONF.loadbalancer_feature_enabled.health_monitor_enabled

        # Member create
        name = data_utils.rand_name("lb_member_member1-CRUD")
        create_kwargs = {
            const.NAME: name,
            const.ADMIN_STATE_UP: True,
            const.POOL_ID: self.pool_id,
            const.ADDRESS: '192.0.2.1',
            const.PROTOCOL_PORT: 80,
            const.WEIGHT: 50,
        }
        if hm_enabled:
            create_kwargs[const.MONITOR_ADDRESS] = '192.0.2.2'
            create_kwargs[const.MONITOR_PORT] = 8080
        if supports_2_1:
            create_kwargs[const.BACKUP] = False
        if self.lb_member_vip_subnet:
            create_kwargs[const.SUBNET_ID] = (
                self.lb_member_vip_subnet[const.ID])

        member = self.mem_member_client.create_member(**create_kwargs)
        self.addCleanup(self.mem_member_client.cleanup_member,
                        member[const.ID],
                        pool_id=self.pool_id,
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        member = waiters.wait_for_status(self.mem_member_client.show_member,
                                         member[const.ID],
                                         const.PROVISIONING_STATUS,
                                         const.ACTIVE,
                                         CONF.load_balancer.build_interval,
                                         CONF.load_balancer.build_timeout,
                                         pool_id=self.pool_id)

        parser.parse(member[const.CREATED_AT])
        parser.parse(member[const.UPDATED_AT])
        UUID(member[const.ID])

        # A fresh member may briefly sit in a transitional live status such
        # as DOWN or MAINT, so poll until it settles on NO_MONITOR.
        member = waiters.wait_for_status(self.mem_member_client.show_member,
                                         member[const.ID],
                                         const.OPERATING_STATUS,
                                         const.NO_MONITOR,
                                         CONF.load_balancer.check_interval,
                                         CONF.load_balancer.check_timeout,
                                         pool_id=self.pool_id)

        # Every requested field should be reflected in the created member
        checked_fields = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
                          const.PROTOCOL_PORT, const.WEIGHT]
        if hm_enabled:
            checked_fields.extend((const.MONITOR_ADDRESS, const.MONITOR_PORT))
        if supports_2_1:
            checked_fields.append(const.BACKUP)
        if const.SUBNET_ID in create_kwargs:
            checked_fields.append(const.SUBNET_ID)
        else:
            self.assertIsNone(member.get(const.SUBNET_ID))
        for field in checked_fields:
            self.assertEqual(create_kwargs[field], member[field])

        # Member update
        new_name = data_utils.rand_name("lb_member_member1-update")
        update_kwargs = {
            const.POOL_ID: create_kwargs[const.POOL_ID],
            const.NAME: new_name,
            const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
            const.WEIGHT: member[const.WEIGHT] + 1,
        }
        if supports_2_1:
            update_kwargs[const.BACKUP] = not member[const.BACKUP]
        if hm_enabled:
            update_kwargs[const.MONITOR_ADDRESS] = '192.0.2.3'
            update_kwargs[const.MONITOR_PORT] = (
                member[const.MONITOR_PORT] + 1)

        member = self.mem_member_client.update_member(member[const.ID],
                                                      **update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        member = waiters.wait_for_status(self.mem_member_client.show_member,
                                         member[const.ID],
                                         const.PROVISIONING_STATUS,
                                         const.ACTIVE,
                                         CONF.load_balancer.build_interval,
                                         CONF.load_balancer.build_timeout,
                                         pool_id=self.pool_id)

        # Fields the update should have changed
        changed_fields = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT]
        if hm_enabled:
            changed_fields.extend((const.MONITOR_ADDRESS, const.MONITOR_PORT))
        if supports_2_1:
            changed_fields.append(const.BACKUP)
        for field in changed_fields:
            self.assertEqual(update_kwargs[field], member[field])

        # Fields the update should have left untouched
        unchanged_fields = [const.ADDRESS, const.PROTOCOL_PORT]
        if const.SUBNET_ID in create_kwargs:
            unchanged_fields.append(const.SUBNET_ID)
        else:
            self.assertIsNone(member.get(const.SUBNET_ID))
        for field in unchanged_fields:
            self.assertEqual(create_kwargs[field], member[field])

        # Member delete
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
        self.mem_member_client.delete_member(member[const.ID],
                                             pool_id=self.pool_id)

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_member_client.show_member,
            member[const.ID],
            const.PROVISIONING_STATUS,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout,
            pool_id=self.pool_id)
# Esempio n. 14 (example separator from the source aggregator; score: 0)
    def test_l7policy_CRUD(self):
        """Tests l7policy create, read, update, delete

        * Create a fully populated l7policy.
        * Show l7policy details.
        * Update the l7policy.
        * Delete the l7policy.
        """

        # L7Policy create
        policy_name = data_utils.rand_name("lb_member_l7policy1-CRUD")
        policy_description = data_utils.arbitrary_string(size=255)
        create_kwargs = {
            const.LISTENER_ID: self.listener_id,
            const.NAME: policy_name,
            const.DESCRIPTION: policy_description,
            const.ADMIN_STATE_UP: False,
            const.POSITION: 1,
            const.ACTION: const.REDIRECT_TO_POOL,
            const.REDIRECT_POOL_ID: self.pool_id,
        }

        policy = self.mem_l7policy_client.create_l7policy(**create_kwargs)
        self.addCleanup(self.mem_l7policy_client.cleanup_l7policy,
                        policy[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        policy = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, policy[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        self.assertEqual(policy_name, policy[const.NAME])
        self.assertEqual(policy_description, policy[const.DESCRIPTION])
        self.assertFalse(policy[const.ADMIN_STATE_UP])
        parser.parse(policy[const.CREATED_AT])
        parser.parse(policy[const.UPDATED_AT])
        UUID(policy[const.ID])
        # A disabled (admin_state_up=False) policy reports OFFLINE
        self.assertEqual(const.OFFLINE, policy[const.OPERATING_STATUS])
        self.assertEqual(self.listener_id, policy[const.LISTENER_ID])
        self.assertEqual(1, policy[const.POSITION])
        self.assertEqual(const.REDIRECT_TO_POOL, policy[const.ACTION])
        self.assertEqual(self.pool_id, policy[const.REDIRECT_POOL_ID])
        self.assertIsNone(policy.pop(const.REDIRECT_URL, None))

        # L7Policy update
        new_name = data_utils.rand_name("lb_member_l7policy1-update")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        redirect_url = 'http://localhost'
        update_kwargs = {
            const.NAME: new_name,
            const.DESCRIPTION: new_description,
            const.ADMIN_STATE_UP: True,
            const.POSITION: 2,
            const.ACTION: const.REDIRECT_TO_URL,
            const.REDIRECT_URL: redirect_url,
        }
        policy = self.mem_l7policy_client.update_l7policy(
            policy[const.ID], **update_kwargs)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        policy = waiters.wait_for_status(
            self.mem_l7policy_client.show_l7policy, policy[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        self.assertEqual(new_name, policy[const.NAME])
        self.assertEqual(new_description, policy[const.DESCRIPTION])
        self.assertTrue(policy[const.ADMIN_STATE_UP])
        # An enabled l7policy reports ONLINE
        self.assertEqual(const.ONLINE, policy[const.OPERATING_STATUS])
        self.assertEqual(self.listener_id, policy[const.LISTENER_ID])
        # The API recalculates the position back to 1
        self.assertEqual(1, policy[const.POSITION])
        self.assertEqual(const.REDIRECT_TO_URL, policy[const.ACTION])
        self.assertEqual(redirect_url, policy[const.REDIRECT_URL])
        self.assertIsNone(policy.pop(const.REDIRECT_POOL_ID, None))

        # L7Policy delete
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
        self.mem_l7policy_client.delete_l7policy(policy[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_l7policy_client.show_l7policy, policy[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
# Esempio n. 15 (example separator from the source aggregator; score: 0)
    def test_listener_CRUD(self):
        """Tests listener create, read, update, delete

        * Create a fully populated listener.
        * Show listener details.
        * Update the listener.
        * Delete the listener.
        """

        # Listener create
        listener_name = data_utils.rand_name("lb_member_listener1-CRUD")
        # 255-character description exercises a long description value.
        listener_description = data_utils.arbitrary_string(size=255)
        listener_kwargs = {
            const.NAME: listener_name,
            const.DESCRIPTION: listener_description,
            # Created administratively down; flipped to True in the update
            # phase below.
            const.ADMIN_STATE_UP: False,
            const.PROTOCOL: self.protocol,
            const.PROTOCOL_PORT: 80,
            const.LOADBALANCER_ID: self.lb_id,
            const.CONNECTION_LIMIT: 200,
            const.INSERT_HEADERS: {
                const.X_FORWARDED_FOR: "true",
                const.X_FORWARDED_PORT: "true"
            },
            const.DEFAULT_POOL_ID: self.pool1_id,
            # TODO(rm_work): need to finish the rest of this stuff
            # const.DEFAULT_TLS_CONTAINER_REF: '',
            # const.SNI_CONTAINER_REFS: [],
        }
        # The listener timeout fields are only accepted from API
        # version 2.1 onward, so gate them on the running API version.
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            listener_kwargs.update({
                const.TIMEOUT_CLIENT_DATA: 1000,
                const.TIMEOUT_MEMBER_CONNECT: 1000,
                const.TIMEOUT_MEMBER_DATA: 1000,
                const.TIMEOUT_TCP_INSPECT: 50,
            })

        listener = self.mem_listener_client.create_listener(**listener_kwargs)
        # Register cleanup immediately so the listener is removed even if
        # an assertion below fails.
        self.addCleanup(self.mem_listener_client.cleanup_listener,
                        listener[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        # Wait for the parent load balancer, then the listener itself,
        # to reach the ACTIVE provisioning status.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        listener = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # Validate every field requested at create time was honored.
        self.assertEqual(listener_name, listener[const.NAME])
        self.assertEqual(listener_description, listener[const.DESCRIPTION])
        self.assertFalse(listener[const.ADMIN_STATE_UP])
        # created_at/updated_at must be parseable timestamps and the ID a
        # well-formed UUID.
        parser.parse(listener[const.CREATED_AT])
        parser.parse(listener[const.UPDATED_AT])
        UUID(listener[const.ID])
        # Operating status will be OFFLINE while admin_state_up = False
        self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, listener[const.PROTOCOL])
        self.assertEqual(80, listener[const.PROTOCOL_PORT])
        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
        # The API returns the insert-header flags as strings; normalize
        # them through bool_from_string before asserting.
        insert_headers = listener[const.INSERT_HEADERS]
        self.assertTrue(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
        self.assertTrue(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
        self.assertEqual(self.pool1_id, listener[const.DEFAULT_POOL_ID])
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])

        # Listener update
        new_name = data_utils.rand_name("lb_member_listener1-update")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        listener_update_kwargs = {
            const.NAME: new_name,
            const.DESCRIPTION: new_description,
            const.ADMIN_STATE_UP: True,
            const.CONNECTION_LIMIT: 400,
            const.INSERT_HEADERS: {
                const.X_FORWARDED_FOR: "false",
                const.X_FORWARDED_PORT: "false"
            },
            const.DEFAULT_POOL_ID: self.pool2_id,
            # TODO(rm_work): need to finish the rest of this stuff
            # const.DEFAULT_TLS_CONTAINER_REF: '',
            # const.SNI_CONTAINER_REFS: [],
        }
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            listener_update_kwargs.update({
                const.TIMEOUT_CLIENT_DATA: 2000,
                const.TIMEOUT_MEMBER_CONNECT: 2000,
                const.TIMEOUT_MEMBER_DATA: 2000,
                const.TIMEOUT_TCP_INSPECT: 100,
            })

        listener = self.mem_listener_client.update_listener(
            listener[const.ID], **listener_update_kwargs)

        # Wait for the LB and listener to settle after the update before
        # checking the results.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        listener = waiters.wait_for_status(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        if not CONF.load_balancer.test_with_noop:
            listener = waiters.wait_for_status(
                self.mem_listener_client.show_listener, listener[const.ID],
                const.OPERATING_STATUS, const.ONLINE,
                CONF.load_balancer.build_interval,
                CONF.load_balancer.build_timeout)

        self.assertEqual(new_name, listener[const.NAME])
        self.assertEqual(new_description, listener[const.DESCRIPTION])
        self.assertTrue(listener[const.ADMIN_STATE_UP])
        # Operating status is a measured status, so no-op will not go online
        if CONF.load_balancer.test_with_noop:
            self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
        else:
            self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, listener[const.PROTOCOL])
        self.assertEqual(80, listener[const.PROTOCOL_PORT])
        self.assertEqual(400, listener[const.CONNECTION_LIMIT])
        insert_headers = listener[const.INSERT_HEADERS]
        self.assertFalse(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
        self.assertFalse(
            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
        self.assertEqual(self.pool2_id, listener[const.DEFAULT_POOL_ID])
        if self.mem_listener_client.is_version_supported(
                self.api_version, '2.1'):
            self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
            self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])

        # Listener delete
        # Make sure the LB is stable before issuing the delete, then
        # confirm the listener reaches DELETED (or 404s).
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
        self.mem_listener_client.delete_listener(listener[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_listener_client.show_listener, listener[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
    def test_healthmonitor_CRUD(self):
        """Tests healthmonitor create, read, update, delete

        * Create a fully populated healthmonitor.
        * Show healthmonitor details.
        * Update the healthmonitor.
        * Delete the healthmonitor.
        """
        # Healthmonitor create
        hm_name = data_utils.rand_name("lb_member_hm1-CRUD")
        hm_kwargs = {
            const.POOL_ID: self.pool_id,
            const.NAME: hm_name,
            const.TYPE: const.HEALTH_MONITOR_HTTP,
            const.DELAY: 2,
            const.TIMEOUT: 2,
            const.MAX_RETRIES: 2,
            const.MAX_RETRIES_DOWN: 2,
            const.HTTP_METHOD: const.GET,
            const.URL_PATH: '/',
            const.EXPECTED_CODES: '200',
            const.ADMIN_STATE_UP: True,
        }

        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
        # Register cleanup immediately so the healthmonitor is removed
        # even if an assertion below fails.
        self.addCleanup(self.mem_healthmonitor_client.cleanup_healthmonitor,
                        hm[const.ID],
                        lb_client=self.mem_lb_client,
                        lb_id=self.lb_id)

        # Wait for the parent load balancer, then the healthmonitor, to
        # reach the ACTIVE provisioning status.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # created_at/updated_at must be parseable timestamps and the ID a
        # well-formed UUID.
        parser.parse(hm[const.CREATED_AT])
        parser.parse(hm[const.UPDATED_AT])
        UUID(hm[const.ID])
        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])

        # Every field requested at create time must be reflected back.
        equal_items = [
            const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
            const.MAX_RETRIES, const.MAX_RETRIES_DOWN, const.HTTP_METHOD,
            const.URL_PATH, const.EXPECTED_CODES, const.ADMIN_STATE_UP
        ]

        for item in equal_items:
            self.assertEqual(hm_kwargs[item], hm[item])

        # Healthmonitor update
        # Change every mutable field relative to the create values so a
        # stale read cannot pass the checks below.
        new_name = data_utils.rand_name("lb_member_hm1-update")
        hm_update_kwargs = {
            const.NAME: new_name,
            const.DELAY: hm_kwargs[const.DELAY] + 1,
            const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
            const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
            const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
            const.HTTP_METHOD: const.POST,
            const.URL_PATH: '/test',
            const.EXPECTED_CODES: '201,202',
            const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
        }
        hm = self.mem_healthmonitor_client.update_healthmonitor(
            hm[const.ID], **hm_update_kwargs)

        # Wait for the LB and healthmonitor to settle after the update.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)
        hm = waiters.wait_for_status(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # Test changed items
        equal_items = [
            const.NAME, const.DELAY, const.TIMEOUT, const.MAX_RETRIES,
            const.MAX_RETRIES_DOWN, const.HTTP_METHOD, const.URL_PATH,
            const.EXPECTED_CODES, const.ADMIN_STATE_UP
        ]

        for item in equal_items:
            self.assertEqual(hm_update_kwargs[item], hm[item])

        # Test unchanged items
        equal_items = [const.TYPE]
        for item in equal_items:
            self.assertEqual(hm_kwargs[item], hm[item])

        # Healthmonitor delete
        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
    def test_l7rule_delete(self):
        """Tests l7rule create and delete APIs.

        * Creates a l7rule.
        * Validates that other accounts cannot delete the l7rule
        * Deletes the l7rule.
        * Validates the l7rule is in the DELETED state.
        """
        # Create an l7rule under the class-level l7policy and register
        # its cleanup right away.
        rule_args = {
            const.L7POLICY_ID: self.l7policy_id,
            const.TYPE: const.HEADER,
            const.VALUE: 'myvalue-delete',
            const.COMPARE_TYPE: const.EQUAL_TO,
            const.KEY: 'mykey-delete',
        }
        rule = self.mem_l7rule_client.create_l7rule(**rule_args)
        self.addClassResourceCleanup(self.mem_l7rule_client.cleanup_l7rule,
                                     rule[const.ID],
                                     l7policy_id=self.l7policy_id,
                                     lb_client=self.mem_lb_client,
                                     lb_id=self.lb_id)

        # Let the parent load balancer settle before exercising RBAC.
        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # A user without the load balancer role must not be able to
        # delete this l7rule.
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(exceptions.Forbidden,
                              self.os_primary.l7rule_client.delete_l7rule,
                              rule[const.ID],
                              l7policy_id=self.l7policy_id)

        # A different user that holds the load balancer member role must
        # not be able to delete this l7rule either.
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            other_member = self.os_roles_lb_member2.l7rule_client
            self.assertRaises(exceptions.Forbidden,
                              other_member.delete_l7rule,
                              rule[const.ID],
                              l7policy_id=self.l7policy_id)

        # The owner deletes it; confirm the rule goes away and the load
        # balancer returns to ACTIVE.
        self.mem_l7rule_client.delete_l7rule(rule[const.ID],
                                             l7policy_id=self.l7policy_id)

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_l7rule_client.show_l7rule,
            rule[const.ID],
            const.PROVISIONING_STATUS,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout,
            l7policy_id=self.l7policy_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.check_interval,
                                CONF.load_balancer.check_timeout)
    def test_healthmonitor_delete(self):
        """Tests healthmonitor create and delete APIs.

        * Create a clean pool to use for the healthmonitor.
        * Creates a healthmonitor.
        * Validates that other accounts cannot delete the healthmonitor
        * Deletes the healthmonitor.
        * Validates the healthmonitor is in the DELETED state.
        """
        # Build a dedicated pool so deleting the monitor cannot disturb
        # the shared fixtures.
        pool_name = data_utils.rand_name("lb_member_pool1_hm-delete")
        pool_args = {
            const.NAME: pool_name,
            const.PROTOCOL: const.HTTP,
            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
            const.LOADBALANCER_ID: self.lb_id,
        }
        pool = self.mem_pool_client.create_pool(**pool_args)
        self.addCleanup(
            self.mem_pool_client.cleanup_pool, pool[const.ID],
            lb_client=self.mem_lb_client, lb_id=self.lb_id)

        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                self.lb_id, const.PROVISIONING_STATUS,
                                const.ACTIVE,
                                CONF.load_balancer.build_interval,
                                CONF.load_balancer.build_timeout)

        # Create the healthmonitor on that pool.
        hm_name = data_utils.rand_name("lb_member_hm1-delete")
        hm_args = {
            const.POOL_ID: pool[const.ID],
            const.NAME: hm_name,
            const.TYPE: const.HEALTH_MONITOR_TCP,
            const.DELAY: 2,
            const.TIMEOUT: 3,
            const.MAX_RETRIES: 4,
        }
        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_args)
        self.addCleanup(
            self.mem_healthmonitor_client.cleanup_healthmonitor,
            hm[const.ID],
            lb_client=self.mem_lb_client, lb_id=self.lb_id)

        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # A user without the loadbalancer role must not be able to
        # delete this healthmonitor.
        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
            self.assertRaises(
                exceptions.Forbidden,
                self.os_primary.healthmonitor_client.delete_healthmonitor,
                hm[const.ID])

        # A different user that holds the loadbalancer member role must
        # not be able to delete this healthmonitor either.
        if CONF.load_balancer.RBAC_test_type != const.NONE:
            other_member = self.os_roles_lb_member2.healthmonitor_client
            self.assertRaises(exceptions.Forbidden,
                              other_member.delete_healthmonitor,
                              hm[const.ID])

        # The owner deletes it; confirm the monitor goes away and the
        # load balancer returns to ACTIVE.
        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
            const.PROVISIONING_STATUS,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.ACTIVE,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
    def test_load_balancer_failover(self):
        """Tests load balancer failover API.

        * Create a load balancer.
        * Validates that other accounts cannot failover the load balancer
        * Wait for the load balancer to go ACTIVE.
        * Failover the load balancer.
        * Wait for the load balancer to go ACTIVE.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-failover")
        lb = self.mem_lb_client.create_loadbalancer(
            name=lb_name,
            provider=CONF.load_balancer.provider,
            vip_network_id=self.lb_member_vip_net[const.ID])
        self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
                                     lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        # Test RBAC not authorized for non-admin role
        # (the member client's failover must raise Forbidden; only the
        # admin client below may trigger a failover).
        if not CONF.load_balancer.RBAC_test_type == const.NONE:
            self.assertRaises(exceptions.Forbidden,
                              self.mem_lb_client.failover_loadbalancer,
                              lb[const.ID])

        # Assert we didn't go into PENDING_*
        lb = self.mem_lb_client.show_loadbalancer(lb[const.ID])
        self.assertEqual(const.ACTIVE, lb[const.PROVISIONING_STATUS])

        # For amphora-based providers, record the amphora IDs before the
        # failover so we can verify they were all replaced afterwards.
        if CONF.load_balancer.provider in ['amphora', 'octavia']:
            before_amphorae = self.lb_admin_amphora_client.list_amphorae(
                query_params='{loadbalancer_id}={lb_id}'.format(
                    loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))

        self.os_roles_lb_admin.loadbalancer_client.failover_loadbalancer(
            lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        if CONF.load_balancer.provider in ['amphora', 'octavia']:
            after_amphorae = self.lb_admin_amphora_client.list_amphorae(
                query_params='{loadbalancer_id}={lb_id}'.format(
                    loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))

            # Make sure all of the amphora on the load balancer have
            # failed over (no pre-failover amphora ID may still appear).
            for amphora in before_amphorae:
                for new_amp in after_amphorae:
                    self.assertNotEqual(amphora[const.ID], new_amp[const.ID])

        # Attempt to clean up so that one full test run doesn't start 10+
        # amps before the cleanup phase fires
        # (best effort: failures here are ignored; the cleanup handler
        # registered above is the safety net).
        try:
            self.mem_lb_client.delete_loadbalancer(lb[const.ID])

            waiters.wait_for_deleted_status_or_not_found(
                self.mem_lb_client.show_loadbalancer, lb[const.ID],
                const.PROVISIONING_STATUS,
                CONF.load_balancer.lb_build_interval,
                CONF.load_balancer.lb_build_timeout)
        except Exception:
            pass
    def _test_pool_CRUD(self, has_listener):
        """Tests pool create, read, update, delete

        * Create a fully populated pool.
        * Show pool details.
        * Update the pool.
        * Delete the pool.

        :param has_listener: When True the pool is created attached to
            self.listener_id; otherwise it is created directly on the
            load balancer (self.lb_id).
        """
        # Pool create
        pool_name = data_utils.rand_name("lb_member_pool1-CRUD")
        pool_description = data_utils.arbitrary_string(size=255)
        pool_sp_cookie_name = 'my_cookie'
        pool_kwargs = {
            const.NAME: pool_name,
            const.DESCRIPTION: pool_description,
            # Created administratively down; flipped to True in the
            # update phase below.
            const.ADMIN_STATE_UP: False,
            const.PROTOCOL: self.protocol,
            const.LB_ALGORITHM: self.lb_algorithm,
        }
        # Only request session persistence when the provider under test
        # advertises support for it.
        if self.lb_feature_enabled.session_persistence_enabled:
            pool_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
                const.COOKIE_NAME: pool_sp_cookie_name,
            }
        if has_listener:
            pool_kwargs[const.LISTENER_ID] = self.listener_id
        else:
            pool_kwargs[const.LOADBALANCER_ID] = self.lb_id

        pool = self.mem_pool_client.create_pool(**pool_kwargs)
        # Register cleanup immediately so the pool is removed even if an
        # assertion below fails.
        self.addCleanup(
            self.mem_pool_client.cleanup_pool,
            pool[const.ID],
            lb_client=self.mem_lb_client, lb_id=self.lb_id)

        # Wait for the parent load balancer, then the pool, to reach the
        # ACTIVE provisioning status.
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer, self.lb_id,
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(
            self.mem_pool_client.show_pool,
            pool[const.ID], const.PROVISIONING_STATUS,
            const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        # Validate every field requested at create time was honored.
        self.assertEqual(pool_name, pool[const.NAME])
        self.assertEqual(pool_description, pool[const.DESCRIPTION])
        self.assertFalse(pool[const.ADMIN_STATE_UP])
        # created_at/updated_at must be parseable timestamps and the ID
        # a well-formed UUID.
        parser.parse(pool[const.CREATED_AT])
        parser.parse(pool[const.UPDATED_AT])
        UUID(pool[const.ID])
        self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
        self.assertEqual(self.protocol, pool[const.PROTOCOL])
        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
        # The listener linkage must match how the pool was created.
        if has_listener:
            self.assertEqual(1, len(pool[const.LISTENERS]))
            self.assertEqual(self.listener_id,
                             pool[const.LISTENERS][0][const.ID])
        else:
            self.assertEmpty(pool[const.LISTENERS])
        self.assertEqual(self.lb_algorithm,
                         pool[const.LB_ALGORITHM])
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
            self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
                             pool[const.SESSION_PERSISTENCE][const.TYPE])
            self.assertEqual(pool_sp_cookie_name,
                             pool[const.SESSION_PERSISTENCE][
                                 const.COOKIE_NAME])

        # Pool update
        new_name = data_utils.rand_name("lb_member_pool1-update")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        pool_update_kwargs = {
            const.NAME: new_name,
            const.DESCRIPTION: new_description,
            const.ADMIN_STATE_UP: True,
        }

        # Only change the algorithm / persistence type when the provider
        # supports the alternate values.
        if self.lb_feature_enabled.pool_algorithms_enabled:
            pool_update_kwargs[const.LB_ALGORITHM] = (
                const.LB_ALGORITHM_LEAST_CONNECTIONS)

        if self.protocol == const.HTTP and (
                self.lb_feature_enabled.session_persistence_enabled):
            pool_update_kwargs[const.SESSION_PERSISTENCE] = {
                const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE}
        pool = self.mem_pool_client.update_pool(
            pool[const.ID], **pool_update_kwargs)

        # Wait for the LB and pool to settle after the update.
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer, self.lb_id,
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)
        pool = waiters.wait_for_status(
            self.mem_pool_client.show_pool,
            pool[const.ID], const.PROVISIONING_STATUS,
            const.ACTIVE,
            CONF.load_balancer.build_interval,
            CONF.load_balancer.build_timeout)

        self.assertEqual(new_name, pool[const.NAME])
        self.assertEqual(new_description, pool[const.DESCRIPTION])
        self.assertTrue(pool[const.ADMIN_STATE_UP])
        if self.lb_feature_enabled.pool_algorithms_enabled:
            self.assertEqual(const.LB_ALGORITHM_LEAST_CONNECTIONS,
                             pool[const.LB_ALGORITHM])
        if self.lb_feature_enabled.session_persistence_enabled:
            self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
            self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
                             pool[const.SESSION_PERSISTENCE][const.TYPE])
            # HTTP_COOKIE persistence carries no app cookie name.
            self.assertIsNone(
                pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME))

        # Pool delete
        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.ACTIVE,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
        self.mem_pool_client.delete_pool(pool[const.ID])

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_pool_client.show_pool, pool[const.ID],
            const.PROVISIONING_STATUS,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)

        waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            self.lb_id, const.PROVISIONING_STATUS,
            const.ACTIVE,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
    def _test_load_balancer_CRUD(self, ip_version):
        """Tests load balancer create, read, update, delete

        * Create a fully populated load balancer.
        * Show load balancer details.
        * Update the load balancer.
        * Delete the load balancer.

        :param ip_version: IP version (4 or 6) selecting which member VIP
            network the expected network ID is validated against.
        """
        lb_name = data_utils.rand_name("lb_member_lb1-CRUD")
        lb_description = data_utils.arbitrary_string(size=255)

        lb_kwargs = {
            # Created administratively down; flipped to True in the
            # update phase below.
            const.ADMIN_STATE_UP: False,
            const.DESCRIPTION: lb_description,
            const.PROVIDER: CONF.load_balancer.provider,
            const.NAME: lb_name
        }

        # Fills in the VIP network kwargs for the requested IP version.
        self._setup_lb_network_kwargs(lb_kwargs, ip_version)

        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
        # Register cleanup immediately so the LB is removed even if an
        # assertion below fails.
        self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb[const.ID])

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        # Validate every field requested at create time was honored.
        self.assertFalse(lb[const.ADMIN_STATE_UP])
        # created_at/updated_at must be parseable timestamps and the ID
        # a well-formed UUID.
        parser.parse(lb[const.CREATED_AT])
        parser.parse(lb[const.UPDATED_AT])
        self.assertEqual(lb_description, lb[const.DESCRIPTION])
        UUID(lb[const.ID])
        self.assertEqual(lb_name, lb[const.NAME])
        # Operating status starts OFFLINE while admin_state_up is False.
        self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
        self.assertEqual(self.os_roles_lb_member.credentials.project_id,
                         lb[const.PROJECT_ID])
        self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
        if ip_version == 4:
            self.assertEqual(self.lb_member_vip_net[const.ID],
                             lb[const.VIP_NETWORK_ID])
        else:
            self.assertEqual(self.lb_member_vip_ipv6_net[const.ID],
                             lb[const.VIP_NETWORK_ID])
        self.assertIsNotNone(lb[const.VIP_PORT_ID])
        # Use .get(): _setup_lb_network_kwargs may not set a VIP subnet
        # ID at all, and a plain [] lookup would raise KeyError instead
        # of skipping this optional check.
        if lb_kwargs.get(const.VIP_SUBNET_ID):
            self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                             lb[const.VIP_SUBNET_ID])

        # Load balancer update
        new_name = data_utils.rand_name("lb_member_lb1-update")
        new_description = data_utils.arbitrary_string(size=255,
                                                      base_text='new')
        lb = self.mem_lb_client.update_loadbalancer(
            lb[const.ID],
            admin_state_up=True,
            description=new_description,
            name=new_name)

        lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     lb[const.ID], const.PROVISIONING_STATUS,
                                     const.ACTIVE,
                                     CONF.load_balancer.lb_build_interval,
                                     CONF.load_balancer.lb_build_timeout)

        self.assertTrue(lb[const.ADMIN_STATE_UP])
        self.assertEqual(new_description, lb[const.DESCRIPTION])
        self.assertEqual(new_name, lb[const.NAME])

        # Load balancer delete
        # cascade=True also removes any child resources in one call.
        self.mem_lb_client.delete_loadbalancer(lb[const.ID], cascade=True)

        waiters.wait_for_deleted_status_or_not_found(
            self.mem_lb_client.show_loadbalancer, lb[const.ID],
            const.PROVISIONING_STATUS, CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)