def setup_for_attempt_operation(self, ip):
    """Create a loadbalancer.

    This is necessary so that the attempt is to show the load-balancer,
    which is an operation that the policy can stop.  Unfortunately,
    whilst octavia has a policy for just listing load-balancers, it
    doesn't work; whereas showing the load-balancer can be stopped.

    NB this only works if the setup phase of the octavia tests has been
    completed.

    :param ip: the IP address of keystone.
    :type ip: str
    """
    logging.info("Setting up loadbalancer.")
    # Build authenticated API clients from the overcloud keystone.
    auth = openstack_utils.get_overcloud_auth(address=ip)
    sess = openstack_utils.get_keystone_session(auth)
    octavia_client = openstack_utils.get_octavia_session_client(sess)
    neutron_client = openstack_utils.get_neutron_session_client(sess)
    # With DVR the VIP must live on the dedicated non-distributed
    # network; otherwise the regular private network is used.
    if openstack_utils.dvr_enabled():
        network_name = 'private_lb_fip_network'
    else:
        network_name = 'private'
    resp = neutron_client.list_networks(name=network_name)
    # Assumes the named network exists with at least one subnet (created
    # by the octavia test setup phase) — TODO confirm.
    vip_subnet_id = resp['networks'][0]['subnets'][0]
    res = octavia_client.load_balancer_create(
        json={
            'loadbalancer': {
                'description': 'Created by Zaza',
                'admin_state_up': True,
                'vip_subnet_id': vip_subnet_id,
                'name': 'zaza-lb-0',
            }})
    # Remember the id so later test phases can show/delete this LB.
    self.lb_id = res['loadbalancer']['id']

    # now wait for it to get to the active state

    @tenacity.retry(wait=tenacity.wait_fixed(1),
                    reraise=True,
                    stop=tenacity.stop_after_delay(900))
    def wait_for_lb_resource(client, resource_id):
        # Poll every second for up to 900s until the LB reports ACTIVE;
        # the assert triggers a tenacity retry until the deadline.
        resp = client.load_balancer_show(resource_id)
        logging.info(resp['provisioning_status'])
        assert resp['provisioning_status'] == 'ACTIVE', (
            'load balancer resource has not reached '
            'expected provisioning status: {}'
            .format(resp))
        return resp

    logging.info('Awaiting loadbalancer to reach provisioning_status '
                 '"ACTIVE"')
    resp = wait_for_lb_resource(octavia_client, self.lb_id)
    logging.info(resp)
    logging.info("Setup loadbalancer complete.")
def test_create_loadbalancer(self): """Create load balancer.""" # Prepare payload instances # First we allow communication to port 80 by adding a security group # rule project_id = openstack_utils.get_project_id(self.keystone_client, 'admin', domain_name='admin_domain') openstack_utils.add_neutron_secgroup_rules(self.neutron_client, project_id, [{ 'protocol': 'tcp', 'port_range_min': '80', 'port_range_max': '80', 'direction': 'ingress' }]) # Then we request two Ubuntu instances with the Apache web server # installed instance_1, instance_2 = self.launch_guests( userdata='#cloud-config\npackages:\n - apache2\n') # Get IP of the prepared payload instances payload_ips = [] for server in (instance_1, instance_2): payload_ips.append(server.networks['private'][0]) self.assertTrue(len(payload_ips) > 0) resp = self.neutron_client.list_networks(name='private') subnet_id = resp['networks'][0]['subnets'][0] if openstack_utils.dvr_enabled(): resp = self.neutron_client.list_networks( name='private_lb_fip_network') vip_subnet_id = resp['networks'][0]['subnets'][0] else: vip_subnet_id = subnet_id for provider in self.get_lb_providers(self.octavia_client).keys(): logging.info( 'Creating loadbalancer with provider {}'.format(provider)) lb = self._create_lb_resources(self.octavia_client, provider, vip_subnet_id, subnet_id, payload_ips) self.loadbalancers.append(lb) lb_fp = openstack_utils.create_floating_ip( self.neutron_client, 'ext_net', port={'id': lb['vip_port_id']}) snippet = 'This is the default welcome page' assert snippet in self._get_payload(lb_fp['floating_ip_address']) logging.info('Found "{}" in page retrieved through load balancer ' ' (provider="{}") at "http://{}/"'.format( snippet, provider, lb_fp['floating_ip_address'])) # If we get here, it means the tests passed self.run_resource_cleanup = True
def test_create_loadbalancer(self):
    """Create load balancer.

    For every Octavia provider available, build a loadbalancer fronting
    the already-running payload instances and check the default web
    page is reachable through a floating IP attached to the LB VIP.
    """
    nova_client = openstack_utils.get_nova_session_client(
        self.keystone_session)

    # Collect the private addresses of the prepared payload instances.
    payload_ips = [guest.networks['private'][0]
                   for guest in nova_client.servers.list()]
    self.assertTrue(len(payload_ips) > 0)

    networks = self.neutron_client.list_networks(name='private')
    subnet_id = networks['networks'][0]['subnets'][0]
    if not openstack_utils.dvr_enabled():
        vip_subnet_id = subnet_id
    else:
        # DVR: the VIP must sit on the dedicated centralized network.
        networks = self.neutron_client.list_networks(
            name='private_lb_fip_network')
        vip_subnet_id = networks['networks'][0]['subnets'][0]

    snippet = 'This is the default welcome page'
    for provider in self.get_lb_providers(self.octavia_client):
        logging.info(
            'Creating loadbalancer with provider {}'.format(provider))
        lb = self._create_lb_resources(self.octavia_client, provider,
                                       vip_subnet_id, subnet_id,
                                       payload_ips)
        self.loadbalancers.append(lb)
        fip = openstack_utils.create_floating_ip(
            self.neutron_client, 'ext_net',
            port={'id': lb['vip_port_id']})
        fip_address = fip['floating_ip_address']
        # The page served by the payload instances must be visible
        # through the loadbalancer.
        assert snippet in self._get_payload(fip_address)
        logging.info('Found "{}" in page retrieved through load balancer '
                     ' (provider="{}") at "http://{}/"'.format(
                         snippet, provider, fip_address))

    # If we get here, it means the tests passed
    self.run_tearDown = True
def test_create_loadbalancer(self):
    """Create load balancer.

    End-to-end construction of an Octavia loadbalancer: LB, listener,
    pool, health monitor and one member per payload instance, then
    verification that the page served by the payload instances is
    reachable through a floating IP attached to the LB VIP.
    """
    keystone_session = openstack_utils.get_overcloud_keystone_session()
    neutron_client = openstack_utils.get_neutron_session_client(
        keystone_session)
    nova_client = openstack_utils.get_nova_session_client(keystone_session)

    # Get IP of the prepared payload instances
    payload_ips = []
    for server in nova_client.servers.list():
        payload_ips.append(server.networks['private'][0])
    self.assertTrue(len(payload_ips) > 0)

    resp = neutron_client.list_networks(name='private')
    subnet_id = resp['networks'][0]['subnets'][0]
    # With DVR the VIP must live on the dedicated non-distributed
    # network; the members stay on the distributed private network.
    if openstack_utils.dvr_enabled():
        resp = neutron_client.list_networks(name='private_lb_fip_network')
        vip_subnet_id = resp['networks'][0]['subnets'][0]
    else:
        vip_subnet_id = subnet_id

    octavia_client = openstack_utils.get_octavia_session_client(
        keystone_session)
    result = octavia_client.load_balancer_create(
        json={
            'loadbalancer': {
                'description': 'Created by Zaza',
                'admin_state_up': True,
                'vip_subnet_id': vip_subnet_id,
                'name': 'zaza-lb-0',
            }
        })
    lb_id = result['loadbalancer']['id']
    lb_vip_port_id = result['loadbalancer']['vip_port_id']

    @tenacity.retry(wait=tenacity.wait_fixed(1),
                    reraise=True,
                    stop=tenacity.stop_after_delay(900))
    def wait_for_lb_resource(octavia_show_func, resource_id,
                             operating_status=None):
        # Poll every second for up to 900s; a failed assert triggers a
        # tenacity retry until the resource is ACTIVE (and, when
        # requested, has the given operating_status).
        resp = octavia_show_func(resource_id)
        logging.info(resp['provisioning_status'])
        assert resp['provisioning_status'] == 'ACTIVE', (
            'load balancer resource has not reached '
            'expected provisioning status: {}'.format(resp))
        if operating_status:
            logging.info(resp['operating_status'])
            assert resp['operating_status'] == operating_status, (
                'load balancer resource has not reached '
                'expected operating status: {}'.format(resp))
        return resp

    logging.info('Awaiting loadbalancer to reach provisioning_status '
                 '"ACTIVE"')
    resp = wait_for_lb_resource(octavia_client.load_balancer_show, lb_id)
    logging.info(resp)

    # HTTP listener on port 80 in front of the loadbalancer.
    result = octavia_client.listener_create(
        json={
            'listener': {
                'loadbalancer_id': lb_id,
                'name': 'listener1',
                'protocol': 'HTTP',
                'protocol_port': 80
            },
        })
    listener_id = result['listener']['id']
    logging.info('Awaiting listener to reach provisioning_status '
                 '"ACTIVE"')
    resp = wait_for_lb_resource(octavia_client.listener_show, listener_id)
    logging.info(resp)

    # Round-robin pool attached to the listener.
    result = octavia_client.pool_create(
        json={
            'pool': {
                'listener_id': listener_id,
                'name': 'pool1',
                'lb_algorithm': 'ROUND_ROBIN',
                'protocol': 'HTTP',
            },
        })
    pool_id = result['pool']['id']
    logging.info('Awaiting pool to reach provisioning_status '
                 '"ACTIVE"')
    resp = wait_for_lb_resource(octavia_client.pool_show, pool_id)
    logging.info(resp)

    # HTTP health monitor probing '/' on the pool members.
    result = octavia_client.health_monitor_create(
        json={
            'healthmonitor': {
                'pool_id': pool_id,
                'delay': 5,
                'max_retries': 4,
                'timeout': 10,
                'type': 'HTTP',
                'url_path': '/',
            },
        })
    healthmonitor_id = result['healthmonitor']['id']
    logging.info('Awaiting healthmonitor to reach provisioning_status '
                 '"ACTIVE"')
    resp = wait_for_lb_resource(octavia_client.health_monitor_show,
                                healthmonitor_id)
    logging.info(resp)

    # Register each payload instance as a pool member and wait until
    # the health monitor reports it ONLINE.
    for ip in payload_ips:
        result = octavia_client.member_create(
            pool_id=pool_id,
            json={
                'member': {
                    'subnet_id': subnet_id,
                    'address': ip,
                    'protocol_port': 80,
                },
            })
        member_id = result['member']['id']
        logging.info('Awaiting member to reach provisioning_status '
                     '"ACTIVE"')
        # member_show needs the pool id as well, hence the lambda
        # adapter around the single-argument show interface.
        resp = wait_for_lb_resource(
            lambda x: octavia_client.member_show(
                pool_id=pool_id, member_id=x),
            member_id,
            operating_status='ONLINE')
        logging.info(resp)

    lb_fp = openstack_utils.create_floating_ip(
        neutron_client, 'ext_net', port={'id': lb_vip_port_id})

    @tenacity.retry(wait=tenacity.wait_fixed(1),
                    reraise=True,
                    stop=tenacity.stop_after_delay(900))
    def get_payload():
        # Fetch the page through the LB floating IP, retrying until the
        # data path is fully wired up.
        return subprocess.check_output(
            ['wget', '-O', '-',
             'http://{}/'.format(lb_fp['floating_ip_address'])],
            universal_newlines=True)

    # The stock Apache index page contains this snippet — presumably
    # the payload instances run Apache; verify against setup phase.
    snippet = 'This is the default welcome page'
    assert snippet in get_payload()
    logging.info('Found "{}" in page retrieved through load balancer at '
                 '"http://{}/"'.format(snippet,
                                       lb_fp['floating_ip_address']))
def centralized_fip_network():
    """Create network with centralized router for connecting lb and fips.

    There are currently a few outstanding upstream issues with connecting
    an Octavia loadbalancer to the outside world through a Floating IP
    when used in conjunction with Neutron DVR [0][1][2][3][4][5].

    Although there are some fixes provided in the referenced material,
    the current implementation still shows issues and appears to limit
    how we can model a DVR deployment.

    An approach to work around this is to create a separate
    non-distributed network for hosting the load balancer VIP and
    connecting it to a FIP.  The payload- and loadbalancer- instances
    can stay in a distributed network; only the VIP must be in a
    non-distributed network.  (Although the actual hosting of said
    router can be on a compute host acting as a "centralized" snat
    router in a DVR deployment.)

    0: https://bit.ly/30LgX4T
    1: https://bugs.launchpad.net/neutron/+bug/1583694
    2: https://bugs.launchpad.net/neutron/+bug/1667877
    3: https://review.opendev.org/#/c/437970/
    4: https://review.opendev.org/#/c/437986/
    5: https://review.opendev.org/#/c/466434/
    """
    # Only relevant when Neutron runs in DVR mode.
    if not openstack.dvr_enabled():
        logging.info('DVR not enabled, skip.')
        return

    session = openstack.get_overcloud_keystone_session()
    neutron = openstack.get_neutron_session_client(session)

    # Dedicated network to host the loadbalancer VIP.
    net_resp = neutron.create_network(
        {'network': {'name': 'private_lb_fip_network'}})
    lb_network = net_resp['network']

    # One IPv4 subnet on that network for VIP allocation.
    subnet_body = {
        'name': 'private_lb_fip_subnet',
        'network_id': lb_network['id'],
        'ip_version': 4,
        'cidr': '10.42.0.0/24',
    }
    subnet_resp = neutron.create_subnet({'subnets': [subnet_body]})
    lb_subnet = subnet_resp['subnets'][0]

    # Centralized (non-distributed) router gatewayed onto the external
    # network, providing the FIP path for the VIP.
    router_body = {
        'name': 'lb_fip_router',
        'external_gateway_info': {
            'network_id': openstack.get_net_uuid(neutron, 'ext_net'),
        },
        'distributed': False,
    }
    router_resp = neutron.create_router({'router': router_body})
    lb_router = router_resp['router']
    neutron.add_interface_router(lb_router['id'],
                                 {'subnet_id': lb_subnet['id']})