def _update_listener_acls(self, loadbalancer, listener_id, allowed_cidrs):
    """Update a listener's allowed CIDRs (and admin state) in Octavia.

    :param loadbalancer: load balancer object the listener belongs to
    :param listener_id: ID of the listener to update
    :param allowed_cidrs: list of allowed CIDRs; ``None`` means world
                          accessible, ``[]`` blocks all traffic
    :raises k_exc.ResourceNotReady: if the listener update request fails
    """
    admin_state_up = True
    if allowed_cidrs is None:
        # World accessible, no restriction on the listeners
        pass
    elif len(allowed_cidrs) == 0:
        # Prevent any traffic as no CIDR is allowed
        admin_state_up = False

    request = {
        'allowed_cidrs': allowed_cidrs,
        'admin_state_up': admin_state_up,
    }

    # Wait for the loadbalancer to be ACTIVE
    if not self._wait_for_provisioning(
            loadbalancer, _ACTIVATION_TIMEOUT,
            _LB_STS_POLL_FAST_INTERVAL):
        LOG.debug('Skipping ACLs update. '
                  'No Load Balancer Provisioned.')
        return

    lbaas = clients.get_loadbalancer_client()
    try:
        lbaas.update_listener(listener_id, **request)
    except os_exc.SDKException:
        # Lazy %-style args instead of eager string interpolation, matching
        # the logging convention used elsewhere in this module.
        LOG.error('Error when updating listener %s', listener_id)
        raise k_exc.ResourceNotReady(listener_id)
def release_loadbalancer(self, loadbalancer):
    """Delete a load balancer (cascading if supported) and its listeners SG.

    :param loadbalancer: load balancer object to release

    The security group deletion is best-effort: failures are logged and
    the group is left orphaned rather than failing the release.
    """
    neutron = clients.get_neutron_client()
    lbaas = clients.get_loadbalancer_client()
    if lbaas.cascading_capable:
        self._release(
            loadbalancer,
            loadbalancer,
            lbaas.delete,
            lbaas.lbaas_loadbalancer_path % loadbalancer.id,
            params={'cascade': True})
    else:
        self._release(loadbalancer, loadbalancer,
                      lbaas.delete_loadbalancer, loadbalancer.id)

    sg_id = self._find_listeners_sg(loadbalancer)
    if sg_id:
        # Note: reusing activation timeout as deletion timeout
        self._wait_for_deletion(loadbalancer, _ACTIVATION_TIMEOUT)
        try:
            neutron.delete_security_group(sg_id)
        except n_exc.NotFound:
            # NOTE: NotFound is a subclass of NeutronClientException, so it
            # must be caught first — the previous ordering made this branch
            # unreachable.
            LOG.debug('Security group %s already deleted', sg_id)
        except n_exc.NeutronClientException:
            LOG.exception('Error when deleting loadbalancer security '
                          'group. Leaving it orphaned.')
def test_setup_clients_lbaasv2(self, m_neutron, m_k8s, m_cfg):
    """setup_clients() must expose neutron as both network and LB client."""
    api_root = 'http://127.0.0.1:1234'
    fake_k8s = object()
    fake_neutron = mock.Mock()
    fake_neutron.list_extensions.return_value = {
        'extensions': [{
            'alias': 'lbaasv2',
            'description': 'Provides Load Balancing',
            'links': [],
            'name': 'Load Balancing v2',
            'updated': '2017-11-28T09:00:00-00:00'
        }]
    }
    m_cfg.kubernetes.api_root = api_root
    m_neutron.return_value = fake_neutron
    m_k8s.return_value = fake_k8s

    clients.setup_clients()

    m_k8s.assert_called_with(api_root)
    self.assertIs(fake_k8s, clients.get_kubernetes_client())
    self.assertIs(fake_neutron, clients.get_neutron_client())
    # With the lbaasv2 extension present, the neutron client doubles as
    # the load balancer client.
    self.assertIs(fake_neutron, clients.get_loadbalancer_client())
def get_octavia_version(self):
    """Return the highest advertised Octavia API version.

    :returns: a ``(major, minor)`` tuple of the maximum version found
    :raises k_exc.UnreachableOctavia: when a version entry has no
        'version' key, i.e. the Octavia API data is unusable
    """
    lbaas = clients.get_loadbalancer_client()
    # region_name may be absent from config; fall back to None.
    region_name = getattr(CONF.neutron, 'region_name', None)

    regions = lbaas.get_all_version_data()
    # If region was specified take it, otherwise just take first as default
    endpoints = regions.get(region_name, list(regions.values())[0])

    # Take the first endpoint
    services = list(endpoints.values())[0]
    # Try load-balancer service, if not take the first
    versions = services.get('load-balancer', list(services.values())[0])

    # Lookup the latest version. For safety, we won't look for
    # version['status'] == 'CURRENT' and assume it's the maximum. Also we
    # won't assume this dict is sorted.
    max_ver = 0, 0
    for version in versions:
        if version.get('version') is None:
            raise k_exc.UnreachableOctavia('Unable to reach Octavia API')
        v_tuple = versionutils.convert_version_to_tuple(
            version['version'])
        # Tuple comparison gives the correct (major, minor) ordering.
        if v_tuple > max_ver:
            max_ver = v_tuple

    LOG.debug("Detected Octavia version %d.%d", *max_ver)
    return max_ver
def _update_listener_acls(self, loadbalancer, listener_id, allowed_cidrs):
    """Push new allowed CIDRs to a listener via a raw PUT request.

    ``allowed_cidrs`` semantics: ``None`` leaves the listener world
    accessible, while an empty list disables the listener entirely.

    :raises k_exc.ResourceNotReady: when the PUT request fails
    """
    # An empty (but not None) CIDR list means nobody may connect, so the
    # listener gets administratively disabled instead.
    admin_state_up = not (allowed_cidrs is not None
                          and len(allowed_cidrs) == 0)

    payload = {
        'allowed_cidrs': allowed_cidrs,
        'admin_state_up': admin_state_up,
    }

    # Octavia rejects updates while the LB is still provisioning, so block
    # until it is ACTIVE first.
    self._wait_for_provisioning(loadbalancer, _ACTIVATION_TIMEOUT,
                                _LB_STS_POLL_FAST_INTERVAL)

    lbaas = clients.get_loadbalancer_client()
    url = o_lis.Listener.base_path + '/' + listener_id
    response = lbaas.put(url, json={o_lis.Listener.resource_key: payload})

    if not response.ok:
        LOG.error('Error when updating %s: %s',
                  o_lis.Listener.resource_key, response.text)
        raise k_exc.ResourceNotReady(listener_id)
def ensure_member(self, loadbalancer, pool, subnet_id, ip, port,
                  target_ref_namespace, target_ref_name,
                  listener_port=None):
    """Ensure a pool member exists, optionally applying SG rules.

    The member name encodes the target as ``namespace/name:port``.
    SG rules are only applied when the network-policy SG driver is active,
    Octavia SG enforcement is enabled and a listener port was given.
    """
    lbaas = clients.get_loadbalancer_client()

    member_name = "%s/%s" % (target_ref_namespace, target_ref_name)
    member_name += ":%s" % port
    member = {
        'name': member_name,
        'project_id': loadbalancer['project_id'],
        'pool_id': pool['id'],
        'subnet_id': subnet_id,
        'ip': ip,
        'port': port
    }
    result = self._ensure_provisioned(
        loadbalancer, member, self._create_member, self._find_member,
        update=lbaas.update_member)

    policy_sg_driver = (
        'policy' in CONF.kubernetes.enabled_handlers and
        CONF.kubernetes.service_security_groups_driver == 'policy')
    if (policy_sg_driver and CONF.octavia_defaults.enforce_sg_rules
            and listener_port):
        self._apply_members_security_groups(
            loadbalancer, listener_port, port, pool['protocol'],
            pool['name'], pool['listener_id'])

    return result
def release_listener(self, loadbalancer, listener):
    """Delete a listener and, for Amphora LBs, its matching SG rule."""
    os_net = clients.get_network_client()
    lbaas = clients.get_loadbalancer_client()
    self._release(loadbalancer, listener, lbaas.delete_listener,
                  listener['id'])

    # NOTE(maysams): since lbs created with ovn-octavia provider
    # does not have a sg in place, only need to delete sg rules
    # when enforcing sg rules on the lb sg, meaning octavia
    # Amphora provider is configured.
    if not CONF.octavia_defaults.enforce_sg_rules:
        return

    try:
        # _get_vip_port may yield an object without SGs; treat that as
        # "no security group to clean up".
        sg_id = self._get_vip_port(loadbalancer).security_group_ids[0]
    except AttributeError:
        return
    if not sg_id:
        return

    rules = os_net.security_group_rules(security_group_id=sg_id,
                                        description=listener['name'])
    try:
        os_net.delete_security_group_rule(next(rules).id)
    except StopIteration:
        LOG.warning('Cannot find SG rule for %s (%s) listener.',
                    listener['id'], listener['name'])
def _create_pool(self, pool):
    """Create an LBaaSv2 pool, cleaning up leftovers on client races.

    :param pool: pool object carrying name/project/listener/LB/protocol
    :returns: the same pool object with its ``id`` field populated
    :raises n_exc.StateInvalidClient: re-raised after successful cleanup
    """
    # TODO(ivc): make lb_algorithm configurable
    lb_algorithm = 'ROUND_ROBIN'
    lbaas = clients.get_loadbalancer_client()
    try:
        response = lbaas.create_lbaas_pool({
            'pool': {
                'name': pool.name,
                'project_id': pool.project_id,
                'listener_id': pool.listener_id,
                'loadbalancer_id': pool.loadbalancer_id,
                'protocol': pool.protocol,
                'lb_algorithm': lb_algorithm
            }
        })
        pool.id = response['pool']['id']
        return pool
    except n_exc.StateInvalidClient:
        # Capture the original exception info so it can be re-raised
        # verbatim after the cleanup attempt below.
        (type_, value, tb) = sys.exc_info()
        try:
            self._cleanup_bogus_pool(lbaas, pool, lb_algorithm)
        except Exception:
            # Cleanup failed too: log the original creation traceback and
            # let the cleanup error propagate instead.
            LOG.error('Pool creation traceback: %s',
                      traceback.format_exception(type_, value, tb))
            raise
        else:
            # Cleanup succeeded: re-raise the original StateInvalidClient.
            six.reraise(type_, value, tb)
def _create_loadbalancer(self, loadbalancer):
    """Create an Octavia load balancer and fill in its runtime fields.

    :returns: the updated loadbalancer object, or ``None`` when Octavia
              assigned a provider different from the one requested.
    """
    request = {
        'name': loadbalancer.name,
        'project_id': loadbalancer.project_id,
        'vip_address': str(loadbalancer.ip),
        'vip_subnet_id': loadbalancer.subnet_id,
    }
    if loadbalancer.provider is not None:
        request['provider'] = loadbalancer.provider
    self.add_tags('loadbalancer', request)

    lbaas = clients.get_loadbalancer_client()
    response = lbaas.create_load_balancer(**request)

    loadbalancer.id = response.id
    loadbalancer.port_id = self._get_vip_port(loadbalancer).id

    requested = loadbalancer.provider
    if requested is not None and requested != response.provider:
        # A provider mismatch means the LB is not what was asked for.
        LOG.error("Request provider(%s) != Response provider(%s)",
                  requested, response.provider)
        return None

    loadbalancer.provider = response.provider
    return loadbalancer
def _create_listener(self, listener):
    """Create an Octavia listener, applying configured/default timeouts.

    The default kubernetes service listener always gets extra-long
    client/member timeouts; otherwise the values from the listener dict
    are used when octavia timeouts are enabled.
    """
    request = {
        'name': listener['name'],
        'project_id': listener['project_id'],
        'loadbalancer_id': listener['loadbalancer_id'],
        'protocol': listener['protocol'],
        'protocol_port': listener['port'],
    }
    timeout_cli = listener.get('timeout_client_data')
    timeout_mem = listener.get('timeout_member_data')
    if self._octavia_timeouts:
        # Only override Octavia's defaults when values were provided.
        for key, value in (('timeout_client_data', timeout_cli),
                           ('timeout_member_data', timeout_mem)):
            if value:
                request[key] = value
    if K8S_DEFAULT_SVC_NAME in listener['name']:
        request['timeout_client_data'] = 600000
        request['timeout_member_data'] = 600000
    self.add_tags('listener', request)

    lbaas = clients.get_loadbalancer_client()
    response = lbaas.create_listener(**request)

    listener['id'] = response.id
    if timeout_cli:
        listener['timeout_client_data'] = response.timeout_client_data
    if timeout_mem:
        listener['timeout_member_data'] = response.timeout_member_data
    return listener
def _create_loadbalancer(self, loadbalancer):
    """Create a neutron-lbaas load balancer and populate runtime fields.

    :returns: the updated loadbalancer object, or ``None`` when neutron
              assigned a provider different from the one requested.
    """
    lbaas = clients.get_loadbalancer_client()
    body = {'loadbalancer': {'name': loadbalancer.name,
                             'project_id': loadbalancer.project_id,
                             'vip_address': str(loadbalancer.ip),
                             'vip_subnet_id': loadbalancer.subnet_id}}
    if loadbalancer.provider is not None:
        body['loadbalancer']['provider'] = loadbalancer.provider

    created = lbaas.create_loadbalancer(body)['loadbalancer']

    loadbalancer.id = created['id']
    loadbalancer.port_id = self._get_vip_port(loadbalancer).get("id")

    if (loadbalancer.provider is not None
            and loadbalancer.provider != created['provider']):
        LOG.error("Request provider(%s) != Response provider(%s)",
                  loadbalancer.provider, created['provider'])
        return None

    loadbalancer.provider = created['provider']
    return loadbalancer
def ensure_pool(self, loadbalancer, listener):
    """Ensure a pool exists for *listener*.

    For the default kubernetes service on non-OVN providers a TCP health
    monitor is additionally created once the LB is ACTIVE.
    """
    pool_spec = {
        'name': listener['name'],
        'project_id': loadbalancer['project_id'],
        'loadbalancer_id': loadbalancer['id'],
        'listener_id': listener['id'],
        'protocol': listener['protocol']
    }
    pool = self._ensure_provisioned(loadbalancer, pool_spec,
                                    self._create_pool, self._find_pool)

    needs_monitor = (pool and K8S_DEFAULT_SVC_NAME in pool['name']
                     and loadbalancer['provider'] != 'ovn')
    if needs_monitor:
        if not self._wait_for_provisioning(loadbalancer,
                                           _ACTIVATION_TIMEOUT,
                                           _LB_STS_POLL_FAST_INTERVAL):
            # LB never became ACTIVE; give up without a health monitor.
            return
        lbaas = clients.get_loadbalancer_client()
        lbaas.create_health_monitor(pool_id=pool['id'], type='TCP',
                                    timeout=10, delay=10, max_retries=3,
                                    name=pool['name'])
    return pool
def _wait_for_provisioning(self, loadbalancer, timeout,
                           interval=_LB_STS_POLL_FAST_INTERVAL):
    """Poll until *loadbalancer* becomes ACTIVE.

    :raises k_exc.ResourceNotReady: when the LB enters ERROR (after
        releasing it) or the timeout expires.
    """
    lbaas = clients.get_loadbalancer_client()
    for remaining in self._provisioning_timer(timeout, interval):
        lb = lbaas.get_load_balancer(loadbalancer.id)
        status = lb.provisioning_status
        if status == 'ACTIVE':
            LOG.debug("Provisioning complete for %(lb)s",
                      {'lb': loadbalancer})
            return
        if status == 'ERROR':
            # A broken LB will never become ACTIVE; drop it and give up.
            LOG.debug("Releasing loadbalancer %s with error status",
                      loadbalancer.id)
            self.release_loadbalancer(loadbalancer)
            break
        LOG.debug(
            "Provisioning status %(status)s for %(lb)s, "
            "%(rem).3gs remaining until timeout",
            {'status': status, 'lb': loadbalancer, 'rem': remaining})
    raise k_exc.ResourceNotReady(loadbalancer)
def release_loadbalancer(self, loadbalancer):
    """Cascade-delete *loadbalancer* and wait until it is fully gone."""
    lbaas = clients.get_loadbalancer_client()
    # cascade=True removes listeners, pools and members in one call.
    self._release(loadbalancer, loadbalancer,
                  lbaas.delete_load_balancer,
                  loadbalancer['id'], cascade=True)
    self._wait_for_deletion(loadbalancer, _ACTIVATION_TIMEOUT)
def update_l7_rule(self, l7_rule, new_value):
    """Set a new match value on an existing L7 rule.

    Failures are logged and re-raised for the caller to handle.
    """
    lb_client = clients.get_loadbalancer_client()
    try:
        lb_client.update_l7_rule(l7_rule.id, l7_rule.l7policy_id,
                                 value=new_value)
    except o_exc.SDKException:
        LOG.exception("Failed to update l7_rule- id=%s ", l7_rule.id)
        raise
def _wait_for_deletion(self, loadbalancer, timeout,
                       interval=_LB_STS_POLL_FAST_INTERVAL):
    """Poll until the load balancer can no longer be fetched.

    Returns silently on timeout as well; deletion is best-effort here.
    """
    lb_client = clients.get_loadbalancer_client()
    for _ in self._provisioning_timer(timeout, interval):
        try:
            lb_client.get_load_balancer(loadbalancer['id'])
        except os_exc.NotFoundException:
            # Gone from the API — deletion finished.
            return
def is_pool_used_by_other_l7policies(self, l7policy, pool):
    """Return True when another L7 policy also redirects to *pool*."""
    lbaas = clients.get_loadbalancer_client()
    policies = lbaas.l7_policies(project_id=l7policy.project_id)
    # Falsy entries are skipped; a match is any other policy pointing at
    # the same redirect pool.
    return any(entry
               and entry.redirect_pool_id == pool.id
               and entry.id != l7policy.id
               for entry in policies)
def _create_l7_rule(self, l7_rule):
    """Create an L7 rule under its policy and record the new ID."""
    lbaas = clients.get_loadbalancer_client()
    body = {'rule': {'compare_type': l7_rule.compare_type,
                     'type': l7_rule.type,
                     'value': l7_rule.value}}
    created = lbaas.create_lbaas_l7rule(l7_rule.l7policy_id, body)
    l7_rule.id = created['rule']['id']
    return l7_rule
def _create_member(self, member):
    """Create a neutron-lbaas member in *member*'s pool; store its ID."""
    lbaas = clients.get_loadbalancer_client()
    body = {'member': {'name': member.name,
                       'project_id': member.project_id,
                       'subnet_id': member.subnet_id,
                       'address': str(member.ip),
                       'protocol_port': member.port}}
    created = lbaas.create_lbaas_member(member.pool_id, body)
    member.id = created['member']['id']
    return member
def update_l7_rule(self, l7_rule, new_value):
    """Update the match value of an existing neutron-lbaas L7 rule.

    Failures are logged and re-raised for the caller to handle.
    """
    lbaas = clients.get_loadbalancer_client()
    body = {'rule': {'value': new_value}}
    try:
        lbaas.update_lbaas_l7rule(l7_rule.id, l7_rule.l7policy_id, body)
    except n_exc.NeutronClientException:
        LOG.exception("Failed to update l7_rule- id=%s ", l7_rule.id)
        raise
def _create_listener(self, listener):
    """Create a neutron-lbaas listener and store its ID on *listener*."""
    lbaas = clients.get_loadbalancer_client()
    body = {'listener': {'name': listener.name,
                         'project_id': listener.project_id,
                         'loadbalancer_id': listener.loadbalancer_id,
                         'protocol': listener.protocol,
                         'protocol_port': listener.port}}
    created = lbaas.create_listener(body)
    listener.id = created['listener']['id']
    return listener
def _create_l7_policy(self, l7_policy):
    """Create a redirect-to-pool L7 policy and record its new ID."""
    lbaas = clients.get_loadbalancer_client()
    body = {'l7policy': {'action': _L7_POLICY_ACT_REDIRECT_TO_POOL,
                         'listener_id': l7_policy.listener_id,
                         'name': l7_policy.name,
                         'project_id': l7_policy.project_id,
                         'redirect_pool_id': l7_policy.redirect_pool_id}}
    created = lbaas.create_lbaas_l7policy(body)
    l7_policy.id = created['l7policy']['id']
    return l7_policy
def _find_l7_rule(self, l7_rule):
    """Find an existing L7 rule matching *l7_rule*.

    :returns: *l7_rule* with its ``id`` set, or ``None`` when absent.
    """
    lbaas = clients.get_loadbalancer_client()
    listing = lbaas.list_lbaas_l7rules(l7_rule.l7policy_id,
                                       type=l7_rule.type,
                                       value=l7_rule.value,
                                       compare_type=l7_rule.compare_type)
    try:
        l7_rule.id = listing['rules'][0]['id']
    except (KeyError, IndexError):
        # No matching rule exists yet.
        return None
    return l7_rule
def __init__(self):
    """Set up the Flask app, OpenStack clients and Prometheus metrics."""
    self.application = flask.Flask('prometheus-exporter')
    self.ctx = None
    # Close connections after each response instead of keeping them open.
    self.headers = {'Connection': 'close'}
    self.application.add_url_rule('/metrics', methods=['GET'],
                                  view_func=self.metrics)
    self._os_net = clients.get_network_client()
    self._os_lb = clients.get_loadbalancer_client()
    self._project_id = config.CONF.neutron_defaults.project
    self._create_metrics()
def _find_l7_rule(self, l7_rule):
    """Find an existing Octavia L7 rule matching *l7_rule*.

    :returns: *l7_rule* with its ``id`` set, or ``None`` when absent.
    """
    lbaas = clients.get_loadbalancer_client()
    matches = lbaas.l7_rules(l7_rule.l7policy_id,
                             type=l7_rule.type,
                             value=l7_rule.value,
                             compare_type=l7_rule.compare_type)
    try:
        # Take the first match from the generator, if any.
        l7_rule.id = next(matches)['id']
    except (KeyError, StopIteration):
        return None
    return l7_rule
def _trigger_reconciliation(self, loadbalancer_crds):
    """Trigger reconciliation for CRDs whose Octavia resources are gone.

    Collects the load balancer, listener and pool IDs recorded in each
    KuryrLoadBalancer CRD's status, compares them against what Octavia
    actually reports, and calls ``_reconcile_lb`` for every CRD that
    references a missing resource (at most once per CRD).

    :param loadbalancer_crds: list of KuryrLoadBalancer CRD dicts
    """
    LOG.debug("Reconciling the KuryrLoadBalancer CRDs")
    lbaas = clients.get_loadbalancer_client()
    # Octavia listing call per resource type, used to discover which
    # resources still exist.
    resources_fn = {
        'loadbalancer': lbaas.load_balancers,
        'listener': lbaas.listeners,
        'pool': lbaas.pools
    }
    resources = {'loadbalancer': [], 'listener': [], 'pool': []}

    for klb in loadbalancer_crds:
        # CRDs already being deleted must not be reconciled.
        if klb['metadata'].get('deletionTimestamp'):
            continue
        selflink = utils.get_res_link(klb)
        lb_id = klb.get('status', {}).get('loadbalancer', {}).get('id')
        if lb_id:
            resources['loadbalancer'].append({
                'id': lb_id,
                'selflink': selflink,
                'klb': klb
            })
        for lbl in klb.get('status', {}).get('listeners', []):
            resources['listener'].append({
                'id': lbl['id'],
                'selflink': selflink,
                'lklb': klb
            })
        for pl in klb.get('status', {}).get('pools', []):
            resources['pool'].append({
                'id': pl['id'],
                'selflink': selflink,
                'pklb': klb
            })

    resources_already_triggered = []
    # let's reconcile load balancers first, listeners and then pools
    resource_types = ('loadbalancer', 'listener', 'pool')
    for resource_type in resource_types:
        # Restrict the Octavia listing to kuryr-tagged resources.
        filters = {}
        self._drv_lbaas.add_tags(resource_type, filters)
        os_list = resources_fn[resource_type]
        os_resources = os_list(**filters)
        os_resources_id = [rsrc['id'] for rsrc in os_resources]

        for data in resources[resource_type]:
            # Each CRD is reconciled at most once per pass.
            if data['selflink'] in resources_already_triggered:
                continue
            if data['id'] not in os_resources_id:
                resources_already_triggered.append(data['selflink'])
                LOG.debug("Reconciling KuryrLoadBalancer CRD: %s",
                          data['selflink'])
                self._reconcile_lb(data)
def _find_l7_policy(self, l7_policy):
    """Find an existing neutron-lbaas L7 policy matching *l7_policy*.

    :returns: *l7_policy* with its ``id`` set, or ``None`` when absent.
    """
    lbaas = clients.get_loadbalancer_client()
    listing = lbaas.list_lbaas_l7policies(
        name=l7_policy.name,
        project_id=l7_policy.project_id,
        redirect_pool_id=l7_policy.redirect_pool_id,
        listener_id=l7_policy.listener_id)
    try:
        l7_policy.id = listing['l7policies'][0]['id']
    except (KeyError, IndexError):
        # No matching policy exists yet.
        return None
    return l7_policy
def _find_l7_policy(self, l7_policy):
    """Find an existing Octavia L7 policy matching *l7_policy*.

    :returns: *l7_policy* with its ``id`` set, or ``None`` when absent.
    """
    lbaas = clients.get_loadbalancer_client()
    matches = lbaas.l7_policies(
        name=l7_policy.name,
        project_id=l7_policy.project_id,
        redirect_pool_id=l7_policy.redirect_pool_id,
        listener_id=l7_policy.listener_id)
    try:
        # Take the first match from the generator, if any.
        l7_policy.id = next(matches)['id']
    except (KeyError, StopIteration):
        return None
    return l7_policy
def _create_member(self, member):
    """Create an Octavia pool member and record its new ID."""
    body = {
        'name': member['name'],
        'project_id': member['project_id'],
        'subnet_id': member['subnet_id'],
        'address': str(member['ip']),
        'protocol_port': member['port'],
    }
    self.add_tags('member', body)

    lbaas = clients.get_loadbalancer_client()
    created = lbaas.create_member(member['pool_id'], **body)
    member['id'] = created.id
    return member
def _wait_for_deletion(self, loadbalancer, timeout,
                       interval=_LB_STS_POLL_FAST_INTERVAL):
    """Poll until the LB disappears from the API.

    :raises k_exc.LoadBalancerNotReady: with the last observed
        provisioning status when the timeout expires.
    """
    lbaas = clients.get_loadbalancer_client()
    status = 'PENDING_DELETE'
    for _ in self._provisioning_timer(timeout, interval):
        try:
            status = lbaas.get_load_balancer(
                loadbalancer['id']).provisioning_status
        except os_exc.NotFoundException:
            # Gone from the API — deletion finished.
            return
    raise k_exc.LoadBalancerNotReady(loadbalancer['id'], status)