def on_present(self, endpoints):
    """Reconcile Neutron LBaaS resources with the given K8s Endpoints.

    Loads the LBaaS spec and current state annotations from *endpoints*,
    synchronizes the load balancer members, and writes the resulting
    state back to the K8s object.  If the K8s object vanished while we
    were creating Neutron resources, the freshly created resources are
    rolled back via :meth:`on_deleted`.

    :param endpoints: K8s Endpoints object (dict) being handled.
    """
    lbaas_spec = self._get_lbaas_spec(endpoints)
    if self._should_ignore(endpoints, lbaas_spec):
        return

    lbaas_state = self._get_lbaas_state(endpoints)
    if not lbaas_state:
        # No previous annotation on the object: start from an empty
        # state and let the sync below populate it.
        lbaas_state = obj_lbaas.LBaaSState()

    if self._sync_lbaas_members(endpoints, lbaas_state, lbaas_spec):
        # REVISIT(ivc): since _sync_lbaas_members is responsible for
        # creating all lbaas components (i.e. load balancer, listeners,
        # pools, members), it is currently possible for it to fail (due
        # to invalid Kuryr/K8s/Neutron configuration, e.g. Members' IPs
        # not belonging to configured Neutron subnet or Service IP being
        # in use by gateway or VMs) leaving some Neutron entities without
        # properly updating annotation. Some sort of failsafe mechanism is
        # required to deal with such situations (e.g. cleanup, or skip
        # failing items, or validate configuration) to prevent annotation
        # being out of sync with the actual Neutron state.
        try:
            self._set_lbaas_state(endpoints, lbaas_state)
        except k_exc.K8sResourceNotFound:
            # Note(yboaron) It's impossible to store neutron resources
            # in K8S object since object was deleted. In that case
            # we should rollback all neutron resources.
            LOG.debug("LoadBalancerHandler failed to store Openstack "
                      "resources in K8S object (not found)")
            self.on_deleted(endpoints, lbaas_state)
def on_present(self, endpoints):
    """Reconcile Neutron LBaaS resources with the given K8s Endpoints.

    Synchronizes load balancer members against *endpoints*; for
    LoadBalancer-type services it additionally acquires a public
    (floating) IP, associates it with the LB VIP and updates the K8s
    service status.  The resulting state is persisted as an annotation
    on the K8s object, with rollback via :meth:`on_deleted` when the
    object was deleted underneath us.

    :param endpoints: K8s Endpoints object (dict) being handled.
    """
    lbaas_spec = self._get_lbaas_spec(endpoints)
    if self._should_ignore(endpoints, lbaas_spec):
        # Re-derive the reason purely for the debug message.  The
        # 'Unknown' default guards against _should_ignore() and the
        # checks below ever disagreeing, which previously would have
        # raised UnboundLocalError at the LOG.debug call.
        reason = 'Unknown'
        if not (self._is_validendpoints(endpoints)):
            reason = 'Endpoints not valid'
        elif not lbaas_spec:
            reason = 'No lbaas_spec'
        elif not self._has_pods(endpoints):
            reason = 'No valid endpoints'
        elif not self._svc_handler_annotations_updated(endpoints,
                                                       lbaas_spec):
            reason = 'Not updated'
        LOG.debug("Ignoring Kubernetes endpoints %s for reason: %s",
                  endpoints['metadata']['name'], reason)
        return

    lbaas_state = utils.get_lbaas_state(endpoints)
    if not lbaas_state:
        # No previous annotation on the object: start from an empty
        # state and let the sync below populate it.
        lbaas_state = obj_lbaas.LBaaSState()

    if self._sync_lbaas_members(endpoints, lbaas_state, lbaas_spec):
        # Note(yboaron) For LoadBalancer services, we should allocate FIP,
        # associate it to LB VIP and update K8S service status
        if lbaas_state.service_pub_ip_info is None:
            service_pub_ip_info = (
                self._drv_service_pub_ip.acquire_service_pub_ip_info(
                    lbaas_spec.type,
                    lbaas_spec.lb_ip,
                    lbaas_spec.project_id,
                    lbaas_state.loadbalancer.port_id))
            if service_pub_ip_info:
                self._drv_service_pub_ip.associate_pub_ip(
                    service_pub_ip_info,
                    lbaas_state.loadbalancer.port_id)
                lbaas_state.service_pub_ip_info = service_pub_ip_info
                self._update_lb_status(
                    endpoints,
                    lbaas_state.service_pub_ip_info.ip_addr)
        # REVISIT(ivc): since _sync_lbaas_members is responsible for
        # creating all lbaas components (i.e. load balancer, listeners,
        # pools, members), it is currently possible for it to fail (due
        # to invalid Kuryr/K8s/Neutron configuration, e.g. Members' IPs
        # not belonging to configured Neutron subnet or Service IP being
        # in use by gateway or VMs) leaving some Neutron entities without
        # properly updating annotation. Some sort of failsafe mechanism is
        # required to deal with such situations (e.g. cleanup, or skip
        # failing items, or validate configuration) to prevent annotation
        # being out of sync with the actual Neutron state.
        try:
            utils.set_lbaas_state(endpoints, lbaas_state)
        except k_exc.K8sResourceNotFound:
            # Note(yboaron) It's impossible to store neutron resources
            # in K8S object since object was deleted. In that case
            # we should rollback all neutron resources.
            LOG.debug("LoadBalancerHandler failed to store Openstack "
                      "resources in K8S object (not found)")
            self.on_deleted(endpoints, lbaas_state)
def _generate_lbaas_state(self, vip, targets, project_id, subnet_id):
    """Build a fully populated LBaaSState via the fake driver.

    :param vip: virtual IP for the load balancer.
    :param targets: mapping of member IP -> (listen_port, target_port).
    :param project_id: project owning the resources.
    :param subnet_id: subnet the members belong to.
    :returns: an ``obj_lbaas.LBaaSState`` with the load balancer and one
              listener/pool per listen port, plus one member per target.
    """
    driver = FakeLBaaSDriver()
    loadbalancer = driver.ensure_loadbalancer(
        'DUMMY_NAME', project_id, subnet_id, vip, None, 'ClusterIP')

    listeners_by_port = {}
    pools_by_port = {}
    members_by_key = {}
    for member_ip, (listen_port, target_port) in targets.items():
        # NOTE: setdefault evaluates its ensure_* argument eagerly even
        # when the key already exists — kept to match the fake driver's
        # original call pattern exactly.
        listener = listeners_by_port.setdefault(
            listen_port,
            driver.ensure_listener(loadbalancer, 'TCP', listen_port))
        pool = pools_by_port.setdefault(
            listen_port, driver.ensure_pool(loadbalancer, listener))
        members_by_key.setdefault(
            (member_ip, listen_port, target_port),
            driver.ensure_member(loadbalancer, pool, subnet_id,
                                 member_ip, target_port, None, None))

    return obj_lbaas.LBaaSState(
        loadbalancer=loadbalancer,
        listeners=list(listeners_by_port.values()),
        pools=list(pools_by_port.values()),
        members=list(members_by_key.values()))
def on_present(self, endpoints):
    """Sync LBaaS members for *endpoints* and persist the new state.

    Reads the LBaaS spec annotation, skips objects that should be
    ignored, then updates the load balancer members and stores the
    resulting state back on the K8s object.

    :param endpoints: K8s Endpoints object (dict) being handled.
    """
    spec = self._get_lbaas_spec(endpoints)
    if self._should_ignore(endpoints, spec):
        return

    # Fall back to a fresh, empty state object when the K8s object
    # carries no state annotation yet.
    state = self._get_lbaas_state(endpoints) or obj_lbaas.LBaaSState()

    if self._sync_lbaas_members(endpoints, state, spec):
        # REVISIT(ivc): since _sync_lbaas_members is responsible for
        # creating all lbaas components (i.e. load balancer, listeners,
        # pools, members), it is currently possible for it to fail (due
        # to invalid Kuryr/K8s/Neutron configuration, e.g. Members' IPs
        # not belonging to configured Neutron subnet or Service IP being
        # in use by gateway or VMs) leaving some Neutron entities without
        # properly updating annotation. Some sort of failsafe mechanism is
        # required to deal with such situations (e.g. cleanup, or skip
        # failing items, or validate configuration) to prevent annotation
        # being out of sync with the actual Neutron state.
        self._set_lbaas_state(endpoints, state)
def _generate_lbaas_state(self, vip, targets, project_id, subnet_id):
    """Build a fully populated LBaaSState via the fake driver.

    :param vip: virtual IP for the load balancer.
    :param targets: mapping of member IP -> (listen_port, target_port).
    :param project_id: project owning the resources.
    :param subnet_id: subnet the members belong to.
    :returns: an ``obj_lbaas.LBaaSState`` with the load balancer and one
              listener/pool per listen port, plus one member per target.
    """
    # A sentinel stands in for the real Endpoints object; the fake
    # driver only passes it through.
    endpoints = mock.sentinel.endpoints
    drv = FakeLBaaSDriver()
    lb = drv.ensure_loadbalancer(
        endpoints, project_id, subnet_id, vip, None)
    listeners = {}
    pools = {}
    members = {}
    for ip, (listen_port, target_port) in targets.items():
        # NOTE: setdefault evaluates its ensure_* argument eagerly even
        # when the key is already present.
        lsnr = listeners.setdefault(listen_port, drv.ensure_listener(
            endpoints, lb, 'TCP', listen_port))
        pool = pools.setdefault(listen_port, drv.ensure_pool(
            endpoints, lb, lsnr))
        members.setdefault((ip, listen_port, target_port),
                           drv.ensure_member(endpoints, lb, pool,
                                             subnet_id, ip,
                                             target_port, None))
    return obj_lbaas.LBaaSState(
        loadbalancer=lb,
        listeners=list(listeners.values()),
        pools=list(pools.values()),
        members=list(members.values()))