def _ensure_provisioned(self, loadbalancer, obj, create, find):
    for remaining in self._provisioning_timer(_ACTIVATION_TIMEOUT):
        self._wait_for_provisioning(loadbalancer, remaining)
        try:
            result = self._ensure(obj, create, find)
            if result:
                return result
        except n_exc.StateInvalidClient:
            # The load balancer slipped back into a transitional state
            # while we were mutating it; wait for it to settle and retry.
            continue

    raise k_exc.ResourceNotReady(obj)
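# NOTE: _provisioning_timer is used by the retry loops in this module but is
# not shown here. A minimal sketch under the assumption that it is a
# generator yielding the seconds remaining until the timeout expires and
# sleeping between iterations (the fixed 'interval' is an assumption, not
# the actual backoff policy):
import time  # stdlib; needed only by this sketch

def _provisioning_timer(self, timeout, interval=3):
    deadline = time.monotonic() + timeout
    remaining = timeout
    while remaining > 0:
        yield remaining
        time.sleep(min(interval, remaining))
        remaining = deadline - time.monotonic()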
def activate_vif(self, pod, vif):
    if vif.active:
        return

    neutron = clients.get_neutron_client()
    port = neutron.show_port(vif.id).get('port')

    if port['status'] != kl_const.PORT_STATUS_ACTIVE:
        raise k_exc.ResourceNotReady(vif)

    vif.active = True
def _release(self, loadbalancer, obj, delete, *args, **kwargs):
    for remaining in self._provisioning_timer(_ACTIVATION_TIMEOUT):
        try:
            try:
                delete(*args, **kwargs)
                return
            except (n_exc.Conflict, n_exc.StateInvalidClient):
                # The load balancer is still provisioning; wait for it to
                # become ACTIVE, then retry the delete.
                self._wait_for_provisioning(loadbalancer, remaining)
        except n_exc.NotFound:
            # Already gone, nothing left to release.
            return

    raise k_exc.ResourceNotReady(obj)
def _get_port_from_pool(self, pool_key, pod, subnets):
    try:
        port_id = self._available_ports_pools[pool_key].pop()
    except (KeyError, IndexError):
        # KeyError: no pool exists for this key yet; IndexError: the pool
        # exists but is empty. Either way there is no port to hand out.
        raise exceptions.ResourceNotReady(pod)

    if config.CONF.rancher.port_debug:
        neutron = clients.get_neutron_client()
        neutron.update_port(port_id, {"port": {
            'name': pod['name'],
        }})

    # Repopulate the pool asynchronously if it dropped below the minimum.
    if (self._get_pool_size(pool_key) <
            oslo_cfg.CONF.vif_pool.ports_pool_min):
        eventlet.spawn(self._populate_pool, pool_key, pod, subnets)

    return self._existing_vifs[port_id]
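# NOTE: _get_pool_size is referenced above but not shown. A plausible
# one-line sketch, assuming the pool size is simply the count of ports
# currently available for the given pool key:
def _get_pool_size(self, pool_key):
    return len(self._available_ports_pools.get(pool_key, []))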
def _update_lb_status(self, endpoints, lb_ip_address):
    status_data = {"loadBalancer": {
        "ingress": [{"ip": lb_ip_address.format()}]}}

    k8s = clients.get_kubernetes_client()
    svc_link = self._get_service_link(endpoints)
    try:
        k8s.patch_status(svc_link, status_data)
    except k_exc.K8sClientException:
        # REVISIT(ivc): only raise ResourceNotReady for NotFound
        raise k_exc.ResourceNotReady(svc_link)
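# NOTE: _get_service_link is not shown in this module. A hedged sketch,
# assuming it rewrites the endpoints selfLink (".../endpoints/<name>") into
# the corresponding service link (".../services/<name>"); the ValueError is
# a simplification, not necessarily the project's exception type:
def _get_service_link(self, endpoints):
    ep_link = endpoints['metadata']['selfLink']
    link_parts = ep_link.split('/')
    if link_parts[-2] != 'endpoints':
        raise ValueError("Unsupported endpoints link: %s" % ep_link)
    link_parts[-2] = 'services'
    return '/'.join(link_parts)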
def _wait_for_provisioning(self, loadbalancer, timeout):
    neutron = clients.get_neutron_client()

    for remaining in self._provisioning_timer(timeout):
        response = neutron.show_loadbalancer(loadbalancer.id)
        status = response['loadbalancer']['provisioning_status']
        if status == 'ACTIVE':
            LOG.debug("Provisioning complete for %(lb)s", {
                'lb': loadbalancer})
            return
        else:
            LOG.debug("Provisioning status %(status)s for %(lb)s, "
                      "%(rem).3gs remaining until timeout",
                      {'status': status, 'lb': loadbalancer,
                       'rem': remaining})

    raise k_exc.ResourceNotReady(loadbalancer)
def ensure_loadbalancer(self, endpoints, project_id, subnet_id, ip,
                        security_groups_ids):
    name = "%(namespace)s/%(name)s" % endpoints['metadata']
    request = obj_lbaas.LBaaSLoadBalancer(name=name,
                                          project_id=project_id,
                                          subnet_id=subnet_id,
                                          ip=ip)
    response = self._ensure(request,
                            self._create_loadbalancer,
                            self._find_loadbalancer)
    if not response:
        # NOTE(ivc): load balancer was present before 'create', but got
        # deleted externally between 'create' and 'find'
        raise k_exc.ResourceNotReady(request)

    # TODO(ivc): handle security groups
    return response
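# NOTE: _ensure implements the create-or-find contract used above but is not
# shown here. A hedged sketch of one possible shape: try to create, and on a
# Neutron Conflict fall back to looking the object up; returning None means
# the object vanished between 'create' and 'find':
def _ensure(self, obj, create, find):
    try:
        result = create(obj)
        LOG.debug("Created %(obj)s", {'obj': result})
        return result
    except n_exc.Conflict:
        result = find(obj)
        if result:
            LOG.debug("Found %(obj)s", {'obj': result})
        return result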
def _set_lbaas_spec(self, service, lbaas_spec):
    # TODO(ivc): extract annotation interactions
    if lbaas_spec is None:
        LOG.debug("Removing LBaaSServiceSpec annotation: %r", lbaas_spec)
        annotation = None
    else:
        lbaas_spec.obj_reset_changes(recursive=True)
        LOG.debug("Setting LBaaSServiceSpec annotation: %r", lbaas_spec)
        annotation = jsonutils.dumps(lbaas_spec.obj_to_primitive(),
                                     sort_keys=True)
    svc_link = service['metadata']['selfLink']
    ep_link = self._get_endpoints_link(service)
    k8s = clients.get_kubernetes_client()
    try:
        k8s.annotate(ep_link,
                     {k_const.K8S_ANNOTATION_LBAAS_SPEC: annotation})
    except k_exc.K8sClientException:
        # REVISIT(ivc): only raise ResourceNotReady for NotFound
        raise k_exc.ResourceNotReady(ep_link)
    k8s.annotate(svc_link,
                 {k_const.K8S_ANNOTATION_LBAAS_SPEC: annotation},
                 resource_version=service['metadata']['resourceVersion'])
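# NOTE: _get_endpoints_link is the counterpart of the service-link helper;
# a hedged sketch, assuming it swaps 'services' for 'endpoints' in the
# service selfLink (ValueError again stands in for the real exception):
def _get_endpoints_link(self, service):
    svc_link = service['metadata']['selfLink']
    link_parts = svc_link.split('/')
    if link_parts[-2] != 'services':
        raise ValueError("Unsupported service link: %s" % svc_link)
    link_parts[-2] = 'endpoints'
    return '/'.join(link_parts)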