def revert(self, loadbalancer, *args, **kwargs):
    """Handle failed listeners updates."""

    LOG.warning(_LW("Reverting listeners updates."))
    for listener in loadbalancer.listeners:
        try:
            self.listener_repo.update(db_apis.get_session(),
                                      id=listener.id,
                                      provisioning_status=constants.ERROR)
        except Exception:
            LOG.warning(_LW("Failed to update listener %s provisioning "
                            "status..."), listener.id)
    return None
def revert(self, member_id, *args, **kwargs):
    """Mark the member ERROR since the delete couldn't happen

    :returns: None
    """
    LOG.warn(_LW("Reverting delete in DB "
                 "for member id %s"), member_id)
def revert(self, listener, *args, **kwargs):
    """Handle a failed listener start."""

    LOG.warning(_LW("Reverting listener start."))
    self.listener_repo.update(db_apis.get_session(), id=listener.id,
                              provisioning_status=constants.ERROR)
    return None
def _delete_vip_security_group(self, sec_grp):
    """Deletes a security group in neutron.

    Retries upon an exception because removing a security group from
    a neutron port does not happen immediately.
    """
    attempts = 0
    while attempts <= CONF.networking.max_retries:
        try:
            self.neutron_client.delete_security_group(sec_grp)
            LOG.info(_LI("Deleted security group %s"), sec_grp)
            return
        except neutron_client_exceptions.NotFound:
            LOG.info(_LI("Security group %s not found, will assume it is "
                         "already deleted"), sec_grp)
            return
        except Exception:
            LOG.warning(_LW("Attempt %(attempt)s to remove security group "
                            "%(sg)s failed."),
                        {'attempt': attempts + 1, 'sg': sec_grp})
            attempts += 1
            time.sleep(CONF.networking.retry_interval)
    message = _LE("All attempts to remove security group {0} have "
                  "failed.").format(sec_grp)
    LOG.exception(message)
    raise base.DeallocateVIPException(message)
def _extract_amp_image_id_by_tag(client, image_tag, image_owner):
    if image_owner:
        images = list(client.images.list(
            filters={'tag': [image_tag],
                     'owner': image_owner,
                     'status': constants.GLANCE_IMAGE_ACTIVE},
            sort='created_at:desc',
            limit=2))
    else:
        images = list(client.images.list(
            filters={'tag': [image_tag],
                     'status': constants.GLANCE_IMAGE_ACTIVE},
            sort='created_at:desc',
            limit=2))

    if not images:
        raise exceptions.GlanceNoTaggedImages(tag=image_tag)
    image_id = images[0]['id']
    num_images = len(images)
    if num_images > 1:
        LOG.warning(
            _LW("A single Glance image should be tagged with %(tag)s tag, "
                "but at least two were found. Using %(image_id)s."),
            {'tag': image_tag, 'image_id': image_id})
    return image_id
def request(self, method, amp, path='/', **kwargs):
    LOG.debug("request url %s", path)
    _request = getattr(self.session, method.lower())
    _url = self._base_url(amp.lb_network_ip) + path
    reqargs = {
        'verify': CONF.haproxy_amphora.server_ca,
        'url': _url, }
    reqargs.update(kwargs)
    headers = reqargs.setdefault('headers', {})
    headers['User-Agent'] = OCTAVIA_API_CLIENT
    self.ssl_adapter.uuid = amp.id
    # Keep retrying
    for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
        try:
            r = _request(**reqargs)
        except requests.ConnectionError:
            LOG.warn(_LW("Could not talk to instance"))
            time.sleep(CONF.haproxy_amphora.connection_retry_interval)
            if a >= CONF.haproxy_amphora.connection_max_retries:
                raise driver_except.TimeOutException()
        else:
            return r
    raise driver_except.UnavailableException()
def revert(self, listener, *args, **kwargs):
    """Mark the listener as broken and ready to be cleaned up."""
    LOG.warn(_LW("Reverting mark listener pending delete in DB "
                 "for listener id %s"), listener.id)
    self.listener_repo.update(db_apis.get_session(),
                              listener.id,
                              provisioning_status=constants.ERROR)
def request(self, method, amp, path='/', **kwargs):
    LOG.debug("request url %s", path)
    _request = getattr(self.session, method.lower())
    _url = self._base_url(amp.lb_network_ip) + path
    LOG.debug("request url " + _url)
    timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
                     CONF.haproxy_amphora.rest_request_read_timeout)
    reqargs = {
        'verify': CONF.haproxy_amphora.server_ca,
        'url': _url,
        'timeout': timeout_tuple, }
    reqargs.update(kwargs)
    headers = reqargs.setdefault('headers', {})
    headers['User-Agent'] = OCTAVIA_API_CLIENT
    self.ssl_adapter.uuid = amp.id
    # Keep retrying
    for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    message="A true SSLContext object is not available"
                )
                r = _request(**reqargs)
        except (requests.ConnectionError, requests.Timeout):
            LOG.warning(_LW("Could not connect to instance. Retrying."))
            time.sleep(CONF.haproxy_amphora.connection_retry_interval)
            if a == CONF.haproxy_amphora.connection_max_retries - 1:
                raise driver_except.TimeOutException()
        else:
            return r
    raise driver_except.UnavailableException()
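# A minimal usage sketch of the retrying request() helper above. This is an
# assumption-laden illustration, not part of the original code: the caller
# name get_amp_info and the '/info' path are hypothetical placeholders; only
# the request(method, amp, path, **kwargs) signature is taken from the code.
def get_amp_info(self, amp):
    # Issue a GET through the retrying helper; it raises TimeOutException
    # once the configured connection retries are exhausted.
    response = self.request('get', amp, path='/info')
    response.raise_for_status()
    return response.json()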
def revert(self, result, amphora, loadbalancer, *args, **kwargs):
    """Handle a failed amphora vip plug notification."""
    if isinstance(result, failure.Failure):
        return
    LOG.warning(_LW("Reverting post vip plug."))
    self.task_utils.mark_amphora_status_error(amphora.id)
    self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
def revert(self, listener, *args, **kwargs):
    """Handle a failed listener delete."""

    LOG.warn(_LW("Reverting listener delete."))
    self.listener_repo.update(db_apis.get_session(), id=listener.id,
                              provisioning_status=constants.ERROR)
def request(self, method, amp, path='/', **kwargs):
    LOG.debug("request url %s", path)
    _request = getattr(self.session, method.lower())
    _url = self._base_url(amp.lb_network_ip) + path
    LOG.debug("request url " + _url)
    reqargs = {
        'verify': CONF.haproxy_amphora.server_ca,
        'url': _url, }
    reqargs.update(kwargs)
    headers = reqargs.setdefault('headers', {})
    headers['User-Agent'] = OCTAVIA_API_CLIENT
    self.ssl_adapter.uuid = amp.id
    # Keep retrying
    for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
        try:
            r = _request(**reqargs)
        except requests.ConnectionError:
            LOG.warn(_LW("Could not talk to instance"))
            time.sleep(CONF.haproxy_amphora.connection_retry_interval)
            if a >= CONF.haproxy_amphora.connection_max_retries:
                raise driver_except.TimeOutException()
        else:
            return r
    raise driver_except.UnavailableException()
def post_vip_plug(self, amphora, load_balancer, amphorae_network_config):
    if amphora.status != consts.DELETED:
        subnet = amphorae_network_config.get(amphora.id).vip_subnet
        # NOTE(blogan): using the vrrp port here because that
        # is what the allowed address pairs network driver sets
        # this particular port to. This does expose a bit of
        # tight coupling between the network driver and amphora
        # driver. We will need to revisit this to try and remove
        # this tight coupling.
        # NOTE (johnsom): I am loading the vrrp_ip into the
        # net_info structure here so that I don't break
        # compatibility with old amphora agent versions.
        port = amphorae_network_config.get(amphora.id).vrrp_port
        host_routes = [{'nexthop': hr.nexthop,
                        'destination': hr.destination}
                       for hr in subnet.host_routes]
        net_info = {'subnet_cidr': subnet.cidr,
                    'gateway': subnet.gateway_ip,
                    'mac_address': port.mac_address,
                    'vrrp_ip': amphora.vrrp_ip,
                    'mtu': port.network.mtu,
                    'host_routes': host_routes}
        try:
            self.client.plug_vip(amphora,
                                 load_balancer.vip.ip_address,
                                 net_info)
        except exc.Conflict:
            LOG.warning(_LW('VIP with MAC {mac} already exists on '
                            'amphora, skipping post_vip_plug').format(
                                mac=port.mac_address))
def _delete_vip_security_group(self, sec_grp):
    """Deletes a security group in neutron.

    Retries upon an exception because removing a security group from
    a neutron port does not happen immediately.
    """
    attempts = 0
    while attempts <= cfg.CONF.networking.max_retries:
        try:
            self.neutron_client.delete_security_group(sec_grp)
            LOG.info(_LI("Deleted security group %s"), sec_grp)
            return
        except neutron_client_exceptions.NotFound:
            LOG.info(_LI("Security group %s not found, will assume it is "
                         "already deleted"), sec_grp)
            return
        except Exception:
            LOG.warning(_LW("Attempt %(attempt)s to remove security group "
                            "%(sg)s failed."),
                        {'attempt': attempts + 1, 'sg': sec_grp})
            attempts += 1
            time.sleep(cfg.CONF.networking.retry_interval)
    message = _LE("All attempts to remove security group {0} have "
                  "failed.").format(sec_grp)
    LOG.exception(message)
    raise base.DeallocateVIPException(message)
def revert(self, pool, *args, **kwargs):
    """Mark the pool ERROR since the delete couldn't happen

    :returns: None
    """
    LOG.warning(_LW("Reverting delete in DB "
                    "for pool id %s"), pool.id)
def revert(self, loadbalancer_id, server_group_id, *args, **kwargs):
    LOG.warn(_LW('Reverting Server Group updated with id: %(s1)s for '
                 'load balancer id: %(s2)s '),
             {'s1': server_group_id, 's2': loadbalancer_id})
    self.loadbalancer_repo.update(db_apis.get_session(),
                                  id=loadbalancer_id,
                                  server_group_id=None)
def revert(self, result, amphora, *args, **kwargs):
    """Handle a failed post network plug."""
    if isinstance(result, failure.Failure):
        return
    LOG.warning(_LW("Reverting post network plug."))
    self.amphora_repo.update(db_apis.get_session(), id=amphora.id,
                             status=constants.ERROR)
def revert(self, loadbalancer_id, server_group_id, *args, **kwargs):
    LOG.warning(_LW('Reverting Server Group updated with id: %(s1)s for '
                    'load balancer id: %(s2)s '),
                {'s1': server_group_id, 's2': loadbalancer_id})
    self.loadbalancer_repo.update(db_apis.get_session(),
                                  id=loadbalancer_id,
                                  server_group_id=None)
def request(self, method, amp, path='/', **kwargs):
    LOG.debug("request url %s", path)
    _request = getattr(self.session, method.lower())
    _url = self._base_url(amp.lb_network_ip) + path
    LOG.debug("request url " + _url)
    timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
                     CONF.haproxy_amphora.rest_request_read_timeout)
    reqargs = {
        'url': _url,
        'timeout': timeout_tuple, }
    reqargs.update(kwargs)
    headers = reqargs.setdefault('headers', {})
    headers['User-Agent'] = OCTAVIA_API_CLIENT
    # Keep retrying
    for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    message="A true SSLContext object is not available")
                r = _request(**reqargs)
            LOG.debug(
                "Connected to amphora. Response: {resp}".format(resp=r))
            return r
        except (requests.ConnectionError, requests.Timeout):
            LOG.warning(_LW("Could not connect to instance. Retrying."))
            time.sleep(CONF.haproxy_amphora.connection_retry_interval)
    LOG.error(
        _LE("Connection retries (currently set to %s) "
            "exhausted. The amphora is unavailable."),
        CONF.haproxy_amphora.connection_max_retries)
    raise driver_except.TimeOutException()
def _extract_amp_image_id_by_tag(client, image_tag, image_owner):
    if image_owner:
        images = list(client.images.list(
            filters={'tag': [image_tag],
                     'owner': image_owner,
                     'status': constants.GLANCE_IMAGE_ACTIVE},
            sort='created_at:desc',
            limit=2))
    else:
        images = list(client.images.list(
            filters={'tag': [image_tag],
                     'status': constants.GLANCE_IMAGE_ACTIVE},
            sort='created_at:desc',
            limit=2))

    if not images:
        raise exceptions.GlanceNoTaggedImages(tag=image_tag)
    image_id = images[0]['id']
    num_images = len(images)
    if num_images > 1:
        LOG.warning(
            _LW("A single Glance image should be tagged with %(tag)s tag, "
                "but at least two were found. Using %(image_id)s."),
            {'tag': image_tag, 'image_id': image_id})
    return image_id
def request(self, method, amp, path='/', **kwargs):
    LOG.debug("request url %s", path)
    _request = getattr(self.session, method.lower())
    _url = self._base_url(amp.lb_network_ip) + path
    LOG.debug("request url " + _url)
    timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
                     CONF.haproxy_amphora.rest_request_read_timeout)
    reqargs = {
        'verify': CONF.haproxy_amphora.server_ca,
        'url': _url,
        'timeout': timeout_tuple, }
    reqargs.update(kwargs)
    headers = reqargs.setdefault('headers', {})
    headers['User-Agent'] = OCTAVIA_API_CLIENT
    self.ssl_adapter.uuid = amp.id
    # Keep retrying
    for a in six.moves.xrange(CONF.haproxy_amphora.connection_max_retries):
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    message="A true SSLContext object is not available")
                r = _request(**reqargs)
        except (requests.ConnectionError, requests.Timeout):
            LOG.warn(_LW("Could not connect to instance. Retrying."))
            time.sleep(CONF.haproxy_amphora.connection_retry_interval)
            if a >= CONF.haproxy_amphora.connection_max_retries:
                raise driver_except.TimeOutException()
        else:
            return r
    raise driver_except.UnavailableException()
def revert(self, result, amphora, *args, **kwargs):
    """Handle a failed post network plug."""
    if isinstance(result, failure.Failure):
        return
    LOG.warn(_LW("Reverting post network plug."))
    self.amphora_repo.update(db_apis.get_session(), id=amphora.id,
                             status=constants.ERROR)
def revert(self, result, loadbalancer, *args, **kwargs):
    """Handle a failed amphora vip plug notification."""
    if isinstance(result, failure.Failure):
        return
    LOG.warn(_LW("Reverting post vip plug."))
    self.loadbalancer_repo.update(db_apis.get_session(),
                                  id=loadbalancer.id,
                                  status=constants.ERROR)
def revert(self, result, loadbalancer, *args, **kwargs):
    """Handle a failure to plumb a vip."""
    if isinstance(result, failure.Failure):
        return
    LOG.warn(_LW("Unable to plug VIP for loadbalancer id %s"),
             loadbalancer.id)
    self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
def revert(self, result, loadbalancer, *args, **kwargs):
    """Handle a failed amphora vip plug notification."""
    if isinstance(result, failure.Failure):
        return
    LOG.warning(_LW("Reverting post vip plug."))
    self.loadbalancer_repo.update(db_apis.get_session(),
                                  id=loadbalancer.id,
                                  provisioning_status=constants.ERROR)
def revert(self, result, loadbalancer, *args, **kwargs):
    """Handle a failed amphora VRRP interface update."""
    if isinstance(result, failure.Failure):
        return
    LOG.warn(_LW("Reverting Get Amphora VRRP Interface."))
    for amp in loadbalancer.amphorae:
        self.amphora_repo.update(db_apis.get_session(), amp.id,
                                 vrrp_interface=None)
def revert(self, l7policy_id, *args, **kwargs):
    """Mark the l7policy ERROR since the delete couldn't happen

    :returns: None
    """
    LOG.warn(_LW("Reverting delete in DB "
                 "for l7policy id %s"), l7policy_id)
def revert(self, result, loadbalancer, added_ports, *args, **kwargs):
    """Handle a failed post network plug."""
    if isinstance(result, failure.Failure):
        return
    LOG.warn(_LW("Reverting post network plug."))
    for amphora in loadbalancer.amphorae:
        self.amphora_repo.update(db_apis.get_session(), id=amphora.id,
                                 status=constants.ERROR)
def revert(self, l7rule, *args, **kwargs):
    """Mark the l7rule ERROR since the delete couldn't happen

    :returns: None
    """
    LOG.warning(_LW("Reverting delete in DB "
                    "for l7rule id %s"), l7rule.id)
def revert(self, amphora, *args, **kwargs):
    """Mark the amphora as broken and ready to be cleaned up."""
    LOG.warn(_LW("Reverting mark amphora pending update in DB "
                 "for amp id %(amp)s and compute id %(comp)s"),
             {'amp': amphora.id, 'comp': amphora.compute_id})
    self.amphora_repo.update(db_apis.get_session(), amphora.id,
                             status=constants.ERROR)
def revert(self, pool_id, *args, **kwargs):
    """Mark the health monitor ERROR since the mark active couldn't happen

    :returns: None
    """
    LOG.warn(_LW("Reverting mark health monitor delete in DB "
                 "for health monitor on pool with id %s"), pool_id)
def execute(self, listener):
    """Delete the listener in DB

    :param listener: The listener to delete
    :returns: None
    """
    # Debug messages are not translated, so the _LW marker and eager
    # %-interpolation are dropped in favor of lazy logging arguments.
    LOG.debug("Delete in DB for listener id: %s", listener.id)
    self.listener_repo.delete(db_apis.get_session(), id=listener.id)
def revert(self, l7rule_id, *args, **kwargs):
    """Mark the l7rule ERROR since the delete couldn't happen

    :returns: None
    """
    LOG.warning(_LW("Reverting delete in DB "
                    "for l7rule id %s"), l7rule_id)
def execute(self, loadbalancer):
    """Update amphorae statuses to DELETED in the database."""
    for amp in loadbalancer.amphorae:
        # Debug messages are not translated, so the _LW marker and eager
        # %-interpolation are dropped in favor of lazy logging arguments.
        LOG.debug("Marking amphora %s DELETED", amp.id)
        self.amphora_repo.update(db_apis.get_session(),
                                 id=amp.id, status=constants.DELETED)
def revert(self, listener_id, *args, **kwargs):
    """Mark the listener ERROR since the listener didn't delete

    :returns: None
    """
    LOG.warning(_LW("Reverting mark listener delete in DB "
                    "for listener id %s"), listener_id)
def revert(self, listener, *args, **kwargs):
    """Handle a failed listener start."""

    LOG.warning(_LW("Reverting listener start."))
    self.task_utils.mark_listener_prov_status_error(listener.id)
    return None
def revert(self, pool_id, *args, **kwargs):
    """Mark the pool ERROR since the delete couldn't happen

    :returns: None
    """
    LOG.warn(_LW("Reverting delete in DB "
                 "for pool id %s"), pool_id)
def revert(self, loadbalancer, *args, **kwargs):
    """Handle failed listeners updates."""

    LOG.warning(_LW("Reverting listeners updates."))
    for listener in loadbalancer.listeners:
        self.task_utils.mark_listener_prov_status_error(listener.id)
    return None
def revert(self, result, loadbalancer, *args, **kwargs):
    """Handle a failure to allocate vip."""
    if isinstance(result, failure.Failure):
        LOG.exception(_LE("Unable to allocate VIP"))
        return
    vip = result
    LOG.warn(_LW("Deallocating vip %s"), vip.ip_address)
    self.network_driver.deallocate_vip(vip)
def revert(self, listener, *args, **kwargs):
    """Mark the listener ERROR since the listener didn't delete

    :returns: None
    """
    LOG.warning(
        _LW("Reverting mark listener delete in DB "
            "for listener id %s"), listener.id)
def revert(self, listeners, *args, **kwargs):
    """Handle failed listeners updates."""

    LOG.warn(_LW("Reverting listeners updates."))
    for listener in listeners:
        self.listener_repo.update(db_apis.get_session(), id=listener.id,
                                  provisioning_status=constants.ERROR)
    return None
def _get_image_uuid(client, image_id, image_tag, image_owner):
    if image_id:
        if image_tag:
            LOG.warning(
                _LW("Both amp_image_id and amp_image_tag options defined. "
                    "Using the amp_image_id."))
        return image_id

    return _extract_amp_image_id_by_tag(client, image_tag, image_owner)
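# A hedged sketch of wiring the _get_image_uuid helper above to configuration.
# The CONF.controller_worker option names shown are assumptions for
# illustration only; just the helper's signature is taken from the code above.
def _resolve_amphora_image(glance_client):
    # Prefer an explicit image id; otherwise fall back to the tag lookup.
    return _get_image_uuid(
        glance_client,
        CONF.controller_worker.amp_image_id,
        CONF.controller_worker.amp_image_tag,
        CONF.controller_worker.amp_image_owner_id)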
def revert(self, pool, *args, **kwargs):
    """Mark the pool ERROR since the update couldn't happen

    :returns: None
    """
    LOG.warn(_LW("Reverting update pool in DB "
                 "for pool id %s"), pool.id)
    # TODO(johnsom) fix this to set the upper objects to ERROR
    self.pool_repo.update(db_apis.get_session(),
                          pool.id,
                          enabled=0)
def _get_image_uuid(client, image_id, image_tag):
    if image_id:
        if image_tag:
            LOG.warn(
                _LW("Both amp_image_id and amp_image_tag options defined. "
                    "Using the former."))
        return image_id

    return _extract_amp_image_id_by_tag(client, image_tag)
def revert(self, result, *args, **kwargs):
    """Revert the creation of the server group.

    :param result: here it refers to server group id
    """
    server_group_id = result
    LOG.warning(_LW("Reverting server group create with id:%s"),
                server_group_id)
    self.compute.delete_server_group(server_group_id)
def revert(self, result, loadbalancer, added_ports, *args, **kwargs):
    """Handle a failed post network plug."""
    if isinstance(result, failure.Failure):
        return
    LOG.warning(_LW("Reverting post network plug."))
    for amphora in six.moves.filter(
            lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
            loadbalancer.amphorae):
        self.task_utils.mark_amphora_status_error(amphora.id)
def revert(self, listener, *args, **kwargs):
    """Mark the listener ERROR since the delete couldn't happen

    :returns: None
    """
    LOG.warn(_LW("Reverting mark listener deleted in DB "
                 "for listener id %s"), listener.id)
    self.listener_repo.update(db_apis.get_session(),
                              listener.id,
                              provisioning_status=constants.ERROR)
def revert(self, amphora, *args, **kwargs):
    """Mark the amphora as broken and ready to be cleaned up."""
    LOG.warning(_LW("Reverting mark amphora ready in DB for amp "
                    "id %(amp)s and compute id %(comp)s"),
                {'amp': amphora.id, 'comp': amphora.compute_id})
    self.amphora_repo.update(db_apis.get_session(), amphora.id,
                             status=constants.ERROR,
                             compute_id=amphora.compute_id,
                             lb_network_ip=amphora.lb_network_ip)
def revert(self, member, *args, **kwargs):
    """Mark the member ERROR since the update couldn't happen

    :returns: None
    """
    LOG.warning(_LW("Reverting update member in DB "
                    "for member id %s"), member.id)
    # TODO(johnsom) fix this to set the upper objects to ERROR
    self.member_repo.update(db_apis.get_session(),
                            member.id,
                            enabled=0)
def revert(self, loadbalancer, listeners, *args, **kwargs):
    """Mark the load balancer and listeners as broken."""
    LOG.warning(_LW("Reverting mark load balancer "
                    "and listeners active in DB "
                    "for load balancer id %(LB)s and "
                    "listener ids: %(list)s"),
                {'LB': loadbalancer.id,
                 'list': ', '.join([l.id for l in listeners])})
    self.loadbalancer_repo.update(db_apis.get_session(),
                                  loadbalancer.id,
                                  provisioning_status=constants.ERROR)
    for listener in listeners:
        try:
            self.listener_repo.update(db_apis.get_session(), listener.id,
                                      provisioning_status=constants.ERROR)
        except Exception:
            LOG.warning(_LW("Failed to update listener %s provisioning "
                            "status..."), listener.id)
def revert(self, health_mon, *args, **kwargs):
    """Mark the health monitor ERROR since the update couldn't happen

    :returns: None
    """
    LOG.warn(_LW("Reverting update health monitor in DB "
                 "for health monitor id %s"), health_mon.pool_id)
    # TODO(johnsom) fix this to set the upper objects to ERROR
    self.health_mon_repo.update(db_apis.get_session(),
                                health_mon.pool_id,
                                enabled=0)