def _lookup(self, pool_id, *remainder):
    """Overriden pecan _lookup method for custom routing.

    Verifies that the pool passed in the url exists, and if so decides
    which controller, if any, should control be passed.

    :param pool_id: pool ID taken from the URL path.
    :param remainder: remaining URL path segments.
    :returns: (controller, remainder) tuple for pecan routing, or
        implicitly ``None`` when no sub-resource matches.
    :raises exceptions.NotFound: if the pool does not exist.
    """
    session = db_api.get_session()
    # Both sub-resources share the same pool validation; the original
    # duplicated the fetch/not-found logic in two identical branches.
    if pool_id and remainder and remainder[0] in ('members',
                                                  'healthmonitor'):
        resource = remainder[0]
        remainder = remainder[1:]
        db_pool = self.repositories.pool.get(session, id=pool_id)
        if not db_pool:
            LOG.info(_LI("Pool %s not found."), pool_id)
            raise exceptions.NotFound(resource=data_models.Pool._name(),
                                      id=pool_id)
        if resource == 'members':
            return member.MembersController(
                load_balancer_id=self.load_balancer_id,
                listener_id=self.listener_id,
                pool_id=db_pool.id), remainder
        return health_monitor.HealthMonitorController(
            load_balancer_id=self.load_balancer_id,
            listener_id=self.listener_id,
            pool_id=db_pool.id), remainder
def _delete_vip_security_group(self, sec_grp):
    """Deletes a security group in neutron.

    Retries upon an exception because removing a security group from
    a neutron port does not happen immediately.

    :param sec_grp: ID of the neutron security group to delete.
    :raises base.DeallocateVIPException: if all retry attempts fail.
    """
    attempts = 0
    # NOTE(review): '<=' means max_retries + 1 total attempts.
    while attempts <= CONF.networking.max_retries:
        try:
            self.neutron_client.delete_security_group(sec_grp)
            LOG.info(_LI("Deleted security group %s"), sec_grp)
            return
        except neutron_client_exceptions.NotFound:
            # Already gone -- treat as success.
            LOG.info(_LI("Security group %s not found, will assume it is "
                         "already deleted"), sec_grp)
            return
        except Exception:
            LOG.warning(_LW("Attempt %(attempt)s to remove security group "
                            "%(sg)s failed."),
                        {'attempt': attempts + 1, 'sg': sec_grp})
            attempts += 1
            # Give neutron time to release the group from the port.
            time.sleep(CONF.networking.retry_interval)
    message = _LE("All attempts to remove security group {0} have "
                  "failed.").format(sec_grp)
    LOG.exception(message)
    raise base.DeallocateVIPException(message)
def delete(self, id):
    """Deletes a load balancer.

    :param id: ID of the load balancer to delete.
    :raises exceptions.NotFound: if the load balancer does not exist.
    :raises exceptions.ImmutableObject: if it is not in a mutable state.
    :returns: the load balancer in PENDING_DELETE as a response type.
    """
    context = pecan.request.context.get('octavia_context')
    # Purely to make lines smaller length
    lb_repo = self.repositories.load_balancer
    db_lb = self.repositories.load_balancer.get(context.session, id=id)
    if not db_lb:
        LOG.info(_LI("Load Balancer %s was not found."), id)
        raise exceptions.NotFound(
            resource=data_models.LoadBalancer._name(), id=id)
    # Check load balancer is in a mutable status
    if not lb_repo.test_and_set_provisioning_status(
            context.session, id, constants.PENDING_DELETE):
        LOG.info(_LI("Load Balancer %s is immutable."), id)
        raise exceptions.ImmutableObject(resource=db_lb._name(), id=id)
    # Re-read so the response reflects the new PENDING_DELETE status.
    db_lb = self.repositories.load_balancer.get(context.session, id=id)
    try:
        LOG.info(_LI("Sending deleted Load Balancer %s to the handler"),
                 db_lb.id)
        self.handler.delete(db_lb)
    except Exception:
        # Mark the LB ERROR but swallow the exception so the response
        # below is still returned to the caller.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.load_balancer.update(
                context.session, db_lb.id,
                provisioning_status=constants.ERROR)
    return self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
def health_check(self):
    """Loop forever, failing over amphorae with stale heartbeats.

    One scan pass runs per health_check_interval; failovers are handed
    to a thread pool so a slow failover does not block scanning.
    """
    amp_health_repo = repo.AmphoraHealthRepository()
    with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
        try:
            # Don't start checking immediately, as the health manager may
            # have been down for a while and amphorae not able to check
            # in.
            LOG.debug("Pausing before starting health check")
            time.sleep(CONF.health_manager.heartbeat_timeout)
            while True:
                session = db_api.get_session()
                LOG.debug("Starting amphora health check")
                failover_count = 0
                # Drain every currently-stale amphora before sleeping.
                while True:
                    amp = amp_health_repo.get_stale_amphora(session)
                    if amp is None:
                        break
                    failover_count += 1
                    LOG.info(_LI("Stale amphora's id is: %s"),
                             amp.amphora_id)
                    executor.submit(self.cw.failover_amphora,
                                    amp.amphora_id)
                if failover_count > 0:
                    LOG.info(_LI("Failed over %s amphora"),
                             failover_count)
                time.sleep(CONF.health_manager.health_check_interval)
        finally:
            # Let in-flight failovers finish before exiting.
            executor.shutdown(wait=True)
def main():
    """House-keeping service entry point.

    Starts the spare-amphora-check and DB-cleanup daemon threads, then
    idles until interrupted, at which point both worker loops are
    signalled to stop and joined.
    """
    service.prepare_service(sys.argv)
    # Enable Guru Meditation Reports for post-mortem debugging.
    gmr.TextGuruMeditation.setup_autorun(version)
    timestamp = str(datetime.datetime.utcnow())
    LOG.info(_LI("Starting house keeping at %s"), timestamp)
    # Thread to perform spare amphora check
    spare_amp_thread = threading.Thread(target=spare_amphora_check)
    spare_amp_thread.daemon = True
    # Setting the event tells the worker loop to keep running.
    spare_amp_thread_event.set()
    spare_amp_thread.start()
    # Thread to perform db cleanup
    db_cleanup_thread = threading.Thread(target=db_cleanup)
    db_cleanup_thread.daemon = True
    db_cleanup_thread_event.set()
    db_cleanup_thread.start()
    # Try-Exception block should be at the end to gracefully exit threads
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        LOG.info(_LI("Attempting to gracefully terminate House-Keeping"))
        # Clearing the events asks the worker loops to exit.
        spare_amp_thread_event.clear()
        db_cleanup_thread_event.clear()
        spare_amp_thread.join()
        db_cleanup_thread.join()
        LOG.info(_LI("House-Keeping process terminated"))
def listener_controller(listener, delete=False, update=False, create=False):
    """Simulated asynchronous handler for listener operations.

    Sleeps to mimic controller latency, then applies the DB status
    changes a real controller would make.

    :param listener: listener data model the operation applies to.
    :param delete: mark the listener DELETED/OFFLINE.
    :param update: write the listener's fields, keeping the stored
        operating_status.
    :param create: mark the listener and its load balancer
        ACTIVE/ONLINE.
    """
    time.sleep(ASYNC_TIME)
    LOG.info(_LI("Simulating controller operation for listener..."))
    if delete:
        repo.listener.update(db_api.get_session(), listener.id,
                             operating_status=constants.OFFLINE,
                             provisioning_status=constants.DELETED)
    elif update:
        db_listener = repo.listener.get(db_api.get_session(),
                                        id=listener.id)
        listener_dict = listener.to_dict()
        # Preserve the stored operating_status; an update must not
        # overwrite it with the caller-supplied value.
        listener_dict['operating_status'] = db_listener.operating_status
        repo.listener.update(db_api.get_session(), listener.id,
                             **listener_dict)
    elif create:
        repo.listener.update(db_api.get_session(), listener.id,
                             operating_status=constants.ONLINE,
                             provisioning_status=constants.ACTIVE)
    # The parent load balancer returns to ACTIVE after any operation.
    repo.load_balancer.update(db_api.get_session(),
                              listener.load_balancer.id,
                              operating_status=constants.ONLINE,
                              provisioning_status=constants.ACTIVE)
    LOG.info(_LI("Simulated Controller Handler Thread Complete"))
def main():
    """House-keeping service entry point (no Guru Meditation setup).

    Starts the spare-amphora-check and DB-cleanup daemon threads, then
    idles until interrupted, at which point both worker loops are
    signalled to stop and joined.
    """
    service.prepare_service(sys.argv)
    timestamp = str(datetime.datetime.utcnow())
    LOG.info(_LI("Starting house keeping at %s"), timestamp)
    # Thread to perform spare amphora check
    spare_amp_thread = threading.Thread(target=spare_amphora_check)
    spare_amp_thread.daemon = True
    # Setting the event tells the worker loop to keep running.
    spare_amp_thread_event.set()
    spare_amp_thread.start()
    # Thread to perform db cleanup
    db_cleanup_thread = threading.Thread(target=db_cleanup)
    db_cleanup_thread.daemon = True
    db_cleanup_thread_event.set()
    db_cleanup_thread.start()
    # Try-Exception block should be at the end to gracefully exit threads
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        LOG.info(_LI("Attempting to gracefully terminate House-Keeping"))
        # Clearing the events asks the worker loops to exit.
        spare_amp_thread_event.clear()
        db_cleanup_thread_event.clear()
        spare_amp_thread.join()
        db_cleanup_thread.join()
        LOG.info(_LI("House-Keeping process terminated"))
def member_controller(member, delete=False, update=False, create=False):
    """Simulated asynchronous handler for member operations.

    Sleeps to mimic controller latency, applies the member change, then
    returns every affected listener and the load balancer to
    ACTIVE/ONLINE.

    :param member: member data model the operation applies to.
    :param delete: delete the member row.
    :param update: write the member's fields, keeping the stored
        operating_status.
    :param create: mark the member ONLINE.
    """
    time.sleep(ASYNC_TIME)
    LOG.info(_LI("Simulating controller operation for member..."))
    db_mem = None
    if delete:
        db_mem = repo.member.get(db_api.get_session(), member.id)
        repo.member.delete(db_api.get_session(), id=member.id)
    elif update:
        db_mem = repo.member.get(db_api.get_session(), member.id)
        member_dict = member.to_dict()
        # Preserve the stored operating_status across the update.
        member_dict['operating_status'] = db_mem.operating_status
        repo.member.update(db_api.get_session(), member.id,
                           **member_dict)
    elif create:
        repo.member.update(db_api.get_session(), member.id,
                           operating_status=constants.ONLINE)
    # Collect affected listeners from both the DB copy and the passed
    # model, without duplicates.
    listeners = []
    if db_mem:
        for listener in db_mem.pool.listeners:
            if listener not in listeners:
                listeners.append(listener)
    if member.pool.listeners:
        for listener in member.pool.listeners:
            if listener not in listeners:
                listeners.append(listener)
    if listeners:
        for listener in listeners:
            repo.listener.update(db_api.get_session(), listener.id,
                                 operating_status=constants.ONLINE,
                                 provisioning_status=constants.ACTIVE)
    repo.load_balancer.update(db_api.get_session(),
                              member.pool.load_balancer.id,
                              operating_status=constants.ONLINE,
                              provisioning_status=constants.ACTIVE)
    LOG.info(_LI("Simulated Controller Handler Thread Complete"))
def deallocate_vip(self, vip):
    """Delete the VIP port and its security group, if Octavia owns them.

    If the port was supplied by the user (device_owner is not Octavia's),
    only the Octavia security group is detached and deleted; the port
    itself is left alone.

    :param vip: VIP data model with port_id and load_balancer set.
    :raises base.VIPConfigurationNotFound: if the port no longer exists.
    :raises base.DeallocateVIPException: if the port delete fails.
    """
    try:
        port = self.get_port(vip.port_id)
    except base.PortNotFound:
        msg = ("Can't deallocate VIP because the vip port {0} cannot be "
               "found in neutron".format(vip.port_id))
        raise base.VIPConfigurationNotFound(msg)
    if port.device_owner != OCTAVIA_OWNER:
        LOG.info(_LI("Port %s will not be deleted by Octavia as it was "
                     "not created by Octavia."), vip.port_id)
        if self.sec_grp_enabled:
            sec_grp = self._get_lb_security_group(vip.load_balancer.id)
            sec_grp = sec_grp.get('id')
            LOG.info(
                _LI("Removing security group %(sg)s from port %(port)s"),
                {'sg': sec_grp, 'port': vip.port_id})
            raw_port = self.neutron_client.show_port(port.id)
            sec_grps = raw_port.get('port', {}).get('security_groups', [])
            if sec_grp in sec_grps:
                # Detach our group before deleting it.
                sec_grps.remove(sec_grp)
                port_update = {'port': {'security_groups': sec_grps}}
                self.neutron_client.update_port(port.id, port_update)
            self._delete_vip_security_group(sec_grp)
        return
    try:
        self.neutron_client.delete_port(vip.port_id)
    except Exception:
        message = _LE('Error deleting VIP port_id {port_id} from '
                      'neutron').format(port_id=vip.port_id)
        LOG.exception(message)
        raise base.DeallocateVIPException(message)
    if self.sec_grp_enabled:
        sec_grp = self._get_lb_security_group(vip.load_balancer.id)
        sec_grp = sec_grp.get('id')
        self._delete_vip_security_group(sec_grp)
def main():
    """Health manager entry point.

    Spawns the heartbeat listener and the health-check loop as separate
    processes, joins them, and terminates both on interrupt.
    """
    service.prepare_service(sys.argv)
    # Enable Guru Meditation Reports for post-mortem debugging.
    gmr.TextGuruMeditation.setup_autorun(version)
    processes = []
    hm_listener_proc = multiprocessing.Process(name='HM_listener',
                                               target=hm_listener)
    processes.append(hm_listener_proc)
    hm_health_check_proc = multiprocessing.Process(name='HM_health_check',
                                                   target=hm_health_check)
    processes.append(hm_health_check_proc)
    LOG.info(_LI("Health Manager listener process starts:"))
    hm_listener_proc.start()
    LOG.info(_LI("Health manager check process starts:"))
    hm_health_check_proc.start()
    try:
        for process in processes:
            process.join()
    except KeyboardInterrupt:
        # Fixed typo in the log message: "existing" -> "exiting".
        LOG.info(_LI("Health Manager exiting due to signal"))
        hm_listener_proc.terminate()
        hm_health_check_proc.terminate()
def pool_controller(pool, delete=False, update=False, create=False):
    """Simulated asynchronous handler for pool operations.

    Sleeps to mimic controller latency, applies the pool change, then
    returns affected listeners and the load balancer to ACTIVE/ONLINE.

    :param pool: pool data model the operation applies to.
    :param delete: delete the pool row.
    :param update: write the pool's fields (and session persistence),
        keeping the stored operating_status.
    :param create: mark the pool ONLINE.
    """
    time.sleep(ASYNC_TIME)
    LOG.info(_LI("Simulating controller operation for pool..."))
    db_pool = None
    if delete:
        db_pool = repo.pool.get(db_api.get_session(), id=pool.id)
        repo.pool.delete(db_api.get_session(), id=pool.id)
    elif update:
        db_pool = repo.pool.get(db_api.get_session(), id=pool.id)
        pool_dict = pool.to_dict()
        # Preserve the stored operating_status across the update.
        pool_dict['operating_status'] = db_pool.operating_status
        repo.update_pool_and_sp(db_api.get_session(), pool.id, pool_dict)
    elif create:
        repo.pool.update(db_api.get_session(), pool.id,
                         operating_status=constants.ONLINE)
    # Collect affected listeners from both the DB copy and the passed
    # model, without duplicates.
    listeners = []
    if db_pool:
        for listener in db_pool.listeners:
            if listener not in listeners:
                listeners.append(listener)
    if pool.listeners:
        for listener in pool.listeners:
            if listener not in listeners:
                listeners.append(listener)
    if listeners:
        for listener in listeners:
            repo.listener.update(db_api.get_session(), listener.id,
                                 operating_status=constants.ONLINE,
                                 provisioning_status=constants.ACTIVE)
    repo.load_balancer.update(db_api.get_session(),
                              pool.load_balancer.id,
                              operating_status=constants.ONLINE,
                              provisioning_status=constants.ACTIVE)
    LOG.info(_LI("Simulated Controller Handler Thread Complete"))
def _validate_cert(cls, ca_cert, ca_key, ca_key_pass):
    """Fill in CA cert, key and passphrase from config when not given.

    :param ca_cert: CA certificate PEM text, or falsy to load from
        CONF.certificates.ca_certificate.
    :param ca_key: CA private key PEM text, or falsy to load from
        CONF.certificates.ca_private_key.
    :param ca_key_pass: private key passphrase, or falsy to read
        CONF.certificates.ca_private_key_passphrase.
    :raises exceptions.CertificateGenerationException: if a configured
        file cannot be read.
    """
    if not ca_cert:
        LOG.info(_LI("Using CA Certificate from config."))
        try:
            # 'with' closes the file even on error (original leaked it).
            with open(CONF.certificates.ca_certificate) as f:
                ca_cert = f.read()
        except IOError:
            raise exceptions.CertificateGenerationException(
                msg="Failed to load {0}."
                .format(CONF.certificates.ca_certificate)
            )
    if not ca_key:
        LOG.info(_LI("Using CA Private Key from config."))
        try:
            with open(CONF.certificates.ca_private_key) as f:
                ca_key = f.read()
        except IOError:
            # Bug fix: the error message previously named
            # ca_certificate instead of the key file that failed.
            raise exceptions.CertificateGenerationException(
                msg="Failed to load {0}."
                .format(CONF.certificates.ca_private_key)
            )
    if not ca_key_pass:
        ca_key_pass = CONF.certificates.ca_private_key_passphrase
        if ca_key_pass:
            LOG.info(_LI(
                "Using CA Private Key Passphrase from config."
            ))
        else:
            LOG.info(_LI(
                "No Passphrase found for CA Private Key, not using one."
            ))
def delete(self, id):
    """Deletes a pool member.

    :param id: ID of the member to delete.
    :raises exceptions.NotFound: if the member does not exist.
    :raises exceptions.ImmutableObject: if the load balancer is not in
        a mutable state.
    :returns: the member as a response type.
    """
    session = db_api.get_session()
    db_member = self.repositories.member.get(session, id=id)
    if not db_member:
        LOG.info(_LI("Member %s not found"), id)
        raise exceptions.NotFound(resource=data_models.Member._name(),
                                  id=id)
    # Verify load balancer is in a mutable status. If so it can be
    # assumed that the listener is also in a mutable status because a
    # load balancer will only be ACTIVE when all its listeners as
    # ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(
            _LI("Member %s cannot be deleted because its Load "
                "Balancer is in an immutable state."), id)
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    # Re-read after the provisioning-status change above.
    db_member = self.repositories.member.get(session, id=id)
    try:
        LOG.info(_LI("Sending Deletion of Member %s to handler"),
                 db_member.id)
        self.handler.delete(db_member)
    except Exception:
        # Mark the listener ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, self.listener_id,
                operating_status=constants.ERROR)
    db_member = self.repositories.member.get(session, id=id)
    return self._convert_db_to_type(db_member,
                                    member_types.MemberResponse)
def spare_check(self):
    """Checks the DB for the Spare amphora count.

    If it's less than the requirement, starts new amphora.
    """
    session = db_api.get_session()
    conf_spare_cnt = CONF.house_keeping.spare_amphora_pool_size
    curr_spare_cnt = self.amp_repo.get_spare_amphora_count(session)
    LOG.debug("Required Spare Amphora count : %d", conf_spare_cnt)
    LOG.debug("Current Spare Amphora count : %d", curr_spare_cnt)
    diff_count = conf_spare_cnt - curr_spare_cnt
    # When the current spare amphora is less than required
    if diff_count > 0:
        # Fixed: pass the argument lazily instead of eager %-formatting
        # so interpolation only happens when INFO is enabled.
        LOG.info(_LI("Initiating creation of %d spare amphora."),
                 diff_count)
        # Call Amphora Create Flow diff_count times
        for i in range(1, diff_count + 1):
            LOG.debug("Starting amphorae number %d ...", i)
            self.cw.create_amphora()
    else:
        # Debug messages are not translated, so the _LI marker is gone.
        LOG.debug("Current spare amphora count satisfies the "
                  "requirement")
def pool_controller(pool, delete=False, update=False, create=False):
    """Simulated asynchronous handler for pool operations.

    Single-listener variant: after the pool change, the pool's listener
    and load balancer return to ACTIVE/ONLINE.

    :param pool: pool data model the operation applies to.
    :param delete: delete the pool row.
    :param update: write the pool's fields (session persistence handled
        separately), keeping the stored operating_status.
    :param create: mark the pool ONLINE.
    """
    time.sleep(ASYNC_TIME)
    LOG.info(_LI("Simulating controller operation for pool..."))
    if delete:
        repo.pool.delete(db_api.get_session(), id=pool.id)
    elif update:
        db_pool = repo.pool.get(db_api.get_session(), id=pool.id)
        pool_dict = pool.to_dict()
        # Preserve the stored operating_status; session_persistence is
        # popped out and applied by the repository helper.
        pool_dict['operating_status'] = db_pool.operating_status
        sp_dict = pool_dict.pop('session_persistence', None)
        repo.update_pool_on_listener(db_api.get_session(), pool.id,
                                     pool_dict, sp_dict)
    elif create:
        repo.pool.update(db_api.get_session(), pool.id,
                         operating_status=constants.ONLINE)
    repo.listener.update(db_api.get_session(), pool.listener.id,
                         operating_status=constants.ONLINE,
                         provisioning_status=constants.ACTIVE)
    repo.load_balancer.update(db_api.get_session(),
                              pool.listener.load_balancer.id,
                              operating_status=constants.ONLINE,
                              provisioning_status=constants.ACTIVE)
    LOG.info(_LI("Simulated Controller Handler Thread Complete"))
def health_monitor_controller(health_monitor, delete=False, update=False,
                              create=False):
    """Simulated asynchronous handler for health monitor operations.

    Sleeps to mimic controller latency, applies the change, then
    returns the listener and load balancer to ACTIVE/ONLINE.

    :param health_monitor: health monitor data model.
    :param delete: delete the monitor row for its pool.
    :param update: write the monitor's fields, keeping the stored
        operating_status.
    :param create: mark the monitor's pool ONLINE.
    """
    time.sleep(ASYNC_TIME)
    LOG.info(_LI("Simulating controller operation for health monitor..."))
    if delete:
        repo.health_monitor.delete(db_api.get_session(),
                                   pool_id=health_monitor.pool.id)
    elif update:
        hm = repo.health_monitor.get(db_api.get_session(),
                                     health_monitor.pool_id)
        hm_dict = health_monitor.to_dict()
        # Bug fix: operating_status is an attribute, not a callable --
        # the original's hm.operating_status() raised TypeError.  This
        # matches the member/pool/listener simulated handlers.
        hm_dict['operating_status'] = hm.operating_status
        repo.health_monitor.update(db_api.get_session(), **hm_dict)
    elif create:
        repo.pool.update(db_api.get_session(), health_monitor.pool_id,
                         operating_status=constants.ONLINE)
    repo.test_and_set_lb_and_listener_prov_status(
        db_api.get_session(),
        health_monitor.pool.listener.load_balancer.id,
        health_monitor.pool.listener.id, constants.ACTIVE,
        constants.ACTIVE)
    repo.listener.update(db_api.get_session(),
                         health_monitor.pool.listener.id,
                         operating_status=constants.ONLINE,
                         provisioning_status=constants.ACTIVE)
    repo.load_balancer.update(
        db_api.get_session(),
        health_monitor.pool.listener.load_balancer.id,
        operating_status=constants.ONLINE,
        provisioning_status=constants.ACTIVE)
    LOG.info(_LI("Simulated Controller Handler Thread Complete"))
def put(self, id, load_balancer):
    """Updates a load balancer.

    :param id: ID of the load balancer to update.
    :param load_balancer: type object carrying the updated fields.
    :raises exceptions.NotFound: if the load balancer does not exist.
    :raises exceptions.ImmutableObject: if it is not in a mutable state.
    :returns: the load balancer as a response type.
    """
    session = db_api.get_session()
    # Purely to make lines smaller length
    lb_repo = self.repositories.load_balancer
    db_lb = self.repositories.load_balancer.get(session, id=id)
    # Fixed throughout: log arguments are passed lazily (",") instead
    # of eagerly %-formatted, matching the duplicate implementation
    # elsewhere in the tree and oslo logging guidelines.
    if not db_lb:
        LOG.info(_LI("Load Balancer %s was not found."), id)
        raise exceptions.NotFound(
            resource=data_models.LoadBalancer._name(), id=id)
    # Check load balancer is in a mutable status
    if not lb_repo.test_and_set_provisioning_status(
            session, id, constants.PENDING_UPDATE):
        LOG.info(_LI("Load Balancer %s is immutable."), id)
        raise exceptions.ImmutableObject(resource=db_lb._name(), id=id)
    try:
        LOG.info(_LI("Sending updated Load Balancer %s to the handler"),
                 id)
        self.handler.update(db_lb, load_balancer)
    except Exception:
        # Mark the LB ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.load_balancer.update(
                session, id, provisioning_status=constants.ERROR)
    lb = self.repositories.load_balancer.get(session, id=id)
    return self._convert_db_to_type(lb, lb_types.LoadBalancerResponse)
def _test_lb_and_listener_statuses(
        self, session, lb_id, id=None,
        listener_status=constants.PENDING_UPDATE):
    """Verify load balancer is in a mutable state.

    When ``id`` is given, atomically sets the LB to PENDING_UPDATE and
    that listener to ``listener_status``; otherwise only the LB's
    provisioning status is tested and set.

    :raises exceptions.ImmutableObject: if the LB is not mutable.
    """
    lb_repo = self.repositories.load_balancer
    if id:
        if not self.repositories.test_and_set_lb_and_listeners_prov_status(
                session, lb_id, constants.PENDING_UPDATE,
                listener_status, listener_ids=[id]):
            LOG.info(_LI("Load Balancer %s is immutable."), lb_id)
            db_lb = lb_repo.get(session, id=lb_id)
            raise exceptions.ImmutableObject(resource=db_lb._name(),
                                             id=lb_id)
    else:
        if not lb_repo.test_and_set_provisioning_status(
                session, lb_id, constants.PENDING_UPDATE):
            db_lb = lb_repo.get(session, id=lb_id)
            LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
            raise exceptions.ImmutableObject(resource=db_lb._name(),
                                             id=lb_id)
def delete(self):
    """Deletes a health monitor.

    :raises exceptions.NotFound: if no monitor exists for the pool.
    :raises exceptions.ImmutableObject: if the load balancer is not in
        a mutable state.
    :returns: the health monitor as a response type.
    """
    session = db_api.get_session()
    db_hm = self.repositories.health_monitor.get(
        session, pool_id=self.pool_id)
    if not db_hm:
        # Fixed: message previously said "cannot be updated because the
        # Load Balancer is immutable" (copy-paste) for a not-found case,
        # and was eagerly %-formatted.
        LOG.info(_LI("Health Monitor for Pool %s was not found"),
                 self.pool_id)
        # Fixed: previously passed the builtin ``id`` function here.
        raise exceptions.NotFound(
            resource=data_models.HealthMonitor._name(), id=self.pool_id)
    # Verify load balancer is in a mutable status. If so it can be
    # assumed that the listener is also in a mutable status because a
    # load balancer will only be ACTIVE when all it's listeners as
    # ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    db_hm = self.repositories.health_monitor.get(session,
                                                 pool_id=self.pool_id)
    try:
        # Fixed: lazy log argument instead of eager %-formatting.
        LOG.info(_LI("Sending Deletion of Health Monitor for Pool %s to "
                     "handler"), self.pool_id)
        self.handler.delete(db_hm)
    except Exception:
        # Mark the listener ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, self.listener_id,
                operating_status=constants.ERROR)
    db_hm = self.repositories.health_monitor.get(
        session, pool_id=self.pool_id)
    return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)
def put(self, id, load_balancer):
    """Updates a load balancer.

    :param id: ID of the load balancer to update.
    :param load_balancer: type object carrying the updated fields.
    :raises exceptions.NotFound: if the load balancer does not exist.
    :raises exceptions.ImmutableObject: if it is not in a mutable state.
    :returns: the load balancer as a response type.
    """
    session = db_api.get_session()
    # Purely to make lines smaller length
    lb_repo = self.repositories.load_balancer
    db_lb = self.repositories.load_balancer.get(session, id=id)
    if not db_lb:
        LOG.info(_LI("Load Balancer %s was not found."), id)
        raise exceptions.NotFound(
            resource=data_models.LoadBalancer._name(), id=id)
    # Check load balancer is in a mutable status
    if not lb_repo.test_and_set_provisioning_status(
            session, id, constants.PENDING_UPDATE):
        LOG.info(_LI("Load Balancer %s is immutable."), id)
        raise exceptions.ImmutableObject(resource=db_lb._name(), id=id)
    try:
        LOG.info(_LI("Sending updated Load Balancer %s to the handler"),
                 id)
        self.handler.update(db_lb, load_balancer)
    except Exception:
        # Mark the LB ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.load_balancer.update(
                session, id, provisioning_status=constants.ERROR)
    # Re-read so the response reflects any status change above.
    lb = self.repositories.load_balancer.get(session, id=id)
    return self._convert_db_to_type(lb, lb_types.LoadBalancerResponse)
def spare_check(self):
    """Checks the DB for the Spare amphora count.

    If it's less than the requirement, starts new amphora.
    """
    session = db_api.get_session()
    conf_spare_cnt = CONF.house_keeping.spare_amphora_pool_size
    curr_spare_cnt = self.amp_repo.get_spare_amphora_count(session)
    LOG.debug("Required Spare Amphora count : %d", conf_spare_cnt)
    LOG.debug("Current Spare Amphora count : %d", curr_spare_cnt)
    diff_count = conf_spare_cnt - curr_spare_cnt
    # When the current spare amphora is less than required
    if diff_count > 0:
        # Fixed: pass the argument lazily instead of eager %-formatting
        # so interpolation only happens when INFO is enabled.
        LOG.info(
            _LI("Initiating creation of %d spare amphora."), diff_count)
        # Call Amphora Create Flow diff_count times
        for i in range(1, diff_count + 1):
            LOG.debug("Starting amphorae number %d ...", i)
            self.cw.create_amphora()
    else:
        # Debug messages are not translated, so the _LI marker is gone.
        LOG.debug(
            "Current spare amphora count satisfies the "
            "requirement")
def delete(self, id):
    """Deletes a listener from a load balancer.

    :param id: ID of the listener to delete.
    :raises exceptions.NotFound: if the listener does not exist.
    :raises exceptions.ImmutableObject: if the load balancer is not in
        a mutable state.
    :returns: the listener as a response type.
    """
    session = db_api.get_session()
    db_listener = self.repositories.listener.get(session, id=id)
    if not db_listener:
        # Fixed: lazy log argument instead of eager %-formatting.
        LOG.info(_LI("Listener %s not found."), id)
        raise exceptions.NotFound(
            resource=data_models.Listener._name(), id=id)
    # Verify load balancer is in a mutable status. If so it can be
    # assumed that the listener is also in a mutable status because a
    # load balancer will only be ACTIVE when all it's listeners as
    # ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, id, constants.PENDING_UPDATE,
            constants.PENDING_DELETE):
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    # Re-read after the provisioning-status change above.
    db_listener = self.repositories.listener.get(session, id=id)
    try:
        # Fixed: lazy log argument instead of eager %-formatting.
        LOG.info(_LI("Sending Deletion of Listener %s to handler"),
                 db_listener.id)
        self.handler.delete(db_listener)
    except Exception:
        # Mark the listener ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, db_listener.id,
                provisioning_status=constants.ERROR)
    db_listener = self.repositories.listener.get(
        session, id=db_listener.id)
    return self._convert_db_to_type(db_listener,
                                    listener_types.ListenerResponse)
def _delete_vip_security_group(self, sec_grp):
    """Deletes a security group in neutron.

    Retries upon an exception because removing a security group from
    a neutron port does not happen immediately.

    :param sec_grp: ID of the neutron security group to delete.
    :raises base.DeallocateVIPException: if all retry attempts fail.
    """
    attempts = 0
    # NOTE(review): '<=' means max_retries + 1 total attempts.
    while attempts <= cfg.CONF.networking.max_retries:
        try:
            self.neutron_client.delete_security_group(sec_grp)
            LOG.info(_LI("Deleted security group %s"), sec_grp)
            return
        except neutron_client_exceptions.NotFound:
            # Already gone -- treat as success.
            LOG.info(
                _LI("Security group %s not found, will assume it is "
                    "already deleted"), sec_grp)
            return
        except Exception:
            LOG.warning(
                _LW("Attempt %(attempt)s to remove security group "
                    "%(sg)s failed."),
                {
                    'attempt': attempts + 1,
                    'sg': sec_grp
                })
            attempts += 1
            # Give neutron time to release the group from the port.
            time.sleep(cfg.CONF.networking.retry_interval)
    message = _LE("All attempts to remove security group {0} have "
                  "failed.").format(sec_grp)
    LOG.exception(message)
    raise base.DeallocateVIPException(message)
def l7policy_controller(l7policy, delete=False, update=False, create=False):
    """Simulated asynchronous handler for l7policy operations.

    Sleeps to mimic controller latency, applies the change, then
    returns the policy's listener and load balancer to ACTIVE/ONLINE.

    :param l7policy: l7policy data model the operation applies to.
    :param delete: delete the policy row.
    :param update: write the policy's fields.
    :param create: create the policy row.
    """
    time.sleep(ASYNC_TIME)
    LOG.info(_LI("Simulating controller operation for l7policy..."))
    db_l7policy = None
    if delete:
        db_l7policy = repo.l7policy.get(db_api.get_session(), l7policy.id)
        repo.l7policy.delete(db_api.get_session(), id=l7policy.id)
    elif update:
        db_l7policy = repo.l7policy.get(db_api.get_session(), l7policy.id)
        l7policy_dict = l7policy.to_dict()
        repo.l7policy.update(db_api.get_session(), l7policy.id,
                             **l7policy_dict)
    elif create:
        # Bug fix: this branch referenced l7policy_dict, which is only
        # assigned in the update branch, raising NameError on create.
        db_l7policy = repo.l7policy.create(db_api.get_session(),
                                           **l7policy.to_dict())
    if db_l7policy.listener:
        repo.listener.update(db_api.get_session(),
                             db_l7policy.listener.id,
                             operating_status=constants.ONLINE,
                             provisioning_status=constants.ACTIVE)
        repo.load_balancer.update(db_api.get_session(),
                                  db_l7policy.listener.load_balancer.id,
                                  operating_status=constants.ONLINE,
                                  provisioning_status=constants.ACTIVE)
    LOG.info(_LI("Simulated Controller Handler Thread Complete"))
def _lookup(self, pool_id, *remainder):
    """Overriden pecan _lookup method for custom routing.

    Verifies that the pool passed in the url exists, and if so decides
    which controller, if any, should control be passed.

    :param pool_id: pool ID taken from the URL path.
    :param remainder: remaining URL path segments.
    :raises exceptions.NotFound: if the pool does not exist.
    """
    session = db_api.get_session()
    if pool_id and len(remainder) and remainder[0] == 'members':
        remainder = remainder[1:]
        db_pool = self.repositories.pool.get(session, id=pool_id)
        if not db_pool:
            # Fixed: lazy log argument instead of eager %-formatting
            # (matches the duplicate implementation of this router).
            LOG.info(_LI("Pool %s not found."), pool_id)
            raise exceptions.NotFound(resource=data_models.Pool._name(),
                                      id=pool_id)
        return member.MembersController(
            load_balancer_id=self.load_balancer_id,
            listener_id=self.listener_id,
            pool_id=db_pool.id), remainder
    if pool_id and len(remainder) and remainder[0] == 'healthmonitor':
        remainder = remainder[1:]
        db_pool = self.repositories.pool.get(session, id=pool_id)
        if not db_pool:
            # Fixed: lazy log argument instead of eager %-formatting.
            LOG.info(_LI("Pool %s not found."), pool_id)
            raise exceptions.NotFound(resource=data_models.Pool._name(),
                                      id=pool_id)
        return health_monitor.HealthMonitorController(
            load_balancer_id=self.load_balancer_id,
            listener_id=self.listener_id,
            pool_id=db_pool.id), remainder
def delete(self, id):
    """Deletes a pool member.

    :param id: ID of the member to delete.
    :raises exceptions.NotFound: if the member does not exist.
    :raises exceptions.ImmutableObject: if the load balancer is not in
        a mutable state.
    :returns: the member as a response type.
    """
    context = pecan.request.context.get('octavia_context')
    db_member = self.repositories.member.get(context.session, id=id)
    if not db_member:
        LOG.info(_LI("Member %s not found"), id)
        raise exceptions.NotFound(
            resource=data_models.Member._name(), id=id)
    # Verify load balancer is in a mutable status. If so it can be
    # assumed that the listener is also in a mutable status because a
    # load balancer will only be ACTIVE when all its listeners as
    # ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            context.session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(_LI("Member %s cannot be deleted because its Load "
                     "Balancer is in an immutable state."), id)
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    # Re-read after the provisioning-status change above.
    db_member = self.repositories.member.get(context.session, id=id)
    try:
        LOG.info(_LI("Sending Deletion of Member %s to handler"),
                 db_member.id)
        self.handler.delete(db_member)
    except Exception:
        # Mark the listener ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                context.session, self.listener_id,
                operating_status=constants.ERROR)
    db_member = self.repositories.member.get(context.session, id=id)
    return self._convert_db_to_type(db_member,
                                    member_types.MemberResponse)
def put(self, health_monitor):
    """Updates a health monitor.

    Updates a health monitor on a pool if it exists.  Only one health
    monitor is allowed per pool so there is no need for a health
    monitor id.

    :param health_monitor: type object carrying the updated fields.
    :raises exceptions.NotFound: if no monitor exists for the pool.
    :raises exceptions.ImmutableObject: if the load balancer is not in
        a mutable state.
    :raises exceptions.InvalidOption: if the DB rejects the update.
    :returns: the health monitor as a response type.
    """
    session = db_api.get_session()
    db_hm = self.repositories.health_monitor.get(
        session, pool_id=self.pool_id)
    if not db_hm:
        # Fixed: lazy log argument instead of eager %-formatting.
        LOG.info(_LI("Health Monitor for Pool %s was not found"),
                 self.pool_id)
        # Fixed: previously passed the builtin ``id`` function here.
        raise exceptions.NotFound(
            resource=data_models.HealthMonitor._name(), id=self.pool_id)
    hm_dict = health_monitor.to_dict(render_unsets=False)
    # Verify load balancer is in a mutable status. If so it can be
    # assumed that the listener is also in a mutable status because a
    # load balancer will only be ACTIVE when all it's listeners as
    # ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(_LI("Health Monitor for Pool %s cannot be updated "
                     "because the Load Balancer is immutable."),
                 self.pool_id)
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    try:
        self.repositories.health_monitor.update(
            session, self.pool_id, **hm_dict)
    except odb_exceptions.DBError:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        self.repositories.listener.update(
            session, self.listener_id,
            provisioning_status=constants.ACTIVE)
        raise exceptions.InvalidOption(value=hm_dict.get('type'),
                                       option='type')
    db_hm = self.repositories.health_monitor.get(
        session, pool_id=self.pool_id)
    try:
        # Fixed: lazy log argument instead of eager %-formatting.
        LOG.info(_LI("Sending Update of Health Monitor for Pool %s to "
                     "handler"), self.pool_id)
        self.handler.update(db_hm)
    except Exception:
        # Mark the listener ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, self.listener_id,
                operating_status=constants.ERROR)
    db_hm = self.repositories.health_monitor.get(
        session, pool_id=self.pool_id)
    return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)
def listen(self):
    """Run the RPC server until it stops, then shut it down cleanly."""
    try:
        self.server.start()
        LOG.info(_LI('Consumer is now listening...'))
        # Blocks until the server is stopped elsewhere.
        self.server.wait()
    finally:
        LOG.info(_LI('Stopping consumer...'))
        self.server.stop()
        LOG.info(_LI('Consumer successfully stopped.'))
def stop(self, graceful=False):
    """Stop the consumer's RPC server, then the base service.

    :param graceful: wait for in-flight messages to finish first.
    """
    if self.server:
        LOG.info(_LI('Stopping consumer...'))
        self.server.stop()
        if graceful:
            LOG.info(
                _LI('Consumer successfully stopped. Waiting for final '
                    'messages to be processed...'))
            self.server.wait()
    super(Consumer, self).stop(graceful=graceful)
def _check_extension_enabled(self, extension_alias):
    """Return True if the named neutron extension is enabled.

    :param extension_alias: neutron extension alias to look up.
    :returns: True if show_extension succeeds, False on NotFound.
    """
    try:
        self.neutron_client.show_extension(extension_alias)
        # Fixed: lazy %-args instead of eager str.format inside the
        # log call, matching the logging style used elsewhere.
        LOG.info(_LI('Neutron extension %(ext)s found enabled'),
                 {'ext': extension_alias})
        return True
    except neutron_client_exceptions.NotFound:
        LOG.info(_LI('Neutron extension %(ext)s is not enabled'),
                 {'ext': extension_alias})
        return False
def terminate(self, graceful=False):
    """Stop the message listener, then terminate the base service.

    :param graceful: wait for in-flight messages to finish first.
    """
    if self.message_listener:
        LOG.info(_LI('Stopping consumer...'))
        self.message_listener.stop()
        if graceful:
            LOG.info(
                _LI('Consumer successfully stopped. Waiting for final '
                    'messages to be processed...'))
            self.message_listener.wait()
    super(ConsumerService, self).terminate()
def post(self, health_monitor):
    """Creates a health monitor on a pool.

    :param health_monitor: type object carrying the monitor fields.
    :raises exceptions.DuplicateHealthMonitor: if one already exists.
    :raises exceptions.ImmutableObject: if the load balancer is not in
        a mutable state.
    :raises exceptions.InvalidOption: if the DB rejects the create.
    :returns: the created health monitor as a response type.
    """
    session = db_api.get_session()
    try:
        # EAFP: the repo get raises NotFound when no monitor exists,
        # which is the success path for a create.
        db_hm = self.repositories.health_monitor.get(session,
                                                     pool_id=self.pool_id)
        if db_hm:
            raise exceptions.DuplicateHealthMonitor()
    except exceptions.NotFound:
        pass
    hm_dict = health_monitor.to_dict()
    hm_dict['pool_id'] = self.pool_id
    # Verify load balancer is in a mutable status. If so it can be
    # assumed that the listener is also in a mutable status because a
    # load balancer will only be ACTIVE when all it's listeners as
    # ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(
            _LI("Health Monitor for Pool %s cannot be updated "
                "because the Load Balancer is immutable."), self.pool_id)
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    try:
        db_hm = self.repositories.health_monitor.create(session,
                                                        **hm_dict)
    except odb_exceptions.DBError:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        self.repositories.listener.update(
            session, self.listener_id,
            provisioning_status=constants.ACTIVE)
        raise exceptions.InvalidOption(value=hm_dict.get('type'),
                                       option='type')
    try:
        LOG.info(
            _LI("Sending Creation of Health Monitor for Pool %s to "
                "handler"), self.pool_id)
        self.handler.create(db_hm)
    except Exception:
        # Mark the listener ERROR but still return a response below.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, self.listener_id,
                operating_status=constants.ERROR)
    db_hm = self.repositories.health_monitor.get(session,
                                                 pool_id=self.pool_id)
    return self._convert_db_to_type(db_hm, hm_types.HealthMonitorResponse)
def listen(self):
    """Launch the RPC server and block until it exits.

    On any exit path the server is stopped and drained so queued
    messages finish processing.
    """
    server = self.server
    try:
        LOG.info(_LI('Starting consumer...'))
        service.launch(cfg.CONF, server).wait()
    finally:
        LOG.info(_LI('Stopping consumer...'))
        server.stop()
        LOG.info(_LI('Consumer successfully stopped. Waiting for final '
                     'messages to be processed...'))
        server.wait()
        LOG.info(_LI('Finished waiting.'))
def post(self, pool):
    """Creates a pool on a listener.

    This does not allow more than one pool to be on a listener so once one
    is created, another cannot be created until the first one has been
    deleted.

    :raises DuplicatePoolEntry: if the listener already has a pool.
    :raises ImmutableObject: if the load balancer is not mutable.
    :raises InvalidOption: on a DB error during create.
    """
    session = db_api.get_session()
    if self.repositories.listener.has_pool(session, self.listener_id):
        raise exceptions.DuplicatePoolEntry()
    # Verify load balancer is in a mutable status. If so it can be assumed
    # that the listener is also in a mutable status because a load balancer
    # will only be ACTIVE when all it's listeners as ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(_LI("Pool cannot be created because the Load "
                     "Balancer is in an immutable state"))
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    pool_dict = pool.to_dict()
    sp_dict = pool_dict.pop('session_persistence', None)
    pool_dict['operating_status'] = constants.OFFLINE
    try:
        db_pool = self.repositories.create_pool_on_listener(
            session, self.listener_id, pool_dict, sp_dict=sp_dict)
    except odb_exceptions.DBError:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        self.repositories.listener.update(
            session, self.listener_id,
            provisioning_status=constants.ACTIVE)
        # TODO(blogan): will have to do separate validation protocol
        # before creation or update since the exception messages
        # do not give any information as to what constraint failed
        raise exceptions.InvalidOption(value='', option='')
    try:
        # Pass the id as a lazy logging argument instead of eagerly
        # %-formatting the translated message (oslo.i18n guideline).
        LOG.info(_LI("Sending Creation of Pool %s to handler"),
                 db_pool.id)
        self.handler.create(db_pool)
    except Exception:
        # Flag the listener as degraded; the response below still
        # reflects the persisted pool.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, self.listener_id,
                operating_status=constants.ERROR)
    db_pool = self.repositories.pool.get(session, id=db_pool.id)
    return self._convert_db_to_type(db_pool, pool_types.PoolResponse)
def post(self, listener):
    """Creates a listener on a load balancer.

    :raises ImmutableObject: if the load balancer is not mutable.
    :raises DuplicateListenerEntry: on a duplicate protocol port.
    :raises InvalidOption: on any other DB error (bad protocol).
    """
    self._secure_data(listener)
    session = db_api.get_session()
    lb_repo = self.repositories.load_balancer
    if not lb_repo.test_and_set_provisioning_status(
            session, self.load_balancer_id, constants.PENDING_UPDATE):
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        # Pass the id as a lazy logging argument instead of eagerly
        # %-formatting the translated message (oslo.i18n guideline).
        LOG.info(_LI("Load Balancer %s is immutable."), db_lb.id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    listener_dict = listener.to_dict()
    listener_dict['load_balancer_id'] = self.load_balancer_id
    listener_dict['provisioning_status'] = constants.PENDING_CREATE
    listener_dict['operating_status'] = constants.OFFLINE
    # NOTE(blogan): Throwing away because we should not store secure data
    # in the database nor should we send it to a handler.
    if 'tls_termination' in listener_dict:
        del listener_dict['tls_termination']
    # This is the extra validation layer for wrong protocol or duplicate
    # listeners on the same load balancer.
    try:
        db_listener = self.repositories.listener.create(
            session, **listener_dict)
    except odb_exceptions.DBDuplicateEntry:
        # Setting LB back to active because this is just a validation
        # failure
        lb_repo.update(session, self.load_balancer_id,
                       provisioning_status=constants.ACTIVE)
        raise exceptions.DuplicateListenerEntry(
            port=listener_dict.get('protocol_port'))
    except odb_exceptions.DBError:
        # Setting LB back to active because this is just a validation
        # failure
        lb_repo.update(session, self.load_balancer_id,
                       provisioning_status=constants.ACTIVE)
        raise exceptions.InvalidOption(value=listener_dict.get('protocol'),
                                       option='protocol')
    # Handler will be responsible for sending to controller
    try:
        # Lazy logging argument here as well (was an eager '%').
        LOG.info(_LI("Sending Creation of Listener %s to handler"),
                 db_listener.id)
        self.handler.create(db_listener)
    except Exception:
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, db_listener.id,
                provisioning_status=constants.ERROR)
    db_listener = self.repositories.listener.get(session,
                                                 id=db_listener.id)
    return self._convert_db_to_type(db_listener,
                                    listener_types.ListenerResponse)
def db_cleanup():
    """Perform db cleanup for old amphora.

    Loops for as long as the thread event remains set, deleting expired
    amphora records every ``cleanup_interval`` seconds.
    """
    # Read the interval from CONF
    interval = CONF.house_keeping.cleanup_interval
    LOG.info(_LI("DB cleanup interval is set to %d sec"), interval)
    LOG.info(_LI('Amphora expiry age is %s seconds'),
             CONF.house_keeping.amphora_expiry_age)
    # Renamed from 'db_cleanup' so the local no longer shadows this
    # function's own name.
    cleaner = house_keeping.DatabaseCleanup()
    while db_cleanup_thread_event.is_set():
        LOG.debug("Initiating the cleanup of old amphora...")
        cleaner.delete_old_amphorae()
        time.sleep(interval)
def post(self, member):
    """Creates a pool member on a pool.

    :raises ImmutableObject: if the load balancer is not mutable.
    :raises IDAlreadyExists: on a duplicate member id.
    :raises DuplicateMemberEntry: on a duplicate address/port in the pool.
    """
    session = db_api.get_session()
    member_dict = member.to_dict()
    member_dict['pool_id'] = self.pool_id
    member_dict['operating_status'] = constants.OFFLINE
    # Verify load balancer is in a mutable status. If so it can be assumed
    # that the listener is also in a mutable status because a load balancer
    # will only be ACTIVE when all its listeners as ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(
            _LI("Member cannot be created because its Load "
                "Balancer is in an immutable state."))
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    try:
        db_member = self.repositories.member.create(session, **member_dict)
    except oslo_exc.DBDuplicateEntry as de:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        self.repositories.listener.update(
            session, self.listener_id,
            provisioning_status=constants.ACTIVE)
        if ['id'] == de.columns:
            raise exceptions.IDAlreadyExists()
        elif (set(['pool_id', 'ip_address', 'protocol_port']) ==
                set(de.columns)):
            raise exceptions.DuplicateMemberEntry(
                ip_address=member_dict.get('ip_address'),
                port=member_dict.get('protocol_port'))
        # Unrecognized duplicate constraint: re-raise the original DB
        # error.  Previously execution fell through here and hit the
        # undefined 'db_member' below, masking the error with a
        # NameError.
        raise
    try:
        LOG.info(_LI("Sending Creation of Member %s to handler"),
                 db_member.id)
        self.handler.create(db_member)
    except Exception:
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, self.listener_id,
                operating_status=constants.ERROR)
    db_member = self.repositories.member.get(session, id=db_member.id)
    return self._convert_db_to_type(db_member, member_types.MemberResponse)
def post(self, health_monitor):
    """Creates a health monitor on a pool.

    Pecan-context variant: uses the request-scoped DB session rather
    than opening a new one.

    :raises DuplicateHealthMonitor: if the pool already has a monitor.
    :raises ImmutableObject: if the load balancer is not mutable.
    :raises InvalidOption: on a DB error during create.
    """
    context = pecan.request.context.get('octavia_context')
    # NotFound means the pool has no monitor yet; any existing monitor
    # makes this request a duplicate.
    try:
        db_hm = self.repositories.health_monitor.get(
            context.session, pool_id=self.pool_id)
        if db_hm:
            raise exceptions.DuplicateHealthMonitor()
    except exceptions.NotFound:
        pass
    hm_dict = health_monitor.to_dict()
    hm_dict['pool_id'] = self.pool_id
    # Verify load balancer is in a mutable status. If so it can be assumed
    # that the listener is also in a mutable status because a load balancer
    # will only be ACTIVE when all it's listeners as ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            context.session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(_LI("Health Monitor for Pool %s cannot be updated "
                     "because the Load Balancer is immutable."),
                 self.pool_id)
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    try:
        db_hm = self.repositories.health_monitor.create(
            context.session, **hm_dict)
    except odb_exceptions.DBError:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            context.session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        self.repositories.listener.update(
            context.session, self.listener_id,
            provisioning_status=constants.ACTIVE)
        # NOTE(review): DBError is assumed to be a constraint failure on
        # the monitor 'type' column -- TODO confirm against the schema.
        raise exceptions.InvalidOption(value=hm_dict.get('type'),
                                       option='type')
    try:
        LOG.info(_LI("Sending Creation of Health Monitor for Pool %s to "
                     "handler"), self.pool_id)
        self.handler.create(db_hm)
    except Exception:
        # Flag the listener as degraded but swallow the handler error;
        # the stored monitor is still returned to the caller.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                context.session, self.listener_id,
                operating_status=constants.ERROR)
    # Re-read so the response reflects the persisted state.
    db_hm = self.repositories.health_monitor.get(
        context.session, pool_id=self.pool_id)
    return self._convert_db_to_type(db_hm,
                                    hm_types.HealthMonitorResponse)
def deallocate_vip(self, vip):
    """Release the neutron resources backing a load balancer VIP.

    Removes leftover amphora vrrp ports, then either detaches Octavia's
    security group (for ports Octavia does not own) or deletes the VIP
    port outright, and finally deletes the VIP security group.

    :param vip: VIP data model with port_id and load_balancer attached.
    :raises VIPConfigurationNotFound: if the VIP port is gone in neutron.
    :raises DeallocateVIPException: if the VIP port delete fails.
    """
    # Delete the vrrp_port (instance port) in case nova didn't
    # This can happen if a failover has occurred.
    try:
        for amphora in six.moves.filter(
                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
                vip.load_balancer.amphorae):
            self.neutron_client.delete_port(amphora.vrrp_port_id)
    except (neutron_client_exceptions.NotFound,
            neutron_client_exceptions.PortNotFoundClient):
        # 'amphora' is the loop item whose delete_port raised; that
        # port is already gone, which is fine.
        LOG.debug('VIP instance port {0} already deleted. '
                  'Skipping.'.format(amphora.vrrp_port_id))
    try:
        port = self.get_port(vip.port_id)
    except base.PortNotFound:
        msg = ("Can't deallocate VIP because the vip port {0} cannot be "
               "found in neutron".format(vip.port_id))
        raise base.VIPConfigurationNotFound(msg)
    if port.device_owner != OCTAVIA_OWNER:
        # Port belongs to someone else (e.g. a user-supplied VIP port):
        # only detach our security group, never delete the port itself.
        LOG.info(
            _LI("Port %s will not be deleted by Octavia as it was "
                "not created by Octavia."), vip.port_id)
        if self.sec_grp_enabled:
            sec_grp = self._get_lb_security_group(vip.load_balancer.id)
            sec_grp = sec_grp.get('id')
            LOG.info(
                _LI("Removing security group %(sg)s from port %(port)s"),
                {
                    'sg': sec_grp,
                    'port': vip.port_id
                })
            raw_port = self.neutron_client.show_port(port.id)
            sec_grps = raw_port.get('port', {}).get('security_groups', [])
            if sec_grp in sec_grps:
                sec_grps.remove(sec_grp)
                port_update = {'port': {'security_groups': sec_grps}}
                self.neutron_client.update_port(port.id, port_update)
            self._delete_vip_security_group(sec_grp)
        return
    try:
        self.neutron_client.delete_port(vip.port_id)
    except Exception:
        message = _LE('Error deleting VIP port_id {port_id} from '
                      'neutron').format(port_id=vip.port_id)
        LOG.exception(message)
        raise base.DeallocateVIPException(message)
    if self.sec_grp_enabled:
        sec_grp = self._get_lb_security_group(vip.load_balancer.id)
        sec_grp = sec_grp.get('id')
        self._delete_vip_security_group(sec_grp)
def delete_old_amphorae(self):
    """Delete DELETED-status amphorae whose records have expired."""
    expiry = datetime.timedelta(
        seconds=CONF.house_keeping.amphora_expiry_age)
    session = db_api.get_session()
    for amp in self.amp_repo.get_all(session, status=constants.DELETED):
        # Skip amphorae that have not yet aged past the expiry window.
        if not self.amp_health_repo.check_amphora_expired(session, amp.id,
                                                          expiry):
            continue
        LOG.info(_LI('Attempting to delete Amphora id : %s'), amp.id)
        self.amp_repo.delete(session, id=amp.id)
        LOG.info(_LI('Deleted Amphora id : %s'), amp.id)
def delete_old_amphorae(self):
    """Remove amphorae in DELETED status once past the expiry age."""
    max_age = datetime.timedelta(
        seconds=CONF.house_keeping.amphora_expiry_age)
    session = db_api.get_session()
    candidates = self.amp_repo.get_all(session, status=constants.DELETED)
    # Lazily filter down to the amphorae whose health records expired.
    expired = (a for a in candidates
               if self.amp_health_repo.check_amphora_expired(
                   session, a.id, max_age))
    for amp in expired:
        LOG.info(_LI('Attempting to delete Amphora id : %s'), amp.id)
        self.amp_repo.delete(session, id=amp.id)
        LOG.info(_LI('Deleted Amphora id : %s'), amp.id)
def put(self, id, pool):
    """Updates a pool on a listener.

    :param id: pool UUID from the URL.
    :param pool: API type carrying the fields to update.
    :raises NotFound: if the pool does not exist.
    :raises ImmutableObject: if the load balancer is not mutable.
    :raises InvalidOption: on a DB error during update.
    """
    session = db_api.get_session()
    old_db_pool = self.repositories.pool.get(session, id=id)
    if not old_db_pool:
        # Pass the id as a lazy logging argument instead of eagerly
        # %-formatting the translated message (oslo.i18n guideline).
        LOG.info(_LI("Pool %s not found."), id)
        raise exceptions.NotFound(resource=data_models.Pool._name(),
                                  id=id)
    # Verify load balancer is in a mutable status. If so it can be assumed
    # that the listener is also in a mutable status because a load balancer
    # will only be ACTIVE when all it's listeners as ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        # Lazy logging argument here as well (was an eager '%').
        LOG.info(_LI("Pool %s cannot be updated because the Load "
                     "Balancer is in an immutable state"), id)
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    pool_dict = pool.to_dict(render_unsets=False)
    pool_dict['operating_status'] = old_db_pool.operating_status
    sp_dict = pool_dict.pop('session_persistence', None)
    try:
        self.repositories.update_pool_on_listener(session, id, pool_dict,
                                                  sp_dict)
    except odb_exceptions.DBError:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        self.repositories.listener.update(
            session, self.listener_id,
            provisioning_status=constants.ACTIVE)
        # TODO(blogan): will have to do separate validation protocol
        # before creation or update since the exception messages
        # do not give any information as to what constraint failed
        raise exceptions.InvalidOption(value='', option='')
    db_pool = self.repositories.pool.get(session, id=id)
    try:
        # Lazy logging argument here as well (was an eager '%').
        LOG.info(_LI("Sending Update of Pool %s to handler"), db_pool.id)
        self.handler.update(db_pool)
    except Exception:
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                session, self.listener_id,
                operating_status=constants.ERROR)
    db_pool = self.repositories.pool.get(session, id=db_pool.id)
    return self._convert_db_to_type(db_pool, pool_types.PoolResponse)
def post(self, member):
    """Creates a pool member on a pool.

    Pecan-context variant: uses the request-scoped DB session.

    :raises ImmutableObject: if the load balancer is not mutable.
    :raises IDAlreadyExists: on a duplicate member id.
    :raises DuplicateMemberEntry: on a duplicate address/port in the pool.
    """
    context = pecan.request.context.get('octavia_context')
    member_dict = member.to_dict()
    member_dict['pool_id'] = self.pool_id
    member_dict['operating_status'] = constants.OFFLINE
    # Verify load balancer is in a mutable status. If so it can be assumed
    # that the listener is also in a mutable status because a load balancer
    # will only be ACTIVE when all its listeners as ACTIVE.
    if not self.repositories.test_and_set_lb_and_listener_prov_status(
            context.session, self.load_balancer_id, self.listener_id,
            constants.PENDING_UPDATE, constants.PENDING_UPDATE):
        LOG.info(_LI("Member cannot be created because its Load "
                     "Balancer is in an immutable state."))
        lb_repo = self.repositories.load_balancer
        db_lb = lb_repo.get(context.session, id=self.load_balancer_id)
        raise exceptions.ImmutableObject(resource=db_lb._name(),
                                         id=self.load_balancer_id)
    try:
        db_member = self.repositories.member.create(
            context.session, **member_dict)
    except oslo_exc.DBDuplicateEntry as de:
        # Setting LB and Listener back to active because this is just a
        # validation failure
        self.repositories.load_balancer.update(
            context.session, self.load_balancer_id,
            provisioning_status=constants.ACTIVE)
        self.repositories.listener.update(
            context.session, self.listener_id,
            provisioning_status=constants.ACTIVE)
        if ['id'] == de.columns:
            raise exceptions.IDAlreadyExists()
        elif (set(['pool_id', 'ip_address', 'protocol_port']) ==
                set(de.columns)):
            raise exceptions.DuplicateMemberEntry(
                ip_address=member_dict.get('ip_address'),
                port=member_dict.get('protocol_port'))
        # Unrecognized duplicate constraint: re-raise the original DB
        # error.  Previously execution fell through here and hit the
        # undefined 'db_member' below, masking the error with a
        # NameError.
        raise
    try:
        LOG.info(_LI("Sending Creation of Member %s to handler"),
                 db_member.id)
        self.handler.create(db_member)
    except Exception:
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                context.session, self.listener_id,
                operating_status=constants.ERROR)
    db_member = self.repositories.member.get(context.session,
                                             id=db_member.id)
    return self._convert_db_to_type(db_member,
                                    member_types.MemberResponse)
def deallocate_vip(self, vip):
    """Release the neutron resources backing a load balancer VIP.

    Removes leftover amphora vrrp ports, then either detaches Octavia's
    security group (for ports Octavia does not own) or deletes the VIP
    port outright, and finally deletes the VIP security group.

    :param vip: VIP data model with port_id and load_balancer attached.
    :raises VIPConfigurationNotFound: if the VIP port is gone in neutron.
    :raises DeallocateVIPException: if the VIP port delete fails.
    """
    # Delete the vrrp_port (instance port) in case nova didn't
    # This can happen if a failover has occurred.
    try:
        for amphora in six.moves.filter(
                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
                vip.load_balancer.amphorae):
            self.neutron_client.delete_port(amphora.vrrp_port_id)
    except (neutron_client_exceptions.NotFound,
            neutron_client_exceptions.PortNotFoundClient):
        # 'amphora' is the loop item whose delete_port raised; that
        # port is already gone, which is fine.
        LOG.debug('VIP instance port {0} already deleted. '
                  'Skipping.'.format(amphora.vrrp_port_id))
    try:
        port = self.get_port(vip.port_id)
    except base.PortNotFound:
        msg = ("Can't deallocate VIP because the vip port {0} cannot be "
               "found in neutron".format(vip.port_id))
        raise base.VIPConfigurationNotFound(msg)
    if port.device_owner != OCTAVIA_OWNER:
        # Port belongs to someone else (e.g. a user-supplied VIP port):
        # only detach our security group, never delete the port itself.
        LOG.info(_LI("Port %s will not be deleted by Octavia as it was "
                     "not created by Octavia."), vip.port_id)
        if self.sec_grp_enabled:
            sec_grp = self._get_lb_security_group(vip.load_balancer.id)
            sec_grp = sec_grp.get('id')
            LOG.info(
                _LI("Removing security group %(sg)s from port %(port)s"),
                {'sg': sec_grp, 'port': vip.port_id})
            raw_port = self.neutron_client.show_port(port.id)
            sec_grps = raw_port.get('port', {}).get('security_groups', [])
            if sec_grp in sec_grps:
                sec_grps.remove(sec_grp)
                port_update = {'port': {'security_groups': sec_grps}}
                self.neutron_client.update_port(port.id, port_update)
            self._delete_vip_security_group(sec_grp)
        return
    try:
        self.neutron_client.delete_port(vip.port_id)
    except Exception:
        message = _LE('Error deleting VIP port_id {port_id} from '
                      'neutron').format(port_id=vip.port_id)
        LOG.exception(message)
        raise base.DeallocateVIPException(message)
    if self.sec_grp_enabled:
        sec_grp = self._get_lb_security_group(vip.load_balancer.id)
        sec_grp = sec_grp.get('id')
        self._delete_vip_security_group(sec_grp)
def _lookup(self, lb_id, *remainder):
    """Overriden pecan _lookup method for custom routing.

    Verifies that the load balancer passed in the url exists, and if so
    decides which controller, if any, should control be passed.

    :param lb_id: load balancer UUID from the URL path.
    :param remainder: remaining URL path segments.
    :returns: (controller, remaining segments) tuple for pecan, or
        implicitly None when no sub-resource segment matches.
    :raises NotFound: if the load balancer does not exist.
    """
    context = pecan.request.context.get('octavia_context')
    if lb_id and len(remainder) and (remainder[0] == 'listeners' or
                                     remainder[0] == 'pools' or
                                     remainder[0] == 'delete_cascade'):
        controller = remainder[0]
        # Consume the sub-resource segment before delegating.
        remainder = remainder[1:]
        db_lb = self.repositories.load_balancer.get(context.session,
                                                    id=lb_id)
        if not db_lb:
            LOG.info(_LI("Load Balancer %s was not found."), lb_id)
            raise exceptions.NotFound(
                resource=data_models.LoadBalancer._name(), id=lb_id)
        if controller == 'listeners':
            return listener.ListenersController(
                load_balancer_id=db_lb.id), remainder
        elif controller == 'pools':
            return pool.PoolsController(
                load_balancer_id=db_lb.id), remainder
        elif (controller == 'delete_cascade'):
            # Cascade delete consumes the rest of the path, hence the
            # empty remainder.
            return LBCascadeDeleteController(db_lb.id), ''
    # Implicit None return lets pecan continue its normal routing.
def post(self, load_balancer):
    """Creates a load balancer."""
    context = pecan.request.context.get('octavia_context')
    lb_dict = load_balancer.to_dict()
    vip_dict = lb_dict.pop('vip')
    # New load balancers always start out offline and pending creation;
    # fall back to the request's project when none was supplied.
    lb_dict.update(
        provisioning_status=constants.PENDING_CREATE,
        operating_status=constants.OFFLINE,
        project_id=lb_dict.get('project_id') or context.project_id)
    try:
        db_lb = self.repositories.create_load_balancer_and_vip(
            context.session, lb_dict, vip_dict)
    except odb_exceptions.DBDuplicateEntry:
        raise exceptions.IDAlreadyExists()
    # Handler will be responsible for sending to controller
    try:
        LOG.info(_LI("Sending created Load Balancer %s to the handler"),
                 db_lb.id)
        self.handler.create(db_lb)
    except Exception:
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.load_balancer.update(
                context.session, db_lb.id,
                provisioning_status=constants.ERROR)
    return self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
def _actually_delete_cert(cert_ref):
    """Deletes the specified cert. Very dangerous. Do not recommend.

    :param cert_ref: the UUID of the cert to delete
    :raises Exception: if certificate deletion fails
    """
    connection = barbican_common.BarbicanAuth.get_barbican_client()
    # Use lazy %-style logging arguments instead of eagerly calling
    # .format() on the translated message (oslo.i18n guideline).
    LOG.info(
        _LI("Recursively deleting certificate container %s from Barbican."),
        cert_ref)
    try:
        certificate_container = connection.containers.get(cert_ref)
        certificate_container.certificate.delete()
        # Intermediates and passphrase are optional container members.
        if certificate_container.intermediates:
            certificate_container.intermediates.delete()
        if certificate_container.private_key_passphrase:
            certificate_container.private_key_passphrase.delete()
        certificate_container.private_key.delete()
        certificate_container.delete()
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE("Error recursively deleting container %(ref)s: "
                    "%(err)s"),
                {'ref': cert_ref, 'err': str(e)})
def put(self, id, listener_):
    """Updates a listener on a load balancer.

    :param id: listener UUID from the URL.
    :param listener_: root API type wrapping the listener update fields.
    :raises ValidationException: if no listener body was supplied.
    """
    listener = listener_.listener
    context = pecan.request.context.get('octavia_context')
    db_listener = self._get_db_listener(context.session, id)
    load_balancer_id = db_listener.load_balancer_id
    # TODO(rm_work): Do we need something like this? What do we do on an
    # empty body for a PUT?
    if not listener:
        raise exceptions.ValidationException(
            detail='No listener object supplied.')
    if listener.default_pool_id:
        # Make sure the referenced pool belongs to this load balancer.
        self._validate_pool(context.session, load_balancer_id,
                            listener.default_pool_id)
    self._test_lb_and_listener_statuses(context.session, load_balancer_id,
                                        id=id)
    try:
        LOG.info(_LI("Sending Update of Listener %s to handler"), id)
        self.handler.update(db_listener, listener)
    except Exception:
        # On handler failure, mark the listener ERROR but still return
        # the (re-read) listener to the caller.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                context.session, id, provisioning_status=constants.ERROR)
    # Re-read so the response reflects any status change made above.
    db_listener = self._get_db_listener(context.session, id)
    result = self._convert_db_to_type(db_listener,
                                      listener_types.ListenerResponse)
    return listener_types.ListenerRootResponse(listener=result)
def update(self, old_lb, load_balancer):
    """Validate an LB update and hand it to the simulated controller."""
    validate_input(data_models.LoadBalancer, load_balancer)
    log_payload = {"entity": self.__class__.__name__, "id": old_lb.id}
    LOG.info(_LI("%(entity)s handling the update of "
                 "load balancer %(id)s"), log_payload)
    # Carry the existing id over so the update targets the same LB.
    load_balancer.id = old_lb.id
    simulate_controller(load_balancer, update=True)
def delete(self, id):
    """Deletes a listener from a load balancer.

    :param id: listener UUID from the URL.
    """
    context = pecan.request.context.get('octavia_context')
    db_listener = self._get_db_listener(context.session, id)
    load_balancer_id = db_listener.load_balancer_id
    # Flips the listener into PENDING_DELETE (and checks LB mutability).
    self._test_lb_and_listener_statuses(
        context.session, load_balancer_id, id=id,
        listener_status=constants.PENDING_DELETE)
    try:
        LOG.info(_LI("Sending Deletion of Listener %s to handler"),
                 db_listener.id)
        self.handler.delete(db_listener)
    except Exception:
        # On handler failure, mark the listener ERROR but still return
        # the (re-read) listener to the caller.
        with excutils.save_and_reraise_exception(reraise=False):
            self.repositories.listener.update(
                context.session, db_listener.id,
                provisioning_status=constants.ERROR)
    # Re-read so the response reflects any status change made above.
    db_listener = self.repositories.listener.get(context.session,
                                                 id=db_listener.id)
    result = self._convert_db_to_type(db_listener,
                                      listener_types.ListenerResponse)
    return listener_types.ListenerRootResponse(listener=result)
def delete(self, id):
    """Deletes a pool member.

    :param id: member UUID from the URL.
    """
    context = pecan.request.context.get('octavia_context')
    db_member = self._get_db_member(context.session, id)
    # Checks LB/listener/pool mutability before proceeding.
    self._test_lb_and_listener_and_pool_statuses(context.session,
                                                 member=db_member)
    self.repositories.member.update(
        context.session, db_member.id,
        provisioning_status=constants.PENDING_DELETE)
    try:
        LOG.info(_LI("Sending Deletion of Member %s to handler"),
                 db_member.id)
        self.handler.delete(db_member)
    except Exception:
        # Use a separate lock session so the status rollback commits
        # even though the request is failing.
        with excutils.save_and_reraise_exception(
                reraise=False), db_api.get_lock_session() as lock_session:
            self._reset_lb_listener_pool_statuses(
                lock_session, member=db_member)
            # Member now goes to ERROR
            self.repositories.member.update(
                lock_session, db_member.id,
                provisioning_status=constants.ERROR)
    # Re-read so the response reflects any status change made above.
    db_member = self.repositories.member.get(context.session, id=id)
    result = self._convert_db_to_type(db_member,
                                      member_types.MemberResponse)
    return member_types.MembersRootResponse(member=result)