def update_status(self, context, obj_type, obj_id, provisioning_status=None,
                  operating_status=None):
    """Persist a new provisioning/operating status for an LBaaS v2 object.

    Warns and returns when neither status is given; raises
    ``n_exc.Invalid`` for an unknown ``obj_type``.
    """
    if not provisioning_status and not operating_status:
        # Caller bug: nothing to update, make it visible in the log.
        LOG.warning(_LW('update_status for %(obj_type)s %(obj_id)s called '
                        'without specifying provisioning_status or '
                        'operating_status') % {'obj_type': obj_type,
                                               'obj_id': obj_id})
        return
    model = {
        'loadbalancer': db_models.LoadBalancer,
        'pool': db_models.PoolV2,
        'listener': db_models.Listener,
        'member': db_models.MemberV2,
        'healthmonitor': db_models.HealthMonitorV2,
    }.get(obj_type)
    if model is None:
        raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
    try:
        self.plugin.db.update_status(
            context, model, obj_id,
            provisioning_status=provisioning_status,
            operating_status=operating_status)
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
                        'not found in the DB, it was probably deleted '
                        'concurrently'),
                    {'obj_type': obj_type, 'obj_id': obj_id})
def update_status(self, context, obj_type, obj_id, provisioning_status=None,
                  operating_status=None):
    """Update status fields of an LBaaS v2 object in the database."""
    if not (provisioning_status or operating_status):
        # Nothing was supplied to update -- log and bail out.
        LOG.warning(_LW('update_status for %(obj_type)s %(obj_id)s called '
                        'without specifying provisioning_status or '
                        'operating_status') % {'obj_type': obj_type,
                                               'obj_id': obj_id})
        return
    models_by_type = {
        'loadbalancer': db_models.LoadBalancer,
        'pool': db_models.PoolV2,
        'listener': db_models.Listener,
        'member': db_models.MemberV2,
        'healthmonitor': db_models.HealthMonitorV2,
    }
    if obj_type not in models_by_type:
        raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
    try:
        self.plugin.db.update_status(
            context, models_by_type[obj_type], obj_id,
            provisioning_status=provisioning_status,
            operating_status=operating_status)
    except n_exc.NotFound:
        # The agent may report status for an object another request
        # already removed from the DB -- that is expected, not an error.
        LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
                        'not found in the DB, it was probably deleted '
                        'concurrently'),
                    {'obj_type': obj_type, 'obj_id': obj_id})
def call(self, action, resource, data, headers, binary=False):
    """Issue a vDirect request, failing over when the server is down
    (status -1) or redirecting to the active peer (301/307).
    """
    resp = self._call(action, resource, data, headers, binary)
    status = resp[RESP_STATUS]
    if status == -1:
        LOG.warning(_LW('vDirect server is not responding (%s).'),
                    self.server)
        return self._recover(action, resource, data, headers, binary)
    if status in (301, 307):
        # Redirect means we reached the standby node.
        LOG.warning(_LW('vDirect server is not active (%s).'),
                    self.server)
        return self._recover(action, resource, data, headers, binary)
    return resp
def call(self, action, resource, data, headers, binary=False):
    """Send a request to vDirect and recover on failure/redirect."""
    response = self._call(action, resource, data, headers, binary)
    if response[RESP_STATUS] not in (-1, 301, 307):
        return response
    if response[RESP_STATUS] == -1:
        LOG.warning(_LW('vDirect server is not responding (%s).'),
                    self.server)
    else:
        # 301/307: we are talking to the passive vDirect node.
        LOG.warning(_LW('vDirect server is not active (%s).'),
                    self.server)
    return self._recover(action, resource, data, headers, binary)
def get_ready_devices(self, context, host=None):
    """Return ids of load balancers on *host*'s agent that are admin-up
    and in an active/pending provisioning state.
    """
    with context.session.begin(subtransactions=True):
        agents = self.plugin.db.get_lbaas_agents(
            context, filters={'host': [host]})
        if not agents:
            return []
        if len(agents) > 1:
            LOG.warning(_LW('Multiple lbaas agents found on host %s'), host)
        lbs = self.plugin.db.list_loadbalancers_on_lbaas_agent(
            context, agents[0].id)
        lb_ids = [lb.id for lb in lbs]
        model = loadbalancer_dbv2.models.LoadBalancer
        qry = context.session.query(model.id)
        qry = qry.filter(model.id.in_(lb_ids))
        qry = qry.filter(
            model.provisioning_status.in_(
                constants.ACTIVE_PENDING_STATUSES))
        up = True  # makes pep8 and sqlalchemy happy
        qry = qry.filter(model.admin_state_up == up)
        return [row_id for row_id, in qry]
def get_ready_devices(self, context, host=None):
    """List ids of deployable load balancers bound to this host's agent."""
    with context.session.begin(subtransactions=True):
        host_agents = self.plugin.db.get_lbaas_agents(
            context, filters={'host': [host]})
        if not host_agents:
            return []
        elif len(host_agents) > 1:
            LOG.warning(_LW('Multiple lbaas agents found on host %s'),
                        host)
        agent_lbs = self.plugin.db.list_loadbalancers_on_lbaas_agent(
            context, host_agents[0].id)
        candidate_ids = [item.id for item in agent_lbs]
        lb_model = loadbalancer_dbv2.models.LoadBalancer
        up = True  # makes pep8 and sqlalchemy happy
        query = (context.session.query(lb_model.id)
                 .filter(lb_model.id.in_(candidate_ids))
                 .filter(lb_model.provisioning_status.in_(
                     constants.ACTIVE_PENDING_STATUSES))
                 .filter(lb_model.admin_state_up == up))
        return [lb_id for lb_id, in query]
def _flip_servers(self):
    """Swap the primary and secondary vDirect server addresses.

    Called after the current server proves unreachable or passive so
    subsequent requests target the peer.
    """
    # Fix: corrected the 'Fliping' typo in the operator-facing log message.
    LOG.warning(
        _LW('Flipping servers. Current is: %(server)s, '
            'switching to %(secondary)s'),
        {'server': self.server,
         'secondary': self.secondary_server})
    self.server, self.secondary_server = self.secondary_server, self.server
def schedule(self, plugin, context, pool, device_driver):
    """Schedule the pool to an active loadbalancer agent if there
    is no enabled agent hosting it.
    """
    with context.session.begin(subtransactions=True):
        hosting_agent = plugin.get_lbaas_agent_hosting_pool(
            context, pool['id'])
        if hosting_agent:
            LOG.debug('Pool %(pool_id)s has already been hosted'
                      ' by lbaas agent %(agent_id)s',
                      {'pool_id': pool['id'],
                       'agent_id': hosting_agent['id']})
            return
        active_agents = plugin.get_lbaas_agents(context, active=True)
        if not active_agents:
            LOG.warning(_LW('No active lbaas agents for pool %s'),
                        pool['id'])
            return
        candidates = plugin.get_lbaas_agent_candidates(
            device_driver, active_agents)
        if not candidates:
            LOG.warning(_LW('No lbaas agent supporting device driver %s'),
                        device_driver)
            return
        # Delegate the actual choice to the scheduler strategy.
        chosen_agent = self._schedule(candidates, plugin, context)
        agent_binding = PoolLoadbalancerAgentBinding()
        agent_binding.agent = chosen_agent
        agent_binding.pool_id = pool['id']
        context.session.add(agent_binding)
        LOG.debug('Pool %(pool_id)s is scheduled to lbaas agent '
                  '%(agent_id)s',
                  {'pool_id': pool['id'],
                   'agent_id': chosen_agent['id']})
        return chosen_agent
def schedule(self, plugin, context, loadbalancer, device_driver):
    """Schedule the load balancer to an active loadbalancer agent if
    there is no enabled agent hosting it.
    """
    with context.session.begin(subtransactions=True):
        current_agent = plugin.db.get_agent_hosting_loadbalancer(
            context, loadbalancer.id)
        if current_agent:
            LOG.debug('Load balancer %(loadbalancer_id)s '
                      'has already been hosted'
                      ' by lbaas agent %(agent_id)s',
                      {'loadbalancer_id': loadbalancer.id,
                       'agent_id': current_agent['id']})
            return
        active_agents = plugin.db.get_lbaas_agents(context, active=True)
        if not active_agents:
            LOG.warning(_LW('No active lbaas agents for load balancer %s'),
                        loadbalancer.id)
            return
        candidates = plugin.db.get_lbaas_agent_candidates(
            device_driver, active_agents)
        if not candidates:
            LOG.warning(_LW('No lbaas agent supporting device driver %s'),
                        device_driver)
            return
        # Random choice spreads load balancers across eligible agents.
        chosen_agent = random.choice(candidates)
        binding = LoadbalancerAgentBinding()
        binding.agent = chosen_agent
        binding.loadbalancer_id = loadbalancer.id
        context.session.add(binding)
        LOG.debug('Load balancer %(loadbalancer_id)s is scheduled '
                  'to lbaas agent %(agent_id)s',
                  {'loadbalancer_id': loadbalancer.id,
                   'agent_id': chosen_agent['id']})
        return chosen_agent
def _cleanup_namespace(self, loadbalancer_id):
    """Unplug any remaining devices in the load balancer's network
    namespace, then garbage-collect the namespace itself.
    """
    namespace = get_ns_name(loadbalancer_id)
    ns = ip_lib.IPWrapper(namespace=namespace)
    try:
        for device in ns.get_devices(exclude_loopback=True):
            if ip_lib.device_exists(device.name):
                self.vif_driver.unplug(device.name, namespace=namespace)
    except RuntimeError as re:
        # Fix: LOG.warn is deprecated in oslo.log -- use LOG.warning,
        # and pass the argument lazily instead of %-formatting the
        # translated message eagerly.
        # NOTE(review): exception .message does not exist on Python 3;
        # str(re) would be safer -- confirm target runtime.
        LOG.warning(_LW('An error happened on namespace cleanup: '
                        '%s'), re.message)
    ns.garbage_collect_namespace()
def _cleanup_namespace(self, loadbalancer_id):
    """Tear down leftover devices in the lb namespace and remove it."""
    ns_name = get_ns_name(loadbalancer_id)
    wrapper = ip_lib.IPWrapper(namespace=ns_name)
    try:
        devices = wrapper.get_devices(exclude_loopback=True)
        for dev in devices:
            if not ip_lib.device_exists(dev.name):
                continue
            self.vif_driver.unplug(dev.name, namespace=ns_name)
    except RuntimeError as re:
        # Cleanup is best-effort; log and continue to namespace removal.
        LOG.warning(_LW('An error happened on namespace cleanup: '
                        '%s'), re.message)
    wrapper.garbage_collect_namespace()
def schedule(self, plugin, context, loadbalancer, device_driver):
    """Schedule the load balancer to an active loadbalancer agent if
    there is no enabled agent hosting it.
    """
    with context.session.begin(subtransactions=True):
        lbaas_agent = plugin.db.get_agent_hosting_loadbalancer(
            context, loadbalancer.id)
        if lbaas_agent:
            LOG.debug('Load balancer %(loadbalancer_id)s '
                      'has already been hosted'
                      ' by lbaas agent %(agent_id)s',
                      {'loadbalancer_id': loadbalancer.id,
                       'agent_id': lbaas_agent['id']})
            return
        active_agents = plugin.db.get_lbaas_agents(context, active=True)
        if not active_agents:
            # Fix: LOG.warn is deprecated in oslo.log -- use LOG.warning.
            LOG.warning(
                _LW('No active lbaas agents for load balancer %s'),
                loadbalancer.id)
            return
        candidates = plugin.db.get_lbaas_agent_candidates(device_driver,
                                                          active_agents)
        if not candidates:
            # Fix: LOG.warn is deprecated in oslo.log -- use LOG.warning.
            LOG.warning(_LW('No lbaas agent supporting device driver %s'),
                        device_driver)
            return
        chosen_agent = random.choice(candidates)
        binding = LoadbalancerAgentBinding()
        binding.agent = chosen_agent
        binding.loadbalancer_id = loadbalancer.id
        context.session.add(binding)
        LOG.debug(
            'Load balancer %(loadbalancer_id)s is scheduled '
            'to lbaas agent %(agent_id)s',
            {'loadbalancer_id': loadbalancer.id,
             'agent_id': chosen_agent['id']})
        return chosen_agent
def _delete_pip_nports(success): if success: for port in ports: try: self.plugin._core_plugin.delete_port( context, port['id']) LOG.debug('pip nport id: %s', port['id']) except Exception as exception: # stop exception propagation, nport may have # been deleted by other means LOG.warning(_LW('pip nport delete failed: %r'), exception)
def _delete_pip_nports(success): if success: for port in ports: try: self.plugin._core_plugin.delete_port( context, port['id']) LOG.debug('pip nport id: %s', port['id']) except Exception as exception: # stop exception propagation, nport may have # been deleted by other means LOG.warning(_LW('pip nport delete failed: %r'), exception)
def get_stats(self, loadbalancer_id):
    """Read backend and per-member stats from the haproxy stats socket."""
    socket_path = self._get_state_file_path(loadbalancer_id,
                                            "haproxy_stats.sock", False)
    if not os.path.exists(socket_path):
        LOG.warning(_LW("Stats socket not found for loadbalancer %s"),
                    loadbalancer_id)
        return {}
    parsed = self._get_stats_from_socket(
        socket_path,
        entity_type=(STATS_TYPE_BACKEND_REQUEST |
                     STATS_TYPE_SERVER_REQUEST))
    stats = self._get_backend_stats(parsed)
    stats["members"] = self._get_servers_stats(parsed)
    return stats
def _deploy_existing_instances(self):
    """Redeploy load balancer instances already present on disk when
    the agent starts; failures are logged and skipped.
    """
    dirs = self._retrieve_deployed_instance_dirs()
    loadbalancers = self._retrieve_db_loadbalancers_from_dirs(dirs)
    loadbalancer_ids = [loadbalancer.id for loadbalancer in loadbalancers]
    self.deployed_loadbalancer_ids.update(loadbalancer_ids)
    for loadbalancer in loadbalancers:
        try:
            self.update_instance(loadbalancer)
        except RuntimeError:
            # do not stop anything this is a minor error
            # Fix: LOG.warn is deprecated in oslo.log -- use LOG.warning,
            # and pass the id lazily instead of eager %-formatting.
            LOG.warning(
                _LW("Existing load balancer %s could not be "
                    "deployed on the system."), loadbalancer.id)
def update_status(self, context, obj_type, obj_id, provisioning_status=None,
                  operating_status=None):
    """Write updated status fields for an LBaaS v2 object to the DB."""
    if not provisioning_status and not operating_status:
        # No-op call from the agent; surface it in the log.
        LOG.warning(
            _LW("update_status for %(obj_type)s %(obj_id)s called "
                "without specifying provisioning_status or "
                "operating_status") % {"obj_type": obj_type,
                                       "obj_id": obj_id})
        return
    type_to_model = {
        "loadbalancer": db_models.LoadBalancer,
        "pool": db_models.PoolV2,
        "listener": db_models.Listener,
        "member": db_models.MemberV2,
        "healthmonitor": db_models.HealthMonitorV2,
    }
    if obj_type not in type_to_model:
        raise n_exc.Invalid(_("Unknown object type: %s") % obj_type)
    try:
        self.plugin.db.update_status(
            context,
            type_to_model[obj_type],
            obj_id,
            provisioning_status=provisioning_status,
            operating_status=operating_status)
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(
            _LW("Cannot update status: %(obj_type)s %(obj_id)s "
                "not found in the DB, it was probably deleted "
                "concurrently"),
            {"obj_type": obj_type, "obj_id": obj_id})
def _deploy_existing_instances(self):
    """Pick up load balancer instances found on disk and redeploy them."""
    instance_dirs = self._retrieve_deployed_instance_dirs()
    lbs = self._retrieve_db_loadbalancers_from_dirs(instance_dirs)
    self.deployed_loadbalancer_ids.update(lb.id for lb in lbs)
    for lb in lbs:
        try:
            self.update_instance(lb)
        except RuntimeError:
            # do not stop anything this is a minor error
            LOG.warning(
                _LW("Existing load balancer %s could not be "
                    "deployed on the system."), lb.id)
def schedule(self, plugin, context, pool, device_driver):
    """Schedule the pool to an active loadbalancer agent if there
    is no enabled agent hosting it.
    """
    with context.session.begin(subtransactions=True):
        existing = plugin.get_lbaas_agent_hosting_pool(context, pool['id'])
        if existing:
            LOG.debug('Pool %(pool_id)s has already been hosted'
                      ' by lbaas agent %(agent_id)s',
                      {'pool_id': pool['id'],
                       'agent_id': existing['id']})
            return
        agents = plugin.get_lbaas_agents(context, active=True)
        if not agents:
            LOG.warning(_LW('No active lbaas agents for pool %s'),
                        pool['id'])
            return
        eligible = plugin.get_lbaas_agent_candidates(device_driver, agents)
        if not eligible:
            LOG.warning(_LW('No lbaas agent supporting device driver %s'),
                        device_driver)
            return
        # The concrete scheduler strategy picks from the candidates.
        chosen_agent = self._schedule(eligible, plugin, context)
        binding = PoolLoadbalancerAgentBinding()
        binding.agent = chosen_agent
        binding.pool_id = pool['id']
        context.session.add(binding)
        LOG.debug('Pool %(pool_id)s is scheduled to lbaas agent '
                  '%(agent_id)s',
                  {'pool_id': pool['id'],
                   'agent_id': chosen_agent['id']})
        return chosen_agent
def get_stats(self, pool_id):
    """Collect haproxy backend and member stats for a v1 pool."""
    socket_path = self._get_state_file_path(pool_id, 'sock', False)
    TYPE_BACKEND_REQUEST = 2
    TYPE_SERVER_REQUEST = 4
    if not os.path.exists(socket_path):
        LOG.warning(_LW('Stats socket not found for pool %s'), pool_id)
        return {}
    parsed = self._get_stats_from_socket(
        socket_path,
        entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST)
    stats = self._get_backend_stats(parsed)
    stats['members'] = self._get_servers_stats(parsed)
    return stats
def get_stats(self, pool_id):
    """Return pool statistics parsed from the haproxy stats socket."""
    stats_socket = self._get_state_file_path(pool_id, 'sock', False)
    TYPE_BACKEND_REQUEST = 2
    TYPE_SERVER_REQUEST = 4
    if os.path.exists(stats_socket):
        entity = TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST
        raw = self._get_stats_from_socket(stats_socket, entity_type=entity)
        result = self._get_backend_stats(raw)
        result['members'] = self._get_servers_stats(raw)
        return result
    LOG.warning(_LW('Stats socket not found for pool %s'), pool_id)
    return {}
def get_stats(self, loadbalancer_id):
    """Gather haproxy stats for a v2 load balancer via its socket."""
    stats_socket = self._get_state_file_path(loadbalancer_id,
                                             'haproxy_stats.sock', False)
    if not os.path.exists(stats_socket):
        LOG.warning(_LW('Stats socket not found for loadbalancer %s'),
                    loadbalancer_id)
        return {}
    entity = STATS_TYPE_BACKEND_REQUEST | STATS_TYPE_SERVER_REQUEST
    parsed = self._get_stats_from_socket(stats_socket, entity_type=entity)
    result = self._get_backend_stats(parsed)
    result['members'] = self._get_servers_stats(parsed)
    return result
def _get_stats_from_socket(self, socket_path, entity_type):
    """Query the haproxy stats unix socket and return the parsed stats.

    Returns an empty dict when the socket cannot be reached.
    """
    try:
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.connect(socket_path)
            s.send('show stat -1 %s -1\n' % entity_type)
            raw_stats = ''
            chunk_size = 1024
            while True:
                chunk = s.recv(chunk_size)
                raw_stats += chunk
                # haproxy closes after one response; a short read means
                # the full reply has been received.
                if len(chunk) < chunk_size:
                    break
            return self._parse_stats(raw_stats)
        finally:
            # Fix: the socket was never closed, leaking one fd per
            # stats poll.
            s.close()
    except socket.error as e:
        LOG.warning(_LW('Error while connecting to stats socket: %s'), e)
        return {}
def _get_stats_from_socket(self, socket_path, entity_type):
    """Fetch raw stats from haproxy's unix socket and parse them.

    :param socket_path: filesystem path of the haproxy stats socket
    :param entity_type: bitmask of stat entity types to request
    :returns: parsed stats dict, or {} if the socket is unreachable
    """
    try:
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.connect(socket_path)
            s.send('show stat -1 %s -1\n' % entity_type)
            raw_stats = ''
            chunk_size = 1024
            while True:
                chunk = s.recv(chunk_size)
                raw_stats += chunk
                if len(chunk) < chunk_size:
                    break
            return self._parse_stats(raw_stats)
        finally:
            # Fix: close the socket to avoid leaking a file descriptor
            # on every stats collection cycle.
            s.close()
    except socket.error as e:
        LOG.warning(_LW('Error while connecting to stats socket: %s'), e)
        return {}
def get_ready_devices(self, context, host=None):
    """Return ids of admin-up pools on this host's agent whose status is
    active or pending.
    """
    with context.session.begin(subtransactions=True):
        agents = self.plugin.get_lbaas_agents(context,
                                              filters={'host': [host]})
        if not agents:
            return []
        if len(agents) > 1:
            LOG.warning(_LW('Multiple lbaas agents found on host %s'),
                        host)
        pools = self.plugin.list_pools_on_lbaas_agent(context,
                                                      agents[0].id)
        pool_ids = [pool['id'] for pool in pools['pools']]
        up = True  # makes pep8 and sqlalchemy happy
        qry = (context.session.query(loadbalancer_db.Pool.id)
               .filter(loadbalancer_db.Pool.id.in_(pool_ids))
               .filter(loadbalancer_db.Pool.status.in_(
                   np_const.ACTIVE_PENDING_STATUSES))
               .filter(loadbalancer_db.Pool.admin_state_up == up))
        return [pool_id for pool_id, in qry]
def _delete_proxy_port(self, ctx, lb):
    # Best-effort removal of the proxy port created for this load
    # balancer; port deletion failures are logged, not raised.
    port_filter = {
        'name': ['proxy_' + lb.id],
    }
    ports = self.plugin.db._core_plugin.get_ports(ctx, filters=port_filter)
    if ports:
        # Only the first matching port is considered; presumably at most
        # one proxy port exists per lb -- TODO confirm with the creator.
        proxy_port = ports[0]
        proxy_port_ip_data = proxy_port['fixed_ips'][0]
        try:
            LOG.info(
                _LI('Deleting LB %(lb_id)s proxy port on subnet \
%(subnet_id)s with ip address %(ip_address)s') % {
                    'lb_id': lb.id,
                    'subnet_id': proxy_port_ip_data['subnet_id'],
                    'ip_address': proxy_port_ip_data['ip_address']
                })
            self.plugin.db._core_plugin.delete_port(ctx, proxy_port['id'])
        except Exception as exception:
            # stop exception propagation, nport may have
            # been deleted by other means
            LOG.warning(_LW('Proxy port deletion failed: %r'), exception)
def get_stats(self, loadbalancer_id):
    """Collect haproxy stats; when the socket is absent, only warn if
    the load balancer actually has an active VIP.
    """
    socket_path = self._get_state_file_path(loadbalancer_id,
                                            'haproxy_stats.sock', False)
    if os.path.exists(socket_path):
        entity = STATS_TYPE_BACKEND_REQUEST | STATS_TYPE_SERVER_REQUEST
        parsed = self._get_stats_from_socket(socket_path,
                                             entity_type=entity)
        result = self._get_backend_stats(parsed)
        result['members'] = self._get_servers_stats(parsed)
        return result
    lb_config = self.plugin_rpc.get_loadbalancer(loadbalancer_id)
    loadbalancer = data_models.LoadBalancer.from_dict(lb_config)
    if self._is_active(loadbalancer):
        LOG.warning(_LW('Stats socket not found for loadbalancer %s'),
                    loadbalancer_id)
    else:
        # Missing socket is expected when no listener is up yet.
        LOG.debug('Stats socket not found for loadbalancer %s,'
                  ' but loadbalancer has no VIP in state UP.'
                  ' Perhaps the lbaas-listener is not yet created?',
                  loadbalancer_id)
    return {}
def _delete_proxy_port(self, ctx, lb):
    # Locate and delete the proxy port named 'proxy_<lb id>'; failures
    # are swallowed because the port may already be gone.
    port_filter = {
        'name': ['proxy_' + lb.id],
    }
    ports = self.plugin.db._core_plugin.get_ports(
        ctx, filters=port_filter)
    if ports:
        proxy_port = ports[0]
        # First fixed IP is used for logging only.
        proxy_port_ip_data = proxy_port['fixed_ips'][0]
        try:
            LOG.info(_LI('Deleting LB %(lb_id)s proxy port on subnet \
%(subnet_id)s with ip address %(ip_address)s') %
                     {'lb_id': lb.id,
                      'subnet_id': proxy_port_ip_data['subnet_id'],
                      'ip_address': proxy_port_ip_data['ip_address']})
            self.plugin.db._core_plugin.delete_port(
                ctx, proxy_port['id'])
        except Exception as exception:
            # stop exception propagation, nport may have
            # been deleted by other means
            LOG.warning(_LW('Proxy port deletion failed: %r'), exception)
def update_status(self, context, obj_type, obj_id, status):
    """Update status of an LBaaS v1 object as reported by the agent.

    For 'health_monitor', ``obj_id`` is a dict carrying both the monitor
    and pool ids; for other types it is the object's id.
    """
    type_to_model = {
        'pool': loadbalancer_db.Pool,
        'vip': loadbalancer_db.Vip,
        'member': loadbalancer_db.Member,
        'health_monitor': loadbalancer_db.PoolMonitorAssociation,
    }
    if obj_type not in type_to_model:
        raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
    try:
        if obj_type == 'health_monitor':
            # Monitor status lives on the pool<->monitor association.
            self.plugin.update_pool_health_monitor(
                context, obj_id['monitor_id'], obj_id['pool_id'], status)
        else:
            self.plugin.update_status(
                context, type_to_model[obj_type], obj_id, status)
    except n_exc.NotFound:
        # update_status may come from agent on an object which was
        # already deleted from db with other request
        LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s '
                        'not found in the DB, it was probably deleted '
                        'concurrently'),
                    {'obj_type': obj_type, 'obj_id': obj_id})
def _flip_servers(self):
    """Exchange primary and secondary vDirect servers after a failure."""
    LOG.warning(_LW('Fliping servers. Current is: %(server)s, '
                    'switching to %(secondary)s'),
                {'server': self.server,
                 'secondary': self.secondary_server})
    # Swap so the next request goes to the peer.
    self.server, self.secondary_server = (self.secondary_server,
                                          self.server)
# Copyright 2014-2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from neutron_lbaas._i18n import _LW
from neutron_lbaas.drivers.haproxy import synchronous_namespace_driver

LOG = logging.getLogger(__name__)

# Fix: LOG.warn is deprecated in oslo.log -- use LOG.warning.
LOG.warning(_LW("This path has been deprecated. "
                "Use neutron_lbaas.drivers.haproxy."
                "synchronous_namespace_driver instead."))


# Backward-compatibility shim: re-export the driver under its old path.
class HaproxyNSDriver(synchronous_namespace_driver.HaproxyNSDriver):
    pass
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron_lbaas._i18n import _LW from neutron_lbaas.drivers import logging_noop LOG = logging.getLogger(__name__) LOG.warning( _LW("This path has been deprecated. " "Use neutron_lbaas.drivers.logging_noop instead.")) __path__ = logging_noop.__path__
def store_cert(self, project_id, certificate, private_key,
               intermediates=None, private_key_passphrase=None,
               expiration=None, name='LBaaS TLS Cert'):
    """Stores a certificate in the certificate manager.

    :param certificate: PEM encoded TLS certificate
    :param private_key: private key for the supplied certificate
    :param intermediates: ordered and concatenated intermediate certs
    :param private_key_passphrase: optional passphrase for the supplied key
    :param expiration: the expiration time of the cert in ISO 8601 format
    :param name: a friendly name for the cert
    :returns: the container_ref of the stored cert
    :raises Exception: if certificate storage fails
    """
    connection = self.auth.get_barbican_client(project_id)

    LOG.info(
        _LI("Storing certificate container '{0}' in Barbican.").format(
            name))

    # Track each created secret so it can be rolled back on failure.
    certificate_secret = None
    private_key_secret = None
    intermediates_secret = None
    pkp_secret = None

    try:
        certificate_secret = connection.secrets.create(
            payload=certificate,
            expiration=expiration,
            name="Certificate")
        private_key_secret = connection.secrets.create(
            payload=private_key,
            expiration=expiration,
            name="Private Key")
        certificate_container = connection.containers.create_certificate(
            name=name,
            certificate=certificate_secret,
            private_key=private_key_secret)
        # Optional members are only attached when provided.
        if intermediates:
            intermediates_secret = connection.secrets.create(
                payload=intermediates,
                expiration=expiration,
                name="Intermediates")
            certificate_container.intermediates = intermediates_secret
        if private_key_passphrase:
            pkp_secret = connection.secrets.create(
                payload=private_key_passphrase,
                expiration=expiration,
                name="Private Key Passphrase")
            certificate_container.private_key_passphrase = pkp_secret
        certificate_container.store()
        return certificate_container.container_ref
    # Barbican (because of Keystone-middleware) sometimes masks
    # exceptions strangely -- this will catch anything that it raises and
    # reraise the original exception, while also providing useful
    # feedback in the logs for debugging
    except Exception:
        # Roll back: delete any secrets that were created before the
        # failure, then re-raise the original exception.
        for secret in [certificate_secret, private_key_secret,
                       intermediates_secret, pkp_secret]:
            if secret and secret.secret_ref:
                old_ref = secret.secret_ref
                try:
                    secret.delete()
                    LOG.info(
                        _LI("Deleted secret {0} ({1}) during rollback.").
                        format(secret.name, old_ref))
                except Exception:
                    # A failed rollback delete is tolerable; Barbican may
                    # have already removed the secret.
                    LOG.warning(
                        _LW("Failed to delete {0} ({1}) during rollback. "
                            "This "
                            "is probably not a problem.").format(
                                secret.name, old_ref))
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error storing certificate data"))
# Copyright 2014-2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron_lbaas._i18n import _LW from neutron_lbaas.drivers.haproxy import synchronous_namespace_driver LOG = logging.getLogger(__name__) LOG.warning(_LW("This path has been deprecated. " "Use neutron_lbaas.drivers.haproxy." "synchronous_namespace_driver instead.")) class HaproxyNSDriver(synchronous_namespace_driver.HaproxyNSDriver): pass
def store_cert(certificate, private_key, intermediates=None,
               private_key_passphrase=None, expiration=None,
               name='Octavia TLS Cert', **kwargs):
    """Stores a certificate in the certificate manager.

    :param certificate: PEM encoded TLS certificate
    :param private_key: private key for the supplied certificate
    :param intermediates: ordered and concatenated intermediate certs
    :param private_key_passphrase: optional passphrase for the supplied key
    :param expiration: the expiration time of the cert in ISO 8601 format
    :param name: a friendly name for the cert
    :returns: the container_ref of the stored cert
    :raises Exception: if certificate storage fails
    """
    connection = BarbicanKeystoneAuth.get_barbican_client()

    LOG.info(_LI(
        "Storing certificate container '{0}' in Barbican."
    ).format(name))

    # Track created secrets so they can be cleaned up on failure.
    certificate_secret = None
    private_key_secret = None
    intermediates_secret = None
    pkp_secret = None

    try:
        certificate_secret = connection.secrets.create(
            payload=certificate,
            expiration=expiration,
            name="Certificate"
        )
        private_key_secret = connection.secrets.create(
            payload=private_key,
            expiration=expiration,
            name="Private Key"
        )
        certificate_container = connection.containers.create_certificate(
            name=name,
            certificate=certificate_secret,
            private_key=private_key_secret
        )
        # Intermediates and passphrase are optional container members.
        if intermediates:
            intermediates_secret = connection.secrets.create(
                payload=intermediates,
                expiration=expiration,
                name="Intermediates"
            )
            certificate_container.intermediates = intermediates_secret
        if private_key_passphrase:
            pkp_secret = connection.secrets.create(
                payload=private_key_passphrase,
                expiration=expiration,
                name="Private Key Passphrase"
            )
            certificate_container.private_key_passphrase = pkp_secret
        certificate_container.store()
        return certificate_container.container_ref
    # Barbican (because of Keystone-middleware) sometimes masks
    # exceptions strangely -- this will catch anything that it raises and
    # reraise the original exception, while also providing useful
    # feedback in the logs for debugging
    except Exception:
        # Roll back any secrets created so far, then re-raise.
        for secret in [certificate_secret, private_key_secret,
                       intermediates_secret, pkp_secret]:
            if secret and secret.secret_ref:
                old_ref = secret.secret_ref
                try:
                    secret.delete()
                    LOG.info(_LI(
                        "Deleted secret {0} ({1}) during rollback."
                    ).format(secret.name, old_ref))
                except Exception:
                    # The secret may already be gone; best-effort only.
                    LOG.warning(_LW(
                        "Failed to delete {0} ({1}) during rollback. This "
                        "is probably not a problem."
                    ).format(secret.name, old_ref))
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error storing certificate data"))
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from neutron_lbaas._i18n import _LW
from neutron_lbaas.drivers import logging_noop

LOG = logging.getLogger(__name__)

# Fix: LOG.warn is deprecated in oslo.log -- use LOG.warning.
LOG.warning(_LW("This path has been deprecated. "
                "Use neutron_lbaas.drivers.logging_noop instead."))

# Alias this package's search path to the new module so submodule imports
# under the old name keep resolving.
__path__ = logging_noop.__path__