def _configure_interfaces(self, config_data):
    """Bring up eth1/eth2 via DHCP and log the resulting interface state.

    :param config_data: interface configuration data (logged only; the
        actual interface setup is performed by the dhclient commands
        below).

    Returns: None
    """
    commands = ['sudo dhclient eth1',
                'sudo dhclient eth2',
                'cat /etc/network/interfaces']
    outputs = []
    for command in commands:
        # communicate() waits for the child and closes its stdout pipe,
        # avoiding the fd leak that bare .stdout.read() caused.
        proc = subprocess.Popen(command, shell=True,
                                stdout=subprocess.PIPE)
        outputs.append(proc.communicate()[0])
    output = "%s\n%s\n%s" % tuple(outputs)
    # BUG FIX: the original message said "eth0", but dhclient is run on
    # eth1 and eth2.
    LOG.info(_LI("Dhclient on eth1 and eth2, result: %(initial_data)s"),
             {'initial_data': output})
    LOG.info(_LI("Configures interfaces with configuration "
                 "data : %(interface_data)s "),
             {'interface_data': config_data})
def __init__(self):
    """Load the configured service chain node drivers, in order."""
    # Registered node drivers, keyed by name.
    self.drivers = {}
    # Ordered list of node drivers.
    self.ordered_drivers = []
    driver_names = cfg.CONF.node_composition_plugin.node_drivers
    LOG.info(_LI("Configured service chain node driver names: %s"),
             driver_names)
    super(NodeDriverManager, self).__init__(
        'gbpservice.neutron.servicechain.ncp_drivers',
        driver_names,
        invoke_on_load=True,
        name_order=True)
    LOG.info(_LI("Loaded service chain node driver names: %s"),
             self.names())
    self._register_drivers()
def __init__(self):
    """Load extension drivers in the configured calling order."""
    # Ordered list of extension drivers, defining the order in which
    # the drivers are called.
    self.ordered_ext_drivers = []
    configured = cfg.CONF.group_policy.extension_drivers
    LOG.info(_LI("Configured extension driver names: %s"), configured)
    super(ExtensionManager, self).__init__(
        'gbpservice.neutron.group_policy.extension_drivers',
        configured,
        invoke_on_load=True,
        name_order=True)
    LOG.info(_LI("Loaded extension driver names: %s"), self.names())
    self._register_drivers()
def firewall_deleted(self, agent_info, firewall_id, firewall=None):
    """ Enqueues the response from FwaaS operation to neutron plugin.

    :param agent_info: dict prepared by the agent, carrying the API
        context ('context' key) and the resource name ('resource' key)
    :param firewall_id: id of firewall resource
    :param firewall: firewall resource object (unused here; kept for
        RPC interface compatibility)

    Returns: None
    """
    # Notification payload consumed by the orchestrator: 'info' carries
    # routing metadata, 'notification' the per-resource event details.
    msg = {'info': {'service_type': const.SERVICE_TYPE,
                    'context': agent_info['context']},
           'notification': [{'resource': agent_info['resource'],
                             'data': {'firewall_id': firewall_id,
                                      'host': self.host,
                                      'notification_type': (
                                          'firewall_deleted')}}]}
    LOG.info(_LI("Sending Notification 'Firewall Deleted' to "
                 "Orchestrator for firewall: %(fw_id)s "),
             {'fw_id': firewall_id})
    self.notify._notification(msg)
def delete_firewall(self, context, firewall, host):
    """Handle a firewall delete request by queueing a delete event.

    :param context: neutron context
    :param firewall: firewall resource to be deleted
    :param host: host that originated the request

    Returns: None
    """
    LOG.info(_LI("Received request 'Delete Firewall'."))
    self._create_event(context, firewall, host,
                       const.FIREWALL_DELETE_EVENT)
def set_firewall_status(self, context, notification_data):
    """Relay a firewall-create completion to the firewall plugin.

    Extracts the firewall id and status from the configurator
    notification and casts 'set_firewall_status' to the plugin over RPC.
    """
    nfp_context = module_context.init()
    notification = notification_data['notification'][0]
    info = notification_data.get('info')
    # Propagate the caller's logging context into this worker.
    nfp_context['log_context'] = info.get('context').get(
        'logging_context', {})
    data = notification['data']
    firewall_id = data['firewall_id']
    status = data['status']
    LOG.info(
        _LI("Received firewall configuration create complete API, "
            "making an RPC call set firewall status for "
            "firewall:%(firewall)s and status: %(status)s"),
        {'firewall': firewall_id, 'status': status})
    # RPC call to plugin to set firewall status
    rpc_client = transport.RPCClient(a_topics.FW_NFP_PLUGIN_TOPIC)
    rpc_client.cctxt.cast(context, 'set_firewall_status',
                          host=data['host'],
                          firewall_id=firewall_id,
                          status=status)
def update_pool(self, context, old_pool, pool):
    """Enqueues event for worker to process update pool request.

    :param context: RPC context
    :param old_pool: old pool resource to be updated
    :param pool: new pool resource

    Returns: None
    """
    old_val, new_val = self.get_diff_of_dict(old_pool, pool)
    LOG.info(_LI("Received request 'Update Pool' for Pool:%(pool)s "
                 "in LB:%(lb_id)s with new Param:%(new_val)s and "
                 "old Param:%(old_val)s"),
             {'pool': pool['id'],
              'lb_id': pool['loadbalancer_id'],
              'old_val': old_val,
              'new_val': new_val})
    event_args = {'context': context,
                  lb_const.OLD_POOL: old_pool,
                  lb_const.POOL: pool}
    # Serialize on the parent loadbalancer so concurrent pool
    # operations on one LB do not interleave.
    self._send_event(lb_const.EVENT_UPDATE_POOL_V2, event_args,
                     serialize=True,
                     binding_key=pool['loadbalancer_id'],
                     key=pool['id'])
def configure_user(self, context, resource_data):
    """Configure authentication credentials on the VyOS service VM.

    Issues a 'change_auth' POST request to the service VM management IP.

    :param context: agent context used to build request headers
    :param resource_data: healthmonitor-format resource data holding
        the management IP of the service VM

    Returns: STATUS_SUCCESS on success, otherwise an error message
        string describing the failure.
    """
    headers = self._parse_vm_context(context)
    resource_data = self.parse.parse_data(common_const.HEALTHMONITOR,
                                          resource_data)
    mgmt_ip = resource_data['mgmt_ip']
    url = const.request_url % (mgmt_ip, self.port, 'change_auth')
    data = {}
    LOG.info(
        _LI("Initiating POST request to configure Authentication "
            "service at mgmt ip:%(mgmt_ip)s"),
        {'mgmt_ip': mgmt_ip})
    err_msg = ("Change Auth POST request to the VyOS firewall "
               "service at %s failed. " % url)
    try:
        resp = self.rest_api.fire(url, data, common_const.POST, headers)
    except Exception as err:
        err_msg += ("Reason: %r" % str(err).capitalize())
        LOG.error(err_msg)
        return err_msg
    if resp is common_const.STATUS_SUCCESS:
        msg = ("Configured user authentication successfully"
               " for vyos service at %r." % mgmt_ip)
        LOG.info(msg)
        return resp
    # BUG FIX: the original failure message repeated "Status code"
    # twice ("...para Status code Status code: ...").
    err_msg += (("Failed to change Authentication params. "
                 "Status code: %r, Reason: %r"
                 % (resp['status'], resp['reason']))
                if type(resp) is dict
                else ("Reason: " + resp))
    LOG.error(err_msg)
    return err_msg
def vpnservice_updated(self, context, **resource_data):
    """Registers the VPNaas plugin events to update the vpn configurations.

    :param context: dictionary, confined to the specific service type.
    :param resource_data: dictionary, confined to the specific
        operation type.

    Returns: None
    """
    # BUG FIX: the two string fragments previously concatenated with no
    # separating space, logging "...Updated'.for API...".
    LOG.info(_LI("Received request 'VPN Service Updated'. "
                 "for API '%(api)s'"),
             {'api': resource_data.get('reason', '')})
    arg_dict = {'context': context,
                'resource_data': resource_data}
    # Serializing the event because simultaneous configure
    # requests overrides the same crypto-map in the service VM
    # which results in corrupting the crypto-map
    resource_type = resource_data.get('rsrc_type')
    if resource_type and resource_type.lower() == 'ipsec_site_connection':
        ev = self.sc.new_event(
            id='VPNSERVICE_UPDATED',
            key=resource_data['resource']['id'],
            data=arg_dict,
            serialize=True,
            binding_key=resource_data['resource']['vpnservice_id'])
        msg = "serializing event: %s" % ('VPNSERVICE_UPDATED')
        LOG.debug(msg)
    else:
        ev = self.sc.new_event(id='VPNSERVICE_UPDATED', data=arg_dict)
    self.sc.post_event(ev)
def handle_event(self, ev):
    """ Demultiplexes the vpn request to appropriate driver methods.

    :param ev: event object sent from the process model.

    Returns: None
    """
    if ev.id == 'VPN_SYNC':
        # Sync events are handed back for polling; note this is not an
        # elif — a VPN_SYNC event is also checked against the branch
        # below (it will not match).
        self._sc.poll_event(ev)
    if ev.id == 'VPNSERVICE_UPDATED':
        try:
            msg = ("Worker process with ID: %s starting "
                   "to handle task: %s of topic: %s. "
                   % (os.getpid(), ev.id,
                      const.VPN_GENERIC_CONFIG_RPC_TOPIC))
            LOG.debug(msg)
            agent_info = ev.data['context']['agent_info']
            service_vendor = agent_info['service_vendor']
            service_feature = agent_info['service_feature']
            driver = self._get_driver(service_vendor, service_feature)
            LOG.info(_LI("Invoking driver with service vendor:"
                         "%(service_vendor)s "),
                     {'service_vendor': service_vendor})
            # NOTE(review): the driver is stored on the CLASS, not the
            # instance, so the most recently handled event wins —
            # confirm this is intended if multiple vendors are active.
            setattr(VPNaasEventHandler, "service_driver", driver)
            self._vpnservice_updated(ev, driver)
        except Exception as err:
            msg = ("Failed to perform the operation: %s. %s"
                   % (ev.id, str(err).capitalize()))
            LOG.error(msg)
        finally:
            # Always mark the event complete so the core framework does
            # not treat it as stuck, even on failure.
            self._sc.event_complete(ev)
def update_member(self, context, old_member, member):
    """Enqueues event for worker to process update member request.

    :param context: RPC context
    :param old_member: old member resource to be updated
    :param member: new member resource

    Returns: None
    """
    old_val, new_val = self.get_diff_of_dict(old_member, member)
    LOG.info(_LI("Received request 'Update Member' for Member:"
                 "%(member_id)s in Pool:%(pool_id)s with new Param:"
                 "%(new_val)s and old Param:%(old_val)s"),
             {'pool_id': member['pool_id'],
              'member_id': member['id'],
              'old_val': old_val,
              'new_val': new_val})
    event_args = {'context': context,
                  lb_const.OLD_MEMBER: old_member,
                  lb_const.MEMBER: member}
    # Serialize on the parent loadbalancer of the member's pool.
    self._send_event(lb_const.EVENT_UPDATE_MEMBER_V2, event_args,
                     serialize=True,
                     binding_key=member[lb_const.POOL]['loadbalancer_id'],
                     key=member['id'])
def update_healthmonitor(self, context, old_healthmonitor,
                         healthmonitor):
    """Enqueues event for worker to process update health monitor request.

    :param context: RPC context
    :param old_healthmonitor: health_monitor resource to be updated
    :param healthmonitor: new health_monitor resource

    Returns: None
    """
    old_val, new_val = self.get_diff_of_dict(old_healthmonitor,
                                             healthmonitor)
    LOG.info(_LI("Received request 'Update Pool Health Monitor' for "
                 "Health monitor:%(hm)s with new Param:%(new_val)s and "
                 "old Param:%(old_val)s"),
             {'hm': healthmonitor['id'],
              'old_val': old_val,
              'new_val': new_val})
    event_args = {'context': context,
                  lb_const.OLD_HEALTHMONITOR: old_healthmonitor,
                  lb_const.HEALTHMONITOR: healthmonitor}
    # Serialize on the loadbalancer of the monitor's pool.
    lb_id = healthmonitor[lb_const.POOL]['loadbalancer_id']
    self._send_event(lb_const.EVENT_UPDATE_HEALTH_MONITOR_V2,
                     event_args, serialize=True,
                     binding_key=lb_id,
                     key=healthmonitor['id'])
def update_listener(self, context, old_listener, listener):
    """Enqueues event for worker to process update listener request.

    :param context: RPC context
    :param old_listener: old listener resource to be updated
    :param listener: new listener resource

    Returns: None
    """
    old_val, new_val = self.get_diff_of_dict(old_listener, listener)
    LOG.info(_LI("Received request 'Update Listener' for Listener:"
                 "%(listener)s in LB:%(lb_id)s with new Param:"
                 "%(new_val)s and old Param:%(old_val)s"),
             {'lb_id': listener['loadbalancer_id'],
              'listener': listener['id'],
              'old_val': old_val,
              'new_val': new_val})
    event_args = {'context': context,
                  lb_const.OLD_LISTENER: old_listener,
                  lb_const.LISTENER: listener}
    # Serialize on the parent loadbalancer.
    self._send_event(lb_const.EVENT_UPDATE_LISTENER_V2, event_args,
                     serialize=True,
                     binding_key=listener['loadbalancer_id'],
                     key=listener['id'])
def create_loadbalancer(self, context, loadbalancer, driver_name):
    """Enqueues event for worker to process create loadbalancer request.

    :param context: RPC context
    :param loadbalancer: loadbalancer resource to be created
    :param driver_name: name of the LB driver that should handle it

    Returns: None
    """
    LOG.info(_LI("Received request 'Create Loadbalancer' for LB:%(lb)s "
                 "with driver:%(driver_name)s"),
             {'lb': loadbalancer['id'],
              'driver_name': driver_name})
    event_args = {'context': context,
                  lb_const.LOADBALANCER: loadbalancer,
                  'driver_name': driver_name}
    # Serialize per loadbalancer id so operations on the same LB queue.
    self._send_event(lb_const.EVENT_CREATE_LOADBALANCER_V2, event_args,
                     serialize=True,
                     binding_key=loadbalancer['id'],
                     key=loadbalancer['id'])
def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):
    """Enqueues event for worker to process update loadbalancer request.

    :param context: RPC context
    :param old_loadbalancer: old loadbalancer resource to be updated
    :param loadbalancer: new loadbalancer resource

    Returns: None
    """
    old_val, new_val = self.get_diff_of_dict(old_loadbalancer,
                                             loadbalancer)
    LOG.info(_LI("Received request 'Update Loadbalancer' for LB:%(lb)s "
                 "with new Param:%(new_val)s and old Param:%(old_val)s"),
             {'lb': loadbalancer['id'],
              'new_val': new_val,
              'old_val': old_val})
    event_args = {'context': context,
                  lb_const.OLD_LOADBALANCER: old_loadbalancer,
                  lb_const.LOADBALANCER: loadbalancer}
    self._send_event(lb_const.EVENT_UPDATE_LOADBALANCER_V2, event_args,
                     serialize=True,
                     binding_key=loadbalancer['id'],
                     key=loadbalancer['id'])
def delete_network_function_device_config(self, context, request_data):
    """RPC method to clear configuration of a network service device.

    Clears configuration of a network service VM. This RPC method is
    invoked by the configurator REST server. It clears configuration of
    a network service based on the configuration request specified in
    the request_data argument.

    :param context: RPC context instance
    :param request_data: RPC data

    Returns: None
    """
    try:
        nfp_context = module_context.init()
        info = request_data.get('info')
        # Carry the caller's logging context into this worker.
        nfp_context['log_context'] = info['context'].get(
            'logging_context', {})
        LOG.info(_LI("Received RPC DELETE NETWORK FUNCTION DEVICE CONFIG "
                     "for %(service_type)s, NFI: %(nfi)s, "
                     "NF_ID: %(nf_id)s"),
                 {'service_type': request_data['info']['service_type'],
                  'nfi': request_data['info']['context']['nfi_id'],
                  'nf_id': request_data['info']['context']['nf_id']})
        self._invoke_service_agent('delete', request_data, True)
    except Exception as err:
        LOG.error("Failed to delete network device configuration. %s"
                  % str(err).capitalize())
def update_network_function_config(self, context, request_data):
    """RPC method to update of configuration in a network service.

    Updates configuration of a network service. This RPC method is
    invoked by the configurator REST server. It updates configuration
    of a network service based on the configuration request specified
    in the request_data argument.

    :param context: RPC context instance
    :param request_data: RPC data

    Returns: None
    """
    try:
        nfp_context = module_context.init()
        info = request_data.get('info')
        # Carry the caller's logging context into this worker.
        nfp_context['log_context'] = info['context'].get(
            'logging_context', {})
        LOG.info(_LI("Received RPC UPDATE NETWORK FUNCTION CONFIG "
                     "for %(service_type)s "),
                 {'service_type': request_data['info']['service_type']})
        self._invoke_service_agent('update', request_data)
    except Exception as err:
        LOG.error("Failed to update network service configuration. %s"
                  % str(err).capitalize())
def _register_drivers(self):
    """Populate the name-keyed and ordered node driver registries."""
    for loaded in self:
        self.drivers[loaded.name] = loaded
        self.ordered_drivers.append(loaded)
    registered_names = [drv.name for drv in self.ordered_drivers]
    LOG.info(_LI("Registered service chain node drivers: %s"),
             registered_names)
def create_floatingip(self, context, floatingip): fip = floatingip['floatingip'] # Verify that subnet is not a SNAT host-pool self._md.check_floatingip_external_address(context, fip) with context.session.begin(subtransactions=True): if fip.get('subnet_id') or fip.get('floating_ip_address'): result = super(ApicL3Plugin, self).create_floatingip( context, floatingip) else: # Iterate over non SNAT host-pool subnets and try to allocate # an address other_subs = self._md.get_subnets_for_fip(context, fip) result = None for ext_sn in other_subs: fip['subnet_id'] = ext_sn try: with context.session.begin(nested=True): result = (super(ApicL3Plugin, self) .create_floatingip(context, floatingip)) break except exceptions.IpAddressGenerationFailure: LOG.info(_LI('No more floating IP addresses available ' 'in subnet %s'), ext_sn) if not result: raise exceptions.IpAddressGenerationFailure( net_id=fip['floating_network_id']) self._md.create_floatingip(context, result) self.update_floatingip_status(context, result['id'], result['status']) return result
def update_pool_stats(self, pool_id, stats, context, pool=None):
    """ Enqueues the response from LBaaS operation to neutron plugin.

    :param pool_id: pool id
    :param stats: statistics of that pool
    :param context: neutron context
    :param pool: unused; retained for interface compatibility

    Returns: None
    """
    notification = {'resource': 'pool',
                    'data': {'pool_id': pool_id,
                             'stats': stats,
                             'notification_type': ('update_pool_stats'),
                             'pool': pool_id}}
    msg = {'info': {'service_type': lb_const.SERVICE_TYPE,
                    'context': context.to_dict()},
           'notification': [notification]}
    LOG.info(
        _LI("Sending Notification 'Update Pool Stats' "
            "for pool: %(pool_id)s with stats:%(stats)s"),
        {'pool_id': pool_id, 'stats': stats})
    self.notify._notification(msg)
def _add_routes(self, route_info):
    """Install source-policy routing rules and default routes.

    For every source CIDR, adds 'ip rule' entries binding the CIDR to a
    per-interface routing table (20 + interface number), then installs
    the default route in each of those tables.

    :param route_info: dict whose 'resource_data' describes the routes.

    :raises Exception: if an interface has no IP address or a route
        command fails.
    """
    LOG.info(_LI("Configuring routes with configuration "
                 "data : %(route_data)s "),
             {'route_data': route_info['resource_data']})
    source_cidrs, gateway_ip = self.get_source_cidrs_and_gateway_ip(
        route_info)
    default_route_commands = []
    for cidr in source_cidrs:
        try:
            source_interface = self._get_if_name_by_cidr(cidr)
        except Exception:
            raise Exception(_("Some of the interfaces do not have "
                              "IP Address"))
        try:
            interface_number_string = source_interface.split("eth", 1)[1]
        except IndexError:
            LOG.error(_LE("Retrieved wrong interface %(interface)s for "
                          "configuring routes"),
                      {'interface': source_interface})
            # BUG FIX: previously execution fell through here and the
            # next block reused a stale (or, on the first iteration,
            # undefined) interface number. Skip this CIDR instead.
            continue
        try:
            # Per-interface routing table: table 20+N for ethN.
            routing_table_number = 20 + int(interface_number_string)
            ip_rule_command = "ip rule add from %s table %s" % (
                cidr, routing_table_number)
            out1 = subprocess.Popen(ip_rule_command, shell=True,
                                    stdout=subprocess.PIPE).stdout.read()
            ip_rule_command = "ip rule add to %s table main" % (cidr)
            out2 = subprocess.Popen(ip_rule_command, shell=True,
                                    stdout=subprocess.PIPE).stdout.read()
            ip_route_command = "ip route add table %s default via %s" % (
                routing_table_number, gateway_ip)
            # Defer default-route installation until all rules exist.
            default_route_commands.append(ip_route_command)
            output = "%s\n%s" % (out1, out2)
            LOG.info(_LI("Static route configuration result: %(output)s"),
                     {'output': output})
        except Exception as ex:
            raise Exception(_("Failed to add static routes: %(ex)s") % {
                'ex': str(ex)})
    for command in default_route_commands:
        try:
            out = subprocess.Popen(command, shell=True,
                                   stdout=subprocess.PIPE).stdout.read()
            LOG.info(_LI("Static route configuration result: %(output)s"),
                     {'output': out})
        except Exception as ex:
            raise Exception(_("Failed to add static routes: %(ex)s") % {
                'ex': str(ex)})
def __init__(self):
    """Load the configured policy drivers, preserving order."""
    # Registered policy drivers, keyed by name.
    self.policy_drivers = {}
    # Ordered list of policy drivers, defining the order in which the
    # drivers are called.
    self.ordered_policy_drivers = []
    self.reverse_ordered_policy_drivers = []
    configured_names = cfg.CONF.group_policy.policy_drivers
    LOG.info(_LI("Configured policy driver names: %s"), configured_names)
    super(PolicyDriverManager, self).__init__(
        'gbpservice.neutron.group_policy.policy_drivers',
        configured_names,
        invoke_on_load=True,
        name_order=True)
    LOG.info(_LI("Loaded policy driver names: %s"), self.names())
    self._register_policy_drivers()
def __init__(self):
    """Initialize the node driver manager and the configured plumber."""
    self.driver_manager = manager.NodeDriverManager()
    super(NodeCompositionPlugin, self).__init__()
    self.driver_manager.initialize()
    plumber_class = cfg.CONF.node_composition_plugin.node_plumber
    self.plumber = utils.load_plugin(PLUMBER_NAMESPACE, plumber_class)
    self.plumber.initialize()
    LOG.info(_LI("Initialized node plumber '%s'"), plumber_class)
def _cleanup_l2_policy(self, context, l2p_id):
    """Delete an implicitly-owned L2 policy, tolerating in-use errors."""
    session = context._plugin_context.session
    # Only remove policies this plugin created implicitly.
    if not self._l2_policy_is_owned(session, l2p_id):
        return
    try:
        self._delete_l2_policy(context._plugin_context, l2p_id)
    except gbp_ext.L2PolicyInUse:
        LOG.info(_LI(
            "Cannot delete implicit L2 Policy %s because it's "
            "in use."), l2p_id)
def process_request(self, sa_req_list, notification_data): """Forwards the RPC message from configurator to service agents. Checks if the request message contains multiple data blobs. If multiple data blobs are found, a batch event is generated otherwise a single event. :param sa_req_list: List of data blobs prepared by de-multiplexer for service agents processing. :param notification_data: Notification blobs prepared by the service agents after processing requests blobs. Each request blob will have a corresponding notification blob. Returns: None """ # In case of malformed input, send failure notification if not self.validate_request(sa_req_list, notification_data): # REVISIT(JAGADISH): Need to send failure notification return # Multiple request data blobs needs batch processing. Send batch # processing event or do direct processing of single request data blob if (len(sa_req_list) > 1): LOG.info(_LI("Creating event PROCESS BATCH")) args_dict = { 'sa_req_list': sa_req_list, 'notification_data': notification_data } ev = self.sc.new_event(id=const.EVENT_PROCESS_BATCH, data=args_dict, key=None) self.sc.post_event(ev) else: agent_info = sa_req_list[0]['agent_info'] # Renaming the neutron context in resource data of *aaS to context. # Adding agent_info which contains information required for # demux and response data in agent to neutron_context in *aaS if not sa_req_list[0]['is_generic_config'] and not ( agent_info['resource'] in const.NFP_SERVICE_LIST): # Here, the neutron context is overloaded with agent_info # dict which contains the API context in addition to other # fields like service type, service vendor, resource etc. 
# The agent_info dict is constructed inside the demuxer library sa_req_list[0]['resource_data']['neutron_context'].update( {'agent_info': agent_info}) # When calling the *aaS or NFPService agents, the # "neutron context" passed inside the resource data is # renamed to "context" sa_req_list[0]['resource_data']['context'] = sa_req_list[0][ 'resource_data'].pop('neutron_context') getattr(self, sa_req_list[0]['method'])( **sa_req_list[0]['resource_data']) else: sa_req_list[0]['agent_info'].update( {'notification_data': notification_data}) getattr(self, sa_req_list[0]['method'])( agent_info, sa_req_list[0]['resource_data'])
def create_firewall(self, context, firewall, host):
    """ Implements firewall creation

    Issues REST call to service VM for firewall creation

    :param context: Neutron context
    :param firewall: Firewall resource object from neutron fwaas plugin
    :param host: Name of the host machine

    Returns: SUCCESS/Failure message with reason.
    """
    headers = self._parse_vm_context(context['agent_info']['context'])
    resource_data = self.parse.parse_data(common_const.FIREWALL, context)
    LOG.info(
        _LI("Processing request 'Create Firewall' in FWaaS Driver "
            "for Firewall ID: %(f_id)s"),
        {'f_id': firewall['id']})
    mgmt_ip = resource_data.get('mgmt_ip')
    url = const.request_url % (mgmt_ip, self.port,
                               'configure-firewall-rule')
    msg = ("Initiating POST request for FIREWALL ID: %r Tenant ID:"
           " %r. URL: %s" % (firewall['id'], firewall['tenant_id'], url))
    LOG.debug(msg)
    data = jsonutils.dumps(firewall)
    err_msg = ("Configure firewall POST request to the VyOS "
               "service at %s failed. " % url)
    try:
        # REST call to the service VM; raises on transport failure.
        resp = self.rest_api.fire(url, data, common_const.POST, headers)
    except Exception as err:
        err_msg += ("Reason: %r" % str(err).capitalize())
        LOG.error(err_msg)
        return common_const.STATUS_ERROR
    if resp is common_const.STATUS_SUCCESS:
        LOG.info(_LI("Configured firewall successfully at URL: %(url)s "),
                 {'url': url})
        return common_const.STATUS_ACTIVE
    # NOTE(review): resp.pop('message') mutates resp before it is
    # interpolated, so the logged dict no longer contains 'message' —
    # presumably intentional to avoid duplication; confirm.
    err_msg += (("Reason: %r, Response Content: %r"
                 % (resp.pop('message'), resp))
                if type(resp) is dict
                else ("Reason: " + resp))
    LOG.error(err_msg)
    return common_const.STATUS_ERROR
def initialize(self):
    """Initialize all the service chain node drivers."""
    self.native_bulk_support = True
    for node_driver in self.ordered_drivers:
        LOG.info(_LI("Initializing service chain node drivers '%s'"),
                 node_driver.name)
        node_driver.obj.initialize(node_driver.name)
        # Bulk support holds only if every loaded driver supports it.
        self.native_bulk_support &= getattr(node_driver.obj,
                                            'native_bulk_support', True)
def __init__(self):
    """Initialize the extended ML2 plugin (managers, RPC, callbacks)."""
    LOG.info(_LI("Ml2Plus initializing"))
    # Replace the callback manager's notify loop with the patched one.
    registry._get_callback_manager()._notify_loop = (
        patch_neutron._notify_loop)
    # First load drivers, then initialize DB, then initialize drivers
    self.type_manager = ml2_managers.TypeManager()
    self.extension_manager = managers.ExtensionManager()
    self.mechanism_manager = managers.MechanismManager()
    super(ml2_plugin.Ml2Plugin, self).__init__()
    self.type_manager.initialize()
    self.extension_manager.initialize()
    self.mechanism_manager.initialize()
    registry.subscribe(self._port_provisioned, resources.PORT,
                       provisioning_blocks.PROVISIONING_COMPLETE)
    # Track segment changes both before and after commit.
    registry.subscribe(self._handle_segment_change, resources.SEGMENT,
                       events.PRECOMMIT_CREATE)
    registry.subscribe(self._handle_segment_change, resources.SEGMENT,
                       events.PRECOMMIT_DELETE)
    registry.subscribe(self._handle_segment_change, resources.SEGMENT,
                       events.AFTER_CREATE)
    registry.subscribe(self._handle_segment_change, resources.SEGMENT,
                       events.AFTER_DELETE)
    try:
        # These subnet events only exist in newer Neutron releases.
        registry.subscribe(self._subnet_delete_precommit_handler,
                           resources.SUBNET, events.PRECOMMIT_DELETE)
        registry.subscribe(self._subnet_delete_after_delete_handler,
                           resources.SUBNET, events.AFTER_DELETE)
    except AttributeError:
        LOG.info(
            _LI("Detected older version of Neutron, ML2Plus plugin "
                "is not subscribed to subnet_precommit_delete and "
                "subnet_after_delete events"))
    self._setup_dhcp()
    self._start_rpc_notifiers()
    self.add_agent_status_check_worker(self.agent_health_check)
    self._verify_service_plugins_requirements()
    # Per-resource flags controlling whether DB objects are refreshed
    # from configuration.
    self.refresh_network_db_obj = cfg.CONF.ml2plus.refresh_network_db_obj
    self.refresh_port_db_obj = cfg.CONF.ml2plus.refresh_port_db_obj
    self.refresh_subnet_db_obj = cfg.CONF.ml2plus.refresh_subnet_db_obj
    self.refresh_subnetpool_db_obj = (
        cfg.CONF.ml2plus.refresh_subnetpool_db_obj)
    self.refresh_address_scope_db_obj = (
        cfg.CONF.ml2plus.refresh_address_scope_db_obj)
    LOG.info(_LI("Modular L2 Plugin (extended) initialization complete"))
def delete_firewall(self, context, firewall, host):
    """ Implements firewall deletion

    Issues REST call to service VM for firewall deletion

    :param context: Neutron context
    :param firewall: Firewall resource object from neutron fwaas plugin
    :param host: Name of the host machine

    Returns: SUCCESS/Failure message with reason.
    """
    headers = self._parse_vm_context(context['agent_info']['context'])
    LOG.info(
        _LI("Processing request 'Delete Firewall' in FWaaS Driver "
            "for Firewall ID:%(f_id)s"),
        {'f_id': firewall['id']})
    resource_data = self.parse.parse_data(common_const.FIREWALL, context)
    mgmt_ip = resource_data.get('mgmt_ip')
    url = const.request_url % (mgmt_ip, self.port, 'delete-firewall-rule')
    msg = ("Initiating DELETE request. URL: %s" % url)
    LOG.info(msg)
    data = jsonutils.dumps(firewall)
    err_msg = ("Delete firewall POST request to the VyOS "
               "service at %s failed. " % url)
    try:
        resp = self.rest_api.fire(url, data, common_const.DELETE, headers)
    except Exception as err:
        err_msg += ("Reason: %r" % str(err).capitalize())
        LOG.error(err_msg)
        # Delete is best-effort: report success so cleanup can proceed.
        return common_const.STATUS_SUCCESS
    if resp is common_const.STATUS_SUCCESS:
        msg = ("Deleted firewall successfully for service at %r." % url)
        LOG.info(msg)
        return common_const.STATUS_DELETED
    if type(resp) is dict:
        if not resp.get('delete_success') and (
                resp.get('message') == const.INTERFACE_NOT_FOUND):
            # BUG FIX: resp is a dict in this branch, so the original
            # 'resp.content' raised AttributeError; log the dict itself.
            err_msg += ("Firewall was not deleted as interface was not "
                        "available in the firewall. It might have got "
                        "detached. So marking this delete as SUCCESS. "
                        "URL: %r, Response Content: %r" % (url, resp))
            LOG.error(err_msg)
            return common_const.STATUS_SUCCESS
        else:
            err_msg += ("Response Content: %r" % resp)
    else:
        err_msg += ("Reason: " + resp)
    LOG.error(err_msg)
    msg = ("Firewall deletion has failed, but still sending"
           "status as firewall deleted success from configurator")
    LOG.info(msg)
    return common_const.STATUS_DELETED
def extension_aliases(self):
    """Return extension aliases exposed by the loaded drivers, in order."""
    aliases = []
    for ext_driver in self.ordered_ext_drivers:
        alias = ext_driver.obj.extension_alias
        LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"),
                 {'alias': alias, 'drv': ext_driver.name})
        aliases.append(alias)
    return aliases