def create_dvr_fip_interfaces(self, ex_gw_port):
    floating_ips = self.get_floating_ips()
    fip_agent_port = self.get_floating_agent_gw_interface(
        ex_gw_port['network_id'])
    LOG.debug("FloatingIP agent gateway port received from the plugin: "
              "%s", fip_agent_port)
    if floating_ips:
        is_first = self.fip_ns.subscribe(self.router_id)
        if is_first and not fip_agent_port:
            LOG.debug("No FloatingIP agent gateway port possibly due to "
                      "late binding of the private port to the host, "
                      "requesting agent gateway port for 'network-id': "
                      "%s", ex_gw_port['network_id'])
            fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
                self.agent.context, ex_gw_port['network_id'])
            if not fip_agent_port:
                LOG.error(_LE("No FloatingIP agent gateway port "
                              "returned from server for 'network-id': "
                              "%s"), ex_gw_port['network_id'])
        if is_first and fip_agent_port:
            if 'subnets' not in fip_agent_port:
                LOG.error(_LE('Missing subnet/agent_gateway_port'))
            else:
                self.fip_ns.create_gateway_port(fip_agent_port)

    if self.fip_ns.agent_gateway_port and floating_ips:
        if self.dist_fip_count == 0:
            self.fip_ns.create_rtr_2_fip_link(self)

            # Kicks the FW agent to add rules for the IR namespace
            # if configured.
            self.agent.process_router_add(self)
def run_idl(self, txn):
    try:
        port = self.row_by_value('Port', 'name', self.port)
    except RowNotFound:
        if self.if_exists:
            return
        msg = _LE("Port %s does not exist") % self.port
        raise RuntimeError(msg)
    if self.bridge:
        br = self.row_by_value('Bridge', 'name', self.bridge)
    else:
        br = next(b for b in self.api._tables['Bridge'].rows.values()
                  if port in b.ports)

    if port.uuid not in br.ports and not self.if_exists:
        # TODO(twilson) Make real errors across both implementations
        msg = _LE("Port %(port)s does not exist on %(bridge)s!") % {
            'port': self.port, 'bridge': self.bridge}
        LOG.error(msg)
        raise RuntimeError(msg)

    br.verify('ports')
    ports = br.ports
    ports.remove(port)
    br.ports = ports

    # Also remove port/interface directly for indexing?
    port.verify('interfaces')
    for iface in port.interfaces:
        del self.api._tables['Interface'].rows[iface.uuid]
    del self.api._tables['Port'].rows[port.uuid]
def delete_addr_and_conntrack_state(self, cidr):
    """Delete an address along with its conntrack state.

    This terminates any active connections through an IP.

    :param cidr: the IP address for which state should be removed.
        This can be passed as a string with or without /NN.
        A netaddr.IPAddress or netaddr.IPNetwork representing the
        IP address can also be passed.
    """
    self.addr.delete(cidr)
    ip_str = str(netaddr.IPNetwork(cidr).ip)
    ip_wrapper = IPWrapper(namespace=self.namespace)

    # Delete conntrack state for ingress traffic.
    # If 0 flow entries have been deleted, conntrack -D will return 1.
    try:
        ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
                                 check_exit_code=True,
                                 extra_ok_codes=[1])
    except RuntimeError:
        LOG.exception(_LE("Failed deleting ingress connection state of"
                          " floatingip %s"), ip_str)

    # Delete conntrack state for egress traffic.
    try:
        ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
                                 check_exit_code=True,
                                 extra_ok_codes=[1])
    except RuntimeError:
        LOG.exception(_LE("Failed deleting egress connection state of"
                          " floatingip %s"), ip_str)
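# For reference, the extra_ok_codes=[1] above matters because conntrack -D
# exits with 1 when zero flow entries matched, which is not an error here.
# A minimal standalone sketch of the same invocations, assuming the ip and
# conntrack binaries are available; the namespace name is hypothetical.
import subprocess

def _delete_conntrack_state(namespace, ip_str):
    # '-d' matches ingress destination, '-q' matches egress reply-source.
    for direction in ('-d', '-q'):
        cmd = ['ip', 'netns', 'exec', namespace,
               'conntrack', '-D', direction, ip_str]
        rc = subprocess.call(cmd)
        if rc not in (0, 1):  # 1 == nothing matched; treat as success
            raise RuntimeError('conntrack -D failed with exit code %d' % rc)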
def ensure_physical_in_bridge(self, network_id, network_type,
                              physical_network, segmentation_id):
    if network_type == p_const.TYPE_VXLAN:
        if self.vxlan_mode == lconst.VXLAN_NONE:
            LOG.error(_LE("Unable to add vxlan interface for network %s"),
                      network_id)
            return
        return self.ensure_vxlan_bridge(network_id, segmentation_id)

    physical_interface = self.interface_mappings.get(physical_network)
    if not physical_interface:
        LOG.error(_LE("No mapping for physical network %s"),
                  physical_network)
        return
    if network_type == p_const.TYPE_FLAT:
        return self.ensure_flat_bridge(network_id, physical_interface)
    elif network_type == p_const.TYPE_VLAN:
        return self.ensure_vlan_bridge(network_id, physical_interface,
                                       segmentation_id)
    else:
        LOG.error(_LE("Unknown network_type %(network_type)s for network "
                      "%(network_id)s."),
                  {'network_type': network_type,
                   'network_id': network_id})
def main():
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    try:
        config_parser = SriovNicAgentConfigParser()
        config_parser.parse()
        device_mappings = config_parser.device_mappings
        exclude_devices = config_parser.exclude_devices
    except ValueError:
        LOG.exception(_LE("Failed on Agent configuration parse. "
                          "Agent terminated!"))
        raise SystemExit(1)
    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)

    polling_interval = cfg.CONF.AGENT.polling_interval
    try:
        agent = SriovNicSwitchAgent(device_mappings,
                                    exclude_devices,
                                    polling_interval)
    except exc.SriovNicError:
        LOG.exception(_LE("Agent Initialization Failed"))
        raise SystemExit(1)
    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
def port_up_segment_mode(self, lldpad_port, port_name, port_uuid, mac,
                         net_uuid, segmentation_id, oui):
    lvm = self.local_vlan_map.get(net_uuid)
    if lvm and lvm.late_binding_vlan:
        vdp_vlan = lvm.late_binding_vlan
        ovs_cb_data = {'obj': self, 'mac': mac,
                       'port_uuid': port_uuid, 'net_uuid': net_uuid}
        lldpad_port.send_vdp_vnic_up(port_uuid=port_uuid,
                                     vsiid=port_uuid,
                                     gid=segmentation_id,
                                     mac=mac, vlan=vdp_vlan, oui=oui,
                                     vsw_cb_fn=self.vdp_vlan_change,
                                     vsw_cb_data=ovs_cb_data)
        lvm.port_uuid_list[port_uuid] = port_uuid
        return True
    else:
        int_br = self.integ_br_obj
        lvid = int_br.get_port_vlan_tag(port_name)
        if lvid != cconstants.INVALID_VLAN:
            ret, vdp_vlan = self.provision_vdp_overlay_networks(
                port_uuid, mac, net_uuid, segmentation_id, lvid, oui)
            if not lvm:
                lvm = LocalVlan(lvid, segmentation_id)
                self.local_vlan_map[net_uuid] = lvm
            lvm.lvid = lvid
            lvm.port_uuid_list[port_uuid] = port_uuid
            if vdp_vlan != cconstants.INVALID_VLAN:
                lvm.late_binding_vlan = vdp_vlan
            else:
                LOG.error(_LE("Cannot provision VDP overlay"))
            return ret
        else:
            LOG.error(_LE("Invalid VLAN"))
            return False
def sync_state(self, networks=None):
    """Sync the local DHCP state with Neutron.

    If no networks are passed, or 'None' is one of the networks,
    sync all of the networks.
    """
    only_nets = set([] if (not networks or None in networks) else networks)
    LOG.info(_LI('Synchronizing state'))
    pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
    known_network_ids = set(self.cache.get_network_ids())

    try:
        active_networks = self.plugin_rpc.get_active_networks_info()
        active_network_ids = set(network.id for network in active_networks)
        for deleted_id in known_network_ids - active_network_ids:
            try:
                self.disable_dhcp_helper(deleted_id)
            except Exception as e:
                self.schedule_resync(e, deleted_id)
                LOG.exception(_LE('Unable to sync network state on '
                                  'deleted network %s'), deleted_id)

        for network in active_networks:
            if (not only_nets or  # specifically resync all
                    network.id not in known_network_ids or  # missing net
                    network.id in only_nets):  # specific network to sync
                pool.spawn(self.safe_configure_dhcp_for_network, network)
        pool.waitall()
        LOG.info(_LI('Synchronizing state complete'))

    except Exception as e:
        self.schedule_resync(e)
        LOG.exception(_LE('Unable to sync network state.'))
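# The GreenPool pattern above (spawn one green thread per network, then
# block until all finish) reduces to a few lines. A standalone sketch,
# assuming eventlet is installed; configure() stands in for
# safe_configure_dhcp_for_network().
import eventlet
eventlet.monkey_patch()

def configure(network_id):
    print('configuring %s' % network_id)

pool = eventlet.GreenPool(size=4)
for net_id in ('net-1', 'net-2', 'net-3'):
    pool.spawn(configure, net_id)
pool.waitall()  # blocks until every spawned green thread has finished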
def _check_config_params(self):
    """Check items in configuration files.

    Check for required and invalid configuration items.
    The actual values are not verified for correctness.
    """
    if not self.conf.interface_driver:
        msg = _LE('An interface driver must be specified')
        LOG.error(msg)
        raise SystemExit(1)

    if not self.conf.use_namespaces and not self.conf.router_id:
        msg = _LE('Router id is required if not using namespaces.')
        LOG.error(msg)
        raise SystemExit(1)

    if self.conf.ipv6_gateway:
        # ipv6_gateway configured. Check for valid v6 link-local address.
        msg = _LE("%s used in config as ipv6_gateway is not a valid "
                  "IPv6 link-local address.")
        try:
            ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
            if ip_addr.version != 6 or not ip_addr.is_link_local():
                LOG.error(msg, self.conf.ipv6_gateway)
                raise SystemExit(1)
        except netaddr.AddrFormatError:
            LOG.error(msg, self.conf.ipv6_gateway)
            raise SystemExit(1)
def _delete_port_group(self, task):
    try:
        header, response = self.vcns.get_edge_id(task.userdata['job_id'])
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("NSXv: Failed to get job for %s"),
                      task.userdata)
    status = response['status']
    if status != 'COMPLETED':
        if (status == 'QUEUED' or status == 'RUNNING' or
                status == 'ROLLBACK'):
            LOG.debug("NSXv: job is still pending for %s", task.userdata)
            return task_constants.TaskStatus.PENDING
    try:
        self.vcns.delete_port_group(
            task.userdata['dvs_id'],
            task.userdata['port_group_id'])
    except Exception as e:
        LOG.error(_LE('Unable to delete %(pg)s (job status %(state)s) '
                      'exception %(ex)s'),
                  {'pg': task.userdata['port_group_id'],
                   'state': status,
                   'ex': e})
    if status == 'FAILED':
        return task_constants.TaskStatus.ERROR
    return task_constants.TaskStatus.COMPLETED
def process_update_network(self, context, data, result):
    """Implementation of abstract method from ExtensionDriver class."""
    LOG.debug("RK: process_update_network(). data: %s", data)
    net_id = result.get('id')
    net_min_attr = data.get(rk_const.RK_MIN_RATE)
    net_max_attr = data.get(rk_const.RK_MAX_RATE)
    LOG.debug("RK: update_network: %s and %s", net_min_attr, net_max_attr)
    if (attributes.is_attr_set(net_min_attr) and
            attributes.is_attr_set(net_max_attr)):
        with context.session.begin(subtransactions=True):
            try:
                res = rk_db.get_vnet_profile(net_id, context.session)
                if res:
                    rk_db.update_vnet_rate_limit(net_id, net_min_attr,
                                                 net_max_attr,
                                                 context.session)
                else:
                    # Network not found and can't be updated.
                    # Create instead.
                    rk_db.create_vnet_record(net_id, net_min_attr,
                                             net_max_attr,
                                             context.session)
                LOG.debug("RK: update_network: res: %s", res)
            except Exception as e:
                LOG.error(_LE("RK: update_network: error %s"), e)
                raise ml2_exc.MechanismDriverError()
def send_vdp_port_event(self, port_uuid, mac, net_uuid,
                        segmentation_id, status, oui):
    """Send vNIC UP/Down event to VDP.

    :param port_uuid: a ovslib.VifPort object.
    :param mac: MAC address of the VNIC
    :param net_uuid: the net_uuid this port is to be associated with.
    :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
    :param status: Type of port event. 'up' or 'down'
    :param oui: OUI Parameters
    """
    lldpad_port = self.lldpad_info
    if not lldpad_port:
        LOG.error(_LE("There is no LLDPad port available."))
        return False

    ret = False
    if status == "up":
        if self.vdp_mode == constants.VDP_SEGMENT_MODE:
            port_name = self.ext_br_obj.get_ofport_name(port_uuid)
            if port_name is None:
                LOG.error(_LE("Unknown portname for uuid %s"), port_uuid)
                return False
            LOG.info(_LI("portname for uuid %(uuid)s is %(name)s"),
                     {'uuid': port_uuid, 'name': port_name})
            ret = self.port_up_segment_mode(lldpad_port, port_name,
                                            port_uuid, mac, net_uuid,
                                            segmentation_id, oui)
    else:
        if self.vdp_mode == constants.VDP_SEGMENT_MODE:
            ret = self.port_down_segment_mode(lldpad_port, port_uuid,
                                              mac, net_uuid,
                                              segmentation_id, oui)
    return ret
def send_events(self, batched_events):
    LOG.debug("Sending events: %s", batched_events)
    try:
        response = self.nclient.server_external_events.create(
            batched_events)
    except nova_exceptions.NotFound:
        LOG.warning(_LW("Nova returned NotFound for event: %s"),
                    batched_events)
    except Exception:
        LOG.exception(_LE("Failed to notify nova on events: %s"),
                      batched_events)
    else:
        if not isinstance(response, list):
            LOG.error(_LE("Error response returned from nova: %s"),
                      response)
            return
        response_error = False
        for event in response:
            try:
                code = event['code']
            except KeyError:
                response_error = True
                continue
            if code != 200:
                LOG.warning(_LW("Nova event: %s returned with failed "
                                "status"), event)
            else:
                LOG.info(_LI("Nova event response: %s"), event)
        if response_error:
            LOG.error(_LE("Error response returned from nova: %s"),
                      response)
def _bind_port_level(self, context, level, segments_to_bind):
    binding = context._binding
    port_id = context.current['id']
    LOG.debug("Attempting to bind port %(port)s on host %(host)s "
              "at level %(level)s using segments %(segments)s",
              {'port': port_id,
               'host': context.host,
               'level': level,
               'segments': segments_to_bind})

    if level == MAX_BINDING_LEVELS:
        LOG.error(_LE("Exceeded maximum binding levels attempting to bind "
                      "port %(port)s on host %(host)s"),
                  {'port': context.current['id'],
                   'host': context.host})
        return False

    for driver in self.ordered_mech_drivers:
        if not self._check_driver_to_bind(driver, segments_to_bind,
                                          context._binding_levels):
            continue
        try:
            context._prepare_to_bind(segments_to_bind)
            driver.obj.bind_port(context)
            segment = context._new_bound_segment
            if segment:
                context._push_binding_level(
                    models.PortBindingLevel(port_id=port_id,
                                            host=context.host,
                                            level=level,
                                            driver=driver.name,
                                            segment_id=segment))
                next_segments = context._next_segments_to_bind
                if next_segments:
                    # Continue binding another level.
                    if self._bind_port_level(context, level + 1,
                                             next_segments):
                        return True
                    else:
                        context._pop_binding_level()
                else:
                    # Binding complete.
                    LOG.debug("Bound port: %(port)s, "
                              "host: %(host)s, "
                              "vif_type: %(vif_type)s, "
                              "vif_details: %(vif_details)s, "
                              "binding_levels: %(binding_levels)s",
                              {'port': port_id,
                               'host': context.host,
                               'vif_type': binding.vif_type,
                               'vif_details': binding.vif_details,
                               'binding_levels': context.binding_levels})
                    return True
        except Exception:
            LOG.exception(_LE("Mechanism driver %s failed in "
                              "bind_port"), driver.name)
    LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
              {'port': context.current['id'], 'host': binding.host})
def mgmt_nw_id(cls):
    """Returns id of the management network."""
    if cls._mgmt_nw_uuid is None:
        tenant_id = cls.l3_tenant_id()
        if not tenant_id:
            return
        net = manager.NeutronManager.get_plugin().get_networks(
            neutron_context.get_admin_context(),
            {'tenant_id': [tenant_id],
             'name': [cfg.CONF.general.management_network]},
            ['id', 'subnets'])
        if len(net) == 1:
            num_subnets = len(net[0]['subnets'])
            if num_subnets == 0:
                LOG.error(_LE('The virtual management network has no '
                              'subnet. Please assign one.'))
                return
            elif num_subnets > 1:
                LOG.info(_LI('The virtual management network has %d '
                             'subnets. The first one will be used.'),
                         num_subnets)
            cls._mgmt_nw_uuid = net[0].get('id')
        elif len(net) > 1:
            # Management network must have a unique name.
            LOG.error(_LE('The virtual management network does not have '
                          'a unique name. Please ensure that it does.'))
        else:
            # Management network has not been created.
            LOG.error(_LE('There is no virtual management network. Please '
                          'create one.'))
    return cls._mgmt_nw_uuid
def main():
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s"), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)

    polling_interval = cfg.CONF.AGENT.polling_interval
    quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
    agent = LinuxBridgeNeutronAgentRPC(bridge_mappings,
                                       interface_mappings,
                                       polling_interval,
                                       quitting_rpc_timeout)
    LOG.info(_LI("Agent initialized successfully, now running... "))
    launcher = service.launch(cfg.CONF, agent)
    launcher.wait()
def _agent_registration(self):
    """Register this agent with the server.

    This method registers the cfg agent with the neutron server so
    hosting devices can be assigned to it. In case the server is not
    ready to accept registration (it sends a False) then we retry
    registration for `MAX_REGISTRATION_ATTEMPTS` with a delay of
    `REGISTRATION_RETRY_DELAY`. If there is no server response or a
    failure to register after the required number of attempts, the
    agent stops itself.
    """
    for attempts in xrange(MAX_REGISTRATION_ATTEMPTS):
        context = n_context.get_admin_context_without_session()
        self.send_agent_report(self.agent_state, context)
        res = self.devmgr_rpc.register_for_duty(context)
        if res is True:
            LOG.info(_LI("[Agent registration] Agent successfully "
                         "registered"))
            return
        elif res is False:
            LOG.warning(_LW("[Agent registration] Neutron server said "
                            "that device manager was not ready. Retrying "
                            "in %0.2f seconds "),
                        REGISTRATION_RETRY_DELAY)
            time.sleep(REGISTRATION_RETRY_DELAY)
        elif res is None:
            LOG.error(_LE("[Agent registration] Neutron server said that "
                          "no device manager was found. Cannot continue. "
                          "Exiting!"))
            raise SystemExit("Cfg Agent exiting")
    LOG.error(_LE("[Agent registration] %d unsuccessful registration "
                  "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
    raise SystemExit("Cfg Agent exiting")
def get(self, path, etag=None, raw=False):
    json_result = None
    etag_out = None
    headers = {'accept': 'application/json',
               'content-type': 'application/json'}
    if etag:
        headers['ETag'] = etag
    try:
        result = self.session_op("get", path, raw, headers=headers)
        if 'etag' in result.headers:
            etag_out = result.headers['etag']
        json_result = result.json()
        # status_code is an int, so compare against 404, not "404".
        if result.status_code == 404:
            LOG.error(_LE("%(msg)s %(detail)s"),
                      {'msg': json_result["message"],
                       'detail': json_result["details"]})
    except Exception:
        LOG.error(_LE("Exception during GET operation"))
        raise
    r = [json_result]
    if etag_out:
        r.append(etag_out)
    return r
def ensure_physical_in_bridge(self, network_id, network_type,
                              physical_network, segmentation_id):
    if network_type == p_const.TYPE_VXLAN:
        if self.vxlan_mode == lconst.VXLAN_NONE:
            LOG.error(_LE("Unable to add vxlan interface for network %s"),
                      network_id)
            return
        return self.ensure_vxlan_bridge(network_id, segmentation_id)

    # NOTE(nick-ma-z): Obtain mappings of physical bridge and interfaces
    physical_bridge = self.get_existing_bridge_name(physical_network)
    physical_interface = self.interface_mappings.get(physical_network)
    if not physical_bridge and not physical_interface:
        LOG.error(_LE("No bridge or interface mappings"
                      " for physical network %s"),
                  physical_network)
        return
    if network_type == p_const.TYPE_FLAT:
        return self.ensure_flat_bridge(network_id, physical_bridge,
                                       physical_interface)
    elif network_type == p_const.TYPE_VLAN:
        return self.ensure_vlan_bridge(network_id, physical_bridge,
                                       physical_interface,
                                       segmentation_id)
    else:
        LOG.error(_LE("Unknown network_type %(network_type)s for network "
                      "%(network_id)s."),
                  {'network_type': network_type,
                   'network_id': network_id})
def _validate_multicast_ip_range(self, network_profile):
    """Validate multicast ip range values.

    :param network_profile: network profile object
    """
    try:
        min_ip, max_ip = (network_profile
                          ['multicast_ip_range'].split('-', 1))
    except ValueError:
        msg = _LE("Invalid multicast ip address range. "
                  "example range: 224.1.1.1-224.1.1.10")
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
    for ip in [min_ip, max_ip]:
        try:
            if not netaddr.IPAddress(ip).is_multicast():
                msg = _LE("%s is not a valid multicast ip address") % ip
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
            if netaddr.IPAddress(ip) <= netaddr.IPAddress('224.0.0.255'):
                msg = _LE("%s is a reserved multicast ip address") % ip
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
        except netaddr.AddrFormatError:
            msg = _LE("%s is not a valid ip address") % ip
            LOG.error(msg)
            raise n_exc.InvalidInput(error_message=msg)
    if netaddr.IPAddress(min_ip) > netaddr.IPAddress(max_ip):
        msg = (_LE("Invalid multicast IP range '%(min_ip)s-%(max_ip)s': "
                   "Range should be from low address to high address")
               % {'min_ip': min_ip, 'max_ip': max_ip})
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
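# The netaddr predicates used above, shown in isolation (netaddr must be
# installed):
import netaddr

print(netaddr.IPAddress('224.1.1.1').is_multicast())  # True
print(netaddr.IPAddress('10.0.0.1').is_multicast())   # False
# 224.0.0.0-224.0.0.255 is the reserved local-network-control block,
# which is why addresses <= 224.0.0.255 are rejected above.
print(netaddr.IPAddress('224.0.0.5') <=
      netaddr.IPAddress('224.0.0.255'))               # True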
def _status_edge(self, task):
    edge_id = task.userdata['edge_id']
    try:
        response = self.vcns.get_edge_deploy_status(edge_id)[1]
        task.userdata['retries'] = 0
        system_status = response.get('systemStatus', None)
        if system_status is None:
            status = constants.TaskStatus.PENDING
        elif system_status == 'good':
            status = constants.TaskStatus.COMPLETED
        else:
            status = constants.TaskStatus.ERROR
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("VCNS: Edge %s status query failed."),
                          edge_id)
    except Exception:
        retries = task.userdata.get('retries', 0) + 1
        if retries < 3:
            task.userdata['retries'] = retries
            LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
                              "status. Retry %(retries)d."),
                          {'edge_id': edge_id,
                           'retries': retries})
            status = constants.TaskStatus.PENDING
        else:
            LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
                              "Abort."), edge_id)
            status = constants.TaskStatus.ERROR
    LOG.debug("VCNS: Edge %s status", edge_id)
    return status
def process_create_port(self, context, data, result):
    """Implementation of abstract method from ExtensionDriver class."""
    port_id = result.get('id')
    policy_profile_attr = data.get(constants.N1KV_PROFILE)
    if not attributes.is_attr_set(policy_profile_attr):
        policy_profile_attr = (cfg.CONF.ml2_cisco_n1kv.
                               default_policy_profile)
    with context.session.begin(subtransactions=True):
        try:
            n1kv_db.get_policy_binding(port_id, context.session)
        except n1kv_exc.PortBindingNotFound:
            if not uuidutils.is_uuid_like(policy_profile_attr):
                policy_profile = n1kv_db.get_policy_profile_by_name(
                    policy_profile_attr, context.session)
                if policy_profile:
                    policy_profile_attr = policy_profile.id
                else:
                    LOG.error(_LE("Policy Profile %(profile)s does "
                                  "not exist."),
                              {"profile": policy_profile_attr})
                    raise ml2_exc.MechanismDriverError()
            elif not (n1kv_db.get_policy_profile_by_uuid(
                      context.session, policy_profile_attr)):
                LOG.error(_LE("Policy Profile %(profile)s does not "
                              "exist."),
                          {"profile": policy_profile_attr})
                raise ml2_exc.MechanismDriverError()
            n1kv_db.add_policy_binding(port_id, policy_profile_attr,
                                       context.session)
    result[constants.N1KV_PROFILE] = policy_profile_attr
def load_class_by_alias_or_classname(namespace, name):
    """Load class using stevedore alias or the class name.

    :param namespace: namespace where the alias is defined
    :param name: alias or class name of the class to be loaded
    :returns: class if it can be loaded
    :raises ImportError: if class cannot be loaded
    """
    if not name:
        LOG.error(_LE("Alias or class name is not set"))
        raise ImportError(_("Class not found."))
    try:
        # Try to resolve class by alias
        mgr = driver.DriverManager(namespace, name)
        class_to_load = mgr.driver
    except RuntimeError:
        e1_info = sys.exc_info()
        # Fallback to class name
        try:
            class_to_load = importutils.import_class(name)
        except (ImportError, ValueError):
            LOG.error(_LE("Error loading class by alias"),
                      exc_info=e1_info)
            LOG.error(_LE("Error loading class by class name"),
                      exc_info=True)
            raise ImportError(_("Class not found."))
    return class_to_load
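# A hypothetical call site showing both resolution paths; the namespace,
# alias and class path below are made up for illustration:
cls = load_class_by_alias_or_classname('example.drivers', 'noop')
# ...or, if no alias is registered under that name, by fully qualified
# class path:
cls = load_class_by_alias_or_classname(
    'example.drivers', 'example.drivers.noop.NoopDriver')
driver_instance = cls()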
def _get_profile_id(cls, p_type, resource, name):
    try:
        tenant_id = manager.NeutronManager.get_service_plugins()[
            constants.L3_ROUTER_NAT].l3_tenant_id()
    except AttributeError:
        return
    if tenant_id is None:
        return
    core_plugin = manager.NeutronManager.get_plugin()
    if p_type == 'net_profile':
        profiles = core_plugin.get_network_profiles(
            n_context.get_admin_context(),
            {'tenant_id': [tenant_id], 'name': [name]},
            ['id'])
    else:
        profiles = core_plugin.get_policy_profiles(
            n_context.get_admin_context(),
            {'tenant_id': [tenant_id], 'name': [name]},
            ['id'])
    if len(profiles) == 1:
        return profiles[0]['id']
    elif len(profiles) > 1:
        # Profile must have a unique name.
        LOG.error(_LE('The %(resource)s %(name)s does not have a unique '
                      'name. Please refer to the admin guide and create '
                      'one.'),
                  {'resource': resource, 'name': name})
    else:
        # Profile has not been created.
        LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                      'the admin guide and create one.'),
                  {'resource': resource, 'name': name})
def create_project(self, org_name, part_name, dci_id, desc=None):
    """Create project on the DCNM.

    :param org_name: name of organization.
    :param part_name: name of partition.
    :param dci_id: Data Center interconnect id.
    :param desc: description of project.
    """
    desc = desc or org_name
    res = self._create_org(org_name, desc)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Created %s organization in DCNM.", org_name)
    else:
        LOG.error(_LE("Failed to create %(org)s organization in DCNM. "
                      "Response: %(res)s"),
                  {'org': org_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)

    res = self._create_or_update_partition(org_name, part_name,
                                           dci_id, desc)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Created %s partition in DCNM.", part_name)
    else:
        LOG.error(_LE("Failed to create %(part)s partition in DCNM. "
                      "Response: %(res)s"),
                  {'part': part_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
def mgmt_sec_grp_id(cls):
    """Returns id of security group used by the management network."""
    if not utils.is_extension_supported(
            manager.NeutronManager.get_plugin(), "security-group"):
        return
    if cls._mgmt_sec_grp_id is None:
        # Get the id for the _mgmt_security_group_id
        tenant_id = cls.l3_tenant_id()
        res = manager.NeutronManager.get_plugin().get_security_groups(
            neutron_context.get_admin_context(),
            {'tenant_id': [tenant_id],
             'name': [cfg.CONF.general.default_security_group]},
            ['id'])
        if len(res) == 1:
            cls._mgmt_sec_grp_id = res[0].get('id')
        elif len(res) > 1:
            # The mgmt sec group must be unique.
            LOG.error(_LE('The security group for the virtual management '
                          'network does not have a unique name. Please '
                          'ensure that it does.'))
        else:
            # CSR Mgmt security group is not present.
            LOG.error(_LE('There is no security group for the virtual '
                          'management network. Please create one.'))
    return cls._mgmt_sec_grp_id
def create_router(self, host, username, password, rbridge_id, router_id):
    """Create vrf and associate vrf."""
    router_id = router_id[0:11]
    vrf_name = template.OS_VRF_NAME.format(id=router_id)
    rd = router_id + ":" + router_id
    try:
        mgr = self.connect(host, username, password)
        self.create_vrf(mgr, rbridge_id, vrf_name)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("NETCONF error"))
            self.close_session()
    try:
        # For NOS 5.0.0
        self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
        self.configure_address_family_for_vrf(mgr, rbridge_id, vrf_name)
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            try:
                # This is done because on 4.0.0 the rd doesn't accept
                # alpha characters or hyphens.
                rd = "".join(i for i in router_id if i in "0123456789")
                rd = rd[:4] + ":" + rd[:4]
                self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
                self.configure_address_family_for_vrf_v1(mgr, rbridge_id,
                                                         vrf_name)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("NETCONF error"))
                    self.close_session()
            ctxt.reraise = False
def process_uplink_event(self, msg, phy_uplink):
    LOG.info(_LI("Received New uplink Msg %(msg)s for uplink %(uplink)s"),
             {'msg': msg.get_status(), 'uplink': phy_uplink})
    if msg.get_status() == 'up':
        ovs_exc_raised = False
        try:
            self.ovs_vdp_obj_dict[phy_uplink] = ovs_vdp.OVSNeutronVdp(
                phy_uplink, msg.get_integ_br(), msg.get_ext_br(),
                msg.get_root_helper())
        except Exception as exc:
            LOG.error(_LE("OVS VDP Object creation failed %s"), str(exc))
            ovs_exc_raised = True
        if (ovs_exc_raised or not self.ovs_vdp_obj_dict[phy_uplink].
                is_lldpad_setup_done()):
            # Is there a way to delete the object??
            LOG.error(_LE("UP Event Processing NOT Complete"))
            self.err_que.enqueue(constants.Q_UPL_PRIO, msg)
        else:
            self.uplink_det_compl = True
            veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink].
                         get_lldp_bridge_port())
            LOG.info(_LI("UP Event Processing Complete Saving uplink "
                         "%(ul)s and veth %(veth)s"),
                     {'ul': self.phy_uplink, 'veth': veth_intf})
            self.save_uplink(uplink=self.phy_uplink, veth_intf=veth_intf)
    elif msg.get_status() == 'down':
        # Free the object. fixme(padkrish)
        if phy_uplink in self.ovs_vdp_obj_dict:
            self.ovs_vdp_obj_dict[phy_uplink].clear_obj_params()
        else:
            ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex,
                                            phy_uplink)
        self.save_uplink()
def serve_rpc():
    plugin = manager.NeutronManager.get_plugin()

    # If 0 < rpc_workers then start_rpc_listeners would be called in a
    # subprocess and we cannot simply catch the NotImplementedError. It is
    # simpler to check this up front by testing whether the plugin supports
    # multiple RPC workers.
    if not plugin.rpc_workers_supported():
        LOG.debug("Active plugin doesn't implement start_rpc_listeners")
        if 0 < cfg.CONF.rpc_workers:
            LOG.error(_LE("'rpc_workers = %d' ignored because "
                          "start_rpc_listeners is not implemented."),
                      cfg.CONF.rpc_workers)
        raise NotImplementedError()

    try:
        rpc = RpcWorker(plugin)

        if cfg.CONF.rpc_workers < 1:
            rpc.start()
            return rpc
        else:
            launcher = common_service.ProcessLauncher(wait_interval=1.0)
            launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers)
            return launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Unrecoverable error: please check log for '
                              'details.'))
def check_edge_jobs(self, edge_id):
    retries = max(cfg.CONF.nsxv.retries, 1)
    delay = 0.5
    for attempt in range(1, retries + 1):
        if attempt != 1:
            time.sleep(delay)
            delay = min(2 * delay, 60)
        h, jobs = self.vcns.get_edge_jobs(edge_id)
        if not jobs['edgeJob']:
            return
        job_number = len(jobs['edgeJob'])
        # Assume one job would time out after 20 minutes and one
        # job takes about 1 minute to be completed.
        if job_number < 20:
            LOG.warning(_LW("NSXv: %(num)s jobs still running on edge "
                            "%(edge_id)s."),
                        {'num': job_number, 'edge_id': edge_id})
        else:
            LOG.error(_LE("NSXv: %(num)s jobs still running on edge "
                          "%(edge_id)s. Too many jobs may lead to job "
                          "time out at the backend"),
                      {'num': job_number, 'edge_id': edge_id})
    LOG.error(_LE('NSXv: jobs are still running!'))
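# The retry loop above sleeps before every attempt except the first,
# doubling the delay and capping it at 60 seconds. The same backoff
# schedule in isolation:
def backoff_delays(retries, initial=0.5, cap=60):
    """Yield the sleep intervals used between retry attempts."""
    delay = initial
    for _ in range(retries - 1):
        yield delay
        delay = min(2 * delay, cap)

print(list(backoff_delays(6)))  # [0.5, 1.0, 2.0, 4.0, 8.0]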
def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add):
    """Adds or removes rules and routes for SNAT redirection."""
    try:
        ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
        ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
        if is_add:
            ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
        for port_fixed_ip in sn_port['fixed_ips']:
            # Find the first gateway IP address matching this IP version
            port_ip_addr = port_fixed_ip['ip_address']
            port_ip_vers = netaddr.IPAddress(port_ip_addr).version
            for gw_fixed_ip in gateway['fixed_ips']:
                gw_ip_addr = gw_fixed_ip['ip_address']
                if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers:
                    sn_port_cidr = common_utils.ip_to_cidr(
                        port_ip_addr, port_fixed_ip['prefixlen'])
                    snat_idx = self._get_snat_idx(sn_port_cidr)
                    if is_add:
                        ns_ipd.route.add_gateway(gw_ip_addr,
                                                 table=snat_idx)
                        ns_ipr.rule.add(sn_port_cidr, snat_idx, snat_idx)
                        ns_ipwrapr.netns.execute(
                            ['sysctl', '-w',
                             'net.ipv4.conf.%s.send_redirects=0' % sn_int])
                    else:
                        self._snat_delete_device_gateway(ns_ipd,
                                                         gw_ip_addr,
                                                         snat_idx)
                        ns_ipr.rule.delete(sn_port_cidr, snat_idx,
                                           snat_idx)
                    break
    except Exception:
        if is_add:
            exc = _LE('DVR: error adding redirection logic')
        else:
            exc = _LE('DVR: error removing redirection logic')
        LOG.exception(exc)
def process_services_sync(self, ctx):
    """On RPC issues sync with plugin and apply the sync data."""
    # Avoid msg to plugin when fwaas is not configured.
    if not self.fwaas_enabled:
        return
    try:
        # Get all routers.
        routers = self.plugin_rpc.get_routers(ctx)
        # Get the list of tenants with firewalls configured
        # from the plugin.
        tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx)
        LOG.debug("Tenants with Firewalls: '%s'", tenant_ids)
        for tenant_id in tenant_ids:
            ctx = context.Context('', tenant_id)
            fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
            if fw_list:
                # If fw present on tenant.
                router_info_list = self._get_router_info_list_for_tenant(
                    routers, tenant_id)
                if router_info_list:
                    LOG.debug("Router List: '%s'",
                              [ri.router['id'] for ri in router_info_list])
                    LOG.debug("fw_list: '%s'",
                              [fw['id'] for fw in fw_list])
                    # Apply sync data on fw for this tenant.
                    for fw in fw_list:
                        # fw, routers present on this host for tenant:
                        # install.
                        LOG.debug("Apply fw on Router List: '%s'",
                                  [ri.router['id']
                                   for ri in router_info_list])
                        # No need to apply sync data for ACTIVE fw.
                        if fw['status'] != constants.ACTIVE:
                            self._invoke_driver_for_sync_from_plugin(
                                ctx, router_info_list, fw)
        self.services_sync = False
    except Exception:
        LOG.exception(_LE("Failed fwaas process services sync"))
        self.services_sync = True
def parse_uri(self, service_uri):
    self.parts = urlparse(service_uri)
    host_port_parts = self.parts.netloc.split(':')
    self.endpoint_port = None
    if len(host_port_parts) > 1:
        self.endpoint_host = host_port_parts[0]
        self.endpoint_port = host_port_parts[1]
    else:
        self.endpoint_host = host_port_parts[0]

    if type(self.endpoint_host).__name__ == 'unicode':
        self.endpoint_host = self.endpoint_host.encode('ascii', 'ignore')
    if self.endpoint_port and type(
            self.endpoint_port).__name__ == 'unicode':
        self.endpoint_port = self.endpoint_port.encode('ascii', 'ignore')
    self.host = self.endpoint_host

    if self.parts.scheme.lower() == "http":
        self.protocol = "http"
        if not self.endpoint_port:
            self.endpoint_port = 80
    elif self.parts.scheme.lower() == "https":
        self.protocol = "https"
        if not self.endpoint_port:
            self.endpoint_port = 443
    else:
        LOG.error(_LE("scheme in endpoint URL is unrecognized: "
                      "%(scheme)s"),
                  {"scheme": self.parts.scheme})
        raise NCCException(NCCException.CONFIG_ERROR)
    LOG.info(_LI("RestClient using endpoint %(host)s:%(port)s"),
             {"host": self.endpoint_host, "port": self.endpoint_port})
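# How the netloc splitting above behaves for a typical endpoint; the URL
# is made up. urlparse lives in urlparse on Python 2 and urllib.parse on
# Python 3:
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

parts = urlparse('https://ncc.example.com:8443/api')
print(parts.scheme)             # https
print(parts.netloc)             # ncc.example.com:8443
print(parts.netloc.split(':'))  # ['ncc.example.com', '8443']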
def _check_acl(self, acl_no, network, netmask):
    """Check that an ACL config exists in the running config.

    :param acl_no: access control list (ACL) number
    :param network: network which this ACL permits
    :param netmask: netmask of the network
    :return: True if the expected ACL entry is present, False otherwise
    """
    exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
                     ' permit ' + str(network) + ' ' + str(netmask)]
    ios_cfg = self._get_running_config()
    parse = ciscoconfparse.CiscoConfParse(ios_cfg)
    acls_raw = parse.find_children(exp_cfg_lines[0])
    if acls_raw:
        if exp_cfg_lines[1] in acls_raw:
            return True
        LOG.error(_LE("Mismatch in ACL configuration for %s"), acl_no)
        return False
    LOG.debug("%s is not present in config", acl_no)
    return False
def delete_port(self, context, port_id):
    with context.session.begin(subtransactions=True):
        neutron_port = self.get_port(context, port_id)
        interface_mac = neutron_port['mac_address']
        # Convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx
        mac = self.mac_reformat_62to34(interface_mac)

        brocade_port = brocade_db.get_port(context, port_id)
        vlan_id = brocade_port['vlan_id']

        switch = self._switch
        try:
            self._driver.dissociate_mac_from_network(
                switch['address'],
                switch['username'],
                switch['password'],
                vlan_id,
                mac)
        except Exception:
            LOG.exception(_LE("Brocade NOS driver error"))
            raise Exception(
                _("Brocade plugin raised exception, check logs"))

        super(BrocadePluginV2, self).delete_port(context, port_id)
        brocade_db.delete_port(context, port_id)
def update_agent_db(self, agent_info):
    session = db.get_session()
    host = agent_info.get('host')
    with session.begin(subtransactions=True):
        try:
            # Check if entry exists.
            session.query(DfaAgentsDb).filter_by(host=host).one()

            # Entry exists, only update the heartbeat and configurations.
            session.query(DfaAgentsDb).filter_by(host=host).update(
                {'heartbeat': agent_info.get('timestamp')})
        except orm_exc.NoResultFound:
            LOG.info(_LI('Creating new entry for agent on %(host)s.'),
                     {'host': host})
            agent = DfaAgentsDb(host=host,
                                created=agent_info.get('timestamp'),
                                heartbeat=agent_info.get('timestamp'),
                                configurations=agent_info.get('config'))
            session.add(agent)
        except orm_exc.MultipleResultsFound:
            LOG.error(_LE('More than one entry found for agent '
                          '%(host)s.'), {'host': host})
def _extend_network_dict_provider(self, context, network):
    id = network['id']
    segments = db.get_network_segments(context.session, id)
    if not segments:
        LOG.error(_LE("Network %s has no segments"), id)
        network[provider.NETWORK_TYPE] = None
        network[provider.PHYSICAL_NETWORK] = None
        network[provider.SEGMENTATION_ID] = None
    elif len(segments) > 1:
        network[mpnet.SEGMENTS] = [
            {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE],
             provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK],
             provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]}
            for segment in segments]
    else:
        segment = segments[0]
        network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE]
        network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK]
        network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID]
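# For a single-segment network, the method above fills in the standard
# provider API attributes; values here are illustrative only:
network = {
    'id': 'net-uuid',
    'provider:network_type': 'vlan',
    'provider:physical_network': 'physnet1',
    'provider:segmentation_id': 101,
}
# Multi-segment networks instead get a 'segments' list containing one
# such type/physnet/segmentation-id triple per segment.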
def authenticate(self):
    """Obtain a token to use for subsequent CSR REST requests.

    This is called when there is no token yet, or if the token has
    expired and attempts to use it resulted in an UNAUTHORIZED REST
    response.
    """
    url = URL_BASE % {'host': self.host, 'resource': 'auth/token-services'}
    headers = {'Content-Length': '0',
               'Accept': 'application/json'}
    headers.update(HEADER_CONTENT_TYPE_JSON)
    LOG.debug("%(auth)s with CSR %(host)s",
              {'auth': 'Authenticating' if self.token is None
               else 'Reauthenticating', 'host': self.host})
    self.token = None
    response = self._request("POST", url, headers=headers,
                             auth=self.auth)
    if response:
        self.token = response['token-id']
        LOG.debug("Successfully authenticated with CSR %s", self.host)
        return True
    LOG.error(_LE("Failed authentication with CSR %(host)s [%(status)s]"),
              {'host': self.host, 'status': self.status})
def run_idl(self, txn):
    try:
        br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name',
                                   self.name)
    except idlutils.RowNotFound:
        if self.if_exists:
            return
        else:
            msg = _LE("Bridge %s does not exist") % self.name
            LOG.error(msg)
            raise RuntimeError(msg)
    self.api._ovs.verify('bridges')
    for port in br.ports:
        cmd = DelPortCommand(self.api, port.name, self.name,
                             if_exists=True)
        cmd.run_idl(txn)
    bridges = self.api._ovs.bridges
    bridges.remove(br)
    self.api._ovs.bridges = bridges
    self.api._tables['Bridge'].rows[br.uuid].delete()
def create_dvr_fip_interfaces(self, ri, ex_gw_port):
    floating_ips = ri.get_floating_ips()
    fip_agent_port = self.get_floating_agent_gw_interface(
        ri, ex_gw_port['network_id'])
    LOG.debug("FloatingIP agent gateway port received from the plugin: "
              "%s", fip_agent_port)
    if floating_ips:
        is_first = ri.fip_ns.subscribe(ri.router_id)
        if is_first and fip_agent_port:
            if 'subnet' not in fip_agent_port:
                LOG.error(_LE('Missing subnet/agent_gateway_port'))
            else:
                self._set_subnet_info(fip_agent_port)
                ri.fip_ns.create_gateway_port(fip_agent_port)

    if ri.fip_ns.agent_gateway_port and floating_ips:
        if ri.dist_fip_count == 0:
            ri.fip_ns.create_rtr_2_fip_link(ri)

            # Kicks the FW agent to add rules for the IR namespace
            # if configured.
            self.process_router_add(ri)
def reclaim_local_vlan(self, net_uuid):
    """Reclaim a local VLAN.

    :param net_uuid: the network uuid associated with this vlan.
    """
    lvm = self.local_vlan_map.pop(net_uuid, None)
    if lvm is None:
        LOG.debug("Network %s not used on agent.", net_uuid)
        return

    LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
                 "net-id = %(net_uuid)s"),
             {'vlan_id': lvm.vlan, 'net_uuid': net_uuid})

    if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
        if self.enable_tunneling:
            self.int_br.reclaim_tenant_tunnel(lvm.network_type, lvm.vlan,
                                              lvm.segmentation_id)
            # Try to remove tunnel ports if not used by other networks.
            for ofport in lvm.tun_ofports:
                self.cleanup_tunnel_port(self.int_br, ofport,
                                         lvm.network_type)
    elif lvm.network_type in [p_const.TYPE_FLAT, p_const.TYPE_VLAN]:
        phys_port = self.int_ofports[lvm.physical_network]
        self.int_br.reclaim_tenant_physnet(lvm.network_type, lvm.vlan,
                                           lvm.segmentation_id, phys_port)
    elif lvm.network_type == p_const.TYPE_LOCAL:
        # No flows needed for local networks.
        pass
    else:
        LOG.error(_LE("Cannot reclaim unknown network type "
                      "%(network_type)s for net-id=%(net_uuid)s"),
                  {'network_type': lvm.network_type,
                   'net_uuid': net_uuid})

    self.available_local_vlans.add(lvm.vlan)
def delete_optimizer(self, agent_mode, apply_list, optimizer):
    LOG.debug('Deleting optimizer %(opt_id)s for tenant %(tid)s',
              {'opt_id': optimizer['id'], 'tid': optimizer['tenant_id']})
    optid = optimizer['id']

    # OaaS
    namespace = "qrouter-%s" % str(optimizer['router_ids']).strip("u['']")
    self.solowan_delete_folder(optimizer['solowan'], namespace)

    try:
        for router_info in apply_list:
            ipt_if_prefix_list = self._get_ipt_mgrs_with_if_prefix(
                agent_mode, router_info)
            for ipt_if_prefix in ipt_if_prefix_list:
                ipt_mgr = ipt_if_prefix['ipt']
                self._remove_chains(optid, ipt_mgr)
                self._remove_default_chains(ipt_mgr)
                # Apply the changes immediately (no defer in optimizer
                # path).
                ipt_mgr.defer_apply_off()
    except (LookupError, RuntimeError):
        # Catch known library exceptions and raise the generic OaaS
        # exception.
        LOG.exception(_LE("Failed to delete optimizer: %s"), optid)
        raise opt_ext.OptimizerInternalDriverError(driver=OAAS_DRIVER_NAME)
def _ipam_deallocate_ips(self, context, ipam_driver, port, ips,
                         revert_on_fail=True):
    """Deallocate set of ips over IPAM.

    If any single ip deallocation fails, tries to allocate deallocated
    ip addresses with fixed ip request.
    """
    deallocated = []
    try:
        for ip in ips:
            try:
                ipam_subnet = ipam_driver.get_subnet(ip['subnet_id'])
                ipam_subnet.deallocate(ip['ip_address'])
                deallocated.append(ip)
            except n_exc.SubnetNotFound:
                LOG.debug("Subnet was not found on ip deallocation: %s",
                          ip)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.debug("An exception occurred during IP deallocation.")
            if revert_on_fail and deallocated:
                LOG.debug("Reverting deallocation")
                self._ipam_allocate_ips(context, ipam_driver, port,
                                        deallocated, revert_on_fail=False)
            elif not revert_on_fail and ips:
                addresses = ', '.join(self._get_failed_ips(ips,
                                                           deallocated))
                LOG.error(_LE("IP deallocation failed on "
                              "external system for %s"), addresses)
    return deallocated
def treat_devices_removed(self, devices):
    resync = False
    self.sg_agent.remove_devices_filter(devices)
    for device in devices:
        LOG.info(_LI("Attachment %s removed"), device)
        details = None
        try:
            details = self.plugin_rpc.update_device_down(self.context,
                                                         device,
                                                         self.agent_id,
                                                         cfg.CONF.host)
        except Exception:
            LOG.exception(_LE("Error occurred while removing port %s"),
                          device)
            resync = True
        if details and details['exists']:
            LOG.info(_LI("Port %s updated."), device)
        else:
            LOG.debug("Device %s not defined on plugin", device)
    if self.prevent_arp_spoofing:
        arp_protect.delete_arp_spoofing_protection(devices)
    return resync
def _delete_service_vm_hosting_device(self, context, hosting_device):
    """Deletes a <hosting_device> service VM.

    This will indirectly make all of its hosted resources unscheduled.
    """
    if hosting_device is None:
        return
    plugging_drv = self.get_hosting_device_plugging_driver()
    if plugging_drv is None:
        return
    res = plugging_drv.get_hosting_device_resources(
        context, hosting_device['id'], hosting_device['complementary_id'],
        self.l3_tenant_id(), self.mgmt_nw_id())
    if not self._svc_vm_mgr.delete_service_vm(context,
                                              hosting_device['id']):
        LOG.error(_LE('Failed to delete hosting device %s service VM. '
                      'Will un-register it anyway.'),
                  hosting_device['id'])
    plugging_drv.delete_hosting_device_resources(
        context, self.l3_tenant_id(), **res)
    with context.session.begin(subtransactions=True):
        context.session.delete(hosting_device)
def destroy_namespace(conf, namespace, force=False):
    """Destroy a given namespace.

    If force is True, then dhcp (if it exists) will be disabled
    and all devices will be forcibly removed.
    """
    try:
        root_helper = agent_config.get_root_helper(conf)
        ip = ip_lib.IPWrapper(root_helper, namespace)

        if force:
            kill_dhcp(conf, namespace)
            # NOTE: The dhcp driver will remove the namespace if it is
            # empty, so a second check is required here.
            if ip.netns.exists(namespace):
                for device in ip.get_devices(exclude_loopback=True):
                    unplug_device(conf, device)

        ip.garbage_collect_namespace()
    except Exception:
        LOG.exception(_LE('Error unable to destroy namespace: %s'),
                      namespace)
def wait_until_address_ready(self, address, wait_time=30):
    """Wait until an address is no longer marked 'tentative'.

    Raises AddressNotReady if it times out or if the address is not
    present on the interface.
    """
    def is_address_ready():
        try:
            addr_info = self.list(to=address)[0]
        except IndexError:
            raise AddressNotReady(
                address=address,
                reason=_LE('Address not present on interface'))
        if not addr_info['tentative']:
            return True
        if addr_info['dadfailed']:
            raise AddressNotReady(
                address=address,
                reason=_LE('Duplicate address detected'))

    errmsg = _LE("Exceeded %s second limit waiting for "
                 "address to leave the tentative state.") % wait_time
    utils.wait_until_true(
        is_address_ready, timeout=wait_time, sleep=0.20,
        exception=AddressNotReady(address=address, reason=errmsg))
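# A generic stand-in for the wait_until_true() helper relied on above,
# assuming plain time.sleep rather than eventlet primitives; the name
# ConditionTimeout is made up for this sketch:
import time

class ConditionTimeout(Exception):
    pass

def wait_until_true(predicate, timeout=30, sleep=0.2, exception=None):
    """Poll predicate() until it returns True or the timeout elapses."""
    deadline = time.time() + timeout
    while not predicate():
        if time.time() > deadline:
            raise exception or ConditionTimeout(
                'condition not met within %s seconds' % timeout)
        time.sleep(sleep)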
def nova_services_up(self):
    """Checks if required Nova services are up and running.

    :returns: True if all needed Nova services are up, False otherwise
    """
    required = set(['nova-conductor', 'nova-cert', 'nova-scheduler',
                    'nova-compute', 'nova-consoleauth'])
    try:
        services = self._nclient.services.list()
    # There are several individual Nova client exceptions but they have
    # no other common base than Exception, hence the long list.
    except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
            nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
            nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
            nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
            nova_exc.ConnectionRefused, nova_exc.ClientException,
            Exception) as e:
        LOG.error(_LE('Failure determining running Nova services: %s'),
                  e)
        return False
    return not bool(required.difference(
        [service.binary for service in services
         if service.status == 'enabled' and service.state == 'up']))
def bind_port(self, context):
    """Attempt to bind a port using registered mechanism drivers.

    :param context: PortContext instance describing the port

    Called outside any transaction to attempt to establish a port
    binding.
    """
    binding = context._binding
    LOG.debug("Attempting to bind port %(port)s on host %(host)s "
              "for vnic_type %(vnic_type)s with profile %(profile)s",
              {'port': context.current['id'],
               'host': context.host,
               'vnic_type': binding.vnic_type,
               'profile': binding.profile})
    context._clear_binding_levels()
    if not self._bind_port_level(context, 0,
                                 context.network.network_segments):
        binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
        LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"),
                  {'port': context.current['id'],
                   'host': context.host})
def _get_unused_service_vm_trunk_port(self, context, hd_id, network_type):
    name = (n1kv_const.T2_PORT_NAME if network_type == 'vlan'
            else n1kv_const.T1_PORT_NAME)
    attempts = 0
    res = []
    while True:
        # mysql> SELECT * FROM ports WHERE device_id = 'hd_id1' AND
        #     id NOT IN (SELECT hosting_port_id FROM
        #     hostedhostingportbindings) AND name LIKE '%t1%'
        #     ORDER BY name;
        stmt = context.session.query(
            hd_models.HostedHostingPortBinding.hosting_port_id).subquery()
        query = context.session.query(models_v2.Port.id)
        query = query.filter(
            expr.and_(models_v2.Port.device_id == hd_id,
                      ~models_v2.Port.id.in_(stmt),
                      models_v2.Port.name.like('%' + name + '%')))
        query = query.order_by(models_v2.Port.name)
        res = query.first()
        if res is None:
            if attempts >= MAX_HOSTING_PORT_LOOKUP_ATTEMPTS:
                # This should not happen ...
                LOG.error(_LE('Hosting port DB inconsistency for '
                              'hosting device %s'), hd_id)
                return
            else:
                # The service VM may not have plugged its VIF into the
                # Neutron Port yet so we wait and make another lookup.
                attempts += 1
                LOG.info(_LI('Attempt %(attempt)d to find trunk ports for '
                             'hosting device %(hd_id)s failed. Trying '
                             'again in %(time)d seconds.'),
                         {'attempt': attempts, 'hd_id': hd_id,
                          'time': SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS})
                eventlet.sleep(SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS)
        else:
            break
    return res[0]
def call_driver(self, action, network, **action_kwargs):
    """Invoke an action on a DHCP driver instance."""
    LOG.debug('Calling driver for network: %(net)s action: %(action)s',
              {'net': network.id, 'action': action})
    try:
        # The driver expects something that is duck typed similar to
        # the base models.
        driver = self.dhcp_driver_cls(self.conf,
                                      network,
                                      self._process_monitor,
                                      self.root_helper,
                                      self.dhcp_version,
                                      self.plugin_rpc)
        getattr(driver, action)(**action_kwargs)
        return True
    except exceptions.Conflict:
        # No need to resync here, the agent will receive the event related
        # to a status update for the network.
        LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
                        'is a conflict with its current state; please '
                        'check that the network and/or its subnet(s) '
                        'still exist.'),
                    {'net_id': network.id, 'action': action})
    except Exception as e:
        self.schedule_resync(e, network.id)
        if (isinstance(e, oslo_messaging.RemoteError)
                and e.exc_type == 'NetworkNotFound'
                or isinstance(e, exceptions.NetworkNotFound)):
            LOG.warning(_LW("Network %s has been deleted."), network.id)
        else:
            LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
                          {'net_id': network.id, 'action': action})
def __init__(self, conf, gw_config):
    self.responses = []
    self.connected = False
    self.gw_config = gw_config
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if gw_config.use_ssl:
        ssl_sock = ssl.wrap_socket(
            self.socket,
            server_side=False,
            keyfile=gw_config.private_key,
            certfile=gw_config.certificate,
            cert_reqs=ssl.CERT_REQUIRED,
            ssl_version=ssl.PROTOCOL_TLSv1,
            ca_certs=gw_config.ca_cert)
        self.socket = ssl_sock
    retryCount = 0
    while True:
        try:
            self.socket.connect((str(gw_config.ovsdb_ip),
                                 int(gw_config.ovsdb_port)))
            break
        except (socket.error, socket.timeout):
            LOG.warning(OVSDB_UNREACHABLE_MSG, gw_config.ovsdb_ip)
            if retryCount == conf.max_connection_retries:
                # Retried for max_connection_retries times.
                # Give up and return so that it can be tried in
                # the next periodic interval.
                with excutils.save_and_reraise_exception(reraise=True):
                    LOG.exception(_LE("Socket error in connecting to "
                                      "the OVSDB server"))
            else:
                time.sleep(1)
                retryCount += 1

    # Successfully connected to the socket.
    LOG.debug(OVSDB_CONNECTED_MSG, gw_config.ovsdb_ip)
    self.connected = True
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False,
            log_fail_as_error=True, log_output=True):
    try:
        obj, cmd = create_process(cmd, root_helper=root_helper,
                                  addl_env=addl_env, log_output=log_output)
        _stdout, _stderr = (process_input and
                            obj.communicate(process_input) or
                            obj.communicate())
        obj.stdin.close()
        m = _LE("\nCommand: %(cmd)s\nExit code: %(code)s\n"
                "Stdout: %(stdout)r\nStderr: %(stderr)r") % {
            'cmd': cmd,
            'code': obj.returncode,
            'stdout': _stdout,
            'stderr': _stderr}

        if obj.returncode and log_fail_as_error:
            LOG.error(m)
        else:
            log_output and LOG.info(m)

        if obj.returncode and check_exit_code:
            raise RuntimeError(m)
    finally:
        # NOTE(termie): this appears to be necessary to let the subprocess
        #               call clean something up in between calls, without
        #               it two execute calls in a row hangs the second one
        greenthread.sleep(0)

    # The return must stay outside the finally block; a return inside
    # finally would swallow the RuntimeError raised above.
    return return_stderr and (_stdout, _stderr) or _stdout
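# Typical call sites for the helper above; the commands are hypothetical:
# capture stdout only, raising RuntimeError on a non-zero exit...
out = execute(['ip', 'link', 'show'])
# ...or tolerate failure and capture both streams for inspection.
out, err = execute(['conntrack', '-D', '-d', '10.0.0.5'],
                   check_exit_code=False, return_stderr=True)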
def __init__(self, CPNR_server_ip, CPNR_server_port, CPNR_server_username,
             CPNR_server_password, timeout=20):
    """Constructor for the CPNR_restApi class."""
    try:
        # NOTE: the password is deliberately left out of the debug info.
        info = ("CPNR_server_ip = " + CPNR_server_ip + ", " +
                "CPNR_server_port = " + str(CPNR_server_port) + ", " +
                "CPNR_server_username = " + CPNR_server_username + ", " +
                "timeout = " + str(timeout))
        LOG.debug("info = {0}".format(info))
        self.CPNR_server_ip = CPNR_server_ip
        self.CPNR_server_port = CPNR_server_port
        self.CPNR_server_username = CPNR_server_username
        self.url = ("http://" + self.CPNR_server_ip + ":" +
                    str(self.CPNR_server_port))
        self.auth = requests.auth.HTTPBasicAuth(CPNR_server_username,
                                                CPNR_server_password)
        self.headers = {'Content-Type': 'application/json',
                        'Accept': 'application/json'}
        self._cpnr_reload_needed = False
        self.timeout = timeout
        cpnr_server = self.get_dhcp_server()
        LOG.debug("cpnr_server = {0}".format(cpnr_server))
    except Exception as e:
        LOG.error(_LE("Unexpected error in constructor: "
                      "%(ex_type)s, %(ex_args)s. %(ex_info)s"),
                  {'ex_type': str(type(e)),
                   'ex_args': str(e.args),
                   'ex_info': str(info)})
def process_amqp_msgs(self):
    """Process AMQP queue messages.

    It connects to AMQP server and calls callbacks to process DCNM
    events, i.e. routing key containing '.cisco.dcnm.', once they
    arrive in the queue.
    """
    LOG.info(_LI('Starting process_amqp_msgs...'))
    while True:
        (mtd_fr, hdr_fr, body) = (None, None, None)
        try:
            if self.consume_channel:
                (mtd_fr, hdr_fr, body) = self.consume_channel.basic_get(
                    self._dcnm_queue_name)
            if mtd_fr:
                # Queue has messages.
                LOG.info(_LI('RX message: %s'), body)
                self._cb_dcnm_msg(mtd_fr, body)
                self.consume_channel.basic_ack(mtd_fr.delivery_tag)
            else:
                # Queue is empty.
                try:
                    self._conn.sleep(1)
                except AttributeError:
                    time.sleep(1)
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            tb_str = traceback.format_exception(exc_type, exc_value,
                                                exc_tb)
            LOG.exception(_LE("Failed to read from queue: %(queue)s "
                              "%(exc_type)s, %(exc_value)s, %(exc_tb)s."),
                          {'queue': self._dcnm_queue_name,
                           'exc_type': exc_type,
                           'exc_value': exc_value,
                           'exc_tb': tb_str})
def allocate_hosting_port(self, context, router_id, port_db, network_type,
                          hosting_device_id):
    """Allocates a hosting port for a logical port.

    We create a hosting port for the router port.
    """
    l3admin_tenant_id = self._dev_mgr.l3_tenant_id()
    hostingport_name = 'hostingport_' + port_db['id'][:8]
    p_spec = {'port': {
        'tenant_id': l3admin_tenant_id,
        'admin_state_up': True,
        'name': hostingport_name,
        'network_id': port_db['network_id'],
        'mac_address': attributes.ATTR_NOT_SPECIFIED,
        'fixed_ips': [],
        'device_id': '',
        'device_owner': '',
        'port_security_enabled': False}}
    # Initialize so that the except/finally branches below never see an
    # unbound name if create_port raises.
    hosting_port = None
    try:
        hosting_port = self._core_plugin.create_port(context, p_spec)
    except n_exc.NeutronException as e:
        LOG.error(_LE('Error %s when creating hosting port. '
                      'Cleaning up.'), e)
        self.delete_hosting_device_resources(
            context, l3admin_tenant_id, hosting_port)
        hosting_port = None
    finally:
        if hosting_port:
            return {'allocated_port_id': hosting_port['id'],
                    'allocated_vlan': None}
        else:
            return None
def rest_api(self, method, url, body=None, headers=None):
    url = REST_URL_PREFIX + url
    if body:
        body_data = jsonutils.dumps(body)
    else:
        body_data = ''
    if not headers:
        headers = {}
    enc = base64.b64encode('%s:%s' % (self.user, self.key))
    headers['Authorization'] = 'Basic ' + enc
    LOG.debug("vArmourRestAPI: %(server)s %(port)s",
              {'server': self.server, 'port': self.port})
    try:
        action = "https://" + self.server + ":" + self.port + url

        LOG.debug("vArmourRestAPI Sending: "
                  "%(method)s %(action)s %(headers)s %(body_data)s",
                  {'method': method, 'action': action,
                   'headers': headers, 'body_data': body_data})

        h = httplib2.Http(timeout=3,
                          disable_ssl_certificate_validation=True)
        resp, resp_str = h.request(action, method,
                                   body=body_data,
                                   headers=headers)

        LOG.debug("vArmourRestAPI Response: %(status)s %(resp_str)s",
                  {'status': resp.status, 'resp_str': resp_str})

        if resp.status == 200:
            return {'status': resp.status,
                    'reason': resp.reason,
                    'body': jsonutils.loads(resp_str)}
    except Exception:
        LOG.error(_LE('vArmourRestAPI: Could not establish HTTP '
                      'connection'))
def set_driver(self, resource):
    """Set the driver for a neutron resource.

    :param resource: Neutron resource in dict format. Expected keys:
        {'id': <value>,
         'hosting_device': {'id': <value>, },
         'router_type': {'cfg_agent_driver': <value>, }}
    :return driver: driver object
    """
    try:
        resource_id = resource['id']
        hosting_device = resource['hosting_device']
        hd_id = hosting_device['id']
        if hd_id in self._hosting_device_routing_drivers_binding:
            driver = self._hosting_device_routing_drivers_binding[hd_id]
            self._drivers[resource_id] = driver
        else:
            driver_class = resource['router_type']['cfg_agent_driver']
            driver = importutils.import_object(driver_class,
                                               **hosting_device)
            self._hosting_device_routing_drivers_binding[hd_id] = driver
            self._drivers[resource_id] = driver
        return driver
    except ImportError:
        with excutils.save_and_reraise_exception(reraise=False):
            LOG.exception(_LE("Error loading cfg agent driver %(driver)s "
                              "for hosting device template "
                              "%(t_name)s(%(t_id)s)"),
                          {'driver': driver_class, 't_id': hd_id,
                           't_name': resource['name']})
            raise cfg_exceptions.DriverNotExist(driver=driver_class)
    except KeyError as e:
        with excutils.save_and_reraise_exception(reraise=False):
            raise cfg_exceptions.DriverNotSetForMissingParameter(e)
def result(self, raw_result):
    # If check_error=False, run_vsctl can return None
    if not raw_result:
        self._result = None
        return
    try:
        json = jsonutils.loads(raw_result)
    except (ValueError, TypeError):
        # This shouldn't happen, but if it does, log and re-raise.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Could not parse: %s"), raw_result)

    headings = json['headings']
    data = json['data']
    results = []
    for record in data:
        obj = {}
        for pos, heading in enumerate(headings):
            obj[heading] = ovsdb.val_to_py(record[pos])
        results.append(obj)
    self._result = results
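# What the parser above consumes and produces, using a made-up two-column
# ovs-vsctl JSON result. Note that ovsdb.val_to_py additionally decodes
# OVSDB typed values (sets, maps, uuids), which plain json does not:
import json

raw = json.dumps({'headings': ['name', 'ofport'],
                  'data': [['eth0', 1], ['tap42', 7]]})
parsed = json.loads(raw)
rows = [dict(zip(parsed['headings'], record))
        for record in parsed['data']]
print(rows)  # [{'name': 'eth0', 'ofport': 1}, {'name': 'tap42', 'ofport': 7}]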
def _execute(self, task):
    """Execute task."""
    LOG.debug("Start task %s", str(task))
    task._start()
    try:
        status = task._execute_callback(task)
    except Exception:
        LOG.exception(_LE("Task %(task)s encountered exception in "
                          "%(cb)s"),
                      {'task': str(task),
                       'cb': str(task._execute_callback)})
        status = constants.TaskStatus.ERROR

    LOG.debug("Task %(task)s return %(status)s",
              {'task': str(task), 'status': status})

    task._update_status(status)
    task._executed()
    return status
def _check_pending_tasks(self):
    """Check all pending tasks status."""
    for resource_id in self._tasks.keys():
        if self._stopped:
            # Task manager is stopped, return now.
            return
        tasks = self._tasks[resource_id]
        # Only the first task is executed and pending.
        task = tasks[0]
        try:
            status = task._status_callback(task)
        except Exception:
            LOG.exception(_LE("Task %(task)s encountered exception in "
                              "%(cb)s"),
                          {'task': str(task),
                           'cb': str(task._status_callback)})
            status = constants.TaskStatus.ERROR
        task._update_status(status)
        if status != constants.TaskStatus.PENDING:
            self._dequeue(task, True)