def _sync_base(self): ctx = context.get_admin_context() # Sync Networks for network in self.core_plugin.get_networks(ctx): mech_context = driver_context.NetworkContext(self.core_plugin, ctx, network) try: self.driver.create_network_postcommit(mech_context) except Exception: LOG.warning(_LW("Create network postcommit failed for " "network %s"), network['id']) # Sync Subnets for subnet in self.core_plugin.get_subnets(ctx): mech_context = driver_context.SubnetContext(self.core_plugin, ctx, subnet) try: self.driver.create_subnet_postcommit(mech_context) except Exception: LOG.warning(_LW("Create subnet postcommit failed for" " subnet %s"), subnet['id']) # Sync Ports (compute/gateway/dhcp) for port in self.core_plugin.get_ports(ctx): _, binding = l2_db.get_locked_port_and_binding(ctx.session, port['id']) network = self.core_plugin.get_network(ctx, port['network_id']) mech_context = driver_context.PortContext(self.core_plugin, ctx, port, network, binding, []) try: self.driver.create_port_postcommit(mech_context) except Exception: LOG.warning(_LW("Create port postcommit failed for" " port %s"), port['id'])
def _router_removed(self, router_id, deconfigure=True):
    """Operations when a router is removed.

    Get the RouterInfo object corresponding to the router in the service
    helper's router_info dict. If deconfigure is set to True, remove this
    router's configuration from the hosting device.

    :param router_id: id of the router
    :param deconfigure: if True, the router's configuration is deleted from
                        the hosting device.
    :return: None
    """
    ri = self.router_info.get(router_id)
    if ri is None:
        LOG.warning(_LW("Info for router %s was not found. "
                        "Skipping router removal"), router_id)
        return
    ri.router['gw_port'] = None
    ri.router[bc.constants.INTERFACE_KEY] = []
    ri.router[bc.constants.FLOATINGIP_KEY] = []
    try:
        hd = ri.router['hosting_device']
        # We proceed to removing the configuration from the device
        # only if (a) deconfigure is set to True (default)
        #         (b) the router's hosting device is reachable.
        if (deconfigure and
                self._dev_status.is_hosting_device_reachable(hd)):
            self._process_router(ri)
            driver = self.driver_manager.get_driver(router_id)
            driver.router_removed(ri)
            self.driver_manager.remove_driver(router_id)
        del self.router_info[router_id]
        self.removed_routers.discard(router_id)
    except cfg_exceptions.DriverException:
        LOG.warning(_LW("Router remove for router_id: %s was incomplete. "
                        "Adding the router to removed_routers list"),
                    router_id)
        self.removed_routers.add(router_id)
        # remove this router from updated_routers if it is there. It might
        # end up there too if exception was thrown earlier inside
        # `_process_router()`
        self.updated_routers.discard(router_id)
    except ncc_errors.SessionCloseError as e:
        LOG.exception(_LE("ncclient Unexpected session close %s"
                          " while attempting to remove router"), e)
        if not self._dev_status.is_hosting_device_reachable(hd):
            LOG.debug("Lost connectivity to Hosting Device %s", hd['id'])
            # rely on heartbeat to detect HD state
            # and schedule resync when the device comes back
        else:
            # retry the router removal on the next pass
            self.removed_routers.add(router_id)
            LOG.debug("Interim connectivity lost to hosting device %s, "
                      "enqueuing router %s in removed_routers set",
                      pp.pformat(hd), router_id)
def _maintain_hosting_device_pool(self, context, template):
    """Maintains the pool of hosting devices that are based on <template>.

    Ensures that the number of standby hosting devices (essentially
    service VMs) is kept at a suitable level so that resource creation is
    not slowed down by booting of the hosting device.

    :param context: context for this operation
    :param template: db object for hosting device template
    """
    # TODO(bobmel): Support HA/load-balanced Neutron servers:
    # TODO(bobmel): Locking across multiple running Neutron server
    # instances
    lock = self._get_template_pool_lock(template['id'])
    acquired = lock.acquire(False)
    if not acquired:
        # pool maintenance for this template already ongoing, so abort
        return
    try:
        # Maintain a pool of approximately 'desired_slots_free' slots
        # available for allocation. Approximately means that
        # abs(desired_slots_free - capacity) <= available_slots <=
        # desired_slots_free + capacity
        capacity = template['slot_capacity']
        if capacity == 0:
            return
        desired = template['desired_slots_free']
        available = self._get_total_available_slots(
            context, template['id'], capacity)
        grow_threshold = abs(desired - capacity)
        if available <= grow_threshold:
            num_req = int(math.ceil(grow_threshold / (1.0 * capacity)))
            num_created = len(self._create_svc_vm_hosting_devices(
                context, num_req, template))
            if num_created < num_req:
                LOG.warning(_LW('Requested %(requested)d instances based '
                                'on hosting device template %(template)s '
                                'but could only create %(created)d '
                                'instances'),
                            {'requested': num_req,
                             'template': template['id'],
                             'created': num_created})
        elif available >= desired + capacity:
            num_req = int(
                math.floor((available - desired) / (1.0 * capacity)))
            num_deleted = self._delete_idle_service_vm_hosting_devices(
                context, num_req, template)
            if num_deleted < num_req:
                LOG.warning(_LW('Tried to delete %(requested)d instances '
                                'based on hosting device template '
                                '%(template)s but could only delete '
                                '%(deleted)d instances'),
                            {'requested': num_req,
                             'template': template['id'],
                             'deleted': num_deleted})
    finally:
        lock.release()
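# A hedged, self-contained sketch (not part of the plugin) of the pool
# arithmetic used in _maintain_hosting_device_pool above, so the grow/shrink
# thresholds are easy to verify in isolation. The function name is
# hypothetical.
import math

def pool_action(capacity, desired_slots_free, available_slots):
    """Return ('grow', n), ('shrink', n) or ('noop', 0)."""
    if capacity == 0:
        return ('noop', 0)
    grow_threshold = abs(desired_slots_free - capacity)
    if available_slots <= grow_threshold:
        return ('grow', int(math.ceil(grow_threshold / (1.0 * capacity))))
    if available_slots >= desired_slots_free + capacity:
        return ('shrink',
                int(math.floor((available_slots - desired_slots_free) /
                               (1.0 * capacity))))
    return ('noop', 0)

# With capacity=10 and desired_slots_free=20: grow when 10 or fewer slots
# are free, shrink when 30 or more are free, otherwise leave the pool alone.
assert pool_action(10, 20, 5) == ('grow', 1)
assert pool_action(10, 20, 35) == ('shrink', 1)
assert pool_action(10, 20, 15) == ('noop', 0)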
def get_version():
    version = 0
    try:
        pnr = _get_client()
        verstr = pnr.get_version()
        version = verstr.split()[2]
    except cpnr_client.CpnrException:
        LOG.warning(_LW("Failed to obtain CPNR version number"))
    except Exception:
        # Catches parse failures such as an unexpected version string
        # layout; StandardError existed only on Python 2.
        LOG.warning(_LW("Failed to parse CPNR version number"))
    LOG.debug("CPNR version: %s", version)
    return version
def _delete_port_profile(self, handle, port_profile, ucsm_ip): """Deletes Port Profile from UCS Manager.""" port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX + port_profile) try: handle.StartTransaction() # Find port profile on the UCS Manager p_profile = handle.GetManagedObject( None, self.ucsmsdk.VnicProfile.ClassId(), {self.ucsmsdk.VnicProfile.NAME: port_profile, self.ucsmsdk.VnicProfile.DN: port_profile_dest}) if not p_profile: LOG.warning(_LW('UCS Manager network driver did not find ' 'Port Profile %s to delete.'), port_profile) return handle.RemoveManagedObject(p_profile) handle.CompleteTransaction() except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigDeleteFailed(config=port_profile, ucsm_ip=ucsm_ip, exc=e)
def _delete_resource_port(self, context, port_id): try: self._core_plugin.delete_port(context, port_id) LOG.debug("Port %s deleted successfully", port_id) except n_exc.PortNotFound: LOG.warning(_LW('Trying to delete port:%s, but port is not found'), port_id)
def release_segment(self, session, segment): vxlan_vni = segment[api.SEGMENTATION_ID] inside = any(lo <= vxlan_vni <= hi for lo, hi in self.tunnel_ranges) with session.begin(subtransactions=True): query = (session.query(nexus_models_v2.NexusVxlanAllocation). filter_by(vxlan_vni=vxlan_vni)) if inside: count = query.update({"allocated": False}) if count: mcast_row = ( session.query(nexus_models_v2.NexusMcastGroup) .filter_by(associated_vni=vxlan_vni).first()) session.delete(mcast_row) LOG.debug("Releasing vxlan tunnel %s to pool", vxlan_vni) else: count = query.delete() if count: LOG.debug("Releasing vxlan tunnel %s outside pool", vxlan_vni) if not count: LOG.warning(_LW("vxlan_vni %s not found"), vxlan_vni)
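# A minimal illustration (values hypothetical) of the in-pool test used by
# release_segment above; tunnel_ranges is assumed to be a list of
# (low, high) VNI tuples loaded from configuration.
tunnel_ranges = [(5000, 5999), (7000, 7099)]
assert any(lo <= 5500 <= hi for lo, hi in tunnel_ranges)      # inside pool
assert not any(lo <= 6500 <= hi for lo, hi in tunnel_ranges)  # outside pool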
def _edit_running_config(self, conf_str, snippet):
    conn = self._get_connection()
    LOG.info(_LI("Config generated for [%(device)s] %(snip)s is:%(conf)s "
                 "caller:%(caller)s"),
             {"device": self.hosting_device["id"],
              "snip": snippet,
              "conf": conf_str,
              "caller": self.caller_name()})
    try:
        rpc_obj = conn.edit_config(target="running", config=conf_str)
        self._check_response(rpc_obj, snippet, conf_str=conf_str)
    except Exception as e:
        # Here we catch all exceptions caused by REMOVE_/DELETE_ configs
        # to avoid the config agent getting stuck once it hits this
        # condition. This is needed since the current ncclient version
        # (0.4.2) generates an exception when an attempt to configure the
        # device fails on the device (ASR1K router), but it doesn't
        # provide any details about the error message that the device
        # reported. With ncclient 0.4.4 and onwards the exception also
        # returns the proper error, hence this code can be changed when
        # the ncclient version is increased.
        if re.search(r"REMOVE_|DELETE_", snippet):
            LOG.warning(_LW("Pass exception for %s"), snippet)
        elif isinstance(e, ncclient.operations.rpc.RPCError):
            params = {"snippet": snippet, "type": e.type, "tag": e.tag,
                      "dev_id": self.hosting_device["id"],
                      "ip": self._host_ip, "confstr": conf_str}
            raise cfg_exc.CSR1kvConfigException(**params)
        else:
            # Re-raise anything not explicitly handled above so that
            # unrelated failures are not silently masked.
            raise
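# Hedged illustration of the naming convention the error handler above keys
# off: only config snippets whose names contain REMOVE_ or DELETE_ have
# their failures suppressed. The snippet names below are hypothetical.
import re

assert re.search(r"REMOVE_|DELETE_", "REMOVE_VRF_DEFINITION")
assert re.search(r"REMOVE_|DELETE_", "DELETE_SUBINTERFACE")
assert not re.search(r"REMOVE_|DELETE_", "CREATE_VRF_DEFINITION")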
def _agent_registration(self): """Register this agent with the server. This method registers the cfg agent with the neutron server so hosting devices can be assigned to it. In case the server is not ready to accept registration (it sends a False) then we retry registration for `MAX_REGISTRATION_ATTEMPTS` with a delay of `REGISTRATION_RETRY_DELAY`. If there is no server response or a failure to register after the required number of attempts, the agent stops itself. """ for attempts in range(MAX_REGISTRATION_ATTEMPTS): context = n_context.get_admin_context_without_session() self.send_agent_report(self.agent_state, context) res = self.devmgr_rpc.register_for_duty(context) if res is True: LOG.info(_LI("[Agent registration] Agent successfully " "registered")) return elif res is False: LOG.warning(_LW("[Agent registration] Neutron server said " "that device manager was not ready. Retrying " "in %0.2f seconds "), REGISTRATION_RETRY_DELAY) time.sleep(REGISTRATION_RETRY_DELAY) elif res is None: LOG.error(_LE("[Agent registration] Neutron server said that " "no device manager was found. Cannot continue. " "Exiting!")) raise SystemExit(_("Cfg Agent exiting")) LOG.error(_LE("[Agent registration] %d unsuccessful registration " "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS) raise SystemExit(_("Cfg Agent exiting"))
def teardown_logical_port_connectivity(self, context, port_db,
                                       hosting_device_id):
    """Removes connectivity for a logical port.

    Unplugs the corresponding data interface from the CSR.
    """
    if port_db is None or port_db.get('id') is None:
        LOG.warning(_LW("Port id is None! Cannot remove port "
                        "from hosting_device:%s"), hosting_device_id)
        return
    hosting_port_id = port_db.hosting_info.hosting_port.id
    try:
        self._dev_mgr.svc_vm_mgr.interface_detach(hosting_device_id,
                                                  hosting_port_id)
        self._gt_pool.spawn_n(self._cleanup_hosting_port, context,
                              hosting_port_id)
        LOG.debug("Teardown of logical port completed for port:%s",
                  port_db.id)
    except Exception as e:
        LOG.error(_LE("Failed to detach interface corresponding to port:"
                      "%(p_id)s on hosting device:%(hd_id)s due to "
                      "error %(error)s"),
                  {'p_id': hosting_port_id,
                   'hd_id': hosting_device_id,
                   'error': str(e)})
def sanitize_policy_profile_table(self): """Clear policy profiles from stale VSM.""" db_session = db.get_session() hosts = config.get_vsm_hosts() vsm_info = db_session.query( n1kv_models.PolicyProfile.vsm_ip).distinct() if vsm_info is None or hosts is None: return vsm_ips = [vsm_ip[0] for vsm_ip in vsm_info if vsm_ip[0] not in hosts] for vsm_ip in vsm_ips: pprofiles = n1kv_db.get_policy_profiles_by_host(vsm_ip, db_session) for pprofile in pprofiles: # Do not delete profile if it is in use and if it # is the only VSM to have it configured pp_in_use = n1kv_db.policy_profile_in_use( pprofile['id'], db_session) num_vsm_using_pp = db_session.query( n1kv_models.PolicyProfile).filter_by( id=pprofile['id']).count() if (not pp_in_use) or (num_vsm_using_pp > 1): db_session.delete(pprofile) db_session.flush() else: LOG.warning( _LW('Cannot delete policy profile %s ' 'as it is in use.'), pprofile['id'])
def remove_reserved_binding(vlan_id, switch_ip, instance_id, port_id):
    """Removes reserved binding.

    This overloads port bindings to support reserved Switch bindings used
    to maintain the state of a switch so it can be viewed by all other
    neutron processes. There's also the case of a reserved port binding to
    keep switch information on a given interface.

    The values of these arguments are as follows:

    :param vlan_id: 0
    :param switch_ip: ip address of the switch
    :param instance_id: fixed string RESERVED_NEXUS_SWITCH_DEVICE_ID_R1
                        or RESERVED_NEXUS_PORT_DEVICE_ID_R1
    :param port_id: switch state of ACTIVE, RESTORE_S1, RESTORE_S2,
                    INACTIVE for switch bindings; the expected port_id
                    for port bindings
    """
    if not port_id:
        LOG.warning(_LW("remove_reserved_binding called with no state"))
        return
    LOG.debug("remove_reserved_binding called")
    session = db.get_session()
    binding = _lookup_one_nexus_binding(session=session,
                                        vlan_id=vlan_id,
                                        switch_ip=switch_ip,
                                        instance_id=instance_id,
                                        port_id=port_id)
    for bind in binding:
        session.delete(bind)
    session.flush()
    return binding
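# Hedged usage sketch for remove_reserved_binding above: releasing the
# reserved switch-state binding. The IP address is illustrative, and the
# instance_id string follows the fixed values named in the docstring.
remove_reserved_binding(
    vlan_id=0,
    switch_ip='192.0.2.1',
    instance_id='RESERVED_NEXUS_SWITCH_DEVICE_ID_R1',
    port_id='ACTIVE')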
def release_segment(self, context, segment): vxlan_vni = segment[api.SEGMENTATION_ID] inside = any(lo <= vxlan_vni <= hi for lo, hi in self.tunnel_ranges) session = bc.get_tunnel_session(context) with session.begin(subtransactions=True): query = (session.query( nexus_models_v2.NexusVxlanAllocation).filter_by( vxlan_vni=vxlan_vni)) if inside: count = query.update({"allocated": False}) if count: mcast_row = (session.query( nexus_models_v2.NexusMcastGroup).filter_by( associated_vni=vxlan_vni).first()) session.delete(mcast_row) LOG.debug("Releasing vxlan tunnel %s to pool", vxlan_vni) else: count = query.delete() if count: LOG.debug("Releasing vxlan tunnel %s outside pool", vxlan_vni) if not count: LOG.warning(_LW("vxlan_vni %s not found"), vxlan_vni)
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port, **kwargs): attempts = 1 port_ids = set(p['id'] for p in kwargs['ports']) while mgmt_port is not None or port_ids: if attempts == DELETION_ATTEMPTS: LOG.warning(_LW('Aborting resource deletion after %d ' 'unsuccessful attempts'), DELETION_ATTEMPTS) return else: if attempts > 1: eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS) LOG.info(_LI('Resource deletion attempt %d starting'), attempts) # Remove anything created. if mgmt_port is not None: ml = {mgmt_port['id']} self._delete_resources(context, "management port", self._core_plugin.delete_port, n_exc.PortNotFound, ml) if not ml: mgmt_port = None self._delete_resources(context, "trunk port", self._core_plugin.delete_port, n_exc.PortNotFound, port_ids) attempts += 1 self._safe_delete_t1_network(context, tenant_id) self._safe_delete_t2_network(context, tenant_id) LOG.info(_LI('Resource deletion succeeded'))
def get_nexus_type(self, nexus_host):
    """Given the nexus host, get the type of Nexus switch.

    :param nexus_host: IP address of Nexus switch
    :returns: Nexus type
    """
    confstr = snipp.EXEC_GET_INVENTORY_SNIPPET
    starttime = time.time()
    response = self._get_config(nexus_host, confstr)
    self.capture_and_print_timeshot(
        starttime, "gettype",
        other=threading.current_thread().ident,
        switch=nexus_host)
    if response:
        nexus_type = re.findall(
            r"\<[mod:]*desc\>\"*Nexus\s*(\d)\d+\s*[0-9A-Z]+\s*"
            r"[cC]hassis\s*\"*\<\/[mod:]*desc\>",
            response)
        if len(nexus_type) > 0:
            LOG.debug("GET call returned Nexus type %d",
                      int(nexus_type[0]))
            return int(nexus_type[0])
    LOG.warning(_LW("GET call failed to return Nexus type"))
    return -1
def get_nexus_type(self, nexus_host):
    """Given the nexus host, get the type of Nexus switch.

    :param nexus_host: IP address of Nexus switch
    :returns: Nexus type
    """
    starttime = time.time()
    response = self.client.rest_get(snipp.PATH_GET_NEXUS_TYPE, nexus_host)
    self.capture_and_print_timeshot(starttime, "gettype",
                                    switch=nexus_host)
    if response:
        try:
            result = response['imdata'][0]["eqptCh"]['attributes']['descr']
        except Exception:
            result = ''
        nexus_type = re.findall(
            r"Nexus\s*(\d)\d+\s*[0-9A-Z]+\s*"
            r"[cC]hassis",
            result)
        if len(nexus_type) > 0:
            LOG.debug("GET call returned Nexus type %d",
                      int(nexus_type[0]))
            return int(nexus_type[0])
    LOG.warning(_LW("GET call failed to return Nexus type"))
    return -1
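# A hedged, self-contained check of the chassis-description pattern shared
# by both get_nexus_type variants above; the description text is
# illustrative of what a Nexus 9000 reports.
import re

descr = 'Nexus9000 C9396PX Chassis'
assert re.findall(r"Nexus\s*(\d)\d+\s*[0-9A-Z]+\s*[cC]hassis",
                  descr) == ['9']  # leading digit is the switch family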
def _sync_base(self):
    ctx = context.get_admin_context()
    # Sync Networks
    # Unroll to avoid unwanted additions during sync
    networks = [x for x in self.core_plugin.get_networks(ctx)]
    for network in networks:
        if constants.APIC_SYNC_NETWORK == network['name']:
            continue
        mech_context = driver_context.NetworkContext(
            self.core_plugin, ctx, network)
        try:
            self.driver.create_network_postcommit(mech_context)
        except aexc.ReservedSynchronizationName as e:
            LOG.debug(e.message)
        except Exception as e:
            LOG.warning(_LW("Create network postcommit failed for "
                            "network %(net_id)s: %(message)s"),
                        {'net_id': network['id'], 'message': e.message})
    # Sync Subnets
    subnets = [x for x in self.core_plugin.get_subnets(ctx)]
    for subnet in subnets:
        mech_context = driver_context.SubnetContext(self.core_plugin, ctx,
                                                    subnet)
        try:
            self.driver.create_subnet_postcommit(mech_context)
        except Exception as e:
            LOG.warning(_LW("Create subnet postcommit failed for "
                            "subnet %(sub_id)s: %(message)s"),
                        {'sub_id': subnet['id'], 'message': e.message})
    # Sync Ports (compute/gateway/dhcp)
    ports = [x for x in self.core_plugin.get_ports(ctx)]
    for port in ports:
        binding = l2_db.get_locked_port_and_binding(ctx.session,
                                                    port['id'])[1]
        levels = l2_db.get_binding_levels(ctx.session, port['id'],
                                          binding.host)
        network = self.core_plugin.get_network(ctx, port['network_id'])
        mech_context = driver_context.PortContext(self.core_plugin, ctx,
                                                  port, network, binding,
                                                  levels)
        try:
            self.driver.create_port_postcommit(mech_context)
        except Exception as e:
            LOG.warning(_LW("Create port postcommit failed for "
                            "port %(port_id)s: %(message)s"),
                        {'port_id': port['id'], 'message': e.message})
def update_port_postcommit(self, context): """Creates a port profile on UCS Manager. Creates a Port Profile for this VLAN if it does not already exist. """ vlan_id = self._get_vlanid(context) if not vlan_id: LOG.warning(_LW("update_port_postcommit: vlan_id is None.")) return # Checks to perform before UCS Manager can create a Port Profile. # 1. Make sure this host is on a known UCS Manager. host_id = context.current.get(portbindings.HOST_ID) ucsm_ip = self.driver.get_ucsm_ip_for_host(host_id) if not ucsm_ip: LOG.info( _LI('Host_id %s is not controlled by any known UCS ' 'Manager'), str(host_id)) return # 2. Make sure this is a vm_fex_port.(Port profiles are created # only for VM-FEX ports.) profile = context.current.get(portbindings.PROFILE, {}) vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if (self.driver.check_vnic_type_and_vendor_info(vnic_type, profile) and self.driver.is_vmfex_port(profile)): # 3. Make sure update_port_precommit added an entry in the DB # for this port profile profile_name = self.ucsm_db.get_port_profile_for_vlan( vlan_id, ucsm_ip) # 4. Make sure that the Port Profile hasn't already been created # on the UCS Manager if profile_name and self.ucsm_db.is_port_profile_created( vlan_id, ucsm_ip): LOG.debug( "update_port_postcommit: Port Profile %s for " "vlan_id %d already exists on UCSM %s. ", profile_name, vlan_id, ucsm_ip) return # All checks are done. Ask the UCS Manager driver to create the # above Port Profile. if self.driver.create_portprofile(profile_name, vlan_id, vnic_type, host_id): # Port profile created on UCS, record that in the DB. self.ucsm_db.set_port_profile_created(vlan_id, profile_name, ucsm_ip) return else: # Enable vlan-id for this regular Neutron virtual port. LOG.debug("update_port_postcommit: Host_id is %s", host_id) self.driver.update_serviceprofile(host_id, vlan_id)
def __exit__(self, type, value, tb):
    _libc.setns(self.parent_fileno, 0)
    try:
        self.target_fd.close()
    except Exception:
        LOG.warning(_LW("Failed to close target_fd: %s"), self.target_fd)
    self.parent_fd.close()
def format_for_options(name, value):
    name = name.strip()
    if isinstance(value, str):
        value = value.strip()
    LOG.debug('name = %s value %s', name, value)
    if name not in OPTIONS:
        LOG.warning(_LW("Unrecognized DHCP option: %s"), name)
        return
    code, datatype = OPTIONS[name]
    try:
        value = _format_value(datatype, value)
    except Exception:
        LOG.warning(_LW("Failed to parse DHCP option: %s"), name)
        return
    value = ':'.join(value[i:i + 2] for i in range(0, len(value), 2))
    LOG.debug('name = %s value %s', name, value)
    return value
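# The final encoding step of format_for_options above, isolated: the
# _format_value helper is assumed to return a flat hex string, which the
# join then splits into colon-separated octets. Values are hypothetical.
value = '0a000001'  # assumed hex encoding of the IPv4 address 10.0.0.1
assert ':'.join(value[i:i + 2]
                for i in range(0, len(value), 2)) == '0a:00:00:01'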
def format_for_pnr(name, value):
    name = name.strip()
    value = value.strip()
    if name not in OPTIONS:
        LOG.warning(_LW("Unrecognized DHCP option: %s"), name)
        return None
    code, datatype = OPTIONS[name]
    return {'number': str(code), 'value': value}
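# Hedged usage note for format_for_pnr above: the option is returned as a
# number/value dict; the 'routers' -> code 3 mapping assumes OPTIONS follows
# the standard DHCP option table.
#
#   format_for_pnr('routers', '10.0.0.1')
#   -> {'number': '3', 'value': '10.0.0.1'}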
def _remove_vrf(self, vrf_name): if vrf_name in self._get_vrfs(): conn = self._get_connection() confstr = snippets.REMOVE_VRF % vrf_name rpc_obj = conn.edit_config(target='running', config=confstr) if self._check_response(rpc_obj, 'REMOVE_VRF'): LOG.info(_LI("VRF %s removed"), vrf_name) else: LOG.warning(_LW("VRF %s not present"), vrf_name)
def _client_network_relay(self, namespace): # Open a socket in the DHCP network namespace try: with self.ns_lock, netns.Namespace(namespace): recv_sock, send_sock, int_addr = self._open_dhcp_int_socket() except Exception: self.int_sock_retries += 1 if self.int_sock_retries >= 2: LOG.exception(_LE('Failed to open dhcp server socket in %s'), namespace) self.int_sock_retries = 0 del self.ns_states[namespace] return self.int_sock_retries = 0 self.ns_states[namespace] = NS_RELAY_RUNNING vpnid = self._convert_ns_to_vpnid(namespace) self.debug_stats.add_network_stats(vpnid) self.int_sockets_by_vpn[vpnid] = send_sock recvbuf = bytearray(RECV_BUFFER_SIZE) LOG.debug('Opened dhcp server socket on ns:%s, addr:%s, vpn:%s', namespace, int_addr, vpnid) # Forward DHCP requests from internal to external networks while self.ns_states[namespace] != NS_RELAY_DELETING: if self.kill_now: break try: recv_sock.settimeout(1) recv_sock.recv_into(recvbuf) pkt = DhcpPacket.parse(recvbuf) options = [(5, int_addr), (11, int_addr), (151, vpnid), (152, '')] for option in options: pkt.set_relay_option(*option) pkt.set_giaddr(self.ext_addr) self.debug_stats.increment_pkts_from_client(vpnid) LOG.debug('Forwarding DHCP request for vpn %s', vpnid) self.ext_sock.send(pkt.data()) self.debug_stats.increment_pkts_to_server(vpnid) except socket.timeout: pass except Exception: LOG.exception(_LE('Failed to forward dhcp to server from %s'), namespace) # Cleanup socket and internal state try: del self.ns_states[namespace] del self.int_sockets_by_vpn[vpnid] self.debug_stats.del_network_stats(vpnid) recv_sock.close() send_sock.close() except Exception: LOG.warning(_LW('Failed to cleanup relay for %s'), namespace) LOG.debug('Client network relay exiting')
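# Hedged annotation of the relay sub-options set in _client_network_relay
# above; they appear to follow the DHCP relay-agent-information
# conventions:
#   5   link selection (RFC 3527)              -> internal subnet address
#   11  server identifier override (RFC 5107)  -> internal address
#   151 virtual subnet selection (RFC 6607)    -> VPN id
#   152 VSS control (RFC 6607)                 -> empty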
def _client_network_relay(self, namespace):
    # Open a socket in the DNS network namespace
    try:
        with self.ns_lock, netns.Namespace(namespace):
            int_sock, int_addr, int_port = self._open_dns_int_socket()
    except Exception:
        LOG.exception(_LE('Failed to open dns server socket in %s'),
                      namespace)
        del self.ns_states[namespace]
        return
    self.ns_states[namespace] = NS_RELAY_RUNNING
    recvbuf = bytearray(RECV_BUFFER_SIZE)
    LOG.debug("Opened dns server socket on ns: %s, addr:%s:%i",
              namespace, int_addr, int_port)
    # Convert the namespace into a view id
    viewid = self._convert_namespace_to_viewid(namespace)
    self.debug_stats.add_network_stats(viewid)

    # Forward DNS requests from internal to external networks
    while self.ns_states[namespace] != NS_RELAY_DELETING:
        if self.kill_now:
            break
        try:
            int_sock.settimeout(1)
            size, (src_addr, src_port) = int_sock.recvfrom_into(recvbuf)
            LOG.debug("got dns request from ns: %s", namespace)
            self.debug_stats.increment_pkts_from_client(viewid)
            pkt = DnsPacket.parse(recvbuf, size)
            pkt.set_viewid(viewid)
            # Store off some state to know where to forward response later
            msgid = pkt.get_msgid()
            createtime = time.time()
            self.request_info_by_msgid[msgid] = [int_sock, src_addr,
                                                 src_port, createtime,
                                                 viewid]
            LOG.debug("forwarding request to external nameserver")
            self.ext_sock.send(pkt.data())
            self.debug_stats.increment_pkts_to_server(viewid)
        except socket.timeout:
            pass
        except Exception:
            LOG.exception(_LE('Failed to forward dns request to server '
                              'from %s'), namespace)

    # Cleanup socket and internal state
    try:
        del self.ns_states[namespace]
        self.debug_stats.del_network_stats(viewid)
        int_sock.close()
    except Exception:
        LOG.warning(_LW('Failed to cleanup dns relay for %s'), namespace)
    LOG.debug('Client network relay exiting')
def schedule_hosting_device(self, plugin, context, hosting_device): """Selects Cisco cfg agent that will configure <hosting_device>.""" active_cfg_agents = plugin.get_cfg_agents(context, active=True) if not active_cfg_agents: LOG.warning(_LW('There are no active Cisco cfg agents')) # No worries, once a Cisco cfg agent is started and # announces itself any "dangling" hosting devices # will be scheduled to it. return return random.choice(list(active_cfg_agents))
def _sync_create_network_profiles(self, combined_res_info, vsm_ip):
    """Sync network profiles by creating missing ones on VSM."""
    (vsm_net_profile_uuids, neutron_net_profiles) = combined_res_info
    for np_obj in neutron_net_profiles:
        if np_obj['id'] not in vsm_net_profile_uuids:
            # create these network profiles on VSM
            try:
                self.n1kvclient.create_network_segment_pool(np_obj, vsm_ip)
            except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
                LOG.warning(_LW('Sync exception: Network profile creation '
                                'failed for %s.'), np_obj['id'])
def check_version(cls):
    """Checks server version against minimum required version."""
    super(SimpleCpnrDriver, cls).check_version()
    model.configure_pnr()
    cls.recover_networks()
    ver = model.get_version()
    if ver < cls.MIN_VERSION:
        # model.get_version() returns the version as a string, so format
        # the actual value with %s rather than %f.
        LOG.warning(_LW("CPNR version does not meet minimum requirements, "
                        "expected: %(ever)f, actual: %(rver)s"),
                    {'ever': cls.MIN_VERSION, 'rver': ver})
    return ver
def bind_port(self, context):
    """Binds port to current network segment.

    Binds port only if the vnic_type is direct or macvtap and the port is
    from a supported vendor. While binding the port, set it to ACTIVE
    state and provide the Port Profile or Vlan Id as part of vif_details.
    """
    vnic_type = context.current.get(bc.portbindings.VNIC_TYPE,
                                    bc.portbindings.VNIC_NORMAL)
    LOG.debug('Attempting to bind port %(port)s with vnic_type '
              '%(vnic_type)s on network %(network)s',
              {'port': context.current['id'],
               'vnic_type': vnic_type,
               'network': context.network.current['id']})
    profile = context.current.get(bc.portbindings.PROFILE, {})
    if not self.driver.check_vnic_type_and_vendor_info(vnic_type,
                                                       profile):
        return
    for segment in context.network.network_segments:
        if self.check_segment(segment):
            vlan_id = segment[api.SEGMENTATION_ID]
            if not vlan_id:
                LOG.warning(_LW('Cannot bind port: vlan_id is None.'))
                return
            LOG.debug("Port binding to Vlan_id: %s", str(vlan_id))
            # Check if this is a Cisco VM-FEX port or Intel SR-IOV port
            if self.driver.is_vmfex_port(profile):
                profile_name = self.make_profile_name(vlan_id)
                self.vif_details[
                    const.VIF_DETAILS_PROFILEID] = profile_name
            else:
                self.vif_details[
                    bc.portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
            context.set_binding(segment[api.ID],
                                self.vif_type,
                                self.vif_details,
                                bc.constants.PORT_STATUS_ACTIVE)
            return
    LOG.error(_LE('UCS Mech Driver: Failed binding port ID %(id)s '
                  'on any segment of network %(network)s'),
              {'id': context.current['id'],
               'network': context.network.current['id']})
def _sync_create_subnets(self, combined_res_info, vsm_ip):
    """Sync subnets by creating missing ones on VSM."""
    (vsm_subnet_uuids, neutron_subnets) = combined_res_info
    missing_subnets = [subnet for subnet in neutron_subnets
                       if subnet['id'] not in vsm_subnet_uuids]
    for subnet in missing_subnets:
        try:
            self.n1kvclient.create_ip_pool(subnet, vsm_ip=vsm_ip)
        except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
            LOG.warning(_LW('Sync Exception: Subnet create failed for '
                            '%s.'), subnet['id'])
def _initialize_service_helpers(self, host): svc_helper_class = self.conf.cfg_agent.routing_svc_helper_class try: self.routing_service_helper = importutils.import_object( svc_helper_class, host, self.conf, self) except ImportError as e: LOG.warning(_LW("Error in loading routing service helper. Class " "specified is %(class)s. Reason:%(reason)s"), {'class': self.conf.cfg_agent.routing_svc_helper_class, 'reason': e}) self.routing_service_helper = None
def _sync_bridge_domains(self, combined_res_info, vsm_ip):
    """Sync bridge domains by creating/deleting them on the VSM.

    :param combined_res_info: a two-tuple storing the list of VSM BDs and
                              the list of neutron networks
    :param vsm_ip: IP of the VSM being synced
    """
    # create missing BDs on VSM
    (vsm_bds, neutron_vxlan_nets) = combined_res_info
    need_bd_sync = False
    for network in neutron_vxlan_nets:
        bd_name = network['id'] + n1kv_const.BRIDGE_DOMAIN_SUFFIX
        if bd_name not in vsm_bds:
            binding = n1kv_db.get_network_binding(network['id'])
            netp = n1kv_db.get_network_profile_by_uuid(binding.profile_id)
            network[bc.providernet.SEGMENTATION_ID] = (
                binding.segmentation_id)
            network[bc.providernet.NETWORK_TYPE] = binding.network_type
            # create this BD on VSM
            try:
                self.n1kvclient.create_bridge_domain(network, netp,
                                                     vsm_ip=vsm_ip)
            except (n1kv_exc.VSMConnectionFailed, n1kv_exc.VSMError):
                LOG.warning(_LW('Sync Exception: Bridge domain creation '
                                'failed for %s.'), bd_name)
                need_bd_sync = True
    # delete extraneous BDs from VSM
    neutron_bds = {net_id + n1kv_const.BRIDGE_DOMAIN_SUFFIX for net_id in
                   self._get_uuids(n1kv_const.NETWORKS,
                                   neutron_vxlan_nets)}
    for bd in vsm_bds - neutron_bds:
        try:
            # delete this BD from VSM
            self.n1kvclient.delete_bridge_domain(bd, vsm_ip=vsm_ip)
        except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
            LOG.warning(_LW('Sync Exception: Bridge domain deletion '
                            'failed for %s.'), bd)
            need_bd_sync = True
    self.sync_bds[vsm_ip] = need_bd_sync
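# Hedged illustration of the delete-side reconciliation in
# _sync_bridge_domains above: BD names derive from network UUIDs plus a
# suffix, so any VSM BD not derivable from a Neutron network is extraneous.
# The '_bd' suffix stands in for n1kv_const.BRIDGE_DOMAIN_SUFFIX.
vsm_bds = {'net-1_bd', 'net-2_bd', 'stale_bd'}
neutron_bds = {net_id + '_bd' for net_id in ('net-1', 'net-2')}
assert vsm_bds - neutron_bds == {'stale_bd'}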
def update_nexusport_binding(port_id, new_vlan_id): """Updates nexusport binding.""" if not new_vlan_id: LOG.warning(_LW("update_nexusport_binding called with no vlan")) return LOG.debug("update_nexusport_binding called") session = db.get_session() binding = _lookup_one_nexus_binding(session=session, port_id=port_id) binding.vlan_id = new_vlan_id session.merge(binding) session.flush() return binding
def _sync_router(self): ctx = context.get_admin_context() # Sync Router Interfaces filters = {'device_owner': [n_constants.DEVICE_OWNER_ROUTER_INTF]} for interface in self.core_plugin.get_ports(ctx, filters=filters): try: self.driver.add_router_interface_postcommit( ctx, interface['device_id'], {'port_id': interface['id']}) except Exception: LOG.warning(_LW("Add interface postcommit failed for " "port %s"), interface['id'])
def f_retry(*args, **kwargs): mtries, mdelay = tries, delay while mtries > 1: try: return f(*args, **kwargs) except ExceptionToCheck as e: LOG.warning(_LW("%(ex)s, Retrying in %(delt)d seconds.."), {'ex': str(e), 'delt': mdelay}) time.sleep(mdelay) mtries -= 1 mdelay *= backoff return f(*args, **kwargs)
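# f_retry above reads as the inner function of a retry decorator; below is a
# minimal self-contained sketch of the assumed enclosing decorator. All
# names besides f_retry are assumptions for illustration.
import functools
import time

def retry(exc_type, tries=4, delay=2, backoff=2):
    def deco(f):
        @functools.wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except exc_type as e:
                    print("%s, retrying in %d seconds.." % (e, mdelay))
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            # final attempt: any exception propagates to the caller
            return f(*args, **kwargs)
        return f_retry
    return deco

# e.g. with tries=4, delay=2, backoff=2 the waits between attempts are
# 2s, 4s and 8s before the last try is allowed to raise.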
def _sync_delete_subnets(self, combined_res_info, vsm_ip):
    """Sync subnets by deleting extraneous ones from VSM."""
    (vsm_subnet_uuids, neutron_subnets) = combined_res_info
    neutron_subnet_uuids = set(self._get_uuids(n1kv_const.SUBNETS,
                                               neutron_subnets))
    for sub_id in vsm_subnet_uuids - neutron_subnet_uuids:
        # delete these subnets from the VSM
        try:
            self.n1kvclient.delete_ip_pool(sub_id, vsm_ip=vsm_ip)
        except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
            LOG.warning(_LW('Sync Exception: Subnet delete failed for '
                            '%s.'), sub_id)
def do_sync(self):
    """Entry point function for VSM-Neutron sync.

    Triggered on an eventlet from the N1kv mechanism driver.
    """
    while True:
        try:
            vsm_hosts = config.get_vsm_hosts()
            for vsm_ip in vsm_hosts:
                try:
                    self._sync_vsm(vsm_ip=vsm_ip)
                except n1kv_exc.VSMConnectionFailed:
                    LOG.warning(_LW('Sync thread exception: VSM '
                                    '%s unreachable.'), vsm_ip)
                except n1kv_exc.VSMError:
                    LOG.warning(_LW('Sync thread exception: Internal '
                                    'server error on VSM %s.'), vsm_ip)
        except Exception as e:
            LOG.warning(_LW('Sync thread exception: %s'), e)
        eventlet.sleep(seconds=self.sync_sleep_duration)