def _delete_port_group(self, pg_ref, name):
    """Destroy the vCenter port group backing network *name*.

    Retries up to 3 times when vCenter reports the port group as
    still in use (active ports), sleeping 0.2s between attempts,
    then gives up with an info log.  A "already deleted" driver
    fault is treated as a transient race and retried after 0.1s.
    """
    remove_used_pg_try = 0
    while True:
        try:
            pg_delete_task = self.connection.invoke_api(
                self.connection.vim,
                'Destroy_Task', pg_ref)
            self.connection.wait_for_task(pg_delete_task)
            LOG.info(_LI('Network %(name)s deleted.'), {'name': name})
            break
        except vmware_exceptions.VimException as e:
            if dvs_const.RESOURCE_IN_USE in e.message:
                # Port group still has active ports; retry a few times.
                remove_used_pg_try += 1
                if remove_used_pg_try > 3:
                    LOG.info(_LI('Network %(name)s was not deleted. Active'
                                 ' ports were found'), {'name': name})
                    break
                else:
                    sleep(0.2)
            else:
                raise exceptions.wrap_wmvare_vim_exception(e)
        except vmware_exceptions.VMwareDriverException as e:
            if dvs_const.DELETED_TEXT in e.message:
                # Concurrent delete in progress -- wait and re-check.
                sleep(0.1)
            else:
                raise
def delete_port_group(session, dvs_name, pg_name):
    """Deletes a port group from DVS."""
    port_group_mor = get_portgroup_mor_by_name(session, dvs_name, pg_name)
    if not port_group_mor:
        # Nothing to delete -- the port group is absent from this DVS.
        LOG.info(_LI("portgroup %(pg)s not present on dvs %(dvs)s"),
                 {'pg': pg_name, 'dvs': dvs_name})
        return
    try:
        destroy_task = session._call_method(session._get_vim(),
                                            "Destroy_Task",
                                            port_group_mor)
        session.wait_for_task(destroy_task)
        LOG.info(_LI("Successfully deleted portgroup %(pg)s from "
                     "dvs %(dvs)s"),
                 {'pg': pg_name, 'dvs': dvs_name})
    except Exception as e:
        LOG.exception(_LE("Failed to delete portgroup %(pg)s from "
                          "dvs %(dvs)s .Cause : %(err)s"),
                      {'pg': pg_name, 'dvs': dvs_name, 'err': e})
        raise error_util.RunTimeError("Failed to delete portgroup %s "
                                      "on dvs %s on vCenter.Cause"
                                      " : %s" % (pg_name, dvs_name, e))
def _remove_from_sgid_device_map(self, deleted_dev, deleted_dev_group, ip,
                                 port_id, remove_list):
    """Remove *port_id* from the SG-id -> device map and related caches.

    Scans self.sgid_devices_dict for the device ip that owns port_id,
    removes the port (or the whole ip entry when it was the last port),
    appends now-empty groups to *remove_list*, and prunes
    device_sgids_dict / pending_rules_dict for the port.

    Returns the (deleted_dev, deleted_dev_group) pair that was found.

    NOTE(review): the *ip* parameter is clobbered by the inner loop
    variable before it is ever read -- presumably vestigial; confirm
    against callers.
    """
    for group, devices_dict in \
            six.iteritems(self.sgid_devices_dict):
        # for/else: after the loop, 'ip' holds the matching device ip
        # only when we broke out; otherwise it is reset to None.
        for ip, ports_list in six.iteritems(devices_dict):
            if port_id in ports_list:
                deleted_dev = ip
                deleted_dev_group = group
                break
        else:
            ip = None
        if ip is not None:
            if len(ports_list) == 1:
                # Last port on this device ip: drop the ip entry itself.
                value = devices_dict.pop(ip, None)
                if value is None:
                    LOG.info(_LI("KeyError for %s"), ip)
                    LOG.info(_LI("KeyError devices_dict %(ddict)s,"
                                 "%(deleted_dev)s"),
                             {'ddict': devices_dict,
                              'deleted_dev': deleted_dev})
            else:
                ports_list.remove(port_id)
            if len(devices_dict) == 0:
                # Group lost its last device; caller removes the group.
                remove_list.append(group)
    dev_groups = self.device_sgids_dict.get(port_id)
    if dev_groups is not None:
        if deleted_dev_group in dev_groups:
            dev_groups.remove(deleted_dev_group)
            if len(dev_groups) == 0:
                self.device_sgids_dict.pop(port_id)
    if self.pending_rules_dict.get(port_id) is not None:
        self.pending_rules_dict.pop(port_id)
    LOG.debug("Deleted device ip and group are: %s, %s",
              deleted_dev, deleted_dev_group)
    return deleted_dev, deleted_dev_group
def release_local_vlan(net_info):
    """Free the cluster-local VLAN for a network once it has no ports.

    Looks up the ClusterVNIAllocations row for the
    vcenter/cluster/network triple under a row lock; when no ports
    remain on the network, clears the allocation so the lvid can be
    reused.  Missing rows are logged (another controller may have
    already cleared the record).
    """
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        res_keys = ['vcenter_id', 'cluster_id', 'network_id']
        res = dict((k, net_info[k]) for k in res_keys)
        try:
            query = session.query(models.ClusterVNIAllocations)
            # Row-level lock so concurrent controllers serialize here.
            allocation = (query.filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id ==
                res['network_id']).with_lockmode('update').one())
            if allocation.network_port_count == 0:
                allocation.update({
                    'network_id': None,
                    'allocated': False,
                    'network_port_count': 0
                })
                LOG.info(_LI("Released lvid for network: %s."), res)
            else:
                LOG.info(
                    _LI("Unable to release local vlan for network_id %s "
                        "because ports are available on network."),
                    res['network_id'])
        except sa_exc.NoResultFound:
            # Nothing to do, may be another controller cleared the record
            # We will just log and return.
            LOG.error(
                _LE("Network %(network)s is already de-allocated for "
                    "cluster %(cluster)s."), {
                        'network': net_info['network_id'],
                        'cluster': net_info['cluster_id']
                    })
def _delete_port_group(self, pg_ref, name):
    """Destroy the vCenter port group backing network *name*.

    Retries up to 3 times when vCenter reports the port group in use
    (active ports), then gives up with an info log; an "already
    deleted" driver fault is treated as a transient race and retried.

    Fix: pass the substitution mapping to LOG.info lazily instead of
    interpolating with '%' at call time -- interpolation now only
    happens when the level is enabled, and the translation marker
    receives the raw format string (consistent with the sibling
    implementation of this method elsewhere in the file).
    """
    remove_used_pg_try = 0
    while True:
        try:
            pg_delete_task = self.connection.invoke_api(
                self.connection.vim,
                'Destroy_Task', pg_ref)
            self.connection.wait_for_task(pg_delete_task)
            LOG.info(_LI('Network %(name)s deleted.'), {'name': name})
            break
        except vmware_exceptions.VimException as e:
            if dvs_const.RESOURCE_IN_USE in e.message:
                # Port group still has active ports; retry a few times.
                remove_used_pg_try += 1
                if remove_used_pg_try > 3:
                    LOG.info(
                        _LI('Network %(name)s was not deleted. Active'
                            ' ports were found'), {'name': name})
                    break
                else:
                    sleep(0.2)
            else:
                raise exceptions.wrap_wmvare_vim_exception(e)
        except vmware_exceptions.VMwareDriverException as e:
            if dvs_const.DELETED_TEXT in e.message:
                # Concurrent delete in progress -- wait and re-check.
                sleep(0.1)
            else:
                raise
def release_local_vlan(net_info):
    """Free the cluster-local VLAN for a network once it has no ports."""
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        res = {key: net_info[key]
               for key in ('vcenter_id', 'cluster_id', 'network_id')}
        try:
            # Lock the allocation row so concurrent controllers
            # serialize on it.
            query = session.query(models.ClusterVNIAllocations)
            allocation = (query.filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one())
            if allocation.network_port_count != 0:
                LOG.info(_LI("Unable to release local vlan for network_id %s "
                             "because ports are available on network."),
                         res['network_id'])
            else:
                allocation.update({'network_id': None,
                                   'allocated': False,
                                   'network_port_count': 0})
                LOG.info(_LI("Released lvid for network: %s."), res)
        except sa_exc.NoResultFound:
            # Another controller may have already cleared the record;
            # just log and return.
            LOG.error(_LE("Network %(network)s is already de-allocated for "
                          "cluster %(cluster)s."),
                      {'network': net_info['network_id'],
                       'cluster': net_info['cluster_id']})
def create_port_group(session, dvs_name, pg_name, net_id, vlan_id):
    """Creates a Portgroup on DVS with a vlan id.

    Idempotent when a port group for the network already exists with
    the same vlan id; raises RunTimeError when it exists with a
    different vlan id or when the AddDVPortgroup task fails.
    """
    port_group_mor = get_portgroup_mor_by_net_id(session, dvs_name,
                                                 pg_name, net_id)
    if port_group_mor:
        port_group_config = session._call_method(
            vim_util, "get_dynamic_property", port_group_mor,
            "DistributedVirtualPortgroup", "config")
        if vlan_id == port_group_config.defaultPortConfig.vlan.vlanId:
            # Already present with the expected vlan -- nothing to do.
            LOG.debug("Portgroup %(pg)s with vlan id %(vid)s already exists",
                      {'pg': pg_name, 'vid': vlan_id})
            return
        else:
            LOG.info(
                _LI("Portgroup %(pg)s already exists "
                    "but with vlan id %(vid)s"), {
                        'pg': pg_name,
                        'vid': port_group_config.defaultPortConfig.vlan.vlanId
                    })
            raise error_util.RunTimeError(
                "Inconsistent vlan id for portgroup"
                " %s", pg_name)
    else:
        client_factory = session._get_vim().client.factory
        add_prt_grp_spec = _get_add_vswitch_port_group_spec(
            client_factory, pg_name, vlan_id)
        # Create the port group unblocked so traffic can flow once
        # ports attach.
        blocked = client_factory.create('ns0:BoolPolicy')
        blocked.value = False
        blocked.inherited = False
        add_prt_grp_spec.defaultPortConfig.blocked = blocked
        dvs_mor = get_dvs_mor_by_name(session, dvs_name)
        try:
            task_ref = session._call_method(session._get_vim(),
                                            "AddDVPortgroup_Task", dvs_mor,
                                            spec=add_prt_grp_spec)
            session.wait_for_task(task_ref)
            LOG.info(
                _LI("Successfully created portgroup "
                    "%(pg)s with vlan id %(vid)s"), {
                        'pg': pg_name,
                        'vid': vlan_id
                    })
        except Exception as e:
            LOG.exception(
                _LE("Failed to create portgroup %(pg)s with "
                    "vlan id %(vid)s on vCenter. Cause : %(err)s"), {
                        'pg': pg_name,
                        'vid': vlan_id,
                        'err': e
                    })
            raise error_util.RunTimeError("Failed to create portgroup %s "
                                          "with vlan id %s on vCenter.Cause"
                                          " : %s" % (pg_name, vlan_id, e))
def refresh_firewall(self, device_ids=None):
    """Re-fetch SG rules for the given ports and reapply their filters."""
    LOG.info(_LI("Refresh firewall rules"))
    targets = device_ids or self.firewall.ports.keys()
    if not targets:
        LOG.info(_LI("No ports here to refresh firewall"))
        return
    devices = self.plugin_rpc.security_group_rules_for_devices(
        self.context, targets)
    self.firewall.update_port_filter(devices.values())
def monitor_events(self):
    """Long-running loop that pulls update sets from vCenter.

    Runs while self.state stays DRIVER_RUNNING, dispatching events
    derived from each WaitForUpdatesEx result.  Socket timeouts are
    ignored; an InvalidCollectorVersion fault resets the update
    version and re-registers all cluster property filters; any other
    error is logged and the loop continues.
    """
    try:
        LOG.info(_LI("Starting monitoring for vCenter updates"))
        # Empty version string asks vCenter for the full current state.
        version = ""
        self.state = constants.DRIVER_RUNNING
        while self.state in (constants.DRIVER_RUNNING):
            try:
                LOG.debug("Waiting for vCenter updates...")
                try:
                    updateSet = self.session._call_method(
                        vim_util,
                        "wait_for_updates_ex",
                        version)
                    if self.state != constants.DRIVER_RUNNING:
                        LOG.error(_LE("Driver is not in running state."))
                        break
                except error_util.SocketTimeoutException:
                    # Ignore timeout.
                    LOG.warning(
                        _LW("Ignoring socket timeouts while "
                            "monitoring for vCenter updates."))
                    continue
                if updateSet:
                    version = updateSet.version
                    events = self._process_update_set(updateSet)
                    LOG.debug("Sending events : %s.", events)
                    self.dispatch_events(events)
            except exceptions.VimFaultException as e:
                # InvalidCollectorVersionFault happens
                # on session re-connect.
                # Re-initialize WaitForUpdatesEx.
                if "InvalidCollectorVersion" in e.fault_list:
                    LOG.debug("InvalidCollectorVersion - "
                              "Re-initializing vCenter updates "
                              "monitoring.")
                    version = ""
                    for cluster_mor in self.clusters_by_id.values():
                        pfo = self._register_cluster_for_updates(
                            cluster_mor)
                        clu_id = cluster_mor.value
                        self.cluster_id_to_filter[clu_id] = pfo
                    continue
                LOG.exception(
                    _LE("VimFaultException while processing "
                        "update set %s."), e)
            except Exception:
                LOG.exception(
                    _LE("Exception while processing update"
                        " set."))
            # Yield to other greenthreads between iterations.
            time.sleep(0)
        LOG.info(_LI("Stopped monitoring for vCenter updates."))
    except Exception:
        LOG.exception(_LE("Monitoring for vCenter updates failed."))
def set_node_state(self, is_up):
    """Bring the agent node up or down; no-op when state is unchanged."""
    if is_up == self.node_up:
        LOG.info(_LI("Ignoring node update as agent "
                     "is already %s."),
                 "ACTIVE" if self.node_up else "DOWN")
        return
    self.node_up = is_up
    if is_up:
        LOG.info(_LI("Making node up."))
        self._initialize_managers()
        self._start_managers()
    else:
        self.state = constants.AGENT_INITIALIZING
        self._stop_managers()
def monitor_events(self):
    """Long-running loop that pulls update sets from vCenter.

    Runs while self.state stays DRIVER_RUNNING, dispatching events
    derived from each WaitForUpdatesEx result.  Socket timeouts are
    ignored; an InvalidCollectorVersion fault resets the update
    version and re-registers all cluster property filters; any other
    error is logged and the loop continues.
    """
    try:
        LOG.info(_LI("Starting monitoring for vCenter updates"))
        # Empty version string asks vCenter for the full current state.
        version = ""
        self.state = constants.DRIVER_RUNNING
        while self.state in (constants.DRIVER_RUNNING):
            try:
                LOG.debug("Waiting for vCenter updates...")
                try:
                    updateSet = self.session._call_method(
                        vim_util,
                        "wait_for_updates_ex",
                        version)
                    if self.state != constants.DRIVER_RUNNING:
                        LOG.error(_LE("Driver is not in running state."))
                        break
                except error_util.SocketTimeoutException:
                    # Ignore timeout.
                    LOG.warning(_LW("Ignoring socket timeouts while "
                                    "monitoring for vCenter updates."))
                    continue
                if updateSet:
                    version = updateSet.version
                    events = self._process_update_set(updateSet)
                    LOG.debug("Sending events : %s.", events)
                    self.dispatch_events(events)
            except exceptions.VimFaultException as e:
                # InvalidCollectorVersionFault happens
                # on session re-connect.
                # Re-initialize WaitForUpdatesEx.
                if "InvalidCollectorVersion" in e.fault_list:
                    LOG.debug("InvalidCollectorVersion - "
                              "Re-initializing vCenter updates "
                              "monitoring.")
                    version = ""
                    for cluster_mor in self.clusters_by_id.values():
                        pfo = self._register_cluster_for_updates(
                            cluster_mor)
                        clu_id = cluster_mor.value
                        self.cluster_id_to_filter[clu_id] = pfo
                    continue
                LOG.exception(_LE("VimFaultException while processing "
                                  "update set %s."), e)
            except Exception:
                LOG.exception(_LE("Exception while processing update"
                                  " set."))
            # Yield to other greenthreads between iterations.
            time.sleep(0)
        LOG.info(_LI("Stopped monitoring for vCenter updates."))
    except Exception:
        LOG.exception(_LE("Monitoring for vCenter updates failed."))
def check_to_reclaim_local_vlan(port_info):
    """Decrement a network's port count; report when its lvid is free.

    Returns the local vlan id once no ports remain on the network for
    the given vCenter/cluster, -1 otherwise.  The allocation row is
    read and updated under a row-level lock.
    """
    lvid = -1
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        res_keys = ['vcenter_id', 'cluster_id', 'network_id']
        res = dict((k, port_info[k]) for k in res_keys)
        try:
            query = session.query(models.ClusterVNIAllocations)
            allocation = (query.filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id == res['network_id']
            ).with_lockmode('update').one())
            count = allocation.network_port_count
            if count >= 1:
                count -= 1
                allocation.update({'network_port_count': count})
                LOG.debug("Decremented the allocated port count for network "
                          "%s.", res)
            if count == 0:
                lvid = allocation.lvid
                LOG.info(_LI("lvid can be released for network: %s."), res)
        except sa_exc.NoResultFound:
            # Nothing to do, may be another controller cleared the record
            # We will just log and return back status as False.
            LOG.debug("Network %(network)s is already de-allocated for "
                      "cluster %(cluster)s.",
                      {'network': port_info['network_id'],
                       'cluster': port_info['cluster_id']})
    return lvid
def create_network_precommit(self, context):
    """Cast a pre-create notification for supported DVS network types."""
    if not (CONF.DVS.precreate_networks and self._check_net_type(context)):
        return
    LOG.info(_LI('Precreate network cast'))
    self.dvs_notifier.create_network_cast(context.current,
                                          context.network_segments[0])
    # need to wait for agents. Cast message
    sleep(2)
def rpc_loop(self, polling_manager=None):
    """Main agent loop: resync with the plugin and process port changes.

    On fullsync, re-reads connected DVS ports, computes newly added
    ones, and optionally cleans stale vSphere resources.  Each
    iteration that has updates processes ports and records simple
    add/update/remove counters before sleeping until the next tick.
    """
    if not polling_manager:
        polling_manager = polling.get_polling_manager(
            minimize_polling=False)
    while self.run_daemon_loop:
        start = time.time()
        port_stats = {'regular': {'added': 0,
                                  'updated': 0,
                                  'removed': 0}}
        if self.fullsync:
            LOG.info(_LI("Agent out of sync with plugin!"))
            connected_ports = self._get_dvs_ports()
            # Only ports not already known need to be (re)processed.
            self.added_ports = connected_ports - self.known_ports
            if cfg.CONF.DVS.clean_on_restart:
                self._clean_up_vsphere_extra_resources(connected_ports)
            self.fullsync = False
            polling_manager.force_polling()
        if self._agent_has_updates(polling_manager):
            LOG.debug("Agent rpc_loop - update")
            self.process_ports()
            port_stats['regular']['added'] = len(self.added_ports)
            port_stats['regular']['updated'] = len(self.updated_ports)
            port_stats['regular']['removed'] = len(self.deleted_ports)
            polling_manager.polling_completed()
        # Sleep off the remainder of the polling interval.
        self.loop_count_and_wait(start)
def __init__(self, vsphere_hostname, vsphere_login, vsphere_password,
             bridge_mappings, polling_interval,
             quitting_rpc_timeout=None):
    """Set up the DVS agent: RPC, SG support, DVS maps, initial sync.

    NOTE(review): vsphere_login / vsphere_password are accepted but
    not referenced in this body -- presumably consumed elsewhere;
    confirm before removing.
    """
    super(DVSAgent, self).__init__()
    # State periodically reported to the neutron server.
    self.agent_state = {
        'binary': 'neutron-dvs-agent',
        'host': cfg.CONF.host,
        'topic': n_const.L2_AGENT_TOPIC,
        'configurations': {
            'bridge_mappings': bridge_mappings,
            'vsphere_hostname': vsphere_hostname
        },
        'agent_type': 'DVS agent',
        'start_flag': True
    }
    report_interval = cfg.CONF.DVS_AGENT.report_interval
    self.polling_interval = polling_interval
    # Security group agent support
    self.context = context.get_admin_context_without_session()
    self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
    self.sg_agent = dvs_rpc.DVSSecurityGroupRpc(
        self.context,
        self.sg_plugin_rpc,
        defer_refresh_firewall=True)
    self.setup_rpc()
    self.run_daemon_loop = True
    self.iter_num = 0
    self.quitting_rpc_timeout = quitting_rpc_timeout
    # Map physical networks to DVS objects, then attach uplink info.
    self.network_map = dvs_util.create_network_map_from_config(
        cfg.CONF.ML2_VMWARE, pg_cache=True)
    uplink_map = dvs_util.create_uplink_map_from_config(
        cfg.CONF.ML2_VMWARE, self.network_map)
    for phys, dvs in self.network_map.iteritems():
        if phys in uplink_map:
            dvs.load_uplinks(phys, uplink_map[phys])
    # Port bookkeeping sets used by the rpc loop.
    self.updated_ports = set()
    self.deleted_ports = set()
    self.known_ports = set()
    self.added_ports = set()
    self.booked_ports = set()
    LOG.info(_LI("Agent out of sync with plugin!"))
    # Initial full sync: everything currently connected is "added".
    connected_ports = self._get_dvs_ports()
    self.added_ports = connected_ports
    if cfg.CONF.DVS.clean_on_restart:
        self._clean_up_vsphere_extra_resources(connected_ports)
    self.fullsync = False
    # The initialization is complete; we can start receiving messages
    self.connection.consume_in_threads()
    if report_interval:
        heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._report_state)
        heartbeat.start(interval=report_interval)
def validate_cluster_switch_mapping(self, cluster_path, switch):
    """Validate the cluster_switch_mapping.

    Returns (True, cluster_mor) when both the cluster path and its
    switch check out, otherwise (False, None).
    """
    if not cluster_path or not switch:
        return False, None
    cluster_mor = resource_util.get_cluster_mor_by_path(self.session,
                                                        cluster_path)
    if not cluster_mor:
        LOG.error(_LE("Invalid cluster: %s."), cluster_path)
        return False, None
    if not self.is_valid_switch(cluster_mor, switch):
        LOG.error(_LE("Invalid Switch: %(switch)s for cluster: "
                      "%(path)s."),
                  {'switch': switch, 'path': cluster_path})
        return False, None
    LOG.info(_LI("Cluster: %(path)s and switch: %(sw)s are "
                 "validated."),
             {'path': cluster_path, 'sw': switch})
    return True, cluster_mor
def _get_or_create_pg(self, pg_name, network, segment):
    """Return the port group named *pg_name*, creating it when missing."""
    try:
        existing = self._get_pg_by_name(pg_name)
    except exceptions.PortGroupNotFound:
        LOG.info(_LI('Network %(name)s is not present in vcenter. '
                     'Perform network creation'), {'name': pg_name})
        return self.create_network(network, segment)
    return existing
def delete_network_postcommit(self, context):
    """After a network is deleted, release its stale local VLANs.

    Only acts when the network has at least one segment of a
    supported (vxlan-like) type; for every stale per-cluster lvid
    entry a background thread notifies the owning OVSvApp agent.
    """
    network = context.current
    segments = context.network_segments
    vxlan_segments = []
    if segments:
        for segment in segments:
            if segment[api.NETWORK_TYPE] in self.supported_network_types:
                vxlan_segments.append(segment)
    if not vxlan_segments:
        return
    try:
        stale_entries = ovsvapp_db.get_stale_local_vlans_for_network(
            network['id'])
        if stale_entries:
            for (vcenter, cluster, lvid) in stale_entries:
                network_info = {'vcenter_id': vcenter,
                                'cluster_id': cluster,
                                'lvid': lvid,
                                'network_id': network['id']}
                if len(vxlan_segments) == 1:
                    # Single-segment network: pass segment details along.
                    seg_id = vxlan_segments[0][api.SEGMENTATION_ID]
                    net_type = vxlan_segments[0][api.NETWORK_TYPE]
                    network_info.update({'segmentation_id': seg_id,
                                         'network_type': net_type})
                LOG.debug("Spawning thread for releasing network "
                          "VNI allocations for %s.", network_info)
                self.threadpool.spawn_n(self._notify_agent, network_info)
                LOG.info(_LI("Spawned a thread for releasing network "
                             "vni allocations for network: %s."),
                         network_info)
    except Exception:
        LOG.exception(_LE("Failed checking stale local vlan allocations."))
def create_network(self, network, segment):
    """Create the DVS port group that backs a neutron network."""
    name = self._get_net_name(network)
    blocked = not network['admin_state_up']
    uplinks = self.uplink_map.get(network['provider:physical_network'])
    try:
        pg_spec = self._build_pg_create_spec(
            name,
            segment['segmentation_id'],
            blocked,
            uplinks)
        pg_create_task = self.connection.invoke_api(
            self.connection.vim,
            'CreateDVPortgroup_Task',
            self._dvs, spec=pg_spec)
        result = self.connection.wait_for_task(pg_create_task)
    except vmware_exceptions.VimException as e:
        raise exceptions.wrap_wmvare_vim_exception(e)
    pg = result.result
    LOG.info(_LI('Network %(name)s created \n%(pg_ref)s'),
             {'name': name, 'pg_ref': pg})
    return pg
def clean_port_filters(self, ports, remove_port=False):
    """Method to remove OVS rules for an existing VM port.

    :param ports: iterable of port ids whose flows should be removed.
    :param remove_port: when True, also drop port-level state via
        _remove_all_flows' third argument.

    Fix: use the idiomatic ``in`` membership test instead of calling
    ``__contains__`` directly on the provider port cache.
    """
    LOG.debug("OVSF Cleaning filters for %s ports.", len(ports))
    if not ports:
        return
    with self.sg_br.deferred() as deferred_sec_br:
        for port_id in ports:
            try:
                if not self.filtered_ports.get(port_id):
                    LOG.debug(
                        "Attempted to remove port filter "
                        "which is not in filtered %s.", port_id)
                    continue
                if not remove_port:
                    self._remove_all_flows(deferred_sec_br, port_id)
                else:
                    self._remove_all_flows(deferred_sec_br, port_id,
                                           True)
                if port_id in self.provider_port_cache:
                    self.provider_port_cache.remove(port_id)
                else:
                    LOG.info(
                        _LI("KeyError:Remove requested for "
                            "port_id not present: %s"),
                        self.provider_port_cache)
                self.filtered_ports.pop(port_id, None)
            except Exception:
                # Best-effort: keep cleaning the remaining ports.
                LOG.exception(_LE("Unable to delete flows for"
                                  " %s."), port_id)
def get_ovsvapp_mitigated_clusters(self, context, filters=None,
                                   fields=None):
    """Return mitigation status rows for all OVSvApp clusters."""
    _admin_check(context, 'GET')
    db_filters = ({entry: filters[entry] for entry in filters}
                  if filters else dict())
    LOG.info(_LI("Retrieving mitigated information of all clusters."))
    try:
        all_entries = self._get_collection_query(
            context, models.OVSvAppClusters, filters=db_filters).all()
    except sa_exc.NoResultFound:
        raise exc.InvalidInput(error_message='Cannot retreive mitigated '
                                             'information.')
    return [{'vcenter_id': entry.vcenter_id,
             'cluster_id': entry.cluster_id,
             'being_mitigated': entry.being_mitigated,
             'threshold_reached': entry.threshold_reached}
            for entry in all_entries]
def update_network(self, network, original=None):
    """Sync a port group with a network's admin state and name.

    Reconfigures the port group only when the blocked flag differs
    from the desired admin state or the network was renamed.
    """
    original_name = self._get_net_name(original) if original else None
    current_name = self._get_net_name(network)
    # admin_state_up False -> block traffic on the port group.
    blocked = not network['admin_state_up']
    try:
        # Look up by the old name when a rename is in progress.
        pg_ref = self._get_pg_by_name(original_name or current_name)
        pg_config_info = self._get_config_by_ref(pg_ref)
        if (pg_config_info.defaultPortConfig.blocked.value != blocked or
                (original_name and original_name != current_name)):
            # we upgrade only defaultPortConfig, because it is inherited
            # by all ports in PortGroup, unless they are explicit
            # overwritten on specific port.
            pg_spec = self._build_pg_update_spec(
                pg_config_info.configVersion,
                blocked=blocked)
            pg_spec.name = current_name
            pg_update_task = self.connection.invoke_api(
                self.connection.vim,
                'ReconfigureDVPortgroup_Task',
                pg_ref, spec=pg_spec)
            self.connection.wait_for_task(pg_update_task)
            LOG.info(_LI('Network %(name)s updated'),
                     {'name': current_name})
    except vmware_exceptions.VimException as e:
        raise exceptions.wrap_wmvare_vim_exception(e)
def book_port(self, network, port_name, segment):
    """Reserve an unbound DVS port on the network's port group.

    Grows the port group when no unbound port is free, retrying on
    concurrent-modification races, then names and unblocks the chosen
    port.  Returns the reserved port's key.
    """
    try:
        net_name = self._get_net_name(self.dvs_name, network)
        pg = self._get_or_create_pg(net_name, network, segment)
        while True:
            try:
                port_info = self._lookup_unbound_port(pg)
                break
            except exceptions.UnboundPortNotFound:
                try:
                    self._increase_ports_on_portgroup(pg)
                except (vmware_exceptions.VMwareDriverException,
                        exceptions.VMWareDVSException) as e:
                    if dvs_const.CONCURRENT_MODIFICATION_TEXT in e.message:
                        LOG.info(_LI('Concurent modification on '
                                     'increase port group.'))
                        continue
                    # NOTE(review): other driver errors are swallowed
                    # here and the loop retries -- confirm this is
                    # intentional rather than a missing re-raise.
        builder = SpecBuilder(self.connection.vim.client.factory)
        port_settings = builder.port_setting()
        port_settings.blocked = builder.blocked(False)
        update_spec = builder.port_config_spec(
            port_info.config.configVersion, port_settings,
            name=port_name)
        update_spec.key = port_info.key
        update_task = self.connection.invoke_api(
            self.connection.vim, 'ReconfigureDVPort_Task',
            self._dvs, port=[update_spec])
        self.connection.wait_for_task(update_task)
        return port_info.key
    except vmware_exceptions.VimException as e:
        raise exceptions.wrap_wmvare_vim_exception(e)
def _notify_agent(self, network_info):
    """Send a device_delete RPC for a network to one OVSvApp agent.

    Uses the host given in *network_info* when present, otherwise
    picks an agent from the cluster.  Failures to find an agent or to
    deliver the RPC are logged, never raised (runs in a worker
    thread).
    """
    host = None
    cluster_id = network_info['cluster_id']
    if 'host' in network_info:
        host = network_info['host']
    else:
        agent = self._get_ovsvapp_agent_from_cluster(self.context,
                                                     cluster_id)
        LOG.debug("Agent chosen for notification: %s.", agent)
        if agent and 'host' in agent:
            host = agent['host']
        else:
            # host is still None here; the log records the lookup miss.
            LOG.error(_LE("Failed to find OVSvApp Agent with host "
                          "%(host)s while releasing network allocations "
                          "for %(cluster)s in vCenter %(vcenter)s."),
                      {'host': host,
                       'vcenter': network_info['vcenter_id'],
                       'cluster': cluster_id})
            return
    try:
        LOG.info(_LI("Initiating device_delete RPC for network "
                     "%(network)s to OVSvApp agent on host %(host)s."),
                 {'host': host, 'network': network_info})
        self.notifier.device_delete(self.context, network_info, host,
                                    cluster_id)
    except Exception:
        LOG.exception(_LE("Failed to notify agent to delete port group."))
def get_ovsvapp_mitigated_cluster(self, context, vcenter_id, fields=None):
    """Return the mitigation status row for one vCenter cluster."""
    _admin_check(context, 'GET')
    # Composite id is "<vcenter>:<cluster>" with '/' encoded as '|'.
    mitigated_info = vcenter_id.split(':')
    vcenter_id = mitigated_info[0]
    cluster_id = mitigated_info[1].replace('|', '/')
    LOG.info(_LI("Retrieving mitigated information for vcenter_id"
                 " %s."), vcenter_id)
    query = context.session.query(models.OVSvAppClusters)
    try:
        cluster_row = query.filter(
            models.OVSvAppClusters.vcenter_id == vcenter_id,
            models.OVSvAppClusters.cluster_id == cluster_id).one()
    except sa_exc.NoResultFound:
        _msg = ("No entry found for specified vCenter %(vcenter_id)s "
                "cluster %(cluster_id)s") % {'vcenter_id': vcenter_id,
                                             'cluster_id': cluster_id}
        raise exc.InvalidInput(error_message=_msg)
    return {'vcenter_id': cluster_row.vcenter_id,
            'cluster_id': cluster_row.cluster_id,
            'being_mitigated': cluster_row.being_mitigated,
            'threshold_reached': cluster_row.threshold_reached}
def clean_port_filters(self, ports, remove_port=False):
    """Method to remove OVS rules for an existing VM port.

    :param ports: iterable of port ids whose flows should be removed.
    :param remove_port: when True, also drop port-level state via
        _remove_all_flows' third argument.

    Fix: use the idiomatic ``in`` membership test instead of calling
    ``__contains__`` directly on the provider port cache.
    """
    LOG.debug("OVSF Cleaning filters for %s ports.", len(ports))
    if not ports:
        return
    with self.sg_br.deferred() as deferred_sec_br:
        for port_id in ports:
            try:
                if not self.filtered_ports.get(port_id):
                    LOG.debug("Attempted to remove port filter "
                              "which is not in filtered %s.", port_id)
                    continue
                if not remove_port:
                    self._remove_all_flows(deferred_sec_br, port_id)
                else:
                    self._remove_all_flows(deferred_sec_br, port_id,
                                           True)
                if port_id in self.provider_port_cache:
                    self.provider_port_cache.remove(port_id)
                else:
                    LOG.info(_LI("KeyError:Remove requested for "
                                 "port_id not present: %s"),
                             self.provider_port_cache)
                self.filtered_ports.pop(port_id, None)
            except Exception:
                # Best-effort: keep cleaning the remaining ports.
                LOG.exception(_LE("Unable to delete flows for"
                                  " %s."), port_id)
def _process_port_filter(self, ports):
    """Validate ports against their DVS keys, then apply SG rules.

    A port whose dvs_port_key changed is re-verified against vCenter;
    mismatched or missing ports are evicted from self.dvs_ports and
    skipped.  Rules are applied only to the surviving ports.
    """
    LOG.info(_LI("Set security group rules for ports %s"),
             [p['id'] for p in ports])
    ports_for_update = []
    for port in ports:
        port_device = port['device']
        stored_port_key = self.dvs_ports.get(port_device, {}).\
            get('binding:vif_details', {}).get('dvs_port_key')
        port_key = port.get('binding:vif_details', {}).get('dvs_port_key')
        if port_key and port_key != stored_port_key:
            # Key changed (or port is new): confirm the DVS port still
            # belongs to this neutron port before trusting it.
            port_dvs = self._get_port_dvs(port)
            if port_dvs:
                try:
                    port_info = port_dvs.get_port_info(port)
                    if port['id'] == port_info.config.name:
                        self.dvs_ports[port_device] = port
                        ports_for_update.append(port)
                    else:
                        self.dvs_ports.pop(port_device, None)
                except exceptions.PortNotFound:
                    self.dvs_ports.pop(port_device, None)
            else:
                self.dvs_ports.pop(port_device, None)
        else:
            self.dvs_ports[port_device] = port
            ports_for_update.append(port)
    self._apply_sg_rules_for_port(ports_for_update)
def check_to_reclaim_local_vlan(port_info):
    """Decrement a network's port count; report when its lvid is free.

    Returns the local vlan id once no ports remain on the network for
    the given vCenter/cluster, -1 otherwise.  The allocation row is
    read and updated under a row-level lock.
    """
    lvid = -1
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        res_keys = ['vcenter_id', 'cluster_id', 'network_id']
        res = dict((k, port_info[k]) for k in res_keys)
        try:
            query = session.query(models.ClusterVNIAllocations)
            allocation = (query.filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.network_id ==
                res['network_id']).with_lockmode('update').one())
            count = allocation.network_port_count
            if count >= 1:
                count -= 1
                allocation.update({'network_port_count': count})
                LOG.debug(
                    "Decremented the allocated port count for network "
                    "%s.", res)
            if count == 0:
                lvid = allocation.lvid
                LOG.info(_LI("lvid can be released for network: %s."), res)
        except sa_exc.NoResultFound:
            # Nothing to do, may be another controller cleared the record
            # We will just log and return back status as False.
            LOG.debug(
                "Network %(network)s is already de-allocated for "
                "cluster %(cluster)s.", {
                    'network': port_info['network_id'],
                    'cluster': port_info['cluster_id']
                })
    return lvid
def refresh_firewall(self, device_ids=None):
    """Removes all rules for input port_ids and puts in new rules for them.

    This routine erases all rules and puts in new rules for the
    input ports shipped as device_ids.

    :param device_ids: set of port_ids for which firewall rules need
    to be refreshed.
    """
    targets = device_ids or self.firewall.ports.keys()
    if not targets:
        LOG.info(_LI("No ports here to refresh firewall."))
        return
    LOG.info(_LI("Refresh firewall rules for %s ports."), len(targets))
    self._process_port_set(set(targets), True)
def remove_port_filter(self, ports):
    """Queue rule removal for every known port id in *ports*."""
    LOG.info(_LI("Remove ports with rules"))
    for port_id in ports:
        known_port = self.dvs_ports.get(port_id)
        if not known_port:
            continue
        self.remove_queue.put(known_port)
        self.dvs_ports.pop(port_id, None)
def _process_port_filter(self, ports):
    """Validate ports against their DVS keys, then apply SG rules.

    A port whose dvs_port_key changed is re-verified against vCenter;
    mismatched or missing ports are evicted from self.dvs_ports and
    skipped.  Rules are applied only to the surviving ports.
    """
    LOG.info(_LI("Set security group rules for ports %s"),
             [p['id'] for p in ports])
    ports_for_update = []
    for port in ports:
        port_device = port['device']
        stored_port_key = self.dvs_ports.get(port_device, {}). \
            get('binding:vif_details', {}).get('dvs_port_key')
        port_key = port.get('binding:vif_details', {}).get('dvs_port_key')
        if port_key and port_key != stored_port_key:
            # Key changed (or port is new): confirm the DVS port still
            # belongs to this neutron port before trusting it.
            port_dvs = self._get_port_dvs(port)
            if port_dvs:
                try:
                    port_info = port_dvs.get_port_info(port)
                    if port['id'] == port_info.config.name:
                        self.dvs_ports[port_device] = port
                        ports_for_update.append(port)
                    else:
                        self.dvs_ports.pop(port_device, None)
                except exceptions.PortNotFound:
                    self.dvs_ports.pop(port_device, None)
            else:
                self.dvs_ports.pop(port_device, None)
        else:
            self.dvs_ports[port_device] = port
            ports_for_update.append(port)
    self._apply_sg_rules_for_port(ports_for_update)
def create_network(self, network, virtual_switch):
    """Create a port group for *network* on the given virtual switch."""
    vlan_id = network.config.vlan.vlanIds[0]
    LOG.info(_LI("Creating portgroup %(nm)s with vlan id %(vid)s "
                 "on virtual switch %(sw)s."),
             {"nm": network.name,
              "vid": vlan_id,
              "sw": virtual_switch.name})
    network_util.create_port_group(self.session,
                                   dvs_name=virtual_switch.name,
                                   pg_name=network.name,
                                   vlan_id=vlan_id)
def update_network_precommit(self, current, segment, original):
    """Push a network update to the DVS mapped to the segment."""
    try:
        dvs = self._lookup_dvs_for_context(segment)
    except exceptions.NoDVSForPhysicalNetwork as e:
        LOG.info(_LI('Network %(id)s not updated. Reason: %(reason)s'),
                 {'id': current['id'],
                  'reason': e.message})
        return
    dvs.update_network(current, original)
def _initialize_managers(self):
    """Load and initialize the configured network driver manager."""
    self.state = constants.AGENT_INITIALIZING
    driver_name = cfg.CONF.OVSVAPP.network_manager
    LOG.info(_LI("Loading network driver manager: %s."), driver_name)
    self.net_mgr = utils.load_object(driver_name,
                                     manager.DriverManager,
                                     self)
    self.net_mgr.initialize_driver()
    self.state = constants.AGENT_INITIALIZED
def _get_or_create_pg(self, pg_name, network, segment):
    """Return the port group named *pg_name*, creating it when missing.

    Fix: the fallback path logged via LOG.debug with an eagerly
    '%'-interpolated _LI() message -- the translation marker received
    an already-formatted string and interpolation ran even when the
    level was disabled.  Log at info with a lazy substitution dict,
    matching the sibling implementation of this method in this file.
    """
    try:
        return self._get_pg_by_name(pg_name)
    except exceptions.PortGroupNotFound:
        LOG.info(_LI('Network %(name)s is not present in vcenter. '
                     'Perform network creation'), {'name': pg_name})
        return self.create_network(network, segment)
def _report_state(self):
    """Report agent state to the server; log (never raise) on failure."""
    try:
        status = self.state_rpc.report_state(self.context,
                                             self.agent_state,
                                             True)
        if status == n_const.AGENT_REVIVED:
            LOG.info(_LI('Agent has just revived. Do a full sync.'))
        self.agent_state.pop('start_flag', None)
    except Exception:
        LOG.exception(_LE("Failed reporting state!"))
def get_vlanid_for_port_group(self, dvs_name, pg_name):
    """Fetch the vlan id configured on a DVS port group."""
    LOG.info(_LI("Fetching details of port group %(pg)s on DVS %(dvs)s."),
             {'pg': pg_name, 'dvs': dvs_name})
    return network_util.get_portgroup_details(self.session,
                                              dvs_name, pg_name)
def prepare_devices_filter(self, device_ids):
    """Install SG filters for newly seen devices."""
    if not device_ids:
        return
    LOG.info(_LI("Preparing filters for devices %s"), device_ids)
    rules_by_device = self.plugin_rpc.security_group_rules_for_devices(
        self.context, list(device_ids))
    self.firewall.prepare_port_filter(rules_by_device.values())
def delete_port_postcommit(self, context):
    """Delete port non-database commit event.

    For compute ports on supported segment types, checks whether the
    port's network lvid can be reclaimed on its vCenter cluster and,
    if so, spawns a thread to notify the OVSvApp agent; otherwise
    fires a provider update check.
    """
    port = context.current
    if port and port['device_owner'].startswith('compute'):
        segment = context.top_bound_segment
        if (segment and segment[api.NETWORK_TYPE] in
                self.supported_network_types):
            LOG.debug("OVSvApp Mech driver - delete_port_postcommit for "
                      "port: %s with network_type as %s.",
                      port['id'], segment[api.NETWORK_TYPE])
            vni = segment[api.SEGMENTATION_ID]
            network_type = segment[api.NETWORK_TYPE]
            host = port[portbindings.HOST_ID]
            agent = None
            vcenter = None
            cluster = None
            net_info = None
            # Resolve the vCenter/cluster from the OVSvApp agent
            # registered on this host.
            agents = self.plugin.get_agents(
                self.context,
                filters={
                    'agent_type': [ovsvapp_const.AGENT_TYPE_OVSVAPP],
                    'host': [host]
                })
            if agents:
                agent = agents[0]
                vcenter = agent['configurations']['vcenter_id']
                cluster = agent['configurations']['cluster_id']
                net_info = {'vcenter_id': vcenter,
                            'cluster_id': cluster,
                            'network_id': port['network_id'],
                            'segmentation_id': vni,
                            'network_type': network_type,
                            'host': host}
            else:
                LOG.debug("Not a valid ESX port: %s.", port['id'])
                return
            try:
                lvid = ovsvapp_db.check_to_reclaim_local_vlan(net_info)
                if lvid >= 1:
                    # Last port gone: release the lvid asynchronously.
                    net_info.update({'lvid': lvid})
                    LOG.debug("Spawning thread for releasing network "
                              "VNI allocations for %s.", net_info)
                    self.threadpool.spawn_n(self._notify_agent, net_info)
                    LOG.info(_LI("Spawned a thread for releasing network "
                                 "vni allocations for network: %s."),
                             net_info)
            except Exception:
                LOG.exception(_LE("Failed to check for reclaiming "
                                  "local vlan."))
        else:
            self._check_and_fire_provider_update(port)
def _has_port(self, min_port): if min_port: if self.protocol == 'icmp' or self.protocol == 'ipv6-icmp': LOG.info(_LI('Vmware dvs driver does not support ' '"type" and "code" for ICMP/ipv6-icmp protocol.')) return False else: return True else: return False
def create_port_group(session, dvs_name, pg_name, vlan_id):
    """Create a portgroup *pg_name* with *vlan_id* on DVS *dvs_name*.

    Idempotent: if the portgroup already exists with the same vlan id
    this is a no-op; if it exists with a different vlan id a
    RunTimeError is raised. Any vCenter task failure is logged and
    re-raised as RunTimeError.
    """
    port_group_mor = get_portgroup_mor_by_name(session, dvs_name, pg_name)
    if port_group_mor:
        port_group_config = session._call_method(
            vim_util, "get_dynamic_property", port_group_mor,
            "DistributedVirtualPortgroup", "config")
        existing_vlan = port_group_config.defaultPortConfig.vlan.vlanId
        if vlan_id == existing_vlan:
            LOG.debug("Portgroup %(pg)s with vlan id %(vid)s already exists",
                      {'pg': pg_name, 'vid': vlan_id})
            return
        else:
            LOG.info(_LI("Portgroup %(pg)s already exists "
                         "but with vlan id %(vid)s"),
                     {'pg': pg_name, 'vid': existing_vlan})
            # Bug fix: the message must be %-formatted before raising;
            # previously pg_name was passed as a stray second positional
            # argument and never interpolated into the message.
            raise error_util.RunTimeError("Inconsistent vlan id for portgroup"
                                          " %s" % pg_name)
    else:
        client_factory = session._get_vim().client.factory
        add_prt_grp_spec = _get_add_vswitch_port_group_spec(
            client_factory, pg_name, vlan_id)
        # Explicitly unblock ports on the new portgroup (non-inherited).
        blocked = client_factory.create('ns0:BoolPolicy')
        blocked.value = False
        blocked.inherited = False
        add_prt_grp_spec.defaultPortConfig.blocked = blocked
        dvs_mor = get_dvs_mor_by_name(session, dvs_name)
        try:
            task_ref = session._call_method(
                session._get_vim(), "AddDVPortgroup_Task", dvs_mor,
                spec=add_prt_grp_spec)
            session.wait_for_task(task_ref)
            LOG.info(_LI("Successfully created portgroup "
                         "%(pg)s with vlan id %(vid)s"),
                     {'pg': pg_name, 'vid': vlan_id})
        except Exception as e:
            LOG.exception(_LE("Failed to create portgroup %(pg)s with "
                              "vlan id %(vid)s on vCenter. Cause : %(err)s"),
                          {'pg': pg_name, 'vid': vlan_id, 'err': e})
            raise error_util.RunTimeError("Failed to create portgroup %s "
                                          "with vlan id %s on vCenter.Cause"
                                          " : %s" % (pg_name, vlan_id, e))
def _fetch_and_apply_rules(self, dev_ids, update=False): ovsvapplock.acquire() # This will help us prevent duplicate processing of same port # when we get back to back updates for same SG or Network. self.devices_to_refilter = self.devices_to_refilter - set(dev_ids) ovsvapplock.release() sg_info = self.ovsvapp_sg_rpc.security_group_info_for_esx_devices( self.context, dev_ids) time.sleep(0) LOG.debug( "Successfully serviced security_group_info_for_esx_devices " "RPC for %s.", dev_ids) ports = sg_info.get('ports') for port_id in ports: if port_id in dev_ids: port_info = { 'member_ips': sg_info.get('member_ips'), 'ports': { port_id: ports[port_id] } } port_sg_rules = self.expand_sg_rules(port_info) if len(port_sg_rules.get(port_id).get( 'sg_provider_rules')) == 0: LOG.info(_LI("Missing Provider Rules for port %s"), port_id) self.devices_to_refilter.add(port_id) return if self.deleted_devices_dict.get(port_id) is None: self._update_device_port_sg_map(port_sg_rules, port_id, update) LOG.debug("Port Cache: %s", port_sg_rules[port_id]) if len(port_sg_rules[port_id]['security_group_rules']) > 0 \ or \ port_sg_rules[port_id].get('security_group_rules_deleted') \ is not None: LOG.info(_LI("Applying Changed Rules for Port %s"), port_id) self.firewall.update_port_filter( port_sg_rules[port_id]) else: LOG.info(_LI("NO RULES CHANGED for Port %s"), port_id)
def delete_network_postcommit(self, current, segment):
    """Delete the backing DVS network after the DB commit.

    A missing DVS mapping for the segment's physical network is logged
    and ignored; invalid networks are skipped silently.
    """
    try:
        dvs = self._lookup_dvs_for_context(segment)
    except exceptions.NoDVSForPhysicalNetwork as e:
        # Fix: pass the substitution dict as a logging argument instead
        # of eagerly %-formatting the (translated) message, so
        # interpolation is deferred and skipped when INFO is disabled.
        LOG.info(_LI('Network %(id)s not deleted. Reason: %(reason)s'),
                 {'id': current['id'], 'reason': e.message})
    except exceptions.InvalidNetwork:
        pass
    else:
        # Lookup succeeded: remove the network from the DVS.
        dvs.delete_network(current)
def _has_port(self, min_port): if min_port: if self.protocol == 'icmp' or self.protocol == 'ipv6-icmp': LOG.info( _LI('Vmware dvs driver does not support ' '"type" and "code" for ICMP/ipv6-icmp protocol.')) return False else: return True else: return False
def prepare_firewall(self, device_ids):
    """Install new firewall rules for the ports in *device_ids*.

    Thin wrapper: logs the batch size and delegates the actual rule
    creation to _process_port_set.

    :param device_ids: set of port_ids for which firewall rules need
                       to be created.
    """
    batch_size = len(device_ids)
    LOG.info(_LI("Prepare firewall rules for %s ports."), batch_size)
    self._process_port_set(device_ids)
def update_and_get_cluster_lock(vcenter_id, cluster_id):
    """Try to acquire a maintenance lock on a (vcenter, cluster) row.

    :returns: SUCCESS when this caller acquired the lock
              (being_mitigated set, or a fresh row inserted),
              RETRY when another caller is already mitigating,
              GIVE_UP when threshold_reached is set and the cluster
              needs manual attention.
    """
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        try:
            query = session.query(models.OVSvAppClusters)
            # SELECT ... FOR UPDATE so concurrent workers serialize on
            # the cluster row.
            cluster_row = (query.filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id ==
                cluster_id).with_lockmode('update').one())
            if not cluster_row.threshold_reached:
                if not cluster_row.being_mitigated:
                    # Lock is free: claim it for this caller.
                    cluster_row.update({'being_mitigated': True})
                    LOG.info(_LI("Blocked the cluster %s for maintenance."),
                             cluster_id)
                    return SUCCESS
                else:
                    # Someone else holds the lock.
                    LOG.info(_LI("Cluster %s is under maintenance. "
                                 "Will retry later"), cluster_id)
                    return RETRY
            else:
                # Mitigation attempts exhausted for this cluster.
                LOG.warning(_LW("Cluster %(id)s in vCenter %(vc)s needs "
                                "attention. "
                                "Not able to put hosts to maintenance!"),
                            {'id': cluster_id, 'vc': vcenter_id})
                return GIVE_UP
        except sa_exc.NoResultFound:
            # First fault case in this cluster_id.
            cluster_row = {'vcenter_id': vcenter_id,
                           'cluster_id': cluster_id,
                           'being_mitigated': True}
            session.execute(models.OVSvAppClusters.__table__.insert(),
                            cluster_row)
            LOG.info(_LI("Blocked the cluster %s for maintenance."),
                     cluster_id)
            return SUCCESS
def delete_port_group(session, dvs_name, pg_name):
    """Deletes a port group from DVS.

    A missing portgroup is logged and ignored; any failure of the
    destroy task is logged and re-raised as RunTimeError.
    """
    pg_mor = get_portgroup_mor_by_name(session, dvs_name, pg_name)
    if not pg_mor:
        LOG.info(_LI("portgroup %(pg)s not present on dvs %(dvs)s"),
                 {'pg': pg_name, 'dvs': dvs_name})
        return
    try:
        destroy_task = session._call_method(session._get_vim(),
                                            "Destroy_Task", pg_mor)
        session.wait_for_task(destroy_task)
        LOG.info(_LI("Successfully deleted portgroup %(pg)s from "
                     "dvs %(dvs)s"), {'pg': pg_name, 'dvs': dvs_name})
    except Exception as e:
        LOG.exception(_LE("Failed to delete portgroup %(pg)s from "
                          "dvs %(dvs)s .Cause : %(err)s"),
                      {'pg': pg_name, 'dvs': dvs_name, 'err': e})
        raise error_util.RunTimeError("Failed to delete portgroup %s "
                                      "on dvs %s on vCenter.Cause"
                                      " : %s" % (pg_name, dvs_name, e))
def __init__(self):
    """Set up VIF details and the DVS agent notifier, then delegate
    to the base mechanism driver."""
    # Port filtering is only advertised when the firewall is enabled.
    sg_enabled = securitygroups_rpc.is_firewall_enabled()
    self.vif_type = dvs_const.DVS
    self.vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
                        portbindings.OVS_HYBRID_PLUG: sg_enabled}
    self.context = context.get_admin_context_without_session()
    self.dvs_notifier = dvs_agent_rpc_api.DVSClientAPI(self.context)
    LOG.info(_LI('DVS_notifier'))
    super(VMwareDVSMechanismDriver, self).__init__(
        dvs_const.AGENT_TYPE_DVS,
        self.vif_type,
        self.vif_details)