Example #1
 def check_ovsvapp_data_path(self, agent):
     agent_config = agent['configurations']
     # Check if the Data path is alright.
     monitoring_ip = agent_config.get('monitoring_ip')
     datapath_health = self._check_datapath_health(monitoring_ip)
     if datapath_health:
         LOG.info(_LI("Data path looks to be OK on %s. "
                      "Skipping mitigation."), agent['host'])
         LOG.warning(_LW("Issues encountered in receiving "
                         "heartbeats from OVSvApp Agent on "
                         "host %s."), agent['host'])
     else:
         LOG.warning(_LW("Data path seems to be broken already on %s."
                         "Will continue with mitigation."), agent['host'])
     return datapath_health
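The _check_datapath_health helper is not part of this listing; a minimal sketch, assuming it is a plain ICMP reachability probe against the agent's monitoring IP (the ping-based scheme is an assumption, not the project's actual implementation):

import os
import subprocess


def _check_datapath_health(self, monitoring_ip):
    # Hypothetical sketch: probe the monitoring IP with a single ping
    # and treat a zero exit status as a healthy data path.
    if not monitoring_ip:
        return False
    with open(os.devnull, 'wb') as devnull:
        returncode = subprocess.call(
            ['ping', '-c', '1', '-W', '2', monitoring_ip],
            stdout=devnull, stderr=devnull)
    return returncode == 0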
Example #2
    def update_port_filter(self, port):
        """Method to update OVS rules for an existing VM port."""
        LOG.debug("OVSF Updating port: %s filter.", port['id'])
        if port['id'] not in self.filtered_ports:
            LOG.warning(_LW("Attempted to update port filter which is not "
                            "filtered %s."), port['id'])
            return
        port_cookie = self.get_cookie(port['id'])
        port_provider_cookie = self.get_cookie('pr' + port['id'])
        try:
            with self.sg_br.deferred(full_ordered=True, order=(
                'del', 'mod', 'add')) as deferred_br:
                if port['id'] not in self.provider_port_cache:
                    self._remove_all_flows(deferred_br, port['id'], True)
                    self._add_flows(deferred_br, port,
                                    port_provider_cookie, True)
                    self.provider_port_cache.add(port['id'])
#                else:
#                    self._remove_all_flows(deferred_br, port['id'])
                self._setup_aap_flows(deferred_br, port)
                self._add_flows(deferred_br, port, port_cookie)
                if 'security_group_rules_deleted' in port:
                    self._remove_flows(deferred_br, port, port_cookie)

            self.filtered_ports[port['id']] = self._get_compact_port(port)
        except Exception:
            LOG.exception(_LE("Unable to update flows for %s."), port['id'])
Example #3
def update_port_rules(dvs, ports):
    try:
        builder = dvs_util.SpecBuilder(dvs.connection.vim.client.factory)
        port_config_list = []
        for port in ports:
            try:
                if port['binding:vif_details'].get('dvs_port_key') is not None:
                    port_info = dvs._get_port_info_by_portkey(
                        port['binding:vif_details']['dvs_port_key'])
                else:
                    port_info = dvs.get_port_info_by_name(port['id'])
            except exceptions.PortNotFound:
                LOG.warning(_LW("Port %s was not found. Security rules "
                                "can not be applied."), port['id'])
                continue

            port_config = port_configuration(builder,
                                             str(port_info['key']),
                                             port['security_group_rules'])
            port_config.configVersion = port_info['config']['configVersion']
            port_config_list.append(port_config)
        if port_config_list:
            task = dvs.connection.invoke_api(
                dvs.connection.vim,
                'ReconfigureDVPort_Task',
                dvs._dvs,
                port=port_config_list
            )
            return dvs.connection.wait_for_task(task)
    except vmware_exceptions.VimException as e:
        raise exceptions.wrap_wmvare_vim_exception(e)
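A hedged usage sketch for update_port_rules; the dvs handle and the port-dict keys below are inferred from the lookups the function performs, not taken from project documentation:

# Illustrative call; 'dvs' is assumed to be an already-connected DVS
# controller object, and the identifiers are made up.
ports = [{
    'id': '2f5c2a86-1a6c-4f6b-8c3d-9f8a4a2d5e01',
    'binding:vif_details': {'dvs_port_key': '1045'},
    'security_group_rules': [],
}]
update_port_rules(dvs, ports)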
Example #4
 def update_port(self, network=None, port=None, virtual_nic=None):
     device_id = port.vm_id
     mac_address = port.mac_address
     vm_mor = resource_util.get_vm_mor_for_uuid(self.session, device_id)
     if not vm_mor:
         LOG.warning(
             _LW("VM %(vm)s with mac address %(mac)s for "
                 "port %(uuid)s not found on this node."), {
                     'vm': device_id,
                     'mac': mac_address,
                     'uuid': port.uuid
                 })
         return False
     if port.port_status == constants.PORT_STATUS_UP:
         enabled = True
     elif port.port_status == constants.PORT_STATUS_DOWN:
         enabled = False
     else:
         msg = (_("Invalid port status %(port)s in update for port %(id)s"),
                {
                    'port': port.port_status,
                    'id': port.uuid
                })
         raise error.OVSvAppNeutronAgentError(msg)
     action = "Enabling" if enabled else "Disabling"
     LOG.debug(
         "%(action)s port used by VM %(id)s for VNIC with "
         "mac address %(mac)s.", {
             'action': action,
             'id': device_id,
             'mac': mac_address
         })
     status = network_util.enable_disable_port_of_vm(
         self.session, vm_mor, mac_address, enabled)
     return status
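The port object consumed by update_port is attribute-based rather than a dict; a stand-in sketch, with field names inferred from the accesses above (the real model class lives elsewhere in the agent):

from collections import namedtuple

# Hypothetical stand-in: only the attributes read by update_port.
PortModel = namedtuple('PortModel',
                       ['vm_id', 'mac_address', 'uuid', 'port_status'])

port = PortModel(vm_id='4225a9c6-1fd2-4f1e-a1c4-4e4e2f5a9b10',
                 mac_address='fa:16:3e:12:34:56',
                 uuid='7d3cf5a0-88a9-4f3e-9e5c-1f2d3c4b5a69',
                 port_status='UP')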
Example #5
 def _get_port_info_by_name(self, name, port_list=None):
     if port_list is None:
         port_list = self.get_ports(None)
     ports = [port for port in port_list if port.config.name == name]
     if not ports:
         raise exceptions.PortNotFound(id=name)
     if len(ports) > 1:
         LOG.warning(_LW("Multiple ports found for name %s."), name)
     return ports[0]
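A minimal exercise of the lookup path using stand-in objects; this is illustrative only, not the project's test suite:

from unittest import mock

port = mock.Mock()
port.config.name = 'net-uuid-port-1'
dvs = mock.Mock()

# Call the function directly with an explicit port_list, passing a mock
# in place of self; get_ports is never touched on this path.
found = _get_port_info_by_name(dvs, 'net-uuid-port-1', port_list=[port])
assert found is port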
Example #6
 def ping_remote():
     try:
         self.ping_host(source, dest)
     except lib_exc.SSHExecCommandFailed:
         LOG.warning(_LW('Failed to ping IP: %(dest)s '
                         'via a ssh connection from: %(source)s.'),
                     {'dest': dest, 'source': source})
         return not should_succeed
     return should_succeed
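ping_remote closes over source, dest and should_succeed from an enclosing test helper; a plausible shape for that wrapper is sketched below (the retry helper name and timeouts are assumptions):

def _check_remote_connectivity(self, source, dest, should_succeed=True):
    def ping_remote():
        try:
            self.ping_host(source, dest)
        except lib_exc.SSHExecCommandFailed:
            LOG.warning(_LW('Failed to ping IP: %(dest)s '
                            'via a ssh connection from: %(source)s.'),
                        {'dest': dest, 'source': source})
            return not should_succeed
        return should_succeed

    # Poll until ping_remote reports the expected reachability state.
    return test_utils.call_until_true(ping_remote, 120, 1)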
Example #7
 def initialize_driver(self):
     self.stop()
     self.driver = None
     self.vcenter_ip = cfg.CONF.VMWARE.vcenter_ip
     self.vcenter_username = cfg.CONF.VMWARE.vcenter_username
     self.vcenter_password = cfg.CONF.VMWARE.vcenter_password
     self.vcenter_api_retry_count = cfg.CONF.VMWARE.vcenter_api_retry_count
     self.wsdl_location = cfg.CONF.VMWARE.wsdl_location
     self.https_port = cfg.CONF.VMWARE.https_port
     self.ca_path = None
     self.connection_timeout = cfg.CONF.VMWARE.connection_timeout
     if cfg.CONF.VMWARE.cert_check:
         if not cfg.CONF.VMWARE.cert_path:
             LOG.error(_LE("SSL certificate path is not defined to "
                           "establish secure vCenter connection. "
                           "Aborting agent!"))
             raise SystemExit(1)
         elif not os.path.isfile(cfg.CONF.VMWARE.cert_path):
             LOG.error(_LE("SSL certificate does not exist at "
                           "the specified path %s. Aborting agent!"),
                       cfg.CONF.VMWARE.cert_path)
             raise SystemExit(1)
         else:
             self.ca_path = cfg.CONF.VMWARE.cert_path
     if (self.vcenter_ip and self.vcenter_username and
             self.vcenter_password and self.wsdl_location):
         vim_session.ConnectionHandler.set_vc_details(
             self.vcenter_ip,
             self.vcenter_username,
             self.vcenter_password,
             self.vcenter_api_retry_count,
             self.wsdl_location,
             self.ca_path,
             self.connection_timeout,
             self.https_port)
         vim_session.ConnectionHandler.start()
         if self.connection_thread:
             self.connection_thread.kill()
         self.connection_thread = eventlet.spawn(
             vim_session.ConnectionHandler.try_connection)
         try:
             self.connection_thread.wait()
         except greenlet.GreenletExit:
             LOG.warning(_LW("Thread waiting on vCenter connection "
                             "exited."))
             return
     else:
         LOG.error(_LE("Must specify vcenter_ip, vcenter_username, "
                       "vcenter_password and wsdl_location."))
         return
     self.driver = dvs_driver.DvsNetworkDriver()
     self.driver.set_callback(self.netcallback)
     for mapping in cfg.CONF.VMWARE.cluster_dvs_mapping:
         cluster_dvs_list = self._parse_mapping(mapping)
         for cluster, vds in cluster_dvs_list:
             self._add_cluster(cluster, vds)
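_parse_mapping is not included here; a sketch under the assumption that each cluster_dvs_mapping entry has the form 'ClusterPath:VDSName', with multiple clusters allowed on the left-hand side:

def _parse_mapping(self, mapping):
    # Hypothetical parser: "DC1/Cluster1,DC1/Cluster2:VDS1" yields
    # [("DC1/Cluster1", "VDS1"), ("DC1/Cluster2", "VDS1")].
    cluster_dvs_list = []
    try:
        clusters, vds = mapping.split(':', 1)
    except ValueError:
        LOG.error(_LE("Invalid cluster_dvs_mapping: %s."), mapping)
        return cluster_dvs_list
    for cluster in clusters.split(','):
        cluster_dvs_list.append((cluster.strip(), vds.strip()))
    return cluster_dvs_list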
Example #8
 def get_plugin_and_initialize(self):
     """Initializes plugin and populates list of all agents."""
     try:
         self.context = neutron_context.get_admin_context()
         self.plugin = directory.get_plugin()
         if not self.plugin:
             return False
         self.agent_ext_support = self._check_plugin_ext_support('agent')
     except Exception:
         LOG.warning(_LW("Failed initialization of agent monitor.."))
         return False
     return True
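_check_plugin_ext_support is referenced but not shown; a plausible sketch, assuming the standard supported_extension_aliases attribute on the core plugin:

def _check_plugin_ext_support(self, extension):
    # Hypothetical: report whether the loaded core plugin advertises
    # the given extension alias.
    try:
        if self.plugin:
            return extension in self.plugin.supported_extension_aliases
    except Exception:
        LOG.exception(_LE("%s extension is not supported."), extension)
    return False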
Example #9
 def monitor_events(self):
     try:
         LOG.info(_LI("Starting monitoring for vCenter updates"))
         version = ""
         self.state = constants.DRIVER_RUNNING
         while self.state == constants.DRIVER_RUNNING:
             try:
                 LOG.debug("Waiting for vCenter updates...")
                 try:
                     updateSet = self.session._call_method(
                         vim_util, "wait_for_updates_ex", version)
                     if self.state != constants.DRIVER_RUNNING:
                         LOG.error(_LE("Driver is not in running state."))
                         break
                 except error_util.SocketTimeoutException:
                     # Ignore timeout.
                     LOG.warning(
                         _LW("Ignoring socket timeouts while "
                             "monitoring for vCenter updates."))
                     continue
                 if updateSet:
                     version = updateSet.version
                     events = self._process_update_set(updateSet)
                     LOG.debug("Sending events : %s.", events)
                     self.dispatch_events(events)
             except exceptions.VimFaultException as e:
                 # InvalidCollectorVersionFault happens
                 # on session re-connect.
                 # Re-initialize WaitForUpdatesEx.
                 if "InvalidCollectorVersion" in e.fault_list:
                     LOG.debug("InvalidCollectorVersion - "
                               "Re-initializing vCenter updates "
                               "monitoring.")
                     version = ""
                     for cluster_mor in self.clusters_by_id.values():
                         pfo = self._register_cluster_for_updates(
                             cluster_mor)
                         clu_id = cluster_mor.value
                         self.cluster_id_to_filter[clu_id] = pfo
                     continue
                 LOG.exception(
                     _LE("VimFaultException while processing "
                         "update set %s."), e)
             except Exception:
                 LOG.exception(
                     _LE("Exception while processing update"
                         " set."))
             time.sleep(0)
         LOG.info(_LI("Stopped monitoring for vCenter updates."))
     except Exception:
         LOG.exception(_LE("Monitoring for vCenter updates failed."))
Example #10
 def _get_port_db(self, session, port_id, agent_id):
     try:
         port_db = (session.query(models_v2.Port).
                    enable_eagerloads(False).
                    filter(models_v2.Port.id.startswith(port_id)).
                    one())
         return port_db
     except sa_exc.NoResultFound:
         LOG.warning(_LW("Port %(port_id)s requested by agent "
                         "%(agent_id)s not found in database."),
                     {'port_id': port_id, 'agent_id': agent_id})
         return None
     except exc.MultipleResultsFound:
         LOG.error(_LE("Multiple ports have port_id starting with %s."),
                   port_id)
         return None
Example #11
 def __init__(self):
     self.filtered_ports = {}
     self.provider_port_cache = set()
     if sg_conf.security_bridge_mapping is None:
         LOG.warning(_LW("Security bridge mapping not configured."))
         return
     secbr_list = (sg_conf.security_bridge_mapping).split(':')
     secbr_name = secbr_list[0]
     secbr_phyname = secbr_list[1]
     self.sg_br = ovs_lib.OVSBridge(secbr_name)
     self.phy_ofport = self.sg_br.get_port_ofport(secbr_phyname)
     self.patch_ofport = self.sg_br.get_port_ofport(
         ovsvapp_const.SEC_TO_INT_PATCH)
     self._defer_apply = False
     if not self.check_ovs_firewall_restart():
         self.setup_base_flows()
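The split(':') above implies a 'bridge_name:physical_interface' option format and raises an unhelpful IndexError on malformed input; a slightly defensive variant of the parsing (a sketch, same assumed format):

def _parse_security_bridge_mapping(mapping):
    # e.g. mapping = "br-sec:eth1" -> ("br-sec", "eth1")
    try:
        secbr_name, secbr_phyname = mapping.split(':', 1)
    except (AttributeError, ValueError):
        raise ValueError("security_bridge_mapping must be of the form "
                         "'bridge_name:physical_interface', got %r"
                         % (mapping,))
    return secbr_name, secbr_phyname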
Example #12
def update_and_get_cluster_lock(vcenter_id, cluster_id):
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        try:
            query = session.query(models.OVSvAppClusters)
            cluster_row = (query.filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id).with_lockmode(
                    'update').one())
            if not cluster_row.threshold_reached:
                if not cluster_row.being_mitigated:
                    cluster_row.update({'being_mitigated': True})
                    LOG.info(_LI("Blocked the cluster %s for maintenance."),
                             cluster_id)
                    return SUCCESS
                else:
                    LOG.info(
                        _LI("Cluster %s is under maintenance. "
                            "Will retry later"), cluster_id)
                    return RETRY
            else:
                LOG.warning(
                    _LW("Cluster %(id)s in vCenter %(vc)s needs "
                        "attention. "
                        "Not able to put hosts to maintenance!"), {
                            'id': cluster_id,
                            'vc': vcenter_id
                        })
                return GIVE_UP
        except sa_exc.NoResultFound:
            # First fault case in this cluster_id.
            cluster_row = {
                'vcenter_id': vcenter_id,
                'cluster_id': cluster_id,
                'being_mitigated': True
            }
            session.execute(models.OVSvAppClusters.__table__.insert(),
                            cluster_row)
            LOG.info(_LI("Blocked the cluster %s for maintenance."),
                     cluster_id)
            return SUCCESS
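Query.with_lockmode('update') is deprecated since SQLAlchemy 0.9; on newer releases the same SELECT ... FOR UPDATE row lock is requested with with_for_update():

cluster_row = (query.filter(
    models.OVSvAppClusters.vcenter_id == vcenter_id,
    models.OVSvAppClusters.cluster_id == cluster_id)
    .with_for_update().one())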
Example #13
    def get_ports_details_list(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        port_ids = kwargs.get('port_ids')
        vcenter_id = kwargs['vcenter_id']
        cluster_id = kwargs['cluster_id']
        LOG.debug(
            "Port details requested by agent "
            "%(agent_id)s for ports %(ports)s.", {
                'ports': port_ids,
                'agent_id': agent_id
            })
        out_ports = []
        for port_id in port_ids:
            port_db = self._get_port_db(rpc_context.session, port_id, agent_id)
            if not port_db:
                continue
            port = self.plugin._make_port_dict(port_db)
            network = self.plugin.get_network(rpc_context, port['network_id'])
            levels = db.get_binding_levels(rpc_context, port_id,
                                           port_db.port_binding.host)
            port_context = driver_context.PortContext(self.plugin, rpc_context,
                                                      port, network,
                                                      port_db.port_binding,
                                                      levels)
            segment = port_context.top_bound_segment
            # Reference: ML2 Driver API changes for hierarchical port binding.
            bound_port = port_context.current
            if not segment:
                LOG.warning(
                    _LW("Port %(port_id)s requested by agent "
                        "%(agent_id)s on network %(network_id)s not "
                        "bound, vif_type: %(vif_type)s."), {
                            'port_id': port['id'],
                            'agent_id': agent_id,
                            'network_id': port['network_id'],
                            'vif_type': port[portbindings.VIF_TYPE]
                        })
                continue
            bound_port['lvid'] = None
            port_info = {
                'port_id': bound_port['id'],
                'vcenter_id': vcenter_id,
                'cluster_id': cluster_id,
                'network_id': bound_port['network_id']
            }
            lvid = ovsvapp_db.get_local_vlan(port_info, False)
            if lvid:
                bound_port['lvid'] = lvid
            else:
                # Local VLANs are exhausted !! No point processing
                # further.
                LOG.error(
                    _LE("Local VLAN not available in the cluster"
                        " %(cluster)s for port"
                        " %(port_id)s in vcenter %(vcenter)s."), {
                            'port_id': bound_port['id'],
                            'cluster': cluster_id,
                            'vcenter': vcenter_id
                        })
                # Skip sending back this port as there is no lvid.
                continue

            entry = {
                'network_id': bound_port['network_id'],
                'port_id': bound_port['id'],
                'lvid': bound_port['lvid'],
                'mac_address': bound_port['mac_address'],
                'admin_state_up': bound_port['admin_state_up'],
                'network_type': segment[api.NETWORK_TYPE],
                'segmentation_id': segment[api.SEGMENTATION_ID],
                'physical_network': segment[api.PHYSICAL_NETWORK],
                'fixed_ips': bound_port['fixed_ips'],
                'device_id': bound_port['device_id'],
                'security_groups': bound_port['security_groups'],
                'device_owner': bound_port['device_owner']
            }
            LOG.debug("Adding port detail: %s.", entry)
            out_ports.append(entry)
        return out_ports
Example #14
    def get_ports_for_device(self, rpc_context, **kwargs):
        """RPC for getting port info.

        This method provides information about the network and port for
        a given device_id.
        """
        agent_id = kwargs.get('agent_id')
        host = kwargs.get('host')
        device = kwargs.get('device')
        device_id = device['id']
        vcenter_id = device['vcenter']
        cluster_id = device['cluster_id']
        LOG.info(
            _LI("Device %(device_id)s details requested by agent "
                "%(agent_id)s running on host %(host)s."), {
                    'device_id': device_id,
                    'agent_id': agent_id,
                    'host': host
                })
        if not device_id:
            return False
        try_count = 3
        try:
            while try_count > 0:
                ports = self.plugin.get_ports(
                    rpc_context, filters={'device_id': [device_id]})
                device_ports = []
                sg_port_ids = set()
                for port in ports:
                    network = self.plugin.get_network(rpc_context,
                                                      port['network_id'])
                    port.update({
                        'network_type':
                        network['provider:network_type'],
                        'segmentation_id':
                        network['provider:segmentation_id'],
                        'physical_network':
                        network['provider:physical_network']
                    })

                    port_info = {
                        'port_id': port['id'],
                        'vcenter_id': vcenter_id,
                        'cluster_id': cluster_id,
                        'network_id': port['network_id']
                    }
                    lvid = None
                    if port['status'] != common_const.PORT_STATUS_ACTIVE:
                        lvid = ovsvapp_db.get_local_vlan(port_info)
                    else:
                        lvid = ovsvapp_db.get_local_vlan(port_info, False)
                    if lvid:
                        port['lvid'] = lvid
                    else:
                        # Local VLANs are exhausted ! No point processing
                        # further.
                        LOG.error(
                            _LE("No VLAN available in the cluster "
                                "%(cluster)s for assignment to"
                                " device %(device)s in "
                                "vCenter %(vcenter)s."), {
                                    'device': device_id,
                                    'cluster': cluster_id,
                                    'vcenter': vcenter_id
                                })
                        return False
                    # Bind the port here. If binding succeeds, then
                    # add this port to process for security groups, otherwise
                    # ignore it.
                    updated_port = self.update_port_binding(rpc_context,
                                                            agent_id=agent_id,
                                                            port_id=port['id'],
                                                            host=host)
                    if not updated_port:
                        LOG.error(_LE("Port binding failed for "
                                      "port %s."), port['id]'])
                        # process the next port for the device
                        continue
                    if 'security_groups' in port:
                        sg_port_ids.add(port['id'])
                    new_status = (common_const.PORT_STATUS_BUILD
                                  if port['admin_state_up'] else
                                  common_const.PORT_STATUS_DOWN)
                    if port['status'] != new_status:
                        self.plugin.update_port_status(rpc_context, port['id'],
                                                       new_status, host)
                    device_ports.append(port)
                if not device_ports:
                    try_count -= 1
                    LOG.warning(
                        _LW("Port details could not be retrieved for "
                            "device %s ..retrying."), device_id)
                    time.sleep(3)
                else:
                    LOG.debug("Device details returned by server: "
                              "%s.", device_ports)
                    # Get the SG rules for the security enabled ports.
                    sg_payload = {}
                    if sg_port_ids:
                        ports = self._get_devices_info(rpc_context,
                                                       sg_port_ids)
                        sg_rules = (
                            self.sg_rpc.security_group_info_for_esx_ports(
                                rpc_context, ports))
                        sg_payload[device_id] = sg_rules
                    self.notifier.device_create(rpc_context, device,
                                                device_ports, sg_payload,
                                                cluster_id)
                    return True
        except Exception:
            LOG.exception(
                _LE("Failed to retrieve port details for "
                    "device: %s."), device_id)
        LOG.error(_LE("Failed to retrieve ports for device: %s."), device_id)
        return False
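An illustrative payload for get_ports_for_device, inferred from the keys the handler reads; all identifiers below are made up, and 'server' stands for an instance of the RPC callback class above:

kwargs = {
    'agent_id': 'ovsvapp-agent-compute-1',
    'host': 'compute-1',
    'device': {
        'id': '8c1f2389-dd21-4d8c-8e30-31bf6c65a6f8',
        'vcenter': 'vcenter-1',
        'cluster_id': 'domain-c7',
    },
}
# server.get_ports_for_device(rpc_context, **kwargs) -> True on success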
Example #15
    def get_ports_details_list(self, rpc_context, **kwargs):
        """Agent requests device details."""
        agent_id = kwargs.get('agent_id')
        port_ids = kwargs.get('port_ids')
        vcenter_id = kwargs['vcenter_id']
        cluster_id = kwargs['cluster_id']
        LOG.debug("Port details requested by agent "
                  "%(agent_id)s for ports %(ports)s.",
                  {'ports': port_ids, 'agent_id': agent_id})
        out_ports = []
        for port_id in port_ids:
            port_db = self._get_port_db(rpc_context.session, port_id, agent_id)
            if not port_db:
                continue
            port = self.plugin._make_port_dict(port_db)
            network = self.plugin.get_network(rpc_context, port['network_id'])
            levels = db.get_binding_levels(rpc_context.session, port_id,
                                           port_db.port_binding.host)
            port_context = driver_context.PortContext(self.plugin,
                                                      rpc_context,
                                                      port,
                                                      network,
                                                      port_db.port_binding,
                                                      levels)
            segment = port_context.top_bound_segment
            # Reference: ML2 Driver API changes for hierarchical port binding.
            bound_port = port_context.current
            if not segment:
                LOG.warning(_LW("Port %(port_id)s requested by agent "
                                "%(agent_id)s on network %(network_id)s not "
                                "bound, vif_type: %(vif_type)s."),
                            {'port_id': port['id'],
                             'agent_id': agent_id,
                             'network_id': port['network_id'],
                             'vif_type': port[portbindings.VIF_TYPE]})
                continue
            bound_port['lvid'] = None
            port_info = {'port_id': bound_port['id'],
                         'vcenter_id': vcenter_id,
                         'cluster_id': cluster_id,
                         'network_id': bound_port['network_id']}
            lvid = ovsvapp_db.get_local_vlan(port_info, False)
            if lvid:
                bound_port['lvid'] = lvid
            else:
                # Local VLANs are exhausted !! No point processing
                # further.
                LOG.error(_LE("Local VLAN not available in the cluster"
                              " %(cluster)s for port"
                              " %(port_id)s in vcenter %(vcenter)s."),
                          {'port_id': bound_port['id'],
                           'cluster': cluster_id,
                           'vcenter': vcenter_id})
                # Skip sending back this port as there is no lvid.
                continue

            entry = {'network_id': bound_port['network_id'],
                     'port_id': bound_port['id'],
                     'lvid': bound_port['lvid'],
                     'mac_address': bound_port['mac_address'],
                     'admin_state_up': bound_port['admin_state_up'],
                     'network_type': segment[api.NETWORK_TYPE],
                     'segmentation_id': segment[api.SEGMENTATION_ID],
                     'physical_network': segment[api.PHYSICAL_NETWORK],
                     'fixed_ips': bound_port['fixed_ips'],
                     'device_id': bound_port['device_id'],
                     'security_groups': bound_port['security_groups'],
                     'device_owner': bound_port['device_owner']}
            LOG.debug("Adding port detail: %s.", entry)
            out_ports.append(entry)
        return out_ports
Example #16
    def get_ports_for_device(self, rpc_context, **kwargs):
        """RPC for getting port info.

        This method provides information about the network and port for
        a given device_id.
        """
        agent_id = kwargs.get('agent_id')
        host = kwargs.get('host')
        device = kwargs.get('device')
        device_id = device['id']
        vcenter_id = device['vcenter']
        cluster_id = device['cluster_id']
        LOG.info(_LI("Device %(device_id)s details requested by agent "
                     "%(agent_id)s running on host %(host)s."),
                 {'device_id': device_id, 'agent_id': agent_id, 'host': host})
        if not device_id:
            return False
        try_count = 3
        try:
            while try_count > 0:
                ports = self.plugin.get_ports(rpc_context,
                                              filters={'device_id':
                                                       [device_id]})
                device_ports = []
                sg_port_ids = set()
                for port in ports:
                    network = self.plugin.get_network(rpc_context,
                                                      port['network_id'])
                    port.update(
                        {'network_type': network['provider:network_type'],
                         'segmentation_id':
                         network['provider:segmentation_id'],
                         'physical_network':
                         network['provider:physical_network']})

                    port_info = {'port_id': port['id'],
                                 'vcenter_id': vcenter_id,
                                 'cluster_id': cluster_id,
                                 'network_id': port['network_id']}
                    lvid = ovsvapp_db.get_local_vlan(port_info)
                    if lvid:
                        port['lvid'] = lvid
                    else:
                        # Local VLANs are exhausted ! No point processing
                        # further.
                        LOG.error(_LE("No VLAN available in the cluster "
                                      "%(cluster)s for assignment to"
                                      " device %(device)s in "
                                      "vCenter %(vcenter)s."),
                                  {'device': device_id,
                                   'cluster': cluster_id,
                                   'vcenter': vcenter_id})
                        return False
                    # Bind the port here. If binding succeeds, then
                    # add this port to process for security groups, otherwise
                    # ignore it.
                    updated_port = self.update_port_binding(rpc_context,
                                                            agent_id=agent_id,
                                                            port_id=port['id'],
                                                            host=host)
                    if not updated_port:
                        LOG.error(_LE("Port binding failed for "
                                      "port %s."), port['id]'])
                        # process the next port for the device
                        continue
                    if 'security_groups' in port:
                        sg_port_ids.add(port['id'])
                    new_status = (common_const.PORT_STATUS_BUILD
                                  if port['admin_state_up'] else
                                  common_const.PORT_STATUS_DOWN)
                    if port['status'] != new_status:
                        self.plugin.update_port_status(rpc_context, port['id'],
                                                       new_status, host)
                    device_ports.append(port)
                if not device_ports:
                    try_count -= 1
                    LOG.warning(_LW("Port details could not be retrieved for "
                                    "device %s ..retrying."), device_id)
                    time.sleep(3)
                else:
                    LOG.debug("Device details returned by server: "
                              "%s.", device_ports)
                    # Get the SG rules for the security enabled ports.
                    sg_payload = {}
                    if sg_port_ids:
                        ports = self._get_devices_info(
                            rpc_context, sg_port_ids)
                        sg_rules = (
                            self.sg_rpc.security_group_info_for_esx_ports(
                                rpc_context, ports))
                        sg_payload[device_id] = sg_rules
                    self.notifier.device_create(rpc_context, device,
                                                device_ports, sg_payload,
                                                cluster_id)
                    return True
        except Exception:
            LOG.exception(_LE("Failed to retrieve port details for "
                              "device: %s."), device_id)
        LOG.error(_LE("Failed to retrieve ports for device: %s."), device_id)
        return False
Example #17
 def _wait_for_port_update_on_vm(self, vm_mor, pgmor):
     property_collector = None
     try:
         LOG.debug("Creating new property collector.")
         property_collector = self.session._call_method(
             vim_util, "create_property_collector")
         self._register_vm_for_updates(vm_mor, property_collector)
         version = ""
         pg_key, port_key, swuuid = (None, None, None)
         while self.state == constants.DRIVER_RUNNING:
             LOG.debug(
                 "Waiting for VM %(vm)s to connect to "
                 "port group %(pg)s.", {
                     'vm': vm_mor.value,
                     'pg': pgmor.value
                 })
             try:
                 update_set = self.session._call_method(
                     vim_util,
                     "wait_for_updates_ex",
                     version,
                     collector=property_collector)
             except error_util.SocketTimeoutException:
                 LOG.exception(_LE("Socket Timeout Exception."))
                 # Ignore timeout.
                 continue
             if update_set:
                 version = update_set.version
                 filterSet = update_set.filterSet
                 if not filterSet:
                     continue
                 for propFilterUpdate in filterSet:
                     objectSet = propFilterUpdate.objectSet
                     if not objectSet:
                         continue
                     for objectUpdate in objectSet:
                         if objectUpdate.kind == "leave":
                             LOG.warning(
                                 _LW("VM %(vm)s got deleted while "
                                     "waiting for it to connect "
                                     "to port group %(pg)s."), {
                                         'vm': vm_mor.value,
                                         'pg': pgmor.value
                                     })
                             return (pg_key, port_key, swuuid)
                         changes = common_util.convert_objectupdate_to_dict(
                             objectUpdate)
                         devices = changes.get('config.hardware.device')
                         nicdvs = network_util.get_vnics_from_devices(
                             devices)
                         if not nicdvs:
                             continue
                         for device in nicdvs:
                             if (hasattr(device, "backing")
                                     and hasattr(device.backing, "port")
                                     and device.backing.port):
                                 port = device.backing.port
                                 if hasattr(port, "portgroupKey"):
                                     pg_key = port.portgroupKey
                                     if (pg_key == pgmor.value
                                             and hasattr(port, "portKey")):
                                         port_key = port.portKey
                                         swuuid = port.switchUuid
                                         LOG.info(
                                             _LI("VM %(vm)s connected "
                                                 "to port group: "
                                                 "%(pg)s."), {
                                                     'vm': vm_mor.value,
                                                     'pg': pgmor.value
                                                 })
                                         return (pg_key, port_key, swuuid)
     except Exception as e:
         LOG.exception(
             _LE("Exception while waiting for VM %(vm)s "
                 "to connect to port group %(pg)s: %(err)s."), {
                     'vm': vm_mor.value,
                     'pg': pgmor.value,
                     'err': e
                 })
         raise
     finally:
         LOG.debug("Destroying the property collector created.")
         self.session._call_method(vim_util, "destroy_property_collector",
                                   property_collector)
Example #18
    def monitor_agent_state(self):
        """Thread to monitor agent state.

        Represents a thread which maintains list of active
        and inactive agents based on the heartbeat recorded.
        """
        # Do nothing until plugin is initialized.
        if not self.plugin:
            status = self.get_plugin_and_initialize()
            if not status:
                LOG.warning(_LW("Plugin not defined...returning!"))
                return
        if not self.agent_ext_support:
            LOG.warning(_LW("Agent extension is not loaded by plugin."))
            return
        try:
            self.agents = self.plugin.get_agents(
                self.context,
                filters={'agent_type': [ovsvapp_const.AGENT_TYPE_OVSVAPP]})
        except Exception:
            LOG.exception(_LE("Unable to get agent list."))
            return
        for agent in self.agents:
            agent_time_stamp = agent['heartbeat_timestamp']
            agent_id = agent['id']
            status = timeutils.is_older_than(agent_time_stamp,
                                             cfg.CONF.agent_down_time * 2)
            LOG.debug("For ovsvapp_agent %(agent)s agent_state %(state)s.",
                      {'agent': agent, 'state': status})
            try:
                agent_config = agent['configurations']
                if not status:
                    if agent_id not in self.active_agents:
                        self.active_agents.append(agent_id)
                        self.update_agent_state(agent_id, True)
                    if agent_id in self.inactive_agents:
                        LOG.info(_LI("Removing agent: %s from inactive "
                                     "agent list."), agent_id)
                        self.inactive_agents.remove(agent_id)
                        ovsvapp_db.reset_cluster_threshold(
                            agent_config['vcenter_id'],
                            agent_config['cluster_id']
                        )
                else:
                    if not agent['admin_state_up']:
                        # This agent is already handled in earlier run or by
                        # another Neutron server. Just update the cache and
                        # proceed further.
                        if agent_id not in self.inactive_agents:
                            LOG.info(_LI("Moving agent: %s from active to "
                                         "inactive."), agent_id)
                            self.inactive_agents.append(agent_id)
                        if agent_id in self.active_agents:
                            self.active_agents.remove(agent_id)
                        continue
                    if self.update_agent_state(agent_id, False):
                        # Got the ownership for mitigating this agent.
                        if agent_id in self.active_agents:
                            self.active_agents.remove(agent_id)
                        if self.check_ovsvapp_data_path(agent):
                            continue
                        cluster_status = (
                            ovsvapp_db.update_and_get_cluster_lock(
                                agent_config['vcenter_id'],
                                agent_config['cluster_id']))
                        if cluster_status == ovsvapp_db.SUCCESS:
                            # Got the cluster lock for mitigating this agent.
                            self.threadpool.spawn_n(self.process_ovsvapp_agent,
                                                    agent)
                            LOG.info(_LI("Spawned a thread for processing "
                                         "OVSvApp Agent %s."), agent['id'])
                            if agent_id not in self.inactive_agents:
                                LOG.info(_LI("Moving agent: %s from active to "
                                             "inactive."), agent_id)
                                self.inactive_agents.append(agent_id)
                        elif cluster_status == ovsvapp_db.RETRY:
                            self.update_agent_state(agent['id'], True)
                            LOG.debug("Will retry the agent %s in the next "
                                      "iteration.", agent['id'])
                        elif cluster_status == ovsvapp_db.GIVE_UP:
                            self.update_agent_state(agent['id'], True)
                            LOG.debug("Threshold already reached. Will retry "
                                      "the agent %s in the next run",
                                      agent['id'])
            except Exception:
                LOG.exception(_LE("Exception occurred in"
                                  "monitor_agent_state."))