def main():
    common_config.init(sys.argv[1:])
    common_config.setup_logging()

    try:
        interface_mappings = utils.parse_mappings(
            cfg.CONF.ESWITCH.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s"), interface_mappings)

    try:
        agent = mlnx_eswitch_neutron_agent.MlnxEswitchNeutronAgent(
            interface_mappings)
    except Exception as e:
        LOG.error(_LE("Failed on Agent initialisation : %s. "
                      "Agent terminated!"), e)
        sys.exit(1)

    # Start everything.
    LOG.info(_LI("Agent initialised successfully, now running... "))
    agent.run()
    sys.exit(0)
Example #2
    def _handle_dhcp_request(self, msg, pkt, lport):
        packet = ryu_packet.Packet(data=msg.data)
        in_port = msg.match.get("in_port")

        if isinstance(packet[3], str):
            dhcp_packet = dhcp.dhcp.parser(packet[3])[0]
        else:
            dhcp_packet = packet[3]

        dhcp_message_type = self._get_dhcp_message_type_opt(dhcp_packet)
        send_packet = None
        if dhcp_message_type == DHCP_DISCOVER:
            # DHCP DISCOVER
            send_packet = self._create_dhcp_offer(
                pkt,
                dhcp_packet,
                lport)
            LOG.info(_LI("sending DHCP offer for port IP %(port_ip)s "
                         "port id %(port_id)s"),
                     {'port_ip': lport.get_ip(), 'port_id': lport.get_id()})
        elif dhcp_message_type == DHCP_REQUEST:
            # DHCP REQUEST
            send_packet = self._create_dhcp_ack(
                pkt,
                dhcp_packet,
                lport)
            LOG.info(_LI("sending DHCP ACK for port IP %(port_ip)s "
                         "port id %(port_id)s"),
                     {'port_ip': lport.get_ip(),
                      'port_id': lport.get_id()})
        else:
            LOG.error(_LE("DHCP message type %d not handled"),
                      dhcp_message_type)
        if send_packet:
            self._send_packet(self.dp, in_port, send_packet)
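
The helper that extracts the DHCP message type is not shown in this snippet. A plausible sketch, assuming Ryu's parsed option list and the standard option tag 53 (both assumptions, not taken from the code above):

DHCP_MSG_TYPE_OPT = 53  # standard DHCP "message type" option tag

def _get_dhcp_message_type_opt(self, dhcp_packet):
    # dhcp_packet.options.option_list holds Ryu dhcp.option objects,
    # each carrying a numeric tag and a raw byte-string value.
    for opt in dhcp_packet.options.option_list:
        if opt.tag == DHCP_MSG_TYPE_OPT:
            return ord(opt.value)
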
    def daemon_loop(self):
        LOG.info(_LI("LinuxBridge Agent RPC Daemon Started!"))
        device_info = None
        sync = True

        while True:
            start = time.time()

            device_info = self.scan_devices(previous=device_info, sync=sync)

            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))
                sync = False

            if self._device_info_has_changes(device_info):
                LOG.debug("Agent loop found changes! %s", device_info)
                try:
                    sync = self.process_network_devices(device_info)
                except Exception:
                    LOG.exception(_LE("Error in agent loop. Devices info: %s"),
                                  device_info)
                    sync = True

            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)!",
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
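
The loop above shows the standard agent polling pattern: scan, process changes, then sleep only for whatever is left of the interval. A minimal standalone sketch of just that timing logic (names are illustrative):

import time

def poll_forever(scan, process, polling_interval):
    # Sleep only for the remainder of the interval after the work is
    # done, so iterations stay roughly polling_interval apart.
    while True:
        start = time.time()
        process(scan())
        elapsed = time.time() - start
        if elapsed < polling_interval:
            time.sleep(polling_interval - elapsed)
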
def main():
    common_config.init(sys.argv[1:])

    common_config.setup_logging()
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s"), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)

    polling_interval = cfg.CONF.AGENT.polling_interval
    quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
    agent = LinuxBridgeNeutronAgentRPC(bridge_mappings,
                                       interface_mappings,
                                       polling_interval,
                                       quitting_rpc_timeout)
    LOG.info(_LI("Agent initialized successfully, now running... "))
    launcher = service.launch(cfg.CONF, agent)
    launcher.wait()
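
Both this main() and the eSwitch one above lean on parse_mappings to turn "physnet:interface" strings into a dict, raising ValueError on malformed or duplicate entries (hence the try/except blocks). A simplified sketch of that behaviour, not the actual neutron implementation:

def parse_mappings(mapping_list):
    # e.g. ["physnet1:eth1", "physnet2:eth2"] -> {"physnet1": "eth1", ...}
    mappings = {}
    for mapping in mapping_list or []:
        key, sep, value = mapping.partition(':')
        key, value = key.strip(), value.strip()
        if not sep or not key or not value:
            raise ValueError("Invalid mapping: '%s'" % mapping)
        if key in mappings:
            raise ValueError("Duplicate key '%s' in mappings" % key)
        mappings[key] = value
    return mappings
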
Example #5
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(_LE('Unable to sync network state on '
                                      'deleted network %s'), deleted_id)

            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))

        except Exception as e:
            self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))
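
sync_state fans the per-network configuration out over an eventlet GreenPool. A minimal illustration of the spawn/waitall pattern it relies on (the worker and network list are placeholders):

import eventlet

def configure_all(networks, worker, num_threads=4):
    # One green thread per network; waitall() blocks until all finish.
    pool = eventlet.GreenPool(num_threads)
    for net in networks:
        pool.spawn(worker, net)
    pool.waitall()
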
 def dfa_uplink_restart(self, uplink_dict):
     LOG.info(_LI("Obtained uplink after restart %s "), uplink_dict)
     # This shouldn't happen
     if self.phy_uplink is not None:
         LOG.error(_LE("Uplink detection already done %s"), self.phy_uplink)
         return
     uplink = uplink_dict.get('uplink')
     veth_intf = uplink_dict.get('veth_intf')
     # Logic is as follows:
     # If the DB didn't have any uplink, it means the uplink is not yet
     # detected or is down.
     # If the DB has both an uplink and a veth, there's no need to scan
     # all ports; we can start with this veth.
     # If the uplink was removed or modified during the restart, the
     # uplink detection code will return 'down' and it will be removed
     # then.
     # If the DB has an uplink but no veth, it's an error condition, in
     # which case remove the uplink port from the bridge and start fresh.
     if uplink is None or len(uplink) == 0:
         LOG.info(_LI("uplink not discovered yet"))
         self.restart_uplink_called = True
         return
     if veth_intf is not None and len(veth_intf) != 0:
         LOG.info(_LI("veth interface is already added, %(ul)s %(veth)s"),
                  {'ul': uplink, 'veth': veth_intf})
         self.phy_uplink = uplink
         self.veth_intf = veth_intf
         self.restart_uplink_called = True
         return
     LOG.info(_LI("Error case removing the uplink %s from bridge"), uplink)
     ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex, uplink)
     self.restart_uplink_called = True
Example #7
    def treat_devices_removed(self, devices):
        resync = False
        for device in devices:
            LOG.info(_LI("Removing device with mac_address %s"), device)
            try:
                pci_slot = self.eswitch_mgr.get_pci_slot_by_mac(device)
                if pci_slot:
                    profile = {"pci_slot": pci_slot}
                    port = {"device": device, "profile": profile}
                    self.ext_manager.delete_port(self.context, port)
                else:
                    LOG.warning(_LW("Failed to find pci slot for device "
                                    "%(device)s; skipping extension port "
                                    "cleanup"),
                                {'device': device})

                dev_details = self.plugin_rpc.update_device_down(
                    self.context, device, self.agent_id, cfg.CONF.host)
            except Exception as e:
                LOG.debug("Removing port failed for device %(device)s "
                          "due to %(exc)s", {'device': device, 'exc': e})
                resync = True
                continue
            if dev_details["exists"]:
                LOG.info(_LI("Port %s updated."), device)
            else:
                LOG.debug("Device %s not defined on plugin", device)
        return resync
Example #8
def main():
    common_config.init(sys.argv[1:])

    common_config.setup_logging()
    try:
        config_parser = SriovNicAgentConfigParser()
        config_parser.parse()
        device_mappings = config_parser.device_mappings
        exclude_devices = config_parser.exclude_devices

    except ValueError:
        LOG.exception(_LE("Failed on Agent configuration parse. "
                          "Agent terminated!"))
        raise SystemExit(1)
    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)

    polling_interval = cfg.CONF.AGENT.polling_interval
    try:
        agent = SriovNicSwitchAgent(device_mappings,
                                    exclude_devices,
                                    polling_interval)
    except exc.SriovNicError:
        LOG.exception(_LE("Agent Initialization Failed"))
        raise SystemExit(1)
    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
Example #9
def main():
    """Main method for cleaning up OVS bridges.

    The utility cleans up the integration bridges used by Neutron.
    """

    conf = setup_conf()
    conf()
    config.setup_logging()

    configuration_bridges = set([conf.ovs_integration_bridge,
                                 conf.external_network_bridge])
    ovs = ovs_lib.BaseOVS()
    ovs_bridges = set(ovs.get_bridges())
    available_configuration_bridges = configuration_bridges & ovs_bridges

    if conf.ovs_all_ports:
        bridges = ovs_bridges
    else:
        bridges = available_configuration_bridges

    # Collect existing ports created by Neutron on configuration bridges.
    # After deleting ports from OVS bridges, we cannot determine which
    # ports were created by Neutron, so port information is collected now.
    ports = collect_neutron_ports(available_configuration_bridges)

    for bridge in bridges:
        LOG.info(_LI("Cleaning bridge: %s"), bridge)
        ovs = ovs_lib.OVSBridge(bridge)
        ovs.delete_ports(all_ports=conf.ovs_all_ports)

    # Remove remaining ports created by Neutron (usually veth pair)
    delete_neutron_ports(ports)

    LOG.info(_LI("OVS cleanup completed successfully"))
Example #10
    def delete_network_postcommit(self, context):
        network_id = context.current['id']
        vni = context.current['provider:segmentation_id']

        # remove vxlan from all hosts - a little unpleasant
        for _switch_ip in self.switches:
            try:
                actions = [
                    VXLAN_URL.format(
                        scheme=self.scheme,
                        base=_switch_ip,
                        port=self.protocol_port,
                        vni=vni
                    ),
                    NETWORKS_URL.format(
                        scheme=self.scheme,
                        base=_switch_ip,
                        port=self.protocol_port,
                        network=network_id
                    )
                ]

                for action in actions:
                    r = requests.delete(action)

                    if r.status_code != requests.codes.ok:
                        LOG.info(
                            _LI('Error during %s delete. HTTP Error:%s'),
                            action, r.status_code
                        )

            except Exception as e:
                # Errors here might be expected; this handling should be
                # made more specific.
                LOG.info(_LI('Error during net delete. Error %s'), e)
 def process_uplink_event(self, msg, phy_uplink):
     LOG.info(_LI("Received New uplink Msg %(msg)s for uplink %(uplink)s"),
              {'msg': msg.get_status(), 'uplink': phy_uplink})
     if msg.get_status() == 'up':
         ovs_exc_raised = False
         try:
             self.ovs_vdp_obj_dict[phy_uplink] = ovs_vdp.OVSNeutronVdp(
                 phy_uplink, msg.get_integ_br(), msg.get_ext_br(),
                 msg.get_root_helper())
         except Exception as exc:
             LOG.error(_LE("OVS VDP Object creation failed %s"), str(exc))
             ovs_exc_raised = True
         if (ovs_exc_raised or not self.ovs_vdp_obj_dict[phy_uplink].
                 is_lldpad_setup_done()):
             # Is there a way to delete the object??
             LOG.error(_LE("UP Event Processing NOT Complete"))
             self.err_que.enqueue(constants.Q_UPL_PRIO, msg)
         else:
             self.uplink_det_compl = True
             veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink].
                          get_lldp_bridge_port())
             LOG.info(_LI("UP Event Processing Complete Saving uplink "
                          "%(ul)s and veth %(veth)s"),
                      {'ul': self.phy_uplink, 'veth': veth_intf})
             self.save_uplink(uplink=self.phy_uplink, veth_intf=veth_intf)
     elif msg.get_status() == 'down':
         # Free the object fixme(padkrish)
         if phy_uplink in self.ovs_vdp_obj_dict:
             self.ovs_vdp_obj_dict[phy_uplink].clear_obj_params()
         else:
             ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex,
                                             phy_uplink)
         self.save_uplink()
Example #12
    def _port_status_handler(self, ev):
        msg = ev.msg
        reason = msg.reason
        port_no = msg.desc.port_no
        port_name = msg.desc.name

        ofproto = msg.datapath.ofproto
        if reason == ofproto.OFPPR_ADD:
            LOG.info(_LI("port added %s"), port_no)
            lport = self.db_store.get_local_port_by_name(port_name)
            if lport:
                lport.set_external_value('ofport', port_no)
                lport.set_external_value('is_local', True)
                self.notify_add_local_port(lport)
        elif reason == ofproto.OFPPR_DELETE:
            LOG.info(_LI("port deleted %s"), port_no)
            lport = self.db_store.get_local_port_by_name(port_name)
            if lport:
                self.notify_remove_local_port(lport)
                # Leave the last correct OF port number of this port
        elif reason == ofproto.OFPPR_MODIFY:
            LOG.info(_LI("port modified %s"), port_no)
            # TODO(oanson) Add notification
        else:
            LOG.info(_LI("Illeagal port state %(port_no)s %(reason)s")
                     % {'port_no': port_no, 'reason': reason})
    def get_running_config_router_ids(self, parsed_cfg):
        rconf_ids = []
        is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region

        if (is_multi_region_enabled):
            vrf_regex_new = VRF_MULTI_REGION_REGEX_NEW
        else:
            vrf_regex_new = VRF_REGEX_NEW

        for parsed_obj in parsed_cfg.find_objects(vrf_regex_new):
            LOG.info(_LI("VRF object: %s"), (str(parsed_obj)))
            match_obj = re.match(vrf_regex_new, parsed_obj.text)
            router_id = match_obj.group(1)
            LOG.info(_LI("    First 6 digits of router ID: %s\n"),
                        (router_id))
            if (is_multi_region_enabled):
                region_id = match_obj.group(2)
                LOG.info(_LI("    region ID: %s\n"),
                            (region_id))
                my_region_id = cfg.CONF.multi_region.region_id
                if (my_region_id == region_id):
                    rconf_ids.append(router_id)
            else:
                rconf_ids.append(router_id)

        return rconf_ids
    def clean_vrfs(self, conn, router_id_dict, parsed_cfg):
        ostk_router_ids = self.get_ostk_router_ids(router_id_dict)
        rconf_ids = self.get_running_config_router_ids(parsed_cfg)

        source_set = set(ostk_router_ids)
        dest_set = set(rconf_ids)

        # add_set = source_set.difference(dest_set)
        del_set = dest_set.difference(source_set)

        LOG.info(_LI("VRF DB set: %s"), (source_set))
        LOG.info(_LI("VRFs to delete: %s"), (del_set))
        # LOG.info("VRFs to add: %s" % (add_set))

        is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
        invalid_vrfs = []
        for router_id in del_set:
            if (is_multi_region_enabled):
                my_region_id = cfg.CONF.multi_region.region_id
                invalid_vrfs.append("nrouter-%s-%s" % (router_id,
                                                       my_region_id))
            else:
                invalid_vrfs.append("nrouter-%s" % (router_id))

        if not self.test_mode:
            for vrf_name in invalid_vrfs:
                confstr = asr_snippets.REMOVE_VRF_DEFN % vrf_name
                conn.edit_config(target='running', config=confstr)

        return invalid_vrfs
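
Both methods assume VRF names of the form nrouter-<router_id>, with a -<region_id> suffix when multi-region is enabled (matching the strings built in clean_vrfs). The actual patterns are defined elsewhere in the module; a hypothetical stand-in to illustrate the group() calls above:

import re

# Hypothetical patterns, not the module's real VRF_REGEX_NEW definitions.
VRF_REGEX_NEW = r"vrf definition nrouter-(\w+)"
VRF_MULTI_REGION_REGEX_NEW = r"vrf definition nrouter-(\w+)-(\w+)"

m = re.match(VRF_MULTI_REGION_REGEX_NEW,
             "vrf definition nrouter-abc123-RegionA")
print(m.group(1), m.group(2))  # abc123 RegionA
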
Example #15
def check_foreign_keys(metadata):
    # This methods checks foreign keys that tables contain in models with
    # foreign keys that are in db.
    added_fks = []
    dropped_fks = []
    bind = op.get_bind()
    insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
    # Get all tables from db
    db_tables = insp.get_table_names()
    # Get all tables from models
    model_tables = metadata.tables
    for table in db_tables:
        if table not in model_tables:
            continue
        # Get all necessary information about key of current table from db
        fk_db = dict((_get_fk_info_db(i), i['name']) for i in
                     insp.get_foreign_keys(table))
        fk_db_set = set(fk_db.keys())
        # Get all necessary information about key of current table from models
        fk_models = dict((_get_fk_info_from_model(fk), fk) for fk in
                         model_tables[table].foreign_keys)
        fk_models_set = set(fk_models.keys())
        for key in (fk_db_set - fk_models_set):
            dropped_fks.append(('drop_key', fk_db[key], table))
            LOG.info(_LI("Detected removed foreign key %(fk)r on "
                         "table %(table)r"),
                     {'fk': fk_db[key], 'table': table})
        for key in (fk_models_set - fk_db_set):
            added_fks.append(('add_key', fk_models[key]))
            LOG.info(_LI("Detected added foreign key for column %(fk)r on "
                         "table %(table)r"),
                     {'fk': fk_models[key].column.name,
                      'table': table})
    return (added_fks, dropped_fks)
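
The two _get_fk_info_* helpers are not shown; the set comparison only works because both reduce a foreign key to the same hashable shape. A plausible sketch of what they might look like (assumed, not the actual helpers):

def _get_fk_info_db(fk):
    # fk is a dict as returned by Inspector.get_foreign_keys().
    return (tuple(fk['constrained_columns']),
            fk['referred_table'],
            tuple(fk['referred_columns']))

def _get_fk_info_from_model(fk):
    # fk is a sqlalchemy.ForeignKey taken from the model metadata.
    return ((fk.parent.name,),
            fk.column.table.name,
            (fk.column.name,))
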
 def daemon_loop(self):
     LOG.info(_LI("eSwitch Agent Started!"))
     sync = True
     port_info = {'current': set(),
                  'added': set(),
                  'removed': set(),
                  'updated': set()}
     while True:
         start = time.time()
         try:
             port_info = self.scan_ports(previous=port_info, sync=sync)
         except exceptions.RequestTimeout:
             LOG.exception(_LE("Request timeout in agent event loop "
                               "eSwitchD is not responding - exiting..."))
             raise SystemExit(1)
         if sync:
             LOG.info(_LI("Agent out of sync with plugin!"))
             sync = False
         if self._port_info_has_changes(port_info):
             LOG.debug("Starting to process devices in:%s", port_info)
             try:
                 sync = self.process_network_ports(port_info)
             except Exception:
                 LOG.exception(_LE("Error in agent event loop"))
                 sync = True
         # sleep till end of polling interval
         elapsed = (time.time() - start)
         if (elapsed < self._polling_interval):
             time.sleep(self._polling_interval - elapsed)
         else:
             LOG.debug("Loop iteration exceeded interval "
                       "(%(polling_interval)s vs. %(elapsed)s)",
                       {'polling_interval': self._polling_interval,
                        'elapsed': elapsed})
Example #17
    def refresh_firewall(self, device_ids=None):
        LOG.info(_LI("Refresh firewall rules"))
        if not device_ids:
            device_ids = self.firewall.ports.keys()
            if not device_ids:
                LOG.info(_LI("No ports here to refresh firewall"))
                return
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, device_ids)
            devices = devices_info['devices']
            security_groups = devices_info['security_groups']
            security_group_member_ips = devices_info['sg_member_ips']
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, device_ids)

        with self.firewall.defer_apply():
            for device in devices.values():
                LOG.debug("Update port filter for %s", device['device'])
                self.set_local_zone(device)
                self.firewall.update_port_filter(device)
            if self.use_enhanced_rpc:
                LOG.debug("Update security group information for ports %s",
                          devices.keys())
                self._update_security_group_info(
                    security_groups, security_group_member_ips)
Example #18
    def treat_devices_added_updated(self, devices):
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, devices, self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get port details for devices "
                      "with MAC address %(devices)s: %(e)s",
                      {'devices': devices, 'e': e})
            # resync is needed
            return True

        for device_details in devices_details_list:
            device = device_details['device']
            LOG.debug("Port with MAC address %s is added", device)

            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                profile = device_details['profile']
                self.treat_device(device_details['device'],
                                  profile.get('pci_slot'),
                                  device_details['admin_state_up'])
            else:
                LOG.info(_LI("Device with MAC %s not defined on plugin"),
                         device)
        return False
Example #19
    def login(self):
        """Get session based login"""
        login_obj = {"username": self.username, "password": self.password}

        msg = "NetScaler driver login:"******"login", NITRO_LOGIN_URI,
                                                   "login", login_obj)
        LOG.info(_LI("Response: status : %(status)s %result(result)s"), {
                 "status": resp_status, "result": result['body']})
        result_body = jsonutils.loads(result['body'])

        session_id = None
        if result_body and "login" in result_body:
            logins = result_body["login"]
            if isinstance(logins, list):
                login = logins[0]
            else:
                login = logins
            if login and "sessionid" in login:
                session_id = login["sessionid"]

        if session_id:
            LOG.info(_LI("Response: %(result)s"), {"result": result['body']})
            LOG.info(_LI("Session_id = %(session_id)s"),
                     {"session_id": session_id})
            # Update session_id in auth
            self.auth = "SessId=%s" % session_id
        else:
            raise NCCException(NCCException.RESPONSE_ERROR)
Example #20
    def _get_dp(self):
        """Get (dp, ofp, ofpp) tuple for the switch.

        A convenient method for openflow message composers.
        """
        while True:
            dpid_int = self._cached_dpid
            if dpid_int is None:
                dpid_str = self.get_datapath_id()
                LOG.info(_LI("Bridge %(br_name)s has datapath-ID %(dpid)s"),
                         {"br_name": self.br_name, "dpid": dpid_str})
                dpid_int = int(dpid_str, 16)
            try:
                dp = self._get_dp_by_dpid(dpid_int)
            except RuntimeError:
                with excutils.save_and_reraise_exception() as ctx:
                    self._cached_dpid = None
                    # Retry if dpid has been changed.
                    # NOTE(yamamoto): Open vSwitch change its dpid on
                    # some events.
                    # REVISIT(yamamoto): Consider to set dpid statically.
                    new_dpid_str = self.get_datapath_id()
                    if new_dpid_str != dpid_str:
                        LOG.info(_LI("Bridge %(br_name)s changed its "
                                     "datapath-ID from %(old)s to %(new)s"), {
                            "br_name": self.br_name,
                            "old": dpid_str,
                            "new": new_dpid_str,
                        })
                        ctx.reraise = False
            else:
                self._cached_dpid = dpid_int
                return dp, dp.ofproto, dp.ofproto_parser
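
The retry in _get_dp hinges on oslo.utils' save_and_reraise_exception: the saved exception is re-raised when the with-block exits unless ctx.reraise is cleared. A minimal illustration of the pattern (fetch and recover are placeholders):

from oslo_utils import excutils

def fetch_with_recovery(fetch, recover):
    try:
        return fetch()
    except RuntimeError:
        with excutils.save_and_reraise_exception() as ctx:
            if recover():
                # Suppress the re-raise; the caller can retry.
                ctx.reraise = False
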
    def logical_port_updated(self, lport):
        if self.db_store.get_port(lport.get_id()) is not None:
            # TODO(gsagie) support updating port
            return

        if lport.get_chassis() is None:
            return

        chassis_to_ofport, lport_to_ofport = (
            self.vswitch_api.get_local_ports_to_ofport_mapping())
        network = self.get_network_id(lport.get_lswitch_id())
        lport.set_external_value('local_network_id', network)

        if lport.get_chassis() == self.chassis_name:
            ofport = lport_to_ofport.get(lport.get_id(), 0)
            if ofport != 0:
                lport.set_external_value('ofport', ofport)
                lport.set_external_value('is_local', True)
                LOG.info(_LI("Adding new local Logical Port"))
                LOG.info("%s", lport)
                self.dispatcher.dispatch('add_local_port', lport=lport)
                self.db_store.set_port(lport.get_id(), lport, True)
            else:
                raise RuntimeError("ofport is 0")
        else:
            ofport = chassis_to_ofport.get(lport.get_chassis(), 0)
            if ofport != 0:
                lport.set_external_value('ofport', ofport)
                lport.set_external_value('is_local', False)
                LOG.info(_LI("Adding new remote Logical Port"))
                LOG.info("%s", lport)
                self.dispatcher.dispatch('add_remote_port', lport=lport)
                self.db_store.set_port(lport.get_id(), lport, False)
            else:
                raise RuntimeError("ofport is 0")
Example #22
 def get_driver(self):
     if self._driver is None:
         _driver_class = self._driver_class or cfg.CONF.QUOTAS.quota_driver
         if (_driver_class == QUOTA_DB_DRIVER and
                 QUOTA_DB_MODULE not in sys.modules):
             # If quotas table is not loaded, force config quota driver.
             _driver_class = QUOTA_CONF_DRIVER
             LOG.info(_LI("ConfDriver is used as quota_driver because the "
                          "loaded plugin does not support 'quotas' table."))
         if isinstance(_driver_class, six.string_types):
             _driver_class = importutils.import_object(_driver_class)
         if isinstance(_driver_class, ConfDriver):
             versionutils.report_deprecated_feature(
                 LOG,
                 _LW("The quota driver neutron.quota.ConfDriver is "
                     "deprecated as of Liberty. "
                     "neutron.db.quota.driver.DbQuotaDriver should "
                     "be used in its place"))
         self._driver = _driver_class
         LOG.info(_LI("Loaded quota_driver: %s."), _driver_class)
     return self._driver
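
The string-to-instance step above goes through oslo.utils' importutils; for example, with the DbQuotaDriver path mentioned in the deprecation warning:

from oslo_utils import importutils

# The dotted path normally comes from cfg.CONF.QUOTAS.quota_driver.
driver = importutils.import_object('neutron.db.quota.driver.DbQuotaDriver')
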
Example #23
    def treat_devices_added_updated(self, devices_info):
        try:
            macs_list = set([device_info[0] for device_info in devices_info])
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, macs_list, self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get port details for devices "
                      "with MAC addresses %(devices)s: %(e)s",
                      {'devices': macs_list, 'e': e})
            # resync is needed
            return True

        for device_details in devices_details_list:
            device = device_details['device']
            LOG.debug("Port with MAC address %s is added", device)

            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                port_id = device_details['port_id']
                self.mac_to_port_id_mapping[device] = port_id
                profile = device_details['profile']
                spoofcheck = device_details.get('port_security_enabled', True)
                self.treat_device(device,
                                  profile.get('pci_slot'),
                                  device_details['admin_state_up'],
                                  spoofcheck)
                self.ext_manager.handle_port(self.context, device_details)
            else:
                LOG.info(_LI("Device with MAC %s not defined on plugin"),
                         device)
        return False
Example #24
def handle_port_metadata_access(plugin, context, port, is_delete=False):
    if is_user_port(port, check_dev_id=True):
        network_id = port["network_id"]
        network = plugin.get_network(context, network_id)
        if network[external_net.EXTERNAL]:
            LOG.info(_LI("Network %s is external: nothing to do"),
                     network_id)
            return
        subnet_id = port["fixed_ips"][0]['subnet_id']
        host_data = {
            "instance_id": port["device_id"],
            "tenant_id": port["tenant_id"],
            "ip_address": port["fixed_ips"][0]['ip_address']
        }
        LOG.info(_LI("Configuring metadata entry for port %s"), port)
        if not is_delete:
            handler = plugin.lsn_manager.lsn_port_meta_host_add
        else:
            handler = plugin.lsn_manager.lsn_port_meta_host_remove
        try:
            handler(context, network_id, subnet_id, host_data)
        except p_exc.PortConfigurationError:
            with excutils.save_and_reraise_exception():
                if not is_delete:
                    db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                        plugin, context, port['id'])
        LOG.info(_LI("Metadata for port %s configured successfully"),
                 port['id'])
Example #25
    def treat_devices_added_updated(self, devices):
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, devices, self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get port details for devices "
                      "with MAC address %(devices)s: %(e)s",
                      {"devices": devices, "e": e})
            # resync is needed
            return True

        for device_details in devices_details_list:
            device = device_details["device"]
            LOG.debug("Port with MAC address %s is added", device)

            if "port_id" in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {"device": device, "details": device_details})
                profile = device_details["profile"]
                spoofcheck = device_details.get("port_security_enabled", True)
                self.treat_device(device_details["device"],
                                  profile.get("pci_slot"),
                                  device_details["admin_state_up"],
                                  spoofcheck)
                self.ext_manager.handle_port(self.context, device_details)
            else:
                LOG.info(_LI("Device with MAC %s not defined on plugin"), device)
        return False
Example #26
def remove_empty_bridges():
    try:
        interface_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Interface mappings: %s."), interface_mappings)

    try:
        bridge_mappings = n_utils.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error(_LE("Parsing bridge_mappings failed: %s."), e)
        sys.exit(1)
    LOG.info(_LI("Bridge mappings: %s."), bridge_mappings)

    lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
        bridge_mappings, interface_mappings)

    # NOTE(mgagne) Don't remove pre-existing user-defined bridges
    bridge_names = set(lb_manager.get_all_neutron_bridges())
    bridge_names -= set(bridge_mappings.values())

    for bridge_name in bridge_names:
        if lb_manager.get_tap_devices_count(bridge_name):
            continue

        try:
            lb_manager.delete_bridge(bridge_name)
            LOG.info(_LI("Linux bridge %s deleted"), bridge_name)
        except RuntimeError:
            LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name)
    LOG.info(_LI("Linux bridge cleanup completed successfully"))
Example #27
 def _schedule_network(self, context, network_id, dhcp_notifier):
     LOG.info(_LI("Scheduling unhosted network %s"), network_id)
     try:
         # TODO(enikanorov): have to issue redundant db query
         # to satisfy scheduling interface
         network = self.get_network(context, network_id)
         agents = self.schedule_network(context, network)
         if not agents:
             LOG.info(_LI("Failed to schedule network %s, "
                          "no eligible agents or it might be "
                          "already scheduled by another server"),
                      network_id)
             return
         if not dhcp_notifier:
             return
         for agent in agents:
             LOG.info(_LI("Adding network %(net)s to agent "
                          "%(agent)s on host %(host)s"),
                      {'net': network_id,
                       'agent': agent.id,
                       'host': agent.host})
             dhcp_notifier.network_added_to_agent(
                 context, network_id, agent.host)
     except Exception:
         # catching any exception during scheduling
         # so if _schedule_network is invoked in the loop it could
         # continue in any case
         LOG.exception(_LE("Failed to schedule network %s"), network_id)
    def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
                                                       spoofcheck)
            except Exception:
                LOG.warning(_LW("Failed to set spoofcheck for device %s"),
                            device)
            LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
                     {"device": device, "spoofcheck": spoofcheck})

            try:
                self.eswitch_mgr.set_device_state(device, pci_slot,
                                                  admin_state_up)
            except exc.SriovNicError:
                LOG.exception(_LE("Failed to set device %s state"), device)
                return
            if admin_state_up:
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
        else:
            LOG.info(_LI("No device with MAC %s defined on agent."), device)
Example #29
    def _run_openstack_l3_cmds(self, commands, server):
        """Execute/sends a CAPI (Command API) command to EOS.

        In this method, the list of commands is wrapped with prefix and
        postfix commands to make it understandable by EOS.

        :param commands: List of commands to be executed on EOS.
        :param server: Server endpoint on the Arista switch to be configured
        """
        command_start = ['enable', 'configure']
        command_end = ['exit']
        full_command = command_start + commands + command_end

        LOG.info(_LI('Executing command on Arista EOS: %s'), full_command)

        try:
            # this returns array of return values for every command in
            # full_command list
            ret = server.runCmds(version=1, cmds=full_command)
            LOG.info(_LI('Results of execution on Arista EOS: %s'), ret)

        except Exception:
            msg = (_LE("Error occurred while trying to execute "
                     "commands %(cmd)s on EOS %(host)s"),
                   {'cmd': full_command, 'host': server})
            LOG.exception(msg)
            raise arista_exc.AristaServicePluginRpcError(msg=msg)
Example #30
    def _create_resource_instance(self, resource_name, plural_name):
        """Factory function for quota Resource.

        This routine returns a resource instance of the appropriate type
        according to system configuration.

        If QUOTAS.track_quota_usage is True, and there is a model mapping for
        the current resource, this function will return an instance of
        AccountedResource; otherwise an instance of CountableResource.
        """

        if (not cfg.CONF.QUOTAS.track_quota_usage or
            resource_name not in self._tracked_resource_mappings):
            LOG.info(_LI("Creating instance of CountableResource for "
                         "resource:%s"), resource_name)
            return resource.CountableResource(
                resource_name, resource._count_resource,
                'quota_%s' % resource_name)
        else:
            LOG.info(_LI("Creating instance of TrackedResource for "
                         "resource:%s"), resource_name)
            return resource.TrackedResource(
                resource_name,
                self._tracked_resource_mappings[resource_name],
                'quota_%s' % resource_name)
    def create_port_postcommit(self, context):
        """Plug a physical host into a network.

        Send provisioning request to Arista Hardware to plug a host
        into appropriate network.
        """
        port = context.current
        device_id = port['device_id']
        device_owner = port['device_owner']
        host = context.host

        # device_id and device_owner are set on VM boot
        is_vm_boot = device_id and device_owner
        if host and is_vm_boot:
            port_id = port['id']
            port_name = port['name']
            network_id = port['network_id']
            tenant_id = port['tenant_id']
            if not tenant_id:
                tenant_id = context._plugin_context.tenant_id
            with self.eos_sync_lock:
                hostname = self._host_name(host)
                vm_provisioned = db_lib.is_vm_provisioned(
                    device_id, host, port_id, network_id, tenant_id)
                # If network does not exist under this tenant,
                # it may be a shared network. Get shared network owner Id
                net_provisioned = (
                    db_lib.is_network_provisioned(tenant_id, network_id)
                    or self.ndb.get_shared_network_owner_id(network_id))
                if vm_provisioned and net_provisioned:
                    try:
                        self.rpc.plug_port_into_network(
                            device_id, hostname, port_id, network_id,
                            tenant_id, port_name, device_owner)
                    except arista_exc.AristaRpcError:
                        LOG.info(EOS_UNREACHABLE_MSG)
                        raise ml2_exc.MechanismDriverError()
                else:
                    LOG.info(
                        _LI('VM %s is not created as it is not found in '
                            'Arista DB'), device_id)
Example #32
    def update_port_postcommit(self, context):
        """Update the name of a given port in EOS.

        At the moment we only support port name change
        Any other change to port is not supported at this time.
        """
        port = context.current
        orig_port = context.original
        if port['name'] == orig_port['name']:
            # nothing to do
            return

        device_id = port['device_id']
        device_owner = port['device_owner']
        host = context.host
        is_vm_boot = device_id and device_owner

        if host and is_vm_boot:
            port_id = port['id']
            port_name = port['name']
            network_id = port['network_id']
            tenant_id = port['tenant_id']
            with self.eos_sync_lock:
                hostname = self._host_name(host)
                segmentation_id = db.get_segmentation_id(tenant_id, network_id)
                vm_provisioned = db.is_vm_provisioned(device_id, host, port_id,
                                                      network_id, tenant_id)
                net_provisioned = db.is_network_provisioned(
                    tenant_id, network_id, segmentation_id)
                if vm_provisioned and net_provisioned:
                    try:
                        self.rpc.plug_port_into_network(
                            device_id, hostname, port_id, network_id,
                            tenant_id, port_name, device_owner)
                    except arista_exc.AristaRpcError:
                        LOG.info(EOS_UNREACHABLE_MSG)
                        raise ml2_exc.MechanismDriverError()
                else:
                    LOG.info(
                        _LI('VM %s is not updated as it is not found in '
                            'Arista DB'), device_id)
Example #33
    def _load_service_plugins(self):
        """Loads service plugins.

        Starts from the core plugin and checks if it supports
        advanced services then loads classes provided in configuration.
        """
        # load services from the core plugin first
        self._load_services_from_core_plugin()

        plugin_providers = cfg.CONF.service_plugins
        LOG.debug("Loading service plugins: %s", plugin_providers)
        for provider in plugin_providers:
            if provider == '':
                continue

            LOG.info(_LI("Loading Plugin: %s"), provider)
            plugin_inst = self._get_plugin_instance('neutron.service_plugins',
                                                    provider)

            # only one implementation of svc_type allowed
            # specifying more than one plugin
            # for the same type is a fatal exception
            if plugin_inst.get_plugin_type() in self.service_plugins:
                raise ValueError(
                    _("Multiple plugins for service "
                      "%s were configured") % plugin_inst.get_plugin_type())

            self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst

            # search for possible agent notifiers declared in service plugin
            # (needed by agent management extension)
            if (hasattr(self.plugin, 'agent_notifiers')
                    and hasattr(plugin_inst, 'agent_notifiers')):
                self.plugin.agent_notifiers.update(plugin_inst.agent_notifiers)

            LOG.debug(
                "Successfully loaded %(type)s plugin. "
                "Description: %(desc)s", {
                    "type": plugin_inst.get_plugin_type(),
                    "desc": plugin_inst.get_plugin_description()
                })
Example #34
    def create_fip_agent_gw_port_if_not_exists(self, context, network_id,
                                               host):
        """Function to return the FIP Agent GW port.

        This function will create a FIP Agent GW port
        if required. If the port already exists, it
        will return the existing port and will not
        create a new one.
        """
        l3_agent_db = self._get_agent_by_type_and_host(context,
                                                       l3_const.AGENT_TYPE_L3,
                                                       host)
        if l3_agent_db:
            LOG.debug("Agent ID exists: %s", l3_agent_db['id'])
            f_port = self.get_agent_gw_ports_exist_for_network(
                context, network_id, host, l3_agent_db['id'])
            if not f_port:
                LOG.info(
                    _LI('Agent Gateway port does not exist,'
                        ' so create one: %s'), f_port)
                agent_port = self._core_plugin.create_port(
                    context, {
                        'port': {
                            'tenant_id': '',
                            'network_id': network_id,
                            'mac_address': attributes.ATTR_NOT_SPECIFIED,
                            'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                            'device_id': l3_agent_db['id'],
                            'device_owner': DEVICE_OWNER_AGENT_GW,
                            'admin_state_up': True,
                            'name': ''
                        }
                    })
                if agent_port:
                    self._populate_subnet_for_ports(context, [agent_port])
                    return agent_port
                msg = _("Unable to create the Agent Gateway Port")
                raise n_exc.BadRequest(resource='router', msg=msg)
            else:
                self._populate_subnet_for_ports(context, [f_port])
                return f_port
Example #35
 def _create_sub_interface(self, ri, port, is_external=False, gw_ip=""):
     vlan = self._get_interface_vlan_from_hosting_port(port)
     if (self._fullsync and
             int(vlan) in self._existing_cfg_dict['interfaces']):
         LOG.info(_LI("Sub-interface already exists, skipping"))
         return
     vrf_name = self._get_vrf_name(ri)
     net_mask = netaddr.IPNetwork(port['ip_cidr']).netmask
     hsrp_ip = port['fixed_ips'][0]['ip_address']
     sub_interface = self._get_interface_name_from_hosting_port(port)
     self._do_create_sub_interface(sub_interface, vlan, vrf_name, hsrp_ip,
                                   net_mask, is_external)
     # Always do HSRP
     if ri.router.get(ha.ENABLED, False):
         if port.get(ha.HA_INFO) is not None:
             self._add_ha_hsrp(ri, port)
         else:
             # We are missing HA data, candidate for retrying
             params = {'r_id': ri.router_id, 'p_id': port['id'],
                       'port': port}
             raise cfg_exc.HAParamsMissingException(**params)
    def setup_physical_interfaces(self, interface_mappings):
        '''Sets up the physical network interfaces.

        Link physical interfaces to the integration bridge.
        :param interface_mappings: map physical net names to interface names.
        '''

        for physical_network, interface in interface_mappings.items():
            LOG.info(_LI("Mapping physical network %(physical_network)s to "
                         "interface %(interface)s"),
                     {'physical_network': physical_network,
                      'interface': interface})
            # Connect the physical interface to the bridge
            if not ip_lib.device_exists(interface, self.root_helper):
                LOG.error(_LE("Interface %(interface)s for physical network "
                              "%(physical_network)s does not exist. Agent "
                              "terminated!"),
                          {'physical_network': physical_network,
                           'interface': interface})
                raise SystemExit(1)
            self.int_br.add_port(interface)
Example #37
 def schedule_snat_router(self, context, router_id, sync_router):
     """Schedule the snat router on l3 service agent."""
     active_l3_agents = self.get_l3_agents(context, active=True)
     if not active_l3_agents:
         LOG.warn(_LW('No active L3 agents found for SNAT'))
         return
     snat_candidates = self.get_snat_candidates(sync_router,
                                                active_l3_agents)
     if not snat_candidates:
         LOG.warn(_LW('No candidates found for SNAT'))
         return
     else:
         try:
             chosen_agent = self.bind_snat_servicenode(
                 context, router_id, snat_candidates)
         except db_exc.DBDuplicateEntry:
             LOG.info(_LI("SNAT already bound to a service node."))
             return
         self.bind_dvr_router_servicenode(
             context, router_id, chosen_agent)
         return chosen_agent
 def update_network_postcommit(self, context):
     """Send updated network parameters to the VSM."""
     updated_network = context.current
     old_network = context.original
     segment = context.network_segments[0]
     network_type = segment['network_type']
     # NoOp for unsupported network types
     if not self._is_segment_valid_for_n1kv(segment['segmentation_id'],
                                            network_type):
         return
     modifiable_vals = ['name', 'shared']
     # Perform network update on VSM only if a modifiable value changed.
     if any(updated_network[val] != old_network[val]
            for val in modifiable_vals):
         try:
             self.n1kvclient.update_network_segment(updated_network)
         except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
             raise ml2_exc.MechanismDriverError()
     LOG.info(_LI("Update network(postcommit) succeeded for network: %s"),
              old_network['id'])
Example #39
 def _schedule_bind_network(self, context, agents, network_id):
     for agent in agents:
         context.session.begin(subtransactions=True)
         try:
             binding = agentschedulers_db.NetworkDhcpAgentBinding()
             binding.dhcp_agent = agent
             binding.network_id = network_id
             context.session.add(binding)
             # try to actually write the changes and catch integrity
             # DBDuplicateEntry
             context.session.commit()
         except db_exc.DBDuplicateEntry:
             # it's totally ok, someone just did our job!
             context.session.rollback()
             LOG.info(_LI('Agent %s already present'), agent)
         LOG.debug(
             'Network %(network_id)s is scheduled to be '
             'hosted by DHCP agent %(agent_id)s', {
                 'network_id': network_id,
                 'agent_id': agent
             })
 def delete_network_postcommit(self, context):
     """Send network delete request to the VSM."""
     network = context.current
     segment = context.network_segments[0]
     network_type = segment['network_type']
     # NoOp for unsupported network types
     if not self._is_segment_valid_for_n1kv(segment['segmentation_id'],
                                            network_type):
         return
     try:
         self.n1kvclient.delete_network_segment(network['id'], network_type)
     except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
         raise ml2_exc.MechanismDriverError()
     LOG.info(
         _LI("Delete network(postcommit) succeeded for network: "
             "%(network_id)s of type: %(network_type)s with segment "
             "ID: %(segment_id)s"), {
                 "network_id": network['id'],
                 "network_type": network_type,
                 "segment_id": segment['segmentation_id']
             })
Example #41
    def delete_port_postcommit(self, mech_context):
        """Dissociate MAC address from the portprofile."""

        LOG.debug("delete_port_postcommit: called")
        port = mech_context.current
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']

        context = mech_context._plugin_context

        self._dissociate_mac_from_net(context, network_id, port['mac_address'],
                                      "delete_port_postcommit")

        LOG.info(
            _LI("delete port (postcommit): port_id=%(port_id)s"
                " network_id=%(network_id)s tenant_id=%(tenant_id)s"), {
                    'port_id': port_id,
                    'network_id': network_id,
                    'tenant_id': tenant_id
                })
Example #42
    def _update_metadata_proxy(self, router_id, state):
        try:
            ri = self.router_info[router_id]
        except AttributeError:
            LOG.info(
                _LI('Router %s is not managed by this agent. It was '
                    'possibly deleted concurrently.'), router_id)
            return

        if state == 'master':
            LOG.debug('Spawning metadata proxy for router %s', router_id)
            self.metadata_driver.spawn_monitored_metadata_proxy(
                self.process_monitor,
                ri.ns_name,
                self.conf.metadata_port,
                self.conf,
                router_id=ri.router_id)
        else:
            LOG.debug('Closing metadata proxy for router %s', router_id)
            self.metadata_driver.destroy_monitored_metadata_proxy(
                self.process_monitor, ri.router_id, ri.ns_name, self.conf)
Example #43
 def treat_device(self, device, pci_slot, admin_state_up):
     if self.eswitch_mgr.device_exists(device, pci_slot):
         try:
             self.eswitch_mgr.set_device_state(device, pci_slot,
                                               admin_state_up)
         except exc.SriovNicError:
             LOG.exception(_LE("Failed to set device %s state"), device)
             return
         if admin_state_up:
             # update plugin about port status
             self.plugin_rpc.update_device_up(self.context,
                                              device,
                                              self.agent_id,
                                              cfg.CONF.host)
         else:
             self.plugin_rpc.update_device_down(self.context,
                                                device,
                                                self.agent_id,
                                                cfg.CONF.host)
     else:
         LOG.info(_LI("No device with MAC %s defined on agent."), device)
Example #44
 def _call_driver_operation(self,
                            context,
                            driver_method,
                            db_entity,
                            old_db_entity=None):
     manager_method = "%s.%s" % (driver_method.__self__.__class__.__name__,
                                 driver_method.__name__)
     LOG.info(_LI("Calling driver operation %s") % manager_method)
     try:
         if old_db_entity:
             driver_method(context, old_db_entity, db_entity)
         else:
             driver_method(context, db_entity)
     # catching and reraising agent issues
     except (lbaas_agentschedulerv2.NoEligibleLbaasAgent,
             lbaas_agentschedulerv2.NoActiveLbaasAgent) as no_agent:
         raise no_agent
     except Exception:
         LOG.exception(_LE("There was an error in the driver"))
         self._handle_driver_error(context, db_entity)
         raise loadbalancerv2.DriverError()
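The "Class.method" label above comes from standard bound-method introspection; a tiny self-contained illustration (LoadBalancerManager and create are made-up names):

class LoadBalancerManager(object):
    def create(self, context, db_entity):
        pass

driver_method = LoadBalancerManager().create
# __self__ is the bound instance; its class name plus the function name
# reproduce the manager_method string built above.
manager_method = "%s.%s" % (driver_method.__self__.__class__.__name__,
                            driver_method.__name__)
print(manager_method)  # LoadBalancerManager.create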
Example #45
    def create_network(self, network, segment):
        name = self._get_net_name(self.dvs_name, network)
        blocked = not network['admin_state_up']

        try:
            pg_spec = self._build_pg_create_spec(name,
                                                 segment['segmentation_id'],
                                                 blocked)
            pg_create_task = self._dvs.CreateDVPortgroup_Task(spec=pg_spec)

            result = wait_for_task(pg_create_task, si=self.connection)
        except vim.fault.VimFault as e:
            raise exceptions.wrap_wmvare_vim_exception(e)
        else:
            pg = result.result
            self._port_groups_by_name[name] = pg
            LOG.info(_LI('Network %(name)s created \n%(pg_ref)s'), {
                'name': name,
                'pg_ref': pg
            })
            return pg
Example #46
    def update_network_postcommit(self, context):
        """Send updated network parameters to the VSM."""
        updated_network = context.current
        old_network = context.original
        segment = context.network_segments[0]
        network_type = segment['network_type']
        # NoOp for unsupported network types
        if not self._is_segment_valid_for_n1kv(segment['segmentation_id'],
                                               network_type):
            return
        # Perform network update on VSM in case of network name change only.
        if updated_network['name'] != old_network['name']:
            try:
                self.n1kvclient.update_network_segment(updated_network)
            except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed) as e:
                with excutils.save_and_reraise_exception(reraise=False):
                    LOG.info(e.message)
                    raise ml2_exc.MechanismDriverError()
        LOG.info(_LI("Update network (postcommit) succeeded for network: %s"),
                 old_network['id'])
Example #47
    def check_vnic_type_and_vendor_info(self, vnic_type, profile):
        """Checks if this vnic_type and vendor device info are supported.

        Returns True if:
        1. the port vnic_type is direct or macvtap and
        2. the vendor_id and product_id of the port is supported by
        this MD
        Useful in determining if this MD should bind the current
        port.
        """
        # Check for vnic_type
        if vnic_type not in self.supported_sriov_vnic_types:
            LOG.info(_LI('Non SR-IOV vnic_type: %s.'), vnic_type)
            return False

        if not profile:
            LOG.debug("Port binding missing profile info")
            return False

        # Check for vendor_info
        return self._check_for_supported_vendor(profile)
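For context, a hedged sketch of the vendor check this method delegates to; the 'pci_vendor_info' key and the "vendor_id:product_id" format follow the SR-IOV binding-profile convention, and the supported list below is illustrative:

# Hypothetical vendor whitelist; real drivers load this from config.
SUPPORTED_PCI_VENDOR_INFO = ['15b3:1004', '8086:10ed']

def _check_for_supported_vendor(profile):
    # profile is the port's binding:profile dict supplied by the compute layer
    vendor_info = profile.get('pci_vendor_info')
    if not vendor_info:
        return False
    return vendor_info in SUPPORTED_PCI_VENDOR_INFO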
Example #48
    def _check_response(rpc_obj, snippet_name):
        """This function checks the rpc response object for status.

        This function takes as input the response rpc_obj and the snippet name
        that was executed. It parses it to see, if the last edit operation was
        a success or not.
            <?xml version="1.0" encoding="UTF-8"?>
            <rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
                       xmlns="urn:ietf:params:netconf:base:1.0">
                <ok />
            </rpc-reply>
        In case of error, CSR1kv sends a response as follows.
        We take the error type and tag.
            <?xml version="1.0" encoding="UTF-8"?>
            <rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
            xmlns="urn:ietf:params:netconf:base:1.0">
                <rpc-error>
                    <error-type>protocol</error-type>
                    <error-tag>operation-failed</error-tag>
                    <error-severity>error</error-severity>
                </rpc-error>
            </rpc-reply>
        :return: True if the config operation completed successfully
        :raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.
        CSR1kvConfigException
        """
        LOG.debug("RPCReply for %(snippet_name)s is %(rpc_obj)s", {
            'snippet_name': snippet_name,
            'rpc_obj': rpc_obj.xml
        })
        xml_str = rpc_obj.xml
        if "<ok />" in xml_str:
            LOG.debug("RPCReply for %s is OK", snippet_name)
            LOG.info(_LI("%s successfully executed"), snippet_name)
            return True
        # Not OK; raise a CSR1kvConfigException
        e_type = rpc_obj._root[0][0].text
        e_tag = rpc_obj._root[0][1].text
        params = {'snippet': snippet_name, 'type': e_type, 'tag': e_tag}
        raise cfg_exc.CSR1kvConfigException(**params)
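A standalone sketch of pulling <error-type> and <error-tag> out of the rpc-error reply shown in the docstring, using only the stdlib (the snippet above reaches into rpc_obj._root instead):

import xml.etree.ElementTree as ET

reply = (
    '<rpc-reply message-id="1" xmlns="urn:ietf:params:netconf:base:1.0">'
    '<rpc-error>'
    '<error-type>protocol</error-type>'
    '<error-tag>operation-failed</error-tag>'
    '<error-severity>error</error-severity>'
    '</rpc-error>'
    '</rpc-reply>')

NS = '{urn:ietf:params:netconf:base:1.0}'
error = ET.fromstring(reply).find(NS + 'rpc-error')
print(error.find(NS + 'error-type').text)  # protocol
print(error.find(NS + 'error-tag').text)   # operation-failed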
Example #49
    def _validate_subnet_cidr(self, context, network, new_subnet_cidr):
        """Validate the CIDR for a subnet.

        Verifies the specified CIDR does not overlap with the ones defined
        for the other subnets specified for this network, or with any other
        CIDR if overlapping IPs are disabled.
        """
        new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])
        # Disallow subnets with prefix length 0 as they will lead to
        # dnsmasq failures (see bug 1362651).
        # This is not a discrimination against /0 subnets.
        # A /0 subnet is conceptually possible but hardly a practical
        # scenario for neutron's use cases.
        for cidr in new_subnet_ipset.iter_cidrs():
            if cidr.prefixlen == 0:
                err_msg = _("0 is not allowed as CIDR prefix length")
                raise n_exc.InvalidInput(error_message=err_msg)

        if cfg.CONF.allow_overlapping_ips:
            subnet_list = network.subnets
        else:
            subnet_list = self._get_all_subnets(context)
        for subnet in subnet_list:
            if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset):
                # don't give out details of the overlapping subnet
                err_msg = (_("Requested subnet with cidr: %(cidr)s for "
                             "network: %(network_id)s overlaps with another "
                             "subnet") % {
                                 'cidr': new_subnet_cidr,
                                 'network_id': network.id
                             })
                LOG.info(
                    _LI("Validation for CIDR: %(new_cidr)s failed - "
                        "overlaps with subnet %(subnet_id)s "
                        "(CIDR: %(cidr)s)"), {
                            'new_cidr': new_subnet_cidr,
                            'subnet_id': subnet.id,
                            'cidr': subnet.cidr
                        })
                raise n_exc.InvalidInput(error_message=err_msg)
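The overlap test reduces to an IPSet intersection; a quick netaddr illustration:

import netaddr

existing = netaddr.IPSet(['10.0.0.0/24'])
# A /25 inside the existing /24 intersects -> would be rejected.
print(bool(existing & netaddr.IPSet(['10.0.0.128/25'])))  # True
# A disjoint /24 does not -> would be accepted.
print(bool(existing & netaddr.IPSet(['10.0.1.0/24'])))    # False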
Example #50
    def _set_nat_pool(self, ri, gw_port, is_delete):
        vrf_name = self._get_vrf_name(ri)
        # TODO(sridar) reverting to old model, needs more investigation
        # and cleanup
        pool_ip = gw_port['fixed_ips'][0]['ip_address']
        pool_ip_prefix_len = gw_port['fixed_ips'][0]['prefixlen']
        # pool_info = gw_port['nat_pool_info']
        # pool_ip = pool_info['pool_ip']
        pool_name = "%s_nat_pool" % (vrf_name)
        #pool_net = netaddr.IPNetwork(pool_info['pool_cidr'])
        #pool_net = netaddr.IPNetwork(gw_port['ip_cidr'])
        pool_net = "%s/%s" % (pool_ip, pool_ip_prefix_len)
        pool_net = netaddr.IPNetwork(pool_net)
        if self._fullsync and pool_ip in self._existing_cfg_dict['pools']:
            LOG.info(_LI("Pool already exists, skipping"))
            return

        #LOG.debug("SET_NAT_POOL pool netmask: %s, gw_port %s" % (
        # pool_net.netmask, gw_port))
        try:
            if is_delete:
                conf_str = asr1k_snippets.DELETE_NAT_POOL % (
                    pool_name, pool_ip, pool_ip, pool_net.netmask)
                #self._edit_running_config(conf_str, '%s DELETE_NAT_POOL' %
                #                          self.target_asr['name'])
                # TODO(update so that hosting device name is passed down)
                self._edit_running_config(conf_str, 'DELETE_NAT_POOL')

            else:
                conf_str = asr1k_snippets.CREATE_NAT_POOL % (
                    pool_name, pool_ip, pool_ip, pool_net.netmask)
                #self._edit_running_config(conf_str, '%s CREATE_NAT_POOL' %
                #                          self.target_asr['name'])
                # TODO(update so that hosting device name is passed down)
                self._edit_running_config(conf_str, 'CREATE_NAT_POOL')
        #except cfg_exc.CSR1kvConfigException as cse:
        except Exception as cse:
            LOG.error(
                _LE("Temporarily disabled NAT_POOL exception handling: "
                    "%s"), cse)
Example #51
def set_rules(policies, overwrite=True):
    """Set rules based on the provided dict of rules.

    :param policies: New policies to use. It should be an instance of dict.
    :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.
    """

    LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
    # Ensure backward compatibility with folsom/grizzly convention
    # for extension rules
    for pol in list(policies.keys()):
        if any([pol.startswith(depr_pol) for depr_pol in
                DEPRECATED_POLICY_MAP.keys()]):
            LOG.warn(_LW("Found deprecated policy rule:%s. Please consider "
                         "upgrading your policy configuration file"), pol)
            pol_name, action = pol.rsplit(':', 1)
            try:
                new_actions = DEPRECATED_ACTION_MAP[action]
                new_policies = DEPRECATED_POLICY_MAP[pol_name]
                # bind new actions and policies together
                for actual_policy in ['_'.join(item) for item in
                                      itertools.product(new_actions,
                                                        new_policies)]:
                    if actual_policy not in policies:
                        # New policy, same rule
                        LOG.info(_LI("Inserting policy:%(new_policy)s in "
                                     "place of deprecated "
                                     "policy:%(old_policy)s"),
                                 {'new_policy': actual_policy,
                                  'old_policy': pol})
                        policies[actual_policy] = policies[pol]
                # Remove old-style policy
                del policies[pol]
            except KeyError:
                LOG.error(_LE("Backward compatibility unavailable for "
                              "deprecated policy %s. The policy will "
                              "not be enforced"), pol)
    init()
    _ENFORCER.set_rules(policies, overwrite)
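The deprecated-policy expansion pairs every new action with every new policy name via itertools.product; for instance (made-up mapping values):

import itertools

new_actions = ['get']
new_policies = ['network', 'networks']
print(['_'.join(item)
       for item in itertools.product(new_actions, new_policies)])
# ['get_network', 'get_networks']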
Example #52
    def supported_qos_rule_types(self):
        if not self.ordered_mech_drivers:
            return []

        rule_types = set(qos_consts.VALID_RULE_TYPES)
        binding_driver_found = False

        # Recalculate on every call to allow drivers determine supported rule
        # types dynamically
        for driver in self.ordered_mech_drivers:
            driver_obj = driver.obj
            if driver_obj._supports_port_binding:
                binding_driver_found = True
                if hasattr(driver_obj, 'supported_qos_rule_types'):
                    new_rule_types = \
                        rule_types & set(driver_obj.supported_qos_rule_types)
                    dropped_rule_types = rule_types - new_rule_types
                    if dropped_rule_types:
                        LOG.info(
                            _LI("%(rule_types)s rule types disabled for ml2 "
                                "because %(driver)s does not support them"),
                            {'rule_types': ', '.join(dropped_rule_types),
                             'driver': driver.name})
                    rule_types = new_rule_types
                else:
                    # at least one of drivers does not support QoS, meaning
                    # there are no rule types supported by all of them
                    LOG.warn(
                        _LW("%s does not support QoS; "
                            "no rule types available"),
                        driver.name)
                    return []

        if binding_driver_found:
            rule_types = list(rule_types)
        else:
            rule_types = []
        LOG.debug("Supported QoS rule types "
                  "(common subset for all mech drivers): %s", rule_types)
        return rule_types
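The common-subset computation is plain set intersection across drivers; a minimal model (rule-type names are assumptions):

VALID_RULE_TYPES = {'bandwidth_limit', 'dscp_marking'}  # assumed names

driver_support = [
    {'bandwidth_limit', 'dscp_marking'},  # driver A
    {'bandwidth_limit'},                  # driver B
]
rule_types = set(VALID_RULE_TYPES)
for supported in driver_support:
    rule_types &= supported
print(rule_types)  # {'bandwidth_limit'} -- only what everyone supports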
Example #53
    def acquire_connection(self, auto_login=True, headers=None, rid=-1):
        '''Check out an available HTTPConnection instance.

        Blocks until a connection is available.
        :param auto_login: automatically log in before returning the conn
        :param headers: headers to pass on to the login attempt
        :param rid: request id passed in from request eventlet.
        :returns: An available HTTPConnection instance or None if no
                 api_providers are configured.
        '''
        if not self._api_providers:
            LOG.warn(_LW("[%d] no API providers currently available."), rid)
            return None
        if self._conn_pool.empty():
            LOG.debug("[%d] Waiting to acquire API client connection.", rid)
        priority, conn = self._conn_pool.get()
        now = time.time()
        if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
            LOG.info(
                _LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
                    "seconds; reconnecting."), {
                        'rid': rid,
                        'conn': api_client.ctrl_conn_to_str(conn),
                        'sec': now - conn.last_used
                    })
            conn = self._create_connection(*self._conn_params(conn))

        conn.last_used = now
        conn.priority = priority  # stash current priority for release
        qsize = self._conn_pool.qsize()
        LOG.debug(
            "[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
            "connection(s) available.", {
                'rid': rid,
                'conn': api_client.ctrl_conn_to_str(conn),
                'qsize': qsize
            })
        if auto_login and self.auth_cookie(conn) is None:
            self._wait_for_login(conn, headers)
        return conn
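A minimal, self-contained model of the pool behaviour above: connections wait in a priority heap and stale ones are replaced on checkout. Conn, the 60-second timeout and the acquire/release names are illustrative, not the client's API:

import heapq
import itertools
import time

CONN_IDLE_TIMEOUT = 60  # stand-in for cfg.CONF.conn_idle_timeout (seconds)
_tiebreak = itertools.count()  # keeps the heap from comparing Conn objects

class Conn(object):
    def __init__(self, name):
        self.name = name
        self.last_used = time.time()

pool = [(1, next(_tiebreak), Conn('ctrl-a'))]
heapq.heapify(pool)

def acquire():
    priority, _, conn = heapq.heappop(pool)
    now = time.time()
    if now - conn.last_used > CONN_IDLE_TIMEOUT:
        conn = Conn(conn.name)  # idle too long: reconnect before handing out
    conn.last_used = now
    conn.priority = priority  # stashed so release() can requeue it
    return conn

def release(conn):
    heapq.heappush(pool, (conn.priority, next(_tiebreak), conn))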
Example #54
    def _bound_ports(self, dvs, succeeded_keys, failed_keys):
        LOG.info(_LI("_bound_ports(%(succeeded)s, %(failed)s)"),
                 {'succeeded': succeeded_keys, 'failed': failed_keys})
        port_up_ids = []
        port_down_ids = []

        now = None
        with timeutils.StopWatch() as w:
            for port_key in succeeded_keys:
                port = dvs.ports_by_key.get(port_key, None)
                if not port:
                    LOG.debug(
                        "Port with key {} has already been removed.".format(
                            port_key))
                    continue
                port_id = port["port_id"]
                self.unbound_ports.pop(port_id, None)
                if port["admin_state_up"]:
                    port_up_ids.append(port_id)
                else:
                    port_down_ids.append(port_id)

                port_desc = port.get('port_desc', None)
                if not port_desc:
                    continue
                if port_desc.connected_since:
                    now = now or timeutils.utcnow()
                    stats.timing('networking_dvs.ports.bound',
                                 now - port_desc.connected_since)
                if port_desc.firewall_end:
                    stats.timing('networking_dvs.ports.reassigned',
                                 port_desc.firewall_end)

        if failed_keys:
            stats.increment('networking_dvs.ports.bound.failures',
                            len(failed_keys))

        if port_up_ids or port_down_ids:
            self.pool.spawn(self._update_device_list, port_down_ids,
                            port_up_ids)
Example #55
    def update_agent_db(self, agent_info):
        session = db.get_session()
        host = agent_info.get('host')
        with session.begin(subtransactions=True):
            try:
                # Check if entry exists.
                session.query(DfaAgentsDb).filter_by(host=host).one()

                # Entry exist, only update the heartbeat and configurations.
                session.query(DfaAgentsDb).filter_by(host=host).update(
                    {'heartbeat': agent_info.get('timestamp')})
            except orm_exc.NoResultFound:
                LOG.info(_LI('Creating new entry for agent on %(host)s.'),
                         {'host': host})
                agent = DfaAgentsDb(host=host,
                                    created=agent_info.get('timestamp'),
                                    heartbeat=agent_info.get('timestamp'),
                                    configurations=agent_info.get('config'))
                session.add(agent)
            except orm_exc.MultipleResultsFound:
                LOG.error(_LE('More than one entry found for agent %(host)s.'),
                          {'host': host})
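The one()/NoResultFound dance is the classic SQLAlchemy update-or-insert pattern; schematically (DfaAgentsDb and the session are assumed to exist as in the snippet above):

from sqlalchemy.orm import exc as orm_exc

def upsert_agent(session, host, timestamp):
    with session.begin(subtransactions=True):
        try:
            agent = session.query(DfaAgentsDb).filter_by(host=host).one()
            agent.heartbeat = timestamp  # row exists: refresh heartbeat
        except orm_exc.NoResultFound:
            session.add(DfaAgentsDb(host=host, created=timestamp,
                                    heartbeat=timestamp))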
Example #56
    def _fetch_nsx_data_chunk(self, sp):
        base_chunk_size = sp.chunk_size
        chunk_size = base_chunk_size + sp.extra_chunk_size
        LOG.info(_LI("Fetching up to %s resources "
                     "from NSX backend"), chunk_size)
        fetched = ls_count = lr_count = lp_count = 0
        lswitches = lrouters = lswitchports = []
        if sp.ls_cursor or sp.ls_cursor == 'start':
            (lswitches, sp.ls_cursor,
             ls_count) = self._fetch_data(self.LS_URI, sp.ls_cursor,
                                          chunk_size)
            fetched = len(lswitches)
        if (fetched < chunk_size and sp.lr_cursor) or sp.lr_cursor == 'start':
            (lrouters, sp.lr_cursor,
             lr_count) = self._fetch_data(self.LR_URI, sp.lr_cursor,
                                          max(chunk_size - fetched, 0))
        fetched += len(lrouters)
        if (fetched < chunk_size and sp.lp_cursor) or sp.lp_cursor == 'start':
            (lswitchports, sp.lp_cursor,
             lp_count) = self._fetch_data(self.LP_URI, sp.lp_cursor,
                                          max(chunk_size - fetched, 0))
        fetched += len(lswitchports)
        if sp.current_chunk == 0:
            # No cursors were provided. Then it must be possible to
            # calculate the total amount of data to fetch
            sp.total_size = ls_count + lr_count + lp_count
        LOG.debug("Total data size: %d", sp.total_size)
        sp.chunk_size = self._get_chunk_size(sp)
        # Calculate chunk size adjustment
        sp.extra_chunk_size = sp.chunk_size - base_chunk_size
        LOG.debug(
            "Fetched %(num_lswitches)d logical switches, "
            "%(num_lswitchports)d logical switch ports, "
            "%(num_lrouters)d logical routers", {
                'num_lswitches': len(lswitches),
                'num_lswitchports': len(lswitchports),
                'num_lrouters': len(lrouters)
            })
        return (lswitches, lrouters, lswitchports)
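The cursor convention ('start' means begin paging, a falsy cursor means done) generalizes to this loop; fetch_page is a hypothetical stand-in for self._fetch_data:

def fetch_all(fetch_page, chunk_size):
    items = []
    cursor = 'start'
    while cursor:
        # fetch_page mirrors _fetch_data: (results, next_cursor, total)
        page, cursor, _total = fetch_page(cursor, chunk_size)
        items.extend(page)
    return items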
Example #57
    def sdnve_check_and_create_tenant(self, os_tenant_id, network_type=None):

        if not os_tenant_id:
            return
        tenant_id, tenant_type = self.sdnve_get_tenant_byid(os_tenant_id)
        if tenant_id:
            if not network_type:
                return tenant_id
            if tenant_type != network_type:
                LOG.info(
                    _LI("Non matching tenant and network types: "
                        "%(ttype)s %(ntype)s"), {
                            'ttype': tenant_type,
                            'ntype': network_type
                        })
                return
            return tenant_id

        # Have to create a new tenant
        sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id)
        if not network_type:
            network_type = self.keystoneclient.get_tenant_type(os_tenant_id)
        if network_type == constants.TENANT_TYPE_OVERLAY:
            network_type = SDNVE_TENANT_TYPE_OVERLAY

        pinn_desc = ("Created by SDN-VE Neutron Plugin, OS project name = " +
                     self.keystoneclient.get_tenant_name(os_tenant_id))

        res, content = self.sdnve_create(
            'tenant', {
                'id': sdnve_tenant_id,
                'name': os_tenant_id,
                'network_type': network_type,
                'description': pinn_desc
            })
        if res not in constants.HTTP_ACCEPTABLE:
            return

        return sdnve_tenant_id
Example #58
    def _run(self, config):
        LOG.info(_LI("Monitor running... "))
        try:
            self.connection = _create_session(config)
            connection = self.connection
            vim = connection.vim
            builder = SpecBuilder(vim.client.factory)

            version = None
            wait_options = builder.wait_options(60, 20)

            self.property_collector = self._create_property_collector()
            self._create_property_filter(self.property_collector, config)

            while not self._quit_event.ready():
                result = connection.invoke_api(
                    vim, 'WaitForUpdatesEx', self.property_collector,
                    version=version, options=wait_options)
                self.iteration += 1
                if result:
                    version = result.version
                    if result.filterSet and result.filterSet[0].objectSet:
                        now = utcnow()
                        for update in result.filterSet[0].objectSet:
                            if update.obj._type == 'VirtualMachine':
                                self._handle_virtual_machine(update, now)

                for port_desc in self.changed:
                    self._put(self.queue, port_desc)
                self.changed.clear()

                now = utcnow()
                for mac, (when, port_desc, iteration) in six.iteritems(
                        self.down_ports):
                    if (port_desc.status != 'untried'
                            or 0 == self.iteration - iteration):
                        LOG.debug("Down: {} {} for {} {} {}".format(
                            mac, port_desc.port_key,
                            self.iteration - iteration,
                            (now - when).total_seconds(), port_desc.status))
        except RequestCanceledException:
            # If the event is set, the request was canceled in self.stop()
            if not self._quit_event.ready():
                LOG.info(_LI("Waiting for updates was cancelled unexpectedly"))
                raise  # This kills the whole process; we start from scratch
Example #59
    def prepare_devices_filter(self, device_ids):
        if not device_ids:
            return
        LOG.info(_LI("Preparing filters for devices %s"), device_ids)
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, list(device_ids))
            devices = devices_info['devices']
            security_groups = devices_info['security_groups']
            security_group_member_ips = devices_info['sg_member_ips']
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, list(device_ids))

        with self.firewall.defer_apply():
            for device in devices.values():
                self.firewall.prepare_port_filter(device)
            if self.use_enhanced_rpc:
                LOG.debug("Update security group information for ports %s",
                          devices.keys())
                self._update_security_group_info(security_groups,
                                                 security_group_member_ips)
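defer_apply() batches the per-port changes into one firewall sync; the pattern is an ordinary context manager, sketched here with an invented SketchFirewall rather than the agent's real driver:

import contextlib

class SketchFirewall(object):
    def __init__(self):
        self.pending = []

    def prepare_port_filter(self, device):
        self.pending.append(device)  # queued, not applied yet

    @contextlib.contextmanager
    def defer_apply(self):
        yield
        # flush everything queued inside the with-block in one pass
        print('applying %d port filters' % len(self.pending))
        self.pending = []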
Example #60
    def _validate_router_migration(self, context, router_db, router_res):
        """Allow centralized -> distributed state transition only."""
        if (router_db.extra_attributes.distributed
                and router_res.get('distributed') is False):
            LOG.info(
                _LI("Centralizing distributed router %s "
                    "is not supported"), router_db['id'])
            raise NotImplementedError()
        elif (not router_db.extra_attributes.distributed
              and router_res.get('distributed')):
            # Notify advanced services of the imminent state transition
            # for the router.
            try:
                kwargs = {'context': context, 'router': router_db}
                registry.notify(resources.ROUTER, events.BEFORE_UPDATE, self,
                                **kwargs)
            except exceptions.CallbackFailure as e:
                with excutils.save_and_reraise_exception():
                    # NOTE(armax): preserve old check's behavior
                    if len(e.errors) == 1:
                        raise e.errors[0].error
                    raise l3.RouterInUse(router_id=router_db['id'], reason=e)