def port_up_segment_mode(self, lldpad_port, port_name, port_uuid, mac,
                         net_uuid, segmentation_id, oui):
    """Handle a VM port coming up when VDP runs in segment mode.

    :param lldpad_port: LLDPad port object used to send vNIC up events.
    :param port_name: OVS port name of the vNIC.
    :param port_uuid: UUID of the vNIC port.
    :param mac: MAC address of the vNIC.
    :param net_uuid: UUID of the network the port belongs to.
    :param segmentation_id: segmentation ID of the network.
    :param oui: OUI parameters passed through to VDP.
    :return: True on success, False on failure; on the provisioning
             path, the boolean returned by provision_vdp_overlay_networks.
    """
    lvm = self.local_vlan_map.get(net_uuid)
    if lvm and lvm.late_binding_vlan:
        # The network already has a VDP VLAN negotiated; just announce
        # this vNIC to lldpad and cache the port.
        vdp_vlan = lvm.late_binding_vlan
        ovs_cb_data = {'obj': self, 'mac': mac, 'port_uuid': port_uuid,
                       'net_uuid': net_uuid}
        lldpad_port.send_vdp_vnic_up(port_uuid=port_uuid, vsiid=port_uuid,
                                     gid=segmentation_id, mac=mac,
                                     vlan=vdp_vlan, oui=oui,
                                     vsw_cb_fn=self.vdp_vlan_change,
                                     vsw_cb_data=ovs_cb_data)
        lvm.port_uuid_list[port_uuid] = port_uuid
        return True
    else:
        # No VDP VLAN negotiated yet: read the local VLAN tag from the
        # integration bridge and provision the overlay via VDP.
        int_br = self.integ_br_obj
        lvid = int_br.get_port_vlan_tag(port_name)
        if lvid != cconstants.INVALID_VLAN:
            ret, vdp_vlan = self.provision_vdp_overlay_networks(
                port_uuid, mac, net_uuid, segmentation_id, lvid, oui)
            if not lvm:
                lvm = LocalVlan(lvid, segmentation_id)
                self.local_vlan_map[net_uuid] = lvm
            lvm.lvid = lvid
            # NOTE(review): the port is cached even when provisioning
            # failed below -- presumably a later retry fixes it; confirm.
            lvm.port_uuid_list[port_uuid] = port_uuid
            if vdp_vlan != cconstants.INVALID_VLAN:
                lvm.late_binding_vlan = vdp_vlan
            else:
                LOG.error(_LE("Cannot provision VDP overlay"))
            return ret
        else:
            LOG.error(_LE("Invalid VLAN"))
            return False
def delete_project(self, tenant_name, part_name):
    """Delete project on the DCNM.

    Deletes the partition first and then the organization; each DCNM
    request is checked and a failure aborts with an exception.

    :param tenant_name: name of project.
    :param part_name: name of partition.
    :raises dexc.DfaClientRequestFailed: if DCNM rejects either request.
    """
    res = self._delete_partition(tenant_name, part_name)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s partition in DCNM.", part_name)
    else:
        # Fix: add the missing space between the concatenated string
        # parts so the message does not render as "DCNM.Response".
        LOG.error(_LE("Failed to delete %(part)s partition in DCNM. "
                      "Response: %(res)s"), {'part': part_name,
                                             'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)

    res = self._delete_org(tenant_name)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s organization in DCNM.", tenant_name)
    else:
        # Same missing-space fix as above.
        LOG.error(_LE("Failed to delete %(org)s organization in DCNM. "
                      "Response: %(res)s"), {'org': tenant_name,
                                             'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
def process_vm_event(self, msg, phy_uplink):
    """Forward a VM up/down event to VDP on the given uplink and
    record the per-VM result.
    """
    LOG.info(
        _LI("In processing VM Event status %(status)s for MAC "
            "%(mac)s UUID %(uuid)s oui %(oui)s"),
        {"status": msg.get_status(), "mac": msg.get_mac(),
         "uuid": msg.get_port_uuid(), "oui": msg.get_oui()})
    time.sleep(10)
    # Failure code depends on the direction of the event.
    res_fail = (constants.CREATE_FAIL if msg.get_status() == "up"
                else constants.DELETE_FAIL)
    if not self.uplink_det_compl or phy_uplink not in self.ovs_vdp_obj_dict:
        # Uplink processing has not finished yet; fail this VM event.
        LOG.error(_LE("Uplink Port Event not received yet"))
        self.update_vm_result(msg.get_port_uuid(), res_fail)
        return
    ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
    if not ovs_vdp_obj.send_vdp_port_event(
            msg.get_port_uuid(), msg.get_mac(), msg.get_net_uuid(),
            msg.get_segmentation_id(), msg.get_status(), msg.get_oui()):
        LOG.error(_LE("Error in VDP port event, Err Queue enq"))
        self.update_vm_result(msg.get_port_uuid(), res_fail)
    else:
        self.update_vm_result(msg.get_port_uuid(),
                              constants.RESULT_SUCCESS)
def port_down_segment_mode(self, lldpad_port, port_uuid, mac, net_uuid,
                           segmentation_id, oui):
    """Handle a VM port going down when VDP runs in segment mode.

    Sends a vNIC down to lldpad, removes the port from the local VLAN
    cache and, when it was the last port on the network, unprovisions
    the VDP overlay and drops the mapping.

    :param lldpad_port: LLDPad port object used to send vNIC down events.
    :param port_uuid: UUID of the vNIC port.
    :param mac: MAC address of the vNIC.
    :param net_uuid: UUID of the network the port belongs to.
    :param segmentation_id: segmentation ID of the network.
    :param oui: OUI parameters passed through to VDP.
    :return: False only when the port is unknown; True otherwise.
    """
    lvm = self.local_vlan_map.get(net_uuid)
    if lvm:
        if port_uuid not in lvm.port_uuid_list:
            LOG.error(_LE("port_uuid %s not in cache for port_down"),
                      port_uuid)
            return False
        vdp_vlan = lvm.late_binding_vlan
        lldpad_port.send_vdp_vnic_down(port_uuid=port_uuid,
                                       vsiid=port_uuid,
                                       gid=segmentation_id, mac=mac,
                                       vlan=vdp_vlan, oui=oui)
        lvm.port_uuid_list.pop(port_uuid, None)
        if not lvm.port_uuid_list:
            # Last port on this network: tear down the overlay and drop
            # the local VLAN mapping.
            self.unprovision_vdp_overlay_networks(net_uuid, lvm.lvid,
                                                  vdp_vlan, oui)
            self.local_vlan_map.pop(net_uuid)
            LOG.debug("Deleting flows")
    else:
        # There's no logical change of this condition being hit
        # So, not returning False here.
        LOG.error(_LE("Local VLAN Map not available in port down"))
    return True
def send_vdp_port_event(self, port_uuid, mac, net_uuid,
                        segmentation_id, status, oui):
    '''Send vNIC UP/Down event to VDP

    :param port_uuid: a ovslib.VifPort object.
    :mac: MAC address of the VNIC
    :param net_uuid: the net_uuid this port is to be associated with.
    :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
    :param status: Type of port event. 'up' or 'down'
    :oui: OUI Parameters
    '''
    lldpad_port = self.lldpad_info
    if not lldpad_port:
        LOG.error(_LE("There is no LLDPad port available."))
        return False
    # Only segment mode is handled; any other mode reports failure.
    if self.vdp_mode != constants.VDP_SEGMENT_MODE:
        return False
    if status == 'up':
        port_name = self.ext_br_obj.get_ofport_name(port_uuid)
        if port_name is None:
            LOG.error(_LE("Unknown portname for uuid %s"), port_uuid)
            return False
        LOG.info(_LI('portname for uuid %s is '), port_name)
        return self.port_up_segment_mode(lldpad_port, port_name,
                                         port_uuid, mac, net_uuid,
                                         segmentation_id, oui)
    return self.port_down_segment_mode(lldpad_port, port_uuid, mac,
                                       net_uuid, segmentation_id, oui)
def _program_dcnm_static_route(self, tenant_id, tenant_name):
    """Program DCNM Static Route.

    Pushes a static route for the tenant's service partition, excluding
    the IN/OUT service subnets from the route list.
    """
    in_info = self.get_in_ip_addr(tenant_id)
    if in_info.get('gateway') is None:
        LOG.error(_LE("No FW service GW present"))
        return False
    # Program DCNM to update profile's static IP address on OUT part;
    # the IN and OUT service subnets themselves are excluded.
    excl_list = [in_info.get('subnet'),
                 self.get_out_ip_addr(tenant_id).get('subnet')]
    subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list,
                                                    excl_part=True)
    # This count is for telling DCNM to insert the static route in a
    # particular position. Total networks created - exclusive list as
    # above - the network that just got created.
    srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)
    if not self.dcnm_obj.update_partition_static_route(
            tenant_name, fw_const.SERV_PART_NAME, subnet_lst,
            vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,
            service_node_ip=srvc_node_ip):
        LOG.error(_LE("Unable to update DCNM ext profile with static "
                      "route"))
        return False
    return True
def network_create_notif(self, tenant_id, tenant_name, cidr):
    """Tenant Network create Notification.

    Programs a DCNM static route for the new network and points the
    tenant router at the IN service gateway for this CIDR.
    Restart is not supported currently for this. fixme(padkrish).

    :param tenant_id: tenant UUID.
    :param tenant_name: tenant name.
    :param cidr: CIDR of the newly created network.
    :return: True on success, False otherwise.
    """
    router_id = self.get_router_id(tenant_id, tenant_name)
    if not router_id:
        # Fix: message previously read "Rout ID".
        LOG.error(_LE("Router ID not present for tenant"))
        return False
    ret = self._program_dcnm_static_route(tenant_id, tenant_name)
    if not ret:
        LOG.error(_LE("Program DCNM with static routes failed "
                      "for router %s"), router_id)
        return False
    # Program router namespace to have this network to be routed
    # to IN service network
    in_ip_dict = self.get_in_ip_addr(tenant_id)
    in_gw = in_ip_dict.get('gateway')
    if in_gw is None:
        LOG.error(_LE("No FW service GW present"))
        return False
    ret = self.os_helper.program_rtr_nwk_next_hop(router_id, in_gw, cidr)
    if not ret:
        LOG.error(_LE("Unable to program default router next hop %s"),
                  router_id)
        return False
    return True
def _delete_fw(self, tenant_id, data):
    """Internal routine called when a FW is deleted.

    Brings the IN/OUT router ports down and deletes the tenant service
    router, then drops the cached tenant entry.

    :param tenant_id: tenant owning the firewall.
    :param data: firewall event payload passed to _create_arg_dict.
    :return: True on success, False otherwise.
    """
    LOG.debug("In Delete fw data is %s", data)
    in_sub = self.get_in_subnet_id(tenant_id)
    out_sub = self.get_out_subnet_id(tenant_id)
    arg_dict = self._create_arg_dict(tenant_id, data, in_sub, out_sub)
    if arg_dict.get('router_id') is None:
        LOG.error(_LE("Router ID unknown for tenant %s"), tenant_id)
        return False
    if tenant_id not in self.tenant_dict:
        # Rebuild the cached tenant entry so cleanup below can proceed
        # -- presumably the restart case; confirm with callers.
        self.create_tenant_dict(tenant_id, arg_dict.get('router_id'))
    ret = self.send_in_router_port_msg(tenant_id, arg_dict, 'down')
    if not ret:
        return False
    ret = self.send_out_router_port_msg(tenant_id, arg_dict, 'down')
    if not ret:
        return False
    # Usually sending message to queue doesn't fail!!!
    router_ret = self.delete_intf_router(tenant_id,
                                         arg_dict.get('tenant_name'),
                                         arg_dict.get('router_id'))
    if not router_ret:
        LOG.error(_LE("Unable to delete router for tenant %s, error case"),
                  tenant_id)
        return router_ret
    del self.tenant_dict[tenant_id]
    return router_ret
def _server_network_relay(self):
    """Relay DHCP responses from the external (global) namespace back
    to clients on internal networks, demultiplexed by VPN-id.
    """
    # Open a socket in the global namespace for DHCP
    try:
        self.ext_sock, self.ext_addr = self._open_dhcp_ext_socket()
    except Exception:
        LOG.exception(
            _LE("Failed to open dhcp external socket in "
                "global ns"))
        return
    recvbuf = bytearray(RECV_BUFFER_SIZE)
    # Forward DHCP responses from external to internal networks
    while True:
        try:
            size = self.ext_sock.recv_into(recvbuf)
            pkt = DhcpPacket.parse(recvbuf)
            # Relay option 151 carries the VPN-id -- presumably the
            # VSS sub-option; confirm against DhcpPacket.
            vpnid = pkt.get_relay_option(151)
            ciaddr = pkt.get_ciaddr()
            if vpnid not in self.int_sockets_by_vpn:
                # Unknown VPN: silently drop the packet.
                continue
            int_sock = self.int_sockets_by_vpn[vpnid]
            self.debug_stats.increment_pkts_from_server(vpnid)
            if ciaddr == "0.0.0.0":
                # Client has no address yet; broadcast the reply.
                ciaddr = "255.255.255.255"
            LOG.debug('Forwarding DHCP response for vpn %s', vpnid)
            int_sock.sendto(recvbuf[:size], (ciaddr, DHCP_CLIENT_PORT))
            self.debug_stats.increment_pkts_to_client(vpnid)
        except Exception:
            # Keep the relay loop alive on any per-packet failure.
            LOG.exception(_LE('Failed to forward dhcp response'))
def _server_network_relay(self):
    """Relay DNS responses from the external (global) namespace back
    to the internal-namespace client that issued the request.
    """
    # Open a socket in the global namespace for DNS
    try:
        self.ext_sock, self.ext_addr, ext_port = (
            self._open_dns_ext_socket())
    except Exception:
        LOG.exception(_LE('Failed to open dns external '
                          'socket in global ns'))
        return
    recvbuf = bytearray(RECV_BUFFER_SIZE)
    LOG.debug("Opened dns external server socket on addr:%s:%i",
              self.ext_addr, ext_port)
    # Forward DNS responses from external to internal networks
    while True:
        try:
            size = self.ext_sock.recv_into(recvbuf)
            pkt = DnsPacket.parse(recvbuf, size)
            msgid = pkt.get_msgid()
            LOG.debug("got dns response pkt, msgid = %i", msgid)
            if msgid not in self.request_info_by_msgid:
                # No pending request for this id (late/spurious reply).
                LOG.debug('Could not find request by msgid %i', msgid)
                continue
            int_sock, int_addr, int_port, createtime, viewid = (
                self.request_info_by_msgid[msgid])
            self.debug_stats.increment_pkts_from_server(viewid)
            LOG.debug("forwarding response to internal namespace "
                      "at %s:%i", int_addr, int_port)
            int_sock.sendto(recvbuf[:size], (int_addr, int_port))
            # Request answered; drop the bookkeeping entry.
            del self.request_info_by_msgid[msgid]
            self.debug_stats.increment_pkts_to_client(viewid)
        except Exception:
            # Keep the relay loop alive on any per-packet failure.
            LOG.exception(_LE('Failed to forward dns response'))
def _get_profile_id(cls, p_type, resource, name):
    """Return the id of the N1KV network/policy profile named *name*.

    :param p_type: 'net_profile' selects network profiles; any other
        value selects policy profiles.
    :param resource: human-readable resource label used in error logs.
    :param name: profile name to look up.
    :return: the profile id when exactly one match exists; otherwise
        None (after logging an error for zero or multiple matches).
    """
    try:
        tenant_id = manager.NeutronManager.get_service_plugins()[
            cisco_constants.DEVICE_MANAGER].l3_tenant_id()
    except AttributeError:
        # Device-manager service plugin not loaded.
        return
    if tenant_id is None:
        return
    if p_type == 'net_profile':
        plugin = manager.NeutronManager.\
            get_service_plugins().get(constants.CISCO_N1KV_NET_PROFILE)
        profiles = plugin.get_network_profiles(
            n_context.get_admin_context(),
            {'tenant_id': [tenant_id], 'name': [name]},
            ['id'])
    else:
        plugin = manager.NeutronManager.get_service_plugins().get(
            constants.CISCO_N1KV)
        profiles = plugin.get_policy_profiles(
            n_context.get_admin_context(),
            {'tenant_id': [tenant_id], 'name': [name]},
            ['id'])
    if len(profiles) == 1:
        return profiles[0]['id']
    elif len(profiles) > 1:
        # Profile must have a unique name.
        LOG.error(_LE('The %(resource)s %(name)s does not have unique '
                      'name. Please refer to admin guide and create one.'),
                  {'resource': resource, 'name': name})
    else:
        # Profile has not been created.
        LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                      'admin guide and create one.'),
                  {'resource': resource, 'name': name})
def update_vnic_template(self, host_id, vlan_id, physnet,
                         vnic_template_path, vnic_template):
    """Updates VNIC Template with the vlan_id.

    :param host_id: compute host served by the UCS Manager.
    :param vlan_id: VLAN to add to the VNIC template.
    :param physnet: physical network name (used only in debug logs here).
    :param vnic_template_path: DN prefix under which the template lives.
    :param vnic_template: name of the VNIC template to modify.
    :return: True on success, False on failure, or the value returned
        by _handle_ucsm_exception when the SDK raises.
    """
    ucsm_ip = self.get_ucsm_ip_for_host(host_id)
    vlan_name = self.make_vlan_name(vlan_id)
    with self.ucsm_connect_disconnect(ucsm_ip) as handle:
        # Create Vlan Profile
        if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
            LOG.error(
                _LE('UCS Manager network driver failed to create '
                    'Vlan Profile for vlan %s'), vlan_id)
            return False
        try:
            LOG.debug('VNIC Template Path: %s', vnic_template_path)
            vnic_template_full_path = (vnic_template_path +
                                       const.VNIC_TEMPLATE_PREFIX +
                                       str(vnic_template))
            LOG.debug('VNIC Template Path: %s for physnet %s',
                      vnic_template_full_path, physnet)
            handle.StartTransaction()
            # Look up the VNIC template managed object by its DN.
            mo = handle.GetManagedObject(
                None, self.ucsmsdk.VnicLanConnTempl.ClassId(), {
                    self.ucsmsdk.VnicLanConnTempl.DN:
                        vnic_template_full_path
                }, True)
            if not mo:
                # NOTE(review): returning here leaves the transaction
                # opened by StartTransaction uncompleted -- confirm the
                # SDK/connect context manager cleans this up.
                LOG.error(
                    _LE('UCS Manager network driver could '
                        'not find VNIC template %s'),
                    vnic_template_full_path)
                return False
            vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
                       vlan_name)
            LOG.debug('VNIC Template VLAN path: %s', vlan_dn)
            # Attach the VLAN to the template via a VnicEtherIf child
            # object; DEFAULT_NET "no" keeps it a tagged (non-native) VLAN
            # -- presumably; confirm against UCS SDK docs.
            eth_if = handle.AddManagedObject(
                mo, self.ucsmsdk.VnicEtherIf.ClassId(), {
                    self.ucsmsdk.VnicEtherIf.DN: vlan_dn,
                    self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
                    self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"
                }, True)
            if not eth_if:
                LOG.error(
                    _LE('UCS Manager network driver could '
                        'not add VLAN %(vlan_name)s to VNIC '
                        'template %(vnic_template_full_path)s'), {
                        'vlan_name': vlan_name,
                        'vnic_template_full_path': vnic_template_full_path
                    })
                return False
            handle.CompleteTransaction()
            return True
        except Exception as e:
            return self._handle_ucsm_exception(e, 'VNIC Template',
                                               vlan_id, ucsm_ip)
def remove_rtr_nwk_next_hop(self, rout_id, next_hop, subnet_lst,
                            excl_list):
    """Remove the next hop for all networks of a tenant. """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error(_LE("Unable to find namespace for router %s"), rout_id)
        return False
    route_out = self.program_rtr_return(['ip', 'route'], rout_id,
                                        namespace=namespace)
    if route_out is None:
        LOG.error(_LE("Get routes return None %s"), rout_id)
        return False
    # Routes whose destination is in either list are kept.
    keep_lst = subnet_lst + excl_list
    for route_line in route_out.split('\n'):
        if not route_line:
            continue
        dest = route_line.split()[0]
        if dest == 'default':
            continue
        dest_no_mask = dest.split('/')[0]
        if dest_no_mask in keep_lst or dest in keep_lst:
            continue
        # Not in the keep lists: delete this route via the router ns.
        del_args = ['route', 'del', '-net', dest, 'gw', next_hop]
        if not self.program_rtr(del_args, rout_id, namespace=namespace):
            LOG.error(_LE("Program router returned error for %s"),
                      rout_id)
            return False
    return True
def process_uplink_event(self, msg, phy_uplink):
    """Process an uplink up/down message from the event queue."""
    LOG.info(_LI("Received New uplink Msg %(msg)s for uplink %(uplink)s"),
             {'msg': msg.get_status(), 'uplink': phy_uplink})
    if msg.get_status() == 'up':
        ovs_exc_raised = False
        try:
            self.ovs_vdp_obj_dict[phy_uplink] = ovs_vdp.OVSNeutronVdp(
                phy_uplink, msg.get_integ_br(), msg.get_ext_br(),
                msg.get_root_helper())
        except Exception as exc:
            LOG.error(_LE("OVS VDP Object creation failed %s"), str(exc))
            ovs_exc_raised = True
        # Short-circuit keeps the dict lookup safe when creation failed.
        if (ovs_exc_raised or not self.ovs_vdp_obj_dict[phy_uplink].
                is_lldpad_setup_done()):
            # Is there a way to delete the object??
            LOG.error(_LE("UP Event Processing NOT Complete"))
            self.err_que.enqueue(constants.Q_UPL_PRIO, msg)
        else:
            self.uplink_det_compl = True
            # NOTE(review): the code below reads self.phy_uplink while
            # the dict above is keyed by the phy_uplink argument --
            # presumably they hold the same value here; confirm with
            # callers.
            veth_intf = (self.ovs_vdp_obj_dict[self.phy_uplink].
                         get_lldp_bridge_port())
            LOG.info(_LI("UP Event Processing Complete Saving uplink "
                         "%(ul)s and veth %(veth)s"),
                     {'ul': self.phy_uplink, 'veth': veth_intf})
            self.save_uplink(uplink=self.phy_uplink, veth_intf=veth_intf)
    elif msg.get_status() == 'down':
        # Free the object fixme(padkrish)
        if phy_uplink in self.ovs_vdp_obj_dict:
            self.ovs_vdp_obj_dict[phy_uplink].clear_obj_params()
        else:
            # No object cached: clean up flows directly on the bridges.
            ovs_vdp.delete_uplink_and_flows(self.root_helper, self.br_ex,
                                            phy_uplink)
        self.save_uplink()
def _agent_registration(self):
    """Register this agent with the server.

    This method registers the cfg agent with the neutron server so
    hosting devices can be assigned to it. In case the server is not
    ready to accept registration (it sends a False) then we retry
    registration for `MAX_REGISTRATION_ATTEMPTS` with a delay of
    `REGISTRATION_RETRY_DELAY`. If there is no server response or a
    failure to register after the required number of attempts, the
    agent stops itself.
    """
    for _attempt in range(MAX_REGISTRATION_ATTEMPTS):
        context = n_context.get_admin_context_without_session()
        self.send_agent_report(self.agent_state, context)
        res = self.devmgr_rpc.register_for_duty(context)
        if res is True:
            LOG.info(_LI("[Agent registration] Agent successfully "
                         "registered"))
            return
        if res is False:
            # Server up but device manager not ready: wait and retry.
            LOG.warning(_LW("[Agent registration] Neutron server said "
                            "that device manager was not ready. Retrying "
                            "in %0.2f seconds "),
                        REGISTRATION_RETRY_DELAY)
            time.sleep(REGISTRATION_RETRY_DELAY)
        elif res is None:
            # No device manager at all: registration can never succeed.
            LOG.error(_LE("[Agent registration] Neutron server said that "
                          "no device manager was found. Cannot continue. "
                          "Exiting!"))
            raise SystemExit("Cfg Agent exiting")
    LOG.error(_LE("[Agent registration] %d unsuccessful registration "
                  "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
    raise SystemExit("Cfg Agent exiting")
def update_serviceprofile(self, host_id, vlan_id):
    """Top level method to update Service Profiles on UCS Manager.

    Calls all the methods responsible for the individual tasks that
    ultimately result in a vlan_id getting programed on a server's
    ethernet ports and the Fabric Interconnect's network ports.

    :param host_id: compute host whose Service Profile is updated.
    :param vlan_id: VLAN to program.
    :return: True on success, False otherwise.
    """
    ucsm_ip = self.ucsm_host_dict.get(host_id)
    # Fix: ucsm_sp_dict is keyed by (ucsm_ip, host_id) tuples; the old
    # dict.get(ucsm_ip, host_id) looked up the wrong key and silently
    # fell back to host_id itself as the "service profile".
    service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
    if service_profile:
        LOG.debug("UCS Manager network driver Service Profile : %s",
                  service_profile)
    else:
        LOG.info(
            _LI('UCS Manager network driver does not support Host_id '
                '%s'), str(host_id))
        return False
    # Create Vlan Profile
    if not self._create_vlanprofile(vlan_id, ucsm_ip):
        LOG.error(
            _LE('UCS Manager network driver failed to create '
                'Vlan Profile for vlan %s'), str(vlan_id))
        return False
    # Update Service Profile
    if not self._update_service_profile(service_profile, vlan_id,
                                        ucsm_ip):
        LOG.error(
            _LE('UCS Manager network driver failed to update '
                'Service Profile %s'), service_profile)
        return False
    return True
def get_vlan_from_reply(self, reply):
    '''Parse the reply from VDP daemon to get the VLAN value'''
    # Locate the (single) "filter" keyword; index() succeeding implies
    # rindex() also succeeds, so one try block covers both.
    try:
        f_ind = reply.index("filter")
        l_ind = reply.rindex("filter")
    except Exception:
        LOG.error(_LE("Incorrect Reply, no filter information found %s"),
                  reply)
        return constants.INVALID_VLAN
    if f_ind != l_ind:
        # Currently not supported if reply contains a filter keyword
        LOG.error(_LE("Err: not supported currently"))
        return constants.INVALID_VLAN
    # VLAN is the number between the 4-char prefix after "filter" and
    # the first '-'.
    try:
        filter_val = reply.partition("filter")[2]
        vlan = int(filter_val[4:].split('-')[0])
    except ValueError:
        LOG.error(_LE("Reply not formatted correctly %s"), reply)
        return constants.INVALID_VLAN
    return vlan
def get_routertype_db_by_id_name(self, context, id_or_name):
    """Look up a RouterType DB record by id, falling back to name.

    :param context: neutron request context carrying the DB session.
    :param id_or_name: router type id, or name when no id matches.
    :return: the single matching RouterType row.
    :raises routertype.RouterTypeNotFound: no match by id nor name.
    :raises routertype.MultipleRouterTypes: ambiguous name match.
    """
    query = context.session.query(l3_models.RouterType)
    query = query.filter(l3_models.RouterType.id == id_or_name)
    try:
        return query.one()
    except exc.MultipleResultsFound:
        # NOTE(review): raising inside save_and_reraise_exception makes
        # the domain exception propagate in place of the DB one --
        # confirm this interplay is intended.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Database inconsistency: Multiple router types '
                          'with same id %s'), id_or_name)
            raise routertype.RouterTypeNotFound(router_type=id_or_name)
    except exc.NoResultFound:
        # No id match: retry the lookup treating the argument as a name.
        query = context.session.query(l3_models.RouterType)
        query = query.filter(l3_models.RouterType.name == id_or_name)
        try:
            return query.one()
        except exc.MultipleResultsFound:
            with excutils.save_and_reraise_exception():
                LOG.debug('Multiple router types with name %s found. '
                          'Id must be specified to allow arbitration.',
                          id_or_name)
                raise routertype.MultipleRouterTypes(name=id_or_name)
        except exc.NoResultFound:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('No router type with name %s found.'),
                          id_or_name)
                raise routertype.RouterTypeNotFound(id=id_or_name)
def process_vm_event(self, msg, phy_uplink):
    """Process a VM up/down event: forward it to VDP on the uplink and
    record the result (including lvid/vdp_vlan on success) for the VM.
    """
    LOG.info(_LI("In processing VM Event status %(status)s for MAC "
                 "%(mac)s UUID %(uuid)s oui %(oui)s"),
             {'status': msg.get_status(), 'mac': msg.get_mac(),
              'uuid': msg.get_port_uuid(), 'oui': msg.get_oui()})
    # NOTE(review): fixed delay, presumably to let uplink processing
    # settle before talking to VDP -- confirm.
    time.sleep(10)
    if msg.get_status() == 'up':
        res_fail = constants.CREATE_FAIL
    else:
        res_fail = constants.DELETE_FAIL
    if (not self.uplink_det_compl or
            phy_uplink not in self.ovs_vdp_obj_dict):
        # Uplink processing has not finished yet; fail this VM event.
        LOG.error(_LE("Uplink Port Event not received yet"))
        self.update_vm_result(msg.get_port_uuid(), res_fail)
        return
    ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
    port_event_reply = ovs_vdp_obj.send_vdp_port_event(
        msg.get_port_uuid(), msg.get_mac(), msg.get_net_uuid(),
        msg.get_segmentation_id(), msg.get_status(), msg.get_oui())
    if not port_event_reply.get('result'):
        LOG.error(_LE("Error in VDP port event, Err Queue enq"))
        self.update_vm_result(
            msg.get_port_uuid(), res_fail,
            fail_reason=port_event_reply.get('fail_reason'))
    else:
        LOG.info(_LI("Success in VDP port event"))
        # Record the negotiated local and VDP VLANs along with success.
        lvid, vdp_vlan = ovs_vdp_obj.get_lvid_vdp_vlan(msg.get_net_uuid(),
                                                       msg.get_port_uuid())
        self.update_vm_result(
            msg.get_port_uuid(), constants.RESULT_SUCCESS,
            lvid=lvid, vdp_vlan=vdp_vlan,
            fail_reason=port_event_reply.get('fail_reason'))
def create_portprofile(self, profile_name, vlan_id, vnic_type, host_id):
    """Top level method to create Port Profiles on the UCS Manager.

    Calls all the methods responsible for the individual tasks that
    ultimately result in the creation of the Port Profile on the UCS
    Manager.
    """
    ucsm_ip = self.ucsm_host_dict.get(host_id)
    if not ucsm_ip:
        # Host is not served by any known UCS Manager.
        LOG.info(_LI('UCS Manager network driver does not support Host_id '
                     '%s'), str(host_id))
        return False
    # The Vlan Profile must exist before the Port Profile can use it.
    if not self._create_vlanprofile(vlan_id, ucsm_ip):
        LOG.error(_LE('UCS Manager network driver failed to create '
                      'Vlan Profile for vlan %s'), str(vlan_id))
        return False
    # Now create the Port Profile referencing that VLAN.
    if not self._create_port_profile(profile_name, vlan_id, vnic_type,
                                     ucsm_ip):
        LOG.error(_LE('UCS Manager network driver failed to create '
                      'Port Profile %s'), profile_name)
        return False
    return True
def update_serviceprofile(self, host_id, vlan_id):
    """Top level method to update Service Profiles on UCS Manager.

    Calls all the methods responsible for the individual tasks that
    ultimately result in a vlan_id getting programed on a server's
    ethernet ports and the Fabric Interconnect's network ports.

    :param host_id: compute host whose Service Profile is updated.
    :param vlan_id: VLAN to program.
    :return: True on success, False otherwise.
    """
    ucsm_ip = self.ucsm_host_dict.get(host_id)
    # Fix: ucsm_sp_dict is keyed by (ucsm_ip, host_id) tuples; the old
    # dict.get(ucsm_ip, host_id) looked up the wrong key and silently
    # fell back to host_id itself as the "service profile".
    service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
    if service_profile:
        LOG.debug("UCS Manager network driver Service Profile : %s",
                  service_profile)
    else:
        LOG.info(_LI('UCS Manager network driver does not support Host_id '
                     '%s'), str(host_id))
        return False
    # Create Vlan Profile
    if not self._create_vlanprofile(vlan_id, ucsm_ip):
        LOG.error(_LE('UCS Manager network driver failed to create '
                      'Vlan Profile for vlan %s'), str(vlan_id))
        return False
    # Update Service Profile
    if not self._update_service_profile(service_profile, vlan_id,
                                        ucsm_ip):
        LOG.error(_LE('UCS Manager network driver failed to update '
                      'Service Profile %s'), service_profile)
        return False
    return True
def _validate_multicast_ip_range(self, network_profile):
    """Validate multicast ip range values.

    :param network_profile: network profile object
    :raises n_exc.InvalidInput: if the range is malformed, not
        multicast, reserved, or not ordered low-to-high.
    """
    ip_range = network_profile['multicast_ip_range']
    try:
        min_ip, max_ip = ip_range.split('-', 1)
    except ValueError:
        msg = _LE("Invalid multicast ip address range. "
                  "example range: 224.1.1.1-224.1.1.10")
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
    for addr in (min_ip, max_ip):
        try:
            if not netaddr.IPAddress(addr).is_multicast():
                msg = _LE("%s is not a valid multicast ip address") % addr
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
            # Addresses up to 224.0.0.255 are reserved.
            if netaddr.IPAddress(addr) <= netaddr.IPAddress('224.0.0.255'):
                msg = _LE("%s is reserved multicast ip address") % addr
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
        except netaddr.AddrFormatError:
            msg = _LE("%s is not a valid ip address") % addr
            LOG.error(msg)
            raise n_exc.InvalidInput(error_message=msg)
    if netaddr.IPAddress(min_ip) > netaddr.IPAddress(max_ip):
        msg = (_LE("Invalid multicast IP range '%(min_ip)s-%(max_ip)s':"
                   " Range should be from low address to high address")
               % {'min_ip': min_ip, 'max_ip': max_ip})
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
def _delete_fw_fab_dev(self, tenant_id, drvr_name, fw_dict):
    """Deletes the Firewall.

    This routine calls the fabric class to delete the fabric when
    a firewall is deleted. It also calls the device manager to
    unconfigure the device. It updates the database with the final
    result.

    :param tenant_id: tenant owning the firewall.
    :param drvr_name: device driver name (not used in this path).
    :param fw_dict: firewall attributes; 'fw_id' is read.
    :return: True on success, False on any failure.
    """
    is_fw_virt = self.is_device_virtual()
    if self.fwid_attr[tenant_id].is_fw_drvr_created():
        ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'),
                                    fw_dict)
        if not ret:
            LOG.error(_LE("Error in delete_fabric_fw device for tenant "
                          "%s"), tenant_id)
            return False
        else:
            # Device unconfigured: clear the driver-created flag and the
            # device status in the DB.
            self.fwid_attr[tenant_id].fw_drvr_created(False)
            self.update_fw_db_dev_status(fw_dict.get('fw_id'), '')
    ret = self.fabric.delete_fabric_fw(tenant_id, fw_dict, is_fw_virt,
                                       fw_constants.RESULT_FW_DELETE_INIT)
    if not ret:
        LOG.error(_LE("Error in delete_fabric_fw for tenant %s"),
                  tenant_id)
        return False
    # Record completion and drop the firewall from the DB/cache.
    self.update_fw_db_final_result(fw_dict.get('fw_id'), (
        fw_constants.RESULT_FW_DELETE_DONE))
    self.delete_fw(fw_dict.get('fw_id'))
    return True
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
    """Prepares the Fabric and configures the device.

    This routine calls the fabric class to prepare the fabric when
    a firewall is created. It also calls the device manager to
    configure the device. It updates the database with the final
    result.
    """
    is_fw_virt = self.is_device_virtual()
    fw_id = fw_dict.get('fw_id')
    if not self.fabric.prepare_fabric_fw(
            tenant_id, fw_dict, is_fw_virt,
            fw_constants.RESULT_FW_CREATE_INIT):
        LOG.error(_LE("Prepare Fabric failed"))
        return
    # Fabric ready: record completion before touching the device.
    self.update_fw_db_final_result(fw_id,
                                   fw_constants.RESULT_FW_CREATE_DONE)
    if self.create_fw_device(tenant_id, fw_id, fw_dict):
        # Device configured: remember the driver instance exists.
        self.fwid_attr[tenant_id].fw_drvr_created(True)
        self.update_fw_db_dev_status(fw_id, 'SUCCESS')
        LOG.info(_LI("FW device create returned success for tenant %s"),
                 tenant_id)
    else:
        LOG.error(_LE("FW device create returned failure for tenant %s"),
                  tenant_id)
def delete_network_all_subnets(self, net_id):
    """Delete the openstack network including all its subnets. """
    try:
        # Remove every subnet that belongs to this network first.
        subnets = self.neutronclient.list_subnets(
            body={'network_id': net_id}).get('subnets')
        for subnet in subnets:
            if subnet.get('network_id') == net_id:
                self.neutronclient.delete_subnet(subnet.get('id'))
    except Exception as exc:
        LOG.error(_LE("Failed to delete subnet for net %(net)s "
                      "Exc %(exc)s"), {'net': net_id, 'exc': str(exc)})
        return False
    try:
        self.neutronclient.delete_network(net_id)
    except Exception as exc:
        LOG.error(_LE("Failed to delete network %(net)s Exc %(exc)s"),
                  {'net': net_id, 'exc': str(exc)})
        return False
    return True
def _reserve_provider_segment(self, session, net_id=None, seg_id=None,
                              source=None):
    """Reserve a segment for the given network/source.

    :param session: DB session used for the allocation.
    :param net_id: network id the segment is for.
    :param seg_id: specific segmentation id to reserve, or None to let
        the allocator pick one.
    :param source: allocation source identifier.
    :return: the reserved segmentation id, or None when no segment is
        available or the requested one is already in use.
    """
    if seg_id is None:
        alloc = self._allocate_segment(session, net_id, source)
        if not alloc:
            LOG.error(
                _LE("ERROR: No segment is available for net %(net)s"
                    " source %(src)s"), {
                    'net': net_id,
                    'src': source
                })
            return
    else:
        # TODO(padkrish) net_id not passed here
        alloc = self._allocate_specified_segment(session, seg_id, source)
        if not alloc:
            # Fix: the format previously ended with "%(src)" (missing the
            # trailing 's'), which raises ValueError at log-format time.
            LOG.error(
                _LE("ERROR: Segmentation_id %(seg)s is in use. for "
                    "net %(net)s source %(src)s"), {
                    'seg': seg_id,
                    'net': net_id,
                    'src': source
                })
            return
    return alloc.segmentation_id
def create_router(self, name, tenant_id, subnet_lst):
    """Create a openstack router and add the interfaces. """
    body = {'router': {'name': name,
                       'tenant_id': tenant_id,
                       'admin_state_up': True}}
    try:
        rout_id = self.neutronclient.create_router(
            body=body).get('router').get('id')
    except Exception as exc:
        LOG.error(_LE("Failed to create router with name %(name)s"
                      " Exc %(exc)s"), {'name': name, 'exc': str(exc)})
        return None
    if not self.add_intf_router(rout_id, tenant_id, subnet_lst):
        # Interface attach failed: roll back the router we just made.
        try:
            self.neutronclient.delete_router(rout_id)
        except Exception as exc:
            LOG.error(_LE("Failed to delete router %(name)s, Exc %(exc)s"),
                      {'name': name, 'exc': str(exc)})
        return None
    return rout_id
def get_vlan_from_reply1(self, reply):
    '''Parse the reply from VDP daemon to get the VLAN value.

    :param reply: raw reply string from the VDP daemon.
    :return: the negotiated VLAN as an int, or constants.INVALID_VLAN
        when the reply cannot be parsed or the mode is not "assoc".
    '''
    try:
        mode_str = reply.partition("mode = ")[2].split()[0]
        if mode_str != "assoc":
            return constants.INVALID_VLAN
    except Exception:
        LOG.error(_LE("Incorrect Reply,no mode information found: %s"),
                  reply)
        # Fix: previously this fell through and kept parsing after
        # logging the malformed-mode error (cf. the sibling copy of this
        # parser, which returns here).
        return constants.INVALID_VLAN
    try:
        f_ind = reply.index("filter = ")
    except Exception:
        LOG.error(_LE("Incorrect Reply,no filter information found: %s"),
                  reply)
        return constants.INVALID_VLAN
    try:
        l_ind = reply.rindex("filter = ")
    except Exception:
        LOG.error(_LE("Incorrect Reply,no filter information found: %s"),
                  reply)
        return constants.INVALID_VLAN
    if f_ind != l_ind:
        # Currently not supported if reply contains a filter keyword
        LOG.error(_LE("Err: not supported currently"))
        return constants.INVALID_VLAN
    try:
        vlan_val = reply.partition("filter = ")[2].split('-')[0]
        vlan = int(vlan_val)
    except ValueError:
        # Fix: include a %s placeholder so 'reply' is actually rendered.
        LOG.error(_LE("Reply not formatted correctly: %s"), reply)
        return constants.INVALID_VLAN
    return vlan
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
    """Prepares the Fabric and configures the device.

    This routine calls the fabric class to prepare the fabric when
    a firewall is created. It also calls the device manager to
    configure the device. It updates the database with the final
    result.

    :param tenant_id: tenant owning the firewall.
    :param drvr_name: device driver name (not used in this path).
    :param fw_dict: firewall attributes; 'fw_id' is read.
    """
    is_fw_virt = self.is_device_virtual()
    ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
                                        fw_constants.RESULT_FW_CREATE_INIT)
    if not ret:
        LOG.error(_LE("Prepare Fabric failed"))
        return
    else:
        # Fabric ready: record completion before touching the device.
        self.update_fw_db_final_result(
            fw_dict.get('fw_id'), (fw_constants.RESULT_FW_CREATE_DONE))
    ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict)
    if ret:
        # Device configured: remember that the driver instance exists.
        self.fwid_attr[tenant_id].fw_drvr_created(True)
        self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS')
        LOG.info(_LI("FW device create returned success for tenant %s"),
                 tenant_id)
    else:
        LOG.error(_LE("FW device create returned failure for tenant %s"),
                  tenant_id)
def get_routertype_db_by_id_name(self, context, id_or_name):
    """Look up a RouterType DB record by id, falling back to name.

    :param context: neutron request context carrying the DB session.
    :param id_or_name: router type id, or name when no id matches.
    :return: the single matching RouterType row.
    :raises routertype.RouterTypeNotFound: no match by id nor name.
    :raises routertype.MultipleRouterTypes: ambiguous name match.
    """
    query = context.session.query(l3_models.RouterType)
    query = query.filter(l3_models.RouterType.id == id_or_name)
    try:
        return query.one()
    except exc.MultipleResultsFound:
        # NOTE(review): raising inside save_and_reraise_exception makes
        # the domain exception propagate in place of the DB one --
        # confirm this interplay is intended.
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE('Database inconsistency: Multiple router types '
                    'with same id %s'), id_or_name)
            raise routertype.RouterTypeNotFound(router_type=id_or_name)
    except exc.NoResultFound:
        # No id match: retry the lookup treating the argument as a name.
        query = context.session.query(l3_models.RouterType)
        query = query.filter(l3_models.RouterType.name == id_or_name)
        try:
            return query.one()
        except exc.MultipleResultsFound:
            with excutils.save_and_reraise_exception():
                LOG.debug(
                    'Multiple router types with name %s found. '
                    'Id must be specified to allow arbitration.',
                    id_or_name)
                raise routertype.MultipleRouterTypes(name=id_or_name)
        except exc.NoResultFound:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('No router type with name %s found.'),
                          id_or_name)
                raise routertype.RouterTypeNotFound(id=id_or_name)
def _delete_fw_fab_dev(self, tenant_id, drvr_name, fw_dict):
    """Deletes the Firewall.

    This routine calls the fabric class to delete the fabric when
    a firewall is deleted. It also calls the device manager to
    unconfigure the device. It updates the database with the final
    result.

    :param tenant_id: tenant owning the firewall.
    :param drvr_name: device driver name (not used in this path).
    :param fw_dict: firewall attributes; 'fw_id' is read.
    :return: True on success, False on any failure.
    """
    is_fw_virt = self.is_device_virtual()
    if self.fwid_attr[tenant_id].is_fw_drvr_created():
        ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'),
                                    fw_dict)
        if not ret:
            LOG.error(
                _LE("Error in delete_fabric_fw device for tenant "
                    "%s"), tenant_id)
            return False
        else:
            # Device unconfigured: clear the driver-created flag and the
            # device status in the DB.
            self.fwid_attr[tenant_id].fw_drvr_created(False)
            self.update_fw_db_dev_status(fw_dict.get('fw_id'), '')
    ret = self.fabric.delete_fabric_fw(tenant_id, fw_dict, is_fw_virt,
                                       fw_constants.RESULT_FW_DELETE_INIT)
    if not ret:
        LOG.error(_LE("Error in delete_fabric_fw for tenant %s"),
                  tenant_id)
        return False
    # Record completion and drop the firewall from the DB/cache.
    self.update_fw_db_final_result(fw_dict.get('fw_id'),
                                   (fw_constants.RESULT_FW_DELETE_DONE))
    self.delete_fw(fw_dict.get('fw_id'))
    return True
def get_vlan_from_reply1(self, reply):
    '''Parse the reply from VDP daemon to get the VLAN value.

    :param reply: raw reply string from the VDP daemon.
    :return: the negotiated VLAN as an int, or constants.INVALID_VLAN
        when the reply cannot be parsed or the mode is not "assoc".
    '''
    try:
        mode_str = reply.partition("mode = ")[2].split()[0]
        if mode_str != "assoc":
            return constants.INVALID_VLAN
    except Exception:
        LOG.error(_LE("Incorrect Reply,no mode information found: %s"),
                  reply)
        return constants.INVALID_VLAN
    try:
        f_ind = reply.index("filter = ")
    except Exception:
        LOG.error(_LE("Incorrect Reply,no filter information found: %s"),
                  reply)
        return constants.INVALID_VLAN
    try:
        l_ind = reply.rindex("filter = ")
    except Exception:
        LOG.error(_LE("Incorrect Reply,no filter information found: %s"),
                  reply)
        return constants.INVALID_VLAN
    if f_ind != l_ind:
        # Currently not supported if reply contains a filter keyword
        LOG.error(_LE("Err: not supported currently"))
        return constants.INVALID_VLAN
    try:
        vlan_val = reply.partition("filter = ")[2].split('-')[0]
        vlan = int(vlan_val)
    except ValueError:
        # Fix: the format string previously had no %s placeholder, so
        # 'reply' was never rendered into the log message.
        LOG.error(_LE("Reply not formatted correctly: %s"), reply)
        return constants.INVALID_VLAN
    return vlan
def _get_instances_for_project(self, project_id):
    """Return all instances for a given project.

    :param project_id: UUID of project (tenant)
    :returns: list of nova server objects
    :raises nexc.ClientException: when the nova query is unauthorized or
        authorization fails
    """
    search_opts = {'marker': None,
                   'all_tenants': True,
                   'project_id': project_id}
    self._create_token()
    try:
        servers = self._novaclnt.servers.list(True, search_opts)
        LOG.debug('_get_instances_for_project: servers=%s', servers)
        return servers
    except nexc.Unauthorized:
        # Bug fix: previously emsg was a (format, args) tuple, so the log
        # and the raised exception carried the tuple repr instead of the
        # interpolated message. Interpolate before using it.
        emsg = (_LE('Failed to get novaclient:Unauthorised '
                    'project_id=%(proj)s user=%(user)s') %
                {'proj': self._project_id, 'user': self._user_name})
        LOG.exception(emsg)
        raise nexc.ClientException(emsg)
    except nexc.AuthorizationFailure as err:
        emsg = _LE("Failed to get novaclient %s")
        LOG.exception(emsg, err)
        raise nexc.ClientException(emsg % err)
def get_vlan_from_associate_reply(self, reply, vsiid, mac):
    """Extract the VLAN value from a VDP associate reply.

    :param reply: raw reply string from the VDP daemon
    :param vsiid: VSI id expected in the reply
    :param mac: MAC address expected in the reply
    :returns: (vlan, None) on success, or
        (constants.INVALID_VLAN, fail_reason) on any parse failure
    """
    try:
        # The reply must reference the VSI/MAC we asked about.
        matched, fail_reason = self.crosscheck_reply_vsiid_mac(
            reply, vsiid, mac)
        if not matched:
            return constants.INVALID_VLAN, fail_reason
        # Only an "assoc" reply carries a usable VLAN.
        if reply.partition("mode = ")[2].split()[0] != "assoc":
            return (constants.INVALID_VLAN,
                    self.get_vdp_failure_reason(reply))
    except Exception:
        fail_reason = vdp_const.mode_failure_reason % (reply)
        LOG.error(_LE("%s"), fail_reason)
        return constants.INVALID_VLAN, fail_reason
    filter_ok, fail_reason = self.check_filter_validity(reply, "filter = ")
    if not filter_ok:
        return constants.INVALID_VLAN, fail_reason
    # The filter value is "<vlan>-..."; parse the leading number.
    try:
        vlan = int(reply.partition("filter = ")[2].split('-')[0])
    except ValueError:
        fail_reason = vdp_const.format_failure_reason % (reply)
        LOG.error(_LE("%s"), fail_reason)
        return constants.INVALID_VLAN, fail_reason
    return vlan, None
def _server_network_relay(self):
    """Relay DHCP server responses from the global namespace to clients.

    Opens the external (global namespace) DHCP socket, then loops forever
    forwarding each response to the internal socket of the VPN it belongs
    to. Errors on individual packets are logged and the loop continues.
    """
    # Open a socket in the global namespace for DHCP
    try:
        self.ext_sock, self.ext_addr = self._open_dhcp_ext_socket()
    except Exception:
        LOG.exception(_LE("Failed to open dhcp external socket in "
                          "global ns"))
        return
    # Reused receive buffer; recv_into avoids a per-packet allocation.
    recvbuf = bytearray(RECV_BUFFER_SIZE)
    # Forward DHCP responses from external to internal networks
    while True:
        try:
            size = self.ext_sock.recv_into(recvbuf)
            pkt = DhcpPacket.parse(recvbuf)
            # Option 151 carries the VPN id used to pick the target socket.
            vpnid = pkt.get_relay_option(151)
            ciaddr = pkt.get_ciaddr()
            if vpnid not in self.int_sockets_by_vpn:
                # No internal relay registered for this VPN; drop packet.
                continue
            int_sock = self.int_sockets_by_vpn[vpnid]
            self.debug_stats.increment_pkts_from_server(vpnid)
            # A client with no address yet cannot be unicast; broadcast.
            if ciaddr == "0.0.0.0":
                ciaddr = "255.255.255.255"
            LOG.debug('Forwarding DHCP response for vpn %s', vpnid)
            int_sock.sendto(recvbuf[:size], (ciaddr, DHCP_CLIENT_PORT))
            self.debug_stats.increment_pkts_to_client(vpnid)
        except Exception:
            # Keep relaying even if one packet fails to parse/forward.
            LOG.exception(_LE('Failed to forward dhcp response'))
def create_fw(self, tenant_id, data):
    """Create a physical ASA firewall instance and apply its policy.

    :param tenant_id: tenant owning the firewall
    :param data: firewall attributes including 'tenant_name' and policy
    :returns: False when device setup fails, otherwise the result of
        applying the firewall policy
    """
    LOG.info(_LI("In creating phy ASA FW data is %s"), data)
    tenant_name = data.get('tenant_name')
    # Gather in/out addressing for the service node.
    inside = self.get_in_ip_addr(tenant_id)
    in_srvc_ip = self.get_in_srvc_node_ip_addr(tenant_id)
    outside = self.get_out_ip_addr(tenant_id)
    out_srvc_ip = self.get_out_srvc_node_ip_addr(tenant_id)
    in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
    out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
    setup_args = {'tenant_name': tenant_name,
                  'in_vlan': in_vlan,
                  'out_vlan': out_vlan,
                  'in_ip': in_srvc_ip,
                  'in_mask': '255.255.255.0',
                  'in_gw': inside.get('gateway'),
                  'in_sec_gw': inside.get('sec_gateway'),
                  'out_ip': out_srvc_ip,
                  'out_mask': '255.255.255.0',
                  'out_gw': outside.get('gateway'),
                  'out_sec_gw': outside.get('sec_gateway'),
                  'intf_in': self.interface_in,
                  'intf_out': self.interface_out}
    status = self.asa5585.setup(params=setup_args)
    if status is False:
        LOG.error(_LE("Physical FW instance creation failure for "
                      "tenant %s"), tenant_name)
        return False
    status = self.asa5585.apply_policy(data)
    if status is False:
        LOG.error(_LE("Applying FW policy failure for tenant %s"),
                  tenant_name)
    return status
def mgmt_sec_grp_id(cls):
    """Returns id of security group used by the management network."""
    plugin = manager.NeutronManager.get_plugin()
    if not utils.is_extension_supported(plugin, "security-group"):
        return
    if cls._mgmt_sec_grp_id is None:
        # Cache miss: look the group up by name in the L3 admin tenant.
        matches = plugin.get_security_groups(
            neutron_context.get_admin_context(),
            {'tenant_id': [cls.l3_tenant_id()],
             'name': [cfg.CONF.general.default_security_group]},
            ['id'])
        if len(matches) == 1:
            cls._mgmt_sec_grp_id = matches[0].get('id', None)
        elif len(matches) > 1:
            # the mgmt sec group must be unique.
            LOG.error(_LE('The security group for the management network '
                          'does not have unique name. Please ensure that '
                          'it is.'))
        else:
            # CSR Mgmt security group is not present.
            LOG.error(_LE('There is no security group for the management '
                          'network. Please create one.'))
    return cls._mgmt_sec_grp_id
def _get_profile_id(cls, p_type, resource, name):
    """Return the id of the N1KV profile with the given name.

    :param p_type: 'net_profile' for network profiles, anything else
        selects policy profiles
    :param resource: human-readable resource label used in error logs
    :param name: profile name to look up
    :returns: the profile id when exactly one match exists, else None
    """
    try:
        tenant_id = bc.get_plugin(
            cisco_constants.DEVICE_MANAGER).l3_tenant_id()
    except AttributeError:
        return
    if tenant_id is None:
        return
    admin_ctx = bc.context.get_admin_context()
    filters = {'tenant_id': [tenant_id], 'name': [name]}
    if p_type == 'net_profile':
        plugin = bc.get_plugin(constants.CISCO_N1KV_NET_PROFILE)
        profiles = plugin.get_network_profiles(admin_ctx, filters, ['id'])
    else:
        plugin = bc.get_plugin(constants.CISCO_N1KV)
        profiles = plugin.get_policy_profiles(admin_ctx, filters, ['id'])
    if len(profiles) == 1:
        return profiles[0]['id']
    if len(profiles) > 1:
        # Profile must have a unique name.
        LOG.error(_LE('The %(resource)s %(name)s does not have unique '
                      'name. Please refer to admin guide and create one.'),
                  {'resource': resource, 'name': name})
    else:
        # Profile has not been created.
        LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                      'admin guide and create one.'),
                  {'resource': resource, 'name': name})
def create_project(self, org_name, part_name, dci_id, desc=None):
    """Create project on the DCNM.

    :param org_name: name of organization.
    :param part_name: name of partition.
    :param dci_id: Data Center interconnect id.
    :param desc: description of project.
    :raises dexc.DfaClientRequestFailed: when either DCNM request fails
    """
    desc = desc or org_name

    def _succeeded(res):
        # A request succeeded when a response arrived with an OK status.
        return bool(res) and res.status_code in self._resp_ok

    res = self._create_org(org_name, desc)
    if not _succeeded(res):
        LOG.error(_LE("Failed to create %(org)s organization in DCNM."
                      "Response: %(res)s"), {'org': org_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
    LOG.debug("Created %s organization in DCNM.", org_name)

    res = self._create_or_update_partition(org_name, part_name,
                                           dci_id, desc)
    if not _succeeded(res):
        LOG.error(_LE("Failed to create %(part)s partition in DCNM."
                      "Response: %(res)s"), {'part': part_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
    LOG.debug("Created %s partition in DCNM.", part_name)
def mgmt_nw_id(cls):
    """Returns id of the management network."""
    if cls._mgmt_nw_uuid is None:
        tenant_id = cls.l3_tenant_id()
        if not tenant_id:
            return
        # Look up the management network by name in the L3 admin tenant.
        candidates = manager.NeutronManager.get_plugin().get_networks(
            neutron_context.get_admin_context(),
            {'tenant_id': [tenant_id],
             'name': [cfg.CONF.general.management_network]},
            ['id', 'subnets'])
        if len(candidates) == 1:
            subnets = candidates[0]['subnets']
            if not subnets:
                LOG.error(_LE('The management network has no subnet. '
                              'Please assign one.'))
                return
            if len(subnets) > 1:
                LOG.info(_LI('The management network has %d subnets. The '
                             'first one will be used.'), len(subnets))
            cls._mgmt_nw_uuid = candidates[0].get('id')
            cls._mgmt_subnet_uuid = subnets[0]
        elif len(candidates) > 1:
            # Management network must have a unique name.
            LOG.error(_LE('The management network for does not have '
                          'unique name. Please ensure that it is.'))
        else:
            # Management network has not been created.
            LOG.error(_LE('There is no virtual management network. Please '
                          'create one.'))
    return cls._mgmt_nw_uuid
def create_portprofile(self, profile_name, vlan_id, vnic_type, host_id):
    """Create a Port Profile on the UCS Manager.

    Runs the individual steps (VLAN profile, then port profile) that
    together create the Port Profile on the UCS Manager serving host_id.

    :returns: True on success, False when the host is unsupported or any
        creation step fails
    """
    ucsm_ip = self.get_ucsm_ip_for_host(host_id)
    if not ucsm_ip:
        LOG.info(_LI('UCS Manager network driver does not support Host_id '
                     '%s'), str(host_id))
        return False
    with self.ucsm_connect_disconnect(ucsm_ip) as handle:
        # Step 1: the VLAN profile must exist before the port profile.
        if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Vlan Profile for vlan %s'), str(vlan_id))
            return False
        # Step 2: create the port profile itself.
        if not self._create_port_profile(handle, profile_name, vlan_id,
                                         vnic_type, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Port Profile %s'), profile_name)
            return False
    return True
def process_vm_event(self, msg, phy_uplink):
    """Process a VM vNIC up/down event and record the outcome.

    :param msg: VM event message (status, MAC, port UUID, net UUID,
        segmentation id, OUI)
    :param phy_uplink: physical uplink port the event belongs to
    """
    LOG.info(_LI("In processing VM Event status %(status)s for MAC "
                 "%(mac)s UUID %(uuid)s oui %(oui)s"),
             {'status': msg.get_status(), 'mac': msg.get_mac(),
              'uuid': msg.get_port_uuid(), 'oui': msg.get_oui()})
    time.sleep(10)
    # Bug fix: failures of 'down' (delete) events were previously recorded
    # as CREATE_FAIL; choose the failure code from the event status.
    if msg.get_status() == 'up':
        res_fail = constants.CREATE_FAIL
    else:
        res_fail = constants.DELETE_FAIL
    if (not self.uplink_det_compl or
            phy_uplink not in self.ovs_vdp_obj_dict):
        LOG.error(_LE("Uplink Port Event not received yet"))
        self.update_vm_result(msg.get_port_uuid(), res_fail)
        return
    ovs_vdp_obj = self.ovs_vdp_obj_dict[phy_uplink]
    ret = ovs_vdp_obj.send_vdp_port_event(msg.get_port_uuid(),
                                          msg.get_mac(),
                                          msg.get_net_uuid(),
                                          msg.get_segmentation_id(),
                                          msg.get_status(),
                                          msg.get_oui())
    if not ret:
        LOG.error(_LE("Error in VDP port event, Err Queue enq"))
        self.update_vm_result(msg.get_port_uuid(), res_fail)
    else:
        self.update_vm_result(msg.get_port_uuid(),
                              constants.RESULT_SUCCESS)
def check_hints(self, reply):
    """Validate the 'hints' field of a VDP reply.

    :param reply: raw reply string from the VDP daemon
    :returns: (True, None) when hints are present, unique and zero;
        otherwise (False, fail_reason)
    """
    # "hints" must appear exactly once in the reply.
    try:
        first = reply.index("hints")
        last = reply.rindex("hints")
    except Exception:
        fail_reason = vdp_const.hints_failure_reason % (reply)
        LOG.error(_LE("%s"), fail_reason)
        return False, fail_reason
    if first != last:
        fail_reason = vdp_const.multiple_hints_failure_reason % (reply)
        LOG.error(_LE("%s"), fail_reason)
        return False, fail_reason
    # Layout after "hints": 4-digit length, then that many digits of value.
    try:
        payload = reply.partition("hints")[2]
        value_len = int(payload[0:4])
        hints = int(payload[4:4 + value_len])
        if hints != 0:
            # Non-zero hints signal an error condition from the daemon.
            return False, vdp_const.nonzero_hints_failure % (hints)
    except ValueError:
        fail_reason = vdp_const.format_failure_reason % (reply)
        LOG.error(_LE("%s"), fail_reason)
        return False, fail_reason
    return True, None
def _agent_registration(self):
    """Register this agent with the server.

    Registers the cfg agent with the neutron server so hosting devices
    can be assigned to it. When the server is not ready (returns False)
    registration is retried up to MAX_REGISTRATION_ATTEMPTS times with a
    REGISTRATION_RETRY_DELAY pause between attempts. If the server
    reports no device manager (returns None) or all attempts fail, the
    agent stops itself via SystemExit.
    """
    for attempt in range(MAX_REGISTRATION_ATTEMPTS):
        context = n_context.get_admin_context_without_session()
        self.send_agent_report(self.agent_state, context)
        res = self.devmgr_rpc.register_for_duty(context)
        # None: no device manager exists at all — fatal.
        if res is None:
            LOG.error(_LE("[Agent registration] Neutron server said that "
                          "no device manager was found. Cannot continue. "
                          "Exiting!"))
            raise SystemExit(_("Cfg Agent exiting"))
        # True: registered, we are done.
        if res is True:
            LOG.info(_LI("[Agent registration] Agent successfully "
                         "registered"))
            return
        # False: server not ready yet — back off and retry.
        if res is False:
            LOG.warning(_LW("[Agent registration] Neutron server said "
                            "that device manager was not ready. Retrying "
                            "in %0.2f seconds "), REGISTRATION_RETRY_DELAY)
            time.sleep(REGISTRATION_RETRY_DELAY)
    LOG.error(_LE("[Agent registration] %d unsuccessful registration "
                  "attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
    raise SystemExit(_("Cfg Agent exiting"))
def send_vdp_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
                 vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data,
                 sw_resp):
    '''Construct and send a VDP message via vdptool.

    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section
    for more detailed information.

    :param mode: Associate or De-associate
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param vsiid: VSI value
    :param filter_frmt: Filter Format
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui_id: OUI Type
    :param oui_data: OUI Data
    :param sw_resp: Flag indicating if response is required from the
                    daemon
    :return reply: Reply from vdptool
    '''
    if not self.is_ncb:
        LOG.error(_LE("EVB cannot be set on NB"))
        return
    vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
                                          typeid_ver, vsiid_frmt, vsiid,
                                          filter_frmt, gid, mac, vlan,
                                          oui_id, oui_data)
    if len(vdp_key_str) == 0:
        LOG.error(_LE("NULL List"))
        return
    oui_cmd_str = self.gen_oui_str(vdp_key_str['oui_list'])
    # If filter is not VID and if VLAN is 0, Query for the TLV first,
    # if found VDP will return the VLAN. Add support for this once
    # vdptool has the support for querying exact VSI filters
    # fixme(padkrish)
    args = ["-T", "-i", self.port_name]
    if sw_resp:
        # "-W" asks vdptool to wait for a response from the daemon.
        args.append("-W")
    args.extend(["-V", mode,
                 "-c", vdp_key_str['mode'],
                 "-c", vdp_key_str['mgrid'],
                 "-c", vdp_key_str['typeid'],
                 "-c", vdp_key_str['typeid_ver'],
                 "-c", vdp_key_str['vsiid'],
                 "-c", "hints=none",
                 "-c", vdp_key_str['filter']])
    return self.run_vdptool(args, oui_args=oui_cmd_str)
def _test_remove_routertypes(self, delete_routers=True):
    """Clean up routers (optionally) and all router types after a test.

    :param delete_routers: when True, remove static routes, floating IPs,
        router interfaces and the routers themselves before deleting the
        router types. Redundancy routers are skipped since they are
        removed automatically with their user-visible router.
    """
    if delete_routers:
        auto_deleted_router_ids = set()
        routers = self._list('routers')['routers']
        for r in routers:
            # Exclude any redundancy routers as they are removed
            # automatically when removing the user visible router
            for rr_info in r.get(
                    ha.DETAILS,
                    {ha.REDUNDANCY_ROUTERS: []})[ha.REDUNDANCY_ROUTERS]:
                auto_deleted_router_ids.add(rr_info['id'])
        for r in routers:
            if r['id'] in auto_deleted_router_ids:
                continue
            # Remove any static routes
            if r.get('routes'):
                self._update('routers', r['id'],
                             {'router': {'routes': None}})
            # Remove any floatingips using the router
            for fip in self._list(
                    'floatingips',
                    query_params='router_id=%s' % r['id'])['floatingips']:
                try:
                    self._delete('floatingips', fip['id'])
                except n_exc.NeutronException as e:
                    # since this is a cleanup after a UT execution we
                    # just log it and then ignore it. The subsequent
                    # router delete should anyway capture more serious
                    # errors.
                    # Bug fix: format string previously had no %s, which
                    # raised a logging formatting error for 'e'.
                    LOG.error(_LE('Encountered error during router clean '
                                  'up: %s'), e)
            # Remove any router interfaces
            for p in self._list(
                    'ports',
                    query_params='device_id=%s&device_owner=%s' % (
                        r['id'],
                        n_const.DEVICE_OWNER_ROUTER_INTF))['ports']:
                # get_ports can be mocked in some tests so we need to
                # ensure we get a port that is indeed a router port.
                try:
                    if (p.get('device_owner') ==
                            n_const.DEVICE_OWNER_ROUTER_INTF and
                            'fixed_ips' in p and 'id' in p):
                        req = self.new_action_request(
                            'routers', {'port_id': p['id']}, r['id'],
                            "remove_router_interface")
                        req.get_response(self.ext_api)
                except n_exc.NeutronException as e:
                    # same best-effort handling as above; log and move on.
                    LOG.error(_LE('Encountered error during router clean '
                                  'up: %s'), e)
            # Remove the router, we don't capture any exceptions here as
            # that may hide real bugs
            self._delete('routers', r['id'])
    for rt in self._list('routertypes')['routertypes']:
        # we don't capture any exceptions here as that may hide real bugs
        self._delete('routertypes', rt['id'])
def _vdp_refrsh_hndlr(self):
    '''Periodic refresh of vNIC events to VDP.

    VDP daemon itself has keepalives. This is needed on top of it to keep
    Orchestrator like OpenStack, VDP daemon and the physical switch in
    sync.
    '''
    LOG.debug("Refresh handler")
    try:
        if not self.vdp_vif_map:
            LOG.debug("vdp_vif_map not created, returning")
            return
        # Work on shallow copies so concurrent modification of the live
        # maps does not break the iteration.
        vdp_vif_map = dict.copy(self.vdp_vif_map)
        oui_vif_map = dict.copy(self.oui_vif_map)
        # Bug fix: viewkeys() is Python 2 only; iterating the dict
        # directly is equivalent and works on Python 2 and 3.
        for key in vdp_vif_map:
            lvdp_dict = vdp_vif_map.get(key)
            loui_dict = oui_vif_map.get(key)
            if not lvdp_dict:
                return
            if not loui_dict:
                oui_id = ""
                oui_data = ""
            else:
                oui_id = loui_dict.get('oui_id')
                oui_data = loui_dict.get('oui_data')
            with self.mutex_lock:
                # Re-check under the lock: the entry may have been
                # removed since the snapshot was taken.
                if key in self.vdp_vif_map:
                    LOG.debug("Sending Refresh for VSI %s", lvdp_dict)
                    vdp_vlan = self.send_vdp_assoc(
                        vsiid=lvdp_dict.get('vsiid'),
                        mgrid=lvdp_dict.get('mgrid'),
                        typeid=lvdp_dict.get('typeid'),
                        typeid_ver=lvdp_dict.get('typeid_ver'),
                        vsiid_frmt=lvdp_dict.get('vsiid_frmt'),
                        filter_frmt=lvdp_dict.get('filter_frmt'),
                        gid=lvdp_dict.get('gid'),
                        mac=lvdp_dict.get('mac'),
                        vlan=0, oui_id=oui_id, oui_data=oui_data,
                        sw_resp=True)
                    # check validity.
                    if not utils.is_valid_vlan_tag(vdp_vlan):
                        LOG.error(
                            _LE("Returned vlan %(vlan)s is invalid."),
                            {'vlan': vdp_vlan})
                        # Need to invoke CB. So no return here.
                        vdp_vlan = 0
                    exist_vdp_vlan = lvdp_dict.get('vdp_vlan')
                    # Condition will be hit only during error cases when
                    # switch reloads or when compute reloads
                    if vdp_vlan != exist_vdp_vlan:
                        # Invoke the CB Function
                        cb_fn = lvdp_dict.get('vsw_cb_fn')
                        cb_data = lvdp_dict.get('vsw_cb_data')
                        if cb_fn:
                            cb_fn(cb_data, vdp_vlan)
                        lvdp_dict['vdp_vlan'] = vdp_vlan
    except Exception as e:
        LOG.error(_LE("Exception in Refrsh %s"), str(e))
def _remove_vlan_from_vnic_templates(self, handle, vlan_id, ucsm_ip):
    """Removes VLAN from all VNIC templates that have it enabled.

    :param handle: open UCSM SDK handle
    :param vlan_id: VLAN id to remove
    :param ucsm_ip: IP of the UCS Manager (used to find configured
        VNIC templates)
    :returns: True on success; the result of _handle_ucsm_exception on
        error. NOTE(review): the early "nothing to do" path returns
        None, not True — confirm callers treat both as success.
    """
    vnic_template_info = self.ucsm_conf.get_vnic_templates_for_ucsm_ip(
        ucsm_ip)
    vlan_name = self.make_vlan_name(vlan_id)
    if not vnic_template_info:
        # Nothing to do
        return
    try:
        # All removals are batched into a single UCSM transaction.
        handle.StartTransaction()
        for temp_info in vnic_template_info:
            # temp_info is (path, comma-separated template names) —
            # presumably from config; verify against ucsm_conf.
            vnic_template_list = temp_info[1].split(',')
            vnic_template_path = temp_info[0]
            for vnic_template in vnic_template_list:
                vnic_template_full_path = (vnic_template_path +
                                           const.VNIC_TEMPLATE_PREFIX +
                                           str(vnic_template))
                LOG.debug('vnic_template_full_path: %s',
                          vnic_template_full_path)
                mo = handle.GetManagedObject(
                    None,
                    self.ucsmsdk.VnicLanConnTempl.ClassId(),
                    {self.ucsmsdk.VnicLanConnTempl.DN: (
                        vnic_template_full_path)},
                    True)
                if not mo:
                    # Missing template is not fatal; skip to the next.
                    LOG.error(_LE('UCS Manager network driver could '
                                  'not find VNIC template %s at'),
                              vnic_template_full_path)
                    continue
                vlan_dn = (vnic_template_full_path +
                           const.VLAN_PATH_PREFIX + vlan_name)
                LOG.debug('VNIC Template VLAN path; %s', vlan_dn)
                eth_if = handle.GetManagedObject(
                    mo,
                    self.ucsmsdk.VnicEtherIf.ClassId(),
                    {self.ucsmsdk.VnicEtherIf.DN: vlan_dn})
                if not eth_if:
                    # Log and continue: the VLAN may already be absent.
                    LOG.error(_LE('UCS Manager network driver could not '
                                  'delete VLAN %(vlan_name)s from VNIC '
                                  'template %(vnic_template_full_path)s'),
                              {'vlan_name': vlan_name,
                               'vnic_template_full_path':
                                   vnic_template_full_path})
                if eth_if:
                    handle.RemoveManagedObject(eth_if)
        handle.CompleteTransaction()
        return True
    except Exception as e:
        # Delegate UCSM SDK errors to the shared exception handler.
        return self._handle_ucsm_exception(e, 'VNIC Template',
                                           vlan_id, ucsm_ip)
def _get_tenant_id_using_keystone_v3(cls):
    """Return the id of the L3 admin tenant via the keystone v3 API.

    :returns: the tenant (project) id, or None when the tenant does not
        exist or its name is ambiguous (both cases are logged)
    """
    keystone = client.Client(session=cls._keystone_auth_session())
    try:
        tenant = keystone.projects.find(
            name=cfg.CONF.general.l3_admin_tenant)
    except k_exceptions.NotFound:
        LOG.error(_LE('No tenant with a name or ID of %s exists.'),
                  cfg.CONF.general.l3_admin_tenant)
        # Bug fix: previously fell through to 'return tenant.id' with
        # 'tenant' unbound, raising NameError instead of returning None.
        return None
    except k_exceptions.NoUniqueMatch:
        LOG.error(_LE('Multiple tenants matches found for %s'),
                  cfg.CONF.general.l3_admin_tenant)
        return None
    return tenant.id
def update_vnic_template(self, host_id, vlan_id, physnet,
                         vnic_template_path, vnic_template):
    """Updates VNIC Template with the vlan_id.

    :param host_id: host whose UCS Manager should be updated
    :param vlan_id: VLAN id to add to the template
    :param physnet: physical network name (used for logging only here)
    :param vnic_template_path: DN prefix under which the template lives
    :param vnic_template: name of the VNIC template
    :returns: True on success, False on failure; the result of
        _handle_ucsm_exception on UCSM SDK errors
    """
    ucsm_ip = self.get_ucsm_ip_for_host(host_id)
    vlan_name = self.make_vlan_name(vlan_id)
    with self.ucsm_connect_disconnect(ucsm_ip) as handle:
        # Create Vlan Profile — must exist before it can be referenced
        # from the VNIC template.
        if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
            LOG.error(_LE('UCS Manager network driver failed to create '
                          'Vlan Profile for vlan %s'), vlan_id)
            return False
        try:
            LOG.debug('VNIC Template Path: %s', vnic_template_path)
            vnic_template_full_path = (vnic_template_path +
                                       const.VNIC_TEMPLATE_PREFIX +
                                       str(vnic_template))
            LOG.debug('VNIC Template Path: %s for physnet %s',
                      vnic_template_full_path, physnet)
            # The template lookup and VLAN addition run in a single
            # UCSM transaction.
            handle.StartTransaction()
            mo = handle.GetManagedObject(
                None,
                self.ucsmsdk.VnicLanConnTempl.ClassId(),
                {self.ucsmsdk.VnicLanConnTempl.DN:
                    vnic_template_full_path},
                True)
            if not mo:
                LOG.error(_LE('UCS Manager network driver could '
                              'not find VNIC template %s'),
                          vnic_template_full_path)
                return False
            vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
                       vlan_name)
            LOG.debug('VNIC Template VLAN path: %s', vlan_dn)
            # DEFAULT_NET "no": the added VLAN is not the native VLAN.
            eth_if = handle.AddManagedObject(
                mo,
                self.ucsmsdk.VnicEtherIf.ClassId(),
                {self.ucsmsdk.VnicEtherIf.DN: vlan_dn,
                 self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
                 self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"},
                True)
            if not eth_if:
                LOG.error(_LE('UCS Manager network driver could '
                              'not add VLAN %(vlan_name)s to VNIC '
                              'template %(vnic_template_full_path)s'),
                          {'vlan_name': vlan_name,
                           'vnic_template_full_path':
                               vnic_template_full_path})
                return False
            handle.CompleteTransaction()
            return True
        except Exception as e:
            # Delegate UCSM SDK errors to the shared exception handler.
            return self._handle_ucsm_exception(e, 'VNIC Template',
                                               vlan_id, ucsm_ip)
def _client_network_relay(self, namespace):
    """Relay DHCP requests from one DHCP network namespace to the server.

    Opens sockets inside the given namespace, registers the namespace's
    VPN id, then forwards client requests to the external server socket
    until the namespace is marked for deletion, at which point all
    per-namespace state is cleaned up.

    :param namespace: name of the DHCP network namespace to serve
    """
    # Open a socket in the DHCP network namespace
    try:
        with self.ns_lock, netns.Namespace(namespace):
            recv_sock, send_sock, int_addr = self._open_dhcp_int_socket()
    except Exception:
        # Tolerate one transient failure; give up (and forget the
        # namespace) on the second consecutive one.
        self.int_sock_retries += 1
        if self.int_sock_retries >= 2:
            LOG.exception(_LE('Failed to open dhcp server socket in %s'),
                          namespace)
            self.int_sock_retries = 0
            del self.ns_states[namespace]
        return
    self.int_sock_retries = 0

    self.ns_states[namespace] = NS_RELAY_RUNNING
    vpnid = self._convert_ns_to_vpnid(namespace)
    self.debug_stats.add_network_stats(vpnid)
    # Publish the send socket so the server-side relay can reply here.
    self.int_sockets_by_vpn[vpnid] = send_sock
    recvbuf = bytearray(RECV_BUFFER_SIZE)
    LOG.debug('Opened dhcp server socket on ns:%s, addr:%s, vpn:%s',
              namespace, int_addr, vpnid)

    # Forward DHCP requests from internal to external networks
    while self.ns_states[namespace] != NS_RELAY_DELETING:
        try:
            recv_sock.recv_into(recvbuf)
            pkt = DhcpPacket.parse(recvbuf)
            # Tag the request with relay options so the response can be
            # routed back: 5/11 = this relay's address, 151 = VPN id.
            options = [(5, int_addr),
                       (11, int_addr),
                       (151, vpnid),
                       (152, '')]
            for option in options:
                pkt.set_relay_option(*option)
            pkt.set_giaddr(self.ext_addr)
            self.debug_stats.increment_pkts_from_client(vpnid)
            LOG.debug('Forwarding DHCP request for vpn %s', vpnid)
            self.ext_sock.send(pkt.data())
            self.debug_stats.increment_pkts_to_server(vpnid)
        except Exception:
            # Keep relaying even if one packet fails to parse/forward.
            LOG.exception(_LE('Failed to forward dhcp to server from %s'),
                          namespace)

    # Cleanup socket and internal state
    try:
        del self.ns_states[namespace]
        del self.int_sockets_by_vpn[vpnid]
        self.debug_stats.del_network_stats(vpnid)
        recv_sock.close()
        send_sock.close()
    except Exception:
        LOG.warning(_LW('Failed to cleanup relay for %s'), namespace)
def vdp_vlan_change(self, vsw_cb_data, vdp_vlan):
    '''Callback Function from VDP when provider VLAN changes.

    This will be called only during error cases when switch reloads or
    when compute reloads.

    :param vsw_cb_data: callback context dict; must contain 'net_uuid'
    :param vdp_vlan: the new provider VLAN reported by VDP (may be 0 or
        otherwise invalid during error cases)
    '''
    LOG.debug("In VDP VLAN change VLAN %s", vdp_vlan)
    if not vsw_cb_data:
        LOG.error(_LE("NULL vsw_cb_data Info received"))
        return
    net_uuid = vsw_cb_data.get('net_uuid')
    lvm = self.local_vlan_map.get(net_uuid)
    if not lvm:
        LOG.error(_LE("Network %s is not in the local vlan map"), net_uuid)
        return
    lldpad_port = self.lldpad_info
    if not lldpad_port:
        LOG.error(_LE("There is no LLDPad port available."))
        return
    exist_vdp_vlan = lvm.late_binding_vlan
    lvid = lvm.vlan
    br = self.ext_br_obj
    LOG.debug("lvid %(lvid)s exist %(vlan)s", {'lvid': lvid,
                                               'vlan': exist_vdp_vlan})
    if vdp_vlan == exist_vdp_vlan:
        LOG.debug("No change in provider VLAN %s", vdp_vlan)
        return
    # Old flows are removed before new ones are installed; the ordering
    # of these flow operations is deliberate.
    if ovs_lib.is_valid_vlan_tag(exist_vdp_vlan):
        # Clear the old flows
        # outbound
        br.delete_flows(in_port=self.phy_peer_port_num,
                        dl_vlan=lvid)
        # inbound
        self.integ_br_obj.delete_flows(in_port=self.int_peer_port_num,
                                       dl_vlan=exist_vdp_vlan)
    if ovs_lib.is_valid_vlan_tag(vdp_vlan):
        # Add the new flows
        # outbound: rewrite the local VLAN to the provider VLAN
        br.add_flow(priority=4,
                    in_port=self.phy_peer_port_num, dl_vlan=lvid,
                    actions="mod_vlan_vid:%s,normal" % vdp_vlan)
        # inbound: rewrite the provider VLAN back to the local VLAN
        self.integ_br_obj.add_flow(priority=3,
                                   in_port=self.int_peer_port_num,
                                   dl_vlan=vdp_vlan,
                                   actions="mod_vlan_vid:%s,normal" % lvid)
    else:
        LOG.error(_LE("Returned vlan %s is invalid"), vdp_vlan)
    # Even if it's 0, it's still stored to reflect provider
    # hasn't allocated a VLAN from VDP, happens during error case.
    lvm.late_binding_vlan = vdp_vlan
def dispatch_service_vm_real(self, context, instance_name, vm_image,
                             vm_flavor, hosting_device_drv,
                             credentials_info, connectivity_info,
                             ports=None):
    """Boot a service VM via Nova.

    :param context: request context passed to the config generator
    :param instance_name: name for the new Nova instance
    :param vm_image: Nova image name or id
    :param vm_flavor: Nova flavor name or id
    :param hosting_device_drv: driver providing create_config()
    :param credentials_info: credentials for the config generator
    :param connectivity_info: dict with at least 'mgmt_port'
    :param ports: optional additional ports to attach as NICs
    :returns: {'id': <server id>} on success, None on any failure
    """
    mgmt_port = connectivity_info['mgmt_port']
    # The management port is always the first NIC.
    nics = [{'port-id': mgmt_port['id']}]
    for extra_port in ports or {}:
        nics.append({'port-id': extra_port['id']})

    try:
        image = n_utils.find_resource(self._nclient.images, vm_image)
        flavor = n_utils.find_resource(self._nclient.flavors, vm_flavor)
    except (nova_exc.CommandError, Exception) as e:
        LOG.error(_LE("Failure finding needed Nova resource: %s"), e)
        return

    # Assumption for now is that this does not need to be
    # plugin dependent, only hosting device type dependent.
    try:
        files = hosting_device_drv.create_config(context,
                                                 credentials_info,
                                                 connectivity_info)
    except IOError:
        return

    try:
        server = self._nclient.servers.create(
            instance_name, image.id, flavor.id, nics=nics, files=files,
            config_drive=(files != {}))
    # There are several individual Nova client exceptions but they have
    # no other common base than Exception, therefore the long list.
    except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
            nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
            nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
            nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
            nova_exc.ConnectionRefused, nova_exc.ClientException,
            Exception) as e:
        LOG.error(_LE("Failed to create service VM instance: %s"), e)
        return
    return {'id': server.id}