def _create_vm(self, pod_namespace, pod_id, pod_name, labels, proj_uuid):
    """Create (or reuse) the VirtualMachine object backing a k8s pod.

    The VM uuid is the pod uuid; ownership is set to the given project
    with full access. Returns the vnc VirtualMachine object.
    """
    cluster = vnc_kube_config.cluster_name()
    name = VncCommon.make_name(cluster, pod_namespace, pod_name)
    # Handle the case where a pod with this name re-appears with a new uuid.
    self._check_pod_uuid_change(pod_id, name)
    owner_perms = PermType2()
    owner_perms.owner = proj_uuid
    owner_perms.owner_access = cfgm_common.PERMS_RWX
    vm = VirtualMachine(name=name, perms2=owner_perms, display_name=name)
    vm.uuid = pod_id
    vm.set_server_type("container")
    VirtualMachineKM.add_annotations(
        self, vm, pod_namespace, pod_name,
        k8s_uuid=str(pod_id), labels=json.dumps(labels))
    try:
        self._vnc_lib.virtual_machine_create(vm)
    except RefsExistError:
        # VM already exists in the API server; read and reuse it.
        vm = self._vnc_lib.virtual_machine_read(id=pod_id)
    VirtualMachineKM.locate(vm.uuid)
    return vm
def _read_and_create_loopback_virtual_network(cls, vn_fq_name, fn_fq_name):
    """Ensure the routed loopback VN exists and is referenced by its fabric.

    Locates (without creating) the fabric derived from fn_fq_name, then
    locates/creates the loopback virtual-network and, if the fabric does
    not yet reference it, adds an 'overlay-loopback' tagged ref.
    Returns (ok, resource) tuples straight from the locate calls.
    """
    ok, fabric = cls.server.get_resource_class('fabric').locate(
        fn_fq_name[:-1], create_it=False)
    if not ok:
        # Fabric must already exist; propagate the error as-is.
        return ok, fabric
    vn_kwargs = {
        'display_name': vn_fq_name[-1],
        'parent_type': 'project',
        'virtual_network_properties': VirtualNetworkType(
            forwarding_mode='l3'),
        'virtual_network_category': 'routed',
        'address_allocation_mode': "flat-subnet-only",
        'perms2': PermType2(global_access=7),
        'virtual_network_routed_properties':
            VirtualNetworkRoutedPropertiesType(shared_across_all_lrs=True),
    }
    ok, result = cls.server.get_resource_class('virtual_network').locate(
        vn_fq_name, **vn_kwargs)
    if ok and not result.get('fabric_back_refs', []):
        # First time: link the fabric to the loopback VN with its tag.
        cls.server.internal_request_ref_update(
            'fabric', fabric['uuid'], 'ADD',
            'virtual-network', result['uuid'], vn_fq_name,
            attr=json.loads(
                json.dumps(FabricNetworkTag('overlay-loopback'),
                           default=_obj_serializer_all)))
    return ok, result
def _create_iip(self, pod_name, pod_namespace, proj_uuid, vn_obj, vmi,
                vmi_ip):
    """Allocate an InstanceIp for a pod VMI from the pod ipam on the VN.

    Pod instance-ips are ALWAYS allocated from the pod ipam (or the
    ip-fabric ipam when ip-fabric forwarding is enabled) on this VN.
    Returns the vnc InstanceIp object.
    """
    # Our cache may not have the VN yet; locate it on a miss.
    vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
    if not vn:
        vn = VirtualNetworkKM.locate(vn_obj.get_uuid())
    # Isolated pod networks live in their own namespace; otherwise the
    # cluster-wide 'default' namespace settings apply.
    vn_namespace = (pod_namespace
                    if self._is_pod_network_isolated(pod_namespace)
                    else 'default')
    if self._is_ip_fabric_forwarding_enabled(vn_namespace):
        ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
    else:
        ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
    pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)

    # Build the instance-ip, owned by the pod's project.
    iip_uuid = str(uuid.uuid1())
    iip_name = VncCommon.make_name(pod_name, iip_uuid)
    owner_perms = PermType2()
    owner_perms.owner = proj_uuid
    owner_perms.owner_access = cfgm_common.PERMS_RWX
    iip_obj = InstanceIp(name=iip_name,
                         instance_ip_address=vmi_ip,
                         subnet_uuid=pod_ipam_subnet_uuid,
                         display_name=iip_name,
                         perms2=owner_perms)
    iip_obj.uuid = iip_uuid
    iip_obj.add_virtual_network(vn_obj)
    # The vnc VMI object (not the cache entry) is needed to wire the iip.
    vmi_obj = self._vnc_lib.virtual_machine_interface_read(
        fq_name=vmi.fq_name)
    iip_obj.add_virtual_machine_interface(vmi_obj)
    InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
    self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
                       " pod_ipam_subnet_uuid [%s]"
                       " vn [%s] vmi_fq_name [%s]" %
                       (self._name, ipam_fq_name, pod_ipam_subnet_uuid,
                        vn.name, vmi.fq_name))
    try:
        self._vnc_lib.instance_ip_create(iip_obj)
    except RefsExistError:
        # Already present: refresh its attributes instead.
        self._vnc_lib.instance_ip_update(iip_obj)
    InstanceIpKM.locate(iip_obj.uuid)
    return iip_obj
def __init__(self):
    """Seed the default id-perms and perms2 for newly created objects."""
    # Owner 'cloud-admin' / group 'cloud-admin-group', full rwx everywhere.
    owner_perms = PermType('cloud-admin', PERMS_RWX,
                           'cloud-admin-group', PERMS_RWX,
                           PERMS_RWX)
    self.perms = IdPermsType(permissions=owner_perms, enable=True)
    # Default perms2 of a new object: cloud-admin owner with full access,
    # no global access, not shared with any tenant.
    self.perms2 = PermType2('cloud-admin', PERMS_RWX,
                            PERMS_NONE,
                            [])
    super(Defaults, self).__init__()
def _create_virtual_interface(self, proj_obj, vn_obj, service_ns,
                              service_name, service_id, k8s_event_type,
                              vip_address=None, subnet_uuid=None,
                              tags=None):
    """Create the loadbalancer VMI and its InstanceIp for a k8s service.

    Any pre-existing VMI with the same fq-name (a stale duplicate) is
    torn down first, together with its instance-ips and floating-ips.

    Returns (vmi_obj, vip_address) on success, (None, None) on failure.
    """
    vmi_uuid = str(uuid.uuid4())
    cluster_name = vnc_kube_config.cluster_name()
    vmi_name = VncCommon.make_name(cluster_name, k8s_event_type,
                                   service_name, service_id)
    vmi_display_name = VncCommon.make_display_name(service_ns, service_name)
    # Check if VMI exists, if yes, delete it.
    vmi_obj = VirtualMachineInterface(name=vmi_name, parent_obj=proj_obj,
                                      display_name=vmi_display_name)
    try:
        vmi_id = self._vnc_lib.fq_name_to_id('virtual-machine-interface',
                                             vmi_obj.get_fq_name())
        if vmi_id:
            self.logger.error("Duplicate LB Interface %s, delete it" %
                              vmi_obj.get_fq_name())
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            # The KM cache may not have this VMI; guard against None so a
            # cache miss does not crash the cleanup path.
            iip_ids = vmi.instance_ips if vmi else []
            for iip_id in list(iip_ids):
                iip_obj = self._vnc_lib.instance_ip_read(id=iip_id)
                # Detach and delete any floating-ips before the iip.
                fip_refs = iip_obj.get_floating_ips()
                for fip_ref in fip_refs or []:
                    fip = self._vnc_lib.floating_ip_read(
                        id=fip_ref['uuid'])
                    fip.set_virtual_machine_interface_list([])
                    self._vnc_lib.floating_ip_update(fip)
                    self._vnc_lib.floating_ip_delete(id=fip_ref['uuid'])
                self._vnc_lib.instance_ip_delete(id=iip_obj.uuid)
            self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
    except NoIdError:
        # No duplicate VMI: nothing to clean up.
        pass

    # Create LB VMI.
    vmi_obj.name = vmi_name
    vmi_obj.uuid = vmi_uuid
    vmi_obj.set_virtual_network(vn_obj)
    vmi_obj.set_virtual_machine_interface_device_owner("K8S:LOADBALANCER")
    sg_name = "-".join(
        [vnc_kube_config.cluster_name(), service_ns, 'default-sg'])
    sg_obj = SecurityGroup(sg_name, proj_obj)
    vmi_obj.add_security_group(sg_obj)
    vmi_obj.port_security_enabled = True
    try:
        self.logger.debug("Create LB Interface %s " %
                          vmi_obj.get_fq_name())
        self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        VirtualMachineInterfaceKM.locate(vmi_obj.uuid)
    except BadRequest as e:
        self.logger.warning("LB (%s) Interface create failed %s " %
                            (service_name, str(e)))
        return None, None
    try:
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            id=vmi_obj.uuid)
    except NoIdError:
        # BUGFIX: log vmi_uuid here. The original referenced vmi_id, which
        # is only bound when a duplicate VMI was found above, so this
        # handler raised UnboundLocalError on the common path; vmi_uuid is
        # also the id of the VMI the failed read refers to.
        self.logger.warning("Read Service VMI failed for"
                            " service (" + service_name + ")" +
                            " with NoIdError for vmi(" + vmi_uuid + ")")
        return None, None

    # Attach tags on this VMI.
    if tags:
        self._vnc_lib.set_tags(vmi_obj, tags)

    # Create InstanceIP <--- LB VMI.
    iip_uuid = str(uuid.uuid4())
    iip_name = VncCommon.make_name(service_name, iip_uuid)
    iip_display_name = VncCommon.make_display_name(service_ns,
                                                   service_name)
    perms2 = PermType2()
    perms2.owner = proj_obj.uuid
    perms2.owner_access = cfgm_common.PERMS_RWX
    iip_obj = InstanceIp(name=iip_name, perms2=perms2,
                         display_name=iip_display_name)
    iip_obj.uuid = iip_uuid
    iip_obj.set_virtual_network(vn_obj)
    if subnet_uuid:
        iip_obj.set_subnet_uuid(subnet_uuid)
    iip_obj.set_virtual_machine_interface(vmi_obj)
    iip_obj.set_display_name(service_name)
    if vip_address:
        iip_obj.set_instance_ip_address(vip_address)
    try:
        self.logger.debug("Create LB VMI InstanceIp %s " %
                          iip_obj.get_fq_name())
        self._vnc_lib.instance_ip_create(iip_obj)
    except RefsExistError:
        self._vnc_lib.instance_ip_update(iip_obj)
    InstanceIpKM.locate(iip_obj.uuid)
    # Read back to learn the actually allocated VIP when none was given.
    iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)
    vip_address = iip_obj.get_instance_ip_address()
    self.logger.debug("Created LB VMI InstanceIp %s with VIP %s" %
                      (iip_obj.get_fq_name(), vip_address))
    return vmi_obj, vip_address
def _load_init_data(self):
    """Load init data for job playbooks.

    This function loads init data from a data file specified by the
    argument '--fabric_ansible_dir' to the database.

    The data file must be in JSON format and follow the format below:
    "my payload": {
        "object_type": "tag"
        "objects": [
            {
                "fq_name": ["fabric=management_ip"],
                "name": "fabric=management_ip",
                "tag_type_name": "fabric",
                "tag_value": "management_ip"
            }
        ]
    }
    """
    try:
        json_data = self._load_json_data()
        if json_data is None:
            self._logger.error('Unable to load init data')
            return
        for item in json_data.get("data"):
            object_type = item.get("object_type")
            # Get the class name from object type, then the class object
            # from the resource client module.
            cls_name = CamelCase(object_type)
            cls_ob = str_to_class(cls_name, resource_client.__name__)
            # Save the objects to the database (create or update).
            for obj in item.get("objects"):
                instance_obj = cls_ob.from_dict(**obj)
                fq_name = instance_obj.get_fq_name()
                try:
                    uuid_id = self._vnc_api.fq_name_to_id(
                        object_type, fq_name)
                    # Existing tags are never touched.
                    if object_type == "tag":
                        continue
                    instance_obj.set_uuid(uuid_id)
                    # Merge the current config json inside role-config
                    # objects into the shipped defaults so user edits
                    # survive the update.
                    if object_type == 'role-config':
                        role_config_obj = self._vnc_api.\
                            role_config_read(id=uuid_id)
                        cur_config_json = json.loads(
                            role_config_obj.get_role_config_config())
                        def_config_json = json.loads(
                            instance_obj.get_role_config_config())
                        def_config_json.update(cur_config_json)
                        instance_obj.set_role_config_config(
                            json.dumps(def_config_json))
                    # Profiles and device-functional-groups are not
                    # overwritten when they already exist.
                    if object_type not in [
                        'telemetry-profile', 'sflow-profile',
                        'grpc-profile', 'snmp-profile',
                        'netconf-profile', 'device-functional-group'
                    ]:
                        self._vnc_api._object_update(
                            object_type, instance_obj)
                except NoIdError:
                    self._vnc_api._object_create(object_type,
                                                 instance_obj)
        # Wire up the refs declared in the data file.
        for item in json_data.get("refs"):
            from_type = item.get("from_type")
            from_fq_name = item.get("from_fq_name")
            from_uuid = self._vnc_api.fq_name_to_id(
                from_type, from_fq_name)
            to_type = item.get("to_type")
            to_fq_name = item.get("to_fq_name")
            to_uuid = self._vnc_api.fq_name_to_id(to_type, to_fq_name)
            self._vnc_api.ref_update(from_type, from_uuid, to_type,
                                     to_uuid, to_fq_name, 'ADD')
    except Exception as e:
        err_msg = 'error while loading init data: %s\n' % str(e)
        err_msg += detailed_traceback()
        self._logger.error(err_msg)

    # create VN and IPAM for IPV6 link-local addresses
    ipv6_link_local_nw = '_internal_vn_ipv6_link_local'
    self._create_ipv6_ll_ipam_and_vn(self._vnc_api, ipv6_link_local_nw)

    # - fetch list of all the physical routers
    # - check physical and overlay role associated with each PR
    # - create ref between physical_role and physical_router object,
    #   if PR is assigned with a specific physical role
    # - create ref between overlay_roles and physical_router object,
    #   if PR is assigned with specific overlay roles
    obj_list = self._vnc_api._objects_list('physical-router')
    pr_list = obj_list.get('physical-routers')
    for pr in pr_list or []:
        try:
            pr_obj = self._vnc_api.\
                physical_router_read(id=pr.get('uuid'))
            physical_role = pr_obj.get_physical_router_role()
            overlay_roles = pr_obj.get_routing_bridging_roles()
            if overlay_roles is not None:
                overlay_roles = overlay_roles.get_rb_roles()
            if physical_role:
                try:
                    physical_role_uuid = self._vnc_api.\
                        fq_name_to_id('physical_role',
                                      ['default-global-system-config',
                                       physical_role])
                    if physical_role_uuid:
                        self._vnc_api.ref_update('physical-router',
                                                 pr.get('uuid'),
                                                 'physical-role',
                                                 physical_role_uuid,
                                                 None, 'ADD')
                except NoIdError:
                    pass
            if overlay_roles:
                for overlay_role in overlay_roles or []:
                    try:
                        overlay_role_uuid = self._vnc_api.\
                            fq_name_to_id(
                                'overlay_role',
                                ['default-global-system-config',
                                 overlay_role.lower()])
                        if overlay_role_uuid:
                            self._vnc_api.ref_update(
                                'physical-router', pr.get('uuid'),
                                'overlay-role', overlay_role_uuid,
                                None, 'ADD')
                    except NoIdError:
                        pass
        except NoIdError:
            pass

    # handle replacing master-LR as <fab_name>-master-LR here
    # as part of in-place cluster update. Copy the master-LR
    # and also its associated vns and their annotations here
    master_lr_obj = None
    try:
        master_lr_obj = self._vnc_api.logical_router_read(
            fq_name=['default-domain', 'default-project', 'master-LR'])
    except NoIdError:
        try:
            master_lr_obj = self._vnc_api.logical_router_read(
                fq_name=['default-domain', 'admin', 'master-LR'])
        except NoIdError:
            pass
    if master_lr_obj:
        vmi_refs = \
            master_lr_obj.get_virtual_machine_interface_refs() or []
        # get existing pr refs
        pr_refs = master_lr_obj.get_physical_router_refs() or []
        fabric_refs = master_lr_obj.get_fabric_refs() or []
        perms2 = master_lr_obj.get_perms2()
        fab_fq_name = None
        try:
            # This has to happen before creating fab-master-LR as
            # otherwise it will fail creation of fab-master-lr with
            # annotations having master-lr uuid. Deleting master-LR also
            # deletes lr annotations from fabric in corresponding VNs if
            # they exist.
            self._vnc_api.logical_router_delete(
                id=master_lr_obj.get_uuid())
            # Try to obtain the fabric refs either from a fabric ref if
            # one is available or from pr_refs if available.
            if pr_refs and not fabric_refs:
                # this is assuming that even though there can be
                # multiple pr refs, a LR cannot have more than
                # one fabric refs. So a random pr chosen in the pr
                # refs list will have the same fabric name as the other
                # prs in the list
                pr_ref = pr_refs[-1]
                pr_obj = self._vnc_api.physical_router_read(
                    id=pr_ref.get(
                        'uuid',
                        self._vnc_api.fq_name_to_id(pr_ref.get('to'))))
                fabric_refs = pr_obj.get_fabric_refs() or []
            if fabric_refs:
                fabric_ref = fabric_refs[-1]
                fab_fq_name = fabric_ref.get(
                    'to',
                    self._vnc_api.id_to_fq_name(fabric_ref.get('uuid')))
            # If fab_fq_name is not derivable or was not present, skip
            # creating fab_name-master-LR as fabric information is not
            # available. Otherwise copy necessary refs from the previous
            # master LR; this will update VN annotations accordingly.
            if fab_fq_name:
                self._create_fabric_master_LR(fab_fq_name, perms2,
                                              fabric_refs=fabric_refs,
                                              vmi_refs=vmi_refs,
                                              pr_refs=pr_refs)
        except NoIdError:
            pass
        except Exception as exc:
            # BUGFIX: Python 3 exceptions have no .message attribute;
            # use str(exc) so this handler cannot itself raise.
            err_msg = "An exception occurred while attempting to " \
                      "create fabric master-LR: %s " % str(exc)
            self._logger.warning(err_msg)

    # Handle fabric master LR creation for all fabrics here:
    # iterate through all existing fabric objects and
    # create a fabric master LR for each of them.
    obj_list = self._vnc_api._objects_list('fabric')
    fab_list = obj_list.get('fabrics')
    for fabric in fab_list or []:
        try:
            fab_obj = self._vnc_api. \
                fabric_read(id=fabric.get('uuid'))
            fab_fq_name = fab_obj.get_fq_name()
            # Skip creating fabric master-LR for the default fabric.
            if fab_fq_name == [
                "default-global-system-config", "default-fabric"
            ]:
                continue
            perms2 = PermType2('cloud-admin', PERMS_RWX, PERMS_RWX, [])
            # create fabric master LR
            self._create_fabric_master_LR(fab_fq_name, perms2,
                                          fabric_obj=fab_obj)
        except RefsExistError:
            pass
        except Exception as exc:
            # BUGFIX: use str(exc) instead of the Python-2-only
            # exc.message attribute.
            err_msg = "An exception occurred while attempting to " \
                      "create fabric master-LR: %s " % str(exc)
            self._logger.warning(err_msg)

    # handle deleted job_templates as part of in-place cluster update
    to_be_del_jt_names = [
        'show_interfaces_template', 'show_config_interfaces_template',
        'show_interfaces_by_names_template'
    ]
    for jt_name in to_be_del_jt_names:
        try:
            self._vnc_api.job_template_delete(
                fq_name=['default-global-system-config', jt_name])
        except NoIdError:
            pass
def test_shared_network(self):
    """Verify 'is_shared' and perms2 global_access stay in sync on a VN.

    Covers both directions: setting one on create or update must be
    reflected in the other, and clearing it must clear the other.
    """
    # Removed unused locals 'bob' and 'admin' (self.admin is used
    # directly below); 'alice' is kept for the fq_name construction.
    alice = self.alice
    vn_fq_name = [self.domain_name, alice.project, self.vn_name]

    # create VN with 'is_shared' set - validate global_access set in vnc
    vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
    vn.set_is_shared(True)
    self.alice.vnc_lib.virtual_network_create(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    self.assertEqual(vn.get_perms2().global_access, PERMS_RWX)
    self.admin.vnc_lib.virtual_network_delete(fq_name=vn_fq_name)

    # create VN with global_access set - validate 'is_shared' gets set
    vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
    perms = PermType2('cloud-admin', PERMS_RWX, PERMS_RWX, [])
    vn.set_perms2(perms)
    self.alice.vnc_lib.virtual_network_create(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    self.assertEqual(vn.get_is_shared(), True)
    self.admin.vnc_lib.virtual_network_delete(fq_name=vn_fq_name)

    # update VN 'is_shared' after initial create - ensure reflected in
    # global_access
    vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
    self.alice.vnc_lib.virtual_network_create(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    self.assertEqual(vn.get_perms2().global_access, 0)
    vn.set_is_shared(True)
    self.alice.vnc_lib.virtual_network_update(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    self.assertEqual(vn.get_perms2().global_access, PERMS_RWX)
    vn.set_is_shared(False)
    self.alice.vnc_lib.virtual_network_update(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    self.assertEqual(vn.get_perms2().global_access, 0)
    self.admin.vnc_lib.virtual_network_delete(fq_name=vn_fq_name)

    # VN global_access is reset after initial create - ensure reflected
    # in 'is_shared'
    vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
    self.alice.vnc_lib.virtual_network_create(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    # Original asserted equality with 'False or None', which evaluates
    # to None — state the intent directly.
    self.assertIsNone(vn.get_is_shared())
    perms = vn.get_perms2()
    perms.global_access = PERMS_RWX
    vn.set_perms2(perms)
    self.alice.vnc_lib.virtual_network_update(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    self.assertEqual(vn.get_is_shared(), True)
    perms = vn.get_perms2()
    perms.global_access = 0
    vn.set_perms2(perms)
    self.alice.vnc_lib.virtual_network_update(vn)
    vn = vnc_read_obj(self.admin.vnc_lib, 'virtual-network',
                      name=vn_fq_name)
    self.assertEqual(vn.get_is_shared(), False)
def _add_owner_to(self, vnc_vm):
    """Stamp the configured project as the perms2 owner of the given VM."""
    owner_perms = PermType2()
    owner_perms.set_owner(self._project.get_uuid())
    vnc_vm.set_perms2(owner_perms)