def check_security_group_rule_quota( cls, proj_dict, db_conn, rule_count):
    """Verify the project's 'security_group_rule' quota for rule_count
    additional rules against the ZooKeeper-backed quota counter.

    :param proj_dict: project dict carrying quota config and 'uuid'
    :param db_conn: DB connection, used to lazily init the ZK counter
    :param rule_count: number of rules about to be added
    :returns: (True, "") if quota is unlimited or the check passes,
        (False, (QUOTA_OVER_ERROR_CODE, msg)) when quota is exceeded
    """
    quota_counter = cls.server.quota_counter
    obj_type = 'security_group_rule'
    quota_limit = QuotaHelper.get_quota_limit(proj_dict, obj_type)
    # Negative limit means unlimited; zero new rules needs no check.
    if (rule_count and quota_limit >= 0):
        path_prefix = _DEFAULT_ZK_COUNTER_PATH_PREFIX + proj_dict['uuid']
        path = path_prefix + "/" + obj_type
        if not quota_counter.get(path):
            # Init quota counter for security group rule
            QuotaHelper._zk_quota_counter_init(
                path_prefix, {obj_type: quota_limit}, proj_dict['uuid'],
                db_conn, quota_counter)
        ok, result = QuotaHelper.verify_quota(
            obj_type, quota_limit, quota_counter[path], count=rule_count)
        if not ok:
            msg = "security_group_entries: %d" % quota_limit
            return False, (QUOTA_OVER_ERROR_CODE, msg)

        def undo():
            # Revert back quota count
            # (assumes verify_quota charged the counter by rule_count
            # on success -- TODO confirm against QuotaHelper)
            quota_counter[path] -= rule_count
        get_context().push_undo(undo)
    return True, ""
def pre_dbe_delete(cls, id, obj_dict, db_conn):
    """Release addressing state and subnet quota for a VN delete.

    Frees the network's addr_mgmt bookkeeping, decrements the project's
    subnet quota counter (for user-visible networks) and registers undo
    hooks that restore both if a later delete step fails.

    :returns: (ok, result, None) triple as expected by the delete path
    """
    cls.addr_mgmt.net_delete_req(obj_dict)
    # Hidden (user_visible=False) objects are not quota-accounted.
    if obj_dict['id_perms'].get('user_visible', True) is not False:
        ok, result = QuotaHelper.get_project_dict_for_quota(
            obj_dict['parent_uuid'], db_conn)
        if not ok:
            return False, result, None
        proj_dict = result
        ok, (subnet_count, counter) =\
            cls.addr_mgmt.get_subnet_quota_counter(obj_dict, proj_dict)
        if subnet_count:
            # NOTE(review): this relies on 'counter' being a mutable
            # counter object whose __isub__ persists the new value
            # (plain int rebinding would be a no-op) -- TODO confirm.
            counter -= subnet_count

        def undo_subnet():
            # Re-read the counter and re-add the subnets on rollback.
            ok, (subnet_count, counter) =\
                cls.addr_mgmt.get_subnet_quota_counter(obj_dict, proj_dict)
            if subnet_count:
                counter += subnet_count
        get_context().push_undo(undo_subnet)

    def undo():
        # Rollback for net_delete_req above: recreate addressing state.
        cls.addr_mgmt.net_create_req(obj_dict)
    get_context().push_undo(undo)
    return True, "", None
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate a physical-interface create and reserve its AE-ID.

    Checks the interface name and optional ESI string; when the
    interface qualifies for aggregation, reserves the AE-ID (encoded in
    the interface name) on the owning physical router in ZooKeeper and
    registers an undo hook that releases the reservation on failure.
    """
    ok, result = cls._check_interface_name(obj_dict, db_conn, None)
    if not ok:
        return ok, result

    esi = obj_dict.get('ethernet_segment_identifier')
    if esi:
        ok, result = cls._check_esi_string(esi)
        if not ok:
            return ok, result

    if not cls._interface_should_be_aggregated_in_zk(obj_dict):
        return (True, '')

    # AE number is encoded in the interface name (e.g. 'ae7' -> 7);
    # the router name is the second fq_name component.
    if_name = obj_dict['fq_name'][-1]
    pr_name = obj_dict['fq_name'][1]
    ae_num = int(if_name[2:])

    if cls.vnc_zk_client.ae_id_is_occupied(pr_name, ae_num):
        msg = ("Interface %s can't get AE-ID %d because it is "
               "already occupied on %s physical router." %
               (if_name, ae_num, pr_name))
        return False, (403, msg)

    cls.vnc_zk_client.alloc_ae_id(pr_name, if_name, ae_num)

    def undo_alloc():
        # Release the ZK reservation if a later create step fails.
        cls.vnc_zk_client.free_ae_id(pr_name, ae_num, if_name)
    get_context().push_undo(undo_alloc)
    return (True, '')
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Allocate the alias IP address before the object is created.

    Rejects an explicitly requested address that is already allocated,
    otherwise asks addr_mgmt for an address (honouring the request if
    any), stores it in obj_dict and registers an undo hook that frees
    it again on failure.

    :returns: (True, "") on success, (False, (http_code, msg)) on error
    """
    # fq_name is <vn fq_name> + [alias-ip-pool, alias-ip]; strip the
    # last two components to get the owning virtual network.
    vn_fq_name = obj_dict['fq_name'][:-2]
    req_ip = obj_dict.get("alias_ip_address")
    if req_ip and cls.addr_mgmt.is_ip_allocated(req_ip, vn_fq_name):
        return (False, (409, 'IP address already in use'))
    try:
        ok, result = cls.addr_mgmt.get_ip_free_args(vn_fq_name)
        if not ok:
            return ok, result
        aip_addr, sn_uuid, s_name = cls.addr_mgmt.ip_alloc_req(
            vn_fq_name, asked_ip_addr=req_ip, alloc_id=obj_dict['uuid'])

        def undo():
            # Free the allocated address on rollback, reusing the
            # vn/ipam dicts captured in 'result' above.
            msg = ('AddrMgmt: free AIP %s for vn=%s tenant=%s, on undo' %
                   (aip_addr, vn_fq_name, tenant_name))
            db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            cls.addr_mgmt.ip_free_req(
                aip_addr, vn_fq_name, alloc_id=obj_dict['uuid'],
                vn_dict=result.get('vn_dict'),
                ipam_dicts=result.get('ipam_dicts'))
            return True, ""
        get_context().push_undo(undo)
    except Exception as e:
        # Any allocation failure surfaces as an internal error.
        return (False, (500, str(e)))
    obj_dict['alias_ip_address'] = aip_addr
    msg = ('AddrMgmt: alloc %s AIP for vn=%s, tenant=%s, askip=%s' %
           (aip_addr, vn_fq_name, tenant_name, req_ip))
    db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
    return True, ""
def pre_dbe_delete(cls, id, obj_dict, db_conn):
    """For a vxlan-routing logical router, remove its internal VN.

    Detaches and deletes the hidden internal virtual network that
    backs the LR, tolerating already-gone objects (404 / NoIdError),
    and registers an undo hook that recreates the VN and reference.

    :returns: (ok, result, None) triple for the delete path
    """
    logical_router_type = cls.check_lr_type(obj_dict)
    if logical_router_type == 'vxlan-routing':
        # Internal VN lives next to the LR, named from the LR uuid.
        vn_int_fqname = (obj_dict['fq_name'][:-1] +
                         [get_lr_internal_vn_name(obj_dict['uuid'])])
        # NOTE(review): NoIdError raised here (VN already gone) is NOT
        # caught -- only the calls inside the try below are protected.
        # Confirm whether that is intentional.
        vn_int_uuid = db_conn.fq_name_to_uuid('virtual_network',
                                              vn_int_fqname)
        api_server = cls.server
        try:
            api_server.internal_request_ref_update(
                'logical-router', obj_dict['uuid'], 'DELETE',
                'virtual-network', vn_int_uuid, vn_int_fqname)
            api_server.internal_request_delete('virtual-network',
                                               vn_int_uuid)
        except HttpError as e:
            # 404 means the VN is already deleted; anything else fails.
            if e.status_code != 404:
                return False, (e.status_code, e.content), None
        except NoIdError:
            pass

        def undo_int_vn_delete():
            # Recreate the internal VN and its LR ref on rollback.
            return cls.create_intvn_and_ref(obj_dict)
        get_context().push_undo(undo_int_vn_delete)
    return True, '', None
def pre_dbe_delete(cls, id, obj_dict, db_conn):
    """Return the project's security-group-rule quota on SG delete.

    Decrements the ZooKeeper quota counter by the number of rules the
    security group carries and registers an undo hook that re-adds
    them if the delete is rolled back.

    :returns: (ok, result, None) triple for the delete path
    """
    ok, result = cls.dbe_read(db_conn, 'security_group', id)
    if not ok:
        return ok, result, None
    sg_dict = result
    # Hidden (user_visible=False) SGs are not quota-accounted.
    if sg_dict['id_perms'].get('user_visible', True) is not False:
        ok, result = QuotaHelper.get_project_dict_for_quota(
            sg_dict['parent_uuid'], db_conn)
        if not ok:
            return False, result, None
        proj_dict = result
        obj_type = 'security_group_rule'
        quota_limit = QuotaHelper.get_quota_limit(proj_dict, obj_type)
        # Negative limit means unlimited -- nothing to give back.
        if 'security_group_entries' in obj_dict and quota_limit >= 0:
            rule_count = len(
                obj_dict['security_group_entries']['policy_rule'])
            path_prefix = (_DEFAULT_ZK_COUNTER_PATH_PREFIX +
                           proj_dict['uuid'])
            path = path_prefix + "/" + obj_type
            quota_counter = cls.server.quota_counter
            # If the SG has been created before R3, there is no
            # path in ZK. It is created on next update and we
            # can ignore it for now
            if quota_counter.get(path):
                quota_counter[path] -= rule_count

                def undo():
                    # Revert back quota count
                    quota_counter[path] += rule_count
                get_context().push_undo(undo)
    return True, "", None
def pre_dbe_delete(cls, id, obj_dict, db_conn):
    """Delete the internal VN of a VXLAN-routing logical router.

    When VXLAN routing is enabled, the LR owns a hidden internal
    virtual network under its parent project; this hook detaches that
    VN from the LR, deletes it, and registers an undo hook which
    recreates the VN and the reference on rollback.

    :returns: (ok, result, None) triple for the delete path
    """
    ok, vxlan_routing = cls.is_vxlan_routing_enabled(db_conn, obj_dict)
    if not ok:
        return (ok, vxlan_routing, None)
    if vxlan_routing:
        ok, proj_dict = cls.get_parent_project(obj_dict, db_conn)
        # Bug fix: 'ok' was previously ignored, so an error payload
        # from get_parent_project was treated as a project dict.
        if not ok:
            return (ok, proj_dict, None)
        # Bug fix: build the internal VN fq_name on a copy -- the
        # original appended to proj_dict's own 'fq_name' list in place,
        # corrupting the cached project dict.
        vn_int_fqname = list(proj_dict.get('fq_name'))
        vn_int_name = get_lr_internal_vn_name(obj_dict.get('uuid'))
        vn_int_fqname.append(vn_int_name)
        vn_int_uuid = db_conn.fq_name_to_uuid('virtual_network',
                                              vn_int_fqname)
        api_server = db_conn.get_api_server()
        attr_obj = LogicalRouterVirtualNetworkType(
            'InternalVirtualNetwork')
        attr_dict = attr_obj.__dict__
        api_server.internal_request_ref_update(
            'logical-router', obj_dict['uuid'], 'DELETE',
            'virtual-network', vn_int_uuid, vn_int_fqname,
            attr=attr_dict)
        api_server.internal_request_delete('virtual-network',
                                           vn_int_uuid)

        def undo_int_vn_delete():
            # Recreate the internal VN and its LR ref on rollback.
            return cls.create_intvn_and_ref(obj_dict, db_conn)
        get_context().push_undo(undo_int_vn_delete)
    return True, '', None
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate a logical-router create and reserve a requested VXLAN id.

    Runs DCI-association, gateway/port, port-in-use and type checks,
    reserves an explicitly requested VXLAN id in ZooKeeper (with an
    undo hook), then delegates the final BGP VPN checks.

    :returns: (True, '') or (False, (http_code, msg))
    """
    ok, result = cls._ensure_lr_dci_association(obj_dict)
    if not ok:
        return ok, result
    ok, result = cls.check_port_gateway_not_in_same_network(
        db_conn, obj_dict)
    if not ok:
        return ok, result
    ok, result = cls.is_port_in_use_by_vm(obj_dict, db_conn)
    if not ok:
        return ok, result
    ok, result = cls._check_type(obj_dict)
    if not ok:
        return ok, result
    vxlan_id = cls._check_vxlan_id_in_lr(obj_dict)
    logical_router_type = cls.check_lr_type(obj_dict)
    if vxlan_id and logical_router_type == 'vxlan-routing':
        # If input vxlan_id is not None, that means we need to reserve it.
        # First, if vxlan_id is not None, set it in Zookeeper and set the
        # undo function for when any failures happen later.
        # But first, get the internal_vlan name using which the resource
        # in zookeeper space will be reserved.
        vxlan_fq_name = '%s:%s_vxlan' % (
            ':'.join(obj_dict['fq_name'][:-1]),
            get_lr_internal_vn_name(obj_dict['uuid']),
        )
        try:
            # Now that we have the internal VN name, allocate it in
            # zookeeper only if the resource hasn't been reserved already
            cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name, int(vxlan_id))
        except ResourceExistsError:
            msg = ("Cannot set VXLAN_ID: %s, it has already been set" %
                   vxlan_id)
            return False, (400, msg)

        def undo_vxlan_id():
            # Release the reserved id if a later step fails.
            cls.vnc_zk_client.free_vxlan_id(int(vxlan_id), vxlan_fq_name)
            return True, ""
        get_context().push_undo(undo_vxlan_id)

    # Check if type of all associated BGP VPN are 'l3'
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_router_supports_vpn_type(obj_dict)
    if not ok:
        return ok, result

    # Check if we can reference the BGP VPNs
    return cls.server.get_resource_class(
        'bgpvpn').check_router_has_bgpvpn_assoc_via_network(obj_dict)
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Reserve IRB unit numbers for a PNF service instance.

    For PNF service instances only: validates annotations, allocates a
    ZooKeeper unit id for each of the left and right service VRF IRB
    interfaces (with undo hooks freeing them on failure) and stores the
    ids in the object's service_instance_bindings key/value pairs.
    """
    if not cls._check_svc_inst_belongs_to_pnf(obj_dict):
        return True, ''

    ok, result = cls.validate_svc_inst_annotations(obj_dict)
    if not ok:
        return ok, result

    # Allocate unit number for service IRB interfaces for left and
    # right VRF on the service chaining device.
    fq_name_str = ':'.join(obj_dict['fq_name'])
    left_svc_fq_name = fq_name_str + 'left_svc'
    right_svc_fq_name = fq_name_str + 'right_svc'

    left_svc_unit = cls.vnc_zk_client.alloc_vn_id(left_svc_fq_name)

    def undo_left_svc_unit():
        cls.vnc_zk_client.free_vn_id(left_svc_unit, left_svc_fq_name)
        return True, ''
    get_context().push_undo(undo_left_svc_unit)

    right_svc_unit = cls.vnc_zk_client.alloc_vn_id(right_svc_fq_name)

    def undo_right_svc_unit():
        cls.vnc_zk_client.free_vn_id(right_svc_unit, right_svc_fq_name)
        return True, ''
    get_context().push_undo(undo_right_svc_unit)

    db_conn.config_log("Allocated IRB units, left_unit: %s "
                       "right_unit: %s" % (left_svc_unit, right_svc_unit),
                       level=SandeshLevel.SYS_DEBUG)

    # Store these unit-id's as key value pairs in
    # service_instance_bindings.
    pairs = [
        {'key': 'left-svc-unit', 'value': str(left_svc_unit)},
        {'key': 'right-svc-unit', 'value': str(right_svc_unit)},
    ]
    obj_dict['service_instance_bindings'] = {'key_value_pair': pairs}
    return True, ''
def pre_dbe_delete(cls, id, obj_dict, db_conn):
    """Remove the project's security-draft PM and default APS on delete.

    Best-effort deletes the draft policy-management child (failing the
    project delete only on non-404 errors, e.g. pending security
    modifications) and the default application-policy-set, detaching
    the APS reference first. An undo hook restores the default APS.

    :returns: (ok, result, None) triple for the delete path
    """
    draft_pm_uuid = None
    draft_pm_name = POLICY_MANAGEMENT_NAME_FOR_SECURITY_DRAFT
    draft_pm_fq_name = obj_dict['fq_name'] + [draft_pm_name]
    try:
        draft_pm_uuid = db_conn.fq_name_to_uuid(
            PolicyManagement.object_type, draft_pm_fq_name)
    except NoIdError:
        # No draft PM exists for this project -- nothing to remove.
        pass
    if draft_pm_uuid is not None:
        try:
            # If pending security modifications, it fails to delete the
            # draft PM
            cls.server.internal_request_delete(
                PolicyManagement.resource_type, draft_pm_uuid)
        except HttpError as e:
            if e.status_code != 404:
                return False, (e.status_code, e.content), None

    default_aps_uuid = None
    # (variable name keeps the original 'defaut' typo)
    defaut_aps_fq_name = obj_dict['fq_name'] +\
        ['default-%s' % ApplicationPolicySet.resource_type]
    try:
        default_aps_uuid = db_conn.fq_name_to_uuid(
            ApplicationPolicySet.object_type, defaut_aps_fq_name)
    except NoIdError:
        pass
    if default_aps_uuid is not None:
        try:
            # Detach the APS from the project before deleting it.
            cls.server.internal_request_ref_update(
                cls.resource_type,
                id,
                'DELETE',
                ApplicationPolicySet.resource_type,
                default_aps_uuid,
            )
            cls.server.internal_request_delete(
                ApplicationPolicySet.resource_type, default_aps_uuid)
        except HttpError as e:
            if e.status_code != 404:
                return False, (e.status_code, e.content), None

        def undo():
            # Recreate the default APS if the delete is rolled back.
            return cls._ensure_default_application_policy_set(
                default_aps_uuid, obj_dict['fq_name'])
        get_context().push_undo(undo)
    return True, '', None
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate a tag create, ensure its tag-type and allocate a value id.

    Rejects client-supplied 'tag_id'/'tag_type_refs', locates (creating
    if needed) the tag-type resource, allocates a per-type value id in
    ZooKeeper and composes the final tag id from type id and value id.
    Undo hooks roll back the tag-type and the value-id allocation.
    """
    tag_type_name = obj_dict['tag_type_name']

    # Both the tag id and the tag-type reference are server-managed.
    if obj_dict.get('tag_id') is not None:
        return False, (400, "Tag ID is not setable")
    if obj_dict.get('tag_type_refs') is not None:
        return False, (400, "Tag Type reference is not setable")

    ok, result = cls.server.get_resource_class('tag_type').locate(
        [tag_type_name], id_perms=IdPermsType(user_visible=False))
    if not ok:
        return False, result
    tag_type = result

    def undo_tag_type():
        cls.server.internal_request_delete('tag-type', tag_type['uuid'])
        return True, ''
    get_context().push_undo(undo_tag_type)

    obj_dict['tag_type_refs'] = [
        {'uuid': tag_type['uuid'], 'to': tag_type['fq_name']},
    ]

    # Allocate ID for tag value. Use the all fq_name to distinguish same
    # tag values between global and scoped value
    fq_name_str = ':'.join(obj_dict['fq_name'])
    value_id = cls.vnc_zk_client.alloc_tag_value_id(tag_type_name,
                                                    fq_name_str)

    def undo_value_id():
        cls.vnc_zk_client.free_tag_value_id(tag_type_name, value_id,
                                            ':'.join(obj_dict['fq_name']))
        return True, ""
    get_context().push_undo(undo_value_id)

    # Compose Tag ID with the type ID and value ID
    obj_dict['tag_id'] = "{}{:04x}".format(tag_type['tag_type_id'],
                                           value_id)
    return True, ""
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate subnets on a network-ipam create.

    Only 'flat-subnet' ipams may carry ipam_subnets or subnetting. For
    flat-subnet ipams with subnets, checks overlap and validity, then
    creates the addr_mgmt state with an undo hook deleting it again on
    a later failure.
    """
    method = obj_dict.get('ipam_subnet_method', 'user-defined-subnet')
    is_flat = (method == 'flat-subnet')

    if obj_dict.get('ipam_subnets') is not None and not is_flat:
        return False, (400, "ipam-subnets are allowed only with flat-subnet")
    if obj_dict.get('ipam_subnetting', False) and not is_flat:
        return False, (400, "subnetting is allowed only with flat-subnet")
    if not is_flat:
        return True, ""

    ipam_subnets = obj_dict.get('ipam_subnets')
    if ipam_subnets is None:
        # Flat-subnet ipam without subnets: nothing to validate.
        return True, ""

    subnets_list = cls.addr_mgmt._ipam_to_subnets(obj_dict) or []
    ok, result = cls.addr_mgmt.net_check_subnet_overlap(subnets_list)
    if not ok:
        return (ok, (400, result))

    ok, result = cls.addr_mgmt.net_check_subnet(
        ipam_subnets.get('subnets') or [])
    if not ok:
        return (ok, (409, result))

    try:
        cls.addr_mgmt.ipam_create_req(obj_dict)

        def undo():
            cls.addr_mgmt.ipam_delete_req(obj_dict)
            return True, ""
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (500, str(e)))
    return True, ""
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Reserve IRB unit numbers for a PNF service instance.

    For PNF service instances, validates annotations, allocates a ZK
    unit id for each of the left/right service VRF IRB interfaces
    (undo hooks free them on failure) and records the ids in
    service_instance_bindings.
    """
    pnf_svc_inst = cls._check_svc_inst_belongs_to_pnf(obj_dict)
    if pnf_svc_inst:
        ok, result = cls.validate_svc_inst_annotations(obj_dict)
        if not ok:
            return ok, result
        # Allocate unit number for service IRB interfaces for left and
        # right VRF on the service chaining device
        # NOTE(review): the fq_name string and the 'left_svc'/'right_svc'
        # suffix are concatenated without a ':' separator -- the ZK keys
        # are still unique, but confirm this matches other allocators.
        left_svc_fq_name = ':'.join(obj_dict['fq_name']) + 'left_svc'
        right_svc_fq_name = ':'.join(obj_dict['fq_name']) + 'right_svc'
        left_svc_unit = cls.vnc_zk_client.alloc_vn_id(left_svc_fq_name)

        def undo_left_svc_unit():
            cls.vnc_zk_client.free_vn_id(left_svc_unit, left_svc_fq_name)
            return True, ''
        get_context().push_undo(undo_left_svc_unit)

        right_svc_unit = cls.vnc_zk_client.alloc_vn_id(right_svc_fq_name)

        def undo_right_svc_unit():
            cls.vnc_zk_client.free_vn_id(right_svc_unit, right_svc_fq_name)
            return True, ''
        get_context().push_undo(undo_right_svc_unit)

        db_conn.config_log(
            "Allocated IRB units, left_unit: %s "
            "right_unit: %s" % (left_svc_unit, right_svc_unit),
            level=SandeshLevel.SYS_DEBUG)
        # Store these unit-id's as key value pairs in
        # service_instance_bindings
        obj_dict['service_instance_bindings'] = {}
        obj_dict['service_instance_bindings']['key_value_pair'] = []
        obj_dict['service_instance_bindings']['key_value_pair'].append(
            {'key': 'left-svc-unit', 'value': str(left_svc_unit)})
        obj_dict['service_instance_bindings']['key_value_pair'].append(
            {'key': 'right-svc-unit', 'value': str(right_svc_unit)})
    return True, ''
def pre_dbe_delete(cls, id, obj_dict, db_conn):
    """Tear down addr_mgmt state for a flat-subnet ipam delete.

    Non-flat ipams, or flat ones without subnets, need no cleanup.
    Otherwise the address-manager bookkeeping is removed, with an undo
    hook recreating it if a later delete step fails.
    """
    ok, read_result = cls.dbe_read(db_conn, 'network_ipam', id)
    if not ok:
        return ok, read_result, None

    # Only flat-subnet ipams that actually carry subnets hold state.
    subnet_method = read_result.get('ipam_subnet_method')
    if subnet_method is None or subnet_method != 'flat-subnet':
        return True, "", None
    if read_result.get('ipam_subnets') is None:
        return True, "", None

    cls.addr_mgmt.ipam_delete_req(obj_dict)

    def undo():
        # Restore addr_mgmt state on rollback.
        cls.addr_mgmt.ipam_create_req(obj_dict)
    get_context().push_undo(undo)
    return True, "", None
def _set_configured_security_group_id(cls, obj_dict):
    """Reconcile the SG's zookeeper-allocated id with a configured one.

    If 'configured_security_group_id' (> 0) is set, the previously
    allocated id (if any) is freed and the configured value is used.
    Otherwise the existing id is kept when ZK still maps it to this SG's
    fq_name, or a fresh id is allocated. Undo hooks attempt to restore
    the previous allocation state on failure.
    """
    fq_name_str = ':'.join(obj_dict['fq_name'])
    configured_sg_id = obj_dict.get('configured_security_group_id') or 0
    sg_id = obj_dict.get('security_group_id')
    if sg_id is not None:
        sg_id = int(sg_id)
    if configured_sg_id > 0:
        if sg_id is not None:
            cls.vnc_zk_client.free_sg_id(sg_id, fq_name_str)

            def undo_dealloacte_sg_id():
                # In case of error try to re-allocate the same ID as it was
                # not yet freed on other node
                new_sg_id = cls.vnc_zk_client.alloc_sg_id(
                    fq_name_str, sg_id)
                if new_sg_id != sg_id:
                    # NOTE(review): this alloc_sg_id result is discarded
                    # -- it allocates yet another id that is never used
                    # or freed. Looks suspicious; confirm intent.
                    cls.vnc_zk_client.alloc_sg_id(fq_name_str)
                    cls.server.internal_request_update(
                        cls.resource_type,
                        obj_dict['uuid'],
                        {'security_group_id': new_sg_id},
                    )
                return True, ""
            get_context().push_undo(undo_dealloacte_sg_id)
        obj_dict['security_group_id'] = configured_sg_id
    else:
        if (sg_id is not None and
                fq_name_str == cls.vnc_zk_client.get_sg_from_id(sg_id)):
            # ZK still maps this id to us -- keep it.
            obj_dict['security_group_id'] = sg_id
        else:
            sg_id_allocated = cls.vnc_zk_client.alloc_sg_id(fq_name_str)

            def undo_allocate_sg_id():
                cls.vnc_zk_client.free_sg_id(sg_id_allocated, fq_name_str)
                return True, ""
            get_context().push_undo(undo_allocate_sg_id)
            obj_dict['security_group_id'] = sg_id_allocated
    return True, ''
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Prepare a tag-type create: fix the name and allocate its id.

    The name and display name are forced to the last fq_name component,
    a client-supplied tag_type_id is rejected, and a fresh type id is
    allocated from ZooKeeper (freed again by an undo hook on failure).
    """
    name = obj_dict['fq_name'][-1]
    obj_dict['name'] = name
    obj_dict['display_name'] = name

    # The type id is server-allocated; clients may not choose it.
    if obj_dict.get('tag_type_id') is not None:
        return False, (400, "Tag Type ID is not setable")

    # Allocate ID for tag-type
    allocated_id = cls.vnc_zk_client.alloc_tag_type_id(name)

    def undo_type_id():
        cls.vnc_zk_client.free_tag_type_id(allocated_id,
                                           obj_dict['fq_name'])
        return True, ""
    get_context().push_undo(undo_type_id)

    obj_dict['tag_type_id'] = "0x{:04x}".format(allocated_id)
    return True, ""
def check_openstack_firewall_group_quota(cls, obj_dict, deleted=False):
    """Enforce the per-project 'firewall_group' quota.

    Verifies (and charges) the ZooKeeper-backed quota counter for the
    owning project: +1 on create, -1 on delete. Registers an undo hook
    reverting the counter if the request later fails.

    :param obj_dict: firewall-group dict (needs id_perms / parent info)
    :param deleted: True when called from the delete path
    :returns: (True, '') when no quota applies or the check passes,
        otherwise the (False, error) pair from QuotaHelper.verify_quota
    """
    obj_type = 'firewall_group'
    # Quota only applies to user-visible objects parented by a project.
    if (not obj_dict['id_perms'].get('user_visible', True) or
            obj_dict.get('parent_type') != Project.object_type):
        return True, ''
    ok, result = QuotaHelper.get_project_dict_for_quota(
        obj_dict['parent_uuid'], cls.db_conn)
    if not ok:
        return False, result
    project = result
    quota_limit = QuotaHelper.get_quota_limit(project, obj_type)
    if quota_limit < 0:
        # Negative limit means unlimited.
        return True, ''
    quota_count = 1
    if deleted:
        quota_count = -1
    path_prefix = _DEFAULT_ZK_COUNTER_PATH_PREFIX + project['uuid']
    path = path_prefix + "/" + obj_type
    if not cls.server.quota_counter.get(path):
        # Lazily create the ZK counter for pre-existing projects.
        QuotaHelper._zk_quota_counter_init(
            path_prefix, {obj_type: quota_limit}, project['uuid'],
            cls.db_conn, cls.server.quota_counter)
    ok, result = QuotaHelper.verify_quota(
        obj_type, quota_limit, cls.server.quota_counter[path],
        quota_count)
    # Bug fix: verify_quota's result was previously returned directly,
    # which made the undo registration below unreachable dead code --
    # the counter was never reverted when a request failed later.
    if not ok:
        return False, result

    def undo():
        # revert back counter in case of any failure during creation
        if not deleted:
            cls.server.quota_counter[path] -= 1
        else:
            cls.server.quota_counter[path] += 1
    get_context().push_undo(undo)
    return True, ''
def _set_configured_security_group_id(cls, obj_dict):
    """Reconcile the SG's zookeeper-allocated id with a configured one.

    (Duplicate of the sibling implementation.) A positive
    'configured_security_group_id' overrides and frees any allocated
    id; otherwise the existing id is kept when ZK still maps it to this
    fq_name, or a new one is allocated. Undo hooks try to restore the
    previous allocation state on failure.
    """
    fq_name_str = ':'.join(obj_dict['fq_name'])
    configured_sg_id = obj_dict.get('configured_security_group_id') or 0
    sg_id = obj_dict.get('security_group_id')
    if sg_id is not None:
        sg_id = int(sg_id)
    if configured_sg_id > 0:
        if sg_id is not None:
            cls.vnc_zk_client.free_sg_id(sg_id, fq_name_str)

            def undo_dealloacte_sg_id():
                # In case of error try to re-allocate the same ID as it was
                # not yet freed on other node
                new_sg_id = cls.vnc_zk_client.alloc_sg_id(fq_name_str, sg_id)
                if new_sg_id != sg_id:
                    # NOTE(review): this alloc_sg_id result is discarded
                    # -- it allocates yet another id that is never used
                    # or freed. Looks suspicious; confirm intent.
                    cls.vnc_zk_client.alloc_sg_id(fq_name_str)
                    cls.server.internal_request_update(
                        cls.resource_type,
                        obj_dict['uuid'],
                        {'security_group_id': new_sg_id},
                    )
                return True, ""
            get_context().push_undo(undo_dealloacte_sg_id)
        obj_dict['security_group_id'] = configured_sg_id
    else:
        if (sg_id is not None and
                fq_name_str == cls.vnc_zk_client.get_sg_from_id(sg_id)):
            # ZK still maps this id to us -- keep it.
            obj_dict['security_group_id'] = sg_id
        else:
            sg_id_allocated = cls.vnc_zk_client.alloc_sg_id(fq_name_str)

            def undo_allocate_sg_id():
                cls.vnc_zk_client.free_sg_id(sg_id_allocated, fq_name_str)
                return True, ""
            get_context().push_undo(undo_allocate_sg_id)
            obj_dict['security_group_id'] = sg_id_allocated
    return True, ''
def post_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """After creating a PNF service instance, allocate and persist IRB
    unit numbers for the left/right service VRFs.

    Allocates one ZK unit id per side (undo hooks free them), then
    writes both ids back onto the stored object as
    service_instance_bindings via an internal update request.
    """
    # Allocate unit number for service IRB interfaces, each for left and
    # right VRF on the service chaining device
    pnf_svc_inst = cls._check_svc_inst_belongs_to_pnf(obj_dict)
    if pnf_svc_inst:
        # NOTE(review): fq_name string and suffix are joined without a
        # ':' separator; keys remain unique, but confirm intent.
        left_svc_fq_name = ':'.join(obj_dict['fq_name']) + 'left_svc'
        right_svc_fq_name = ':'.join(obj_dict['fq_name']) + 'right_svc'
        left_svc_unit = cls.vnc_zk_client.alloc_vn_id(left_svc_fq_name)

        def undo_left_svc_unit():
            cls.vnc_zk_client.free_vn_id(left_svc_unit, left_svc_fq_name)
            return True, ""
        get_context().push_undo(undo_left_svc_unit)

        right_svc_unit = cls.vnc_zk_client.alloc_vn_id(right_svc_fq_name)

        def undo_right_svc_unit():
            cls.vnc_zk_client.free_vn_id(right_svc_unit, right_svc_fq_name)
            return True, ""
        get_context().push_undo(undo_right_svc_unit)

        if left_svc_unit and right_svc_unit:
            # Store these unit-id's as key value pairs in
            # service_instance_bindings
            api_server = cls.server
            svc_inst_obj = ServiceInstance(obj_dict)
            svc_inst_obj.set_service_instance_bindings(
                KeyValuePairs([
                    KeyValuePair('left-svc-unit', str(left_svc_unit)),
                    KeyValuePair('right-svc-unit', str(right_svc_unit))
                ]))
            # Round-trip through JSON to turn the object into the plain
            # dict expected by internal_request_update.
            svc_inst_dict = json.dumps(svc_inst_obj,
                                       default=_obj_serializer_all)
            api_server.internal_request_update('service-instance',
                                               obj_dict['uuid'],
                                               json.loads(svc_inst_dict))
    return True, ''
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Prepare a tag-type create, optionally honouring a requested id.

    Forces name/display_name to the last fq_name component. A requested
    'tag_type_id' (hex string) is only accepted in the user-defined
    range; the id is then reserved in ZooKeeper (undo hook frees it)
    and written back in '0x%04x' form.
    """
    type_str = obj_dict['fq_name'][-1]
    obj_dict['name'] = type_str
    obj_dict['display_name'] = type_str
    tag_type_id = obj_dict.get('tag_type_id') or None
    if tag_type_id:
        # Client supplies the id as a hex string.
        tag_type_id = int(tag_type_id, 16)
    # if tag-type set as input and its value is less than 0x7FFF
    # return error. Tag type set is only supported for user defined
    # tag types.
    if tag_type_id is not None and \
            not cls.vnc_zk_client.user_def_tag(tag_type_id):
        msg = "Tag type can be set only with user defined id in range\
 32768-65535"
        return False, (400, msg)
    # Allocate ID for tag-type
    try:
        type_id = cls.vnc_zk_client.alloc_tag_type_id(
            type_str, tag_type_id)
    except ResourceExistsError:
        return False, (400, "Tag Type with same Id already exists")

    def undo_type_id():
        cls.vnc_zk_client.free_tag_type_id(type_id, obj_dict['fq_name'])
        return True, ""
    get_context().push_undo(undo_type_id)

    # type_id is None for failure case and in range 0 to 65535 for success
    # case
    if type_id is None:
        return False, (400, "Failed to allocate tag type Id")
    obj_dict['tag_type_id'] = "0x{:04x}".format(type_id)
    return True, ""
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Allocate (or validate) the sub-cluster id on create.

    Delegates to the ZooKeeper allocator, which honours a requested
    'sub_cluster_id' or picks a free one; allocator errors are mapped
    to 400 responses, and an undo hook frees the id on later failure.
    """
    fq_name_str = ':'.join(obj_dict['fq_name'])
    requested_id = obj_dict.get('sub_cluster_id')
    try:
        obj_dict['sub_cluster_id'] = \
            cls.vnc_zk_client.alloc_sub_cluster_id(
                cls.server.global_autonomous_system, fq_name_str,
                requested_id)
    except ResourceExistsError:
        msg = ("Sub-cluster ID '%d' is already used, choose another one" %
               obj_dict.get('sub_cluster_id'))
        return False, (400, msg)
    except ResourceOutOfRangeError:
        msg = ("Requested ID %d is out of the range. Two bytes if global "
               "ASN uses four bytes, four bytes if global ASN uses two "
               "bytes. IDs start from 1" % obj_dict.get('sub_cluster_id'))
        return False, (400, msg)

    def undo_allocate_sub_cluster_id():
        # Free the allocated id if a later create step fails.
        cls.vnc_zk_client.free_sub_cluster_id(
            obj_dict.get('sub_cluster_id'), ':'.join(obj_dict['fq_name']))
    get_context().push_undo(undo_allocate_sub_cluster_id)
    return True, ''
def _process_ae_id(cls, db_obj_dict, vpg_name, obj_dict=None):
    """Compute AE-ID allocations/deallocations for a VPG update.

    Compares the incoming physical-interface refs (obj_dict) against
    those stored in the DB (db_obj_dict) and decides, per physical
    router, which aggregated-ethernet ids must be allocated or
    deallocated. Allocations happen here (via cls._alloc_ae_id) and are
    recorded; deallocations are only *recorded* -- the actual free is
    performed later in post_dbe_update.

    :param db_obj_dict: current VPG dict from the DB
    :param vpg_name: VPG display name, used in ZK keys and logs
    :param obj_dict: requested update (may be None/empty)
    :returns: (True, (attr_dict, alloc_dealloc_dict)) on success, where
        alloc_dealloc_dict has 'allocated_ae_id'/'deallocated_ae_id'
        lists; (False, error) on ref-update/allocation failure
    """
    attr_dict = None
    alloc_dealloc_dict = {'allocated_ae_id': [], 'deallocated_ae_id': []}
    curr_pr_dict = {}
    curr_pi_dict = {}
    db_pi_dict = {}
    db_pr_dict = {}
    vpg_uuid = db_obj_dict['uuid']
    if not obj_dict:
        obj_dict = {}
    # process incoming PIs
    for ref in obj_dict.get('physical_interface_refs') or []:
        curr_pi_dict[ref['uuid']] = ref['to'][1]
        curr_pr_dict[ref['to'][1]] = ref['attr']
    # process existing PIs in DB
    for ref in db_obj_dict.get('physical_interface_refs') or []:
        db_pi_dict[ref['uuid']] = ref['to'][1]
        # Keep the first truthy attr seen for each PR.
        if not (ref['to'][1] in db_pr_dict and db_pr_dict[ref['to'][1]]):
            db_pr_dict[ref['to'][1]] = ref['attr']
    create_pi_uuids = list(
        set(curr_pi_dict.keys()) - set(db_pi_dict.keys()))
    delete_pi_uuids = list(
        set(db_pi_dict.keys()) - set(curr_pi_dict.keys()))
    # no PIs in db_obj_dict
    if len(create_pi_uuids) < 2 and len(db_pi_dict.keys()) == 0:
        msg = "Skip AE-ID allocation as Creating PI len(%s) < 2" % (
            create_pi_uuids)
        cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        return True, (attr_dict, alloc_dealloc_dict)
    # nothing to delete or add
    if len(create_pi_uuids) == len(delete_pi_uuids) == 0:
        msg = "Skip AE-ID allocation as no PI to Create / Delete"
        cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        return True, (attr_dict, alloc_dealloc_dict)
    # nothing to delete, because rest of PIs shares same PR
    if (len(create_pi_uuids) == 0 and len(delete_pi_uuids) == 1 and
            len(db_pr_dict.keys()) == 1 and len(db_pi_dict.keys()) > 2):
        msg = "Skip AE-ID allocation as rest PI(%s) shares same PR(%s)" % (
            db_pi_dict.keys(), db_pr_dict.keys())
        cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        return True, (attr_dict, alloc_dealloc_dict)
    # allocate case
    for pi_uuid in create_pi_uuids:
        attr_dict = None
        pi_pr = curr_pi_dict.get(pi_uuid)
        pi_ae = db_pr_dict.get(pi_pr)
        if pi_ae is None:
            # allocate
            ok, result = cls._alloc_ae_id(pi_pr, vpg_name, vpg_uuid)
            if not ok:
                return ok, result
            attr_dict, _alloc_dict = result
            alloc_dealloc_dict['allocated_ae_id'].append(_alloc_dict)
            msg = "Allocated AE-ID(%s) for PI(%s) at VPG(%s)/PR(%s)" % (
                attr_dict, pi_uuid, vpg_name, pi_pr)
            cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        else:
            # The PR already has an AE attr in DB -- reuse it.
            attr_dict = pi_ae
    # re-allocate existing single PI if any
    if (len(db_pi_dict.keys()) == 1 and len(create_pi_uuids) == 1):
        db_pi_uuid = list(db_pi_dict.keys())[0]
        if (list(db_pi_dict.values())[0] != curr_pi_dict.get(
                create_pi_uuids[0])):
            # allocate a new ae-id as it belongs to different PR
            db_pr = list(db_pi_dict.values())[0]
            ok, result = cls._alloc_ae_id(db_pr, vpg_name, vpg_uuid)
            if not ok:
                return ok, result
            attr_dict_leftover_pi, _alloc_dict = result
            alloc_dealloc_dict['allocated_ae_id'].append(_alloc_dict)

            def undo_append_alloc_dict():
                # Drop the recorded allocation if the request fails.
                try:
                    alloc_dealloc_dict['allocated_ae_id'].remove(
                        _alloc_dict)
                except ValueError:
                    pass
            get_context().push_undo(undo_append_alloc_dict)
            msg = (
                "Allocated AE-ID(%s) for PI(%s) at "
                "VPG(%s)/PR(%s)" % (attr_dict_leftover_pi, db_pi_uuid,
                                    vpg_name, db_pr))
            cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            attr_to_publish = alloc_dealloc_dict
        else:
            # Same PR -- reuse the attr computed in the allocate loop.
            attr_dict_leftover_pi = attr_dict
            msg = "Re-using AE-ID(%s) for PI(%s) at VPG(%s)/PR(%s)" % (
                attr_dict_leftover_pi, db_pi_uuid, vpg_name, pi_pr)
            cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            attr_to_publish = None
        (ok, result) = cls.db_conn.ref_update(
            'virtual_port_group', vpg_uuid, 'physical_interface',
            db_pi_uuid, {'attr': attr_dict_leftover_pi}, 'ADD',
            db_obj_dict.get('id_perms'), attr_to_publish=attr_to_publish,
            relax_ref_for_delete=True)
        if not ok:
            return ok, result

        def undo_ref_update():
            # Reset the PI->VPG ref attr on rollback.
            return cls.db_conn.ref_update(
                'virtual_port_group', vpg_uuid, 'physical_interface',
                db_pi_uuid, {'attr': None}, 'ADD',
                db_obj_dict.get('id_perms'),
                attr_to_publish=attr_to_publish,
                relax_ref_for_delete=True)
        get_context().push_undo(undo_ref_update)
        msg = "Updated AE-ID(%s) in PI(%s) ref to VPG(%s)" % (
            attr_dict_leftover_pi, db_pi_uuid, vpg_name)
        cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
    # deallocate case
    _in_dealloc_list = []
    for pi_uuid in delete_pi_uuids:
        pi_pr = db_pi_dict.get(pi_uuid)
        pi_ae = db_pr_dict.get(pi_pr)
        db_pi_prs = list(db_pi_dict.values()).count(pi_pr)
        # PR/VPG is already considered for deallocation, so no need
        # to dealloc again
        if '%s:%s' % (pi_pr, vpg_name) in _in_dealloc_list:
            continue
        if (pi_ae is not None and
                (db_pi_prs < 2 or len(delete_pi_uuids) > 1)):
            ae_id = pi_ae.get('ae_num')
            # de-allocation moved to post_dbe_update
            _dealloc_dict = {
                'ae_id': ae_id,
                'prouter_name': pi_pr,
                'vpg_uuid': vpg_uuid,
                'vpg_name': vpg_name}
            alloc_dealloc_dict['deallocated_ae_id'].append(_dealloc_dict)

            # NOTE(review): this closure is defined inside the loop and
            # captures '_dealloc_dict' by name (late binding) -- if the
            # loop records several deallocations, every undo removes the
            # last one. Confirm whether binding via a default argument
            # was intended.
            def undo_add_dealloc_dict():
                alloc_dealloc_dict['deallocated_ae_id'].remove(
                    _dealloc_dict)
            get_context().push_undo(undo_add_dealloc_dict)
            # record deallocated pr/vpg
            _in_dealloc_list.append('%s:%s' % (pi_pr, vpg_name))
    # de-allocate leftover single PI, if any
    # in delete case, whatever comes in curr_pi_dict are the
    # leftovers because for delete refs, ref to be deleted
    # will not be coming in payload
    if (len(curr_pi_dict.keys()) == 1 and
            len(db_pi_dict.keys()) == len(delete_pi_uuids) + 1):
        pi_uuid = list(curr_pi_dict.keys())[0]
        pi_pr = curr_pi_dict.get(pi_uuid)
        pi_ae = curr_pr_dict.get(pi_pr)
        if '%s:%s' % (pi_pr, vpg_name) not in _in_dealloc_list:
            if pi_ae is not None:
                ae_id = pi_ae.get('ae_num')
                # de-allocation moved to post_dbe_update
                _dealloc_dict = {
                    'ae_id': ae_id,
                    'prouter_name': pi_pr,
                    'vpg_uuid': vpg_uuid,
                    'vpg_name': vpg_name}
                alloc_dealloc_dict['deallocated_ae_id'].append(
                    _dealloc_dict)

                def undo_add_dealloc_dict():
                    alloc_dealloc_dict['deallocated_ae_id'].remove(
                        _dealloc_dict)
                get_context().push_undo(undo_add_dealloc_dict)
                # record deallocated pr/vpg
                _in_dealloc_list.append('%s:%s' % (pi_pr, vpg_name))
        # TO-DO Add undo pi-ref for leftover pi
        # remove PI ref from VPG
        (ok, result) = cls.db_conn.ref_update(
            'virtual_port_group', vpg_uuid, 'physical_interface',
            pi_uuid, {'attr': None}, 'ADD',
            db_obj_dict.get('id_perms'), relax_ref_for_delete=True)
        if not ok:
            return ok, result

        def undo_ae_dealloc_from_pi():
            # Restore the AE attr on the leftover PI ref on rollback.
            attr_obj = VpgInterfaceParametersType(_dealloc_dict['ae_id'])
            (ok, result) = cls.db_conn.ref_update(
                'virtual_port_group', vpg_uuid, 'physical_interface',
                pi_uuid, {'attr': attr_obj}, 'ADD',
                db_obj_dict.get('id_perms'), relax_ref_for_delete=True)
        get_context().push_undo(undo_ae_dealloc_from_pi)
    return True, (attr_dict, alloc_dealloc_dict)
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
    """Validate and apply an update to a network-ipam.

    Rejects DNS-method changes with active VMs, any change to
    ipam_subnet_method/ipam_subnetting, and subnet lists that overlap
    either internally or with subnets of VNs (and other flat-subnet
    ipams) referring to this ipam. On success the addr_mgmt state is
    updated, with an undo hook applying the reverse update.

    :returns: (True, "") on success, (False, (http_code, msg)) on error
    """
    ok, read_result = cls.dbe_read(db_conn, 'network_ipam', id)
    if not ok:
        return ok, read_result

    def ipam_mgmt_check():
        # Disallow changing the DNS method while VMs still use the ipam.
        old_ipam_mgmt = read_result.get('network_ipam_mgmt')
        new_ipam_mgmt = obj_dict.get('network_ipam_mgmt')
        if not old_ipam_mgmt or not new_ipam_mgmt:
            return True, ""
        old_dns_method = old_ipam_mgmt.get('ipam_dns_method')
        new_dns_method = new_ipam_mgmt.get('ipam_dns_method')
        if not cls.is_change_allowed(old_dns_method, new_dns_method,
                                     read_result, db_conn):
            msg = ("Cannot change DNS Method with active VMs referring "
                   "to the IPAM")
            return False, (400, msg)
        return True, ""

    ok, result = ipam_mgmt_check()
    if not ok:
        return ok, result

    old_subnet_method = read_result.get('ipam_subnet_method')
    if 'ipam_subnet_method' in obj_dict:
        new_subnet_method = obj_dict.get('ipam_subnet_method')
        if (old_subnet_method != new_subnet_method):
            return (False, (400, 'ipam_subnet_method can not be changed'))
    if (old_subnet_method != 'flat-subnet'):
        # Non-flat ipams carry no subnets -- nothing else to validate.
        if 'ipam_subnets' in obj_dict:
            msg = "ipam-subnets are allowed only with flat-subnet"
            return False, (400, msg)
        return True, ""

    old_subnetting = read_result.get('ipam_subnetting')
    if 'ipam_subnetting' in obj_dict:
        subnetting = obj_dict.get('ipam_subnetting', False)
        if (old_subnetting != subnetting):
            return (False, (400, 'ipam_subnetting can not be changed'))

    if 'ipam_subnets' in obj_dict:
        req_subnets_list = cls.addr_mgmt._ipam_to_subnets(obj_dict)
        # First check the overlap condition within ipam_subnets
        ok, result = cls.addr_mgmt.net_check_subnet_overlap(
            req_subnets_list)
        if not ok:
            return (ok, (400, result))
        # if subnets are modified then make sure new subnet lists are
        # not in overlap conditions with VNs subnets and other ipams
        # referred by all VNs referring this ipam
        vn_refs = read_result.get('virtual_network_back_refs', [])
        ref_ipam_uuid_list = []
        refs_subnets_list = []
        for ref in vn_refs:
            vn_id = ref.get('uuid')
            try:
                (ok, vn_dict) = db_conn.dbe_read('virtual_network',
                                                 vn_id)
            except NoIdError:
                # VN vanished concurrently -- skip it.
                continue
            if not ok:
                # NOTE(review): every other error path returns
                # (ok, (code, msg)); this returns the raw dict with no
                # HTTP code -- confirm callers tolerate it.
                return False, vn_dict
            # get existing subnets on this VN and on other ipams
            # this VN refers and run a overlap check.
            ipam_refs = vn_dict.get('network_ipam_refs', [])
            for ipam in ipam_refs:
                ref_ipam_uuid = ipam['uuid']
                if ref_ipam_uuid == id:
                    # This is a ipam for which update request has come
                    continue
                if ref_ipam_uuid in ref_ipam_uuid_list:
                    continue
                # check if ipam is a flat-subnet, for flat-subnet ipam
                # add uuid in ref_ipam_uuid_list, to read ipam later
                # to get current ipam_subnets from ipam
                vnsn_data = ipam.get('attr') or {}
                ref_ipam_subnets = vnsn_data.get('ipam_subnets') or []
                if len(ref_ipam_subnets) == 1:
                    # flat subnet ipam will have only one entry in
                    # vn->ipam link without any ip_prefix
                    ref_ipam_subnet = ref_ipam_subnets[0]
                    ref_subnet = ref_ipam_subnet.get('subnet') or {}
                    if 'ip_prefix' not in ref_subnet:
                        # This is a flat-subnet,
                        ref_ipam_uuid_list.append(ref_ipam_uuid)
            # vn->ipam link to the refs_subnets_list
            vn_subnets_list = cls.addr_mgmt._vn_to_subnets(vn_dict)
            if vn_subnets_list:
                refs_subnets_list += vn_subnets_list
        for ipam_uuid in ref_ipam_uuid_list:
            (ok, ipam_dict) = cls.dbe_read(db_conn, 'network_ipam',
                                           ipam_uuid)
            if not ok:
                # NOTE(review): this returns a 3-tuple where callers of
                # pre_dbe_update expect (ok, result); probably intended
                # as (ok, (409, ipam_dict)) -- confirm before changing.
                return (ok, 409, ipam_dict)
            ref_subnets_list = cls.addr_mgmt._ipam_to_subnets(ipam_dict)
            refs_subnets_list += ref_subnets_list
        (ok, result) = cls.addr_mgmt.check_overlap_with_refs(
            refs_subnets_list, req_subnets_list)
        if not ok:
            return (ok, (400, result))

    ipam_subnets = obj_dict.get('ipam_subnets')
    if ipam_subnets is not None:
        subnets = ipam_subnets.get('subnets') or []
        (ok, result) = cls.addr_mgmt.net_check_subnet(subnets)
        if not ok:
            return (ok, (409, result))
    (ok, result) = cls.addr_mgmt.ipam_check_subnet_delete(read_result,
                                                          obj_dict)
    if not ok:
        return (ok, (409, result))
    (ok, result) = cls.addr_mgmt.ipam_validate_subnet_update(read_result,
                                                             obj_dict)
    if not ok:
        return (ok, (400, result))
    try:
        cls.addr_mgmt.ipam_update_req(fq_name, read_result, obj_dict, id)

        def undo():
            # failed => update with flipped values for db_dict and req_dict
            cls.addr_mgmt.ipam_update_req(fq_name, obj_dict, read_result,
                                          id)
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (500, str(e)))
    return True, ""
def _manage_vpg_association(cls, vmi_id, api_server, db_conn, phy_links,
                            vpg_name=None):
    """Associate a VMI's physical links with a virtual-port-group (VPG).

    Resolves the fabric and physical interfaces from the link-local
    bindings, validates the PIs are not owned by another VPG, reads the
    named VPG (or creates an internal one when vpg_name is None), then
    reconciles the VPG's physical-interface refs: stale refs are removed
    (with AE-ID deallocation) and new refs are added (with AE-ID
    allocation).  Undo callbacks are pushed for every allocation so a
    failed request rolls back.

    Returns either an error tuple on failure or
    (vpg_uuid, {'allocated_ae_id': [...], 'deallocated_ae_id': [...]})
    on success.
    """
    fabric_name = None
    phy_interface_uuids = []
    old_phy_interface_uuids = []
    new_pi_to_pr_dict = {}
    old_pi_to_pr_dict = {}
    for link in phy_links:
        if link.get('fabric'):
            # All links of one VPG must name the same fabric.
            if fabric_name is not None and fabric_name != link['fabric']:
                msg = 'Physical interfaces in the same vpg '\
                      'should belong to the same fabric'
                return (False, (400, msg))
            fabric_name = link['fabric']
        else:
            # use default fabric if it's not in link local information
            fabric_name = 'default-fabric'

        phy_interface_name = link['port_id']
        prouter_name = link['switch_info']
        pi_fq_name = ['default-global-system-config', prouter_name,
                      phy_interface_name]
        pi_uuid = db_conn.fq_name_to_uuid('physical_interface', pi_fq_name)
        phy_interface_uuids.append(pi_uuid)
        # Remember which physical router each requested PI lives on.
        new_pi_to_pr_dict[pi_uuid] = prouter_name

    # check if new physical interfaces belongs to some other vpg
    for uuid in set(phy_interface_uuids):
        ok, phy_interface_dict = db_conn.dbe_read(
            obj_type='physical-interface', obj_id=uuid,
            obj_fields=['virtual_port_group_back_refs'])
        if not ok:
            return (ok, 400, phy_interface_dict)
        vpg_refs = phy_interface_dict.get('virtual_port_group_back_refs')
        # A PI may only be claimed by one VPG; only the first back-ref is
        # checked here.
        if vpg_refs and vpg_name and vpg_refs[0]['to'][-1] != vpg_name:
            msg = 'Physical interface %s already belong to the vpg %s' %\
                  (phy_interface_dict.get('name'), vpg_refs[0]['to'][-1])
            return (False, (400, msg))

    if vpg_name:
        # read the vpg object
        vpg_fq_name = ['default-global-system-config', fabric_name,
                       vpg_name]
        try:
            vpg_uuid = db_conn.fq_name_to_uuid('virtual_port_group',
                                               vpg_fq_name)
        except NoIdError:
            msg = 'Vpg object %s is not found' % vpg_name
            return (False, (404, msg))
        ok, vpg_dict = db_conn.dbe_read(obj_type='virtual-port-group',
                                        obj_id=vpg_uuid)
        if not ok:
            return (ok, 400, vpg_dict)
    else:
        # create vpg object
        # NOTE(review): the third fq_name element here is a PI uuid, not
        # a name — presumably only used as a uniqueness key for the ZK
        # vpg-id allocator; confirm against alloc_vpg_id's contract.
        fabric_fq_name = [
            'default-global-system-config',
            fabric_name,
            phy_interface_uuids[0],
        ]
        vpg_id = cls.vnc_zk_client.alloc_vpg_id(':'.join(fabric_fq_name))

        def undo_vpg_id():
            # Roll back the ZK vpg-id allocation on request failure.
            cls.vnc_zk_client.free_vpg_id(vpg_id, ':'.join(fabric_fq_name))
            return True, ""
        get_context().push_undo(undo_vpg_id)

        vpg_name = "vpg-internal-" + str(vpg_id)
        vpg_obj = VirtualPortGroup(
            parent_type='fabric',
            fq_name=['default-global-system-config', fabric_name,
                     vpg_name],
            virtual_port_group_user_created=False,
            virtual_port_group_lacp_enabled=True)
        # Round-trip through JSON to get a plain dict for the internal
        # create call.
        vpg_int_dict = json.dumps(vpg_obj, default=_obj_serializer_all)
        ok, resp = api_server.internal_request_create(
            'virtual-port-group', json.loads(vpg_int_dict))
        if not ok:
            return (ok, 400, resp)
        vpg_dict = resp['virtual-port-group']
        vpg_uuid = resp['virtual-port-group']['uuid']

        def undo_vpg_create():
            # Roll back the internally created VPG on request failure.
            cls.server.internal_request_delete('virtual-port-group',
                                               vpg_uuid)
            return True, ''
        get_context().push_undo(undo_vpg_create)

    # Snapshot the VPG's current PI refs and their router/AE-ID so we can
    # diff against the requested set.
    old_phy_interface_refs = vpg_dict.get('physical_interface_refs')
    for ref in old_phy_interface_refs or []:
        old_pi_to_pr_dict[ref['uuid']] = {
            'prouter_name': ref['to'][1],
            'ae_id': ref['attr'].get('ae_num') if ref['attr'] else None}
        old_phy_interface_uuids.append(ref['uuid'])

    ret_dict = {}
    ret_dict['deallocated_ae_id'] = []
    ret_dict['allocated_ae_id'] = []

    # delete old physical interfaces to the vpg
    for uuid in set(old_phy_interface_uuids) - set(phy_interface_uuids):
        prouter_dict = old_pi_to_pr_dict.get(uuid)
        dealloc_dict = cls._check_and_free_ae_id(
            phy_links, prouter_dict, vpg_name, new_pi_to_pr_dict)
        ret_dict['deallocated_ae_id'].append(dealloc_dict)
        api_server.internal_request_ref_update(
            'virtual-port-group', vpg_uuid, 'DELETE',
            'physical-interface', uuid)

    # add new physical interfaces to the vpg
    pr_to_ae_id = {}
    for uuid in phy_interface_uuids:
        prouter_name = new_pi_to_pr_dict.get(uuid)
        if pr_to_ae_id.get(prouter_name) is None:
            # First PI seen on this router: allocate (or look up) its
            # AE-ID once and cache it for subsequent PIs.
            attr_obj, ae_id = cls._check_and_alloc_ae_id(
                phy_links, prouter_name, vpg_name, old_pi_to_pr_dict)
            pr_to_ae_id[prouter_name] = ae_id

            # Only multi-link (aggregated) VPGs report an allocation.
            if len(phy_links) > 1 and ae_id is not None:
                alloc_dict = {}
                alloc_dict['ae_id'] = ae_id
                alloc_dict['prouter_name'] = prouter_name
                alloc_dict['vpg_name'] = vpg_name
                ret_dict['allocated_ae_id'].append(alloc_dict)
        else:
            # Reuse the AE-ID already chosen for this router.
            attr_obj = VpgInterfaceParametersType(
                ae_num=pr_to_ae_id.get(prouter_name))

        api_server.internal_request_ref_update(
            'virtual-port-group', vpg_uuid, 'ADD', 'physical-interface',
            uuid, attr=attr_obj.__dict__ if attr_obj else None,
            relax_ref_for_delete=True)

    return vpg_uuid, ret_dict
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate an instance-ip create request and allocate its address.

    The address may come from a virtual network (virtual_network_refs),
    from the network-ipams attached to a vrouter (virtual_router_refs),
    or from explicit network_ipam_refs; the combinations router+ipam and
    router+network are rejected.  On success the allocated address (and
    subnet info) is written back into obj_dict and an undo callback that
    frees it is pushed for rollback.

    :return: (True, "") on success or (False, (<http code>, <msg>)).
    """
    virtual_router_refs = obj_dict.get('virtual_router_refs')
    virtual_network_refs = obj_dict.get('virtual_network_refs')
    network_ipam_refs = obj_dict.get('network_ipam_refs')

    # Mutually exclusive allocation sources.
    if virtual_router_refs and network_ipam_refs:
        msg = "virtual_router_refs and ipam_refs are not allowed"
        return (False, (400, msg))

    if virtual_router_refs and virtual_network_refs:
        msg = "router_refs and network_refs are not allowed"
        return (False, (400, msg))

    if virtual_network_refs:
        vn_fq_name = obj_dict['virtual_network_refs'][0]['to']
        if (vn_fq_name == IP_FABRIC_VN_FQ_NAME or
                vn_fq_name == LINK_LOCAL_VN_FQ_NAME):
            # Ignore ip-fabric and link-local address allocations
            return True, ""
        vn_id = db_conn.fq_name_to_uuid('virtual_network', vn_fq_name)
        ok, result = cls.dbe_read(db_conn, 'virtual_network', vn_id,
                                  obj_fields=['router_external',
                                              'network_ipam_refs',
                                              'address_allocation_mode'])
        if not ok:
            return ok, result
        vn_dict = result
        ipam_refs = None
    else:
        vn_fq_name = vn_id = vn_dict = None

    if virtual_router_refs:
        if len(virtual_router_refs) > 1:
            msg = "Instance IP cannot refer to multiple vrouters"
            return False, (400, msg)
        vrouter_uuid = virtual_router_refs[0].get('uuid')
        ok, vrouter_dict = db_conn.dbe_read(obj_type='virtual_router',
                                            obj_id=vrouter_uuid)
        if not ok:
            return (ok, (400, obj_dict))
        # Allocate from the ipams attached to the vrouter.
        ipam_refs = vrouter_dict.get('network_ipam_refs') or []
    else:
        ipam_refs = obj_dict.get('network_ipam_refs')

    subnet_uuid = obj_dict.get('subnet_uuid')
    if subnet_uuid and virtual_router_refs:
        msg = "subnet uuid based allocation not supported with vrouter"
        return (False, (400, msg))

    req_ip = obj_dict.get("instance_ip_address")
    req_ip_family = obj_dict.get("instance_ip_family")
    if req_ip_family == "v4":
        req_ip_version = 4
    elif req_ip_family == "v6":
        req_ip_version = 6
    else:
        # No family requested: let addr-mgmt pick the version.
        req_ip_version = None

    # allocation for requested ip from a network_ipam is not supported
    if ipam_refs and req_ip:
        msg = ("allocation for requested IP from a network_ipam is not "
               "supported")
        return False, (400, msg)

    # if request has ip and not g/w ip, report if already in use.
    # for g/w ip, creation allowed but only can ref to router port.
    if req_ip and cls.addr_mgmt.is_ip_allocated(req_ip, vn_fq_name,
                                                vn_uuid=vn_id,
                                                vn_dict=vn_dict):
        if not cls.addr_mgmt.is_gateway_ip(vn_dict, req_ip):
            return False, (409, "IP address already in use")
        elif cls._vmi_has_vm_ref(db_conn, obj_dict):
            return False, (400, "Gateway IP cannot be used by VM port")

    alloc_pool_list = []
    if 'virtual_router_refs' in obj_dict:
        # go over all the ipam_refs and build a list of alloc_pools
        # from where ip is expected
        for vr_ipam in ipam_refs:
            vr_ipam_data = vr_ipam.get('attr', {})
            vr_alloc_pools = vr_ipam_data.get('allocation_pools', [])
            alloc_pool_list.extend(
                [(vr_alloc_pool) for vr_alloc_pool in vr_alloc_pools])

    subscriber_tag = obj_dict.get('instance_ip_subscriber_tag')
    try:
        if vn_fq_name:
            # 'result' is reused by the undo closure below for the
            # free-args (vn/ipam dicts) of the rollback free call.
            ok, result = cls.addr_mgmt.get_ip_free_args(vn_fq_name,
                                                        vn_dict)
            if not ok:
                return ok, result
        (ip_addr, sn_uuid, subnet_name) = cls.addr_mgmt.ip_alloc_req(
            vn_fq_name, vn_dict=vn_dict, sub=subnet_uuid,
            asked_ip_addr=req_ip,
            asked_ip_version=req_ip_version,
            alloc_id=obj_dict['uuid'],
            ipam_refs=ipam_refs,
            alloc_pools=alloc_pool_list,
            iip_subscriber_tag=subscriber_tag)

        def undo():
            # Roll back the allocation if a later create stage fails.
            msg = ("AddrMgmt: free IIP %s, vn=%s tenant=%s on post fail"
                   % (ip_addr, vn_fq_name, tenant_name))
            db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            cls.addr_mgmt.ip_free_req(ip_addr, vn_fq_name,
                                      alloc_id=obj_dict['uuid'],
                                      ipam_refs=ipam_refs,
                                      vn_dict=vn_dict,
                                      ipam_dicts=result.get('ipam_dicts'))
            return True, ""
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (400, str(e)))

    obj_dict['instance_ip_address'] = ip_addr
    obj_dict['subnet_uuid'] = sn_uuid
    if subnet_name:
        # subnet_name is a CIDR string ("prefix/len") here.
        ip_prefix = subnet_name.split('/')[0]
        prefix_len = int(subnet_name.split('/')[1])
        instance_ip_subnet = {'ip_prefix': ip_prefix,
                              'ip_prefix_len': prefix_len}
        obj_dict['instance_ip_subnet'] = instance_ip_subnet

    msg = ("AddrMgmt: alloc IIP %s for vn=%s, tenant=%s, askip=%s" %
           (ip_addr, vn_fq_name, tenant_name, req_ip))
    db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
    return True, ""
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate and allocate the address for a new instance-ip.

    Exactly one allocation source is honoured: a referenced virtual
    network, the ipams of a referenced vrouter, or explicit ipam refs;
    router+ipam and router+network combinations are rejected up front.
    The allocated address is stored into obj_dict and an undo callback
    freeing it is registered for rollback.

    :return: (True, "") or (False, (<http code>, <message>)).
    """
    virtual_router_refs = obj_dict.get('virtual_router_refs')
    virtual_network_refs = obj_dict.get('virtual_network_refs')
    network_ipam_refs = obj_dict.get('network_ipam_refs')

    # Reject ambiguous combinations of allocation sources.
    if virtual_router_refs and network_ipam_refs:
        msg = "virtual_router_refs and ipam_refs are not allowed"
        return (False, (400, msg))

    if virtual_router_refs and virtual_network_refs:
        msg = "router_refs and network_refs are not allowed"
        return (False, (400, msg))

    if virtual_network_refs:
        vn_fq_name = obj_dict['virtual_network_refs'][0]['to']
        if (vn_fq_name == IP_FABRIC_VN_FQ_NAME or
                vn_fq_name == LINK_LOCAL_VN_FQ_NAME):
            # Ignore ip-fabric and link-local address allocations
            return True, ""
        vn_id = db_conn.fq_name_to_uuid('virtual_network', vn_fq_name)
        ok, result = cls.dbe_read(db_conn, 'virtual_network', vn_id,
                                  obj_fields=[
                                      'router_external',
                                      'network_ipam_refs',
                                      'address_allocation_mode'
                                  ])
        if not ok:
            return ok, result
        vn_dict = result
        ipam_refs = None
    else:
        vn_fq_name = vn_id = vn_dict = None

    if virtual_router_refs:
        if len(virtual_router_refs) > 1:
            msg = "Instance IP cannot refer to multiple vrouters"
            return False, (400, msg)
        vrouter_uuid = virtual_router_refs[0].get('uuid')
        ok, vrouter_dict = db_conn.dbe_read(obj_type='virtual_router',
                                            obj_id=vrouter_uuid)
        if not ok:
            return (ok, (400, obj_dict))
        # vrouter case: allocate from the ipams the vrouter refers to.
        ipam_refs = vrouter_dict.get('network_ipam_refs') or []
    else:
        ipam_refs = obj_dict.get('network_ipam_refs')

    subnet_uuid = obj_dict.get('subnet_uuid')
    if subnet_uuid and virtual_router_refs:
        msg = "subnet uuid based allocation not supported with vrouter"
        return (False, (400, msg))

    req_ip = obj_dict.get("instance_ip_address")
    req_ip_family = obj_dict.get("instance_ip_family")
    if req_ip_family == "v4":
        req_ip_version = 4
    elif req_ip_family == "v6":
        req_ip_version = 6
    else:
        # Unspecified family: addr-mgmt chooses the IP version.
        req_ip_version = None

    # allocation for requested ip from a network_ipam is not supported
    if ipam_refs and req_ip:
        msg = ("allocation for requested IP from a network_ipam is not "
               "supported")
        return False, (400, msg)

    # if request has ip and not g/w ip, report if already in use.
    # for g/w ip, creation allowed but only can ref to router port.
    if req_ip and cls.addr_mgmt.is_ip_allocated(
            req_ip, vn_fq_name, vn_uuid=vn_id, vn_dict=vn_dict):
        if not cls.addr_mgmt.is_gateway_ip(vn_dict, req_ip):
            return False, (409, "IP address already in use")
        elif cls._vmi_has_vm_ref(db_conn, obj_dict):
            return False, (400, "Gateway IP cannot be used by VM port")

    alloc_pool_list = []
    if 'virtual_router_refs' in obj_dict:
        # go over all the ipam_refs and build a list of alloc_pools
        # from where ip is expected
        for vr_ipam in ipam_refs:
            vr_ipam_data = vr_ipam.get('attr', {})
            vr_alloc_pools = vr_ipam_data.get('allocation_pools', [])
            alloc_pool_list.extend([(vr_alloc_pool)
                                    for vr_alloc_pool in vr_alloc_pools])

    subscriber_tag = obj_dict.get('instance_ip_subscriber_tag')
    try:
        if vn_fq_name:
            # 'result' (free-args) is captured by the undo closure
            # below so the rollback free can reuse the cached dicts.
            ok, result = cls.addr_mgmt.get_ip_free_args(
                vn_fq_name, vn_dict)
            if not ok:
                return ok, result
        (ip_addr, sn_uuid, subnet_name) = cls.addr_mgmt.ip_alloc_req(
            vn_fq_name, vn_dict=vn_dict, sub=subnet_uuid,
            asked_ip_addr=req_ip,
            asked_ip_version=req_ip_version,
            alloc_id=obj_dict['uuid'],
            ipam_refs=ipam_refs,
            alloc_pools=alloc_pool_list,
            iip_subscriber_tag=subscriber_tag)

        def undo():
            # Free the address again if a later create stage fails.
            msg = ("AddrMgmt: free IIP %s, vn=%s tenant=%s on post fail"
                   % (ip_addr, vn_fq_name, tenant_name))
            db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            cls.addr_mgmt.ip_free_req(ip_addr, vn_fq_name,
                                      alloc_id=obj_dict['uuid'],
                                      ipam_refs=ipam_refs,
                                      vn_dict=vn_dict,
                                      ipam_dicts=result.get('ipam_dicts'))
            return True, ""
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (400, str(e)))

    obj_dict['instance_ip_address'] = ip_addr
    obj_dict['subnet_uuid'] = sn_uuid
    if subnet_name:
        # subnet_name carries the CIDR ("prefix/len") of the chosen
        # subnet.
        ip_prefix = subnet_name.split('/')[0]
        prefix_len = int(subnet_name.split('/')[1])
        instance_ip_subnet = {
            'ip_prefix': ip_prefix,
            'ip_prefix_len': prefix_len
        }
        obj_dict['instance_ip_subnet'] = instance_ip_subnet

    msg = ("AddrMgmt: alloc IIP %s for vn=%s, tenant=%s, askip=%s" %
           (ip_addr, vn_fq_name, tenant_name, req_ip))
    db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
    return True, ""
def _alloc_ae_id(cls, prouter_name, vpg_name, vpg_uuid):
    """Allocate (or look up) an AE-ID for a VPG on a physical router.

    The chosen AE-ID is persisted at the ZK path
    <prefix>/vpg:<vpg_uuid>/<prouter_name>, so a second call for the
    same VPG/router pair returns the recorded value.  Allocation is
    retried while exhaustion is only apparent (other nodes may race);
    real exhaustion (>= _AE_MAX_ID in use on the router) is reported
    as a 400 error.  Undo callbacks are pushed for every ZK change.

    :return: (True, (attr_dict, alloc_dict)) or (False, (400, msg)).
    """
    # create vpg node at /id/ae-id-vpg/
    vpg_zk_path = os.path.join(cls.vnc_zk_client._vpg_ae_id_zk_path_prefix,
                               'vpg:%s' % vpg_uuid, prouter_name)
    pi_ae = None
    if not cls.vnc_zk_client._zk_client.exists(vpg_zk_path):
        while True:
            try:
                ae_id = cls.vnc_zk_client.alloc_ae_id(
                    prouter_name, vpg_name)

                def undo_alloc_ae_id():
                    # Roll back this AE-ID allocation on failure.
                    ok, result = cls._dealloc_ae_id(
                        prouter_name, ae_id, vpg_name, vpg_uuid)
                    return ok, result
                get_context().push_undo(undo_alloc_ae_id)
                break
            except ResourceExhaustionError:
                # reraise if its real exhaustion
                # Count how many VPG nodes already hold an AE-ID on this
                # router; only then is the pool genuinely exhausted.
                in_use_aes = 0
                vpg_nodes = cls.vnc_zk_client._zk_client.get_children(
                    cls.vnc_zk_client._vpg_ae_id_zk_path_prefix)
                for vpg_node in vpg_nodes:
                    pr_path = cls.vnc_zk_client._zk_client.exists(
                        os.path.join(
                            cls.vnc_zk_client._vpg_ae_id_zk_path_prefix,
                            vpg_node, prouter_name))
                    if pr_path:
                        in_use_aes += 1
                if in_use_aes >= cls.vnc_zk_client._AE_MAX_ID:
                    err_msg = ('ResourceExhaustionError: when allocating '
                               'AE-ID for virtual-port-group (%s) at '
                               'physical-router (%s)' % (
                                   vpg_name, prouter_name))
                    return False, (400, err_msg)
                # Otherwise loop and retry the allocation.

        try:
            # Record the chosen AE-ID at the VPG/router ZK path.
            cls.vnc_zk_client._zk_client.create_node(vpg_zk_path, ae_id)

            def undo_create_node():
                # Remove the recorded node on rollback (recursive).
                cls.vnc_zk_client._zk_client.delete_node(vpg_zk_path, True)
            get_context().push_undo(undo_create_node)
            pi_ae = ae_id
        except ResourceExhaustionError:
            # Another node recorded an AE-ID first: release ours and use
            # the value already stored at the path.
            ok, result = cls._dealloc_ae_id(prouter_name, ae_id,
                                            vpg_name, vpg_uuid)
            if not ok:
                return ok, result
            pi_ae_str = cls.vnc_zk_client._zk_client.read_node(vpg_zk_path)
            pi_ae = int(pi_ae_str)
            # TO-DO: can read_node return empty?
    else:
        # Path exists already: reuse the AE-ID recorded earlier.
        # TO-DO: can read_node return empty?
        pi_ae_str = cls.vnc_zk_client._zk_client.read_node(vpg_zk_path)
        pi_ae = int(pi_ae_str)

    # TO-DO: Can pi_ae remain None at any case?
    # if pi_ae is None:
    attr_obj = VpgInterfaceParametersType(pi_ae)
    attr_dict = attr_obj.__dict__
    alloc_dict = {
        'ae_id': pi_ae,
        'prouter_name': prouter_name,
        'vpg_name': vpg_name,
    }
    msg = "Allocated AE-ID (%s) at VPG(%s)/PR(%s)" % (
        pi_ae, vpg_name, prouter_name)
    cls.db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
    return True, (attr_dict, alloc_dict)
def _manage_lag_interface(cls, vmi_id, api_server, db_conn, phy_links,
                          lag_name=None):
    """Attach a VMI's physical links to a LAG virtual-port-group.

    Validates that all links are in one fabric and that no physical
    interface already belongs to a different LAG.  When lag_name is
    given the existing VPG is read and its ESI annotation refreshed;
    otherwise a new VPG named "lag<ae_id>" is created (with undo
    callbacks for the AE-ID and the created object).  Finally the VPG's
    physical-interface refs are reconciled with the requested set.

    :return: the LAG VPG uuid on success, or an error tuple.
    """
    fabric_name = None
    phy_interface_uuids = []
    for link in phy_links:
        # All member links must name the same fabric.
        if fabric_name is not None and fabric_name != link['fabric']:
            msg = 'Physical interfaces in the same lag should belong to '\
                  'the same fabric'
            return (False, (400, msg))
        else:
            fabric_name = link['fabric']

        phy_interface_name = link['port_id']
        prouter_name = link['switch_info']
        pi_fq_name = ['default-global-system-config', prouter_name,
                      phy_interface_name]
        phy_interface_uuids.append(
            db_conn.fq_name_to_uuid('physical_interface', pi_fq_name))

    # check if new physical interfaces belongs to some other lag
    for uuid in set(phy_interface_uuids):
        ok, phy_interface_dict = db_conn.dbe_read(
            obj_type='physical-interface', obj_id=uuid)
        if not ok:
            return (ok, 400, phy_interface_dict)
        mac = phy_interface_dict['physical_interface_mac_addresses'][
            'mac_address']
        # Derive the ESI from the PI's first MAC address.
        # NOTE(review): 'esi' keeps the value from the LAST interface of
        # the loop — presumably all members share a MAC; confirm.
        esi = "00:00:00:00:" + mac[0]
        lag_refs = phy_interface_dict.get('virtual_port_group_back_refs')
        if lag_refs and lag_refs[0]['to'][-1] != lag_name:
            msg = 'Physical interface %s already belong to the lag %s' %\
                  (phy_interface_dict.get('name'), lag_refs[0]['to'][-1])
            return (False, (400, msg))

    if lag_name:
        # read the lag object
        lag_fq_name = ['default-global-system-config', fabric_name,
                       lag_name]
        try:
            lag_uuid = db_conn.fq_name_to_uuid('virtual_port_group',
                                               lag_fq_name)
        except NoIdError:
            msg = 'Lag object %s is not found' % lag_name
            return (False, (404, msg))
        ok, lag_dict = db_conn.dbe_read(obj_type='virtual-port-group',
                                        obj_id=lag_uuid)
        if not ok:
            return (ok, 400, lag_dict)

        kvps = lag_dict['annotations']['key_value_pair']
        kvp_dict = cls._kvp_to_dict(kvps)
        lag_update_dict = {}
        if kvp_dict.get('esi') != esi:
            # Refresh the stored ESI annotation.
            # NOTE(review): index [1] assumes the 'esi' pair is always
            # the second annotation (see creation order below) — confirm.
            lag_dict['annotations']['key_value_pair'][1] = KeyValuePair(
                'esi', esi)
            lag_update_dict['annotations'] = lag_dict['annotations']
            lag_update_dict = json.dumps(lag_update_dict,
                                         default=_obj_serializer_all)
            ok, resp = api_server.internal_request_update(
                'virtual-port-group', lag_dict['uuid'],
                json.loads(lag_update_dict))
    else:
        # create lag object
        # NOTE(review): third element is a PI uuid, apparently used only
        # as a uniqueness key for the ZK AE-ID allocator — confirm.
        fabric_fq_name = [
            'default-global-system-config',
            fabric_name,
            phy_interface_uuids[0],
        ]
        ae_id = cls.vnc_zk_client.alloc_ae_id(':'.join(fabric_fq_name))

        def undo_ae_id():
            # Roll back the AE-ID allocation on failure.
            cls.vnc_zk_client.free_ae_id(':'.join(fabric_fq_name))
            return True, ""
        get_context().push_undo(undo_ae_id)

        lag_name = "lag" + str(ae_id)
        lag_obj = VirtualPortGroup(
            parent_type='fabric',
            fq_name=['default-global-system-config', fabric_name,
                     lag_name],
            virtual_port_group_lacp_enabled=True)
        lag_obj.set_annotations(KeyValuePairs([
            KeyValuePair('ae_if_name', str(ae_id)),
            KeyValuePair('esi', esi)]))
        # Round-trip through JSON to get a plain dict for the internal
        # create call.
        lag_int_dict = json.dumps(lag_obj, default=_obj_serializer_all)
        ok, resp = api_server.internal_request_create(
            'virtual-port-group', json.loads(lag_int_dict))
        if not ok:
            return (ok, 400, resp)
        lag_dict = resp['virtual-port-group']
        lag_uuid = resp['virtual-port-group']['uuid']

        def undo_lag_create():
            # Remove the internally created VPG on rollback.
            cls.server.internal_request_delete('virtual-port-group',
                                               lag_uuid)
            return True, ''
        get_context().push_undo(undo_lag_create)

    # Diff the VPG's current PI refs against the requested set.
    old_phy_interface_uuids = []
    old_phy_interface_refs = lag_dict.get('physical_interface_refs')
    for ref in old_phy_interface_refs or []:
        old_phy_interface_uuids.append(ref['uuid'])

    # add new physical interfaces to the lag
    for uuid in set(phy_interface_uuids) - set(old_phy_interface_uuids):
        api_server.internal_request_ref_update(
            'virtual-port-group', lag_uuid, 'ADD', 'physical-interface',
            uuid, relax_ref_for_delete=True)

    # delete old physical interfaces to the lag
    for uuid in set(old_phy_interface_uuids) - set(phy_interface_uuids):
        api_server.internal_request_ref_update(
            'virtual-port-group', lag_uuid, 'DELETE',
            'physical-interface', uuid)

    return lag_uuid
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Allocate the floating-ip address before the object is created.

    The address is taken from the floating-ip-pool's configured subnets
    when any are configured, otherwise from any subnet of the owning
    virtual network.  On success the address is written to
    obj_dict['floating_ip_address'] and an undo callback that frees it
    is pushed so a failed create rolls the allocation back.

    :param tenant_name: requesting tenant name (used in log messages)
    :param obj_dict: floating-ip resource dict being created
    :param db_conn: DB connection used for reads and logging
    :return: (True, "") on success or (False, (<http code>, <msg>))
    """
    # Floating IPs parented by an instance-ip carry no address of their
    # own; nothing to allocate here.
    if obj_dict['parent_type'] == 'instance-ip':
        return True, ""

    # fq_name ends with <fip-pool>:<fip>; dropping those two components
    # yields the owning virtual network's fq_name.
    vn_fq_name = obj_dict['fq_name'][:-2]
    req_ip = obj_dict.get("floating_ip_address")
    if req_ip and cls.addr_mgmt.is_ip_allocated(req_ip, vn_fq_name):
        return (False, (409, 'IP address already in use'))
    try:
        ok, result = cls.addr_mgmt.get_ip_free_args(vn_fq_name)
        if not ok:
            return ok, result
        #
        # Parse through floating-ip-pool config to see if there are any
        # guidelines laid for allocation of this floating-ip.
        #
        fip_subnets = None
        ok, ret_val = cls._get_fip_pool_subnets(obj_dict, db_conn)
        # On a successful fip-pool subnet get, the subnet list is returned.
        # Otherwise, returned value has appropriate reason string.
        if ok:
            fip_subnets = ret_val
        else:
            return ok, (400, "Floating-ip-pool lookup failed with error: "
                             "%s" % ret_val)

        # BUG FIX: initialize before the allocation attempts.  Previously
        # fip_addr stayed unbound when every configured subnet was
        # exhausted, so the 'if not fip_addr' check below raised
        # UnboundLocalError instead of the intended exhaustion error.
        fip_addr = sn_uuid = s_name = None

        if not fip_subnets:
            # Subnet specification was not found on the floating-ip-pool.
            # Proceed to allocated floating-ip from any of the subnets
            # on the virtual-network.
            fip_addr, sn_uuid, s_name = cls.addr_mgmt.ip_alloc_req(
                vn_fq_name, asked_ip_addr=req_ip,
                alloc_id=obj_dict['uuid'])
        else:
            subnets_tried = []
            # Iterate through configured subnets on floating-ip-pool.
            # We will try to allocate floating-ip by iterating through
            # the list of configured subnets.
            for fip_pool_subnet in fip_subnets['subnet_uuid']:
                try:
                    # Record the subnets that we try to allocate from.
                    subnets_tried.append(fip_pool_subnet)
                    fip_addr, sn_uuid, s_name = cls.addr_mgmt.ip_alloc_req(
                        vn_fq_name, sub=fip_pool_subnet,
                        asked_ip_addr=req_ip,
                        alloc_id=obj_dict['uuid'])
                    # BUG FIX: stop at the first successful allocation.
                    # Without the break the loop allocated one address
                    # from every remaining subnet, leaking all but the
                    # last.
                    break
                except cls.addr_mgmt.AddrMgmtSubnetExhausted:
                    # This subnet is exhausted. Try next subnet.
                    continue

            if not fip_addr:
                # Floating-ip could not be allocated from any of the
                # configured subnets. Raise an exception.
                raise cls.addr_mgmt.AddrMgmtSubnetExhausted(vn_fq_name,
                                                            subnets_tried)

        def undo():
            # Roll back the address allocation if a later create stage
            # fails.
            msg = ("AddrMgmt: free FIP %s for vn=%s on tenant=%s, on undo"
                   % (fip_addr, vn_fq_name, tenant_name))
            db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            cls.addr_mgmt.ip_free_req(fip_addr, vn_fq_name,
                                      alloc_id=obj_dict['uuid'],
                                      vn_dict=result.get('vn_dict'),
                                      ipam_dicts=result.get('ipam_dicts'))
            return True, ""
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (500, str(e)))

    obj_dict['floating_ip_address'] = fip_addr
    msg = ('AddrMgmt: alloc %s FIP for vn=%s, tenant=%s, askip=%s' %
           (fip_addr, vn_fq_name, tenant_name, req_ip))
    db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
    return True, ""
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
    """Validate a logical-router update and manage its VXLAN ID.

    Runs the external-gateway/DCI/port/VM checks, then — when VXLAN
    routing is enabled and the request changes
    'vxlan_network_identifier' — moves the ZK VXLAN-ID allocation from
    the old value to the new one (allocating against the LR's internal
    virtual network, with undo callbacks for rollback), and finally
    re-validates the BGP VPN association constraints.

    :return: delegates the final (ok, result) of the bgpvpn check, or
        (False, (<http code>, <msg>)) on any earlier failure.
    """
    ok, result = cls.check_for_external_gateway(db_conn, obj_dict)
    if not ok:
        return ok, result

    ok, result = cls._ensure_lr_dci_association(obj_dict)
    if not ok:
        return ok, result

    ok, result = cls.check_port_gateway_not_in_same_network(
        db_conn, obj_dict, id)
    if not ok:
        return ok, result

    ok, result = cls.is_port_in_use_by_vm(obj_dict, db_conn)
    if not ok:
        return ok, result

    ok, result = cls.is_vxlan_routing_enabled(db_conn, obj_dict)
    if not ok:
        return ok, result
    vxlan_routing = result

    if (vxlan_routing and 'vxlan_network_identifier' in obj_dict):
        new_vxlan_id = None
        old_vxlan_id = None
        new_vxlan_id = cls._check_vxlan_id_in_lr(obj_dict)

        # To get the current vxlan_id, read the LR from the DB
        ok, read_result = cls.dbe_read(db_conn, 'logical_router', id)
        if not ok:
            return ok, read_result
        old_vxlan_id = cls._check_vxlan_id_in_lr(read_result)

        if new_vxlan_id != old_vxlan_id:
            # The VXLAN ID is tracked against the LR's internal VN.
            int_fq_name = None
            for vn_ref in read_result['virtual_network_refs']:
                if (vn_ref.get('attr', {}).get(
                        'logical_router_virtual_network_type') ==
                        'InternalVirtualNetwork'):
                    int_fq_name = vn_ref.get('to')
                    break

            if int_fq_name is None:
                msg = "Internal FQ name not found"
                return False, (400, msg)

            vxlan_fq_name = ':'.join(int_fq_name) + '_vxlan'
            if new_vxlan_id is not None:
                # First, check if the new_vxlan_id being updated exist for
                # some other VN.
                new_vxlan_fq_name_in_db = cls.vnc_zk_client.get_vn_from_id(
                    int(new_vxlan_id))
                if new_vxlan_fq_name_in_db is not None:
                    if new_vxlan_fq_name_in_db != vxlan_fq_name:
                        msg = ("Cannot set VXLAN_ID: %s, it has already "
                               "been set" % new_vxlan_id)
                        return False, (400, msg)

                # Second, set the new_vxlan_id in Zookeeper.
                cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name,
                                                 int(new_vxlan_id))

                def undo_alloc():
                    # BUG FIX: roll back the allocation we just made by
                    # freeing the NEW id.  The previous code freed
                    # old_vxlan_id here, which leaked the new id in ZK
                    # and crashed with int(None) when there was no old
                    # id (the analogous virtual-network code frees the
                    # new id).
                    cls.vnc_zk_client.free_vxlan_id(
                        int(new_vxlan_id), vxlan_fq_name)
                get_context().push_undo(undo_alloc)

            # Third, check if old_vxlan_id is not None, if so, delete it
            # from Zookeeper
            if old_vxlan_id is not None:
                cls.vnc_zk_client.free_vxlan_id(int(old_vxlan_id),
                                                vxlan_fq_name)

                def undo_free():
                    # Re-allocate the old id if a later stage fails.
                    cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name,
                                                     int(old_vxlan_id))
                get_context().push_undo(undo_free)

    # Check if type of all associated BGP VPN are 'l3'
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_router_supports_vpn_type(obj_dict)
    if not ok:
        return ok, result

    # Check if we can reference the BGP VPNs
    ok, result = cls.dbe_read(
        db_conn, 'logical_router', id,
        obj_fields=['bgpvpn_refs', 'virtual_machine_interface_refs'])
    if not ok:
        return ok, result
    return cls.server.get_resource_class(
        'bgpvpn').check_router_has_bgpvpn_assoc_via_network(
            obj_dict, result)
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn,
                   prop_collection_updates=None, ref_update=None):
    """Handle sub-cluster ID changes on update.

    The ASN is immutable.  When the request carries a new
    'sub_cluster_id', the new ID is allocated in ZK first, then the old
    one is freed; compensating undo callbacks re-establish the previous
    state (re-allocating the old ID, or picking a fresh one if it was
    grabbed by another node in the meantime) if a later stage fails.

    :return: (True, <dict with fq_name / sub_cluster_id /
        deallocated_sub_cluster_id>) or (False, (<code>, <msg>)).
    """
    if 'sub_cluster_asn' in obj_dict:
        return False, (400, 'Sub cluster ASN can not be modified')

    # No ID in the request: nothing to do.
    if not obj_dict.get('sub_cluster_id'):
        return True, ''

    deallocated_id = None
    ok, result = cls.locate(uuid=id, create_it=False,
                            fields=['sub_cluster_id'])
    if not ok:
        return False, result
    actual_id = result['sub_cluster_id']
    new_id = obj_dict.get('sub_cluster_id')
    if new_id != actual_id:
        # try to allocate desired ID
        try:
            cls.vnc_zk_client.alloc_sub_cluster_id(
                cls.server.global_autonomous_system, ':'.join(fq_name),
                new_id)
        except ResourceExistsError:
            msg = ("Sub-cluster ID '%d' is already used, choose another "
                   "one" % new_id)
            return False, (400, msg)

        def undo_allocate_sub_cluster_id():
            # Roll back the new-ID allocation on failure.
            cls.vnc_zk_client.free_sub_cluster_id(new_id,
                                                  ':'.join(fq_name))
        get_context().push_undo(undo_allocate_sub_cluster_id)

        # if available, deallocate already allocate ID
        cls.vnc_zk_client.free_sub_cluster_id(actual_id,
                                              ':'.join(fq_name))

        def undo_deallocate_sub_cluster_id():
            # In case of error try to re-allocate the same ID as it was
            # not yet freed on other node
            try:
                cls.vnc_zk_client.alloc_sub_cluster_id(
                    cls.server.global_autonomous_system,
                    ':'.join(fq_name), actual_id)
            except ResourceExistsError:
                # The old ID was taken meanwhile: allocate any free ID
                # and persist it on the object instead.
                undo_new_id = cls.vnc_zk_client.alloc_sub_cluster_id(
                    cls.server.global_autonomous_system,
                    ':'.join(fq_name))
                cls.server.internal_request_update(
                    cls.resource_type, id,
                    {'sub_cluster_id': undo_new_id})
            return True, ""
        get_context().push_undo(undo_deallocate_sub_cluster_id)
        deallocated_id = actual_id

    return True, {
        'fq_name': fq_name,
        'sub_cluster_id': new_id,
        'deallocated_sub_cluster_id': deallocated_id,
    }
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
    """Validate a virtual-network update and apply side effects.

    In order: reconciles the neutron 'is_shared'/'router_external'
    flags with perms2.global_access, validates route-target lists and
    provider details, forbids changing the server-allocated VN ID,
    moves the ZK VXLAN-ID allocation when the request changes it (with
    undo callbacks), runs provider-network / service-chain / subnet /
    BGP VPN checks, handles routed-VN loopback IPs, and finally applies
    the addr-mgmt subnet update (also undo-protected).

    :return: (True, <dict with deallocated_vxlan_network_identifier>)
        or (False, (<http code>, <msg>)).
    """
    if fq_name == LINK_LOCAL_VN_FQ_NAME:
        return True, ""

    # neutron <-> vnc sharing
    global_access = obj_dict.get('perms2', {}).get('global_access')
    is_shared = obj_dict.get('is_shared')
    router_external = obj_dict.get('router_external')
    if global_access is not None or is_shared is not None or \
            router_external is not None:
        if global_access is not None and is_shared is not None:
            # NOTE(gzimin): Check router_external parameter too.
            if is_shared != (global_access == 7) and \
                    (router_external is None or not router_external):
                msg = ("Inconsistent is_shared (%s) and global_access "
                       "(%s)" % (is_shared, global_access))
                return False, (400, msg)
        if global_access is not None and router_external is not None:
            # NOTE(gzimin): Check is_shared parameter too.
            if router_external != (global_access == 5) and \
                    (is_shared is None or not is_shared):
                msg = ("Inconsistent router_external (%s) and "
                       "global_access (%s)" % (router_external,
                                               global_access))
                return False, (400, msg)
        elif global_access is not None:
            # Only global_access given: derive is_shared from it.
            obj_dict['is_shared'] = (global_access != 0)
        else:
            # Only flags given: derive global_access from them, using
            # the stored perms2 as the base.
            ok, result = cls.dbe_read(db_conn, 'virtual_network', id,
                                      obj_fields=['perms2'])
            if not ok:
                return ok, result
            obj_dict['perms2'] = result['perms2']
            if is_shared:
                obj_dict['perms2']['global_access'] = PERMS_RWX
            elif router_external:
                obj_dict['perms2']['global_access'] = PERMS_RX
            else:
                obj_dict['perms2']['global_access'] = PERMS_NONE

    # Validate each of the three route-target lists if present.
    rt_dict = obj_dict.get('route_target_list')
    if rt_dict:
        (ok, error) = cls._check_route_targets(rt_dict)
        if not ok:
            return (False, (400, error))
    rt_import_dict = obj_dict.get('import_route_target_list')
    if rt_import_dict:
        (ok, error) = cls._check_route_targets(rt_import_dict)
        if not ok:
            return (False, (400, error))
    rt_export_dict = obj_dict.get('export_route_target_list')
    if rt_export_dict:
        (ok, error) = cls._check_route_targets(rt_export_dict)
        if not ok:
            return (False, (400, error))

    (ok, error) = cls._check_provider_details(obj_dict, db_conn, False)
    if not ok:
        return (False, (409, error))

    ok, read_result = cls.dbe_read(db_conn, 'virtual_network', id)
    if not ok:
        return ok, read_result

    new_vn_id = obj_dict.get('virtual_network_network_id')
    # Does not authorize to update the virtual network ID as it's
    # allocated by the vnc server
    if (new_vn_id is not None and
            new_vn_id != read_result.get('virtual_network_network_id')):
        return (False, (403, "Cannot update the virtual network ID"))

    new_vxlan_id = None
    old_vxlan_id = None
    deallocated_vxlan_network_identifier = None

    (new_vxlan_status, new_vxlan_id) = cls._check_vxlan_id(obj_dict)
    (old_vxlan_status, old_vxlan_id) = cls._check_vxlan_id(read_result)

    if new_vxlan_status and new_vxlan_id != old_vxlan_id:
        vn_fq_name = ':'.join(fq_name)
        vxlan_fq_name = vn_fq_name + '_vxlan'
        if new_vxlan_id is not None:
            # First, check if the new_vxlan_id being updated exist for
            # some other VN.
            new_vxlan_fq_name_in_db = cls.vnc_zk_client.get_vn_from_id(
                int(new_vxlan_id))
            if new_vxlan_fq_name_in_db and \
                    not vxlan_fq_name.startswith(new_vxlan_fq_name_in_db):
                msg = ("Cannot set VXLAN_ID: %s, it has already been "
                       "set" % new_vxlan_id)
                return False, (400, msg)

            # Free vxlan if allocated using vn_fq_name (w/o vxlan suffix)
            if new_vxlan_fq_name_in_db == vn_fq_name:
                cls.vnc_zk_client.free_vxlan_id(int(new_vxlan_id),
                                                vn_fq_name)

            # Second, set the new_vxlan_id in Zookeeper.
            cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name,
                                             int(new_vxlan_id))

            def undo_alloc():
                # Roll back the new-ID allocation on failure.
                cls.vnc_zk_client.free_vxlan_id(int(new_vxlan_id),
                                                vxlan_fq_name)
            get_context().push_undo(undo_alloc)

        # Third, check if old_vxlan_id is not None, if so, delete it from
        # Zookeeper
        if old_vxlan_id is not None:
            cls.vnc_zk_client.free_vxlan_id(int(old_vxlan_id),
                                            vxlan_fq_name)
            # Add old vxlan_network_identifier to handle
            # dbe_update_notification
            deallocated_vxlan_network_identifier = old_vxlan_id

            def undo_free():
                # Re-allocate the old ID if a later stage fails.
                cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name,
                                                 int(old_vxlan_id))
            get_context().push_undo(undo_free)

    (ok, error) = cls._check_is_provider_network_property(
        obj_dict, db_conn, vn_ref=read_result)
    if not ok:
        return (False, (400, error))

    (ok, error) = cls._check_provider_network(
        obj_dict, db_conn, vn_ref=read_result)
    if not ok:
        return (False, (400, error))

    (ok, response) = cls._is_multi_policy_service_chain_supported(
        obj_dict, read_result)
    if not ok:
        return (ok, response)

    ok, return_code, result = cls._check_ipam_network_subnets(
        obj_dict, db_conn, id, read_result)
    if not ok:
        return (ok, (return_code, result))

    (ok, result) = cls.addr_mgmt.net_check_subnet_delete(read_result,
                                                         obj_dict)
    if not ok:
        return (ok, (409, result))

    ipam_refs = obj_dict.get('network_ipam_refs') or []
    if ipam_refs:
        (ok, result) = cls.addr_mgmt.net_validate_subnet_update(
            read_result, obj_dict)
        if not ok:
            return (ok, (400, result))

    # Check if network forwarding mode support BGP VPN types
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_network_supports_vpn_type(obj_dict, read_result)
    if not ok:
        return ok, result

    # Check if we can reference the BGP VPNs
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_network_has_bgpvpn_assoc_via_router(
            obj_dict, read_result)
    if not ok:
        return ok, result

    # Check if VN is routed and has overlay-loopback in name alloc
    # and de-alloc instance IPs
    vn_category = read_result.get('virtual_network_category', None)
    if vn_category == 'routed':
        ok, error = cls._validate_routed_vn_payload(obj_dict,
                                                    read_result)
        if ok and cls._check_if_overlay_loopback_vn(read_result):
            (ok, error) = cls._check_and_alloc_loopback_instance_ip(
                obj_dict, db_conn, read_result)
        if not ok:
            return (False, (409, error))

    try:
        cls.addr_mgmt.net_update_req(fq_name, read_result, obj_dict, id)
        # update link with a subnet_uuid if ipam in read_result or
        # obj_dict does not have it already
        for ipam in ipam_refs:
            ipam_fq_name = ipam['to']
            ipam_uuid = db_conn.fq_name_to_uuid('network_ipam',
                                                ipam_fq_name)
            (ok, ipam_dict) = db_conn.dbe_read(
                obj_type='network_ipam', obj_id=ipam_uuid,
                obj_fields=['ipam_subnet_method'])
            if not ok:
                return (ok, (409, ipam_dict))
            subnet_method = ipam_dict.get('ipam_subnet_method')
            if (subnet_method is not None and
                    subnet_method == 'flat-subnet'):
                vnsn_data = ipam.get('attr') or {}
                ipam_subnets = vnsn_data.get('ipam_subnets') or []
                # A single entry means the link already carries the
                # flat-subnet uuid entry.
                if (len(ipam_subnets) == 1):
                    continue
                if (len(ipam_subnets) == 0):
                    subnet_dict = {}
                    flat_subnet_uuid = str(uuid.uuid4())
                    subnet_dict['subnet_uuid'] = flat_subnet_uuid
                    ipam['attr']['ipam_subnets'].insert(0, subnet_dict)

        def undo():
            # failed => update with flipped values for db_dict and
            # req_dict
            cls.addr_mgmt.net_update_req(fq_name, obj_dict, read_result,
                                         id)
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (500, str(e)))

    return True, {
        'deallocated_vxlan_network_identifier':
            deallocated_vxlan_network_identifier,
    }
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate and prepare a virtual-network create request.

    Syncs the neutron-style ``is_shared`` flag with perms2 global access,
    allocates the requested VxLAN id and the virtual-network id in
    Zookeeper (pushing undo callbacks for rollback), then runs subnet,
    route-target, provider-network and BGP VPN validations before
    reserving addressing state via ``addr_mgmt``.

    :param tenant_name: name of the requesting tenant (used by hooks)
    :param obj_dict: the virtual-network resource being created (mutated:
        sharing flags, ``virtual_network_network_id``, flat-subnet uuids)
    :param db_conn: DB connection used for reads/uuid lookups
    :returns: (True, "") on success, (False, (http_code, msg)) on error
    """
    ok, response = cls._is_multi_policy_service_chain_supported(obj_dict)
    if not ok:
        return (ok, response)

    is_shared = obj_dict.get('is_shared')
    # neutron <-> vnc sharing: keep 'is_shared' and perms2 global access
    # consistent whichever one the caller set.
    if obj_dict['perms2'].get('global_access', 0) == PERMS_RWX:
        obj_dict['is_shared'] = True
    elif is_shared:
        obj_dict['perms2']['global_access'] = PERMS_RWX
    else:
        obj_dict['is_shared'] = False

    # Does not authorize to set the virtual network ID as it's allocated
    # by the vnc server
    if obj_dict.get('virtual_network_network_id') is not None:
        return False, (403, "Cannot set the virtual network ID")

    # Allocate vxlan_id if it's present in request.
    # (The previous dead 'vxlan_id = None' assignment was removed; the
    # helper always returns the id or None.)
    (ok, vxlan_id) = cls._check_vxlan_id(obj_dict)
    if vxlan_id is not None:
        try:
            vxlan_fq_name = ':'.join(obj_dict['fq_name']) + '_vxlan'
            cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name, int(vxlan_id))
        except ResourceExistsError:
            msg = ("Cannot set VXLAN_ID: %s, it has already been set"
                   % vxlan_id)
            return False, (400, msg)

        def undo_vxlan_id():
            # Roll back the Zookeeper VxLAN id reservation on failure.
            cls.vnc_zk_client.free_vxlan_id(int(vxlan_id), vxlan_fq_name)
            return True, ""
        get_context().push_undo(undo_vxlan_id)

    # Allocate virtual network ID
    vn_id = cls.vnc_zk_client.alloc_vn_id(':'.join(obj_dict['fq_name']))

    def undo_vn_id():
        # Roll back the Zookeeper VN id reservation on failure.
        cls.vnc_zk_client.free_vn_id(vn_id, ':'.join(obj_dict['fq_name']))
        return True, ""
    get_context().push_undo(undo_vn_id)
    obj_dict['virtual_network_network_id'] = vn_id

    vn_uuid = obj_dict.get('uuid')
    (ok, return_code, result) = cls._check_ipam_network_subnets(
        obj_dict, db_conn, vn_uuid)
    if not ok:
        return (ok, (return_code, result))

    # Validate the main, import and export route-target lists with the
    # same helper (previously three copy-pasted stanzas).
    for rt_field in ('route_target_list',
                     'import_route_target_list',
                     'export_route_target_list'):
        rt_dict = obj_dict.get(rt_field)
        if rt_dict:
            (ok, error) = cls._check_route_targets(rt_dict)
            if not ok:
                return (False, (400, error))

    (ok, error) = cls._check_is_provider_network_property(obj_dict, db_conn)
    if not ok:
        return (False, (400, error))

    (ok, error) = cls._check_provider_details(obj_dict, db_conn, True)
    if not ok:
        return (False, (400, error))

    (ok, error) = cls._check_provider_network(obj_dict, db_conn)
    if not ok:
        return (False, (400, error))

    # Check if network forwarding mode support BGP VPN types
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_network_supports_vpn_type(obj_dict)
    if not ok:
        return ok, result

    # Check if we can reference the BGP VPNs
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_network_has_bgpvpn_assoc_via_router(obj_dict)
    if not ok:
        return ok, result

    ipam_refs = obj_dict.get('network_ipam_refs') or []
    try:
        cls.addr_mgmt.net_create_req(obj_dict)

        # for all ipams which are flat, we need to write a unique id as
        # subnet uuid for all cidrs in flat-ipam
        for ipam in ipam_refs:
            ipam_fq_name = ipam['to']
            ipam_uuid = ipam.get('uuid')
            if not ipam_uuid:
                ipam_uuid = db_conn.fq_name_to_uuid(
                    'network_ipam', ipam_fq_name)
            (ok, ipam_dict) = db_conn.dbe_read(
                obj_type='network_ipam',
                obj_id=ipam_uuid,
                obj_fields=['ipam_subnet_method'])
            if not ok:
                return (ok, (400, ipam_dict))
            subnet_method = ipam_dict.get('ipam_subnet_method')
            if (subnet_method is not None and
                    subnet_method == 'flat-subnet'):
                subnet_dict = {}
                flat_subnet_uuid = str(uuid.uuid4())
                subnet_dict['subnet_uuid'] = flat_subnet_uuid
                ipam['attr']['ipam_subnets'] = [subnet_dict]

        def undo():
            # Roll back the addr_mgmt reservation on later failure.
            cls.addr_mgmt.net_delete_req(obj_dict)
            return True, ""
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (500, str(e)))

    return True, ""
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate a tag create request and allocate its tag id.

    User-defined tags may supply ``tag_id`` (hex); its low 16 bits are
    the tag value id and the high bits the tag-type id, both of which
    must fall in the user-defined range 32768-65535.  Locates (or
    creates) the tag-type, allocates the tag value id in Zookeeper with
    an undo callback, and composes the final ``tag_id``.

    :returns: (True, "") on success, (False, (http_code, msg)) on error
    """
    type_str = obj_dict['tag_type_name']
    tag_id = obj_dict.get('tag_id') or None
    tag_value_id = None
    tag_type_id = None

    # For user defined tags tag id and tag-type id
    # is input from user. Range for user defined
    # ids are 32768 - 65535. Both values are expected
    # in hex format.
    if tag_id is not None:
        try:
            tag_value_id = int(tag_id, 16) & (2**16 - 1)
            tag_type_id = int(tag_id, 16) >> 16
        except ValueError:
            return False, (400, "Tag value must be in hexadecimal")

    if tag_value_id is not None and \
            not cls.vnc_zk_client.user_def_tag(tag_value_id):
        # BUGFIX: message previously said "32678-65535" (digit
        # transposition) and embedded accidental indentation whitespace
        # from a backslash line continuation inside the string literal.
        msg = ("Tag id can be set only for user defined tags in range "
               "32768-65535")
        return False, (400, msg)

    if obj_dict.get('tag_type_refs') is not None:
        msg = "Tag Type reference is not setable"
        return False, (400, msg)

    # check if tag-type is already present use that.
    ok, result = cls.server.get_resource_class('tag_type').locate(
        [type_str], create_it=False)
    if not ok and result[0] == 404:
        # Tag type does not exist yet; validate the requested id and
        # create it.
        if tag_type_id is not None and \
                not cls.vnc_zk_client.user_def_tag(tag_type_id):
            # BUGFIX: same "32678" typo and embedded whitespace as above.
            msg = ("Tag type id can be set only for user defined tag "
                   "types in range 32768-65535")
            return False, (400, msg)
        params = {
            "id_perms": IdPermsType(user_visible=False),
            "tag_type_id":
                None if tag_type_id is None else "0x%x" % tag_type_id,
        }
        ok, result = cls.server.get_resource_class('tag_type').locate(
            [type_str], **params)
    if not ok:
        return False, result
    tag_type = result

    def undo_tag_type():
        # NOTE(review): pushed even when the tag-type pre-existed, so a
        # later failure deletes it either way — confirm this is intended.
        cls.server.internal_request_delete('tag-type', tag_type['uuid'])
        return True, ''
    get_context().push_undo(undo_tag_type)

    obj_dict['tag_type_refs'] = [
        {
            'uuid': tag_type['uuid'],
            'to': tag_type['fq_name'],
        },
    ]

    # Allocate ID for tag value. Use the all fq_name to distinguish same
    # tag values between global and scoped
    try:
        value_id = cls.vnc_zk_client.alloc_tag_value_id(
            type_str, ':'.join(obj_dict['fq_name']), tag_value_id)
    except ResourceExistsError:
        return False, (400, "Requested Tag id is already allocated")

    def undo_value_id():
        # Roll back the Zookeeper value-id allocation on failure.
        cls.vnc_zk_client.free_tag_value_id(type_str, value_id,
                                            ':'.join(obj_dict['fq_name']))
        return True, ""
    get_context().push_undo(undo_value_id)

    # value id is None in case of Failure otherwise any positive
    # value between 0 and 65535
    if value_id is None:
        return False, (400, "Failed to allocate tag Id")

    # Compose Tag ID with the type ID and value ID
    obj_dict['tag_id'] = "{}{:04x}".format(tag_type['tag_type_id'],
                                           value_id)

    return True, ""
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
    """Validate a logical-router update request.

    Checks DCI association, gateway/port constraints and route targets,
    then — for vxlan-routing LRs whose VxLAN id changed — moves the
    Zookeeper reservation from the old id to the new one, pushing undo
    callbacks that restore the previous state on failure.  Ends with
    the BGP VPN compatibility checks.

    :returns: (True, ...) on success, (False, (http_code, msg)) on error
    """
    ok, result = cls._ensure_lr_dci_association(obj_dict)
    if not ok:
        return ok, result

    ok, result = cls.check_port_gateway_not_in_same_network(
        db_conn, obj_dict, id)
    if not ok:
        return ok, result

    ok, result = cls.is_port_in_use_by_vm(obj_dict, db_conn)
    if not ok:
        return ok, result

    (ok, error) = cls._check_route_targets(obj_dict)
    if not ok:
        return (False, (400, error))

    # To get the current vxlan_id, read the LR from the DB
    ok, result = cls.dbe_read(cls.db_conn, 'logical_router', id,
                              obj_fields=['virtual_network_refs',
                                          'logical_router_type',
                                          'vxlan_network_identifier'])
    if not ok:
        return ok, result
    read_result = result

    ok, result = cls._check_type(obj_dict, read_result)
    if not ok:
        return ok, result

    logical_router_type_in_db = cls.check_lr_type(read_result)
    if ('vxlan_network_identifier' in obj_dict and
            logical_router_type_in_db == 'vxlan-routing'):
        new_vxlan_id = cls._check_vxlan_id_in_lr(obj_dict)
        old_vxlan_id = cls._check_vxlan_id_in_lr(read_result)
        if new_vxlan_id != old_vxlan_id:
            # The reservation lives under the LR's internal VN fq_name.
            int_fq_name = None
            for vn_ref in read_result['virtual_network_refs']:
                if (vn_ref.get('attr', {}).get(
                        'logical_router_virtual_network_type') ==
                        'InternalVirtualNetwork'):
                    int_fq_name = vn_ref.get('to')
                    break
            if int_fq_name is None:
                msg = "Internal FQ name not found"
                return False, (400, msg)
            vxlan_fq_name = ':'.join(int_fq_name) + '_vxlan'
            if new_vxlan_id is not None:
                # First, check if the new_vxlan_id being updated exist
                # for some other VN.
                new_vxlan_fq_name_in_db = \
                    cls.vnc_zk_client.get_vn_from_id(int(new_vxlan_id))
                if new_vxlan_fq_name_in_db is not None:
                    if new_vxlan_fq_name_in_db != vxlan_fq_name:
                        msg = ("Cannot set VXLAN_ID: %s, it has already "
                               "been set" % new_vxlan_id)
                        return False, (400, msg)

                # Second, set the new_vxlan_id in Zookeeper.
                cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name,
                                                 int(new_vxlan_id))

                def undo_alloc():
                    # BUGFIX: roll back the allocation just made, i.e.
                    # free new_vxlan_id (previously freed old_vxlan_id,
                    # leaking the new id and crashing when old was None;
                    # undo_free below handles the old id).
                    cls.vnc_zk_client.free_vxlan_id(
                        int(new_vxlan_id), vxlan_fq_name)
                get_context().push_undo(undo_alloc)

            # Third, check if old_vxlan_id is not None, if so, delete it
            # from Zookeeper
            if old_vxlan_id is not None:
                cls.vnc_zk_client.free_vxlan_id(int(old_vxlan_id),
                                                vxlan_fq_name)

                def undo_free():
                    # Re-reserve the old id if the update fails later.
                    cls.vnc_zk_client.alloc_vxlan_id(
                        vxlan_fq_name, int(old_vxlan_id))
                get_context().push_undo(undo_free)

    # Check if type of all associated BGP VPN are 'l3'
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_router_supports_vpn_type(obj_dict)
    if not ok:
        return ok, result

    # Check if we can reference the BGP VPNs
    ok, result = cls.dbe_read(
        db_conn, 'logical_router', id,
        obj_fields=['bgpvpn_refs', 'virtual_machine_interface_refs'])
    if not ok:
        return ok, result
    return cls.server.get_resource_class(
        'bgpvpn').check_router_has_bgpvpn_assoc_via_network(
            obj_dict, result)
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
    """Validate a network-ipam update request.

    Rejects DNS-method changes with active VMs, and changes to
    ``ipam_subnet_method`` / ``ipam_subnetting``.  For flat-subnet
    ipams, checks the requested subnets for internal overlap and for
    overlap with subnets of every VN (and every other flat-subnet ipam)
    referring to this ipam, then applies the update through
    ``addr_mgmt`` with an undo callback for rollback.

    :returns: (True, "") on success, (False, (http_code, msg)) on error
    """
    ok, read_result = cls.dbe_read(db_conn, 'network_ipam', id)
    if not ok:
        return ok, read_result

    def ipam_mgmt_check():
        # DNS method may only change when no active VM refers to the
        # ipam; delegated to is_change_allowed().
        old_ipam_mgmt = read_result.get('network_ipam_mgmt')
        new_ipam_mgmt = obj_dict.get('network_ipam_mgmt')
        if not old_ipam_mgmt or not new_ipam_mgmt:
            return True, ""
        old_dns_method = old_ipam_mgmt.get('ipam_dns_method')
        new_dns_method = new_ipam_mgmt.get('ipam_dns_method')
        if not cls.is_change_allowed(old_dns_method, new_dns_method,
                                     read_result, db_conn):
            msg = ("Cannot change DNS Method with active VMs referring "
                   "to the IPAM")
            return False, (400, msg)
        return True, ""

    ok, result = ipam_mgmt_check()
    if not ok:
        return ok, result

    old_subnet_method = read_result.get('ipam_subnet_method')
    if 'ipam_subnet_method' in obj_dict:
        new_subnet_method = obj_dict.get('ipam_subnet_method')
        if (old_subnet_method != new_subnet_method):
            return (False, (400, 'ipam_subnet_method can not be changed'))

    if (old_subnet_method != 'flat-subnet'):
        # Only flat-subnet ipams carry their own subnets; nothing more
        # to validate for other subnet methods.
        if 'ipam_subnets' in obj_dict:
            msg = "ipam-subnets are allowed only with flat-subnet"
            return False, (400, msg)
        return True, ""

    old_subnetting = read_result.get('ipam_subnetting')
    if 'ipam_subnetting' in obj_dict:
        subnetting = obj_dict.get('ipam_subnetting', False)
        if (old_subnetting != subnetting):
            return (False, (400, 'ipam_subnetting can not be changed'))

    if 'ipam_subnets' in obj_dict:
        req_subnets_list = cls.addr_mgmt._ipam_to_subnets(obj_dict)
        # First check the overlap condition within ipam_subnets
        ok, result = cls.addr_mgmt.net_check_subnet_overlap(
            req_subnets_list)
        if not ok:
            return (ok, (400, result))

        # if subnets are modified then make sure new subnet lists are
        # not in overlap conditions with VNs subnets and other ipams
        # referred by all VNs referring this ipam
        vn_refs = read_result.get('virtual_network_back_refs', [])
        ref_ipam_uuid_list = []
        refs_subnets_list = []
        for ref in vn_refs:
            vn_id = ref.get('uuid')
            try:
                (ok, vn_dict) = db_conn.dbe_read('virtual_network',
                                                 vn_id)
            except NoIdError:
                # Stale back-ref; skip this VN.
                continue
            if not ok:
                return False, vn_dict

            # get existing subnets on this VN and on other ipams
            # this VN refers and run a overlap check.
            ipam_refs = vn_dict.get('network_ipam_refs', [])
            for ipam in ipam_refs:
                ref_ipam_uuid = ipam['uuid']
                if ref_ipam_uuid == id:
                    # This is a ipam for which update request has come
                    continue
                if ref_ipam_uuid in ref_ipam_uuid_list:
                    continue
                # check if ipam is a flat-subnet, for flat-subnet ipam
                # add uuid in ref_ipam_uuid_list, to read ipam later
                # to get current ipam_subnets from ipam
                vnsn_data = ipam.get('attr') or {}
                ref_ipam_subnets = vnsn_data.get('ipam_subnets') or []
                if len(ref_ipam_subnets) == 1:
                    # flat subnet ipam will have only one entry in
                    # vn->ipam link without any ip_prefix
                    ref_ipam_subnet = ref_ipam_subnets[0]
                    ref_subnet = ref_ipam_subnet.get('subnet') or {}
                    if 'ip_prefix' not in ref_subnet:
                        # This is a flat-subnet,
                        ref_ipam_uuid_list.append(ref_ipam_uuid)

            # vn->ipam link to the refs_subnets_list
            vn_subnets_list = cls.addr_mgmt._vn_to_subnets(vn_dict)
            if vn_subnets_list:
                refs_subnets_list += vn_subnets_list

        for ipam_uuid in ref_ipam_uuid_list:
            (ok, ipam_dict) = cls.dbe_read(db_conn, 'network_ipam',
                                           ipam_uuid)
            if not ok:
                # BUGFIX: was the malformed 3-tuple (ok, 409, ipam_dict);
                # callers expect (ok, (code, msg)) like every other
                # error path in this method.
                return (ok, (409, ipam_dict))
            ref_subnets_list = cls.addr_mgmt._ipam_to_subnets(ipam_dict)
            refs_subnets_list += ref_subnets_list

        (ok, result) = cls.addr_mgmt.check_overlap_with_refs(
            refs_subnets_list, req_subnets_list)
        if not ok:
            return (ok, (400, result))

    ipam_subnets = obj_dict.get('ipam_subnets')
    if ipam_subnets is not None:
        subnets = ipam_subnets.get('subnets') or []
        (ok, result) = cls.addr_mgmt.net_check_subnet(subnets)
        if not ok:
            return (ok, (409, result))

    (ok, result) = cls.addr_mgmt.ipam_check_subnet_delete(read_result,
                                                          obj_dict)
    if not ok:
        return (ok, (409, result))

    (ok, result) = cls.addr_mgmt.ipam_validate_subnet_update(read_result,
                                                             obj_dict)
    if not ok:
        return (ok, (400, result))

    try:
        cls.addr_mgmt.ipam_update_req(fq_name, read_result, obj_dict, id)

        def undo():
            # failed => update with flipped values for db_dict and
            # req_dict
            cls.addr_mgmt.ipam_update_req(
                fq_name, obj_dict, read_result, id)
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (500, str(e)))

    return True, ""
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate a logical-router create request.

    Runs external-gateway, DCI, gateway/port and VM-usage checks; when
    VxLAN routing is enabled and the request carries a VxLAN id,
    reserves that id in Zookeeper under the LR's internal
    virtual-network name (with an undo callback for rollback).
    Finishes with the BGP VPN association checks.

    Returns (True, "") on success or (False, (http_code, msg)) on error.
    """
    ok, result = cls.check_for_external_gateway(db_conn, obj_dict)
    if not ok:
        return ok, result

    ok, result = cls._ensure_lr_dci_association(obj_dict)
    if not ok:
        return ok, result

    ok, result = cls.check_port_gateway_not_in_same_network(db_conn,
                                                            obj_dict)
    if not ok:
        return ok, result

    ok, result = cls.is_port_in_use_by_vm(obj_dict, db_conn)
    if not ok:
        return ok, result

    ok, result = cls.is_vxlan_routing_enabled(db_conn, obj_dict)
    if not ok:
        return ok, result
    vxlan_routing = result

    # NOTE(review): the 'vxlan_id = None' below is immediately
    # overwritten by the helper call — it is redundant.
    vxlan_id = None
    vxlan_id = cls._check_vxlan_id_in_lr(obj_dict)

    if vxlan_routing and vxlan_id:
        # If input vxlan_id is not None, that means we need to reserve
        # it.
        # First, check if vxlan_id is set for other fq_name
        existing_fq_name = cls.vnc_zk_client.get_vn_from_id(int(vxlan_id))
        if existing_fq_name is not None:
            msg = ("Cannot set VXLAN_ID: %s, it has already been set"
                   % vxlan_id)
            return False, (400, msg)

        # Second, if vxlan_id is not None, set it in Zookeeper and set
        # the undo function for when any failures happen later.
        # But first, get the internal_vlan name using which the resource
        # in zookeeper space will be reserved.
        ok, proj_dict = db_conn.dbe_read('project',
                                         obj_dict['parent_uuid'])
        if not ok:
            return (ok, proj_dict)
        vn_int_name = get_lr_internal_vn_name(obj_dict.get('uuid'))
        # Build transient objects only to derive the internal VN fq_name;
        # nothing is persisted here.
        proj_obj = Project(name=proj_dict.get('fq_name')[-1],
                           parent_type='domain',
                           fq_name=proj_dict.get('fq_name'))
        vn_obj = VirtualNetwork(name=vn_int_name, parent_obj=proj_obj)
        try:
            vxlan_fq_name = ':'.join(vn_obj.fq_name) + '_vxlan'
            # Now that we have the internal VN name, allocate it in
            # zookeeper only if the resource hasn't been reserved
            # already
            cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name, int(vxlan_id))
        except ResourceExistsError:
            msg = ("Cannot set VXLAN_ID: %s, it has already been set"
                   % vxlan_id)
            return False, (400, msg)

        def undo_vxlan_id():
            # Roll back the Zookeeper reservation if a later step fails.
            cls.vnc_zk_client.free_vxlan_id(int(vxlan_id), vxlan_fq_name)
            return True, ""
        get_context().push_undo(undo_vxlan_id)

    # Check if type of all associated BGP VPN are 'l3'
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_router_supports_vpn_type(obj_dict)
    if not ok:
        return ok, result

    # Check if we can reference the BGP VPNs
    return cls.server.get_resource_class(
        'bgpvpn').check_router_has_bgpvpn_assoc_via_network(obj_dict)
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Validate a logical-router create request.

    Runs external-gateway, DCI, gateway/port and VM-usage checks; when
    VxLAN routing is enabled and the request carries a VxLAN id,
    reserves that id in Zookeeper under the LR's internal
    virtual-network name (with an undo callback for rollback).
    Finishes with the BGP VPN association checks.

    Returns (True, "") on success or (False, (http_code, msg)) on error.
    """
    ok, result = cls.check_for_external_gateway(db_conn, obj_dict)
    if not ok:
        return ok, result

    ok, result = cls._ensure_lr_dci_association(obj_dict)
    if not ok:
        return ok, result

    ok, result = cls.check_port_gateway_not_in_same_network(
        db_conn, obj_dict)
    if not ok:
        return ok, result

    ok, result = cls.is_port_in_use_by_vm(obj_dict, db_conn)
    if not ok:
        return ok, result

    ok, result = cls.is_vxlan_routing_enabled(db_conn, obj_dict)
    if not ok:
        return ok, result
    vxlan_routing = result

    # NOTE(review): the 'vxlan_id = None' below is immediately
    # overwritten by the helper call — it is redundant.
    vxlan_id = None
    vxlan_id = cls._check_vxlan_id_in_lr(obj_dict)

    if vxlan_routing and vxlan_id:
        # If input vxlan_id is not None, that means we need to reserve
        # it.
        # First, check if vxlan_id is set for other fq_name
        existing_fq_name = cls.vnc_zk_client.get_vn_from_id(int(vxlan_id))
        if existing_fq_name is not None:
            msg = ("Cannot set VXLAN_ID: %s, it has already been set"
                   % vxlan_id)
            return False, (400, msg)

        # Second, if vxlan_id is not None, set it in Zookeeper and set
        # the undo function for when any failures happen later.
        # But first, get the internal_vlan name using which the resource
        # in zookeeper space will be reserved.
        ok, proj_dict = db_conn.dbe_read('project',
                                         obj_dict['parent_uuid'])
        if not ok:
            return (ok, proj_dict)
        vn_int_name = get_lr_internal_vn_name(obj_dict.get('uuid'))
        # Build transient objects only to derive the internal VN fq_name;
        # nothing is persisted here.
        proj_obj = Project(name=proj_dict.get('fq_name')[-1],
                           parent_type='domain',
                           fq_name=proj_dict.get('fq_name'))
        vn_obj = VirtualNetwork(name=vn_int_name, parent_obj=proj_obj)
        try:
            vxlan_fq_name = ':'.join(vn_obj.fq_name) + '_vxlan'
            # Now that we have the internal VN name, allocate it in
            # zookeeper only if the resource hasn't been reserved
            # already
            cls.vnc_zk_client.alloc_vxlan_id(vxlan_fq_name, int(vxlan_id))
        except ResourceExistsError:
            msg = ("Cannot set VXLAN_ID: %s, it has already been set"
                   % vxlan_id)
            return False, (400, msg)

        def undo_vxlan_id():
            # Roll back the Zookeeper reservation if a later step fails.
            cls.vnc_zk_client.free_vxlan_id(int(vxlan_id), vxlan_fq_name)
            return True, ""
        get_context().push_undo(undo_vxlan_id)

    # Check if type of all associated BGP VPN are 'l3'
    ok, result = cls.server.get_resource_class(
        'bgpvpn').check_router_supports_vpn_type(obj_dict)
    if not ok:
        return ok, result

    # Check if we can reference the BGP VPNs
    return cls.server.get_resource_class(
        'bgpvpn').check_router_has_bgpvpn_assoc_via_network(obj_dict)
def _manage_vpg_association(cls, vmi_id, api_server, db_conn, phy_links,
                            vpg_name=None):
    """Associate a VMI's physical links with a virtual-port-group.

    Resolves the fabric and physical-interface uuids from the link-local
    bindings, reads (or internally creates) the VPG, then reconciles the
    VPG's physical-interface refs: removes interfaces no longer bound
    (freeing their AE ids) and adds the new ones (allocating AE ids per
    physical router when the VPG spans multiple links).

    Returns (vpg_uuid, ret_dict) on success, where ret_dict records
    'allocated_ae_id' / 'deallocated_ae_id'; error paths return
    (False, (code, msg)) — and in some branches a (ok, code, msg)
    3-tuple; NOTE(review): that 3-tuple shape is inconsistent with the
    other error returns — confirm what the caller expects.
    """
    fabric_name = None
    phy_interface_uuids = []
    old_phy_interface_uuids = []
    new_pi_to_pr_dict = {}
    old_pi_to_pr_dict = {}
    for link in phy_links:
        if link.get('fabric'):
            # All links of one VPG must name the same fabric.
            if fabric_name is not None and fabric_name != link['fabric']:
                msg = 'Physical interfaces in the same vpg '\
                    'should belong to the same fabric'
                return (False, (400, msg))
            fabric_name = link['fabric']
        else:
            # use default fabric if it's not in link local information
            fabric_name = 'default-fabric'

        phy_interface_name = link['port_id']
        prouter_name = link['switch_info']
        pi_fq_name = ['default-global-system-config', prouter_name,
                      phy_interface_name]
        pi_uuid = db_conn.fq_name_to_uuid('physical_interface',
                                          pi_fq_name)
        phy_interface_uuids.append(pi_uuid)
        new_pi_to_pr_dict[pi_uuid] = prouter_name

    # check if new physical interfaces belongs to some other vpg
    # NOTE(review): the loop variable 'uuid' shadows the stdlib uuid
    # module used elsewhere in this file — rename candidate.
    for uuid in set(phy_interface_uuids):
        ok, phy_interface_dict = db_conn.dbe_read(
            obj_type='physical-interface', obj_id=uuid,
            obj_fields=['virtual_port_group_back_refs'])
        if not ok:
            return (ok, 400, phy_interface_dict)
        vpg_refs = phy_interface_dict.get('virtual_port_group_back_refs')
        if vpg_refs and vpg_name and vpg_refs[0]['to'][-1] != vpg_name:
            msg = 'Physical interface %s already belong to the vpg %s' %\
                (phy_interface_dict.get('name'), vpg_refs[0]['to'][-1])
            return (False, (400, msg))

    if vpg_name:
        # read the vpg object
        vpg_fq_name = ['default-global-system-config', fabric_name,
                       vpg_name]
        try:
            vpg_uuid = db_conn.fq_name_to_uuid('virtual_port_group',
                                               vpg_fq_name)
        except NoIdError:
            msg = 'Vpg object %s is not found' % vpg_name
            return (False, (404, msg))
        ok, vpg_dict = db_conn.dbe_read(
            obj_type='virtual-port-group', obj_id=vpg_uuid)
        if not ok:
            return (ok, 400, vpg_dict)
    else:
        # create vpg object
        # NOTE(review): the fq_name used as the Zookeeper allocation key
        # ends with a PI uuid rather than a name — presumably intended
        # to make the key unique; confirm.
        fabric_fq_name = [
            'default-global-system-config',
            fabric_name,
            phy_interface_uuids[0],
        ]
        vpg_id = cls.vnc_zk_client.alloc_vpg_id(':'.join(fabric_fq_name))

        def undo_vpg_id():
            # Roll back the Zookeeper VPG-id allocation on failure.
            cls.vnc_zk_client.free_vpg_id(vpg_id,
                                          ':'.join(fabric_fq_name))
            return True, ""
        get_context().push_undo(undo_vpg_id)

        vpg_name = "vpg-internal-" + str(vpg_id)
        vpg_obj = VirtualPortGroup(
            parent_type='fabric',
            fq_name=['default-global-system-config', fabric_name,
                     vpg_name],
            virtual_port_group_user_created=False,
            virtual_port_group_lacp_enabled=True)
        # Round-trip through JSON to get a plain dict for the internal
        # create call.
        vpg_int_dict = json.dumps(vpg_obj, default=_obj_serializer_all)
        ok, resp = api_server.internal_request_create(
            'virtual-port-group', json.loads(vpg_int_dict))
        if not ok:
            return (ok, 400, resp)
        vpg_dict = resp['virtual-port-group']
        vpg_uuid = resp['virtual-port-group']['uuid']

        def undo_vpg_create():
            # Delete the internally created VPG on later failure.
            cls.server.internal_request_delete('virtual-port-group',
                                               vpg_uuid)
            return True, ''
        get_context().push_undo(undo_vpg_create)

    # Snapshot the VPG's current PI refs and their AE assignments.
    old_phy_interface_refs = vpg_dict.get('physical_interface_refs')
    for ref in old_phy_interface_refs or []:
        old_pi_to_pr_dict[ref['uuid']] = {
            'prouter_name': ref['to'][1],
            'ae_id': ref['attr'].get('ae_num') if ref['attr'] else None}
        old_phy_interface_uuids.append(ref['uuid'])

    ret_dict = {}
    ret_dict['deallocated_ae_id'] = []
    ret_dict['allocated_ae_id'] = []

    # delete old physical interfaces to the vpg
    for uuid in set(old_phy_interface_uuids) - set(phy_interface_uuids):
        prouter_dict = old_pi_to_pr_dict.get(uuid)
        dealloc_dict = cls._check_and_free_ae_id(
            phy_links, prouter_dict, vpg_name, new_pi_to_pr_dict)
        ret_dict['deallocated_ae_id'].append(dealloc_dict)
        api_server.internal_request_ref_update(
            'virtual-port-group', vpg_uuid, 'DELETE',
            'physical-interface', uuid)

    # add new physical interfaces to the vpg
    pr_to_ae_id = {}
    for uuid in phy_interface_uuids:
        prouter_name = new_pi_to_pr_dict.get(uuid)
        if pr_to_ae_id.get(prouter_name) is None:
            # First interface seen on this physical router: allocate an
            # AE id for it.
            attr_obj, ae_id = cls._check_and_alloc_ae_id(
                phy_links, prouter_name, vpg_name, old_pi_to_pr_dict)
            pr_to_ae_id[prouter_name] = ae_id
            if len(phy_links) > 1 and ae_id is not None:
                alloc_dict = {}
                alloc_dict['ae_id'] = ae_id
                alloc_dict['prouter_name'] = prouter_name
                alloc_dict['vpg_name'] = vpg_name
                ret_dict['allocated_ae_id'].append(alloc_dict)
        else:
            # Reuse the AE id already allocated for this router.
            attr_obj = VpgInterfaceParametersType(
                ae_num=pr_to_ae_id.get(prouter_name))
        api_server.internal_request_ref_update(
            'virtual-port-group', vpg_uuid, 'ADD', 'physical-interface',
            uuid, attr=attr_obj.__dict__ if attr_obj else None,
            relax_ref_for_delete=True)

    return vpg_uuid, ret_dict
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Allocate an address for a floating-ip create request.

    When the floating-ip-pool carries subnet guidelines, tries each
    configured subnet in turn until an allocation succeeds; otherwise
    allocates from any subnet of the virtual-network.  Pushes an undo
    callback that frees the address on later failure and stores the
    result in ``obj_dict['floating_ip_address']``.

    :returns: (True, "") on success, (False, (http_code, msg)) on error
    """
    if obj_dict['parent_type'] == 'instance-ip':
        return True, ""

    vn_fq_name = obj_dict['fq_name'][:-2]
    req_ip = obj_dict.get("floating_ip_address")
    if req_ip and cls.addr_mgmt.is_ip_allocated(req_ip, vn_fq_name):
        return (False, (409, 'IP address already in use'))
    try:
        ok, result = cls.addr_mgmt.get_ip_free_args(vn_fq_name)
        if not ok:
            return ok, result
        #
        # Parse through floating-ip-pool config to see if there are any
        # guidelines laid for allocation of this floating-ip.
        #
        fip_subnets = None
        ok, ret_val = cls._get_fip_pool_subnets(obj_dict, db_conn)
        # On a successful fip-pool subnet get, the subnet list is
        # returned. Otherwise, returned value has appropriate reason
        # string.
        if ok:
            fip_subnets = ret_val
        else:
            return ok, (400, "Floating-ip-pool lookup failed with error: "
                             "%s" % ret_val)

        if not fip_subnets:
            # Subnet specification was not found on the
            # floating-ip-pool. Proceed to allocate floating-ip from any
            # of the subnets on the virtual-network.
            fip_addr, sn_uuid, s_name = cls.addr_mgmt.ip_alloc_req(
                vn_fq_name, asked_ip_addr=req_ip,
                alloc_id=obj_dict['uuid'])
        else:
            # BUGFIX: initialize so the post-loop check cannot hit an
            # unbound local when every subnet is exhausted.
            fip_addr = None
            subnets_tried = []
            # Iterate through configured subnets on floating-ip-pool.
            # We will try to allocate floating-ip by iterating through
            # the list of configured subnets.
            for fip_pool_subnet in fip_subnets['subnet_uuid']:
                try:
                    # Record the subnets that we try to allocate from.
                    subnets_tried.append(fip_pool_subnet)
                    fip_addr, sn_uuid, s_name = \
                        cls.addr_mgmt.ip_alloc_req(
                            vn_fq_name, sub=fip_pool_subnet,
                            asked_ip_addr=req_ip,
                            alloc_id=obj_dict['uuid'])
                except cls.addr_mgmt.AddrMgmtSubnetExhausted:
                    # This subnet is exhausted. Try next subnet.
                    continue
                # BUGFIX: stop after a successful allocation; without
                # this break, one address per subnet was allocated and
                # all but the last leaked.
                break

            if not fip_addr:
                # Floating-ip could not be allocated from any of the
                # configured subnets. Raise an exception (caught by the
                # enclosing handler and reported as a 500).
                raise cls.addr_mgmt.AddrMgmtSubnetExhausted(
                    vn_fq_name, subnets_tried)

        def undo():
            # Free the allocated address if a later step fails.
            msg = ("AddrMgmt: free FIP %s for vn=%s on tenant=%s, "
                   "on undo" % (fip_addr, vn_fq_name, tenant_name))
            db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            cls.addr_mgmt.ip_free_req(
                fip_addr, vn_fq_name,
                alloc_id=obj_dict['uuid'],
                vn_dict=result.get('vn_dict'),
                ipam_dicts=result.get('ipam_dicts'))
            return True, ""
        get_context().push_undo(undo)
    except Exception as e:
        return (False, (500, str(e)))

    obj_dict['floating_ip_address'] = fip_addr
    msg = ('AddrMgmt: alloc %s FIP for vn=%s, tenant=%s, askip=%s' %
           (fip_addr, vn_fq_name, tenant_name, req_ip))
    db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)

    return True, ""