def test_sanitize_display_name(self):
    """Invalid characters are replaced and long names are truncated."""
    for raw, expected in (
            ('s0m_e N.a/me-+', 's0m_e_N.a_me-_'),
            ('some' * 15, 'some' * 14 + 'som')):
        self.assertEqual(expected, utils.sanitize_display_name(raw))
def test_sanitize_display_name(self):
    """Display names get invalid characters substituted and are capped."""
    sanitized = utils.sanitize_display_name('s0m_e N.a/me-+')
    self.assertEqual('s0m_e_N.a_me-_', sanitized)
    truncated = utils.sanitize_display_name('some' * 15)
    self.assertEqual('some' * 14 + 'som', truncated)
def _get_nat_ap_epg(self, ctx, l3out):
    """Build the (ApplicationProfile, EndpointGroup) pair used for NAT.

    The AP name prefers a configured ``app_profile_name``, falling back
    to the l3out's own name; the EPG is named after the l3out with an
    ``EXT-`` prefix.
    """
    display = self._display_name(l3out)
    profile_name = self._scope_name_if_common(
        l3out.tenant_name,
        getattr(self, 'app_profile_name', None) or l3out.name)
    profile = resource.ApplicationProfile(
        tenant_name=l3out.tenant_name,
        name=profile_name,
        display_name=aim_utils.sanitize_display_name(
            profile_name or display))
    nat_epg = resource.EndpointGroup(
        tenant_name=profile.tenant_name,
        app_profile_name=profile.name,
        name='EXT-%s' % l3out.name,
        display_name=aim_utils.sanitize_display_name('EXT-%s' % display))
    return (profile, nat_epg)
def _get_nat_ap_epg(self, ctx, l3out):
    """Return the NAT ApplicationProfile and its external EndpointGroup."""
    base_dname = self._display_name(l3out)
    name = getattr(self, 'app_profile_name', None) or l3out.name
    name = self._scope_name_if_common(l3out.tenant_name, name)
    dname = aim_utils.sanitize_display_name(name or base_dname)
    app_profile = resource.ApplicationProfile(
        tenant_name=l3out.tenant_name, name=name, display_name=dname)
    ext_epg = resource.EndpointGroup(
        tenant_name=app_profile.tenant_name,
        app_profile_name=app_profile.name,
        name='EXT-%s' % l3out.name,
        display_name=aim_utils.sanitize_display_name(
            'EXT-%s' % base_dname))
    return (app_profile, ext_epg)
def _get_flc_contract(self, session, flc, tenant):
    """Map a flow classifier onto its AIM Contract representation."""
    return aim_resource.Contract(
        tenant_name=tenant,
        name=self.name_mapper.flow_classifier(session, flc['id']),
        display_name=aim_utils.sanitize_display_name(flc['name']))
def _get_pc_service_graph(self, session, pc, tenant):
    """Map a port chain onto its AIM ServiceGraph representation."""
    graph_name = self.name_mapper.port_chain(session, pc['id'])
    graph_dname = aim_utils.sanitize_display_name(pc['name'])
    return aim_sg.ServiceGraph(tenant_name=tenant, name=graph_name,
                               display_name=graph_dname)
def _generate_l3out_name(self, l3outside, vrf):
    """Derive the L3Outside (name, display_name) from its VRF pairing."""
    return (
        '%s-%s' % (l3outside.name, vrf.name),
        aim_utils.sanitize_display_name('%s-%s' % (
            self._display_name(l3outside), self._display_name(vrf))))
def update_router(self, context, current, original):
    """Sync a Neutron router rename to AIM.

    No-op unless the router's name changed; then updates the
    display_name on the router's AIM Contract and its default
    ContractSubject.

    :param context: request context carrying the DB session
    :param current: updated router dict
    :param original: router dict before the update
    """
    LOG.debug("APIC AIM MD updating router: %s", current)
    if current['name'] != original['name']:
        session = context.session
        tenant_id = current['tenant_id']
        tenant_aname = self.name_mapper.tenant(session, tenant_id)
        LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
                  {'id': tenant_id, 'aname': tenant_aname})
        id = current['id']
        name = current['name']
        aname = self.name_mapper.router(session, id, name)
        LOG.debug("Mapped router_id %(id)s with name %(name)s to "
                  "%(aname)s",
                  {'id': id, 'name': name, 'aname': aname})
        dname = aim_utils.sanitize_display_name(name)
        aim_ctx = aim_context.AimContext(session)
        # Only display_name changes; the identity (tenant/name) is stable.
        contract = aim_resource.Contract(tenant_name=tenant_aname,
                                         name=aname)
        contract = self.aim.update(aim_ctx, contract, display_name=dname)
        # The router's single subject mirrors the same display name.
        subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
                                               contract_name=aname,
                                               name=ROUTER_SUBJECT_NAME)
        subject = self.aim.update(aim_ctx, subject, display_name=dname)
def update_address_scope_precommit(self, context):
    """Propagate an address-scope rename to its AIM VRF display name.

    No-op unless the scope's name changed.
    """
    LOG.debug("APIC AIM MD updating address_scope: %s", context.current)
    if context.current['name'] != context.original['name']:
        session = context._plugin_context.session
        tenant_id = context.current['tenant_id']
        tenant_aname = self.name_mapper.tenant(session, tenant_id)
        LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
                  {'id': tenant_id, 'aname': tenant_aname})
        id = context.current['id']
        name = context.current['name']
        aname = self.name_mapper.address_scope(session, id, name)
        LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
                  "%(aname)s",
                  {'id': id, 'name': name, 'aname': aname})
        dname = aim_utils.sanitize_display_name(name)
        aim_ctx = aim_context.AimContext(session)
        # An address scope maps 1:1 onto an AIM VRF; only the display
        # name is updated here.
        vrf = aim_resource.VRF(tenant_name=tenant_aname, name=aname)
        vrf = self.aim.update(aim_ctx, vrf, display_name=dname)
def update_network_precommit(self, context):
    """Propagate a network rename to its AIM BD and EPG display names.

    No-op unless the network's name changed.
    """
    LOG.debug("APIC AIM MD updating network: %s", context.current)
    if context.current['name'] != context.original['name']:
        session = context._plugin_context.session
        tenant_id = context.current['tenant_id']
        tenant_aname = self.name_mapper.tenant(session, tenant_id)
        LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
                  {'id': tenant_id, 'aname': tenant_aname})
        id = context.current['id']
        name = context.current['name']
        aname = self.name_mapper.network(session, id, name)
        LOG.debug("Mapped network_id %(id)s with name %(name)s to "
                  "%(aname)s",
                  {'id': id, 'name': name, 'aname': aname})
        dname = aim_utils.sanitize_display_name(context.current['name'])
        aim_ctx = aim_context.AimContext(session)
        # Both the BridgeDomain and the EndpointGroup mirror the
        # network's display name.
        bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
                                       name=aname)
        bd = self.aim.update(aim_ctx, bd, display_name=dname)
        epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                         app_profile_name=AP_NAME,
                                         name=aname)
        epg = self.aim.update(aim_ctx, epg, display_name=dname)
def _get_ppg_device_cluster(self, session, ppg, tenant):
    """Map a port pair group onto its (unmanaged) AIM DeviceCluster."""
    cluster_name = self.name_mapper.port_pair_group(session, ppg['id'])
    cluster_dname = aim_utils.sanitize_display_name(ppg['name'])
    return aim_sg.DeviceCluster(tenant_name=tenant, name=cluster_name,
                                display_name=cluster_dname,
                                managed=False)
def _get_nat_vrf(self, ctx, l3out):
    """Return the VRF that backs NAT for the given L3Outside."""
    base_dname = self._display_name(l3out)
    scoped_name = self._scope_name_if_common(l3out.tenant_name,
                                             'EXT-%s' % l3out.name)
    scoped_dname = self._scope_name_if_common(
        l3out.tenant_name,
        aim_utils.sanitize_display_name('EXT-%s' % base_dname))
    return resource.VRF(tenant_name=l3out.tenant_name, name=scoped_name,
                        display_name=scoped_dname)
def _get_nat_vrf(self, ctx, l3out):
    """Build the NAT VRF for an L3Outside, scoped if in tenant common."""
    ext_name = 'EXT-%s' % l3out.name
    ext_dname = aim_utils.sanitize_display_name(
        'EXT-%s' % self._display_name(l3out))
    return resource.VRF(
        tenant_name=l3out.tenant_name,
        name=self._scope_name_if_common(l3out.tenant_name, ext_name),
        display_name=self._scope_name_if_common(l3out.tenant_name,
                                                ext_dname))
def _get_external_group_aim_name(self, plugin_context, flowc, prefix):
    """Build the AIM name for a flow classifier's external group.

    Chooses the classifier's source or destination side based on
    ``prefix`` and keys the mapped network name on the sanitized CIDR.
    """
    if prefix == FLOWC_SRC:
        raw_cidr = flowc['source_ip_prefix']
        network = self._get_flowc_src_network(plugin_context, flowc)
    else:
        raw_cidr = flowc['destination_ip_prefix']
        network = self._get_flowc_dst_network(plugin_context, flowc)
    safe_cidr = aim_utils.sanitize_display_name(raw_cidr)
    return self.name_mapper.network(plugin_context.session,
                                    network['id'],
                                    prefix=safe_cidr + '_')
def _get_nat_bd(self, ctx, l3out):
    """Return the BridgeDomain used for NAT behind the given L3Outside."""
    base_dname = self._display_name(l3out)
    scoped_name = self._scope_name_if_common(l3out.tenant_name,
                                             'EXT-%s' % l3out.name)
    scoped_dname = self._scope_name_if_common(
        l3out.tenant_name,
        aim_utils.sanitize_display_name('EXT-%s' % base_dname))
    return resource.BridgeDomain(tenant_name=l3out.tenant_name,
                                 name=scoped_name,
                                 display_name=scoped_dname,
                                 limit_ip_learn_to_subnets=True,
                                 l3out_names=[l3out.name])
def _get_nat_bd(self, ctx, l3out):
    """Build the NAT BridgeDomain for an L3Outside, scoped for common."""
    ext_name = 'EXT-%s' % l3out.name
    ext_dname = aim_utils.sanitize_display_name(
        'EXT-%s' % self._display_name(l3out))
    return resource.BridgeDomain(
        tenant_name=l3out.tenant_name,
        name=self._scope_name_if_common(l3out.tenant_name, ext_name),
        display_name=self._scope_name_if_common(l3out.tenant_name,
                                                ext_dname),
        limit_ip_learn_to_subnets=True,
        l3out_names=[l3out.name])
def create_router(self, context, current):
    """Create the AIM Contract/ContractSubject backing a Neutron router.

    Also merges the AIM sync status of both resources and records
    their DNs and the combined sync state on the router dict.

    :param context: request context carrying the DB session
    :param current: router dict being created (mutated in place)
    """
    LOG.debug("APIC AIM MD creating router: %s", current)
    session = context.session
    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    id = current['id']
    name = current['name']
    aname = self.name_mapper.router(session, id, name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': id, 'name': name, 'aname': aname})
    dname = aim_utils.sanitize_display_name(name)
    aim_ctx = aim_context.AimContext(session)
    contract = aim_resource.Contract(tenant_name=tenant_aname,
                                     name=aname,
                                     display_name=dname)
    self.aim.create(aim_ctx, contract)
    # The single subject allows everything in both directions; routing
    # semantics are expressed by who provides/consumes the contract.
    subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
                                           contract_name=aname,
                                           name=ROUTER_SUBJECT_NAME,
                                           display_name=dname,
                                           bi_filters=[ANY_FILTER_NAME])
    self.aim.create(aim_ctx, subject)
    # REVISIT(rkukura): Consider having L3 plugin extend router
    # dict again after calling this function.
    sync_state = cisco_apic.SYNC_SYNCED
    sync_state = self._merge_status(aim_ctx, sync_state, contract)
    sync_state = self._merge_status(aim_ctx, sync_state, subject)
    current[cisco_apic.DIST_NAMES] = {
        cisco_apic_l3.CONTRACT: contract.dn,
        cisco_apic_l3.CONTRACT_SUBJECT: subject.dn}
    current[cisco_apic.SYNC_STATE] = sync_state
def create_network_precommit(self, context):
    """Create the AIM BridgeDomain and EndpointGroup for a new network."""
    LOG.debug("APIC AIM MD creating network: %s", context.current)
    session = context._plugin_context.session
    tenant_id = context.current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    id = context.current['id']
    name = context.current['name']
    aname = self.name_mapper.network(session, id, name)
    LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
              {'id': id, 'name': name, 'aname': aname})
    dname = aim_utils.sanitize_display_name(name)
    aim_ctx = aim_context.AimContext(session)
    # New networks start out on the shared unrouted VRF; routing is
    # disabled until a router interface is attached.
    vrf = self._get_unrouted_vrf(aim_ctx)
    bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
                                   name=aname,
                                   display_name=dname,
                                   vrf_name=vrf.name,
                                   enable_arp_flood=True,
                                   enable_routing=False,
                                   limit_ip_learn_to_subnets=True)
    self.aim.create(aim_ctx, bd)
    # The EPG shares the BD's (network's) mapped name.
    epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                     app_profile_name=AP_NAME,
                                     name=aname,
                                     display_name=dname,
                                     bd_name=aname)
    self.aim.create(aim_ctx, epg)
def _delete_flowc_network_group_mapping(self, plugin_context, net, flowc,
                                        tenant, cidr, prefix=''):
    """Undo the flow-classifier-to-network group mapping.

    For SVI networks with a non-default CIDR the dedicated
    ExternalNetwork is deleted outright; otherwise the classifier's
    contract is detached from the default external EPG or the
    network's EPG.
    """
    flc_aid = self._get_external_group_aim_name(plugin_context, flowc,
                                                prefix)
    flc_aname = aim_utils.sanitize_display_name(flowc['name'])
    aim_ctx = aim_context.AimContext(plugin_context.session)
    l3out = self.aim_mech._get_svi_net_l3out(net)
    cidr = netaddr.IPNetwork(cidr)
    epg = None
    if l3out:
        if cidr.prefixlen != 0:
            # Specific CIDR: the mapping created a dedicated
            # ExternalNetwork; cascade-delete it and its subnets.
            ext_net = aim_resource.ExternalNetwork(
                tenant_name=l3out.tenant_name, l3out_name=l3out.name,
                name=flc_aid, display_name=flc_aname)
            self.aim.delete(aim_ctx, ext_net, cascade=True)
        else:
            # Default route: the shared default external EPG was used,
            # so only the contract association must be removed.
            ext_net = self.aim_mech._get_svi_default_external_epg(net)
            epg = self.aim.get(aim_ctx, ext_net)
    else:
        epg = self.aim.get(
            aim_ctx,
            self.aim_mech._get_epg_by_network_id(plugin_context.session,
                                                 net['id']))
    if epg:
        contract = self._get_flc_contract(plugin_context.session, flowc,
                                          tenant)
        try:
            # Source side consumes, destination side provides.
            if prefix == FLOWC_SRC:
                epg.consumed_contract_names.remove(contract.name)
            else:
                epg.provided_contract_names.remove(contract.name)
            self.aim.create(aim_ctx, epg, overwrite=True)
        except ValueError:
            # Contract was already absent from the EPG; nothing to undo.
            pass
def _map_flowc_network_group(self, plugin_context, net, cidr, flowc,
                             prefix):
    """Map one side of a flow classifier onto an AIM group.

    Returns the default external EPG (CIDR 0/0 on an SVI network), a
    dedicated ExternalNetwork+ExternalSubnet (specific CIDR on an SVI
    network), or the network's own EPG (non-SVI network).

    :raises exceptions.DefaultExternalNetworkNotFound: when the SVI
        network's default external EPG is missing.
    """
    flc_aid = self._get_external_group_aim_name(plugin_context, flowc,
                                                prefix)
    flc_aname = aim_utils.sanitize_display_name(flowc['name'])
    aim_ctx = aim_context.AimContext(plugin_context.session)
    cidr = netaddr.IPNetwork(cidr)
    l3out = self.aim_mech._get_svi_net_l3out(net)
    if l3out:
        if cidr.prefixlen == 0:
            # Use default External Network
            ext_net = self.aim_mech._get_svi_default_external_epg(net)
            ext_net_db = self.aim.get(aim_ctx, ext_net)
            if not ext_net_db:
                raise exceptions.DefaultExternalNetworkNotFound(
                    id=net['id'])
        else:
            # Create ExternalNetwork and ExternalSubnet on the proper
            # L3Out. Return the External network
            ext_net = aim_resource.ExternalNetwork(
                tenant_name=l3out.tenant_name, l3out_name=l3out.name,
                name=flc_aid, display_name=flc_aname)
            ext_sub = aim_resource.ExternalSubnet(
                tenant_name=ext_net.tenant_name,
                l3out_name=ext_net.l3out_name,
                external_network_name=ext_net.name, cidr=str(cidr))
            # Both creations are idempotent: only create when missing.
            ext_net_db = self.aim.get(aim_ctx, ext_net)
            if not ext_net_db:
                ext_net_db = self.aim.create(aim_ctx, ext_net)
            ext_sub_db = self.aim.get(aim_ctx, ext_sub)
            if not ext_sub_db:
                self.aim.create(aim_ctx, ext_sub)
        return ext_net_db
    else:
        return self.aim_mech._get_epg_by_network_id(
            plugin_context.session, net['id'])
def create_address_scope_precommit(self, context):
    """Create the AIM VRF backing a new Neutron address scope.

    Records the VRF's DN and merged sync state on the address-scope
    dict, since ML2Plus does not extend it after precommit.
    """
    LOG.debug("APIC AIM MD creating address scope: %s", context.current)
    session = context._plugin_context.session
    tenant_id = context.current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    id = context.current['id']
    name = context.current['name']
    aname = self.name_mapper.address_scope(session, id, name)
    LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': id, 'name': name, 'aname': aname})
    dname = aim_utils.sanitize_display_name(name)
    aim_ctx = aim_context.AimContext(session)
    vrf = aim_resource.VRF(tenant_name=tenant_aname, name=aname,
                           display_name=dname)
    self.aim.create(aim_ctx, vrf)
    # ML2Plus does not extend address scope dict after precommit.
    sync_state = cisco_apic.SYNC_SYNCED
    sync_state = self._merge_status(aim_ctx, sync_state, vrf)
    context.current[cisco_apic.DIST_NAMES] = {cisco_apic.VRF: vrf.dn}
    context.current[cisco_apic.SYNC_STATE] = sync_state
def _map_port_pair_group(self, plugin_context, ppg, tenant):
    """Create the AIM service-graph objects for a port pair group.

    Builds the DeviceCluster (logical device) for the PPG, one
    ConcreteDevice per port pair with ConcreteDeviceInterfaces for each
    bound ingress/egress port, the ingress/egress
    DeviceClusterInterfaces, and the two PBR (service redirect)
    policies (one per direction).

    :raises exceptions.PortPairsNoUniqueDomain: when no unique domain
        can be derived from the PPG's port pairs.
    """
    session = plugin_context.session
    aim_ctx = aim_context.AimContext(session)
    # Create Logical device model, container for all the PPG port pairs.
    dc = self._get_ppg_device_cluster(session, ppg, tenant)
    # Renamed local from 'type' to avoid shadowing the builtin.
    dom_type, domain = self._get_ppg_domain(plugin_context, ppg)
    if not dom_type and not domain:
        raise exceptions.PortPairsNoUniqueDomain(id=ppg['port_pairs'])
    if dom_type == PHYSDOM_TYPE:
        dc.device_type = 'PHYSICAL'
        dc.physical_domain_name = domain
    else:
        dc.device_type = 'VIRTUAL'
        dc.vmm_domain = [{'type': dom_type, 'name': domain}]
    self.aim.create(aim_ctx, dc)
    # For each port pair, create the corresponding Concrete Devices
    # (represented by the static path of each interface).
    ingress_cdis = []
    egress_cdis = []
    port_pairs = self.sfc_plugin.get_port_pairs(
        plugin_context, filters={'id': ppg['port_pairs']})
    for pp in port_pairs:
        ingress_port = self.plugin.get_port(plugin_context, pp['ingress'])
        egress_port = self.plugin.get_port(plugin_context, pp['egress'])
        pp_id = self.name_mapper.port_pair(session, pp['id'])
        pp_name = aim_utils.sanitize_display_name(ppg['name'])
        # Create ConcreteDevice
        cd = aim_sg.ConcreteDevice(tenant_name=dc.tenant_name,
                                   device_cluster_name=dc.name,
                                   name=pp_id, display_name=pp_name)
        self.aim.create(aim_ctx, cd)
        for p, store in [(ingress_port, ingress_cdis),
                         (egress_port, egress_cdis)]:
            p_id = self.name_mapper.port(session, p['id'])
            p_name = aim_utils.sanitize_display_name(p['name'])
            path, encap = self.aim_mech._get_port_static_path_and_encap(
                plugin_context, p)
            if path is None:
                # BUG FIX: the original passed the second sentence as a
                # format argument, leaving three args for two %s
                # placeholders, which breaks the log call at runtime.
                LOG.warning("Path not found for Port Pair %s member %s. "
                            "Port might be unbound.", pp['id'], p['id'])
                continue
            # TODO(ivar): what if encap is None? is that an Opflex port?
            # Create Concrete Device Interface
            cdi = aim_sg.ConcreteDeviceInterface(
                tenant_name=cd.tenant_name,
                device_cluster_name=cd.device_cluster_name,
                device_name=cd.name, name=p_id, display_name=p_name,
                path=path)
            cdi = self.aim.create(aim_ctx, cdi)
            store.append((cdi, encap, p))
    # Ingress and Egress CDIs have the same length.
    # All the ingress devices must be load balanced, and so the egress
    # (for reverse path). Create the proper PBR policies as well as
    # the Logical Interfaces (which see all the physical interfaces of
    # a specific direction as they were one).
    internal_dci = aim_sg.DeviceClusterInterface(
        tenant_name=dc.tenant_name, device_cluster_name=dc.name,
        name=INGRESS, display_name=INGRESS)
    external_dci = aim_sg.DeviceClusterInterface(
        tenant_name=dc.tenant_name, device_cluster_name=dc.name,
        name=EGRESS, display_name=EGRESS)
    # Create 2 PBR rules per PPG, one per direction.
    ipbr = self._get_ppg_service_redirect_policy(session, ppg, INGRESS,
                                                 tenant)
    epbr = self._get_ppg_service_redirect_policy(session, ppg, EGRESS,
                                                 tenant)
    # Pair each ingress CDI with its egress counterpart (zip is safe:
    # both lists are filled in lockstep above).
    for (icdi, iencap, iport), (ecdi, eencap, eport) in zip(
            ingress_cdis, egress_cdis):
        internal_dci.encap = iencap
        external_dci.encap = eencap
        internal_dci.concrete_interfaces.append(icdi.dn)
        external_dci.concrete_interfaces.append(ecdi.dn)
        if iport['fixed_ips']:
            ipbr.destinations.append(
                {'ip': iport['fixed_ips'][0]['ip_address'],
                 'mac': iport['mac_address']})
        if eport['fixed_ips']:
            epbr.destinations.append(
                {'ip': eport['fixed_ips'][0]['ip_address'],
                 'mac': eport['mac_address']})
    self.aim.create(aim_ctx, internal_dci)
    self.aim.create(aim_ctx, external_dci)
    self.aim.create(aim_ctx, ipbr)
    self.aim.create(aim_ctx, epbr)
def _scope_name_if_common(self, tenant_name, name):
    """Prefix ``name`` with the common scope when in the 'common' tenant."""
    if tenant_name != 'common':
        return name
    scope = getattr(self, 'common_scope', None)
    prefix = scope + '_' if scope else ''
    return aim_utils.sanitize_display_name(prefix + name)
def _generate_l3out_name(self, l3outside, vrf):
    """Name an L3Outside after both the l3out and the VRF it serves."""
    combined = '%s-%s' % (l3outside.name, vrf.name)
    combined_dname = '%s-%s' % (self._display_name(l3outside),
                                self._display_name(vrf))
    return (combined, aim_utils.sanitize_display_name(combined_dname))
def _scope_name_if_common(self, tenant_name, name):
    """Scope names that live in the shared 'common' tenant."""
    if tenant_name == 'common':
        prefix = getattr(self, 'common_scope', None) or ''
        if prefix:
            prefix += '_'
        return aim_utils.sanitize_display_name(prefix + name)
    return name
def do_apic_aim_security_group_migration(session):
    """Migrate Neutron security groups and their rules into AIM.

    For each SG, creates the AIM SecurityGroup plus a 'default'
    SecurityGroupSubject; for each SG rule, creates the matching AIM
    SecurityGroupRule, expanding remote-group references to the member
    ports' fixed IPs matching the rule's ethertype.
    """
    alembic_util.msg(
        "Starting data migration for SGs and its rules.")
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        # Migrate SG.
        sg_dbs = (session.query(sg_models.SecurityGroup).
                  options(lazyload('*')).all())
        for sg_db in sg_dbs:
            alembic_util.msg("Migrating SG: %s" % sg_db)
            tenant_aname = mapper.project(session, sg_db['tenant_id'])
            sg_aim = aim_resource.SecurityGroup(
                tenant_name=tenant_aname, name=sg_db['id'],
                display_name=aim_utils.sanitize_display_name(
                    sg_db['name']))
            aim.create(aim_ctx, sg_aim, overwrite=True)
            # Always create this default subject
            sg_subject = aim_resource.SecurityGroupSubject(
                tenant_name=tenant_aname,
                security_group_name=sg_db['id'], name='default')
            aim.create(aim_ctx, sg_subject, overwrite=True)
        # Migrate SG rules.
        sg_rule_dbs = (session.query(sg_models.SecurityGroupRule).
                       options(lazyload('*')).all())
        for sg_rule_db in sg_rule_dbs:
            tenant_aname = mapper.project(session,
                                          sg_rule_db['tenant_id'])
            if sg_rule_db.get('remote_group_id'):
                # Expand the remote group into the member ports' fixed
                # IPs, filtered by the rule's ethertype.
                ip_version = 0
                if sg_rule_db['ethertype'] == 'IPv4':
                    ip_version = 4
                elif sg_rule_db['ethertype'] == 'IPv6':
                    ip_version = 6
                remote_ips = []
                sg_ports = (session.query(models_v2.Port).
                            join(sg_models.SecurityGroupPortBinding,
                                 sg_models.SecurityGroupPortBinding.
                                 port_id == models_v2.Port.id).
                            filter(sg_models.SecurityGroupPortBinding.
                                   security_group_id ==
                                   sg_rule_db['remote_group_id']).
                            options(lazyload('*')).all())
                for sg_port in sg_ports:
                    for fixed_ip in sg_port['fixed_ips']:
                        if ip_version == netaddr.IPAddress(
                                fixed_ip['ip_address']).version:
                            remote_ips.append(fixed_ip['ip_address'])
            else:
                # BUG FIX: the fallback was '' (a string); use [] so
                # remote_ips is consistently list-typed, matching the
                # remote-group branch above.
                remote_ips = ([sg_rule_db['remote_ip_prefix']]
                              if sg_rule_db['remote_ip_prefix'] else [])
            sg_rule_aim = aim_resource.SecurityGroupRule(
                tenant_name=tenant_aname,
                security_group_name=sg_rule_db['security_group_id'],
                security_group_subject_name='default',
                name=sg_rule_db['id'],
                direction=sg_rule_db['direction'],
                ethertype=sg_rule_db['ethertype'].lower(),
                ip_protocol=(sg_rule_db['protocol']
                             if sg_rule_db['protocol']
                             else 'unspecified'),
                remote_ips=remote_ips,
                from_port=(sg_rule_db['port_range_min']
                           if sg_rule_db['port_range_min']
                           else 'unspecified'),
                to_port=(sg_rule_db['port_range_max']
                         if sg_rule_db['port_range_max']
                         else 'unspecified'))
            aim.create(aim_ctx, sg_rule_aim, overwrite=True)
    alembic_util.msg(
        "Finished data migration for SGs and its rules.")
def do_apic_aim_security_group_migration(session):
    """Migrate Neutron security groups and their rules into AIM.

    Creates an AIM SecurityGroup (plus a 'default' subject) per Neutron
    SG, then one AIM SecurityGroupRule per SG rule; remote-group rules
    are expanded to the member ports' fixed IPs of the rule's ethertype.
    """
    alembic_util.msg("Starting data migration for SGs and its rules.")
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        # Migrate SG.
        sg_dbs = (session.query(sg_models.SecurityGroup).options(
            lazyload('*')).all())
        for sg_db in sg_dbs:
            alembic_util.msg("Migrating SG: %s" % sg_db)
            tenant_aname = mapper.project(session, sg_db['tenant_id'])
            sg_aim = aim_resource.SecurityGroup(
                tenant_name=tenant_aname, name=sg_db['id'],
                display_name=aim_utils.sanitize_display_name(
                    sg_db['name']))
            aim.create(aim_ctx, sg_aim, overwrite=True)
            # Always create this default subject
            sg_subject = aim_resource.SecurityGroupSubject(
                tenant_name=tenant_aname,
                security_group_name=sg_db['id'], name='default')
            aim.create(aim_ctx, sg_subject, overwrite=True)
        # Migrate SG rules.
        sg_rule_dbs = (session.query(sg_models.SecurityGroupRule).options(
            lazyload('*')).all())
        for sg_rule_db in sg_rule_dbs:
            tenant_aname = mapper.project(session,
                                          sg_rule_db['tenant_id'])
            if sg_rule_db.get('remote_group_id'):
                # Expand the remote group to member ports' fixed IPs of
                # the rule's ethertype.
                ip_version = 0
                if sg_rule_db['ethertype'] == 'IPv4':
                    ip_version = 4
                elif sg_rule_db['ethertype'] == 'IPv6':
                    ip_version = 6
                remote_ips = []
                sg_ports = (session.query(models_v2.Port).join(
                    sg_models.SecurityGroupPortBinding,
                    sg_models.SecurityGroupPortBinding.port_id ==
                    models_v2.Port.id).filter(
                        sg_models.SecurityGroupPortBinding.
                        security_group_id ==
                        sg_rule_db['remote_group_id']).options(
                            lazyload('*')).all())
                for sg_port in sg_ports:
                    for fixed_ip in sg_port['fixed_ips']:
                        if ip_version == netaddr.IPAddress(
                                fixed_ip['ip_address']).version:
                            remote_ips.append(fixed_ip['ip_address'])
            else:
                # BUG FIX: fallback was '' (string); use [] so
                # remote_ips keeps a consistent list type.
                remote_ips = ([sg_rule_db['remote_ip_prefix']]
                              if sg_rule_db['remote_ip_prefix'] else [])
            sg_rule_aim = aim_resource.SecurityGroupRule(
                tenant_name=tenant_aname,
                security_group_name=sg_rule_db['security_group_id'],
                security_group_subject_name='default',
                name=sg_rule_db['id'],
                direction=sg_rule_db['direction'],
                ethertype=sg_rule_db['ethertype'].lower(),
                ip_protocol=(sg_rule_db['protocol']
                             if sg_rule_db['protocol']
                             else 'unspecified'),
                remote_ips=remote_ips,
                from_port=(sg_rule_db['port_range_min']
                           if sg_rule_db['port_range_min']
                           else 'unspecified'),
                to_port=(sg_rule_db['port_range_max']
                         if sg_rule_db['port_range_max']
                         else 'unspecified'))
            aim.create(aim_ctx, sg_rule_aim, overwrite=True)
    alembic_util.msg("Finished data migration for SGs and its rules.")