def get_domains(aim_ctx, manager, create_doms=True):
    """Build AIM VMM and physical-domain resources from configuration.

    Reads the VMM and physical-domain dictionaries from the config
    module, builds monitored VMMPolicy/VMMDomain/PhysicalDomain
    resources for each entry, optionally persists them through the AIM
    manager (printing each created resource), and returns the domain
    resources that were built.

    :param aim_ctx: AIM context used for manager operations.
    :param manager: AIM manager used to create the resources.
    :param create_doms: when True, persist each resource with
        ``manager.create(..., overwrite=True)`` and print it; when
        False, only construct the resource objects.
    :returns: tuple ``(vmm_doms, phys_doms)`` of resource lists.
    """
    vmms = config.create_vmdom_dictionary()
    physdoms = config.create_physdom_dictionary()
    vmm_doms = []
    phys_doms = []
    if vmms:
        vmm_types = utils.KNOWN_VMM_TYPES
        # One monitored VMMPolicy per known VMM type.
        for type_ in vmm_types.values():
            res = resource.VMMPolicy(type=type_, monitored=True)
            if create_doms:
                print_resource(manager.create(aim_ctx, res, overwrite=True))
        # BUG FIX: dict.iteritems() is Python-2-only; items() behaves
        # the same here and works on both Python 2 and 3.
        for vmm_name, cfg in vmms.items():
            # Fall back to the OpenStack VMM type when unspecified.
            res = resource.VMMDomain(
                type=vmm_types.get(
                    cfg.get('apic_vmm_type', 'openstack').lower()),
                name=vmm_name, monitored=True)
            if create_doms:
                print_resource(manager.create(aim_ctx, res, overwrite=True))
            vmm_doms.append(res)
    for phys in physdoms:
        res = resource.PhysicalDomain(name=phys, monitored=True)
        if create_doms:
            print_resource(manager.create(aim_ctx, res, overwrite=True))
        phys_doms.append(res)
    return vmm_doms, phys_doms
def setUp(self):
    """Prepare a NAT strategy test with a baseline domain setup."""
    super(TestNatStrategyBase, self).setUp()
    self.mgr = aim_manager.AimManager()
    self.ns = self.strategy(self.mgr)
    self.ns.app_profile_name = 'myapp'
    # Seed AIM with one VMM policy, one VMM domain and one physical
    # domain that the NAT strategy tests expect to exist.
    baseline = (
        a_res.VMMPolicy(type='OpenStack'),
        a_res.VMMDomain(type='OpenStack', name='ostack'),
        a_res.PhysicalDomain(name='phys'),
    )
    for res in baseline:
        self.mgr.create(self.ctx, res)
def test_load_mappings(self):
    """Verify 'manager load-mappings' populates HostDomainMappingV2.

    The load-domains command invokes load-mappings, so we don't use it
    to create the domains -- we have to create them manually.
    """
    cfg_mappings = [{
        'host_name': '*', 'domain_name': 'phys',
        'domain_type': 'PhysDom'
    }, {
        'host_name': '*', 'domain_name': 'phys2',
        'domain_type': 'PhysDom'
    }, {
        'host_name': '*', 'domain_name': 'ostack',
        'domain_type': 'OpenStack'
    }, {
        'host_name': '*', 'domain_name': 'ostack2',
        'domain_type': 'OpenStack'
    }, {
        'host_name': '*', 'domain_name': 'vmware',
        'domain_type': 'VMware'
    }, {
        'host_name': '*', 'domain_name': 'vmware2',
        'domain_type': 'VMware'
    }]
    for mapping in cfg_mappings:
        # BUG FIX: compare strings with '==', not 'is' -- identity of
        # string literals is implementation-dependent.
        if mapping['domain_type'] == 'PhysDom':
            domain = resource.PhysicalDomain(name=mapping['domain_name'])
        else:
            domain = resource.VMMDomain(type=mapping['domain_type'],
                                        name=mapping['domain_name'])
        self.mgr.create(self.ctx, domain)
    # Run the load-mappings command, which populates
    # the HostDomainMappingV2 table using the domain
    # objects found in AIM
    self.run_command('manager load-mappings')
    mappings = self.mgr.find(self.ctx, infra.HostDomainMappingV2)
    db_mappings = [
        infra.HostDomainMappingV2(host_name=mapping['host_name'],
                                  domain_type=mapping['domain_type'],
                                  domain_name=mapping['domain_name'])
        for mapping in cfg_mappings
    ]
    self.assertEqual(sorted(db_mappings, key=lambda x: x.domain_name),
                     sorted(mappings, key=lambda x: x.domain_name))
def setUp(self):
    """Extend the base setup with additional VMM and physical domains."""
    super(TestNatStrategyVmmAndPhysDomains, self).setUp()
    # Add in VMware policy, another OpenStack VMM domain,
    # a VMware VMM domain, and another PhysDom.
    extra = (
        a_res.VMMPolicy(type='VMware'),
        a_res.VMMDomain(type='OpenStack', name='ostack2'),
        a_res.VMMDomain(type='VMware', name='vmware1'),
        a_res.PhysicalDomain(name='phys2'),
    )
    for res in extra:
        self.mgr.create(self.ctx, res)
    self.vmm_domains = [{'type': u'OpenStack', 'name': u'ostack'},
                        {'type': u'OpenStack', 'name': u'ostack2'},
                        {'type': u'VMware', 'name': u'vmware1'}]
    self.phys_domains = [{'name': 'phys'}, {'name': 'phys2'}]
def test_load_domains(self):
    """Exercise 'manager load-domains' plain, with --replace and --enforce.

    Checks that loading domains preserves pre-existing domains and EPGs,
    that --replace restores deleted configured domains, and that
    --enforce pushes the configured domains onto existing EPGs.
    """
    # create a VMM and PhysDom first
    pre_phys = resource.PhysicalDomain(name='pre-phys')
    pre_vmm = resource.VMMDomain(type='OpenStack', name='pre-vmm')
    ap = resource.ApplicationProfile(tenant_name='tn1', name='ap')
    pre_epg1 = resource.EndpointGroup(tenant_name='tn1',
                                      app_profile_name='ap',
                                      name='epg1')
    pre_epg2 = resource.EndpointGroup(tenant_name='tn1',
                                      app_profile_name='ap',
                                      name='epg2')
    self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
    self.mgr.create(self.ctx, ap)
    self.mgr.create(self.ctx, pre_phys)
    self.mgr.create(self.ctx, pre_vmm)
    self.mgr.create(self.ctx, pre_epg2)
    self.mgr.create(self.ctx, pre_epg1)
    self.run_command('manager load-domains')
    # Verify pre-existing domains are still there
    self.assertIsNotNone(self.mgr.get(self.ctx, pre_phys))
    self.assertIsNotNone(self.mgr.get(self.ctx, pre_vmm))
    # Also the Domains defined in the config files exist
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys2')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                  name='ostack')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                  name='ostack2')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                  name='vmware')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                  name='vmware2')))
    # EPGs are still empty (load-domains without --enforce does not
    # touch EPG domain associations)
    pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
    pre_epg2 = self.mgr.get(self.ctx, pre_epg2)
    self.assertEqual([], pre_epg1.vmm_domains)
    self.assertEqual([], pre_epg1.physical_domains)
    self.assertEqual([], pre_epg2.vmm_domains)
    self.assertEqual([], pre_epg2.physical_domains)
    # Delete one of them, and use the replace flag
    self.mgr.delete(self.ctx, resource.VMMDomain(type='OpenStack',
                                                 name='ostack2'))
    self.run_command('manager load-domains --replace')
    # Now only 2 Domains each exist
    self.assertEqual(4,
                     len(self.mgr.find(self.ctx, resource.VMMDomain)))
    self.assertEqual(2,
                     len(self.mgr.find(self.ctx, resource.PhysicalDomain)))
    # EPGs are still empty
    pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
    pre_epg2 = self.mgr.get(self.ctx, pre_epg2)
    self.assertEqual([], pre_epg1.vmm_domains)
    self.assertEqual([], pre_epg1.physical_domains)
    self.assertEqual([], pre_epg2.vmm_domains)
    self.assertEqual([], pre_epg2.physical_domains)
    # now update the current environment
    self.run_command('manager load-domains --replace --enforce')
    pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
    pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

    def get_vmm(type, name):
        # Helper: build a VMM domain reference dict.
        return {'type': type, 'name': name}

    def get_phys(name):
        # Helper: build a physical domain reference dict.
        return {'name': name}
    # NOTE(review): sorting lists of dicts relies on Python 2 ordering
    # semantics; under Python 3 this would need key= -- confirm runtime.
    self.assertEqual(
        sorted([
            get_vmm('OpenStack', 'ostack'),
            get_vmm('OpenStack', 'ostack2'),
            get_vmm('VMware', 'vmware'),
            get_vmm('VMware', 'vmware2')
        ]),
        sorted(pre_epg1.vmm_domains))
    self.assertEqual(sorted([get_phys('phys'), get_phys('phys2')]),
                     sorted(pre_epg1.physical_domains))
    self.assertEqual(
        sorted([
            get_vmm('OpenStack', 'ostack'),
            get_vmm('OpenStack', 'ostack2'),
            get_vmm('VMware', 'vmware'),
            get_vmm('VMware', 'vmware2')
        ]),
        sorted(pre_epg2.vmm_domains))
    self.assertEqual(sorted([get_phys('phys'), get_phys('phys2')]),
                     sorted(pre_epg2.physical_domains))
def test_load_domains(self):
    """Exercise 'manager load-domains' with the --no-mappings option.

    Runs the command plain, with --replace and --enforce, then re-runs
    it without --no-mappings to verify wildcard host domain mappings
    are created for every configured domain, and finally verifies that
    --enforce raises when host-specific mappings already exist.
    """
    # create a VMM and PhysDom first
    pre_phys = resource.PhysicalDomain(name='pre-phys')
    pre_vmm = resource.VMMDomain(type='OpenStack', name='pre-vmm')
    ap = resource.ApplicationProfile(tenant_name='tn1', name='ap')
    pre_epg1 = resource.EndpointGroup(
        tenant_name='tn1', app_profile_name='ap', name='epg1')
    pre_epg2 = resource.EndpointGroup(
        tenant_name='tn1', app_profile_name='ap', name='epg2')
    self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
    self.mgr.create(self.ctx, ap)
    self.mgr.create(self.ctx, pre_phys)
    self.mgr.create(self.ctx, pre_vmm)
    self.mgr.create(self.ctx, pre_epg2)
    self.mgr.create(self.ctx, pre_epg1)
    self.run_command('manager load-domains --no-mappings')
    # Verify pre-existing domains are still there
    self.assertIsNotNone(self.mgr.get(self.ctx, pre_phys))
    self.assertIsNotNone(self.mgr.get(self.ctx, pre_vmm))
    # Also the Domains defined in the config files exist
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys2')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                  name='ostack')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                  name='ostack2')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                  name='vmware')))
    self.assertIsNotNone(
        self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                  name='vmware2')))
    # EPGs are still empty
    pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
    pre_epg2 = self.mgr.get(self.ctx, pre_epg2)
    self.assertEqual([], pre_epg1.vmm_domains)
    self.assertEqual([], pre_epg1.physical_domains)
    self.assertEqual([], pre_epg2.vmm_domains)
    self.assertEqual([], pre_epg2.physical_domains)
    # Delete one of them, and use the replace flag
    self.mgr.delete(self.ctx, resource.VMMDomain(type='OpenStack',
                                                 name='ostack2'))
    self.run_command('manager load-domains --replace --no-mappings')
    # Now only 2 Domains each exist
    self.assertEqual(4, len(self.mgr.find(self.ctx,
                                          resource.VMMDomain)))
    self.assertEqual(2, len(self.mgr.find(self.ctx,
                                          resource.PhysicalDomain)))
    # EPGs are still empty
    pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
    pre_epg2 = self.mgr.get(self.ctx, pre_epg2)
    self.assertEqual([], pre_epg1.vmm_domains)
    self.assertEqual([], pre_epg1.physical_domains)
    self.assertEqual([], pre_epg2.vmm_domains)
    self.assertEqual([], pre_epg2.physical_domains)
    # now update the current environment
    cmd = 'manager load-domains --replace --enforce --no-mappings'
    self.run_command(cmd)
    pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
    pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

    def get_vmm(type, name):
        # Helper: build a VMM domain reference dict.
        return {'type': type, 'name': name}

    def get_phys(name):
        # Helper: build a physical domain reference dict.
        return {'name': name}
    self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                             get_vmm('OpenStack', 'ostack2'),
                             get_vmm('VMware', 'vmware'),
                             get_vmm('VMware', 'vmware2')]),
                     sorted(pre_epg1.vmm_domains))
    self.assertEqual(sorted([get_phys('phys'), get_phys('phys2')]),
                     sorted(pre_epg1.physical_domains))
    self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                             get_vmm('OpenStack', 'ostack2'),
                             get_vmm('VMware', 'vmware'),
                             get_vmm('VMware', 'vmware2')]),
                     sorted(pre_epg2.vmm_domains))
    self.assertEqual(sorted([get_phys('phys'), get_phys('phys2')]),
                     sorted(pre_epg2.physical_domains))
    # re-run the command, but populate the domain mappings
    self.run_command('manager load-domains --replace --enforce')
    pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
    pre_epg2 = self.mgr.get(self.ctx, pre_epg2)
    # The load-domains should create host domain mappings with
    # wildcard entries for every entry in the configuration file
    existing_mappings = [{'domain_type': 'PhysDom',
                          'host_name': '*',
                          'domain_name': 'phys'},
                         {'domain_type': 'PhysDom',
                          'host_name': '*',
                          'domain_name': 'phys2'},
                         {'domain_type': 'OpenStack',
                          'host_name': '*',
                          'domain_name': 'ostack'},
                         # BUG FIX: this entry previously repeated
                         # 'ostack', leaving the configured 'ostack2'
                         # mapping unverified.
                         {'domain_type': 'OpenStack',
                          'host_name': '*',
                          'domain_name': 'ostack2'},
                         {'domain_type': 'VMware',
                          'host_name': '*',
                          'domain_name': 'vmware'},
                         {'domain_type': 'VMware',
                          'host_name': '*',
                          'domain_name': 'vmware2'}]
    for mapping in existing_mappings:
        mapping = infra.HostDomainMappingV2(
            host_name=mapping['host_name'],
            domain_name=mapping['domain_name'],
            domain_type=mapping['domain_type'])
        # An exception from mgr.get or a None result both fail the
        # test directly; the old try/except + assertFalse(True)
        # wrapper added nothing.
        self.assertIsNotNone(self.mgr.get(self.ctx, mapping))
    self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                             get_vmm('OpenStack', 'ostack2'),
                             get_vmm('VMware', 'vmware'),
                             get_vmm('VMware', 'vmware2')]),
                     sorted(pre_epg1.vmm_domains))
    self.assertEqual(sorted([get_phys('phys'), get_phys('phys2')]),
                     sorted(pre_epg1.physical_domains))
    self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                             get_vmm('OpenStack', 'ostack2'),
                             get_vmm('VMware', 'vmware'),
                             get_vmm('VMware', 'vmware2')]),
                     sorted(pre_epg2.vmm_domains))
    self.assertEqual(sorted([get_phys('phys'), get_phys('phys2')]),
                     sorted(pre_epg2.physical_domains))
    # re-run the command, with host-specific domain mappings populated.
    # This should cause an exception
    self.mgr.create(self.ctx, infra.HostDomainMappingV2(
        host_name='host1',
        domain_name='ostack10',
        domain_type='OpenStack'))
    self.run_command('manager load-domains --enforce', raises=True)
def _test_load_mappings_preexisting_mappings(self, replace=False):
    """Check 'manager load-mappings' behavior with pre-existing mappings.

    The load-domains command invokes load-mappings, so we don't use it
    to create the domains -- we have to create them manually.

    :param replace: when True, pass --replace so only the configured
        mappings survive; otherwise pre-existing mappings are kept
        alongside the configured ones.
    """
    cfg_mappings = [{'domain_type': 'PhysDom', 'host_name': '*',
                     'domain_name': 'phys'},
                    {'domain_type': 'PhysDom', 'host_name': '*',
                     'domain_name': 'phys2'},
                    {'domain_type': 'OpenStack', 'host_name': '*',
                     'domain_name': 'ostack'},
                    {'domain_type': 'OpenStack', 'host_name': '*',
                     'domain_name': 'ostack2'},
                    {'domain_type': 'VMware', 'host_name': '*',
                     'domain_name': 'vmware'},
                    {'domain_type': 'VMware', 'host_name': '*',
                     'domain_name': 'vmware2'}]
    existing_mappings = [{'domain_type': 'PhysDom', 'host_name': '*',
                          'domain_name': 'phys3'},
                         {'domain_type': 'PhysDom', 'host_name': 'vm1',
                          'domain_name': 'phys4'},
                         {'domain_type': 'OpenStack', 'host_name': '*',
                          'domain_name': 'ostack3'},
                         {'domain_type': 'OpenStack', 'host_name': 'vm2',
                          'domain_name': 'ostack4'},
                         {'domain_type': 'VMware', 'host_name': '*',
                          'domain_name': 'vmware3'},
                         {'domain_type': 'VMware', 'host_name': 'vm3',
                          'domain_name': 'vmware4'}]
    for mapping in cfg_mappings:
        # BUG FIX: compare strings with '==', not 'is' -- identity of
        # string literals is implementation-dependent.
        if mapping['domain_type'] == 'PhysDom':
            domain = resource.PhysicalDomain(name=mapping['domain_name'],
                                             monitored=True)
        else:
            domain = resource.VMMDomain(type=mapping['domain_type'],
                                        name=mapping['domain_name'],
                                        monitored=True)
        self.mgr.create(self.ctx, domain)
    # Create some existing mappings, both host-specific
    # and wildcarded
    for mapping in existing_mappings:
        mapping_obj = infra.HostDomainMappingV2(
            host_name=mapping['host_name'],
            domain_name=mapping['domain_name'],
            domain_type=mapping['domain_type'])
        self.mgr.create(self.ctx, mapping_obj)
    # Run the load-mappings command, which populates
    # the HostDomainMappingV2 table using the contents
    # of the configuration file
    cmd = 'manager load-mappings'
    if replace:
        cmd += ' --replace'
    self.run_command(cmd)
    mappings = self.mgr.find(self.ctx, infra.HostDomainMappingV2)
    if replace:
        all_mappings = cfg_mappings
    else:
        all_mappings = existing_mappings + cfg_mappings
    db_mappings = [infra.HostDomainMappingV2(
        host_name=mapping['host_name'],
        domain_type=mapping['domain_type'],
        domain_name=mapping['domain_name'])
        for mapping in all_mappings]
    self.assertEqual(sorted(db_mappings, key=lambda x: x.domain_name),
                     sorted(mappings, key=lambda x: x.domain_name))
def fv_rs_dom_att_converter(object_dict, otype, helper,
                            source_identity_attributes,
                            destination_identity_attributes, to_aim=True):
    """Convert between ACI fvRsDomAtt objects and AIM EPG domain fields.

    ACI->AIM (to_aim=True): a single fvRsDomAtt becomes an EndpointGroup
    dict carrying either vmm_domains or physical_domains (plus the
    legacy *_names attributes). AIM->ACI (to_aim=False): an
    EndpointGroup fans out to one fvRsDomAtt per referenced domain.

    :param object_dict: attribute dict of the source object.
    :param otype: source object type.
    :param helper: converter helper metadata.
    :param source_identity_attributes: identity attrs of the source.
    :param destination_identity_attributes: identity attrs of the target.
    :param to_aim: conversion direction.
    :returns: list of converted objects (empty on malformed DN).
    """
    result = []
    if to_aim:
        # Converting a fvRsDomAtt into an EPG
        res_dict = {}
        try:
            # NOTE(review): 'id' shadows the builtin; kept unchanged
            # to preserve the code byte-for-byte.
            id = default_identity_converter(object_dict, 'fvRsDomAtt',
                                            helper, to_aim=True)
        except apic_client.DNManager.InvalidNameFormat:
            # Unparseable DN: nothing to convert.
            return []
        for index, attr in enumerate(destination_identity_attributes):
            res_dict[attr] = id[index]
        # fvRsDomAtt can be either referring to a physDomP or a vmmDomP
        # type
        try:
            dom_id = default_identity_converter({'dn': id[-1]}, 'vmmDomP',
                                                helper, to_aim=True)
            if dom_id[0] == aim_utils.OPENSTACK_VMM_TYPE:
                # Legacy attribute, kept in sync for OpenStack VMMs only.
                res_dict['openstack_vmm_domain_names'] = [dom_id[-1]]
            res_dict['vmm_domains'] = [{'type': dom_id[0],
                                        'name': dom_id[1]}]
        except apic_client.DNManager.InvalidNameFormat:
            # Not a VMM DN -- treat the target as a physical domain.
            dom_id = default_identity_converter({'dn': id[-1]}, 'physDomP',
                                                helper, to_aim=True)
            res_dict['physical_domain_names'] = [dom_id[0]]
            res_dict['physical_domains'] = [{'name': dom_id[0]}]
        result.append(default_to_resource(res_dict, helper, to_aim=True))
    else:
        # Converting an EndpointGroup into fvRsDomAtt objects.
        # Merge the legacy name-list attribute with the newer
        # domain-dict attribute, de-duplicating via set().
        for phys in set(object_dict['physical_domain_names'] +
                        [x['name']
                         for x in object_dict['physical_domains']]):
            # Get Physdom DN
            phys_dn = default_identity_converter(
                resource.PhysicalDomain(name=phys).__dict__,
                resource.PhysicalDomain, helper, aci_mo_type='physDomP',
                to_aim=False)[0]
            dn = default_identity_converter(object_dict, otype, helper,
                                            extra_attributes=[phys_dn],
                                            aci_mo_type='fvRsDomAtt',
                                            to_aim=False)[0]
            result.append(
                {'fvRsDomAtt': {
                    'attributes': {
                        'dn': dn,
                        'tDn': phys_dn
                    }
                }})
        # Convert OpenStack VMMs
        vmms_by_name = [(aim_utils.OPENSTACK_VMM_TYPE, x)
                        for x in object_dict['openstack_vmm_domain_names']]
        for vmm in set([(x['type'], x['name'])
                        for x in object_dict['vmm_domains']] +
                       vmms_by_name):
            # Get VMM DN
            vmm_dn = default_identity_converter(resource.VMMDomain(
                type=vmm[0], name=vmm[1]).__dict__, resource.VMMDomain,
                helper, aci_mo_type='vmmDomP', to_aim=False)[0]
            dn = default_identity_converter(object_dict, otype, helper,
                                            extra_attributes=[vmm_dn],
                                            aci_mo_type='fvRsDomAtt',
                                            to_aim=False)[0]
            dom_ref = {
                'fvRsDomAtt': {
                    'attributes': {
                        'dn': dn,
                        'tDn': vmm_dn,
                        'instrImedcy': 'lazy'
                    }
                }
            }
            if not aim_cfg.CONF.aim.disable_micro_segmentation:
                # Micro-segmentation enabled: request useg classification.
                dom_ref['fvRsDomAtt']['attributes'].update(
                    {'classPref': 'useg'})
            if vmm[0].lower() == aim_utils.VMWARE_VMM_TYPE.lower():
                # VMware domains use immediate instrumentation immediacy.
                dom_ref['fvRsDomAtt']['attributes'][
                    'instrImedcy'] = 'immediate'
            result.append(dom_ref)
    return result
def upgrade():
    """Alembic migration: rebuild VMM/physical domain tables.

    Phases: (1) create the new aim_vmm_policies table; (2) snapshot the
    rows of the legacy aim_vmm_domains / aim_physical_domains tables as
    monitored AIM resources; (3) drop and recreate both tables with the
    extended column set; (4) re-create the saved domains through the
    AIM manager.
    """
    op.create_table(
        'aim_vmm_policies',
        sa.Column('type', sa.String(64), nullable=False),
        sa.Column('aim_id', sa.Integer, autoincrement=True),
        sa.Column('display_name', sa.String(256), nullable=False,
                  default=''),
        sa.Column('monitored', sa.Boolean, nullable=False, default=False),
        sa.PrimaryKeyConstraint('aim_id'))

    session = api.get_session(expire_on_commit=True)
    # Lightweight table definitions describing only the legacy columns
    # we need to read before the tables are dropped.
    old_vmm_table = sa.Table('aim_vmm_domains', sa.MetaData(),
                             sa.Column('type', sa.String(64),
                                       nullable=False),
                             sa.Column('name', sa.String(64),
                                       nullable=False))
    old_phys_table = sa.Table('aim_physical_domains', sa.MetaData(),
                              sa.Column('name', sa.String(64),
                                        nullable=False))
    mgr = aim_manager.AimManager()
    ctx = context.AimContext(db_session=session)
    new_vmms = []
    new_phys = []
    with session.begin(subtransactions=True):
        # Snapshot existing rows as AIM resources (marked monitored)
        # so they can be re-created after the schema swap.
        for vmm in session.query(old_vmm_table).all():
            new_vmms.append(
                resource.VMMDomain(type=vmm.type, name=vmm.name,
                                   monitored=True))
        for phys in session.query(old_phys_table).all():
            new_phys.append(
                resource.PhysicalDomain(name=phys.name, monitored=True))

    op.drop_table('aim_vmm_domains')
    op.drop_table('aim_physical_domains')
    op.create_table(
        'aim_vmm_domains',
        sa.Column('type', sa.String(64), nullable=False),
        sa.Column('name', sa.String(64), nullable=False),
        sa.Column('aim_id', sa.Integer, autoincrement=True),
        sa.Column('display_name', sa.String(256), nullable=False,
                  default=''),
        sa.Column('monitored', sa.Boolean, nullable=False, default=False),
        sa.Column('enforcement_pref', sa.Enum('sw', 'hw', 'unknown')),
        sa.Column('mode', sa.Enum('default', 'n1kv', 'unknown', 'ovs',
                                  'k8s')),
        sa.Column('mcast_address', sa.String(64)),
        sa.Column('encap_mode', sa.Enum('unknown', 'vlan', 'vxlan')),
        sa.Column('pref_encap_mode', sa.Enum('unspecified', 'vlan',
                                             'vxlan')),
        sa.Column('vlan_pool_name', sa.String(64)),
        sa.Column('vlan_pool_type', sa.Enum('static', 'dynamic')),
        sa.Column('mcast_addr_pool_name', sa.String(64)),
        sa.PrimaryKeyConstraint('aim_id'),
        sa.UniqueConstraint('type', 'name',
                            name='uniq_aim_vmm_domains_identity'),
        sa.Index('idx_aim_vmm_domains_identity', 'type', 'name'))
    op.create_table(
        'aim_physical_domains',
        sa.Column('name', sa.String(64), nullable=False),
        sa.Column('aim_id', sa.Integer, autoincrement=True),
        sa.Column('display_name', sa.String(256), nullable=False,
                  default=''),
        sa.Column('monitored', sa.Boolean, nullable=False, default=False),
        sa.PrimaryKeyConstraint('aim_id'))

    with session.begin(subtransactions=True):
        # Re-create the snapshotted domains in the new schema via the
        # AIM manager.
        for obj in new_vmms + new_phys:
            mgr.create(ctx, obj)