    def test_tree_hooks_transactions(self):
        with mock.patch('aim.agent.aid.event_services.'
                        'rpc.AIDEventRpcApi._cast') as cast:
            tn = aim_res.Tenant(name='test_tree_hooks')
            ap = aim_res.ApplicationProfile(tenant_name='test_tree_hooks',
                                            name='ap')
            epg = aim_res.EndpointGroup(tenant_name='test_tree_hooks',
                                        app_profile_name='ap',
                                        name='epg',
                                        bd_name='some')

            tn1 = aim_res.Tenant(name='test_tree_hooks1')
            ap1 = aim_res.ApplicationProfile(tenant_name='test_tree_hooks1',
                                             name='ap')
            epg1 = aim_res.EndpointGroup(tenant_name='test_tree_hooks1',
                                         app_profile_name='ap',
                                         name='epg',
                                         bd_name='some')

            # This transaction will generate some action logs, which
            # will trigger a 'reconcile' event.
            with self.ctx.store.begin(subtransactions=True):
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn)
                    self.mgr.create(self.ctx, ap)
                    self.mgr.create(self.ctx, epg)
                self.assertEqual(0, cast.call_count)
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn1)
                    self.mgr.create(self.ctx, ap1)
                    self.mgr.create(self.ctx, epg1)
                self.assertEqual(0, cast.call_count)
            exp_calls = [mock.call(mock.ANY, 'reconcile', None)]
            self._check_call_list(exp_calls, cast)
            cast.reset_mock()

            # There are 2 tenants, so 2 transactions are involved here;
            # each transaction updates the trees, so 2 'serve' events are
            # generated.
            self.db_l.catch_up_with_action_log(self.ctx.store)
            exp_calls = [
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'serve', None)
            ]
            self._check_call_list(exp_calls, cast)
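
    # _check_call_list is provided by the test base class and not shown
    # here; a minimal, order-insensitive sketch of such a helper
    # (hypothetical reimplementation, not the project's actual code):
    def _check_call_list(self, expected, mocked, check_all=True):
        # Every expected call must have happened, in any order
        observed = list(mocked.call_args_list)
        for call in expected:
            self.assertTrue(call in observed,
                            'Call %s not found in %s' % (call, observed))
            observed.remove(call)
        if check_all:
            # No unexpected calls remain
            self.assertFalse(observed, 'Unexpected calls: %s' % observed)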
    def test_monitored_state_change(self):
        tn_name = 'test_monitored_state_change'
        tn_rn = 'tn-' + tn_name
        tn = aim_res.Tenant(name=tn_name, monitored=True)
        ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap',
                                        monitored=True)
        epg = aim_res.EndpointGroup(
            tenant_name=tn_name, app_profile_name='ap', name='epg',
            bd_name='some', monitored=True)
        self.mgr.create(self.ctx, tn)
        self.mgr.create(self.ctx, ap)
        self.mgr.create(self.ctx, epg)
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        # Create my own tree representation
        my_cfg_tree = tree.StructuredHashTree()
        my_mon_tree = tree.StructuredHashTree()
        self.db_l.tt_maker.update(my_mon_tree, [tn])
        # Mark their creation as successful
        self.mgr.set_resource_sync_synced(self.ctx, ap)
        self.mgr.set_resource_sync_synced(self.ctx, epg)
        self.db_l.tt_maker.update(my_mon_tree, [ap, epg])
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree)
        self.assertEqual(my_cfg_tree, cfg_tree)

        # Change ownership of the AP
        self.mgr.update(self.ctx, ap, monitored=False)
        my_mon_tree = tree.StructuredHashTree()
        # Equivalent to rebuilding the monitored tree with only the
        # tenant and the EPG
        self.db_l.tt_maker.update(my_mon_tree, [tn, epg])
        self.db_l.tt_maker.update(my_cfg_tree, [ap])
        # Refresh trees
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree,
                         'differences: %s' % my_mon_tree.diff(mon_tree))
        self.assertEqual(my_cfg_tree, cfg_tree)
        # Unset monitored on the EPG as well
        self.mgr.update(self.ctx, epg, monitored=False)
        my_mon_tree = tree.StructuredHashTree()
        self.db_l.tt_maker.update(my_mon_tree, [tn])
        self.db_l.tt_maker.update(my_cfg_tree, [epg])
        # Refresh trees
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree)
        self.assertEqual(my_cfg_tree, cfg_tree)
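
        # For reference, tree equality can also be verified through diff(),
        # which returns the node keys to add and remove to turn the argument
        # tree into the receiver; equal trees yield empty lists (the same
        # shape asserted in _test_sync_failed below):
        self.assertEqual({'add': [], 'remove': []},
                         my_cfg_tree.diff(cfg_tree))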
 def _get_nat_ap_epg(self, ctx, l3out):
     d_name = self._display_name(l3out)
     ap_name = getattr(self, 'app_profile_name', None) or l3out.name
     ap_name = self._scope_name_if_common(l3out.tenant_name, ap_name)
     ap_display_name = aim_utils.sanitize_display_name(ap_name or d_name)
     ap = resource.ApplicationProfile(tenant_name=l3out.tenant_name,
                                      name=ap_name,
                                      display_name=ap_display_name)
     epg = resource.EndpointGroup(
         tenant_name=ap.tenant_name,
         app_profile_name=ap.name,
         name='EXT-%s' % l3out.name,
         display_name=aim_utils.sanitize_display_name('EXT-%s' % d_name))
     return (ap, epg)
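
 # A plausible usage sketch for the helper above; the L3Outside values are
 # illustrative assumptions:
 #     l3out = resource.L3Outside(tenant_name='t1', name='l3out1')
 #     ap, epg = self._get_nat_ap_epg(ctx, l3out)
 #     # epg.name will be 'EXT-l3out1'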
    def test_tree_hooks_transactions(self):
        with mock.patch('aim.agent.aid.event_services.'
                        'rpc.AIDEventRpcApi._cast') as cast:
            tn = aim_res.Tenant(name='test_tree_hooks')
            ap = aim_res.ApplicationProfile(tenant_name='test_tree_hooks',
                                            name='ap')
            epg = aim_res.EndpointGroup(tenant_name='test_tree_hooks',
                                        app_profile_name='ap',
                                        name='epg',
                                        bd_name='some')

            tn1 = aim_res.Tenant(name='test_tree_hooks1')
            ap1 = aim_res.ApplicationProfile(tenant_name='test_tree_hooks1',
                                             name='ap')
            epg1 = aim_res.EndpointGroup(tenant_name='test_tree_hooks1',
                                         app_profile_name='ap',
                                         name='epg',
                                         bd_name='some')
            # Try a transaction
            with self.ctx.store.begin(subtransactions=True):
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn)
                    self.mgr.create(self.ctx, ap)
                    self.mgr.create(self.ctx, epg)
                self.assertEqual(0, cast.call_count)
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn1)
                    self.mgr.create(self.ctx, ap1)
                    self.mgr.create(self.ctx, epg1)
                self.assertEqual(0, cast.call_count)
            # Trees are now saved one at a time, so serve is called twice
            exp_calls = [
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'reconcile', None)
            ]
            self._check_call_list(exp_calls, cast)

 def _get_l3out_objects(self, l3out_name=None, l3out_display_name=None,
                        nat_vrf_name=None, vmm_domains=None,
                        phys_domains=None):
     name = 'EXT-%s' % (l3out_name or 'o1')
     d_name = 'EXT-%s' % (l3out_display_name or 'OUT')
     nat_vrf = a_res.VRF(tenant_name='t1', name=name, display_name=d_name)
     if vmm_domains is not None:
         vmm_doms = vmm_domains
     else:
         vmm_doms = self.vmm_domains
     if phys_domains is not None:
         phys_doms = phys_domains
     else:
         phys_doms = self.phys_domains
     return ([
         a_res.Filter(tenant_name='t1', name=name,
                      display_name=d_name),
         a_res.FilterEntry(tenant_name='t1', filter_name=name,
                           name='Any', display_name='Any'),
         a_res.Contract(tenant_name='t1', name=name,
                        display_name=d_name),
         a_res.ContractSubject(tenant_name='t1', contract_name=name,
                               name='Allow', display_name='Allow',
                               bi_filters=[name]),
         a_res.BridgeDomain(tenant_name='t1', name=name,
                            display_name=d_name,
                            vrf_name=nat_vrf_name or name,
                            limit_ip_learn_to_subnets=True,
                            l3out_names=[l3out_name or 'o1']),
         a_res.ApplicationProfile(tenant_name='t1', name='myapp',
                                  display_name='myapp'),
         a_res.EndpointGroup(tenant_name='t1', app_profile_name='myapp',
                             name=name, display_name=d_name,
                             bd_name=name,
                             provided_contract_names=[name],
                             consumed_contract_names=[name],
                             # NOTE(ivar): Need to keep both VMM
                             # representations since a GET on the EPG
                             # will also return the domain name list
                             # for backward compatibility
                             openstack_vmm_domain_names=[dom['name']
                                                         for dom in vmm_doms
                                                         if dom['type'] ==
                                                         'OpenStack'],
                             physical_domain_names=[dom['name']
                                                    for dom in phys_doms],
                             vmm_domains=vmm_doms,
                             physical_domains=phys_doms)] +
             ([nat_vrf] if nat_vrf_name is None else []))
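
 # Usage sketch: with default arguments the helper returns the seven NAT
 # plumbing objects plus the NAT VRF (eight in total), while passing
 # nat_vrf_name omits the VRF:
 #     objs = self._get_l3out_objects()                   # 8 objects
 #     objs = self._get_l3out_objects(nat_vrf_name='v1')  # 7, no NAT VRF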
Example #6
    def ensure_tenant(self, plugin_context, tenant_id):
        LOG.debug("APIC AIM MD ensuring tenant_id: %s", tenant_id)

        self.project_name_cache.ensure_project(tenant_id)

        # TODO(rkukura): Move the following to calls made from
        # precommit methods so AIM Tenants, ApplicationProfiles, and
        # Filters are [re]created whenever needed.
        session = plugin_context.session
        with session.begin(subtransactions=True):
            project_name = self.project_name_cache.get_project_name(tenant_id)
            tenant_aname = self.name_mapper.tenant(session, tenant_id,
                                                   project_name)
            LOG.debug(
                "Mapped tenant_id %(id)s with name %(name)s to "
                "%(aname)s", {
                    'id': tenant_id,
                    'name': project_name,
                    'aname': tenant_aname
                })

            aim_ctx = aim_context.AimContext(session)

            tenant = aim_resource.Tenant(name=tenant_aname)
            if not self.aim.get(aim_ctx, tenant):
                self.aim.create(aim_ctx, tenant)

            ap = aim_resource.ApplicationProfile(tenant_name=tenant_aname,
                                                 name=AP_NAME)
            if not self.aim.get(aim_ctx, ap):
                self.aim.create(aim_ctx, ap)

            filter = aim_resource.Filter(tenant_name=tenant_aname,
                                         name=ANY_FILTER_NAME,
                                         display_name='Any Filter')
            if not self.aim.get(aim_ctx, filter):
                self.aim.create(aim_ctx, filter)

            entry = aim_resource.FilterEntry(tenant_name=tenant_aname,
                                             filter_name=ANY_FILTER_NAME,
                                             name=ANY_FILTER_ENTRY_NAME,
                                             display_name='Any FilterEntry')
            if not self.aim.get(aim_ctx, entry):
                self.aim.create(aim_ctx, entry)
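
    # The repeated get-then-create idiom above could be factored into a
    # small helper; a minimal sketch (hypothetical _ensure method, not part
    # of the driver):
    def _ensure(self, aim_ctx, res):
        # Create the AIM resource only if it does not already exist
        if not self.aim.get(aim_ctx, res):
            self.aim.create(aim_ctx, res)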
 def test_tree_hooks(self):
     with mock.patch('aim.agent.aid.event_services.'
                     'rpc.AIDEventRpcApi._cast') as cast:
         tn_name = 'test_tree_hooks'
         tn_rn = 'tn-' + tn_name
         tn = aim_res.Tenant(name='test_tree_hooks_2')
         ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap')
         epg = aim_res.EndpointGroup(tenant_name=tn_name,
                                     app_profile_name='ap',
                                     name='epg',
                                     bd_name='some')
         # Add the Tenant
         self.mgr.create(self.ctx, aim_res.Tenant(name=tn_name))
         # Creating a tenant also causes an action log to be created,
         # and consequently a 'reconcile' call
         exp_calls = [
             mock.call(mock.ANY, 'serve', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         self.mgr.create(self.ctx, tn)
         cast.reset_mock()
         self.mgr.create(self.ctx, ap)
         self.mgr.create(self.ctx, epg)
         # Creating the AP will create the tenant tree; creating the
         # EPG will modify it
         exp_calls = [
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         cast.reset_mock()
         self.mgr.update(self.ctx, epg, bd_name='bd2')
         exp_calls = [
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         cast.reset_mock()
         self.tt_mgr.delete_by_root_rn(self.ctx, tn_rn)
         cast.assert_called_once_with(mock.ANY, 'serve', None)
Example #8
    def test_push_resources(self):
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
        ap = self._get_example_aci_app_profile(dn='uni/tn-t1/ap-a1')
        ap_aim = resource.ApplicationProfile(tenant_name='t1', name='a1')
        epg = self._get_example_aci_epg(
            dn='uni/tn-t1/ap-a1/epg-test')
        fault = self._get_example_aci_fault(
            dn='uni/tn-t1/ap-a1/epg-test/fault-951')
        fault_aim = aim_status.AciFault(
            fault_code='951',
            external_identifier='uni/tn-t1/ap-a1/epg-test/fault-951')
        self.universe.push_resources(self.ctx, {'create': [ap, epg, fault],
                                                'delete': []})
        res = aim_mgr.get(self.ctx, resource.EndpointGroup(
            tenant_name='t1', app_profile_name='a1', name='test'))
        status = aim_mgr.get_status(self.ctx, res)
        self.assertEqual(1, len(status.faults))
        self.assertEqual('951', status.faults[0].fault_code)

        # Unset fault
        self.universe.push_resources(self.ctx, {'create': [],
                                                'delete': [fault_aim]})
        status = aim_mgr.get_status(self.ctx, res)
        self.assertEqual(0, len(status.faults))

        # Create a subject, and faults for the subject-to-filter
        # relations
        filter_objs = [
            {'vzBrCP': {'attributes': {'dn': 'uni/tn-t1/brc-c'}}},
            {'vzSubj': {'attributes': {'dn': 'uni/tn-t1/brc-c/subj-s2'}}},
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/intmnl/rsfiltAtt-f/fault-F1111',
                code='F1111'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/outtmnl/rsfiltAtt-g/fault-F1112',
                code='F1112'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/rssubjFiltAtt-h/fault-F1113',
                code='F1113')]
        self.universe.push_resources(self.ctx, {'create': filter_objs,
                                                'delete': []})
        subj = resource.ContractSubject(tenant_name='t1', contract_name='c',
                                        name='s2')
        status = aim_mgr.get_status(self.ctx, subj)
        self.assertEqual(2, len(status.faults))
        self.assertEqual(['F1111', 'F1112'],
                         [f.fault_code for f in status.faults])

        # delete filter faults
        self.universe.push_resources(self.ctx, {'create': [],
                                                'delete': status.faults})
        status = aim_mgr.get_status(self.ctx, subj)
        self.assertEqual(0, len(status.faults))
        # Managed epg
        managed_epg = resource.EndpointGroup(
            tenant_name='t1', app_profile_name='a1', name='managed')
        aim_mgr.create(self.ctx, managed_epg)
        # The EPG cannot be deleted since it is managed
        self.universe.push_resources(
            self.ctx, {'create': [], 'delete': [ap_aim, managed_epg]})
        res = aim_mgr.get(self.ctx, managed_epg)
        if self.monitor_universe:
            self.assertIsNotNone(res)
            aim_mgr.delete(self.ctx, managed_epg)
        else:
            self.assertIsNone(res)
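
        # Note the dual representations used above: ap, epg and fault are
        # ACI-form dicts passed under 'create', while ap_aim, fault_aim and
        # managed_epg are AIM-form objects passed under 'delete'. The
        # push_resources payload is always:
        #     {'create': [...], 'delete': [...]}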
    def _test_sync_failed(self, monitored=False):
        tn_name = 'tn1'
        tn_rn = 'tn-' + tn_name
        tn = aim_res.Tenant(name=tn_name, monitored=monitored)
        ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap',
                                        monitored=monitored)
        epg = aim_res.EndpointGroup(
            tenant_name=tn_name, app_profile_name='ap', name='epg',
            monitored=monitored, bd_name='some')
        epg2 = aim_res.EndpointGroup(
            tenant_name=tn_name, app_profile_name='ap', name='epg2',
            monitored=monitored, bd_name='some')
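        # The tree of the opposite type is expected to stay empty: for
        # monitored resources the config tree stays empty, and vice versa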
        empty_map = {True: tree_manager.CONFIG_TREE,
                     False: tree_manager.MONITORED_TREE}

        exp_tree = tree.StructuredHashTree()
        exp_empty_tree = tree.StructuredHashTree()
        # Add Tenant, AP and EPGs
        tn = self.mgr.create(self.ctx, tn)
        self.mgr.set_resource_sync_synced(self.ctx, tn)
        ap = self.mgr.create(self.ctx, ap)
        self.mgr.set_resource_sync_synced(self.ctx, ap)
        epg2 = self.mgr.create(self.ctx, epg2)
        self.mgr.set_resource_sync_synced(self.ctx, epg2)
        epg = self.mgr.create(self.ctx, epg)
        self.mgr.set_resource_sync_synced(self.ctx, epg)
        # Set EPG status to delete error
        self.mgr.set_resource_sync_error(self.ctx, epg)
        # Get the trees
        empty_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[monitored])
        configured_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[not monitored])

        epg._error = True
        self.db_l.tt_maker.update(exp_tree, [tn, ap, epg2, epg])
        self.assertEqual({'add': [], 'remove': []},
                         exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))
        # Even if something changes in the EPG, the difference will
        # still be empty
        epg.display_name = 'somethingelse'
        self.db_l.tt_maker.update(exp_tree, [epg])
        self.assertEqual({'add': [], 'remove': []},
                         exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))

        # Update the EPG; it will be re-created. Note that no
        # attributes are actually changed
        epg = self.mgr.update(self.ctx, epg, bd_name='some')
        self.mgr.set_resource_sync_synced(self.ctx, epg)
        # Fix the expected tree as well
        self.db_l.tt_maker.update(exp_tree, [epg])
        # Get the trees
        empty_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[monitored])
        configured_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[not monitored])
        self.assertEqual(exp_tree, configured_tree)
        self.assertEqual(exp_empty_tree, empty_tree)

        # Modifying the EPG will make the difference visible
        epg.display_name = 'somethingelse'
        self.db_l.tt_maker.update(exp_tree, [epg])
        self.assertEqual(
            {'add': [('fvTenant|tn1', 'fvAp|ap', 'fvAEPg|epg')],
             'remove': []}, exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))

        # Set the AP in error state; this will affect all its children
        self.mgr.set_resource_sync_error(self.ctx, ap)
        empty_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[monitored])
        configured_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[not monitored])
        # This time around, the AP and both its EPGs are in error state
        ap._error = True
        epg._error = True
        epg2._error = True
        self.db_l.tt_maker.update(exp_tree, [ap, epg, epg2])
        self.assertEqual({'add': [], 'remove': []},
                         exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))

        # All the objects are in failed state
        for obj in [ap, epg2, epg]:
            self.assertEqual(aim_status.AciStatus.SYNC_FAILED,
                             self.mgr.get_status(self.ctx, obj).sync_status)

        if not monitored:
            # Changing sync status of the EPG will bring everything back
            self.mgr.set_resource_sync_pending(self.ctx, epg)
            epg = self.mgr.get(self.ctx, epg)
            # All the objects are in pending state
            for obj in [ap, epg2, epg]:
                self.assertEqual(
                    aim_status.AciStatus.SYNC_PENDING,
                    self.mgr.get_status(self.ctx, obj).sync_status)
            empty_tree = self.tt_mgr.get(
                self.ctx, tn_rn, tree=empty_map[monitored])
            configured_tree = self.tt_mgr.get(
                self.ctx, tn_rn, tree=empty_map[not monitored])
            del ap._error
            del epg2._error
            self.db_l.tt_maker.update(exp_tree, [ap, epg, epg2])

            self.assertEqual(exp_tree, configured_tree)
            self.assertEqual(exp_empty_tree, empty_tree)
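
    # Plausible concrete entry points for the helper above (the usual
    # parametrization pattern; method names are assumptions):
    def test_sync_failed(self):
        self._test_sync_failed(monitored=False)

    def test_sync_failed_monitored(self):
        self._test_sync_failed(monitored=True)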
    def test_load_domains(self):
        # create a VMM and PhysDom first
        pre_phys = resource.PhysicalDomain(name='pre-phys')
        pre_vmm = resource.VMMDomain(type='OpenStack', name='pre-vmm')
        ap = resource.ApplicationProfile(tenant_name='tn1', name='ap')
        pre_epg1 = resource.EndpointGroup(tenant_name='tn1',
                                          app_profile_name='ap',
                                          name='epg1')
        pre_epg2 = resource.EndpointGroup(tenant_name='tn1',
                                          app_profile_name='ap',
                                          name='epg2')
        self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
        self.mgr.create(self.ctx, ap)
        self.mgr.create(self.ctx, pre_phys)
        self.mgr.create(self.ctx, pre_vmm)
        self.mgr.create(self.ctx, pre_epg2)
        self.mgr.create(self.ctx, pre_epg1)
        self.run_command('manager load-domains')
        # Verify pre-existing domains are still there
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_phys))
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_vmm))
        # The domains defined in the config files also exist
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='OpenStack', name='ostack')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='OpenStack', name='ostack2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='VMware', name='vmware')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='VMware', name='vmware2')))
        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # Delete one of them, and use the replace flag
        self.mgr.delete(self.ctx,
                        resource.VMMDomain(type='OpenStack', name='ostack2'))
        self.run_command('manager load-domains --replace')

        # Now only the configured domains exist: 4 VMM and 2 physical
        self.assertEqual(4, len(self.mgr.find(self.ctx, resource.VMMDomain)))
        self.assertEqual(2,
                         len(self.mgr.find(self.ctx, resource.PhysicalDomain)))

        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # now update the current environment
        self.run_command('manager load-domains --replace --enforce')
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        def get_vmm(type, name):
            return {'type': type, 'name': name}

        def get_phys(name):
            return {'name': name}

        self.assertEqual(
            sorted([
                get_vmm('OpenStack', 'ostack'),
                get_vmm('OpenStack', 'ostack2'),
                get_vmm('VMware', 'vmware'),
                get_vmm('VMware', 'vmware2')
            ]), sorted(pre_epg1.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg1.physical_domains))
        self.assertEqual(
            sorted([
                get_vmm('OpenStack', 'ostack'),
                get_vmm('OpenStack', 'ostack2'),
                get_vmm('VMware', 'vmware'),
                get_vmm('VMware', 'vmware2')
            ]), sorted(pre_epg2.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg2.physical_domains))
Example #11
    def test_load_domains(self):
        # create a VMM and PhysDom first
        pre_phys = resource.PhysicalDomain(name='pre-phys')
        pre_vmm = resource.VMMDomain(type='OpenStack', name='pre-vmm')
        ap = resource.ApplicationProfile(tenant_name='tn1', name='ap')
        pre_epg1 = resource.EndpointGroup(
            tenant_name='tn1', app_profile_name='ap', name='epg1')
        pre_epg2 = resource.EndpointGroup(
            tenant_name='tn1', app_profile_name='ap', name='epg2')
        self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
        self.mgr.create(self.ctx, ap)
        self.mgr.create(self.ctx, pre_phys)
        self.mgr.create(self.ctx, pre_vmm)
        self.mgr.create(self.ctx, pre_epg2)
        self.mgr.create(self.ctx, pre_epg1)
        self.run_command('manager load-domains --no-mappings')
        # Verify pre-existing domains are still there
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_phys))
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_vmm))
        # The domains defined in the config files also exist
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                      name='ostack')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                      name='ostack2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                      name='vmware')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                      name='vmware2')))
        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # Delete one of them, and use the replace flag
        self.mgr.delete(self.ctx, resource.VMMDomain(type='OpenStack',
                                                     name='ostack2'))
        self.run_command('manager load-domains --replace --no-mappings')

        # Now only the configured domains exist: 4 VMM and 2 physical
        self.assertEqual(4, len(self.mgr.find(self.ctx, resource.VMMDomain)))
        self.assertEqual(2, len(self.mgr.find(self.ctx,
                                              resource.PhysicalDomain)))

        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # now update the current environment
        cmd = 'manager load-domains --replace --enforce --no-mappings'
        self.run_command(cmd)
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        def get_vmm(type, name):
            return {'type': type, 'name': name}

        def get_phys(name):
            return {'name': name}

        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg1.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg1.physical_domains))
        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg2.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg2.physical_domains))

        # Re-run the command, this time populating the domain mappings
        self.run_command('manager load-domains --replace --enforce')

        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        # load-domains should create host domain mappings with
        # wildcard entries for every entry in the configuration file
        existing_mappings = [{'domain_type': 'PhysDom',
                              'host_name': '*',
                              'domain_name': 'phys'},
                             {'domain_type': 'PhysDom',
                              'host_name': '*',
                              'domain_name': 'phys2'},
                             {'domain_type': 'OpenStack',
                              'host_name': '*',
                              'domain_name': 'ostack'},
                             {'domain_type': 'OpenStack',
                              'host_name': '*',
                              'domain_name': 'ostack2'},
                             {'domain_type': 'VMware',
                              'host_name': '*',
                              'domain_name': 'vmware'},
                             {'domain_type': 'VMware',
                              'host_name': '*',
                              'domain_name': 'vmware2'}]
        for mapping in existing_mappings:
            mapping = infra.HostDomainMappingV2(
                host_name=mapping['host_name'],
                domain_name=mapping['domain_name'],
                domain_type=mapping['domain_type'])
            try:
                self.assertIsNotNone(self.mgr.get(self.ctx, mapping))
            except Exception:
                self.fail('Host domain mapping %s not found' % mapping)

        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg1.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg1.physical_domains))
        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg2.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg2.physical_domains))

        # Re-run the command with host-specific domain mappings
        # populated; this should cause an exception
        self.mgr.create(self.ctx, infra.HostDomainMappingV2(
            host_name='host1',
            domain_name='ostack10',
            domain_type='OpenStack'))
        self.run_command('manager load-domains --enforce', raises=True)
Example #12
 @classmethod
 def _get_example_aim_app_profile(cls, **kwargs):
     example = resource.ApplicationProfile(
         tenant_name='test-tenant', name='test')
     example.__dict__.update(kwargs)
     return example
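
 # Usage sketch: any attribute can be overridden through kwargs, e.g.
 # (illustrative value):
 #     ap = cls._get_example_aim_app_profile(display_name='my-display')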