    def test_delete_ext_net_with_vrf(self):
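        # Connect two VRFs to the external network, then delete it; the
        # stage checks verify the contract plumbing state at each step.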
        l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                                display_name='OUT')
        ext_net = a_res.ExternalNetwork(
            tenant_name='t1', l3out_name='o1', name='inet1',
            display_name='INET1')
        self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf1_tenant_name))
        self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf2_tenant_name))
        self.ns.create_l3outside(self.ctx, l3out)
        self.ns.create_external_network(self.ctx, ext_net)
        self.ns.update_external_cidrs(self.ctx, ext_net,
                                      ['20.20.20.0/24', '50.50.0.0/16'])

        # Connect vrf1 & vrf2 to ext_net with external-subnet
        vrf1 = a_res.VRF(tenant_name=self.vrf1_tenant_name, name='vrf1',
                         display_name='VRF1')
        self.mgr.create(self.ctx, vrf1)
        if self.fix_l3out_vrf:
            self.mgr.update(self.ctx, l3out, vrf_name=vrf1.name)
        ext_net.provided_contract_names = ['p1_vrf1', 'p2_vrf1']
        ext_net.consumed_contract_names = ['c1_vrf1', 'c2_vrf1']
        self.ns.connect_vrf(self.ctx, ext_net, vrf1)

        vrf2 = a_res.VRF(tenant_name=self.vrf2_tenant_name, name='vrf2',
                         display_name='VRF2')
        self.mgr.create(self.ctx, vrf2)
        if self.fix_l3out_vrf:
            self.mgr.update(self.ctx, l3out, vrf_name=vrf2.name)
        ext_net.provided_contract_names = ['p1_vrf2', 'p2_vrf2']
        ext_net.consumed_contract_names = ['c1_vrf2', 'c2_vrf2']
        self.ns.connect_vrf(self.ctx, ext_net, vrf2)
        self._check_delete_ext_net_with_vrf('stage1')

        self.ns.delete_external_network(self.ctx, ext_net)
        self._check_delete_ext_net_with_vrf('stage2')
    def test_cleanup_state(self, tree_type=tree_manager.CONFIG_TREE):
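        # cleanup_state must refuse to clear a tenant that still has
        # resources/faults, and succeed once they are removed.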
        tree_mgr = tree_manager.HashTreeManager()
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
        bd1 = resource.BridgeDomain(tenant_name='t1',
                                    name='bd1',
                                    display_name='somestuff',
                                    vrf_name='vrf')
        bd1_fault = aim_status.AciFault(
            fault_code='901',
            external_identifier='uni/tn-t1/BD-bd1/fault-901',
            description='failure901')

        aim_mgr.create(self.ctx, bd1)
        aim_mgr.set_fault(self.ctx, bd1, bd1_fault)
        self.assertRaises(Exception, self.universe.cleanup_state, 'tn-t1')

        trees = tree_mgr.find(self.ctx, tree=tree_type)
        # Tenant still there, so its tree still exists.
        self.assertEqual(1, len(trees))
        aim_mgr.clear_fault(self.ctx, bd1_fault)
        aim_mgr.delete(self.ctx, resource.Tenant(name='t1'), cascade=True)
        self.universe.cleanup_state(self.ctx, 'tn-t1')
        trees = tree_mgr.find(self.ctx, tree=tree_type)
        self.assertEqual(0, len(trees))
 def test_delete_all_trees(self):
     self.mgr.create(self.ctx, aim_res.Tenant(name='common'))
     self.mgr.create(self.ctx, aim_res.Tenant(name='tn1'))
     self.mgr.create(self.ctx, aim_res.Tenant(name='tn2'))
     self.assertTrue(len(self.tt_mgr.find(self.ctx)) > 0)
     self.tt_mgr.delete_all(self.ctx)
     self.assertEqual(0, len(self.tt_mgr.find(self.ctx)))
    def test_connect_vrfs(self):
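        # Exercise connect/disconnect of two VRFs against one external
        # network; connecting the same VRF twice must be a no-op.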
        l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                                display_name='OUT')
        ext_net = a_res.ExternalNetwork(
            tenant_name='t1', l3out_name='o1', name='inet1',
            display_name='INET1')
        self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf1_tenant_name))
        self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf2_tenant_name))
        self.ns.create_l3outside(self.ctx, l3out)
        self.ns.create_external_network(self.ctx, ext_net)
        self.ns.update_external_cidrs(self.ctx, ext_net,
                                      ['20.20.20.0/24', '50.50.0.0/16'])

        # connect vrf_1
        vrf1 = a_res.VRF(tenant_name=self.vrf1_tenant_name, name='vrf1',
                         display_name='VRF1')
        if self.vrf1_tenant_name != self.bd1_tenant_name:
            self.mgr.create(self.ctx, a_res.Tenant(name='dept1'))
        if self.fix_l3out_vrf:
            self.mgr.update(self.ctx, l3out, vrf_name=vrf1.name)
        bd1 = a_res.BridgeDomain(tenant_name=self.bd1_tenant_name, name='bd1',
                                 limit_ip_learn_to_subnets=True,
                                 vrf_name='vrf1')
        self.mgr.create(self.ctx, vrf1)
        self.mgr.create(self.ctx, bd1)
        ext_net.provided_contract_names = ['p1_vrf1', 'p2_vrf1']
        ext_net.consumed_contract_names = ['c1_vrf1', 'c2_vrf1']
        self.ns.connect_vrf(self.ctx, ext_net, vrf1)
        connected_vrfs = self.ns.read_vrfs(self.ctx, ext_net)
        self.assertEqual(vrf1, connected_vrfs[0])
        self._check_connect_vrfs('stage1')

        # connect vrf_1 again - should be no-op
        self.ns.connect_vrf(self.ctx, ext_net, vrf1)
        self._check_connect_vrfs('stage1')

        # connect vrf_2
        vrf2 = a_res.VRF(tenant_name=self.vrf2_tenant_name, name='vrf2',
                         display_name='VRF2')
        bd2 = a_res.BridgeDomain(tenant_name=self.vrf2_tenant_name, name='bd2',
                                 limit_ip_learn_to_subnets=True,
                                 vrf_name='vrf2')
        self.mgr.create(self.ctx, vrf2)
        self.mgr.create(self.ctx, bd2)
        ext_net.provided_contract_names = ['p1_vrf2', 'p2_vrf2']
        ext_net.consumed_contract_names = ['c1_vrf2', 'c2_vrf2']
        if self.fix_l3out_vrf:
            self.mgr.update(self.ctx, l3out, vrf_name=vrf2.name)
        self.ns.connect_vrf(self.ctx, ext_net, vrf2)
        self._check_connect_vrfs('stage2')

        # disconnect vrf_1
        self.ns.disconnect_vrf(self.ctx, ext_net, vrf1)
        self._check_connect_vrfs('stage3')

        # disconnect vrf_2
        self.ns.disconnect_vrf(self.ctx, ext_net, vrf2)
        self._check_connect_vrfs('stage4')
    def test_external_network_pre(self):
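        # Pre-existing (monitored) external network: creation adds the
        # EXT-o1 contract next to the original ones, deletion removes
        # only what was added.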
        self.mgr.create(self.ctx, a_res.Tenant(name='t1'))
        l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                                display_name='OUT',
                                monitored=True)
        self.mgr.create(self.ctx, l3out)
        self.ns.create_l3outside(self.ctx, l3out)

        ext_net = a_res.ExternalNetwork(
            tenant_name='t1', l3out_name='o1', name='inet1',
            display_name='INET1',
            monitored=True,
            provided_contract_names=['foo'],
            consumed_contract_names=['bar'])
        self.mgr.create(self.ctx, ext_net)

        self.ns.create_external_network(self.ctx, ext_net)
        ext_net.provided_contract_names.append('EXT-o1')
        ext_net.consumed_contract_names.append('EXT-o1')
        self._verify(present=[ext_net])

        self.ns.delete_external_network(self.ctx, ext_net)
        ext_net.provided_contract_names = ['foo']
        ext_net.consumed_contract_names = ['bar']
        self._verify(present=[ext_net])
    def test_vrf_contract_update(self):
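        # Re-calling connect_vrf with different contract names should
        # update (or clear) the plumbed contracts.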
        l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                                display_name='OUT')
        ext_net = a_res.ExternalNetwork(
            tenant_name='t1', l3out_name='o1', name='inet1',
            display_name='INET1')
        self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf1_tenant_name))
        self.ns.create_l3outside(self.ctx, l3out)
        self.ns.create_external_network(self.ctx, ext_net)

        vrf1 = a_res.VRF(tenant_name=self.vrf1_tenant_name, name='vrf1',
                         display_name='VRF1')
        self.mgr.create(self.ctx, vrf1)
        if self.fix_l3out_vrf:
            self.mgr.update(self.ctx, l3out, vrf_name=vrf1.name)
        ext_net.provided_contract_names = ['p1_vrf1', 'p2_vrf1']
        ext_net.consumed_contract_names = ['c1_vrf1', 'c2_vrf1']

        self.ns.connect_vrf(self.ctx, ext_net, vrf1)
        self._check_vrf_contract_update('stage1')

        # update contracts
        ext_net.provided_contract_names = ['arp', 'p2_vrf1']
        ext_net.consumed_contract_names = ['arp', 'c2_vrf1']
        self.ns.connect_vrf(self.ctx, ext_net, vrf1)
        self._check_vrf_contract_update('stage2')

        # unset contracts
        ext_net.provided_contract_names = []
        ext_net.consumed_contract_names = []
        self.ns.connect_vrf(self.ctx, ext_net, vrf1)
        self._check_vrf_contract_update('stage3')
    def test_external_subnet_update(self):
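        # update_external_cidrs must add/remove external subnets to match
        # the given CIDR list, including clearing them all.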
        l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                                display_name='OUT')
        ext_net = a_res.ExternalNetwork(
            tenant_name='t1', l3out_name='o1', name='inet1',
            display_name='INET1')
        self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf1_tenant_name))
        self.ns.create_l3outside(self.ctx, l3out)
        self.ns.create_external_network(self.ctx, ext_net)
        self.ns.update_external_cidrs(self.ctx, ext_net,
                                      ['20.20.20.0/24', '50.50.0.0/16'])

        # Connect vrf1 to ext_net
        vrf1 = a_res.VRF(tenant_name=self.vrf1_tenant_name, name='vrf1',
                         display_name='VRF1')
        self.mgr.create(self.ctx, vrf1)
        if self.fix_l3out_vrf:
            self.mgr.update(self.ctx, l3out, vrf_name=vrf1.name)
        ext_net.provided_contract_names = ['p1_vrf1', 'p2_vrf1']
        ext_net.consumed_contract_names = ['c1_vrf1', 'c2_vrf1']
        self.ns.connect_vrf(self.ctx, ext_net, vrf1)
        self._check_external_subnet_update("stage1")

        # Add & remove external-subnet
        self.ns.update_external_cidrs(self.ctx, ext_net,
                                      ['100.200.0.0/28', '50.50.0.0/16'])
        self._check_external_subnet_update("stage2")

        # Remove all external-subnets
        self.ns.update_external_cidrs(self.ctx, ext_net, [])
        self._check_external_subnet_update("stage3")
    def test_bd_l3out_vrf_in_tenant(self):
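        # Only bridge domains on the connected VRF ('default') get the
        # L3Out added to their l3out_names; bd2 uses another VRF and is
        # left untouched.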
        self.mgr.create(self.ctx, a_res.Tenant(name='dept1'))
        vrf = a_res.VRF(tenant_name='dept1', name='default')
        bd1_dept1 = a_res.BridgeDomain(tenant_name='dept1', name='bd1',
                                       limit_ip_learn_to_subnets=True,
                                       vrf_name='default')
        bd2_dept1 = a_res.BridgeDomain(tenant_name='dept1', name='bd2',
                                       limit_ip_learn_to_subnets=True,
                                       vrf_name='foo')
        for o in [vrf, bd1_dept1, bd2_dept1]:
            self.mgr.create(self.ctx, o)

        l3out = a_res.L3Outside(tenant_name='dept1', name='o1')
        ext_net = a_res.ExternalNetwork(
            tenant_name='dept1', l3out_name='o1', name='inet1')
        self.ns.create_l3outside(self.ctx, l3out)
        self.ns.create_external_network(self.ctx, ext_net)
        self.mgr.update(self.ctx, l3out, vrf_name='default')

        self._verify(present=[bd1_dept1, bd2_dept1])

        self.ns.connect_vrf(self.ctx, ext_net, vrf)
        bd1_dept1.l3out_names = ['o1']
        self._verify(present=[bd1_dept1, bd2_dept1])

        self.ns.disconnect_vrf(self.ctx, ext_net, vrf)
        bd1_dept1.l3out_names = []
        self._verify(present=[bd1_dept1, bd2_dept1])
    def test_sync_state_find(self):
        # Create 2 VRFs and 2 BDs for each sync state
        tn = self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
        self.mgr.set_resource_sync_synced(self.ctx, tn)
        expected = {'error': set(), 'synced': set(), 'pending': set()}
        expected['synced'].add(('tenant', 'tn1'))
        for state, f in [('error', self.mgr.set_resource_sync_error),
                         ('synced', self.mgr.set_resource_sync_synced),
                         ('pending', self.mgr.set_resource_sync_pending)]:
            for i in range(2):
                name = '%s_%s' % (state, i)
                for res, nice in [(resource.VRF, 'vrf'),
                                  (resource.BridgeDomain, 'bridge-domain')]:
                    item = self.mgr.create(self.ctx,
                                           res(tenant_name='tn1', name=name))
                    f(self.ctx, item)
                    expected[state].add((nice, 'tn1,%s' % name))

        for state in ['error', 'synced', 'pending']:
            result = self.run_command('manager sync-state-find -p -s %s' %
                                      state)
            parsed = self._parse_sync_find_output(result)
            if state == 'synced':
                self.assertEqual(5, len(parsed))
            else:
                self.assertEqual(4, len(parsed))
            self.assertEqual(expected[state], set(parsed))
 def _get_common_tenant(self, aim_ctx):
     attrs = aim_resource.Tenant(name=COMMON_TENANT_NAME,
                                 display_name='Common Tenant')
     tenant = self.aim.get(aim_ctx, attrs)
     if not tenant:
         LOG.info(_LI("Creating common tenant"))
         tenant = self.aim.create(aim_ctx, attrs)
     return tenant
 def test_delete_status(self):
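     # Deleting the resource's DB object directly should also remove
     # its status object.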
     tn = self.mgr.create(self.ctx,
                          resource.Tenant(name='test_delete_status'))
     st = self.mgr.get_status(self.ctx, tn)
     self.assertIsNotNone(self.mgr.get(self.ctx, st))
     db_obj = self.mgr._query_db_obj(self.ctx.store, tn)
     self.ctx.store.delete(db_obj)
     self.assertIsNone(self.mgr.get(self.ctx, st))
    def test_tree_hooks_transactions(self):
        with mock.patch('aim.agent.aid.event_services.'
                        'rpc.AIDEventRpcApi._cast') as cast:
            tn = aim_res.Tenant(name='test_tree_hooks')
            ap = aim_res.ApplicationProfile(tenant_name='test_tree_hooks',
                                            name='ap')
            epg = aim_res.EndpointGroup(tenant_name='test_tree_hooks',
                                        app_profile_name='ap',
                                        name='epg',
                                        bd_name='some')

            tn1 = aim_res.Tenant(name='test_tree_hooks1')
            ap1 = aim_res.ApplicationProfile(tenant_name='test_tree_hooks1',
                                             name='ap')
            epg1 = aim_res.EndpointGroup(tenant_name='test_tree_hooks1',
                                         app_profile_name='ap',
                                         name='epg',
                                         bd_name='some')

            # This transaction will generate some action logs, which
            # will trigger a 'reconcile' event.
            with self.ctx.store.begin(subtransactions=True):
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn)
                    self.mgr.create(self.ctx, ap)
                    self.mgr.create(self.ctx, epg)
                self.assertEqual(0, cast.call_count)
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn1)
                    self.mgr.create(self.ctx, ap1)
                    self.mgr.create(self.ctx, epg1)
                self.assertEqual(0, cast.call_count)
            exp_calls = [mock.call(mock.ANY, 'reconcile', None)]
            self._check_call_list(exp_calls, cast)
            cast.reset_mock()

            # There are 2 tenants, so 2 transactions are involved here;
            # each transaction updates the trees, so 2 'serve' events
            # are generated.
            self.db_l.catch_up_with_action_log(self.ctx.store)
            exp_calls = [
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'serve', None)
            ]
            self._check_call_list(exp_calls, cast)
    def test_monitored_state_change(self):
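        # Flipping 'monitored' to False should move an object from the
        # monitored hash-tree to the config hash-tree.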
        tn_name = 'test_monitored_state_change'
        tn_rn = 'tn-' + tn_name
        tn = aim_res.Tenant(name=tn_name, monitored=True)
        ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap',
                                        monitored=True)
        epg = aim_res.EndpointGroup(
            tenant_name=tn_name, app_profile_name='ap', name='epg',
            bd_name='some', monitored=True)
        self.mgr.create(self.ctx, tn)
        self.mgr.create(self.ctx, ap)
        self.mgr.create(self.ctx, epg)
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        # Create my own tree representation
        my_cfg_tree = tree.StructuredHashTree()
        my_mon_tree = tree.StructuredHashTree()
        self.db_l.tt_maker.update(my_mon_tree, [tn])
        # Mark their creation as successful
        self.mgr.set_resource_sync_synced(self.ctx, ap)
        self.mgr.set_resource_sync_synced(self.ctx, epg)
        self.db_l.tt_maker.update(my_mon_tree, [ap, epg])
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree)
        self.assertEqual(my_cfg_tree, cfg_tree)

        # Change ownership of the AP
        self.mgr.update(self.ctx, ap, monitored=False)
        my_mon_tree = tree.StructuredHashTree()
        # Equivalent to keeping only the tenant and EPG in the monitored tree
        self.db_l.tt_maker.update(my_mon_tree, [tn, epg])
        self.db_l.tt_maker.update(my_cfg_tree, [ap])
        # Refresh trees
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree,
                         'differences: %s' % my_mon_tree.diff(mon_tree))
        self.assertEqual(my_cfg_tree, cfg_tree)
        # Unset monitored on the EPG as well
        self.mgr.update(self.ctx, epg, monitored=False)
        my_mon_tree = tree.StructuredHashTree()
        self.db_l.tt_maker.update(my_mon_tree, [tn])
        self.db_l.tt_maker.update(my_cfg_tree, [epg])
        # Refresh trees
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree)
        self.assertEqual(my_cfg_tree, cfg_tree)
 def _get_tenant(self, tenant_name, should_exist=True):
     session = db_api.get_session()
     aim_ctx = aim_context.AimContext(session)
     tenant = aim_resource.Tenant(name=tenant_name)
     tenant = self.aim_mgr.get(aim_ctx, tenant)
     if should_exist:
         self.assertIsNotNone(tenant)
     else:
         self.assertIsNone(tenant)
     return tenant
    def test_get_resources_for_delete(self):
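        # Hash-tree keys are translated back into ACI objects; for
        # monitored objects only the owned tag is returned, while
        # RS-nodes of monitored objects are returned as-is.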
        objs = [
            {'fvBD': {'attributes': {
                'dn': 'uni/tn-t1/BD-test'}}},
            {'vzRsSubjFiltAtt': {'attributes': {
                'dn': 'uni/tn-t1/brc-c/subj-s/rssubjFiltAtt-f'}}},
            {'vzRsFiltAtt': {'attributes': {
                'dn': 'uni/tn-t1/brc-c/subj-s/intmnl/rsfiltAtt-g'}}},
            {'vzRsFiltAtt': {'attributes': {
                'dn': 'uni/tn-t1/brc-c/subj-s/outtmnl/rsfiltAtt-h'}}}]
        keys = [('fvTenant|t1', 'fvBD|test'),
                ('fvTenant|t1', 'vzBrCP|c', 'vzSubj|s',
                 'vzRsSubjFiltAtt|f'),
                ('fvTenant|t1', 'vzBrCP|c', 'vzSubj|s',
                 'vzInTerm|intmnl', 'vzRsFiltAtt|g'),
                ('fvTenant|t1', 'vzBrCP|c', 'vzSubj|s',
                 'vzOutTerm|outtmnl', 'vzRsFiltAtt|h')]
        result = self.universe.get_resources_for_delete(keys)
        self.assertEqual(utils.deep_sort(objs), utils.deep_sort(result))
        # Create a pending monitored object
        tn1 = resource.Tenant(name='tn1', monitored=True)
        monitored_bd = resource.BridgeDomain(
            tenant_name='tn1', name='monitoredBD', monitored=True)
        self.universe.manager.create(self.ctx, tn1)
        self.universe.manager.set_resource_sync_pending(self.ctx, tn1)
        self.universe.manager.create(self.ctx, monitored_bd)
        self.universe.manager.set_resource_sync_pending(self.ctx, monitored_bd)

        self.universe.multiverse = []
        result = self.universe.get_resources_for_delete(
            [('fvTenant|tn1', 'fvBD|monitoredBD')])
        self.assertEqual(1, len(result))
        result = result[0]
        self.assertEqual('tagInst', list(result.keys())[0])
        self.assertEqual('uni/tn-tn1/BD-monitoredBD/tag-openstack_aid',
                         list(result.values())[0]['attributes']['dn'])

        # Delete an RS-node of a monitored object
        self.universe.manager.create(self.ctx, resource.L3Outside(
            tenant_name='tn1', name='out', monitored=True))
        ext_net = self.universe.manager.create(
            self.ctx,
            resource.ExternalNetwork(tenant_name='tn1', l3out_name='out',
                                     name='inet',
                                     provided_contract_names=['p1'],
                                     monitored=True))
        self.universe.manager.set_resource_sync_synced(self.ctx, ext_net)
        result = self.universe.get_resources_for_delete(
            [('fvTenant|tn1', 'l3extOut|out', 'l3extInstP|inet',
              'fvRsProv|p1')])
        self.assertEqual(1, len(result))
        result = result[0]
        self.assertEqual('fvRsProv', list(result.keys())[0])
        self.assertEqual('uni/tn-tn1/out-out/instP-inet/rsprov-p1',
                         list(result.values())[0]['attributes']['dn'])
 def test_tree_hooks(self):
     with mock.patch('aim.agent.aid.event_services.'
                     'rpc.AIDEventRpcApi._cast') as cast:
         tn_name = 'test_tree_hooks'
         tn_rn = 'tn-' + tn_name
         tn = aim_res.Tenant(name='test_tree_hooks_2')
         ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap')
         epg = aim_res.EndpointGroup(tenant_name=tn_name,
                                     app_profile_name='ap',
                                     name='epg',
                                     bd_name='some')
         # Add the Tenant
         self.mgr.create(self.ctx, aim_res.Tenant(name=tn_name))
         # Creating a tenant also causes an action log to be created, and
         # consequently a 'reconcile' call
         exp_calls = [
             mock.call(mock.ANY, 'serve', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         self.mgr.create(self.ctx, tn)
         cast.reset_mock()
         self.mgr.create(self.ctx, ap)
         self.mgr.create(self.ctx, epg)
         # Creating the AP creates the tenant tree, creating the EPG
         # modifies it
         exp_calls = [
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         cast.reset_mock()
         self.mgr.update(self.ctx, epg, bd_name='bd2')
         exp_calls = [
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         cast.reset_mock()
         self.tt_mgr.delete_by_root_rn(self.ctx, tn_rn)
         cast.assert_called_once_with(mock.ANY, 'serve', None)
 def test_squash_operations_no_key(self):
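     # Pushes with no overlapping operation key are not squashed together
     # and end up as separate backlog entries.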
     aim_converter = converter.AimToAciModelConverter()
     tn = a_res.Tenant(name='tn1', display_name='foo')
     bd = a_res.BridgeDomain(tenant_name='tn1',
                             name='bd1',
                             display_name='bar')
     vrf = a_res.VRF(tenant_name='tn1', name='vrf1', display_name='pippo')
     self.manager.push_aim_resources({'create': [tn, bd]})
     self.manager.push_aim_resources(
         {'delete': aim_converter.convert([vrf])})
     self.assertEqual(2, len(self.manager.object_backlog.queue))
    def test_tree_hooks_transactions(self):
        with mock.patch('aim.agent.aid.event_services.'
                        'rpc.AIDEventRpcApi._cast') as cast:
            tn = aim_res.Tenant(name='test_tree_hooks')
            ap = aim_res.ApplicationProfile(tenant_name='test_tree_hooks',
                                            name='ap')
            epg = aim_res.EndpointGroup(tenant_name='test_tree_hooks',
                                        app_profile_name='ap',
                                        name='epg',
                                        bd_name='some')

            tn1 = aim_res.Tenant(name='test_tree_hooks1')
            ap1 = aim_res.ApplicationProfile(tenant_name='test_tree_hooks1',
                                             name='ap')
            epg1 = aim_res.EndpointGroup(tenant_name='test_tree_hooks1',
                                         app_profile_name='ap',
                                         name='epg',
                                         bd_name='some')
            # Try a transaction
            with self.ctx.store.begin(subtransactions=True):
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn)
                    self.mgr.create(self.ctx, ap)
                    self.mgr.create(self.ctx, epg)
                self.assertEqual(0, cast.call_count)
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn1)
                    self.mgr.create(self.ctx, ap1)
                    self.mgr.create(self.ctx, epg1)
                self.assertEqual(0, cast.call_count)
            # Trees are now saved one at a time, so serve is called twice
            exp_calls = [
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'reconcile', None)
            ]
            self._check_call_list(exp_calls, cast)
 def test_squash_operations(self):
     # Craft some objects and push them
     aim_converter = converter.AimToAciModelConverter()
     tn = a_res.Tenant(name='tn1', display_name='foo')
     bd = a_res.BridgeDomain(tenant_name='tn1',
                             name='bd1',
                             display_name='bar')
     vrf = a_res.VRF(tenant_name='tn1', name='vrf1', display_name='pippo')
     self.manager.push_aim_resources({
         'create': [tn, bd],
         'delete': aim_converter.convert([vrf])
     })
     self.assertEqual(1, len(self.manager.object_backlog.queue))
     old = self.manager.object_backlog.queue[0]
     # Idempotent
     self.manager.push_aim_resources({
         'create': [tn, bd],
         'delete': aim_converter.convert([vrf])
     })
     self.assertEqual(1, len(self.manager.object_backlog.queue))
     curr = self.manager.object_backlog.queue[0]
     self.assertEqual(old, curr)
     # Now replace something
     bd2 = a_res.BridgeDomain(tenant_name='tn1',
                              name='bd2',
                              display_name='bar')
     bd = copy.deepcopy(bd)
     bd.display_name = 'foobar'
     self.manager.push_aim_resources({'create': [bd2, bd], 'delete': []})
     self.assertEqual(2, len(self.manager.object_backlog.queue))
     self.assertEqual({
         'create': [bd2],
         'delete': []
     }, self.manager.object_backlog.queue[1])
     self.assertEqual(
         'foobar',
         self.manager.object_backlog.queue[0]['create'][1].display_name)
     # Add something completely different
     vrf2 = a_res.VRF(tenant_name='tn1', name='vrf2', display_name='pippo')
     self.manager.push_aim_resources({
         'create': [vrf2],
         'delete': aim_converter.convert([bd])
     })
     self.assertEqual(
         {
             'create': [vrf2],
             'delete': aim_converter.convert([bd])
         }, self.manager.object_backlog.queue[2])
def upgrade(ctx, version):
    """Used for upgrading database."""
    version = version or 'head'
    ctx.obj['manager'].upgrade(version)

    # create common tenant
    aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
    aim_mgr = aim_manager.AimManager()
    common_tenant = resource.Tenant(name='common', monitored=True)
    if not aim_mgr.get(aim_ctx, common_tenant):
        aim_mgr.create(aim_ctx, common_tenant)

    fix_no_nat_l3out_ownership(aim_ctx)

    click.echo('Rebuilding hash-trees')
    _reset(aim_mgr)
    def test_sync_state_recover(self):
        # Create 2 VRFs and 2 BDs in sync-error state
        tn = self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
        self.mgr.set_resource_sync_synced(self.ctx, tn)
        items = []
        for i in range(2):
            name = 'error_%s' % i
            for res in [resource.VRF, resource.BridgeDomain]:
                item = self.mgr.create(self.ctx, res(tenant_name='tn1',
                                                     name=name))
                self.mgr.set_resource_sync_error(self.ctx, item)
                items.append(item)

        with mock.patch('aim.aim_manager.AimManager.update') as up:
            self.run_command('manager sync-state-recover')
            # Items are updated
            self.assertEqual(4, up.call_count)
    def _create_l3out(self, ctx, l3out):
        """Create NAT EPG etc. in addition to creating L3Out."""

        with ctx.store.begin(subtransactions=True):
            tenant = resource.Tenant(name=l3out.tenant_name)
            if not self.mgr.get(ctx, tenant):
                self.mgr.create(ctx, tenant)
            l3out_db = self.mgr.get(ctx, l3out)
            if not l3out_db:
                ext_vrf = self._get_nat_vrf(ctx, l3out)
                if not self.mgr.get(ctx, ext_vrf):
                    self.mgr.create(ctx, ext_vrf)
                l3out_db = copy.copy(l3out)
                l3out_db.vrf_name = ext_vrf.name
                l3out_db = self.mgr.create(ctx, l3out_db)
            self._create_nat_epg(ctx, l3out_db)
            return l3out_db
    def test_session_rollback(self):
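        # If opening the store transaction fails, push_resources should
        # roll back the DB session exactly once.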
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
        ap = self._get_example_aci_app_profile(dn='uni/tn-t1/ap-a1')
        self.universe.context.store.begin = mock.Mock(
            side_effect=db_exc.DBError)
        rollback = self.universe.context.store.db_session.rollback
        self.called_count = 0

        def rollback_count():
            rollback()
            self.called_count += 1

        self.universe.context.store.db_session.rollback = rollback_count
        self.universe.push_resources({'create': [ap], 'delete': []})
        self.assertEqual(1, self.called_count)
        del self.called_count
    def test_connect_vrf_multiple(self):
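        # One VRF connected to external networks on two different
        # L3Outs; each connection is then torn down independently.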
        l3out1 = a_res.L3Outside(tenant_name='t1', name='o1',
                                 display_name='OUT')
        ext_net1 = a_res.ExternalNetwork(
            tenant_name='t1', l3out_name='o1', name='inet1',
            display_name='INET1')
        self.mgr.create(self.ctx, a_res.Tenant(name=self.vrf1_tenant_name))
        self.ns.create_l3outside(self.ctx, l3out1)
        self.ns.create_external_network(self.ctx, ext_net1)
        self.ns.update_external_cidrs(self.ctx, ext_net1,
                                      ['20.20.20.0/24', '50.50.0.0/16'])

        l3out2 = a_res.L3Outside(tenant_name='t2', name='o2',
                                 display_name='OUT2')
        ext_net2 = a_res.ExternalNetwork(
            tenant_name='t2', l3out_name='o2', name='inet2',
            display_name='INET2')
        self.ns.create_l3outside(self.ctx, l3out2)
        self.ns.create_external_network(self.ctx, ext_net2)
        self.ns.update_external_cidrs(self.ctx, ext_net2,
                                      ['0.0.0.0/0'])

        vrf1 = a_res.VRF(tenant_name=self.vrf1_tenant_name, name='vrf1',
                         display_name='VRF1')
        bd1 = a_res.BridgeDomain(tenant_name=self.vrf1_tenant_name, name='bd1',
                                 limit_ip_learn_to_subnets=True,
                                 vrf_name='vrf1')
        self.mgr.create(self.ctx, vrf1)
        self.mgr.create(self.ctx, bd1)
        if self.fix_l3out_vrf:
            self.mgr.update(self.ctx, l3out1, vrf_name=vrf1.name)
            self.mgr.update(self.ctx, l3out2, vrf_name=vrf1.name)
        ext_net1.provided_contract_names = ['p1_vrf1', 'p2_vrf1']
        ext_net1.consumed_contract_names = ['c1_vrf1', 'c2_vrf1']
        ext_net2.provided_contract_names = ['p3_vrf1', 'p4_vrf1']
        ext_net2.consumed_contract_names = ['c3_vrf1', 'c4_vrf1']
        self.ns.connect_vrf(self.ctx, ext_net1, vrf1)
        self.ns.connect_vrf(self.ctx, ext_net2, vrf1)
        self._check_connect_vrf_multiple('stage1')

        self.ns.disconnect_vrf(self.ctx, ext_net1, vrf1)
        self._check_connect_vrf_multiple('stage2')

        self.ns.disconnect_vrf(self.ctx, ext_net2, vrf1)
        self._check_connect_vrf_multiple('stage3')
def upgrade(ctx, version):
    """Used for upgrading database."""
    version = version or 'head'
    ctx.obj['manager'].upgrade(version)

    # create common tenant
    aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
    aim_mgr = aim_manager.AimManager()
    common_tenant = resource.Tenant(name='common', monitored=True)
    if not aim_mgr.get(aim_ctx, common_tenant):
        aim_mgr.create(aim_ctx, common_tenant)

    fix_no_nat_l3out_ownership(aim_ctx)

    click.echo('Rebuilding hash-trees')
    # reset hash-trees to account for schema/converter changes
    listener = hashtree_db_listener.HashTreeDbListener(aim_mgr)
    listener.reset(aim_ctx.store)
    def test_subject_related_objects(self):
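        # In/out subject filters should appear as vzRsFiltAtt nodes under
        # vzInTerm/vzOutTerm in the tenant's config tree.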
        self.mgr.create(self.ctx, aim_res.Tenant(name='common'))
        self.mgr.create(
            self.ctx, aim_res.Contract(tenant_name='common', name='c-name'))
        subj = aim_res.ContractSubject(
            **{'contract_name': 'c-name',
               'name': 's-name',
               'tenant_name': 'common', 'monitored': False})
        subj = self.mgr.create(self.ctx, subj)

        subj_flt = aim_res.ContractSubjOutFilter(
            **{'contract_name': 'c-name',
               'contract_subject_name': 's-name',
               'tenant_name': 'common',
               'monitored': False,
               'filter_name': 'pr_1'})
        subj_flt = self.mgr.create(self.ctx, subj_flt)

        subj_flt1 = aim_res.ContractSubjInFilter(
            **{'contract_name': 'c-name',
               'contract_subject_name': 's-name',
               'tenant_name': 'common',
               'monitored': False,
               'filter_name': 'pr_1'})
        subj_flt1 = self.mgr.create(self.ctx, subj_flt1)
        cfg_tree = self.tt_mgr.get(self.ctx, 'tn-common',
                                   tree=tree_manager.CONFIG_TREE)
        # verify pr_1 and its reverse are in the tree
        pr_1 = cfg_tree.find(
            ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
             "vzOutTerm|outtmnl", "vzRsFiltAtt|pr_1"))
        rev_pr_1 = cfg_tree.find(
            ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
             "vzInTerm|intmnl", "vzRsFiltAtt|pr_1"))
        self.assertIsNotNone(pr_1)
        self.assertIsNotNone(rev_pr_1)

        self.mgr.update(self.ctx, subj_flt1, action='deny')
        cfg_tree = self.tt_mgr.get(self.ctx, 'tn-common',
                                   tree=tree_manager.CONFIG_TREE)
        rev_pr_1 = cfg_tree.find(
            ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
             "vzInTerm|intmnl", "vzRsFiltAtt|pr_1"))
        self.assertIsNotNone(rev_pr_1)
    def test_subject_related_objects(self):
        self.mgr.create(self.ctx, aim_res.Tenant(name='common'))
        self.mgr.create(self.ctx,
                        aim_res.Contract(tenant_name='common', name='c-name'))
        subj = aim_res.ContractSubject(
            **{
                'contract_name': 'c-name',
                'out_filters':
                ['pr_1', 'reverse-pr_1', 'pr_2', 'reverse-pr_2'],
                'name': 's-name',
                'tenant_name': 'common',
                'monitored': False,
                'bi_filters': [],
                'in_filters': ['pr_1', 'reverse-pr_1', 'pr_2', 'reverse-pr_2']
            })
        subj = self.mgr.create(self.ctx, subj)
        cfg_tree = self.tt_mgr.get(self.ctx,
                                   'tn-common',
                                   tree=tree_manager.CONFIG_TREE)
        # verify pr_1 and its reverse are in the tree
        pr_1 = cfg_tree.find(
            ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
             "vzOutTerm|outtmnl", "vzRsFiltAtt|pr_1"))
        rev_pr_1 = cfg_tree.find(
            ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
             "vzOutTerm|outtmnl", "vzRsFiltAtt|reverse-pr_1"))
        self.assertIsNotNone(pr_1)
        self.assertIsNotNone(rev_pr_1)

        self.mgr.update(self.ctx,
                        subj,
                        out_filters=['pr_2', 'reverse-pr_2'],
                        in_filters=['pr_2', 'reverse-pr_2'])
        cfg_tree = self.tt_mgr.get(self.ctx,
                                   'tn-common',
                                   tree=tree_manager.CONFIG_TREE)
        pr_1 = cfg_tree.find(
            ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
             "vzOutTerm|outtmnl", "vzRsFiltAtt|pr_1"))
        rev_pr_1 = cfg_tree.find(
            ("fvTenant|common", "vzBrCP|c-name", "vzSubj|s-name",
             "vzOutTerm|outtmnl", "vzRsFiltAtt|reverse-pr_1"))
        self.assertIsNone(pr_1)
        self.assertIsNone(rev_pr_1)
    def ensure_tenant(self, plugin_context, tenant_id):
        LOG.debug("APIC AIM MD ensuring tenant_id: %s", tenant_id)

        self.project_name_cache.ensure_project(tenant_id)

        # TODO(rkukura): Move the following to calls made from
        # precommit methods so AIM Tenants, ApplicationProfiles, and
        # Filters are [re]created whenever needed.
        session = plugin_context.session
        with session.begin(subtransactions=True):
            project_name = self.project_name_cache.get_project_name(tenant_id)
            tenant_aname = self.name_mapper.tenant(session, tenant_id,
                                                   project_name)
            LOG.debug(
                "Mapped tenant_id %(id)s with name %(name)s to "
                "%(aname)s", {
                    'id': tenant_id,
                    'name': project_name,
                    'aname': tenant_aname
                })

            aim_ctx = aim_context.AimContext(session)

            tenant = aim_resource.Tenant(name=tenant_aname)
            if not self.aim.get(aim_ctx, tenant):
                self.aim.create(aim_ctx, tenant)

            ap = aim_resource.ApplicationProfile(tenant_name=tenant_aname,
                                                 name=AP_NAME)
            if not self.aim.get(aim_ctx, ap):
                self.aim.create(aim_ctx, ap)

            filter = aim_resource.Filter(tenant_name=tenant_aname,
                                         name=ANY_FILTER_NAME,
                                         display_name='Any Filter')
            if not self.aim.get(aim_ctx, filter):
                self.aim.create(aim_ctx, filter)

            entry = aim_resource.FilterEntry(tenant_name=tenant_aname,
                                             filter_name=ANY_FILTER_NAME,
                                             name=ANY_FILTER_ENTRY_NAME,
                                             display_name='Any FilterEntry')
            if not self.aim.get(aim_ctx, entry):
                self.aim.create(aim_ctx, entry)
    def test_l3outside_pre(self):
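        # Pre-existing (monitored) L3Out with its own VRF: create adds
        # the auxiliary L3Out objects, delete removes only those and
        # leaves the monitored L3Out and VRF in place.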
        self.mgr.create(self.ctx, a_res.Tenant(name='t1'))
        vrf = a_res.VRF(tenant_name='t1', name='ctx1', monitored=True)
        self.mgr.create(self.ctx, vrf)
        l3out = a_res.L3Outside(tenant_name='t1', name='o1',
                                display_name='OUT', vrf_name='ctx1',
                                monitored=True)
        self.mgr.create(self.ctx, l3out)
        self.ns.create_l3outside(self.ctx, l3out)
        other_objs = self._get_l3out_objects(nat_vrf_name='ctx1')
        self._verify(present=[l3out, vrf] + other_objs)

        get_objs = self.ns.get_l3outside_resources(self.ctx, l3out)
        self._assert_res_list_eq(other_objs + [l3out, vrf], get_objs)

        self.ns.delete_l3outside(self.ctx, l3out)
        self._verify(present=[l3out, vrf], absent=other_objs)

        get_objs = self.ns.get_l3outside_resources(self.ctx, l3out)
        self.assertEqual([l3out, vrf], get_objs)
    def test_creation_failed_cooldown(self):
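        # The failure counter for an object only advances once the retry
        # cooldown has expired.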
        curr_cooldown = self.universe.retry_cooldown
        curr_max_retries = self.universe.max_create_retry
        aim_object = resource.Tenant(name='test_creation_failed_cooldown')
        aim_id = self.universe._get_aim_object_identifier(aim_object)
        # Set max_retry to infinity; we don't care about the object being
        # marked as failed here
        self.universe.max_create_retry = float('inf')
        # Fail first operation
        self.universe.creation_failed(aim_object)
        self.assertEqual((1, mock.ANY), self.universe.failure_log[aim_id])
        # If the cooldown is high enough, the retry count will not
        # increase as the object keeps failing
        self.universe.retry_cooldown = float('inf')
        for x in range(10):
            self.universe.creation_failed(aim_object)
        self.assertEqual((1, mock.ANY), self.universe.failure_log[aim_id])
        # If the cooldown is low enough, the retry count keeps increasing
        self.universe.retry_cooldown = -1
        for x in range(10):
            self.universe.creation_failed(aim_object)
        self.assertEqual((11, mock.ANY), self.universe.failure_log[aim_id])

        self.universe.retry_cooldown = curr_cooldown
        self.universe.max_create_retry = curr_max_retries