Example #1
Mechanism-driver extension hook that adds the APIC distinguished names (BD and EPG) and the sync state to a Neutron network dict.
    def extend_network_dict(self, session, base_model, result):
        LOG.debug("APIC AIM MD extending dict for network: %s", result)

        tenant_id = result['tenant_id']
        tenant_aname = self.name_mapper.tenant(session, tenant_id)
        LOG.debug("Mapped tenant_id %(id)s to %(aname)s", {
            'id': tenant_id,
            'aname': tenant_aname
        })

        id = result['id']
        name = result['name']
        aname = self.name_mapper.network(session, id, name)
        LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s", {
            'id': id,
            'name': name,
            'aname': aname
        })

        bd = aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname)

        epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                         app_profile_name=AP_NAME,
                                         name=aname)

        aim_ctx = aim_context.AimContext(session)
        sync_state = cisco_apic.SYNC_SYNCED
        sync_state = self._merge_status(aim_ctx, sync_state, bd)
        sync_state = self._merge_status(aim_ctx, sync_state, epg)
        result[cisco_apic.DIST_NAMES] = {
            cisco_apic.BD: bd.dn,
            cisco_apic.EPG: epg.dn
        }
        result[cisco_apic.SYNC_STATE] = sync_state
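For orientation: an EndpointGroup is identified by tenant_name, app_profile_name and name, and those three fields determine the dn used above. A minimal standalone sketch (module path and values are illustrative; the DN format matches the lookup in Example #15):

from aim.api import resource as aim_resource

# The identity attributes fully determine the ACI distinguished name.
epg = aim_resource.EndpointGroup(tenant_name='t1',
                                 app_profile_name='a1',
                                 name='test')
print(epg.dn)  # uni/tn-t1/ap-a1/epg-test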
Example #2
Test helper that builds a sample EndpointGroup whose defaults can be overridden through kwargs.
 def _get_example_aim_epg(cls, **kwargs):
     example = resource.EndpointGroup(tenant_name='t1',
                                      app_profile_name='a1',
                                      name='test',
                                      bd_name='net1')
     example.__dict__.update(kwargs)
     return example
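Any attribute of the example resource can be replaced through kwargs; for instance, a hypothetical _get_example_aim_epg(bd_name='net2') would override only the bridge domain while keeping the other defaults.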
Example #3
Precommit hook that deletes the EPG and BridgeDomain mapped to a Neutron network being removed, then releases the name mapping.
    def delete_network_precommit(self, context):
        LOG.debug("APIC AIM MD deleting network: %s", context.current)

        session = context._plugin_context.session

        tenant_id = context.current['tenant_id']
        tenant_aname = self.name_mapper.tenant(session, tenant_id)
        LOG.debug("Mapped tenant_id %(id)s to %(aname)s", {
            'id': tenant_id,
            'aname': tenant_aname
        })

        id = context.current['id']
        name = context.current['name']
        aname = self.name_mapper.network(session, id, name)
        LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s", {
            'id': id,
            'name': name,
            'aname': aname
        })

        aim_ctx = aim_context.AimContext(session)

        epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                         app_profile_name=AP_NAME,
                                         name=aname)
        self.aim.delete(aim_ctx, epg)

        bd = aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname)
        self.aim.delete(aim_ctx, bd)

        self.name_mapper.delete_apic_name(session, id)
Example #4
Precommit hook that updates the BD and EPG display names when a Neutron network is renamed.
    def update_network_precommit(self, context):
        LOG.debug("APIC AIM MD updating network: %s", context.current)

        if context.current['name'] != context.original['name']:
            session = context._plugin_context.session

            tenant_id = context.current['tenant_id']
            tenant_aname = self.name_mapper.tenant(session, tenant_id)
            LOG.debug("Mapped tenant_id %(id)s to %(aname)s", {
                'id': tenant_id,
                'aname': tenant_aname
            })

            id = context.current['id']
            name = context.current['name']
            aname = self.name_mapper.network(session, id, name)
            LOG.debug(
                "Mapped network_id %(id)s with name %(name)s to "
                "%(aname)s", {
                    'id': id,
                    'name': name,
                    'aname': aname
                })
            dname = aim_utils.sanitize_display_name(context.current['name'])

            aim_ctx = aim_context.AimContext(session)

            bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
                                           name=aname)
            bd = self.aim.update(aim_ctx, bd, display_name=dname)

            epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                             app_profile_name=AP_NAME,
                                             name=aname)
            epg = self.aim.update(aim_ctx, epg, display_name=dname)
Example #5
Helper that assembles an AIM EndpointGroup for a GBP policy target group, wiring in BD and contract relations only when provided.
    def _aim_endpoint_group(self, session, ptg, bd_name=None,
                            bd_tenant_name=None,
                            provided_contracts=None,
                            consumed_contracts=None):
        # This returns a new AIM EPG resource
        # TODO(Sumit): Use _aim_resource_by_name
        tenant_id = ptg['tenant_id']
        tenant_name = self._aim_tenant_name(session, tenant_id)
        id = ptg['id']
        name = ptg['name']
        epg_name = self.name_mapper.policy_target_group(session, id, name)
        display_name = self.aim_display_name(ptg['name'])
        LOG.debug("Mapped ptg_id %(id)s with name %(name)s to %(apic_name)s",
                  {'id': id, 'name': name, 'apic_name': epg_name})
        kwargs = {'tenant_name': str(tenant_name),
                  'name': str(epg_name),
                  'display_name': display_name,
                  'app_profile_name': aim_md.AP_NAME}
        if bd_name:
            kwargs['bd_name'] = bd_name
        if bd_tenant_name:
            kwargs['bd_tenant_name'] = bd_tenant_name

        if provided_contracts:
            kwargs['provided_contract_names'] = provided_contracts

        if consumed_contracts:
            kwargs['consumed_contract_names'] = consumed_contracts

        epg = aim_resource.EndpointGroup(**kwargs)
        return epg
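The kwargs assembled above map one-to-one onto EndpointGroup attributes, so the result is equivalent to a direct construction along these lines (a sketch; every value is illustrative, and 'OpenStack' merely stands in for the aim_md.AP_NAME constant):

from aim.api import resource as aim_resource

# Optional relations (BD, contracts) are only set when a value exists,
# mirroring the conditional kwargs logic in _aim_endpoint_group.
epg = aim_resource.EndpointGroup(
    tenant_name='prj_tenant',
    app_profile_name='OpenStack',
    name='ptg_epg',
    display_name='my-ptg',
    bd_name='net1',
    bd_tenant_name='prj_tenant',
    provided_contract_names=['web'],
    consumed_contract_names=['db'])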
Example #6
Verifies that tree-hook RPC events are deferred until the outermost transaction commits.
    def test_tree_hooks_transactions(self):
        with mock.patch('aim.agent.aid.event_services.'
                        'rpc.AIDEventRpcApi._cast') as cast:
            tn = aim_res.Tenant(name='test_tree_hooks')
            ap = aim_res.ApplicationProfile(tenant_name='test_tree_hooks',
                                            name='ap')
            epg = aim_res.EndpointGroup(tenant_name='test_tree_hooks',
                                        app_profile_name='ap',
                                        name='epg',
                                        bd_name='some')

            tn1 = aim_res.Tenant(name='test_tree_hooks1')
            ap1 = aim_res.ApplicationProfile(tenant_name='test_tree_hooks1',
                                             name='ap')
            epg1 = aim_res.EndpointGroup(tenant_name='test_tree_hooks1',
                                         app_profile_name='ap',
                                         name='epg',
                                         bd_name='some')

            # This transaction will generate some action logs, which
            # will trigger a 'reconcile' event.
            with self.ctx.store.begin(subtransactions=True):
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn)
                    self.mgr.create(self.ctx, ap)
                    self.mgr.create(self.ctx, epg)
                self.assertEqual(0, cast.call_count)
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn1)
                    self.mgr.create(self.ctx, ap1)
                    self.mgr.create(self.ctx, epg1)
                self.assertEqual(0, cast.call_count)
            exp_calls = [mock.call(mock.ANY, 'reconcile', None)]
            self._check_call_list(exp_calls, cast)
            cast.reset_mock()

            # There are 2 tenants, so 2 transactions will be involved here;
            # each transaction will update the trees, so 2 'serve' events
            # will be generated.
            self.db_l.catch_up_with_action_log(self.ctx.store)
            exp_calls = [
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'serve', None)
            ]
            self._check_call_list(exp_calls, cast)
Example #7
Tracks how objects move between the config and monitored hash trees as their monitored flag is toggled.
    def test_monitored_state_change(self):
        tn_name = 'test_monitored_state_change'
        tn_rn = 'tn-' + tn_name
        tn = aim_res.Tenant(name=tn_name, monitored=True)
        ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap',
                                        monitored=True)
        epg = aim_res.EndpointGroup(
            tenant_name=tn_name, app_profile_name='ap', name='epg',
            bd_name='some', monitored=True)
        self.mgr.create(self.ctx, tn)
        self.mgr.create(self.ctx, ap)
        self.mgr.create(self.ctx, epg)
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        # Create my own tree representation
        my_cfg_tree = tree.StructuredHashTree()
        my_mon_tree = tree.StructuredHashTree()
        self.db_l.tt_maker.update(my_mon_tree, [tn])
        # Mark their creation as successful (synced)
        self.mgr.set_resource_sync_synced(self.ctx, ap)
        self.mgr.set_resource_sync_synced(self.ctx, epg)
        self.db_l.tt_maker.update(my_mon_tree, [ap, epg])
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree)
        self.assertEqual(my_cfg_tree, cfg_tree)

        # Change ownership of the AP
        self.mgr.update(self.ctx, ap, monitored=False)
        my_mon_tree = tree.StructuredHashTree()
        # This is equivalent to keeping only the tenant and EPG in the
        # monitored tree, while the AP moves to the config tree
        self.db_l.tt_maker.update(my_mon_tree, [tn, epg])
        self.db_l.tt_maker.update(my_cfg_tree, [ap])
        # Refresh trees
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree,
                         'differences: %s' % my_mon_tree.diff(mon_tree))
        self.assertEqual(my_cfg_tree, cfg_tree)
        # Unset monitored on the EPG as well
        self.mgr.update(self.ctx, epg, monitored=False)
        my_mon_tree = tree.StructuredHashTree()
        self.db_l.tt_maker.update(my_mon_tree, [tn])
        self.db_l.tt_maker.update(my_cfg_tree, [epg])
        # Refresh trees
        cfg_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.CONFIG_TREE)
        mon_tree = self.tt_mgr.get(self.ctx, tn_rn,
                                   tree=tree_manager.MONITORED_TREE)
        self.assertEqual(my_mon_tree, mon_tree)
        self.assertEqual(my_cfg_tree, cfg_tree)
Example #8
Test helper that fetches an EPG through AimManager and asserts whether it exists.
 def _get_epg(self, epg_name, tenant_name, app_profile_name,
              should_exist=True):
     session = db_api.get_session()
     aim_ctx = aim_context.AimContext(session)
     epg = aim_resource.EndpointGroup(tenant_name=tenant_name,
                                      app_profile_name=app_profile_name,
                                      name=epg_name)
     epg = self.aim_mgr.get(aim_ctx, epg)
     if should_exist:
         self.assertIsNotNone(epg)
     else:
         self.assertIsNone(epg)
     return epg
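Combined with the create/update/delete calls in the surrounding examples, a full AimManager round trip looks roughly like this (a sketch: aim_ctx is assumed to be an AimContext bound to a DB session, as built in Example #1, and all values are illustrative):

from aim import aim_manager
from aim.api import resource as aim_resource

mgr = aim_manager.AimManager()
epg = aim_resource.EndpointGroup(tenant_name='t1',
                                 app_profile_name='a1',
                                 name='test',
                                 bd_name='net1')
epg = mgr.create(aim_ctx, epg)                        # persist
epg = mgr.update(aim_ctx, epg, display_name='Test')   # partial update
epg = mgr.get(aim_ctx, epg)                           # look up by identity
mgr.delete(aim_ctx, epg)                              # remove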
Example #9
Builds the NAT ApplicationProfile and external EPG ('EXT-<l3out name>') for an L3Outside.
 def _get_nat_ap_epg(self, ctx, l3out):
     d_name = self._display_name(l3out)
     ap_name = getattr(self, 'app_profile_name', None) or l3out.name
     ap_name = self._scope_name_if_common(l3out.tenant_name, ap_name)
     ap_display_name = aim_utils.sanitize_display_name(ap_name or d_name)
     ap = resource.ApplicationProfile(tenant_name=l3out.tenant_name,
                                      name=ap_name,
                                      display_name=ap_display_name)
     epg = resource.EndpointGroup(
         tenant_name=ap.tenant_name,
         app_profile_name=ap.name,
         name='EXT-%s' % l3out.name,
         display_name=aim_utils.sanitize_display_name('EXT-%s' % d_name))
     return (ap, epg)
Example #10
A second version of the transaction test in which trees are saved one at a time, so two 'serve' events precede the 'reconcile'.
    def test_tree_hooks_transactions(self):
        with mock.patch('aim.agent.aid.event_services.'
                        'rpc.AIDEventRpcApi._cast') as cast:
            tn = aim_res.Tenant(name='test_tree_hooks')
            ap = aim_res.ApplicationProfile(tenant_name='test_tree_hooks',
                                            name='ap')
            epg = aim_res.EndpointGroup(tenant_name='test_tree_hooks',
                                        app_profile_name='ap',
                                        name='epg',
                                        bd_name='some')

            tn1 = aim_res.Tenant(name='test_tree_hooks1')
            ap1 = aim_res.ApplicationProfile(tenant_name='test_tree_hooks1',
                                             name='ap')
            epg1 = aim_res.EndpointGroup(tenant_name='test_tree_hooks1',
                                         app_profile_name='ap',
                                         name='epg',
                                         bd_name='some')
            # Try a transaction
            with self.ctx.store.begin(subtransactions=True):
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn)
                    self.mgr.create(self.ctx, ap)
                    self.mgr.create(self.ctx, epg)
                self.assertEqual(0, cast.call_count)
                with self.ctx.store.begin(subtransactions=True):
                    self.mgr.create(self.ctx, tn1)
                    self.mgr.create(self.ctx, ap1)
                    self.mgr.create(self.ctx, epg1)
                self.assertEqual(0, cast.call_count)
            # Trees are now saved one at a time, so serve is called twice
            exp_calls = [
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'serve', None),
                mock.call(mock.ANY, 'reconcile', None)
            ]
            self._check_call_list(exp_calls, cast)
Example #11
Builds the expected list of AIM resources for an external L3Out, including an EPG with contract and domain associations.
 def _get_l3out_objects(self, l3out_name=None, l3out_display_name=None,
                        nat_vrf_name=None, vmm_domains=None,
                        phys_domains=None):
     name = 'EXT-%s' % (l3out_name or 'o1')
     d_name = 'EXT-%s' % (l3out_display_name or 'OUT')
     nat_vrf = a_res.VRF(tenant_name='t1', name=name, display_name=d_name)
     if vmm_domains is not None:
         vmm_doms = vmm_domains
     else:
         vmm_doms = self.vmm_domains
     if phys_domains is not None:
         phys_doms = phys_domains
     else:
         phys_doms = self.phys_domains
     return ([
         a_res.Filter(tenant_name='t1', name=name,
                      display_name=d_name),
         a_res.FilterEntry(tenant_name='t1', filter_name=name,
                           name='Any', display_name='Any'),
         a_res.Contract(tenant_name='t1', name=name,
                        display_name=d_name),
         a_res.ContractSubject(tenant_name='t1', contract_name=name,
                               name='Allow', display_name='Allow',
                               bi_filters=[name]),
         a_res.BridgeDomain(tenant_name='t1', name=name,
                            display_name=d_name,
                            vrf_name=nat_vrf_name or name,
                            limit_ip_learn_to_subnets=True,
                            l3out_names=[l3out_name or 'o1']),
         a_res.ApplicationProfile(tenant_name='t1', name='myapp',
                                  display_name='myapp'),
         a_res.EndpointGroup(tenant_name='t1', app_profile_name='myapp',
                             name=name, display_name=d_name,
                             bd_name=name,
                             provided_contract_names=[name],
                             consumed_contract_names=[name],
                             # NOTE(ivar): Need to keep both VMM
                             # representations since a GET on the EPG
                             # will also return the domain name list
                             # for backward compatibility
                             openstack_vmm_domain_names=[dom['name']
                                                         for dom in vmm_doms
                                                         if dom['type'] ==
                                                         'OpenStack'],
                             physical_domain_names=[dom['name']
                                                    for dom in phys_doms],
                             vmm_domains=vmm_doms,
                             physical_domains=phys_doms)] +
             ([nat_vrf] if nat_vrf_name is None else []))
Example #12
Verifies which 'serve' and 'reconcile' events individual manager operations emit.
 def test_tree_hooks(self):
     with mock.patch('aim.agent.aid.event_services.'
                     'rpc.AIDEventRpcApi._cast') as cast:
         tn_name = 'test_tree_hooks'
         tn_rn = 'tn-' + tn_name
         tn = aim_res.Tenant(name='test_tree_hooks_2')
         ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap')
         epg = aim_res.EndpointGroup(tenant_name=tn_name,
                                     app_profile_name='ap',
                                     name='epg',
                                     bd_name='some')
         # Add Tenant and AP
         self.mgr.create(self.ctx, aim_res.Tenant(name=tn_name))
          # Creating a tenant also causes a log to be created, and
          # consequently a 'reconcile' call
         exp_calls = [
             mock.call(mock.ANY, 'serve', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         self.mgr.create(self.ctx, tn)
         cast.reset_mock()
         self.mgr.create(self.ctx, ap)
         self.mgr.create(self.ctx, epg)
          # Creating the AP will create the tenant tree; creating the
          # EPG will modify it
         exp_calls = [
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         cast.reset_mock()
         self.mgr.update(self.ctx, epg, bd_name='bd2')
         exp_calls = [
             mock.call(mock.ANY, 'reconcile', None),
             mock.call(mock.ANY, 'reconcile', None)
         ]
         self._check_call_list(exp_calls, cast)
         cast.reset_mock()
         self.tt_mgr.delete_by_root_rn(self.ctx, tn_rn)
         cast.assert_called_once_with(mock.ANY, 'serve', None)
Example #13
Precommit hook that creates the BridgeDomain and EndpointGroup backing a new Neutron network, tied to the unrouted VRF.
    def create_network_precommit(self, context):
        LOG.debug("APIC AIM MD creating network: %s", context.current)

        session = context._plugin_context.session

        tenant_id = context.current['tenant_id']
        tenant_aname = self.name_mapper.tenant(session, tenant_id)
        LOG.debug("Mapped tenant_id %(id)s to %(aname)s", {
            'id': tenant_id,
            'aname': tenant_aname
        })

        id = context.current['id']
        name = context.current['name']
        aname = self.name_mapper.network(session, id, name)
        LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s", {
            'id': id,
            'name': name,
            'aname': aname
        })
        dname = aim_utils.sanitize_display_name(name)

        aim_ctx = aim_context.AimContext(session)

        vrf = self._get_unrouted_vrf(aim_ctx)

        bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
                                       name=aname,
                                       display_name=dname,
                                       vrf_name=vrf.name,
                                       enable_arp_flood=True,
                                       enable_routing=False,
                                       limit_ip_learn_to_subnets=True)
        self.aim.create(aim_ctx, bd)

        epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                         app_profile_name=AP_NAME,
                                         name=aname,
                                         display_name=dname,
                                         bd_name=aname)
        self.aim.create(aim_ctx, epg)
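Note that the EPG's bd_name is set to the same aname as the BridgeDomain created just above it; that shared name links the two objects, and it also lets delete_network_precommit (Example #3) rebuild both resources from the network id alone.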
Example #14
Builds hash trees from a status object and checks that the tree builder does not re-add the already-synced parent.
    def test_sync_object_status(self):
        mgr = aim_manager.AimManager()
        epg = mgr.create(
            self.ctx,
            resource.EndpointGroup(tenant_name='test',
                                   app_profile_name='test',
                                   name='test',
                                   sync=False))
        status = mgr.get_status(self.ctx, epg)
        mgr.update(self.ctx, status, sync_status=status.SYNCED)
        tt_builder = tree_manager.HashTreeBuilder(mgr)
        trees = {}
        tt_maker = tree_manager.AimHashTreeMaker()
        key = tt_maker.get_root_key(epg)
        cfg = trees.setdefault(tt_builder.CONFIG,
                               {}).setdefault(key, tree.StructuredHashTree())
        mo = trees.setdefault(tt_builder.MONITOR,
                              {}).setdefault(key, tree.StructuredHashTree())
        oper = trees.setdefault(tt_builder.OPER,
                                {}).setdefault(key, tree.StructuredHashTree())

        tt_builder.build(
            [status], [], [], {
                tt_builder.CONFIG: {
                    key: cfg
                },
                tt_builder.MONITOR: {
                    key: mo
                },
                tt_builder.OPER: {
                    key: oper
                }
            },
            aim_ctx=self.ctx)
        # Should not add parent back
        exp_key = tt_maker._build_hash_tree_key(epg)
        self.assertIsNone(cfg.find(exp_key))
Example #15
Pushes ACI-level objects and faults into the universe and verifies the resulting AIM resources and their fault status.
    def test_push_resources(self):
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
        ap = self._get_example_aci_app_profile(dn='uni/tn-t1/ap-a1')
        ap_aim = resource.ApplicationProfile(tenant_name='t1', name='a1')
        epg = self._get_example_aci_epg(
            dn='uni/tn-t1/ap-a1/epg-test')
        fault = self._get_example_aci_fault(
            dn='uni/tn-t1/ap-a1/epg-test/fault-951')
        faul_aim = aim_status.AciFault(
            fault_code='951',
            external_identifier='uni/tn-t1/ap-a1/epg-test/fault-951')
        self.universe.push_resources(self.ctx, {'create': [ap, epg, fault],
                                                'delete': []})
        res = aim_mgr.get(self.ctx, resource.EndpointGroup(
            tenant_name='t1', app_profile_name='a1', name='test'))
        status = aim_mgr.get_status(self.ctx, res)
        self.assertEqual(1, len(status.faults))
        self.assertEqual('951', status.faults[0].fault_code)

        # Unset fault
        self.universe.push_resources(self.ctx, {'create': [],
                                                'delete': [faul_aim]})
        status = aim_mgr.get_status(self.ctx, res)
        self.assertEqual(0, len(status.faults))

        # create subject, and faults for subject-to-filter relation
        filter_objs = [
            {'vzBrCP': {'attributes': {'dn': 'uni/tn-t1/brc-c'}}},
            {'vzSubj': {'attributes': {'dn': 'uni/tn-t1/brc-c/subj-s2'}}},
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/intmnl/rsfiltAtt-f/fault-F1111',
                code='F1111'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/outtmnl/rsfiltAtt-g/fault-F1112',
                code='F1112'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/rssubjFiltAtt-h/fault-F1113',
                code='F1113')]
        self.universe.push_resources(self.ctx, {'create': filter_objs,
                                                'delete': []})
        subj = resource.ContractSubject(tenant_name='t1', contract_name='c',
                                        name='s2')
        status = aim_mgr.get_status(self.ctx, subj)
        self.assertEqual(2, len(status.faults))
        self.assertEqual(['F1111', 'F1112'],
                         [f.fault_code for f in status.faults])

        # delete filter faults
        self.universe.push_resources(self.ctx, {'create': [],
                                                'delete': status.faults})
        status = aim_mgr.get_status(self.ctx, subj)
        self.assertEqual(0, len(status.faults))
        # Managed epg
        managed_epg = resource.EndpointGroup(
            tenant_name='t1', app_profile_name='a1', name='managed')
        aim_mgr.create(self.ctx, managed_epg)
        # The EPG cannot be deleted since it is managed
        self.universe.push_resources(
            self.ctx, {'create': [], 'delete': [ap_aim, managed_epg]})
        res = aim_mgr.get(self.ctx, managed_epg)
        if self.monitor_universe:
            self.assertIsNotNone(res)
            aim_mgr.delete(self.ctx, managed_epg)
        else:
            self.assertIsNone(res)
Example #16
Exercises sync-failure handling: resources in error state are masked from tree diffs until their status is reset.
    def _test_sync_failed(self, monitored=False):
        tn_name = 'tn1'
        tn_rn = 'tn-' + tn_name
        tn = aim_res.Tenant(name=tn_name, monitored=monitored)
        ap = aim_res.ApplicationProfile(tenant_name=tn_name, name='ap',
                                        monitored=monitored)
        epg = aim_res.EndpointGroup(
            tenant_name=tn_name, app_profile_name='ap', name='epg',
            monitored=monitored, bd_name='some')
        epg2 = aim_res.EndpointGroup(
            tenant_name=tn_name, app_profile_name='ap', name='epg2',
            monitored=monitored, bd_name='some')
        empty_map = {True: tree_manager.CONFIG_TREE,
                     False: tree_manager.MONITORED_TREE}

        exp_tree = tree.StructuredHashTree()
        exp_empty_tree = tree.StructuredHashTree()
        # Add Tenant and AP
        tn = self.mgr.create(self.ctx, tn)
        self.mgr.set_resource_sync_synced(self.ctx, tn)
        ap = self.mgr.create(self.ctx, ap)
        self.mgr.set_resource_sync_synced(self.ctx, ap)
        epg2 = self.mgr.create(self.ctx, epg2)
        self.mgr.set_resource_sync_synced(self.ctx, epg2)
        epg = self.mgr.create(self.ctx, epg)
        self.mgr.set_resource_sync_synced(self.ctx, epg)
        # Set the EPG sync status to error
        self.mgr.set_resource_sync_error(self.ctx, epg)
        # Get the trees
        empty_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[monitored])
        configured_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[not monitored])

        epg._error = True
        self.db_l.tt_maker.update(exp_tree, [tn, ap, epg2, epg])
        self.assertEqual({'add': [], 'remove': []},
                         exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))
        # Even if something changes in the EPG, the difference will still
        # be empty
        epg.display_name = 'somethingelse'
        self.db_l.tt_maker.update(exp_tree, [epg])
        self.assertEqual({'add': [], 'remove': []},
                         exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))

        # Update the epg, which will be re-created. Note that no
        # attributes are actually changed
        epg = self.mgr.update(self.ctx, epg, bd_name='some')
        self.mgr.set_resource_sync_synced(self.ctx, epg)
        # Fix the expected tree as well
        self.db_l.tt_maker.update(exp_tree, [epg])
        # Get the trees
        empty_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[monitored])
        configured_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[not monitored])
        self.assertEqual(exp_tree, configured_tree)
        self.assertEqual(exp_empty_tree, empty_tree)

        # Modifying the EPG will make the difference visible
        epg.display_name = 'somethingelse'
        self.db_l.tt_maker.update(exp_tree, [epg])
        self.assertEqual(
            {'add': [('fvTenant|tn1', 'fvAp|ap', 'fvAEPg|epg')],
             'remove': []}, exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))

        # Set the AP in error state, which will affect all its children
        self.mgr.set_resource_sync_error(self.ctx, ap)
        empty_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[monitored])
        configured_tree = self.tt_mgr.get(
            self.ctx, tn_rn, tree=empty_map[not monitored])
        # This time around, the AP and both its EPGs are in error state
        ap._error = True
        epg._error = True
        epg2._error = True
        self.db_l.tt_maker.update(exp_tree, [ap, epg, epg2])
        self.assertEqual({'add': [], 'remove': []},
                         exp_tree.diff(configured_tree))
        self.assertEqual({'add': [], 'remove': []},
                         exp_empty_tree.diff(empty_tree))

        # All the objects are in failed state
        for obj in [ap, epg2, epg]:
            self.assertEqual(aim_status.AciStatus.SYNC_FAILED,
                             self.mgr.get_status(self.ctx, obj).sync_status)

        if not monitored:
            # Changing sync status of the EPG will bring everything back
            self.mgr.set_resource_sync_pending(self.ctx, epg)
            epg = self.mgr.get(self.ctx, epg)
            # All the objects are in pending state
            for obj in [ap, epg2, epg]:
                self.assertEqual(
                    aim_status.AciStatus.SYNC_PENDING,
                    self.mgr.get_status(self.ctx, obj).sync_status)
            empty_tree = self.tt_mgr.get(
                self.ctx, tn_rn, tree=empty_map[monitored])
            configured_tree = self.tt_mgr.get(
                self.ctx, tn_rn, tree=empty_map[not monitored])
            del ap._error
            del epg2._error
            self.db_l.tt_maker.update(exp_tree, [ap, epg, epg2])

            self.assertEqual(exp_tree, configured_tree)
            self.assertEqual(exp_empty_tree, empty_tree)
Example #17
Builds an EndpointGroup purely from the identity attributes stored in a network mapping record.
 def _get_network_epg(self, mapping):
     return aim_resource.EndpointGroup(
         tenant_name=mapping.epg_tenant_name,
         app_profile_name=mapping.epg_app_profile_name,
         name=mapping.epg_name)
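Because those three identity attributes are all AIM needs to address an EPG, the object returned here can be passed directly to manager calls such as the self.aim_mgr.get(aim_ctx, epg) lookup in Example #8.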
Example #18
CLI test for 'manager load-domains', covering pre-existing domains plus the --replace and --enforce flags.
    def test_load_domains(self):
        # create a VMM and PhysDom first
        pre_phys = resource.PhysicalDomain(name='pre-phys')
        pre_vmm = resource.VMMDomain(type='OpenStack', name='pre-vmm')
        ap = resource.ApplicationProfile(tenant_name='tn1', name='ap')
        pre_epg1 = resource.EndpointGroup(tenant_name='tn1',
                                          app_profile_name='ap',
                                          name='epg1')
        pre_epg2 = resource.EndpointGroup(tenant_name='tn1',
                                          app_profile_name='ap',
                                          name='epg2')
        self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
        self.mgr.create(self.ctx, ap)
        self.mgr.create(self.ctx, pre_phys)
        self.mgr.create(self.ctx, pre_vmm)
        self.mgr.create(self.ctx, pre_epg2)
        self.mgr.create(self.ctx, pre_epg1)
        self.run_command('manager load-domains')
        # Verify pre-existing domains are still there
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_phys))
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_vmm))
        # Also the Domains defined in the config files exist
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='OpenStack', name='ostack')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='OpenStack', name='ostack2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='VMware', name='vmware')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx,
                         resource.VMMDomain(type='VMware', name='vmware2')))
        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # Delete one of them, and use the replace flag
        self.mgr.delete(self.ctx,
                        resource.VMMDomain(type='OpenStack', name='ostack2'))
        self.run_command('manager load-domains --replace')

        # Now only the 2 domains of each type from the config exist
        self.assertEqual(4, len(self.mgr.find(self.ctx, resource.VMMDomain)))
        self.assertEqual(2,
                         len(self.mgr.find(self.ctx, resource.PhysicalDomain)))

        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # now update the current environment
        self.run_command('manager load-domains --replace --enforce')
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        def get_vmm(type, name):
            return {'type': type, 'name': name}

        def get_phys(name):
            return {'name': name}

        self.assertEqual(
            sorted([
                get_vmm('OpenStack', 'ostack'),
                get_vmm('OpenStack', 'ostack2'),
                get_vmm('VMware', 'vmware'),
                get_vmm('VMware', 'vmware2')
            ]), sorted(pre_epg1.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg1.physical_domains))
        self.assertEqual(
            sorted([
                get_vmm('OpenStack', 'ostack'),
                get_vmm('OpenStack', 'ostack2'),
                get_vmm('VMware', 'vmware'),
                get_vmm('VMware', 'vmware2')
            ]), sorted(pre_epg2.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg2.physical_domains))
Example #19
Variant of the load-domains test that also exercises --no-mappings and the wildcard HostDomainMappingV2 entries.
    def test_load_domains(self):
        # create a VMM and PhysDom first
        pre_phys = resource.PhysicalDomain(name='pre-phys')
        pre_vmm = resource.VMMDomain(type='OpenStack', name='pre-vmm')
        ap = resource.ApplicationProfile(tenant_name='tn1', name='ap')
        pre_epg1 = resource.EndpointGroup(
            tenant_name='tn1', app_profile_name='ap', name='epg1')
        pre_epg2 = resource.EndpointGroup(
            tenant_name='tn1', app_profile_name='ap', name='epg2')
        self.mgr.create(self.ctx, resource.Tenant(name='tn1'))
        self.mgr.create(self.ctx, ap)
        self.mgr.create(self.ctx, pre_phys)
        self.mgr.create(self.ctx, pre_vmm)
        self.mgr.create(self.ctx, pre_epg2)
        self.mgr.create(self.ctx, pre_epg1)
        self.run_command('manager load-domains --no-mappings')
        # Verify pre-existing domains are still there
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_phys))
        self.assertIsNotNone(self.mgr.get(self.ctx, pre_vmm))
        # Also the Domains defined in the config files exist
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.PhysicalDomain(name='phys2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                      name='ostack')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='OpenStack',
                                                      name='ostack2')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                      name='vmware')))
        self.assertIsNotNone(
            self.mgr.get(self.ctx, resource.VMMDomain(type='VMware',
                                                      name='vmware2')))
        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # Delete one of them, and use the replace flag
        self.mgr.delete(self.ctx, resource.VMMDomain(type='OpenStack',
                                                     name='ostack2'))
        self.run_command('manager load-domains --replace --no-mappings')

        # Now only the 2 domains of each type from the config exist
        self.assertEqual(4, len(self.mgr.find(self.ctx, resource.VMMDomain)))
        self.assertEqual(2, len(self.mgr.find(self.ctx,
                                              resource.PhysicalDomain)))

        # EPGs are still empty
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        self.assertEqual([], pre_epg1.vmm_domains)
        self.assertEqual([], pre_epg1.physical_domains)
        self.assertEqual([], pre_epg2.vmm_domains)
        self.assertEqual([], pre_epg2.physical_domains)

        # now update the current environment
        cmd = 'manager load-domains --replace --enforce --no-mappings'
        self.run_command(cmd)
        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        def get_vmm(type, name):
            return {'type': type, 'name': name}

        def get_phys(name):
            return {'name': name}

        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg1.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg1.physical_domains))
        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg2.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg2.physical_domains))

        # re-run the command, this time populating the domain mappings
        self.run_command('manager load-domains --replace --enforce')

        pre_epg1 = self.mgr.get(self.ctx, pre_epg1)
        pre_epg2 = self.mgr.get(self.ctx, pre_epg2)

        def get_vmm(type, name):
            return {'type': type, 'name': name}

        def get_phys(name):
            return {'name': name}

        # The load-domains command should create host domain mappings with
        # wildcard entries for every entry in the configuration file
        existing_mappings = [{'domain_type': 'PhysDom',
                              'host_name': '*',
                              'domain_name': 'phys'},
                             {'domain_type': 'PhysDom',
                              'host_name': '*',
                              'domain_name': 'phys2'},
                             {'domain_type': 'OpenStack',
                              'host_name': '*',
                              'domain_name': 'ostack'},
                             {'domain_type': 'OpenStack',
                              'host_name': '*',
                              'domain_name': 'ostack2'},
                             {'domain_type': 'VMware',
                              'host_name': '*',
                              'domain_name': 'vmware'},
                             {'domain_type': 'VMware',
                              'host_name': '*',
                              'domain_name': 'vmware2'}]
        for mapping in existing_mappings:
            mapping = infra.HostDomainMappingV2(
                host_name=mapping['host_name'],
                domain_name=mapping['domain_name'],
                domain_type=mapping['domain_type'])
            try:
                self.assertIsNotNone(self.mgr.get(self.ctx, mapping))
            except Exception:
                self.fail('expected host domain mapping to exist')

        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg1.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg1.physical_domains))
        self.assertEqual(sorted([get_vmm('OpenStack', 'ostack'),
                                 get_vmm('OpenStack', 'ostack2'),
                                 get_vmm('VMware', 'vmware'),
                                 get_vmm('VMware', 'vmware2')]),
                         sorted(pre_epg2.vmm_domains))
        self.assertEqual(sorted([get_phys('phys'),
                                 get_phys('phys2')]),
                         sorted(pre_epg2.physical_domains))

        # re-run the command, with host-specific domain mappings populated.
        # This should cause an exception
        self.mgr.create(self.ctx, infra.HostDomainMappingV2(
            host_name='host1',
            domain_name='ostack10',
            domain_type='OpenStack'))
        self.run_command('manager load-domains --enforce', raises=True)
Example #20
Migration test that back-fills the new host field on EPG static paths, device clusters, concrete device interfaces and L3Out interfaces from known host links.
    def test_host_data_migration(self):
        self.mgr.create(self.ctx, infra.HostLink(
            host_name='h1', interface_name='eth0', path='h1/path/VPC'))
        self.mgr.create(self.ctx, infra.HostLink(
            host_name='h1', interface_name='eth1', path='h1/path/2'))
        self.mgr.create(self.ctx, infra.HostLink(
            host_name='h1', interface_name='eth2', path='h1/path/VPC'))
        self.mgr.create(self.ctx, infra.HostLink(
            host_name='h2', interface_name='eth2', path='h2/path'))

        epg1 = self.mgr.create(self.ctx, resource.EndpointGroup(
            tenant_name='t1', app_profile_name='ap', name='epg1',
            static_paths=[{'path': 'h1/path/2', 'encap': '100'},
                          {'path': 'h2/path', 'encap': '100'},
                          {'path': 'not_known', 'encap': '100'}]))
        epg2 = self.mgr.create(self.ctx, resource.EndpointGroup(
            tenant_name='t1', app_profile_name='ap', name='epg2',
            static_paths=[{'path': 'h1/path/2', 'encap': '100'},
                          {'path': 'h1/path/VPC', 'encap': '100'}]))
        dc = self.mgr.create(self.ctx, service_graph.DeviceCluster(
            tenant_name='t2', name='dc',
            devices=[{'path': 'h1/path/2', 'name': '1'},
                     {'path': 'h2/path', 'name': '2'}]))
        cdi1 = self.mgr.create(self.ctx, service_graph.ConcreteDeviceInterface(
            tenant_name='t2', device_cluster_name='dc', device_name='1',
            name='dc', path='h1/path/VPC'))
        cdi2 = self.mgr.create(self.ctx, service_graph.ConcreteDeviceInterface(
            tenant_name='t2', device_cluster_name='dc', device_name='2',
            name='dc', path='h2/path'))
        l3out_iface1 = self.mgr.create(
            self.ctx, resource.L3OutInterface(
                tenant_name='t2', l3out_name='dc', node_profile_name='1',
                interface_profile_name='dc1', interface_path='h1/path/VPC'))
        l3out_iface2 = self.mgr.create(
            self.ctx, resource.L3OutInterface(
                tenant_name='t2', l3out_name='dc', node_profile_name='1',
                interface_profile_name='dc2', interface_path='h2/path'))
        add_host_column.migrate(self.ctx.db_session)
        epg1 = self.mgr.get(self.ctx, epg1)
        self.assertEqual(
            utils.deep_sort(
                [{'path': 'h1/path/2', 'encap': '100', 'host': 'h1'},
                 {'path': 'h2/path', 'encap': '100', 'host': 'h2'},
                 {'path': 'not_known', 'encap': '100'}]),
            utils.deep_sort(epg1.static_paths))
        epg2 = self.mgr.get(self.ctx, epg2)
        self.assertEqual(
            utils.deep_sort(
                [{'path': 'h1/path/2', 'encap': '100', 'host': 'h1'},
                 {'path': 'h1/path/VPC', 'encap': '100', 'host': 'h1'}]),
            utils.deep_sort(epg2.static_paths))
        dc = self.mgr.get(self.ctx, dc)
        self.assertEqual(
            utils.deep_sort(
                [{'path': 'h1/path/2', 'name': '1', 'host': 'h1'},
                 {'path': 'h2/path', 'name': '2', 'host': 'h2'}]),
            utils.deep_sort(dc.devices))
        cdi1 = self.mgr.get(self.ctx, cdi1)
        self.assertEqual('h1', cdi1.host)
        cdi2 = self.mgr.get(self.ctx, cdi2)
        self.assertEqual('h2', cdi2.host)
        l3out_iface1 = self.mgr.get(self.ctx, l3out_iface1)
        self.assertEqual('h1', l3out_iface1.host)
        l3out_iface2 = self.mgr.get(self.ctx, l3out_iface2)
        self.assertEqual('h2', l3out_iface2.host)