Example #1
def upgrade():
    # A model of the new domains table
    domainsv2 = op.create_table(
        AIM_HOST_DOMAIN_MAPPING_V2,
        sa.Column('host_name', sa.String(128)),
        sa.Column('domain_name', sa.String(64)),
        sa.Column('domain_type', sa.Enum('PhysDom',
                                         'OpenStack',
                                         'Kubernetes',
                                         'VMware')),
        sa.PrimaryKeyConstraint('host_name', 'domain_name', 'domain_type')
    )

    mgr = aim_manager.AimManager()
    ctx = context.AimContext(db_session=api.get_session(expire_on_commit=True))
    with ctx.db_session.begin(subtransactions=True):
        migrations = []
        for mapping in mgr.find(ctx, infra.HostDomainMapping):
            if mapping.vmm_domain_name:
                migrations.append({'host_name': mapping.host_name,
                                   'domain_name': mapping.vmm_domain_name,
                                   'domain_type': 'OpenStack'})
            if mapping.physical_domain_name:
                migrations.append({'host_name': mapping.host_name,
                                   'domain_name': mapping.physical_domain_name,
                                   'domain_type': 'PhysDom'})
        op.bulk_insert(domainsv2, migrations)
        # we can clear out the old table
        mgr.delete_all(ctx, infra.HostDomainMapping)
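An Alembic revision conventionally pairs upgrade() with a downgrade(), which this snippet omits. A minimal sketch of the reverse step, assuming the same AIM_HOST_DOMAIN_MAPPING_V2 constant (the rows already deleted from HostDomainMapping cannot be restored):

def downgrade():
    # Drop the v2 table created in upgrade() above; the old mappings
    # removed from HostDomainMapping are not recoverable here.
    op.drop_table(AIM_HOST_DOMAIN_MAPPING_V2)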
Example #2
def _initialize_hooks(self):
    self.register_before_session_flush_callback(
        'hashtree_db_listener_on_commit',
        ht_db_l.HashTreeDbListener(aim_manager.AimManager()).on_commit)
    self.register_after_transaction_ends_callback(
        'tree_creation_postcommit',
        rpc.AIDEventRpcApi().tree_creation_postcommit)
Example #3
def fix_no_nat_l3out_ownership(aim_ctx):
    """Relinquish ownership of no-NAT L3Outs in AIM and APIC."""
    saved_l3out_table = sa.Table(
        'aim_lib_save_l3out',
        sa.MetaData(),
        sa.Column('tenant_name', sa.String(), primary_key=True),
        sa.Column('name', sa.String(), primary_key=True),
        sa.Column('monitored', sa.Boolean, nullable=True),
        sa.Column('vrf_name', sa.String(), nullable=True))
    session = aim_ctx.store.db_session
    bind = session.get_bind()
    with session.begin(subtransactions=True):
        if not saved_l3out_table.exists(bind=bind):
            return
        results = session.execute(
            saved_l3out_table.select(saved_l3out_table.c.monitored.is_(True)))
        click.echo("Fixing ownership of no-NAT L3Outs")
        rows = results.fetchall()
        if rows:
            cfg_mgr = config.ConfigManager(aim_ctx)
            system_id = cfg_mgr.get_option('aim_system_id', 'aim')
            aim_mgr = aim_manager.AimManager()
            apic = aci_universe.AciUniverse.establish_aci_session(cfg_mgr)
            for row in rows:
                l3out = resource.L3Outside(tenant_name=row['tenant_name'],
                                           name=row['name'])
                aim_mgr.update(aim_ctx, l3out, monitored=True)
                tag_dn = "/mo/" + l3out.dn + "/tag-" + system_id
                click.echo('Deleting AIM tag %s' % tag_dn)
                apic.DELETE(tag_dn + ".json")
    # drop the table after the transaction completes because databases
    # like MySQL hold locks on the table
    saved_l3out_table.drop(bind=bind)
Example #4
def do_sg_rule_remote_group_id_insertion(session):
    alembic_util.msg("Starting remote_group_id insertion for SG rules.")

    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        sg_rule_dbs = (session.query(sg_models.SecurityGroupRule).options(
            lazyload('*')).all())
        for sg_rule_db in sg_rule_dbs:
            if sg_rule_db.get('remote_group_id'):
                tenant_aname = mapper.project(session, sg_rule_db['tenant_id'])
                sg_rule_aim = aim_resource.SecurityGroupRule(
                    tenant_name=tenant_aname,
                    security_group_name=sg_rule_db['security_group_id'],
                    security_group_subject_name='default',
                    name=sg_rule_db['id'])
                sg_rule_aim = aim.get(aim_ctx, sg_rule_aim)
                # The validation tool will add any missing SG rules.
                if sg_rule_aim:
                    aim.update(aim_ctx,
                               sg_rule_aim,
                               remote_group_id=sg_rule_db['remote_group_id'])

    alembic_util.msg("Finished remote_group_id insertion for SG rules.")
Example #5
def initialize(self, store, conf_mgr, multiverse):
    super(HashTreeStoredUniverse, self).initialize(store, conf_mgr,
                                                   multiverse)
    self.multiverse = multiverse
    self.context = context.AimContext(store=store)
    self.manager = aim_manager.AimManager()
    self.conf_manager = conf_mgr
    self._state = {}
    self.failure_log = {}
    self.max_create_retry = self.conf_manager.get_option(
        'max_operation_retry', 'aim')
    # Don't increase retry value if at least retry_cooldown seconds have
    # passed
    self.retry_cooldown = self.conf_manager.get_option(
        'retry_cooldown', 'aim')
    self.reset_retry_limit = 2 * self.max_create_retry
    self.purge_retry_limit = 2 * self.reset_retry_limit
    self.error_handlers = {
        errors.OPERATION_TRANSIENT: self._retry_until_max,
        errors.UNKNOWN: self._retry_until_max,
        errors.OPERATION_CRITICAL: self._surrender_operation,
        errors.SYSTEM_CRITICAL: self._fail_agent,
    }
    self._action_cache = {}
    return self
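The error_handlers table above maps error classes to recovery strategies, but the dispatch site is outside this snippet. A hypothetical sketch of how such a table is typically consulted (the method name and the fallback choice are assumptions, not the library's API):

def _handle_sync_error_sketch(self, error_type, *args):
    # Pick the handler registered for this error class; fall back to the
    # transient-retry path for unrecognized error types.
    handler = self.error_handlers.get(error_type, self._retry_until_max)
    return handler(*args)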
Example #6
    def test_cleanup_state(self, tree_type=tree_manager.CONFIG_TREE):
        tree_mgr = tree_manager.HashTreeManager()
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
        bd1 = resource.BridgeDomain(tenant_name='t1',
                                    name='bd1',
                                    display_name='somestuff',
                                    vrf_name='vrf')
        bd1_fault = aim_status.AciFault(
            fault_code='901',
            external_identifier='uni/tn-t1/BD-bd1/fault-901',
            description='failure901')

        aim_mgr.create(self.ctx, bd1)
        aim_mgr.set_fault(self.ctx, bd1, bd1_fault)
        self.assertRaises(Exception, self.universe.cleanup_state,
                          self.ctx, 'tn-t1')

        trees = tree_mgr.find(self.ctx, tree=tree_type)
        # tenant still there, trees not empty.
        self.assertEqual(1, len(trees))
        aim_mgr.clear_fault(self.ctx, bd1_fault)
        aim_mgr.delete(self.ctx, resource.Tenant(name='t1'), cascade=True)
        self.universe.cleanup_state(self.ctx, 'tn-t1')
        trees = tree_mgr.find(self.ctx, tree=tree_type)
        self.assertEqual(0, len(trees))
Example #7
def hashtree(ctx):
    aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
    tree_mgr = tree_manager.HashTreeManager()
    manager = aim_manager.AimManager()
    ctx.obj['manager'] = manager
    ctx.obj['tree_mgr'] = tree_mgr
    ctx.obj['aim_ctx'] = aim_ctx
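This is evidently the callback of a click command group (its decorators were lost in extraction): it stashes the shared manager objects in ctx.obj for subcommands to reuse. A hypothetical subcommand consuming them; the command name and output format are illustrative:

@hashtree.command(name='dump')
@click.pass_context
def dump(ctx):
    # Reuse the objects the group callback stored in ctx.obj.
    tree_mgr = ctx.obj['tree_mgr']
    aim_ctx = ctx.obj['aim_ctx']
    for t in tree_mgr.find(aim_ctx):
        click.echo(str(t))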
Example #8
def setUp(self):
    super(TestServer, self).setUp()
    self.addCleanup(root.shutdown)
    self.set_override('port', 0, 'aim_server')
    self.ip, self.port, self.root = root.run(config.CONF)
    self.uri = 'http://%s:%s' % (self.ip, self.port)
    self.mgr = aim_manager.AimManager()
Example #9
    def test_missing(self):
        depl = resource.VmmInjectedDeployment(
            **{
                'display_name': '',
                'name': 'kubedns',
                'replicas': 1,
                'domain_name': 'kube',
                'controller_name': 'kube',
                'domain_type': 'Kubernetes',
                'guid': 'a',
                'namespace_name': 'k'
            })
        ns = resource.VmmInjectedNamespace(
            **{
                'display_name': '',
                'name': 'k',
                'domain_name': 'kube',
                'controller_name': 'kube',
                'domain_type': 'Kubernetes'
            })
        updates = [depl, ns, ns]

        mgr = aim_manager.AimManager()

        tt_maker = tree_manager.AimHashTreeMaker()
        tt_builder = tree_manager.HashTreeBuilder(mgr)
        trees = {}

        exp_key = tt_maker._build_hash_tree_key(depl)

        for aim_res in updates:
            key = tt_maker.get_root_key(aim_res)
            if key and trees is not None:
                cfg = trees.setdefault(tt_builder.CONFIG, {}).setdefault(
                    key, tree.StructuredHashTree())
                mo = trees.setdefault(tt_builder.MONITOR,
                                      {}).setdefault(key,
                                                     tree.StructuredHashTree())
                oper = trees.setdefault(tt_builder.OPER, {}).setdefault(
                    key, tree.StructuredHashTree())

                tt_builder.build(
                    [aim_res], [], [], {
                        tt_builder.CONFIG: {
                            key: cfg
                        },
                        tt_builder.MONITOR: {
                            key: mo
                        },
                        tt_builder.OPER: {
                            key: oper
                        }
                    },
                    aim_ctx=self.ctx)

            if not isinstance(aim_res, resource.VmmInjectedDeployment):
                self.assertIsNotNone(cfg.find(exp_key),
                                     'Resource %s' % aim_res)
                self.assertIsNotNone(trees['config']['comp'].find(exp_key),
                                     'Resource %s' % aim_res)
Example #10
def _catch_up_logs(self, added, updated, removed):
    # Create new session and populate the hashtrees
    session = api.get_session(autocommit=True, expire_on_commit=True,
                              use_slave=False)
    store = aim_store.SqlAlchemyStore(session)
    ht_db_l.HashTreeDbListener(
        aim_manager.AimManager()).catch_up_with_action_log(store)
Example #11
    def __init__(self, conf):
        self.run_daemon_loop = True
        self.host = conf.aim.aim_service_identifier

        aim_ctx = context.AimContext(store=api.get_store())
        # This config manager is shared between multiple threads. Therefore
        # all DB activity through this config manager will use the same
        # DB session which can result in conflicts.
        # TODO(amitbose) Fix ConfigManager to not use cached AimContext
        self.conf_manager = aim_cfg.ConfigManager(aim_ctx, self.host)
        self.k8s_watcher = None
        self.single_aid = False
        if conf.aim.aim_store == 'k8s':
            self.single_aid = True
            self.k8s_watcher = k8s_watcher.K8sWatcher()
            self.k8s_watcher.run()

        self.multiverse = []
        # Define multiverse pairs; the first position is the desired state
        self.multiverse += [
            # Configuration Universe (AIM to ACI)
            {DESIRED: aim_universe.AimDbUniverse().initialize(
                self.conf_manager, self.multiverse),
             CURRENT: aci_universe.AciUniverse().initialize(
                 self.conf_manager, self.multiverse)},
            # Operational Universe (ACI to AIM)
            {DESIRED: aci_universe.AciOperationalUniverse().initialize(
                self.conf_manager, self.multiverse),
             CURRENT: aim_universe.AimDbOperationalUniverse().initialize(
                 self.conf_manager, self.multiverse)},
            # Monitored Universe (ACI to AIM)
            {DESIRED: aci_universe.AciMonitoredUniverse().initialize(
                self.conf_manager, self.multiverse),
             CURRENT: aim_universe.AimDbMonitoredUniverse().initialize(
                 self.conf_manager, self.multiverse)},
        ]
        # Operational Universes. ACI operational info will be synchronized into
        # AIM's
        self.manager = aim_manager.AimManager()
        self.tree_manager = tree_manager.HashTreeManager()
        self.agent_id = 'aid-%s' % self.host
        self.agent = resource.Agent(id=self.agent_id, agent_type=AGENT_TYPE,
                                    host=self.host, binary_file=AGENT_BINARY,
                                    description=AGENT_DESCRIPTION,
                                    version=AGENT_VERSION)
        # Register agent
        self.agent = self.manager.create(aim_ctx, self.agent, overwrite=True)
        # Report procedure should happen asynchronously
        self.polling_interval = self.conf_manager.get_option_and_subscribe(
            self._change_polling_interval, 'agent_polling_interval',
            group='aim')
        self.report_interval = self.conf_manager.get_option_and_subscribe(
            self._change_report_interval, 'agent_report_interval', group='aim')
        self.squash_time = self.conf_manager.get_option_and_subscribe(
            self._change_squash_time, 'agent_event_squash_time', group='aim')
        self._spawn_heartbeat_loop()
        self.events = event_handler.EventHandler().initialize(
            self.conf_manager)
        self.max_down_time = 4 * self.report_interval
Example #12
def __init__(self, config):
    self.cfg = config
    self.ctx = context.AimContext(store=api.get_store())
    self.mgr = aim_manager.AimManager()
    self.sneak_name_to_klass = {
        utils.camel_to_snake(x.__name__): x
        for x in self.mgr.aim_resources
    }
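The sneak_name_to_klass map presumably lets a user-facing snake_case type name select the corresponding AIM resource class. A hypothetical helper built on that map (the method name and the 'bridge_domain' key are illustrative):

def find_by_type_name(self, type_name):
    # Resolve a snake_case name (e.g. 'bridge_domain') to its resource
    # class and list all objects of that type via the shared manager.
    klass = self.sneak_name_to_klass.get(type_name)
    if klass is None:
        return []
    return self.mgr.find(self.ctx, klass)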
Example #13
def _initialize_hooks(self):
    self._hashtree_db_listener = ht_db_l.HashTreeDbListener(
        aim_manager.AimManager())
    self.register_update_listener('hashtree_db_listener_on_commit',
                                  self._hashtree_db_listener.on_commit)
    self.register_postcommit_listener(
        'tree_creation_postcommit',
        rpc.AIDEventRpcApi().tree_creation_postcommit)
Example #14
def setUp(self):
    super(TestNatStrategyBase, self).setUp()
    self.mgr = aim_manager.AimManager()
    self.ns = self.strategy(self.mgr)
    self.ns.app_profile_name = 'myapp'
    self.mgr.create(self.ctx, a_res.VMMPolicy(type='OpenStack'))
    self.mgr.create(self.ctx, a_res.VMMDomain(type='OpenStack',
                                              name='ostack'))
    self.mgr.create(self.ctx, a_res.PhysicalDomain(name='phys'))
Example #15
class aimCrud(object):
    global aim_ctx, mgr
    aim_config.init(['--config-file', '/etc/aim/aim.conf'])
    session = db_api.get_session(expire_on_commit=True)

    aim_ctx = aim_context.AimContext(db_session=session)
    mgr = aim_manager.AimManager()

    def update_contract_subject(self, cont_subj, **kwargs):
        return "TBD"
Example #16
    def initialize(self):
        LOG.info(_LI("APIC AIM MD initializing"))
        self.project_name_cache = cache.ProjectNameCache()
        self.db = model.DbModel()
        self.name_mapper = apic_mapper.APICNameMapper(self.db, log)
        self.aim = aim_manager.AimManager()

        # REVISIT(rkukura): Read from config or possibly from AIM?
        self.enable_dhcp_opt = True
        self.enable_metadata_opt = True

        self._setup_opflex_rpc_listeners()
Example #17
def test_agents_to_trees_association(self):
    # N, M association
    with self.ctx.store.begin(subtransactions=True):
        data = tree.StructuredHashTree().include([{
            'key': ('keyA', 'keyB')
        }, {
            'key': ('keyA', 'keyC')
        }, {
            'key': ('keyA', 'keyC', 'keyD')
        }])
        data2 = tree.StructuredHashTree().include([{
            'key': ('keyA1', 'keyB')
        }, {
            'key': ('keyA1', 'keyC')
        }, {
            'key': ('keyA1', 'keyC', 'keyD')
        }])
        data3 = tree.StructuredHashTree().include([{
            'key': ('keyA2', 'keyB')
        }, {
            'key': ('keyA2', 'keyC')
        }, {
            'key': ('keyA2', 'keyC', 'keyD')
        }])
        self.mgr.update_bulk(self.ctx, [data, data2, data3])
        agent1 = resource.Agent(agent_type='aid',
                                host='host',
                                binary_file='binary',
                                hash_trees=['keyA', 'keyA1', 'keyA2'],
                                version='1.0')
        agent2 = resource.Agent(agent_type='aid',
                                host='host2',
                                binary_file='binary',
                                hash_trees=['keyA', 'keyA2'],
                                version='1.0')
        agent1 = aim_manager.AimManager().create(self.ctx, agent1)
        agent2 = aim_manager.AimManager().create(self.ctx, agent2)
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']),
                     set(agent1.hash_trees))
    self.assertEqual(set(['keyA', 'keyA2']), set(agent2.hash_trees))
    # Empty agent2
    agent2 = aim_manager.AimManager().update(self.ctx,
                                             agent2,
                                             hash_trees=[])
    # Delete a tree
    self.mgr.delete(self.ctx, data)
    if self.ctx.store.supports_foreign_keys:
        agent1 = aim_manager.AimManager().get(self.ctx, agent1)
        self.assertEqual(set(['keyA1', 'keyA2']), set(agent1.hash_trees))
        self.assertEqual(set(), set(agent2.hash_trees))
        # Add rogue key
        self.assertRaises(exc.HashTreeNotFound,
                          aim_manager.AimManager().update,
                          self.ctx,
                          agent1,
                          hash_trees=['notakey'])
        # Verify agent1 was rolled back properly
        agent1 = aim_manager.AimManager().get(self.get_new_context(),
                                              agent1)
        self.assertEqual(set(['keyA1', 'keyA2']), set(agent1.hash_trees))
Example #18
def get_apic_manager():
    apic_config = config.CONF.apic
    network_config = {
        'vlan_ranges': apic_config.vlan_ranges,
        'switch_dict': cfg.create_switch_dictionary(),
        'vpc_dict': cfg.create_vpc_dictionary(apic_config),
        'external_network_dict': cfg.create_external_network_dictionary(),
    }
    aim_ctx = context.AimContext(store=api.get_store())
    manager = aim_manager.AimManager()
    db = infra_model.HostLinkManager(aim_ctx, manager)
    apic_system_id = config.CONF.apic_system_id
    return apic_manager.APICManager(db, logging, network_config, apic_config,
                                    None, None, apic_system_id)
Example #19
def upgrade(ctx, version):
    """Used for upgrading database."""
    version = version or 'head'
    ctx.obj['manager'].upgrade(version)

    # create common tenant
    aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
    aim_mgr = aim_manager.AimManager()
    common_tenant = resource.Tenant(name='common', monitored=True)
    if not aim_mgr.get(aim_ctx, common_tenant):
        aim_mgr.create(aim_ctx, common_tenant)

    fix_no_nat_l3out_ownership(aim_ctx)

    click.echo('Rebuilding hash-trees')
    _reset(aim_mgr)
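The _reset helper is not included in this snippet, but Example #24 below shows an expanded form of the same upgrade command; assuming _reset simply wraps that logic, it would look like:

def _reset(aim_mgr):
    # Presumed body, mirroring Example #24: rebuild the hash-trees to
    # account for schema/converter changes.
    aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
    listener = hashtree_db_listener.HashTreeDbListener(aim_mgr)
    listener.reset(aim_ctx.store)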
Example #20
    def setUp(self):
        # Enable the test mechanism driver to ensure that
        # we can successfully call through to all mechanism
        # driver apis.
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'apic_aim'],
                                     'ml2')
        config.cfg.CONF.set_override('extension_drivers',
                                     ['apic_aim'],
                                     'ml2')
        config.cfg.CONF.set_override('type_drivers',
                                     ['opflex', 'local', 'vlan'],
                                     'ml2')
        config.cfg.CONF.set_override('tenant_network_types',
                                     ['opflex'],
                                     'ml2')
        config.cfg.CONF.set_override('network_vlan_ranges',
                                     ['physnet1:1000:1099'],
                                     group='ml2_type_vlan')

        service_plugins = {
            'L3_ROUTER_NAT':
            'gbpservice.neutron.services.apic_aim.l3_plugin.ApicL3Plugin'}

        super(ApicAimTestCase, self).setUp(PLUGIN_NAME,
                                           service_plugins=service_plugins)
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.port_create_status = 'DOWN'

        self.saved_keystone_client = ksc_client.Client
        ksc_client.Client = FakeKeystoneClient

        engine = db_api.get_engine()
        aim_model_base.Base.metadata.create_all(engine)

        self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.start_rpc_listeners()
        self.driver = self.plugin.mechanism_manager.mech_drivers[
            'apic_aim'].obj
        self.l3_plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        self.aim_mgr = aim_manager.AimManager()
        self._app_profile_name = 'NeutronAP'
        self._tenant_name = self._map_name({'id': 'test-tenant',
                                            'name': 'TestTenantName'})
        self._unrouted_vrf_name = 'UnroutedVRF'
Example #21
    def test_session_rollback(self):
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
        ap = self._get_example_aci_app_profile(dn='uni/tn-t1/ap-a1')
        self.universe.context.store.begin = mock.Mock(
            side_effect=db_exc.DBError)
        rollback = self.universe.context.store.db_session.rollback
        self.called_count = 0

        def rollback_count():
            rollback()
            self.called_count += 1

        self.universe.context.store.db_session.rollback = rollback_count
        self.universe.push_resources({'create': [ap], 'delete': []})
        self.assertEqual(1, self.called_count)
        del self.called_count
Example #22
def initialize(self, conf_mgr, multiverse):
    super(HashTreeStoredUniverse, self).initialize(conf_mgr, multiverse)
    self.multiverse = multiverse
    self.manager = aim_manager.AimManager()
    self.conf_manager = conf_mgr
    self._state = {}
    self.max_create_retry = self.conf_manager.get_option(
        'max_operation_retry', 'aim')
    self.max_backoff_time = 600
    self.reset_retry_limit = 2 * self.max_create_retry
    self.purge_retry_limit = 2 * self.reset_retry_limit
    self.error_handlers = {
        errors.OPERATION_CRITICAL: self._surrender_operation,
        errors.SYSTEM_CRITICAL: self._fail_agent,
    }
    self._sync_log = {}
    return self
Example #23
def upgrade():

    op.add_column(
        'aim_statuses',
        sa.Column('resource_root',
                  sa.String(64),
                  nullable=False,
                  server_default='|unknown|'))
    mgr = aim_manager.AimManager()
    ctx = context.AimContext(db_session=api.get_session(expire_on_commit=True))
    with ctx.db_session.begin(subtransactions=True):
        for st in mgr.find(ctx, status.AciStatus):
            # We are changing an identity attribute
            db_obj = mgr._query_db_obj(ctx.store, st)
            parent = mgr.get_by_id(ctx, st.parent_class, st.resource_id)
            db_obj.resource_root = parent.root
            ctx.db_session.add(db_obj)
Example #24
def upgrade(ctx, version):
    """Used for upgrading database."""
    version = version or 'head'
    ctx.obj['manager'].upgrade(version)

    # create common tenant
    aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
    aim_mgr = aim_manager.AimManager()
    common_tenant = resource.Tenant(name='common', monitored=True)
    if not aim_mgr.get(aim_ctx, common_tenant):
        aim_mgr.create(aim_ctx, common_tenant)

    fix_no_nat_l3out_ownership(aim_ctx)

    click.echo('Rebuilding hash-trees')
    # reset hash-trees to account for schema/converter changes
    listener = hashtree_db_listener.HashTreeDbListener(aim_mgr)
    listener.reset(aim_ctx.store)
Example #25
def initialize(self):
    # TODO(ivar): SFC resource mapping to APIC DNs
    self._core_plugin = None
    self._flowc_plugin = None
    self._l3_plugin = None
    self._sfc_plugin = None
    self._aim_mech_driver = None
    self._aim_flowc_driver = None
    self.name_mapper = apic_mapper.APICNameMapper()
    self.aim = aim_manager.AimManager()
    # We don't care about deletion, that is managed by the database layer
    # (can't delete a flowclassifier if in use).
    for event in [events.PRECOMMIT_UPDATE, events.PRECOMMIT_CREATE]:
        registry.subscribe(self._handle_flow_classifier,
                           sfc_cts.GBP_FLOW_CLASSIFIER, event)
    registry.subscribe(self._handle_port_bound, sfc_cts.GBP_PORT,
                       events.PRECOMMIT_UPDATE)
    registry.subscribe(self._handle_net_gbp_change,
                       sfc_cts.GBP_NETWORK_EPG, events.PRECOMMIT_UPDATE)
    registry.subscribe(self._handle_net_gbp_change,
                       sfc_cts.GBP_NETWORK_VRF, events.PRECOMMIT_UPDATE)
Example #26
    def __init__(self, ctx=None, *args, **kwargs):
        self.ctx = ctx or context.AimContext(store=api.get_store())
        if 'streaming' not in self.ctx.store.features:
            # TODO(ivar) raise something meaningful
            raise Exception
        self.mgr = aim_manager.AimManager()
        self.tt_mgr = tree_manager.HashTreeManager()
        self.tt_maker = tree_manager.AimHashTreeMaker()
        self.tt_builder = tree_manager.HashTreeBuilder(self.mgr)
        self.klient = self.ctx.store.klient
        self.namespace = self.ctx.store.namespace
        self.trees = {}
        self.q = queue.Queue()
        self.event_handler = event_handler.EventHandler
        self._stop = False
        self._http_resp = None
        # Tenants whose trees need to be saved in AIM
        self.affected_tenants = set()
        self._observe_thread_state = {}

        self._k8s_types_to_observe = set([])
        self._k8s_aim_type_map = {}
        self._k8s_kinds = set([])
        self._needs_init = True

        for aim_res in aim_manager.AimManager.aim_resources:
            if issubclass(aim_res, resource.AciResourceBase):
                k8s_type = self.ctx.store.resource_to_db_type(aim_res)
                for ktype in [k8s_type] + list(k8s_type.aux_objects.values()):
                    self._k8s_types_to_observe.add(ktype)
                    self._k8s_kinds.add(ktype.kind)
                    if ktype != api_v1.AciContainersObject:
                        self._k8s_aim_type_map[ktype.kind] = (aim_res,
                                                              k8s_type)

        self._event_filters = {
            api_v1.Pod: self._pod_event_filter,
            api_v1.Endpoints: self._endpoints_event_filter
        }
Example #27
    def test_single_session_multi_objects(self):
        with self.ctx.store.begin(subtransactions=True):
            data = tree.StructuredHashTree().include([{
                'key': ('keyA', 'keyB')
            }, {
                'key': ('keyA', 'keyC')
            }, {
                'key': ('keyA', 'keyC', 'keyD')
            }])
            self.mgr.update(self.ctx, data)
            agent = resource.Agent(id='test',
                                   agent_type='aid',
                                   host='host3',
                                   binary_file='binary',
                                   hash_trees=['keyA'],
                                   version='1.0')
            agent = aim_manager.AimManager().create(self.ctx, agent)

        # Creation worked
        self.assertEqual('test', agent.id)
        data2 = self.mgr.find(self.ctx, root_rn=['keyA'])[0]
        self.assertEqual(['keyA'], agent.hash_trees)
        self.assertEqual(data, data2)
Example #28
    def test_sync_object_status(self):
        mgr = aim_manager.AimManager()
        epg = mgr.create(
            self.ctx,
            resource.EndpointGroup(tenant_name='test',
                                   app_profile_name='test',
                                   name='test',
                                   sync=False))
        status = mgr.get_status(self.ctx, epg)
        mgr.update(self.ctx, status, sync_status=status.SYNCED)
        tt_builder = tree_manager.HashTreeBuilder(mgr)
        trees = {}
        tt_maker = tree_manager.AimHashTreeMaker()
        key = tt_maker.get_root_key(epg)
        cfg = trees.setdefault(tt_builder.CONFIG,
                               {}).setdefault(key, tree.StructuredHashTree())
        mo = trees.setdefault(tt_builder.MONITOR,
                              {}).setdefault(key, tree.StructuredHashTree())
        oper = trees.setdefault(tt_builder.OPER,
                                {}).setdefault(key, tree.StructuredHashTree())

        tt_builder.build(
            [status], [], [], {
                tt_builder.CONFIG: {
                    key: cfg
                },
                tt_builder.MONITOR: {
                    key: mo
                },
                tt_builder.OPER: {
                    key: oper
                }
            },
            aim_ctx=self.ctx)
        # Should not add parent back
        exp_key = tt_maker._build_hash_tree_key(epg)
        self.assertIsNone(cfg.find(exp_key))
Example #29
    def test_push_resources_service_graph(self):
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))

        def create_delete_object(aim_obj, aci_obj, aci_faults):
            # create object and faults
            to_create = [aci_obj]
            to_create.extend(aci_faults)
            self.universe.push_resources(self.ctx, {'create': to_create,
                                                    'delete': []})

            self.assertIsNotNone(aim_mgr.get(self.ctx, aim_obj))
            status = aim_mgr.get_status(self.ctx, aim_obj)
            self.assertEqual(len(aci_faults), len(status.faults))
            self.assertEqual(sorted([f['faultInst']['attributes']['code']
                                     for f in aci_faults]),
                             sorted([f.fault_code for f in status.faults]))

            # delete filter faults
            self.universe.push_resources(self.ctx, {'create': [],
                                                    'delete': status.faults})
            status = aim_mgr.get_status(self.ctx, aim_obj)
            self.assertEqual(0, len(status.faults))

        # Objects with alt_resource
        dc1_aci = {'vnsLDevVip':
                   {'attributes': {'dn': 'uni/tn-t1/lDevVip-cl2'}}}
        dc1_fault_objs = [
            self._get_example_aci_fault(
                dn='uni/tn-t1/lDevVip-cl2/fault-F1110',
                code='F1110'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/lDevVip-cl2/lIf-interface/fault-F1111',
                code='F1111'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/lDevVip-cl2/cDev-n2/cIf-[interface]/fault-F1112',
                code='F1112')]
        dc1 = aim_service_graph.DeviceCluster(tenant_name='t1', name='cl2')

        create_delete_object(dc1, dc1_aci, dc1_fault_objs)

        sg1_aci = {'vnsAbsGraph':
                   {'attributes': {'dn': 'uni/tn-t1/AbsGraph-gr2'}}}
        sg1_fault_objs = [
            self._get_example_aci_fault(
                dn='uni/tn-t1/AbsGraph-gr2/fault-F1110',
                code='F1110'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/AbsGraph-gr2/AbsConnection-C1/fault-F1111',
                code='F1111'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/AbsGraph-gr2/AbsNode-N1/fault-F1112',
                code='F1112')]
        sg1 = aim_service_graph.ServiceGraph(tenant_name='t1', name='gr2')

        srp1_aci = {'vnsSvcRedirectPol':
                    {'attributes':
                     {'dn': 'uni/tn-t1/svcCont/svcRedirectPol-r2'}}}
        srp1_fault_objs = [
            self._get_example_aci_fault(
                dn='uni/tn-t1/svcCont/svcRedirectPol-r2/fault-F1111',
                code='F1111'),
            self._get_example_aci_fault(
                dn=('uni/tn-t1/svcCont/svcRedirectPol-r2/'
                    'RedirectDest_ip-[10.6.1.1]/fault-F1112'),
                code='F1112')]
        srp1 = aim_service_graph.ServiceRedirectPolicy(tenant_name='t1',
                                                       name='r2')

        dcc1_aci = {'vnsLDevCtx':
                    {'attributes':
                     {'dn': 'uni/tn-t1/ldevCtx-c-c1-g-g1-n-N1'}}}
        dcc1_fault_objs = [
            self._get_example_aci_fault(
                dn='uni/tn-t1/ldevCtx-c-c1-g-g1-n-N1/fault-F1111',
                code='F1111'),
            self._get_example_aci_fault(
                dn=('uni/tn-t1/ldevCtx-c-c1-g-g1-n-N1/lIfCtx-c-consumer/'
                    'fault-F1112'),
                code='F1112')]
        dcc1 = aim_service_graph.DeviceClusterContext(tenant_name='t1',
                                                      contract_name='c1',
                                                      service_graph_name='g1',
                                                      node_name='N1')

        create_delete_object(dc1, dc1_aci, dc1_fault_objs)
        create_delete_object(sg1, sg1_aci, sg1_fault_objs)
        create_delete_object(srp1, srp1_aci, srp1_fault_objs)
        create_delete_object(dcc1, dcc1_aci, dcc1_fault_objs)
Example #30
    def test_push_resources(self):
        aim_mgr = aim_manager.AimManager()
        aim_mgr.create(self.ctx, resource.Tenant(name='t1'))
        ap = self._get_example_aci_app_profile(dn='uni/tn-t1/ap-a1')
        ap_aim = resource.ApplicationProfile(tenant_name='t1', name='a1')
        epg = self._get_example_aci_epg(
            dn='uni/tn-t1/ap-a1/epg-test')
        fault = self._get_example_aci_fault(
            dn='uni/tn-t1/ap-a1/epg-test/fault-951')
        faul_aim = aim_status.AciFault(
            fault_code='951',
            external_identifier='uni/tn-t1/ap-a1/epg-test/fault-951')
        self.universe.push_resources(self.ctx, {'create': [ap, epg, fault],
                                                'delete': []})
        res = aim_mgr.get(self.ctx, resource.EndpointGroup(
            tenant_name='t1', app_profile_name='a1', name='test'))
        status = aim_mgr.get_status(self.ctx, res)
        self.assertEqual(1, len(status.faults))
        self.assertEqual('951', status.faults[0].fault_code)

        # Unset fault
        self.universe.push_resources(self.ctx, {'create': [],
                                                'delete': [faul_aim]})
        status = aim_mgr.get_status(self.ctx, res)
        self.assertEqual(0, len(status.faults))

        # create subject, and faults for subject-to-filter relation
        filter_objs = [
            {'vzBrCP': {'attributes': {'dn': 'uni/tn-t1/brc-c'}}},
            {'vzSubj': {'attributes': {'dn': 'uni/tn-t1/brc-c/subj-s2'}}},
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/intmnl/rsfiltAtt-f/fault-F1111',
                code='F1111'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/outtmnl/rsfiltAtt-g/fault-F1112',
                code='F1112'),
            self._get_example_aci_fault(
                dn='uni/tn-t1/brc-c/subj-s2/rssubjFiltAtt-h/fault-F1113',
                code='F1113')]
        self.universe.push_resources(self.ctx, {'create': filter_objs,
                                                'delete': []})
        subj = resource.ContractSubject(tenant_name='t1', contract_name='c',
                                        name='s2')
        status = aim_mgr.get_status(self.ctx, subj)
        self.assertEqual(2, len(status.faults))
        self.assertEqual(['F1111', 'F1112'],
                         [f.fault_code for f in status.faults])

        # delete filter faults
        self.universe.push_resources(self.ctx, {'create': [],
                                                'delete': status.faults})
        status = aim_mgr.get_status(self.ctx, subj)
        self.assertEqual(0, len(status.faults))
        # Managed epg
        managed_epg = resource.EndpointGroup(
            tenant_name='t1', app_profile_name='a1', name='managed')
        aim_mgr.create(self.ctx, managed_epg)
        # EPG cannot be deleted since it is managed
        self.universe.push_resources(
            self.ctx, {'create': [], 'delete': [ap_aim, managed_epg]})
        res = aim_mgr.get(self.ctx, managed_epg)
        if self.monitor_universe:
            self.assertIsNotNone(res)
            aim_mgr.delete(self.ctx, managed_epg)
        else:
            self.assertIsNone(res)
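Taken together, these examples exercise a small CRUD surface on aim_manager.AimManager. A minimal self-contained round-trip, assuming a configured AIM store; the import paths are best-effort assumptions for this codebase:

from aim import aim_manager
from aim import context
from aim.api import resource
from aim.db import api  # assumed module providing get_store

mgr = aim_manager.AimManager()
ctx = context.AimContext(store=api.get_store())

# create / update / get / delete round-trip on a Tenant
tenant = mgr.create(ctx, resource.Tenant(name='demo'), overwrite=True)
tenant = mgr.update(ctx, tenant, display_name='Demo Tenant')
assert mgr.get(ctx, resource.Tenant(name='demo')) is not None
mgr.delete(ctx, tenant)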