def extend_address_scope_dict(self, session, base_model, result):
    """Add the APIC VRF distinguished name and sync state to the dict.

    Extends the Neutron address-scope dict in `result` with the DN of
    the AIM VRF it maps to and the merged AIM sync status.
    """
    LOG.debug("APIC AIM MD extending dict for address scope: %s", result)
    tenant_id = result['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    scope_id = result['id']
    scope_name = result['name']
    aname = self.name_mapper.address_scope(session, scope_id, scope_name)
    LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': scope_id, 'name': scope_name, 'aname': aname})
    vrf = aim_resource.VRF(tenant_name=tenant_aname, name=aname)
    aim_ctx = aim_context.AimContext(session)
    # Start from SYNCED and let the VRF's actual AIM status downgrade it.
    sync_state = self._merge_status(aim_ctx, cisco_apic.SYNC_SYNCED, vrf)
    result[cisco_apic.DIST_NAMES] = {cisco_apic.VRF: vrf.dn}
    result[cisco_apic.SYNC_STATE] = sync_state
def update_router(self, context, current, original):
    """Propagate a router rename to its AIM Contract and ContractSubject.

    Only the display names are touched, and only when the Neutron name
    actually changed.
    """
    LOG.debug("APIC AIM MD updating router: %s", current)
    # Guard clause: nothing to do unless the name changed.
    if current['name'] == original['name']:
        return
    session = context.session
    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    router_id = current['id']
    router_name = current['name']
    aname = self.name_mapper.router(session, router_id, router_name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': router_id, 'name': router_name, 'aname': aname})
    dname = aim_utils.sanitize_display_name(router_name)
    aim_ctx = aim_context.AimContext(session)
    contract = self.aim.update(
        aim_ctx,
        aim_resource.Contract(tenant_name=tenant_aname, name=aname),
        display_name=dname)
    subject = self.aim.update(
        aim_ctx,
        aim_resource.ContractSubject(tenant_name=tenant_aname,
                                     contract_name=aname,
                                     name=ROUTER_SUBJECT_NAME),
        display_name=dname)
def update_address_scope_precommit(self, context):
    """Propagate an address-scope rename to its AIM VRF display name."""
    LOG.debug("APIC AIM MD updating address_scope: %s", context.current)
    # Only a name change requires touching AIM.
    if context.current['name'] == context.original['name']:
        return
    session = context._plugin_context.session
    tenant_id = context.current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    scope_id = context.current['id']
    scope_name = context.current['name']
    aname = self.name_mapper.address_scope(session, scope_id, scope_name)
    LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': scope_id, 'name': scope_name, 'aname': aname})
    dname = aim_utils.sanitize_display_name(scope_name)
    aim_ctx = aim_context.AimContext(session)
    vrf = self.aim.update(
        aim_ctx,
        aim_resource.VRF(tenant_name=tenant_aname, name=aname),
        display_name=dname)
def delete_address_scope_precommit(self, context):
    """Delete the AIM VRF backing an address scope and its name mapping."""
    LOG.debug("APIC AIM MD deleting address scope: %s", context.current)
    session = context._plugin_context.session
    tenant_id = context.current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    scope_id = context.current['id']
    scope_name = context.current['name']
    aname = self.name_mapper.address_scope(session, scope_id, scope_name)
    LOG.debug("Mapped address_scope_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': scope_id, 'name': scope_name, 'aname': aname})
    aim_ctx = aim_context.AimContext(session)
    self.aim.delete(aim_ctx,
                    aim_resource.VRF(tenant_name=tenant_aname, name=aname))
    # Remove the stored Neutron-id -> APIC-name mapping as well.
    self.name_mapper.delete_apic_name(session, scope_id)
def delete_network_precommit(self, context):
    """Delete the AIM EPG and BridgeDomain backing a Neutron network.

    The EPG is removed before the BD it belongs to, then the stored
    name mapping is dropped.
    """
    LOG.debug("APIC AIM MD deleting network: %s", context.current)
    session = context._plugin_context.session
    tenant_id = context.current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    net_id = context.current['id']
    net_name = context.current['name']
    aname = self.name_mapper.network(session, net_id, net_name)
    LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
              {'id': net_id, 'name': net_name, 'aname': aname})
    aim_ctx = aim_context.AimContext(session)
    self.aim.delete(
        aim_ctx,
        aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                   app_profile_name=AP_NAME,
                                   name=aname))
    self.aim.delete(
        aim_ctx,
        aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname))
    self.name_mapper.delete_apic_name(session, net_id)
def extend_network_dict(self, session, base_model, result):
    """Add BD/EPG distinguished names and sync state to a network dict."""
    LOG.debug("APIC AIM MD extending dict for network: %s", result)
    tenant_id = result['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    net_id = result['id']
    net_name = result['name']
    aname = self.name_mapper.network(session, net_id, net_name)
    LOG.debug("Mapped network_id %(id)s with name %(name)s to %(aname)s",
              {'id': net_id, 'name': net_name, 'aname': aname})
    bd = aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname)
    epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                     app_profile_name=AP_NAME,
                                     name=aname)
    aim_ctx = aim_context.AimContext(session)
    # Fold both resources' AIM statuses into a single sync state,
    # starting from SYNCED.
    sync_state = cisco_apic.SYNC_SYNCED
    for res in (bd, epg):
        sync_state = self._merge_status(aim_ctx, sync_state, res)
    result[cisco_apic.DIST_NAMES] = {cisco_apic.BD: bd.dn,
                                     cisco_apic.EPG: epg.dn}
    result[cisco_apic.SYNC_STATE] = sync_state
def do_sg_rule_remote_group_id_insertion(session):
    """Backfill remote_group_id onto existing AIM SecurityGroupRules.

    Walks every Neutron security-group rule and, when the rule has a
    remote_group_id and a matching AIM rule exists, copies the value
    into the AIM resource.
    """
    alembic_util.msg("Starting remote_group_id insertion for SG rules.")
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        rule_rows = session.query(
            sg_models.SecurityGroupRule).options(lazyload('*')).all()
        for row in rule_rows:
            remote_group_id = row.get('remote_group_id')
            if not remote_group_id:
                continue
            tenant_aname = mapper.project(session, row['tenant_id'])
            rule_aim = aim.get(aim_ctx, aim_resource.SecurityGroupRule(
                tenant_name=tenant_aname,
                security_group_name=row['security_group_id'],
                security_group_subject_name='default',
                name=row['id']))
            # Validation tool will add the missing SG rules
            # if there is any.
            if rule_aim:
                aim.update(aim_ctx, rule_aim,
                           remote_group_id=remote_group_id)
    alembic_util.msg("Finished remote_group_id insertion for SG rules.")
def update_network_precommit(self, context):
    """Propagate a network rename to its AIM BD and EPG display names."""
    LOG.debug("APIC AIM MD updating network: %s", context.current)
    # Guard clause: only a rename needs AIM updates.
    if context.current['name'] == context.original['name']:
        return
    session = context._plugin_context.session
    tenant_id = context.current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    net_id = context.current['id']
    net_name = context.current['name']
    aname = self.name_mapper.network(session, net_id, net_name)
    LOG.debug("Mapped network_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': net_id, 'name': net_name, 'aname': aname})
    dname = aim_utils.sanitize_display_name(context.current['name'])
    aim_ctx = aim_context.AimContext(session)
    bd = self.aim.update(
        aim_ctx,
        aim_resource.BridgeDomain(tenant_name=tenant_aname, name=aname),
        display_name=dname)
    epg = self.aim.update(
        aim_ctx,
        aim_resource.EndpointGroup(tenant_name=tenant_aname,
                                   app_profile_name=AP_NAME,
                                   name=aname),
        display_name=dname)
def _heartbeat_loop(self):
    """Send one heartbeat, then sleep out the rest of the report interval."""
    cycle_start = time.time()
    self._send_heartbeat(context.AimContext(store=api.get_store()))
    utils.wait_for_next_cycle(cycle_start, self.report_interval, LOG,
                              readable_caller='AID-HB',
                              notify_exceeding_timeout=False)
def upgrade():
    """Migrate host/domain mappings to the v2 table.

    Creates the new v2 mapping table, copies each existing
    HostDomainMapping row into it (one row per VMM/physical domain,
    tagged with its type), then deletes all rows from the old table.
    """
    # A model of the new domains table
    domainsv2 = op.create_table(
        AIM_HOST_DOMAIN_MAPPING_V2,
        sa.Column('host_name', sa.String(128)),
        sa.Column('domain_name', sa.String(64)),
        sa.Column('domain_type', sa.Enum('PhysDom', 'OpenStack',
                                         'Kubernetes', 'VMware')),
        sa.PrimaryKeyConstraint('host_name', 'domain_name', 'domain_type')
    )
    mgr = aim_manager.AimManager()
    ctx = context.AimContext(
        db_session=api.get_session(expire_on_commit=True))
    # Copy and delete inside one transaction so a failure leaves the old
    # table intact.
    with ctx.db_session.begin(subtransactions=True):
        migrations = []
        for mapping in mgr.find(ctx, infra.HostDomainMapping):
            # A single v1 row can yield up to two v2 rows: one for the
            # VMM domain and one for the physical domain.
            if mapping.vmm_domain_name:
                migrations.append({'host_name': mapping.host_name,
                                   'domain_name': mapping.vmm_domain_name,
                                   'domain_type': 'OpenStack'})
            if mapping.physical_domain_name:
                migrations.append(
                    {'host_name': mapping.host_name,
                     'domain_name': mapping.physical_domain_name,
                     'domain_type': 'PhysDom'})
        op.bulk_insert(domainsv2, migrations)
        # we can clear out the old table
        mgr.delete_all(ctx, infra.HostDomainMapping)
def initialize(self, store, conf_mgr, multiverse):
    """Initialize the universe's AIM context, retry policy and handlers.

    :param store: backing AIM store used to build this universe's context
    :param conf_mgr: configuration manager for retry-related options
    :param multiverse: list of universe pairs this universe belongs to
    :returns: self, to allow chained construction
    """
    super(HashTreeStoredUniverse, self).initialize(
        store, conf_mgr, multiverse)
    self.multiverse = multiverse
    self.context = context.AimContext(store=store)
    self.manager = aim_manager.AimManager()
    self.conf_manager = conf_mgr
    # Per-universe state and per-object failure bookkeeping.
    self._state = {}
    self.failure_log = {}
    self.max_create_retry = self.conf_manager.get_option(
        'max_operation_retry', 'aim')
    # Don't increase retry value if at least retry_cooldown seconds have
    # passed
    self.retry_cooldown = self.conf_manager.get_option(
        'retry_cooldown', 'aim')
    # Escalation thresholds derived from the base retry limit.
    self.reset_retry_limit = 2 * self.max_create_retry
    self.purge_retry_limit = 2 * self.reset_retry_limit
    # Dispatch table: error class -> recovery strategy.
    self.error_handlers = {
        errors.OPERATION_TRANSIENT: self._retry_until_max,
        errors.UNKNOWN: self._retry_until_max,
        errors.OPERATION_CRITICAL: self._surrender_operation,
        errors.SYSTEM_CRITICAL: self._fail_agent,
    }
    self._action_cache = {}
    return self
def hashtree(ctx):
    """Stash AIM context, tree manager and manager on the CLI context."""
    aim_ctx = context.AimContext(store=api.get_store(expire_on_commit=True))
    tree_mgr = tree_manager.HashTreeManager()
    mgr = aim_manager.AimManager()
    ctx.obj.update({'manager': mgr,
                    'tree_mgr': tree_mgr,
                    'aim_ctx': aim_ctx})
def extend_router_dict(self, session, base_model, result):
    """Add Contract/ContractSubject DNs and sync state to a router dict."""
    LOG.debug("APIC AIM MD extending dict for router: %s", result)
    tenant_id = result['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    router_id = result['id']
    router_name = result['name']
    aname = self.name_mapper.router(session, router_id, router_name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': router_id, 'name': router_name, 'aname': aname})
    contract = aim_resource.Contract(tenant_name=tenant_aname, name=aname)
    subject = aim_resource.ContractSubject(tenant_name=tenant_aname,
                                           contract_name=aname,
                                           name=ROUTER_SUBJECT_NAME)
    aim_ctx = aim_context.AimContext(session)
    # Fold both resources' AIM statuses into one sync state.
    sync_state = cisco_apic.SYNC_SYNCED
    for res in (contract, subject):
        sync_state = self._merge_status(aim_ctx, sync_state, res)
    result[cisco_apic.DIST_NAMES] = {
        cisco_apic_l3.CONTRACT: contract.dn,
        cisco_apic_l3.CONTRACT_SUBJECT: subject.dn}
    result[cisco_apic.SYNC_STATE] = sync_state
def delete_router(self, context, current):
    """Delete the AIM ContractSubject and Contract backing a router.

    The subject is removed before its parent contract, then the stored
    name mapping is dropped.
    """
    LOG.debug("APIC AIM MD deleting router: %s", current)
    session = context.session
    tenant_id = current['tenant_id']
    tenant_aname = self.name_mapper.tenant(session, tenant_id)
    LOG.debug("Mapped tenant_id %(id)s to %(aname)s",
              {'id': tenant_id, 'aname': tenant_aname})
    router_id = current['id']
    router_name = current['name']
    aname = self.name_mapper.router(session, router_id, router_name)
    LOG.debug("Mapped router_id %(id)s with name %(name)s to "
              "%(aname)s",
              {'id': router_id, 'name': router_name, 'aname': aname})
    aim_ctx = aim_context.AimContext(session)
    self.aim.delete(
        aim_ctx,
        aim_resource.ContractSubject(tenant_name=tenant_aname,
                                     contract_name=aname,
                                     name=ROUTER_SUBJECT_NAME))
    self.aim.delete(
        aim_ctx,
        aim_resource.Contract(tenant_name=tenant_aname, name=aname))
    self.name_mapper.delete_apic_name(session, router_id)
def __init__(self, conf):
    """Bootstrap the AID agent: config, universes, registration, loops.

    :param conf: loaded oslo-style configuration object (reads the
        'aim' group)
    """
    self.run_daemon_loop = True
    self.host = conf.aim.aim_service_identifier
    aim_ctx = context.AimContext(store=api.get_store())
    # This config manager is shared between multiple threads. Therefore
    # all DB activity through this config manager will use the same
    # DB session which can result in conflicts.
    # TODO(amitbose) Fix ConfigManager to not use cached AimContext
    self.conf_manager = aim_cfg.ConfigManager(aim_ctx, self.host)
    self.k8s_watcher = None
    self.single_aid = False
    # Kubernetes store runs a single AID instance with a watcher thread.
    if conf.aim.aim_store == 'k8s':
        self.single_aid = True
        self.k8s_watcher = k8s_watcher.K8sWatcher()
        self.k8s_watcher.run()
    self.multiverse = []
    # Define multiverse pairs, First position is desired state
    self.multiverse += [
        # Configuration Universe (AIM to ACI)
        {DESIRED: aim_universe.AimDbUniverse().initialize(
            self.conf_manager, self.multiverse),
         CURRENT: aci_universe.AciUniverse().initialize(
             self.conf_manager, self.multiverse)},
        # Operational Universe (ACI to AIM)
        {DESIRED: aci_universe.AciOperationalUniverse().initialize(
            self.conf_manager, self.multiverse),
         CURRENT: aim_universe.AimDbOperationalUniverse().initialize(
             self.conf_manager, self.multiverse)},
        # Monitored Universe (ACI to AIM)
        {DESIRED: aci_universe.AciMonitoredUniverse().initialize(
            self.conf_manager, self.multiverse),
         CURRENT: aim_universe.AimDbMonitoredUniverse().initialize(
             self.conf_manager, self.multiverse)},
    ]
    # Operational Universes. ACI operational info will be synchronized
    # into AIM's
    self.manager = aim_manager.AimManager()
    self.tree_manager = tree_manager.HashTreeManager()
    self.agent_id = 'aid-%s' % self.host
    self.agent = resource.Agent(id=self.agent_id, agent_type=AGENT_TYPE,
                                host=self.host, binary_file=AGENT_BINARY,
                                description=AGENT_DESCRIPTION,
                                version=AGENT_VERSION)
    # Register agent
    self.agent = self.manager.create(aim_ctx, self.agent, overwrite=True)
    # Report procedure should happen asynchronously
    # Subscribe so interval changes take effect without a restart.
    self.polling_interval = self.conf_manager.get_option_and_subscribe(
        self._change_polling_interval, 'agent_polling_interval',
        group='aim')
    self.report_interval = self.conf_manager.get_option_and_subscribe(
        self._change_report_interval, 'agent_report_interval', group='aim')
    self.squash_time = self.conf_manager.get_option_and_subscribe(
        self._change_squash_time, 'agent_event_squash_time', group='aim')
    self._spawn_heartbeat_loop()
    self.events = event_handler.EventHandler().initialize(
        self.conf_manager)
    # An agent missing 4 consecutive reports is considered down.
    self.max_down_time = 4 * self.report_interval
def _get_flowc_consumer_group(self, plugin_context, flowc):
    """Return the AIM group mapped to a flow classifier's source network."""
    aim_ctx = aim_context.AimContext(plugin_context.session)
    src_net = self._get_flowc_src_network(plugin_context, flowc)
    group = self._map_flowc_network_group(
        plugin_context, src_net, flowc['source_ip_prefix'], flowc,
        FLOWC_SRC)
    return self.aim.get(aim_ctx, group)
def tenant_creation_failed(self, aim_object, reason='unknown',
                           error=errors.UNKNOWN):
    """Record a failed creation for `aim_object` on a fresh AIM context."""
    # New context, sessions are not thread safe.
    fresh_ctx = aim_ctx.AimContext(store=api.get_store())
    self.creation_failed(fresh_ctx, aim_object, reason=reason, error=error)
def __init__(self, conf):
    """Set up the service's AIM context, config manager and event sender.

    NOTE(review): the `conf` parameter is unused — the host identifier is
    read from the global aim_cfg.CONF instead; confirm this is intended.
    """
    self.host = aim_cfg.CONF.aim.aim_service_identifier
    self.context = context.AimContext(store=api.get_store())
    self.conf_manager = aim_cfg.ConfigManager(self.context, self.host)
    # TODO(ivar): heartbeat for these services?
    self.sender = event_handler.EventSender()
    self.sender.initialize(self.conf_manager)
    self.run_daemon_loop = True
def _get_flowc_provider_group(self, plugin_context, flowc):
    """Return the AIM group mapped to a flow classifier's destination network."""
    aim_ctx = aim_context.AimContext(plugin_context.session)
    dst_net = self._get_flowc_dst_network(plugin_context, flowc)
    group = self._map_flowc_network_group(
        plugin_context, dst_net, flowc['destination_ip_prefix'], flowc,
        FLOWC_DST)
    return self.aim.get(aim_ctx, group)
def __init__(self, config):
    """Build the AIM manager wrapper and its snake-case resource lookup."""
    self.cfg = config
    self.ctx = context.AimContext(store=api.get_store())
    self.mgr = aim_manager.AimManager()
    # Map e.g. 'bridge_domain' -> BridgeDomain for every AIM resource.
    self.sneak_name_to_klass = {}
    for klass in self.mgr.aim_resources:
        self.sneak_name_to_klass[utils.camel_to_snake(klass.__name__)] = (
            klass)
def setUp(self, initialize_hooks=True):
    """Prepare an AIM test fixture for either the SQL or the k8s store.

    Selects the store from the environment, installs the per-store test
    hooks/monkey-patches with matching cleanups, then builds the shared
    store, context, config manager and hash-tree manager.
    """
    super(TestAimDBBase, self).setUp()
    self.test_id = uuidutils.generate_uuid()
    # Reset process-global state left over from previous tests.
    aim_cfg.OPTION_SUBSCRIBER_MANAGER = None
    aci_universe.ws_context = None
    if not os.environ.get(K8S_STORE_VENV):
        # --- SQL store path ---
        CONF.set_override('aim_store', 'sql', 'aim')
        self.engine = api.get_engine()
        # Create tables only once per process.
        if not TestAimDBBase._TABLES_ESTABLISHED:
            model_base.Base.metadata.create_all(self.engine)
            TestAimDBBase._TABLES_ESTABLISHED = True

        # Uncomment the line below to log SQL statements. Additionally, to
        # log results of queries, change INFO to DEBUG
        #
        # logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
        def clear_tables():
            # Empty every table between tests, children before parents.
            with self.engine.begin() as conn:
                for table in reversed(
                        model_base.Base.metadata.sorted_tables):
                    conn.execute(table.delete())
        self.addCleanup(clear_tables)
        # Patch the store's commit hook for tests; restore on cleanup.
        self.old_add_commit_hook = (
            aim_store.SqlAlchemyStore.add_commit_hook)
        aim_store.SqlAlchemyStore.add_commit_hook = _add_commit_hook

        def restore_commit_hook():
            aim_store.SqlAlchemyStore.add_commit_hook = (
                self.old_add_commit_hook)
        self.addCleanup(restore_commit_hook)
        aim_store.SqlAlchemyStore._after_transaction_end_2 = (
            _after_transaction_end_2)
    else:
        # --- Kubernetes store path ---
        CONF.set_override('aim_store', 'k8s', 'aim')
        # Isolate each test run in its own namespace.
        CONF.set_override('k8s_namespace', self.test_id, 'aim_k8s')
        k8s_config_path = os.environ.get(K8S_CONFIG_ENV)
        if k8s_config_path:
            CONF.set_override('k8s_config_path', k8s_config_path,
                              'aim_k8s')
        aim_store.K8sStore._post_delete = _k8s_post_delete
        aim_store.K8sStore._post_create = _k8s_post_create
        global k8s_watcher_instance
        k8s_watcher_instance = k8s_watcher.K8sWatcher()
        k8s_watcher_instance.event_handler = mock.Mock()
        k8s_watcher_instance._renew_klient_watch = mock.Mock()
        self.addCleanup(self._cleanup_objects)
    # Common fixture objects built on the selected store.
    self.store = api.get_store(expire_on_commit=True,
                               initialize_hooks=initialize_hooks)
    self.ctx = context.AimContext(store=self.store)
    self.cfg_manager = aim_cfg.ConfigManager(self.ctx, '')
    self.tt_mgr = tree_manager.HashTreeManager()
    # NOTE(review): patches equality for all ResourceBase instances
    # process-wide and is never restored — presumably intentional for
    # the whole test run.
    resource.ResourceBase.__eq__ = resource_equal
    self.cfg_manager.replace_all(CONF)
    self.sys_id = self.cfg_manager.get_option('aim_system_id', 'aim')
def _get_aim_contract(self, session, policy_rule_set):
    """Fetch from AIM the Contract mapped to a policy rule set, or None."""
    # This gets a Contract from the AIM DB
    aim_ctx = aim_context.AimContext(session)
    contract_fetched = self.aim.get(
        aim_ctx, self._aim_contract(session, policy_rule_set))
    if contract_fetched:
        LOG.debug("Got Contract: %s", contract_fetched.__dict__)
    else:
        LOG.debug("No Contract found in AIM DB")
    return contract_fetched
def _get_aim_endpoint_group(self, session, ptg):
    """Fetch from AIM the EPG mapped to a policy target group, or None."""
    # This gets an EPG from the AIM DB
    aim_ctx = aim_context.AimContext(session)
    epg_fetched = self.aim.get(aim_ctx,
                               self._aim_endpoint_group(session, ptg))
    if epg_fetched:
        LOG.debug("Got epg: %s", epg_fetched.__dict__)
    else:
        LOG.debug("No EPG found in AIM DB")
    return epg_fetched
def _get_tenant(self, tenant_name, should_exist=True):
    """Fetch a Tenant from AIM and assert its (non)existence."""
    aim_ctx = aim_context.AimContext(db_api.get_session())
    tenant = self.aim_mgr.get(aim_ctx,
                              aim_resource.Tenant(name=tenant_name))
    check = self.assertIsNotNone if should_exist else self.assertIsNone
    check(tenant)
    return tenant
def initialize(self, store, conf_mgr, multiverse):
    """Initialize the AIM-DB universe's managers, converters and state.

    :param store: backing AIM store used to build this universe's context
    :param conf_mgr: configuration manager (handled by the superclass)
    :param multiverse: list of universe pairs this universe belongs to
    :returns: self, to allow chained construction
    """
    super(AimDbUniverse, self).initialize(store, conf_mgr, multiverse)
    self.tree_manager = tree_manager.HashTreeManager()
    self.context = context.AimContext(store=store)
    # Converters between the ACI and AIM object models, both directions.
    self._converter = converter.AciToAimModelConverter()
    self._converter_aim_to_aci = converter.AimToAciModelConverter()
    self._served_tenants = set()
    # Track consecutive monitored-state update failures against a cap.
    self._monitored_state_update_failures = 0
    self._max_monitored_state_update_failures = 5
    return self
class aimCrud(object):
    """Thin CRUD helper around the AIM manager.

    NOTE(review): the class body below runs at import time — it loads
    /etc/aim/aim.conf, opens a DB session and builds a manager as soon
    as this module is imported. The `global` statement makes `aim_ctx`
    and `mgr` module-level globals rather than class attributes —
    confirm this is intentional.
    """
    global aim_ctx, mgr
    aim_config.init(['--config-file', '/etc/aim/aim.conf'])
    session = db_api.get_session(expire_on_commit=True)
    aim_ctx = aim_context.AimContext(db_session=session)
    mgr = aim_manager.AimManager()

    def update_contract_subject(self, cont_subj, **kwargs):
        # Stub: contract-subject update is not implemented yet.
        return "TBD"
def _add_implicit_svc_contracts_to_epg(self, context, l2p, aim_epg):
    """Attach the implicit and service contracts of an L2 policy to an EPG
    as consumed contracts."""
    session = context._plugin_context.session
    aim_ctx = aim_context.AimContext(session)
    tenant = l2p['tenant_id']
    # Resolve the mapped contract name for each well-known prefix.
    consumed = [
        str(self.name_mapper.policy_rule_set(session, tenant, tenant,
                                             prefix=prefix))
        for prefix in (alib.IMPLICIT_PREFIX, alib.SERVICE_PREFIX)]
    self._add_contracts_for_epg(aim_ctx, aim_epg,
                                consumed_contracts=consumed)
def _get_bd(self, bd_name, tenant_name, should_exist=True):
    """Fetch a BridgeDomain from AIM and assert its (non)existence."""
    aim_ctx = aim_context.AimContext(db_api.get_session())
    bd = self.aim_mgr.get(
        aim_ctx,
        aim_resource.BridgeDomain(tenant_name=tenant_name, name=bd_name))
    check = self.assertIsNotNone if should_exist else self.assertIsNone
    check(bd)
    return bd
def _get_contract(self, contract_name, tenant_name, should_exist=True):
    """Fetch a Contract from AIM and assert its (non)existence."""
    aim_ctx = aim_context.AimContext(db_api.get_session())
    contract = self.aim_mgr.get(
        aim_ctx,
        aim_resource.Contract(tenant_name=tenant_name,
                              name=contract_name))
    check = self.assertIsNotNone if should_exist else self.assertIsNone
    check(contract)
    return contract
def _get_filter(self, filter_name, tenant_name, should_exist=True):
    """Fetch a Filter from AIM and assert its (non)existence.

    :param filter_name: AIM name of the filter
    :param tenant_name: AIM name of the owning tenant
    :param should_exist: when True assert the filter exists, otherwise
        assert it does not
    :returns: the fetched Filter, or None when absent
    """
    session = db_api.get_session()
    aim_ctx = aim_context.AimContext(session)
    # 'flt' rather than 'filter' so the builtin is not shadowed.
    flt = aim_resource.Filter(tenant_name=tenant_name, name=filter_name)
    flt = self.aim_mgr.get(aim_ctx, flt)
    if should_exist:
        self.assertIsNotNone(flt)
    else:
        self.assertIsNone(flt)
    return flt