def do_sg_rule_remote_group_id_insertion(session):
    """Backfill remote_group_id on existing AIM SecurityGroupRule resources.

    Walks every Neutron security-group-rule row and, for rules that carry a
    remote_group_id, copies that value onto the corresponding AIM
    SecurityGroupRule (looked up by tenant/SG/rule-id under the 'default'
    subject). Rules missing in AIM are left for the validation tool to
    recreate.

    :param session: SQLAlchemy session bound to the Neutron database.
    """
    alembic_util.msg("Starting remote_group_id insertion for SG rules.")
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        # lazyload('*') avoids eager-loading relationships we do not need.
        sg_rule_dbs = (session.query(sg_models.SecurityGroupRule).options(
            lazyload('*')).all())
        for sg_rule_db in sg_rule_dbs:
            if sg_rule_db.get('remote_group_id'):
                # Map the Neutron project to the AIM tenant name.
                tenant_aname = mapper.project(session,
                                              sg_rule_db['tenant_id'])
                sg_rule_aim = aim_resource.SecurityGroupRule(
                    tenant_name=tenant_aname,
                    security_group_name=sg_rule_db['security_group_id'],
                    security_group_subject_name='default',
                    name=sg_rule_db['id'])
                sg_rule_aim = aim.get(aim_ctx, sg_rule_aim)
                # Validation tool will add the missing SG rules
                # if there is any.
                if sg_rule_aim:
                    aim.update(aim_ctx, sg_rule_aim,
                               remote_group_id=sg_rule_db['remote_group_id'])
    alembic_util.msg("Finished remote_group_id insertion for SG rules.")
def upgrade():
    """Create the PowerVC mapping table unless it already exists.

    Earlier releases did not use alembic (or any other migration tool), so
    on upgrade the table may already be present; probe for it first and
    skip creation in that case.
    """
    url = context.config.powervc_config.DATABASE.connection
    engine = session.create_engine(url)
    # In previous release, we do not use alembic or any other migration,
    # as we need to support migration case, we need to check if the table
    # exists or not
    connection = engine.connect()
    try:
        table_exists = engine.dialect.has_table(connection, tablename)
    finally:
        # Fix: the probe connection was previously never closed, leaking a
        # pooled connection for the lifetime of the process.
        connection.close()
    if table_exists:
        # Fix: corrected the ungrammatical "table has been already exists!"
        # log message.
        alembic_util.msg("table already exists!")
        return
    op.create_table(
        tablename,
        sa.Column('id', sa.String(36), primary_key=True,
                  default=uuidutils.generate_uuid),
        sa.Column('obj_type', sa.Enum(constants.OBJ_TYPE_NETWORK,
                                      constants.OBJ_TYPE_SUBNET,
                                      constants.OBJ_TYPE_PORT,
                                      name='mapping_object_type'),
                  nullable=False),
        sa.Column('status', sa.Enum(constants.STATUS_CREATING,
                                    constants.STATUS_ACTIVE,
                                    constants.STATUS_DELETING,
                                    name='mapping_state'),
                  nullable=False),
        sa.Column('sync_key', sa.String(255), nullable=False),
        sa.Column('local_id', sa.String(36)),
        sa.Column('pvc_id', sa.String(36)),
        sa.Column('update_data', sa.String(512))
    )
def do_alembic_command(config, cmd, *args, **kwargs):
    """Run the named alembic command for this project's config.

    Announces the command, delegates to alembic, aborts via
    alembic_util.err() on CommandError, and prints "OK" on success.
    """
    project = config.get_main_option("neutron_project")
    banner_args = {"cmd": cmd, "project": project}
    alembic_util.msg(_("Running %(cmd)s for %(project)s ...") % banner_args)
    try:
        command_fn = getattr(alembic_command, cmd)
        command_fn(config, *args, **kwargs)
    except alembic_util.CommandError as e:
        alembic_util.err(six.text_type(e))
    alembic_util.msg(_("OK"))
def do_alembic_command(config, cmd, *args, **kwargs):
    """Dispatch *cmd* to alembic for *config*, logging progress.

    CommandError from alembic is reported through alembic_util.err();
    any other exception propagates to the caller.
    """
    project = config.get_main_option('neutron_project')
    alembic_util.msg(
        _('Running %(cmd)s for %(project)s ...')
        % {'cmd': cmd, 'project': project})
    try:
        # alembic exposes each sub-command as a module-level callable.
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as err:
        alembic_util.err(six.text_type(err))
    alembic_util.msg(_('OK'))
def do_alembic_command(config, cmd, *args, **kwargs):
    """Execute an alembic sub-command and report its outcome.

    Logs a banner naming the command and project, runs the command, and
    converts alembic CommandError into an alembic_util.err() abort.
    """
    banner = _('Running %(cmd)s for %(project)s ...') % {
        'cmd': cmd,
        'project': config.get_main_option('neutron_project'),
    }
    alembic_util.msg(banner)
    try:
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as exc:
        alembic_util.err(six.text_type(exc))
    alembic_util.msg(_('OK'))
def main():
    """CLI entry point: run the selected command over every alembic config.

    Returns the OR of the per-config command results (truthy when any
    config reported work pending).
    """
    CONF(project='neutron')
    validate_cli_options()
    # TODO(gongysh) enable logging
    pending = False
    for cfg in get_alembic_configs():
        # Evaluate the command first so every config is always processed.
        pending = bool(CONF.command.func(cfg, CONF.command.name)) or pending
    if CONF.command.name == 'has_offline_migrations' and not pending:
        alembic_util.msg(_('No offline migrations pending.'))
    return pending
def init():
    """
    Prepare directory with alembic.ini, mako-files and directory for
    migrations.

    Part of functional was copied from original Alembic Init
    but changed some things with config
    """
    if os.access(config.alembic_dir, os.F_OK):
        raise util.CommandError("Directory {} already exists".format(
            config.alembic_dir))
    # Fix: os.path.join() was being called with a single, string-concatenated
    # argument (template_path + template_name), which yields a wrong path
    # unless template_path happens to end with a separator. Join the two
    # components properly.
    template_dir = os.path.join(config.template_path, config.template_name)
    if not os.access(template_dir, os.F_OK):
        raise util.CommandError("No such template {}".format(template_dir))
    util.status(
        "Creating directory {}".format(os.path.abspath(
            config.alembic_dir)), os.makedirs, config.alembic_dir)
    versions = os.path.join(config.alembic_dir, 'versions')
    util.status("Creating directory %s" % os.path.abspath(versions),
                os.makedirs, versions)
    script = ScriptDirectory(config.alembic_dir)
    dirs = os.listdir(template_dir)
    # The version-history bootstrap migration ships alongside the template
    # files and must be copied into versions/ as well.
    dirs += [
        'versions/create_table_alembic_version_history.py',
    ]
    for file_ in dirs:
        file_path = os.path.join(template_dir, file_)
        if file_ == 'alembic.ini.mako':
            config_file = os.path.abspath('alembic.ini')
            if os.access(config_file, os.F_OK):
                util.msg(
                    "File {} already exists, skipping".format(config_file))
            else:
                script._generate_template(
                    os.path.join(template_dir, 'alembic.ini.mako'),
                    os.path.join(config.alembic_dir, 'alembic.ini'),
                    script_location=config.alembic_dir)
        elif os.path.isfile(file_path):
            output_file = os.path.join(config.alembic_dir, file_)
            script._copy_file(file_path, output_file)
    util.msg("Please edit configuration/connection/logging "
             "settings in {} before proceeding.".format(
                 os.path.join(config.alembic_dir, 'alembic.ini')))
def has_offline_migrations(config, cmd):
    """Return True when the contract branch has revisions not yet applied.

    A pending contract revision means all neutron-server instances must be
    shut down before proceeding with the upgrade.
    """
    contract_head = _get_heads_map(config)[CONTRACT_BRANCH]
    if contract_head in _get_current_database_heads(config):
        return False
    # If there is at least one contract revision not applied to database,
    # it means we should shut down all neutron-server instances before
    # proceeding with upgrade.
    project = config.get_main_option('neutron_project')
    alembic_util.msg(
        _('Need to apply migrations from %(project)s '
          'contract branch. This will require all Neutron '
          'server instances to be shutdown before '
          'proceeding with the upgrade.') % {"project": project})
    return True
def has_offline_migrations(config, cmd):
    """Report whether offline (contract-branch) migrations are pending.

    Returns True — after warning the operator that all Neutron server
    instances must be stopped — when the contract branch head is not among
    the database's current heads; False otherwise.
    """
    db_heads = _get_current_database_heads(config)
    pending = _get_heads_map(config)[CONTRACT_BRANCH] not in db_heads
    if pending:
        # At least one contract revision has not been applied; the upgrade
        # cannot proceed while neutron-server instances are running.
        alembic_util.msg(_('Need to apply migrations from %(project)s '
                           'contract branch. This will require all Neutron '
                           'server instances to be shutdown before '
                           'proceeding with the upgrade.')
                         % {"project": config.get_main_option(
                             'neutron_project')})
    return pending
def main():
    """CLI entry point: configure logging, then run the chosen command.

    Executes the command for every alembic config and returns a truthy
    value when any config reported pending work.
    """
    # Interpret the config file for Python logging.
    # This line sets up loggers basically.
    logging_config.fileConfig(neutron_alembic_ini)
    CONF(project='neutron')
    # TODO(gongysh) enable logging
    result = False
    for cfg in get_alembic_configs():
        # Always invoke the command so side effects apply to every config.
        result = bool(CONF.command.func(cfg, CONF.command.name)) or result
    if CONF.command.name == 'has_offline_migrations' and not result:
        alembic_util.msg(_('No offline migrations pending.'))
    return result
def merge(self):
    """Interactively merge two migration heads when more than one exists.

    Lists every pairwise merge choice, prompts the operator on stdin for a
    1-based selection, and delegates to alembic's ``command.merge``.
    Prints a message and does nothing when fewer than two heads exist.
    """
    revision_heads = [head for head in self.heads]
    if len(revision_heads) < 2:
        util.msg('There are not migrations for merge')
    else:
        print('\n\n-------------------------------------------------')
        # Enumerate every candidate pair of heads that could be merged.
        merge_choices = [
            choice for choice in self.__merge_choices__(revision_heads)
        ]
        for choice in merge_choices:
            rev_1 = choice['migration1']
            rev_2 = choice['migration2']
            rev_1_branch = self.__branch_name__(rev_1)
            rev_2_branch = self.__branch_name__(rev_2)
            util.msg('{}) {}:{} -> {}:{}'.format(
                choice["inc"], rev_1_branch, rev_1, rev_2_branch, rev_2))
        util.msg('-------------------------------------------------\n\n')
        choice = input('Choose migration:\n')
        try:
            # Non-numeric input falls through to the ValueError handler.
            choice = int(choice)
            if not (1 <= choice <= len(merge_choices)):
                util.msg('Input value must be between 1 and {}'.format(
                    len(merge_choices)))
                # Out-of-range selection terminates the process.
                exit(0)
            rev_1 = merge_choices[choice - 1]['migration1']
            rev_2 = merge_choices[choice - 1]['migration2']
            command.merge(self.init_config,
                          revisions=[rev_2.revision, rev_1.revision],
                          message='merge_{}({})_into_{}({})'.format(
                              rev_1, rev_1.branch_labels, rev_2,
                              rev_2.branch_labels))
        except ValueError:
            util.msg('Your choice must be of int data type')
def create(self, name):
    """Create a new migration revision.

    Refuses to create one while multiple heads exist — the operator must
    merge first (``merge()`` is invoked to assist).

    :param name: name of new migration
    """
    head_count = len(list(self.heads))
    if head_count >= 2:
        util.msg('There are {} heads.\n'
                 'You must merge migrations first.'.format(head_count))
        self.merge()
    else:
        self.__set_branch_to_script__()
        command.revision(self.init_config, name)
def upgrade():
    """Backfill svi=False on the aim network-extension row of each network.

    Networks without an aim_extension_mapping (legacy plugin) are skipped.
    An UPDATE is attempted first; when no extension row exists yet an
    INSERT creates one.
    """
    session = sa.orm.Session(bind=op.get_bind(), autocommit=True)
    with session.begin(subtransactions=True):
        # Migrate networks.
        for network in session.query(models_v2.Network).options(
                lazyload('*')).all():
            # Skip applying this update if using a legacy plugin (no
            # aim_extension_mapping attribute, or it is None).
            if getattr(network, 'aim_extension_mapping', None) is None:
                continue
            util.msg("Migrating network: %s" % network)
            # If this update is successful then it means its an external
            # network with its DN set.
            update_stmt = NetworkExtensionDb.update().values(
                svi=False).where(
                    NetworkExtensionDb.c.network_id == network.id)
            if session.execute(update_stmt).rowcount == 0:
                session.execute(NetworkExtensionDb.insert().values(
                    network_id=network.id, svi=False))
def upgrade():
    """Ensure every mapped network's aim extension row has svi=False.

    Skips networks that lack an aim_extension_mapping (legacy plugin).
    Tries an UPDATE of the existing extension row first and falls back to
    an INSERT when no row exists.
    """
    session = sa.orm.Session(bind=op.get_bind(), autocommit=True)
    with session.begin(subtransactions=True):
        # Migrate networks.
        net_dbs = (session.query(models_v2.Network)
                   .options(lazyload('*')).all())
        for net_db in net_dbs:
            # Skip applying this update if using a legacy plugin
            if not hasattr(net_db, 'aim_extension_mapping') or (
                    net_db.aim_extension_mapping is None):
                continue
            util.msg("Migrating network: %s" % net_db)
            # If this update is successful then it means its an external
            # network with its DN set.
            res = session.execute(NetworkExtensionDb.update().values(
                svi=False).where(
                    NetworkExtensionDb.c.network_id == net_db.id))
            if res.rowcount == 0:
                # No pre-existing extension row; create one.
                session.execute(NetworkExtensionDb.insert().values(
                    network_id=net_db.id, svi=False))
def init(config, directory, template='generic'):
    """Initialize a new scripts directory."""
    if os.access(directory, os.F_OK):
        raise util.CommandError("Directory %s already exists" % directory)

    template_dir = os.path.join(config.get_template_directory(), template)
    if not os.access(template_dir, os.F_OK):
        raise util.CommandError("No such template %r" % template)

    # Create the scripts directory, then its versions/ subdirectory.
    for target in (directory, os.path.join(directory, 'versions')):
        util.status("Creating directory %s" % os.path.abspath(target),
                    os.makedirs, target)

    script = ScriptDirectory(directory)
    for entry in os.listdir(template_dir):
        entry_path = os.path.join(template_dir, entry)
        if entry == 'alembic.ini.mako':
            # Render the ini template into the caller's config file unless
            # that file already exists.
            config_file = os.path.abspath(config.config_file_name)
            if os.access(config_file, os.F_OK):
                util.msg("File %s already exists, skipping" % config_file)
            else:
                script._generate_template(
                    entry_path, config_file, script_location=directory)
        elif os.path.isfile(entry_path):
            script._copy_file(entry_path, os.path.join(directory, entry))

    util.msg("Please edit configuration/connection/logging "
             "settings in %r before proceeding." % config_file)
def do_alembic_command(config, cmd, revision=None, desc=None, **kwargs):
    """Run an alembic command, optionally scoped to *revision*.

    Announces the command (including *desc* when given) for this project,
    aborts via alembic_util.err() on CommandError, prints "OK" on success.
    """
    args = [revision] if revision else []
    project = config.get_main_option('neutron_project')
    params = {'cmd': cmd, 'desc': desc, 'project': project}
    if desc:
        template = _('Running %(cmd)s (%(desc)s) for %(project)s ...')
    else:
        template = _('Running %(cmd)s for %(project)s ...')
    alembic_util.msg(template % params)
    try:
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as exc:
        alembic_util.err(six.text_type(exc))
    alembic_util.msg(_('OK'))
def do_alembic_command(config, cmd, revision=None, desc=None, **kwargs):
    """Run an alembic sub-command, logging a banner and final status.

    *revision*, when truthy, is passed as the command's positional
    argument; *desc* only affects the banner text.
    """
    args = [revision] if revision else []
    if desc:
        banner = 'Running %(cmd)s (%(desc)s)...' % {'cmd': cmd,
                                                    'desc': desc}
    else:
        banner = 'Running %(cmd)s ...' % {'cmd': cmd}
    alembic_util.msg(banner)
    try:
        command_fn = getattr(alembic_command, cmd)
        command_fn(config, *args, **kwargs)
    except alembic_util.CommandError as exc:
        alembic_util.err(six.text_type(exc))
    alembic_util.msg('OK')
def do_alembic_command(config, cmd, revision=None, desc=None, **kwargs):
    """Run an alembic command, namespacing branch-label revisions.

    Revisions whose branch label is in MIGRATION_BRANCHES are prefixed
    with the project name (unique branch labels, Newton onwards).
    """
    project = config.get_main_option('neutron_project')
    args = []
    if revision:
        # We use unique branch labels from Newton onwards.
        branch = revision.split('@')[0]
        if branch in MIGRATION_BRANCHES:
            revision = '-'.join([project, revision])
        args.append(revision)
    params = {'cmd': cmd, 'desc': desc, 'project': project}
    if desc:
        alembic_util.msg(
            _('Running %(cmd)s (%(desc)s) for %(project)s ...') % params)
    else:
        alembic_util.msg(
            _('Running %(cmd)s for %(project)s ...') % params)
    try:
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as exc:
        alembic_util.err(six.text_type(exc))
    alembic_util.msg(_('OK'))
def do_apic_aim_persist_migration(session):
    """Populate AIM mapping rows for pre-existing address scopes/networks.

    For each address scope, resolves its AIM VRF (either from a defunct
    extension row's DN, or by name lookup in AIM) and records the mapping.
    For each network, resolves its AIM BridgeDomain/EndpointGroup/VRF
    (via the NAT strategy for managed external networks, or by mapped name
    otherwise) and records the mapping. Warns when resources are missing.

    :param session: SQLAlchemy session bound to the Neutron database.
    """
    alembic_util.msg(
        "Starting data migration for apic_aim mechanism driver persistence.")
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        # Migrate address scopes.
        scope_dbs = (session.query(as_db.AddressScope)
                     .options(lazyload('*')).all())
        for scope_db in scope_dbs:
            alembic_util.msg("Migrating address scope: %s" % scope_db)
            vrf = None
            ext_db = (session.query(DefunctAddressScopeExtensionDb).
                      filter_by(address_scope_id=scope_db.id).
                      one_or_none())
            if ext_db:
                # It has a pre-existing VRF.
                vrf = aim_resource.VRF.from_dn(ext_db.vrf_dn)
                # REVISIT: Get VRF to verify it exists?
                vrf_owned = False
            if not vrf:
                # It does not have a pre-existing VRF.
                aname = mapper.address_scope(session, scope_db.id)
                vrfs = aim.find(
                    aim_ctx, aim_resource.VRF, name=aname)
                if vrfs:
                    vrf = vrfs[0]
                    vrf_owned = True
            if vrf:
                _add_address_scope_mapping(
                    session, scope_db.id, vrf, vrf_owned)
            else:
                alembic_util.warn(
                    "No AIM VRF found for address scope: %s" % scope_db)
        # Migrate networks.
        net_dbs = (session.query(models_v2.Network)
                   .options(lazyload('*')).all())
        for net_db in net_dbs:
            alembic_util.msg("Migrating network: %s" % net_db)
            bd = None
            epg = None
            vrf = None
            ext_db = (session.query(NetworkExtensionDb).
                      filter_by(network_id=net_db.id).
                      one_or_none())
            if ext_db and ext_db.external_network_dn:
                # Its a managed external network.
                ext_net = aim_resource.ExternalNetwork.from_dn(
                    ext_db.external_network_dn)
                # REVISIT: Get ExternalNetwork to verify it exists?
                l3out = aim_resource.L3Outside(
                    tenant_name=ext_net.tenant_name,
                    name=ext_net.l3out_name)
                # Pick the NAT strategy recorded on the extension row.
                if ext_db.nat_type == '':
                    ns_cls = nat_strategy.NoNatStrategy
                elif ext_db.nat_type == 'edge':
                    ns_cls = nat_strategy.EdgeNatStrategy
                else:
                    ns_cls = nat_strategy.DistributedNatStrategy
                ns = ns_cls(aim)
                ns.app_profile_name = 'OpenStack'
                # Pull BD/EPG/VRF out of the strategy's L3Outside resources.
                for resource in ns.get_l3outside_resources(aim_ctx, l3out):
                    if isinstance(resource, aim_resource.BridgeDomain):
                        bd = resource
                    elif isinstance(resource, aim_resource.EndpointGroup):
                        epg = resource
                    elif isinstance(resource, aim_resource.VRF):
                        vrf = resource
            if not bd:
                # It must be a normal network.
                aname = mapper.network(session, net_db.id)
                bds = aim.find(
                    aim_ctx, aim_resource.BridgeDomain, name=aname)
                if bds:
                    bd = bds[0]
                epgs = aim.find(
                    aim_ctx, aim_resource.EndpointGroup, name=aname)
                if epgs:
                    epg = epgs[0]
                if bd:
                    # Prefer the BD's own tenant; fall back to 'common'.
                    vrfs = (
                        aim.find(
                            aim_ctx, aim_resource.VRF,
                            tenant_name=bd.tenant_name,
                            name=bd.vrf_name) or
                        aim.find(
                            aim_ctx, aim_resource.VRF,
                            tenant_name='common',
                            name=bd.vrf_name))
                    if vrfs:
                        vrf = vrfs[0]
            if bd and epg and vrf:
                _add_network_mapping(session, net_db.id, bd, epg, vrf)
            elif not net_db.external:
                alembic_util.warn(
                    "AIM BD, EPG or VRF not found for network: %s" % net_db)
    alembic_util.msg(
        "Finished data migration for apic_aim mechanism driver persistence.")
def do_ap_name_change(session, conf=None):
    """Rebuild managed external networks under the new AP naming scheme.

    For every managed external network: tears down the existing AIM
    L3Outside/ExternalNetwork (and associated clones/VRF connections),
    then recreates them with the NAT strategy's common_scope set to the
    configured APIC system id, re-adding subnets, network mappings, and
    VRF contract connections.

    :param session: SQLAlchemy session bound to the Neutron database.
    :param conf: optional config object; defaults to the global CONF.
    """
    alembic_util.msg("Starting data migration for apic_aim ap name change.")
    cfg = conf or CONF
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    system_id = cfg.apic_system_id
    alembic_util.msg("APIC System ID: %s" % system_id)
    with session.begin(subtransactions=True):
        net_dbs = session.query(models_v2.Network).options(lazyload('*')).all()
        for net_db in net_dbs:
            ext_db = _get_network_extn_db(session, net_db.id)
            if ext_db and ext_db.get(ext.EXTERNAL_NETWORK):
                alembic_util.msg("Migrating external network: %s" % net_db)
                # Its a managed external network.
                ext_net = aim_resource.ExternalNetwork.from_dn(
                    ext_db[ext.EXTERNAL_NETWORK])
                ext_net = aim.get(aim_ctx, ext_net)
                l3out = aim_resource.L3Outside(
                    tenant_name=ext_net.tenant_name,
                    name=ext_net.l3out_name)
                # Select NAT strategy based on the stored nat_type.
                if ext_db[ext.NAT_TYPE] == '':
                    ns_cls = nat_strategy.NoNatStrategy
                elif ext_db[ext.NAT_TYPE] == 'edge':
                    ns_cls = nat_strategy.EdgeNatStrategy
                else:
                    ns_cls = nat_strategy.DistributedNatStrategy
                clone_ext_nets = {}
                ns = ns_cls(aim)
                ns.app_profile_name = 'OpenStack'
                ns.common_scope = None
                # Start Cleanup
                if not isinstance(ns, nat_strategy.NoNatStrategy):
                    l3out_clones = ns.db.get_clones(aim_ctx, l3out)
                    # Retrieve External Networks
                    for l3out_clone in l3out_clones:
                        for extc in aim.find(
                                aim_ctx, aim_resource.ExternalNetwork,
                                tenant_name=l3out_clone[0],
                                l3out_name=l3out_clone[1]):
                            # Remember clone contracts so they can be
                            # restored after the recreate below.
                            clone_ext_nets[(l3out.tenant_name,
                                            l3out.name,
                                            extc.name)] = extc
                vrfs = ns.read_vrfs(aim_ctx, ext_net)
                session.execute(NetworkMapping.delete().where(
                    NetworkMapping.c.network_id == net_db.id))
                for vrf in vrfs:
                    ns.disconnect_vrf(aim_ctx, ext_net, vrf)
                ns.delete_external_network(aim_ctx, ext_net)
                ns.delete_l3outside(aim_ctx, l3out)
                # Recreate
                ns.common_scope = system_id
                ns.create_l3outside(aim_ctx, l3out)
                ns.create_external_network(aim_ctx, ext_net)
                ns.update_external_cidrs(aim_ctx, ext_net,
                                         ext_db[ext.EXTERNAL_CIDRS])
                for subnet in net_db.subnets:
                    aim_subnet = aim_resource.Subnet.to_gw_ip_mask(
                        subnet.gateway_ip, int(subnet.cidr.split('/')[1]))
                    ns.create_subnet(aim_ctx, l3out, aim_subnet)
                # Re-derive BD/EPG/VRF from the recreated L3Outside and
                # restore the network mapping.
                for resource in ns.get_l3outside_resources(aim_ctx, l3out):
                    if isinstance(resource, aim_resource.BridgeDomain):
                        bd = resource
                    elif isinstance(resource, aim_resource.EndpointGroup):
                        epg = resource
                    elif isinstance(resource, aim_resource.VRF):
                        vrf = resource
                _add_network_mapping(session, net_db.id, bd, epg, vrf)
                eid = (ext_net.tenant_name, ext_net.l3out_name, ext_net.name)
                for vrf in vrfs:
                    if eid in clone_ext_nets:
                        # Carry the clone's contracts over before
                        # reconnecting the VRF.
                        ext_net.provided_contract_names = clone_ext_nets[
                            eid].provided_contract_names
                        ext_net.consumed_contract_names = clone_ext_nets[
                            eid].consumed_contract_names
                    ns.connect_vrf(aim_ctx, ext_net, vrf)
def do_apic_aim_persist_migration(session):
    """Populate AIM mapping rows via DbMixin for existing resources.

    Mirrors the persist migration: resolves the AIM VRF for each address
    scope and the AIM BridgeDomain/EndpointGroup/VRF for each network,
    recording mappings through db.DbMixin helpers and warning when AIM
    resources cannot be found.

    :param session: SQLAlchemy session bound to the Neutron database.
    """
    alembic_util.msg(
        "Starting data migration for apic_aim mechanism driver persistence.")
    db_mixin = db.DbMixin()
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        # Migrate address scopes.
        scope_dbs = (session.query(as_db.AddressScope).all())
        for scope_db in scope_dbs:
            alembic_util.msg("Migrating address scope: %s" % scope_db)
            vrf = None
            ext_db = (session.query(DefunctAddressScopeExtensionDb).filter_by(
                address_scope_id=scope_db.id).one_or_none())
            if ext_db:
                # It has a pre-existing VRF.
                vrf = aim_resource.VRF.from_dn(ext_db.vrf_dn)
                # REVISIT: Get VRF to verify it exists?
                vrf_owned = False
            if not vrf:
                # It does not have a pre-existing VRF.
                aname = mapper.address_scope(session, scope_db.id)
                vrfs = aim.find(aim_ctx, aim_resource.VRF, name=aname)
                if vrfs:
                    vrf = vrfs[0]
                    vrf_owned = True
            if vrf:
                db_mixin._add_address_scope_mapping(session, scope_db.id,
                                                    vrf, vrf_owned)
            else:
                alembic_util.warn("No AIM VRF found for address scope: %s" %
                                  scope_db)
        # Migrate networks.
        net_dbs = (session.query(models_v2.Network).all())
        for net_db in net_dbs:
            alembic_util.msg("Migrating network: %s" % net_db)
            bd = None
            epg = None
            vrf = None
            ext_db = (session.query(
                extension_db.NetworkExtensionDb).filter_by(
                network_id=net_db.id).one_or_none())
            if ext_db and ext_db.external_network_dn:
                # Its a managed external network.
                ext_net = aim_resource.ExternalNetwork.from_dn(
                    ext_db.external_network_dn)
                # REVISIT: Get ExternalNetwork to verify it exists?
                l3out = aim_resource.L3Outside(
                    tenant_name=ext_net.tenant_name,
                    name=ext_net.l3out_name)
                # Pick the NAT strategy recorded on the extension row.
                if ext_db.nat_type == '':
                    ns_cls = nat_strategy.NoNatStrategy
                elif ext_db.nat_type == 'edge':
                    ns_cls = nat_strategy.EdgeNatStrategy
                else:
                    ns_cls = nat_strategy.DistributedNatStrategy
                ns = ns_cls(aim)
                ns.app_profile_name = 'OpenStack'
                # Pull BD/EPG/VRF out of the strategy's L3Outside resources.
                for resource in ns.get_l3outside_resources(aim_ctx, l3out):
                    if isinstance(resource, aim_resource.BridgeDomain):
                        bd = resource
                    elif isinstance(resource, aim_resource.EndpointGroup):
                        epg = resource
                    elif isinstance(resource, aim_resource.VRF):
                        vrf = resource
            if not bd:
                # It must be a normal network.
                aname = mapper.network(session, net_db.id)
                bds = aim.find(aim_ctx, aim_resource.BridgeDomain, name=aname)
                if bds:
                    bd = bds[0]
                epgs = aim.find(aim_ctx, aim_resource.EndpointGroup,
                                name=aname)
                if epgs:
                    epg = epgs[0]
                if bd:
                    # Prefer the BD's own tenant; fall back to 'common'.
                    vrfs = (aim.find(aim_ctx, aim_resource.VRF,
                                     tenant_name=bd.tenant_name,
                                     name=bd.vrf_name) or
                            aim.find(aim_ctx, aim_resource.VRF,
                                     tenant_name='common',
                                     name=bd.vrf_name))
                    if vrfs:
                        vrf = vrfs[0]
            if bd and epg and vrf:
                db_mixin._add_network_mapping(session, net_db.id, bd, epg,
                                              vrf)
            elif not net_db.external:
                alembic_util.warn(
                    "AIM BD, EPG or VRF not found for network: %s" % net_db)
    alembic_util.msg(
        "Finished data migration for apic_aim mechanism driver persistence.")
def do_ap_name_change(session, conf=None):
    """Rebuild managed external networks with the configured system id.

    Mixin-based variant: uses ExtensionDbMixin/DbMixin to read extension
    data and write mappings. Deletes and recreates each managed external
    network's AIM L3Outside/ExternalNetwork with common_scope set to the
    APIC system id, restoring subnets, mappings, and VRF contracts.

    :param session: SQLAlchemy session bound to the Neutron database.
    :param conf: optional config object; defaults to the global CONF.
    """
    alembic_util.msg("Starting data migration for apic_aim ap name change.")
    cfg = conf or CONF
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    system_id = cfg.apic_system_id
    alembic_util.msg("APIC System ID: %s" % system_id)
    ext_mixin = extension_db.ExtensionDbMixin()
    db_mixin = db.DbMixin()
    with session.begin(subtransactions=True):
        net_dbs = session.query(models_v2.Network).all()
        for net_db in net_dbs:
            ext_db = ext_mixin.get_network_extn_db(session, net_db.id)
            if ext_db and ext_db[ext.EXTERNAL_NETWORK]:
                alembic_util.msg("Migrating external network: %s" % net_db)
                # Its a managed external network.
                ext_net = aim_resource.ExternalNetwork.from_dn(
                    ext_db[ext.EXTERNAL_NETWORK])
                ext_net = aim.get(aim_ctx, ext_net)
                l3out = aim_resource.L3Outside(
                    tenant_name=ext_net.tenant_name,
                    name=ext_net.l3out_name)
                # Select NAT strategy based on the stored nat_type.
                if ext_db[ext.NAT_TYPE] == '':
                    ns_cls = nat_strategy.NoNatStrategy
                elif ext_db[ext.NAT_TYPE] == 'edge':
                    ns_cls = nat_strategy.EdgeNatStrategy
                else:
                    ns_cls = nat_strategy.DistributedNatStrategy
                clone_ext_nets = {}
                ns = ns_cls(aim)
                ns.app_profile_name = 'OpenStack'
                ns.common_scope = None
                # Start Cleanup
                if not isinstance(ns, nat_strategy.NoNatStrategy):
                    l3out_clones = ns.db.get_clones(aim_ctx, l3out)
                    # Retrieve External Networks
                    for l3out_clone in l3out_clones:
                        for extc in aim.find(aim_ctx,
                                             aim_resource.ExternalNetwork,
                                             tenant_name=l3out_clone[0],
                                             l3out_name=l3out_clone[1]):
                            # Remember clone contracts so they can be
                            # restored after the recreate below.
                            clone_ext_nets[(l3out.tenant_name, l3out.name,
                                            extc.name)] = extc
                vrfs = ns.read_vrfs(aim_ctx, ext_net)
                session.query(db.NetworkMapping).filter(
                    db.NetworkMapping.network_id == net_db.id).delete()
                for vrf in vrfs:
                    ns.disconnect_vrf(aim_ctx, ext_net, vrf)
                ns.delete_external_network(aim_ctx, ext_net)
                ns.delete_l3outside(aim_ctx, l3out)
                # Recreate
                ns.common_scope = system_id
                ns.create_l3outside(aim_ctx, l3out)
                ns.create_external_network(aim_ctx, ext_net)
                ns.update_external_cidrs(aim_ctx, ext_net,
                                         ext_db[ext.EXTERNAL_CIDRS])
                for subnet in net_db.subnets:
                    aim_subnet = aim_resource.Subnet.to_gw_ip_mask(
                        subnet.gateway_ip, int(subnet.cidr.split('/')[1]))
                    ns.create_subnet(aim_ctx, l3out, aim_subnet)
                # Re-derive BD/EPG/VRF from the recreated L3Outside and
                # restore the network mapping.
                for resource in ns.get_l3outside_resources(aim_ctx, l3out):
                    if isinstance(resource, aim_resource.BridgeDomain):
                        bd = resource
                    elif isinstance(resource, aim_resource.EndpointGroup):
                        epg = resource
                    elif isinstance(resource, aim_resource.VRF):
                        vrf = resource
                db_mixin._add_network_mapping(session, net_db.id, bd, epg,
                                              vrf)
                eid = (ext_net.tenant_name, ext_net.l3out_name, ext_net.name)
                for vrf in vrfs:
                    if eid in clone_ext_nets:
                        # Carry the clone's contracts over before
                        # reconnecting the VRF.
                        ext_net.provided_contract_names = clone_ext_nets[
                            eid].provided_contract_names
                        ext_net.consumed_contract_names = clone_ext_nets[
                            eid].consumed_contract_names
                    ns.connect_vrf(aim_ctx, ext_net, vrf)
def do_apic_aim_security_group_migration(session):
    """Mirror Neutron security groups and their rules into AIM.

    Creates (overwriting) an AIM SecurityGroup plus a 'default' subject for
    every Neutron SG, then an AIM SecurityGroupRule for every SG rule. For
    rules with a remote_group_id, remote_ips is built from the fixed IPs
    (matching the rule's ethertype IP version) of ports bound to the remote
    group; otherwise remote_ip_prefix is used when set.

    :param session: SQLAlchemy session bound to the Neutron database.
    """
    alembic_util.msg(
        "Starting data migration for SGs and its rules.")
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        # Migrate SG.
        sg_dbs = (session.query(sg_models.SecurityGroup).
                  options(lazyload('*')).all())
        for sg_db in sg_dbs:
            alembic_util.msg("Migrating SG: %s" % sg_db)
            tenant_aname = mapper.project(session, sg_db['tenant_id'])
            sg_aim = aim_resource.SecurityGroup(
                tenant_name=tenant_aname, name=sg_db['id'],
                display_name=aim_utils.sanitize_display_name(sg_db['name']))
            aim.create(aim_ctx, sg_aim, overwrite=True)
            # Always create this default subject
            sg_subject = aim_resource.SecurityGroupSubject(
                tenant_name=tenant_aname,
                security_group_name=sg_db['id'], name='default')
            aim.create(aim_ctx, sg_subject, overwrite=True)
        # Migrate SG rules.
        sg_rule_dbs = (session.query(sg_models.SecurityGroupRule).
                       options(lazyload('*')).all())
        for sg_rule_db in sg_rule_dbs:
            tenant_aname = mapper.project(session, sg_rule_db['tenant_id'])
            if sg_rule_db.get('remote_group_id'):
                # Resolve the remote group to concrete port IPs of the
                # rule's IP version.
                ip_version = 0
                if sg_rule_db['ethertype'] == 'IPv4':
                    ip_version = 4
                elif sg_rule_db['ethertype'] == 'IPv6':
                    ip_version = 6
                remote_ips = []
                sg_ports = (session.query(models_v2.Port).
                            join(sg_models.SecurityGroupPortBinding,
                                 sg_models.SecurityGroupPortBinding.port_id ==
                                 models_v2.Port.id).
                            filter(sg_models.SecurityGroupPortBinding.
                                   security_group_id ==
                                   sg_rule_db['remote_group_id']).
                            options(lazyload('*')).all())
                for sg_port in sg_ports:
                    for fixed_ip in sg_port['fixed_ips']:
                        if ip_version == netaddr.IPAddress(
                                fixed_ip['ip_address']).version:
                            remote_ips.append(fixed_ip['ip_address'])
            else:
                remote_ips = ([sg_rule_db['remote_ip_prefix']]
                              if sg_rule_db['remote_ip_prefix'] else '')
            sg_rule_aim = aim_resource.SecurityGroupRule(
                tenant_name=tenant_aname,
                security_group_name=sg_rule_db['security_group_id'],
                security_group_subject_name='default',
                name=sg_rule_db['id'],
                direction=sg_rule_db['direction'],
                ethertype=sg_rule_db['ethertype'].lower(),
                ip_protocol=(sg_rule_db['protocol'] if sg_rule_db['protocol']
                             else 'unspecified'),
                remote_ips=remote_ips,
                from_port=(sg_rule_db['port_range_min']
                           if sg_rule_db['port_range_min']
                           else 'unspecified'),
                to_port=(sg_rule_db['port_range_max']
                         if sg_rule_db['port_range_max']
                         else 'unspecified'))
            aim.create(aim_ctx, sg_rule_aim, overwrite=True)
    alembic_util.msg(
        "Finished data migration for SGs and its rules.")
def do_apic_aim_security_group_migration(session):
    """Mirror Neutron security groups and rules into AIM (variant).

    Same migration as elsewhere in this module: creates AIM SecurityGroup
    plus 'default' subject per SG, then a SecurityGroupRule per rule,
    expanding remote_group_id references into the bound ports' fixed IPs
    that match the rule's ethertype IP version.

    :param session: SQLAlchemy session bound to the Neutron database.
    """
    alembic_util.msg("Starting data migration for SGs and its rules.")
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()
    with session.begin(subtransactions=True):
        # Migrate SG.
        sg_dbs = (session.query(sg_models.SecurityGroup).options(
            lazyload('*')).all())
        for sg_db in sg_dbs:
            alembic_util.msg("Migrating SG: %s" % sg_db)
            tenant_aname = mapper.project(session, sg_db['tenant_id'])
            sg_aim = aim_resource.SecurityGroup(
                tenant_name=tenant_aname, name=sg_db['id'],
                display_name=aim_utils.sanitize_display_name(sg_db['name']))
            aim.create(aim_ctx, sg_aim, overwrite=True)
            # Always create this default subject
            sg_subject = aim_resource.SecurityGroupSubject(
                tenant_name=tenant_aname,
                security_group_name=sg_db['id'], name='default')
            aim.create(aim_ctx, sg_subject, overwrite=True)
        # Migrate SG rules.
        sg_rule_dbs = (session.query(sg_models.SecurityGroupRule).options(
            lazyload('*')).all())
        for sg_rule_db in sg_rule_dbs:
            tenant_aname = mapper.project(session, sg_rule_db['tenant_id'])
            if sg_rule_db.get('remote_group_id'):
                # Resolve the remote group to concrete port IPs of the
                # rule's IP version.
                ip_version = 0
                if sg_rule_db['ethertype'] == 'IPv4':
                    ip_version = 4
                elif sg_rule_db['ethertype'] == 'IPv6':
                    ip_version = 6
                remote_ips = []
                sg_ports = (session.query(models_v2.Port).join(
                    sg_models.SecurityGroupPortBinding,
                    sg_models.SecurityGroupPortBinding.port_id ==
                    models_v2.Port.id).filter(
                        sg_models.SecurityGroupPortBinding.security_group_id
                        == sg_rule_db['remote_group_id']).options(
                            lazyload('*')).all())
                for sg_port in sg_ports:
                    for fixed_ip in sg_port['fixed_ips']:
                        if ip_version == netaddr.IPAddress(
                                fixed_ip['ip_address']).version:
                            remote_ips.append(fixed_ip['ip_address'])
            else:
                remote_ips = ([sg_rule_db['remote_ip_prefix']]
                              if sg_rule_db['remote_ip_prefix'] else '')
            sg_rule_aim = aim_resource.SecurityGroupRule(
                tenant_name=tenant_aname,
                security_group_name=sg_rule_db['security_group_id'],
                security_group_subject_name='default',
                name=sg_rule_db['id'],
                direction=sg_rule_db['direction'],
                ethertype=sg_rule_db['ethertype'].lower(),
                ip_protocol=(sg_rule_db['protocol'] if sg_rule_db['protocol']
                             else 'unspecified'),
                remote_ips=remote_ips,
                from_port=(sg_rule_db['port_range_min']
                           if sg_rule_db['port_range_min']
                           else 'unspecified'),
                to_port=(sg_rule_db['port_range_max']
                         if sg_rule_db['port_range_max']
                         else 'unspecified'))
            aim.create(aim_ctx, sg_rule_aim, overwrite=True)
    alembic_util.msg("Finished data migration for SGs and its rules.")
def init(self, directory, template="mampoer_generic", package=False):
    """Initialize a new scripts directory.

    :param directory: target path for the new scripts directory; must be
        absent or empty.
    :param template: string name of the migration environment
     template to use.

    :param package: when True, write ``__init__.py``
     files into the environment location as well as the
     versions/ location.

     .. versionadded:: 1.2

    """
    if os.access(directory, os.F_OK) and os.listdir(directory):
        raise util.CommandError(
            "Directory %s already exists and is not empty" % directory)
    template_dir = os.path.join(self.get_template_directory(), template)
    if not os.access(template_dir, os.F_OK):
        raise util.CommandError("No such template %r" % template)
    if not os.access(directory, os.F_OK):
        # An empty-but-existing directory is reused as-is.
        util.status(
            "Creating directory %s" % os.path.abspath(directory),
            os.makedirs,
            directory,
        )
    versions = os.path.join(directory, "versions")
    util.status(
        "Creating directory %s" % os.path.abspath(versions),
        os.makedirs,
        versions,
    )
    script = ScriptDirectory(directory)
    for file_ in os.listdir(template_dir):
        file_path = os.path.join(template_dir, file_)
        if file_ == "mampoer.ini.mako":
            # Render the ini template into the configured ini path unless
            # that file already exists.
            config_file = os.path.abspath(self.config.config_file_name)
            if os.access(config_file, os.F_OK):
                util.msg("File %s already exists, skipping" % config_file)
            else:
                script._generate_template(file_path, config_file,
                                          script_location=directory)
        elif os.path.isfile(file_path):
            output_file = os.path.join(directory, file_)
            script._copy_file(file_path, output_file)
    if package:
        for path in [
            os.path.join(os.path.abspath(directory), "__init__.py"),
            os.path.join(os.path.abspath(versions), "__init__.py"),
        ]:
            file_ = util.status("Adding %s" % path, open, path, "w")
            file_.close()
    util.msg("Please edit configuration/connection/logging "
             "settings in %r before proceeding." % config_file)