コード例 #1
0
ファイル: cli.py プロジェクト: neoareslinux/neutron
def _compare_labels(revision, expected_labels):
    """Warn or error out when a revision script carries unexpected labels."""
    # Validate that the script has expected labels only.
    unexpected = revision.branch_labels - expected_labels
    if not unexpected:
        return
    # NOTE(ihrachyshka): this hack is temporary to accommodate those
    # projects that already initialized their branches with liberty_*
    # labels. Notify them about the deprecation for now; drop it later.
    unexpected_minus_release = (
        revision.branch_labels - _get_release_labels(expected_labels))
    if not unexpected_minus_release:
        alembic_util.warn(
            _(
                "Release aware branch labels (%s) are deprecated. "
                "Please switch to expand@ and contract@ "
                "labels."
            )
            % unexpected
        )
        return

    alembic_util.err(
        _("Unexpected label for script %(script_name)s: %(labels)s")
        % {"script_name": os.path.basename(revision.path),
           "labels": unexpected}
    )
コード例 #2
0
ファイル: sqlite.py プロジェクト: 13917547121/me
 def add_constraint(self, const):
     """Refuse (or skip with a warning) ALTER-based constraint creation.

     SQLite cannot ALTER constraints; explicit constraints raise,
     implicit (auto-generated) ones are skipped with a warning.
     """
     # Attempt to distinguish between an auto-generated constraint
     # and an explicit one.
     rule = const._create_rule
     if rule is None:
         raise NotImplementedError(
                 "No support for ALTER of constraints in SQLite dialect")
     if rule(self):
         util.warn("Skipping unsupported ALTER for "
                     "creation of implicit constraint")
コード例 #3
0
 def add_constraint(self, const):
     """Handle ALTER for constraint creation, which SQLite does not support.

     Explicit constraints (no ``_create_rule``) raise; auto-generated
     ones whose rule applies are skipped with a warning.
     """
     create_rule = const._create_rule
     # Attempt to distinguish between an auto-gen constraint and an
     # explicit one.
     if create_rule is None:
         raise NotImplementedError(
             "No support for ALTER of constraints in SQLite dialect")
     elif create_rule(self):
         util.warn("Skipping unsupported ALTER for "
                   "creation of implicit constraint")
コード例 #4
0
ファイル: cli.py プロジェクト: rktidwell/neutron
def validate_head_files(config):
    '''Check that HEAD files contain the latest head for the branch.'''
    contract_path = _get_contract_head_file_path(config)
    expand_path = _get_expand_head_file_path(config)
    # Both HEAD files must exist before their contents can be checked.
    if not (os.path.exists(contract_path) and os.path.exists(expand_path)):
        alembic_util.warn(_("Repository does not contain HEAD files for "
                            "contract and expand branches."))
        return
    heads = _get_heads_map(config)
    _check_head(CONTRACT_BRANCH, contract_path, heads[CONTRACT_BRANCH])
    _check_head(EXPAND_BRANCH, expand_path, heads[EXPAND_BRANCH])
コード例 #5
0
ファイル: cli.py プロジェクト: sajuptpm7/neutron-vagrant
def _validate_head_files(config):
    '''Check that HEAD files contain the latest head for the branch.'''
    branch_paths = (_get_contract_head_file_path(config),
                    _get_expand_head_file_path(config))
    # Nothing to validate unless both HEAD files exist.
    if not all(os.path.exists(path) for path in branch_paths):
        alembic_util.warn(_("Repository does not contain HEAD files for "
                            "contract and expand branches."))
        return
    head_map = _get_heads_map(config)
    for branch, path in zip((CONTRACT_BRANCH, EXPAND_BRANCH), branch_paths):
        _check_head(branch, path, head_map[branch])
コード例 #6
0
def upgrade():
    """Create the AIM mapping tables and migrate any pre-existing AIM data."""

    op.create_table(
        'apic_aim_address_scope_mappings',
        sa.Column('scope_id', sa.String(36), nullable=False),
        sa.Column('vrf_name', sa.String(64), nullable=True),
        sa.Column('vrf_tenant_name', sa.String(64), nullable=True),
        sa.Column('vrf_owned', sa.Boolean, nullable=False),
        sa.ForeignKeyConstraint(
            ['scope_id'], ['address_scopes.id'],
            name='apic_aim_address_scope_mappings_fk_scope_id',
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('scope_id'))

    op.create_table(
        'apic_aim_network_mappings',
        sa.Column('network_id', sa.String(36), nullable=False),
        sa.Column('bd_name', sa.String(64), nullable=True),
        sa.Column('bd_tenant_name', sa.String(64), nullable=True),
        sa.Column('epg_name', sa.String(64), nullable=True),
        sa.Column('epg_tenant_name', sa.String(64), nullable=True),
        sa.Column('epg_app_profile_name', sa.String(64), nullable=True),
        sa.Column('vrf_name', sa.String(64), nullable=True),
        sa.Column('vrf_tenant_name', sa.String(64), nullable=True),
        sa.ForeignKeyConstraint(
            ['network_id'], ['networks.id'],
            name='apic_aim_network_mappings_fk_network_id',
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'))

    # See if AIM is being used, and if so, migrate data.
    connection = op.get_bind()
    inspector = sa.engine.reflection.Inspector.from_engine(connection)
    if 'aim_tenants' in inspector.get_table_names():
        try:
            # Note - this cannot be imported unless we know the
            # apic_aim mechanism driver is deployed, since the AIM
            # library may not be installed.
            from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
                data_migrations)

            db_session = sa.orm.Session(bind=connection)
            data_migrations.do_apic_aim_persist_migration(db_session)
        except ImportError:
            util.warn("AIM schema present, but failed to import AIM libraries"
                      " - data not migrated.")
        except Exception as e:
            util.warn("Caught exception migrating AIM data: %s" % e)

    op.drop_table('apic_aim_addr_scope_extensions')
コード例 #7
0
def upgrade():
    """Create AIM mapping tables; migrate data when the AIM schema exists."""

    op.create_table(
        'apic_aim_address_scope_mappings',
        sa.Column('scope_id', sa.String(36), nullable=False),
        sa.Column('vrf_name', sa.String(64), nullable=True),
        sa.Column('vrf_tenant_name', sa.String(64), nullable=True),
        sa.Column('vrf_owned', sa.Boolean, nullable=False),
        sa.ForeignKeyConstraint(
            ['scope_id'], ['address_scopes.id'],
            name='apic_aim_address_scope_mappings_fk_scope_id',
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('scope_id'))

    op.create_table(
        'apic_aim_network_mappings',
        sa.Column('network_id', sa.String(36), nullable=False),
        sa.Column('bd_name', sa.String(64), nullable=True),
        sa.Column('bd_tenant_name', sa.String(64), nullable=True),
        sa.Column('epg_name', sa.String(64), nullable=True),
        sa.Column('epg_tenant_name', sa.String(64), nullable=True),
        sa.Column('epg_app_profile_name', sa.String(64), nullable=True),
        sa.Column('vrf_name', sa.String(64), nullable=True),
        sa.Column('vrf_tenant_name', sa.String(64), nullable=True),
        sa.ForeignKeyConstraint(
            ['network_id'], ['networks.id'],
            name='apic_aim_network_mappings_fk_network_id',
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'))

    # See if AIM is being used, and if so, migrate data.
    conn = op.get_bind()
    reflector = sa.engine.reflection.Inspector.from_engine(conn)
    if 'aim_tenants' in reflector.get_table_names():
        try:
            # Note - this cannot be imported unless we know the
            # apic_aim mechanism driver is deployed, since the AIM
            # library may not be installed.
            from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
                data_migrations)

            migration_session = sa.orm.Session(bind=conn, autocommit=True)
            data_migrations.do_apic_aim_persist_migration(migration_session)
        except ImportError:
            util.warn("AIM schema present, but failed to import AIM libraries"
                      " - data not migrated.")

    op.drop_table('apic_aim_addr_scope_extensions')
コード例 #8
0
ファイル: impl.py プロジェクト: Lifto/alembic
 def alter_column(self, table_name, column_name,
                     nullable=None,
                     server_default=False,
                     name=None,
                     type_=None,
                     schema=None,
                     autoincrement=None,
                     existing_type=None,
                     existing_server_default=None,
                     existing_nullable=None,
                     existing_autoincrement=None
                 ):
     """Emit one ALTER statement per requested change on a column.

     ``server_default`` uses ``False`` as its sentinel so an explicit
     ``None`` default can still be set.  The ``existing_*`` values are
     passed through to each generated statement so dialects that must
     restate the full column definition can do so.  The rename (if any)
     is deliberately emitted last so earlier statements still reference
     the original column name.
     """
     if autoincrement is not None or existing_autoincrement is not None:
         # Fixed typo in the warning text ("nautoincrement").
         util.warn("autoincrement and existing_autoincrement "
                   "only make sense for MySQL")
     if nullable is not None:
         self._exec(base.ColumnNullable(table_name, column_name,
                             nullable, schema=schema,
                             existing_type=existing_type,
                             existing_server_default=existing_server_default,
                             existing_nullable=existing_nullable,
                             ))
     if server_default is not False:
         self._exec(base.ColumnDefault(
                             table_name, column_name, server_default,
                             schema=schema,
                             existing_type=existing_type,
                             existing_server_default=existing_server_default,
                             existing_nullable=existing_nullable,
                         ))
     if type_ is not None:
         self._exec(base.ColumnType(
                             table_name, column_name, type_, schema=schema,
                             existing_type=existing_type,
                             existing_server_default=existing_server_default,
                             existing_nullable=existing_nullable,
                         ))
     # do the new name last ;)
     if name is not None:
         self._exec(base.ColumnName(
                             table_name, column_name, name, schema=schema,
                             existing_type=existing_type,
                             existing_server_default=existing_server_default,
                             existing_nullable=existing_nullable,
                         ))
コード例 #9
0
def upgrade():
    """Migrate security-group data into AIM when its schema is present."""
    # See if AIM is being used, and if so, migrate data.
    conn = op.get_bind()
    inspector = sa.engine.reflection.Inspector.from_engine(conn)
    if 'aim_tenants' not in inspector.get_table_names():
        return
    try:
        # Note - this cannot be imported unless we know the
        # apic_aim mechanism driver is deployed, since the AIM
        # library may not be installed.
        from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
            data_migrations)

        db_session = sa.orm.Session(bind=conn, autocommit=True)
        data_migrations.do_apic_aim_security_group_migration(db_session)
    except ImportError:
        util.warn("AIM schema present, but failed to import AIM libraries"
                  " - SG data not migrated.")
    except Exception as e:
        util.warn("Caught exception migrating SG data: %s" % e)
コード例 #10
0
ファイル: script.py プロジェクト: briandailey/alembic
 def _revision_map(self):
     """Build a mapping of revision id to Script, wiring next-rev links.

     Warns on duplicate revision ids and on dangling down_revision
     references (which are reset to None).  The base revision maps
     ``None`` to ``None`` so lookups of the root terminate cleanly.
     """
     revisions = {}
     for filename in os.listdir(self.versions):
         script = Script._from_filename(self.versions, filename)
         if script is None:
             continue
         if script.revision in revisions:
             util.warn("Revision %s is present more than once" %
                       script.revision)
         revisions[script.revision] = script
     for script in revisions.values():
         down = script.down_revision
         if down is None:
             continue
         if down not in revisions:
             util.warn("Revision %s referenced from %s is not present" %
                       (down, script))
             script.down_revision = None
         else:
             revisions[down].add_nextrev(script.revision)
     revisions[None] = None
     return revisions
コード例 #11
0
ファイル: script.py プロジェクト: 13917547121/me
 def _revision_map(self):
     """Map each revision id to its Script and link parents to children.

     Duplicate revision ids and missing down-revisions produce warnings;
     a missing down-revision is cleared to None so traversal still works.
     """
     rev_map = {}
     for entry in os.listdir(self.versions):
         script = Script._from_filename(self.versions, entry)
         if script is None:
             continue
         if script.revision in rev_map:
             util.warn("Revision %s is present more than once" %
                       script.revision)
         rev_map[script.revision] = script
     for script in rev_map.values():
         if script.down_revision is None:
             continue
         parent = rev_map.get(script.down_revision)
         if parent is None:
             util.warn("Revision %s referenced from %s is not present"
                       % (script.down_revision, script))
             script.down_revision = None
         else:
             parent.add_nextrev(script.revision)
     rev_map[None] = None
     return rev_map
コード例 #12
0
def _compare_labels(revision, expected_labels):
    """Error out (or warn for release-only labels) on unexpected labels."""
    # Validate that the script has expected labels only.
    extra = revision.branch_labels - expected_labels
    if not extra:
        return
    # NOTE(ihrachyshka): this hack is temporary to accommodate those
    # projects that already initialized their branches with liberty_*
    # labels. Let's notify them about the deprecation for now and drop
    # it later.
    extra_minus_release = (revision.branch_labels -
                           _get_release_labels(expected_labels))
    if extra_minus_release:
        alembic_util.err(
            _('Unexpected label for script %(script_name)s: %(labels)s') % {
                'script_name': os.path.basename(revision.path),
                'labels': extra
            })
    else:
        alembic_util.warn(
            _('Release aware branch labels (%s) are deprecated. '
              'Please switch to expand@ and contract@ '
              'labels.') % extra)
コード例 #13
0
def upgrade():
    """Upgrade database.

    Rename indexes, unique constraints and foreign keys to follow
    ``NAMING_CONVENTION``: each constraint reflected from the live
    database is matched against its metadata definition by covered
    columns and, when the names differ, dropped and re-created under
    the conventional name.
    """
    op.execute('COMMIT')  # See https://bitbucket.org/zzzeek/alembic/issue/123
    ctx = op.get_context()
    metadata = ctx.opts['target_metadata']
    metadata.naming_convention = NAMING_CONVENTION
    metadata.bind = ctx.connection.engine
    insp = Inspector.from_engine(ctx.connection.engine)

    for table_name in insp.get_table_names():
        # Only tables described by the target metadata are processed.
        if table_name not in metadata.tables:
            continue

        table = metadata.tables[table_name]

        # Reflected constraints keyed by the columns they cover, so they
        # can be matched against metadata-defined constraints below.
        ixs = {}
        uqs = {}
        fks = {}

        for ix in insp.get_indexes(table_name):
            ixs[tuple(ix['column_names'])] = ix
        for uq in insp.get_unique_constraints(table_name):
            uqs[tuple(uq['column_names'])] = uq
        for fk in insp.get_foreign_keys(table_name):
            fks[(tuple(fk['constrained_columns']), fk['referred_table'])] = fk

        with op.batch_alter_table(
                table_name, naming_convention=NAMING_CONVENTION) as batch_op:
            for c in list(table.constraints) + list(table.indexes):
                key = None
                if isinstance(c, sa.schema.ForeignKeyConstraint):
                    key = (tuple(c.column_keys), c.referred_table.name)
                    fk = fks.get(key)
                    # Recreate only when the database name differs from
                    # the conventional one.
                    if fk and c.name != fk['name']:
                        batch_op.drop_constraint(fk['name'],
                                                 type_='foreignkey')
                        batch_op.create_foreign_key(op.f(c.name),
                                                    fk['referred_table'],
                                                    fk['constrained_columns'],
                                                    fk['referred_columns'],
                                                    **fk['options'])
                elif isinstance(c, sa.schema.UniqueConstraint):
                    key = tuple(c.columns.keys())
                    uq = uqs.get(key)
                    if uq and c.name != uq['name']:
                        batch_op.drop_constraint(uq['name'], type_='unique')
                        batch_op.create_unique_constraint(
                            op.f(c.name), uq['column_names'])
                elif isinstance(c, sa.schema.CheckConstraint):
                    # CHECK constraints are not renamed automatically here.
                    util.warn('Update {0.table.name} CHECK {0.name} '
                              'manually'.format(c))
                elif isinstance(c, sa.schema.Index):
                    key = tuple(c.columns.keys())
                    ix = ixs.get(key)
                    if ix and c.name != ix['name']:
                        batch_op.drop_index(ix['name'])
                        batch_op.create_index(
                            op.f(c.name),
                            ix['column_names'],
                            unique=ix['unique'],
                        )
                elif isinstance(c, sa.schema.PrimaryKeyConstraint) or \
                        c.name == '_unnamed_':
                    # NOTE we don't care about primary keys since they have
                    # specific syntax.
                    pass
                else:
                    raise RuntimeError('Missing {0!r}'.format(c))
コード例 #14
0
 def alter_column(self,
                  table_name,
                  column_name,
                  nullable=None,
                  server_default=False,
                  name=None,
                  type_=None,
                  schema=None,
                  autoincrement=None,
                  existing_type=None,
                  existing_server_default=None,
                  existing_nullable=None,
                  existing_autoincrement=None):
     """Emit one ALTER statement per requested change on a column.

     ``server_default`` uses ``False`` as its sentinel so an explicit
     ``None`` default can still be set.  The ``existing_*`` values are
     forwarded to each generated statement for dialects that must
     restate the column definition.  The rename (if any) is emitted
     last so earlier statements still reference the original name.
     """
     if autoincrement is not None or existing_autoincrement is not None:
         # Fixed typo in the warning text ("nautoincrement").
         util.warn(
             "autoincrement and existing_autoincrement only make sense for MySQL"
         )
     if nullable is not None:
         self._exec(
             base.ColumnNullable(
                 table_name,
                 column_name,
                 nullable,
                 schema=schema,
                 existing_type=existing_type,
                 existing_server_default=existing_server_default,
                 existing_nullable=existing_nullable,
             ))
     if server_default is not False:
         self._exec(
             base.ColumnDefault(
                 table_name,
                 column_name,
                 server_default,
                 schema=schema,
                 existing_type=existing_type,
                 existing_server_default=existing_server_default,
                 existing_nullable=existing_nullable,
             ))
     if type_ is not None:
         self._exec(
             base.ColumnType(
                 table_name,
                 column_name,
                 type_,
                 schema=schema,
                 existing_type=existing_type,
                 existing_server_default=existing_server_default,
                 existing_nullable=existing_nullable,
             ))
     # do the new name last ;)
     if name is not None:
         self._exec(
             base.ColumnName(
                 table_name,
                 column_name,
                 name,
                 schema=schema,
                 existing_type=existing_type,
                 existing_server_default=existing_server_default,
                 existing_nullable=existing_nullable,
             ))
コード例 #15
0
def do_apic_aim_persist_migration(session):
    """Populate AIM mapping records for existing address scopes and networks.

    For each Neutron address scope, find (or reuse a pre-existing) AIM
    VRF and record it via ``db.DbMixin._add_address_scope_mapping``.
    For each network, locate the AIM BridgeDomain, EndpointGroup and
    VRF and record them via ``_add_network_mapping``.  Missing AIM
    resources are reported with a warning rather than failing the
    migration.

    :param session: SQLAlchemy session bound to the Neutron database.
    """
    alembic_util.msg(
        "Starting data migration for apic_aim mechanism driver persistence.")

    db_mixin = db.DbMixin()
    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()

    with session.begin(subtransactions=True):
        # Migrate address scopes.
        scope_dbs = (session.query(as_db.AddressScope).all())
        for scope_db in scope_dbs:
            alembic_util.msg("Migrating address scope: %s" % scope_db)
            vrf = None
            ext_db = (session.query(DefunctAddressScopeExtensionDb).filter_by(
                address_scope_id=scope_db.id).one_or_none())
            if ext_db:
                # It has a pre-existing VRF.
                vrf = aim_resource.VRF.from_dn(ext_db.vrf_dn)
                # REVISIT: Get VRF to verify it exists?
                vrf_owned = False
            if not vrf:
                # It does not have a pre-existing VRF; look one up by
                # the mapped name.
                aname = mapper.address_scope(session, scope_db.id)
                vrfs = aim.find(aim_ctx, aim_resource.VRF, name=aname)
                if vrfs:
                    vrf = vrfs[0]
                    vrf_owned = True
            if vrf:
                db_mixin._add_address_scope_mapping(session, scope_db.id, vrf,
                                                    vrf_owned)
            else:
                alembic_util.warn("No AIM VRF found for address scope: %s" %
                                  scope_db)

        # Migrate networks.
        net_dbs = (session.query(models_v2.Network).all())
        for net_db in net_dbs:
            alembic_util.msg("Migrating network: %s" % net_db)
            bd = None
            epg = None
            vrf = None
            ext_db = (session.query(extension_db.NetworkExtensionDb).filter_by(
                network_id=net_db.id).one_or_none())
            if ext_db and ext_db.external_network_dn:
                # It's a managed external network.
                ext_net = aim_resource.ExternalNetwork.from_dn(
                    ext_db.external_network_dn)
                # REVISIT: Get ExternalNetwork to verify it exists?
                l3out = aim_resource.L3Outside(tenant_name=ext_net.tenant_name,
                                               name=ext_net.l3out_name)
                # Empty nat_type means no NAT; otherwise pick the
                # matching NAT strategy implementation.
                if ext_db.nat_type == '':
                    ns_cls = nat_strategy.NoNatStrategy
                elif ext_db.nat_type == 'edge':
                    ns_cls = nat_strategy.EdgeNatStrategy
                else:
                    ns_cls = nat_strategy.DistributedNatStrategy
                ns = ns_cls(aim)
                ns.app_profile_name = 'OpenStack'
                for resource in ns.get_l3outside_resources(aim_ctx, l3out):
                    if isinstance(resource, aim_resource.BridgeDomain):
                        bd = resource
                    elif isinstance(resource, aim_resource.EndpointGroup):
                        epg = resource
                    elif isinstance(resource, aim_resource.VRF):
                        vrf = resource
            if not bd:
                # It must be a normal network.
                aname = mapper.network(session, net_db.id)
                bds = aim.find(aim_ctx, aim_resource.BridgeDomain, name=aname)
                if bds:
                    bd = bds[0]
                epgs = aim.find(aim_ctx,
                                aim_resource.EndpointGroup,
                                name=aname)
                if epgs:
                    epg = epgs[0]
                if bd:
                    # Prefer a VRF in the BD's own tenant, falling back
                    # to the 'common' tenant.
                    vrfs = (aim.find(aim_ctx,
                                     aim_resource.VRF,
                                     tenant_name=bd.tenant_name,
                                     name=bd.vrf_name)
                            or aim.find(aim_ctx,
                                        aim_resource.VRF,
                                        tenant_name='common',
                                        name=bd.vrf_name))
                    if vrfs:
                        vrf = vrfs[0]
            if bd and epg and vrf:
                db_mixin._add_network_mapping(session, net_db.id, bd, epg, vrf)
            elif not net_db.external:
                alembic_util.warn(
                    "AIM BD, EPG or VRF not found for network: %s" % net_db)

    alembic_util.msg(
        "Finished data migration for apic_aim mechanism driver persistence.")
コード例 #16
0
def do_apic_aim_persist_migration(session):
    """Populate AIM mapping records for existing address scopes and networks.

    Variant that records mappings through the module-level
    ``_add_address_scope_mapping`` / ``_add_network_mapping`` helpers
    and loads ORM rows with ``lazyload('*')`` to avoid eager-loading
    relationships during migration.  Missing AIM resources are reported
    with a warning rather than failing the migration.

    :param session: SQLAlchemy session bound to the Neutron database.
    """
    alembic_util.msg(
        "Starting data migration for apic_aim mechanism driver persistence.")

    aim = aim_manager.AimManager()
    aim_ctx = aim_context.AimContext(session)
    mapper = apic_mapper.APICNameMapper()

    with session.begin(subtransactions=True):
        # Migrate address scopes.
        scope_dbs = (session.query(as_db.AddressScope)
                     .options(lazyload('*')).all())
        for scope_db in scope_dbs:
            alembic_util.msg("Migrating address scope: %s" % scope_db)
            vrf = None
            ext_db = (session.query(DefunctAddressScopeExtensionDb).
                      filter_by(address_scope_id=scope_db.id).
                      one_or_none())
            if ext_db:
                # It has a pre-existing VRF.
                vrf = aim_resource.VRF.from_dn(ext_db.vrf_dn)
                # REVISIT: Get VRF to verify it exists?
                vrf_owned = False
            if not vrf:
                # It does not have a pre-existing VRF; look one up by
                # the mapped name.
                aname = mapper.address_scope(session, scope_db.id)
                vrfs = aim.find(
                    aim_ctx, aim_resource.VRF,
                    name=aname)
                if vrfs:
                    vrf = vrfs[0]
                    vrf_owned = True
            if vrf:
                _add_address_scope_mapping(
                    session, scope_db.id, vrf, vrf_owned)
            else:
                alembic_util.warn(
                    "No AIM VRF found for address scope: %s" % scope_db)

        # Migrate networks.
        net_dbs = (session.query(models_v2.Network)
                   .options(lazyload('*')).all())
        for net_db in net_dbs:
            alembic_util.msg("Migrating network: %s" % net_db)
            bd = None
            epg = None
            vrf = None
            ext_db = (session.query(NetworkExtensionDb).
                      filter_by(network_id=net_db.id).
                      one_or_none())
            if ext_db and ext_db.external_network_dn:
                # It's a managed external network.
                ext_net = aim_resource.ExternalNetwork.from_dn(
                    ext_db.external_network_dn)
                # REVISIT: Get ExternalNetwork to verify it exists?
                l3out = aim_resource.L3Outside(
                    tenant_name=ext_net.tenant_name,
                    name=ext_net.l3out_name)
                # Empty nat_type means no NAT; otherwise pick the
                # matching NAT strategy implementation.
                if ext_db.nat_type == '':
                    ns_cls = nat_strategy.NoNatStrategy
                elif ext_db.nat_type == 'edge':
                    ns_cls = nat_strategy.EdgeNatStrategy
                else:
                    ns_cls = nat_strategy.DistributedNatStrategy
                ns = ns_cls(aim)
                ns.app_profile_name = 'OpenStack'
                for resource in ns.get_l3outside_resources(aim_ctx, l3out):
                    if isinstance(resource, aim_resource.BridgeDomain):
                        bd = resource
                    elif isinstance(resource, aim_resource.EndpointGroup):
                        epg = resource
                    elif isinstance(resource, aim_resource.VRF):
                        vrf = resource
            if not bd:
                # It must be a normal network.
                aname = mapper.network(session, net_db.id)
                bds = aim.find(
                    aim_ctx, aim_resource.BridgeDomain,
                    name=aname)
                if bds:
                    bd = bds[0]
                epgs = aim.find(
                    aim_ctx, aim_resource.EndpointGroup,
                    name=aname)
                if epgs:
                    epg = epgs[0]
                if bd:
                    # Prefer a VRF in the BD's own tenant, falling back
                    # to the 'common' tenant.
                    vrfs = (
                        aim.find(
                            aim_ctx, aim_resource.VRF,
                            tenant_name=bd.tenant_name,
                            name=bd.vrf_name) or
                        aim.find(
                            aim_ctx, aim_resource.VRF,
                            tenant_name='common',
                            name=bd.vrf_name))
                    if vrfs:
                        vrf = vrfs[0]
            if bd and epg and vrf:
                _add_network_mapping(session, net_db.id, bd, epg, vrf)
            elif not net_db.external:
                alembic_util.warn(
                    "AIM BD, EPG or VRF not found for network: %s" % net_db)

    alembic_util.msg(
        "Finished data migration for apic_aim mechanism driver persistence.")