Example #1
def upgrade():
    url = context.config.powervc_config.DATABASE.connection
    engine = session.create_engine(url)
    # Previous releases did not use Alembic (or any other migration tool),
    # so to support upgrades from them we must first check whether the
    # table already exists.
    if engine.dialect.has_table(engine.connect(), tablename):
        alembic_util.msg("table already exists, skipping create")
        return
    op.create_table(
        tablename,
        sa.Column('id', sa.String(36),
                  primary_key=True,
                  default=uuidutils.generate_uuid),
        sa.Column('obj_type', sa.Enum(constants.OBJ_TYPE_NETWORK,
                                      constants.OBJ_TYPE_SUBNET,
                                      constants.OBJ_TYPE_PORT,
                                      name='mapping_object_type'),
                  nullable=False),
        sa.Column('status', sa.Enum(constants.STATUS_CREATING,
                                    constants.STATUS_ACTIVE,
                                    constants.STATUS_DELETING,
                                    name='mapping_state'),
                  nullable=False),
        sa.Column('sync_key', sa.String(255), nullable=False),
        sa.Column('local_id', sa.String(36)),
        sa.Column('pvc_id', sa.String(36)),
        sa.Column('update_data', sa.String(512))
    )
Example #2
    def _load_config(self):
        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s', self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                config = cp.options('migration_dbs')
                for key in config:
                    self.test_databases[key] = cp.get('migration_dbs', key)
                self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
                self.downgrade = cp.getboolean('walk_style', 'downgrade')
            except ConfigParser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = session.create_engine(value)

        self._create_databases()
Example #3
    def __call__(self,
                 connection_url,
                 save_tables=False,
                 tunnel_type=None,
                 vxlan_udp_port=None):
        engine = session.create_engine(connection_url)
        metadata = sa.MetaData()
        check_db_schema_version(engine, metadata)

        if hasattr(self, 'define_ml2_tables'):
            self.define_ml2_tables(metadata)

        # Autoload the ports table to ensure that foreign keys to it and
        # the network table can be created for the new tables.
        sa.Table('ports', metadata, autoload=True, autoload_with=engine)
        metadata.create_all(engine)

        self.migrate_network_segments(engine, metadata)
        if tunnel_type:
            self.migrate_tunnels(engine, tunnel_type, vxlan_udp_port)
        self.migrate_vlan_allocations(engine)
        self.migrate_port_bindings(engine, metadata)

        if hasattr(self, 'drop_old_tables'):
            self.drop_old_tables(engine, save_tables)
Example #4
    def _load_config(self):
        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug("config_path is %s" % self.CONFIG_FILE_PATH)
        if os.path.exists(self.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(self.CONFIG_FILE_PATH)
                config = cp.options("unit_tests")
                for key in config:
                    self.test_databases[key] = cp.get("unit_tests", key)
                self.snake_walk = cp.getboolean("walk_style", "snake_walk")
                self.downgrade = cp.getboolean("walk_style", "downgrade")

            except ConfigParser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config " "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config " "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = session.create_engine(value)

        # NOTE(jhesketh): We only need to make sure the databases are created
        # not necessarily clean of tables.
        self._create_databases()
Example #5
def run_migrations_online():
    set_mysql_engine()
    engine = session.create_engine(neutron_config.database.connection)

    connection = engine.connect()
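    # GBP_VERSION_TABLE stores these revisions outside Alembic's default
    # alembic_version table, so they do not collide with other projects.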
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      version_table=GBP_VERSION_TABLE)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
        engine.dispose()
Example #6
    def setUp(self):
        super(MySQLModeTestCase, self).setUp()

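        # Recreate the engine with an explicit mysql_sql_mode so the test
        # runs against the SQL mode under test.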
        self.engine = session.create_engine(self.engine.url, mysql_sql_mode=self.mysql_mode)
        self.connection = self.engine.connect()

        meta = MetaData()
        meta.bind = self.engine
        self.test_table = Table(
            _TABLE_NAME + "mode", meta, Column("id", Integer, primary_key=True), Column("bar", String(255))
        )
        self.test_table.create()

        self.addCleanup(self.test_table.drop)
        self.addCleanup(self.connection.close)
Example #7
    def setUp(self):
        super(MySQLModeTestCase, self).setUp()

        self.engine = session.create_engine(self.engine.url,
                                            mysql_sql_mode=self.mysql_mode)
        self.connection = self.engine.connect()

        meta = MetaData()
        meta.bind = self.engine
        self.test_table = Table(_TABLE_NAME + "mode", meta,
                                Column('id', Integer, primary_key=True),
                                Column('bar', String(255)))
        self.test_table.create()

        self.addCleanup(self.test_table.drop)
        self.addCleanup(self.connection.close)
Example #8
def run_migrations_online():
    set_mysql_engine()
    engine = session.create_engine(neutron_config.database.connection)

    connection = engine.connect()
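    # FWAAS_VERSION_TABLE keeps the FWaaS Alembic revisions in their own
    # version table rather than the default alembic_version.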
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        version_table=FWAAS_VERSION_TABLE
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
        engine.dispose()
Example #9
    def _test_postgresql_opportunistically(self):
        # Test postgresql database migration walk
        if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE):
            self.skipTest("postgresql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = oslodbutils.get_connect_string("postgresql+psycopg2", self.DATABASE, self.USER, self.PASSWD)
        engine = session.create_engine(connect_string)
        (user, password, database, host) = get_pgsql_connection_info(urlparse.urlparse(connect_string))
        self.engines[database] = engine
        self.test_databases[database] = connect_string

        # build a fully populated postgresql database with all the tables
        self._reset_database(database)
        self._walk_versions(engine, self.snake_walk, self.downgrade)
        del self.engines[database]
        del self.test_databases[database]
Example #10
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    engine = session.create_engine(config.powervc_config.DATABASE.connection)
    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
Example #11
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    set_mysql_engine()
    engine = session.create_engine(congress_config.database.connection)

    connection = engine.connect()
    context.configure(connection=connection, target_metadata=target_metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
Example #12
    def _test_postgresql_opportunistically(self):
        # Test postgresql database migration walk
        if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE):
            self.skipTest("postgresql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = oslodbutils.get_connect_string(
            "postgresql+psycopg2", self.DATABASE, self.USER, self.PASSWD)
        engine = session.create_engine(connect_string)
        (user, password, database, host) = \
                get_pgsql_connection_info(urlparse.urlparse(connect_string))
        self.engines[database] = engine
        self.test_databases[database] = connect_string

        # build a fully populated postgresql database with all the tables
        self._reset_database(database)
        self._walk_versions(engine, self.snake_walk, self.downgrade)
        del self.engines[database]
        del self.test_databases[database]
Example #13
    def __call__(self, connection_url, save_tables=False, tunnel_type=None,
                 vxlan_udp_port=None):
        engine = session.create_engine(connection_url)
        metadata = sa.MetaData()
        check_db_schema_version(engine, metadata)

        self.define_ml2_tables(metadata)

        # Autoload the ports table to ensure that foreign keys to it and
        # the network table can be created for the new tables.
        sa.Table('ports', metadata, autoload=True, autoload_with=engine)
        metadata.create_all(engine)

        self.migrate_network_segments(engine, metadata)
        if tunnel_type:
            self.migrate_tunnels(engine, tunnel_type, vxlan_udp_port)
        self.migrate_vlan_allocations(engine)
        self.migrate_port_bindings(engine, metadata)

        self.drop_old_tables(engine, save_tables)
Example #14
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    engine = session.create_engine(neutron_config.database.connection)
    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=None,
        version_table=VERSION_TABLE
    )

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
        engine.dispose()
Example #15
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    set_mysql_engine()
    engine = session.create_engine(vnfsvc_config.database.connection)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )

    try:
        with context.begin_transaction():
            context.run_migrations(active_plugins=active_plugins,
                                   options=build_options())
    finally:
        connection.close()
Example #16
    def _test_mysql_opportunistically(self):
        # Test that table creation on mysql only builds InnoDB tables
        if not _have_mysql(self.USER, self.PASSWD, self.DATABASE):
            self.skipTest("mysql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = oslodbutils.get_connect_string(
            "mysql+mysqldb", self.DATABASE, self.USER, self.PASSWD)
        (user, password, database, host) = \
                get_mysql_connection_info(urlparse.urlparse(connect_string))
        engine = session.create_engine(connect_string)
        self.engines[database] = engine
        self.test_databases[database] = connect_string

        # build a fully populated mysql database with all the tables
        self._reset_database(database)
        self._walk_versions(engine, self.snake_walk, self.downgrade)

        connection = engine.connect()
        # sanity check
        total = connection.execute("SELECT count(*) "
                                   "from information_schema.TABLES "
                                   "where TABLE_SCHEMA='%(database)s'" %
                                   {'database': database})
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")

        noninnodb = connection.execute("SELECT count(*) "
                                       "from information_schema.TABLES "
                                       "where TABLE_SCHEMA='%(database)s' "
                                       "and ENGINE!='InnoDB' "
                                       "and TABLE_NAME!='migrate_version'" %
                                       {'database': database})
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non-InnoDB tables created" % count)
        connection.close()

        del self.engines[database]
        del self.test_databases[database]
Example #17
    def provisioned_engine(self, base_url, ident):
        """Return a provisioned engine.

        Given the URL of a particular database backend and the string
        name of a particular 'database' within that backend, return
        an Engine instance whose connections will refer directly to the
        named database.

        For hostname-based URLs, this typically involves switching just the
        'database' portion of the URL with the given name and creating
        an engine.

        For URLs that instead deal with DSNs, the rules may be more custom;
        for example, the engine may need to connect to the root URL and
        then emit a command to switch to the named database.

        """

        url = sa_url.make_url(str(base_url))
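        # NOTE: assigning url.database relies on the mutable URL objects of
        # SQLAlchemy < 1.4; newer releases require url.set(database=ident).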
        url.database = ident
        return session.create_engine(
            url,
            logging_name="%s@%s" % (self.drivername, ident))
Example #18
    def setUp(self):
        super(DbFixture, self).setUp()

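        # Expose the engine and session factory on the test case so the
        # tests themselves can open sessions against this database.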
        self.test.engine = session.create_engine(self._get_uri())
        self.test.sessionmaker = session.get_maker(self.test.engine)
Example #19
    def provisioned_engine(self, base_url, ident):
        return session.create_engine(
            self._provisioned_database_url(base_url, ident))
Example #20
    def __init__(self, migration_config):
        self.repository = migration_config.get('migration_repo_path', '')
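        # init_version: the revision the schema is assumed to start from
        # before any migration scripts have been applied.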
        self.init_version = migration_config.get('init_version', 0)
        self.db_url = migration_config['db_url']
        self.engine = db_session.create_engine(self.db_url)
Example #21
def convert_nsx_to_ml2(connection, dry_run=False):
    engine = session.create_engine(connection)

    def exec_q(q):
        if dry_run:
            print(q)
        else:
            return engine.execute(q)

    metadata = sa.MetaData()

    table_names = [
        'networks',
        'ports',
        'ml2_port_bindings',
        'ml2_network_segments',
        'portbindingports',
        'ml2_vxlan_allocations',
    ]

    tables = {
        name: sa.Table(name, metadata, autoload=True, autoload_with=engine)
        for name in table_names
    }

    # find networks that do not yet have an ML2 segment
    networks_table = tables['networks']
    segments_table = tables['ml2_network_segments']

    networks = engine.execute(
        networks_table.outerjoin(
            segments_table,
            networks_table.c.id==segments_table.c.network_id
        ).select(
            segments_table.c.network_id==None,
            use_labels=True
        )
    ).fetchall()

    # find unallocated VXLAN VNIs
    vnis_alloc = tables['ml2_vxlan_allocations']

    vnis = engine.execute(
        vnis_alloc.select(vnis_alloc.c.allocated==False)
    ).fetchall()

    if len(networks) > len(vnis):
        print('There are more networks than available VNIs')
        return

    # populate ml2_network_segments
    total = len(networks)
    for index, (network, vni) in enumerate(zip(networks, vnis)):
        print('Allocating VNI %s/%s' % (index, total))
        q = segments_table.insert().values(
            id=str(uuid.uuid4()),
            network_id=network.networks_id,
            network_type='vxlan',
            physical_network=None,
            segmentation_id=vni.vxlan_vni,
            is_dynamic=False
        )

        retval = exec_q(q)

    if total:
        # mark vnis in-use
        subq = sa.select([segments_table.c.segmentation_id])
        subq = subq.where(segments_table.c.network_type=='vxlan')

        q = vnis_alloc.update().where(vnis_alloc.c.vxlan_vni.in_(subq))
        q = q.values(allocated=True)

        print('Updating allocated VNIs')
        exec_q(q)

    #####
    # add ml2 port bindings
    old_bindings = tables['portbindingports']
    new_bindings = tables['ml2_port_bindings']

    # find the ports to update
    ports_to_update = engine.execute(
        old_bindings.outerjoin(
            new_bindings,
            old_bindings.c.port_id==new_bindings.c.port_id
        ).select(
            new_bindings.c.port_id==None,
            use_labels=True
        )
    ).fetchall()

    # map each port id to the ML2 segment id of its network
    ports = tables['ports']
    q = ports.join(
        segments_table,
        ports.c.network_id==segments_table.c.network_id
    ).select(use_labels=True)

    segment_cache = {
        rv.ports_id: rv.ml2_network_segments_id
        for rv in engine.execute(q).fetchall()
    }

    total = len(ports_to_update)

    for index, old_binding in enumerate(ports_to_update):
        print('Migrating binding %s/%s' % (index, total))
        if old_binding.portbindingports_port_id not in segment_cache:
            print('Port %s no longer exists, skipping...' %
                  old_binding.portbindingports_port_id)
        else:
            q = new_bindings.insert().values(
                port_id=old_binding.portbindingports_port_id,
                host=old_binding.portbindingports_host,
                vif_type='ovs',
                driver='dhcnsx',
                segment=segment_cache[old_binding.portbindingports_port_id],
                vnic_type='normal',
                vif_details='{"port_filter": true}'
            )
            retval = exec_q(q)

        q = old_bindings.delete(
            old_bindings.c.port_id==old_binding.portbindingports_port_id,
        )
        retval = exec_q(q)
Example #22
    def version(self):
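        # Read the revision currently stamped in the database's Alembic
        # version table.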
        engine = db_session.create_engine(self.db_url)
        with engine.connect() as conn:
            context = alembic_migration.MigrationContext.configure(conn)
            return context.get_current_revision()
Example #23
    def _fixture(self, **kw):
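        # In-memory SQLite engine; extra keyword arguments pass straight
        # through to create_engine.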
     return session.create_engine("sqlite://", **kw)
Example #24
    def _fixture(self, sql_mode):
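        # Rebuild the engine with the requested MySQL sql_mode; oslo.db
        # applies it to every new connection.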
        return session.create_engine(self.engine.url,
                                     mysql_sql_mode=sql_mode)