Example #1
def _find_migrate_repo(database='main'):
    """Get the path for the migrate repository."""

    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), database,
                        'legacy_migrations')

    return migrate_repository.Repository(path)
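The Repository object is usually handed to migrate.versioning.api together with an engine or connection URL. Below is a minimal, hedged sketch of that flow (not part of the original snippet); the SQLite URL is a placeholder, and the calls mirror Examples #4 and #10 further down:

# Hedged sketch: consume the repository returned by _find_migrate_repo()
# with migrate.versioning.api. The sqlite URL is a placeholder.
import sqlalchemy
from migrate.versioning import api as versioning_api

engine = sqlalchemy.create_engine('sqlite:////tmp/example.db')
repo = _find_migrate_repo()  # defaults to the 'main' database's repository
versioning_api.version_control(engine, repository=repo)  # stamp the starting version
versioning_api.upgrade(engine, repository=repo)          # run all upgrade scripts
print(versioning_api.db_version(engine, repo))           # report the current version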
Example #2
class TestMigrations(test.TestCase):
    """Test sqlalchemy-migrate migrations"""

    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                       'test_migrations.conf')
    # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
                                      DEFAULT_CONFIG_FILE)
    MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
    REPOSITORY = repository.Repository(
        os.path.abspath(os.path.dirname(MIGRATE_FILE)))

    def setUp(self):
        super(TestMigrations, self).setUp()

        self.snake_walk = False
        self.test_databases = {}

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(TestMigrations.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
                self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
            except ConfigParser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")
Example #3
def _find_migrate_repo(abs_path):
    """Get the project's change script repository

    :param abs_path: Absolute path to migrate repository
    """
    if not os.path.exists(abs_path):
        raise db_exception.DBMigrationError("Path %s not found" % abs_path)
    return migrate_repository.Repository(abs_path)
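Unlike Example #1, this variant validates the path up front, so a misconfigured deployment fails with a clear DBMigrationError instead of an obscure failure from deep inside sqlalchemy-migrate. A hedged usage sketch follows; the import location of db_exception and the repository path are illustrative assumptions:

# Hedged usage sketch; the oslo_db import and the path below are
# assumptions, not part of the original snippet.
from oslo_db import exception as db_exception

try:
    repo = _find_migrate_repo('/opt/project/db/sqlalchemy/migrate_repo')
except db_exception.DBMigrationError as exc:
    # Fail loudly with an operator-facing message.
    raise SystemExit('migration repository misconfigured: %s' % exc)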
Example #4
    def __init__(self, repo_path):
        super(DatabaseFixture, self).__init__()
        self.golden_db = self._mktemp()
        engine = sqlalchemy.create_engine('sqlite:///%s' % self.golden_db)
        repo = repository.Repository(repo_path)
        versioning_api.version_control(engine, repository=repo)
        versioning_api.upgrade(engine, repository=repo)
        self.working_copy = self._mktemp()
        self.url = 'sqlite:///%s' % self.working_copy
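This fixture pays the migration cost once: the fully migrated "golden" SQLite file is built in __init__, and each test then works against a cheap throwaway copy. Here is a sketch of the helpers the snippet leaves out, assuming _mktemp merely allocates a temp file and setUp refreshes the working copy (both bodies are assumptions inferred from the constructor, not the original code):

# Hedged sketch of the helpers the fixture above relies on; names and
# behavior are inferred from how the constructor uses them.
import os
import shutil
import tempfile

    def _mktemp(self):
        # Allocate an empty temp file and return its path.
        fd, path = tempfile.mkstemp(suffix='.sqlite')
        os.close(fd)
        return path

    def setUp(self):
        # Refresh the working copy from the golden database so every
        # test starts against a fully migrated, pristine schema.
        shutil.copyfile(self.golden_db, self.working_copy)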
Example #5
def _find_migrate_repo():
    """Get the project's change script repository

    :returns: An instance of ``migrate.versioning.repository.Repository``
    """
    path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'legacy_migrations',
    )
    return migrate_repo.Repository(path)
Example #6
def _version_control(version):
    """
    Place a database under migration control
    """
    repo_path = get_migrate_repo_path()
    sql_connection = CONF.sql_connection
    if version is None:
        version = versioning_repository.Repository(repo_path).latest
    return versioning_api.version_control(sql_connection, repo_path, version)
Example #7
    def __init__(self, *args, **kwargs):
        super(TestNovaMigrations, self).__init__(*args, **kwargs)

        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                                'test_migrations.conf')
        # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
        # to override the location of the config file for migration testing
        self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
                                               self.DEFAULT_CONFIG_FILE)
        self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
        self.REPOSITORY = repository.Repository(
            os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
Example #8
def _version_control(version):
    """
    Place a database under migration control

    This will only set the specific version of a database, it won't
    run any migrations.
    """
    repo_path = get_migrate_repo_path()
    sql_connection = CONF.sql_connection
    if version is None:
        version = versioning_repository.Repository(repo_path).latest
    return versioning_api.version_control(sql_connection, repo_path, version)
Example #9
    def __init__(self, *args, **kwargs):
        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)

        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                                'test_migrations.conf')
        # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
        # to override the location of the config file for migration testing
        self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
                                               self.DEFAULT_CONFIG_FILE)
        self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
        self.REPOSITORY = repository.Repository(
            os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
        self.INIT_VERSION = 0

        self.snake_walk = False
        self.test_databases = {}
        self.migration = None
        self.migration_api = None
Example #10
    def upgrade(self):
        # NOTE(gordc): to minimise memory, only import migration when needed
        from oslo_db.sqlalchemy import migration
        path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            'sqlalchemy', 'migrate_repo')
        engine = self._engine_facade.get_engine()

        from migrate import exceptions as migrate_exc
        from migrate.versioning import api
        from migrate.versioning import repository

        repo = repository.Repository(path)
        try:
            api.db_version(engine, repo)
        except migrate_exc.DatabaseNotControlledError:
            models.Base.metadata.create_all(engine)
            api.version_control(engine, repo, repo.latest)
        else:
            migration.db_sync(engine, path)
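Note the two branches above: a fresh, uncontrolled database is created directly from the model metadata and then stamped at repo.latest, skipping the historical scripts entirely, while a database already under version control is walked forward with migration.db_sync. That avoids replaying every migration on new installs, at the cost of trusting the models to match the schema produced by the final migration.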
Example #11
import logging
import os.path
import sys

from migrate.versioning import repository, schema

from sqlalchemy import (create_engine, MetaData, Table)
from sqlalchemy.exc import NoSuchTableError

log = logging.getLogger(__name__)

# path relative to galaxy
migrate_repository_directory = os.path.abspath(
    os.path.dirname(__file__)).replace(os.getcwd() + os.path.sep, '', 1)
migrate_repository = repository.Repository(migrate_repository_directory)


def create_or_verify_database(url,
                              galaxy_config_file,
                              engine_options={},
                              app=None):
    """
    Check that the database is usable, possibly creating it if empty (this is
    the only time we automatically create tables, otherwise we force the
    user to do it using the management script so they can create backups).

    1) Empty database --> initialize with latest version and return
    2) Database older than migration support --> fail and require manual update
    3) Database at state where migrate support introduced --> add version control information but make no changes (might still require manual update)
    4) Database versioned but out of date --> fail with informative message, user must run "sh manage_db.sh upgrade"
    """
Example #12
    def __init__(self, *args, **kwargs):
        super(TestMigrations, self).__init__(*args, **kwargs)

        self.MIGRATE_FILE = ironic.db.sqlalchemy.migrate_repo.__file__
        self.REPOSITORY = repository.Repository(
            os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
Example #13
    def REPOSITORY(self):
        return repository.Repository(
            os.path.abspath(os.path.dirname(self.migrate_file)))
Example #14
class TestMigrations(test.TestCase):
    """Test sqlalchemy-migrate migrations."""

    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                       'test_migrations.conf')
    # Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('CINDER_TEST_MIGRATIONS_CONF',
                                      DEFAULT_CONFIG_FILE)
    MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
    REPOSITORY = repository.Repository(
        os.path.abspath(os.path.dirname(MIGRATE_FILE)))

    def setUp(self):
        super(TestMigrations, self).setUp()

        self.snake_walk = False
        self.test_databases = {}

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if not self.test_databases:
            if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
                cp = ConfigParser.RawConfigParser()
                try:
                    cp.read(TestMigrations.CONFIG_FILE_PATH)
                    defaults = cp.defaults()
                    for key, value in defaults.items():
                        self.test_databases[key] = value
                    self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
                except ConfigParser.ParsingError as e:
                    self.fail("Failed to read test_migrations.conf config "
                              "file. Got error: %s" % e)
            else:
                self.fail("Failed to find test_migrations.conf config "
                          "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):

        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(TestMigrations, self).tearDown()

    def _reset_databases(self):
        def execute_cmd(cmd=None):
            status, output = commands.getstatusoutput(cmd)
            LOG.debug(output)
            self.assertEqual(0, status)

        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = urlparse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = "******"%s\"" % auth_pieces[1]
                sql = ("drop database if exists %(database)s; create database "
                       "%(database)s;") % {
                           'database': database
                       }
                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                       "-e \"%(sql)s\"") % {
                           'user': user,
                           'password': password,
                           'host': host,
                           'sql': sql
                       }
                execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]

                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    password = auth_pieces[1].strip()
                # note(krtaylor): File creation problems with tests in
                # venv using .pgpass authentication, changed to
                # PGPASSWORD environment variable which is no longer
                # planned to be deprecated
                os.environ['PGPASSWORD'] = password
                os.environ['PGUSER'] = user
                # note(boris-42): We must create and drop database, we can't
                # drop database which we have connected to, so for such
                # operations there is a special database template1.
                sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                          " '%(sql)s' -d template1")
                sql = ("drop database if exists %(database)s;") % {
                    'database': database
                }
                droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
                execute_cmd(droptable)
                sql = ("create database %(database)s;") % {
                    'database': database
                }
                createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
                execute_cmd(createtable)
                os.unsetenv('PGPASSWORD')
                os.unsetenv('PGUSER')

    def test_walk_versions(self):
        """
        Walks all version scripts for each tested database, ensuring
        that there are no errors in the version scripts for each engine
        """
        for key, engine in self.engines.items():
            self._walk_versions(engine, self.snake_walk)

    def test_mysql_connect_fail(self):
        """
        Test that we can trigger a mysql connection failure and we fail
        gracefully to ensure we don't break people without mysql
        """
        if _is_mysql_avail(user="******"):
            self.fail("Shouldn't have connected")

    @testtools.skipUnless(_have_mysql(), "mysql not available")
    def test_mysql_innodb(self):
        """
        Test that table creation on mysql only builds InnoDB tables
        """
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string('mysql')
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["mysqlcitest"] = engine
        self.test_databases["mysqlcitest"] = connect_string

        # build a fully populated mysql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

        uri = _get_connect_string('mysql', database="information_schema")
        connection = sqlalchemy.create_engine(uri).connect()

        # sanity check
        total = connection.execute("SELECT count(*) "
                                   "from information_schema.TABLES "
                                   "where TABLE_SCHEMA='openstack_citest'")
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")

        noninnodb = connection.execute("SELECT count(*) "
                                       "from information_schema.TABLES "
                                       "where TABLE_SCHEMA='openstack_citest' "
                                       "and ENGINE!='InnoDB' "
                                       "and TABLE_NAME!='migrate_version'")
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)

    def test_postgresql_connect_fail(self):
        """
        Test that we can trigger a postgres connection failure and we fail
        gracefully to ensure we don't break people without postgres
        """
        if _is_backend_avail('postgres', user="******"):
            self.fail("Shouldn't have connected")

    @testtools.skipUnless(_is_backend_avail('postgres'),
                          "postgresql not available")
    def test_postgresql_opportunistically(self):
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string("postgres")
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["postgresqlcitest"] = engine
        self.test_databases["postgresqlcitest"] = connect_string

        # build a fully populated postgresql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                      migration.INIT_VERSION)
        self.assertEqual(
            migration.INIT_VERSION,
            migration_api.db_version(engine, TestMigrations.REPOSITORY))

        migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                              migration.INIT_VERSION + 1)

        LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)

        for version in xrange(migration.INIT_VERSION + 2,
                              TestMigrations.REPOSITORY.latest + 1):
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                self._migrate_down(engine, version - 1)
                self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(
                    xrange(migration.INIT_VERSION + 1,
                           TestMigrations.REPOSITORY.latest)):
                # downgrade -> upgrade -> downgrade
                self._migrate_down(engine, version)
                if snake_walk:
                    self._migrate_up(engine, version + 1)
                    self._migrate_down(engine, version)

    def _migrate_down(self, engine, version):
        migration_api.downgrade(engine, TestMigrations.REPOSITORY, version)
        self.assertEqual(
            version, migration_api.db_version(engine,
                                              TestMigrations.REPOSITORY))

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _prerun_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                prerun = getattr(self, "_prerun_%3.3d" % version, None)
                if prerun:
                    data = prerun(engine)

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, version)
            self.assertEqual(
                version,
                migration_api.db_version(engine, TestMigrations.REPOSITORY))

            if with_data:
                check = getattr(self, "_check_%3.3d" % version, None)
                if check:
                    check(engine, data)
        except Exception:
            LOG.error("Failed to migrate to version %s on engine %s" %
                      (version, engine))
            raise

    # migration 004 - change volume types to UUID
    def _prerun_004(self, engine):
        data = {
            'volumes': [
                {
                    'id': str(uuid.uuid4()),
                    'host': 'test1',
                    'volume_type_id': 1
                },
                {
                    'id': str(uuid.uuid4()),
                    'host': 'test2',
                    'volume_type_id': 1
                },
                {
                    'id': str(uuid.uuid4()),
                    'host': 'test3',
                    'volume_type_id': 3
                },
            ],
            'volume_types': [
                {
                    'name': 'vtype1'
                },
                {
                    'name': 'vtype2'
                },
                {
                    'name': 'vtype3'
                },
            ],
            'volume_type_extra_specs': [
                {
                    'volume_type_id': 1,
                    'key': 'v1',
                    'value': 'hotep',
                },
                {
                    'volume_type_id': 1,
                    'key': 'v2',
                    'value': 'bending rodrigez',
                },
                {
                    'volume_type_id': 2,
                    'key': 'v3',
                    'value': 'bending rodrigez',
                },
            ]
        }

        volume_types = get_table(engine, 'volume_types')
        for vtype in data['volume_types']:
            r = volume_types.insert().values(vtype).execute()
            vtype['id'] = r.inserted_primary_key[0]

        volume_type_es = get_table(engine, 'volume_type_extra_specs')
        for vtes in data['volume_type_extra_specs']:
            r = volume_type_es.insert().values(vtes).execute()
            vtes['id'] = r.inserted_primary_key[0]

        volumes = get_table(engine, 'volumes')
        for vol in data['volumes']:
            r = volumes.insert().values(vol).execute()
            vol['id'] = r.inserted_primary_key[0]

        return data

    def _check_004(self, engine, data):
        volumes = get_table(engine, 'volumes')
        v1 = volumes.select(
            volumes.c.id == data['volumes'][0]['id']).execute().first()
        v2 = volumes.select(
            volumes.c.id == data['volumes'][1]['id']).execute().first()
        v3 = volumes.select(
            volumes.c.id == data['volumes'][2]['id']).execute().first()

        volume_types = get_table(engine, 'volume_types')
        vt1 = volume_types.select(volume_types.c.name == data['volume_types']
                                  [0]['name']).execute().first()
        vt2 = volume_types.select(volume_types.c.name == data['volume_types']
                                  [1]['name']).execute().first()
        vt3 = volume_types.select(volume_types.c.name == data['volume_types']
                                  [2]['name']).execute().first()

        vtes = get_table(engine, 'volume_type_extra_specs')
        vtes1 = vtes.select(vtes.c.key == data['volume_type_extra_specs'][0]
                            ['key']).execute().first()
        vtes2 = vtes.select(vtes.c.key == data['volume_type_extra_specs'][1]
                            ['key']).execute().first()
        vtes3 = vtes.select(vtes.c.key == data['volume_type_extra_specs'][2]
                            ['key']).execute().first()

        self.assertEqual(v1['volume_type_id'], vt1['id'])
        self.assertEqual(v2['volume_type_id'], vt1['id'])
        self.assertEqual(v3['volume_type_id'], vt3['id'])

        self.assertEqual(vtes1['volume_type_id'], vt1['id'])
        self.assertEqual(vtes2['volume_type_id'], vt1['id'])
        self.assertEqual(vtes3['volume_type_id'], vt2['id'])

    def test_migration_005(self):
        """Test that adding source_volid column works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 4)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 5)
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue(
                isinstance(volumes.c.source_volid.type,
                           sqlalchemy.types.VARCHAR))

    def _metadatas(self, upgrade_to, downgrade_to=None):
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                                  upgrade_to)

            if downgrade_to is not None:
                migration_api.downgrade(engine, TestMigrations.REPOSITORY,
                                        downgrade_to)

            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine
            yield metadata

    def metadatas_upgraded_to(self, revision):
        return self._metadatas(revision)

    def metadatas_downgraded_from(self, revision):
        return self._metadatas(revision, revision - 1)

    def test_upgrade_006_adds_provider_location(self):
        for metadata in self.metadatas_upgraded_to(6):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
            self.assertTrue(
                isinstance(snapshots.c.provider_location.type,
                           sqlalchemy.types.VARCHAR))

    def test_downgrade_006_removes_provider_location(self):
        for metadata in self.metadatas_downgraded_from(6):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)

            self.assertTrue('provider_location' not in snapshots.c)

    def test_upgrade_007_adds_fk(self):
        for metadata in self.metadatas_upgraded_to(7):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)

            fkey, = snapshots.c.volume_id.foreign_keys

            self.assertEqual(volumes.c.id, fkey.column)

    def test_downgrade_007_removes_fk(self):
        for metadata in self.metadatas_downgraded_from(7):
            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)

            self.assertEqual(0, len(snapshots.c.volume_id.foreign_keys))

    def test_migration_008(self):
        """Test that adding and removing the backups table works correctly"""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 7)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)

            self.assertTrue(
                engine.dialect.has_table(engine.connect(), "backups"))
            backups = sqlalchemy.Table('backups', metadata, autoload=True)

            self.assertTrue(
                isinstance(backups.c.created_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(backups.c.updated_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(backups.c.deleted_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(backups.c.deleted.type, sqlalchemy.types.BOOLEAN))
            self.assertTrue(
                isinstance(backups.c.id.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.volume_id.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.user_id.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.project_id.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.host.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.availability_zone.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.display_name.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.display_description.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.container.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.status.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.fail_reason.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.service_metadata.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.service.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(backups.c.size.type, sqlalchemy.types.INTEGER))
            self.assertTrue(
                isinstance(backups.c.object_count.type,
                           sqlalchemy.types.INTEGER))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 7)

            self.assertFalse(
                engine.dialect.has_table(engine.connect(), "backups"))

    def test_migration_009(self):
        """Test adding snapshot_metadata table works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9)

            self.assertTrue(
                engine.dialect.has_table(engine.connect(),
                                         "snapshot_metadata"))
            snapshot_metadata = sqlalchemy.Table('snapshot_metadata',
                                                 metadata,
                                                 autoload=True)

            self.assertTrue(
                isinstance(snapshot_metadata.c.created_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(snapshot_metadata.c.updated_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(snapshot_metadata.c.deleted_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(snapshot_metadata.c.deleted.type,
                           sqlalchemy.types.BOOLEAN))
            self.assertTrue(
                isinstance(snapshot_metadata.c.deleted.type,
                           sqlalchemy.types.BOOLEAN))
            self.assertTrue(
                isinstance(snapshot_metadata.c.id.type,
                           sqlalchemy.types.INTEGER))
            self.assertTrue(
                isinstance(snapshot_metadata.c.snapshot_id.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(snapshot_metadata.c.key.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(snapshot_metadata.c.value.type,
                           sqlalchemy.types.VARCHAR))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 8)

            self.assertFalse(
                engine.dialect.has_table(engine.connect(),
                                         "snapshot_metadata"))

    def test_migration_010(self):
        """Test adding transfers table works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 10)
            self.assertTrue(
                engine.dialect.has_table(engine.connect(), "transfers"))
            transfers = sqlalchemy.Table('transfers', metadata, autoload=True)

            self.assertTrue(
                isinstance(transfers.c.created_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(transfers.c.updated_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(transfers.c.deleted_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(transfers.c.deleted.type, sqlalchemy.types.BOOLEAN))
            self.assertTrue(
                isinstance(transfers.c.id.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(transfers.c.volume_id.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(transfers.c.display_name.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(transfers.c.salt.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(transfers.c.crypt_hash.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(transfers.c.expires_at.type,
                           sqlalchemy.types.DATETIME))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 9)

            self.assertFalse(
                engine.dialect.has_table(engine.connect(), "transfers"))

    def test_migration_011(self):
        """Test adding transfers table works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 10)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            volumes_v10 = sqlalchemy.Table('volumes', metadata, autoload=True)

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 11)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            self.assertTrue(
                engine.dialect.has_table(engine.connect(), "volumes"))
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)

            # Make sure we didn't miss any columns in the upgrade
            for column in volumes_v10.c:
                self.assertTrue(volumes.c.__contains__(column.name))

            self.assertTrue(
                isinstance(volumes.c.bootable.type, sqlalchemy.types.BOOLEAN))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 10)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue('bootable' not in volumes.c)

            # Make sure we put all the columns back
            for column in volumes_v10.c:
                self.assertTrue(volumes.c.__contains__(column.name))

    def test_migration_012(self):
        """Test that adding attached_host column works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 11)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 12)
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue(
                isinstance(volumes.c.attached_host.type,
                           sqlalchemy.types.VARCHAR))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 11)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue('attached_host' not in volumes.c)

    def test_migration_013(self):
        """Test that adding provider_geometry column works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 12)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 13)
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue(
                isinstance(volumes.c.provider_geometry.type,
                           sqlalchemy.types.VARCHAR))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 12)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue('provider_geometry' not in volumes.c)

    def test_migration_014(self):
        """Test that adding _name_id column works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 13)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 14)
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue(
                isinstance(volumes.c._name_id.type, sqlalchemy.types.VARCHAR))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 13)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue('_name_id' not in volumes.c)

    def test_migration_015(self):
        """Test removing migrations table works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 15)

            self.assertFalse(
                engine.dialect.has_table(engine.connect(), "migrations"))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 14)

            self.assertTrue(
                engine.dialect.has_table(engine.connect(), "migrations"))

    def test_migration_016(self):
        """Test that dropping xen storage manager tables works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 15)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 16)
            self.assertFalse(
                engine.dialect.has_table(engine.connect(), 'sm_flavors'))
            self.assertFalse(
                engine.dialect.has_table(engine.connect(),
                                         'sm_backend_config'))
            self.assertFalse(
                engine.dialect.has_table(engine.connect(), 'sm_volume'))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 15)
            self.assertTrue(
                engine.dialect.has_table(engine.connect(), 'sm_flavors'))
            self.assertTrue(
                engine.dialect.has_table(engine.connect(),
                                         'sm_backend_config'))
            self.assertTrue(
                engine.dialect.has_table(engine.connect(), 'sm_volume'))

    def test_migration_017(self):
        """Test that added encryption information works correctly."""

        # upgrade schema
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 16)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 17)

            # encryption key UUID
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue('encryption_key_id' in volumes.c)
            self.assertTrue(
                isinstance(volumes.c.encryption_key_id.type,
                           sqlalchemy.types.VARCHAR))

            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
            self.assertTrue('encryption_key_id' in snapshots.c)
            self.assertTrue(
                isinstance(snapshots.c.encryption_key_id.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue('volume_type_id' in snapshots.c)
            self.assertTrue(
                isinstance(snapshots.c.volume_type_id.type,
                           sqlalchemy.types.VARCHAR))

            # encryption types table
            encryption = sqlalchemy.Table('encryption',
                                          metadata,
                                          autoload=True)
            self.assertTrue(
                isinstance(encryption.c.volume_type_id.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(encryption.c.cipher.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(encryption.c.key_size.type,
                           sqlalchemy.types.INTEGER))
            self.assertTrue(
                isinstance(encryption.c.provider.type,
                           sqlalchemy.types.VARCHAR))

            # downgrade schema
            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 16)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue('encryption_key_id' not in volumes.c)

            snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
            self.assertTrue('encryption_key_id' not in snapshots.c)

            self.assertFalse(
                engine.dialect.has_table(engine.connect(), 'encryption'))

    def test_migration_018(self):
        """Test that added qos_specs table works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 17)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 18)
            self.assertTrue(
                engine.dialect.has_table(engine.connect(),
                                         "quality_of_service_specs"))
            qos_specs = sqlalchemy.Table('quality_of_service_specs',
                                         metadata,
                                         autoload=True)
            self.assertTrue(
                isinstance(qos_specs.c.created_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(qos_specs.c.updated_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(qos_specs.c.deleted_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(qos_specs.c.deleted.type, sqlalchemy.types.BOOLEAN))
            self.assertTrue(
                isinstance(qos_specs.c.id.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(qos_specs.c.specs_id.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(qos_specs.c.key.type, sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(qos_specs.c.value.type, sqlalchemy.types.VARCHAR))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 17)

            self.assertFalse(
                engine.dialect.has_table(engine.connect(),
                                         "quality_of_service_specs"))

    def test_migration_019(self):
        """Test that adding migration_status column works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 18)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19)
            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue(
                isinstance(volumes.c.migration_status.type,
                           sqlalchemy.types.VARCHAR))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 18)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
            self.assertTrue('migration_status' not in volumes.c)

    def test_migration_020(self):
        """Test adding volume_admin_metadata table works correctly."""
        for (key, engine) in self.engines.items():
            migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                          migration.INIT_VERSION)
            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19)
            metadata = sqlalchemy.schema.MetaData()
            metadata.bind = engine

            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20)

            self.assertTrue(
                engine.dialect.has_table(engine.connect(),
                                         "volume_admin_metadata"))
            volume_admin_metadata = sqlalchemy.Table('volume_admin_metadata',
                                                     metadata,
                                                     autoload=True)

            self.assertTrue(
                isinstance(volume_admin_metadata.c.created_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.updated_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.deleted_at.type,
                           sqlalchemy.types.DATETIME))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.deleted.type,
                           sqlalchemy.types.BOOLEAN))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.deleted.type,
                           sqlalchemy.types.BOOLEAN))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.id.type,
                           sqlalchemy.types.INTEGER))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.volume_id.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.key.type,
                           sqlalchemy.types.VARCHAR))
            self.assertTrue(
                isinstance(volume_admin_metadata.c.value.type,
                           sqlalchemy.types.VARCHAR))

            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 19)

            self.assertFalse(
                engine.dialect.has_table(engine.connect(),
                                         "volume_admin_metadata"))
Example #15
def _get_migrate_repo():
    """Get the path for the migrate repository."""
    global _REPOSITORY
    if _REPOSITORY is None:
        _REPOSITORY = versioning_repository.Repository(get_migrate_repo_path())
    return _REPOSITORY
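Caching the Repository in a module-level global means the migration scripts directory is scanned only once per process; the _REPOSITORY global is presumably initialized to None elsewhere in the module.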
Example #16
    def __init__(self):
        self.MIGRATE_FILE = trove.db.sqlalchemy.migrate_repo.__file__
        self.REPOSITORY = repository.Repository(
            os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
        self.INIT_VERSION = 0
Example #17
    def setUp(self):
        self.config(database_connection='sqlite://',
                    group='storage:sqlalchemy')
        super(SqlalchemyStorageTest, self).setUp()

        self.REPOSITORY = repository.Repository(REPOSITORY)
Example #18
def _get_repo_path():
    return repository.Repository(os.path.split(migrate_repo.__file__)[0])
Example #19
    def REPOSITORY(self):
        migrate_file = cinder.db.sqlalchemy.migrate_repo.__file__
        return repository.Repository(
            os.path.abspath(os.path.dirname(migrate_file)))
Example #20
    def REPOSITORY(self):
        return repository.Repository(
            os.path.abspath(os.path.dirname(migrate_repo.__file__)))
Example #21
    def REPOSITORY(self):
        migrate_file = cinder.db.legacy_migrations.__file__
        return repository.Repository(
            os.path.abspath(os.path.dirname(migrate_file)))