Code example #1
    def create(cls, engine, repository, version=None):
        """
        Declare a database to be under a repository's version control.

        :raises: :exc:`DatabaseAlreadyControlledError`
        :returns: :class:`ControlledSchema`
        """
        # Confirm that the version # is valid: positive, integer,
        # exists in repos
        if isinstance(repository, basestring):
            repository = Repository(repository)
        version = cls._validate_version(repository, version)
        table = cls._create_table_version(engine, repository, version)
        # TODO: history table
        # Load repository information and return
        return cls(engine, repository)
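
A minimal usage sketch of the classmethod above, assuming it is ControlledSchema.create from migrate.versioning.schema and that a migrate repository already exists at the placeholder path 'migrate_repo':

from sqlalchemy import create_engine
from migrate.versioning.schema import ControlledSchema
from migrate.exceptions import DatabaseAlreadyControlledError

engine = create_engine('sqlite:///example.db')
try:
    # place the database under the repository's version control
    schema = ControlledSchema.create(engine, 'migrate_repo')
except DatabaseAlreadyControlledError:
    # calling create() on an already controlled database raises, per the docstring
    pass
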
Code example #2
def includeme(config):
    """ Set up SQLAlchemy """
    LOG.debug('Initializing SQLAlchemy session')

    global repository, engine

    settings = config.get_settings()
    engine = engine_from_config(settings, 'sqlalchemy.')

    try:
        repository = Repository(settings.get('sqlalchemy_migrate.repository', None))
    except (InvalidRepositoryError, TypeError):
        LOG.debug('Migrations disabled. No valid repository found.')
        repository = None

    Session.configure(bind=engine)
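
A hedged sketch of how this includeme might be wired from application code. Only the 'sqlalchemy.' and 'sqlalchemy_migrate.' setting prefixes come from the function above; the dotted module name and concrete values are placeholders.

from pyramid.config import Configurator

settings = {
    'sqlalchemy.url': 'sqlite:///app.db',                      # read by engine_from_config
    'sqlalchemy_migrate.repository': '/path/to/migrate_repo',  # optional; migrations disabled if invalid
}
config = Configurator(settings=settings)
config.include('myapp.models')  # hypothetical module holding the includeme above
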
Code example #3
File: py.py  Project: 571451370/devstack_mitaka
    def make_update_script_for_model(cls, engine, oldmodel, model, repository,
                                     **opts):
        """Create a migration script based on difference between two SA models.

        :param repository: path to migrate repository
        :param oldmodel: dotted.module.name:SAClass or SAClass object
        :param model: dotted.module.name:SAClass or SAClass object
        :param engine: SQLAlchemy engine
        :type repository: string or :class:`Repository instance <migrate.versioning.repository.Repository>`
        :type oldmodel: string or Class
        :type model: string or Class
        :type engine: Engine instance
        :returns: Upgrade / Downgrade script
        :rtype: string
        """

        if isinstance(repository, six.string_types):
            # oh dear, an import cycle!
            from migrate.versioning.repository import Repository
            repository = Repository(repository)

        oldmodel = load_model(oldmodel)
        model = load_model(model)

        # Compute differences.
        diff = schemadiff.getDiffOfModelAgainstModel(
            model, oldmodel, excludeTables=[repository.version_table])
        # TODO: diff can be False (there is no difference?)
        decls, upgradeCommands, downgradeCommands = \
            genmodel.ModelGenerator(diff, engine).genB2AMigration()

        # Store differences into file.
        src = Template(opts.pop('templates_path', None)).get_script(
            opts.pop('templates_theme', None))
        with open(src) as f:
            contents = f.read()

        # generate source
        search = 'def upgrade(migrate_engine):'
        contents = contents.replace(search, '\n\n'.join((decls, search)), 1)
        if upgradeCommands:
            contents = contents.replace('    pass', upgradeCommands, 1)
        if downgradeCommands:
            contents = contents.replace('    pass', downgradeCommands, 1)
        return contents
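
A hedged usage sketch of the same operation through the module-level helper in migrate.versioning.api, which is assumed to take a database URL in place of an Engine; the model paths are placeholders.

from migrate.versioning import api

script = api.make_update_script_for_model(
    'sqlite:///example.db',      # database URL
    'myapp.models_old:Base',     # hypothetical old model (dotted.module:Class)
    'myapp.models:Base',         # hypothetical new model
    '/path/to/migrate_repo')
print(script)                    # generated upgrade/downgrade script source
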
Code example #4
    def test_create(self):
        """Repositories are created successfully"""
        repos = self.tmp_repos()

        # Creating a repository at a path that doesn't exist should succeed
        result = self.env.run('migrate create %s repository_name' % repos)

        # Files should actually be created
        self.assertTrue(os.path.exists(repos))

        # The default table should not be None
        repos_ = Repository(repos)
        self.assertNotEquals(repos_.config.get('db_settings', 'version_table'), 'None')

        # Can't create it again: it already exists
        result = self.env.run('migrate create %s repository_name' % repos,
            expect_error=True)
        self.assertEqual(result.returncode, 2)
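
The shell command exercised above also has a programmatic counterpart; a hedged sketch, assuming migrate.versioning.api.create mirrors the CLI's (path, name) arguments and that the failure surfaces as a migrate exception:

from migrate.versioning import api
from migrate import exceptions

repo_path = '/tmp/example_repo'
api.create(repo_path, 'repository_name')    # creates the repository directory

try:
    api.create(repo_path, 'repository_name')
except exceptions.Error:
    # a second create on the same path fails, matching the returncode check above
    pass
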
Code example #5
File: db.py  Project: khufuproject/khufu_script
    def upgradedb(self):
        from migrate.versioning.api import (upgrade, version_control,
                                            db_version, version)
        from migrate.exceptions import DatabaseNotControlledError
        from migrate.versioning.repository import Repository

        sql_url = self.manager.settings['sqlalchemy.url']
        upgraded = []
        for mod in self.manager.db_migrations:
            mname = str(mod)
            try:
                mod = maybe_resolve(mname)
            except ImportError:
                self.logger.warn("Skipping upgrade, repo doesn't "
                                 "exist - %s" % mname)
                continue

            p = mod.__path__[0]
            repo = Repository(p)
            new = repo.latest

            try:
                old = db_version(sql_url, p)
            except DatabaseNotControlledError:
                self.logger.warn('DB missing version info, '
                                 'updating - %s' % repo.id)
                version_control(sql_url, p, version(p))
                old = db_version(sql_url, p)

            if new <= old:
                self.logger.debug('Upgrade not required - %s (%s)' %
                                  (repo.id, old))
                continue

            try:
                upgrade(sql_url, p)
                self.logger.info('Upgraded %s: %s to %s' % (repo.id, old, new))
                upgraded.append(repo)
            except DatabaseNotControlledError:
                self.logger.warn('DB missing version info, '
                                 'updating - %s' % repo.id)
                version_control(sql_url, p)
        self.logger.info('Updated db schema for %i components' % len(upgraded))
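
Condensed, the per-repository flow above reduces to the following sketch, using the same migrate.versioning.api calls with placeholder URL and path:

from migrate.versioning.api import upgrade, version_control, db_version
from migrate.versioning.repository import Repository
from migrate.exceptions import DatabaseNotControlledError

sql_url = 'sqlite:///app.db'           # placeholder connection URL
repo_path = '/path/to/migrate_repo'    # placeholder repository path
repo = Repository(repo_path)

try:
    current = db_version(sql_url, repo_path)
except DatabaseNotControlledError:
    version_control(sql_url, repo_path)   # start tracking the database version
    current = db_version(sql_url, repo_path)

if repo.latest > current:
    upgrade(sql_url, repo_path)
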
Code example #6
class TestMigrations(unittest.TestCase):
    """Test sqlalchemy-migrate migrations"""

    TEST_DATABASES = {}
    # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
                                      os.path.join('test_migrations.conf'))
    REPOSITORY_PATH = os.path.abspath(
        os.path.join('..', 'db', 'sqlalchemy', 'migrate_repo'))
    REPOSITORY = Repository(REPOSITORY_PATH)

    def __init__(self, *args, **kwargs):
        super(TestMigrations, self).__init__(*args, **kwargs)

    def setUp(self):
        super(TestMigrations, self).setUp()

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if not TestMigrations.TEST_DATABASES:
            if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
                cp = ConfigParser.RawConfigParser()
                try:
                    cp.read(TestMigrations.CONFIG_FILE_PATH)
                    defaults = cp.defaults()
                    for key, value in defaults.items():
                        TestMigrations.TEST_DATABASES[key] = value
                    self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
                except ConfigParser.ParsingError as e:
                    self.fail("Failed to read test_migrations.conf config "
                              "file. Got error: %s" % e)
            else:
                self.fail("Failed to find test_migrations.conf config "
                          "file.")

        self.engines = {}
        for key, value in TestMigrations.TEST_DATABASES.items():
            self.engines[key] = create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()
Code example #7
class TestMigrations(utils.BaseTestCase):
    """Test sqlalchemy-migrate migrations"""

    TEST_DATABASES = {}
    # Test machines can set the GLANCE_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get(
        'GLANCE_TEST_MIGRATIONS_CONF',
        os.path.join('glance', 'tests', 'unit', 'test_migrations.conf'))
    REPOSITORY_PATH = 'glance/db/sqlalchemy/migrate_repo'
    REPOSITORY = Repository(REPOSITORY_PATH)

    def __init__(self, *args, **kwargs):
        super(TestMigrations, self).__init__(*args, **kwargs)

    def setUp(self):
        super(TestMigrations, self).setUp()
        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        if not TestMigrations.TEST_DATABASES:
            if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
                cp = ConfigParser.RawConfigParser()
                try:
                    cp.read(TestMigrations.CONFIG_FILE_PATH)
                    defaults = cp.defaults()
                    for key, value in defaults.items():
                        TestMigrations.TEST_DATABASES[key] = value
                except ConfigParser.ParsingError as e:
                    self.fail("Failed to read test_migrations.conf config "
                              "file. Got error: %s" % e)
            else:
                self.fail("Failed to find test_migrations.conf config "
                          "file.")

        self.engines = {}
        for key, value in TestMigrations.TEST_DATABASES.items():
            self.engines[key] = create_engine(value, poolclass=NullPool)

        # We start each test case with a completely blank slate.
        self._reset_databases()
Code example #8
class TestMigrations(utils.BaseTestCase):
    """Test sqlalchemy-migrate migrations."""

    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                       'test_migrations.conf')
    # Test machines can set the GLANCE_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('GLANCE_TEST_MIGRATIONS_CONF',
                                      DEFAULT_CONFIG_FILE)
    MIGRATE_FILE = glance.db.sqlalchemy.migrate_repo.__file__
    REPOSITORY = Repository(os.path.abspath(os.path.dirname(MIGRATE_FILE)))

    def setUp(self):
        super(TestMigrations, self).setUp()

        self.snake_walk = False
        self.test_databases = {}

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(TestMigrations.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
                self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
            except ConfigParser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config "
                      "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(TestMigrations, self).tearDown()

    def _reset_databases(self):
        def execute_cmd(cmd=None):
            status, output = commands.getstatusoutput(cmd)
            LOG.debug(output)
            self.assertEqual(0, status)
        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = urlparse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = "******"%s\"" % auth_pieces[1]
                sql = ("drop database if exists %(database)s; "
                       "create database %(database)s;") % locals()
                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                       "-e \"%(sql)s\"") % locals()
                execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]

                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    password = auth_pieces[1].strip()
                # note(boris-42): This file is used for authentication
                # without password prompt.
                createpgpass = ("echo '*:*:*:%(user)s:%(password)s' > "
                                "~/.pgpass && chmod 0600 ~/.pgpass" % locals())
                execute_cmd(createpgpass)
                # note(boris-42): We must create and drop database, we can't
                # drop database which we have connected to, so for such
                # operations there is a special database template1.
                sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                          " '%(sql)s' -d template1")
                sql = ("drop database if exists %(database)s;") % locals()
                droptable = sqlcmd % locals()
                execute_cmd(droptable)
                sql = ("create database %(database)s;") % locals()
                createtable = sqlcmd % locals()
                execute_cmd(createtable)

    def test_walk_versions(self):
        """
        Walks all version scripts for each tested database, ensuring
        that there are no errors in the version scripts for each engine
        """
        for key, engine in self.engines.items():
            self._walk_versions(engine, self.snake_walk)

    def test_mysql_connect_fail(self):
        """
        Test that we can trigger a mysql connection failure and we fail
        gracefully to ensure we don't break people without mysql
        """
        if _is_backend_avail('mysql', user="******"):
            self.fail("Shouldn't have connected")

    def test_mysql_opportunistically(self):
        # Test that table creation on mysql only builds InnoDB tables
        if not _is_backend_avail('mysql'):
            self.skipTest("mysql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string("mysql")
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["mysqlcitest"] = engine
        self.test_databases["mysqlcitest"] = connect_string

        # build a fully populated mysql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

        connection = engine.connect()
        # sanity check
        total = connection.execute("SELECT count(*) "
                                   "from information_schema.TABLES "
                                   "where TABLE_SCHEMA='openstack_citest'")
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")

        noninnodb = connection.execute("SELECT count(*) "
                                       "from information_schema.TABLES "
                                       "where TABLE_SCHEMA='openstack_citest' "
                                       "and ENGINE!='InnoDB' "
                                       "and TABLE_NAME!='migrate_version'")
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
        connection.close()

    def test_postgresql_connect_fail(self):
        """
        Test that we can trigger a postgres connection failure and we fail
        gracefully to ensure we don't break people without postgres
        """
        if _is_backend_avail('postgresql', user="******"):
            self.fail("Shouldn't have connected")

    def test_postgresql_opportunistically(self):
        # Test postgresql database migration walk
        if not _is_backend_avail('postgres'):
            self.skipTest("postgresql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string("postgres")
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["postgresqlcitest"] = engine
        self.test_databases["postgresqlcitest"] = connect_string

        # build a fully populated postgresql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

    def _walk_versions(self, engine=None, snake_walk=False, downgrade=True,
                       initial_version=None):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        def db_version():
            return migration_api.db_version(engine, TestMigrations.REPOSITORY)

        # Place the database under version control
        init_version = migration.INIT_VERSION
        if initial_version is not None:
            init_version = initial_version
        migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                      init_version)
        self.assertEqual(init_version, db_version())

        migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                              init_version + 1)
        self.assertEqual(init_version + 1, db_version())

        LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)

        for version in xrange(init_version + 2,
                              TestMigrations.REPOSITORY.latest + 1):
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                self._migrate_down(engine, version - 1, with_data=True)
                self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(
                xrange(init_version + 2,
                       TestMigrations.REPOSITORY.latest + 1)):
                # downgrade -> upgrade -> downgrade
                self._migrate_down(engine, version - 1)
                if snake_walk:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version - 1)

            # Ensure we made it all the way back to the first migration
            self.assertEqual(init_version + 1, db_version())

    def _migrate_down(self, engine, version, with_data=False):
        migration_api.downgrade(engine,
                                TestMigrations.REPOSITORY,
                                version)
        self.assertEqual(version,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(self, "_post_downgrade_%03d" %
                                           (version + 1), None)
            if post_downgrade:
                post_downgrade(engine)

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        if with_data:
            data = None
            pre_upgrade = getattr(self, "_pre_upgrade_%3.3d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        migration_api.upgrade(engine,
                              TestMigrations.REPOSITORY,
                              version)
        self.assertEqual(version,
                         migration_api.db_version(engine,
                                                  TestMigrations.REPOSITORY))

        if with_data:
            check = getattr(self, "_check_%3.3d" % version, None)
            if check:
                check(engine, data)

    def _create_unversioned_001_db(self, engine):
        # Create the initial version of the images table
        meta = sqlalchemy.schema.MetaData()
        meta.bind = engine
        images_001 = sqlalchemy.Table('images', meta,
                                      sqlalchemy.Column('id', models.Integer,
                                                        primary_key=True),
                                      sqlalchemy.Column('name',
                                                        sqlalchemy.String(255)
                                                        ),
                                      sqlalchemy.Column('type',
                                                        sqlalchemy.String(30)),
                                      sqlalchemy.Column('size',
                                                        sqlalchemy.Integer),
                                      sqlalchemy.Column('status',
                                                        sqlalchemy.String(30)),
                                      sqlalchemy.Column('is_public',
                                                        sqlalchemy.Boolean,
                                                        default=False),
                                      sqlalchemy.Column('location',
                                                        sqlalchemy.Text),
                                      sqlalchemy.Column('created_at',
                                                        sqlalchemy.DateTime(),
                                                        nullable=False),
                                      sqlalchemy.Column('updated_at',
                                                        sqlalchemy.DateTime()),
                                      sqlalchemy.Column('deleted_at',
                                                        sqlalchemy.DateTime()),
                                      sqlalchemy.Column('deleted',
                                                        sqlalchemy.Boolean(),
                                                        nullable=False,
                                                        default=False))
        images_001.create()

    def test_version_control_existing_db(self):
        """
        Creates a DB without version control information, places it
        under version control and checks that it can be upgraded
        without errors.
        """
        for key, engine in self.engines.items():
            self._create_unversioned_001_db(engine)
            self._walk_versions(engine, self.snake_walk, initial_version=1)

    def _pre_upgrade_003(self, engine):
        now = datetime.datetime.now()
        images = get_table(engine, 'images')
        data = {'deleted': False, 'created_at': now, 'updated_at': now,
                'type': 'kernel', 'status': 'active', 'is_public': True}
        images.insert().values(data).execute()
        return data

    def _check_003(self, engine, data):
        images = get_table(engine, 'images')
        self.assertTrue('type' not in images.c,
                        "'type' column found in images table columns! "
                        "images table columns reported by metadata: %s\n"
                        % images.c.keys())
        images_prop = get_table(engine, 'image_properties')
        result = images_prop.select().execute()
        types = []
        for row in result:
            if row['key'] == 'type':
                types.append(row['value'])
        self.assertIn(data['type'], types)

    def _pre_upgrade_004(self, engine):
        """Insert checksum data sample to check if migration goes fine with
        data"""
        now = timeutils.utcnow()
        images = get_table(engine, 'images')
        data = [
            {
                'deleted': False, 'created_at': now, 'updated_at': now,
                'type': 'kernel', 'status': 'active', 'is_public': True,
            }
        ]
        engine.execute(images.insert(), data)
        return data

    def _check_004(self, engine, data):
        """Assure that checksum data is present on table"""
        images = get_table(engine, 'images')
        self.assertIn('checksum', images.c)
        self.assertEquals(images.c['checksum'].type.length, 32)

    def _pre_upgrade_005(self, engine):
        now = timeutils.utcnow()
        images = get_table(engine, 'images')
        data = [
            {
                'deleted': False, 'created_at': now, 'updated_at': now,
                'type': 'kernel', 'status': 'active', 'is_public': True,
                # Integer type signed size limit
                'size': 2147483647
            }
        ]
        engine.execute(images.insert(), data)
        return data

    def _check_005(self, engine, data):

        images = get_table(engine, 'images')
        select = images.select().execute()

        sizes = [row['size'] for row in select if row['size'] is not None]
        migrated_data_sizes = [element['size'] for element in data]

        for migrated in migrated_data_sizes:
            self.assertIn(migrated, sizes)

    def _pre_upgrade_006(self, engine):
        now = timeutils.utcnow()
        images = get_table(engine, 'images')
        image_data = [
            {
                'deleted': False, 'created_at': now, 'updated_at': now,
                'type': 'kernel', 'status': 'active', 'is_public': True,
                'id': 9999,
            }
        ]
        engine.execute(images.insert(), image_data)

        images_properties = get_table(engine, 'image_properties')
        properties_data = [
            {
                'id': 10, 'image_id': 9999, 'updated_at': now,
                'created_at': now, 'deleted': False, 'key': 'image_name'
            }
        ]
        engine.execute(images_properties.insert(), properties_data)
        return properties_data

    def _check_006(self, engine, data):
        images_properties = get_table(engine, 'image_properties')
        select = images_properties.select().execute()

        # load names from the name column
        image_names = [row['name'] for row in select]

        # check that each 'key' value from data appears among the loaded names
        for element in data:
            self.assertIn(element['key'], image_names)

    def _pre_upgrade_010(self, engine):
        """Test rows in images with NULL updated_at get updated to equal
        created_at"""

        initial_values = [
            (datetime.datetime(1999, 1, 2, 4, 10, 20),
             datetime.datetime(1999, 1, 2, 4, 10, 30)),
            (datetime.datetime(1999, 2, 4, 6, 15, 25),
             datetime.datetime(1999, 2, 4, 6, 15, 35)),
            (datetime.datetime(1999, 3, 6, 8, 20, 30),
             None),
            (datetime.datetime(1999, 4, 8, 10, 25, 35),
             None),
        ]

        images = get_table(engine, 'images')
        for created_at, updated_at in initial_values:
            row = dict(deleted=False,
                       created_at=created_at,
                       updated_at=updated_at,
                       status='active',
                       is_public=True,
                       min_disk=0,
                       min_ram=0)
            images.insert().values(row).execute()

        return initial_values

    def _check_010(self, engine, data):
        values = dict((c, u) for c, u in data)

        images = get_table(engine, 'images')
        for row in images.select().execute():
            if row['created_at'] in values:
                # updated_at should be unchanged if it was not previously
                # NULL, or set to created_at if it was previously NULL
                updated_at = values.pop(row['created_at']) or row['created_at']
                self.assertEqual(row['updated_at'], updated_at)

        # No initial values should be remaining
        self.assertEqual(len(values), 0)

    def _pre_upgrade_012(self, engine):
        """Test rows in images have id changes from int to varchar(32) and
        value changed from int to UUID. Also test image_members and
        image_properties gets updated to point to new UUID keys"""

        images = get_table(engine, 'images')
        image_members = get_table(engine, 'image_members')
        image_properties = get_table(engine, 'image_properties')

        # Insert kernel, ramdisk and normal images
        now = timeutils.utcnow()
        data = {'created_at': now, 'updated_at': now,
                'status': 'active', 'deleted': False,
                'is_public': True, 'min_disk': 0, 'min_ram': 0}

        test_data = {}
        for name in ('kernel', 'ramdisk', 'normal'):
            data['name'] = '%s migration 012 test' % name
            result = images.insert().values(data).execute()
            test_data[name] = result.inserted_primary_key[0]

        # Insert image_members and image_properties rows
        data = {'created_at': now, 'updated_at': now, 'deleted': False,
                'image_id': test_data['normal'], 'member': 'foobar',
                'can_share': False}
        result = image_members.insert().values(data).execute()
        test_data['member'] = result.inserted_primary_key[0]

        data = {'created_at': now, 'updated_at': now, 'deleted': False,
                'image_id': test_data['normal'], 'name': 'ramdisk_id',
                'value': test_data['ramdisk']}
        result = image_properties.insert().values(data).execute()
        test_data['properties'] = [result.inserted_primary_key[0]]

        data.update({'name': 'kernel_id', 'value': test_data['kernel']})
        result = image_properties.insert().values(data).execute()
        test_data['properties'].append(result.inserted_primary_key)

        return test_data

    def _check_012(self, engine, test_data):
        images = get_table(engine, 'images')
        image_members = get_table(engine, 'image_members')
        image_properties = get_table(engine, 'image_properties')

        # Find kernel, ramdisk and normal images. Make sure id has been
        # changed to a uuid
        uuids = {}
        for name in ('kernel', 'ramdisk', 'normal'):
            image_name = '%s migration 012 test' % name
            rows = images.select()\
                         .where(images.c.name == image_name)\
                         .execute().fetchall()

            self.assertEquals(len(rows), 1)

            row = rows[0]
            print(repr(dict(row)))
            self.assertTrue(uuidutils.is_uuid_like(row['id']))

            uuids[name] = row['id']

        # Find all image_members to ensure image_id has been updated
        results = image_members.select()\
                               .where(image_members.c.image_id ==
                                      uuids['normal'])\
                               .execute().fetchall()
        self.assertEquals(len(results), 1)

        # Find all image_properties to ensure image_id has been updated
        # as well as ensure kernel_id and ramdisk_id values have been
        # updated too
        results = image_properties.select()\
                                  .where(image_properties.c.image_id ==
                                         uuids['normal'])\
                                  .execute().fetchall()
        self.assertEquals(len(results), 2)
        for row in results:
            self.assertIn(row['name'], ('kernel_id', 'ramdisk_id'))

            if row['name'] == 'kernel_id':
                self.assertEqual(row['value'], uuids['kernel'])
            if row['name'] == 'ramdisk_id':
                self.assertEqual(row['value'], uuids['ramdisk'])

    def _post_downgrade_012(self, engine):
        images = get_table(engine, 'images')
        image_members = get_table(engine, 'image_members')
        image_properties = get_table(engine, 'image_properties')

        # Find kernel, ramdisk and normal images. Make sure id has been
        # changed back to an integer
        ids = {}
        for name in ('kernel', 'ramdisk', 'normal'):
            image_name = '%s migration 012 test' % name
            rows = images.select()\
                         .where(images.c.name == image_name)\
                         .execute().fetchall()
            self.assertEquals(len(rows), 1)

            row = rows[0]
            self.assertFalse(uuidutils.is_uuid_like(row['id']))

            ids[name] = row['id']

        # Find all image_members to ensure image_id has been updated
        results = image_members.select()\
                               .where(image_members.c.image_id ==
                                      ids['normal'])\
                               .execute().fetchall()
        self.assertEquals(len(results), 1)

        # Find all image_properties to ensure image_id has been updated
        # as well as ensure kernel_id and ramdisk_id values have been
        # updated too
        results = image_properties.select()\
                                  .where(image_properties.c.image_id ==
                                         ids['normal'])\
                                  .execute().fetchall()
        self.assertEquals(len(results), 2)
        for row in results:
            self.assertIn(row['name'], ('kernel_id', 'ramdisk_id'))

            if row['name'] == 'kernel_id':
                self.assertEqual(row['value'], str(ids['kernel']))
            if row['name'] == 'ramdisk_id':
                self.assertEqual(row['value'], str(ids['ramdisk']))

    def _pre_upgrade_015(self, engine):
        images = get_table(engine, 'images')
        unquoted_locations = [
            'swift://*****:*****@example.com/container/obj-id',
            'file://foo',
        ]
        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0)
        data = []
        for i, location in enumerate(unquoted_locations):
            temp.update(location=location, id=uuidutils.generate_uuid())
            data.append(temp)
            images.insert().values(temp).execute()
        return data

    def _check_015(self, engine, data):
        images = get_table(engine, 'images')
        quoted_locations = [
            'swift://acct%3Ausr:[email protected]/container/obj-id',
            'file://foo',
        ]
        result = images.select().execute()
        locations = map(lambda x: x['location'], result)
        for loc in quoted_locations:
            self.assertIn(loc, locations)

    def _pre_upgrade_016(self, engine):
        images = get_table(engine, 'images')
        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    id='fake-image-id1')
        images.insert().values(temp).execute()
        image_members = get_table(engine, 'image_members')
        now = datetime.datetime.now()
        data = {'deleted': False,
                'created_at': now,
                'member': 'fake-member',
                'updated_at': now,
                'can_share': False,
                'image_id': 'fake-image-id1'}
        image_members.insert().values(data).execute()
        return data

    def _check_016(self, engine, data):
        image_members = get_table(engine, 'image_members')
        self.assertTrue('status' in image_members.c,
                        "'status' column not found in image_members table "
                        "columns! image_members table columns: %s"
                        % image_members.c.keys())

    def _pre_upgrade_017(self, engine):
        metadata_encryption_key = 'a' * 16
        self.config(metadata_encryption_key=metadata_encryption_key)
        images = get_table(engine, 'images')
        unquoted = 'swift://*****:*****@example.com/container/obj-id'
        encrypted_unquoted = crypt.urlsafe_encrypt(
                                    metadata_encryption_key,
                                    unquoted, 64)
        data = []
        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    location=encrypted_unquoted,
                    id='fakeid1')
        images.insert().values(temp).execute()

        locations = [
            'file://ab',
            'file://abc',
            'swift://acct3A%foobar:[email protected]/container/obj-id2'
        ]

        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0)
        for i, location in enumerate(locations):
            temp.update(location=location, id=uuidutils.generate_uuid())
            data.append(temp)
            images.insert().values(temp).execute()
        return data

    def _check_017(self, engine, data):
        metadata_encryption_key = 'a' * 16
        quoted = 'swift://acct%3Ausr:[email protected]/container/obj-id'
        images = get_table(engine, 'images')
        result = images.select().execute()
        locations = map(lambda x: x['location'], result)
        actual_location = []
        for location in locations:
            if location:
                try:
                    temp_loc = crypt.urlsafe_decrypt(metadata_encryption_key,
                                                     location)
                    actual_location.append(temp_loc)
                except TypeError:
                    actual_location.append(location)
                except ValueError:
                    actual_location.append(location)

        self.assertIn(quoted, actual_location)
        loc_list = ['file://ab',
                    'file://abc',
                    'swift://acct3A%foobar:[email protected]/container/obj-id2']

        for location in loc_list:
            if location not in actual_location:
                self.fail(_("location: %s data lost") % location)

    def _pre_upgrade_019(self, engine):
        images = get_table(engine, 'images')
        now = datetime.datetime.now()
        base_values = {
                'deleted': False,
                'created_at': now,
                'updated_at': now,
                'status': 'active',
                'is_public': True,
                'min_disk': 0,
                'min_ram': 0,
        }
        data = [
            {'id': 'fake-19-1', 'location': 'http://glance.example.com'},
            #NOTE(bcwaldon): images with a location of None should
            # not be migrated
            {'id': 'fake-19-2', 'location': None},
        ]
        for image in data:
            image.update(base_values)
            images.insert().values(image).execute()
        return data

    def _check_019(self, engine, data):
        image_locations = get_table(engine, 'image_locations')
        records = image_locations.select().execute().fetchall()
        locations = dict([(il.image_id, il.value) for il in records])
        self.assertEqual(locations.get('fake-19-1'),
                         'http://glance.example.com')

    def _check_020(self, engine, data):
        images = get_table(engine, 'images')
        self.assertFalse('location' in images.c)

    def _pre_upgrade_026(self, engine):
        image_locations = get_table(engine, 'image_locations')

        now = datetime.datetime.now()
        image_id = 'fake_id'
        url = 'file:///some/place/onthe/fs'

        images = get_table(engine, 'images')
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    id=image_id)
        images.insert().values(temp).execute()

        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    image_id=image_id,
                    value=url)
        image_locations.insert().values(temp).execute()
        return image_id

    def _check_026(self, engine, data):
        image_locations = get_table(engine, 'image_locations')
        results = image_locations.select()\
            .where(image_locations.c.image_id == data).execute()

        r = list(results)
        self.assertEquals(len(r), 1)
        self.assertEquals(r[0]['value'], 'file:///some/place/onthe/fs')
        self.assertTrue('meta_data' in r[0])
        x = pickle.loads(r[0]['meta_data'])
        self.assertEqual(x, {})

    def _check_027(self, engine, data):
        table = "images"
        index = "checksum_image_idx"
        columns = ["checksum"]

        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in new_table.indexes]

        self.assertIn((index, columns), index_data)

    def _check_028(self, engine, data):
        owner_index = "owner_image_idx"
        columns = ["owner"]

        images_table = get_table(engine, 'images')

        index_data = [(idx.name, idx.columns.keys())
                      for idx in images_table.indexes
                      if idx.name == owner_index]

        self.assertIn((owner_index, columns), index_data)

    def _post_downgrade_028(self, engine):
        owner_index = "owner_image_idx"
        columns = ["owner"]

        images_table = get_table(engine, 'images')

        index_data = [(idx.name, idx.columns.keys())
                      for idx in images_table.indexes
                      if idx.name == owner_index]

        self.assertNotIn((owner_index, columns), index_data)

    def _pre_upgrade_029(self, engine):
        image_locations = get_table(engine, 'image_locations')

        meta_data = {'somelist': ['a', 'b', 'c'], 'avalue': 'hello',
                     'adict': {}}

        now = datetime.datetime.now()
        image_id = 'fake_029_id'
        url = 'file:///some/place/onthe/fs029'

        images = get_table(engine, 'images')
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    id=image_id)
        images.insert().values(temp).execute()

        pickle_md = pickle.dumps(meta_data)
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    image_id=image_id,
                    value=url,
                    meta_data=pickle_md)
        image_locations.insert().values(temp).execute()

        return meta_data, image_id

    def _check_029(self, engine, data):
        meta_data = data[0]
        image_id = data[1]
        image_locations = get_table(engine, 'image_locations')

        records = image_locations.select().\
            where(image_locations.c.image_id == image_id).execute().fetchall()

        for r in records:
            d = json.loads(r['meta_data'])
            self.assertEqual(d, meta_data)

    def _post_downgrade_029(self, engine):
        image_id = 'fake_029_id'

        image_locations = get_table(engine, 'image_locations')

        records = image_locations.select().\
            where(image_locations.c.image_id == image_id).execute().fetchall()

        for r in records:
            md = r['meta_data']
            d = pickle.loads(md)
            self.assertEqual(type(d), dict)

    def _check_030(self, engine, data):
        table = "tasks"
        index_type = ('ix_tasks_type', ['type'])
        index_status = ('ix_tasks_status', ['status'])
        index_owner = ('ix_tasks_owner', ['owner'])
        index_deleted = ('ix_tasks_deleted', ['deleted'])
        index_updated_at = ('ix_tasks_updated_at', ['updated_at'])

        meta = sqlalchemy.MetaData()
        meta.bind = engine

        tasks_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in tasks_table.indexes]

        self.assertIn(index_type, index_data)
        self.assertIn(index_status, index_data)
        self.assertIn(index_owner, index_data)
        self.assertIn(index_deleted, index_data)
        self.assertIn(index_updated_at, index_data)

        expected = [u'id',
                    u'type',
                    u'status',
                    u'owner',
                    u'input',
                    u'result',
                    u'message',
                    u'expires_at',
                    u'created_at',
                    u'updated_at',
                    u'deleted_at',
                    u'deleted']

        # NOTE(flwang): Skip the column type checking for now since Jenkins is
        # using sqlalchemy.dialects.postgresql.base.TIMESTAMP instead of
        # DATETIME which is using by mysql and sqlite.
        col_data = [col.name for col in tasks_table.columns]
        self.assertEqual(expected, col_data)

    def _post_downgrade_030(self, engine):
        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          get_table, engine, 'tasks')
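
The per-version hooks used by _migrate_up and _migrate_down above are looked up purely by zero-padded name; a small illustrative sketch of that convention (the class and values here are not part of the test suite):

class HookExample(object):
    # version 3 maps to _pre_upgrade_003 / _check_003
    def _pre_upgrade_003(self, engine):
        return {'marker': 'seeded before migration 3'}

    def _check_003(self, engine, data):
        assert data['marker'] == 'seeded before migration 3'

obj = HookExample()
version = 3
pre_upgrade = getattr(obj, "_pre_upgrade_%3.3d" % version, None)
check = getattr(obj, "_check_%3.3d" % version, None)

data = pre_upgrade(None) if pre_upgrade else None   # engine omitted in this sketch
if check:
    check(None, data)
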
Code example #9
File: test_migrations.py  Project: cp16net/qonos
class TestMigrations(utils.BaseTestCase):
    """Test sqlalchemy-migrate migrations."""

    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                       'test_migrations.conf')
    # Test machines can set the QONOS_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('QONOS_TEST_MIGRATIONS_CONF',
                                      DEFAULT_CONFIG_FILE)
    MIGRATE_FILE = qonos.db.sqlalchemy.migrate_repo.__file__
    REPOSITORY = Repository(os.path.abspath(os.path.dirname(MIGRATE_FILE)))

    def setUp(self):
        super(TestMigrations, self).setUp()

        self.snake_walk = False
        self.test_databases = {}

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(TestMigrations.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
                self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
            except ConfigParser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config " "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(TestMigrations, self).tearDown()

    def _reset_databases(self):
        def execute_cmd(cmd=None):
            status, output = commands.getstatusoutput(cmd)
            LOG.debug(output)
            self.assertEqual(0, status)

        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = urlparse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = "******"%s\"" % auth_pieces[1]
                sql = ("drop database if exists %(database)s; "
                       "create database %(database)s;") % locals()
                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                       "-e \"%(sql)s\"") % locals()
                execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]

                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    password = auth_pieces[1].strip()
                # note(boris-42): This file is used for authentication
                # without password prompt.
                createpgpass = ("echo '*:*:*:%(user)s:%(password)s' > "
                                "~/.pgpass && chmod 0600 ~/.pgpass" % locals())
                execute_cmd(createpgpass)
                # note(boris-42): We must create and drop database, we can't
                # drop database which we have connected to, so for such
                # operations there is a special database template1.
                sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                          " '%(sql)s' -d template1")
                sql = ("drop database if exists %(database)s;") % locals()
                droptable = sqlcmd % locals()
                execute_cmd(droptable)
                sql = ("create database %(database)s;") % locals()
                createtable = sqlcmd % locals()
                execute_cmd(createtable)

    def test_walk_versions(self):
        """
        Walks all version scripts for each tested database, ensuring
        that there are no errors in the version scripts for each engine
        """
        for key, engine in self.engines.items():
            self._walk_versions(engine, self.snake_walk)

    def test_mysql_connect_fail(self):
        """
        Test that we can trigger a mysql connection failure and we fail
        gracefully to ensure we don't break people without mysql
        """
        if _is_backend_avail('mysql', user="******"):
            self.fail("Shouldn't have connected")

    def test_mysql_opportunistically(self):
        # Test that table creation on mysql only builds InnoDB tables
        if not _is_backend_avail('mysql'):
            self.skipTest("mysql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string("mysql")
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["mysqlcitest"] = engine
        self.test_databases["mysqlcitest"] = connect_string

        # build a fully populated mysql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

        connection = engine.connect()
        # sanity check
        total = connection.execute("SELECT count(*) "
                                   "from information_schema.TABLES "
                                   "where TABLE_SCHEMA='qonos_citest'")
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")

        noninnodb = connection.execute("SELECT count(*) "
                                       "from information_schema.TABLES "
                                       "where TABLE_SCHEMA='qonos_citest' "
                                       "and ENGINE!='InnoDB' "
                                       "and TABLE_NAME!='migrate_version'")
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
        connection.close()

    def test_postgresql_connect_fail(self):
        """
        Test that we can trigger a postgres connection failure and we fail
        gracefully to ensure we don't break people without postgres
        """
        if _is_backend_avail('postgresql', user="******"):
            self.fail("Shouldn't have connected")

    def test_postgresql_opportunistically(self):
        # Test postgresql database migration walk
        if not _is_backend_avail('postgres'):
            self.skipTest("postgresql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string("postgres")
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["postgresqlcitest"] = engine
        self.test_databases["postgresqlcitest"] = connect_string

        # build a fully populated postgresql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

    def _walk_versions(self,
                       engine=None,
                       snake_walk=False,
                       downgrade=True,
                       initial_version=None):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        def db_version():
            return migration_api.db_version(engine, TestMigrations.REPOSITORY)

        # Place the database under version control
        init_version = migration.INIT_VERSION
        if initial_version is not None:
            init_version = initial_version
        migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                      init_version)
        self.assertEqual(init_version, db_version())

        migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                              init_version + 1)
        self.assertEqual(init_version + 1, db_version())

        LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)

        for version in xrange(init_version + 2,
                              TestMigrations.REPOSITORY.latest + 1):
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                self._migrate_down(engine, version - 1, with_data=True)
                self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(
                    xrange(init_version + 2,
                           TestMigrations.REPOSITORY.latest + 1)):
                # downgrade -> upgrade -> downgrade
                self._migrate_down(engine, version - 1)
                if snake_walk:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version - 1)

            # Ensure we made it all the way back to the first migration
            self.assertEqual(init_version + 1, db_version())

    def _migrate_down(self, engine, version, with_data=False):
        migration_api.downgrade(engine, TestMigrations.REPOSITORY, version)
        self.assertEqual(
            version, migration_api.db_version(engine,
                                              TestMigrations.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(self,
                                     "_post_downgrade_%03d" % (version + 1),
                                     None)
            if post_downgrade:
                post_downgrade(engine)

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        if with_data:
            data = None
            pre_upgrade = getattr(self, "_pre_upgrade_%3.3d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        migration_api.upgrade(engine, TestMigrations.REPOSITORY, version)
        self.assertEqual(
            version, migration_api.db_version(engine,
                                              TestMigrations.REPOSITORY))

        if with_data:
            check = getattr(self, "_check_%3.3d" % version, None)
            if check:
                check(engine, data)
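
    # --- Illustrative sketch, not part of the original suite ----------------
    # The two methods below only demonstrate the hook convention that
    # _walk_versions relies on; real hooks must be named _pre_upgrade_NNN,
    # _check_NNN and _post_downgrade_NNN for a migration NNN that exists in
    # the repository. These example names are never picked up by getattr.
    def _example_pre_upgrade(self, engine):
        # Seed a row that the corresponding migration should leave intact.
        schedules = get_table(engine, 'schedules')
        row = {'id': 'SCHD-EXAMPLE',
               'tenant': 'OWNER-1',
               'action': 'snapshot',
               'created_at': datetime.datetime.now()}
        schedules.insert().values(row).execute()
        return row

    def _example_check(self, engine, data):
        # Verify the seeded row survived the upgrade unchanged.
        schedules = get_table(engine, 'schedules')
        result = schedules.select(
            schedules.c.id == data['id']).execute().fetchall()
        self.assertEqual(1, len(result))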

    def _create_unversioned_001_db(self, engine):
        # Create the initial version of the schedules table
        meta = sqlalchemy.schema.MetaData()
        meta.bind = engine

        schedules_001 = sqlalchemy.Table(
            'schedules', meta,
            sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True),
            sqlalchemy.Column('tenant', sqlalchemy.String(255),
                              nullable=False),
            sqlalchemy.Column('action', sqlalchemy.String(255),
                              nullable=False),
            sqlalchemy.Column('minute', sqlalchemy.Integer),
            sqlalchemy.Column('hour', sqlalchemy.Integer),
            sqlalchemy.Column('day_of_month', sqlalchemy.Integer),
            sqlalchemy.Column('month', sqlalchemy.Integer),
            sqlalchemy.Column('day_of_week', sqlalchemy.Integer),
            sqlalchemy.Column('last_scheduled', sqlalchemy.DateTime),
            sqlalchemy.Column('next_run', sqlalchemy.DateTime),
            sqlalchemy.Column('created_at',
                              sqlalchemy.DateTime,
                              nullable=False),
            sqlalchemy.Column('updated_at', sqlalchemy.DateTime))

        schedules_001.create()

    def test_version_control_existing_db(self):
        """
        Creates a DB without version control information, places it
        under version control and checks that it can be upgraded
        without errors.
        """
        for key, engine in self.engines.items():
            self._create_unversioned_001_db(engine)
            self._walk_versions(engine, self.snake_walk, initial_version=1)

    def _assert_script_001(self, meta):
        schedules_table = sqlalchemy.Table('schedules', meta, autoload=True)
        expected_col_names = [
            u'id',
            u'tenant',
            u'action',
            u'minute',
            u'hour',
            u'day_of_month',
            u'month',
            u'day_of_week',
            u'last_scheduled',
            u'next_run',
            u'created_at',
            u'updated_at',
        ]
        col_names = [col.name for col in schedules_table.columns]
        self.assertEqual(expected_col_names, col_names)

        # try insert and fetch a record
        # Setting microsecond to 0 because MySQL only stores datetimes with
        # second precision
        now = datetime.datetime.now().replace(microsecond=0)
        ins_schedule = {
            'id': 'WORKER-1',
            'tenant': 'OWNER-1',
            'action': 'snapshot',
            'minute': 30,
            'hour': 1,
            'day_of_month': 1,
            'month': 12,
            'day_of_week': 1,
            'last_scheduled': now,
            'next_run': now,
            'created_at': now,
            'updated_at': now
        }
        schedules_table.insert().values(ins_schedule).execute()
        ret_schedules = schedules_table.select().execute().fetchall()

        self.assertEqual(1, len(ret_schedules))
        self.assertEqual(ins_schedule['id'], ret_schedules[0]['id'])
        self.assertEqual(ins_schedule['tenant'], ret_schedules[0]['tenant'])
        self.assertEqual(ins_schedule['action'], ret_schedules[0]['action'])
        self.assertEqual(ins_schedule['minute'], ret_schedules[0]['minute'])
        self.assertEqual(ins_schedule['hour'], ret_schedules[0]['hour'])
        self.assertEqual(ins_schedule['day_of_month'],
                         ret_schedules[0]['day_of_month'])
        self.assertEqual(ins_schedule['month'], ret_schedules[0]['month'])
        self.assertEqual(ins_schedule['day_of_week'],
                         ret_schedules[0]['day_of_week'])
        self.assertEqual(ins_schedule['last_scheduled'],
                         ret_schedules[0]['last_scheduled'])
        self.assertEqual(ins_schedule['next_run'],
                         ret_schedules[0]['next_run'])
        datetime_without_msec = "%Y-%m-%d %H:%M:%S"
        self.assertEqual(
            ins_schedule['created_at'].strftime(datetime_without_msec),
            ret_schedules[0]['created_at'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_schedule['updated_at'].strftime(datetime_without_msec),
            ret_schedules[0]['updated_at'].strftime(datetime_without_msec))

    def _assert_script_002(self, meta):
        schedule_metadata_table = sqlalchemy.Table('schedule_metadata',
                                                   meta,
                                                   autoload=True)
        expected_col_names = [
            u'id',
            u'schedule_id',
            u'key',
            u'value',
            u'created_at',
            u'updated_at',
        ]
        col_names = [col.name for col in schedule_metadata_table.columns]
        self.assertEqual(expected_col_names, col_names)

        # try insert and fetch a record
        now = datetime.datetime.now()
        ins_schedule_metadata = {
            'id': 'WORKER-1',
            'schedule_id': 'SCHD-1',
            'key': 'some_key',
            'value': 'some_value',
            'created_at': now,
            'updated_at': now
        }
        schedule_metadata_table.insert().\
            values(ins_schedule_metadata).execute()
        ret_schedule_metadata = schedule_metadata_table.select()\
            .execute().fetchall()

        self.assertEqual(1, len(ret_schedule_metadata))
        self.assertEqual(ins_schedule_metadata['id'],
                         ret_schedule_metadata[0]['id'])
        self.assertEqual(ins_schedule_metadata['schedule_id'],
                         ret_schedule_metadata[0]['schedule_id'])
        self.assertEqual(ins_schedule_metadata['key'],
                         ret_schedule_metadata[0]['key'])
        self.assertEqual(ins_schedule_metadata['value'],
                         ret_schedule_metadata[0]['value'])
        datetime_without_msec = "%Y-%m-%d %H:%M:%S"
        self.assertEqual(
            ins_schedule_metadata['created_at'].strftime(
                datetime_without_msec), ret_schedule_metadata[0]
            ['created_at'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_schedule_metadata['updated_at'].strftime(
                datetime_without_msec), ret_schedule_metadata[0]
            ['updated_at'].strftime(datetime_without_msec))

    def _check_003(self, engine, data):
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        # Note (venkatesh) temporarily testing scripts 001 & 002 here.
        # need to find a better way to test them separately.
        # _check_001
        self._assert_script_001(meta)

        # _check_002
        self._assert_script_002(meta)

        # _check_003
        workers_table = sqlalchemy.Table('workers', meta, autoload=True)

        expected_col_names = [
            u'id',
            u'host',
            u'process_id',
            u'created_at',
            u'updated_at',
        ]

        col_names = [col.name for col in workers_table.columns]
        self.assertEqual(expected_col_names, col_names)

        # try insert and fetch a record
        now = datetime.datetime.now()
        ins_worker = {
            'id': 'WORKER-1',
            'host': 'localhost',
            'process_id': 12345,
            'created_at': now,
            'updated_at': now
        }
        workers_table.insert().values(ins_worker).execute()
        ret_workers = workers_table.select().execute().fetchall()

        self.assertEqual(1, len(ret_workers))
        self.assertEqual(ins_worker['id'], ret_workers[0]['id'])
        self.assertEqual(ins_worker['host'], ret_workers[0]['host'])
        self.assertEqual(ins_worker['process_id'],
                         ret_workers[0]['process_id'])
        datetime_without_msec = "%Y-%m-%d %H:%M:%S"
        self.assertEqual(
            ins_worker['created_at'].strftime(datetime_without_msec),
            ret_workers[0]['created_at'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_worker['updated_at'].strftime(datetime_without_msec),
            ret_workers[0]['updated_at'].strftime(datetime_without_msec))

    def _post_downgrade_003(self, engine):
        self.assertRaises(sqlalchemy.exc.NoSuchTableError, get_table, engine,
                          'workers')

    def _check_004(self, engine, data):
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        jobs_table = sqlalchemy.Table('jobs', meta, autoload=True)

        expected_col_names = [
            u'id',
            u'schedule_id',
            u'tenant',
            u'worker_id',
            u'status',
            u'action',
            u'retry_count',
            u'timeout',
            u'hard_timeout',
            u'created_at',
            u'updated_at',
        ]

        col_names = [col.name for col in jobs_table.columns]
        self.assertEqual(expected_col_names, col_names)

        # try insert and fetch a record
        now = datetime.datetime.now()

        ins_job = {
            'id': 'JOB-1',
            'schedule_id': 'SCHD-1',
            'tenant': 'OWNER-1',
            'worker_id': 'WORKER-1',
            'status': 'success',
            'action': 'snapshot',
            'retry_count': 3,
            'timeout': now,
            'hard_timeout': now,
            'created_at': now,
            'updated_at': now
        }
        jobs_table.insert().values(ins_job).execute()
        ret_jobs = jobs_table.select().execute().fetchall()

        self.assertEqual(1, len(ret_jobs))
        self.assertEqual(ins_job['id'], ret_jobs[0]['id'])
        self.assertEqual(ins_job['schedule_id'], ret_jobs[0]['schedule_id'])
        self.assertEqual(ins_job['tenant'], ret_jobs[0]['tenant'])
        self.assertEqual(ins_job['worker_id'], ret_jobs[0]['worker_id'])
        self.assertEqual(ins_job['status'], ret_jobs[0]['status'])
        self.assertEqual(ins_job['action'], ret_jobs[0]['action'])
        self.assertEqual(ins_job['retry_count'], ret_jobs[0]['retry_count'])
        datetime_without_msec = "%Y-%m-%d %H:%M:%S"
        self.assertEqual(
            ins_job['timeout'].strftime(datetime_without_msec),
            ret_jobs[0]['timeout'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_job['hard_timeout'].strftime(datetime_without_msec),
            ret_jobs[0]['hard_timeout'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_job['created_at'].strftime(datetime_without_msec),
            ret_jobs[0]['created_at'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_job['updated_at'].strftime(datetime_without_msec),
            ret_jobs[0]['updated_at'].strftime(datetime_without_msec))

    def _post_downgrade_004(self, engine):
        self.assertRaises(sqlalchemy.exc.NoSuchTableError, get_table, engine,
                          'jobs')

    def _check_005(self, engine, data):
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        job_metadata_table = sqlalchemy.Table('job_metadata',
                                              meta,
                                              autoload=True)
        expected_col_names = [
            u'id',
            u'job_id',
            u'key',
            u'value',
            u'created_at',
            u'updated_at',
        ]

        col_names = [col.name for col in job_metadata_table.columns]
        self.assertEqual(expected_col_names, col_names)

        # try insert and fetch a record
        now = datetime.datetime.now()

        ins_job_metadata = {
            'id': 'JOB-META-1',
            'job_id': 'JOB-1',
            'key': 'some_key',
            'value': 'some_value',
            'created_at': now,
            'updated_at': now
        }
        job_metadata_table.insert().values(ins_job_metadata).execute()
        ret_job_metadata = job_metadata_table.select().execute().fetchall()

        self.assertEqual(1, len(ret_job_metadata))
        self.assertEqual(ins_job_metadata['id'], ret_job_metadata[0]['id'])
        self.assertEqual(ins_job_metadata['key'], ret_job_metadata[0]['key'])
        self.assertEqual(ins_job_metadata['value'],
                         ret_job_metadata[0]['value'])
        datetime_without_msec = "%Y-%m-%d %H:%M:%S"
        self.assertEqual(
            ins_job_metadata['created_at'].strftime(datetime_without_msec),
            ret_job_metadata[0]['created_at'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_job_metadata['updated_at'].strftime(datetime_without_msec),
            ret_job_metadata[0]['updated_at'].strftime(datetime_without_msec))

    def _post_downgrade_005(self, engine):
        self.assertRaises(sqlalchemy.exc.NoSuchTableError, get_table, engine,
                          'job_metadata')

    def _check_006(self, engine, data):
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        job_faults_table = sqlalchemy.Table('job_faults', meta, autoload=True)

        expected_col_names = [
            u'id',
            u'job_id',
            u'schedule_id',
            u'tenant',
            u'worker_id',
            u'action',
            u'message',
            u'job_metadata',
            u'created_at',
            u'updated_at',
        ]

        col_names = [col.name for col in job_faults_table.columns]
        self.assertEqual(expected_col_names, col_names)

        # try insert and fetch a record
        now = datetime.datetime.now()

        ins_job_fault = {
            'id': 'JOB-META-1',
            'job_id': 'JOB-1',
            'schedule_id': 'SCHD-1',
            'tenant': 'OWNER-1',
            'worker_id': 'WORKER-1',
            'action': '{snapshot:true}',
            'message': 'Error Occurred',
            'job_metadata': '{key1:value1, key2:value2}',
            'created_at': now,
            'updated_at': now
        }
        job_faults_table.insert().values(ins_job_fault).execute()
        ret_job_faults = job_faults_table.select().execute().fetchall()

        self.assertEqual(1, len(ret_job_faults))
        self.assertEqual(ins_job_fault['id'], ret_job_faults[0]['id'])
        self.assertEqual(ins_job_fault['job_id'], ret_job_faults[0]['job_id'])
        self.assertEqual(ins_job_fault['schedule_id'],
                         ret_job_faults[0]['schedule_id'])
        self.assertEqual(ins_job_fault['tenant'], ret_job_faults[0]['tenant'])
        self.assertEqual(ins_job_fault['worker_id'],
                         ret_job_faults[0]['worker_id'])
        self.assertEqual(ins_job_fault['action'], ret_job_faults[0]['action'])
        self.assertEqual(ins_job_fault['message'],
                         ret_job_faults[0]['message'])
        self.assertEqual(ins_job_fault['job_metadata'],
                         ret_job_faults[0]['job_metadata'])
        datetime_without_msec = "%Y-%m-%d %H:%M:%S"
        self.assertEqual(
            ins_job_fault['created_at'].strftime(datetime_without_msec),
            ret_job_faults[0]['created_at'].strftime(datetime_without_msec))
        self.assertEqual(
            ins_job_fault['updated_at'].strftime(datetime_without_msec),
            ret_job_faults[0]['updated_at'].strftime(datetime_without_msec))

    def _post_downgrade_006(self, engine):
        self.assertRaises(sqlalchemy.exc.NoSuchTableError, get_table, engine,
                          'job_faults')

    def _check_007(self, engine, data):
        jobs = get_table(engine, 'jobs')
        self.assertTrue('version_id' in jobs.c.keys())

    def _post_downgrade_007(self, engine):
        jobs = get_table(engine, 'jobs')
        self.assertFalse('version_id' in jobs.c.keys())

    def _check_008(self, engine, data):
        table = "schedules"
        index = "next_run_idx"
        columns = ["next_run"]

        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in new_table.indexes]

        self.assertIn((index, columns), index_data)

    def _post_downgrade_008(self, engine):
        table = "schedules"
        index = "next_run_idx"
        columns = ["next_run"]

        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in new_table.indexes]

        self.assertNotIn((index, columns), index_data)

    def _check_009(self, engine, data):
        table = "jobs"
        index = "hard_timeout_idx"
        columns = ["hard_timeout"]
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in new_table.indexes]

        self.assertIn((index, columns), index_data)

    def _post_downgrade_009(self, engine):
        table = "jobs"
        index = "hard_timeout_idx"
        columns = ["hard_timeout"]
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in new_table.indexes]

        self.assertNotIn((index, columns), index_data)

    def _pre_upgrade_010(self, engine):
        initial_values = [
            ('JOB-1', uuidutils.generate_uuid()),
            ('JOB-2', uuidutils.generate_uuid()),
            ('JOB-3', None),
            ('JOB-4', None),
            ('JOB-5', None),
        ]

        jobs_table = get_table(engine, 'jobs')
        for job_id, version_id in initial_values:
            now = datetime.datetime.now()
            new_job = {
                'id': job_id,
                'schedule_id': 'SCHD-1',
                'tenant': 'OWNER-1',
                'worker_id': 'WORKER-1',
                'status': 'success',
                'action': 'snapshot',
                'retry_count': 3,
                'timeout': now,
                'hard_timeout': now,
                'version_id': version_id,
                'created_at': now,
                'updated_at': now
            }

            jobs_table.insert().values(new_job).execute()

        return initial_values

    def _check_010(self, engine, data):
        values = dict((jid, vid) for jid, vid in data)

        jobs = get_table(engine, 'jobs')
        for row in jobs.select().execute():
            if row['id'] in values:
                version_id = values.pop(row['id']) or row['id']
                self.assertEqual(row['version_id'], version_id)

        self.assertEqual(len(values), 0)

    def _post_downgrade_010(self, engine):
        jobs = get_table(engine, 'jobs')

        # for jobs with no version_id
        expected_job_ids = set(['JOB-3', 'JOB-4', 'JOB-5'])
        results = jobs.select().where(
            jobs.c.version_id.is_(None)).execute().fetchall()
        actual_job_ids = set(row['id'] for row in results)
        self.assertEqual(actual_job_ids, expected_job_ids)

        # for jobs with version_id
        expected_job_ids = set(['JOB-1', 'JOB-2'])
        results = jobs.select().where(
            jobs.c.version_id.isnot(None)).execute().fetchall()
        actual_job_ids = set(row['id'] for row in results)
        self.assertEqual(actual_job_ids, expected_job_ids)

    def _check_011(self, engine, data):
        table = "job_metadata"
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        # assertTrue(len(...), 1) only checks the count is non-zero (the 1 is
        # a message); the intent is that exactly one foreign key was added.
        self.assertEqual(1, len(new_table.foreign_keys))

    def _check_012(self, engine, data):
        table = "schedule_metadata"
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        # assertTrue(len(...), 1) only checks the count is non-zero (the 1 is
        # a message); the intent is that exactly one foreign key was added.
        self.assertEqual(1, len(new_table.foreign_keys))

    def _check_013(self, engine, data):
        jobs = get_table(engine, 'jobs')

        # So, this value comes back in a kind of odd format
        self.assertEqual(u"'QUEUED'", jobs.c.status.server_default.arg.text)
        self.assertFalse(jobs.c.status.nullable)

    def _post_downgrade_013(self, engine):
        jobs = get_table(engine, 'jobs')

        self.assertEqual(None, jobs.c.status.server_default)
        self.assertTrue(jobs.c.status.nullable)

    def _check_014(self, engine, data):
        table = "jobs"
        index_status = "status_idx"
        columns_status = ["status"]
        index_timeout = "timeout_idx"
        columns_timeout = ["timeout"]
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in new_table.indexes]

        self.assertIn((index_status, columns_status), index_data)
        self.assertIn((index_timeout, columns_timeout), index_data)

    def _post_downgrade_014(self, engine):
        table = "jobs"
        index_status = "status_idx"
        columns_status = ["status"]
        index_timeout = "timeout_idx"
        columns_timeout = ["timeout"]
        meta = sqlalchemy.MetaData()
        meta.bind = engine

        new_table = sqlalchemy.Table(table, meta, autoload=True)

        index_data = [(idx.name, idx.columns.keys())
                      for idx in new_table.indexes]

        self.assertNotIn((index_status, columns_status), index_data)
        self.assertNotIn((index_timeout, columns_timeout), index_data)
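
The tests above drive the schema through a project-local `migration_api` module rather than calling sqlalchemy-migrate directly. As a rough sketch only (the project's real module may differ), such a wrapper usually just forwards to `migrate.versioning.api` with the engine and repository:

# Hypothetical migration_api-style wrapper: a sketch of what the tests above
# assume, not the project's actual implementation.
from migrate.versioning import api as versioning_api


def version_control(engine, repository, version=None):
    # Record the starting version in the repository's version table.
    return versioning_api.version_control(engine, repository, version)


def db_version(engine, repository):
    # Report the version currently stored in the version table.
    return versioning_api.db_version(engine, repository)


def upgrade(engine, repository, version=None):
    return versioning_api.upgrade(engine, repository, version)


def downgrade(engine, repository, version):
    return versioning_api.downgrade(engine, repository, version)
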
Code example #10
class ControlledSchema(object):
    """A database under version control"""
    def __init__(self, engine, repository):
        if type(repository) is str:
            repository = Repository(repository)
        self.engine = engine
        self.repository = repository
        self.meta = MetaData(engine)
        self._load()

    def __eq__(self, other):
        return (self.repository is other.repository \
            and self.version == other.version)

    def _load(self):
        """Load controlled schema version info from DB"""
        tname = self.repository.version_table
        self.meta = MetaData(self.engine)
        if not hasattr(self, 'table') or self.table is None:
            try:
                self.table = Table(tname, self.meta, autoload=True)
            except (exceptions.NoSuchTableError):
                raise exceptions.DatabaseNotControlledError(tname)
        # TODO?: verify that the table is correct (# cols, etc.)
        result = self.engine.execute(
            self.table.select(
                self.table.c.repository_id == str(self.repository.id)))
        data = list(result)[0]
        # TODO?: exception if row count is bad
        # TODO: check repository id, exception if incorrect
        self.version = data['version']

    def _get_repository(self):
        """
        Given a database engine, try to guess the repository.

        :raise: :exc:`NotImplementedError`
        """
        # TODO: no guessing yet; for now, a repository must be supplied
        raise NotImplementedError()

    @classmethod
    def create(cls, engine, repository, version=None):
        """
        Declare a database to be under a repository's version control.
        """
        # Confirm that the version # is valid: positive, integer,
        # exists in repos
        if type(repository) is str:
            repository = Repository(repository)
        version = cls._validate_version(repository, version)
        table = cls._create_table_version(engine, repository, version)
        # TODO: history table
        # Load repository information and return
        return cls(engine, repository)

    @classmethod
    def _validate_version(cls, repository, version):
        """
        Ensures this is a valid version number for this repository.

        :raises: :exc:`cls.InvalidVersionError` if invalid
        :return: valid version number
        """
        if version is None:
            version = 0
        try:
            version = VerNum(version)  # raises valueerror
            if version < 0 or version > repository.latest:
                raise ValueError()
        except ValueError:
            raise exceptions.InvalidVersionError(version)
        return version

    @classmethod
    def _create_table_version(cls, engine, repository, version):
        """
        Creates the versioning table in a database.
        """
        # Create tables
        tname = repository.version_table
        meta = MetaData(engine)

        table = Table(
            tname,
            meta,
            Column('repository_id', String(255), primary_key=True),
            Column('repository_path', Text),
            Column('version', Integer),
        )

        if not table.exists():
            table.create()

        # Insert data
        try:
            engine.execute(table.insert(),
                           repository_id=repository.id,
                           repository_path=repository.path,
                           version=int(version))
        except sa_exceptions.IntegrityError:
            # An Entry for this repo already exists.
            raise exceptions.DatabaseAlreadyControlledError()
        return table

    @classmethod
    def compare_model_to_db(cls, engine, model, repository):
        """
        Compare the current model against the current database.
        """
        if isinstance(repository, basestring):
            repository = Repository(repository)
        model = loadModel(model)
        diff = schemadiff.getDiffOfModelAgainstDatabase(
            model, engine, excludeTables=[repository.version_table])
        return diff

    @classmethod
    def create_model(cls, engine, repository, declarative=False):
        """
        Dump the current database as a Python model.
        """
        if isinstance(repository, basestring):
            repository = Repository(repository)
        diff = schemadiff.getDiffOfModelAgainstDatabase(
            MetaData(), engine, excludeTables=[repository.version_table])
        return genmodel.ModelGenerator(diff, declarative).toPython()

    def update_db_from_model(self, model):
        """
        Modify the database to match the structure of the current Python model.
        """
        if isinstance(self.repository, basestring):
            self.repository = Repository(self.repository)
        model = loadModel(model)
        diff = schemadiff.getDiffOfModelAgainstDatabase(
            model, self.engine, excludeTables=[self.repository.version_table])
        genmodel.ModelGenerator(diff).applyModel()
        update = self.table.update(
            self.table.c.repository_id == str(self.repository.id))
        self.engine.execute(update, version=int(self.repository.latest))

    def drop(self):
        """
        Remove version control from a database.
        """
        try:
            self.table.drop()
        except (sa_exceptions.SQLError):
            raise exceptions.DatabaseNotControlledError(str(self.table))

    def _engine_db(self, engine):
        """
        Returns the database name of an engine - ``postgres``, ``sqlite`` ...
        """
        # TODO: This is a bit of a hack...
        return str(engine.dialect.__module__).split('.')[-1]

    def changeset(self, version=None):
        database = self._engine_db(self.engine)
        start_ver = self.version
        changeset = self.repository.changeset(database, start_ver, version)
        return changeset

    def runchange(self, ver, change, step):
        startver = ver
        endver = ver + step
        # Current database version must be correct! Don't run if corrupt!
        if self.version != startver:
            raise exceptions.InvalidVersionError("%s is not %s" % \
                                                     (self.version, startver))
        # Run the change
        change.run(self.engine, step)
        # Update/refresh database version
        update = self.table.update(
            and_(self.table.c.version == int(startver),
                 self.table.c.repository_id == str(self.repository.id)))
        self.engine.execute(update, version=int(endver))
        self._load()

    def upgrade(self, version=None):
        """
        Upgrade (or downgrade) to a specified version, or latest version.
        """
        changeset = self.changeset(version)
        for ver, change in changeset:
            self.runchange(ver, change, changeset.step)
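
Taken together, `create()`, `changeset()` and `upgrade()` give the usual programmatic flow: put a database under control once, then walk it to the requested revision. A minimal usage sketch, where the SQLite URL and repository path are placeholders rather than values from the original code:

# Minimal ControlledSchema usage sketch; 'example.db' and 'migrate_repo'
# are placeholders.
from sqlalchemy import create_engine

engine = create_engine('sqlite:///example.db')

try:
    schema = ControlledSchema(engine, 'migrate_repo')
except exceptions.DatabaseNotControlledError:
    # First run: stamp the database at version 0, then load it.
    schema = ControlledSchema.create(engine, 'migrate_repo', version=0)

# Apply every pending changeset up to the repository's latest version.
schema.upgrade(version=None)
print(schema.version)
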
Code example #11
class TestMigrations(unittest.TestCase):
    """Test sqlalchemy-migrate migrations"""

    TEST_DATABASES = {'sqlite': 'sqlite:///migration.db'}

    REPOSITORY_PATH = os.path.abspath(
        os.path.join(os.path.abspath(__file__), os.pardir, os.pardir,
                     os.pardir, 'backends', 'sqlalchemy', 'migrate_repo'))
    REPOSITORY = Repository(REPOSITORY_PATH)

    def __init__(self, *args, **kwargs):
        super(TestMigrations, self).__init__(*args, **kwargs)

    def setUp(self):
        # Load test databases
        self.engines = {}
        for key, value in TestMigrations.TEST_DATABASES.items():
            self.engines[key] = create_engine(value, poolclass=NullPool)

        # We start each test case with a completely blank slate.
        self._reset_databases()

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()

    def _reset_databases(self):
        for key, engine in self.engines.items():
            conn_string = TestMigrations.TEST_DATABASES[key]
            conn_pieces = urlparse.urlparse(conn_string)
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = "******" % auth_pieces[1]
                sql = ("drop database if exists %(database)s; "
                       "create database %(database)s;") % locals()
                cmd = ("mysql -u%(user)s %(password)s -h%(host)s "
                       "-e\"%(sql)s\"") % locals()
                exitcode, out, err = execute(cmd)
                self.assertEqual(0, exitcode)

    def test_walk_versions(self):
        """
        Walks all version scripts for each tested database, ensuring
        that there are no errors in the version scripts for each engine
        """
        for key, engine in self.engines.items():
            self._walk_versions(TestMigrations.TEST_DATABASES[key])

    def _walk_versions(self, sql_connection):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Assert we are not under version control...
        self.assertRaises(fault.DatabaseMigrationError,
                          migration_api.db_version, sql_connection)
        # Place the database under version control
        print migration_api.version_control(sql_connection)

        cur_version = migration_api.db_version(sql_connection)
        self.assertEqual(0, cur_version)

        for version in xrange(1, TestMigrations.REPOSITORY.latest + 1):
            migration_api.upgrade(sql_connection, version)
            cur_version = migration_api.db_version(sql_connection)
            self.assertEqual(cur_version, version)

        # Now walk it back down to 0 from the latest, testing
        # the downgrade paths.
        for version in reversed(xrange(0, TestMigrations.REPOSITORY.latest)):
            migration_api.downgrade(sql_connection, version)
            cur_version = migration_api.db_version(sql_connection)
            self.assertEqual(cur_version, version)
Code example #12
 def REPOSITORY(self):
     migrate_file = migrate_repo.__file__
     return Repository(os.path.abspath(os.path.dirname(migrate_file)))
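
This fragment derives the repository location from the `migrate_repo` package itself, which keeps tests independent of the current working directory. A hedged sketch of how it is typically wired up (the class name and the import path of `migrate_repo` here are assumptions, not taken from the original):

# Sketch only: 'myproject.db.sqlalchemy.migrate_repo' is a hypothetical path.
import os

from migrate.versioning.repository import Repository

from myproject.db.sqlalchemy import migrate_repo


class MigrationTestBase(object):

    @property
    def REPOSITORY(self):
        migrate_file = migrate_repo.__file__
        return Repository(os.path.abspath(os.path.dirname(migrate_file)))
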
Code example #13
def main():
    import camelot
    import logging
    logging.basicConfig(level=logging.INFO)
    #logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
    parser = CommandOptionParser(usage=usage,
                                 description=description,
                                 version=camelot.__version__)
    (_options, args) = parser.parse_args()
    if not args:
        parser.print_help()
    elif args[0] == 'console':
        setup_model()
        sh = Shell()
        sh.interact()
    elif args[0] == 'schema_display':
        setup_model()
        schema_display()
    elif args[0] in ('version_control', 'db_version', 'version', 'upgrade'):
        import settings
        from migrate.versioning.repository import Repository
        from migrate.versioning.schema import ControlledSchema
        from migrate.versioning.exceptions import DatabaseNotControlledError
        from sqlalchemy.exceptions import NoSuchTableError
        migrate_engine = settings.ENGINE()
        repository = Repository(settings.REPOSITORY)
        schema = None
        try:
            schema = ControlledSchema(migrate_engine, repository)
        except (NoSuchTableError, DatabaseNotControlledError):
            print 'database not yet under version control, putting it under version_control first.'
        if args[0] == 'version_control' or schema is None:
            migrate_connection = migrate_engine.connect()
            transaction = migrate_connection.begin()
            try:
                schema = ControlledSchema.create(migrate_engine, repository)
                transaction.commit()
            except:
                transaction.rollback()
                raise
            finally:
                migrate_connection.close()
            print 'database was put under version control'
        if schema:
            if args[0] == 'db_version':
                print schema.version
            elif args[0] == 'version':
                print repository.latest
            elif args[0] == 'upgrade':
                migrate_connection = migrate_engine.connect()
                if len(args) >= 2:
                    version = int(args[1])
                else:
                    version = repository.latest
                #
                # perform each upgrade step in a separate transaction, since
                # one upgrade might depend on another being fully executed
                #
                try:
                    if schema.version == version:
                        print 'database is already at requested version'
                    if schema.version <= version:
                        step = 1
                    else:
                        step = -1
                    for i in range(schema.version + step, version + step,
                                   step):
                        transaction = migrate_connection.begin()
                        try:
                            schema.upgrade(i)
                            transaction.commit()
                            if step == 1:
                                print 'upgrade %s' % i
                            else:
                                print 'downgrade %s' % i
                        except:
                            transaction.rollback()
                            raise
                finally:
                    migrate_connection.close()
    else:
        parser.print_help()
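
The upgrade branch above applies one schema revision per transaction and has to walk either up or down depending on where the target version lies. The range arithmetic is easy to get wrong, so here is the same stepping logic isolated as a small sketch:

def plan_steps(current, target):
    # Return the intermediate versions to apply, one per transaction,
    # walking upward or downward depending on where the target lies.
    step = 1 if current <= target else -1
    return list(range(current + step, target + step, step))

# plan_steps(3, 6) -> [4, 5, 6]   upgrade
# plan_steps(6, 3) -> [5, 4, 3]   downgrade
# plan_steps(4, 4) -> []          already at the requested version
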
Code example #14
class TestMigrations(utils.BaseTestCase):
    """Test sqlalchemy-migrate migrations."""

    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
                                       'test_migrations.conf')
    # Test machines can set the GLANCE_TEST_MIGRATIONS_CONF variable
    # to override the location of the config file for migration testing
    CONFIG_FILE_PATH = os.environ.get('GLANCE_TEST_MIGRATIONS_CONF',
                                      DEFAULT_CONFIG_FILE)
    MIGRATE_FILE = glance.db.sqlalchemy.migrate_repo.__file__
    REPOSITORY = Repository(os.path.abspath(os.path.dirname(MIGRATE_FILE)))

    def setUp(self):
        super(TestMigrations, self).setUp()

        self.snake_walk = False
        self.test_databases = {}

        # Load test databases from the config file. Only do this
        # once. No need to re-run this on each test...
        LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
        if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
            cp = ConfigParser.RawConfigParser()
            try:
                cp.read(TestMigrations.CONFIG_FILE_PATH)
                defaults = cp.defaults()
                for key, value in defaults.items():
                    self.test_databases[key] = value
                self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
            except ConfigParser.ParsingError as e:
                self.fail("Failed to read test_migrations.conf config "
                          "file. Got error: %s" % e)
        else:
            self.fail("Failed to find test_migrations.conf config " "file.")

        self.engines = {}
        for key, value in self.test_databases.items():
            self.engines[key] = sqlalchemy.create_engine(value)

        # We start each test case with a completely blank slate.
        self._reset_databases()
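
        # Illustrative only: a hypothetical test_migrations.conf matching the
        # parsing in setUp above. Every key in [DEFAULT] is treated as a
        # database URL to test against, and [walk_style]/snake_walk toggles
        # the up/down/up walk. The values below are placeholders, not
        # defaults shipped with the project:
        #
        #     [DEFAULT]
        #     sqlite = sqlite:///test_migrations.db
        #     mysql = mysql://user:secret@localhost/openstack_citest
        #
        #     [walk_style]
        #     snake_walk = yes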

    def tearDown(self):
        # We destroy the test data store between each test case,
        # and recreate it, which ensures that we have no side-effects
        # from the tests
        self._reset_databases()
        super(TestMigrations, self).tearDown()

    def _reset_databases(self):
        def execute_cmd(cmd=None):
            status, output = commands.getstatusoutput(cmd)
            LOG.debug(output)
            self.assertEqual(0, status)

        for key, engine in self.engines.items():
            conn_string = self.test_databases[key]
            conn_pieces = urlparse.urlparse(conn_string)
            engine.dispose()
            if conn_string.startswith('sqlite'):
                # We can just delete the SQLite database, which is
                # the easiest and cleanest solution
                db_path = conn_pieces.path.strip('/')
                if os.path.exists(db_path):
                    os.unlink(db_path)
                # No need to recreate the SQLite DB. SQLite will
                # create it for us if it's not there...
            elif conn_string.startswith('mysql'):
                # We can execute the MySQL client to destroy and re-create
                # the MYSQL database, which is easier and less error-prone
                # than using SQLAlchemy to do this via MetaData...trust me.
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]
                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    if auth_pieces[1].strip():
                        password = "******"%s\"" % auth_pieces[1]
                sql = ("drop database if exists %(database)s; "
                       "create database %(database)s;") % locals()
                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
                       "-e \"%(sql)s\"") % locals()
                execute_cmd(cmd)
            elif conn_string.startswith('postgresql'):
                database = conn_pieces.path.strip('/')
                loc_pieces = conn_pieces.netloc.split('@')
                host = loc_pieces[1]

                auth_pieces = loc_pieces[0].split(':')
                user = auth_pieces[0]
                password = ""
                if len(auth_pieces) > 1:
                    password = auth_pieces[1].strip()
                # note(boris-42): This file is used for authentication
                # without password prompt.
                createpgpass = ("echo '*:*:*:%(user)s:%(password)s' > "
                                "~/.pgpass && chmod 0600 ~/.pgpass" % locals())
                execute_cmd(createpgpass)
                # note(boris-42): We can't drop the database we are currently
                # connected to, so create/drop statements are run against the
                # special template1 database instead.
                sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
                          " '%(sql)s' -d template1")
                sql = ("drop database if exists %(database)s;") % locals()
                droptable = sqlcmd % locals()
                execute_cmd(droptable)
                sql = ("create database %(database)s;") % locals()
                createtable = sqlcmd % locals()
                execute_cmd(createtable)

    def test_walk_versions(self):
        """
        Walks all version scripts for each tested database, ensuring
        that there are no errors in the version scripts for each engine
        """
        for key, engine in self.engines.items():
            self._walk_versions(engine, self.snake_walk)

    def test_mysql_connect_fail(self):
        """
        Test that we can trigger a mysql connection failure and we fail
        gracefully to ensure we don't break people without mysql
        """
        if _is_backend_avail('mysql', user="******"):
            self.fail("Shouldn't have connected")

    def test_mysql_opportunistically(self):
        # Test that table creation on mysql only builds InnoDB tables
        if not _is_backend_avail('mysql'):
            self.skipTest("mysql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string("mysql")
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["mysqlcitest"] = engine
        self.test_databases["mysqlcitest"] = connect_string

        # build a fully populated mysql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

        connection = engine.connect()
        # sanity check
        total = connection.execute("SELECT count(*) "
                                   "from information_schema.TABLES "
                                   "where TABLE_SCHEMA='openstack_citest'")
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")

        noninnodb = connection.execute("SELECT count(*) "
                                       "from information_schema.TABLES "
                                       "where TABLE_SCHEMA='openstack_citest' "
                                       "and ENGINE!='InnoDB' "
                                       "and TABLE_NAME!='migrate_version'")
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
        connection.close()

    def test_postgresql_connect_fail(self):
        """
        Test that we can trigger a postgres connection failure and we fail
        gracefully to ensure we don't break people without postgres
        """
        if _is_backend_avail('postgresql', user="******"):
            self.fail("Shouldn't have connected")

    def test_postgresql_opportunistically(self):
        # Test postgresql database migration walk
        if not _is_backend_avail('postgres'):
            self.skipTest("postgresql not available")
        # add this to the global lists to make reset work with it, it's removed
        # automatically in tearDown so no need to clean it up here.
        connect_string = _get_connect_string("postgres")
        engine = sqlalchemy.create_engine(connect_string)
        self.engines["postgresqlcitest"] = engine
        self.test_databases["postgresqlcitest"] = connect_string

        # build a fully populated postgresql database with all the tables
        self._reset_databases()
        self._walk_versions(engine, False, False)

    def _walk_versions(self,
                       engine=None,
                       snake_walk=False,
                       downgrade=True,
                       initial_version=None):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        # Place the database under version control
        init_version = migration.INIT_VERSION
        if initial_version is not None:
            init_version = initial_version
        migration_api.version_control(engine, TestMigrations.REPOSITORY,
                                      init_version)
        self.assertEqual(
            init_version,
            migration_api.db_version(engine, TestMigrations.REPOSITORY))

        migration_api.upgrade(engine, TestMigrations.REPOSITORY,
                              init_version + 1)

        LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)

        for version in xrange(init_version + 2,
                              TestMigrations.REPOSITORY.latest + 1):
            # upgrade -> downgrade -> upgrade
            self._migrate_up(engine, version, with_data=True)
            if snake_walk:
                self._migrate_down(engine, version)
                self._migrate_up(engine, version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(
                    xrange(init_version + 2,
                           TestMigrations.REPOSITORY.latest + 1)):
                # downgrade -> upgrade -> downgrade
                self._migrate_down(engine, version)
                if snake_walk:
                    self._migrate_up(engine, version)
                    self._migrate_down(engine, version)

    def _migrate_down(self, engine, version):
        migration_api.downgrade(engine, TestMigrations.REPOSITORY, version)
        self.assertEqual(
            version, migration_api.db_version(engine,
                                              TestMigrations.REPOSITORY))

    def _migrate_up(self, engine, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _prerun_### and
        _check_### functions in the main test.
        """
        if with_data:
            data = None
            prerun = getattr(self, "_prerun_%3.3d" % version, None)
            if prerun:
                data = prerun(engine)

        migration_api.upgrade(engine, TestMigrations.REPOSITORY, version)
        self.assertEqual(
            version, migration_api.db_version(engine,
                                              TestMigrations.REPOSITORY))

        if with_data:
            check = getattr(self, "_check_%3.3d" % version, None)
            if check:
                check(engine, data)

    def _create_unversioned_001_db(self, engine):
        # Create the initial version of the images table
        meta = sqlalchemy.schema.MetaData()
        meta.bind = engine
        images_001 = sqlalchemy.Table(
            'images', meta,
            sqlalchemy.Column('id', models.Integer, primary_key=True),
            sqlalchemy.Column('name', sqlalchemy.String(255)),
            sqlalchemy.Column('type', sqlalchemy.String(30)),
            sqlalchemy.Column('size', sqlalchemy.Integer),
            sqlalchemy.Column('status', sqlalchemy.String(30)),
            sqlalchemy.Column('is_public', sqlalchemy.Boolean, default=False),
            sqlalchemy.Column('location', sqlalchemy.Text),
            sqlalchemy.Column('created_at',
                              sqlalchemy.DateTime(),
                              nullable=False),
            sqlalchemy.Column('updated_at', sqlalchemy.DateTime()),
            sqlalchemy.Column('deleted_at', sqlalchemy.DateTime()),
            sqlalchemy.Column('deleted',
                              sqlalchemy.Boolean(),
                              nullable=False,
                              default=False))
        images_001.create()

    def test_version_control_existing_db(self):
        """
        Creates a DB without version control information, places it
        under version control and checks that it can be upgraded
        without errors.
        """
        for key, engine in self.engines.items():
            self._create_unversioned_001_db(engine)
            self._walk_versions(engine, self.snake_walk, initial_version=1)

    def _prerun_003(self, engine):
        now = datetime.datetime.now()
        images = get_table(engine, 'images')
        data = {
            'deleted': False,
            'created_at': now,
            'updated_at': now,
            'type': 'kernel',
            'status': 'active',
            'is_public': True
        }
        images.insert().values(data).execute()
        return data

    def _check_003(self, engine, data):
        images = get_table(engine, 'images')
        self.assertTrue(
            'type' not in images.c,
            "'type' column found in images table columns! "
            "images table columns reported by metadata: %s\n" %
            images.c.keys())
        images_prop = get_table(engine, 'image_properties')
        result = images_prop.select().execute()
        types = []
        for row in result:
            if row['key'] == 'type':
                types.append(row['value'])
        self.assertIn(data['type'], types)

    def _prerun_004(self, engine):
        """Insert checksum data sample to check if migration goes fine with
        data"""
        now = timeutils.utcnow()
        images = get_table(engine, 'images')
        data = [{
            'deleted': False,
            'created_at': now,
            'updated_at': now,
            'type': 'kernel',
            'status': 'active',
            'is_public': True,
        }]
        engine.execute(images.insert(), data)
        return data

    def _check_004(self, engine, data):
        """Assure that checksum data is present on table"""
        images = get_table(engine, 'images')
        self.assertIn('checksum', images.c)
        self.assertEquals(images.c['checksum'].type.length, 32)

    def _prerun_005(self, engine):
        now = timeutils.utcnow()
        images = get_table(engine, 'images')
        data = [{
            'deleted': False,
            'created_at': now,
            'updated_at': now,
            'type': 'kernel',
            'status': 'active',
            'is_public': True,
            # Integer type signed size limit
            'size': 2147483647
        }]
        engine.execute(images.insert(), data)
        return data

    def _check_005(self, engine, data):

        images = get_table(engine, 'images')
        select = images.select().execute()

        sizes = [row['size'] for row in select if row['size'] is not None]
        migrated_data_sizes = [element['size'] for element in data]

        for migrated in migrated_data_sizes:
            self.assertIn(migrated, sizes)

    def _prerun_006(self, engine):
        now = timeutils.utcnow()
        images = get_table(engine, 'images')
        image_data = [{
            'deleted': False,
            'created_at': now,
            'updated_at': now,
            'type': 'kernel',
            'status': 'active',
            'is_public': True,
            'id': 9999,
        }]
        engine.execute(images.insert(), image_data)

        images_properties = get_table(engine, 'image_properties')
        properties_data = [{
            'id': 10,
            'image_id': 9999,
            'updated_at': now,
            'created_at': now,
            'deleted': False,
            'key': 'image_name'
        }]
        engine.execute(images_properties.insert(), properties_data)
        return properties_data

    def _check_006(self, engine, data):
        images_properties = get_table(engine, 'image_properties')
        select = images_properties.select().execute()

        # load names from the name column
        image_names = [row['name'] for row in select]

        # check that the seeded keys appear among the name column values
        for element in data:
            self.assertIn(element['key'], image_names)

    def _prerun_015(self, engine):
        images = get_table(engine, 'images')
        unquoted_locations = [
            'swift://acct:usr:pass@example.com/container/obj-id',
            'file://foo',
        ]
        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0)
        data = []
        for location in unquoted_locations:
            # build a fresh copy per row so the returned data holds distinct records
            record = dict(temp, location=location, id=uuidutils.generate_uuid())
            data.append(record)
            images.insert().values(record).execute()
        return data

    def _check_015(self, engine, data):
        images = get_table(engine, 'images')
        quoted_locations = [
            'swift://acct%3Ausr:[email protected]/container/obj-id',
            'file://foo',
        ]
        result = images.select().execute()
        locations = [x['location'] for x in result]
        for loc in quoted_locations:
            self.assertIn(loc, locations)

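    # Migration 016 adds a 'status' column to the image_members table; the
    # member row inserted below lets _check_016 confirm the column shows up
    # alongside existing data.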
    def _prerun_016(self, engine):
        images = get_table(engine, 'images')
        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    id='fake-image-id1')
        images.insert().values(temp).execute()
        image_members = get_table(engine, 'image_members')
        now = datetime.datetime.now()
        data = {
            'deleted': False,
            'created_at': now,
            'member': 'fake-member',
            'updated_at': now,
            'can_share': False,
            'image_id': 'fake-image-id1'
        }
        image_members.insert().values(data).execute()
        return data

    def _check_016(self, engine, data):
        image_members = get_table(engine, 'image_members')
        self.assertTrue(
            'status' in image_members.c,
            "'status' column not found in image_members table "
            "columns! image_members table columns: %s" %
            image_members.c.keys())

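    # Migration 017 deals with locations encrypted under the configured
    # metadata_encryption_key: _prerun_017 seeds one encrypted swift
    # location plus several plaintext ones, and _check_017 then decrypts
    # what it can and asserts that the swift credentials end up quoted and
    # that no location data is lost.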
    def _prerun_017(self, engine):
        metadata_encryption_key = 'a' * 16
        self.config(metadata_encryption_key=metadata_encryption_key)
        images = get_table(engine, 'images')
        unquoted = 'swift://acct:usr:[email protected]/container/obj-id'
        encrypted_unquoted = crypt.urlsafe_encrypt(metadata_encryption_key,
                                                   unquoted, 64)
        data = []
        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    location=encrypted_unquoted,
                    id='fakeid1')
        images.insert().values(temp).execute()

        locations = [
            'file://ab', 'file://abc',
            'swift://acct3A%foobar:[email protected]/container/obj-id2'
        ]

        now = datetime.datetime.now()
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0)
        for location in locations:
            # again, copy per row so each appended record is distinct
            record = dict(temp, location=location, id=uuidutils.generate_uuid())
            data.append(record)
            images.insert().values(record).execute()
        return data

    def _check_017(self, engine, data):
        metadata_encryption_key = 'a' * 16
        quoted = 'swift://acct%3Ausr:[email protected]/container/obj-id'
        images = get_table(engine, 'images')
        result = images.select().execute()
        locations = [x['location'] for x in result]
        actual_location = []
        for location in locations:
            if location:
                try:
                    temp_loc = crypt.urlsafe_decrypt(metadata_encryption_key,
                                                     location)
                    actual_location.append(temp_loc)
                except (TypeError, ValueError):
                    # not an encrypted value; keep the stored location as-is
                    actual_location.append(location)

        self.assertIn(quoted, actual_location)
        loc_list = [
            'file://ab', 'file://abc',
            'swift://acct3A%foobar:[email protected]/container/obj-id2'
        ]

        for location in loc_list:
            if location not in actual_location:
                self.fail(_("location: %s data lost") % location)

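    # Migration 019 apparently copies images.location values into the new
    # image_locations table; rows whose location is None are expected to be
    # skipped (see the NOTE below), and _check_020 verifies the old column
    # is dropped afterwards.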
    def _prerun_019(self, engine):
        images = get_table(engine, 'images')
        now = datetime.datetime.now()
        base_values = {
            'deleted': False,
            'created_at': now,
            'updated_at': now,
            'status': 'active',
            'is_public': True,
            'min_disk': 0,
            'min_ram': 0,
        }
        data = [
            {
                'id': 'fake-19-1',
                'location': 'http://glance.example.com'
            },
            #NOTE(bcwaldon): images with a location of None should
            # not be migrated
            {
                'id': 'fake-19-2',
                'location': None
            },
        ]
        for image in data:
            image.update(base_values)
            images.insert().values(image).execute()
        return data

    def _check_019(self, engine, data):
        image_locations = get_table(engine, 'image_locations')
        records = image_locations.select().execute().fetchall()
        locations = {il.image_id: il.value for il in records}
        self.assertEqual(locations.get('fake-19-1'),
                         'http://glance.example.com')

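    # Migration 020 drops the now-redundant 'location' column from the
    # images table once the data lives in image_locations.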
    def _check_020(self, engine, data):
        images = get_table(engine, 'images')
        self.assertNotIn('location', images.c)