def test_redhat_production_20130304(self):
    connection = self.migration_metadata.bind.connect()
    connection.execute(pkg_resources.resource_string('bkr.inttest.server',
            'database-dumps/redhat-production-20130304.sql'))
    upgrade_db(self.migration_metadata)
    self.check_migrated_schema()
    downgrade_db(self.migration_metadata, 'base')
def test_Installing_status_is_mapped_on_downgrade(self):
    with self.migration_metadata.bind.connect() as connection:
        # populate empty database
        connection.execute(pkg_resources.resource_string('bkr.inttest.server',
                'database-dumps/22.sql'))
        upgrade_db(self.migration_metadata)
        # create a job in Installing state
        connection.execute(
                "INSERT INTO job (owner_id, retention_tag_id, dirty_version, clean_version, status) "
                "VALUES (1, 1, '', '', 'Installing')")
        connection.execute(
                "INSERT INTO recipe_set (job_id, queue_time, waived, status) "
                "VALUES (1, '2015-11-09 17:03:04', FALSE, 'Installing')")
        connection.execute(
                "INSERT INTO recipe (type, recipe_set_id, autopick_random, status) "
                "VALUES ('machine_recipe', 1, FALSE, 'Installing')")
    # run the downgrade
    downgrade_db(self.migration_metadata, '22')
    # status should be Running so that it works with 22.x
    with self.migration_metadata.bind.connect() as connection:
        self.assertEquals(
                connection.scalar('SELECT status FROM job WHERE id = 1'),
                u'Running')
        self.assertEquals(
                connection.scalar('SELECT status FROM recipe_set WHERE id = 1'),
                u'Running')
        self.assertEquals(
                connection.scalar('SELECT status FROM recipe WHERE id = 1'),
                u'Running')
def test_redhat_production_20140820(self):
    with self.migration_engine.connect() as connection:
        connection.execute(pkg_resources.resource_string('bkr.inttest.server',
                'database-dumps/redhat-production-20140820.sql'))
    upgrade_db(self.migration_metadata)
    self.check_migrated_schema()
    downgrade_db(self.migration_metadata, 'base')
def test_redhat_production_20120216(self):
    # Skip up front so we don't waste time loading the dump; the steps below
    # will be exercised once migrations reach this far back.
    raise unittest.SkipTest('Database migrations are not implemented '
            'far enough into the past yet')
    connection = self.migration_metadata.bind.connect()
    connection.execute(pkg_resources.resource_string('bkr.inttest.server',
            'database-dumps/redhat-production-20120216.sql'))
    upgrade_db(self.migration_metadata)
    self.check_migrated_schema()
    downgrade_db(self.migration_metadata, 'base')
def test_full_downgrade_then_upgrade(self):
    # The point is to test that the complete *downgrade* sequence is valid,
    # by then upgrading again and making sure we still have a correct schema.
    connection = self.migration_metadata.bind.connect()
    connection.execute(pkg_resources.resource_string('bkr.inttest.server',
            'database-dumps/0.11.sql'))
    upgrade_db(self.migration_metadata)
    downgrade_db(self.migration_metadata, 'base')
    upgrade_db(self.migration_metadata)
    self.check_migrated_schema()
def test_migrate_system_access_policies_to_custom_access_policies(self):
    connection = self.migration_metadata.bind.connect()
    # create the DB schema for beaker 19
    connection.execute(pkg_resources.resource_string('bkr.inttest.server',
            'database-dumps/19.sql'))
    # populate synthetic data into relevant tables
    connection.execute('INSERT INTO system(id, fqdn, date_added, owner_id, type, status, kernel_type_id) VALUES (1, "test.fqdn.name", "2015-01-01", 1, 1, 1, 1)')
    connection.execute('INSERT INTO system(id, fqdn, date_added, owner_id, type, status, kernel_type_id) VALUES (2, "test1.fqdn.name", "2015-01-01", 1, 1, 1, 1)')
    connection.execute('INSERT INTO system(id, fqdn, date_added, owner_id, type, status, kernel_type_id) VALUES (3, "test2.fqdn.name", "2015-01-01", 1, 1, 1, 1)')
    connection.execute('INSERT INTO system_access_policy(id, system_id) VALUES (1, 2)')
    connection.execute('INSERT INTO system_access_policy(id, system_id) VALUES (2, 1)')
    connection.execute('INSERT INTO system_access_policy(id, system_id) VALUES (3, 3)')
    # Migrate
    upgrade_db(self.migration_metadata)
    # check the data has been migrated successfully
    systems = self.migration_session.query(System).all()
    expected_system_policy_map = {
        'test.fqdn.name': 2,
        'test1.fqdn.name': 1,
        'test2.fqdn.name': 3,
    }
    for s in systems:
        self.assertEquals(s.custom_access_policy_id,
                expected_system_policy_map[s.fqdn])
        self.assertEquals(s.active_access_policy_id,
                expected_system_policy_map[s.fqdn])
    # downgrade test
    downgrade_db(self.migration_metadata, '1c444555ea3d')
    # XXX for some reason self.metadata.reflect() isn't detecting the schema
    # changes, so reflect into a fresh MetaData instead
    migration_metadata = sqlalchemy.MetaData(bind=self.migration_engine)
    migration_metadata.reflect()
    self.assertIn('system_id',
            migration_metadata.tables['system_access_policy'].columns.keys())
    self.assertNotIn('system_access_policy_id',
            migration_metadata.tables['system_pool'].columns.keys())
def test_can_pass_beaker_version_to_downgrade(self):
    # We should be able to give it arbitrary Beaker versions and have it
    # figure out the matching schema version we want.
    # The downgrade process itself will do nothing in this case because we
    # are already on the right version.
    with self.migration_engine.connect() as connection:
        connection.execute(pkg_resources.resource_string('bkr.inttest.server',
                'database-dumps/21.sql'))
    downgrade_db(self.migration_metadata, '21')
    self.assertTrue(check_db(self.migration_metadata, '171c07fb4970'))
    # Should also accept minor versions
    downgrade_db(self.migration_metadata, '21.1')
    self.assertTrue(check_db(self.migration_metadata, '171c07fb4970'))
    # Should also accept RPM version-releases; this makes our playbooks simpler
    downgrade_db(self.migration_metadata, '21.1-1.el6eng')
    self.assertTrue(check_db(self.migration_metadata, '171c07fb4970'))