Example #1
    def _pre_upgrade_045(self, engine):
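        # Seed a raw_template, a user_creds record and three stacks
        # (one of them owned by another stack) for migration 045.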
        raw_template = utils.get_table(engine, 'raw_template')
        templ = [dict(id=5, template='{}', files='{}')]
        engine.execute(raw_template.insert(), templ)

        user_creds = utils.get_table(engine, 'user_creds')
        user = [dict(id=6, username='******', password='******',
                     tenant='mine', auth_url='bla',
                     tenant_id=str(uuid.uuid4()),
                     trust_id='',
                     trustor_user_id='')]
        engine.execute(user_creds.insert(), user)

        stack = utils.get_table(engine, 'stack')
        stack_ids = [('s1', '967aaefb-152e-505d-b13a-35d4c816390c'),
                     ('s2', '9e9deba9-a303-5f29-84d3-c8165647c47e'),
                     ('s1*', '9a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9')]
        data = [dict(id=ll_id, name=name,
                     raw_template_id=templ[0]['id'],
                     user_creds_id=user[0]['id'],
                     username='******', disable_rollback=True)
                for name, ll_id in stack_ids]
        data[2]['owner_id'] = '967aaefb-152e-505d-b13a-35d4c816390c'

        engine.execute(stack.insert(), data)
        return data
Example #2
 def downgrade(self, migrate_engine):
     UniqueConstraint('uuid',
                      table=db_utils.get_table(migrate_engine, 'instances'),
                      name='uniq_instances0uuid').drop()
     for table_name in ('instances', 'shadow_instances'):
         table = db_utils.get_table(migrate_engine, table_name)
         table.columns.uuid.alter(nullable=True)
Example #3
    def _check_267(self, engine, data):
        # Make sure the column is non-nullable and the UC exists.
        fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
        self.assertTrue(fixed_ips.c.instance_uuid.nullable)
        fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
        self.assertIsNone(fixed_ip.instance_uuid)

        instances = oslodbutils.get_table(engine, 'instances')
        self.assertFalse(instances.c.uuid.nullable)

        inspector = reflection.Inspector.from_engine(engine)
        constraints = inspector.get_unique_constraints('instances')
        constraint_names = [constraint['name'] for constraint in constraints]
        self.assertIn('uniq_instances0uuid', constraint_names)

        # Make sure the instances record with the valid uuid is still there.
        instance = instances.select(instances.c.id == 1).execute().first()
        self.assertIsNotNone(instance)

        # Check that the null entry in the volumes table is still there since
        # we skipped tables that don't have FK's back to the instances table.
        volumes = oslodbutils.get_table(engine, 'volumes')
        self.assertTrue(volumes.c.instance_uuid.nullable)
        volume = volumes.select(
            volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
        ).execute().first()
        self.assertIsNone(volume.instance_uuid)
Example #4
    def _pre_upgrade_031(self, engine):
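        # Create one image plus several image_locations rows, including
        # duplicate (value, meta_data) pairs; the image id is returned for
        # the matching check.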
        images = db_utils.get_table(engine, 'images')
        now = datetime.datetime.now()
        image_id = 'fake_031_id'
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    id=image_id)
        images.insert().values(temp).execute()

        locations_table = db_utils.get_table(engine, 'image_locations')
        locations = [
            ('file://ab', '{"a": "yo yo"}'),
            ('file://ab', '{}'),
            ('file://ab', '{}'),
            ('file://ab1', '{"a": "that one, please"}'),
            ('file://ab1', '{"a": "that one, please"}'),
        ]
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    image_id=image_id)

        for location, metadata in locations:
            temp.update(value=location, meta_data=metadata)
            locations_table.insert().values(temp).execute()
        return image_id
    def test_upgrade(self):
        self.assertTableDoesNotExist(self.identity_provider)
        self.assertTableDoesNotExist(self.federation_protocol)
        self.assertTableDoesNotExist(self.mapping)

        self.upgrade(1, repository=self.repo_path)
        self.assertTableColumns(self.identity_provider,
                                ['id',
                                 'enabled',
                                 'description'])

        self.assertTableColumns(self.federation_protocol,
                                ['id',
                                 'idp_id',
                                 'mapping_id'])

        self.upgrade(2, repository=self.repo_path)
        self.assertTableColumns(self.mapping,
                                ['id', 'rules'])

        federation_protocol = utils.get_table(
            self.engine,
            'federation_protocol')
        with self.engine.begin() as conn:
            conn.execute(federation_protocol.insert(), id=0, idp_id=1)
            self.upgrade(3, repository=self.repo_path)
            federation_protocol = utils.get_table(
                self.engine,
                'federation_protocol')
            self.assertFalse(federation_protocol.c.mapping_id.nullable)
Example #6
    def _pre_upgrade_031(self, engine):
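        # Seed a raw_template, a user_creds record and three stacks that
        # share the same name for migration 031.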
        raw_template = utils.get_table(engine, 'raw_template')
        templ = [dict(id=3, template='{}')]
        engine.execute(raw_template.insert(), templ)

        user_creds = utils.get_table(engine, 'user_creds')
        user = [dict(id=4, username='******', password='******',
                     tenant='mine', auth_url='bla',
                     tenant_id=str(uuid.uuid4()),
                     trust_id='',
                     trustor_user_id='')]
        engine.execute(user_creds.insert(), user)

        stack = utils.get_table(engine, 'stack')
        stack_ids = ['967aaefb-152e-405d-b13a-35d4c816390c',
                     '9e9deba9-a303-4f29-84d3-c8165647c47e',
                     '9a4bd1ec-8b21-46cd-964a-f66cb1cfa2f9']
        data = [dict(id=ll_id, name='fruity',
                     raw_template_id=templ[0]['id'],
                     user_creds_id=user[0]['id'],
                     username='******', disable_rollback=True)
                for ll_id in stack_ids]

        engine.execute(stack.insert(), data)
        return data
Example #7
    def _pre_upgrade_029(self, engine):
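        # Store an image location whose meta_data is pickled; the metadata
        # and image id are returned so the matching check can verify the
        # migrated format.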
        image_locations = db_utils.get_table(engine, 'image_locations')

        meta_data = {'somelist': ['a', 'b', 'c'], 'avalue': 'hello',
                     'adict': {}}

        now = datetime.datetime.now()
        image_id = 'fake_029_id'
        url = 'file:///some/place/onthe/fs029'

        images = db_utils.get_table(engine, 'images')
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    id=image_id)
        images.insert().values(temp).execute()

        pickle_md = pickle.dumps(meta_data)
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    image_id=image_id,
                    value=url,
                    meta_data=pickle_md)
        image_locations.insert().values(temp).execute()

        return meta_data, image_id
Example #8
    def _pre_upgrade_026(self, engine):
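        # Insert one image and a single image_locations row with no
        # metadata, returning the image id for the matching check.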
        image_locations = db_utils.get_table(engine, 'image_locations')

        now = datetime.datetime.now()
        image_id = 'fake_id'
        url = 'file:///some/place/onthe/fs'

        images = db_utils.get_table(engine, 'images')
        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    status='active',
                    is_public=True,
                    min_disk=0,
                    min_ram=0,
                    id=image_id)
        images.insert().values(temp).execute()

        temp = dict(deleted=False,
                    created_at=now,
                    updated_at=now,
                    image_id=image_id,
                    value=url)
        image_locations.insert().values(temp).execute()
        return image_id
Example #9
    def _check_017(self, engine, data):
        """Test that added encryption information works correctly."""
        # encryption key UUID
        volumes = db_utils.get_table(engine, 'volumes')
        self.assertIn('encryption_key_id', volumes.c)
        self.assertIsInstance(volumes.c.encryption_key_id.type,
                              sqlalchemy.types.VARCHAR)

        snapshots = db_utils.get_table(engine, 'snapshots')
        self.assertIn('encryption_key_id', snapshots.c)
        self.assertIsInstance(snapshots.c.encryption_key_id.type,
                              sqlalchemy.types.VARCHAR)
        self.assertIn('volume_type_id', snapshots.c)
        self.assertIsInstance(snapshots.c.volume_type_id.type,
                              sqlalchemy.types.VARCHAR)

        # encryption types table
        encryption = db_utils.get_table(engine, 'encryption')
        self.assertIsInstance(encryption.c.volume_type_id.type,
                              sqlalchemy.types.VARCHAR)
        self.assertIsInstance(encryption.c.cipher.type,
                              sqlalchemy.types.VARCHAR)
        self.assertIsInstance(encryption.c.key_size.type,
                              sqlalchemy.types.INTEGER)
        self.assertIsInstance(encryption.c.provider.type,
                              sqlalchemy.types.VARCHAR)
Example #10
    def _pre_upgrade_047(self, engine):
        raw_template = utils.get_table(engine, 'raw_template')
        templ = [dict(id=6, template='{}', files='{}')]
        engine.execute(raw_template.insert(), templ)

        user_creds = utils.get_table(engine, 'user_creds')
        user = [dict(id=7, username='******', password='******',
                     tenant='mine', auth_url='bla',
                     tenant_id=str(uuid.uuid4()),
                     trust_id='',
                     trustor_user_id='')]
        engine.execute(user_creds.insert(), user)

        stack = utils.get_table(engine, 'stack')
        stack_ids = [('s9', '167aaefb-152e-505d-b13a-35d4c816390c'),
                     ('n1', '1e9deba9-a303-5f29-84d3-c8165647c47e'),
                     ('n2', '1e9deba9-a304-5f29-84d3-c8165647c47e'),
                     ('n3', '1e9deba9-a305-5f29-84d3-c8165647c47e'),
                     ('s9*', '1a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9')]
        data = [dict(id=ll_id, name=name,
                     raw_template_id=templ[0]['id'],
                     user_creds_id=user[0]['id'],
                     owner_id=None,
                     backup=False,
                     username='******', disable_rollback=True)
                for name, ll_id in stack_ids]
        # Make a nested tree s9->n1->n2->n3 with an s9 backup (s9*)
        data[1]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
        data[2]['owner_id'] = '1e9deba9-a303-5f29-84d3-c8165647c47e'
        data[3]['owner_id'] = '1e9deba9-a304-5f29-84d3-c8165647c47e'
        data[4]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
        data[4]['backup'] = True
        engine.execute(stack.insert(), data)
        return data
Example #11
    def _pre_upgrade_033(self, engine):
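        # Create one image (and a location for it) in every status and
        # return the generated image ids.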
        images = db_utils.get_table(engine, 'images')
        image_locations = db_utils.get_table(engine, 'image_locations')

        now = datetime.datetime.now()
        image_id = 'fake_id_028_%d'
        url = 'file:///some/place/onthe/fs_%d'
        status_list = ['active', 'saving', 'queued', 'killed',
                       'pending_delete', 'deleted']
        image_id_list = []

        for (idx, status) in enumerate(status_list):
            temp = dict(deleted=False,
                        created_at=now,
                        updated_at=now,
                        status=status,
                        is_public=True,
                        min_disk=0,
                        min_ram=0,
                        id=image_id % idx)
            images.insert().values(temp).execute()

            temp = dict(deleted=False,
                        created_at=now,
                        updated_at=now,
                        image_id=image_id % idx,
                        value=url % idx)
            image_locations.insert().values(temp).execute()

            image_id_list.append(image_id % idx)
        return image_id_list
Example #12
    def _check_254(self, engine, data):
        self.assertColumnExists(engine, "pci_devices", "request_id")
        self.assertColumnExists(engine, "shadow_pci_devices", "request_id")

        pci_devices = oslodbutils.get_table(engine, "pci_devices")
        shadow_pci_devices = oslodbutils.get_table(engine, "shadow_pci_devices")
        self.assertIsInstance(pci_devices.c.request_id.type, sqlalchemy.types.String)
        self.assertIsInstance(shadow_pci_devices.c.request_id.type, sqlalchemy.types.String)
Example #13
    def _check_251(self, engine, data):
        self.assertColumnExists(engine, "compute_nodes", "numa_topology")
        self.assertColumnExists(engine, "shadow_compute_nodes", "numa_topology")

        compute_nodes = oslodbutils.get_table(engine, "compute_nodes")
        shadow_compute_nodes = oslodbutils.get_table(engine, "shadow_compute_nodes")
        self.assertIsInstance(compute_nodes.c.numa_topology.type, sqlalchemy.types.Text)
        self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type, sqlalchemy.types.Text)
Example #14
    def _check_253(self, engine, data):
        self.assertColumnExists(engine, "instance_extra", "pci_requests")
        self.assertColumnExists(engine, "shadow_instance_extra", "pci_requests")

        instance_extra = oslodbutils.get_table(engine, "instance_extra")
        shadow_instance_extra = oslodbutils.get_table(engine, "shadow_instance_extra")
        self.assertIsInstance(instance_extra.c.pci_requests.type, sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.pci_requests.type, sqlalchemy.types.Text)
Example #15
def downgrade(migrate_engine):
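    # Drop the host and details columns from both the main and shadow
    # instance_actions_events tables.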
    actions_events = utils.get_table(migrate_engine, 'instance_actions_events')
    actions_events.drop_column('host')
    actions_events.drop_column('details')
    shadow_actions_events = utils.get_table(migrate_engine,
            api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    shadow_actions_events.drop_column('host')
    shadow_actions_events.drop_column('details')
Example #16
    def _post_downgrade_247(self, engine):
        quota_usages = oslodbutils.get_table(engine, 'quota_usages')
        self.assertTrue(quota_usages.c.resource.nullable)

        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        self.assertFalse(pci_devices.c.deleted.nullable)
        self.assertTrue(pci_devices.c.product_id.nullable)
        self.assertTrue(pci_devices.c.vendor_id.nullable)
        self.assertTrue(pci_devices.c.dev_type.nullable)
Example #17
def downgrade(migrate_engine):
    # drop the unique constraint on instances.uuid
    UniqueConstraint('uuid',
                     table=utils.get_table(migrate_engine, 'instances'),
                     name=UC_NAME).drop()
    # We can't bring the deleted records back but we can make uuid nullable.
    for table_name in ('instances', 'shadow_instances'):
        table = utils.get_table(migrate_engine, table_name)
        table.columns.uuid.alter(nullable=True)
Example #18
    def _post_downgrade_017(self, engine):
        volumes = db_utils.get_table(engine, 'volumes')
        self.assertNotIn('encryption_key_id', volumes.c)

        snapshots = db_utils.get_table(engine, 'snapshots')
        self.assertNotIn('encryption_key_id', snapshots.c)

        self.assertFalse(engine.dialect.has_table(engine.connect(),
                                                  'encryption'))
Example #19
 def _pre_upgrade_006(self, engine):
     nodes = oslodbutils.get_table(engine, "bm_nodes")
     ifs = oslodbutils.get_table(engine, "bm_interfaces")
     # node 1 has two different addresses in bm_nodes and bm_interfaces
     engine.execute(nodes.insert(), [{"id": 1, "prov_mac_address": "aa:aa:aa:aa:aa:aa"}])
     engine.execute(ifs.insert(), [{"id": 101, "bm_node_id": 1, "address": "bb:bb:bb:bb:bb:bb"}])
     # node 2 has one same address both in bm_nodes and bm_interfaces
     engine.execute(nodes.insert(), [{"id": 2, "prov_mac_address": "cc:cc:cc:cc:cc:cc"}])
     engine.execute(ifs.insert(), [{"id": 201, "bm_node_id": 2, "address": "cc:cc:cc:cc:cc:cc"}])
Example #20
    def _check_247(self, engine, data):
        quota_usages = oslodbutils.get_table(engine, "quota_usages")
        self.assertFalse(quota_usages.c.resource.nullable)

        pci_devices = oslodbutils.get_table(engine, "pci_devices")
        self.assertTrue(pci_devices.c.deleted.nullable)
        self.assertFalse(pci_devices.c.product_id.nullable)
        self.assertFalse(pci_devices.c.vendor_id.nullable)
        self.assertFalse(pci_devices.c.dev_type.nullable)
Example #21
def upgrade(migrate_engine):
    actions_events = utils.get_table(migrate_engine, 'instance_actions_events')
    host = Column('host', String(255))
    details = Column('details', Text)
    actions_events.create_column(host)
    actions_events.create_column(details)
    shadow_actions_events = utils.get_table(migrate_engine,
            api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    shadow_actions_events.create_column(host.copy())
    shadow_actions_events.create_column(details.copy())
Example #22
    def _check_270(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'flavor')
        self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')

        instance_extra = oslodbutils.get_table(engine, 'instance_extra')
        shadow_instance_extra = oslodbutils.get_table(
                engine, 'shadow_instance_extra')
        self.assertIsInstance(instance_extra.c.flavor.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.flavor.type,
                              sqlalchemy.types.Text)
Example #23
 def _check_265(self, engine, data):
     # Assert that only one index exists that covers columns
     # host and deleted
     instances = oslodbutils.get_table(engine, 'instances')
     self.assertEqual(1, len([i for i in instances.indexes
                              if [c.name for c in i.columns][:2] ==
                                 ['host', 'deleted']]))
     # and only one index covers host column
     iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
     self.assertEqual(1, len([i for i in iscsi_targets.indexes
                              if [c.name for c in i.columns][:1] ==
                                 ['host']]))
Example #24
 def _check_268(self, engine, data):
     # We can only assert that the col exists, not the unique constraint
     # as the engine is running sqlite
     self.assertColumnExists(engine, 'compute_nodes', 'host')
     self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
     compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
     shadow_compute_nodes = oslodbutils.get_table(
         engine, 'shadow_compute_nodes')
     self.assertIsInstance(compute_nodes.c.host.type,
                           sqlalchemy.types.String)
     self.assertIsInstance(shadow_compute_nodes.c.host.type,
                           sqlalchemy.types.String)
Example #25
 def _check_003(self, engine, data):
     images = db_utils.get_table(engine, 'images')
     self.assertTrue('type' not in images.c,
                     "'type' column found in images table columns! "
                     "images table columns reported by metadata: %s\n"
                     % images.c.keys())
     images_prop = db_utils.get_table(engine, 'image_properties')
     result = images_prop.select().execute()
     types = []
     for row in result:
         if row['key'] == 'type':
             types.append(row['value'])
     self.assertIn(data['type'], types)
Example #26
    def _check_269(self, engine, data):

        self.assertColumnExists(engine, 'pci_devices', 'numa_node')
        self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        shadow_pci_devices = oslodbutils.get_table(
            engine, 'shadow_pci_devices')
        self.assertIsInstance(pci_devices.c.numa_node.type,
                              sqlalchemy.types.Integer)
        self.assertTrue(pci_devices.c.numa_node.nullable)
        self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
                              sqlalchemy.types.Integer)
        self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
Example #27
 def _post_downgrade_265(self, engine):
     # The duplicated index is not created on downgrade, so this
     # asserts that only one index exists that covers columns
     # host and deleted
     instances = oslodbutils.get_table(engine, 'instances')
     self.assertEqual(1, len([i for i in instances.indexes
                              if [c.name for c in i.columns][:2] ==
                                 ['host', 'deleted']]))
     # and only one index covers host column
     iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
     self.assertEqual(1, len([i for i in iscsi_targets.indexes
                              if [c.name for c in i.columns][:1] ==
                                 ['host']]))
Example #28
 def _check_273(self, engine, data):
     for src_table, src_column, dst_table, dst_column in [
             ('fixed_ips', 'instance_uuid', 'instances', 'uuid'),
             ('block_device_mapping', 'instance_uuid', 'instances', 'uuid'),
             ('instance_info_caches', 'instance_uuid', 'instances', 'uuid'),
             ('instance_metadata', 'instance_uuid', 'instances', 'uuid'),
             ('instance_system_metadata', 'instance_uuid',
              'instances', 'uuid'),
             ('instance_type_projects', 'instance_type_id',
              'instance_types', 'id'),
             ('iscsi_targets', 'volume_id', 'volumes', 'id'),
             ('reservations', 'usage_id', 'quota_usages', 'id'),
             ('security_group_instance_association', 'instance_uuid',
              'instances', 'uuid'),
             ('security_group_instance_association', 'security_group_id',
              'security_groups', 'id'),
             ('virtual_interfaces', 'instance_uuid', 'instances', 'uuid'),
             ('compute_nodes', 'service_id', 'services', 'id'),
             ('instance_actions', 'instance_uuid', 'instances', 'uuid'),
             ('instance_faults', 'instance_uuid', 'instances', 'uuid'),
             ('migrations', 'instance_uuid', 'instances', 'uuid')]:
         src_table = oslodbutils.get_table(engine, src_table)
         fkeys = {fk.parent.name: fk.column
                  for fk in src_table.foreign_keys}
         self.assertIn(src_column, fkeys)
         self.assertEqual(fkeys[src_column].table.name, dst_table)
         self.assertEqual(fkeys[src_column].name, dst_column)
Example #29
    def _check_231(self, engine, data):
        self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')

        instances = oslodbutils.get_table(engine, 'instances')
        self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
                              sqlalchemy.types.String)
        self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
Example #30
 def _check_249(self, engine, data):
     # Assert that only one index exists that covers columns
     # instance_uuid and device_name
     bdm = oslodbutils.get_table(engine, 'block_device_mapping')
     self.assertEqual(1, len([i for i in bdm.indexes
                              if [c.name for c in i.columns] ==
                                 ['instance_uuid', 'device_name']]))
Example #31
    def _post_downgrade_028(self, engine):
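        # After the downgrade the owner index should no longer exist on the
        # images table.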
        owner_index = "owner_image_idx"
        columns = ["owner"]

        images_table = db_utils.get_table(engine, 'images')

        index_data = [(idx.name, idx.columns.keys())
                      for idx in images_table.indexes
                      if idx.name == owner_index]

        self.assertNotIn((owner_index, columns), index_data)
Example #32
    def _check_029(self, engine, data):
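        # Every migrated location row should now store its metadata as JSON
        # matching the dict that was pickled before the upgrade.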
        meta_data = data[0]
        image_id = data[1]
        image_locations = db_utils.get_table(engine, 'image_locations')

        records = image_locations.select().where(
            image_locations.c.image_id == image_id).execute().fetchall()

        for r in records:
            d = jsonutils.loads(r['meta_data'])
            self.assertEqual(d, meta_data)
Example #33
 def _pre_upgrade_007(self, engine):
     desc = 'magic'
     t = db_utils.get_table(engine, 'clusters')
     engine.execute(t.insert(),
                    id='123',
                    name='name',
                    plugin_name='pname',
                    hadoop_version='1',
                    management_private_key='2',
                    management_public_key='3',
                    status_description=desc)
Example #34
 def _check_249(self, engine, data):
     # Assert that only one index exists that covers columns
     # instance_uuid and device_name
     bdm = oslodbutils.get_table(engine, 'block_device_mapping')
     self.assertEqual(
         1,
         len([
             i for i in bdm.indexes
             if [c.name
                 for c in i.columns] == ['instance_uuid', 'device_name']
         ]))
Example #35
 def _check_245(self, engine, data):
     networks = oslodbutils.get_table(engine, 'networks')
     network = networks.select(networks.c.id == 1).execute().first()
     # mtu should default to None
     self.assertIsNone(network.mtu)
     # dhcp_server should default to None
     self.assertIsNone(network.dhcp_server)
     # enable dhcp should default to true
     self.assertTrue(network.enable_dhcp)
     # share address should default to false
     self.assertFalse(network.share_address)
Example #36
    def assertIndexMembers(self, engine, table, index, members):
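        # Verify that the named index exists and covers exactly the given
        # columns, ignoring order.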
        self.assertIndexExists(engine, table, index)

        t = oslodbutils.get_table(engine, table)
        index_columns = None
        for idx in t.indexes:
            if idx.name == index:
                index_columns = idx.columns.keys()
                break

        self.assertEqual(sorted(members), sorted(index_columns))
Example #37
    def _check_010(self, engine, data):
        bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
        self.assertIn('preserve_ephemeral', bm_nodes.columns)

        default = engine.execute(
            sqlalchemy.select([bm_nodes.c.preserve_ephemeral])
                      .where(bm_nodes.c.id == data[0]['id'])
        ).scalar()
        self.assertEqual(default, False)

        bm_nodes.delete().where(bm_nodes.c.id == data[0]['id']).execute()
Example #38
    def _check_026(self, engine, data):
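        # The location created before the upgrade should now carry a
        # meta_data column holding a pickled empty dict.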
        image_locations = db_utils.get_table(engine, 'image_locations')
        results = image_locations.select().where(
            image_locations.c.image_id == data).execute()

        r = list(results)
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0]['value'], 'file:///some/place/onthe/fs')
        self.assertIn('meta_data', r[0])
        x = pickle.loads(r[0]['meta_data'])
        self.assertEqual(x, {})
Example #39
    def _check_023(self, engine, data):
        """Test that adding reservations index works correctly."""
        reservations = db_utils.get_table(engine, 'reservations')
        index_columns = []
        for idx in reservations.indexes:
            if idx.name == 'reservations_deleted_expire_idx':
                index_columns = idx.columns.keys()
                break

        self.assertEqual(sorted(['deleted', 'expire']),
                         sorted(index_columns))
Example #40
    def _check_004(self, engine, data):
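        # Load the volumes, volume types and extra specs created before the
        # upgrade and verify their volume_type_id references line up.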
        volumes = db_utils.get_table(engine, 'volumes')
        v1 = volumes.select(volumes.c.id ==
                            data['volumes'][0]['id']
                            ).execute().first()
        v2 = volumes.select(volumes.c.id ==
                            data['volumes'][1]['id']
                            ).execute().first()
        v3 = volumes.select(volumes.c.id ==
                            data['volumes'][2]['id']
                            ).execute().first()

        volume_types = db_utils.get_table(engine, 'volume_types')
        vt1 = volume_types.select(volume_types.c.name ==
                                  data['volume_types'][0]['name']
                                  ).execute().first()
        vt2 = volume_types.select(volume_types.c.name ==
                                  data['volume_types'][1]['name']
                                  ).execute().first()
        vt3 = volume_types.select(volume_types.c.name ==
                                  data['volume_types'][2]['name']
                                  ).execute().first()

        vtes = db_utils.get_table(engine, 'volume_type_extra_specs')
        vtes1 = vtes.select(vtes.c.key ==
                            data['volume_type_extra_specs'][0]['key']
                            ).execute().first()
        vtes2 = vtes.select(vtes.c.key ==
                            data['volume_type_extra_specs'][1]['key']
                            ).execute().first()
        vtes3 = vtes.select(vtes.c.key ==
                            data['volume_type_extra_specs'][2]['key']
                            ).execute().first()

        self.assertEqual(v1['volume_type_id'], vt1['id'])
        self.assertEqual(v2['volume_type_id'], vt1['id'])
        self.assertEqual(v3['volume_type_id'], vt3['id'])

        self.assertEqual(vtes1['volume_type_id'], vt1['id'])
        self.assertEqual(vtes2['volume_type_id'], vt1['id'])
        self.assertEqual(vtes3['volume_type_id'], vt2['id'])
Example #41
    def _check_230(self, engine, data):
        for table_name in ['instance_actions_events',
                           'shadow_instance_actions_events']:
            self.assertColumnExists(engine, table_name, 'host')
            self.assertColumnExists(engine, table_name, 'details')

        action_events = oslodbutils.get_table(engine,
                                              'instance_actions_events')
        self.assertIsInstance(action_events.c.host.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(action_events.c.details.type,
                              sqlalchemy.types.Text)
Example #42
 def _check_3bea56f25597(self, engine, data):
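     # Insert a node, then verify that a second node reusing the same
     # instance_uuid is rejected with an IntegrityError.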
     nodes = db_utils.get_table(engine, 'nodes')
     instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
     data = {
         'driver': 'fake',
         'uuid': utils.generate_uuid(),
         'instance_uuid': instance_uuid
     }
     nodes.insert().values(data).execute()
     data['uuid'] = utils.generate_uuid()
     self.assertRaises(sqlalchemy.exc.IntegrityError,
                       nodes.insert().execute, data)
Example #43
 def _pre_upgrade_043(self, engine):
     raw_template = utils.get_table(engine, 'raw_template')
     templ = '''{"HeatTemplateFormatVersion" : "2012-12-11",
     "Parameters" : {
       "foo" : { "Type" : "String", "NoEcho": "True" },
       "bar" : { "Type" : "String", "NoEcho": "True", "Default": "abc" },
       "blarg" : { "Type" : "String", "Default": "quux" }
       }
     }'''
     data = [dict(id=8, template=templ, files='{}')]
     engine.execute(raw_template.insert(), data)
     return data[0]
Example #44
 def _pre_upgrade_037(self, engine):
     raw_template = utils.get_table(engine, 'raw_template')
     templ = '''{"heat_template_version": "2013-05-23",
     "parameters": {
        "key_name": {
           "Type": "string"
        }
       }
     }'''
     data = [dict(id=4, template=templ, files='{}')]
     engine.execute(raw_template.insert(), data)
     return data[0]
Example #45
 def _post_downgrade_249(self, engine):
     # The duplicate index is not created on downgrade, so this
     # asserts that only one index exists that covers columns
     # instance_uuid and device_name
     bdm = oslodbutils.get_table(engine, 'block_device_mapping')
     self.assertEqual(
         1,
         len([
             i for i in bdm.indexes
             if [c.name
                 for c in i.columns] == ['instance_uuid', 'device_name']
         ]))
Example #46
    def _post_downgrade_029(self, engine):
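        # After the downgrade the stored meta_data should be pickled again,
        # so pickle.loads must succeed and yield a dict.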
        image_id = 'fake_029_id'

        image_locations = db_utils.get_table(engine, 'image_locations')

        records = image_locations.select().where(
            image_locations.c.image_id == image_id).execute().fetchall()

        for r in records:
            md = r['meta_data']
            d = pickle.loads(md)
            self.assertIsInstance(d, dict)
Example #47
    def _pre_upgrade_006(self, engine):
        now = timeutils.utcnow()
        images = db_utils.get_table(engine, 'images')
        image_data = [
            {
                'deleted': False, 'created_at': now, 'updated_at': now,
                'type': 'kernel', 'status': 'active', 'is_public': True,
                'id': 9999,
            }
        ]
        engine.execute(images.insert(), image_data)

        images_properties = db_utils.get_table(engine, 'image_properties')
        properties_data = [
            {
                'id': 10, 'image_id': 9999, 'updated_at': now,
                'created_at': now, 'deleted': False, 'key': 'image_name'
            }
        ]
        engine.execute(images_properties.insert(), properties_data)
        return properties_data
Example #48
 def _pre_upgrade_002(self, engine):
     data = [{
         'id': 1,
         'key': 'fake-key',
         'image_path': '/dev/null',
         'pxe_config_path': '/dev/null/',
         'root_mb': 0,
         'swap_mb': 0
     }]
     table = oslodbutils.get_table(engine, 'bm_deployments')
     engine.execute(table.insert(), data)
     return data
Example #49
    def _check_031(self, engine, image_id):
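        # The duplicate locations inserted before the upgrade should have
        # been collapsed to a unique set of (value, meta_data) pairs.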
        locations_table = db_utils.get_table(engine, 'image_locations')
        result = locations_table.select().where(
            locations_table.c.image_id == image_id).execute().fetchall()

        locations = set([(x['value'], x['meta_data']) for x in result])
        actual_locations = set([
            ('file://ab', '{"a": "yo yo"}'),
            ('file://ab', '{}'),
            ('file://ab1', '{"a": "that one, please"}'),
        ])
        self.assertFalse(actual_locations.symmetric_difference(locations))
Example #50
    def _check_007(self, engine, data):
        t = db_utils.get_table(engine, 'clusters')
        res = engine.execute(t.select(), id='123').first()
        self.assertEqual('magic', res['status_description'])
        engine.execute(t.delete())

        # Check that status_description can hold 128kb;
        # a MySQL VARCHAR cannot hold more than 64kb.
        desc = 'a' * 128 * 1024  # 128kb
        t = db_utils.get_table(engine, 'clusters')
        engine.execute(t.insert(),
                       id='123',
                       name='name',
                       plugin_name='plname',
                       hadoop_version='hversion',
                       management_private_key='1',
                       management_public_key='2',
                       status_description=desc)
        new_desc = engine.execute(t.select()).fetchone().status_description
        self.assertEqual(desc, new_desc)
        engine.execute(t.delete())
Example #51
    def _pre_upgrade_012(self, engine):
        """Test rows in images have id changes from int to varchar(32) and
        value changed from int to UUID. Also test image_members and
        image_properties gets updated to point to new UUID keys.
        """

        images = db_utils.get_table(engine, 'images')
        image_members = db_utils.get_table(engine, 'image_members')
        image_properties = db_utils.get_table(engine, 'image_properties')

        # Insert kernel, ramdisk and normal images
        now = timeutils.utcnow()
        data = {'created_at': now, 'updated_at': now,
                'status': 'active', 'deleted': False,
                'is_public': True, 'min_disk': 0, 'min_ram': 0}

        test_data = {}
        for name in ('kernel', 'ramdisk', 'normal'):
            data['name'] = '%s migration 012 test' % name
            result = images.insert().values(data).execute()
            test_data[name] = result.inserted_primary_key[0]

        # Insert image_members and image_properties rows
        data = {'created_at': now, 'updated_at': now, 'deleted': False,
                'image_id': test_data['normal'], 'member': 'foobar',
                'can_share': False}
        result = image_members.insert().values(data).execute()
        test_data['member'] = result.inserted_primary_key[0]

        data = {'created_at': now, 'updated_at': now, 'deleted': False,
                'image_id': test_data['normal'], 'name': 'ramdisk_id',
                'value': test_data['ramdisk']}
        result = image_properties.insert().values(data).execute()
        test_data['properties'] = [result.inserted_primary_key[0]]

        data.update({'name': 'kernel_id', 'value': test_data['kernel']})
        result = image_properties.insert().values(data).execute()
        test_data['properties'].append(result.inserted_primary_key)

        return test_data
Example #52
 def _pre_upgrade_005(self, engine):
     now = timeutils.utcnow()
     images = db_utils.get_table(engine, 'images')
     data = [
         {
             'deleted': False, 'created_at': now, 'updated_at': now,
             'type': 'kernel', 'status': 'active', 'is_public': True,
             # Integer type signed size limit
             'size': 2147483647
         }
     ]
     engine.execute(images.insert(), data)
     return data
Example #53
 def _pre_upgrade_003(self, engine):
     now = datetime.datetime.now()
     images = db_utils.get_table(engine, 'images')
     data = {
         'deleted': False,
         'created_at': now,
         'updated_at': now,
         'type': 'kernel',
         'status': 'active',
         'is_public': True
     }
     images.insert().values(data).execute()
     return data
Example #54
 def _pre_upgrade_016(self, engine):
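     # Create an image and a matching image_members row; the member data is
     # returned for the corresponding check.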
     images = db_utils.get_table(engine, 'images')
     now = datetime.datetime.now()
     temp = dict(deleted=False,
                 created_at=now,
                 updated_at=now,
                 status='active',
                 is_public=True,
                 min_disk=0,
                 min_ram=0,
                 id='fake-image-id1')
     images.insert().values(temp).execute()
     image_members = db_utils.get_table(engine, 'image_members')
     now = datetime.datetime.now()
     data = {'deleted': False,
             'created_at': now,
             'member': 'fake-member',
             'updated_at': now,
             'can_share': False,
             'image_id': 'fake-image-id1'}
     image_members.insert().values(data).execute()
     return data
Example #55
    def _check_012(self, engine, test_data):
        images = db_utils.get_table(engine, 'images')
        image_members = db_utils.get_table(engine, 'image_members')
        image_properties = db_utils.get_table(engine, 'image_properties')

        # Find kernel, ramdisk and normal images. Make sure id has been
        # changed to a uuid
        uuids = {}
        for name in ('kernel', 'ramdisk', 'normal'):
            image_name = '%s migration 012 test' % name
            rows = images.select().where(
                images.c.name == image_name).execute().fetchall()

            self.assertEqual(len(rows), 1)

            row = rows[0]
            self.assertTrue(utils.is_uuid_like(row['id']))

            uuids[name] = row['id']

        # Find all image_members to ensure image_id has been updated
        results = image_members.select().where(
            image_members.c.image_id == uuids['normal']).execute().fetchall()
        self.assertEqual(len(results), 1)

        # Find all image_properties to ensure image_id has been updated
        # as well as ensure kernel_id and ramdisk_id values have been
        # updated too
        results = image_properties.select().where(
            image_properties.c.image_id == uuids['normal']
        ).execute().fetchall()
        self.assertEqual(len(results), 2)
        for row in results:
            self.assertIn(row['name'], ('kernel_id', 'ramdisk_id'))

            if row['name'] == 'kernel_id':
                self.assertEqual(row['value'], uuids['kernel'])
            if row['name'] == 'ramdisk_id':
                self.assertEqual(row['value'], uuids['ramdisk'])
Example #56
    def _check_010(self, engine, data):
        values = dict((c, u) for c, u in data)

        images = db_utils.get_table(engine, 'images')
        for row in images.select().execute():
            if row['created_at'] in values:
                # updated_at should be unchanged if not previous NULL, or
                # set to created_at if previously NULL
                updated_at = values.pop(row['created_at']) or row['created_at']
                self.assertEqual(row['updated_at'], updated_at)

        # No initial values should be remaining
        self.assertEqual(len(values), 0)
Example #57
    def _check_032(self, engine, data):
        """Test adding volume_type_projects table works correctly."""
        volume_type_projects = db_utils.get_table(engine,
                                                  'volume_type_projects')
        self.assertIsInstance(volume_type_projects.c.created_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(volume_type_projects.c.updated_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(volume_type_projects.c.deleted_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(volume_type_projects.c.deleted.type,
                              self.BOOL_TYPE)
        self.assertIsInstance(volume_type_projects.c.id.type,
                              sqlalchemy.types.INTEGER)
        self.assertIsInstance(volume_type_projects.c.volume_type_id.type,
                              sqlalchemy.types.VARCHAR)
        self.assertIsInstance(volume_type_projects.c.project_id.type,
                              sqlalchemy.types.VARCHAR)

        volume_types = db_utils.get_table(engine, 'volume_types')
        self.assertIsInstance(volume_types.c.is_public.type,
                              self.BOOL_TYPE)
Example #58
    def test_change_deleted_column_type_to_boolean(self):
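        # Create a table with an Integer 'deleted' column and verify that
        # the helper converts it to the engine-specific boolean type.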
        expected_types = {'mysql': mysql.TINYINT, 'ibm_db_sa': SmallInteger}
        table_name = 'abc'
        table = Table(table_name, self.meta,
                      Column('id', Integer, primary_key=True),
                      Column('deleted', Integer))
        table.create()

        utils.change_deleted_column_type_to_boolean(self.engine, table_name)

        table = utils.get_table(self.engine, table_name)
        self.assertIsInstance(table.c.deleted.type,
                              expected_types.get(self.engine.name, Boolean))
Example #59
 def _check_006(self, engine, data):
     ifs = oslodbutils.get_table(engine, 'bm_interfaces')
     rows = ifs.select().\
                 where(ifs.c.bm_node_id == 1).\
                 execute().\
                 fetchall()
     self.assertEqual(len(rows), 2)
     rows = ifs.select().\
                 where(ifs.c.bm_node_id == 2).\
                 execute().\
                 fetchall()
     self.assertEqual(len(rows), 1)
     self.assertEqual(rows[0]['address'], 'cc:cc:cc:cc:cc:cc')
Example #60
    def test_change_deleted_column_type_to_id_type_string(self):
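        # With a String primary key, the 'deleted' column should end up as a
        # String type matching the id column after the conversion.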
        table_name = 'abc'
        for engine in self.engines.values():
            meta = MetaData()
            meta.bind = engine
            table = Table(table_name, meta,
                          Column('id', String(255), primary_key=True),
                          Column('deleted', Boolean))
            table.create()
            utils.change_deleted_column_type_to_id_type(engine, table_name)

            table = utils.get_table(engine, table_name)
            self.assertTrue(isinstance(table.c.deleted.type, String))