def fuzzify(engine, config):
    """Randomize the configured columns of each table (and its shadow_*
    counterpart) using the loaded model attributes.

    :param engine: SQLAlchemy engine bound to the target database.
    :param config: mapping of table name -> {column name: column type}
                   selecting which columns to fuzz.
    """
    model_objects = {}
    Session = sessionmaker(bind=engine)
    session = Session()
    metadata = MetaData(bind=engine, reflect=True)
    # Relax FK constraints so rows can be rewritten in any order.
    cascade_fkeys(metadata)

    for table_name, columns in config.items():
        tables = []
        if table_name in metadata.tables:
            tables.append(utils.get_table(engine, table_name))
        shadow_name = 'shadow_' + table_name
        if shadow_name in metadata.tables:
            tables.append(utils.get_table(engine, shadow_name))
        for table in tables:
            # BUG FIX: was a Python-2 print statement; use the function form.
            print("Doing table: " + str(table))
            # Build a throwaway mapped class so rows can be updated via ORM.
            model_objects[table] = type('Model_' + str(table), (object,), {})
            mapper(model_objects[table], table)
            q = session.query(model_objects[table])
            for row in q.all():
                for column, column_type in columns.items():
                    if hasattr(row, column):
                        before = getattr(row, column)
                        after = randomness(before, column_type)
                        setattr(row, column, after)
    cascade_fkeys(metadata, restore=True)
    # BUG FIX: flush before commit -- flushing after commit was a no-op.
    session.flush()
    session.commit()
def downgrade(migrate_engine):
    """Drop the host/details columns from instance_actions_events tables."""
    names = ('instance_actions_events',
             api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    for name in names:
        table = utils.get_table(migrate_engine, name)
        table.drop_column('host')
        table.drop_column('details')
Beispiel #3
0
def downgrade(migrate_engine):
    """Remove host_ip and supported_instances from compute_nodes (+ shadow)."""
    for prefix in ('', db._SHADOW_TABLE_PREFIX):
        table = utils.get_table(migrate_engine, prefix + 'compute_nodes')
        table.drop_column('host_ip')
        table.drop_column('supported_instances')
def downgrade(migrate_engine):
    """Revert the host_ip/supported_instances compute_nodes columns."""
    main_table = utils.get_table(migrate_engine, 'compute_nodes')
    shadow_table = utils.get_table(migrate_engine,
                                   db._SHADOW_TABLE_PREFIX + 'compute_nodes')
    for tbl in (main_table, shadow_table):
        for col in ('host_ip', 'supported_instances'):
            tbl.drop_column(col)
Beispiel #5
0
def downgrade(migrate_engine):
    """Drop host and details from instance_actions_events and its shadow."""
    for name in ('instance_actions_events',
                 api._SHADOW_TABLE_PREFIX + 'instance_actions_events'):
        tbl = utils.get_table(migrate_engine, name)
        for col in ('host', 'details'):
            tbl.drop_column(col)
Beispiel #6
0
def upgrade(migrate_engine):
    """Add host and details columns to instance_actions_events (+ shadow)."""
    actions_events = utils.get_table(migrate_engine, 'instance_actions_events')
    host = Column('host', String(255))
    details = Column('details', Text)
    for col in (host, details):
        actions_events.create_column(col)
    shadow = utils.get_table(
        migrate_engine, api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    # copy() because a Column object can only belong to one table.
    for col in (host, details):
        shadow.create_column(col.copy())
def upgrade(migrate_engine):
    """Add host/details to instance_actions_events and its shadow table."""
    events = utils.get_table(migrate_engine, 'instance_actions_events')
    new_columns = [Column('host', String(255)), Column('details', Text)]
    for column in new_columns:
        events.create_column(column)
    shadow_events = utils.get_table(
        migrate_engine, api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    for column in new_columns:
        # A Column can only be attached to one table, hence copy().
        shadow_events.create_column(column.copy())
Beispiel #8
0
def upgrade(migrate_engine):
    """Add host_ip and supported_instances to compute_nodes (+ shadow)."""
    for prefix in ('', db._SHADOW_TABLE_PREFIX):
        table = utils.get_table(migrate_engine, prefix + 'compute_nodes')
        # Fresh Column objects per table: a Column binds to one table only.
        table.create_column(Column('host_ip', types.IPAddress()))
        table.create_column(Column('supported_instances', Text))
def upgrade(migrate_engine):
    """Add host_ip/supported_instances columns to compute_nodes tables."""
    compute_nodes = utils.get_table(migrate_engine, 'compute_nodes')
    compute_nodes.create_column(Column('host_ip', types.IPAddress()))
    compute_nodes.create_column(Column('supported_instances', Text))

    shadow_name = db._SHADOW_TABLE_PREFIX + 'compute_nodes'
    shadow_compute_nodes = utils.get_table(migrate_engine, shadow_name)
    # New Column instances for the shadow table -- a Column binds once.
    shadow_compute_nodes.create_column(Column('host_ip', types.IPAddress()))
    shadow_compute_nodes.create_column(Column('supported_instances', Text))
Beispiel #10
0
    def test_change_deleted_column_type_to_id_type_custom(self):
        """Converting 'deleted' fails on an unreflectable custom type unless
        the replacement column is supplied explicitly."""
        if 'sqlite' not in self.engines:
            return
        table_name = 'test_change_deleted_column_type_to_id_type_custom'
        engine = self.engines['sqlite']
        meta = MetaData()
        meta.bind = engine
        tbl = Table(table_name, meta,
                    Column('id', Integer, primary_key=True),
                    Column('foo', CustomType),
                    Column('deleted', Boolean))
        tbl.create()

        # Without an explicit replacement column the helper must refuse.
        self.assertRaises(exception.NovaException,
                          utils.change_deleted_column_type_to_id_type,
                          engine, table_name)

        replacement = Column('foo', CustomType())
        utils.change_deleted_column_type_to_id_type(engine, table_name,
                                                    foo=replacement)

        tbl = utils.get_table(engine, table_name)
        # NOTE(boris-42): There is no way to check has foo type CustomType.
        #                 but sqlalchemy will set it to NullType.
        self.assertIsInstance(tbl.c.foo.type, NullType)
        self.assertIsInstance(tbl.c.deleted.type, Integer)
        tbl.drop()
Beispiel #11
0
    def test_change_deleted_column_type_to_id_type_custom(self):
        """Same conversion; newer SQLAlchemy reflects custom types itself."""
        if 'sqlite' not in self.engines:
            return
        name = 'test_change_deleted_column_type_to_id_type_custom'
        eng = self.engines['sqlite']
        metadata = MetaData()
        metadata.bind = eng
        tbl = Table(name, metadata,
                    Column('id', Integer, primary_key=True),
                    Column('foo', CustomType),
                    Column('deleted', Boolean))
        tbl.create()

        # reflection of custom types has been fixed upstream
        if SA_VERSION < (0, 9, 0):
            self.assertRaises(exception.NovaException,
                              utils.change_deleted_column_type_to_id_type,
                              eng, name)

        foo_col = Column('foo', CustomType())
        utils.change_deleted_column_type_to_id_type(eng, name, foo=foo_col)

        tbl = utils.get_table(eng, name)
        # NOTE(boris-42): There is no way to check has foo type CustomType.
        #                 but sqlalchemy will set it to NullType. This has
        #                 been fixed upstream in recent SA versions
        if SA_VERSION < (0, 9, 0):
            self.assertIsInstance(tbl.c.foo.type, NullType)
        self.assertIsInstance(tbl.c.deleted.type, Integer)
        tbl.drop()
    def _check_231(self, engine, data):
        """Migration 231 adds instances.ephemeral_key_uuid (with shadow)."""
        self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')

        column = db_utils.get_table(engine, 'instances').c.ephemeral_key_uuid
        self.assertIsInstance(column.type, sqlalchemy.types.String)
        self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
Beispiel #13
0
    def _check_231(self, engine, data):
        """ephemeral_key_uuid exists, is a String, and shadow table matches."""
        self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')

        instances_table = db_utils.get_table(engine, 'instances')
        uuid_type = instances_table.c.ephemeral_key_uuid.type
        self.assertIsInstance(uuid_type, sqlalchemy.types.String)
        self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
    def _check_229(self, engine, data):
        """Migration 229 adds compute_nodes.extra_resources as Text."""
        self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')

        compute_nodes = db_utils.get_table(engine, 'compute_nodes')
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(compute_nodes.c.extra_resources.type,
                              sqlalchemy.types.Text)
Beispiel #15
0
    def test_change_deleted_column_type_to_boolean_type_custom(self):
        """Custom types need an explicit column when converting to Boolean."""
        if 'sqlite' not in self.engines:
            return
        name = 'test_change_deleted_column_type_to_boolean_type_custom'
        eng = self.engines['sqlite']
        metadata = MetaData()
        metadata.bind = eng
        tbl = Table(name, metadata,
                    Column('id', Integer, primary_key=True),
                    Column('foo', CustomType),
                    Column('deleted', Integer))
        tbl.create()

        # reflection of custom types has been fixed upstream
        if SA_VERSION < (0, 9, 0):
            self.assertRaises(exception.NovaException,
                              utils.change_deleted_column_type_to_boolean,
                              eng, name)

        foo_col = Column('foo', CustomType())
        utils.change_deleted_column_type_to_boolean(eng, name, foo=foo_col)

        tbl = utils.get_table(eng, name)
        # NOTE(boris-42): There is no way to check has foo type CustomType.
        #                 but sqlalchemy will set it to NullType. This has
        #                 been fixed upstream in recent SA versions.
        if SA_VERSION < (0, 9, 0):
            self.assertIsInstance(tbl.c.foo.type, NullType)
        self.assertIsInstance(tbl.c.deleted.type, Boolean)
        tbl.drop()
Beispiel #16
0
    def test_change_deleted_column_type_to_boolean_type_custom(self):
        """The converter refuses unknown types unless one is given."""
        if 'sqlite' not in self.engines:
            return
        table_name = \
            'test_change_deleted_column_type_to_boolean_type_custom'
        engine = self.engines['sqlite']
        meta = MetaData()
        meta.bind = engine
        target = Table(table_name, meta,
                       Column('id', Integer, primary_key=True),
                       Column('foo', CustomType),
                       Column('deleted', Integer))
        target.create()

        # An unreflectable custom type must make the helper raise.
        self.assertRaises(exception.NovaException,
                          utils.change_deleted_column_type_to_boolean,
                          engine, table_name)

        replacement = Column('foo', CustomType())
        utils.change_deleted_column_type_to_boolean(engine, table_name,
                                                    foo=replacement)

        target = utils.get_table(engine, table_name)
        # NOTE(boris-42): There is no way to check has foo type CustomType.
        #                 but sqlalchemy will set it to NullType.
        self.assertIsInstance(target.c.foo.type, NullType)
        self.assertIsInstance(target.c.deleted.type, Boolean)
        target.drop()
def upgrade(migrate_engine):
    """Create the pci_devices table (plus its shadow table) and add a
    pci_stats column to compute_nodes (plus its shadow).
    """
    meta = MetaData(bind=migrate_engine)

    # Unique-constraint name follows the uniq_<table>0<col>0... convention.
    pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
    pci_devices = Table('pci_devices', meta,
                        Column('created_at', DateTime(timezone=False)),
                        Column('updated_at', DateTime(timezone=False)),
                        Column('deleted_at', DateTime(timezone=False)),
                        Column('deleted', Integer, default=0, nullable=False),
                        Column('id', Integer, primary_key=True),
                        Column('compute_node_id', Integer, nullable=False),
                        Column('address', String(12), nullable=False),
                        Column('product_id', String(4)),
                        Column('vendor_id', String(4)),
                        Column('dev_type', String(8)),
                        Column('dev_id', String(255)),
                        Column('label', String(255), nullable=False),
                        Column('status', String(36), nullable=False),
                        Column('extra_info', Text, nullable=True),
                        Column('instance_uuid', String(36), nullable=True),
                        Index('ix_pci_devices_compute_node_id_deleted',
                              'compute_node_id', 'deleted'),
                        Index('ix_pci_devices_instance_uuid_deleted',
                              'instance_uuid', 'deleted'),
                        UniqueConstraint('compute_node_id',
                                         'address', 'deleted',
                                         name=pci_devices_uc_name),
                        mysql_engine='InnoDB',
                        mysql_charset='utf8')

    try:
        pci_devices.create()
        # Mirror the new table into the shadow (archival) table set.
        utils.create_shadow_table(migrate_engine, table=pci_devices)
    except Exception:
        LOG.exception(_("Exception while creating table 'pci_devices'."))
        raise

    try:
        compute_nodes = utils.get_table(migrate_engine, 'compute_nodes')
        pci_stats = Column('pci_stats', Text, nullable=True)
        compute_nodes.create_column(pci_stats)
        shadow_compute_nodes = utils.get_table(
            migrate_engine, api._SHADOW_TABLE_PREFIX + 'compute_nodes')
        # copy() because a Column object can only be attached to one table.
        shadow_compute_nodes.create_column(pci_stats.copy())
    except Exception:
        LOG.exception(_("Exception for adding pci stats to compute node."))
        raise
 def downgrade_pci_device_table(self, migrate_engine):
     """Drop the bus/slot/function/workload columns from pci_devices."""
     LOG.audit(_("downgrading pci_devices table"))
     # FIX: removed the unused `meta = MetaData(bind=migrate_engine)` local.
     pci_devices = utils.get_table(migrate_engine, 'pci_devices')
     for column_name in ('bus', 'slot', 'function', 'workload'):
         pci_devices.drop_column(column_name)
Beispiel #19
0
    def upgrade_instance_table(self, migrate_engine):
        """Add workload_type and policy columns to the instances table."""
        LOG.audit(_("upgrading instance table"))
        # FIX: original mixed tab- and space-indented lines (TabError under
        # Python 3); also removed the unused `meta` local.
        instances = utils.get_table(migrate_engine, 'instances')
        workload_type = Column('workload_type', String(100), nullable=True)
        # NOTE(review): Integer(11) assumes the type accepts a display-width
        # argument -- confirm against the SQLAlchemy version in use.
        policy = Column('policy', Integer(11), nullable=True)
        instances.create_column(workload_type)
        instances.create_column(policy)
    def _post_downgrade_006(self, engine):
        """After downgrading 006, node 1 keeps one interface; node 2 none."""
        interfaces = db_utils.get_table(engine, 'bm_interfaces')

        node1_rows = (interfaces.select()
                      .where(interfaces.c.bm_node_id == 1)
                      .execute().fetchall())
        self.assertEqual(len(node1_rows), 1)
        self.assertEqual(node1_rows[0]['address'], 'bb:bb:bb:bb:bb:bb')

        node2_rows = (interfaces.select()
                      .where(interfaces.c.bm_node_id == 2)
                      .execute().fetchall())
        self.assertEqual(len(node2_rows), 0)
Beispiel #21
0
    def _post_downgrade_006(self, engine):
        """Node 2's interface row is gone after the downgrade."""
        ifs = db_utils.get_table(engine, 'bm_interfaces')

        def rows_for(node_id):
            # Fetch all bm_interfaces rows belonging to one node.
            query = ifs.select().where(ifs.c.bm_node_id == node_id)
            return query.execute().fetchall()

        remaining = rows_for(1)
        self.assertEqual(len(remaining), 1)
        self.assertEqual(remaining[0]['address'], 'bb:bb:bb:bb:bb:bb')

        self.assertEqual(len(rows_for(2)), 0)
Beispiel #22
0
    def _check_233(self, engine, data):
        """Migration 233 folds compute_node_stats into compute_nodes.stats."""
        self.assertColumnExists(engine, 'compute_nodes', 'stats')

        stats_column = db_utils.get_table(engine, 'compute_nodes').c.stats
        self.assertIsInstance(stats_column.type, sqlalchemy.types.Text)

        # The old per-node stats table must be gone entirely.
        self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table,
                          engine, 'compute_node_stats')
    def _check_233(self, engine, data):
        """stats column exists as Text; compute_node_stats table is dropped."""
        self.assertColumnExists(engine, 'compute_nodes', 'stats')

        nodes_table = db_utils.get_table(engine, 'compute_nodes')
        self.assertIsInstance(nodes_table.c.stats.type,
                              sqlalchemy.types.Text)

        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          db_utils.get_table, engine, 'compute_node_stats')
Beispiel #24
0
 def _pre_upgrade_006(self, engine):
     """Seed bm_nodes/bm_interfaces: node 1 MACs differ, node 2 MACs match."""
     nodes = db_utils.get_table(engine, 'bm_nodes')
     ifs = db_utils.get_table(engine, 'bm_interfaces')
     # node 1 has two different addresses in bm_nodes and bm_interfaces
     engine.execute(nodes.insert(),
                    [{'id': 1, 'prov_mac_address': 'aa:aa:aa:aa:aa:aa'}])
     engine.execute(ifs.insert(),
                    [{'id': 101, 'bm_node_id': 1,
                      'address': 'bb:bb:bb:bb:bb:bb'}])
     # node 2 has one same address both in bm_nodes and bm_interfaces
     engine.execute(nodes.insert(),
                    [{'id': 2, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}])
     engine.execute(ifs.insert(),
                    [{'id': 201, 'bm_node_id': 2,
                      'address': 'cc:cc:cc:cc:cc:cc'}])
Beispiel #25
0
 def _pre_upgrade_006(self, engine):
     """Insert fixture rows used by the 006 MAC-deduplication checks."""
     node_table = db_utils.get_table(engine, 'bm_nodes')
     iface_table = db_utils.get_table(engine, 'bm_interfaces')
     fixtures = [
         # node 1 has two different addresses in bm_nodes and bm_interfaces
         (node_table, {'id': 1, 'prov_mac_address': 'aa:aa:aa:aa:aa:aa'}),
         (iface_table, {'id': 101, 'bm_node_id': 1,
                        'address': 'bb:bb:bb:bb:bb:bb'}),
         # node 2 has one same address both in bm_nodes and bm_interfaces
         (node_table, {'id': 2, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}),
         (iface_table, {'id': 201, 'bm_node_id': 2,
                        'address': 'cc:cc:cc:cc:cc:cc'}),
     ]
     for table, row in fixtures:
         engine.execute(table.insert(), [row])
Beispiel #26
0
    def _check_010(self, engine, data):
        """Migration 010 adds preserve_ephemeral defaulting to False."""
        bm_nodes = db_utils.get_table(engine, 'bm_nodes')
        self.assertIn('preserve_ephemeral', bm_nodes.columns)

        default = engine.execute(
            sqlalchemy.select([bm_nodes.c.preserve_ephemeral])
                      .where(bm_nodes.c.id == data[0]['id'])
        ).scalar()
        # assertFalse states the intent more directly than
        # assertEqual(default, False).
        self.assertFalse(default)

        # Remove the fixture row so later checks start clean.
        bm_nodes.delete().where(bm_nodes.c.id == data[0]['id']).execute()
Beispiel #27
0
    def _check_230(self, engine, data):
        """Migration 230 adds host/details to instance_actions_events."""
        for table_name in ('instance_actions_events',
                           'shadow_instance_actions_events'):
            for column in ('host', 'details'):
                self.assertColumnExists(engine, table_name, column)

        events = db_utils.get_table(engine, 'instance_actions_events')
        self.assertIsInstance(events.c.host.type, sqlalchemy.types.String)
        self.assertIsInstance(events.c.details.type, sqlalchemy.types.Text)
    def _check_010(self, engine, data):
        """preserve_ephemeral exists on bm_nodes and defaults to False."""
        bm_nodes = db_utils.get_table(engine, 'bm_nodes')
        self.assertIn('preserve_ephemeral', bm_nodes.columns)

        default = engine.execute(
            sqlalchemy.select([
                bm_nodes.c.preserve_ephemeral
            ]).where(bm_nodes.c.id == data[0]['id'])).scalar()
        # Prefer assertFalse over assertEqual(..., False) for clarity.
        self.assertFalse(default)

        # Clean up the fixture row.
        bm_nodes.delete().where(bm_nodes.c.id == data[0]['id']).execute()
Beispiel #29
0
    def _check_230(self, engine, data):
        """host and details columns were added (main and shadow tables)."""
        for table_name in ['instance_actions_events',
                           'shadow_instance_actions_events']:
            self.assertColumnExists(engine, table_name, 'host')
            self.assertColumnExists(engine, table_name, 'details')

        columns = db_utils.get_table(engine, 'instance_actions_events').c
        self.assertIsInstance(columns.host.type, sqlalchemy.types.String)
        self.assertIsInstance(columns.details.type, sqlalchemy.types.Text)
Beispiel #30
0
    def assertIndexMembers(self, engine, table, index, members):
        """Assert that *index* on *table* covers exactly *members* columns."""
        self.assertIndexExists(engine, table, index)

        t = db_utils.get_table(engine, table)
        index_columns = None
        for idx in t.indexes:
            if idx.name == index:
                index_columns = idx.columns.keys()
                break

        # Fail with a clear message instead of a TypeError from
        # sorted(None) when the index is missing from the reflected table.
        self.assertIsNotNone(index_columns,
                             'index %s not found on table %s' % (index, table))
        self.assertEqual(sorted(members), sorted(index_columns))
    def assertIndexMembers(self, engine, table, index, members):
        """Check the named index exists and spans exactly *members*."""
        self.assertIndexExists(engine, table, index)

        reflected = db_utils.get_table(engine, table)
        matches = (ix.columns.keys() for ix in reflected.indexes
                   if ix.name == index)
        index_columns = next(matches, None)

        self.assertEqual(sorted(members), sorted(index_columns))
 def upgrade_pci_device_table(self, migrate_engine):
     """Add bus/slot/function/workload columns to pci_devices."""
     LOG.audit(_("upgrading pci_devices table"))
     # FIX: removed the unused `meta = MetaData(bind=migrate_engine)` local.
     pci_devices = utils.get_table(migrate_engine, 'pci_devices')
     bus = Column('bus', String(2), nullable=False)
     slot = Column('slot', String(2), nullable=False)
     function = Column('function', String(2), nullable=False)
     workload = Column('workload', String(2), nullable=True)
     for new_column in (bus, slot, function, workload):
         pci_devices.create_column(new_column)
Beispiel #33
0
    def test_change_deleted_column_type_to_id_type_string(self):
        """'deleted' becomes a String when the id column is a String."""
        table_name = "test_change_deleted_column_type_to_id_type_string"
        for key, engine in self.engines.items():
            meta = MetaData()
            meta.bind = engine
            tbl = Table(table_name, meta,
                        Column("id", String(255), primary_key=True),
                        Column("deleted", Boolean))
            tbl.create()
            utils.change_deleted_column_type_to_id_type(engine, table_name)

            tbl = utils.get_table(engine, table_name)
            self.assertIsInstance(tbl.c.deleted.type, String)
            tbl.drop()
 def _pre_upgrade_002(self, engine):
     """Seed bm_deployments with one fake row; return it for later checks."""
     data = [{'id': 1,
              'key': 'fake-key',
              'image_path': '/dev/null',
              'pxe_config_path': '/dev/null/',
              'root_mb': 0,
              'swap_mb': 0}]
     deployments = db_utils.get_table(engine, 'bm_deployments')
     engine.execute(deployments.insert(), data)
     return data
Beispiel #35
0
 def _pre_upgrade_002(self, engine):
     """Insert a single bm_deployments fixture row and return it."""
     fixture = {
         'id': 1,
         'key': 'fake-key',
         'image_path': '/dev/null',
         'pxe_config_path': '/dev/null/',
         'root_mb': 0,
         'swap_mb': 0,
     }
     table = db_utils.get_table(engine, 'bm_deployments')
     engine.execute(table.insert(), [fixture])
     return [fixture]
def downgrade(migrate_engine):
    """Remove per-user quota support: drop the user_id columns and the
    project_user_quotas tables (main and shadow)."""
    for base_name in ('quota_usages', 'reservations'):
        for name in (base_name, db._SHADOW_TABLE_PREFIX + base_name):
            utils.get_table(migrate_engine, name).drop_column('user_id')

    project_user_quotas = utils.get_table(migrate_engine,
                                          'project_user_quotas')
    try:
        project_user_quotas.drop()
    except Exception:
        LOG.error(_("project_user_quotas table not dropped"))
        raise

    shadow_table_name = db._SHADOW_TABLE_PREFIX + 'project_user_quotas'
    shadow_table = utils.get_table(migrate_engine, shadow_table_name)
    try:
        shadow_table.drop()
    except Exception:
        LOG.error(_("%s table not dropped") % shadow_table_name)
        raise
Beispiel #37
0
def downgrade(migrate_engine):
    """Drop user_id columns and the project_user_quotas tables."""
    def _drop_user_id(name):
        # Helper: remove the user_id column from one table.
        utils.get_table(migrate_engine, name).drop_column('user_id')

    _drop_user_id('quota_usages')
    _drop_user_id(db._SHADOW_TABLE_PREFIX + 'quota_usages')
    _drop_user_id('reservations')
    _drop_user_id(db._SHADOW_TABLE_PREFIX + 'reservations')

    puq = utils.get_table(migrate_engine, 'project_user_quotas')
    try:
        puq.drop()
    except Exception:
        LOG.error(_("project_user_quotas table not dropped"))
        raise

    shadow_name = db._SHADOW_TABLE_PREFIX + 'project_user_quotas'
    shadow_puq = utils.get_table(migrate_engine, shadow_name)
    try:
        shadow_puq.drop()
    except Exception:
        LOG.error(_("%s table not dropped") % shadow_name)
        raise
    def test_change_deleted_column_type_to_id_type_integer(self):
        """A Boolean 'deleted' column becomes Integer to match an Integer id."""
        table_name = 'abc'
        for key, engine in self.engines.items():
            meta = MetaData()
            meta.bind = engine
            table = Table(table_name, meta,
                          Column('id', Integer, primary_key=True),
                          Column('deleted', Boolean))
            table.create()
            utils.change_deleted_column_type_to_id_type(engine, table_name)

            table = utils.get_table(engine, table_name)
            # assertIsInstance reports the actual type on failure, unlike
            # assertTrue(isinstance(...)).
            # NOTE(review): the sibling tests call table.drop() here --
            # confirm whether this variant relies on external teardown.
            self.assertIsInstance(table.c.deleted.type, Integer)
Beispiel #39
0
    def test_change_deleted_column_type_to_id_type_string(self):
        """A Boolean 'deleted' column becomes String to match a String id."""
        table_name = 'abc'
        for key, engine in self.engines.items():
            meta = MetaData()
            meta.bind = engine
            table = Table(table_name, meta,
                          Column('id', String(255), primary_key=True),
                          Column('deleted', Boolean))
            table.create()
            utils.change_deleted_column_type_to_id_type(engine, table_name)

            table = utils.get_table(engine, table_name)
            # assertIsInstance reports the actual type on failure, unlike
            # assertTrue(isinstance(...)).
            self.assertIsInstance(table.c.deleted.type, String)
 def _check_006(self, engine, data):
     """After migration 006 node 1 keeps two interfaces, node 2 keeps one."""
     ifs = db_utils.get_table(engine, 'bm_interfaces')

     node1 = ifs.select().where(ifs.c.bm_node_id == 1).execute().fetchall()
     self.assertEqual(len(node1), 2)

     node2 = ifs.select().where(ifs.c.bm_node_id == 2).execute().fetchall()
     self.assertEqual(len(node2), 1)
     self.assertEqual(node2[0]['address'], 'cc:cc:cc:cc:cc:cc')
def downgrade(migrate_engine):
    """Drop the pci_devices tables and the compute_nodes.pci_stats column."""
    meta = MetaData(bind=migrate_engine)

    try:
        for name in ('pci_devices',
                     api._SHADOW_TABLE_PREFIX + 'pci_devices'):
            Table(name, meta, autoload=True).drop()
    except Exception:
        LOG.exception(_("Exception while dropping 'pci_devices' tables."))
        raise

    try:
        for name in ('compute_nodes',
                     api._SHADOW_TABLE_PREFIX + 'compute_nodes'):
            utils.get_table(migrate_engine, name).drop_column('pci_stats')
    except Exception:
        LOG.exception(_("Exception for dropping pci stats from compute node."))
        raise
Beispiel #42
0
 def _check_006(self, engine, data):
     """Interface rows survive per migration 006's deduplication rules."""
     ifs = db_utils.get_table(engine, 'bm_interfaces')

     def fetch(node_id):
         # All bm_interfaces rows for one node.
         return (ifs.select()
                    .where(ifs.c.bm_node_id == node_id)
                    .execute()
                    .fetchall())

     self.assertEqual(len(fetch(1)), 2)
     node2_rows = fetch(2)
     self.assertEqual(len(node2_rows), 1)
     self.assertEqual(node2_rows[0]['address'], 'cc:cc:cc:cc:cc:cc')
Beispiel #43
0
    def test_change_deleted_column_type_to_boolean(self):
        """An Integer 'deleted' column becomes Boolean (TINYINT on MySQL)."""
        table_name = "test_change_deleted_column_type_to_boolean"
        for key, engine in self.engines.items():
            meta = MetaData()
            meta.bind = engine
            tbl = Table(table_name, meta,
                        Column("id", Integer, primary_key=True),
                        Column("deleted", Integer))
            tbl.create()

            utils.change_deleted_column_type_to_boolean(engine, table_name)

            tbl = utils.get_table(engine, table_name)
            # MySQL has no native BOOLEAN; it reflects as TINYINT.
            expected_type = mysql.TINYINT if key == "mysql" else Boolean
            self.assertIsInstance(tbl.c.deleted.type, expected_type)
            tbl.drop()
Beispiel #44
0
    def _check_227(self, engine, data):
        """The resource column can hold the longest quota resource name."""
        quotas_table = db_utils.get_table(engine, 'project_user_quotas')

        # Insert fake_quotas with the longest resource name.
        longest = 'injected_file_content_bytes'
        quotas_table.insert().execute({'id': 5,
                                       'project_id': 'fake_project',
                                       'user_id': 'fake_user',
                                       'resource': longest,
                                       'hard_limit': 10})

        # Check we can get the longest resource name.
        stored = quotas_table.select(
            quotas_table.c.id == 5).execute().first()
        self.assertEqual(stored['resource'], longest)
Beispiel #45
0
    def test_change_deleted_column_type_to_id_type_integer(self):
        """A Boolean 'deleted' column becomes Integer after conversion."""
        table_name = 'test_change_deleted_column_type_to_id_type_integer'
        for key, engine in self.engines.items():
            metadata = MetaData()
            metadata.bind = engine
            tbl = Table(table_name, metadata,
                        Column('id', Integer, primary_key=True),
                        Column('deleted', Boolean))
            tbl.create()
            utils.change_deleted_column_type_to_id_type(engine, table_name)

            tbl = utils.get_table(engine, table_name)
            self.assertIsInstance(tbl.c.deleted.type, Integer)
            tbl.drop()
Beispiel #46
0
    def _check_227(self, engine, data):
        """Round-trip the longest resource name through project_user_quotas."""
        table = db_utils.get_table(engine, 'project_user_quotas')

        # Insert fake_quotas with the longest resource name.
        fake_quotas = dict(id=5,
                           project_id='fake_project',
                           user_id='fake_user',
                           resource='injected_file_content_bytes',
                           hard_limit=10)
        table.insert().execute(fake_quotas)

        # Check we can get the longest resource name.
        fetched = table.select(table.c.id == 5).execute().first()
        self.assertEqual(fetched['resource'], 'injected_file_content_bytes')
Beispiel #47
0
    def test_change_deleted_column_type_to_boolean(self):
        """Converting an Integer 'deleted' column yields Boolean/TINYINT."""
        table_name = 'abc'
        for key, engine in self.engines.items():
            meta = MetaData()
            meta.bind = engine
            table = Table(table_name, meta,
                          Column('id', Integer, primary_key=True),
                          Column('deleted', Integer))
            table.create()

            utils.change_deleted_column_type_to_boolean(engine, table_name)

            table = utils.get_table(engine, table_name)
            expected_type = Boolean if key != "mysql" else mysql.TINYINT
            # assertIsInstance gives a clearer failure message than
            # assertTrue(isinstance(...)).
            self.assertIsInstance(table.c.deleted.type, expected_type)
def fuzzify(engine, config):
    """Do the actual fuzzification based on the loaded attributes of
       the models.

    :param engine: SQLAlchemy engine bound to the target database.
    :param config: mapping of model name -> mapping of column name to
        column type; each listed column of every row is overwritten with
        ``randomness()`` output.
    """
    Session = sessionmaker(bind=engine)
    session = Session()
    metadata = MetaData(bind=engine, reflect=True)
    # Relax FK constraints so fuzzed values cannot violate them mid-run.
    cascade_fkeys(metadata)

    for model_name, columns in config.items():
        table_name = getattr(models, model_name).__tablename__
        tables = [getattr(models, model_name)]
        if 'shadow_' + table_name in metadata.tables.keys():
            # NOTE(review): shadow tables are plain Table objects, not
            # mapped model classes -- setattr on their query rows may not
            # be persisted by the session; confirm against the mapper setup.
            tables.append(utils.get_table(engine, 'shadow_' + table_name))
        for table in tables:
            q = session.query(table)
            for row in q.all():
                # BUG FIX: ``columns`` is a mapping, so iterate .items();
                # iterating the mapping itself yields bare keys and the
                # 2-tuple unpack raises ValueError.
                for column, column_type in columns.items():
                    # Shadow tables can lag the model's columns, so guard
                    # against attributes missing on this particular row.
                    if hasattr(row, column):
                        setattr(row, column,
                                randomness(getattr(row, column), column_type))

    session.commit()
    cascade_fkeys(metadata, restore=True)
# Beispiel #49
# 0
def downgrade(migrate_engine):
    """Revert per-user quota support.

    Drops the user_id indexes (MySQL/PostgreSQL only), removes the
    user_id column from quota_usages/reservations and their shadow
    tables, then drops project_user_quotas and its shadow table.
    """
    quota_usages = utils.get_table(migrate_engine, 'quota_usages')
    reservations = utils.get_table(migrate_engine, 'reservations')

    # Indexes covering user_id must be removed before the column itself.
    if migrate_engine.name in ('mysql', 'postgresql'):
        for index in (Index('ix_quota_usages_user_id_deleted',
                            quota_usages.c.user_id, quota_usages.c.deleted),
                      Index('ix_reservations_user_id_deleted',
                            reservations.c.user_id, reservations.c.deleted)):
            index.drop(migrate_engine)

    # Drop user_id from each table and its shadow counterpart.
    for table, base_name in ((quota_usages, 'quota_usages'),
                             (reservations, 'reservations')):
        table.drop_column('user_id')
        shadow = utils.get_table(migrate_engine,
                                 db._SHADOW_TABLE_PREFIX + base_name)
        shadow.drop_column('user_id')

    pu_quotas = utils.get_table(migrate_engine, 'project_user_quotas')
    try:
        pu_quotas.drop()
    except Exception:
        LOG.error(_("project_user_quotas table not dropped"))
        raise

    shadow_table_name = db._SHADOW_TABLE_PREFIX + 'project_user_quotas'
    shadow_pu_quotas = utils.get_table(migrate_engine, shadow_table_name)
    try:
        shadow_pu_quotas.drop()
    except Exception:
        LOG.error(_("%s table not dropped") % shadow_table_name)
        raise
 def _post_downgrade_010(self, engine):
     """Rolling back migration 010 removes preserve_ephemeral."""
     columns = db_utils.get_table(engine, 'bm_nodes').columns
     self.assertNotIn('preserve_ephemeral', columns)
 def _post_downgrade_008(self, engine):
     """After downgrading 008, bm_pxe_ips must still exist.

     The lookup itself is the assertion: get_table raises if the table
     is missing, so no explicit assert is needed.
     """
     db_utils.get_table(engine, 'bm_pxe_ips')
    def _pre_upgrade_010(self, engine):
        """Seed a bm_nodes row before migration 010; return it for _check_010."""
        row = {'id': 10, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}
        nodes = db_utils.get_table(engine, 'bm_nodes')
        engine.execute(nodes.insert(), [row])
        return [row]
 def _check_007(self, engine, data):
     """Migration 007 must have dropped prov_mac_address from bm_nodes."""
     names = [col.name for col in
              db_utils.get_table(engine, 'bm_nodes').columns]
     self.assertNotIn(u'prov_mac_address', names)
# Beispiel #54
# 0
    def _pre_upgrade_010(self, engine):
        """Insert a known bm_nodes record ahead of migration 010."""
        seed = [{'id': 10, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}]
        table = db_utils.get_table(engine, 'bm_nodes')
        engine.execute(table.insert(), seed)
        # Returned so the matching _check_010 can verify the row survived.
        return seed
# Beispiel #55
# 0
 def _post_downgrade_010(self, engine):
     """preserve_ephemeral must be gone once 010 is rolled back."""
     table = db_utils.get_table(engine, 'bm_nodes')
     self.assertNotIn('preserve_ephemeral', table.columns)
 def _check_005(self, engine, data):
     """Migration 005 drops prov_vlan_id and registration_status."""
     names = [col.name for col in
              db_utils.get_table(engine, 'bm_nodes').columns]
     for dropped in (u'prov_vlan_id', u'registration_status'):
         self.assertNotIn(dropped, names)
 def _post_downgrade_004(self, engine):
     """Rolling back 004 removes instance_name from bm_nodes."""
     table = db_utils.get_table(engine, 'bm_nodes')
     names = [col.name for col in table.columns]
     self.assertNotIn(u'instance_name', names)