def setUp(self):
    """Stub the volume API so every action under test hits fakes.

    The original stubbed volume.API 'get' twice -- first with
    fake_volume_api, then with fake_volume_get -- so the first stub was
    dead code; it is dropped here.
    """
    super(VolumeActionsTest, self).setUp()
    self.UUID = utils.gen_uuid()
    # Every action method under test shares the generic fake.
    for _method in self._methods:
        self.stubs.Set(volume.API, _method, fake_volume_api)
    # 'get' needs the dedicated fake that returns a volume record.
    self.stubs.Set(volume.API, 'get', fake_volume_get)
def setUp(self):
    """Stub the volume API so every action under test hits fakes.

    The original stubbed volume.API 'get' twice -- first with
    fake_volume_api, then with fake_volume_get -- so the first stub was
    dead code; it is dropped here.
    """
    super(VolumeActionsTest, self).setUp()
    self.UUID = utils.gen_uuid()
    # Every action method under test shares the generic fake.
    for _method in self._methods:
        self.stubs.Set(volume.API, _method, fake_volume_api)
    # 'get' needs the dedicated fake that returns a volume record.
    self.stubs.Set(volume.API, 'get', fake_volume_get)
def snapshot_create(context, values):
    """Persist a new snapshot row, generating a uuid id if none was given."""
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())
    snapshot = models.Snapshot()
    snapshot.update(values)
    session = get_session()
    with session.begin():
        snapshot.save(session=session)
    return snapshot
def volume_create(context, values):
    """Insert a volume row (plus metadata refs) and return the model."""
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())
    vol_ref = models.Volume()
    vol_ref.update(values)
    session = get_session()
    with session.begin():
        vol_ref.save(session=session)
    return vol_ref
def create(self, context, metadata, data=None):
    """Store the image data and return the new image id.

    :param context: request context (unused by this fake).
    :param metadata: image metadata dict; mutated to carry the final 'id'.
    :param data: optional file-like object whose contents are stored.
    :raises: Duplicate if the image already exist.
    """
    # dict.get() evaluates its default eagerly, so the original always
    # generated a UUID even when the caller supplied an id.  Only
    # generate one when 'id' is genuinely absent.
    if "id" in metadata:
        image_id = str(metadata["id"])
    else:
        image_id = str(utils.gen_uuid())
    metadata["id"] = image_id
    if image_id in self.images:
        raise exception.Duplicate()
    self.images[image_id] = copy.deepcopy(metadata)
    if data:
        self._imagedata[image_id] = data.read()
    return self.images[image_id]
def create(self, context, metadata, data=None):
    """Store the image data and return the new image id.

    :param context: request context (unused by this fake).
    :param metadata: image metadata dict; mutated to carry the final 'id'.
    :param data: optional file-like object whose contents are stored.
    :raises: Duplicate if the image already exist.
    """
    # dict.get() evaluates its default eagerly, so the original always
    # generated a UUID even when the caller supplied an id.  Only
    # generate one when 'id' is genuinely absent.
    if 'id' in metadata:
        image_id = str(metadata['id'])
    else:
        image_id = str(utils.gen_uuid())
    metadata['id'] = image_id
    if image_id in self.images:
        raise exception.Duplicate()
    self.images[image_id] = copy.deepcopy(metadata)
    if data:
        self._imagedata[image_id] = data.read()
    return self.images[image_id]
def upgrade(migrate_engine):
    """Add a uuid column to instances and backfill every existing row."""
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.create_column(Column("uuid", String(36)))
    # Each pre-existing instance gets its own freshly generated uuid.
    for row in migrate_engine.execute(instances.select()):
        stmt = (instances.update()
                .where(instances.c.id == row[0])
                .values(uuid=str(utils.gen_uuid())))
        migrate_engine.execute(stmt)
def upgrade(migrate_engine):
    """Add a uuid column to networks and backfill every existing row."""
    meta = MetaData()
    meta.bind = migrate_engine
    networks = Table('networks', meta, autoload=True)
    networks.create_column(Column("uuid", String(36)))
    # Walk the existing rows and stamp each with a new uuid.
    for row in migrate_engine.execute(networks.select()):
        new_uuid = str(utils.gen_uuid())
        migrate_engine.execute(networks.update()
                               .where(networks.c.id == row[0])
                               .values(uuid=new_uuid))
def upgrade(migrate_engine):
    """Add a uuid column to virtual_interfaces and backfill all rows."""
    meta = MetaData()
    meta.bind = migrate_engine
    vifs = Table('virtual_interfaces', meta, autoload=True)
    vifs.create_column(Column('uuid', String(36)))
    # Generate and store a uuid for every existing interface row.
    for row in migrate_engine.execute(vifs.select()):
        update_stmt = (vifs.update()
                       .where(vifs.c.id == row[0])
                       .values(uuid=str(utils.gen_uuid())))
        migrate_engine.execute(update_stmt)
def volume_create(context, values):
    """Create a volume row with metadata and return the re-loaded model."""
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)
    if not values.get('id'):
        values['id'] = str(utils.gen_uuid())
    volume_ref = models.Volume()
    volume_ref.update(values)
    session = get_session()
    with session.begin():
        volume_ref.save(session=session)
    volume_ref.metadata = volume_metadata_get(context, volume_ref.id)
    # Re-read through the ORM so volume_metadata comes back eagerly loaded.
    result = (model_query(context, models.Volume, read_deleted="no")
              .options(joinedload('volume_metadata'))
              .filter_by(id=volume_ref['id'])
              .first())
    if not result:
        raise exception.VolumeNotFound(volume_id=volume_ref['id'])
    return result
def __init__(self):
    """Give this fake a transport and a unique identifier."""
    self.transport = FakeTransport()
    self.id = utils.gen_uuid()
def get_fake_uuid(token=0):
    """Return a stable fake uuid string for *token*, minting it on first use.

    :param token: hashable key identifying which cached uuid to return.
    """
    # `not token in ...` reads ambiguously; `not in` is the idiomatic
    # membership test (and what PEP 8 prescribes).
    if token not in FAKE_UUIDS:
        FAKE_UUIDS[token] = str(utils.gen_uuid())
    return FAKE_UUIDS[token]
def test_gen_valid_uuid(self):
    """gen_uuid() must produce a value the uuid-likeness check accepts."""
    generated = str(utils.gen_uuid())
    self.assertTrue(uuidutils.is_uuid_like(generated))
def test_gen_valid_uuid(self):
    """A freshly generated uuid should register as uuid-like."""
    candidate = str(utils.gen_uuid())
    self.assertUUIDLike(candidate, True)
def get_invalid_image(self):
    """Return a random uuid string that should match no real image."""
    missing_id = utils.gen_uuid()
    return str(missing_id)
def generate_request_id():
    """Build a per-request correlation id of the form 'req-<uuid>'."""
    return 'req-%s' % utils.gen_uuid()
def generate_request_id():
    """Return a unique request id, prefixed so it stands out in logs."""
    return 'req-{0}'.format(utils.gen_uuid())
def get_invalid_image(self):
    """Produce an image id that cannot correspond to any stored image."""
    bogus_uuid = utils.gen_uuid()
    return str(bogus_uuid)
def test_gen_valid_uuid(self):
    """Verify gen_uuid() output passes the custom uuid-likeness assertion."""
    value = str(utils.gen_uuid())
    self.assertUUIDLike(value, True)
def upgrade(migrate_engine):
    """Build mapping tables for our volume uuid migration.

    These mapping tables serve two purposes:

        1. Provide a method for downgrade after UUID conversion
        2. Provide a uuid to associate with existing volumes and snapshots
           when we do the actual datatype migration from int to uuid
    """
    meta = MetaData()
    meta.bind = migrate_engine

    volume_id_mappings = Table('volume_id_mappings', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False,
                   autoincrement=True),
            Column('uuid', String(36), nullable=False))
    try:
        volume_id_mappings.create()
    except Exception:
        LOG.exception("Exception while creating table 'volume_id_mappings'")
        meta.drop_all(tables=[volume_id_mappings])
        raise

    snapshot_id_mappings = Table('snapshot_id_mappings', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False,
                   autoincrement=True),
            Column('uuid', String(36), nullable=False))
    try:
        snapshot_id_mappings.create()
    except Exception:
        LOG.exception("Exception while creating table 'snapshot_id_mappings'")
        meta.drop_all(tables=[snapshot_id_mappings])
        raise

    if migrate_engine.name == "mysql":
        migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB")
        migrate_engine.execute("ALTER TABLE snapshot_id_mappings "
                               "Engine=InnoDB")

    volumes = Table('volumes', meta, autoload=True)
    snapshots = Table('snapshots', meta, autoload=True)
    volume_id_mappings = Table('volume_id_mappings', meta, autoload=True)
    snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True)

    # Record a freshly generated uuid for every existing volume and
    # snapshot row; the old integer id keys the mapping.
    for source, mapping in ((volumes, volume_id_mappings),
                            (snapshots, snapshot_id_mappings)):
        for row in source.select().execute():
            mapping.insert().execute({'id': row['id'],
                                      'uuid': str(utils.gen_uuid())})
def get_fake_uuid(token=0):
    """Return a stable fake uuid string for *token*, minting it on first use.

    :param token: hashable key identifying which cached uuid to return.
    """
    # `not token in ...` reads ambiguously; `not in` is the idiomatic
    # membership test (and what PEP 8 prescribes).
    if token not in FAKE_UUIDS:
        FAKE_UUIDS[token] = str(utils.gen_uuid())
    return FAKE_UUIDS[token]
def setUp(self):
    """Stub volume lookups with fakes and pick a uuid for the tests."""
    super(VolumeHostAttributeTest, self).setUp()
    self.UUID = utils.gen_uuid()
    # Both read paths are replaced by canned fakes.
    for method, stub in (('get', fake_volume_get),
                         ('get_all', fake_volume_get_all)):
        self.stubs.Set(volume.API, method, stub)