def test_many_nested(self):
    """Nested fields with many=True survive a store/retrieve round-trip."""

    class ExampleMany(model.Model):
        class Schema(model.Schema):
            object_id = model.PrimaryKey()
            many = model.Nested(Simple, many=True)

    loaded = ExampleMany.load_from_cloud(self.cloud, {
        'object_id': 'foo',
        'many': [
            {'foo': 'foo'},
            {'foo': 'bar'},
            {'foo': 'baz'},
        ],
    })
    expected_values = ('foo', 'bar', 'baz')
    for position, expected in enumerate(expected_values):
        self.assertEqual(expected, loaded.many[position].foo)
    with model.Session() as session:
        session.store(loaded)
    with model.Session() as session:
        obj = session.retrieve(
            ExampleMany, model.ObjectId('foo', 'test_cloud'))
        for position, expected in enumerate(expected_values):
            self.assertEqual(expected, obj.many[position].foo)
def invalidate(self, old_signature, new_signature, force=False):
    """
    Remove data related to any cloud that changed signature.
    """
    if force:
        # Forced invalidation wipes everything unconditionally.
        with model.Session() as session:
            session.delete()
        return
    self.missing_clouds = []
    # Clouds that disappeared from the configuration: their data is stale.
    stale_clouds = set(old_signature.keys()).difference(new_signature.keys())
    for name, signature in new_signature.items():
        if name not in old_signature:
            # Brand new cloud: nothing cached yet, needs discovery.
            self.missing_clouds.append(name)
        elif old_signature[name] != signature:
            # Signature changed: cached data is invalid and must be redone.
            self.missing_clouds.append(name)
            stale_clouds.add(name)
    with model.Session() as session:
        for cloud in stale_clouds:
            session.delete(cloud=cloud)
def test_store_list(self):
    """session.list() returns a stored object both before and after commit."""
    stored = Example.load_from_cloud(self.cloud, Example.generate_cloud_data())
    object_id = stored.object_id
    with model.Session() as session:
        session.store(stored)
        # Validate retrieve working before commit
        self._validate_example_obj(object_id, session.list(Example)[0])
    with model.Session() as session:
        # Validate retrieve working after commit
        self._validate_example_obj(object_id, session.list(Example)[0])
def test_load_store(self):
    """Mutations on a retrieved object are persisted across sessions."""
    source = Example.load_from_cloud(self.cloud, Example.generate_cloud_data())
    object_id = source.object_id
    with model.Session() as session:
        session.store(source)
    with model.Session() as session:
        retrieved = session.retrieve(Example, object_id)
        self._validate_example_obj(object_id, retrieved)
        # Mutate both a nested field and a plain field.
        retrieved.baz.foo = 'changed'
        retrieved.bar = 'changed too'
    with model.Session() as session:
        reloaded = session.retrieve(Example, object_id)
        self.assertEqual('changed', reloaded.baz.foo)
        self.assertEqual('changed too', reloaded.bar)
def setUp(self):
    """Create and store four objects covering every field1/field2 pair."""
    super(StageTestCase, self).setUp()
    self.cloud = mock.MagicMock()
    self.cloud.name = 'test_cloud'
    # (id, field1, field2) for each of the four combinations of 'a'/'b'.
    specs = [
        ('id1', 'a', 'a'),
        ('id2', 'a', 'b'),
        ('id3', 'b', 'a'),
        ('id4', 'b', 'b'),
    ]
    created = []
    for obj_id, field1, field2 in specs:
        created.append(TestMode.load_from_cloud(self.cloud, {
            'id': obj_id,
            'field1': field1,
            'field2': field2,
        }))
    self.obj1, self.obj2, self.obj3, self.obj4 = created
    with model.Session() as s:
        for obj in created:
            s.store(obj)
def show_largest_servers(cfg, count, migration_name): def server_size(server): size = 0 if server.image is not None: size += server.image.size for ephemeral_disk in server.ephemeral_disks: size += ephemeral_disk.size for volume in server.attached_volumes: size += volume.size return size output = [] migration = cfg.migrations[migration_name] with model.Session() as session: for index, server in enumerate( heapq.nlargest( count, migration.query.search(session, migration.source, nova.Server), key=server_size), start=1): output.append( ' {0}. {1.object_id.id} {1.name} - {2}'.format( index, server, sizeof_format.sizeof_fmt(server_size(server)))) if output: print '\n{0} largest servers:'.format(len(output)) for line in output: print line
def discover(cls, cloud):
    """Fetch all volumes of ``cloud`` (all tenants) and store them locally."""
    raw_volumes = cloud.volume_client().volumes.list(
        search_opts={'all_tenants': True})
    with model.Session() as session:
        for raw_volume in raw_volumes:
            session.store(Volume.load_from_cloud(cloud, raw_volume))
def test_jmespath_query(self):
    """A JMESPath filter expression selects exactly the matching object."""
    q = query.Query({
        CLASS_FQN: ['[? field1 == `b` && field2 == `a` ]'],
    })
    with model.Session() as session:
        matches = sorted(q.search(session), key=lambda o: o.object_id.id)
        self.assertEqual(1, len(matches))
        self.assertEqual(matches[0].object_id.id, 'id3')
def test_nested_sessions(self):
    """Objects stored in nested sessions are visible in both the inner
    session and in a fresh session opened after everything committed.
    """
    data = Example.generate_cloud_data()
    orig_obj1 = Example.load_from_cloud(self.cloud, data)
    object1_id = orig_obj1.object_id
    orig_obj2 = Example.load_from_cloud(self.cloud2, data)
    object2_id = orig_obj2.object_id
    with model.Session() as s1:
        s1.store(orig_obj1)
        with model.Session() as s2:
            s2.store(orig_obj2)
            # The nested session must see objects from both sessions.
            self._validate_example_obj(
                object1_id, s2.retrieve(Example, object1_id))
            self._validate_example_obj(
                object2_id, s2.retrieve(Example, object2_id))
    with model.Session() as s:
        self._validate_example_obj(
            object1_id, s.retrieve(Example, object1_id))
        # Bug fix: the original retrieved object2 through ``s2``, whose
        # context manager already exited; use the live session instead.
        self._validate_example_obj(
            object2_id, s.retrieve(Example, object2_id))
def show_largest_unused_resources(count, cloud_name, tenant): with model.Session() as session: used_volumes = set() used_images = set() servers = list_filtered(session, nova.Server, cloud_name, tenant) for server in servers: if server.image is not None: used_images.add(server.image.object_id) for volume in server.attached_volumes: used_volumes.add(volume.object_id) # Find unused volumes volumes_output = [] volumes_size = 0 volumes = list_filtered(session, cinder.Volume, cloud_name, tenant) for index, volume in enumerate( heapq.nlargest(count, (v for v in volumes if v.object_id not in used_volumes), key=lambda v: v.size), start=1): volumes_size += volume.size size = sizeof_format.sizeof_fmt(volume.size, 'G') volumes_output.append( ' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format( index, volume, size)) # Find unused images images_output = [] images_size = 0 images = list_filtered(session, glance.Image, cloud_name, tenant) for index, image in enumerate( heapq.nlargest(count, (i for i in images if i.object_id not in used_images), key=lambda i: i.size), start=1): images_size += image.size size = sizeof_format.sizeof_fmt(image.size) images_output.append( ' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format( index, image, size)) # Output result if volumes_output: print '\n{0} largest unused volumes:'.format(len(volumes_output)) for line in volumes_output: print line print ' Total:', sizeof_format.sizeof_fmt(volumes_size, 'G') if images_output: print '\n{0} largest unused images:'.format(len(images_output)) for line in images_output: print line print ' Total:', sizeof_format.sizeof_fmt(images_size)
def test_store_list_cloud(self):
    """session.list(cls, cloud) filters stored objects by cloud name."""
    data = Example.generate_cloud_data()
    first = Example.load_from_cloud(self.cloud, data)
    second = Example.load_from_cloud(self.cloud2, data)
    first_id = first.object_id
    second_id = second.object_id
    with model.Session() as session:
        session.store(first)
        session.store(second)
        # Per-cloud listing must already work before the commit.
        self._validate_example_obj(
            first_id, session.list(Example, 'test_cloud')[0])
        self._validate_example_obj(
            second_id, session.list(Example, 'test_cloud2')[0])
    # ...and in fresh sessions after the commit.
    with model.Session() as session:
        self._validate_example_obj(
            first_id, session.list(Example, 'test_cloud')[0])
    with model.Session() as session:
        self._validate_example_obj(
            second_id, session.list(Example, 'test_cloud2')[0])
def test_simple_query2(self):
    """A multi-field equality query returns only the fully matching object."""
    q = query.Query({
        CLASS_FQN: [
            {
                'field1': ['b'],
                'field2': ['b'],
            },
        ],
    })
    with model.Session() as session:
        matches = sorted(q.search(session), key=lambda o: o.object_id.id)
        self.assertEqual(1, len(matches))
        self.assertEqual(matches[0].object_id.id, 'id4')
def test_nested_sessions_save_updates_after_nested(self):
    """An object updated after the nested session closed (but while the
    outer session is still open) is persisted with the updated value.
    """
    data = Example.generate_cloud_data()
    orig_obj1 = Example.load_from_cloud(self.cloud, data)
    object1_id = orig_obj1.object_id
    orig_obj2 = Example.load_from_cloud(self.cloud2, data)
    object2_id = orig_obj2.object_id
    with model.Session() as s1:
        s1.store(orig_obj1)
        with model.Session() as s2:
            s2.store(orig_obj2)
            self._validate_example_obj(
                object1_id, s2.retrieve(Example, object1_id))
            self._validate_example_obj(
                object2_id, s2.retrieve(Example, object2_id))
        # Update after the nested session exited; the outer session must
        # still persist this change when it commits.
        orig_obj1.bar = 'some other non-random string'
    with model.Session() as s:
        self._validate_example_obj(
            object1_id, s.retrieve(Example, object1_id),
            bar_value='some other non-random string')
        # Bug fix: the original retrieved object2 through ``s2``, whose
        # context manager already exited; use the live session instead.
        self._validate_example_obj(
            object2_id, s.retrieve(Example, object2_id))
def discover(cls, cloud):
    """Discover all servers of ``cloud`` and store them in the local DB.

    Servers are fetched tenant by tenant through the compute API; servers
    whose host is not in the available-host list are skipped. Ephemeral
    disk information is then collected per hypervisor host through a
    remote executor (SSH per the comment below) before storing.
    """
    compute_client = cloud.compute_client()
    avail_hosts = list_available_compute_hosts(compute_client)
    with model.Session() as session:
        servers = []
        # Collect servers using API
        for tenant in session.list(keystone.Tenant, cloud.name):
            server_list = compute_client.servers.list(
                search_opts={
                    'all_tenants': True,
                    'tenant_id': tenant.object_id.id,
                })
            for raw_server in server_list:
                host = getattr(raw_server, HOST)
                if host not in avail_hosts:
                    LOG.warning(
                        'Skipping server %s, host not available.', host)
                    continue
                # Workaround for grizzly lacking os-extended-volumes
                overrides = {}
                if not hasattr(raw_server, VOLUMES_ATTACHED):
                    # Fetch attached volume ids with an extra API call
                    # since the attribute is missing on this release.
                    overrides['attached_volumes'] = [
                        volume.id
                        for volume in compute_client.volumes.
                        get_server_volumes(raw_server.id)
                    ]
                try:
                    srv = Server.load_from_cloud(
                        cloud, raw_server, overrides)
                    if srv.image and srv.image.tenant != srv.tenant:
                        # Image shared from another tenant: record the
                        # membership so it can be recreated later.
                        srv.image_membership = glance.ImageMember.get(
                            cloud, srv.image.object_id.id,
                            srv.tenant.object_id.id)
                    servers.append(srv)
                    LOG.debug('Discovered: %s', srv)
                except marshmallow_exc.ValidationError as e:
                    # Skip servers that fail schema validation.
                    LOG.warning(
                        'Server %s ignored: %s', raw_server.id, e)
                    continue
        # Discover ephemeral volume info using SSH
        # NOTE: itertools.groupby needs input sorted by the same key.
        servers.sort(key=lambda s: s.host)
        for host, host_servers in itertools.groupby(
                servers, key=lambda s: s.host):
            with cloud.remote_executor(
                    host, ignore_errors=True) as remote:
                for srv in host_servers:
                    ephemeral_disks = _list_ephemeral(remote, srv)
                    if ephemeral_disks is not None:
                        srv.ephemeral_disks = ephemeral_disks
                    session.store(srv)
def discover(cls, cloud):
    """Store all images of ``cloud`` together with their member records."""
    image_client = cloud.image_client()
    with model.Session() as session:
        # is_public=None: presumably disables the public/private filter
        # so both kinds of images are listed — verify against glance API.
        raw_images = image_client.images.list(filters={"is_public": None})
        for raw_image in raw_images:
            try:
                image = Image.load_from_cloud(cloud, raw_image)
                session.store(image)
                for raw_member in image_client.image_members.list(
                        image=raw_image):
                    member = ImageMember.load_from_cloud(cloud, raw_member)
                    session.store(member)
                    image.members.append(member)
            except exceptions.ValidationError as e:
                # Skip images that fail schema validation.
                LOG.warning('Invalid image %s: %s', raw_image.id, e)
def estimate_copy(cfg, migration_name): migration = cfg.migrations[migration_name] query = migration.query src_cloud = migration.source with model.Session() as session: total_ephemeral_size = 0 total_volume_size = 0 total_image_size = 0 accounted_volumes = set() accounted_images = set() for server in query.search(session, src_cloud, nova.Server): for ephemeral_disk in server.ephemeral_disks: total_ephemeral_size += ephemeral_disk.size if server.image is not None \ and server.image.object_id not in accounted_images: total_image_size += server.image.size accounted_images.add(server.image.object_id) for volume in server.attached_volumes: if volume.object_id not in accounted_volumes: total_volume_size += volume.size accounted_volumes.add(volume.object_id) for volume in query.search(session, src_cloud, cinder.Volume): if volume.object_id not in accounted_volumes: total_volume_size += volume.size for image in query.search(session, src_cloud, glance.Image): if image.object_id not in accounted_images: total_image_size += image.size print 'Migration', migration_name, 'estimates:' print 'Images:' print ' Size:', sizeof_format.sizeof_fmt(total_image_size) print 'Ephemeral disks:' print ' Size:', sizeof_format.sizeof_fmt(total_ephemeral_size) print 'Volumes:' print ' Size:', sizeof_format.sizeof_fmt(total_volume_size, 'G')
def discover(cls, cloud):
    """Store every keystone tenant of ``cloud`` in the local database."""
    identity_client = cloud.identity_client()
    with model.Session() as session:
        for raw_tenant in identity_client.tenants.list():
            session.store(Tenant.load_from_cloud(cloud, raw_tenant))