def test_absent_reference_equals4(self):
    """Objects must compare equal even when their references use
    different ids, as long as the referenced objects are equivalent."""
    with model.Session():
        ExampleReferenced.create_object('test_cloud1',
                                        'example_referenced_id')
        ExampleReferenced.create_object('test_cloud2',
                                        'other_referenced_id')
        left = ExampleRef.create_object('test_cloud1', 'example_ref_id',
                                        'example_referenced_id')
        right = ExampleRef.create_object('test_cloud2', 'example_ref_id',
                                         'other_referenced_id')
        # The two referenced objects are equivalent, so the referring
        # objects must be considered equal as well.
        self.assertTrue(left.equals(right))
def discover_one(self, uuid):
    """Discover a single glance image by UUID and cache it locally.

    Raises discover.NotFound when glance reports no such image.
    """
    glance = clients.image_client(self.cloud)
    try:
        raw = self.retry(glance.images.get, uuid,
                         expected_exceptions=[glance_exc.HTTPNotFound])
        image_obj = self.load_from_cloud(raw)
        with model.Session() as session:
            session.store(image_obj)
            self._populate_members(image_obj, glance)
        return image_obj
    except glance_exc.HTTPNotFound:
        raise discover.NotFound()
def discover_all(self):
    """Discover every volume in the cloud and persist the valid ones.

    Volumes that fail model validation are logged and skipped.
    """
    cinder = clients.volume_client(self.cloud)
    raw_volumes = self.retry(cinder.volumes.list,
                             search_opts={'all_tenants': True},
                             returns_iterable=True)
    loaded = []
    for raw in raw_volumes:
        try:
            loaded.append(self.load_from_cloud(raw))
        except model.ValidationError as e:
            LOG.warning('Invalid volume %s in cloud %s: %s',
                        raw.id, self.cloud.name, e)
    # Store everything in one session after all conversions succeeded.
    with model.Session() as session:
        for volume in loaded:
            session.store(volume)
def run(self, cfg, migration):
    """Delete the temporary network and subnet, both from the local
    database and from neutron. A missing network is not an error."""
    cloud = cfg.clouds[getattr(migration, self.location)]
    neutron = clients.network_client(cloud)
    try:
        with model.Session() as session:
            session.delete(
                network.Network,
                object_id=model.ObjectId(self.network_id, cloud.name))
            session.delete(
                network.Subnet,
                object_id=model.ObjectId(self.subnet_id, cloud.name))
        clients.retry(neutron.delete_network, self.network_id,
                      expected_exceptions=[neutron_exceptions.NotFound])
    except neutron_exceptions.NotFound:
        # Already gone on the cloud side - nothing left to clean up.
        pass
def discover_all(self):
    """Discover all servers in the cloud and store them locally.

    Collects servers tenant by tenant, gathers ephemeral disk
    information from each hypervisor host over SSH, then stores the
    servers (and their image membership, when needed) in the local
    database.
    """
    compute_client = clients.compute_client(self.cloud)
    avail_hosts = self._list_available_compute_hosts(compute_client)
    # Mapping of hypervisor hostname -> list of servers on that host.
    servers = {}
    # Go through each tenant since nova don't return more items than
    # specified in osapi_max_limit configuration option (1000 by default)
    # in single API call
    for tenant in self._get_tenants():
        LOG.debug('Discovering servers from cloud "%s" tenant "%s"',
                  self.cloud.name, tenant.name)
        tenant_id = tenant.id
        raw_server_list = self.retry(compute_client.servers.list,
                                     search_opts={
                                         'all_tenants': True,
                                         'tenant_id': tenant_id,
                                     },
                                     returns_iterable=True)
        for raw_server in raw_server_list:
            host = getattr(raw_server, EXT_ATTR_HOST)
            if host not in avail_hosts:
                LOG.warning(
                    'Skipping server %s in tenant %s, host not '
                    'available.', host, tenant.name)
                continue
            # Convert server data to model conforming format
            server = self.load_from_cloud(raw_server)
            hyper_host = getattr(raw_server, EXT_ATTR_HYPER_HOST)
            servers.setdefault(hyper_host, []).append(server)
    # Collect information about ephemeral disks
    # TODO: work with different servers in parallel
    # Iterate the dict views directly; nothing mutates ``servers`` here,
    # so there is no need to materialize them with list().
    for host, host_servers in servers.items():
        LOG.debug(
            'Getting ephemeral disks information from cloud %s '
            'host %s', self.cloud.name, host)
        with remote.RemoteExecutor(self.cloud, host) as remote_executor:
            for server in host_servers:
                _populate_ephemeral_disks(remote_executor, server)
    # Store data to local database
    with model.Session() as session:
        for host_servers in servers.values():
            for server in host_servers:
                session.store(server)
                if _need_image_membership(server):
                    image_member_uuid = image.ImageMember.make_uuid(
                        server.image, server.tenant)
                    server.image_membership = self.find_obj(
                        image.ImageMember, image_member_uuid)
def signature(self):
    """
    Return list of all IDs for each migration
    """
    result = {}
    with model.Session() as session:
        for name, migration in self.config.migrations.items():
            src_cloud = self.config.clouds[migration.source]
            matches = migration.query.search(session, src_cloud)
            # Flatten dependencies so the signature covers every object
            # the migration would touch, not only the top-level matches.
            result[name] = sorted(
                obj.primary_key.id
                for obj in model.flatten_dependencies(matches))
    return result
def take_action(self, parsed_args):
    """Run the link stage, build the migration flow and execute it."""
    super(Migrate, self).take_action(parsed_args)
    stage.execute_stage('cloudferry.lib.os.discovery.stages.LinkStage',
                        self.config)
    with model.Session() as session:
        migration = self.config.migrations[parsed_args.migration]
        src_cloud = self.config.clouds[migration.source]
        objects = migration.query.search(session, src_cloud)
        graph = taskflow_utils.create_graph_flow(
            parsed_args.migration, objects,
            migrate_base.create_migration_flow, self.config, migration)
    # Execute outside the session: flow tasks manage their own sessions.
    taskflow_utils.execute_flow(graph)
def discover_one(self, uuid):
    """Discover one volume attachment; uuid is '<server_id>:<volume_id>'.

    Raises discover.NotFound when nova reports no such attachment.
    """
    server_id, volume_id = uuid.split(':')
    nova = clients.compute_client(self.cloud)
    try:
        raw = self.retry(nova.volumes.get_server_volume,
                         server_id, volume_id,
                         expected_exceptions=[nova_exceptions.NotFound])
        attachment = self.load_from_cloud(raw)
        with model.Session() as session:
            session.store(attachment)
        return attachment
    except nova_exceptions.NotFound:
        raise discover.NotFound()
def test_simple_query3(self):
    """A query with several filter dicts matches the union of them."""
    q = query.Query({CLASS_FQN: [
        {'field1': ['a']},
        {'field2': ['b']},
    ]})
    with model.Session() as session:
        found = sorted(q.search(session), key=lambda o: o.object_id.id)
        self.assertEqual(3, len(found))
        # Expect exactly these objects, in id order.
        for obj, expected_id in zip(found, ('id1', 'id2', 'id4')):
            self.assertEqual(obj.object_id.id, expected_id)
def test_example_name_ref(self):
    """Dependencies declared by qualified class name resolve correctly."""
    class ExampleNameRef(model.Model):
        object_id = model.PrimaryKey()
        ref = model.Dependency(Example.get_class_qualname())

    with model.Session() as session:
        stored = Example.load(Example.generate_data('foo-bar-baz'))
        session.store(stored)
        loaded = ExampleNameRef.load({
            'object_id': self._make_id(ExampleNameRef, 'ExampleNameRef-1'),
            'ref': self._make_id(Example, 'foo-bar-baz'),
        })
        # The string-named dependency must resolve to the Example class.
        self.assertIs(Example, loaded.ref.get_class())
def migrate(self, source_obj, dst_object, need_restore_deleted,
            *args, **kwargs):
    """Restore a deleted image by re-uploading it from a server's boot
    disk; no-op unless restoration was requested."""
    if not need_restore_deleted:
        return
    dst_image_id = dst_object.object_id.id
    with model.Session() as session:
        disk_locations = self._get_boot_disk_locations(session, source_obj)
    # Try each candidate boot disk until one upload succeeds.
    for disk_info in disk_locations:
        if self.upload_server_image(disk_info, dst_image_id, source_obj):
            return
    raise base.AbortMigration(
        'Unable to restore deleted image %s: no servers found',
        dst_image_id)
def execute(self):
    """
    Execute migrated objects search.
    """
    with model.Session() as session:
        for migration in self.config.migrations.values():
            src_cloud = self.config.clouds[migration.source]
            dst_cloud = self.config.clouds[migration.destination]
            found = migration.query.search(session, src_cloud)
            for src_obj in model.flatten_dependencies(found):
                # Link the source object to the first equivalent object
                # found on the destination cloud.
                for dst_obj in session.list(src_obj.get_class(),
                                            dst_cloud):
                    if src_obj.equals(dst_obj):
                        src_obj.link_to(dst_obj)
                        break
def discover_all(self):
    """Discover volume attachments for every volume and persist them.

    Attachments that fail model validation are logged and skipped.
    """
    cinder = clients.volume_client(self.cloud)
    raw_volumes = self.retry(cinder.volumes.list,
                             search_opts={'all_tenants': True},
                             returns_iterable=True)
    loaded = []
    for raw_volume in raw_volumes:
        for raw_attachment in raw_volume.attachments:
            try:
                loaded.append(self.load_from_cloud(raw_attachment))
            except model.ValidationError as e:
                LOG.warning('Invalid attachment %s in cloud %s: %s',
                            raw_attachment['id'], self.cloud.name, e)
    with model.Session() as session:
        for attachment in loaded:
            session.store(attachment)
def migrate(self, *args, **kwargs):
    """Ensure a supported cirros image is available for the transfer VM.

    Reuses an existing image when one is found, otherwise uploads one;
    returns the image id plus a destructor that removes it afterwards.
    """
    with model.Session() as session:
        image_id = self._find_supported_cirros_image(session)
        if image_id is None:
            try:
                uploaded = self._upload_cirros_image(session)
            except clients.Timeout:
                raise base.AbortMigration(
                    'Failed to upload transfer VM image')
            session.store(self.load_from_cloud(image.Image, self.cloud,
                                               uploaded))
            image_id = uploaded.id
        self.destructor = ImageDestructor(self.location, image_id)
        return {
            self.var_name: image_id,
            self.destructor_var: self.destructor,
        }
def discover_all(self):
    """Discover all active glance images, store them, then fill in
    image membership information.

    Images that fail model validation are logged and skipped.
    """
    glance = clients.image_client(self.cloud)
    raw_images = self.retry(glance.images.list,
                            filters={
                                'is_public': None,
                                'status': 'active'
                            },
                            returns_iterable=True)
    loaded = []
    for raw in raw_images:
        try:
            loaded.append(self.load_from_cloud(raw))
        except model.ValidationError as e:
            LOG.warning('Invalid image %s in cloud %s: %s',
                        raw.id, self.cloud.name, e)
    with model.Session() as session:
        # Store every image first so member records can reference them.
        for img in loaded:
            session.store(img)
        for img in loaded:
            self._populate_members(img, glance)
def discover_one(self, uuid):
    """Discover a single compute node; ``uuid`` is the node hostname.

    Connects to the node over SSH to read its network interfaces.  If
    the remote command fails the node is still stored, with an empty
    interface mapping (best effort).
    """
    hostname = uuid
    with remote.RemoteExecutor(self.cloud, hostname) as remote_executor:
        try:
            ip_addr_output = remote_executor.sudo('ip addr show')
            interfaces = _parse_interfaces(ip_addr_output)
        except remote.RemoteFailure:
            # LOG.warn is a deprecated alias of LOG.warning; details with
            # the traceback go to debug to keep the warning concise.
            LOG.warning('Unable to get network interfaces of node: %s',
                        hostname)
            LOG.debug('Unable to get network interfaces of node: %s',
                      hostname, exc_info=True)
            interfaces = {}
    # Store server
    with model.Session() as session:
        compute_node = self.load_from_cloud({
            'hostname': hostname,
            'interfaces': interfaces,
        })
        session.store(compute_node)
        return compute_node
def __init__(self, cfg, migration, obj, location):
    """Initialize the task and look up the admin tenant id.

    The object tenant id is not known yet; it is filled in later.
    """
    super(SetUnlimitedQuotas, self).__init__(cfg, migration, obj,
                                             location)
    self.obj_tenant_id = None
    with model.Session() as session:
        self.admin_tenant_id = _get_admin_tenant_id(self.cloud, session)
def test_jmespath_query(self):
    """A raw JMESPath filter expression selects the matching object."""
    expression = '[? field1 == `b` && field2 == `a` ]'
    q = query.Query({CLASS_FQN: [expression]})
    with model.Session() as session:
        matches = sorted(q.search(session), key=lambda o: o.object_id.id)
        self.assertEqual(1, len(matches))
        self.assertEqual(matches[0].object_id.id, 'id3')
def discover_all(self):
    """Discover user/tenant role assignments straight from the keystone
    database and store them locally."""
    with cloud_db.connection(self.cloud.keystone_db) as ks_db, \
            model.Session() as session:
        for user_id, tenant_id, role_id in self._iterate_roles(ks_db):
            raw = self._make_obj(user_id, tenant_id, role_id)
            # no_check: role rows come from the DB, skip re-validation.
            session.store(self.load_from_cloud(raw, no_check=True))
def discover_all(self):
    """Fetch every object from the manager and store it in the local
    database."""
    raw_objs = self.retry(self._manager.list, returns_iterable=True)
    with model.Session() as session:
        for raw in raw_objs:
            obj = self.load_from_cloud(raw)
            session.store(obj)