def make_resource_config(config, position, cloud_config, resource_name):
    """Return a copy of ``cloud_config`` extended with a resource section.

    :param config: global configuration object; must expose an attribute
        named '<position>_<resource_name>' (e.g. 'src_storage').
    :param position: cloud position selector (e.g. 'src' or 'dst').
    :param cloud_config: base per-cloud config (as built by
        make_cloud_config); deep-copied, the caller's dict is untouched.
    :param resource_name: name of the resource section to add.
    :return: new config dict with the extra ``resource_name`` section.
    """
    resource_config = copy.deepcopy(cloud_config)
    resource_config[resource_name] = utils.ext_dict()
    # Bulk update instead of the original per-key iteritems() loop --
    # same result, and consistent with the dict.update() style used by
    # the other make_cloud_config variant in this file.
    resource_config[resource_name].update(
        getattr(config, '%s_%s' % (position, resource_name)))
    return resource_config
def make_cloud_config(config, position):
    """Build the per-cloud configuration mapping for ``position``.

    :param config: global configuration object exposing ``migrate``,
        ``import_rules``, ``mail``, ``snapshot`` sections plus
        position-specific attributes (``<position>``, ``<position>_mysql``).
    :param position: cloud position selector (e.g. 'src' or 'dst').
    :return: ext_dict with 'migrate', 'cloud', 'import_rules', 'mail',
        'snapshot' and 'mysql' sections filled in.
    """
    cloud_config = utils.ext_dict(migrate=utils.ext_dict(),
                                  cloud=utils.ext_dict(),
                                  import_rules=utils.ext_dict(),
                                  mail=utils.ext_dict(),
                                  snapshot=utils.ext_dict(),
                                  mysql=utils.ext_dict())
    # Bulk dict.update() calls replace the original per-key iteritems()
    # loops -- identical result, consistent with the other variant of
    # this function in the file (which already used update()).
    cloud_config['migrate'].update(config.migrate)
    cloud_config['cloud'].update(getattr(config, position))
    cloud_config['import_rules'].update(config.import_rules)
    cloud_config['mail'].update(config.mail)
    cloud_config['mysql'].update(getattr(config, position + '_mysql'))
    cloud_config['snapshot'].update(config.snapshot)
    return cloud_config
def setUp(self):
    """Prepare mocked clouds and config for the GetInfoImages tests."""
    super(GetInfoImagesTestCase, self).setUp()
    # Canned payload the mocked image resource returns from read_info().
    self.fake_info = {'images': {'fake_image_id': {'image': 'image_body',
                                                   'meta': {}}}}
    self.fake_image = mock.Mock()
    self.fake_image.read_info.return_value = self.fake_info
    self.fake_src_cloud = mock.Mock()
    self.fake_dst_cloud = mock.Mock()
    self.fake_config = utils.ext_dict(migrate=utils.ext_dict(
        {'ignore_empty_images': False}))
    self.fake_src_cloud.resources = {'image': self.fake_image}
    # Constructor arguments for the action under test.
    self.fake_init = {
        'src_cloud': self.fake_src_cloud,
        'dst_cloud': self.fake_dst_cloud,
        'cfg': self.fake_config
    }
def setUp(self):
    """Prepare mocked storage/image resources for volume-to-image tests."""
    super(ConverterVolumeToImageTest, self).setUp()
    self.fake_src_cloud = mock.Mock()
    self.fake_storage = mock.Mock()
    self.fake_storage.deploy = mock.Mock()
    # upload_volume_to_image() yields (response, new image id).
    self.fake_storage.upload_volume_to_image.return_value = (
        'resp', 'image_id')
    self.fake_storage.get_backend.return_value = 'ceph'
    self.fake_image = mock.Mock()
    self.fake_image.wait_for_status = mock.Mock()
    self.fake_image.read_info = mock.Mock()
    # read_info() returns the freshly uploaded image's metadata.
    self.fake_image.read_info.return_value = {
        'images': {
            'image_id': {'image': 'image_body', 'meta': {}}}}
    self.fake_image.patch_image = mock.Mock()
    self.fake_src_cloud.resources = {'storage': self.fake_storage,
                                     'image': self.fake_image}
    # One source volume carrying an 'image' meta entry.
    self.fake_volumes_info = {
        'volumes': {
            'id1': {
                'volume': {
                    'id': 'id1',
                    'display_name': 'dis1',
                },
                'meta': {
                    'image': 'image',
                },
            }},
    }
    self.fake_dst_cloud = mock.Mock()
    self.fake_config = utils.ext_dict(migrate=utils.ext_dict(
        {'disk_format': 'qcow', 'container_format': 'bare'}))
    # Constructor arguments for the action under test.
    self.fake_init = {
        'src_cloud': self.fake_src_cloud,
        'dst_cloud': self.fake_dst_cloud,
        'cfg': self.fake_config
    }
def _action(fake_src_data, fake_dst_data):
    """Build a WriteVolumesDb action wired with mocks and its run args.

    :param fake_src_data: source cinder DB payload; serialized into the
        action's namespace argument.
    :param fake_dst_data: destination cinder DB payload returned by the
        mocked destination storage resource.
    :return: (action, args) tuple ready to be executed in a test.
    """
    fake_config = utils.ext_dict(
        migrate=utils.ext_dict({
            'ssh_connection_attempts': 3,
            'key_filename': 'key_filename',
        }),
        src=utils.ext_dict({'ssh_user': '******',
                            'ssh_sudo_password': '******',
                            'host': SRC_CINDER_HOST,
                            }),
        dst=utils.ext_dict({'ssh_user': '******',
                            'ssh_sudo_password': '******',
                            'host': DST_CINDER_HOST,
                            'conf': '/etc/cinder.conf',
                            }),
        src_storage=utils.ext_dict({'conf': '/etc/cinder.conf'}),
        dst_storage=utils.ext_dict({'conf': '/etc/cinder.conf'}),
    )
    fake_src_cloud = mock.Mock()
    fake_src_storage = mock.Mock()
    fake_src_cloud.resources = {'storage': fake_src_storage}
    fake_dst_cloud = mock.Mock()
    fake_dst_storage = mock.Mock()
    # Destination DB info is handed back as a JSON string.
    fake_dst_storage.read_db_info = \
        mock.Mock(return_value=jsondate.dumps(fake_dst_data))
    fake_dst_cloud.resources = {'storage': fake_dst_storage}
    fake_init = {
        'src_cloud': fake_src_cloud,
        'dst_cloud': fake_dst_cloud,
        'cfg': fake_config
    }
    action = cinder_database_manipulation.WriteVolumesDb(fake_init)
    # Stub out every helper touching remote hosts / the filesystem.
    action.dst_mount = get_dst_mount(fake_dst_data)
    action.mount_dirs = mock.MagicMock(side_effect=mount_dirs)
    action.find_dir = mock.MagicMock(side_effect=find_dir(fake_dst_data))
    action.volume_size = mock.MagicMock(side_effect=volume_size)
    action.free_space = mock.MagicMock(side_effect=free_space)
    action.dst_hosts = [
        'dst_cinder',
        'dst_cinder@nfs1',
        'dst_cinder@nfs2',
        'dst_cinder@nfs3',
    ]
    action.run_repeat_on_errors = mock.Mock()
    # Arguments the task framework passes into action.run().
    args = {
        cinder_database_manipulation.NAMESPACE_CINDER_CONST:
            jsondate.dumps(fake_src_data)
    }
    return action, args
def setUp(self):
    """Prepare mocked clouds sharing one image resource for copy tests."""
    super(CopyFromGlanceToGlanceTestCase, self).setUp()
    self.fake_input_info = {'image_data': {'fake_key': 'fake_value'}}
    # Result the mocked image resource's deploy() call returns.
    self.fake_result_info = {'image_data': {
        'image': {'images': [{'image': 'image_body', 'meta': {}}]}}}
    self.fake_image = mock.Mock()
    self.fake_image.deploy.return_value = self.fake_result_info
    self.src_cloud = mock.Mock()
    self.dst_cloud = mock.Mock()
    self.dst_cloud.resources = {'image': self.fake_image}
    self.fake_config = utils.ext_dict(migrate=utils.ext_dict(
        {'ignore_empty_images': False}))
    # Both clouds expose the same mocked image resource.
    self.src_cloud.resources = {'image': self.fake_image}
    # Constructor arguments for the action under test.
    self.fake_init = {
        'src_cloud': self.src_cloud,
        'dst_cloud': self.dst_cloud,
        'cfg': self.fake_config
    }
def make_cloud_config(config, position):
    """Assemble the per-cloud configuration mapping for ``position``.

    Copies each relevant section of the global ``config`` object into a
    fresh ext_dict keyed by section name.
    """
    # (section name, source mapping) pairs, in the original fill order.
    sections = (
        ('migrate', config.migrate),
        ('cloud', getattr(config, position)),
        ('import_rules', config.import_rules),
        ('mail', config.mail),
        ('mysql', getattr(config, position + '_mysql')),
        ('rabbit', getattr(config, position + '_rabbit')),
        ('snapshot', config.snapshot),
        ('storage', getattr(config, position + '_storage')),
        ('initial_check', config.initial_check),
    )
    cloud_config = utils.ext_dict()
    for section, values in sections:
        cloud_config[section] = utils.ext_dict()
        cloud_config[section].update(values)
    return cloud_config
def make_cloud_config(config, position):
    """Build the per-cloud configuration mapping for ``position``.

    :param config: global configuration object exposing ``migrate``,
        ``import_rules``, ``mail`` sections plus position-specific
        attributes (``<position>``, ``<position>_mysql``).
    :param position: cloud position selector (e.g. 'src' or 'dst').
    :return: ext_dict with 'migrate', 'cloud', 'import_rules', 'mail'
        and 'mysql' sections filled in.
    """
    cloud_config = utils.ext_dict(migrate=utils.ext_dict(),
                                  cloud=utils.ext_dict(),
                                  import_rules=utils.ext_dict(),
                                  mail=utils.ext_dict(),
                                  mysql=utils.ext_dict())
    # Bulk dict.update() calls replace the original per-key iteritems()
    # loops -- identical result, consistent with the other variant of
    # this function in the file (which already used update()).
    cloud_config['migrate'].update(config.migrate)
    cloud_config['cloud'].update(getattr(config, position))
    cloud_config['import_rules'].update(config.import_rules)
    cloud_config['mail'].update(config.mail)
    cloud_config['mysql'].update(getattr(config, position + '_mysql'))
    return cloud_config
from cloudferrylib.utils import utils
from tests import test

# Shared fake configuration for the identity test cases below.
FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({
    'user': '******',
    'password': '******',
    'tenant': 'fake_tenant',
    'auth_url': 'http://1.1.1.1:35357/v2.0/',
    'region': None,
    'service_tenant': 'service'
}), migrate=utils.ext_dict({
    'speed_limit': '10MB',
    'retry': '7',
    'time_wait': 5,
    'keep_user_passwords': False,
    'overwrite_user_passwords': False,
    'migrate_users': True,
    'optimize_user_role_fetch': False
}), mail=utils.ext_dict({'server': '-'}))


# Patches real client construction for the test class that follows this
# decorator (class body truncated in this view).
@mock.patch("cloudferrylib.base.clients", mock.MagicMock())
from cinderclient.v1 import client as cinder_client
from oslotest import mockpatch

from cloudferrylib.os.storage import cinder_storage
from cloudferrylib.utils import utils
from tests import test

# Fake per-cloud configuration for the cinder storage tests (ceph backend).
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict({'user': '******',
                          'password': '******',
                          'tenant': 'fake_tenant',
                          'host': '1.1.1.1'}),
    migrate=utils.ext_dict({'speed_limit': '10MB',
                            'retry': '7',
                            'time_wait': '5',
                            'keep_volume_storage': False}),
    mysql=utils.ext_dict({'host': '1.1.1.1'}),
    storage=utils.ext_dict({'backend': 'ceph',
                            'rbd_pool': 'volumes',
                            'volume_name_template': 'volume-',
                            'host': '1.1.1.1'}))


class CinderStorageTestCase(test.TestCase):
    def setUp(self):
        """Replace the real cinderclient Client with a mock fixture."""
        super(CinderStorageTestCase, self).setUp()
        self.mock_client = mock.Mock()
        self.cs_patch = mockpatch.PatchObject(cinder_client, 'Client',
                                              new=self.mock_client)
        self.useFixture(self.cs_patch)
import mock
from oslotest import mockpatch

from cloud import cloud
from cloud import grouping
from cloudferrylib.utils import utils
from tests import test

# File paths used by the grouping tests for output and input group files.
RESULT_FILE = 'tests/grouping_result'
FILE_NAME = 'tests/groups'

# Fake configuration pointing group output at RESULT_FILE.
FAKE_CONFIG = utils.ext_dict(src=utils.ext_dict({'user': '******',
                                                 'password': '******',
                                                 'tenant': 'fake_tenant',
                                                 'host': '1.1.1.1'}),
                             migrate=utils.ext_dict(
                                 {'group_file_path': RESULT_FILE}))


class GroupingTestCase(test.TestCase):
    def setUp(self):
        """Create mocked resources and fake tenants (truncated in view)."""
        super(GroupingTestCase, self).setUp()
        self.network = mock.Mock()
        self.compute = mock.Mock()
        self.identity = mock.Mock()
        self.fake_tenant1 = mock.Mock()
        self.fake_tenant1.id = 't1'
        self.fake_tenant2 = mock.Mock()
from oslotest import mockpatch

from cloudferrylib.base import exception
from cloudferrylib.os.compute import nova_compute
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the nova compute test cases.
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict({'user': '******',
                          'password': '******',
                          'tenant': 'fake_tenant',
                          'region': None,
                          'auth_url': 'http://1.1.1.1:35357/v2.0/',
                          'cacert': '',
                          'insecure': False}),
    mysql=utils.ext_dict({'host': '1.1.1.1'}),
    migrate=utils.ext_dict({'migrate_quotas': True,
                            'retry': '7',
                            'time_wait': 5,
                            'keep_network_interfaces_order': True,
                            'keep_usage_quotas_inst': True}))


class BaseNovaComputeTestCase(test.TestCase):
    def setUp(self):
        """Replace the real novaclient Client with a MagicMock."""
        super(BaseNovaComputeTestCase, self).setUp()
        self.mock_client = mock.MagicMock()
        self.nc_patch = mockpatch.PatchObject(nova_client, 'Client',
                                              new=self.mock_client)
import copy

import mock
from neutronclient.v2_0 import client as neutron_client
from oslotest import mockpatch

from cloudferrylib.os.network import neutron
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the neutron network tests.
FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({
    'user': '******',
    'password': '******',
    'tenant': 'fake_tenant',
    'host': '1.1.1.1',
}), migrate=utils.ext_dict({
    'speed_limit': '10MB',
    'retry': '7',
    'time_wait': '5'
}))


class NeutronTestCase(test.TestCase):
    def setUp(self):
        """Replace the real neutronclient Client (truncated in view)."""
        super(NeutronTestCase, self).setUp()
        self.neutron_mock_client = mock.MagicMock()
        self.neutron_client_patch = \
            mockpatch.PatchObject(neutron_client,
def _action(fake_src_data, fake_dst_data, fake_deployed_data):
    """Build a fully mocked WriteVolumesDb action and its run arguments.

    :param fake_src_data: source cinder DB payload returned by the mocked
        source storage resource and passed in the namespace args.
    :param fake_dst_data: destination cinder DB payload returned by the
        mocked destination storage resource.
    :param fake_deployed_data: payload returned by the destination
        storage's reread() after deployment.
    :return: (action, args) tuple ready to be executed in a test.
    """
    fake_config = utils.ext_dict(
        migrate=utils.ext_dict({
            'ssh_connection_attempts': 3,
            'key_filename': 'key_filename',
        }),
        src=utils.ext_dict({'ssh_user': '******',
                            'ssh_sudo_password': '******',
                            'host': SRC_CINDER_HOST,
                            }),
        dst=utils.ext_dict({'ssh_user': '******',
                            'ssh_sudo_password': '******',
                            'host': DST_CINDER_HOST,
                            'conf': '/etc/cinder.conf',
                            }),
        src_storage=utils.ext_dict({'conf': '/etc/cinder.conf'}),
        dst_storage=utils.ext_dict({'conf': '/etc/cinder.conf'}),
    )
    fake_src_cloud = mock.Mock()
    fake_src_storage = mock.Mock()
    fake_src_storage.read_db_info = \
        mock.Mock(return_value=fake_src_data)
    fake_img_res = mock.Mock()
    fake_src_cloud.migration = {
        'image': FakeMigration('image'),
        'identity': None,
    }
    fake_src_cloud.resources = {
        'storage': fake_src_storage,
        'image': fake_img_res,
    }
    # Single source image referenced by checksum during migration.
    fake_src_images = {
        'images': {
            'img1': {
                'image': {
                    'id': 'img1',
                    'name': 'img1_name',
                    'checksum': 'fake_checksum1',
                }
            }
        }
    }
    fake_img_res.read_db_info = \
        mock.Mock(return_value=fake_src_images)
    fake_dst_cloud = mock.Mock()
    fake_dst_storage = mock.Mock()
    fake_dst_storage.read_db_info = \
        mock.Mock(return_value=fake_dst_data)
    fake_dst_storage.reread = \
        mock.Mock(return_value=fake_deployed_data)
    # deploy() passes data through unchanged via the no_modify helper.
    fake_dst_storage.deploy = mock.Mock(side_effect=no_modify)
    fake_dst_img_res = mock.Mock()
    fake_dst_cloud.resources = {
        'storage': fake_dst_storage,
        'image': fake_dst_img_res,
    }
    # Matching destination image: same name/checksum, different id.
    fake_dst_images = {
        'images': {
            'dst_img1': {
                'image': {
                    'id': 'dst_img1',
                    'name': 'img1_name',
                    'checksum': 'fake_checksum1',
                }
            }
        }
    }
    fake_dst_img_res.read_db_info = \
        mock.Mock(return_value=fake_dst_images)
    fake_init = {
        'src_cloud': fake_src_cloud,
        'dst_cloud': fake_dst_cloud,
        'cfg': fake_config
    }
    action = cinder_database_manipulation.WriteVolumesDb(fake_init)
    # Stub out every cp_volumes helper touching remote hosts / the FS.
    action.cp_volumes.dst_mount = get_dst_mount(fake_dst_data)
    action.cp_volumes.mount_dirs = mock.MagicMock(side_effect=mount_dirs)
    action.cp_volumes.find_dir = mock.MagicMock(
        side_effect=find_dir(fake_dst_data))
    action.cp_volumes.volume_size = mock.MagicMock(side_effect=volume_size)
    action.cp_volumes.free_space = mock.MagicMock(side_effect=free_space)
    action.cp_volumes.dst_volumes = mock.MagicMock(return_value=[])
    action.cp_volumes.dst_hosts = [
        'dst_cinder',
        'dst_cinder@nfs1',
        'dst_cinder@nfs2',
        'dst_cinder@nfs3',
    ]
    action.cp_volumes.run_repeat_on_errors = mock.Mock()

    def not_rsync(_, src, dst):
        # Bypass the free-space check and call run_rsync directly.
        return action.cp_volumes.run_rsync(src, dst)

    action.cp_volumes.rsync_if_enough_space = \
        mock.MagicMock(side_effect=not_rsync)
    return action, {
        cinder_database_manipulation.NAMESPACE_CINDER_CONST:
            fake_src_data
    }
import mock
from glanceclient.v1 import client as glance_client
from oslotest import mockpatch

from cloudferrylib.os.image.glance_image import GlanceImage
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the glance image tests.
FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({'user': '******',
                                                   'password': '******',
                                                   'tenant': 'fake_tenant',
                                                   'region': None,
                                                   'host': '1.1.1.1',
                                                   'ssh_host': '1.1.1.10',
                                                   'ssh_user': '******',
                                                   'cacert': '',
                                                   'insecure': False
                                                   }),
                             migrate=utils.ext_dict({'retry': '7',
                                                     'time_wait': 5}))


class FakeUser(object):
    # Minimal stand-in for a keystone user exposing only a name.
    def __init__(self):
        self.name = 'fake_user_name'


class GlanceImageTestCase(test.TestCase):
    def setUp(self):
def tn_name_by_id(uuid, _):
    """Return the name of the tenant in TENANTS whose id matches ``uuid``.

    Falls through (returns None) when no tenant matches.  The unused
    second argument keeps the signature compatible with mocked callers.
    """
    for tn in TENANTS:
        if tn['id'] == uuid:
            return tn['name']


# Fake configuration for the cinder storage tests (ceph backend).
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict({'user': '******',
                          'password': '******',
                          'tenant': 'fake_tenant',
                          'host': '1.1.1.1',
                          'auth_url': 'http://1.1.1.1:35357/v2.0/',
                          'cacert': '',
                          'insecure': False}),
    migrate=utils.ext_dict({'speed_limit': '10MB',
                            'retry': '7',
                            'time_wait': 5,
                            'keep_volume_storage': False,
                            'keep_volume_snapshots': False}),
    mysql=utils.ext_dict({'db_host': '1.1.1.1'}),
    storage=utils.ext_dict({'backend': 'ceph',
                            'rbd_pool': 'volumes',
                            'volume_name_template': 'volume-',
                            'host': '1.1.1.1'}))

# Volume status constants: the chained assignment binds the upper-case
# aliases to the string values via tuple unpacking (truncated in view).
STATUSES = (
    AVAILABLE,
    IN_USE,
    CREATING,
    ERROR,
    DELETING,
    ERROR_DELETING,
    ATTACHING,
    DETACHING,
    ERROR_ATTACHING,
) = (
    'available',
    'in-use',
    'creating',
    'error',
    'deleting',
    'error_deleting',
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the nova compute tests (with override_rules).
FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({
    'user': '******',
    'password': '******',
    'tenant': 'fake_tenant',
    'region': None,
    'auth_url': 'http://1.1.1.1:35357/v2.0/',
    'cacert': '',
    'insecure': False
}), mysql=utils.ext_dict({'host': '1.1.1.1'}),
    migrate=utils.ext_dict({
        'migrate_quotas': True,
        'retry': '7',
        'time_wait': 5,
        'keep_network_interfaces_order': True,
        'keep_usage_quotas_inst': True,
        'override_rules': None
    }))


class BaseNovaComputeTestCase(test.TestCase):
    def setUp(self):
import mock
from novaclient.v1_1 import client as nova_client
from oslotest import mockpatch

from cloudferrylib.os.network.nova_network import NovaNetwork
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the nova-network tests.
FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({
    'user': '******',
    'password': '******',
    'tenant': 'fake_tenant',
    'region': None,
    'auth_url': 'http://1.1.1.1:35357/v2.0/',
    'cacert': '',
    'insecure': False
}), migrate=utils.ext_dict({
    'retry': '7',
    'time_wait': 5
}))


class TestNovaNetwork(test.TestCase):
    def setUp(self):
        """Replace the real novaclient Client (truncated in view)."""
        super(TestNovaNetwork, self).setUp()
        self.nova_mock_client = mock.MagicMock()
        self.nova_client_patch = mockpatch.PatchObject(
from oslotest import mockpatch

from cloudferrylib.base import exception
from cloudferrylib.os.network import neutron
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the neutron tests (quota fetching enabled).
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict({'user': '******',
                          'password': '******',
                          'tenant': 'fake_tenant',
                          'auth_url': 'http://1.1.1.1:35357/v2.0/',
                          'region': None,
                          'service_tenant': 'services',
                          'cacert': '',
                          'insecure': False}),
    migrate=utils.ext_dict({'ext_net_map': 'fake_ext_net_map.yaml',
                            'retry': '7',
                            'time_wait': 5}),
    network=utils.ext_dict({
        'get_all_quota': True
    }))


class NeutronTestCase(test.TestCase):
    def setUp(self):
        """Replace the real neutronclient Client (truncated in view)."""
        super(NeutronTestCase, self).setUp()
        self.neutron_mock_client = mock.MagicMock()
def deploy(self, info):
    """Create the images described by ``info`` on the destination Glance.

    Images whose checksum and name already exist on the destination are
    reused; images with no checksum are re-created from their original
    source URL; images whose data cannot be downloaded are dropped.
    Image members are re-created, and disk/container formats that had to
    be added artificially are deleted again afterwards.

    :param info: migration info dict with an 'images' mapping; it is
        deep-copied so the caller's dict is left untouched.
    :return: info dict describing the images now on the destination.
    """
    LOG.info("Glance images deployment started...")
    info = copy.deepcopy(info)
    new_info = {'images': {}}
    created_images = []
    delete_container_format, delete_disk_format = [], []
    empty_image_list = {}

    # List for obsolete/broken images IDs, that will not be migrated
    obsolete_images_ids_list = []

    # Snapshot of what already exists on the destination, fetched once.
    dst_img_checksums = {x.checksum: x for x in self.get_image_list()}
    dst_img_names = [x.name for x in self.get_image_list()]

    for image_id_src, gl_image in info['images'].iteritems():
        img = gl_image['image']
        if img and img['resource']:
            checksum_current = img['checksum']
            name_current = img['name']
            meta = gl_image['meta']
            same_image_on_destination = (
                checksum_current in dst_img_checksums and
                name_current in dst_img_names)
            if same_image_on_destination:
                # Reuse the existing destination image instead of copying.
                created_images.append(
                    (dst_img_checksums[checksum_current], meta))
                LOG.info("Image '%s' is already present on destination, "
                         "skipping", img['name'])
                continue
            LOG.debug("Updating owner '{owner}' of image '{image}'".format(
                owner=img["owner_name"], image=img["name"]))
            img["owner"] = \
                self.identity_client.get_tenant_id_by_name(
                    img["owner_name"])
            del img["owner_name"]
            if img["properties"]:
                # update snapshot metadata
                metadata = img["properties"]
                if "owner_id" in metadata:
                    # update tenant id
                    LOG.debug("updating snapshot metadata for field "
                              "'owner_id' for image {image}".format(
                                  image=img["id"]))
                    metadata["owner_id"] = img["owner"]
                if "user_id" in metadata:
                    # update user id by specified name
                    LOG.debug("updating snapshot metadata for field "
                              "'user_id' for image {image}".format(
                                  image=img["id"]))
                    try:
                        ks_client = self.identity_client.keystone_client
                        metadata["user_id"] = ks_client.users.find(
                            username=metadata["user_name"]).id
                        del metadata["user_name"]
                    except keystone_exceptions.NotFound:
                        LOG.warning("Cannot update user name for image "
                                    "{}".format(img['name']))
            if img["checksum"] is None:
                # No checksum means no local data -- re-create from URL.
                LOG.warning("re-creating image {} "
                            "from original source URL"
                            .format(img["id"]))
                if meta['img_loc'] is not None:
                    self.glance_img_create(
                        img['name'],
                        img['disk_format'] or "qcow2",
                        meta['img_loc']
                    )
                    recreated_image = utl.ext_dict(
                        name=img["name"]
                    )
                    created_images.append(
                        (recreated_image, gl_image['meta'])
                    )
                else:
                    raise exception.AbortMigrationError(
                        "image information has no original source URL")
                continue
            LOG.debug("Creating image '{image}' ({image_id})".format(
                image=img["name"], image_id=img['id']))
            # we can face situation when image has no
            # disk_format and container_format properties
            # this situation appears, when image was created
            # with option --copy-from
            # glance-client cannot create image without this
            # properties, we need to create them artificially
            # and then - delete from database
            try:
                data_proxy = file_like_proxy.FileLikeProxy(
                    img, self.config['migrate']['speed_limit'])
                created_image = self.create_image(
                    name=img['name'],
                    container_format=(img['container_format'] or "bare"),
                    disk_format=(img['disk_format'] or "qcow2"),
                    is_public=img['is_public'],
                    protected=img['protected'],
                    owner=img['owner'],
                    size=img['size'],
                    properties=img['properties'],
                    data=data_proxy)
                image_members = img['members'].get(img['id'], {})
                for tenant_name, can_share in image_members.iteritems():
                    LOG.debug("deploying image member for image '%s' "
                              "tenant '%s'", img['id'], img['owner'])
                    self.create_member(
                        created_image.id, tenant_name, can_share)
                LOG.debug("new image ID {}".format(created_image.id))
                created_images.append((created_image, meta))
            except exception.ImageDownloadError:
                LOG.warning("Unable to reach image's data due to "
                            "Glance HTTPInternalServerError. Skipping "
                            "image: (id = %s)", img["id"])
                obsolete_images_ids_list.append(img["id"])
                continue
            # Remember artificially added formats so they can be deleted.
            if not img["container_format"]:
                delete_container_format.append(created_image.id)
            if not img["disk_format"]:
                delete_disk_format.append(created_image.id)
        elif img['resource'] is None:
            # Image data is gone on the source; keep a name-only record.
            recreated_image = utl.ext_dict(name=img["name"])
            created_images.append((recreated_image, gl_image['meta']))
        elif not img:
            empty_image_list[image_id_src] = gl_image

    # Remove obsolete/broken images from info
    for img_id in obsolete_images_ids_list:
        info['images'].pop(img_id)

    if created_images:
        im_name_list = [(im.name, tmp_meta)
                        for (im, tmp_meta) in created_images]
        LOG.debug("images on destination: {}".format(
            [im for (im, tmp_meta) in im_name_list]))
        new_info = self._convert_images_with_metadata(im_name_list)
    new_info['images'].update(empty_image_list)
    self.delete_fields('disk_format', delete_disk_format)
    self.delete_fields('container_format', delete_container_format)
    LOG.info("Glance images deployment finished.")
    return new_info
from cloudferrylib.os.storage import filters
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the cinder storage filter tests.
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict(
        {
            "user": "******",
            "password": "******",
            "tenant": "fake_tenant",
            "host": "1.1.1.1",
            "ssh_host": "1.1.1.10",
            "auth_url": "http://1.1.1.1:35357/v2.0/",
            "region": None,
            "cacert": "",
            "insecure": False,
        }
    ),
    migrate=utils.ext_dict(
        {"retry": "7",
         "time_wait": 5,
         "keep_volume_storage": False,
         "keep_volume_snapshots": False}
    ),
    mysql=utils.ext_dict({"db_host": "1.1.1.1"}),
    storage=utils.ext_dict(
        {"backend": "ceph",
         "rbd_pool": "volumes",
         "volume_name_template": "volume-",
         "host": "1.1.1.1"}
    ),
)


class CinderStorageTestCase(test.TestCase):
    def setUp(self):
        # Direct base-class call (truncated in this view).
        test.TestCase.setUp(self)
from cloudferrylib.os.compute import nova_compute
from cloudferrylib.utils import timeout_exception
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the nova compute tests.
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict(
        {
            "user": "******",
            "password": "******",
            "tenant": "fake_tenant",
            "region": None,
            "auth_url": "http://1.1.1.1:35357/v2.0/",
            "cacert": "",
            "insecure": False,
        }
    ),
    mysql=utils.ext_dict({"host": "1.1.1.1"}),
    migrate=utils.ext_dict({"migrate_quotas": True,
                            "retry": "7",
                            "time_wait": 5}),
)


class NovaComputeTestCase(test.TestCase):
    def setUp(self):
        """Replace the real novaclient Client with a MagicMock."""
        super(NovaComputeTestCase, self).setUp()
        self.mock_client = mock.MagicMock()
        self.nc_patch = mockpatch.PatchObject(nova_client, "Client",
                                              new=self.mock_client)
def deploy(self, info):
    """Deploy the images described by ``info`` to the destination Glance.

    For every source image that still has its data ('resource' set) the
    image is re-created on the destination unless an image with the same
    checksum and name already exists there.  Images whose checksum is
    missing are re-created from their original source URL; images whose
    data cannot be downloaded are dropped from ``info``.  Afterwards the
    source-to-destination id map is built, image members are re-created,
    and artificially added disk/container formats are removed again.

    :param info: migration info dict with an 'images' mapping (and an
        optional 'members' mapping); deep-copied, the caller's dict is
        left untouched.
    :return: info dict re-read from the destination describing the
        migrated images.
    """
    LOG.info("Glance images deployment started...")
    info = copy.deepcopy(info)
    new_info = {'images': {}}
    migrate_images_list = []
    delete_container_format, delete_disk_format = [], []
    empty_image_list = {}

    # List for obsolete/broken images IDs, that will not be migrated
    obsolete_images_ids_list = []

    for image_id_src, gl_image in info['images'].iteritems():
        if gl_image['image'] and gl_image['image']['resource']:
            # NOTE(review): the destination image list is re-fetched on
            # every iteration (two API calls per image).  Hoisting it out
            # of the loop would change dedup behaviour for images created
            # during this run, so it is deliberately left in place.
            dst_img_checksums = {x.checksum: x for x in
                                 self.get_image_list()}
            dst_img_names = [x.name for x in self.get_image_list()]
            checksum_current = gl_image['image']['checksum']
            name_current = gl_image['image']['name']
            meta = gl_image['meta']
            if checksum_current in dst_img_checksums and (
                    name_current) in dst_img_names:
                # Identical image already on destination -- reuse it.
                migrate_images_list.append(
                    (dst_img_checksums[checksum_current], meta))
                continue
            LOG.debug("Updating owner '{owner}' of image '{image}'".format(
                owner=gl_image["image"]["owner_name"],
                image=gl_image["image"]["name"]))
            gl_image["image"]["owner"] = \
                self.identity_client.get_tenant_id_by_name(
                    gl_image["image"]["owner_name"])
            del gl_image["image"]["owner_name"]
            if gl_image["image"]["properties"]:
                # update snapshot metadata
                metadata = gl_image["image"]["properties"]
                if "owner_id" in metadata:
                    # update tenant id
                    LOG.debug("updating snapshot metadata for field "
                              "'owner_id' for image {image}".format(
                                  image=gl_image["image"]["id"]))
                    metadata["owner_id"] = gl_image["image"]["owner"]
                if "user_id" in metadata:
                    # update user id by specified name
                    LOG.debug("updating snapshot metadata for field "
                              "'user_id' for image {image}".format(
                                  image=gl_image["image"]["id"]))
                    metadata["user_id"] = \
                        self.identity_client.keystone_client.users.find(
                            username=metadata["user_name"]).id
                    del metadata["user_name"]
            if gl_image["image"]["checksum"] is None:
                # No checksum means no local data -- re-create from URL.
                LOG.warning("re-creating image {} "
                            "from original source URL"
                            .format(gl_image["image"]["id"]))
                if meta['img_loc'] is not None:
                    self.glance_img_create(
                        gl_image['image']['name'],
                        gl_image['image']['disk_format'] or "qcow2",
                        meta['img_loc']
                    )
                    recreated_image = utl.ext_dict(
                        name=gl_image["image"]["name"]
                    )
                    migrate_images_list.append(
                        (recreated_image, gl_image['meta'])
                    )
                else:
                    raise exception.AbortMigrationError(
                        "image information has no original source URL")
                continue
            LOG.debug("Creating image '{image}' ({image_id})".format(
                image=gl_image["image"]["name"],
                image_id=gl_image['image']['id']))
            # we can face situation when image has no
            # disk_format and container_format properties
            # this situation appears, when image was created
            # with option --copy-from
            # glance-client cannot create image without this
            # properties, we need to create them artificially
            # and then - delete from database
            try:
                migrate_image = self.create_image(
                    name=gl_image['image']['name'],
                    container_format=(gl_image['image']['container_format']
                                      or "bare"),
                    disk_format=(gl_image['image']['disk_format']
                                 or "qcow2"),
                    is_public=gl_image['image']['is_public'],
                    protected=gl_image['image']['protected'],
                    owner=gl_image['image']['owner'],
                    size=gl_image['image']['size'],
                    properties=gl_image['image']['properties'],
                    data=file_like_proxy.FileLikeProxy(
                        gl_image['image'],
                        self.config['migrate']['speed_limit']))
                LOG.debug("new image ID {}".format(migrate_image.id))
            except exception.ImageDownloadError:
                LOG.warning("Unable to reach image's data due to "
                            "Glance HTTPInternalServerError. Skipping "
                            "image: (id = %s)", gl_image["image"]["id"])
                obsolete_images_ids_list.append(gl_image["image"]["id"])
                continue
            migrate_images_list.append((migrate_image, meta))
            # Remember artificially added formats for later deletion.
            if not gl_image["image"]["container_format"]:
                delete_container_format.append(migrate_image.id)
            if not gl_image["image"]["disk_format"]:
                delete_disk_format.append(migrate_image.id)
        elif gl_image['image']['resource'] is None:
            # Image data is gone on the source; keep a name-only record.
            recreated_image = utl.ext_dict(name=gl_image["image"]["name"])
            migrate_images_list.append((recreated_image, gl_image['meta']))
        elif not gl_image['image']:
            empty_image_list[image_id_src] = gl_image

    # Remove obsolete/broken images from info.  A plain loop replaces the
    # original side-effect list comprehension (same behaviour, idiomatic).
    for img_id in obsolete_images_ids_list:
        info['images'].pop(img_id)

    if migrate_images_list:
        im_name_list = [(im.name, tmp_meta)
                        for (im, tmp_meta) in migrate_images_list]
        LOG.debug("images on destination: {}".format(
            [im for (im, tmp_meta) in im_name_list]))
        new_info = self.read_info(images_list_meta=im_name_list)
    new_info['images'].update(empty_image_list)

    # on this step we need to create map between source ids and dst ones
    LOG.debug("creating map between source and destination image ids")
    image_ids_map = {}
    dst_img_checksums = {x.checksum: x.id for x in self.get_image_list()}
    for image_id_src, gl_image in info['images'].iteritems():
        # NOTE(review): assumes every remaining entry has an 'image' dict
        # whose checksum now exists on destination -- confirm for empty
        # image records.
        cur_image = gl_image["image"]
        image_ids_map[cur_image["id"]] = \
            dst_img_checksums[cur_image["checksum"]]
    LOG.debug("deploying image members")
    for image_id, data in info.get("members", {}).items():
        for tenant_name, can_share in data.items():
            LOG.debug("deploying image member for image {image}"
                      " tenant {tenant}".format(
                          image=image_id, tenant=tenant_name))
            self.create_member(
                image_ids_map[image_id], tenant_name, can_share)
    self.delete_fields('disk_format', delete_disk_format)
    self.delete_fields('container_format', delete_container_format)
    LOG.info("Glance images deployment finished.")
    return new_info
import copy

import mock

from cloudferrylib.os.storage import cinder_database
from cloudferrylib.os.storage import cinder_netapp
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the cinder NetApp tests.
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict({'user': '******',
                          'password': '******',
                          'tenant': 'fake_tenant',
                          'host': '1.1.1.1',
                          'auth_url': 'http://1.1.1.1:35357/v2.0/'}),
    mysql=utils.ext_dict({'db_host': '1.1.1.1'}),
    migrate=utils.ext_dict({
        'retry': '7',
        'time_wait': 5}))

# Sample volume DB rows: one with a plain host, one whose host carries a
# '@'-qualified backend suffix.
FAKE_ENTRY_0 = {'id': 'fake_volume_id_0',
                'host': 'c1',
                'provider_location': 'fake_netapp_server_0:/vol/v00_cinder'}
FAKE_ENTRY_1 = {'id': 'fake_volume_id_1',
                'host': 'c2@another_wrong_id',
                'provider_location': 'fake_netapp_server_1:/vol/v01_cinder'}


class CinderNetAppTestCase(test.TestCase):
from tests import test

# Fake configuration for the cinder storage tests (ceph backend).
FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({
    'user': '******',
    'password': '******',
    'tenant': 'fake_tenant',
    'host': '1.1.1.1',
    'ssh_host': '1.1.1.10',
    'auth_url': 'http://1.1.1.1:35357/v2.0/',
    'region': None,
    'cacert': '',
    'insecure': False
}), migrate=utils.ext_dict({
    'retry': '7',
    'time_wait': 5,
    'keep_volume_storage': False,
    'keep_volume_snapshots': False
}), mysql=utils.ext_dict({'db_host': '1.1.1.1'}),
    storage=utils.ext_dict({
        'backend': 'ceph',
        'rbd_pool': 'volumes',
        'volume_name_template': 'volume-',
        'host': '1.1.1.1'
    }))
import mock
from novaclient.v1_1 import client as nova_client
from oslotest import mockpatch

from cloudferrylib.os.network.nova_network import NovaNetwork
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the nova-network tests.
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict({'user': '******',
                          'password': '******',
                          'tenant': 'fake_tenant',
                          'auth_url': 'http://1.1.1.1:35357/v2.0/'}),
    migrate=utils.ext_dict({'speed_limit': '10MB',
                            'retry': '7',
                            'time_wait': 5}))


class TestNovaNetwork(test.TestCase):
    def setUp(self):
        """Replace the real novaclient Client with a MagicMock."""
        super(TestNovaNetwork, self).setUp()
        self.nova_mock_client = mock.MagicMock()
        self.nova_client_patch = mockpatch.PatchObject(
            nova_client, 'Client', new=self.nova_mock_client)
from glanceclient.v1 import client as glance_client
from oslotest import mockpatch

from cloudferrylib.os.image.glance_image import GlanceImage
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the glance image tests.
FAKE_CONFIG = utils.ext_dict(
    cloud=utils.ext_dict(
        {
            "user": "******",
            "password": "******",
            "tenant": "fake_tenant",
            "region": None,
            "host": "1.1.1.1",
            "ssh_user": "******",
            "cacert": "",
            "insecure": False,
        }
    ),
    migrate=utils.ext_dict({"retry": "7", "time_wait": 5}),
)


class FakeUser(object):
    # Minimal stand-in for a keystone user exposing only a name.
    def __init__(self):
        self.name = "fake_user_name"


class GlanceImageTestCase(test.TestCase):
import mock
from neutronclient.v2_0 import client as neutron_client
from oslotest import mockpatch

from cloudferrylib.os.network import neutron
from cloudferrylib.utils import utils
from tests import test

# Fake configuration for the neutron tests.
FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({
    'user': '******',
    'password': '******',
    'tenant': 'fake_tenant',
    'auth_url': 'http://1.1.1.1:35357/v2.0/',
    'region': None,
    'service_tenant': 'services'
}), migrate=utils.ext_dict({
    'ext_net_map': 'fake_ext_net_map.yaml',
    'speed_limit': '10MB',
    'retry': '7',
    'time_wait': 5
}))


class NeutronTestCase(test.TestCase):
    def setUp(self):
        """Replace the real neutronclient Client (truncated in view)."""
        super(NeutronTestCase, self).setUp()
        self.neutron_mock_client = mock.MagicMock()
        self.neutron_client_patch = \
def deploy(self, info):
    """Deploy source-cloud Glance images onto the destination cloud.

    :param info: dict with an 'images' mapping of source image id ->
        {'image': <image body dict or falsy>, 'meta': <metadata dict>}.
        Deep-copied up front so the caller's structure is never mutated.
    :returns: a new info dict describing the images now present on the
        destination (built via self._convert_images_with_metadata), with
        any "empty" source entries carried over unchanged.
    :raises exception.AbortMigrationError: when an image has no checksum
        and no original source URL to re-create it from.
    """
    LOG.info("Glance images deployment started...")
    info = copy.deepcopy(info)
    new_info = {'images': {}}
    created_images = []
    # IDs of images created without container/disk format; those fields are
    # artificially set at creation time and deleted from the DB at the end.
    delete_container_format, delete_disk_format = [], []
    empty_image_list = {}
    keystone = self.cloud.resources["identity"]
    # List for obsolete/broken images IDs, that will not be migrated
    obsolete_images_ids_list = []
    # Index destination images by (name, tenant, checksum, is_public) so a
    # source image already present on the destination can be skipped.
    dst_images = {}
    for dst_image in self.get_image_list():
        tenant_name = keystone.try_get_tenant_name_by_id(
            dst_image.owner, default=self.cloud.cloud_config.cloud.tenant)
        image_key = (dst_image.name, tenant_name, dst_image.checksum,
                     dst_image.is_public)
        dst_images[image_key] = dst_image
    view = GlanceImageProgessMigrationView(info['images'], dst_images)
    view.show_info()
    for image_id_src in info['images']:
        img = info['images'][image_id_src]['image']
        meta = info['images'][image_id_src]['meta']
        if img and img['resource']:
            checksum_current = img['checksum']
            name_current = img['name']
            tenant_name = img['owner_name']
            image_key = (name_current, tenant_name, checksum_current,
                         img['is_public'])
            if image_key in dst_images:
                # Already migrated: reuse the destination image, but still
                # sync its membership list.
                existing_image = dst_images[image_key]
                created_images.append((existing_image, meta))
                image_members = img['members'].get(img['id'], {})
                self.update_membership(existing_image.id, image_members)
                LOG.info("Image '%s' is already present on destination, "
                         "skipping", img['name'])
                continue
            view.show_progress()
            view.inc_progress(img['size'])
            # Re-map the owner to the destination tenant id by name.
            LOG.debug("Updating owner '%s' of image '%s'",
                      tenant_name, img["name"])
            img["owner"] = \
                self.identity_client.get_tenant_id_by_name(tenant_name)
            if img["properties"]:
                # update snapshot metadata
                metadata = img["properties"]
                if "owner_id" in metadata:
                    # update tenant id
                    LOG.debug("Updating snapshot metadata for field "
                              "'owner_id' for image %s", img["id"])
                    metadata["owner_id"] = img["owner"]
                if "user_name" in metadata:
                    # update user id by specified name
                    LOG.debug("Updating snapshot metadata for field "
                              "'user_id' for image %s", img["id"])
                    try:
                        ks_client = self.identity_client.keystone_client
                        metadata["user_id"] = ks_client.users.find(
                            username=metadata["user_name"]).id
                        del metadata["user_name"]
                    except keystone_exceptions.NotFound:
                        LOG.warning("Cannot update user name for image %s",
                                    img['name'])
            if img["checksum"] is None:
                # No data checksum: the image body is unavailable, so try to
                # re-create from its original source URL instead of copying.
                LOG.warning("re-creating image %s from original source "
                            "URL", img["id"])
                if meta['img_loc'] is not None:
                    self.glance_img_create(
                        img['name'],
                        img['disk_format'] or "qcow2",
                        meta['img_loc']
                    )
                    recreated_image = utl.ext_dict(
                        name=img["name"]
                    )
                    created_images.append((recreated_image, meta))
                else:
                    raise exception.AbortMigrationError(
                        "image information has no original source URL")
                continue
            LOG.debug("Creating image '%s' (%s)", img["name"], img['id'])
            # we can face situation when image has no
            # disk_format and container_format properties
            # this situation appears, when image was created
            # with option --copy-from
            # glance-client cannot create image without this
            # properties, we need to create them artificially
            # and then - delete from database
            try:
                # FileLikeProxy streams the image data while applying the
                # configured bandwidth limit.
                data_proxy = file_like_proxy.FileLikeProxy(
                    img, self.config['migrate']['speed_limit'])
                created_image = self.create_image(
                    name=img['name'],
                    container_format=(img['container_format'] or "bare"),
                    disk_format=(img['disk_format'] or "qcow2"),
                    is_public=img['is_public'],
                    protected=img['protected'],
                    owner=img['owner'],
                    size=img['size'],
                    properties=img['properties'],
                    data=data_proxy)
                image_members = img['members'].get(img['id'], {})
                LOG.debug("new image ID %s", created_image.id)
                self.update_membership(created_image.id, image_members)
                created_images.append((created_image, meta))
            except (exception.ImageDownloadError,
                    httplib.IncompleteRead,
                    glance_exceptions.HTTPInternalServerError) as e:
                # Source image data unreachable/broken: skip it and drop it
                # from the returned info below.
                LOG.debug(e, exc_info=True)
                LOG.warning("Unable to reach image's data due to "
                            "Glance HTTPInternalServerError. Skipping "
                            "image: %s (%s)", img['name'], img["id"])
                obsolete_images_ids_list.append(img["id"])
                continue
            if not img["container_format"]:
                delete_container_format.append(created_image.id)
            if not img["disk_format"]:
                delete_disk_format.append(created_image.id)
        elif img['resource'] is None:
            # NOTE(review): this branch is only reached when `img` is truthy
            # (otherwise img['resource'] above would fail for a falsy dict on
            # the next elif) — looks like `elif not img` below can only match
            # an empty-dict image; confirm intended ordering.
            recreated_image = utl.ext_dict(name=img["name"])
            created_images.append((recreated_image, meta))
        elif not img:
            empty_image_list[image_id_src] = info['images'][image_id_src]
    view.show_progress()
    # Remove obsolete/broken images from info
    for img_id in obsolete_images_ids_list:
        info['images'].pop(img_id)
    if created_images:
        im_name_list = [(im.name, tmp_meta)
                        for (im, tmp_meta) in created_images]
        LOG.debug("images on destination: %s",
                  [im for (im, tmp_meta) in im_name_list])
        new_info = self._convert_images_with_metadata(im_name_list)
    new_info['images'].update(empty_image_list)
    # Drop the artificially-added format fields from the database.
    self.delete_fields('disk_format', delete_disk_format)
    self.delete_fields('container_format', delete_container_format)
    LOG.info("Glance images deployment finished.")
    return new_info
class ColdEvacuateTestCase(test.TestCase):
    """Tests for cold_evacuate using an in-memory fake of the nova API.

    The fake keeps servers in ``self.servers`` (id -> mock server) and
    nova services in ``self._services`` keyed by ``(binary, host)``.
    """

    # Minimal config the evacuation code reads (ssh access + migrate knobs).
    config = utils.ext_dict(
        cloud=utils.ext_dict(
            ssh_user='******',
            ssh_sudo_password='******',
            ssh_host='ssh_host',
            host='host',
        ),
        migrate=utils.ext_dict(
            ssh_chunk_size=1337,
            retry=42,
        ),
    )

    def setUp(self):
        """Wire all compute-API mocks to the in-memory fakes below."""
        test.TestCase.setUp(self)
        # Mixed-case status is intentional input data for the code under test.
        self.server = self._make_server('fake-instance-id', status='ACTive')
        self.servers = {'fake-instance-id': self.server}
        self._services = {}
        self._make_service('nova-compute', 'fake-host-1', 'enabled')
        self._make_service('nova-compute', 'fake-host-2', 'enabled')
        self._make_service('nova-compute', 'fake-host-3', 'disabled')
        self._make_service('nova-compute', 'fake-host-4', 'disabled')
        self._make_service('nova-compute', 'fake-host-5', 'enabled')
        self.compute_api = mock.Mock()
        self.compute_api.servers.get.side_effect = self._servers_get
        self.compute_api.servers.delete.side_effect = self._servers_delete
        self.compute_api.servers.start.side_effect = self._servers_start
        self.compute_api.servers.stop.side_effect = self._servers_stop
        self.compute_api.servers.migrate.side_effect = self._migrate
        self.compute_api.servers.confirm_resize.side_effect = \
            self._confirm_resize
        self.compute_api.services.list.side_effect = self._services_list
        self.compute_api.services.disable.side_effect = self._service_disable
        self.compute_api.services.enable.side_effect = self._service_enable
        # Patch global oslo-style config consumed by the evacuation module.
        cfglib_conf_patcher = mock.patch('cfglib.CONF')
        self.addCleanup(cfglib_conf_patcher.stop)
        self.cfglib_conf = cfglib_conf_patcher.start()
        self.cfglib_conf.evacuation.state_change_timeout = 1
        self.cfglib_conf.evacuation.nova_home_path = '/fake/home'
        self.cfglib_conf.evacuation.nova_user = '******'
        # Avoid real SSH by stubbing the remote runner class.
        remote_runner_patcher = mock.patch(
            'cloudferrylib.utils.remote_runner.RemoteRunner')
        self.addCleanup(remote_runner_patcher.stop)
        self.remote_runner = remote_runner_patcher.start()

    def _servers_get(self, server_id):
        """Fake servers.get: accepts an id string or a server object."""
        if not isinstance(server_id, basestring):
            server_id = server_id.id
        if server_id not in self.servers:
            raise nova_exc.NotFound(404)
        return self.servers[server_id]

    def _servers_delete(self, server_id):
        """Fake servers.delete: removes the server or raises NotFound."""
        if not isinstance(server_id, basestring):
            server_id = server_id.id
        if server_id not in self.servers:
            raise nova_exc.NotFound(404)
        del self.servers[server_id]

    def _servers_stop(self, server_id):
        # Fake stop: only flips the status field.
        self._servers_get(server_id).status = 'SHUTOFF'

    def _servers_start(self, server_id):
        # Fake start: only flips the status field.
        self._servers_get(server_id).status = 'ACTIVE'

    def _migrate(self, server_id):
        """Fake cold-migrate: move the server to the one enabled host.

        The evacuation code is expected to leave exactly one candidate
        nova-compute service enabled before migrating.
        """
        server = self._servers_get(server_id)
        server.status = 'VERIFY_RESIZE'
        # NOTE(review): `s` here is a service mock, so
        # getattr(s, INSTANCE_HOST_ATTRIBUTE) returns an auto-created Mock
        # attribute that never equals s.host — confirm this comparison is
        # intended to always hold for services.
        services = [
            s for s in self._services.values()
            if s.status == 'enabled' and s.binary == 'nova-compute' and
            s.host != getattr(s, cold_evacuate.INSTANCE_HOST_ATTRIBUTE)
        ]
        # concatenate all host names to fail test when there is any choice
        setattr(server, cold_evacuate.INSTANCE_HOST_ATTRIBUTE,
                ','.join(s.host for s in services))

    def _confirm_resize(self, server_id):
        # Completing the resize returns the server to ACTIVE.
        self._servers_get(server_id).status = 'ACTIVE'

    def _make_server(self, instance_id, name='fake-instance', status='active',
                     image='fake-image-id', flavor='fake-flavor-id',
                     availability_zone='fake-az:fake-host',
                     block_device_mapping=None, nics=None):
        """Build a mock nova server with the attributes the code reads."""
        if block_device_mapping is None:
            block_device_mapping = {
                '/dev/vdb': 'volume-1',
                '/dev/vdc': 'volume-2'
            }
        # availability_zone is "<az>:<host>"; only the host part is used.
        _, host = availability_zone.split(':')
        server = mock.Mock()
        server.id = instance_id
        server.name = name
        server.status = status
        server.image = {'id': image}
        server.flavor = {'id': flavor}
        setattr(server, cold_evacuate.INSTANCE_HOST_ATTRIBUTE, host)
        server.block_device_mapping = block_device_mapping
        server.user_id = 'fake-user-id'
        server.nics = nics
        return server

    def _services_list(self, binary=None):
        """Fake services.list: sorted by host, filtered by binary."""
        services = sorted(self._services.values(), key=lambda x: x.host)
        return [s for s in services if s.binary == binary]

    def _make_service(self, binary, host, status):
        # Register a service mock under the (binary, host) key.
        service = mock.MagicMock()
        service.binary = binary
        service.host = host
        service.status = status
        self._services[(binary, host)] = service

    def _service_disable(self, host, binary):
        # Note the (host, binary) parameter order mirrors the nova API,
        # while the registry key is (binary, host).
        self._services[binary, host].status = 'disabled'

    def _service_enable(self, host, binary):
        self._services[binary, host].status = 'enabled'

    def test_cold_evacuate(self):
        """cold_evacuate moves the server and restores service states."""
        cold_evacuate.cold_evacuate(self.config, self.compute_api,
                                    self.server, 'fake-host-5')
        # Check that services are restored after migration
        self.assertEqual(self._services['nova-compute', 'fake-host-1'].status,
                         'enabled')
        self.assertEqual(self._services['nova-compute', 'fake-host-2'].status,
                         'enabled')
        self.assertEqual(self._services['nova-compute', 'fake-host-3'].status,
                         'disabled')
        self.assertEqual(self._services['nova-compute', 'fake-host-4'].status,
                         'disabled')
        self.assertEqual(self._services['nova-compute', 'fake-host-5'].status,
                         'enabled')
        # Check that server migrated to right host
        self.assertEqual(
            getattr(self.server, cold_evacuate.INSTANCE_HOST_ATTRIBUTE),
            'fake-host-5')
def deploy(self, info):
    """Deploy source-cloud Glance images onto the destination cloud.

    :param info: dict with an 'images' mapping of source image id ->
        {'image': <image body dict or falsy>, 'meta': <metadata dict>},
        and optionally a 'members' mapping for image membership. The dict
        is deep-copied up front so the caller's structure is not mutated.
    :returns: a new info dict produced by self.read_info() for the images
        now present on the destination, with "empty" source entries
        carried over unchanged.
    :raises exception.AbortMigrationError: when an image has no checksum
        and no original source URL to re-create it from.
    """
    LOG.info("Glance images deployment started...")
    info = copy.deepcopy(info)
    new_info = {'images': {}}
    migrate_images_list = []
    # Images created without container/disk format get those fields set
    # artificially and deleted from the database at the end.
    delete_container_format, delete_disk_format = [], []
    empty_image_list = {}
    # List for obsolete/broken images IDs, that will not be migrated
    obsolete_images_ids_list = []
    for image_id_src, gl_image in info['images'].iteritems():
        if gl_image['image'] and gl_image['image']['resource']:
            # The destination image list is re-fetched per iteration on
            # purpose: images created earlier in this loop become visible
            # to the duplicate check for later iterations.
            dst_img_checksums = {
                x.checksum: x for x in self.get_image_list()
            }
            dst_img_names = [x.name for x in self.get_image_list()]
            checksum_current = gl_image['image']['checksum']
            name_current = gl_image['image']['name']
            meta = gl_image['meta']
            # NOTE(review): checksum and name are matched independently,
            # not as a pair — confirm that cross-matching (same checksum
            # on one image, same name on another) is acceptable here.
            if checksum_current in dst_img_checksums and (
                    name_current) in dst_img_names:
                migrate_images_list.append(
                    (dst_img_checksums[checksum_current], meta))
                continue
            # Re-map the owner to the destination tenant id by name.
            LOG.debug("Updating owner '{owner}' of image '{image}'".format(
                owner=gl_image["image"]["owner_name"],
                image=gl_image["image"]["name"]))
            gl_image["image"]["owner"] = \
                self.identity_client.get_tenant_id_by_name(
                    gl_image["image"]["owner_name"])
            del gl_image["image"]["owner_name"]
            if gl_image["image"]["properties"]:
                # update snapshot metadata
                metadata = gl_image["image"]["properties"]
                if "owner_id" in metadata:
                    # update tenant id
                    LOG.debug("updating snapshot metadata for field "
                              "'owner_id' for image {image}".format(
                                  image=gl_image["image"]["id"]))
                    metadata["owner_id"] = gl_image["image"]["owner"]
                if "user_id" in metadata:
                    # update user id by specified name
                    # NOTE(review): guarded on "user_id" but reads
                    # metadata["user_name"] — verify "user_name" is always
                    # present when "user_id" is.
                    LOG.debug("updating snapshot metadata for field "
                              "'user_id' for image {image}".format(
                                  image=gl_image["image"]["id"]))
                    metadata["user_id"] = \
                        self.identity_client.keystone_client.users.find(
                            username=metadata["user_name"]).id
                    del metadata["user_name"]
            if gl_image["image"]["checksum"] is None:
                # No data checksum: the image body is unavailable, so try
                # re-creating from its original source URL instead.
                LOG.warning("re-creating image {} "
                            "from original source URL".format(
                                gl_image["image"]["id"]))
                if meta['img_loc'] is not None:
                    self.glance_img_create(
                        gl_image['image']['name'],
                        gl_image['image']['disk_format'] or "qcow2",
                        meta['img_loc'])
                    recreated_image = utl.ext_dict(
                        name=gl_image["image"]["name"])
                    migrate_images_list.append(
                        (recreated_image, gl_image['meta']))
                else:
                    raise exception.AbortMigrationError(
                        "image information has no original source URL")
                continue
            LOG.debug("Creating image '{image}' ({image_id})".format(
                image=gl_image["image"]["name"],
                image_id=gl_image['image']['id']))
            # we can face situation when image has no
            # disk_format and container_format properties
            # this situation appears, when image was created
            # with option --copy-from
            # glance-client cannot create image without this
            # properties, we need to create them artificially
            # and then - delete from database
            try:
                migrate_image = self.create_image(
                    name=gl_image['image']['name'],
                    container_format=(gl_image['image']['container_format']
                                      or "bare"),
                    disk_format=(gl_image['image']['disk_format']
                                 or "qcow2"),
                    is_public=gl_image['image']['is_public'],
                    protected=gl_image['image']['protected'],
                    owner=gl_image['image']['owner'],
                    size=gl_image['image']['size'],
                    properties=gl_image['image']['properties'],
                    # FileLikeProxy streams the data at the configured
                    # bandwidth limit.
                    data=file_like_proxy.FileLikeProxy(
                        gl_image['image'],
                        self.config['migrate']['speed_limit']))
                LOG.debug("new image ID {}".format(migrate_image.id))
            except exception.ImageDownloadError:
                # Source data unreachable/broken: skip this image and drop
                # it from the returned info below.
                LOG.warning(
                    "Unable to reach image's data due to "
                    "Glance HTTPInternalServerError. Skipping "
                    "image: (id = %s)", gl_image["image"]["id"])
                obsolete_images_ids_list.append(gl_image["image"]["id"])
                continue
            migrate_images_list.append((migrate_image, meta))
            if not gl_image["image"]["container_format"]:
                delete_container_format.append(migrate_image.id)
            if not gl_image["image"]["disk_format"]:
                delete_disk_format.append(migrate_image.id)
        elif gl_image['image']['resource'] is None:
            recreated_image = utl.ext_dict(name=gl_image["image"]["name"])
            migrate_images_list.append((recreated_image, gl_image['meta']))
        elif not gl_image['image']:
            empty_image_list[image_id_src] = gl_image
    # Remove obsolete/broken images from info.
    # (Was a list comprehension used only for its side effect; a plain
    # loop expresses the intent without building a throwaway list.)
    for img_id in obsolete_images_ids_list:
        info['images'].pop(img_id)
    if migrate_images_list:
        im_name_list = [(im.name, tmp_meta)
                        for (im, tmp_meta) in migrate_images_list]
        LOG.debug("images on destination: {}".format(
            [im for (im, tmp_meta) in im_name_list]))
        new_info = self.read_info(images_list_meta=im_name_list)
    new_info['images'].update(empty_image_list)
    # on this step we need to create map between source ids and dst ones
    LOG.debug("creating map between source and destination image ids")
    image_ids_map = {}
    dst_img_checksums = {x.checksum: x.id for x in self.get_image_list()}
    for image_id_src, gl_image in info['images'].iteritems():
        cur_image = gl_image["image"]
        image_ids_map[cur_image["id"]] = \
            dst_img_checksums[cur_image["checksum"]]
    LOG.debug("deploying image members")
    for image_id, data in info.get("members", {}).items():
        for tenant_name, can_share in data.items():
            LOG.debug("deploying image member for image {image}"
                      " tenant {tenant}".format(image=image_id,
                                                tenant=tenant_name))
            self.create_member(image_ids_map[image_id], tenant_name,
                               can_share)
    # Drop the artificially-added format fields from the database.
    self.delete_fields('disk_format', delete_disk_format)
    self.delete_fields('container_format', delete_container_format)
    LOG.info("Glance images deployment finished.")
    return new_info
import mock from keystoneclient.v2_0 import client as keystone_client from oslotest import mockpatch from cloudferrylib.os.identity import keystone from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict( cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'host': '1.1.1.1'}), migrate=utils.ext_dict({'speed_limit': '10MB', 'retry': '7', 'time_wait': '5', 'keep_user_passwords': False, 'overwrite_user_passwords': False}), mail=utils.ext_dict({'server': '-'})) class KeystoneIdentityTestCase(test.TestCase): def setUp(self): super(KeystoneIdentityTestCase, self).setUp() self.mock_client = mock.MagicMock() self.kc_patch = mockpatch.PatchObject(keystone_client, 'Client', new=self.mock_client) self.useFixture(self.kc_patch) self.fake_cloud = mock.Mock()
def deploy(self, info, *args, **kwargs):
    """Deploy source-cloud Glance images onto the destination cloud.

    :param info: dict with an 'images' mapping of source image id ->
        {'image': <image body dict or falsy>, 'meta': <metadata dict>}.
        Deep-copied up front so the caller's structure is never mutated.
    :param args: unused here; accepted for interface compatibility.
    :param kwargs: unused here; accepted for interface compatibility.
    :returns: result of self._new_info(...) describing the deployed
        images plus any "empty" source entries.
    :raises exception.AbortMigrationError: when an image has no checksum
        and no original source URL to re-create it from.
    """
    LOG.info("Glance images deployment started...")
    info = copy.deepcopy(info)
    created_images = []
    # Images created without container/disk format get those fields set
    # artificially; _new_info is told which ones to clean up afterwards.
    delete_container_format, delete_disk_format = [], []
    empty_image_list = {}
    # List for obsolete/broken images IDs, that will not be migrated
    obsolete_images_ids_list = []
    # Destination images keyed by (name, tenant, checksum, is_public);
    # used to skip images that are already migrated.
    dst_images = self._dst_images()
    view = GlanceImageProgessMigrationView(info['images'], dst_images)
    view.show_info()
    for image_id_src in info['images']:
        img = info['images'][image_id_src]['image']
        meta = info['images'][image_id_src]['meta']
        if img and img['resource']:
            checksum_current = img['checksum']
            name_current = img['name']
            tenant_name = img['owner_name']
            image_key = (name_current, tenant_name, checksum_current,
                         img['is_public'])
            if image_key in dst_images:
                # Already migrated: reuse the destination image, but still
                # sync its membership list.
                existing_image = dst_images[image_key]
                created_images.append((existing_image, meta))
                image_members = img['members'].get(img['id'], {})
                self.update_membership(existing_image.id, image_members)
                LOG.info(
                    "Image '%s' is already present on destination, "
                    "skipping", img['name'])
                continue
            view.show_progress()
            view.inc_progress(img['size'])
            # Re-map the owner to the destination tenant id by name.
            LOG.debug("Updating owner '%s' of image '%s'",
                      tenant_name, img["name"])
            img["owner"] = \
                self.identity_client.get_tenant_id_by_name(tenant_name)
            if img["properties"]:
                # update snapshot metadata
                metadata = img["properties"]
                if "owner_id" in metadata:
                    # update tenant id
                    LOG.debug(
                        "Updating snapshot metadata for field "
                        "'owner_id' for image %s", img["id"])
                    metadata["owner_id"] = img["owner"]
                if "user_name" in metadata:
                    # update user id by specified name
                    LOG.debug(
                        "Updating snapshot metadata for field "
                        "'user_id' for image %s", img["id"])
                    try:
                        ks_client = self.identity_client.keystone_client
                        metadata["user_id"] = ks_client.users.find(
                            username=metadata["user_name"]).id
                        del metadata["user_name"]
                    except keystone_exceptions.NotFound:
                        LOG.warning("Cannot update user name for image %s",
                                    img['name'])
            if img["checksum"] is None:
                # No data checksum: the image body is unavailable, so try
                # re-creating from its original source URL instead.
                LOG.warning(
                    "re-creating image %s from original source "
                    "URL", img["id"])
                if meta['img_loc'] is not None:
                    self.create_image(
                        id=img['id'],
                        name=img['name'],
                        disk_format=img['disk_format'] or "qcow2",
                        location=meta['img_loc'],
                        container_format=img['container_format'] or 'bare',
                    )
                    recreated_image = utl.ext_dict(name=img["name"])
                    created_images.append((recreated_image, meta))
                else:
                    raise exception.AbortMigrationError(
                        "image information has no original source URL")
                continue
            LOG.debug("Creating image '%s' (%s)", img["name"], img['id'])
            # we can face situation when image has no
            # disk_format and container_format properties
            # this situation appears, when image was created
            # with option --copy-from
            # glance-client cannot create image without this
            # properties, we need to create them artificially
            # and then - delete from database
            try:
                # FileProxy wraps the raw image stream to report progress
                # for the named image.
                file_obj = img['resource'].get_ref_image(img['id'])
                data_proxy = file_proxy.FileProxy(file_obj,
                                                  name="image %s ('%s')" %
                                                       (img['name'],
                                                        img['id']),
                                                  size=img['size'])
                created_image = self.create_image(
                    id=img['id'],
                    name=img['name'],
                    container_format=(img['container_format'] or "bare"),
                    disk_format=(img['disk_format'] or "qcow2"),
                    is_public=img['is_public'],
                    protected=img['protected'],
                    owner=img['owner'],
                    size=img['size'],
                    properties=img['properties'],
                    data=data_proxy)
                image_members = img['members'].get(img['id'], {})
                LOG.debug("new image ID %s", created_image.id)
                self.update_membership(created_image.id, image_members)
                created_images.append((created_image, meta))
            except (exception.ImageDownloadError,
                    httplib.IncompleteRead,
                    glance_exceptions.HTTPInternalServerError) as e:
                # Source data unreachable/broken: skip this image and drop
                # it from `info` below.
                LOG.debug(e, exc_info=True)
                LOG.warning(
                    "Unable to reach image's data due to "
                    "Glance HTTPInternalServerError. Skipping "
                    "image: %s (%s)", img['name'], img["id"])
                obsolete_images_ids_list.append(img["id"])
                continue
            if not img["container_format"]:
                delete_container_format.append(created_image.id)
            if not img["disk_format"]:
                delete_disk_format.append(created_image.id)
        elif img['resource'] is None:
            # NOTE(review): reached only for truthy `img` (a falsy dict
            # would fail on img['resource']); confirm `elif not img` below
            # can actually trigger only for an empty-dict image.
            recreated_image = utl.ext_dict(name=img["name"])
            created_images.append((recreated_image, meta))
        elif not img:
            empty_image_list[image_id_src] = info['images'][image_id_src]
    view.show_progress()
    if obsolete_images_ids_list:
        LOG.warning('List of broken images: %s', obsolete_images_ids_list)
    # Remove obsolete/broken images from info
    for img_id in obsolete_images_ids_list:
        info['images'].pop(img_id)
    return self._new_info(created_images, empty_image_list,
                          delete_disk_format, delete_container_format)
from oslotest import mockpatch import cfglib from cloudferrylib.os.identity import keystone from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict( cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'auth_url': 'http://1.1.1.1:35357/v2.0/', 'region': None, 'service_tenant': 'service', 'cacert': '', 'insecure': False}), migrate=utils.ext_dict({'retry': '7', 'time_wait': 5, 'keep_user_passwords': False, 'overwrite_user_passwords': False, 'migrate_users': True, 'optimize_user_role_fetch': False}), mail=utils.ext_dict({'server': '-'})) @mock.patch("cloudferrylib.base.clients.os_cli_cmd", mock.MagicMock()) class KeystoneIdentityTestCase(test.TestCase): def setUp(self): super(KeystoneIdentityTestCase, self).setUp() self.mock_client = mock.MagicMock() self.kc_patch = mockpatch.PatchObject(keystone_client, 'Client',
from glanceclient.v1 import client as glance_client from oslotest import mockpatch from cloudferrylib.os.image.glance_image import GlanceImage from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({ 'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'region': None, 'host': '1.1.1.1', 'ssh_host': '1.1.1.10', 'ssh_user': '******', 'cacert': '', 'insecure': False }), migrate=utils.ext_dict({ 'retry': '7', 'time_wait': 5 })) class FakeUser(object): def __init__(self): self.name = 'fake_user_name' class GlanceImageTestCase(test.TestCase):
from keystoneclient.v2_0 import client as keystone_client from oslotest import mockpatch from cloudferrylib.os.identity import keystone from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict( cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'auth_url': 'http://1.1.1.1:35357/v2.0/', 'service_tenant': 'services'}), migrate=utils.ext_dict({'speed_limit': '10MB', 'retry': '7', 'time_wait': 5, 'keep_user_passwords': False, 'overwrite_user_passwords': False, 'migrate_users': True}), mail=utils.ext_dict({'server': '-'})) class KeystoneIdentityTestCase(test.TestCase): def setUp(self): super(KeystoneIdentityTestCase, self).setUp() self.mock_client = mock.MagicMock() self.kc_patch = mockpatch.PatchObject(keystone_client, 'Client', new=self.mock_client) self.useFixture(self.kc_patch)
import mock from glanceclient.v1 import client as glance_client from oslotest import mockpatch from cloudferrylib.os.image.glance_image import GlanceImage from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'host': '1.1.1.1', }), migrate=utils.ext_dict({'speed_limit': '10MB', 'retry': '7', 'time_wait': '5'})) class GlanceImageTestCase(test.TestCase): def setUp(self): super(GlanceImageTestCase, self).setUp() self.glance_mock_client = mock.MagicMock() self.glance_mock_client().images.data()._resp = 'fake_resp_1' self.glance_client_patch = mockpatch.PatchObject( glance_client, 'Client',
from oslotest import mockpatch import cfglib from cloudferrylib.os.compute import nova_compute from cloudferrylib.utils import timeout_exception from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict( cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'region': None, 'auth_url': 'http://1.1.1.1:35357/v2.0/'}), mysql=utils.ext_dict({'host': '1.1.1.1'}), migrate=utils.ext_dict({'migrate_quotas': True, 'speed_limit': '10MB', 'retry': '7', 'time_wait': 5})) class NovaComputeTestCase(test.TestCase): def setUp(self): super(NovaComputeTestCase, self).setUp() self.mock_client = mock.MagicMock() self.nc_patch = mockpatch.PatchObject(nova_client, 'Client', new=self.mock_client) self.useFixture(self.nc_patch)
from neutronclient.v2_0 import client as neutron_client from oslotest import mockpatch from cloudferrylib.base import exception from cloudferrylib.os.network import neutron from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({ 'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'auth_url': 'http://1.1.1.1:35357/v2.0/', 'region': None, 'service_tenant': 'services', 'cacert': '', 'insecure': False }), migrate=utils.ext_dict({ 'ext_net_map': 'fake_ext_net_map.yaml', 'retry': '7', 'time_wait': 5 }), network=utils.ext_dict({'get_all_quota': True})) class NeutronTestCase(test.TestCase): def setUp(self): super(NeutronTestCase, self).setUp() self.neutron_mock_client = mock.MagicMock()
from oslotest import mockpatch from cloudferrylib.os.compute import nova_compute from cloudferrylib.utils import timeout_exception from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict( cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'region': None, 'auth_url': 'http://1.1.1.1:35357/v2.0/', 'cacert': '', 'insecure': False}), mysql=utils.ext_dict({'host': '1.1.1.1'}), migrate=utils.ext_dict({'migrate_quotas': True, 'speed_limit': '10MB', 'retry': '7', 'time_wait': 5})) class NovaComputeTestCase(test.TestCase): def setUp(self): super(NovaComputeTestCase, self).setUp() self.mock_client = mock.MagicMock() self.nc_patch = mockpatch.PatchObject(nova_client, 'Client', new=self.mock_client)
import mock from neutronclient.v2_0 import client as neutron_client from oslotest import mockpatch from cloudferrylib.os.network import neutron from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict( cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'auth_url': 'http://1.1.1.1:35357/v2.0/', 'region': None, 'service_tenant': 'services'}), migrate=utils.ext_dict({'ext_net_map': 'fake_ext_net_map.yaml', 'speed_limit': '10MB', 'retry': '7', 'time_wait': 5})) class NeutronTestCase(test.TestCase): def setUp(self): super(NeutronTestCase, self).setUp() self.neutron_mock_client = mock.MagicMock() self.neutron_client_patch = \
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cloudferrylib.os.storage import cinder_storage from tests import test from oslotest import mockpatch from cinderclient.v1 import client as cinder_client from cloudferrylib.utils import utils FAKE_CONFIG = utils.ext_dict(cloud=utils.ext_dict({'user': '******', 'password': '******', 'tenant': 'fake_tenant', 'host': '1.1.1.1'})) class CinderStorageTestCase(test.TestCase): def setUp(self): super(CinderStorageTestCase, self).setUp() self.mock_client = mock.Mock() self.cs_patch = mockpatch.PatchObject(cinder_client, 'Client', new=self.mock_client) self.useFixture(self.cs_patch) self.identity_mock = mock.Mock() self.fake_cloud = mock.Mock() self.fake_cloud.mysql_connector = mock.Mock()
from glanceclient.v1 import client as glance_client from oslotest import mockpatch from cloudferrylib.os.image.glance_image import GlanceImage from cloudferrylib.utils import utils from tests import test FAKE_CONFIG = utils.ext_dict( cloud=utils.ext_dict( { "user": "******", "password": "******", "tenant": "fake_tenant", "region": None, "host": "1.1.1.1", "ssh_user": "******", } ), migrate=utils.ext_dict({"speed_limit": "10MB", "retry": "7", "time_wait": 5}), ) class FakeUser(object): def __init__(self): self.name = "fake_user_name" class GlanceImageTestCase(test.TestCase): def setUp(self):