def _check_opts_img(self, opts):
        image_resource = self.cloud.resources[utl.IMAGE_RESOURCE]
        if opts and \
                opts.get('images_list') and opts.get('exclude_images_list'):
            raise exception.AbortMigrationError(
                "In the filter config file was specified "
                "'images_list' and 'exclude_images_list'. "
                "Must either specify - 'images_list' or "
                "'exclude_images_list'.")

        if opts and opts.get('images_list'):
            images_list = opts['images_list']
            for img_id in images_list:
                LOG.debug('Filtered image id: %s', img_id)
                try:
                    with proxy_client.expect_exception(glance_exc.NotFound):
                        img = image_resource.glance_client.images.get(img_id)
                    if img:
                        LOG.debug('Filter config check: Image ID %s is OK',
                                  img_id)
                except glance_exc.HTTPNotFound:
                    LOG.error(
                        'Filter config check: Image ID %s '
                        'is not present in source cloud, '
                        'please update your filter config. Aborting.', img_id)
                    raise
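# Illustrative only: the shape of a filter-config 'opts' dict this check
# accepts or rejects (the image IDs below are made up).
ok_opts = {'images_list': ['0c55ad74-e2e4-4b25-a1af-4f8db4f4da14']}
bad_opts = {
    'images_list': ['0c55ad74-e2e4-4b25-a1af-4f8db4f4da14'],
    'exclude_images_list': ['8c0ee904-6b69-4a0e-bf33-6f9b4e06c0e1'],
}
# _check_opts_img(bad_opts) would raise AbortMigrationError; ok_opts passes
# (provided the image actually exists on the source cloud).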
    def check_floating_ips_overlapping(self, dst_floating_ip):
        """
        Check if Floating IP overlaps with DST.

        Parameters to compare:
        - same floating ip address;
        - same tenant;
        - same network;
        - same network's tenant.

        Also check that this Floating IP is not busy, i.e. not associated
        with a VM on SRC and DST at the same time.

        :param dst_floating_ip: DST FloatingIp instance.

        :raise AbortMigrationError: If FloatingIp overlaps with the DST.
        """

        # Check association to VMs on SRC and DST at the same time
        ports_overlap = self.port_id and dst_floating_ip.port_id

        if not self == dst_floating_ip or ports_overlap:
            message = ("Floating IP '%s' overlaps with the same IP on DST." %
                       self.address)
            LOG.error(message)
            raise exception.AbortMigrationError(message)
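# A minimal sketch of the equality the overlap check above relies on. The
# attribute names mirror the docstring and are assumptions, not the
# project's actual FloatingIp fields.
class FloatingIpSketch(object):
    def __init__(self, address, tenant, network, network_tenant,
                 port_id=None):
        self.address = address
        self.tenant = tenant
        self.network = network
        self.network_tenant = network_tenant
        self.port_id = port_id

    def __eq__(self, other):
        # Same address, tenant, network and network's tenant
        # => the same Floating IP, not an overlap.
        return (self.address == other.address and
                self.tenant == other.tenant and
                self.network == other.network and
                self.network_tenant == other.network_tenant)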
    def _mount_output(self, position, vt=None, dirs_only=False):
        if dirs_only:
            print_cmd = "{print $3}}'; done"
        else:
            print_cmd = "{print $3\"%s\"$1}}'; done" % MOUNT_DELIM

        res = None
        if vt:
            res = self._mount_output_all(position, dirs_only=dirs_only).get(
                vt['id'], None)
        if not res:
            res = self._mount_output_all(position, dirs_only=dirs_only).get(
                DEFAULT, None)
        if not res:
            # default nfs_shares_config
            cmd = ("for exp in " "$(awk -F'[ =\t]+' '")
            cmd += \
                AWK_GET_MOUNTED_LAST_NFS_SHARES % self.storage[position].conf
            cmd += print_cmd
            res = self._run_cmd(self.clouds[position], cmd)
            res = set(res if isinstance(res, list) else [res])
        if not res:
            raise exception.AbortMigrationError('No NFS share found on "%s"' %
                                                position)
        return res
    def _ssh_connectivity_between_controllers(self):
        src_host = self.cfg.src.ssh_host
        src_user = self.cfg.src.ssh_user
        dst_host = self.cfg.dst.ssh_host
        dst_user = self.cfg.dst.ssh_user

        LOG.info("Checking ssh connectivity between '%s' and '%s'", src_host,
                 dst_host)

        rr = remote_runner.RemoteRunner(src_host, src_user)

        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')

        cmd = "ssh {opts} {user}@{host} 'echo ok'".format(opts=ssh_opts,
                                                          user=dst_user,
                                                          host=dst_host)

        try:
            rr.run(cmd)
        except remote_runner.RemoteExecutionError:
            msg = ("No ssh connectivity between source host '{src_host}' and "
                   "destination host '{dst_host}'. Make sure you have keys "
                   "and correct configuration on these nodes. To verify run "
                   "'{ssh_cmd}' from '{src_host}' node")
            msg = msg.format(src_host=src_host, dst_host=dst_host, ssh_cmd=cmd)
            LOG.error(msg)
            raise exception.AbortMigrationError(msg)
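# A standalone sketch of the same probe using only the standard library,
# assuming key-based auth is already in place. The real check runs the ssh
# command from the SRC node via remote_runner, not from the local machine.
import subprocess

def ssh_echo_ok(user, host):
    cmd = ['ssh',
           '-o', 'UserKnownHostsFile=/dev/null',
           '-o', 'StrictHostKeyChecking=no',
           '%s@%s' % (user, host), 'echo ok']
    return subprocess.call(cmd) == 0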
def check_affinity_api(cloud):
    compute_resource = cloud.resources[utils.COMPUTE_RESOURCE]
    with proxy_client.expect_exception(nova_exceptions.NotFound):
        try:
            compute_resource.nova_client.server_groups.list()
        except nova_exceptions.NotFound:
            raise cf_exceptions.AbortMigrationError(
                "'%s' cloud does not support affinity/anti-affinity "
                "(Nova server groups) API." % cloud.position)
    def run(self, *args, **kwargs):
        """Run TransportVolumes Action."""
        data_from_namespace = kwargs.get(NAMESPACE_CINDER_CONST)
        if not data_from_namespace:
            raise exception.AbortMigrationError(
                "Cannot read attribute {attribute} from namespace".format(
                    attribute=NAMESPACE_CINDER_CONST))

        data = data_from_namespace
        self.get_resource().deploy(data)
 def _iscsiadm_is_installed_locally(self):
     LOG.info("Checking if iscsiadm tool is installed")
     try:
         local.run('iscsiadm --help &>/dev/null')
     except local.LocalExecutionFailed:
         msg = ("iscsiadm is not available on the local host. Please "
                "install iscsiadm tool on the node you running on or "
                "choose other cinder backend for migration. iscsiadm is "
                "mandatory for migrations with EMC VMAX cinder backend")
         LOG.error(msg)
         raise exception.AbortMigrationError(msg)
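# The same "is this CLI tool installed?" pattern with the standard library;
# a sketch, since local.run and LocalExecutionFailed are project helpers.
import subprocess

def tool_installed(tool):
    try:
        subprocess.check_call('%s --help > /dev/null 2>&1' % tool,
                              shell=True)
        return True
    except subprocess.CalledProcessError:
        return False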
 def check_quotas(self, cloud):
     compute_resource = cloud.resources[utils.COMPUTE_RESOURCE]
     keystone_resource = cloud.resources[utils.IDENTITY_RESOURCE]
     tenant = cloud.cloud_config['cloud']['tenant']
     ten_id = keystone_resource.get_tenant_id_by_name(tenant)
     with proxy_client.expect_exception(nova_exceptions.ClientException):
         try:
             compute_resource.nova_client.quotas.update(ten_id)
         except nova_exceptions.ClientException:
             raise cf_exceptions.AbortMigrationError(
                 "'%s' cloud does not support quotas "
                 "(Nova quotas)." % cloud.position)
    def get_resource(self):
        """
        Get cinder-volume resource.

        :return: cinder_database resource

        """
        cinder_resource = self.cloud.resources.get(utils.STORAGE_RESOURCE)
        if not cinder_resource:
            raise exception.AbortMigrationError(
                "No resource {res} found".format(res=utils.STORAGE_RESOURCE))
        return cinder_resource
 def check_network_overlapping(self, network):
     for subnet in network.subnets:
         LOG.debug("Work with SRC subnet: '%s'" % subnet['id'])
         if self.is_subnet_eq(subnet):
             LOG.debug("We have the same subnet on DST by hash")
             continue
         overlapping_subnet = self.get_overlapping_subnet(subnet)
         if overlapping_subnet:
             message = (
                 "Subnet '%s' in network '%s' on SRC overlaps with "
                 "subnet '%s' in network '%s' on DST" %
                 (subnet['id'], network.id, overlapping_subnet, self.id))
             LOG.error(message)
             raise exception.AbortMigrationError(message)
    def run(self, info=None, **kwargs):
        check_results = []
        check_failed = False

        for node in self.get_compute_nodes():
            node_ssh_failed = self.check_access(node)
            check_failed = check_failed or node_ssh_failed
            check_results.append((node, node_ssh_failed))

        if check_failed:
            message = "SSH check failed for following nodes: '{nodes}'".format(
                nodes=map(itemgetter(0),
                          filter(lambda (n, status): status, check_results)))
            LOG.error(message)
            raise exception.AbortMigrationError(message)
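# Illustrative shape of check_results and the failed-node extraction above:
# each entry pairs a node with a boolean "SSH check failed" flag.
example_results = [('compute-1', False), ('compute-2', True)]
failed_nodes = [node for node, failed in example_results if failed]
# failed_nodes == ['compute-2']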
def check(os_api_call, os_api_type, position, *os_api_call_args,
          **os_api_call_kwargs):
    try:
        LOG.info("Checking %s APIs availability on %s.", os_api_type,
                 position.upper())
        os_api_call(*os_api_call_args, **os_api_call_kwargs)
    except (neutron_exc.NeutronException, glance_exc.BaseException,
            glance_exc.ClientException, ks_exc.ClientException,
            cinder_exc.ClientException, nova_exc.ClientException) as e:
        message = ('{os_api_type} APIs on {position} check failed with: '
                   '"{msg}". Check your configuration.').format(
                       os_api_type=os_api_type,
                       msg=e.message,
                       position=position.upper())
        LOG.error(message)
        raise exception.AbortMigrationError(message)
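# Hypothetical usage: probe one cheap API call per service on each side.
# The client objects and 'src'/'dst' position strings are assumptions for
# illustration only.
#
#     check(nova_client.servers.list, 'Nova', 'src')
#     check(glance_client.images.list, 'Glance', 'dst')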
    def check_segmentation_id_overlapping(self, dst_seg_ids):
        """
        Check if segmentation ID of current network overlaps with destination.

        :param dst_seg_ids: Dictionary with busy segmentation IDs on DST
        """

        if self.network_type not in dst_seg_ids:
            return

        if self.seg_id in dst_seg_ids[self.network_type]:
            message = ("Segmentation ID '%s' (network type = '%s', "
                       "network ID = '%s') is already busy on the destination "
                       "cloud.") % (self.seg_id, self.network_type, self.id)
            LOG.error(message)
            raise exception.AbortMigrationError(message)
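# Illustrative shape of dst_seg_ids: segmentation IDs already busy on DST,
# keyed by network type (the values are made up).
dst_seg_ids = {
    'vlan': [1000, 1001],
    'gre': [2, 3],
    'vxlan': [10, 11],
}
# A network with network_type='vlan' and seg_id=1000 would abort migration.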
    def _transfer(self, src, dstpaths, volume, src_size):
        LOG.debug("Trying transfer file for volume: %s[%s]",
                  volume.get('display_name', None), volume['id'])
        dstfile = self.find_dir(DST, dstpaths, volume)
        LOG.debug("Source file size = %d", src_size)
        LOG.debug("Searching for space for volume: %s[%s]",
                  volume.get('display_name', None), volume['id'])
        if dstfile:
            LOG.info("File found on destination: %s", dstfile)
            dst_size = self.volume_size(self.clouds[DST], dstfile)
            LOG.debug("Destination file (%s) size = %d", dstfile, dst_size)
            dst = os.path.dirname(dstfile)

            LOG.info('Calculating and comparing volume checksums on the '
                     'source and destination clouds.')
            if src_size == dst_size:
                src_md5 = self.checksum(self.clouds[SRC], src)
                dst_md5 = self.checksum(self.clouds[DST], dstfile)
                if src_md5 == dst_md5:
                    LOG.info(
                        "Destination file %s is up-to-date. "
                        "Sizes and checksums are matched.", dstfile)
                    return dst, 0

            LOG.info('Checksums are different. Start copying volume %s(%s)',
                     volume.get('display_name', ''), volume['id'])
            start_time = time.time()
            if self.transfer_if_enough_space(src_size - dst_size, src, dst):
                elapsed_time = time.time() - start_time
                return dst, elapsed_time
            else:
                LOG.info(
                    'Copying volume %s(%s) failed. '
                    'Volume will be deleted.', volume.get('display_name', ''),
                    volume['id'])
                self._clean(self.clouds[DST], dstfile)

        for dst in dstpaths:
            start_time = time.time()
            res = self.transfer_if_enough_space(src_size, src, dst)
            elapsed_time = time.time() - start_time
            if res:
                return dst, elapsed_time
        raise exception.AbortMigrationError('No space found for %s on %s' %
                                            (str(volume), str(dstpaths)))
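# A sketch of the size-then-checksum comparison used above, with local
# files standing in for the SRC/DST paths (the real code computes md5 on
# the remote clouds).
import hashlib
import os

def files_match(path_a, path_b):
    if os.path.getsize(path_a) != os.path.getsize(path_b):
        return False  # cheap size check first, as in _transfer
    def md5(path):
        h = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest()
    return md5(path_a) == md5(path_b)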
 def _check_opts_tenant(self, opts):
     ident_resource = self.cloud.resources[utl.IDENTITY_RESOURCE]
     if opts and opts.get('tenant_id'):
         tenants = opts['tenant_id']
         if len(tenants) > 1:
             raise exception.AbortMigrationError(
                 'More than one tenant in tenant filters is not supported.')
         for tenant_id in tenants:
             LOG.debug('Filtered tenant id: %s', tenant_id)
             try:
                 with proxy_client.expect_exception(keystone_exc.NotFound):
                     tenant = ident_resource.keystone_client.tenants.find(
                         id=tenant_id)
                 if tenant:
                     LOG.debug('Filter config check: Tenant ID %s is OK',
                               tenant_id)
             except keystone_exc.NotFound:
                 LOG.error(
                     'Filter config check: Tenant ID %s '
                     'is not present in source cloud, '
                     'please update your filter config. Aborting.',
                     tenant_id)
                 raise
    def run(self, **kwargs):
        if self.cfg.migrate.migrate_users:
            LOG.info("Users will be migrated. Skipping this check.")
            return
        src_identity = self.src_cloud.resources[utils.IDENTITY_RESOURCE]
        dst_identity = self.dst_cloud.resources[utils.IDENTITY_RESOURCE]

        src_keystone_client = src_identity.get_client()
        dst_keystone_client = dst_identity.get_client()

        LOG.info("Going to get all users from source cloud, this may take a "
                 "while for large LDAP-backed clouds, please be patient")

        src_users = src_keystone_client.users.list()
        dst_users = dst_keystone_client.users.list()

        src_user_names = {
            name.lower(): name
            for name in map(attrgetter('name'), src_users)
        }
        dst_user_names = {
            name.lower(): name
            for name in map(attrgetter('name'), dst_users)
        }

        users_missing_on_dst = \
            set(src_user_names.keys()) - set(dst_user_names.keys())

        if users_missing_on_dst:
            msg = "{n} missing users on destination: {users}".format(
                n=len(users_missing_on_dst),
                users=", ".join(src_user_names[key]
                                for key in users_missing_on_dst))
            LOG.error(msg)
            raise cf_exceptions.AbortMigrationError(msg)

        LOG.info("All users are available on source, migration can proceed")
    def get_filters(self):
        is_public = public_filter()
        is_active = active_filter()
        is_datetime = datetime_filter(self.filter_yaml.get_image_date())
        is_tenant = tenant_filter(self.filter_yaml.get_tenant())

        images_list = self.filter_yaml.get_image_ids()
        excluded_images_list = self.filter_yaml.get_excluded_image_ids()

        if images_list and excluded_images_list:
            raise exception.AbortMigrationError(
                "Both 'images_list' and 'exclude_images_list' are "
                "specified in the filter config file. Specify only one "
                "of them.")

        if excluded_images_list:
            is_image_id = image_id_exclude_filter(excluded_images_list)
        else:
            is_image_id = image_id_filter(images_list)

        is_member = member_filter(self.glance_client,
                                  self.filter_yaml.get_tenant())

        if self.filter_yaml.is_public_and_member_images_filtered():
            return [
                lambda i: (is_active(i) and is_tenant(i) and is_image_id(i) and
                           is_datetime(i))
            ]
        else:
            return [
                lambda i: ((is_active(i) and is_public(i)) or
                           (is_active(i) and is_member(i)) or
                           (is_active(i) and is_tenant(i) and
                            is_image_id(i) and is_datetime(i)))
            ]
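# Hypothetical usage of the returned filter list: an image is kept only if
# every composed predicate accepts it ('images' is assumed here).
#
#     filters = self.get_filters()
#     kept = [img for img in images if all(f(img) for f in filters)]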
    def _try_copy_volumes(self):
        vt_map = self._vt_map()

        failed = []

        volumes_size_map = self._volumes_size_map()
        view = cinder_storage_view.CinderStorageMigrationProgressView(
            self.data[SRC]['volumes'], self.data[DST]['volumes'],
            volumes_size_map)

        view.show_stats()
        for v in self.data[SRC]['volumes']:
            LOG.info('Starting migration of volume %s(%s)',
                     v.get('display_name', ''), v['id'])

            volume_type_id = v.get('volume_type_id', None)
            srcpaths = self._paths(SRC, volume_type_id)
            LOG.debug('srcpaths: %s', str(srcpaths))

            if volume_type_id in vt_map:
                # src -> dst
                v['volume_type_id'] = vt_map.get(volume_type_id, None)
            else:
                v['volume_type_id'] = None
            LOG.debug('Vt map: %s', str(vt_map))

            dstpaths = self._paths(DST, v['volume_type_id'])
            if not dstpaths:
                err_msg = 'No mount found on DST Cloud'
                if v['volume_type_id']:
                    err_msg += ' for volume type: %s' % v['volume_type_id']
                raise exception.AbortMigrationError(err_msg)

            LOG.debug('dstpaths: %s', str(dstpaths))

            src = self.find_dir(SRC, srcpaths, v)
            if not src:
                raise exception.AbortMigrationError(
                    'No SRC volume file found for %s[%s]' %
                    (v.get('display_name', None), v['id']))
            dst, elapsed_time = self._transfer(src, dstpaths, v,
                                               volumes_size_map[v['id']])

            if dst:
                v['provider_location'] = self._dir_to_provider(dst)
                vtid = self._provider_to_vtid(v['provider_location'])
                v[HOST] = self._dst_host(vtid)
                view.sync_migrated_volumes_info(v, elapsed_time)
            else:
                failed.append(v)
                view.sync_failed_volumes_info(v)

            view.show_progress()

        if failed:
            LOG.error(
                'Migration failed for volumes: %s', ', '.join(
                    ["%s(%s)" % (v['display_name'], v['id']) for v in failed]))
            self.data[SRC]['volumes'] = [
                v for v in self.data[SRC]['volumes'] if v not in failed
            ]

        return failed
    def run(self, **kwargs):
        """Check write access to cloud."""
        ident_resource = self.dst_cloud.resources[utl.IDENTITY_RESOURCE]
        image_res = self.cloud.resources[utl.IMAGE_RESOURCE]
        compute_resource = self.cloud.resources[utl.COMPUTE_RESOURCE]
        volume_resource = self.cloud.resources[utl.STORAGE_RESOURCE]
        net_resource = self.cloud.resources[utl.NETWORK_RESOURCE]
        adm_tenant_name = self.cloud.cloud_config.cloud.tenant
        adm_tenant_id = ident_resource.get_tenant_id_by_name(adm_tenant_name)
        tenant_name = 'test_name'
        flavor_id = 'c0c0c0c0'
        err_message = 'Failed to create object in the cloud'
        tenant = [{
            'meta': {},
            'tenant': {
                'name': tenant_name,
                'description': None
            }
        }]
        flavor = {
            flavor_id: {
                'flavor': {
                    'name': 'test_flavor',
                    'is_public': True,
                    'ram': '1',
                    'vcpus': '1',
                    'disk': '1',
                    'ephemeral': '1',
                    'swap': '1',
                    'rxtx_factor': '1'
                },
                'meta': {
                    'id': flavor_id
                }
            }
        }
        try:
            ident_resource._deploy_tenants(tenant)
            tenant_id = ident_resource.get_tenant_id_by_name(tenant_name)
            compute_resource._deploy_flavors(flavor, None)
        except (ks_exc.ClientException, nova_exc.ClientException):
            LOG.error(err_message)
            raise exception.AbortMigrationError(err_message)
        migrate_image = image_res.create_image(
            name='test_image',
            container_format='bare',
            disk_format='qcow2',
            is_public=True,
            protected=False,
            owner=adm_tenant_id,
            size=4,
            properties={'user_name': 'test_user_name'},
            data='test')

        # Creating private network
        private_network_info = {
            'network': {
                'tenant_id': tenant_id,
                'admin_state_up': False,
                'shared': False,
                'name': 'private_test_net',
                'router:external': False
            }
        }
        private_network_id = net_resource.neutron_client.create_network(
            private_network_info)['network']['id']

        # Creating subnet for private network
        subnet_info = {
            'subnet': {
                'name': 'test_subnet',
                'network_id': private_network_id,
                'cidr': '192.168.1.0/24',
                'ip_version': 4,
                'tenant_id': tenant_id,
            }
        }

        net_resource.neutron_client.create_subnet(subnet_info)

        nics = [{'net-id': private_network_id}]

        info = {
            'instances': {
                'a0a0a0a': {
                    'instance': {
                        'id': 'a0a0a0a0',
                        'name': 'test_vm',
                        'image_id': migrate_image.id,
                        'flavor_id': flavor_id,
                        'key_name': '1',
                        'nics': nics,
                        'user_id': '1',
                        'boot_mode': utl.BOOT_FROM_IMAGE,
                        'availability_zone': 'nova',
                        'tenant_name': adm_tenant_name
                    }
                }
            },
            'volumes': {
                'd0d0d0d0': {
                    'volume': {
                        'availability_zone': 'nova',
                        'display_description': None,
                        'id': 'd0d0d0d0',
                        'size': 1,
                        'display_name': 'test_volume',
                        'bootable': False,
                        'volume_type': None
                    },
                    'meta': {},
                }
            }
        }
        vol_new_ids = volume_resource.deploy_volumes(info)
        volume_resource.cinder_client.volumes.delete(vol_new_ids.keys()[0])
        network_info = {
            'network': {
                'tenant_id': adm_tenant_id,
                'admin_state_up': True,
                'shared': True,
                'name': 'test_net',
                'router:external': True
            }
        }
        new_net_id = net_resource.neutron_client.create_network(
            network_info)['network']['id']
        net_resource.neutron_client.delete_network(new_net_id)
        vm_new_ids = compute_resource._deploy_instances(info)
        if (not vm_new_ids or not vol_new_ids or not migrate_image
                or not new_net_id):
            LOG.error(err_message)
            raise exception.AbortMigrationError(err_message)
        compute_resource.nova_client.servers.delete(vm_new_ids.keys()[0])
        image_res.glance_client.images.delete(migrate_image.id)
        compute_resource.nova_client.flavors.delete(flavor_id)
        ident_resource.keystone_client.tenants.delete(tenant_id)

        # delete private network and subnet
        net_resource.neutron_client.delete_network(private_network_id)
    def run(self, **kwargs):
        LOG.debug("Checking networks...")
        overlapping_resources = {}
        invalid_resources = {}

        src_net = self.src_cloud.resources[utils.NETWORK_RESOURCE]
        dst_net = self.dst_cloud.resources[utils.NETWORK_RESOURCE]
        src_compute = self.src_cloud.resources[utils.COMPUTE_RESOURCE]

        search_opts = kwargs.get('search_opts_tenant', {})
        search_opts.update({'search_opts': kwargs.get('search_opts', {})})

        LOG.debug("Retrieving Network information from Source cloud...")
        ports = src_net.get_ports_list()
        src_net_info = NetworkInfo(src_net.read_info(**search_opts), ports)
        LOG.debug("Retrieving Network information from Destination cloud...")
        dst_net_info = NetworkInfo(dst_net.read_info())
        LOG.debug("Retrieving Compute information from Source cloud...")
        src_compute_info = ComputeInfo(src_compute, search_opts)

        ext_net_map = utils.read_yaml_file(self.cfg.migrate.ext_net_map) or {}

        # Check external networks mapping
        if ext_net_map:
            LOG.info("Check external networks mapping...")
            invalid_ext_net_ids = src_net_info.get_invalid_ext_net_ids(
                dst_net_info, ext_net_map)
            if invalid_ext_net_ids:
                invalid_resources.update(
                    {"invalid_external_nets_ids_in_map": invalid_ext_net_ids})

        # Check subnets and segmentation IDs overlap
        LOG.info("Check networks overlapping...")
        nets_overlapping_subnets, nets_overlapping_seg_ids = (
            src_net_info.get_overlapping_networks(dst_net_info))
        if nets_overlapping_subnets:
            overlapping_resources.update(
                {'networks_with_overlapping_subnets':
                    nets_overlapping_subnets})
        if nets_overlapping_seg_ids:
            LOG.warning("Networks with segmentation IDs overlapping:\n%s",
                        nets_overlapping_seg_ids)

        # Check external subnets overlap
        LOG.info("Check external subnets overlapping...")
        overlapping_external_subnets = (
            src_net_info.get_overlapping_external_subnets(dst_net_info,
                                                          ext_net_map))
        if overlapping_external_subnets:
            overlapping_resources.update(
                {"overlapping_external_subnets": overlapping_external_subnets})

        # Check floating IPs overlap
        LOG.info("Check floating IPs overlapping...")
        floating_ips = src_net_info.list_overlapping_floating_ips(dst_net_info,
                                                                  ext_net_map)
        if floating_ips:
            overlapping_resources.update(
                {'overlapping_floating_ips': floating_ips})

        # Check busy physical networks on DST of FLAT network type
        LOG.info("Check busy physical networks for FLAT network type...")
        busy_flat_physnets = src_net_info.busy_flat_physnets(dst_net_info)
        if busy_flat_physnets:
            overlapping_resources.update(
                {'busy_flat_physnets': busy_flat_physnets})

        # Check physical networks existence on DST for VLAN network type
        LOG.info("Check physical networks existence for VLAN network type...")
        dst_neutron_client = dst_net.neutron_client
        missing_vlan_physnets = src_net_info.missing_vlan_physnets(
            dst_net_info, dst_neutron_client)
        if missing_vlan_physnets:
            overlapping_resources.update(
                {'missing_vlan_physnets': missing_vlan_physnets})

        # Check VMs spawned directly in external network
        LOG.info("Check VMs spawned directly in external networks...")
        devices = src_net_info.get_devices_from_external_networks()
        vms_list = src_compute_info.list_vms_in_external_network(devices)
        if vms_list:
            overlapping_resources.update({'vms_in_external_network': vms_list})

        # Log all overlapping/invalid resources and abort the migration
        if overlapping_resources or invalid_resources:
            if overlapping_resources:
                LOG.critical('Network overlapping list:\n%s',
                             overlapping_resources)
            if invalid_resources:
                LOG.critical('Invalid Network resources list:\n%s',
                             invalid_resources)
            raise exception.AbortMigrationError(
                "There is a number of overlapping/invalid Network resources, "
                "so migration process can not be continued. Resolve it please "
                "and try again")
    def run(self, **kwargs):
        if not kwargs.get('info'):
            raise exception.AbortMigrationError(
                "No information from destination cloud."
                "Something has been broken on early steps.")
        dst_info = kwargs['info']
        search_opts = {'search_opts': kwargs.get('search_opts', {})}
        search_opts.update(kwargs.get('search_opts_tenant', {}))
        src_info = kwargs['info_backup']
        old_ids = set(dst_inst['meta']['old_id']
                      for dst_inst in dst_info['instances'].values())
        dst_cmp_info = {}
        inst_cnt = 0
        for dst_inst in dst_info['instances'].values():
            old_id = dst_inst['meta']['old_id']
            dst_cmp_info[old_id] = {}
            dst_inst_ = dst_inst['instance']
            dst_cmp_info[old_id].update({'name': dst_inst_['name']})
            dst_cmp_info[old_id].update(
                {'flav_details': dst_inst_['flav_details']})
            dst_cmp_info[old_id].update({'key_name': dst_inst_['key_name']})
            dst_cmp_info[old_id].update(
                {'interfaces': dst_inst_['interfaces']})
            dst_volumes = dst_inst['meta']['volume']
            new_dst_volumes = []
            for dst_vol in dst_volumes:
                new_dst_volumes.append(dst_vol['volume'])
            dst_cmp_info[old_id].update({'volumes': new_dst_volumes})

            dst_cmp_info[old_id].update(
                {'server_group': dst_inst_['server_group']})

            inst_cnt += 1
        failed_vms = []
        for src_inst_id in src_info['instances']:
            if ((src_inst_id not in old_ids)
                    or (src_inst_id not in dst_cmp_info)):
                failed_vms.append(src_inst_id)
            else:
                dst_cmp_inst = dst_cmp_info[src_inst_id]
                src_inst_info = src_info['instances'][src_inst_id]['instance']
                if src_inst_info['name'] != dst_cmp_inst['name']:
                    LOG.warning("Wrong name of instance %s on DST",
                                src_inst_id)
                    failed_vms.append(src_inst_id)
                if (src_inst_info['flav_details'] !=
                        dst_cmp_inst['flav_details']):
                    LOG.warning("Wrong flav_details of instance %s on DST",
                                src_inst_id)
                if src_inst_info['key_name'] != dst_cmp_inst['key_name']:
                    LOG.warning("Wrong key_name of instance %s on DST",
                                src_inst_id)
                    failed_vms.append(src_inst_id)
                if (sorted(src_inst_info['interfaces']) != sorted(
                        dst_cmp_inst['interfaces'])):
                    LOG.warning("Wrong interfaces of instance %s on DST",
                                src_inst_id)
                    failed_vms.append(src_inst_id)
                if src_inst_info['volumes'] != dst_cmp_inst['volumes']:
                    LOG.warning("Wrong volumes of instance %s on DST",
                                src_inst_id)

                # Verify that migrated VM belongs to correct server group
                if (src_inst_info['server_group'] !=
                        dst_cmp_inst['server_group']):
                    LOG.warning(
                        "Wrong server group of instance '%s' on DST! "
                        "SRC server group: '%s', "
                        "DST server group: '%s'.", src_inst_id,
                        src_inst_info['server_group'],
                        dst_cmp_inst['server_group'])

        if failed_vms:
            LOG.warning("Instances were not migrated:")
            for vm in failed_vms:
                LOG.warning("%s", vm)
            return False
        LOG.debug(
            "Compared instance names, flavors, "
            "interfaces, volumes and key names. "
            "Number of migrated instances: %s", inst_cnt)
        return True
    def deploy(self, info):
        LOG.info("Glance images deployment started...")
        info = copy.deepcopy(info)
        new_info = {'images': {}}
        migrate_images_list = []
        delete_container_format, delete_disk_format = [], []
        empty_image_list = {}

        # List of obsolete/broken image IDs that will not be migrated
        obsolete_images_ids_list = []

        for image_id_src, gl_image in info['images'].iteritems():
            if gl_image['image'] and gl_image['image']['resource']:
                dst_img_checksums = {
                    x.checksum: x
                    for x in self.get_image_list()
                }
                dst_img_names = [x.name for x in self.get_image_list()]
                checksum_current = gl_image['image']['checksum']
                name_current = gl_image['image']['name']
                meta = gl_image['meta']
                if checksum_current in dst_img_checksums and (
                        name_current) in dst_img_names:
                    migrate_images_list.append(
                        (dst_img_checksums[checksum_current], meta))
                    continue

                LOG.debug("Updating owner '{owner}' of image '{image}'".format(
                    owner=gl_image["image"]["owner_name"],
                    image=gl_image["image"]["name"]))
                gl_image["image"]["owner"] = \
                    self.identity_client.get_tenant_id_by_name(
                    gl_image["image"]["owner_name"])
                del gl_image["image"]["owner_name"]

                if gl_image["image"]["properties"]:
                    # update snapshot metadata
                    metadata = gl_image["image"]["properties"]
                    if "owner_id" in metadata:
                        # update tenant id
                        LOG.debug("updating snapshot metadata for field "
                                  "'owner_id' for image {image}".format(
                                      image=gl_image["image"]["id"]))
                        metadata["owner_id"] = gl_image["image"]["owner"]
                    if "user_id" in metadata:
                        # update user id by specified name
                        LOG.debug("updating snapshot metadata for field "
                                  "'user_id' for image {image}".format(
                                      image=gl_image["image"]["id"]))
                        metadata["user_id"] = \
                            self.identity_client.keystone_client.users.find(
                                username=metadata["user_name"]).id
                        del metadata["user_name"]
                if gl_image["image"]["checksum"] is None:
                    LOG.warning("re-creating image {} "
                                "from original source URL".format(
                                    gl_image["image"]["id"]))
                    if meta['img_loc'] is not None:
                        self.glance_img_create(
                            gl_image['image']['name'],
                            gl_image['image']['disk_format'] or "qcow2",
                            meta['img_loc'])
                        recreated_image = utl.ext_dict(
                            name=gl_image["image"]["name"])
                        migrate_images_list.append(
                            (recreated_image, gl_image['meta']))
                    else:
                        raise exception.AbortMigrationError(
                            "image information has no original source URL")
                    continue

                LOG.debug("Creating image '{image}' ({image_id})".format(
                    image=gl_image["image"]["name"],
                    image_id=gl_image['image']['id']))
                # We can face a situation when an image has no disk_format
                # and container_format properties. This happens when the
                # image was created with the --copy-from option.
                # glance-client cannot create an image without these
                # properties, so we set them artificially and then delete
                # them from the database.

                try:
                    migrate_image = self.create_image(
                        name=gl_image['image']['name'],
                        container_format=(gl_image['image']['container_format']
                                          or "bare"),
                        disk_format=(gl_image['image']['disk_format']
                                     or "qcow2"),
                        is_public=gl_image['image']['is_public'],
                        protected=gl_image['image']['protected'],
                        owner=gl_image['image']['owner'],
                        size=gl_image['image']['size'],
                        properties=gl_image['image']['properties'],
                        data=file_like_proxy.FileLikeProxy(
                            gl_image['image'],
                            self.config['migrate']['speed_limit']))
                    LOG.debug("new image ID {}".format(migrate_image.id))
                except exception.ImageDownloadError:
                    LOG.warning(
                        "Unable to reach image's data due to "
                        "Glance HTTPInternalServerError. Skipping "
                        "image: (id = %s)", gl_image["image"]["id"])
                    obsolete_images_ids_list.append(gl_image["image"]["id"])
                    continue

                migrate_images_list.append((migrate_image, meta))
                if not gl_image["image"]["container_format"]:
                    delete_container_format.append(migrate_image.id)
                if not gl_image["image"]["disk_format"]:
                    delete_disk_format.append(migrate_image.id)
            elif gl_image['image']['resource'] is None:
                recreated_image = utl.ext_dict(name=gl_image["image"]["name"])
                migrate_images_list.append((recreated_image, gl_image['meta']))
            elif not gl_image['image']:
                empty_image_list[image_id_src] = gl_image

        # Remove obsolete/broken images from info
        for img_id in obsolete_images_ids_list:
            info['images'].pop(img_id)

        if migrate_images_list:
            im_name_list = [(im.name, tmp_meta)
                            for (im, tmp_meta) in migrate_images_list]
            LOG.debug("images on destination: {}".format(
                [im for (im, tmp_meta) in im_name_list]))
            new_info = self.read_info(images_list_meta=im_name_list)
        new_info['images'].update(empty_image_list)
        # At this step we create a map between source and destination
        # image IDs
        LOG.debug("creating map between source and destination image ids")
        image_ids_map = {}
        dst_img_checksums = {x.checksum: x.id for x in self.get_image_list()}
        for image_id_src, gl_image in info['images'].iteritems():
            cur_image = gl_image["image"]
            image_ids_map[cur_image["id"]] = \
                dst_img_checksums[cur_image["checksum"]]
        LOG.debug("deploying image members")
        for image_id, data in info.get("members", {}).items():
            for tenant_name, can_share in data.items():
                LOG.debug("deploying image member for image {image}"
                          " tenant {tenant}".format(image=image_id,
                                                    tenant=tenant_name))
                self.create_member(image_ids_map[image_id], tenant_name,
                                   can_share)
        self.delete_fields('disk_format', delete_disk_format)
        self.delete_fields('container_format', delete_container_format)
        LOG.info("Glance images deployment finished.")
        return new_info
    def deploy(self, info, *args, **kwargs):
        LOG.info("Glance images deployment started...")
        info = copy.deepcopy(info)
        created_images = []
        delete_container_format, delete_disk_format = [], []
        empty_image_list = {}

        # List of obsolete/broken image IDs that will not be migrated
        obsolete_images_ids_list = []
        dst_images = self._dst_images()

        view = GlanceImageProgessMigrationView(info['images'], dst_images)
        view.show_info()
        for image_id_src in info['images']:
            img = info['images'][image_id_src]['image']
            meta = info['images'][image_id_src]['meta']
            if img and img['resource']:
                checksum_current = img['checksum']
                name_current = img['name']
                tenant_name = img['owner_name']
                image_key = (name_current, tenant_name, checksum_current,
                             img['is_public'])

                if image_key in dst_images:
                    existing_image = dst_images[image_key]
                    created_images.append((existing_image, meta))
                    image_members = img['members'].get(img['id'], {})
                    self.update_membership(existing_image.id, image_members)
                    LOG.info(
                        "Image '%s' is already present on destination, "
                        "skipping", img['name'])
                    continue

                view.show_progress()
                view.inc_progress(img['size'])

                LOG.debug("Updating owner '%s' of image '%s'", tenant_name,
                          img["name"])
                img["owner"] = \
                    self.identity_client.get_tenant_id_by_name(tenant_name)

                if img["properties"]:
                    # update snapshot metadata
                    metadata = img["properties"]
                    if "owner_id" in metadata:
                        # update tenant id
                        LOG.debug(
                            "Updating snapshot metadata for field "
                            "'owner_id' for image %s", img["id"])
                        metadata["owner_id"] = img["owner"]
                    if "user_name" in metadata:
                        # update user id by specified name
                        LOG.debug(
                            "Updating snapshot metadata for field "
                            "'user_id' for image %s", img["id"])
                        try:
                            ks_client = self.identity_client.keystone_client
                            metadata["user_id"] = ks_client.users.find(
                                username=metadata["user_name"]).id
                            del metadata["user_name"]
                        except keystone_exceptions.NotFound:
                            LOG.warning("Cannot update user name for image %s",
                                        img['name'])
                if img["checksum"] is None:
                    LOG.warning(
                        "re-creating image %s from original source "
                        "URL", img["id"])
                    if meta['img_loc'] is not None:
                        self.create_image(
                            id=img['id'],
                            name=img['name'],
                            disk_format=img['disk_format'] or "qcow2",
                            location=meta['img_loc'],
                            container_format=img['container_format'] or 'bare',
                        )

                        recreated_image = utl.ext_dict(name=img["name"])
                        created_images.append((recreated_image, meta))
                    else:
                        raise exception.AbortMigrationError(
                            "image information has no original source URL")
                    continue

                LOG.debug("Creating image '%s' (%s)", img["name"], img['id'])
                # We can face a situation when an image has no disk_format
                # and container_format properties. This happens when the
                # image was created with the --copy-from option.
                # glance-client cannot create an image without these
                # properties, so we set them artificially and then delete
                # them from the database.

                try:
                    file_obj = img['resource'].get_ref_image(img['id'])
                    data_proxy = file_proxy.FileProxy(file_obj,
                                                      name="image %s ('%s')" %
                                                      (img['name'], img['id']),
                                                      size=img['size'])

                    created_image = self.create_image(
                        id=img['id'],
                        name=img['name'],
                        container_format=(img['container_format'] or "bare"),
                        disk_format=(img['disk_format'] or "qcow2"),
                        is_public=img['is_public'],
                        protected=img['protected'],
                        owner=img['owner'],
                        size=img['size'],
                        properties=img['properties'],
                        data=data_proxy)

                    image_members = img['members'].get(img['id'], {})
                    LOG.debug("new image ID %s", created_image.id)
                    self.update_membership(created_image.id, image_members)
                    created_images.append((created_image, meta))
                except (exception.ImageDownloadError, httplib.IncompleteRead,
                        glance_exceptions.HTTPInternalServerError) as e:
                    LOG.debug(e, exc_info=True)
                    LOG.warning(
                        "Unable to reach image's data due to "
                        "Glance HTTPInternalServerError. Skipping "
                        "image: %s (%s)", img['name'], img["id"])
                    obsolete_images_ids_list.append(img["id"])
                    continue

                if not img["container_format"]:
                    delete_container_format.append(created_image.id)
                if not img["disk_format"]:
                    delete_disk_format.append(created_image.id)
            elif img['resource'] is None:
                recreated_image = utl.ext_dict(name=img["name"])
                created_images.append((recreated_image, meta))
            elif not img:
                empty_image_list[image_id_src] = info['images'][image_id_src]

        view.show_progress()
        if obsolete_images_ids_list:
            LOG.warning('List of broken images: %s', obsolete_images_ids_list)
            # Remove obsolete/broken images from info
            for img_id in obsolete_images_ids_list:
                info['images'].pop(img_id)

        return self._new_info(created_images, empty_image_list,
                              delete_disk_format, delete_container_format)