Example #1
    def check_tenants_amount(self):
        if len(self.ids_list) > 1:
            raise exception.AbortMigrationError(
                'More than one tenant in the filter config file is not '
                'supported. Aborting migration...')
        elif len(self.ids_list) < 1:
            raise exception.AbortMigrationError(
                "Tenant ID is not specified in the filter config file. Please"
                " either specify it or use 'migrate_whole_cloud = True' in the"
                " main config file for the whole cloud migration.")
Example #2
    def run(self, **kwargs):
        if self.cfg.migrate.migrate_users:
            LOG.info("Users will be migrated. Skipping this check.")
            return
        src_identity = self.src_cloud.resources[utils.IDENTITY_RESOURCE]
        dst_identity = self.dst_cloud.resources[utils.IDENTITY_RESOURCE]

        src_keystone_client = src_identity.get_client()
        dst_keystone_client = dst_identity.get_client()

        LOG.info("Going to get all users from source cloud, this may take a "
                 "while for large LDAP-backed clouds, please be patient")

        src_users = src_keystone_client.users.list()
        dst_users = dst_keystone_client.users.list()

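        # Compare user names case-insensitively, but keep the original
        # spelling so error messages show the real names.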
        src_user_names = {usr.name.lower(): usr.name for usr in src_users}
        dst_user_names = {usr.name.lower(): usr.name for usr in dst_users}

        users_missing_on_dst = \
            set(src_user_names.keys()) - set(dst_user_names.keys())

        if users_missing_on_dst:
            msg = "{n} missing users on destination: {users}".format(
                n=len(users_missing_on_dst),
                users=", ".join(src_user_names[key]
                                for key in users_missing_on_dst))
            LOG.error(msg)
            raise cf_exceptions.AbortMigrationError(msg)

        LOG.info("All users are available on source, migration can proceed")
Example #3
    def _mount_output(self, position, vt=None, dirs_only=False):
        if dirs_only:
            print_cmd = "{print $3}}'; done"
        else:
            print_cmd = "{print $3\"%s\"$1}}'; done" % MOUNT_DELIM

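        # Prefer the mount output recorded for this volume type, fall back to
        # the DEFAULT entry, and as a last resort discover mounted NFS shares
        # by running the awk command below on the corresponding cloud host.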
        res = None
        if vt:
            res = self._mount_output_all(
                position, dirs_only=dirs_only).get(vt['id'], None)
        if not res:
            res = self._mount_output_all(
                position, dirs_only=dirs_only).get(DEFAULT, None)
        if not res:
            # default nfs_shares_config
            cmd = (
                "for exp in "
                "$(awk -F'[ =\t]+' '"
            )
            cmd += \
                AWK_GET_MOUNTED_LAST_NFS_SHARES % self.storage[position].conf
            cmd += print_cmd
            res = self._run_cmd(self.clouds[position], cmd)
            res = set(res if isinstance(res, list) else [res])
        if not res:
            raise exception.AbortMigrationError(
                'No NFS share found on "%s"' % position)
        return res
Example #4
    def _ssh_connectivity_between_controllers(self):
        src_host = self.cfg.src.ssh_host
        src_user = self.cfg.src.ssh_user
        dst_host = self.cfg.dst.ssh_host
        dst_user = self.cfg.dst.ssh_user

        LOG.info("Checking ssh connectivity between '%s' and '%s'", src_host,
                 dst_host)

        rr = remote_runner.RemoteRunner(src_host, src_user)

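        # Disable host key checks so the connectivity test cannot block on an
        # interactive prompt.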
        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')

        cmd = "ssh {opts} {user}@{host} 'echo ok'".format(opts=ssh_opts,
                                                          user=dst_user,
                                                          host=dst_host)

        try:
            rr.run(cmd)
        except remote_runner.RemoteExecutionError:
            msg = ("No ssh connectivity between source host '{src_host}' and "
                   "destination host '{dst_host}'. Make sure you have keys "
                   "and correct configuration on these nodes. To verify run "
                   "'{ssh_cmd}' from '{src_host}' node")
            msg = msg.format(src_host=src_host, dst_host=dst_host, ssh_cmd=cmd)
            LOG.error(msg)
            raise exception.AbortMigrationError(msg)
Example #5
    def check_floating_ips_overlapping(self, dst_floating_ip):
        """
        Check if Floating IP overlaps with DST.

        Parameters to compare:
        - same floating ip address;
        - same tenant;
        - same network;
        - same network's tenant.

        Also check that this Floating IP is not busy, i.e. not associated
        with a VM on both SRC and DST at the same time.

        :param dst_floating_ip: DST FloatingIp instance.

        :raise AbortMigrationError: If FloatingIp overlaps with the DST.
        """

        # Check association with VMs on SRC and DST at the same time
        ports_overlap = self.port_id and dst_floating_ip.port_id

        if not self == dst_floating_ip or ports_overlap:
            message = ("Floating IP '%s' overlaps with the same IP on DST." %
                       self.address)
            LOG.error(message)
            raise exception.AbortMigrationError(message)
Example #6
    def run(self, **kwargs):

        if self.cfg.migrate.migrate_whole_cloud:
            LOG.info("Whole cloud migration is enabled. Ignore filtering...")
            return

        filter_path = self.cfg.migrate.filter_path

        if not utils.check_file(filter_path):
            raise exception.AbortMigrationError(
                "Filter file '%s' has not been found. Please check filter file"
                " path in the CloudFerry configuration file." % filter_path)

        if not utils.read_yaml_file(filter_path):
            raise exception.AbortMigrationError("Filter file '%s' is empty." %
                                                filter_path)

        try:
            tenant_opts = kwargs['search_opts_tenant']
            instance_opts = kwargs['search_opts']
            volume_opts = kwargs['search_opts_vol']
            image_opts = kwargs['search_opts_img']
        except KeyError:
            raise exception.AbortMigrationError(
                "Action 'act_get_filter' should be specified prior this action"
                " in the scenario file. Aborting migration...")

        tenant = Tenant(self.cloud, tenant_opts)
        instance = Instance(self.cloud, instance_opts)
        volume = Volume(self.cloud, volume_opts)
        image = Image(self.cloud, image_opts)

        invalid_data = {}
        for filter_object in [tenant, instance, volume, image]:
            invalid_data.update(filter_object.check())

        # Filter only non-empty values
        invalid_data = {k: v for k, v in invalid_data.iteritems() if v}

        if invalid_data:
            msg = "\n\nInvalid Filter Data:\n\n%s" % yaml.dump(invalid_data)
            LOG.critical(msg)
            raise exception.AbortMigrationError(
                "There is a number of invalid data specified in the filter "
                "file '%s', so migration process can not be continued. Please "
                "update your filter config file and try again. %s" %
                (filter_path, msg))
Example #7
def check_affinity_api(cloud):
    compute_resource = cloud.resources[utils.COMPUTE_RESOURCE]
    with proxy_client.expect_exception(nova_exceptions.NotFound):
        try:
            compute_resource.nova_client.server_groups.list()
        except nova_exceptions.NotFound:
            raise cf_exceptions.AbortMigrationError(
                "'%s' cloud does not support affinity/anti-affinity "
                "(Nova server groups) API." % cloud.position)
Example #8
    def check_conflict(self):
        if not self.opts:
            return

        if (self.opts.get('images_list')
                and self.opts.get('exclude_images_list')):
            raise exception.AbortMigrationError(
                "Options 'images_list' and 'exclude_images_list' can not be "
                "specified together at the same time in the filter file. "
                "Should be only one of them. Aborting migration...")
Example #9
    def run(self, *args, **kwargs):
        """Run TransportVolumes Action."""
        data_from_namespace = kwargs.get(NAMESPACE_CINDER_CONST)
        if not data_from_namespace:
            raise exception.AbortMigrationError(
                "Cannot read attribute {attribute} from namespace".format(
                    attribute=NAMESPACE_CINDER_CONST))

        data = data_from_namespace
        self.get_resource().deploy(data)
Example #10
    def _iscsiadm_is_installed_locally(self):
        LOG.info("Checking if iscsiadm tool is installed")
        try:
            local.run('iscsiadm --help &>/dev/null')
        except local.LocalExecutionFailed:
            msg = ("iscsiadm is not available on the local host. Please "
                   "install the iscsiadm tool on the node you are running "
                   "on, or choose another cinder backend for migration. "
                   "iscsiadm is mandatory for migrations with the EMC VMAX "
                   "cinder backend")
            LOG.error(msg)
            raise exception.AbortMigrationError(msg)
Example #11
    def check_quotas(self, cloud):
        compute_resource = cloud.resources[utils.COMPUTE_RESOURCE]
        keystone_resource = cloud.resources[utils.IDENTITY_RESOURCE]
        tenant = cloud.cloud_config['cloud']['tenant']
        ten_id = keystone_resource.get_tenant_id_by_name(tenant)
        with proxy_client.expect_exception(nova_exceptions.ClientException):
            try:
                compute_resource.nova_client.quotas.update(ten_id)
            except nova_exceptions.ClientException:
                raise cf_exceptions.AbortMigrationError(
                    "'%s' cloud does not support quotas "
                    "(Nova quotas)." % cloud.position)
Example #12
    def get_resource(self):
        """
        Get cinder-volume resource.

        :return: cinder_database resource

        """
        cinder_resource = self.cloud.resources.get(
            utils.STORAGE_RESOURCE)
        if not cinder_resource:
            raise exception.AbortMigrationError(
                "No resource {res} found".format(res=utils.STORAGE_RESOURCE))
        return cinder_resource
Example #13
    def _check_local_sudo_password_set(self):
        current_user = getpass.getuser()
        if current_user != 'root' and \
                self.cfg.migrate.local_sudo_password is None:
            try:
                local.sudo('ls')
            except local.LocalExecutionFailed:
                msg = ("CloudFerry is running as '{user}' user, but "
                       "passwordless sudo does not seem to be configured on "
                       "current host. Please either specify password in "
                       "`local_sudo_password` config option, or run "
                       "CloudFerry as root user.").format(user=current_user)
                LOG.error(msg)
                raise exception.AbortMigrationError(msg)
Example #14
def check(os_api_call, os_api_type, position, *os_api_call_args,
          **os_api_call_kwargs):
    try:
        LOG.info("Checking %s APIs availability on %s.", os_api_type,
                 position.upper())
        os_api_call(*os_api_call_args, **os_api_call_kwargs)
    except (neutron_exc.NeutronException, glance_exc.BaseException,
            glance_exc.ClientException, ks_exc.ClientException,
            cinder_exc.ClientException, nova_exc.ClientException) as e:
        message = ('{os_api_type} APIs on {position} check failed with: '
                   '"{msg}". Check your configuration.').format(
                       os_api_type=os_api_type,
                       msg=e.message,
                       position=position.upper())
        LOG.error(message)
        raise exception.AbortMigrationError(message)
Example #15
    def check_segmentation_id_overlapping(self, dst_seg_ids):
        """
        Check if segmentation ID of current network overlaps with destination.

        :param dst_seg_ids: Dictionary with busy segmentation IDs on DST
        """

        if self.network_type not in dst_seg_ids:
            return

        if self.seg_id in dst_seg_ids[self.network_type]:
            message = ("Segmentation ID '%s' (network type = '%s', "
                       "network ID = '%s') is already busy on the destination "
                       "cloud.") % (self.seg_id, self.network_type, self.id)
            LOG.warning(message)
            raise exception.AbortMigrationError(message)
Example #16
    def _transfer(self, src, dstpaths, volume, src_size):
        LOG.debug("Trying transfer file for volume: %s[%s]",
                  volume.get('display_name', None), volume['id'])
        dstfile = self.find_dir(DST, dstpaths, volume)
        LOG.debug("Source file size = %d", src_size)
        LOG.debug("Searching for space for volume: %s[%s]",
                  volume.get('display_name', None), volume['id'])
        if dstfile:
            LOG.info("File found on destination: %s", dstfile)
            dst_size = self.volume_size(self.clouds[DST], dstfile)
            LOG.debug("Destination file (%s) size = %d", dstfile, dst_size)
            dst = os.path.dirname(dstfile)

            LOG.info('Calculating and comparing volume checksums on the '
                     'source and destination clouds.')
            if src_size == dst_size:
                src_md5 = self.checksum(self.clouds[SRC], src)
                dst_md5 = self.checksum(self.clouds[DST], dstfile)
                if src_md5 == dst_md5:
                    LOG.info("Destination file %s is up-to-date. "
                             "Sizes and checksums are matched.", dstfile)
                    return dst, 0

            LOG.info('Sizes or checksums differ. Copying volume %s(%s)',
                     volume.get('display_name', ''),
                     volume['id'])
            start_time = time.time()
            if self.transfer_if_enough_space(src_size - dst_size, src, dst):
                elapsed_time = time.time() - start_time
                return dst, elapsed_time
            else:
                LOG.info('Copying volume %s(%s) failed. '
                         'Volume will be deleted.',
                         volume.get('display_name', ''),
                         volume['id'])
                self._clean(self.clouds[DST], dstfile)

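        # Either no copy exists on destination or the incremental copy above
        # failed; try every destination path until one has enough free space
        # for the full source volume.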
        for dst in dstpaths:
            start_time = time.time()
            res = self.transfer_if_enough_space(src_size, src, dst)
            elapsed_time = time.time() - start_time
            if res:
                return dst, elapsed_time
        raise exception.AbortMigrationError('No space found for %s on %s' % (
            str(volume), str(dstpaths)))
Example #17
    def get_filters(self):
        is_public = public_filter()
        is_active = active_filter()
        is_datetime = datetime_filter(self.filter_yaml.get_image_date())
        is_tenant = tenant_filter(self.filter_yaml.get_tenant())

        images_list = self.filter_yaml.get_image_ids()
        excluded_images_list = self.filter_yaml.get_excluded_image_ids()

        if images_list and excluded_images_list:
            raise exception.AbortMigrationError("In the filter config file "
                                                "specified 'images_list' and "
                                                "'exclude_images_list'. Must "
                                                "be only one list with "
                                                "images - 'images_list' or "
                                                "'exclude_images_list'.")

        if excluded_images_list:
            is_image_id = image_id_exclude_filter(excluded_images_list)
        else:
            is_image_id = image_id_filter(images_list)

        is_member = member_filter(self.glance_client,
                                  self.filter_yaml.get_tenant())

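        # When public and member-shared images are filtered too, only the
        # tenant's images matching the ID and date filters pass; otherwise
        # active public and member-shared images are included as well.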
        if self.filter_yaml.is_public_and_member_images_filtered():
            return [
                lambda i: (is_active(i) and is_tenant(i) and is_image_id(i) and
                           is_datetime(i))
            ]
        else:
            return [
                lambda i: (is_active(i) and is_public(i) or is_active(
                    i) and is_member(i) or is_active(i) and is_tenant(i) and
                           is_image_id(i) and is_datetime(i))
            ]
Example #18
    def run(self, **kwargs):
        LOG.debug("Checking networks...")
        overlapping_resources = {}
        invalid_resources = {}

        src_net = self.src_cloud.resources[utils.NETWORK_RESOURCE]
        dst_net = self.dst_cloud.resources[utils.NETWORK_RESOURCE]
        src_compute = self.src_cloud.resources[utils.COMPUTE_RESOURCE]

        search_opts = kwargs.get('search_opts_tenant', {})
        search_opts.update({'search_opts': kwargs.get('search_opts', {})})

        LOG.debug("Retrieving Network information from Source cloud...")
        ports = src_net.get_ports_list()
        src_net_info = NetworkInfo(src_net.read_info(**search_opts), ports)
        LOG.debug("Retrieving Network information from Destination cloud...")
        dst_net_info = NetworkInfo(dst_net.read_info())
        LOG.debug("Retrieving Compute information from Source cloud...")
        src_compute_info = ComputeInfo(src_compute, search_opts)

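        # Optional external networks mapping; when it is empty, the mapping
        # validation below is skipped.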
        ext_net_map = utils.read_yaml_file(self.cfg.migrate.ext_net_map) or {}

        # Check external networks mapping
        if ext_net_map:
            LOG.info("Check external networks mapping...")
            invalid_ext_net_ids = src_net_info.get_invalid_ext_net_ids(
                dst_net_info, ext_net_map)
            if invalid_ext_net_ids:
                invalid_resources.update(
                    {"invalid_external_nets_ids_in_map": invalid_ext_net_ids})

        # Check networks' segmentation IDs overlap
        LOG.info("Check networks' segmentation IDs overlapping...")
        nets_overlapping_seg_ids = (
            src_net_info.get_overlapping_seg_ids(dst_net_info))
        if nets_overlapping_seg_ids:
            LOG.warning("Networks with segmentation IDs overlapping:\n%s",
                        nets_overlapping_seg_ids)

        # Check external subnets overlap
        LOG.info("Check external subnets overlapping...")
        overlapping_external_subnets = (
            src_net_info.get_overlapping_external_subnets(
                dst_net_info, ext_net_map))
        if overlapping_external_subnets:
            overlapping_resources.update(
                {"overlapping_external_subnets": overlapping_external_subnets})

        # Check floating IPs overlap
        LOG.info("Check floating IPs overlapping...")
        floating_ips = src_net_info.list_overlapping_floating_ips(
            dst_net_info, ext_net_map)
        if floating_ips:
            overlapping_resources.update(
                {'overlapping_floating_ips': floating_ips})

        # Check busy physical networks on DST of FLAT network type
        LOG.info("Check busy physical networks for FLAT network type...")
        busy_flat_physnets = src_net_info.busy_flat_physnets(dst_net_info)
        if busy_flat_physnets:
            overlapping_resources.update(
                {'busy_flat_physnets': busy_flat_physnets})

        # Check physical networks existence on DST for VLAN network type
        LOG.info("Check physical networks existence for VLAN network type...")
        dst_neutron_client = dst_net.neutron_client
        missing_vlan_physnets = src_net_info.missing_vlan_physnets(
            dst_net_info, dst_neutron_client)
        if missing_vlan_physnets:
            overlapping_resources.update(
                {'missing_vlan_physnets': missing_vlan_physnets})

        # Check VMs spawned directly in external network
        LOG.info("Check VMs spawned directly in external networks...")
        devices = src_net_info.get_devices_from_external_networks()
        vms_list = src_compute_info.list_vms_in_external_network(devices)
        if vms_list:
            overlapping_resources.update({'vms_in_external_network': vms_list})

        # Print LOG message with all overlapping stuff and abort migration
        if overlapping_resources or invalid_resources:
            if overlapping_resources:
                LOG.critical('Network overlapping list:\n%s',
                             overlapping_resources)
            if invalid_resources:
                LOG.critical('Invalid Network resources list:\n%s',
                             invalid_resources)
            raise exception.AbortMigrationError(
                "There is a number of overlapping/invalid Network resources, "
                "so migration process can not be continued. Resolve it please "
                "and try again")
Example #19
    def change_status(self, status, instance=None, instance_id=None):
        if instance_id:
            instance = self.nova_client.servers.get(instance_id)
        curr = self.get_status(instance.id).lower()
        will = status.lower()

        def wait_status(status):
            return self.wait_for_status(
                instance.id,
                self.get_status,
                status,
                timeout=self.config.migrate.boot_timeout)

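        # Most transitions have to pass through 'active' first, e.g.
        # paused -> shutoff is implemented as unpause, then stop.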
        try:
            if curr == 'paused' and will == 'active':
                self.nova_client.servers.unpause(instance)
                wait_status('active')
            elif curr == 'paused' and will == 'shutoff':
                self.nova_client.servers.unpause(instance)
                wait_status('active')
                self.nova_client.servers.stop(instance)
                wait_status('shutoff')
            elif curr == 'paused' and will == 'suspended':
                self.nova_client.servers.unpause(instance)
                wait_status('active')
                self.nova_client.servers.suspend(instance)
                wait_status('suspended')
            elif curr == 'suspended' and will == 'active':
                self.nova_client.servers.resume(instance)
                wait_status('active')
            elif curr == 'suspended' and will == 'shutoff':
                self.nova_client.servers.resume(instance)
                wait_status('active')
                self.nova_client.servers.stop(instance)
                wait_status('shutoff')
            elif curr == 'suspended' and will == 'paused':
                self.nova_client.servers.resume(instance)
                wait_status('active')
                self.nova_client.servers.pause(instance)
                wait_status('paused')
            elif curr == 'active' and will == 'paused':
                self.nova_client.servers.pause(instance)
                wait_status('paused')
            elif curr == 'active' and will == 'suspended':
                self.nova_client.servers.suspend(instance)
                wait_status('suspended')
            elif curr == 'active' and will == 'shutoff':
                self.nova_client.servers.stop(instance)
                wait_status('shutoff')
            elif curr == 'shutoff' and will == 'active':
                self.nova_client.servers.start(instance)
                wait_status('active')
            elif curr == 'shutoff' and will == 'paused':
                self.nova_client.servers.start(instance)
                wait_status('active')
                self.nova_client.servers.pause(instance)
                wait_status('paused')
            elif curr == 'shutoff' and will == 'suspended':
                self.nova_client.servers.start(instance)
                wait_status('active')
                self.nova_client.servers.suspend(instance)
                wait_status('suspended')
            elif curr == 'shutoff' and will == 'verify_resize':
                self.nova_client.servers.start(instance)
                wait_status('active')
            elif curr == 'verify_resize' and will == 'shutoff':
                self.nova_client.servers.confirm_resize(instance)
                wait_status('active')
                self.nova_client.servers.stop(instance)
                wait_status('shutoff')
            elif curr != will:
                raise exception.AbortMigrationError(
                    'Invalid state change: {curr} -> {will}',
                    curr=curr,
                    will=will)
        except exception.TimeoutException:
            LOG.warning(
                "Failed to change state from '%s' to '%s' for VM "
                "'%s'", curr, will, instance.name)
Example #20
    def run(self, **kwargs):
        if not kwargs.get('info'):
            raise exception.AbortMigrationError(
                "No information from destination cloud."
                "Something has been broken on early steps.")
        dst_info = kwargs['info']
        search_opts = {'search_opts': kwargs.get('search_opts', {})}
        search_opts.update(kwargs.get('search_opts_tenant', {}))
        src_info = kwargs['info_backup']
        old_ids = set(dst_inst['meta']['old_id']
                      for dst_inst in dst_info['instances'].values())
        dst_cmp_info = {}
        inst_cnt = 0
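        # Build a comparable view of destination instances keyed by their
        # original (source) IDs stored in the 'old_id' metadata.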
        for dst_inst in dst_info['instances'].values():
            old_id = dst_inst['meta']['old_id']
            dst_cmp_info[old_id] = {}
            dst_inst_ = dst_inst['instance']
            dst_cmp_info[old_id].update({'name': dst_inst_['name']})
            dst_cmp_info[old_id].update(
                {'flav_details': dst_inst_['flav_details']})
            dst_cmp_info[old_id].update({'key_name': dst_inst_['key_name']})
            dst_cmp_info[old_id].update(
                {'interfaces': dst_inst_['interfaces']})
            dst_volumes = dst_inst['meta']['volume']
            new_dst_volumes = []
            for dst_vol in dst_volumes:
                new_dst_volumes.append(dst_vol['volume'])
            dst_cmp_info[old_id].update({'volumes': new_dst_volumes})

            dst_cmp_info[old_id].update(
                {'server_group': dst_inst_['server_group']})

            inst_cnt += 1
        failed_vms = []
        for src_inst_id in src_info['instances']:
            if ((src_inst_id not in old_ids)
                    or (src_inst_id not in dst_cmp_info)):
                failed_vms.append(src_inst_id)
            else:
                dst_cmp_inst = dst_cmp_info[src_inst_id]
                src_inst_info = src_info['instances'][src_inst_id]['instance']
                if src_inst_info['name'] != dst_cmp_inst['name']:
                    LOG.warning("Wrong name of instance %s on DST",
                                src_inst_id)
                    failed_vms.append(src_inst_id)
                if (src_inst_info['flav_details'] !=
                        dst_cmp_inst['flav_details']):
                    LOG.warning("Wrong flav_details of instance %s on DST",
                                src_inst_id)
                if src_inst_info['key_name'] != dst_cmp_inst['key_name']:
                    LOG.warning("Wrong key_name of instance %s on DST",
                                src_inst_id)
                    failed_vms.append(src_inst_id)
                if (sorted(src_inst_info['interfaces']) != sorted(
                        dst_cmp_inst['interfaces'])):
                    LOG.warning("Wrong interfaces of instance %s on DST",
                                src_inst_id)
                    failed_vms.append(src_inst_id)
                if src_inst_info['volumes'] != dst_cmp_inst['volumes']:
                    LOG.warning("Wrong volumes of instance %s on DST",
                                src_inst_id)

                # Verify that migrated VM belongs to correct server group
                if (src_inst_info['server_group'] !=
                        dst_cmp_inst['server_group']):
                    LOG.warning(
                        "Wrong server group of instance '%s' on DST! "
                        "SRC server group: '%s', "
                        "DST server group: '%s'.", src_inst_id,
                        src_inst_info['server_group'],
                        dst_cmp_inst['server_group'])

        if failed_vms:
            LOG.warning("Instances were not migrated:")
            for vm in failed_vms:
                LOG.warning("%s", vm)
            return False
        LOG.info(
            "Compared instance names, flavors, "
            "interfaces, volumes and key names. "
            "Number of migrated instances: %s", inst_cnt)
        return True
Example #21
    def deploy(self, info, *args, **kwargs):
        LOG.info("Glance images deployment started...")
        info = copy.deepcopy(info)
        created_images = []
        delete_container_format, delete_disk_format = [], []
        empty_image_list = {}

        # List of obsolete/broken image IDs that will not be migrated
        obsolete_images_ids_list = []
        dst_images = self._dst_images()

        view = GlanceImageProgessMigrationView(info['images'], dst_images)
        view.show_info()
        for image_id_src in info['images']:
            img = info['images'][image_id_src]['image']
            meta = info['images'][image_id_src]['meta']
            if img and img['resource']:
                checksum_current = img['checksum']
                name_current = img['name']
                tenant_name = img['owner_name']
                image_key = (name_current, tenant_name, checksum_current,
                             img['is_public'])

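                # Skip images that already exist on destination (matched by
                # name, tenant, checksum and visibility).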
                if image_key in dst_images:
                    existing_image = dst_images[image_key]
                    created_images.append((existing_image, meta))
                    image_members = img['members'].get(img['id'], {})
                    self.update_membership(existing_image.id, image_members)
                    LOG.info("Image '%s' is already present on destination, "
                             "skipping", img['name'])
                    continue

                view.show_progress()
                view.inc_progress(img['size'])

                LOG.debug("Updating owner '%s' of image '%s'",
                          tenant_name, img["name"])
                img["owner"] = \
                    self.identity_client.get_tenant_id_by_name(tenant_name)

                if img["properties"]:
                    # update snapshot metadata
                    metadata = img["properties"]
                    if "owner_id" in metadata:
                        # update tenant id
                        LOG.debug("Updating snapshot metadata for field "
                                  "'owner_id' for image %s", img["id"])
                        metadata["owner_id"] = img["owner"]
                    if "user_name" in metadata:
                        # update user id by specified name
                        LOG.debug("Updating snapshot metadata for field "
                                  "'user_id' for image %s", img["id"])
                        try:
                            ks_client = self.identity_client.keystone_client
                            metadata["user_id"] = ks_client.users.find(
                                username=metadata["user_name"]).id
                            del metadata["user_name"]
                        except keystone_exceptions.NotFound:
                            LOG.warning("Cannot update user name for image %s",
                                        img['name'])
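                # Images without a checksum are re-created from their
                # original source URL (img_loc) instead of copying data.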
                if img["checksum"] is None:
                    LOG.warning("re-creating image %s from original source "
                                "URL", img["id"])
                    if meta['img_loc'] is not None:
                        self.create_image(
                            id=img['id'],
                            name=img['name'],
                            disk_format=img['disk_format'] or "qcow2",
                            location=meta['img_loc'],
                            container_format=img['container_format'] or 'bare',
                        )

                        recreated_image = utl.ext_dict(
                            name=img["name"]
                        )
                        created_images.append((recreated_image, meta))
                    else:
                        raise exception.AbortMigrationError(
                            "image information has no original source URL")
                    continue

                LOG.debug("Creating image '%s' (%s)", img["name"], img['id'])
                # We can face a situation when an image has no
                # disk_format and container_format properties.
                # This happens when the image was created with the
                # --copy-from option. glance-client cannot create an
                # image without these properties, so we set them
                # artificially and then delete them from the database.

                try:
                    data = img['resource'].get_ref_image(img['id'])
                    data_proxy = file_proxy.IterProxy(
                        data,
                        name="image %s ('%s')" % (img['name'], img['id']),
                        size=len(data))

                    created_image = self.create_image(
                        id=img['id'],
                        name=img['name'],
                        container_format=(img['container_format'] or "bare"),
                        disk_format=(img['disk_format'] or "qcow2"),
                        is_public=img['is_public'],
                        protected=img['protected'],
                        owner=img['owner'],
                        size=img['size'],
                        properties=img['properties'],
                        data=data_proxy)

                    image_members = img['members'].get(img['id'], {})
                    LOG.debug("new image ID %s", created_image.id)
                    self.update_membership(created_image.id, image_members)
                    created_images.append((created_image, meta))
                except (exception.ImageDownloadError,
                        httplib.IncompleteRead,
                        glance_exceptions.HTTPInternalServerError) as e:
                    LOG.debug(e, exc_info=True)
                    LOG.warning("Unable to reach image's data due to "
                                "Glance HTTPInternalServerError. Skipping "
                                "image: %s (%s)", img['name'], img["id"])
                    obsolete_images_ids_list.append(img["id"])
                    continue

                if not img["container_format"]:
                    delete_container_format.append(created_image.id)
                if not img["disk_format"]:
                    delete_disk_format.append(created_image.id)
            elif img['resource'] is None:
                recreated_image = utl.ext_dict(name=img["name"])
                created_images.append((recreated_image, meta))
            elif not img:
                empty_image_list[image_id_src] = info['images'][image_id_src]

        view.show_progress()
        if obsolete_images_ids_list:
            LOG.warning('List of broken images: %s', obsolete_images_ids_list)
            # Remove obsolete/broken images from info
            for img_id in obsolete_images_ids_list:
                info['images'].pop(img_id)

        return self._new_info(created_images, empty_image_list,
                              delete_disk_format, delete_container_format)
Example #22
    def run(self, **kwargs):
        LOG.debug("Checking networks...")
        has_overlapping_resources = False
        has_invalid_resources = False

        src_net = self.src_cloud.resources[utils.NETWORK_RESOURCE]
        dst_net = self.dst_cloud.resources[utils.NETWORK_RESOURCE]
        src_compute = self.src_cloud.resources[utils.COMPUTE_RESOURCE]

        tenant_ids = kwargs.get('search_opts_tenant', {}).get('tenant_id')
        search_opts = kwargs.get('search_opts', {})

        LOG.debug("Retrieving Network information from Source cloud...")
        ports = src_net.get_ports_list()
        src_net_info = NetworkInfo(src_net.read_info(tenant_id=tenant_ids),
                                   ports)
        LOG.debug("Retrieving Network information from Destination cloud...")
        dst_net_info = NetworkInfo(dst_net.read_info())
        LOG.debug("Retrieving Compute information from Source cloud...")
        src_compute_info = ComputeInfo(src_compute, search_opts, tenant_ids)

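        # Mapping of external networks between source and destination clouds,
        # used by several of the checks below.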
        ext_net_map = mapper.Mapper('ext_network_map')

        # Check external networks mapping
        if ext_net_map:
            LOG.info("Check external networks mapping...")
            invalid_ext_net_ids = src_net_info.get_invalid_ext_net_ids(
                dst_net_info, ext_net_map)
            if invalid_ext_net_ids['src_nets'] or \
                    invalid_ext_net_ids['dst_nets']:
                invalid_src_nets = invalid_ext_net_ids['src_nets']
                invalid_dst_nets = invalid_ext_net_ids['dst_nets']
                invalid_nets_str = ""

                if invalid_src_nets:
                    invalid_nets_str = 'Source cloud:\n' + \
                                       '\n'.join(invalid_src_nets) + '\n'
                if invalid_dst_nets:
                    invalid_nets_str += 'Destination cloud:\n' + \
                                        '\n'.join(invalid_dst_nets) + '\n'

                LOG.error(
                    "External networks mapping file has non-existing "
                    "network UUIDs defined:\n%s\nPlease update '%s' "
                    "file with correct values and re-run networks "
                    "check.", invalid_nets_str, self.cfg.migrate.ext_net_map)
                has_invalid_resources = True

        # Check networks' segmentation IDs overlap
        LOG.info("Check networks' segmentation IDs overlapping...")
        nets_overlapping_seg_ids = (
            src_net_info.get_overlapping_seg_ids(dst_net_info))
        if nets_overlapping_seg_ids:
            LOG.warning(
                "Segmentation IDs for these networks in source cloud "
                "WILL NOT BE KEPT regardless of options defined in "
                "config, because networks with the same segmentation "
                "IDs already exist in destination: %s.",
                '\n'.join([n['src_net_id'] for n in nets_overlapping_seg_ids]))

        # Check external subnets overlap
        LOG.info("Check external subnets overlapping...")
        overlapping_external_subnets = (
            src_net_info.get_overlapping_external_subnets(
                dst_net_info, ext_net_map))
        if overlapping_external_subnets:
            pool_fmt = '"{pool}" pool of subnet "{snet_name}" ({snet_id})'
            fmt = "{src_pool} overlaps with {dst_pool}"
            overlapping_nets = []

            for snet in overlapping_external_subnets:
                overlapping_nets.append(
                    fmt.format(src_pool=pool_fmt.format(
                        pool=snet['src_subnet']['allocation_pools'],
                        snet_name=snet['src_subnet']['name'],
                        snet_id=snet['src_subnet']['id']),
                               dst_pool=pool_fmt.format(
                                   pool=snet['dst_subnet']['allocation_pools'],
                                   snet_name=snet['dst_subnet']['name'],
                                   snet_id=snet['dst_subnet']['id'],
                               )))

            message = ("Following external networks have overlapping "
                       "allocation pools in source and destination:\n{}.\nTo "
                       "resolve this:\n"
                       " 1. Manually change allocation pools in source or "
                       "destination networks to be identical;\n"
                       " 2. Use '[migrate] ext_net_map' external networks "
                       "mapping. Floating IPs will NOT BE KEPT in that "
                       "case.".format('\n'.join(overlapping_nets)))

            LOG.error(message)
            has_overlapping_resources = True

        # Check floating IPs overlap
        LOG.info("Check floating IPs overlapping...")
        floating_ips = src_net_info.list_overlapping_floating_ips(
            dst_net_info, ext_net_map)
        if floating_ips:
            LOG.error(
                "Following floating IPs from source cloud already exist "
                "in destination, but either tenant, or external "
                "network doesn't match source cloud floating IP: %s\n"
                "In order to resolve you'd need to either delete "
                "floating IP from destination, or recreate floating "
                "IP so that they match fully in source and destination.",
                '\n'.join(floating_ips))
            has_overlapping_resources = True

        # Check busy physical networks on DST of FLAT network type
        LOG.info("Check busy physical networks for FLAT network type...")
        busy_flat_physnets = src_net_info.busy_flat_physnets(
            dst_net_info, ext_net_map)
        if busy_flat_physnets:
            LOG.error(
                "Flat network(s) allocated in different physical "
                "network(s) exist in destination cloud:\n%s\nIn order "
                "to resolve flat networks in the list must be "
                "connected to the same physical network in source and "
                "destination.",
                '\n'.join([str(n) for n in busy_flat_physnets]))
            has_overlapping_resources = True

        # Check physical networks existence on DST for VLAN network type
        LOG.info("Check physical networks existence for VLAN network type...")
        dst_neutron_client = dst_net.neutron_client
        missing_vlan_physnets = src_net_info.missing_vlan_physnets(
            dst_net_info, dst_neutron_client, ext_net_map)
        if missing_vlan_physnets:
            LOG.error(
                "Following physical networks are not present in "
                "destination, but required by source cloud networks: "
                "%s\nIn order to resolve make sure neutron has "
                "required physical networks defined in config.",
                '\n'.join(missing_vlan_physnets))

            has_overlapping_resources = True

        # Check VMs spawned directly in external network
        LOG.info("Check VMs spawned directly in external networks...")
        devices = src_net_info.get_devices_from_external_networks()
        vms_list = src_compute_info.list_vms_in_external_network(devices)
        if vms_list:
            LOG.warning(
                'Following VMs are booted directly in external '
                'network, which is not recommended: %s', vms_list)

        # Print LOG message with all overlapping stuff and abort migration
        if has_overlapping_resources or has_invalid_resources:
            raise exception.AbortMigrationError(
                "There is a number of overlapping/invalid network resources "
                "which require manual resolution. See error messages above "
                "for details.")
Example #23
    def run(self, **kwargs):
        """Check write access to cloud."""

        ks_client = keystone.KeystoneIdentity(config=self.cloud.cloud_config,
                                              cloud=self.dst_cloud)
        nt_client = neutron.NeutronNetwork(config=self.cloud.cloud_config,
                                           cloud=self.dst_cloud)
        gl_client = glance_image.GlanceImage(config=self.cloud.cloud_config,
                                             cloud=self.dst_cloud)
        cn_client = cinder.CinderStorage(config=self.cloud.cloud_config,
                                         cloud=self.dst_cloud)

        adm_tenant_name = self.cloud.cloud_config.cloud.tenant
        adm_tenant_id = ks_client.get_tenant_id_by_name(adm_tenant_name)

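        # Timestamp-based suffix keeps the names of the temporary resources
        # created below unique.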
        unique = str(int(time.time()))
        tenant_name = 'tenant_%s' % unique

        flavor = {
            'name': 'flavor_%s' % unique,
            'is_public': True,
            'ram': 1,
            'vcpus': 1,
            'disk': 1,
        }

        image_info = {
            'name': 'image_%s' % unique,
            'container_format': 'bare',
            'disk_format': 'qcow2',
            'is_public': True,
            'protected': False,
            'owner': adm_tenant_id,
            'size': 4,
            'properties': {
                'user_name': 'test_user_name'
            },
            'data': 'test'
        }

        shared_network_info = {
            'network': {
                'tenant_id': adm_tenant_id,
                'admin_state_up': True,
                'shared': True,
                'name': 'shared_net_%s' % unique,
                'router:external': True
            }
        }

        try:
            with self.create_tenant(ks_client, tenant_name) as tenant, \
                    self.create_image(gl_client, image_info) as image_id, \
                    self.create_network(nt_client, shared_network_info):

                private_network_info = {
                    'network': {
                        'tenant_id': tenant.id,
                        'name': 'private_net_%s' % unique,
                    }
                }

                volume_info = {
                    'size': 1,
                    'display_name': 'volume_%s' % unique,
                    'project_id': tenant.id
                }

                with self.create_network(nt_client, private_network_info) as \
                        private_network_id, \
                        self.create_volume(cn_client, volume_info):

                    subnet_info = {
                        'subnet': {
                            'name': 'subnet_%s' % unique,
                            'network_id': private_network_id,
                            'cidr': '192.168.1.0/24',
                            'ip_version': 4,
                            'tenant_id': tenant.id,
                        }
                    }

                    nv_client_config = copy.deepcopy(self.cloud.cloud_config)
                    nv_client_config.cloud.tenant = tenant.name

                    nv_client = nova_compute.NovaCompute(
                        config=nv_client_config, cloud=self.dst_cloud)

                    with self.create_subnet(nt_client, subnet_info), \
                            self.create_flavor(nv_client, flavor) as flavor_id:

                        instance_info = {
                            'name': 'test_vm_%s' % unique,
                            'image': image_id,
                            'flavor': flavor_id,
                            'nics': [{
                                'net-id': private_network_id
                            }]
                        }

                        with self.create_instance(nv_client, instance_info):
                            pass

        except (ks_exc.ClientException, nova_exc.ClientException,
                cinder_exc.ClientException, glance_exc.ClientException,
                neutron_exc.NeutronClientException) as e:
            raise exception.AbortMigrationError(
                "Destination cloud verification failed: {error_message}",
                error_message=e.message)
Example #24
    def _try_copy_volumes(self):
        vt_map = self._vt_map()

        failed = []

        volumes_size_map = self._volumes_size_map()
        view = cinder_storage_view.CinderStorageMigrationProgressView(
            self.data[SRC]['volumes'],
            self.data[DST]['volumes'],
            volumes_size_map
        )

        view.show_stats()
        for v in self.data[SRC]['volumes']:
            LOG.info('Starting migration of volume %s(%s)',
                     v.get('display_name', ''), v['id'])

            volume_type_id = v.get('volume_type_id', None)
            srcpaths = self._paths(SRC, volume_type_id)
            LOG.debug('srcpaths: %s', str(srcpaths))

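            # Translate the source volume type into the matching destination
            # volume type; volumes of unknown types are migrated without one.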
            if volume_type_id in vt_map:
                # src -> dst
                v['volume_type_id'] = vt_map.get(volume_type_id, None)
            else:
                v['volume_type_id'] = None
            LOG.debug('Vt map: %s', str(vt_map))

            dstpaths = self._paths(DST, v['volume_type_id'])
            if not dstpaths:
                err_msg = 'No mount found on DST Cloud'
                if v['volume_type_id']:
                    err_msg += ' for volume type: %s' % v['volume_type_id']
                raise exception.AbortMigrationError(err_msg)

            LOG.debug('dstpaths: %s', str(dstpaths))

            src = self.find_dir(SRC, srcpaths, v)
            if not src:
                raise exception.AbortMigrationError(
                    'No SRC volume file found for %s[%s]'
                    % (v.get('display_name', None), v['id']))
            dst, elapsed_time = self._transfer(src, dstpaths, v,
                                               volumes_size_map[v['id']])

            if dst:
                v['provider_location'] = self._dir_to_provider(dst)
                vtid = self._provider_to_vtid(v['provider_location'])
                v[HOST] = self._dst_host(vtid)
                view.sync_migrated_volumes_info(v, elapsed_time)
            else:
                failed.append(v)
                view.sync_failed_volumes_info(v)

            view.show_progress()

        if failed:
            LOG.error(
                'Migration failed for volumes: %s',
                ', '.join([
                    "%s(%s)" % (v['display_name'], v['id'])
                    for v in failed])
            )
            self.data[SRC]['volumes'] = [
                v for v in self.data[SRC]['volumes'] if v not in failed
            ]

        return failed