def verify(self, host_src, path_src, host_dst, path_dst, gateway=None):
    """Verify that a file has been copied correctly.

    Compares the remote sizes of the source and destination files and,
    when ``CONF.migrate.copy_with_md5_verification`` is enabled, also
    compares their md5 checksums.

    :param host_src: host the file was copied from
    :param path_src: path to the file on ``host_src``
    :param host_dst: host the file was copied to
    :param path_dst: path to the file on ``host_dst``
    :param gateway: optional gateway host used to reach the endpoints
    :return: True if the copies match, False otherwise
    """
    src_runner = self.runner(host_src, 'src', gateway)
    dst_runner = self.runner(host_dst, 'dst', gateway)
    src_size = files.remote_file_size(src_runner, path_src)
    dst_size = files.remote_file_size(dst_runner, path_dst)
    # Size mismatch is cheap to detect; bail out before the md5 pass.
    if src_size != dst_size:
        LOG.warning("The sizes of '%s' (%s) and '%s' (%s) are mismatch",
                    path_src, sizeof_format.sizeof_fmt(src_size),
                    path_dst, sizeof_format.sizeof_fmt(dst_size))
        return False

    if CONF.migrate.copy_with_md5_verification:
        LOG.info("Running md5 checksum calculation on the file '%s' with "
                 "size %s on host '%s'",
                 path_src, sizeof_format.sizeof_fmt(src_size), host_src)
        src_md5 = files.remote_md5_sum(src_runner, path_src)
        LOG.info("Running md5 checksum calculation on the file '%s' with "
                 "size %s on host '%s'",
                 path_dst, sizeof_format.sizeof_fmt(dst_size), host_dst)
        dst_md5 = files.remote_md5_sum(dst_runner, path_dst)
        if src_md5 != dst_md5:
            LOG.warning("The md5 checksums of '%s' (%s) and '%s' (%s) are "
                        "mismatch", path_src, src_md5, path_dst, dst_md5)
            return False
    return True
def run(self):
    """Build a tabular report of objects grouped by category.

    Each category contributes one row per object (category, id, name,
    human-readable size) ordered by descending size, followed by a
    per-category 'Total' row; a grand 'Total' row closes the report.

    :return: list of row tuples
    """
    if self.show_unused:
        # Unused-only mode: collect used object ids and drop categories
        # that cannot be "unused".
        self.get_used_objects()
        self.show_vms = False
        self.show_ephemeral = False
    data = self.get_data()
    result = []
    total_cnt = 0
    total_size = 0
    for k, records in data.items():
        cnt = 0
        size = 0
        if self.limit:
            # BUG FIX: previously passed `data` (the dict, i.e. its keys)
            # to nlargest instead of this category's `records`.
            records = heapq.nlargest(self.limit, records,
                                     key=lambda o: o.size)
        else:
            records = sorted(records, key=lambda o: o.size, reverse=True)
        for r in records:
            result.append((k, r.id, r.name,
                           sizeof_format.sizeof_fmt(r.size)))
            cnt += 1
            size += r.size
        result.append((k, 'Total', cnt, sizeof_format.sizeof_fmt(size)))
        total_cnt += cnt
        total_size += size
    result.append(('Total', '', total_cnt,
                   sizeof_format.sizeof_fmt(total_size)))
    return result
def run(self):
    """Build a tabular report of objects grouped by category.

    Each category contributes one row per object (category, id, name,
    human-readable size) ordered by descending size, followed by a
    per-category 'Total' row; a grand 'Total' row closes the report.

    :return: list of row tuples
    """
    if self.show_unused:
        # Unused-only mode: collect used object ids and drop categories
        # that cannot be "unused".
        self.get_used_objects()
        self.show_vms = False
        self.show_ephemeral = False
    data = self.get_data()
    result = []
    total_cnt = 0
    total_size = 0
    for k, records in data.iteritems():
        cnt = 0
        size = 0
        if self.limit:
            # BUG FIX: previously passed `data` (the dict, i.e. its keys)
            # to nlargest instead of this category's `records`.
            records = heapq.nlargest(self.limit, records,
                                     key=lambda o: o.size)
        else:
            records = sorted(records, key=lambda o: o.size, reverse=True)
        for r in records:
            result.append((k, r.id, r.name,
                           sizeof_format.sizeof_fmt(r.size)))
            cnt += 1
            size += r.size
        result.append((k, 'Total', cnt, sizeof_format.sizeof_fmt(size)))
        total_cnt += cnt
        total_size += size
    result.append(('Total', '', total_cnt,
                   sizeof_format.sizeof_fmt(total_size)))
    return result
def show_largest_unused_resources(count, cloud_name, tenant):
    """Print the `count` largest volumes and images that no server uses.

    :param count: maximum number of entries to print per resource type
    :param cloud_name: cloud whose resources are examined
    :param tenant: tenant to filter resources by
    """
    with model.Session() as session:
        used_volumes = set()
        used_images = set()
        # Collect object ids of images and volumes referenced by servers;
        # anything not in these sets is considered unused.
        servers = list_filtered(session, nova.Server, cloud_name, tenant)
        for server in servers:
            if server.image is not None:
                used_images.add(server.image.object_id)
            for volume in server.attached_volumes:
                used_volumes.add(volume.object_id)

        # Find unused volumes
        volumes_output = []
        volumes_size = 0
        volumes = list_filtered(session, cinder.Volume, cloud_name, tenant)
        for index, volume in enumerate(
                heapq.nlargest(count,
                               (v for v in volumes
                                if v.object_id not in used_volumes),
                               key=lambda v: v.size),
                start=1):
            volumes_size += volume.size
            # Volume sizes are formatted with the 'G' unit hint.
            size = sizeof_format.sizeof_fmt(volume.size, 'G')
            volumes_output.append(
                ' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format(
                    index, volume, size))

        # Find unused images
        images_output = []
        images_size = 0
        images = list_filtered(session, glance.Image, cloud_name, tenant)
        for index, image in enumerate(
                heapq.nlargest(count,
                               (i for i in images
                                if i.object_id not in used_images),
                               key=lambda i: i.size),
                start=1):
            images_size += image.size
            size = sizeof_format.sizeof_fmt(image.size)
            images_output.append(
                ' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format(
                    index, image, size))

    # Output result
    if volumes_output:
        print '\n{0} largest unused volumes:'.format(len(volumes_output))
        for line in volumes_output:
            print line
        print ' Total:', sizeof_format.sizeof_fmt(volumes_size, 'G')
    if images_output:
        print '\n{0} largest unused images:'.format(len(images_output))
        for line in images_output:
            print line
        print ' Total:', sizeof_format.sizeof_fmt(images_size)
def show_info(self):
    """Log a summary of the image migration state: totals, then the
    lists of private, public and already-migrated images."""
    num_to_migrate = self.num_private + self.num_public
    total_size_hr = sizeof_format.sizeof_fmt(self.total_size)
    migrated_size_hr = sizeof_format.sizeof_fmt(self.migrated_size)
    LOG.info('Total number of images to be migrated: %d, '
             'total size: %s\n'
             'Number of private images: %d\n'
             'Number of public images: %d\n'
             'Number of already migrated images: %d, total size: %s',
             num_to_migrate, total_size_hr,
             self.num_private, self.num_public,
             self.num_migrated, migrated_size_hr)
    LOG.info('List of private images:\n%s', '\n'.join(self.list_private))
    LOG.info('List of public images:\n%s', '\n'.join(self.list_public))
    LOG.info('List of migrated images:\n%s', '\n'.join(self.list_migrated))
def show_largest_unused_resources(count, cloud_name, tenant):
    """Print the `count` largest volumes and images that no server uses.

    :param count: maximum number of entries to print per resource type
    :param cloud_name: cloud whose resources are examined
    :param tenant: tenant to filter resources by
    """
    with model.Session() as session:
        used_volumes = set()
        used_images = set()
        # Collect object ids of images and volumes referenced by servers;
        # anything not in these sets is considered unused.
        servers = list_filtered(session, nova.Server, cloud_name, tenant)
        for server in servers:
            if server.image is not None:
                used_images.add(server.image.object_id)
            for volume in server.attached_volumes:
                used_volumes.add(volume.object_id)

        # Find unused volumes
        volumes_output = []
        volumes_size = 0
        volumes = list_filtered(session, cinder.Volume, cloud_name, tenant)
        for index, volume in enumerate(heapq.nlargest(
                count,
                (v for v in volumes if v.object_id not in used_volumes),
                key=lambda v: v.size), start=1):
            volumes_size += volume.size
            # Volume sizes are formatted with the 'G' unit hint.
            size = sizeof_format.sizeof_fmt(volume.size, 'G')
            volumes_output.append(
                ' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format(
                    index, volume, size))

        # Find unused images
        images_output = []
        images_size = 0
        images = list_filtered(session, glance.Image, cloud_name, tenant)
        for index, image in enumerate(heapq.nlargest(
                count,
                (i for i in images if i.object_id not in used_images),
                key=lambda i: i.size), start=1):
            images_size += image.size
            size = sizeof_format.sizeof_fmt(image.size)
            images_output.append(
                ' {0:3d}. {1.object_id.id} {2:10s} {1.name}'.format(
                    index, image, size))

    # Output result
    if volumes_output:
        print '\n{0} largest unused volumes:'.format(len(volumes_output))
        for line in volumes_output:
            print line
        print ' Total:', sizeof_format.sizeof_fmt(volumes_size, 'G')
    if images_output:
        print '\n{0} largest unused images:'.format(len(images_output))
        for line in images_output:
            print line
        print ' Total:', sizeof_format.sizeof_fmt(images_size)
def show_largest_servers(cfg, count, migration_name):
    """Print the `count` largest servers of a migration's source cloud.

    :param cfg: configuration object holding `migrations`
    :param count: maximum number of servers to print
    :param migration_name: key into cfg.migrations
    """
    def server_size(server):
        # Total footprint of one server: boot image + ephemeral disks
        # + attached volumes.
        size = 0
        if server.image is not None:
            size += server.image.size
        for ephemeral_disk in server.ephemeral_disks:
            size += ephemeral_disk.size
        for volume in server.attached_volumes:
            size += volume.size
        return size

    output = []
    migration = cfg.migrations[migration_name]
    with model.Session() as session:
        for index, server in enumerate(heapq.nlargest(
                count,
                migration.query.search(session, migration.source,
                                       nova.Server),
                key=server_size), start=1):
            output.append(' {0}. {1.object_id.id} {1.name} - {2}'.format(
                index, server,
                sizeof_format.sizeof_fmt(server_size(server))))
    if output:
        print '\n{0} largest servers:'.format(len(output))
        for line in output:
            print line
def show_largest_servers(cfg, count, migration_name):
    """Print the `count` largest servers of a migration's source cloud.

    :param cfg: configuration object holding `migrations`
    :param count: maximum number of servers to print
    :param migration_name: key into cfg.migrations
    """
    def server_size(server):
        # Total footprint of one server: boot image + ephemeral disks
        # + attached volumes.
        size = 0
        if server.image is not None:
            size += server.image.size
        for ephemeral_disk in server.ephemeral_disks:
            size += ephemeral_disk.size
        for volume in server.attached_volumes:
            size += volume.size
        return size

    output = []
    migration = cfg.migrations[migration_name]
    with model.Session() as session:
        for index, server in enumerate(
                heapq.nlargest(
                    count,
                    migration.query.search(session, migration.source,
                                           nova.Server),
                    key=server_size),
                start=1):
            output.append(
                ' {0}. {1.object_id.id} {1.name} - {2}'.format(
                    index, server,
                    sizeof_format.sizeof_fmt(server_size(server))))
    if output:
        print '\n{0} largest servers:'.format(len(output))
        for line in output:
            print line
def show_progress(self):
    """Log the current copy progress using the preformatted
    progress message template."""
    message_args = {
        'progress': sizeof_format.sizeof_fmt(self.progress),
        'size': self.size_hr,
        'name': self.name,
    }
    # The percentage placeholder only exists in the template when the
    # total size is known.
    if self.size:
        message_args['percentage'] = self.progress * 100 / self.size
    LOG.info(self.progress_message, message_args)
def show_progress(self):
    """Log overall volume migration progress: counts, migrated/failed
    percentages of total volume size, and average speed.

    NOTE(review): divides by self.total_vol_size_b — presumably non-zero
    whenever this is called with volumes to migrate; confirm at call site.
    """
    LOG.info('Number of migrated volumes %d of %d. '
             'Volumes migrated %.1f%% and '
             'failed %.1f%% of %s total at %.1f MB/s.',
             self.num_migrated,
             len(self.src_volumes) + len(self.dst_volumes),
             float(self.migrated_vol_size_b) / self.total_vol_size_b * 100,
             float(self.failed_vol_size_b) / self.total_vol_size_b * 100,
             sizeof_format.sizeof_fmt(self.total_vol_size_b),
             self.avg_speed_mb_s)
def show_progress(self, ):
    """Log how many images have migrated and what share of the total
    size has been transferred so far."""
    # Treat an unknown/zero total as fully complete to avoid a
    # division by zero.
    if self.total_size:
        size_percentage = self.progress * 100 / self.total_size
    else:
        size_percentage = 100
    LOG.info('%(num_migrated)d of %(num_total_images)d images '
             'migrated (%(size_percentage)d%% of %(size_total)s '
             'total)',
             {'num_migrated': self.cnt,
              'num_total_images': self.num_private + self.num_public,
              'size_percentage': size_percentage,
              'size_total': sizeof_format.sizeof_fmt(self.total_size)})
def __init__(self, name=None, size=None):
    """Initialize copy-progress tracking for a single file.

    :param name: human-readable name of the file being copied
    :param size: total size in bytes, or None/0 when unknown
    """
    self.name = name
    self.size = size
    # Human-readable total size; "NAN" when the size is unknown.
    self.size_hr = sizeof_format.sizeof_fmt(size) if size else "NAN"
    # Progress-report granularity: every 1% of a known total size,
    # otherwise every 100MB.
    self.show_size = (size / 100
                      if size
                      else sizeof_format.parse_size('100MB'))
    self.current_show_size = 0
    self.progress_message = "Copying %(name)s: %(progress)s of %(size)s"
    # Percentage can only be reported when the total size is known.
    if size:
        self.progress_message += " %(percentage)s%%"
    self.progress = 0
    self.first_run = None
def estimate_copy(cfg, migration_name):
    """Print estimated total sizes of images, ephemeral disks and
    volumes that the given migration would copy.

    :param cfg: configuration object holding `migrations`
    :param migration_name: key into cfg.migrations
    """
    migration = cfg.migrations[migration_name]
    query = migration.query
    src_cloud = migration.source
    with model.Session() as session:
        total_ephemeral_size = 0
        total_volume_size = 0
        total_image_size = 0
        # Ids already counted via servers, so the standalone volume and
        # image passes below do not double-count them.
        accounted_volumes = set()
        accounted_images = set()
        for server in query.search(session, src_cloud, nova.Server):
            for ephemeral_disk in server.ephemeral_disks:
                total_ephemeral_size += ephemeral_disk.size
            if server.image is not None \
                    and server.image.object_id not in accounted_images:
                total_image_size += server.image.size
                accounted_images.add(server.image.object_id)
            for volume in server.attached_volumes:
                if volume.object_id not in accounted_volumes:
                    total_volume_size += volume.size
                    accounted_volumes.add(volume.object_id)

        for volume in query.search(session, src_cloud, cinder.Volume):
            if volume.object_id not in accounted_volumes:
                total_volume_size += volume.size
        for image in query.search(session, src_cloud, glance.Image):
            if image.object_id not in accounted_images:
                total_image_size += image.size

    print 'Migration', migration_name, 'estimates:'
    print 'Images:'
    print ' Size:', sizeof_format.sizeof_fmt(total_image_size)
    print 'Ephemeral disks:'
    print ' Size:', sizeof_format.sizeof_fmt(total_ephemeral_size)
    print 'Volumes:'
    # Volume sizes are formatted with the 'G' unit hint.
    print ' Size:', sizeof_format.sizeof_fmt(total_volume_size, 'G')
def verify(self, host_src, path_src, host_dst, path_dst, gateway=None):
    """Verify that a file has been copied correctly.

    Compares the remote sizes of the source and destination files and,
    when ``CONF.migrate.copy_with_md5_verification`` is enabled, also
    compares their md5 checksums.

    :param host_src: host the file was copied from
    :param path_src: path to the file on ``host_src``
    :param host_dst: host the file was copied to
    :param path_dst: path to the file on ``host_dst``
    :param gateway: optional gateway host used to reach the endpoints
    :return: True if the copies match, False otherwise
    """
    src_runner = self.runner(host_src, 'src', gateway)
    dst_runner = self.runner(host_dst, 'dst', gateway)
    src_size = files.remote_file_size(src_runner, path_src)
    dst_size = files.remote_file_size(dst_runner, path_dst)
    # Size mismatch is cheap to detect; bail out before the md5 pass.
    if src_size != dst_size:
        LOG.warning("The sizes of '%s' (%s) and '%s' (%s) are mismatch",
                    path_src, sizeof_format.sizeof_fmt(src_size),
                    path_dst, sizeof_format.sizeof_fmt(dst_size))
        return False

    if CONF.migrate.copy_with_md5_verification:
        LOG.info(
            "Running md5 checksum calculation on the file '%s' with "
            "size %s on host '%s'",
            path_src, sizeof_format.sizeof_fmt(src_size), host_src)
        src_md5 = files.remote_md5_sum(src_runner, path_src)
        LOG.info(
            "Running md5 checksum calculation on the file '%s' with "
            "size %s on host '%s'",
            path_dst, sizeof_format.sizeof_fmt(dst_size), host_dst)
        dst_md5 = files.remote_md5_sum(dst_runner, path_dst)
        if src_md5 != dst_md5:
            LOG.warning(
                "The md5 checksums of '%s' (%s) and '%s' (%s) are "
                "mismatch", path_src, src_md5, path_dst, dst_md5)
            return False
    return True
def _volumes_size_map(self):
    """Compute the size of every volume on every cloud.

    :return: dict mapping volume id -> size as reported by volume_size()
    """
    LOG.info('Calculate size of each volume.')
    sizes_by_id = {}
    for position in self.clouds:
        cloud = self.clouds[position]
        for volume in self.data[position]['volumes']:
            LOG.debug('Calculating size of volume %s on %s cloud',
                      volume['id'], position)
            type_id = volume.get('volume_type_id', None)
            candidate_paths = self._paths(position, type_id)
            source_dir = self.find_dir(position, candidate_paths, volume)
            size = self.volume_size(cloud, source_dir)
            sizes_by_id[volume['id']] = size
            LOG.info('Volume %s(%s) size is %s.',
                     volume.get('display_name', ''), volume['id'],
                     sizeof_format.sizeof_fmt(size))
    return sizes_by_id
def run(self):
    """Aggregate storage statistics over servers, images and volumes.

    Returns rows of (label, count, human-readable size) covering
    volumes, unused volumes, images, unused images, ephemeral disks
    and a grand total.

    NOTE(review): volume sizes are scaled by G (presumably a bytes-per-
    gigabyte constant) while image/ephemeral sizes are used as-is —
    confirm the units of each source field.
    """
    images_size, images_count = 0, 0
    used_images = set()
    images_unused_size, images_unused_count = 0, 0
    volumes_size, volumes_count = 0, 0
    used_volumes = set()
    volumes_unused_size, volumes_unused_count = 0, 0
    ephemeral_size, ephemeral_count = 0, 0
    # First pass: resources referenced by servers.
    for server in self.get_objects('vms'):
        if server.image is not None:
            images_count += 1
            images_size += server.image.size
            used_images.add(server.image.object_id)
        for ephemeral_disk in server.ephemeral_disks:
            ephemeral_count += 1
            ephemeral_size += ephemeral_disk.size
        for obj in server.attached_volumes:
            volume = obj.volume
            volumes_count += 1
            volumes_size += volume.size * G
            used_volumes.add(volume.object_id)
    # Second pass: remaining resources; each one counts toward both the
    # overall and the "unused" totals. Presumably get_objects excludes
    # the ids passed in — confirm against its definition.
    for image in self.get_objects('images', used_images):
        images_count += 1
        images_size += image.size
        images_unused_count += 1
        images_unused_size += image.size
    for volume in self.get_objects('volumes', used_volumes):
        size = volume.size * G
        volumes_count += 1
        volumes_size += size
        volumes_unused_count += 1
        volumes_unused_size += size
    return (('Volumes', volumes_count,
             sizeof_format.sizeof_fmt(volumes_size, target_unit='G')),
            ('Unused volumes', volumes_unused_count,
             sizeof_format.sizeof_fmt(volumes_unused_size,
                                      target_unit='G')),
            ('Images', images_count,
             sizeof_format.sizeof_fmt(images_size, target_unit='G')),
            ('Unused images', images_unused_count,
             sizeof_format.sizeof_fmt(images_unused_size,
                                      target_unit='G')),
            ('Ephemeral disks', ephemeral_count,
             sizeof_format.sizeof_fmt(ephemeral_size, target_unit='G')),
            ('Total', volumes_count + images_count + ephemeral_count,
             sizeof_format.sizeof_fmt(volumes_size + images_size +
                                      ephemeral_size, target_unit='G')))
def run(self):
    """Aggregate storage statistics over servers, images and volumes.

    Returns rows of (label, count, human-readable size) covering
    volumes, unused volumes, images, unused images, ephemeral disks
    and a grand total.

    NOTE(review): volume sizes are scaled by G (presumably a bytes-per-
    gigabyte constant) while image/ephemeral sizes are used as-is —
    confirm the units of each source field.
    """
    images_size, images_count = 0, 0
    used_images = set()
    images_unused_size, images_unused_count = 0, 0
    volumes_size, volumes_count = 0, 0
    used_volumes = set()
    volumes_unused_size, volumes_unused_count = 0, 0
    ephemeral_size, ephemeral_count = 0, 0
    # First pass: resources referenced by servers.
    for server in self.get_objects('vms'):
        if server.image is not None:
            images_count += 1
            images_size += server.image.size
            used_images.add(server.image.object_id)
        for ephemeral_disk in server.ephemeral_disks:
            ephemeral_count += 1
            ephemeral_size += ephemeral_disk.size
        for obj in server.attached_volumes:
            volume = obj.volume
            volumes_count += 1
            volumes_size += volume.size * G
            used_volumes.add(volume.object_id)
    # Second pass: remaining resources; each one counts toward both the
    # overall and the "unused" totals. Presumably get_objects excludes
    # the ids passed in — confirm against its definition.
    for image in self.get_objects('images', used_images):
        images_count += 1
        images_size += image.size
        images_unused_count += 1
        images_unused_size += image.size
    for volume in self.get_objects('volumes', used_volumes):
        size = volume.size * G
        volumes_count += 1
        volumes_size += size
        volumes_unused_count += 1
        volumes_unused_size += size
    return (
        ('Volumes', volumes_count,
         sizeof_format.sizeof_fmt(volumes_size, target_unit='G')),
        ('Unused volumes', volumes_unused_count,
         sizeof_format.sizeof_fmt(volumes_unused_size, target_unit='G')),
        ('Images', images_count,
         sizeof_format.sizeof_fmt(images_size, target_unit='G')),
        ('Unused images', images_unused_count,
         sizeof_format.sizeof_fmt(images_unused_size, target_unit='G')),
        ('Ephemeral disks', ephemeral_count,
         sizeof_format.sizeof_fmt(ephemeral_size, target_unit='G')),
        ('Total', volumes_count + images_count + ephemeral_count,
         sizeof_format.sizeof_fmt(volumes_size + images_size +
                                  ephemeral_size, target_unit='G'))
    )
def test_default(self):
    """sizeof_fmt with default arguments formats self.num in megabytes."""
    formatted = sizeof_format.sizeof_fmt(self.num)
    self.assertEqual("117.7MB", formatted)
def test_current_unit_mega_lowercase(self):
    """A lowercase 'm' unit is accepted and scales the result to TB."""
    formatted = sizeof_format.sizeof_fmt(self.num, unit="m")
    self.assertEqual("117.7TB", formatted)
def test_default(self):
    """sizeof_fmt with default arguments formats self.num in megabytes."""
    formatted = sizeof_format.sizeof_fmt(self.num)
    self.assertEqual('117.7MB', formatted)
def test_current_unit_mega_lowercase(self):
    """A lowercase 'm' unit is accepted and scales the result to TB."""
    formatted = sizeof_format.sizeof_fmt(self.num, unit='m')
    self.assertEqual('117.7TB', formatted)