def do_copyimage(self):
    """Copy all images from the image scheme onto their target devices.

    For each image a processing chain is built: source URI -> optional
    HTTP / local-file reader -> optional gunzip -> target block device.
    The target device is validated before the chain runs, the md5
    checksum is verified when both size and md5 are known, and the
    file system is grown to fill the target device afterwards.
    Finally delegates to ``move_files_to_their_places``.

    :raises errors.WrongDeviceError: if a target device is missing or
        is not a block device.
    :raises errors.ImageChecksumMismatchError: if the copied image's
        checksum does not match the expected one.
    """
    LOG.debug('--- Copying images (do_copyimage) ---')
    for image in self.driver.image_scheme.images:
        LOG.debug('Processing image: %s', image.uri)
        processing = au.Chain()

        LOG.debug('Appending uri processor: %s', image.uri)
        processing.append(image.uri)

        # Choose the reader according to the URI scheme.
        if image.uri.startswith('http://'):
            LOG.debug('Appending HTTP processor')
            processing.append(au.HttpUrl)
        elif image.uri.startswith('file://'):
            LOG.debug('Appending FILE processor')
            processing.append(au.LocalFile)

        if image.container == 'gzip':
            LOG.debug('Appending GZIP processor')
            processing.append(au.GunzipStream)

        LOG.debug('Appending TARGET processor: %s', image.target_device)
        # Fail early with a clear error instead of letting the chain
        # blow up on a bogus device path.
        error = None
        if not os.path.exists(image.target_device):
            error = "TARGET processor '{0}' does not exist."
        elif not hw.is_block_device(image.target_device):
            error = "TARGET processor '{0}' is not a block device."
        if error:
            error = error.format(image.target_device)
            LOG.error(error)
            raise errors.WrongDeviceError(error)
        processing.append(image.target_device)

        LOG.debug('Launching image processing chain')
        processing.process()

        if image.size and image.md5:
            LOG.debug('Trying to compare image checksum')
            actual_md5 = utils.calculate_md5(image.target_device,
                                             image.size)
            if actual_md5 == image.md5:
                LOG.debug('Checksum matches successfully: md5=%s',
                          actual_md5)
            else:
                raise errors.ImageChecksumMismatchError(
                    'Actual checksum %s mismatches with expected %s for '
                    'file %s' % (actual_md5, image.md5,
                                 image.target_device))
        else:
            LOG.debug('Skipping image checksum comparing. '
                      'Either size or hash have been missed')

        # TODO(agordeev): separate to another action?
        LOG.debug('Extending image file systems')
        if image.format in ('ext2', 'ext3', 'ext4', 'xfs'):
            LOG.debug('Extending %s %s', image.format, image.target_device)
            fu.extend_fs(image.format, image.target_device)
    self.move_files_to_their_places()
def do_copyimage(self):
    """Copy all images from the image scheme onto their target devices.

    For each image a processing chain is built: source URI -> optional
    HTTP / local-file reader -> optional gunzip -> target block device.
    The target device is validated before the chain runs, the md5
    checksum is verified when both size and md5 are known, and the
    file system is grown to fill the target device afterwards.

    :raises errors.WrongDeviceError: if a target device is missing or
        is not a block device.
    :raises errors.ImageChecksumMismatchError: if the copied image's
        checksum does not match the expected one.
    """
    LOG.debug('--- Copying images (do_copyimage) ---')
    for image in self.driver.image_scheme.images:
        LOG.debug('Processing image: %s' % image.uri)
        processing = au.Chain()

        LOG.debug('Appending uri processor: %s' % image.uri)
        processing.append(image.uri)

        # Choose the reader according to the URI scheme.
        if image.uri.startswith('http://'):
            LOG.debug('Appending HTTP processor')
            processing.append(au.HttpUrl)
        elif image.uri.startswith('file://'):
            LOG.debug('Appending FILE processor')
            processing.append(au.LocalFile)

        if image.container == 'gzip':
            LOG.debug('Appending GZIP processor')
            processing.append(au.GunzipStream)

        LOG.debug('Appending TARGET processor: %s' % image.target_device)
        # Validate the target before running the chain so we fail with
        # a clear WrongDeviceError instead of a low-level write error.
        error = None
        if not os.path.exists(image.target_device):
            error = "TARGET processor '{0}' does not exist."
        elif not hw.is_block_device(image.target_device):
            error = "TARGET processor '{0}' is not a block device."
        if error:
            error = error.format(image.target_device)
            LOG.error(error)
            raise errors.WrongDeviceError(error)
        processing.append(image.target_device)

        LOG.debug('Launching image processing chain')
        processing.process()

        # Checksum verification is only possible when both the expected
        # size and md5 are present in the image scheme.
        if image.size and image.md5:
            LOG.debug('Trying to compare image checksum')
            actual_md5 = utils.calculate_md5(image.target_device,
                                             image.size)
            if actual_md5 == image.md5:
                LOG.debug('Checksum matches successfully: md5=%s' %
                          actual_md5)
            else:
                raise errors.ImageChecksumMismatchError(
                    'Actual checksum %s mismatches with expected %s for '
                    'file %s' % (actual_md5, image.md5,
                                 image.target_device))
        else:
            LOG.debug('Skipping image checksum comparing. '
                      'Ether size or hash have been missed')

        # TODO(agordeev): separate to another action?
        LOG.debug('Extending image file systems')
        if image.format in ('ext2', 'ext3', 'ext4', 'xfs'):
            LOG.debug('Extending %s %s' %
                      (image.format, image.target_device))
            fu.extend_fs(image.format, image.target_device)
def do_copyimage(self, os_id):
    """Copy the images belonging to one OS onto their target devices.

    Multiboot-aware variant: images are selected by ``os_id`` and the
    real target device is resolved from the partition scheme by mount
    point (``image.target_device`` is a mount path here, not a device).

    :param os_id: identifier of the operating system whose images
        should be copied
    :raises errors.ImageChecksumMismatchError: if a copied image's
        checksum does not match the expected one.
    """
    LOG.debug('--- Copying images (do_copyimage) ---')
    for image in self.driver.image_scheme.get_os_images(os_id):
        LOG.debug('Processing image: %s' % image.uri)
        processing = au.Chain()

        LOG.debug('Appending uri processor: %s' % image.uri)
        processing.append(image.uri)

        # Choose the reader according to the URI scheme.
        if image.uri.startswith('http://'):
            LOG.debug('Appending HTTP processor')
            processing.append(au.HttpUrl)
        elif image.uri.startswith('file://'):
            LOG.debug('Appending FILE processor')
            processing.append(au.LocalFile)

        if image.container == 'gzip':
            LOG.debug('Appending GZIP processor')
            processing.append(au.GunzipStream)

        LOG.debug('Appending TARGET processor: %s' % image.target_device)
        # Resolve the block device backing this OS's mount point.
        target = self.driver.partition_scheme.fs_by_mount(
            image.target_device, os_id=os_id).device
        processing.append(target)

        LOG.debug('Launching image processing chain')
        processing.process()

        if image.size and image.md5:
            LOG.debug('Trying to compare image checksum')
            # NOTE(review): the checksum is computed against
            # image.target_device (the mount path) while the data was
            # written to `target` (the device) — confirm this is
            # intentional.
            actual_md5 = utils.calculate_md5(image.target_device,
                                             image.size)
            if actual_md5 == image.md5:
                LOG.debug('Checksum matches successfully: md5=%s' %
                          actual_md5)
            else:
                raise errors.ImageChecksumMismatchError(
                    'Actual checksum %s mismatches with expected %s for '
                    'file %s' % (actual_md5, image.md5,
                                 image.target_device))
        else:
            LOG.debug('Skipping image checksum comparing. '
                      'Ether size or hash have been missed')

        LOG.debug('Extending image file systems')
        if image.format in ('ext2', 'ext3', 'ext4', 'xfs'):
            LOG.debug('Extending %s %s' %
                      (image.format, image.target_device))
            fu.extend_fs(image.format, image.target_device)
        # Give the freshly written file system a new UUID so multiple
        # deployed copies do not collide.
        fu.change_uuid(target)
def _add_configdrive_image(self):
    """Register the config drive ISO as an image in the image scheme.

    Looks up the configdrive partition in the partition scheme,
    measures the ISO file at ``CONF.config_drive_path`` (size + md5)
    and adds a matching raw iso9660 image entry to the image scheme.

    :raises errors.WrongPartitionSchemeError: if the partition scheme
        has no configdrive device.
    """
    cd_device = self.partition_scheme.configdrive_device()
    if cd_device is None:
        raise errors.WrongPartitionSchemeError(
            'Error while trying to get configdrive device: '
            'configdrive device not found')
    cd_path = CONF.config_drive_path
    cd_size = os.path.getsize(cd_path)
    cd_md5 = utils.calculate_md5(cd_path, cd_size)
    self.image_scheme.add_image(
        uri='file://%s' % cd_path,
        target_device=cd_device,
        format='iso9660',
        container='raw',
        size=cd_size,
        md5=cd_md5,
    )
def _add_configdrive_image(self):
    """Register the config drive image in the driver's image scheme.

    Looks up the configdrive partition, measures the config drive file
    (size + md5), detects its file system type and adds a matching raw
    image entry to the image scheme.

    :raises errors.WrongPartitionSchemeError: if the partition scheme
        has no configdrive device.
    """
    # TODO(agordeev): move to validate?
    configdrive_device = self.driver.partition_scheme.configdrive_device()
    if configdrive_device is None:
        raise errors.WrongPartitionSchemeError(
            'Error while trying to get configdrive device: '
            'configdrive device not found')
    size = os.path.getsize(CONF.config_drive_path)
    md5 = utils.calculate_md5(CONF.config_drive_path, size)
    # Detect the actual fs type instead of assuming iso9660.
    fs_type = fu.get_fs_type(CONF.config_drive_path)
    self.driver.image_scheme.add_image(
        uri='file://%s' % CONF.config_drive_path,
        target_device=configdrive_device,
        format=fs_type,
        container='raw',
        size=size,
        md5=md5,
    )
def test_calculate_md5_ok(self):
    """calculate_md5 hashes exactly `size` bytes read from the file."""
    # Expected digests calculated by 'printf %10000s | md5sum'; smaller
    # sizes hash only a prefix of the 10000-space stream.
    mock_open = mock.Mock()
    # Each calculate_md5 call enters the file context once, so supply a
    # fresh 10000-byte stream of spaces per call (6 calls below).
    mock_open.__enter__ = mock.Mock(
        side_effect=(six.BytesIO(b" " * 10000) for _ in range(6)))
    mock_open.__exit__ = mock.Mock(return_value=False)
    with mock.patch("six.moves.builtins.open",
                    mock.Mock(return_value=mock_open), create=True):
        self.assertEqual("f38898bb69bb02bccb9594dfe471c5c0",
                         utils.calculate_md5("fake", 10000))
        self.assertEqual("6934d9d33cd2d0c005994e7d96d2e0d9",
                         utils.calculate_md5("fake", 1000))
        self.assertEqual("1e68934346ee57858834a205017af8b7",
                         utils.calculate_md5("fake", 100))
        self.assertEqual("41b394758330c83757856aa482c79977",
                         utils.calculate_md5("fake", 10))
        self.assertEqual("7215ee9c7d9dc229d2921a40e899ec5f",
                         utils.calculate_md5("fake", 1))
        # size=0 yields the md5 of the empty string.
        self.assertEqual("d41d8cd98f00b204e9800998ecf8427e",
                         utils.calculate_md5("fake", 0))
def dump_mkbootstrap_meta(self, metadata, c_dir, bootstrap_scheme):
    """Dump mkbootstrap metadata to a yaml file.

    Steps:
      1) collect size/md5 for every bootstrap module file in ``c_dir``
      2) merge in data from the do_mkbootstrap ``metadata``
      3) merge in data from the ``bootstrap_scheme``
      4) dump the resulting 'drop_data' dict to the yaml meta file

    :param metadata: dict with do_mkbootstrap metadata; the 'os',
        'all_packages' and 'repos' keys are required
    :param c_dir: directory containing the built module files; the
        meta file is written there as well
    :param bootstrap_scheme: bootstrap scheme providing modules, uuid,
        extend_kopts, label and the container's meta file name
    :raises OSError: if a module file's size cannot be read
    """
    meta_file = os.path.join(c_dir, bootstrap_scheme.container.meta_file)
    drop_data = {'modules': {}}
    for module in bootstrap_scheme.modules:
        fname = os.path.basename(module.uri)
        fs_file = os.path.join(c_dir, fname)
        try:
            raw_size = os.path.getsize(fs_file)
        except OSError as exc:
            # os.path.getsize raises OSError (IOError is only an alias
            # of it on Python 3), so catch OSError to cover Python 2 too.
            LOG.error('There was an error while getting file'
                      ' size: {0}'.format(exc))
            raise
        raw_md5 = utils.calculate_md5(fs_file, raw_size)
        drop_data['modules'][module.name] = {
            'raw_md5': raw_md5,
            'raw_size': raw_size,
            'file': fname,
            'uri': module.uri
        }
    drop_data['uuid'] = bootstrap_scheme.uuid
    drop_data['extend_kopts'] = bootstrap_scheme.extend_kopts
    drop_data['os'] = metadata['os']
    drop_data['all_packages'] = metadata['all_packages']
    drop_data['repos'] = metadata['repos']
    drop_data['label'] = bootstrap_scheme.label

    LOG.debug('Image metadata: %s', drop_data)
    with open(meta_file, 'wt') as f:
        yaml.safe_dump(drop_data, stream=f, encoding='utf-8')
def dump_mkbootstrap_meta(self, metadata, c_dir, bootstrap_scheme):
    """Dump mkbootstrap metadata to a yaml file.

    Steps:
      1) collect size/md5 for every bootstrap module file in ``c_dir``
      2) merge in data from the do_mkbootstrap ``metadata``
      3) merge in data from the ``bootstrap_scheme``
      4) dump the resulting 'drop_data' dict to the yaml meta file

    :param metadata: dict with do_mkbootstrap metadata; the 'os',
        'all_packages' and 'repos' keys are required
    :param c_dir: directory containing the built module files; the
        meta file is written there as well
    :param bootstrap_scheme: bootstrap scheme providing modules, uuid,
        extend_kopts, label and the container's meta file name
    """
    meta_file = os.path.join(c_dir, bootstrap_scheme.container.meta_file)
    drop_data = {'modules': {}}
    for module in bootstrap_scheme.modules:
        fname = os.path.basename(module.uri)
        fs_file = os.path.join(c_dir, fname)
        try:
            raw_size = os.path.getsize(fs_file)
        # NOTE(review): os.path.getsize raises OSError; on Python 2
        # IOError is a distinct class, so this handler may never fire
        # there — confirm whether OSError should be caught instead.
        except IOError as exc:
            LOG.error('There was an error while getting file'
                      ' size: {0}'.format(exc))
            raise
        raw_md5 = utils.calculate_md5(fs_file, raw_size)
        drop_data['modules'][module.name] = {
            'raw_md5': raw_md5,
            'raw_size': raw_size,
            'file': fname,
            'uri': module.uri
        }
    drop_data['uuid'] = bootstrap_scheme.uuid
    drop_data['extend_kopts'] = bootstrap_scheme.extend_kopts
    drop_data['os'] = metadata['os']
    drop_data['all_packages'] = metadata['all_packages']
    drop_data['repos'] = metadata['repos']
    drop_data['label'] = bootstrap_scheme.label

    LOG.debug('Image metadata: %s', drop_data)
    with open(meta_file, 'wt') as f:
        yaml.safe_dump(drop_data, stream=f, encoding='utf-8')
def test_calculate_md5_ok(self):
    """calculate_md5 hashes exactly `size` bytes read from the file."""
    # Expected digests calculated by 'printf %10000s | md5sum'; smaller
    # sizes hash only a prefix of the 10000-space stream.
    mock_open = mock.Mock()
    # Each calculate_md5 call enters the file context once, so supply a
    # fresh 10000-byte stream of spaces per call (6 calls below).
    mock_open.__enter__ = mock.Mock(
        side_effect=(six.BytesIO(b' ' * 10000) for _ in range(6)))
    mock_open.__exit__ = mock.Mock(return_value=False)
    with mock.patch('six.moves.builtins.open',
                    mock.Mock(return_value=mock_open), create=True):
        self.assertEqual('f38898bb69bb02bccb9594dfe471c5c0',
                         utils.calculate_md5('fake', 10000))
        self.assertEqual('6934d9d33cd2d0c005994e7d96d2e0d9',
                         utils.calculate_md5('fake', 1000))
        self.assertEqual('1e68934346ee57858834a205017af8b7',
                         utils.calculate_md5('fake', 100))
        self.assertEqual('41b394758330c83757856aa482c79977',
                         utils.calculate_md5('fake', 10))
        self.assertEqual('7215ee9c7d9dc229d2921a40e899ec5f',
                         utils.calculate_md5('fake', 1))
        # size=0 yields the md5 of the empty string.
        self.assertEqual('d41d8cd98f00b204e9800998ecf8427e',
                         utils.calculate_md5('fake', 0))
def do_build_image(self):
    """Building OS images

    Includes the following steps
        1) create temporary sparse files for all images (truncate)
        2) attach temporary files to loop devices (losetup)
        3) create file systems on these loop devices
        4) create temporary chroot directory
        5) install operating system (install_base_os)
        6) configure apt-get sources, and perform package install
        7) configure OS (clean sources.list and preferences, etc.)
        8) umount loop devices
        9) resize file systems on loop devices
        10) shrink temporary sparse files (images)
        11) containerize (gzip) temporary sparse files
        12) move temporary gzipped files to their final location
    """
    LOG.info('--- Building image (do_build_image) ---')
    driver_os = self.driver.operating_system
    # TODO(kozhukalov): Implement metadata
    # as a pluggable data driver to avoid any fixed format.
    metadata = {}
    metadata['os'] = driver_os.to_dict()

    # TODO(kozhukalov): implement this using image metadata
    # we need to compare list of packages and repos
    LOG.info('*** Checking if image exists ***')
    # Skip the whole build if every image file already exists locally.
    if all([os.path.exists(img.uri.split('file://', 1)[1])
            for img in self.driver.image_scheme.images]):
        LOG.debug('All necessary images are available. '
                  'Nothing needs to be done.')
        return
    LOG.debug('At least one of the necessary images is unavailable. '
              'Starting build process.')
    chroot = bu.mkdtemp_smart(
        CONF.image_build_dir, CONF.image_build_suffix)
    try:
        self.install_base_os(chroot)
        packages = driver_os.packages
        metadata['packages'] = packages

        self._set_apt_repos(
            chroot, driver_os.repos,
            proxies=driver_os.proxies.proxies,
            direct_repo_addrs=driver_os.proxies.direct_repo_addr_list)
        self._update_metadata_with_repos(
            metadata, driver_os.repos)

        LOG.debug('Installing packages using apt-get: %s',
                  ' '.join(packages))
        bu.run_apt_get(chroot, packages=packages,
                       attempts=CONF.fetch_packages_attempts)

        LOG.debug('Post-install OS configuration')
        root = driver_os.get_user_by_name('root')
        bu.do_post_inst(chroot,
                        hashed_root_password=root.hashed_password,
                        allow_unsigned_file=CONF.allow_unsigned_file,
                        force_ipv4_file=CONF.force_ipv4_file)

        LOG.debug('Making sure there are no running processes '
                  'inside chroot before trying to umount chroot')
        # Try a graceful SIGTERM first, escalate to SIGKILL.
        if not bu.stop_chrooted_processes(chroot, signal=signal.SIGTERM):
            if not bu.stop_chrooted_processes(
                    chroot, signal=signal.SIGKILL):
                raise errors.UnexpectedProcessError(
                    'Stopping chrooted processes failed. '
                    'There are some processes running in chroot %s',
                    chroot)

        LOG.info('*** Finalizing image space ***')
        fu.umount_fs(os.path.join(chroot, 'proc'))
        # umounting all loop devices
        self.umount_target(chroot, pseudo=False)

        for image in self.driver.image_scheme.images:
            # find fs with the same loop device object
            # as image.target_device
            fs = self.driver.partition_scheme.fs_by_device(
                image.target_device)

            if fs.type == 'ext4':
                # Journaling was disabled during the build for speed;
                # re-enable it before shipping the image.
                LOG.debug('Trying to re-enable journaling for ext4')
                utils.execute('tune2fs', '-O', 'has_journal',
                              str(fs.device))

            if image.target_device.name:
                LOG.debug('Finally: detaching loop device: {0}'.format(
                    image.target_device.name))
                try:
                    bu.deattach_loop(image.target_device.name)
                except errors.ProcessExecutionError as e:
                    # Best effort: a stuck loop device should not abort
                    # the build.
                    LOG.warning('Error occured while trying to detach '
                                'loop device {0}. Error message: {1}'.
                                format(image.target_device.name, e))

            LOG.debug('Shrinking temporary image file: %s',
                      image.img_tmp_file)
            bu.shrink_sparse_file(image.img_tmp_file)

            raw_size = os.path.getsize(image.img_tmp_file)
            raw_md5 = utils.calculate_md5(image.img_tmp_file, raw_size)

            LOG.debug('Containerizing temporary image file: %s',
                      image.img_tmp_file)
            img_tmp_containerized = bu.containerize(
                image.img_tmp_file, image.container,
                chunk_size=CONF.data_chunk_size)
            img_containerized = image.uri.split('file://', 1)[1]

            # NOTE(kozhukalov): implement abstract publisher
            LOG.debug('Moving image file to the final location: %s',
                      img_containerized)
            shutil.move(img_tmp_containerized, img_containerized)

            container_size = os.path.getsize(img_containerized)
            container_md5 = utils.calculate_md5(
                img_containerized, container_size)

            metadata.setdefault('images', []).append({
                'raw_md5': raw_md5,
                'raw_size': raw_size,
                'raw_name': None,
                'container_name': os.path.basename(img_containerized),
                'container_md5': container_md5,
                'container_size': container_size,
                'container': image.container,
                'format': image.format})

        # NOTE(kozhukalov): implement abstract publisher
        LOG.debug('Image metadata: %s', metadata)
        # NOTE(review): open(..., encoding=...) is Python 3 only —
        # confirm this code path never runs under Python 2.
        with open(self.driver.metadata_uri.split('file://', 1)[1],
                  'wt', encoding='utf-8') as f:
            yaml.safe_dump(metadata, stream=f)
        LOG.info('--- Building image END (do_build_image) ---')
    except Exception as exc:
        LOG.error('Failed to build image: %s', exc)
        raise
    finally:
        LOG.info('Cleanup chroot')
        self.destroy_chroot(chroot)
def do_build_image(self):
    """Building OS images

    Includes the following steps
        1) create temporary sparse files for all images (truncate)
        2) attach temporary files to loop devices (losetup)
        3) create file systems on these loop devices
        4) create temporary chroot directory
        5) install operating system (install_base_os)
        6) configure apt-get sources, and perform package install
        7) configure OS (clean sources.list and preferences, etc.)
        8) umount loop devices
        9) resize file systems on loop devices
        10) shrink temporary sparse files (images)
        11) containerize (gzip) temporary sparse files
        12) move temporary gzipped files to their final location
    """
    LOG.info('--- Building image (do_build_image) ---')
    driver_os = self.driver.operating_system
    # TODO(kozhukalov): Implement metadata
    # as a pluggable data driver to avoid any fixed format.
    metadata = {}
    metadata['os'] = driver_os.to_dict()

    # TODO(kozhukalov): implement this using image metadata
    # we need to compare list of packages and repos
    LOG.info('*** Checking if image exists ***')
    # Skip the whole build if every image file already exists locally.
    if all([
        os.path.exists(img.uri.split('file://', 1)[1])
        for img in self.driver.image_scheme.images
    ]):
        LOG.debug('All necessary images are available. '
                  'Nothing needs to be done.')
        return
    LOG.debug('At least one of the necessary images is unavailable. '
              'Starting build process.')
    chroot = bu.mkdtemp_smart(CONF.image_build_dir,
                              CONF.image_build_suffix)
    try:
        self.install_base_os(chroot)
        packages = driver_os.packages
        metadata['packages'] = packages

        self._set_apt_repos(
            chroot, driver_os.repos,
            proxies=driver_os.proxies.proxies,
            direct_repo_addrs=driver_os.proxies.direct_repo_addr_list)
        self._update_metadata_with_repos(metadata, driver_os.repos)

        LOG.debug('Installing packages using apt-get: %s',
                  ' '.join(packages))
        bu.run_apt_get(chroot, packages=packages,
                       attempts=CONF.fetch_packages_attempts)

        LOG.debug('Post-install OS configuration')
        bu.do_post_inst(chroot,
                        allow_unsigned_file=CONF.allow_unsigned_file,
                        force_ipv4_file=CONF.force_ipv4_file)

        LOG.debug('Making sure there are no running processes '
                  'inside chroot before trying to umount chroot')
        # Try a graceful SIGTERM first, escalate to SIGKILL.
        if not bu.stop_chrooted_processes(chroot, signal=signal.SIGTERM):
            if not bu.stop_chrooted_processes(chroot,
                                              signal=signal.SIGKILL):
                raise errors.UnexpectedProcessError(
                    'Stopping chrooted processes failed. '
                    'There are some processes running in chroot %s',
                    chroot)

        LOG.info('*** Finalizing image space ***')
        fu.umount_fs(os.path.join(chroot, 'proc'))
        # umounting all loop devices
        self.umount_target(chroot, pseudo=False)

        for image in self.driver.image_scheme.images:
            # find fs with the same loop device object
            # as image.target_device
            fs = self.driver.partition_scheme.fs_by_device(
                image.target_device)

            if fs.type == 'ext4':
                # Journaling was disabled during the build for speed;
                # re-enable it before shipping the image.
                LOG.debug('Trying to re-enable journaling for ext4')
                utils.execute('tune2fs', '-O', 'has_journal',
                              str(fs.device))

            if image.target_device.name:
                LOG.debug('Finally: detaching loop device: {0}'.format(
                    image.target_device.name))
                try:
                    bu.deattach_loop(image.target_device.name)
                except errors.ProcessExecutionError as e:
                    # Best effort: a stuck loop device should not abort
                    # the build.
                    LOG.warning(
                        'Error occured while trying to detach '
                        'loop device {0}. Error message: {1}'.format(
                            image.target_device.name, e))

            LOG.debug('Shrinking temporary image file: %s',
                      image.img_tmp_file)
            bu.shrink_sparse_file(image.img_tmp_file)

            raw_size = os.path.getsize(image.img_tmp_file)
            raw_md5 = utils.calculate_md5(image.img_tmp_file, raw_size)

            LOG.debug('Containerizing temporary image file: %s',
                      image.img_tmp_file)
            img_tmp_containerized = bu.containerize(
                image.img_tmp_file, image.container,
                chunk_size=CONF.data_chunk_size)
            img_containerized = image.uri.split('file://', 1)[1]

            # NOTE(kozhukalov): implement abstract publisher
            LOG.debug('Moving image file to the final location: %s',
                      img_containerized)
            shutil.move(img_tmp_containerized, img_containerized)

            container_size = os.path.getsize(img_containerized)
            container_md5 = utils.calculate_md5(img_containerized,
                                                container_size)

            metadata.setdefault('images', []).append({
                'raw_md5': raw_md5,
                'raw_size': raw_size,
                'raw_name': None,
                'container_name': os.path.basename(img_containerized),
                'container_md5': container_md5,
                'container_size': container_size,
                'container': image.container,
                'format': image.format
            })

        # NOTE(kozhukalov): implement abstract publisher
        LOG.debug('Image metadata: %s', metadata)
        # NOTE(review): open(..., encoding=...) is Python 3 only —
        # confirm this code path never runs under Python 2.
        with open(self.driver.metadata_uri.split('file://', 1)[1],
                  'wt', encoding='utf-8') as f:
            yaml.safe_dump(metadata, stream=f)
        LOG.info('--- Building image END (do_build_image) ---')
    except Exception as exc:
        LOG.error('Failed to build image: %s', exc)
        raise
    finally:
        LOG.info('Cleanup chroot')
        self.destroy_chroot(chroot)