def main(): """ DistMigration post mount actions Preserve custom data file(s) e.g udev rules from the system to be migrated to the live migration system and activate those file changes to become effective """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) root_path = Defaults.get_system_root_path() migration_config = MigrationConfig() preserve_info = migration_config.get_preserve_info() if preserve_info: for _, preserve_files in preserve_info.items(): for preserve_file in preserve_files: target_dir = os.path.dirname(preserve_file) source_file = os.path.normpath( os.sep.join([root_path, preserve_file])) log.info('Copy file: {0} to: {1}'.format( source_file, target_dir)) if not os.path.exists(target_dir): Command.run(['mkdir', '-p', target_dir]) shutil.copy(source_file, target_dir) if 'rules' in preserve_info.keys(): Command.run(['udevadm', 'control', '--reload']) Command.run( ['udevadm', 'trigger', '--type=subsystems', '--action=add']) Command.run( ['udevadm', 'trigger', '--type=devices', '--action=add'])
def main(): """ DistMigration setup baseproduct for migration Creates a backup of the system products data and prepares the baseproduct to be suitable for the distribution migration. In case of an error the backup information is used by the zypper migration plugin to restore the original product data such that the plugin's rollback mechanism is not negatively influenced. """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) try: # Note: # zypper implements a handling for the distro_target attribute. # If present in the repository metadata, zypper compares the # value with the baseproduct <target> setup in /etc/products.d. # If they mismatch zypper refuses to create the repo. In the # process of a migration from distribution [A] to [B] this mismatch # always applies by design and prevents the zypper migration # plugin to work. In SCC or RMT the repositories doesn't # contain the distro_target attribute which is the reason # why we don't see this problem there. SMT however creates repos # including distro_target. The current workaround solution is # to delete the target specification in the baseproduct # registration if present. log.info('Updating Base Product to be suitable for migration') SUSEBaseProduct().delete_target_registration() except Exception as issue: message = 'Base Product update failed with: {0}'.format(issue) log.error(message) raise DistMigrationProductSetupException(message)
def main(): """ DistMigration update grub to migrated version Setup and update grub with content from the migrated system Uninstall live migration packages such that they are no longer part of the now migrated system. """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) root_path = Defaults.get_system_root_path() grub_config_file = Defaults.get_grub_config_file() try: log.info('Running grub setup service') migration_packages = ['SLE*-Migration', 'suse-migration-*-activation'] log.info('Uninstalling migration: {0}{1}'.format( os.linesep, Command.run([ 'chroot', root_path, 'zypper', '--non-interactive', '--no-gpg-checks', 'remove' ] + migration_packages, raise_on_error=False).output)) log.info('Creating new grub menu: {0}{1}'.format( os.linesep, Command.run([ 'chroot', root_path, 'grub2-mkconfig', '-o', '{0}{1}'.format(os.sep, grub_config_file) ]).error)) except Exception as issue: message = 'Update grub failed with {0}'.format(issue) log.error(message) raise DistMigrationGrubConfigException(message)
def main(): """ DistMigration load new kernel for kexec reboot Loads the new kernel/initrd after migration for system reboot """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) if not MigrationConfig().is_soft_reboot_requested(): log.info('skipping kexec --load (hard reboot requested)') return root_path = Defaults.get_system_root_path() target_kernel = os.sep.join([root_path, Defaults.get_target_kernel()]) target_initrd = os.sep.join([root_path, Defaults.get_target_initrd()]) kexec_boot_data = '/var/tmp/kexec' Path.create(kexec_boot_data) shutil.copy(target_initrd, kexec_boot_data) try: log.info('Running kernel load service') log.info('Loading the target kernel') Command.run([ 'kexec', '--load', target_kernel, '--initrd', os.sep.join([kexec_boot_data, os.path.basename(target_initrd)]), '--kexec-file-syscall', '--command-line', _get_cmdline(os.path.basename(target_kernel)) ]) except Exception as issue: log.error('Kernel load service raised exception: {0}'.format(issue)) raise DistMigrationKernelRebootException( 'Failed to load kernel/initrd into memory: {0}'.format(issue))
def main(): """ DistMigration post mount actions Preserve custom data file(s) e.g udev rules from the system to be migrated to the live migration system and activate those file changes to become effective """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) root_path = Defaults.get_system_root_path() migration_config = MigrationConfig() system_udev_rules = migration_config.get_preserve_udev_rules_list() if system_udev_rules: for rule_file in system_udev_rules: target_rule_dir = os.path.dirname(rule_file) source_rule_file = os.path.normpath( os.sep.join([root_path, rule_file])) log.info('Copy udev rule: {0} to: {1}'.format( source_rule_file, target_rule_dir)) shutil.copy(source_rule_file, target_rule_dir) Command.run(['udevadm', 'control', '--reload']) Command.run( ['udevadm', 'trigger', '--type=subsystems', '--action=add']) Command.run(['udevadm', 'trigger', '--type=devices', '--action=add'])
# Unit test excerpt: the @patch decorator that injects mock_Path_create
# is not part of this excerpt.
import io
from unittest.mock import MagicMock, patch


def test_setup_no_system_root(self, mock_Path_create):
    with patch('builtins.open', create=True) as mock_open:
        mock_open.return_value = MagicMock(spec=io.IOBase)
        logger = Logger()
        logger.setup(system_root=False)
        mock_Path_create.assert_called_once_with('/var/log')
        mock_open.assert_called_once_with(
            '/var/log/distro_migration.log', 'a', encoding=None
        )
def main(): """ DistMigration pre checks before migration starts Checks whether - repositories' locations are not remote - filesystems in fstab are using LUKS encryption """ Logger.setup(system_root=False) log = logging.getLogger(Defaults.get_migration_log_name()) log.info('Running pre migration checks') check_repos.remote_repos() check_fs.encryption()
def main(): """ DistMigration reboot with new kernel After the migration process is finished, the system reboots unless the debug option is set. Before reboot a reverse umount of the filesystems that got mounted by the mount_system service is performed and thus releases the upgraded system from the migration host. If for whatever reason a filesystem is busy and can't be umounted, this condition is not handled as an error. The reason is that the cleanup should not prevent us from continuing with the reboot process. The risk on reboot of the migration host with a potential active mount is something we accept """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) try: log.info('Systemctl Status Information: {0}{1}'.format( os.linesep, Command.run(['systemctl', 'status', '-l', '--all'], raise_on_error=False).output)) # stop console dialog log. The service holds a busy state # on system-root and stands in our way in case of debug # mode because it grabs the master console in/output Command.run(['systemctl', 'stop', 'suse-migration-console-log'], raise_on_error=False) if MigrationConfig().is_debug_requested(): log.info('Reboot skipped due to debug flag set') else: log.info('Umounting system') system_mount = Fstab() system_mount.read(Defaults.get_system_mount_info_file()) for mount in reversed(system_mount.get_devices()): log.info('Umounting {0}: {1}'.format( mount.mountpoint, Command.run(['umount', '--lazy', mount.mountpoint], raise_on_error=False))) if not MigrationConfig().is_soft_reboot_requested(): restart_system = 'reboot' else: restart_system = 'kexec' log.info('Reboot system: {0}{1}'.format( os.linesep, Command.run(['systemctl', restart_system]))) except Exception: # Uhh, we don't want to be here, but we also don't # want to be stuck in the migration live system. # Keep fingers crossed: log.warning('Reboot system: [Force Reboot]') Command.run(['systemctl', '--force', 'reboot'])
def main(): """ DistMigration mount system to upgrade Searches on all partitions for a fstab file. The first fstab file found is used as the system to upgrade. Filesystems relevant for an upgrade process are read from that fstab in order and mounted such that the system rootfs is available for a zypper based migration process. """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) root_path = Defaults.get_system_root_path() Path.create(root_path) log.info('Running mount system service') if is_mounted(root_path): # root_path is already a mount point, better not continue # The condition is not handled as an error because the # existing mount point under this service created root_path # is considered to represent the system to upgrade and # not something else. Thus if already mounted, let's use # what is there. return log.info('Mount system service: {0} is mounted'.format(root_path)) # Check if booted via loopback grub isoscan_loop_mount = '/run/initramfs/isoscan' if is_mounted(isoscan_loop_mount): # The system to become migrated was booted via a grub # loopback menuentry. This means the disk is blocked by # that readonly loopback mount and needs to be # remounted for read write access first log.info( 'Mount system service: {0} is mounted'.format(isoscan_loop_mount)) Command.run(['mount', '-o', 'remount,rw', isoscan_loop_mount]) fstab, storage_info = read_system_fstab(root_path) if not fstab: log.error('Could not find system in fstab on {0}'.format(storage_info)) raise DistMigrationSystemNotFoundException( 'Could not find system with fstab on {0}'.format(storage_info)) mount_system(root_path, fstab) migration_config = MigrationConfig() migration_config.update_migration_config_file() log.info('Config file content:\n{content}\n'.format( content=migration_config.get_migration_config_file_content()))
def main(): """ DistMigration create a new initrd with added modules Run dracut to build a new initrd that includes multipath modules """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) if MigrationConfig().is_host_independent_initd_requested(): log.info('Creating a new host independent initrd') root_path = Defaults.get_system_root_path() dracut_bind_mounts(root_path) run_dracut(root_path)
def main(): """ DistMigration activate host network setup Setup and activate the network as it is setup on the host to become migrated. This includes the import of the resolver and network configuration from the migration host """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) root_path = Defaults.get_system_root_path() resolv_conf = os.sep.join([root_path, 'etc', 'resolv.conf']) if not os.path.exists(resolv_conf): raise DistMigrationNameResolverException( 'Could not find {0} on migration host'.format(resolv_conf)) if has_host_resolv_setup(resolv_conf): log.info('Copying {}'.format(resolv_conf)) shutil.copy(resolv_conf, '/etc/resolv.conf') else: log.info('Empty {}, continuing without copying it'.format(resolv_conf)) sysconfig_network_providers = os.sep.join( [root_path, 'etc', 'sysconfig', 'network', 'providers']) sysconfig_network_setup = os.sep.join( [root_path, 'etc', 'sysconfig', 'network', '*']) try: log.info('Running setup host network service') system_mount = Fstab() system_mount.read(Defaults.get_system_mount_info_file()) Command.run([ 'mount', '--bind', sysconfig_network_providers, '/etc/sysconfig/network/providers' ]) system_mount.add_entry(sysconfig_network_providers, '/etc/sysconfig/network/providers') for network_setup in glob.glob(sysconfig_network_setup): if os.path.isfile(network_setup): shutil.copy(network_setup, '/etc/sysconfig/network') Command.run(['systemctl', 'reload', 'network']) system_mount.export(Defaults.get_system_mount_info_file()) except Exception as issue: log.error( 'Preparation of migration host network failed with {0}'.format( issue)) raise DistMigrationHostNetworkException( 'Preparation of migration host network failed with {0}'.format( issue))
def main(): """ DistMigration prepare for migration Prepare the migration live system to allow zypper migration to upgrade the system across major distribution versions. The zypper migration process contacts the service that provides the configured repositories on the system being migrated. The service must be one of SUSE's repository services, SCC, RMT, or SMT. This requiers information from the target system. This service makes the necessary information available inside the live system that performs the migration. """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) root_path = Defaults.get_system_root_path() suse_connect_setup = os.sep.join([root_path, 'etc', 'SUSEConnect']) suse_cloud_regionsrv_setup = os.sep.join( [root_path, 'etc', 'regionserverclnt.cfg']) hosts_setup = os.sep.join([root_path, 'etc', 'hosts']) trust_anchors = os.sep.join( [root_path, 'usr', 'share', 'pki', 'trust', 'anchors']) if os.path.exists(suse_connect_setup): shutil.copy(suse_connect_setup, '/etc/SUSEConnect') if os.path.exists(suse_cloud_regionsrv_setup): migration_suse_cloud_regionsrv_setup = '/etc/regionserverclnt.cfg' shutil.copy(suse_cloud_regionsrv_setup, migration_suse_cloud_regionsrv_setup) update_regionsrv_setup(root_path, migration_suse_cloud_regionsrv_setup) if os.path.exists(hosts_setup): shutil.copy(hosts_setup, '/etc/hosts') if os.path.exists(trust_anchors): certificates = os.listdir(trust_anchors) if certificates: for cert in certificates: log.info('Importing certificate: {0}'.format(cert)) shutil.copy(os.sep.join([trust_anchors, cert]), '/usr/share/pki/trust/anchors/') log.info('Update certificate pool') Command.run(['update-ca-certificates']) zypp_metadata = os.sep.join([root_path, 'etc', 'zypp']) zypp_plugins_services = os.sep.join( [root_path, 'usr', 'lib', 'zypp', 'plugins', 'services']) cloud_register_metadata = os.sep.join( [root_path, 'var', 'lib', 'cloudregister']) zypper_log_file = os.sep.join([root_path, 'var', 'log', 'zypper.log']) if os.path.exists(zypper_log_file): try: zypper_host_log_file = zypper_log_file.replace(root_path, '') if not os.path.exists(zypper_host_log_file): with open(zypper_host_log_file, 'w'): # we bind mount the system zypper log file # but the mount target does not exist. 
# Create it as empty file prior bind mounting pass Command.run( ['mount', '--bind', zypper_log_file, zypper_host_log_file]) except Exception as issue: log.warning( 'Bind mounting zypper log file failed with: {0}'.format(issue)) try: # log network info as network-online.target is done at this point log_network_details() log.info('Running prepare service') system_mount = Fstab() system_mount.read(Defaults.get_system_mount_info_file()) log.info('Bind mounting /etc/zypp') Command.run(['mount', '--bind', zypp_metadata, '/etc/zypp']) system_mount.add_entry(zypp_metadata, '/etc/zypp') log.info('Bind mounting /usr/lib/zypp/plugins') Command.run([ 'mount', '--bind', zypp_plugins_services, '/usr/lib/zypp/plugins/services' ]) system_mount.add_entry(zypp_plugins_services, '/usr/lib/zypp/plugins/services') if os.path.exists(cloud_register_metadata): log.info('Bind mounting /var/lib/cloudregister') Path.create('/var/lib/cloudregister') Command.run([ 'mount', '--bind', cloud_register_metadata, '/var/lib/cloudregister' ]) update_smt_cache = '/usr/sbin/updatesmtcache' if os.path.isfile(update_smt_cache): log.info('Updating SMT cache') Command.run([update_smt_cache]) system_mount.export(Defaults.get_system_mount_info_file()) # Check if system is registered migration_config = MigrationConfig() if migration_config.is_zypper_migration_plugin_requested(): if not SUSEConnect.is_registered(): message = 'System not registered. Aborting migration.' log.error(message) raise DistMigrationSystemNotRegisteredException(message) except Exception as issue: log.error( 'Preparation of zypper metadata failed with {0}'.format(issue)) # Not unmounting any of the bind mounts above; the reboot # service should take care of that anyway raise DistMigrationZypperMetaDataException( 'Preparation of zypper metadata failed with {0}'.format(issue))
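
# A minimal usage sketch of the Fstab bookkeeping pattern used above;
# Fstab is the project's own helper whose internals are not shown here.
# Each bind mount is recorded in a mount info file so the reboot service
# can later umount everything in reverse order:
def record_bind_mount_sketch(source, target, info_file):
    system_mount = Fstab()
    system_mount.read(info_file)  # load already recorded mounts
    subprocess.run(['mount', '--bind', source, target], check=True)
    system_mount.add_entry(source, target)
    system_mount.export(info_file)  # persist for the reboot service
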
def main(): """ DistMigration run zypper based migration Call zypper migration plugin and migrate the system. The output of the call is logged on the system to migrate """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) root_path = Defaults.get_system_root_path() try: log.info('Running migrate service') migration_config = MigrationConfig() if migration_config.get_preserve_info(): # set potential missing settings in env log.info('Update env variables') update_env(migration_config.get_preserve_info()) log_env(log) verbose_migration = '--verbose' if migration_config.is_verbosity_requested() else '--no-verbose' if migration_config.is_zypper_migration_plugin_requested(): bash_command = ' '.join( [ 'zypper', 'migration', verbose_migration, '--non-interactive', '--gpg-auto-import-keys', '--no-selfupdate', '--auto-agree-with-licenses', '--allow-vendor-change', '--strict-errors-dist-migration', '--replacefiles', '--product', migration_config.get_migration_product(), '--root', root_path, '&>>', Defaults.get_migration_log_file() ] ) Command.run( ['bash', '-c', bash_command] ) else: bash_command = ' '.join( [ 'zypper', '--no-cd', '--non-interactive', '--gpg-auto-import-keys', '--root', root_path, 'dup', '--auto-agree-with-licenses', '--allow-vendor-change', '--download', 'in-advance', '--replacefiles', '--allow-downgrade', '&>>', Defaults.get_migration_log_file() ] ) zypper_call = Command.run( ['bash', '-c', bash_command], raise_on_error=False ) if zypper_has_failed(zypper_call.returncode): raise DistMigrationCommandException( '{0} failed with: {1}: {2}'.format( bash_command, zypper_call.output, zypper_call.error ) ) except Exception as issue: etc_issue_path = os.sep.join( [root_path, 'etc/issue'] ) log_path_migrated_system = os.sep + os.path.relpath( Defaults.get_migration_log_file(), root_path ) with open(etc_issue_path, 'w') as issue_file: issue_file.write( 'Migration has failed, for further details see {0}'.format( log_path_migrated_system ) ) log.error('migrate service failed with {0}'.format(issue)) raise DistMigrationZypperException( 'Migration failed with {0}'.format(issue) )
def main(): """ DistMigration ssh access for migration user Copy the authoritation key found to the migration user directory in order to access through ssh """ Logger.setup() log = logging.getLogger(Defaults.get_migration_log_name()) ssh_keys_glob_paths = Defaults.get_ssh_keys_paths() migration_ssh_file = Defaults.get_migration_ssh_file() system_ssh_host_keys_glob_path = \ Defaults.get_system_ssh_host_keys_glob_path() sshd_config_path = Defaults.get_system_sshd_config_path() try: log.info('Running ssh keys service') ssh_keys_paths = [] for glob_path in ssh_keys_glob_paths: ssh_keys_paths.extend(glob.glob(glob_path)) keys_list = [] for ssh_keys_path in ssh_keys_paths: log.info('Getting keys from {0}'.format(ssh_keys_path)) with open(ssh_keys_path) as authorized_keys_file: keys_list.append(authorized_keys_file.read()) authorized_keys_content = ''.join(keys_list) log.info('Save keys to {0}'.format(migration_ssh_file)) with open(migration_ssh_file, 'w') as authorized_migration_file: authorized_migration_file.write(authorized_keys_content) system_ssh_host_keys = glob.glob(system_ssh_host_keys_glob_path) sshd_config_host_keys_entries = [] log.info('Copying host ssh keys') for system_ssh_host_key in system_ssh_host_keys: if not system_ssh_host_key.endswith('ssh_host_key'): shutil.copy(system_ssh_host_key, '/etc/ssh/') if not system_ssh_host_key.endswith('.pub'): live_private_ssh_host_key_path = os.sep.join( [ os.path.dirname(sshd_config_path), os.path.basename(system_ssh_host_key) ] ) entry = 'HostKey {0}'.format( live_private_ssh_host_key_path ) sshd_config_host_keys_entries.append(entry) with open(sshd_config_path, 'a') as live_sshd_config_file: # write one newline to be sure any subsequent # HostKey entry starts correctly live_sshd_config_file.write(os.linesep) live_sshd_config_file.write( os.linesep.join(sshd_config_host_keys_entries) ) log.info('Restarting sshd') Command.run( ['systemctl', 'restart', 'sshd'] ) except Exception as issue: log.error( 'SSH key/identity setup failed with: {0}. {1}'.format( issue, 'Continue without ssh access' ) )