def run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Validate the Migration import inputs against the destination provider.

    Skips validation (with a progress notification) when no export info is
    present or the destination provider does not support input validation.
    Returns the (unmodified) task_info.
    """
    notifier = events.EventManager(event_handler)
    dest_type = destination["type"]

    # Nothing to validate without the instance's export info:
    if task_info.get("export_info") is None:
        notifier.progress_update(
            "Instance export info is not set. Cannot perform Migration "
            "Import validation for destination platform "
            "'%s'" % dest_type)
        return task_info

    conn_info = base.get_connection_info(ctxt, destination)
    validation_provider = providers_factory.get_provider(
        dest_type, constants.PROVIDER_TYPE_VALIDATE_MIGRATION_IMPORT,
        event_handler, raise_if_not_found=False)
    if validation_provider:
        # NOTE: the target environment JSON schema should have been validated
        # upon accepting the Migration API creation request.
        target_env = destination.get("target_environment", {})
        validation_provider.validate_migration_import_input(
            ctxt, conn_info, target_env, task_info["export_info"])
    else:
        notifier.progress_update(
            "Migration Import Provider for platform '%s' does not "
            "support Migration input validation" % dest_type)
    return task_info
def run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Apply new source environment options for a Replica on its provider.

    Stores the provider-updated 'volumes_info' back into task_info.
    Raises CoriolisException if the source provider lacks update support.
    """
    notifier = events.EventManager(event_handler)

    updated_env = task_info.get('source_environment', {})
    if not updated_env:
        notifier.progress_update(
            "No new source environment options provided")
        return task_info

    origin_type = origin["type"]
    update_provider = providers_factory.get_provider(
        origin_type, constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if update_provider is None:
        raise exception.CoriolisException(
            "Replica source provider plugin for '%s' does not support"
            " updating Replicas" % origin_type)

    conn_info = base.get_connection_info(ctxt, origin)
    LOG.info("Checking source provider environment params")
    # NOTE: the `source_environment` in the `origin` is the one set
    # in the dedicated DB column of the Replica and thus stores
    # the previous value of it:
    previous_env = origin.get('source_environment', {})
    task_info['volumes_info'] = (
        update_provider.check_update_source_environment_params(
            ctxt, conn_info, instance, task_info.get("volumes_info", {}),
            previous_env, updated_env))
    return task_info
def _run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Delete the Replica's disks on the destination platform.

    Always returns an empty 'volumes_info' list; warns if the provider
    unexpectedly reported leftover volumes after deletion.
    """
    notifier = events.EventManager(event_handler)
    if not task_info.get("volumes_info"):
        LOG.debug("No volumes_info present. Skipping disk deletion.")
        notifier.progress_update(
            "No previous volumes information present, nothing to delete")
        return {'volumes_info': []}

    import_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)
    conn_info = base.get_connection_info(ctxt, destination)
    target_env = task_info['target_environment']
    remaining = import_provider.delete_replica_disks(
        ctxt, conn_info, target_env, _get_volumes_info(task_info))
    if remaining:
        # Deletion should have left nothing behind:
        LOG.warn(
            "'volumes_info' should have been void after disk "
            "deletion task but it is: %s" % (
                utils.sanitize_task_info({'volumes_info': remaining})))
    return {'volumes_info': []}
def run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Validate the Replica import inputs against the destination provider.

    Raises CoriolisException when no export info has been set in task_info.
    Returns the (unmodified) task_info.
    """
    notifier = events.EventManager(event_handler)
    dest_type = destination["type"]
    conn_info = base.get_connection_info(ctxt, destination)

    validation_provider = providers_factory.get_provider(
        dest_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
        event_handler, raise_if_not_found=False)
    if validation_provider is None:
        notifier.progress_update(
            "Replica Import Provider for platform '%s' does not support "
            "Replica input validation" % dest_type)
        return task_info

    export_info = task_info.get("export_info")
    if not export_info:
        raise exception.CoriolisException(
            "Instance export info is not set. Cannot perform "
            "Replica Import validation for destination platform "
            "'%s'" % dest_type)

    # NOTE: the target environment JSON schema should have been validated
    # upon accepting the Replica API creation request.
    validation_provider.validate_replica_import_input(
        ctxt, conn_info, destination.get("target_environment", {}),
        export_info)
    return task_info
def run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Validate the Replica deployment inputs on the destination provider.

    The export info is schema-validated first; the provider-side check is
    skipped (with a progress notification) when unsupported.
    """
    notifier = events.EventManager(event_handler)
    conn_info = base.get_connection_info(ctxt, destination)
    dest_type = destination["type"]
    export_info = task_info["export_info"]

    # validate Export info:
    schemas.validate_value(
        export_info, schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA)

    # validate destination params:
    validation_provider = providers_factory.get_provider(
        dest_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
        event_handler, raise_if_not_found=False)
    if validation_provider:
        # NOTE: the target environment JSON schema should have been validated
        # upon accepting the Replica API creation request.
        validation_provider.validate_replica_deployment_input(
            ctxt, conn_info, destination.get("target_environment", {}),
            export_info)
    else:
        notifier.progress_update(
            "Replica Deployment Provider for platform '%s' does not "
            "support Replica Deployment input validation" % (dest_type))
    return task_info
def _run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Validate Replica import inputs; returns an empty task-info update.

    Raises InvalidActionTasksExecutionState when export info is missing.
    """
    notifier = events.EventManager(event_handler)
    dest_type = destination["type"]
    conn_info = base.get_connection_info(ctxt, destination)

    validation_provider = providers_factory.get_provider(
        dest_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
        event_handler, raise_if_not_found=False)
    if validation_provider is None:
        notifier.progress_update(
            "Replica Import Provider for platform '%s' does not support "
            "Replica input validation" % dest_type)
        return {}

    export_info = task_info.get("export_info")
    if not export_info:
        raise exception.InvalidActionTasksExecutionState(
            "Instance export info is not set. Cannot perform "
            "Replica Import validation for destination platform "
            "'%s'" % dest_type)

    self._validate_provider_replica_import_input(
        validation_provider, ctxt, conn_info,
        task_info["target_environment"], export_info)
    return {}
def _run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Signal minion machine release and clear all returned task info fields.

    Returns a mapping of every declared returned property to None.
    """
    events.EventManager(event_handler).progress_update(
        "Releasing minion machine")
    # dict.fromkeys defaults every value to None:
    return dict.fromkeys(self.get_returned_task_info_properties())
def _run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Update the Replica's destination environment options on the provider.

    Returns a dict with the resulting 'volumes_info' and the
    'target_environment' that should be persisted for the Replica.
    Raises InvalidActionTasksExecutionState if the destination provider
    does not support Replica updates.
    """
    event_manager = events.EventManager(event_handler)
    volumes_info = task_info.get("volumes_info", [])
    new_destination_env = task_info.get('target_environment', {})
    # NOTE: the `target_environment` in the `destination` is the one
    # set in the dedicated DB column of the Replica and thus stores
    # the previous value of it:
    old_destination_env = destination.get('target_environment', {})
    if not new_destination_env:
        # Nothing to update; keep the previous environment and volumes:
        event_manager.progress_update(
            "No new destination environment options provided")
        return {
            "target_environment": old_destination_env,
            "volumes_info": volumes_info
        }
    destination_provider = providers_factory.get_provider(
        destination["type"],
        constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        raise exception.InvalidActionTasksExecutionState(
            "Replica destination provider plugin for '%s' does not "
            "support updating Replicas" % destination["type"])
    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    export_info = task_info.get("export_info", {})
    LOG.info("Checking destination provider environment params")
    volumes_info = (
        destination_provider.check_update_destination_environment_params(
            ctxt, destination_connection_info, export_info, volumes_info,
            old_destination_env, new_destination_env))
    if volumes_info:
        # Ensure the provider returned well-formed, correctly-ordered
        # volumes info:
        schemas.validate_value(
            volumes_info, schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
        volumes_info = _check_ensure_volumes_info_ordering(
            export_info, volumes_info)
    else:
        # Best-effort fallback: re-read the previous value from task_info.
        LOG.warn(
            "Destination update method for '%s' dest provider did NOT "
            "return any volumes info. Defaulting to old value.",
            destination["type"])
        volumes_info = task_info.get("volumes_info", [])
    return {
        "volumes_info": volumes_info,
        "target_environment": new_destination_env
    }
def _run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Update the Replica's source environment options on the source provider.

    Returns a dict with the resulting 'volumes_info' and the
    'source_environment' that should be persisted for the Replica.
    Raises InvalidActionTasksExecutionState if the source provider does
    not support Replica updates.
    """
    event_manager = events.EventManager(event_handler)
    volumes_info = task_info.get("volumes_info", [])
    new_source_env = task_info.get('source_environment', {})
    # NOTE: the `source_environment` in the `origin` is the one set
    # in the dedicated DB column of the Replica and thus stores
    # the previous value of it. Default to {} (as all sibling tasks do)
    # so neither the provider call nor the returned task info ever
    # receives None:
    old_source_env = origin.get('source_environment', {})
    if not new_source_env:
        # Nothing to update; keep the previous environment and volumes:
        event_manager.progress_update(
            "No new source environment options provided")
        return {
            'volumes_info': volumes_info,
            'source_environment': old_source_env
        }
    source_provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if not source_provider:
        raise exception.InvalidActionTasksExecutionState(
            "Replica source provider plugin for '%s' does not support"
            " updating Replicas" % origin["type"])
    origin_connection_info = base.get_connection_info(ctxt, origin)
    LOG.info("Checking source provider environment params")
    volumes_info = (source_provider.check_update_source_environment_params(
        ctxt, origin_connection_info, instance, volumes_info,
        old_source_env, new_source_env))
    if volumes_info:
        # Ensure the provider returned well-formed volumes info:
        schemas.validate_value(
            volumes_info, schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
    else:
        # Best-effort fallback: re-read the previous value from task_info.
        LOG.warn(
            "Source update method for '%s' source provider did NOT "
            "return any volumes info. Defaulting to old value.",
            origin["type"])
        volumes_info = task_info.get("volumes_info", [])
    return {
        "volumes_info": volumes_info,
        "source_environment": new_source_env
    }
def run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Obtain and schema-validate the instance's export info from the source.

    Tries the source's Migration-export validation provider first; falls
    back to querying the endpoint-instances provider. Stores the result
    under task_info["export_info"] and returns task_info.
    """
    event_manager = events.EventManager(event_handler)
    origin_connection_info = base.get_connection_info(ctxt, origin)
    origin_type = origin["type"]
    source_provider = providers_factory.get_provider(
        origin_type, constants.PROVIDER_TYPE_VALIDATE_MIGRATION_EXPORT,
        event_handler, raise_if_not_found=False)
    export_info = None
    if source_provider:
        # The validation provider may itself return the export info:
        export_info = source_provider.validate_migration_export_input(
            ctxt, origin_connection_info, instance,
            source_environment=origin.get("source_environment", {}))
    else:
        event_manager.progress_update(
            "Migration Export Provider for platform '%s' does not "
            "support Migration input validation" % origin_type)
    if export_info is None:
        # Fallback: query the instance's export info directly:
        source_endpoint_provider = providers_factory.get_provider(
            origin_type, constants.PROVIDER_TYPE_ENDPOINT_INSTANCES,
            event_handler, raise_if_not_found=False)
        if not source_endpoint_provider:
            event_manager.progress_update(
                "Migration Export Provider for platform '%s' does not "
                "support querying instance export info" % origin_type)
            return task_info
        export_info = source_endpoint_provider.get_instance(
            ctxt, origin_connection_info, instance)
    # validate Export info:
    schemas.validate_value(
        export_info, schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA)
    # NOTE: this export info will get overridden with updated values
    # and disk paths after the ExportInstanceTask.
    task_info["export_info"] = export_info
    return task_info
def run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Validate the Replica export inputs on the source provider, if supported.

    Returns the (unmodified) task_info.
    """
    notifier = events.EventManager(event_handler)
    origin_type = origin["type"]
    validation_provider = providers_factory.get_provider(
        origin_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_EXPORT,
        event_handler, raise_if_not_found=False)
    conn_info = base.get_connection_info(ctxt, origin)
    if validation_provider:
        validation_provider.validate_replica_export_input(
            ctxt, conn_info, instance,
            source_environment=origin.get("source_environment", {}))
    else:
        notifier.progress_update(
            "Replica Export Provider for platform '%s' does not support "
            "Replica input validation" % origin_type)
    return task_info
def run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Apply new destination environment options for a Replica.

    Stores the provider-updated (schema-validated and re-ordered)
    'volumes_info' back into task_info and returns it. Raises
    CoriolisException if the destination provider lacks update support.
    """
    event_manager = events.EventManager(event_handler)
    new_destination_env = task_info.get('destination_environment', {})
    if not new_destination_env:
        event_manager.progress_update(
            "No new destination environment options provided")
        return task_info
    destination_provider = providers_factory.get_provider(
        destination["type"],
        constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        raise exception.CoriolisException(
            "Replica destination provider plugin for '%s' does not "
            "support updating Replicas" % destination["type"])
    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    export_info = task_info.get("export_info", {})
    volumes_info = task_info.get("volumes_info", {})
    LOG.info("Checking destination provider environment params")
    # NOTE: the `target_environment` in the `destination` is the one
    # set in the dedicated DB column of the Replica and thus stores
    # the previous value of it:
    old_destination_env = destination.get('target_environment', {})
    volumes_info = (
        destination_provider.check_update_destination_environment_params(
            ctxt, destination_connection_info, export_info, volumes_info,
            old_destination_env, new_destination_env))
    # Ensure the provider returned well-formed, correctly-ordered
    # volumes info:
    schemas.validate_value(
        volumes_info, schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
    volumes_info = _check_ensure_volumes_info_ordering(
        export_info, volumes_info)
    task_info['volumes_info'] = volumes_info
    return task_info
def _run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Delete the Replica's snapshots on the source platform.

    Returns the provider's resulting 'volumes_info'; short-circuits with
    an empty list when there is nothing to delete.
    """
    notifier = events.EventManager(event_handler)
    if not task_info.get("volumes_info"):
        LOG.debug(
            "No volumes_info present. Skipping source snapshot deletion.")
        notifier.progress_update(
            "No previous volumes information present, nothing to delete")
        return {'volumes_info': []}

    export_provider = providers_factory.get_provider(
        origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    conn_info = base.get_connection_info(ctxt, origin)
    source_env = task_info['source_environment']
    remaining_volumes = export_provider.delete_replica_source_snapshots(
        ctxt, conn_info, source_env, _get_volumes_info(task_info))
    return {'volumes_info': remaining_volumes}
def _run(self, ctxt, instance, origin, destination, task_info, event_handler):
    """Select the optimal destination flavor for the instance.

    Records the chosen flavor under
    instance_deployment_info['selected_flavor'] and returns the updated
    'instance_deployment_info'.
    """
    flavor_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_INSTANCE_FLAVOR,
        event_handler)
    conn_info = base.get_connection_info(ctxt, destination)
    selected_flavor = flavor_provider.get_optimal_flavor(
        ctxt, conn_info, task_info["target_environment"],
        task_info["export_info"])

    deployment_info = task_info.get("instance_deployment_info")
    if deployment_info is None:
        deployment_info = {}
    deployment_info["selected_flavor"] = selected_flavor

    events.EventManager(event_handler).progress_update(
        "Selected flavor: %s" % selected_flavor)
    return {"instance_deployment_info": deployment_info}
def copy_disk_data(target_conn_info, volumes_info, event_handler):
    """Sync the disks in volumes_info to the target over SSH in parallel.

    Waits for SSH connectivity on the target, then fans out one copy job
    per volume over a green thread pool. Raises the first failed job's
    original exception (with its traceback) on any volume sync failure.
    """
    # TODO(gsamfira): the disk image should be an URI that can either be local
    # (file://) or remote (https://, ftp://, smb://, nfs:// etc).
    # This must happen if we are to implement multi-worker scenarios.
    # In such cases, it is not guaranteed that the disk sync task
    # will be started on the same node onto which the import
    # happened. It may also be conceivable, that wherever the disk
    # image ends up, we might be able to directly expose it using
    # NFS, iSCSI or any other network protocol. In which case,
    # we can skip downloading it locally just to sync it.
    event_manager = events.EventManager(event_handler)
    ip = target_conn_info["ip"]
    port = target_conn_info.get("port", 22)
    username = target_conn_info["username"]
    pkey = target_conn_info.get("pkey")
    password = target_conn_info.get("password")
    event_manager.progress_update("Waiting for connectivity on %s:%s" % (
        ip, port))
    utils.wait_for_port_connectivity(ip, port)
    backup_writer = backup_writers.SSHBackupWriter(
        ip, port, username, pkey, password, volumes_info)
    disk_image_reader = qemu_reader.QEMUDiskImageReader()
    pool = eventlet.greenpool.GreenPool()
    job_data = [(vol, disk_image_reader, backup_writer, event_manager)
                for vol in volumes_info]
    # Each job yields (result, disk_id, error); on error, `result` holds
    # the (exc_type, exc_value, traceback) triple from sys.exc_info():
    for result, disk_id, error in pool.imap(_copy_wrapper, job_data):
        # TODO(gsamfira): There is no use in letting the other disks finish
        # sync-ing as we don't save the state of the disk sync anywhere (yet).
        # When/If we ever do add this info to the database, keep track of
        # failures, and allow any other parallel sync to finish
        if error:
            event_manager.progress_update(
                "Volume \"%s\" failed to sync" % disk_id)
            # Re-raise the worker's original exception with its traceback:
            raise result[0](result[1]).with_traceback(result[2])
def morph_image(origin_provider, destination_provider, connection_info,
                osmorphing_info, user_script, event_handler):
    """Mount the instance's OS and adapt it for the destination platform.

    Mounts the OS partitions, runs the optional user script, then swaps
    source-specific packages for destination ones using the providers'
    OSMorphing tools, and finally dismounts the OS.
    """
    event_manager = events.EventManager(event_handler)
    event_manager.progress_update("Preparing instance for target platform")
    os_type = osmorphing_info.get('os_type')
    ignore_devices = osmorphing_info.get('ignore_devices', [])
    os_mount_tools = osmount_factory.get_os_mount_tools(
        os_type, connection_info, event_manager, ignore_devices)
    proxy_settings = _get_proxy_settings()
    os_mount_tools.set_proxy(proxy_settings)
    event_manager.progress_update("Preparing for OS partitions discovery")
    os_mount_tools.setup()
    event_manager.progress_update("Discovering and mounting OS partitions")
    os_root_dir, os_root_dev = os_mount_tools.mount_os()
    osmorphing_info['os_root_dir'] = os_root_dir
    osmorphing_info['os_root_dev'] = os_root_dev
    conn = os_mount_tools.get_connection()
    environment = os_mount_tools.get_environment()
    # Export-side tools are optional; morphing proceeds without them:
    try:
        (export_os_morphing_tools,
         _) = origin_provider.get_os_morphing_tools(conn, osmorphing_info)
        export_os_morphing_tools.set_environment(environment)
    except exception.OSMorphingToolsNotFound:
        LOG.warn("No tools found for export provider of type: %s",
                 type(origin_provider))
        export_os_morphing_tools = None
    (import_os_morphing_tools,
     os_info) = destination_provider.get_os_morphing_tools(
        conn, osmorphing_info)
    if user_script:
        event_manager.progress_update('Running OS morphing user script')
        # NOTE(review): run_user_script is invoked before the
        # `if not import_os_morphing_tools` check below — confirm the
        # destination provider can never return falsy tools here.
        import_os_morphing_tools.run_user_script(user_script)
    else:
        event_manager.progress_update('No OS morphing user script specified')
    if not import_os_morphing_tools:
        event_manager.progress_update(
            'No OS morphing tools found for this instance')
    else:
        import_os_morphing_tools.set_environment(environment)
        event_manager.progress_update('OS being migrated: %s' % str(os_info))
        (packages_add, _) = import_os_morphing_tools.get_packages()
        if export_os_morphing_tools:
            (_, packages_remove) = export_os_morphing_tools.get_packages()
            # Don't remove packages that need to be installed
            packages_remove = list(set(packages_remove) - set(packages_add))
            LOG.info("Pre packages uninstall")
            export_os_morphing_tools.pre_packages_uninstall(packages_remove)
            if packages_remove:
                event_manager.progress_update(
                    "Removing packages: %s" % str(packages_remove))
                export_os_morphing_tools.uninstall_packages(packages_remove)
            LOG.info("Post packages uninstall")
            export_os_morphing_tools.post_packages_uninstall(packages_remove)
        LOG.info("Pre packages install")
        import_os_morphing_tools.pre_packages_install(packages_add)
        nics_info = osmorphing_info.get('nics_info')
        set_dhcp = osmorphing_info.get('nics_set_dhcp', True)
        import_os_morphing_tools.set_net_config(nics_info, dhcp=set_dhcp)
        LOG.info("Pre packages")
        if packages_add:
            event_manager.progress_update(
                "Adding packages: %s" % str(packages_add))
            try:
                import_os_morphing_tools.install_packages(packages_add)
            except Exception as err:
                # Wrap install failures in a user-facing error:
                raise exception.CoriolisException(
                    "Failed to install packages: %s. Please review logs"
                    " for more details." % ", ".join(packages_add)) from err
        LOG.info("Post packages install")
        import_os_morphing_tools.post_packages_install(packages_add)
    event_manager.progress_update("Dismounting OS partitions")
    os_mount_tools.dismount_os(os_root_dir)
def morph_image(origin_provider, destination_provider, connection_info,
                osmorphing_info, user_script, event_handler):
    """Mount the instance's OS, detect it, and adapt it for the destination.

    Resolves OSMorphing tools classes for both providers from the detected
    OS info; the export-side tools are optional, the import-side tools are
    mandatory (OSMorphingToolsNotFound is raised otherwise).
    """
    event_manager = events.EventManager(event_handler)
    os_type = osmorphing_info.get('os_type')
    ignore_devices = osmorphing_info.get('ignore_devices', [])
    # instantiate and run OSMount tools:
    os_mount_tools = osmount_factory.get_os_mount_tools(
        os_type, connection_info, event_manager, ignore_devices)
    proxy_settings = _get_proxy_settings()
    os_mount_tools.set_proxy(proxy_settings)
    LOG.info("Preparing for OS partitions discovery")
    os_mount_tools.setup()
    event_manager.progress_update("Discovering and mounting OS partitions")
    os_root_dir, os_root_dev = os_mount_tools.mount_os()
    osmorphing_info['os_root_dir'] = os_root_dir
    osmorphing_info['os_root_dev'] = os_root_dev
    conn = os_mount_tools.get_connection()
    environment = os_mount_tools.get_environment()
    # Detect the mounted OS to drive tools class selection:
    detected_os_info = run_os_detect(
        origin_provider, destination_provider, conn, os_type, os_root_dir,
        osmorphing_info, tools_environment=environment)
    # TODO(aznashwan):
    # - export the source hypervisor type option in the VM's export info
    # - automatically detect the target hypervisor type from the worker VM
    hypervisor_type = osmorphing_info.get('hypervisor_type', None)
    # Export-side tools are optional; morphing proceeds without them:
    export_os_morphing_tools = None
    try:
        export_tools_cls = get_osmorphing_tools_class_for_provider(
            origin_provider, detected_os_info, os_type, osmorphing_info)
        if export_tools_cls:
            LOG.info(
                "Instantiating OSMorphing tools class '%s' for export provider"
                " '%s'", export_tools_cls.__name__, type(origin_provider))
            export_os_morphing_tools = export_tools_cls(
                conn, os_root_dir, os_root_dev, hypervisor_type,
                event_manager, detected_os_info)
            export_os_morphing_tools.set_environment(environment)
        else:
            LOG.debug(
                "No compatible OSMorphing tools class found for export provider "
                "'%s'", type(origin_provider).__name__)
    except exception.OSMorphingToolsNotFound:
        LOG.warn("No tools found for export provider of type: %s",
                 type(origin_provider))
    # Import-side tools are mandatory:
    import_os_morphing_tools_cls = get_osmorphing_tools_class_for_provider(
        destination_provider, detected_os_info, os_type, osmorphing_info)
    if not import_os_morphing_tools_cls:
        LOG.error(
            "No compatible OSMorphing tools found from import provider '%s' "
            "for the given detected OS info %s", type(destination_provider),
            detected_os_info)
        raise exception.OSMorphingToolsNotFound(os_type=os_type)
    import_os_morphing_tools = import_os_morphing_tools_cls(
        conn, os_root_dir, os_root_dev, hypervisor_type, event_manager,
        detected_os_info)
    import_os_morphing_tools.set_environment(environment)
    if user_script:
        event_manager.progress_update('Running OS morphing user script')
        import_os_morphing_tools.run_user_script(user_script)
    else:
        event_manager.progress_update('No OS morphing user script specified')
    event_manager.progress_update(
        'OS being migrated: %s' % detected_os_info['friendly_release_name'])
    (packages_add, _) = import_os_morphing_tools.get_packages()
    if export_os_morphing_tools:
        (_, packages_remove) = export_os_morphing_tools.get_packages()
        # Don't remove packages that need to be installed
        packages_remove = list(set(packages_remove) - set(packages_add))
        LOG.info("Pre packages uninstall")
        export_os_morphing_tools.pre_packages_uninstall(packages_remove)
        if packages_remove:
            event_manager.progress_update(
                "Removing packages: %s" % str(packages_remove))
            export_os_morphing_tools.uninstall_packages(packages_remove)
        LOG.info("Post packages uninstall")
        export_os_morphing_tools.post_packages_uninstall(packages_remove)
    LOG.info("Pre packages install")
    import_os_morphing_tools.pre_packages_install(packages_add)
    nics_info = osmorphing_info.get('nics_info')
    set_dhcp = osmorphing_info.get('nics_set_dhcp', True)
    import_os_morphing_tools.set_net_config(nics_info, dhcp=set_dhcp)
    LOG.info("Pre packages")
    if packages_add:
        event_manager.progress_update(
            "Adding packages: %s" % str(packages_add))
        try:
            import_os_morphing_tools.install_packages(packages_add)
        except Exception as err:
            # Wrap install failures in a user-facing error:
            raise exception.CoriolisException(
                "Failed to install packages: %s. Please review logs"
                " for more details." % ", ".join(packages_add)) from err
    LOG.info("Post packages install")
    import_os_morphing_tools.post_packages_install(packages_add)
    event_manager.progress_update("Dismounting OS partitions")
    os_mount_tools.dismount_os(os_root_dir)
def run(self, ctxt, instance, origin, destination, task_info, event_handler): event_manager = events.EventManager(event_handler) # validate source params: origin_type = origin["type"] origin_connection_info = base.get_connection_info(ctxt, origin) destination_connection_info = base.get_connection_info( ctxt, destination) destination_type = destination["type"] source_provider = providers_factory.get_provider( origin_type, constants.PROVIDER_TYPE_VALIDATE_MIGRATION_EXPORT, event_handler, raise_if_not_found=False) export_info = None if source_provider: export_info = source_provider.validate_migration_export_input( ctxt, base.get_connection_info(ctxt, origin), instance, source_environment=origin.get("source_environment", {})) else: event_manager.progress_update( "Migration Export Provider for platform '%s' does not support " "Migration input validation" % origin_type) if export_info is None: source_endpoint_provider = providers_factory.get_provider( origin_type, constants.PROVIDER_TYPE_ENDPOINT_INSTANCES, event_handler, raise_if_not_found=False) if not source_endpoint_provider: event_manager.progress_update( "Migration Export Provider for platform '%s' does not " "support querying instance export info. Cannot perform " "Migration Import validation for destination platform " "'%s'" % (origin_type, destination_type)) return task_info export_info = source_endpoint_provider.get_instance( ctxt, origin_connection_info, instance) # validate Export info: schemas.validate_value(export_info, schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA) # NOTE: this export info will get overriden with updated values # and disk paths after the ExportInstanceTask. 
task_info["export_info"] = export_info # validate destination params: destination_provider = providers_factory.get_provider( destination_type, constants.PROVIDER_TYPE_VALIDATE_MIGRATION_IMPORT, event_handler, raise_if_not_found=False) if not destination_provider: event_manager.progress_update( "Migration Import Provider for platform '%s' does not support " "Migration input validation" % destination_type) return task_info # NOTE: the target environment JSON schema should have been validated # upon accepting the Migration API creation request. target_environment = destination.get("target_environment", {}) destination_provider.validate_migration_import_input( ctxt, destination_connection_info, target_environment, export_info) return task_info
def morph_image(origin_provider, destination_provider, connection_info,
                osmorphing_info, event_handler):
    """Mount the instance's OS and adapt it for the destination platform.

    Both providers' OSMorphing tools are optional here: when the
    destination tools are missing, morphing is skipped entirely (with a
    progress notification) and the OS is simply dismounted again.
    """
    event_manager = events.EventManager(event_handler)
    event_manager.progress_update("Preparing instance for target platform")
    os_type = osmorphing_info.get('os_type')
    ignore_devices = osmorphing_info.get('ignore_devices', [])
    os_mount_tools = osmount_factory.get_os_mount_tools(
        os_type, connection_info, event_manager, ignore_devices)
    proxy_settings = _get_proxy_settings()
    os_mount_tools.set_proxy(proxy_settings)
    event_manager.progress_update("Preparing for OS partitions discovery")
    os_mount_tools.setup()
    event_manager.progress_update("Discovering and mounting OS partitions")
    os_root_dir, other_mounted_dirs, os_root_dev = os_mount_tools.mount_os()
    osmorphing_info['os_root_dir'] = os_root_dir
    osmorphing_info['os_root_dev'] = os_root_dev
    conn = os_mount_tools.get_connection()
    environment = os_mount_tools.get_environment()
    # Export-side tools are optional:
    try:
        (export_os_morphing_tools,
         _) = origin_provider.get_os_morphing_tools(conn, osmorphing_info)
        export_os_morphing_tools.set_environment(environment)
    except exception.OSMorphingToolsNotFound:
        export_os_morphing_tools = None
    # Import-side tools are also optional in this variant:
    try:
        (import_os_morphing_tools,
         os_info) = destination_provider.get_os_morphing_tools(
            conn, osmorphing_info)
        import_os_morphing_tools.set_environment(environment)
    except exception.OSMorphingToolsNotFound:
        import_os_morphing_tools = None
        os_info = None
    if not import_os_morphing_tools:
        event_manager.progress_update(
            'No OS morphing tools found for this instance')
    else:
        event_manager.progress_update('OS being migrated: %s' % str(os_info))
        (packages_add, _) = import_os_morphing_tools.get_packages()
        if export_os_morphing_tools:
            (_, packages_remove) = export_os_morphing_tools.get_packages()
            # Don't remove packages that need to be installed
            packages_remove = list(set(packages_remove) - set(packages_add))
            LOG.info("Pre packages uninstall")
            export_os_morphing_tools.pre_packages_uninstall(packages_remove)
            if packages_remove:
                event_manager.progress_update(
                    "Removing packages: %s" % str(packages_remove))
                export_os_morphing_tools.uninstall_packages(packages_remove)
            LOG.info("Post packages uninstall")
            export_os_morphing_tools.post_packages_uninstall(packages_remove)
        LOG.info("Pre packages install")
        import_os_morphing_tools.pre_packages_install(packages_add)
        nics_info = osmorphing_info.get('nics_info')
        set_dhcp = osmorphing_info.get('nics_set_dhcp', True)
        import_os_morphing_tools.set_net_config(nics_info, dhcp=set_dhcp)
        LOG.info("Pre packages")
        if packages_add:
            event_manager.progress_update(
                "Adding packages: %s" % str(packages_add))
            import_os_morphing_tools.install_packages(packages_add)
        LOG.info("Post packages install")
        import_os_morphing_tools.post_packages_install(packages_add)
    event_manager.progress_update("Dismounting OS partitions")
    # Dismount any extra mounts before the root mount itself:
    os_mount_tools.dismount_os(other_mounted_dirs + [os_root_dir])
def morph_image(origin_provider, destination_provider, connection_info,
                osmorphing_info, user_script, event_handler):
    """Mount the instance's OS, detect it, and adapt it for the destination.

    Newest variant: passes CONF.default_osmorphing_operation_timeout and
    'osmorphing_parameters' through to the OSMount/OSMorphing tools, and
    wraps setup/dismount failures in user-facing CoriolisExceptions.
    Raises OSMorphingToolsNotFound when no import-side tools class matches.
    """
    event_manager = events.EventManager(event_handler)
    os_type = osmorphing_info.get('os_type')
    ignore_devices = osmorphing_info.get('ignore_devices', [])
    # instantiate and run OSMount tools:
    os_mount_tools = osmount_factory.get_os_mount_tools(
        os_type, connection_info, event_manager, ignore_devices,
        CONF.default_osmorphing_operation_timeout)
    proxy_settings = _get_proxy_settings()
    os_mount_tools.set_proxy(proxy_settings)
    LOG.info("Preparing for OS partitions discovery")
    try:
        os_mount_tools.setup()
    except Exception as err:
        raise exception.CoriolisException(
            "Failed to set up the minion machine for OSMorphing. This may be "
            "due to an incompatibility between the OS image used for the "
            "OSMorphing minion machine and the VM undergoing OSMorphing. "
            "Error was: %s" % str(err)) from err
    event_manager.progress_update("Discovering and mounting OS partitions")
    os_root_dir, os_root_dev = os_mount_tools.mount_os()
    osmorphing_info['os_root_dir'] = os_root_dir
    osmorphing_info['os_root_dev'] = os_root_dev
    conn = os_mount_tools.get_connection()
    environment = os_mount_tools.get_environment()
    # Detect the mounted OS to drive tools class selection:
    detected_os_info = run_os_detect(
        origin_provider, destination_provider, conn, os_type, os_root_dir,
        osmorphing_info, tools_environment=environment)
    # TODO(aznashwan):
    # - export the source hypervisor type option in the VM's export info
    # - automatically detect the target hypervisor type from the worker VM
    hypervisor_type = osmorphing_info.get('hypervisor_type', None)
    osmorphing_parameters = osmorphing_info.get('osmorphing_parameters', {})
    # Export-side tools are optional; morphing proceeds without them:
    export_os_morphing_tools = None
    try:
        export_tools_cls = get_osmorphing_tools_class_for_provider(
            origin_provider, detected_os_info, os_type, osmorphing_info)
        if export_tools_cls:
            LOG.info(
                "Instantiating OSMorphing tools class '%s' for export provider"
                " '%s'", export_tools_cls.__name__, type(origin_provider))
            export_os_morphing_tools = export_tools_cls(
                conn, os_root_dir, os_root_dev, hypervisor_type,
                event_manager, detected_os_info, osmorphing_parameters,
                CONF.default_osmorphing_operation_timeout)
            export_os_morphing_tools.set_environment(environment)
        else:
            LOG.debug(
                "No compatible OSMorphing tools class found for export provider "
                "'%s'", type(origin_provider).__name__)
    except exception.OSMorphingToolsNotFound:
        LOG.warn("No tools found for export provider of type: %s",
                 type(origin_provider))
    # Import-side tools are mandatory:
    import_os_morphing_tools_cls = get_osmorphing_tools_class_for_provider(
        destination_provider, detected_os_info, os_type, osmorphing_info)
    if not import_os_morphing_tools_cls:
        LOG.error(
            "No compatible OSMorphing tools found from import provider '%s' "
            "for the given detected OS info %s", type(destination_provider),
            detected_os_info)
        raise exception.OSMorphingToolsNotFound(os_type=os_type)
    import_os_morphing_tools = import_os_morphing_tools_cls(
        conn, os_root_dir, os_root_dev, hypervisor_type, event_manager,
        detected_os_info, osmorphing_parameters,
        CONF.default_osmorphing_operation_timeout)
    import_os_morphing_tools.set_environment(environment)
    if user_script:
        event_manager.progress_update('Running OS morphing user script')
        import_os_morphing_tools.run_user_script(user_script)
    else:
        event_manager.progress_update('No OS morphing user script specified')
    event_manager.progress_update(
        'OS being migrated: %s' % detected_os_info['friendly_release_name'])
    (packages_add, _) = import_os_morphing_tools.get_packages()
    if export_os_morphing_tools:
        (_, packages_remove) = export_os_morphing_tools.get_packages()
        # Don't remove packages that need to be installed
        packages_remove = list(set(packages_remove) - set(packages_add))
        LOG.info("Pre packages uninstall")
        export_os_morphing_tools.pre_packages_uninstall(packages_remove)
        if packages_remove:
            event_manager.progress_update(
                "Removing packages: %s" % str(packages_remove))
            export_os_morphing_tools.uninstall_packages(packages_remove)
        LOG.info("Post packages uninstall")
        export_os_morphing_tools.post_packages_uninstall(packages_remove)
    LOG.info("Pre packages install")
    import_os_morphing_tools.pre_packages_install(packages_add)
    nics_info = osmorphing_info.get('nics_info')
    set_dhcp = osmorphing_info.get('nics_set_dhcp', True)
    import_os_morphing_tools.set_net_config(nics_info, dhcp=set_dhcp)
    LOG.info("Pre packages")
    if packages_add:
        event_manager.progress_update(
            "Adding packages: %s" % str(packages_add))
        import_os_morphing_tools.install_packages(packages_add)
    LOG.info("Post packages install")
    import_os_morphing_tools.post_packages_install(packages_add)
    event_manager.progress_update("Dismounting OS partitions")
    try:
        os_mount_tools.dismount_os(os_root_dir)
    except Exception as err:
        raise exception.CoriolisException(
            "Failed to dismount the OS undergoing OSMorphing. This could have "
            "been caused by minor FS corruption during the last disk sync. "
            "Please ensure that any source-side FS integrity mechanisms (e.g. "
            "filesystem quiescing, crash-consistent backups, etc.) are "
            "enabled and available for the source machine. If none are "
            "available, please try migrating/replicating the source machine "
            "while it is powered off. Error was: %s" % str(err)) from err