def get_shared_libs_for_providers(
        self, ctxt, origin, destination, event_handler):
    """ Returns a list of directories containing libraries needed for
    both the source and destination providers. """
    required_libs = []

    origin_provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_SETUP_LIBS,
        event_handler, raise_if_not_found=False)
    if origin_provider:
        conn_info = get_connection_info(ctxt, origin)
        required_libs.extend(
            origin_provider.get_shared_library_directories(
                ctxt, conn_info))

    destination_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_SETUP_LIBS,
        event_handler, raise_if_not_found=False)
    if destination_provider:
        conn_info = get_connection_info(ctxt, destination)
        required_libs.extend(
            destination_provider.get_shared_library_directories(
                ctxt, conn_info))

    return required_libs

def get_shared_libs_for_providers(
        self, ctxt, origin, destination, event_handler):
    """ Returns a list of directories containing libraries needed for
    both the source and destination providers. """
    required_libs = []

    platform = self.get_required_platform()
    if platform in [
            constants.TASK_PLATFORM_SOURCE,
            constants.TASK_PLATFORM_BILATERAL]:
        origin_provider = providers_factory.get_provider(
            origin["type"], constants.PROVIDER_TYPE_SETUP_LIBS,
            event_handler, raise_if_not_found=False)
        if origin_provider:
            conn_info = get_connection_info(ctxt, origin)
            required_libs.extend(
                origin_provider.get_shared_library_directories(
                    ctxt, conn_info))

    if platform in [
            constants.TASK_PLATFORM_DESTINATION,
            constants.TASK_PLATFORM_BILATERAL]:
        destination_provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_SETUP_LIBS,
            event_handler, raise_if_not_found=False)
        if destination_provider:
            conn_info = get_connection_info(ctxt, destination)
            required_libs.extend(
                destination_provider.get_shared_library_directories(
                    ctxt, conn_info))

    return required_libs

def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    origin_provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    destination_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)

    osmorphing_connection_info = base.unmarshal_migr_conn_info(
        task_info['osmorphing_connection_info'])
    osmorphing_info = task_info.get('osmorphing_info', {})

    user_scripts = task_info.get("user_scripts")
    instance_script = None
    if user_scripts:
        instance_script = user_scripts.get("instances", {}).get(instance)
        if not instance_script:
            os_type = osmorphing_info.get("os_type")
            if os_type:
                instance_script = user_scripts.get(
                    "global", {}).get(os_type)

    osmorphing_manager.morph_image(
        origin_provider, destination_provider,
        osmorphing_connection_info, osmorphing_info,
        instance_script, event_handler)

    return {}

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    destination_provider = None
    source_provider = None
    dest_volumes_info = {}

    new_source_environment = task_info.get('source_environment')
    new_destination_environment = task_info.get('destination_environment')

    if new_source_environment:
        source_provider = providers_factory.get_provider(
            origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
            event_handler)
        source_environment_schema = (
            source_provider.get_source_environment_schema())
        schemas.validate_value(
            new_source_environment, source_environment_schema)

    if new_destination_environment:
        destination_provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        destination_environment_schema = (
            destination_provider.get_target_environment_schema())
        schemas.validate_value(
            new_destination_environment, destination_environment_schema)

    connection_info = base.get_connection_info(ctxt, destination)
    export_info = task_info.get("export_info", {})
    volumes_info = task_info.get("volumes_info", {})

    old_source_environment = origin.get('source_environment', {})
    new_source_environment = task_info.get('source_environment', {})
    if source_provider:
        LOG.info("Checking source provider environment params")
        source_provider.check_update_environment_params(
            ctxt, connection_info, export_info, volumes_info,
            old_source_environment, new_source_environment)

    if destination_provider:
        LOG.info("Checking destination provider environment params")
        old_destination_environment = destination.get(
            'target_environment', {})
        new_destination_environment = task_info.get(
            'target_environment', {})
        dest_volumes_info = (
            destination_provider.check_update_environment_params(
                ctxt, connection_info, export_info, volumes_info,
                old_destination_environment, new_destination_environment))

    task_info['volumes_info'] = dest_volumes_info

    return task_info

def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    event_manager = events.EventManager(event_handler)
    destination_type = destination["type"]

    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    destination_provider = providers_factory.get_provider(
        destination_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        event_manager.progress_update(
            "Replica Import Provider for platform '%s' does not support "
            "Replica input validation" % destination_type)
        return {}

    export_info = task_info.get("export_info")
    if not export_info:
        raise exception.InvalidActionTasksExecutionState(
            "Instance export info is not set. Cannot perform "
            "Replica Import validation for destination platform "
            "'%s'" % destination_type)
    target_environment = task_info["target_environment"]

    self._validate_provider_replica_import_input(
        destination_provider, ctxt, destination_connection_info,
        target_environment, export_info)

    return {}

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, origin)

    volumes_info = _get_volumes_info(task_info)

    migr_source_conn_info = base.unmarshal_migr_conn_info(
        task_info["migr_source_connection_info"])
    migr_target_conn_info = base.unmarshal_migr_conn_info(
        task_info["migr_target_connection_info"])

    incremental = task_info.get("incremental", True)
    source_environment = origin.get('source_environment') or {}

    volumes_info = provider.replicate_disks(
        ctxt, connection_info, source_environment, instance,
        migr_source_conn_info, migr_target_conn_info, volumes_info,
        incremental)

    task_info["volumes_info"] = volumes_info

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    event_manager = events.EventManager(event_handler)

    new_source_env = task_info.get('source_environment', {})
    if not new_source_env:
        event_manager.progress_update(
            "No new source environment options provided")
        return task_info

    source_provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if not source_provider:
        raise exception.CoriolisException(
            "Replica source provider plugin for '%s' does not support"
            " updating Replicas" % origin["type"])

    origin_connection_info = base.get_connection_info(ctxt, origin)
    volumes_info = task_info.get("volumes_info", {})

    LOG.info("Checking source provider environment params")
    # NOTE: the `source_environment` in the `origin` is the one set
    # in the dedicated DB column of the Replica and thus stores
    # the previous value of it:
    old_source_env = origin.get('source_environment', {})
    volumes_info = (
        source_provider.check_update_source_environment_params(
            ctxt, origin_connection_info, instance, volumes_info,
            old_source_env, new_source_env))
    task_info['volumes_info'] = volumes_info

    return task_info

def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    event_manager = events.EventManager(event_handler)
    if not task_info.get("volumes_info"):
        LOG.debug("No volumes_info present. Skipping disk deletion.")
        event_manager.progress_update(
            "No previous volumes information present, nothing to delete")
        return {'volumes_info': []}

    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)

    volumes_info = _get_volumes_info(task_info)
    target_environment = task_info['target_environment']

    volumes_info = provider.delete_replica_disks(
        ctxt, connection_info, target_environment, volumes_info)
    if volumes_info:
        LOG.warn(
            "'volumes_info' should have been void after disk "
            "deletion task but it is: %s" % (
                utils.sanitize_task_info({'volumes_info': volumes_info})))

    return {'volumes_info': []}

def get_provider_schemas(self, ctxt, platform_name, provider_type):
    provider = providers_factory.get_provider(
        platform_name, provider_type, None)

    schemas = {}
    if provider_type == constants.PROVIDER_TYPE_ENDPOINT:
        schema = provider.get_connection_info_schema()
        schemas["connection_info_schema"] = schema

    if provider_type in [
            constants.PROVIDER_TYPE_IMPORT,
            constants.PROVIDER_TYPE_REPLICA_IMPORT]:
        schema = provider.get_target_environment_schema()
        schemas["destination_environment_schema"] = schema

    if provider_type in [
            constants.PROVIDER_TYPE_EXPORT,
            constants.PROVIDER_TYPE_REPLICA_EXPORT]:
        schema = provider.get_source_environment_schema()
        schemas["source_environment_schema"] = schema

    return schemas

def get_endpoint_destination_minion_pool_options(
        self, ctxt, platform_name, connection_info, env, option_names):
    provider = providers_factory.get_provider(
        platform_name, constants.PROVIDER_TYPE_DESTINATION_MINION_POOL,
        None, raise_if_not_found=False)
    if not provider:
        raise exception.InvalidInput(
            "Provider plugin for platform '%s' does not support "
            "destination minion pool creation or management." % (
                platform_name))

    secret_connection_info = utils.get_secret_connection_info(
        ctxt, connection_info)

    options = provider.get_minion_pool_options(
        ctxt, secret_connection_info, env=env, option_names=option_names)

    # NOTE: the structure of option values is the same for minion pools:
    schemas.validate_value(
        options, schemas.CORIOLIS_DESTINATION_ENVIRONMENT_OPTIONS_SCHEMA)

    return options

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    event_manager = events.EventManager(event_handler)
    destination_type = destination["type"]

    if task_info.get("export_info") is None:
        event_manager.progress_update(
            "Instance export info is not set. Cannot perform Migration "
            "Import validation for destination platform "
            "'%s'" % destination_type)
        return task_info

    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    destination_provider = providers_factory.get_provider(
        destination_type,
        constants.PROVIDER_TYPE_VALIDATE_MIGRATION_IMPORT,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        event_manager.progress_update(
            "Migration Import Provider for platform '%s' does not "
            "support Migration input validation" % destination_type)
        return task_info

    # NOTE: the target environment JSON schema should have been validated
    # upon accepting the Migration API creation request.
    target_environment = destination.get("target_environment", {})
    destination_provider.validate_migration_import_input(
        ctxt, destination_connection_info, target_environment,
        task_info["export_info"])

    return task_info

def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    platform_to_target = None
    required_platform = self.get_required_platform()
    if required_platform == constants.TASK_PLATFORM_SOURCE:
        platform_to_target = origin
    elif required_platform == constants.TASK_PLATFORM_DESTINATION:
        platform_to_target = destination
    else:
        raise NotImplementedError(
            "Unknown minion pool validation operation platform '%s'" % (
                required_platform))

    connection_info = base.get_connection_info(ctxt, platform_to_target)
    provider_type = self.get_required_provider_types()[
        self.get_required_platform()][0]
    provider = providers_factory.get_provider(
        platform_to_target["type"], provider_type, event_handler)

    export_info = task_info["export_info"]
    minion_properties = task_info[
        self._get_minion_properties_task_info_field()]
    transfer_properties = task_info[
        self._get_transfer_properties_task_info_field()]

    validation_op = self._get_provider_pool_validation_operation(provider)
    validation_op(
        ctxt, connection_info, export_info, transfer_properties,
        minion_properties)

    field_mappings = self._get_minion_task_info_field_mappings()
    return {
        field_mappings[field]: task_info[field]
        for field in field_mappings}

def get_provider_schemas(self, ctxt, platform_name, provider_type):
    provider = providers_factory.get_provider(
        platform_name, provider_type, None)

    schemas = {}
    if provider_type == constants.PROVIDER_TYPE_ENDPOINT:
        schema = provider.get_connection_info_schema()
        schemas["connection_info_schema"] = schema

    if provider_type == constants.PROVIDER_TYPE_REPLICA_IMPORT:
        schema = provider.get_target_environment_schema()
        schemas["destination_environment_schema"] = schema

    if provider_type == constants.PROVIDER_TYPE_REPLICA_EXPORT:
        schema = provider.get_source_environment_schema()
        schemas["source_environment_schema"] = schema

    if provider_type == constants.PROVIDER_TYPE_SOURCE_MINION_POOL:
        schema = provider.get_minion_pool_environment_schema()
        schemas["source_minion_pool_environment_schema"] = schema

    if provider_type == constants.PROVIDER_TYPE_DESTINATION_MINION_POOL:
        schema = provider.get_minion_pool_environment_schema()
        schemas["destination_minion_pool_environment_schema"] = schema

    return schemas

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    target_environment = destination.get("target_environment") or {}
    export_info = task_info["export_info"]

    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_IMPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)

    import_info = provider.import_instance(
        ctxt, connection_info, target_environment, instance, export_info)

    if task_info.get("instance_deployment_info") is None:
        task_info["instance_deployment_info"] = {}
    task_info["instance_deployment_info"].update(
        import_info["instance_deployment_info"])

    task_info["origin_provider_type"] = constants.PROVIDER_TYPE_EXPORT
    task_info["destination_provider_type"] = constants.PROVIDER_TYPE_IMPORT

    # We need to retain export info until after disk sync
    # TODO(gsamfira): remove this when we implement multi-worker, and by
    # extension some external storage for needed resources (like swift)
    task_info["retain_export_path"] = True

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_IMPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)
    target_environment = destination.get("target_environment") or {}

    instance_deployment_info = task_info["instance_deployment_info"]

    resources_info = provider.deploy_disk_copy_resources(
        ctxt, connection_info, target_environment,
        instance_deployment_info)

    conn_info = resources_info["instance_deployment_info"][
        "disk_sync_connection_info"]
    conn_info = base.marshal_migr_conn_info(conn_info)
    task_info["instance_deployment_info"] = resources_info[
        "instance_deployment_info"]
    task_info["instance_deployment_info"][
        "disk_sync_connection_info"] = conn_info

    # We need to retain export info until after disk sync
    # TODO(gsamfira): remove this when we implement multi-worker, and by
    # extension some external storage for needed resources (like swift)
    task_info["retain_export_path"] = True

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    event_manager = events.EventManager(event_handler)
    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    destination_type = destination["type"]
    export_info = task_info["export_info"]

    # validate Export info:
    schemas.validate_value(
        export_info, schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA)

    # validate destination params:
    destination_provider = providers_factory.get_provider(
        destination_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        event_manager.progress_update(
            "Replica Deployment Provider for platform '%s' does not "
            "support Replica Deployment input validation" % (
                destination_type))
        return task_info

    # NOTE: the target environment JSON schema should have been validated
    # upon accepting the Replica API creation request.
    target_environment = destination.get("target_environment", {})
    destination_provider.validate_replica_deployment_input(
        ctxt, destination_connection_info, target_environment,
        export_info)

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    target_environment = destination.get("target_environment") or {}
    export_info = task_info["export_info"]

    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)

    volumes_info = _get_volumes_info(task_info)
    clone_disks = task_info.get("clone_disks", True)
    LOG.debug("Clone disks: %s", clone_disks)

    import_info = provider.deploy_replica_instance(
        ctxt, connection_info, target_environment, instance,
        export_info, volumes_info, clone_disks)

    if task_info.get("instance_deployment_info") is None:
        task_info["instance_deployment_info"] = {}
    task_info["instance_deployment_info"].update(
        import_info["instance_deployment_info"])

    task_info["origin_provider_type"] = (
        constants.PROVIDER_TYPE_REPLICA_EXPORT)
    task_info["destination_provider_type"] = (
        constants.PROVIDER_TYPE_REPLICA_IMPORT)

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    event_manager = events.EventManager(event_handler)
    destination_type = destination["type"]

    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    destination_provider = providers_factory.get_provider(
        destination_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        event_manager.progress_update(
            "Replica Import Provider for platform '%s' does not support "
            "Replica input validation" % destination_type)
        return task_info

    export_info = task_info.get("export_info")
    if not export_info:
        raise exception.CoriolisException(
            "Instance export info is not set. Cannot perform "
            "Replica Import validation for destination platform "
            "'%s'" % destination_type)

    # NOTE: the target environment JSON schema should have been validated
    # upon accepting the Replica API creation request.
    target_environment = destination.get("target_environment", {})
    destination_provider.validate_replica_import_input(
        ctxt, destination_connection_info, target_environment,
        export_info)

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    target_environment = destination.get("target_environment") or {}
    export_info = task_info["export_info"]

    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)

    volumes_info = task_info.get("volumes_info", [])
    if volumes_info is None:
        # In case Replica disks were deleted:
        volumes_info = []

    volumes_info = provider.deploy_replica_disks(
        ctxt, connection_info, target_environment, instance, export_info,
        volumes_info)
    schemas.validate_value(
        volumes_info, schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
    volumes_info = _check_ensure_volumes_info_ordering(
        export_info, volumes_info)

    task_info["volumes_info"] = volumes_info

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, origin)

    source_environment = origin.get('source_environment') or {}

    replica_resources_info = provider.deploy_replica_source_resources(
        ctxt, connection_info, source_environment)

    task_info["migr_source_resources"] = replica_resources_info[
        "migr_resources"]
    migr_connection_info = replica_resources_info.get("connection_info")
    if migr_connection_info:
        migr_connection_info = base.marshal_migr_conn_info(
            migr_connection_info)
        schemas.validate_value(
            migr_connection_info,
            schemas.CORIOLIS_DISK_SYNC_RESOURCES_CONN_INFO_SCHEMA,
            # NOTE: we avoid raising so that the cleanup task
            # can [try] to deal with the temporary resources.
            raise_on_error=False)
    task_info["migr_source_connection_info"] = migr_connection_info

    return task_info

def validate_endpoint_connection(self, ctxt, platform_name,
                                 connection_info):
    provider = providers_factory.get_provider(
        platform_name, constants.PROVIDER_TYPE_ENDPOINT, None)
    secret_connection_info = utils.get_secret_connection_info(
        ctxt, connection_info)

    is_valid = True
    message = None
    try:
        schemas.validate_value(
            secret_connection_info, provider.get_connection_info_schema())
        provider.validate_connection(ctxt, secret_connection_info)
    except exception.SchemaValidationException as ex:
        LOG.debug("Connection info schema validation failed: %s", ex)
        is_valid = False
        message = (
            "Schema validation for the provided connection parameters "
            "has failed. Please ensure that you have included all the "
            "necessary connection parameters and they are all properly "
            "formatted for the '%s' Coriolis plugin in use." % (
                platform_name))
    except exception.ConnectionValidationException as ex:
        LOG.warn(utils.get_exception_details())
        is_valid = False
        message = str(ex)
    except Exception as ex:
        LOG.warn(utils.get_exception_details())
        is_valid = False
        message = ("An unexpected connection validation exception "
                   "occurred: %s" % str(ex))

    return (is_valid, message)

def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    platform_to_target = None
    required_platform = self.get_required_platform()
    if required_platform == constants.TASK_PLATFORM_SOURCE:
        platform_to_target = origin
    elif required_platform == constants.TASK_PLATFORM_DESTINATION:
        platform_to_target = destination
    else:
        raise NotImplementedError(
            "Unknown minion healthcheck platform '%s'" % (
                required_platform))

    connection_info = base.get_connection_info(ctxt, platform_to_target)
    provider_type = self.get_required_provider_types()[
        self.get_required_platform()][0]
    provider = providers_factory.get_provider(
        platform_to_target["type"], provider_type, event_handler)

    minion_properties = task_info['minion_provider_properties']
    minion_connection_info = base.unmarshal_migr_conn_info(
        task_info['minion_connection_info'])

    provider.healthcheck_minion(
        ctxt, connection_info, minion_properties, minion_connection_info)

    return {}

def _validate_create_body(self, body):
    migration = body["migration"]
    origin = migration["origin"]
    destination = migration["destination"]

    export_provider = factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_EXPORT, None)
    if not export_provider.validate_connection_info(
            origin.get("connection_info", {})):
        # TODO: use a decent exception
        raise exception.CoriolisException("Invalid connection info")

    import_provider = factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_IMPORT, None)
    if not import_provider.validate_connection_info(
            destination.get("connection_info", {})):
        # TODO: use a decent exception
        raise exception.CoriolisException("Invalid connection info")

    return origin, destination, migration["instances"]

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    event_manager = events.EventManager(event_handler)
    origin_connection_info = base.get_connection_info(ctxt, origin)
    origin_type = origin["type"]

    source_provider = providers_factory.get_provider(
        origin_type, constants.PROVIDER_TYPE_VALIDATE_MIGRATION_EXPORT,
        event_handler, raise_if_not_found=False)
    export_info = None
    if source_provider:
        export_info = source_provider.validate_migration_export_input(
            ctxt, origin_connection_info, instance,
            source_environment=origin.get("source_environment", {}))
    else:
        event_manager.progress_update(
            "Migration Export Provider for platform '%s' does not "
            "support Migration input validation" % origin_type)

    if export_info is None:
        source_endpoint_provider = providers_factory.get_provider(
            origin_type, constants.PROVIDER_TYPE_ENDPOINT_INSTANCES,
            event_handler, raise_if_not_found=False)
        if not source_endpoint_provider:
            event_manager.progress_update(
                "Migration Export Provider for platform '%s' does not "
                "support querying instance export info" % origin_type)
            return task_info
        export_info = source_endpoint_provider.get_instance(
            ctxt, origin_connection_info, instance)

    # validate Export info:
    schemas.validate_value(
        export_info, schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA)

    # NOTE: this export info will get overridden with updated values
    # and disk paths after the ExportInstanceTask.
    task_info["export_info"] = export_info

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, origin)

    provider.shutdown_instance(ctxt, connection_info, instance)

    return task_info

def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    platform_to_target = None
    required_platform = self.get_required_platform()
    if required_platform == constants.TASK_PLATFORM_SOURCE:
        platform_to_target = origin
    elif required_platform == constants.TASK_PLATFORM_DESTINATION:
        platform_to_target = destination
    else:
        raise NotImplementedError(
            "Unknown minion pool disk operation platform '%s'" % (
                required_platform))

    connection_info = base.get_connection_info(ctxt, platform_to_target)
    provider_type = self.get_required_provider_types()[
        self.get_required_platform()][0]
    provider = providers_factory.get_provider(
        platform_to_target["type"], provider_type, event_handler)

    volumes_info = self._get_volumes_info_from_task_info(task_info)
    minion_properties = task_info[
        self._get_minion_properties_task_info_field()]
    res = self._get_provider_disk_operation(provider)(
        ctxt, connection_info, minion_properties, volumes_info)

    missing_result_props = [
        prop for prop in ["volumes_info", "minion_properties"]
        if prop not in res]
    if missing_result_props:
        raise exception.CoriolisException(
            "The following properties were missing from minion disk "
            "operation '%s' from platform '%s': %s" % (
                self._get_provider_disk_operation.__name__,
                platform_to_target, missing_result_props))

    field_name_map = self._get_minion_task_info_field_mappings()
    result = {
        "volumes_info": res['volumes_info'],
        self._get_minion_properties_task_info_field(): res[
            "minion_properties"],
        field_name_map[self._get_minion_properties_task_info_field()]: res[
            "minion_properties"]}
    result.update({
        field_name_map[field]: task_info[field]
        for field in field_name_map
        if field_name_map[field] not in result})

    return result

def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    event_manager = events.EventManager(event_handler)

    volumes_info = task_info.get("volumes_info", [])
    new_destination_env = task_info.get('target_environment', {})
    # NOTE: the `target_environment` in the `destination` is the one
    # set in the dedicated DB column of the Replica and thus stores
    # the previous value of it:
    old_destination_env = destination.get('target_environment', {})
    if not new_destination_env:
        event_manager.progress_update(
            "No new destination environment options provided")
        return {
            "target_environment": old_destination_env,
            "volumes_info": volumes_info}

    destination_provider = providers_factory.get_provider(
        destination["type"],
        constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        raise exception.InvalidActionTasksExecutionState(
            "Replica destination provider plugin for '%s' does not "
            "support updating Replicas" % destination["type"])

    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    export_info = task_info.get("export_info", {})

    LOG.info("Checking destination provider environment params")
    volumes_info = (
        destination_provider.check_update_destination_environment_params(
            ctxt, destination_connection_info, export_info, volumes_info,
            old_destination_env, new_destination_env))

    if volumes_info:
        schemas.validate_value(
            volumes_info, schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
        volumes_info = _check_ensure_volumes_info_ordering(
            export_info, volumes_info)
    else:
        LOG.warn(
            "Destination update method for '%s' dest provider did NOT "
            "return any volumes info. Defaulting to old value.",
            destination["type"])
        volumes_info = task_info.get("volumes_info", [])

    return {
        "volumes_info": volumes_info,
        "target_environment": new_destination_env}

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_IMPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)

    instance_deployment_info = task_info.get(
        "instance_deployment_info", {})
    provider.cleanup_failed_import_instance(
        ctxt, connection_info, instance_deployment_info)

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_OS_MORPHING,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)

    os_morphing_resources = task_info.get("os_morphing_resources")
    provider.delete_os_morphing_resources(
        ctxt, connection_info, os_morphing_resources)

    return task_info

def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    origin_provider_type = task_info["origin_provider_type"]
    destination_provider_type = task_info["destination_provider_type"]

    origin_provider = providers_factory.get_provider(
        origin["type"], origin_provider_type, event_handler)
    destination_provider = providers_factory.get_provider(
        destination["type"], destination_provider_type, event_handler)

    osmorphing_connection_info = base.unmarshal_migr_conn_info(
        task_info['osmorphing_connection_info'])
    osmorphing_info = task_info.get('osmorphing_info', {})

    osmorphing_manager.morph_image(
        origin_provider, destination_provider,
        osmorphing_connection_info, osmorphing_info, event_handler)

    return task_info

def get_endpoint_storage(self, ctxt, platform_name, connection_info, env):
    provider = providers_factory.get_provider(
        platform_name, constants.PROVIDER_TYPE_ENDPOINT_STORAGE, None)
    secret_connection_info = utils.get_secret_connection_info(
        ctxt, connection_info)

    storage = provider.get_storage(ctxt, secret_connection_info, env)
    schemas.validate_value(storage, schemas.CORIOLIS_VM_STORAGE_SCHEMA)

    return storage

def _task_process(ctxt, task_id, task_type, origin, destination, instance,
                  task_info, mp_q, mp_log_q):
    try:
        _setup_task_process(mp_log_q)

        if task_type == constants.TASK_TYPE_EXPORT_INSTANCE:
            provider_type = constants.PROVIDER_TYPE_EXPORT
            data = origin
        elif task_type == constants.TASK_TYPE_IMPORT_INSTANCE:
            provider_type = constants.PROVIDER_TYPE_IMPORT
            data = destination
        else:
            raise exception.NotFound(
                "Unknown task type: %s" % task_type)

        event_handler = _ConductorProviderEventHandler(ctxt, task_id)
        provider = factory.get_provider(
            data["type"], provider_type, event_handler)

        connection_info = data.get("connection_info") or {}
        target_environment = data.get("target_environment") or {}

        secret_ref = connection_info.get("secret_ref")
        if secret_ref:
            LOG.info("Retrieving connection info from secret: %s",
                     secret_ref)
            connection_info = secrets.get_secret(ctxt, secret_ref)

        if provider_type == constants.PROVIDER_TYPE_EXPORT:
            export_path = _get_task_export_path(task_id, create=True)
            result = provider.export_instance(
                ctxt, connection_info, instance, export_path)
            result[TMP_DIRS_KEY] = [export_path]
        else:
            result = provider.import_instance(
                ctxt, connection_info, target_environment, instance,
                task_info)
        mp_q.put(result)
    except Exception as ex:
        mp_q.put(str(ex))
        LOG.exception(ex)
    finally:
        # Signal the log event handler that there are no more events
        mp_log_q.put(None)