def _check_running_executions(action):
    """Ensure no tasks execution of the given action is currently running.

    :param action: action object exposing an ``executions`` iterable whose
        items carry a ``status`` attribute.
    :raises exception.InvalidActionTasksExecutionState: if at least one
        execution has the RUNNING status.
    """
    # any() short-circuits on the first running execution instead of
    # materializing a full list of matches just to test for emptiness:
    if any(e.status == constants.EXECUTION_STATUS_RUNNING
           for e in action.executions):
        raise exception.InvalidActionTasksExecutionState(
            "Another tasks execution is in progress")
def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    """Validate the Replica's export info against the destination platform.

    Providers lacking the Replica-import validation capability are skipped
    with a progress notification. Always returns an empty dict; validation
    failures surface as exceptions from the provider call.
    """
    evt_mgr = events.EventManager(event_handler)
    dest_type = destination["type"]
    dest_conn_info = base.get_connection_info(ctxt, destination)
    provider = providers_factory.get_provider(
        dest_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
        event_handler, raise_if_not_found=False)

    # Validation support is optional for destination providers:
    if not provider:
        evt_mgr.progress_update(
            "Replica Import Provider for platform '%s' does not support "
            "Replica input validation" % dest_type)
        return {}

    export_info = task_info.get("export_info")
    if not export_info:
        raise exception.InvalidActionTasksExecutionState(
            "Instance export info is not set. Cannot perform "
            "Replica Import validation for destination platform "
            "'%s'" % dest_type)

    self._validate_provider_replica_import_input(
        provider, ctxt, dest_conn_info,
        task_info["target_environment"], export_info)

    return {}
def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    """Have the destination provider apply updated target environment params.

    :returns: dict with the (possibly provider-updated) "volumes_info" and
        the "target_environment" value to be persisted for the Replica.
    :raises exception.InvalidActionTasksExecutionState: if the destination
        provider plugin does not support Replica updates.
    """
    event_manager = events.EventManager(event_handler)
    volumes_info = task_info.get("volumes_info", [])
    new_destination_env = task_info.get('target_environment', {})
    # NOTE: the `target_environment` in the `destination` is the one
    # set in the dedicated DB column of the Replica and thus stores
    # the previous value of it:
    old_destination_env = destination.get('target_environment', {})
    if not new_destination_env:
        event_manager.progress_update(
            "No new destination environment options provided")
        return {
            "target_environment": old_destination_env,
            "volumes_info": volumes_info
        }

    destination_provider = providers_factory.get_provider(
        destination["type"],
        constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if not destination_provider:
        raise exception.InvalidActionTasksExecutionState(
            "Replica destination provider plugin for '%s' does not "
            "support updating Replicas" % destination["type"])
    destination_connection_info = base.get_connection_info(
        ctxt, destination)
    export_info = task_info.get("export_info", {})

    LOG.info("Checking destination provider environment params")
    volumes_info = (
        destination_provider.check_update_destination_environment_params(
            ctxt, destination_connection_info, export_info, volumes_info,
            old_destination_env, new_destination_env))

    if volumes_info:
        schemas.validate_value(
            volumes_info, schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
        volumes_info = _check_ensure_volumes_info_ordering(
            export_info, volumes_info)
    else:
        # `LOG.warn` is a deprecated alias for `LOG.warning`:
        LOG.warning(
            "Destination update method for '%s' dest provider did NOT "
            "return any volumes info. Defaulting to old value.",
            destination["type"])
        volumes_info = task_info.get("volumes_info", [])

    return {
        "volumes_info": volumes_info,
        "target_environment": new_destination_env
    }
def _check_ensure_volumes_info_ordering(export_info, volumes_info):
    """ Returns a new list of volumes_info, ensuring that the order of
    the disks in 'volumes_info' is consistent with the order that the
    disks appear in 'export_info[devices][disks]'

    :raises exception.InvalidActionTasksExecutionState: if any source disk
        is missing from `volumes_info`, or matched by more than one entry.
    """
    # Best-available display name for logging purposes:
    instance = export_info.get(
        'instance_name', export_info.get('name', export_info['id']))
    ordered_volumes_info = []
    for disk in export_info['devices']['disks']:
        disk_id = disk['id']
        matching_volumes = [
            vol for vol in volumes_info if vol['disk_id'] == disk_id]
        if not matching_volumes:
            raise exception.InvalidActionTasksExecutionState(
                "Could not find source disk '%s' (ID '%s') in Replica "
                "volumes info: %s" % (disk, disk_id, volumes_info))
        elif len(matching_volumes) > 1:
            # NOTE: fixed typo in this error message ("foind" -> "found"):
            raise exception.InvalidActionTasksExecutionState(
                "Multiple disks with ID '%s' found in Replica "
                "volumes info: %s" % (disk_id, volumes_info))
        ordered_volumes_info.append(matching_volumes[0])

    # Log sanitized copies of the before/after orderings (presumably
    # `sanitize_task_info` strips sensitive fields -- see coriolis.utils):
    vol_info_cpy = utils.sanitize_task_info({
        "volumes_info": volumes_info}).get("volumes_info", [])
    ordered_vol_info_cpy = utils.sanitize_task_info({
        "volumes_info": ordered_volumes_info}).get("volumes_info", [])
    LOG.debug("volumes_info returned by provider for instance "
              "'%s': %s", instance, vol_info_cpy)
    LOG.debug("volumes_info for instance '%s' after "
              "reordering: %s", instance, ordered_vol_info_cpy)

    return ordered_volumes_info
def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    """Have the source provider apply updated source environment params.

    :returns: dict with the (possibly provider-updated) "volumes_info" and
        the "source_environment" value to be persisted for the Replica.
    :raises exception.InvalidActionTasksExecutionState: if the source
        provider plugin does not support Replica updates.
    """
    event_manager = events.EventManager(event_handler)
    volumes_info = task_info.get("volumes_info", [])
    new_source_env = task_info.get('source_environment', {})
    # NOTE: the `source_environment` in the `origin` is the one set
    # in the dedicated DB column of the Replica and thus stores
    # the previous value of it.
    # NOTE: defaulting to {} for consistency with the destination
    # environment update task:
    old_source_env = origin.get('source_environment', {})
    if not new_source_env:
        event_manager.progress_update(
            "No new source environment options provided")
        return {
            'volumes_info': volumes_info,
            'source_environment': old_source_env
        }

    source_provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE,
        event_handler, raise_if_not_found=False)
    if not source_provider:
        raise exception.InvalidActionTasksExecutionState(
            "Replica source provider plugin for '%s' does not support"
            " updating Replicas" % origin["type"])
    origin_connection_info = base.get_connection_info(ctxt, origin)

    LOG.info("Checking source provider environment params")
    volumes_info = (source_provider.check_update_source_environment_params(
        ctxt, origin_connection_info, instance, volumes_info,
        old_source_env, new_source_env))
    if volumes_info:
        schemas.validate_value(
            volumes_info, schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
    else:
        # `LOG.warn` is a deprecated alias for `LOG.warning`:
        LOG.warning(
            "Source update method for '%s' source provider did NOT "
            "return any volumes info. Defaulting to old value.",
            origin["type"])
        volumes_info = task_info.get("volumes_info", [])

    return {
        "volumes_info": volumes_info,
        "source_environment": new_source_env
    }
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Copy the instance's disk data to the destination volumes.

    Requires "instance_deployment_info" in `task_info`, containing both
    "volumes_info" (each entry with a "disk_image_uri") and the
    "disk_sync_connection_info" for the target worker.

    :returns: `task_info`, unchanged.
    :raises exception.InvalidActionTasksExecutionState: if any volume is
        missing its "disk_image_uri".
    """
    instance_deployment_info = task_info["instance_deployment_info"]
    volumes_info = instance_deployment_info["volumes_info"]
    # Use lazy %-argument formatting so the message is only rendered
    # if the INFO level is actually enabled:
    LOG.info("Volumes info is: %r", volumes_info)

    image_paths = [i.get("disk_image_uri") for i in volumes_info]
    if None in image_paths:
        raise exception.InvalidActionTasksExecutionState(
            "disk_image_uri must be part of volumes_info for"
            " standard migrations")

    target_conn_info = base.unmarshal_migr_conn_info(
        instance_deployment_info["disk_sync_connection_info"])
    manager.copy_disk_data(target_conn_info, volumes_info, event_handler)

    return task_info
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Validate then copy the instance's disk data to the destination.

    Identical to the plain disk-copy task, but first schema-validates the
    "disk_sync_connection_info" and "volumes_info" entries of the
    "instance_deployment_info" in `task_info`.

    :returns: `task_info`, unchanged.
    :raises exception.InvalidActionTasksExecutionState: if any volume is
        missing its "disk_image_uri".
    """
    instance_deployment_info = task_info["instance_deployment_info"]
    schemas.validate_value(
        instance_deployment_info['disk_sync_connection_info'],
        schemas.CORIOLIS_DISK_SYNC_RESOURCES_CONN_INFO_SCHEMA)
    volumes_info = instance_deployment_info["volumes_info"]
    schemas.validate_value(
        {"volumes_info": volumes_info},
        schemas.CORIOLIS_DISK_SYNC_RESOURCES_INFO_SCHEMA)
    # Use lazy %-argument formatting so the message is only rendered
    # if the INFO level is actually enabled:
    LOG.info("Volumes info is: %r", volumes_info)

    image_paths = [i.get("disk_image_uri") for i in volumes_info]
    if None in image_paths:
        raise exception.InvalidActionTasksExecutionState(
            "disk_image_uri must be part of volumes_info for"
            " standard migrations")

    target_conn_info = base.unmarshal_migr_conn_info(
        instance_deployment_info["disk_sync_connection_info"])
    manager.copy_disk_data(target_conn_info, volumes_info, event_handler)

    return task_info
def _get_volumes_info(task_info): volumes_info = task_info.get("volumes_info") if not volumes_info: raise exception.InvalidActionTasksExecutionState( "No volumes information present") return volumes_info