Example #1
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        event_manager = events.EventManager(event_handler)
        destination_type = destination["type"]

        destination_connection_info = base.get_connection_info(
            ctxt, destination)
        destination_provider = providers_factory.get_provider(
            destination_type,
            constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
            event_handler,
            raise_if_not_found=False)
        if not destination_provider:
            event_manager.progress_update(
                "Replica Import Provider for platform '%s' does not support "
                "Replica input validation" % destination_type)
            return {}

        export_info = task_info.get("export_info")
        if not export_info:
            raise exception.InvalidActionTasksExecutionState(
                "Instance export info is not set. Cannot perform "
                "Replica Import validation for destination platform "
                "'%s'" % destination_type)

        target_environment = task_info["target_environment"]
        self._validate_provider_replica_import_input(
            destination_provider, ctxt, destination_connection_info,
            target_environment, export_info)

        return {}
Example #2
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_IMPORT, event_handler)
        connection_info = base.get_connection_info(ctxt, destination)
        target_environment = destination.get("target_environment") or {}
        instance_deployment_info = task_info["instance_deployment_info"]

        resources_info = provider.deploy_disk_copy_resources(
            ctxt, connection_info, target_environment,
            instance_deployment_info)

        conn_info = resources_info["instance_deployment_info"][
            "disk_sync_connection_info"]
        conn_info = base.marshal_migr_conn_info(conn_info)
        task_info["instance_deployment_info"] = resources_info[
            "instance_deployment_info"]
        task_info["instance_deployment_info"][
            "disk_sync_connection_info"] = conn_info
        # We need to retain export info until after disk sync
        # TODO(gsamfira): remove this when we implement multi-worker, and by
        # extension some external storage for needed resources (like swift)
        task_info["retain_export_path"] = True

        return task_info
Example #3
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):

        platform_to_target = None
        required_platform = self.get_required_platform()
        if required_platform == constants.TASK_PLATFORM_SOURCE:
            platform_to_target = origin
        elif required_platform == constants.TASK_PLATFORM_DESTINATION:
            platform_to_target = destination
        else:
            raise NotImplementedError(
                "Unknown minion pool validation operation platform '%s'" %
                (required_platform))

        connection_info = base.get_connection_info(ctxt, platform_to_target)
        provider_type = self.get_required_provider_types()[
            self.get_required_platform()][0]
        provider = providers_factory.get_provider(platform_to_target["type"],
                                                  provider_type, event_handler)

        export_info = task_info["export_info"]
        minion_properties = task_info[
            self._get_minion_properties_task_info_field()]
        transfer_properties = task_info[
            self._get_transfer_properties_task_info_field()]
        validation_op = self._get_provider_pool_validation_operation(provider)
        validation_op(ctxt, connection_info, export_info, transfer_properties,
                      minion_properties)

        field_mappings = self._get_minion_task_info_field_mappings()
        return {
            field_mappings[field]: task_info[field]
            for field in field_mappings
        }
Example #4
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        event_manager = events.EventManager(event_handler)
        if not task_info.get("volumes_info"):
            LOG.debug("No volumes_info present. Skipping disk deletion.")
            event_manager.progress_update(
                "No previous volumes information present, nothing to delete")
            return {'volumes_info': []}

        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)

        volumes_info = _get_volumes_info(task_info)
        target_environment = task_info['target_environment']

        volumes_info = provider.delete_replica_disks(ctxt, connection_info,
                                                     target_environment,
                                                     volumes_info)
        if volumes_info:
            LOG.warn(
                "'volumes_info' should have been empty after the disk "
                "deletion task, but it is: %s" %
                (utils.sanitize_task_info({'volumes_info': volumes_info})))

        return {'volumes_info': []}
Example #5
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        event_manager = events.EventManager(event_handler)
        destination_type = destination["type"]

        destination_connection_info = base.get_connection_info(
            ctxt, destination)
        destination_provider = providers_factory.get_provider(
            destination_type,
            constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
            event_handler,
            raise_if_not_found=False)
        if not destination_provider:
            event_manager.progress_update(
                "Replica Import Provider for platform '%s' does not support "
                "Replica input validation" % destination_type)
            return task_info

        export_info = task_info.get("export_info")
        if not export_info:
            raise exception.CoriolisException(
                "Instance export info is not set. Cannot perform "
                "Replica Import validation for destination platform "
                "'%s'" % destination_type)

        # NOTE: the target environment JSON schema should have been validated
        # upon accepting the Replica API creation request.
        target_environment = destination.get("target_environment", {})
        destination_provider.validate_replica_import_input(
            ctxt, destination_connection_info, target_environment, export_info)

        return task_info
Example #6
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):

        platform_to_target = None
        required_platform = self.get_required_platform()
        if required_platform == constants.TASK_PLATFORM_SOURCE:
            platform_to_target = origin
        elif required_platform == constants.TASK_PLATFORM_DESTINATION:
            platform_to_target = destination
        else:
            raise NotImplementedError(
                "Unknown minion healthcheck platform '%s'" %
                (required_platform))

        connection_info = base.get_connection_info(ctxt, platform_to_target)
        provider_type = self.get_required_provider_types()[
            self.get_required_platform()][0]
        provider = providers_factory.get_provider(platform_to_target["type"],
                                                  provider_type, event_handler)

        minion_properties = task_info['minion_provider_properties']
        minion_connection_info = base.unmarshal_migr_conn_info(
            task_info['minion_connection_info'])

        provider.healthcheck_minion(ctxt, connection_info, minion_properties,
                                    minion_connection_info)

        return {}
Example #7
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        target_environment = destination.get("target_environment") or {}
        export_info = task_info["export_info"]

        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)

        volumes_info = task_info.get("volumes_info", [])
        if volumes_info is None:
            # In case Replica disks were deleted:
            volumes_info = []

        volumes_info = provider.deploy_replica_disks(ctxt, connection_info,
                                                     target_environment,
                                                     instance, export_info,
                                                     volumes_info)
        schemas.validate_value(volumes_info,
                               schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)

        volumes_info = _check_ensure_volumes_info_ordering(
            export_info, volumes_info)

        task_info["volumes_info"] = volumes_info

        return task_info
Example #8
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        target_environment = destination.get("target_environment") or {}
        export_info = task_info["export_info"]

        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_IMPORT, event_handler)
        connection_info = base.get_connection_info(ctxt, destination)

        import_info = provider.import_instance(ctxt, connection_info,
                                               target_environment, instance,
                                               export_info)

        if task_info.get("instance_deployment_info") is None:
            task_info["instance_deployment_info"] = {}
        task_info["instance_deployment_info"].update(
            import_info["instance_deployment_info"])

        task_info["origin_provider_type"] = constants.PROVIDER_TYPE_EXPORT
        task_info["destination_provider_type"] = constants.PROVIDER_TYPE_IMPORT
        # We need to retain export info until after disk sync
        # TODO(gsamfira): remove this when we implement multi-worker, and by
        # extension some external storage for needed resources (like swift)
        task_info["retain_export_path"] = True

        return task_info
Example #9
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, origin)

        source_environment = origin.get('source_environment') or {}
        replica_resources_info = provider.deploy_replica_source_resources(
            ctxt, connection_info, source_environment)

        task_info["migr_source_resources"] = replica_resources_info[
            "migr_resources"]
        migr_connection_info = replica_resources_info.get("connection_info")
        if migr_connection_info:
            migr_connection_info = base.marshal_migr_conn_info(
                migr_connection_info)
            schemas.validate_value(
                migr_connection_info,
                schemas.CORIOLIS_DISK_SYNC_RESOURCES_CONN_INFO_SCHEMA,
                # NOTE: we avoid raising so that the cleanup task
                # can [try] to deal with the temporary resources.
                raise_on_error=False)

        task_info["migr_source_connection_info"] = migr_connection_info

        return task_info
Example #10
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        event_manager = events.EventManager(event_handler)
        destination_type = destination["type"]
        if task_info.get("export_info") is None:
            event_manager.progress_update(
                "Instance export info is not set. Cannot perform Migration "
                "Import validation for destination platform "
                "'%s'" % destination_type)
            return task_info

        destination_connection_info = base.get_connection_info(
            ctxt, destination)
        destination_provider = providers_factory.get_provider(
            destination_type,
            constants.PROVIDER_TYPE_VALIDATE_MIGRATION_IMPORT,
            event_handler,
            raise_if_not_found=False)
        if not destination_provider:
            event_manager.progress_update(
                "Migration Import Provider for platform '%s' does not "
                "support Migration input validation" % destination_type)
            return task_info

        # NOTE: the target environment JSON schema should have been validated
        # upon accepting the Migration API creation request.
        target_environment = destination.get("target_environment", {})
        destination_provider.validate_migration_import_input(
            ctxt, destination_connection_info, target_environment,
            task_info["export_info"])

        return task_info
Example #11
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        target_environment = destination.get("target_environment") or {}
        export_info = task_info["export_info"]

        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)

        volumes_info = _get_volumes_info(task_info)
        clone_disks = task_info.get("clone_disks", True)
        LOG.debug("Clone disks: %s", clone_disks)

        import_info = provider.deploy_replica_instance(ctxt, connection_info,
                                                       target_environment,
                                                       instance, export_info,
                                                       volumes_info,
                                                       clone_disks)

        if task_info.get("instance_deployment_info") is None:
            task_info["instance_deployment_info"] = {}
        task_info["instance_deployment_info"].update(
            import_info["instance_deployment_info"])

        task_info[
            "origin_provider_type"] = constants.PROVIDER_TYPE_REPLICA_EXPORT
        task_info[
            "destination_provider_type"] = constants.PROVIDER_TYPE_REPLICA_IMPORT

        return task_info
Example #12
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        event_manager = events.EventManager(event_handler)
        destination_connection_info = base.get_connection_info(
            ctxt, destination)
        destination_type = destination["type"]
        export_info = task_info["export_info"]
        # validate Export info:
        schemas.validate_value(export_info,
                               schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA)

        # validate destination params:
        destination_provider = providers_factory.get_provider(
            destination_type,
            constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT,
            event_handler,
            raise_if_not_found=False)
        if not destination_provider:
            event_manager.progress_update(
                "Replica Deployment Provider for platform '%s' does not "
                "support Replica Deployment input validation" %
                (destination_type))
            return task_info

        # NOTE: the target environment JSON schema should have been validated
        # upon accepting the Replica API creation request.
        target_environment = destination.get("target_environment", {})
        destination_provider.validate_replica_deployment_input(
            ctxt, destination_connection_info, target_environment, export_info)

        return task_info
Example #13
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, origin)

        volumes_info = _get_volumes_info(task_info)

        migr_source_conn_info = base.unmarshal_migr_conn_info(
            task_info["migr_source_connection_info"])

        migr_target_conn_info = base.unmarshal_migr_conn_info(
            task_info["migr_target_connection_info"])

        incremental = task_info.get("incremental", True)

        source_environment = origin.get('source_environment') or {}

        volumes_info = provider.replicate_disks(ctxt, connection_info,
                                                source_environment, instance,
                                                migr_source_conn_info,
                                                migr_target_conn_info,
                                                volumes_info, incremental)

        task_info["volumes_info"] = volumes_info

        return task_info
Example #14
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        event_manager = events.EventManager(event_handler)
        new_source_env = task_info.get('source_environment', {})
        if not new_source_env:
            event_manager.progress_update(
                "No new source environment options provided")
            return task_info

        source_provider = providers_factory.get_provider(
            origin["type"],
            constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE,
            event_handler,
            raise_if_not_found=False)
        if not source_provider:
            raise exception.CoriolisException(
                "Replica source provider plugin for '%s' does not support"
                " updating Replicas" % origin["type"])

        origin_connection_info = base.get_connection_info(ctxt, origin)
        volumes_info = task_info.get("volumes_info", {})

        LOG.info("Checking source provider environment params")
        # NOTE: the `source_environment` in the `origin` is the one set
        # in the dedicated DB column of the Replica and thus stores
        # the previous value of it:
        old_source_env = origin.get('source_environment', {})
        volumes_info = (source_provider.check_update_source_environment_params(
            ctxt, origin_connection_info, instance, volumes_info,
            old_source_env, new_source_env))

        task_info['volumes_info'] = volumes_info

        return task_info
Example #15
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, origin)

        provider.shutdown_instance(ctxt, connection_info, instance)

        return task_info
Example #16
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):

        platform_to_target = None
        required_platform = self.get_required_platform()
        if required_platform == constants.TASK_PLATFORM_SOURCE:
            platform_to_target = origin
        elif required_platform == constants.TASK_PLATFORM_DESTINATION:
            platform_to_target = destination
        else:
            raise NotImplementedError(
                "Unknown minion pool disk operation platform '%s'" %
                (required_platform))

        connection_info = base.get_connection_info(ctxt, platform_to_target)
        provider_type = self.get_required_provider_types()[
            self.get_required_platform()][0]
        provider = providers_factory.get_provider(platform_to_target["type"],
                                                  provider_type, event_handler)

        volumes_info = self._get_volumes_info_from_task_info(task_info)
        minion_properties = task_info[
            self._get_minion_properties_task_info_field()]
        res = self._get_provider_disk_operation(provider)(ctxt,
                                                          connection_info,
                                                          minion_properties,
                                                          volumes_info)

        missing_result_props = [
            prop for prop in ["volumes_info", "minion_properties"]
            if prop not in res
        ]
        if missing_result_props:
            raise exception.CoriolisException(
                "The following properties were missing from the result of "
                "minion disk operation '%s' on platform '%s': %s" %
                (self._get_provider_disk_operation.__name__,
                 platform_to_target, missing_result_props))

        field_name_map = self._get_minion_task_info_field_mappings()
        result = {
            "volumes_info":
            res['volumes_info'],
            self._get_minion_properties_task_info_field():
            res["minion_properties"],
            field_name_map[self._get_minion_properties_task_info_field()]:
            res["minion_properties"]
        }

        result.update({
            field_name_map[field]: task_info[field]
            for field in field_name_map if field_name_map[field] not in result
        })

        return result
Example #17
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        event_manager = events.EventManager(event_handler)

        volumes_info = task_info.get("volumes_info", [])
        new_destination_env = task_info.get('target_environment', {})
        # NOTE: the `target_environment` in the `destination` is the one
        # set in the dedicated DB column of the Replica and thus stores
        # the previous value of it:
        old_destination_env = destination.get('target_environment', {})
        if not new_destination_env:
            event_manager.progress_update(
                "No new destination environment options provided")
            return {
                "target_environment": old_destination_env,
                "volumes_info": volumes_info
            }

        destination_provider = providers_factory.get_provider(
            destination["type"],
            constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE,
            event_handler,
            raise_if_not_found=False)
        if not destination_provider:
            raise exception.InvalidActionTasksExecutionState(
                "Replica destination provider plugin for '%s' does not "
                "support updating Replicas" % destination["type"])

        destination_connection_info = base.get_connection_info(
            ctxt, destination)
        export_info = task_info.get("export_info", {})

        LOG.info("Checking destination provider environment params")
        volumes_info = (
            destination_provider.check_update_destination_environment_params(
                ctxt, destination_connection_info, export_info, volumes_info,
                old_destination_env, new_destination_env))

        if volumes_info:
            schemas.validate_value(volumes_info,
                                   schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
            volumes_info = _check_ensure_volumes_info_ordering(
                export_info, volumes_info)
        else:
            LOG.warn(
                "Destination update method for '%s' dest provider did NOT "
                "return any volumes info. Defaulting to old value.",
                destination["type"])
            volumes_info = task_info.get("volumes_info", [])

        return {
            "volumes_info": volumes_info,
            "target_environment": new_destination_env
        }
Example #18
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_IMPORT, event_handler)
        connection_info = base.get_connection_info(ctxt, destination)
        instance_deployment_info = task_info.get("instance_deployment_info",
                                                 {})

        provider.cleanup_failed_import_instance(ctxt, connection_info,
                                                instance_deployment_info)

        return task_info
Example #19
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)
        instance_deployment_info = task_info["instance_deployment_info"]

        provider.finalize_replica_instance_deployment(
            ctxt, connection_info, instance_deployment_info)

        return task_info
Example #20
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        destination_provider = None
        source_provider = None
        dest_volumes_info = {}
        new_source_environment = task_info.get('source_environment')
        new_destination_environment = task_info.get('destination_environment')

        if new_source_environment:
            source_provider = providers_factory.get_provider(
                origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
                event_handler)
            source_environment_schema = (
                source_provider.get_source_environment_schema())
            schemas.validate_value(new_source_environment,
                                   source_environment_schema)

        if new_destination_environment:
            destination_provider = providers_factory.get_provider(
                destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
                event_handler)
            destination_environment_schema = (
                destination_provider.get_target_environment_schema())
            schemas.validate_value(new_destination_environment,
                                   destination_environment_schema)

        connection_info = base.get_connection_info(ctxt, destination)
        export_info = task_info.get("export_info", {})
        volumes_info = task_info.get("volumes_info", {})

        old_source_environment = origin.get('source_environment', {})
        new_source_environment = task_info.get('source_environment', {})
        if source_provider:
            LOG.info("Checking source provider environment params")
            source_provider.check_update_environment_params(
                ctxt, connection_info, export_info, volumes_info,
                old_source_environment, new_source_environment)

        if destination_provider:
            LOG.info("Checking destination provider environment params")
            old_destination_environment = destination.get(
                'target_environment', {})
            new_destination_environment = task_info.get(
                'target_environment', {})

            dest_volumes_info = (
                destination_provider.check_update_environment_params(
                    ctxt, connection_info, export_info, volumes_info,
                    old_destination_environment, new_destination_environment))

        task_info['volumes_info'] = dest_volumes_info

        return task_info
Example #21
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_OS_MORPHING,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)
        os_morphing_resources = task_info.get("os_morphing_resources")

        provider.delete_os_morphing_resources(
            ctxt, connection_info, os_morphing_resources)

        return task_info
Example #22
    def _run(self, ctxt, minion_pool_machine_id, origin, destination,
             task_info, event_handler):

        # NOTE: either the origin or the destination endpoint would work here:
        connection_info = base.get_connection_info(ctxt, destination)
        provider_type = self.get_required_provider_types()[
            self.get_required_platform()][0]
        provider = providers_factory.get_provider(destination["type"],
                                                  provider_type, event_handler)

        pool_identifier = task_info['pool_identifier']
        environment_options = task_info['pool_environment_options']
        pool_shared_resources = task_info['pool_shared_resources']
        pool_os_type = task_info["pool_os_type"]
        minion_properties = provider.create_minion(
            ctxt, connection_info, environment_options, pool_identifier,
            pool_os_type, pool_shared_resources, minion_pool_machine_id)

        missing = [
            key for key in [
                "connection_info", "minion_provider_properties",
                "backup_writer_connection_info"
            ] if key not in minion_properties
        ]
        if missing:
            LOG.warn(
                "Provider of type '%s' failed to return the following minion "
                "property keys: %s. Allowing run to completion for later "
                "cleanup.", destination["type"], missing)

        minion_connection_info = {}
        if 'connection_info' in minion_properties:
            minion_connection_info = base.marshal_migr_conn_info(
                minion_properties['connection_info'])
        minion_backup_writer_conn = {}
        if 'backup_writer_connection_info' in minion_properties:
            minion_backup_writer_conn = minion_properties[
                'backup_writer_connection_info']
            if 'connection_details' in minion_backup_writer_conn:
                minion_backup_writer_conn['connection_details'] = (
                    base.marshal_migr_conn_info(
                        minion_backup_writer_conn['connection_details']))

        return {
            "minion_connection_info":
            minion_connection_info,
            "minion_backup_writer_connection_info":
            (minion_backup_writer_conn),
            "minion_provider_properties":
            minion_properties.get("minion_provider_properties")
        }
Example #23
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)
        target_environment = task_info["target_environment"]
        instance_deployment_info = task_info["instance_deployment_info"]

        provider.cleanup_failed_replica_instance_deployment(
            ctxt, connection_info, target_environment,
            instance_deployment_info)

        return {"instance_deployment_info": None}
Example #24
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)

        volumes_info = _get_volumes_info(task_info)

        provider.delete_replica_disks(ctxt, connection_info, volumes_info)

        task_info["volumes_info"] = None

        return task_info
Example #25
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        provider = providers_factory.get_provider(
            origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, origin)

        source_environment = task_info['source_environment'] or {}
        export_info = task_info['export_info']
        replica_resources_info = provider.deploy_replica_source_resources(
            ctxt, connection_info, export_info, source_environment)

        migr_connection_info = replica_resources_info.get(
            "connection_info", {})
        if 'connection_info' not in replica_resources_info:
            LOG.warn(
                "Replica source provider for '%s' did NOT return any "
                "'connection_info'. Defaulting to '%s'", origin["type"],
                migr_connection_info)
        else:
            migr_connection_info = replica_resources_info['connection_info']
            if migr_connection_info:
                migr_connection_info = base.marshal_migr_conn_info(
                    migr_connection_info)
                schemas.validate_value(
                    migr_connection_info,
                    schemas.CORIOLIS_REPLICATION_WORKER_CONN_INFO_SCHEMA,
                    # NOTE: we avoid raising so that the cleanup task
                    # can [try] to deal with the temporary resources.
                    raise_on_error=False)
            else:
                LOG.warn(
                    "Replica source provider for '%s' returned empty "
                    "'connection_info' in source resources deployment: %s",
                    origin["type"], migr_connection_info)

        migr_resources = {}
        if 'migr_resources' not in replica_resources_info:
            LOG.warn(
                "Replica source provider for '%s' did NOT return any "
                "'migr_resources'. Defaulting to %s", origin["type"],
                migr_resources)
        else:
            migr_resources = replica_resources_info['migr_resources']

        return {
            "source_resources": migr_resources,
            "source_resources_connection_info": migr_connection_info
        }
Example #26
    def _run(self, ctxt, minion_pool_machine_id, origin, destination,
             task_info, event_handler):

        # NOTE: either the origin or the destination endpoint would work here:
        connection_info = base.get_connection_info(ctxt, destination)
        provider_type = self.get_required_provider_types()[
            self.get_required_platform()][0]
        provider = providers_factory.get_provider(destination["type"],
                                                  provider_type, event_handler)

        environment_options = task_info['pool_environment_options']
        provider.validate_minion_pool_environment_options(
            ctxt, connection_info, environment_options)

        return {}
Example #27
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        event_manager = events.EventManager(event_handler)

        volumes_info = task_info.get("volumes_info", [])
        new_source_env = task_info.get('source_environment', {})
        # NOTE: the `source_environment` in the `origin` is the one set
        # in the dedicated DB column of the Replica and thus stores
        # the previous value of it:
        old_source_env = origin.get('source_environment')
        if not new_source_env:
            event_manager.progress_update(
                "No new source environment options provided")
            return {
                'volumes_info': volumes_info,
                'source_environment': old_source_env
            }

        source_provider = providers_factory.get_provider(
            origin["type"],
            constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE,
            event_handler,
            raise_if_not_found=False)
        if not source_provider:
            raise exception.InvalidActionTasksExecutionState(
                "Replica source provider plugin for '%s' does not support"
                " updating Replicas" % origin["type"])

        origin_connection_info = base.get_connection_info(ctxt, origin)

        LOG.info("Checking source provider environment params")
        volumes_info = (source_provider.check_update_source_environment_params(
            ctxt, origin_connection_info, instance, volumes_info,
            old_source_env, new_source_env))
        if volumes_info:
            schemas.validate_value(volumes_info,
                                   schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)
        else:
            LOG.warn(
                "Source update method for '%s' source provider did NOT "
                "return any volumes info. Defaulting to old value.",
                origin["type"])
            volumes_info = task_info.get("volumes_info", [])

        return {
            "volumes_info": volumes_info,
            "source_environment": new_source_env
        }
Example #28
    def _run(self, ctxt, minion_pool_machine_id, origin, destination,
             task_info, event_handler):

        # NOTE: either the origin or the destination endpoint would work here:
        connection_info = base.get_connection_info(ctxt, destination)
        provider_type = self.get_required_provider_types()[
            self.get_required_platform()][0]
        provider = providers_factory.get_provider(destination["type"],
                                                  provider_type, event_handler)

        pool_identifier = task_info['pool_identifier']
        environment_options = task_info['pool_environment_options']
        pool_shared_resources = provider.set_up_pool_shared_resources(
            ctxt, connection_info, environment_options, pool_identifier)

        return {"pool_shared_resources": pool_shared_resources}
Example #29
    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        provider = providers_factory.get_provider(
            origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, origin)

        source_environment = task_info['source_environment']
        export_info = provider.get_replica_instance_info(
            ctxt, connection_info, source_environment, instance)

        # Validate the output
        schemas.validate_value(export_info,
                               schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA)

        return {'export_info': export_info}
Example #30
    def run(self, ctxt, instance, origin, destination, task_info,
            event_handler):
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_IMPORT, event_handler)
        connection_info = base.get_connection_info(ctxt, destination)
        instance_deployment_info = task_info.get("instance_deployment_info",
                                                 {})
        provider.delete_disk_copy_resources(ctxt, connection_info,
                                            instance_deployment_info)

        if instance_deployment_info.get("disk_sync_connection_info"):
            del instance_deployment_info["disk_sync_connection_info"]
        if instance_deployment_info.get("disk_sync_tgt_resources"):
            del instance_deployment_info["disk_sync_tgt_resources"]

        return task_info
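
The examples above all follow the same skeleton: resolve a provider for the relevant endpoint, build its connection info, invoke a single provider entry point, validate the result where a schema exists, and return the task_info fields that should be persisted. The sketch below condenses that pattern. It is illustrative only: the import paths, the base class name (base.TaskRunner) and the task/class name are assumptions inferred from how the examples use these modules, not code taken from the examples themselves.

# Illustrative sketch only: import paths and the TaskRunner base class name
# are assumed from how the examples above use these modules.
from coriolis import constants
from coriolis import events
from coriolis import exception
from coriolis import schemas
from coriolis.providers import factory as providers_factory
from coriolis.tasks import base


class ExampleDeployReplicaDisksTask(base.TaskRunner):  # hypothetical name

    def _run(self, ctxt, instance, origin, destination, task_info,
             event_handler):
        event_manager = events.EventManager(event_handler)

        # Resolve the destination provider and its connection info.
        provider = providers_factory.get_provider(
            destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
            event_handler)
        connection_info = base.get_connection_info(ctxt, destination)

        export_info = task_info.get("export_info")
        if not export_info:
            raise exception.InvalidActionTasksExecutionState(
                "Instance export info is not set.")

        target_environment = destination.get("target_environment") or {}
        volumes_info = task_info.get("volumes_info") or []

        event_manager.progress_update("Deploying Replica disks")
        # Exactly one provider entry point is called per task, and its
        # output is validated against the matching JSON schema.
        volumes_info = provider.deploy_replica_disks(
            ctxt, connection_info, target_environment, instance,
            export_info, volumes_info)
        schemas.validate_value(volumes_info,
                               schemas.CORIOLIS_VOLUMES_INFO_SCHEMA)

        # Only the updated task_info fields are returned for persistence.
        return {"volumes_info": volumes_info}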