def _run(self, ctxt, minion_pool_machine_id, origin, destination,
         task_info, event_handler):
    """Create a minion machine on the destination platform.

    Calls the provider's ``create_minion`` with the pool options from
    ``task_info`` and returns the (marshaled) connection/backup-writer
    info along with any provider-specific properties.
    """
    # NOTE: both origin or target endpoints would work:
    connection_info = base.get_connection_info(ctxt, destination)
    provider_type = self.get_required_provider_types()[
        self.get_required_platform()][0]
    provider = providers_factory.get_provider(
        destination["type"], provider_type, event_handler)

    pool_identifier = task_info['pool_identifier']
    environment_options = task_info['pool_environment_options']
    pool_shared_resources = task_info['pool_shared_resources']
    pool_os_type = task_info["pool_os_type"]
    minion_properties = provider.create_minion(
        ctxt, connection_info, environment_options, pool_identifier,
        pool_os_type, pool_shared_resources, minion_pool_machine_id)

    missing = [
        key for key in [
            "connection_info", "minion_provider_properties",
            "backup_writer_connection_info"]
        if key not in minion_properties]
    if missing:
        # BUGFIX: the original call supplied no arguments for the two
        # '%s' placeholders, which would make the logging call error out.
        LOG.warn(
            "Provider of type '%s' failed to return the following minion "
            "property keys: %s. Allowing run to completion for later "
            "cleanup.", destination["type"], missing)

    # Marshal whatever connection info the provider did return; missing
    # keys default to empty dicts so cleanup can still run later.
    minion_connection_info = {}
    if 'connection_info' in minion_properties:
        minion_connection_info = base.marshal_migr_conn_info(
            minion_properties['connection_info'])

    minion_backup_writer_conn = {}
    if 'backup_writer_connection_info' in minion_properties:
        minion_backup_writer_conn = minion_properties[
            'backup_writer_connection_info']
        if 'connection_details' in minion_backup_writer_conn:
            minion_backup_writer_conn['connection_details'] = (
                base.marshal_migr_conn_info(
                    minion_backup_writer_conn['connection_details']))

    return {
        "minion_connection_info": minion_connection_info,
        "minion_backup_writer_connection_info": minion_backup_writer_conn,
        "minion_provider_properties": minion_properties.get(
            "minion_provider_properties")}
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Deploy source-side replica resources via the origin provider and
    record them (plus marshaled connection info) in ``task_info``."""
    export_provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    origin_conn_info = base.get_connection_info(ctxt, origin)
    src_environment = origin.get('source_environment') or {}

    resources_info = export_provider.deploy_replica_source_resources(
        ctxt, origin_conn_info, src_environment)
    task_info["migr_source_resources"] = resources_info["migr_resources"]

    source_conn_info = resources_info.get("connection_info")
    if source_conn_info:
        source_conn_info = base.marshal_migr_conn_info(source_conn_info)
        schemas.validate_value(
            source_conn_info,
            schemas.CORIOLIS_DISK_SYNC_RESOURCES_CONN_INFO_SCHEMA,
            # NOTE: we avoid raising so that the cleanup task
            # can [try] to deal with the temporary resources.
            raise_on_error=False)
    task_info["migr_source_connection_info"] = source_conn_info

    return task_info
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Deploy disk-copy resources on the destination and store the
    updated deployment info (with marshaled disk sync connection info)
    back into ``task_info``."""
    import_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_IMPORT, event_handler)
    dest_conn_info = base.get_connection_info(ctxt, destination)
    tgt_environment = destination.get("target_environment") or {}
    deployment_info = task_info["instance_deployment_info"]

    resources_info = import_provider.deploy_disk_copy_resources(
        ctxt, dest_conn_info, tgt_environment, deployment_info)

    updated_deployment_info = resources_info["instance_deployment_info"]
    marshaled_sync_info = base.marshal_migr_conn_info(
        updated_deployment_info["disk_sync_connection_info"])
    task_info["instance_deployment_info"] = updated_deployment_info
    task_info["instance_deployment_info"][
        "disk_sync_connection_info"] = marshaled_sync_info

    # We need to retain export info until after disk sync
    # TODO(gsamfira): remove this when we implement multi-worker, and by
    # extension some external storage for needed resources (like swift)
    task_info["retain_export_path"] = True
    return task_info
def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    """Deploy replica source resources and return them together with the
    (marshaled) worker connection info, warning on missing provider
    output instead of failing."""
    provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    conn_info = base.get_connection_info(ctxt, origin)
    src_environment = task_info['source_environment'] or {}
    export_info = task_info['export_info']

    resources_info = provider.deploy_replica_source_resources(
        ctxt, conn_info, export_info, src_environment)

    worker_conn_info = {}
    if 'connection_info' in resources_info:
        worker_conn_info = resources_info['connection_info']
    else:
        LOG.warn(
            "Replica source provider for '%s' did NOT return any "
            "'connection_info'. Defaulting to '%s'",
            origin["type"], worker_conn_info)

    if worker_conn_info:
        worker_conn_info = base.marshal_migr_conn_info(worker_conn_info)
        schemas.validate_value(
            worker_conn_info,
            schemas.CORIOLIS_REPLICATION_WORKER_CONN_INFO_SCHEMA,
            # NOTE: we avoid raising so that the cleanup task
            # can [try] to deal with the temporary resources.
            raise_on_error=False)
    else:
        LOG.warn(
            "Replica source provider for '%s' returned empty "
            "'connection_info' in source resources deployment: %s",
            origin["type"], worker_conn_info)

    source_resources = {}
    if 'migr_resources' in resources_info:
        source_resources = resources_info['migr_resources']
    else:
        LOG.warn(
            "Replica source provider for '%s' did NOT return any "
            "'migr_resources'. Defaulting to %s",
            origin["type"], source_resources)

    return {
        "source_resources": source_resources,
        "source_resources_connection_info": worker_conn_info}
def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    """Deploy OSMorphing resources on the destination platform.

    Raises InvalidTaskResult when the provider omits the resources or
    the connection info; missing 'osmorphing_info' only warns.
    """
    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_OS_MORPHING,
        event_handler)
    conn_info = base.get_connection_info(ctxt, destination)
    tgt_environment = task_info["target_environment"]
    deployment_info = task_info["instance_deployment_info"]

    import_info = provider.deploy_os_morphing_resources(
        ctxt, conn_info, tgt_environment, deployment_info)
    schemas.validate_value(
        import_info, schemas.CORIOLIS_OS_MORPHING_RESOURCES_SCHEMA,
        # NOTE: we avoid raising so that the cleanup task
        # can [try] to deal with the temporary resources.
        raise_on_error=False)

    morphing_resources = import_info.get('os_morphing_resources')
    if not morphing_resources:
        raise exception.InvalidTaskResult(
            "Target provider for '%s' did NOT return any "
            "'os_morphing_resources'." % (destination["type"]))

    morphing_conn_info = import_info.get('osmorphing_connection_info')
    if not morphing_conn_info:
        raise exception.InvalidTaskResult(
            "Target provider '%s' did NOT return any "
            "'osmorphing_connection_info'." % (destination["type"]))
    morphing_conn_info = base.marshal_migr_conn_info(morphing_conn_info)

    morphing_info = import_info.get("osmorphing_info", {})
    if not morphing_info:
        LOG.warn(
            "Target provider for '%s' did NOT return any "
            "'osmorphing_info'. Defaulting to %s",
            destination["type"], morphing_info)

    return {
        "os_morphing_resources": morphing_resources,
        "osmorphing_connection_info": morphing_conn_info,
        "osmorphing_info": morphing_info}
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Deploy replica source resources (no source environment is passed
    to the provider in this variant) and store them in ``task_info``."""
    export_provider = providers_factory.get_provider(
        origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT,
        event_handler)
    origin_conn_info = base.get_connection_info(ctxt, origin)

    resources_info = export_provider.deploy_replica_source_resources(
        ctxt, origin_conn_info)

    task_info["migr_source_resources"] = resources_info["migr_resources"]
    task_info["migr_source_connection_info"] = base.marshal_migr_conn_info(
        resources_info["connection_info"])
    return task_info
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Deploy replica target resources, validate the provider output and
    record the ordered volumes/resources/connection info in ``task_info``."""
    tgt_environment = destination.get("target_environment") or {}
    export_info = task_info['export_info']
    import_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)
    dest_conn_info = base.get_connection_info(ctxt, destination)
    volumes_info = _get_volumes_info(task_info)

    resources_info = import_provider.deploy_replica_target_resources(
        ctxt, dest_conn_info, tgt_environment, volumes_info)
    schemas.validate_value(
        resources_info, schemas.CORIOLIS_DISK_SYNC_RESOURCES_INFO_SCHEMA,
        # NOTE: we avoid raising so that the cleanup task
        # can [try] to deal with the temporary resources.
        raise_on_error=False)

    task_info["volumes_info"] = _check_ensure_volumes_info_ordering(
        export_info, resources_info["volumes_info"])
    task_info["migr_target_resources"] = resources_info["migr_resources"]

    sync_conn_info = base.marshal_migr_conn_info(
        resources_info["connection_info"])
    schemas.validate_value(
        sync_conn_info,
        schemas.CORIOLIS_DISK_SYNC_RESOURCES_CONN_INFO_SCHEMA,
        # NOTE: we avoid raising so that the cleanup task
        # can [try] to deal with the temporary resources.
        raise_on_error=False)
    task_info["migr_target_connection_info"] = sync_conn_info
    return task_info
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Deploy OSMorphing resources on the destination and record them
    (with marshaled connection info) in ``task_info``."""
    morphing_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_OS_MORPHING,
        event_handler)
    dest_conn_info = base.get_connection_info(ctxt, destination)
    deployment_info = task_info["instance_deployment_info"]

    import_info = morphing_provider.deploy_os_morphing_resources(
        ctxt, dest_conn_info, deployment_info)

    task_info["os_morphing_resources"] = import_info.get(
        "os_morphing_resources")
    task_info["osmorphing_info"] = import_info.get("osmorphing_info", {})
    task_info["osmorphing_connection_info"] = base.marshal_migr_conn_info(
        import_info["osmorphing_connection_info"])

    # NOTE(review): the entire task_info (not just the OSMorphing keys
    # set above) is validated against the resources schema here — confirm
    # this is intended rather than validating import_info as elsewhere.
    schemas.validate_value(
        task_info, schemas.CORIOLIS_OS_MORPHING_RESOURCES_SCHEMA)
    return task_info
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Deploy replica target resources (no schema validation in this
    variant) and record them in ``task_info``."""
    tgt_environment = destination.get("target_environment") or {}
    import_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)
    dest_conn_info = base.get_connection_info(ctxt, destination)
    volumes_info = _get_volumes_info(task_info)

    resources_info = import_provider.deploy_replica_target_resources(
        ctxt, dest_conn_info, tgt_environment, volumes_info)

    task_info["volumes_info"] = resources_info["volumes_info"]
    task_info["migr_target_resources"] = resources_info["migr_resources"]
    task_info["migr_target_connection_info"] = base.marshal_migr_conn_info(
        resources_info["connection_info"])
    return task_info
def run(self, ctxt, instance, origin, destination, task_info,
        event_handler):
    """Deploy disk-copy resources on the destination, validate the
    returned deployment/connection info, and update ``task_info``."""
    import_provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_IMPORT, event_handler)
    dest_conn_info = base.get_connection_info(ctxt, destination)
    tgt_environment = destination.get("target_environment") or {}

    resources_info = import_provider.deploy_disk_copy_resources(
        ctxt, dest_conn_info, tgt_environment,
        task_info["instance_deployment_info"])

    deployment_info = resources_info["instance_deployment_info"]
    schemas.validate_value(
        deployment_info, schemas.CORIOLIS_DISK_SYNC_RESOURCES_INFO_SCHEMA,
        # NOTE: we avoid raising so that the cleanup task
        # can [try] to deal with the temporary resources.
        raise_on_error=False)

    sync_conn_info = base.marshal_migr_conn_info(
        deployment_info["disk_sync_connection_info"])
    schemas.validate_value(
        sync_conn_info,
        schemas.CORIOLIS_DISK_SYNC_RESOURCES_CONN_INFO_SCHEMA)
    deployment_info['disk_sync_connection_info'] = sync_conn_info
    task_info["instance_deployment_info"] = deployment_info

    # We need to retain export info until after disk sync
    # TODO(gsamfira): remove this when we implement multi-worker, and by
    # extension some external storage for needed resources (like swift)
    task_info["retain_export_path"] = True
    return task_info
def _run(self, ctxt, instance, origin, destination, task_info,
         event_handler):
    """Deploy replica target resources and return the volumes info,
    target resources, and (marshaled) connection info.

    Missing provider output only produces warnings so that the cleanup
    task can later deal with any temporary resources.
    """
    target_environment = task_info["target_environment"]
    export_info = task_info['export_info']
    provider = providers_factory.get_provider(
        destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT,
        event_handler)
    connection_info = base.get_connection_info(ctxt, destination)
    volumes_info = _get_volumes_info(task_info)

    replica_resources_info = provider.deploy_replica_target_resources(
        ctxt, connection_info, target_environment, volumes_info)
    schemas.validate_value(
        replica_resources_info,
        schemas.CORIOLIS_DISK_SYNC_RESOURCES_INFO_SCHEMA,
        # NOTE: we avoid raising so that the cleanup task
        # can [try] to deal with the temporary resources.
        raise_on_error=False)

    if "volumes_info" in replica_resources_info:
        volumes_info = _check_ensure_volumes_info_ordering(
            export_info, replica_resources_info["volumes_info"])
    else:
        # BUGFIX: the original message contained a '%s' placeholder but
        # supplied no argument, which would make the logging call fail.
        LOG.warn(
            "Replica target provider for '%s' did not return any "
            "'volumes_info'. Using the previous value of it.",
            destination["type"])

    migr_connection_info = {}
    if 'connection_info' in replica_resources_info:
        migr_connection_info = replica_resources_info['connection_info']
        try:
            # Best-effort sanity check of the returned writer conn info;
            # a failure here is only logged, never fatal.
            backup_writers.BackupWritersFactory(
                migr_connection_info, None).get_writer()
        except Exception as err:
            LOG.warn(
                "Seemingly invalid backup writer conn info. Replica will "
                "likely fail during disk Replication. Error is: %s" % (
                    str(err)))

    if migr_connection_info:
        if 'connection_details' in migr_connection_info:
            migr_connection_info['connection_details'] = (
                base.marshal_migr_conn_info(
                    migr_connection_info['connection_details']))
        schemas.validate_value(
            migr_connection_info,
            schemas.CORIOLIS_DISK_SYNC_RESOURCES_CONN_INFO_SCHEMA,
            # NOTE: we avoid raising so that the cleanup task
            # can [try] to deal with the temporary resources.
            raise_on_error=False)
    else:
        LOG.warn(
            "Replica target provider for '%s' did NOT return any "
            "'connection_info'. Defaulting to %s",
            destination["type"], migr_connection_info)

    target_resources = {}
    if 'migr_resources' not in replica_resources_info:
        LOG.warn(
            "Replica target provider for '%s' did NOT return any "
            "'migr_resources'. Defaulting to %s",
            destination["type"], target_resources)
    else:
        target_resources = replica_resources_info["migr_resources"]

    return {
        "volumes_info": volumes_info,
        "target_resources": target_resources,
        "target_resources_connection_info": migr_connection_info}