def _validate_create_body(self, context, body):
    """Validate the body of a Replica creation request.

    Validates the source/destination environments against their
    endpoints, the instance list, the network map, the storage
    mappings, the user scripts, and the OSMorphing minion pool
    mappings, returning all of the normalized values.

    :param context: request context used for the endpoint validation
        API calls
    :param body: deserialized request body; must contain a "replica"
        key with "origin_endpoint_id" and "destination_endpoint_id"
    :returns: tuple of (origin_endpoint_id, destination_endpoint_id,
        source_environment, destination_environment, instances,
        network_map, storage_mappings, notes, origin_minion_pool_id,
        destination_minion_pool_id,
        instance_osmorphing_minion_pool_mappings, user_scripts)
    :raises KeyError: if a required field is missing from the body
    :raises ValueError: if any OSMorphing pool mapping references an
        instance which is not part of the Replica's instance list
    """
    replica = body["replica"]
    origin_endpoint_id = replica["origin_endpoint_id"]
    destination_endpoint_id = replica["destination_endpoint_id"]
    destination_environment = replica.get(
        "destination_environment", {})
    instances = api_utils.validate_instances_list_for_transfer(
        replica.get('instances'))
    notes = replica.get("notes")

    source_environment = replica.get("source_environment", {})
    self._endpoints_api.validate_source_environment(
        context, origin_endpoint_id, source_environment)

    origin_minion_pool_id = replica.get('origin_minion_pool_id')
    destination_minion_pool_id = replica.get(
        'destination_minion_pool_id')
    instance_osmorphing_minion_pool_mappings = replica.get(
        'instance_osmorphing_minion_pool_mappings', {})
    # Every instance with an OSMorphing pool mapping must also be part
    # of the Replica's declared instance list:
    extras = [
        instance
        for instance in instance_osmorphing_minion_pool_mappings
        if instance not in instances]
    if extras:
        # NOTE: fixed typo in the original message ("Replicas's"):
        raise ValueError(
            "One or more instance OSMorphing pool mappings were "
            "provided for instances (%s) which are not part of the "
            "Replica's declared instances (%s)" % (extras, instances))

    # TODO(aznashwan): until the provider plugin interface is updated
    # to have separate 'network_map' and 'storage_mappings' fields,
    # we add them as part of the destination environment:
    network_map = replica.get("network_map", {})
    api_utils.validate_network_map(network_map)
    destination_environment['network_map'] = network_map
    self._endpoints_api.validate_target_environment(
        context, destination_endpoint_id, destination_environment)

    user_scripts = replica.get('user_scripts', {})
    api_utils.validate_user_scripts(user_scripts)
    user_scripts = api_utils.normalize_user_scripts(
        user_scripts, instances)

    # NOTE(aznashwan): we validate the destination environment for the
    # import provider before appending the 'storage_mappings' parameter
    # for plugins with strict property name checks which do not yet
    # support storage mapping features:
    storage_mappings = replica.get("storage_mappings", {})
    api_utils.validate_storage_mappings(storage_mappings)
    destination_environment['storage_mappings'] = storage_mappings

    return (
        origin_endpoint_id, destination_endpoint_id,
        source_environment, destination_environment, instances,
        network_map, storage_mappings, notes, origin_minion_pool_id,
        destination_minion_pool_id,
        instance_osmorphing_minion_pool_mappings, user_scripts)
def create(self, req, body):
    """Handle a migration creation request.

    When the body contains a "replica_id", the existing Replica's
    instances are deployed directly; otherwise the full migration
    input is validated and a fresh migration between the two
    endpoints is started.
    """
    migration_body = body.get("migration", {})
    context = req.environ['coriolis.context']
    context.can(
        migration_policies.get_migrations_policy_label("create"))

    scripts = migration_body.get('user_scripts', {})
    api_utils.validate_user_scripts(scripts)
    scripts = api_utils.normalize_user_scripts(
        scripts, migration_body.get("instances", []))

    replica_id = migration_body.get("replica_id")
    if replica_id:
        # NOTE: destination environment for replica should have been
        # validated upon its creation.
        migration = self._migration_api.deploy_replica_instances(
            context, replica_id,
            migration_body.get(
                'instance_osmorphing_minion_pool_mappings', {}),
            migration_body.get("clone_disks", True),
            migration_body.get("force", False),
            migration_body.get("skip_os_morphing", False),
            user_scripts=scripts)
        return migration_view.single(req, migration)

    (origin_endpoint_id, destination_endpoint_id,
     origin_minion_pool_id, destination_minion_pool_id,
     instance_osmorphing_minion_pool_mappings, source_environment,
     destination_environment, instances, notes, skip_os_morphing,
     replication_count, shutdown_instances, network_map,
     storage_mappings) = self._validate_migration_input(
        context, body)
    migration = self._migration_api.migrate_instances(
        context, origin_endpoint_id, destination_endpoint_id,
        origin_minion_pool_id, destination_minion_pool_id,
        instance_osmorphing_minion_pool_mappings, source_environment,
        destination_environment, instances, network_map,
        storage_mappings, replication_count, shutdown_instances,
        notes=notes, skip_os_morphing=skip_os_morphing,
        user_scripts=scripts)
    return migration_view.single(req, migration)
def _validate_update_body(self, id, context, body):
    """Validate the body of a Replica update request.

    Rejects updates to fields which are immutable after creation
    (the endpoints and the instance list), then merges the supplied
    values over the stored Replica's and re-validates the merged
    environments, mappings and user scripts.

    :returns: the merged and validated body values
    :raises exc.HTTPBadRequest: on attempts to update immutable fields
    """
    replica = self._replica_api.get_replica(context, id)
    replica_body = body['replica']

    # The endpoints and the instance list are fixed at creation time:
    if (replica_body.get('origin_endpoint_id') or
            replica_body.get('destination_endpoint_id')):
        raise exc.HTTPBadRequest(
            explanation="The source or destination endpoints for a "
                        "Coriolis Replica cannot be updated after its "
                        "creation. If the credentials of any of the "
                        "Replica's endpoints need updating, please "
                        "update the endpoints themselves.")
    if replica_body.get('instances'):
        raise exc.HTTPBadRequest(
            explanation="The list of instances of a Replica cannot be "
                        "updated")

    merged_body = self._get_merged_replica_values(
        replica, replica_body)

    self._endpoints_api.validate_source_environment(
        context, replica["origin_endpoint_id"],
        merged_body["source_environment"])
    self._endpoints_api.validate_target_environment(
        context, replica["destination_endpoint_id"],
        merged_body["destination_environment"])
    api_utils.validate_network_map(merged_body["network_map"])
    api_utils.validate_storage_mappings(
        merged_body["storage_mappings"])

    scripts = merged_body['user_scripts']
    api_utils.validate_user_scripts(scripts)
    merged_body['user_scripts'] = api_utils.normalize_user_scripts(
        scripts, replica.get('instances', []))
    return merged_body
def _get_merged_replica_values(self, replica, updated_values):
    """ Merge an existing Replica's values with updated ones.

    Looks for the following keys in the original replica body and
    updated values (preferring the updated values where needed, and
    merging dict options key-by-key):
    "source_environment", "destination_environment", "network_map",
    "notes"
    Does special merging for the "storage_mappings" and "user_scripts"
    Returns a dict with the merged values (or at least all of the keys
    having a default value of {})

    NOTE: unlike the previous implementation, the dicts inside the
    input `replica` body are never mutated; the merged options are
    fresh copies.
    """
    final_values = {}
    # NOTE: this just replaces options at the top-level and does not
    # do merging of container types (ex: lists, dicts)
    for option in [
            "source_environment", "destination_environment",
            "network_map"]:
        before = replica.get(option)
        after = updated_values.get(option)
        # NOTE: for Replicas created before the separation of these
        # fields in the DB there is the chance that some of these may
        # be NULL.
        # Copy `before` rather than calling `.update()` on it directly
        # so the caller's `replica` body is never mutated:
        merged = dict(before) if before is not None else {}
        merged.update(after if after is not None else {})
        final_values[option] = merged

    original_storage_mappings = replica.get('storage_mappings')
    if original_storage_mappings is None:
        original_storage_mappings = {}
    new_storage_mappings = updated_values.get('storage_mappings')
    if new_storage_mappings is None:
        new_storage_mappings = {}
    final_values['storage_mappings'] = self._update_storage_mappings(
        original_storage_mappings, new_storage_mappings)

    original_user_scripts = api_utils.validate_user_scripts(
        replica.get('user_scripts', {}))
    new_user_scripts = api_utils.validate_user_scripts(
        updated_values.get('user_scripts', {}))
    final_values['user_scripts'] = self._get_updated_user_scripts(
        original_user_scripts, new_user_scripts)

    if 'notes' in updated_values:
        final_values['notes'] = updated_values.get('notes', '')
    else:
        final_values['notes'] = replica.get('notes', '')

    # NOTE: until the provider plugin interface is updated
    # to have separate 'network_map' and 'storage_mappings' fields,
    # we add them as part of the destination environment (safe now
    # since 'destination_environment' is a copy, not the stored one):
    final_storage_mappings = final_values['storage_mappings']
    final_network_map = final_values['network_map']
    if final_storage_mappings:
        final_values['destination_environment'][
            'storage_mappings'] = final_storage_mappings
    if final_network_map:
        final_values['destination_environment'][
            'network_map'] = final_network_map

    # Minion pool fields are only carried over when explicitly set in
    # the update request:
    minion_pool_fields = [
        "origin_minion_pool_id",
        "destination_minion_pool_id",
        "instance_osmorphing_minion_pool_mappings"]
    final_values.update({
        mpf: updated_values[mpf]
        for mpf in minion_pool_fields
        if mpf in updated_values})

    return final_values