def update_snapshot_metadata(request, snapshot_id, reset=False):
    """Merge or replace a snapshot's metadata from a JSON request body.

    The body must carry a "metadata" dict. With ``reset`` true the
    existing properties are replaced wholesale; otherwise the new keys
    are merged in. Responds with the resulting metadata as JSON.
    """
    body = utils.get_json_body(request)
    log.debug('update_snapshot_meta snapshot_id: %s, reset: %s request: %s',
              snapshot_id, reset, body)
    # Look the snapshot up first so a missing id fails before any update.
    util.get_snapshot(request.user_uniq, snapshot_id)
    new_meta = utils.get_attribute(body, "metadata", required=True,
                                   attr_type=dict)
    with backend.PlanktonBackend(request.user_uniq) as plankton:
        plankton.update_properties(snapshot_id, new_meta, replace=reset)
    # Re-read so the response reflects the stored state.
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    payload = json.dumps({"metadata": dict(snapshot["properties"])})
    return HttpResponse(payload, content_type="application/json", status=200)
def update_snapshot_metadata(request, snapshot_id, reset=False):
    """Merge or replace a snapshot's metadata from a JSON request body.

    The body must carry a "metadata" dict. With ``reset`` true the
    existing properties are replaced wholesale; otherwise the new keys
    are merged in. Responds with the resulting metadata as JSON.
    """
    credentials = request.credentials
    util.assert_snapshots_enabled(request)
    body = utils.get_json_body(request)
    log.debug("User: %s, Snapshot: %s Action: update_metadata",
              credentials.userid, snapshot_id)
    # Look the snapshot up first so a missing id fails before any update.
    util.get_snapshot(credentials.userid, snapshot_id)
    new_meta = utils.get_attribute(body, "metadata", required=True,
                                   attr_type=dict)
    with backend.PlanktonBackend(credentials.userid) as plankton:
        plankton.update_properties(snapshot_id, new_meta, replace=reset)
    # Re-read so the response reflects the stored state.
    snapshot = util.get_snapshot(credentials.userid, snapshot_id)
    payload = json.dumps({"metadata": dict(snapshot["properties"])})
    return HttpResponse(payload, content_type="application/json", status=200)
def delete_snapshot(request, snapshot_id):
    """Delete the user's snapshot with the given id; respond 202 Accepted."""
    log.debug("delete_snapshot snapshot_id: %s", snapshot_id)
    target = util.get_snapshot(request.user_uniq, snapshot_id)
    snapshots.delete(target)
    return HttpResponse(status=202)
def list_snapshot_metadata(request, snapshot_id):
    """Return the metadata (properties) of a snapshot as a JSON response."""
    credentials = request.credentials
    util.assert_snapshots_enabled(request)
    snapshot = util.get_snapshot(credentials.userid, snapshot_id)
    payload = json.dumps({"metadata": dict(snapshot["properties"])})
    return HttpResponse(payload, content_type="application/json", status=200)
def update_snapshot(request, snapshot_id):
    """Update a snapshot's display name and/or description.

    Expects a JSON body with a "snapshot" dict that holds at least one
    of "display_name" / "display_description". Responds with the
    detailed representation of the updated snapshot.
    """
    credentials = request.credentials
    util.assert_snapshots_enabled(request)
    body = utils.get_json_body(request)
    log.debug("User: %s, Snapshot: %s Action: update",
              credentials.userid, snapshot_id)
    snapshot = util.get_snapshot(credentials.userid, snapshot_id)
    snap_dict = utils.get_attribute(body, "snapshot", attr_type=dict,
                                    required=True)
    name = utils.get_attribute(snap_dict, "display_name",
                               required=False, attr_type=basestring)
    description = utils.get_attribute(snap_dict, "display_description",
                                      required=False, attr_type=basestring)
    # Reject a request that carries neither updatable field.
    if name is None and description is None:
        raise faults.BadRequest("Nothing to update.")
    snapshot = snapshots.update(snapshot, name=name, description=description)
    log.info("User %s updated snapshot %s", credentials.userid,
             snapshot["id"])
    payload = json.dumps({'snapshot': snapshot_to_dict(snapshot,
                                                       detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def update_snapshot(request, snapshot_id):
    """Update a snapshot's display name and/or description.

    Expects a JSON body with a "snapshot" dict that holds at least one
    of "display_name" / "display_description". Responds with the
    detailed representation of the updated snapshot.
    """
    util.assert_snapshots_enabled(request)
    body = utils.get_json_body(request)
    log.debug("User: %s, Snapshot: %s Action: update",
              request.user_uniq, snapshot_id)
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    snap_dict = utils.get_attribute(body, "snapshot", attr_type=dict,
                                    required=True)
    name = utils.get_attribute(snap_dict, "display_name",
                               required=False, attr_type=basestring)
    description = utils.get_attribute(snap_dict, "display_description",
                                      required=False, attr_type=basestring)
    # Reject a request that carries neither updatable field.
    if name is None and description is None:
        raise faults.BadRequest("Nothing to update.")
    snapshot = snapshots.update(snapshot, name=name, description=description)
    log.info("User %s updated snapshot %s", request.user_uniq,
             snapshot["id"])
    payload = json.dumps({'snapshot': snapshot_to_dict(snapshot,
                                                       detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def delete_snapshot_metadata_item(request, snapshot_id, key):
    """Remove one metadata key from a snapshot; absent keys are a no-op."""
    log.debug('delete_snapshot_meta_item snapshot_id: %s, key: %s',
              snapshot_id, key)
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    if key in snapshot["properties"]:
        with backend.PlanktonBackend(request.user_uniq) as plankton:
            plankton.remove_property(snapshot_id, key)
    return HttpResponse(status=200)
def delete_snapshot_metadata_item(request, snapshot_id, key):
    """Remove one metadata key from a snapshot; absent keys are a no-op."""
    util.assert_snapshots_enabled(request)
    log.debug("User: %s, Snapshot: %s Action: delete_metadata",
              request.user_uniq, snapshot_id)
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    if key in snapshot["properties"]:
        with backend.PlanktonBackend(request.user_uniq) as plankton:
            plankton.remove_property(snapshot_id, key)
    return HttpResponse(status=200)
def delete_snapshot_metadata_item(request, snapshot_id, key):
    """Remove one metadata key from a snapshot; absent keys are a no-op."""
    credentials = request.credentials
    util.assert_snapshots_enabled(request)
    log.debug("User: %s, Snapshot: %s Action: delete_metadata",
              credentials.userid, snapshot_id)
    snapshot = util.get_snapshot(credentials.userid, snapshot_id)
    if key in snapshot["properties"]:
        with backend.PlanktonBackend(credentials.userid) as plankton:
            plankton.remove_property(snapshot_id, key)
    return HttpResponse(status=200)
def delete_snapshot(request, snapshot_id):
    """Delete the user's snapshot with the given id; respond 202 Accepted."""
    util.assert_snapshots_enabled(request)
    log.debug("User: %s, Snapshot: %s Action: delete",
              request.user_uniq, snapshot_id)
    target = util.get_snapshot(request.user_uniq, snapshot_id)
    snapshots.delete(target)
    log.info("User %s deleted snapshot %s", request.user_uniq, target["id"])
    return HttpResponse(status=202)
def delete_snapshot(request, snapshot_id):
    """Delete the user's snapshot with the given id; respond 202 Accepted."""
    credentials = request.credentials
    util.assert_snapshots_enabled(request)
    log.debug("User: %s, Snapshot: %s Action: delete",
              credentials.userid, snapshot_id)
    target = util.get_snapshot(credentials.userid, snapshot_id)
    snapshots.delete(target)
    log.info("User %s deleted snapshot %s", credentials.userid, target["id"])
    return HttpResponse(status=202)
def handle(self, *args, **options):
    """Update the name/description of the snapshot given as first argument."""
    if not args:
        raise CommandError("Please provide a snapshot ID")
    snapshot_id = args[0]
    owner = options["user"]
    new_name = options["name"]
    new_description = options["description"]
    snapshot = util.get_snapshot(owner, snapshot_id)
    snapshots.update(snapshot, name=new_name, description=new_description)
    self.stdout.write("Successfully updated snapshot %s\n" % snapshot_id)
def handle(self, *args, **options):
    """Delete the snapshots given as arguments, after confirmation."""
    if not args:
        raise CommandError("Please provide a snapshot ID")
    force = options['force']
    # Pluralize the confirmation prompt when several ids were given.
    message = "snapshot" if len(args) == 1 else "snapshots"
    self.confirm_deletion(force, message, args)
    owner = options["user"]
    for snap_id in args:
        self.stdout.write("\n")
        try:
            snapshot = util.get_snapshot(owner, snap_id)
            snapshots.delete(snapshot)
            self.stdout.write("Successfully removed snapshot %s\n" % snap_id)
        except CommandError as e:
            # Report per-snapshot failures and continue with the rest.
            self.stdout.write("Error -- %s\n" % e.message)
def update_snapshot(request, snapshot_id):
    """Update a snapshot's display name and/or description.

    Expects a JSON body with a "snapshot" dict that holds at least one
    of "display_name" / "display_description". Responds with the
    detailed representation of the updated snapshot.
    """
    body = utils.get_json_body(request)
    log.debug('update_snapshot snapshot_id: %s, request: %s',
              snapshot_id, body)
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    snap_dict = utils.get_attribute(body, "snapshot", attr_type=dict,
                                    required=True)
    name = utils.get_attribute(snap_dict, "display_name",
                               required=False, attr_type=basestring)
    description = utils.get_attribute(snap_dict, "display_description",
                                      required=False, attr_type=basestring)
    # Reject a request that carries neither updatable field.
    if name is None and description is None:
        raise faults.BadRequest("Nothing to update.")
    snapshot = snapshots.update(snapshot, name=name, description=description)
    payload = json.dumps({'snapshot': snapshot_to_dict(snapshot,
                                                       detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created as
    a file in Pithos, with specified metadata to indicate that it is a
    snapshot. Then a job is sent to Ganeti backend to create the actual
    snapshot of the volume.

    Snapshots are only supported for volumes of ext_ disk template. Also,
    the volume must be attached to some server.

    """
    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)
    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s' and"
               " '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the snapshot counter of the volume that is used in order to
    # generate unique snapshot names
    volume.snapshot_counter += 1
    volume.save()
    # NOTE(review): the commit appears intended to persist the counter
    # before calling out to external services below — confirm before
    # reordering anything here.
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume
    # from the server that the volume is attached
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                           .filter(meta_key__in=["OS", "users"])
                           .values_list("meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            # NOTE(review): bare except is deliberate here so that *any*
            # enqueue failure flags the snapshot before re-raising.
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
def get_snapshot(request, snapshot_id):
    """Return the detailed JSON representation of one snapshot."""
    log.debug('get_snapshot snapshot_id: %s', snapshot_id)
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    payload = json.dumps({'snapshot': snapshot_to_dict(snapshot,
                                                       detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def list_snapshot_metadata(request, snapshot_id):
    """Return the metadata (properties) of a snapshot as a JSON response."""
    log.debug('list_snapshot_meta snapshot_id: %s', snapshot_id)
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    payload = json.dumps({"metadata": dict(snapshot["properties"])})
    return HttpResponse(payload, content_type="application/json", status=200)
def get_snapshot(request, snapshot_id):
    """Return the detailed JSON representation of one snapshot."""
    util.assert_snapshots_enabled(request)
    snapshot = util.get_snapshot(request.user_uniq, snapshot_id)
    payload = json.dumps({'snapshot': snapshot_to_dict(snapshot,
                                                       detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def get_snapshot(request, snapshot_id):
    """Return the detailed JSON representation of one snapshot."""
    credentials = request.credentials
    util.assert_snapshots_enabled(request)
    snapshot = util.get_snapshot(credentials.userid, snapshot_id)
    payload = json.dumps({'snapshot': snapshot_to_dict(snapshot,
                                                       detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def _create_volume(server, user_id, project, size, source_type, source_uuid,
                   volume_type, name=None, description=None, index=None,
                   delete_on_termination=True):
    """Create a Volume DB object for `server`, owned by `user_id`.

    Validates name/description lengths, the size and the source
    (snapshot/image/blank), then creates the Volume row in "CREATING"
    status. Returns the new Volume; `origin_size` is attached to the
    object only (not persisted).
    """
    utils.check_name_length(name, Volume.NAME_LENGTH,
                            "Volume name is too long")
    utils.check_name_length(description, Volume.DESCRIPTION_LENGTH,
                            "Volume description is too long")
    validate_volume_termination(volume_type, delete_on_termination)

    if index is None:
        # Counting a server's volumes is safe, because we have an
        # X-lock on the server.
        index = server.volumes.filter(deleted=False).count()

    if size is not None:
        try:
            size = int(size)
        except (TypeError, ValueError):
            raise faults.BadRequest("Volume 'size' needs to be a positive"
                                    " integer value.")
        if size < 1:
            raise faults.BadRequest("Volume size must be a positive integer")
        if size > settings.CYCLADES_VOLUME_MAX_SIZE:
            raise faults.BadRequest("Maximum volume size is '%sGB'" %
                                    settings.CYCLADES_VOLUME_MAX_SIZE)

    # Only ext_ disk template supports cloning from another source. Otherwise
    # it must be the root volume so that 'snf-image' fill the volume
    can_have_source = (index == 0 or
                       volume_type.provider in settings.GANETI_CLONE_PROVIDERS)
    if not can_have_source and source_type != "blank":
        msg = ("Cannot specify a 'source' attribute for volume type '%s' with"
               " disk template '%s'" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    source_version = None
    origin_size = None
    # TODO: Check Volume/Snapshot Status
    if source_type == "snapshot":
        source_snapshot = util.get_snapshot(user_id, source_uuid,
                                            exception=faults.BadRequest)
        snap_status = source_snapshot.get("status", "").upper()
        if snap_status != OBJECT_AVAILABLE:
            raise faults.BadRequest("Cannot create volume from snapshot, while"
                                    " snapshot is in '%s' status" %
                                    snap_status)
        source = Volume.prefix_source(source_uuid, source_type="snapshot")
        if size is None:
            raise faults.BadRequest("Volume size is required")
        elif (size << 30) < int(source_snapshot["size"]):
            # Sizes are compared in bytes; `size` arrives in GB.
            raise faults.BadRequest("Volume size '%s' is smaller than"
                                    " snapshot's size '%s'" %
                                    (size << 30, source_snapshot["size"]))
        source_version = source_snapshot["version"]
        origin = source_snapshot["mapfile"]
        origin_size = source_snapshot["size"]
    elif source_type == "image":
        source_image = util.get_image(user_id, source_uuid,
                                      exception=faults.BadRequest)
        img_status = source_image.get("status", "").upper()
        if img_status != OBJECT_AVAILABLE:
            raise faults.BadRequest("Cannot create volume from image, while"
                                    " image is in '%s' status" % img_status)
        if size is None:
            raise faults.BadRequest("Volume size is required")
        elif (size << 30) < int(source_image["size"]):
            raise faults.BadRequest("Volume size '%s' is smaller than"
                                    " image's size '%s'" %
                                    (size << 30, source_image["size"]))
        source = Volume.prefix_source(source_uuid, source_type="image")
        source_version = source_image["version"]
        origin = source_image["mapfile"]
        origin_size = source_image["size"]
    elif source_type == "blank":
        if size is None:
            raise faults.BadRequest("Volume size is required")
        source = origin = None
    elif source_type == "volume":
        # Currently, Archipelago does not support cloning a volume
        raise faults.BadRequest("Cloning a volume is not supported")
        # source_volume = util.get_volume(user_id, source_uuid,
        #                                 for_update=True, non_deleted=True,
        #                                 exception=faults.BadRequest)
        # if source_volume.status != "IN_USE":
        #     raise faults.BadRequest("Cannot clone volume while it is in"
        #                             " '%s' status" % source_volume.status)
        # # If no size is specified, use the size of the volume
        # if size is None:
        #     size = source_volume.size
        # elif size < source_volume.size:
        #     raise faults.BadRequest("Volume size cannot be smaller than the"
        #                             " source volume")
        # source = Volume.prefix_source(source_uuid, source_type="volume")
        # origin = source_volume.backend_volume_uuid
    else:
        raise faults.BadRequest("Unknown source type")

    volume = Volume.objects.create(userid=user_id,
                                   project=project,
                                   size=size,
                                   volume_type=volume_type,
                                   name=name,
                                   machine=server,
                                   description=description,
                                   delete_on_termination=delete_on_termination,
                                   source=source,
                                   source_version=source_version,
                                   origin=origin,
                                   index=index,
                                   status="CREATING")

    # Store the size of the origin in the volume object but not in the DB.
    # We will have to change this in order to support detachable volumes.
    volume.origin_size = origin_size

    return volume
def do_create(user_id, volume_id, name, description, metadata, force=False,
              credentials=None):
    """Create a snapshot from the volume with id `volume_id`.

    Locks and validates the volume, registers the snapshot with Pithos
    (Plankton) and enqueues a Ganeti job to take the actual snapshot.
    Returns the freshly-registered snapshot.

    NOTE(review): takes both `user_id` and `credentials` — the volume is
    looked up via `credentials` while Plankton uses `user_id`; confirm
    these always refer to the same user.
    """
    volume = util.get_volume(credentials, volume_id, for_update=True,
                             non_deleted=True, exception=faults.BadRequest)
    _check(volume)

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume
    # from the server that the volume is attached
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            # NOTE(review): bare except is deliberate here so that *any*
            # enqueue failure flags the snapshot before re-raising.
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
def do_create(user_id, volume_id, name, description, metadata, force=False,
              credentials=None):
    """Create a snapshot from the volume with id `volume_id`.

    Locks and validates the volume, registers the snapshot with Pithos
    (Plankton) and enqueues a Ganeti job to take the actual snapshot.
    Returns the freshly-registered snapshot.

    NOTE(review): takes both `user_id` and `credentials` — the volume is
    looked up via `credentials` while Plankton uses `user_id`; confirm
    these always refer to the same user.
    """
    volume = util.get_volume(credentials, volume_id, for_update=True,
                             non_deleted=True, exception=faults.BadRequest)
    _check(volume)

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume
    # from the server that the volume is attached
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                           .filter(meta_key__in=["OS", "users"])
                           .values_list("meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            # NOTE(review): bare except is deliberate here so that *any*
            # enqueue failure flags the snapshot before re-raising.
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created as
    a file in Pithos, with specified metadata to indicate that it is a
    snapshot. Then a job is sent to Ganeti backend to create the actual
    snapshot of the volume.

    Snapshots are only supported for volumes of ext_ disk template. Also,
    the volume must be attached to some server.

    """
    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)
    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s' and"
               " '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the snapshot counter of the volume that is used in order to
    # generate unique snapshot names
    volume.snapshot_counter += 1
    volume.save()
    # NOTE(review): the commit appears intended to persist the counter
    # before calling out to external services below — confirm before
    # reordering anything here.
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume
    # from the server that the volume is attached
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            # NOTE(review): bare except is deliberate here so that *any*
            # enqueue failure flags the snapshot before re-raising.
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
def _create_volume(user_id, project, size, source_type, source_uuid,
                   volume_type, name=None, description=None, index=None,
                   delete_on_termination=True, shared_to_project=False):
    """Create the volume in the DB.

    This function can be called from two different places:
    1) During server creation, when creating the volumes of a new server
    2) During volume creation.

    Validates name/description lengths, the size and the source
    (snapshot/image/blank), then creates the Volume row in "CREATING"
    status. Returns the new Volume; `origin_size` is attached to the
    object only (not persisted).
    """
    utils.check_name_length(name, Volume.NAME_LENGTH,
                            "Volume name is too long")
    utils.check_name_length(description, Volume.DESCRIPTION_LENGTH,
                            "Volume description is too long")
    validate_volume_termination(volume_type, delete_on_termination)

    if size is not None:
        try:
            size = int(size)
        except (TypeError, ValueError):
            raise faults.BadRequest("Volume size must be a positive integer")
        if size < 1:
            raise faults.BadRequest("Volume size must be a positive integer")
        if size > settings.CYCLADES_VOLUME_MAX_SIZE:
            raise faults.BadRequest("Maximum volume size is %sGB" %
                                    settings.CYCLADES_VOLUME_MAX_SIZE)

    # Only ext_ disk template supports cloning from another source. Otherwise
    # it must be the root volume so that 'snf-image' fill the volume
    can_have_source = (index == 0 or
                       volume_type.provider in settings.GANETI_CLONE_PROVIDERS)
    if not can_have_source and source_type != "blank":
        msg = ("Cannot specify a 'source' attribute for volume type '%s' with"
               " disk template '%s'" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    source_version = None
    origin_size = None
    # TODO: Check Volume/Snapshot Status
    if source_type == "snapshot":
        source_snapshot = util.get_snapshot(user_id, source_uuid,
                                            exception=faults.BadRequest)
        snap_status = source_snapshot.get("status", "").upper()
        if snap_status != OBJECT_AVAILABLE:
            raise faults.BadRequest("Cannot create volume from snapshot, while"
                                    " snapshot is in '%s' status" %
                                    snap_status)
        source = Volume.prefix_source(source_uuid, source_type="snapshot")
        if size is None:
            raise faults.BadRequest("Volume size is required")
        elif (size << 30) < int(source_snapshot["size"]):
            # Sizes are compared in bytes; `size` arrives in GB.
            raise faults.BadRequest("Volume size '%s' is smaller than"
                                    " snapshot's size '%s'" %
                                    (size << 30, source_snapshot["size"]))
        source_version = source_snapshot["version"]
        origin = source_snapshot["mapfile"]
        origin_size = source_snapshot["size"]
    elif source_type == "image":
        source_image = util.get_image(user_id, source_uuid,
                                      exception=faults.BadRequest)
        img_status = source_image.get("status", "").upper()
        if img_status != OBJECT_AVAILABLE:
            raise faults.BadRequest("Cannot create volume from image, while"
                                    " image is in '%s' status" % img_status)
        if size is None:
            raise faults.BadRequest("Volume size is required")
        elif (size << 30) < int(source_image["size"]):
            raise faults.BadRequest("Volume size '%s' is smaller than"
                                    " image's size '%s'" %
                                    (size << 30, source_image["size"]))
        source = Volume.prefix_source(source_uuid, source_type="image")
        source_version = source_image["version"]
        origin = source_image["mapfile"]
        origin_size = source_image["size"]
    elif source_type == "blank":
        if size is None:
            raise faults.BadRequest("Volume size is required")
        source = origin = None
    elif source_type == "volume":
        # Currently, Archipelago does not support cloning a volume
        raise faults.BadRequest("Cloning a volume is not supported")
        # source_volume = util.get_volume(user_id, source_uuid,
        #                                 for_update=True, non_deleted=True,
        #                                 exception=faults.BadRequest)
        # if source_volume.status != "IN_USE":
        #     raise faults.BadRequest("Cannot clone volume while it is in"
        #                             " '%s' status" % source_volume.status)
        # # If no size is specified, use the size of the volume
        # if size is None:
        #     size = source_volume.size
        # elif size < source_volume.size:
        #     raise faults.BadRequest("Volume size cannot be smaller than the"
        #                             " source volume")
        # source = Volume.prefix_source(source_uuid, source_type="volume")
        # origin = source_volume.backend_volume_uuid
    else:
        raise faults.BadRequest("Unknown source type")

    volume = Volume.objects.create(userid=user_id,
                                   project=project,
                                   index=index,
                                   shared_to_project=shared_to_project,
                                   size=size,
                                   volume_type=volume_type,
                                   name=name,
                                   description=description,
                                   delete_on_termination=delete_on_termination,
                                   source=source,
                                   source_version=source_version,
                                   origin=origin,
                                   status="CREATING")

    # Store the size of the origin in the volume object but not in the DB.
    # We will have to change this in order to support detachable volumes.
    volume.origin_size = origin_size

    return volume