def get_image(image_id, user_id):
    """Return an Image instance or raise ItemNotFound."""
    with PlanktonBackend(user_id) as backend:
        try:
            return backend.get_image(image_id)
        except faults.ItemNotFound:
            raise faults.ItemNotFound("Image '%s' not found" % image_id)

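# Usage sketch (illustrative, not part of the original module): a caller
# that maps the normalized ItemNotFound above to a None result. Assumes the
# same module-level `faults` import used by get_image().
def example_get_image_or_none(image_id, user_id):
    """Hypothetical helper; returns None instead of raising ItemNotFound."""
    try:
        return get_image(image_id, user_id)
    except faults.ItemNotFound:
        return None
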
def get_image_meta(request, image_id):
    """Return detailed metadata on a specific image

    Described in:
    3.4. Requesting Detailed Metadata on a Specific Image
    """
    with PlanktonBackend(request.user_uniq) as backend:
        image = backend.get_image(image_id)
    return _create_image_response(image)

def delete(snapshot):
    """Delete a snapshot.

    Delete a snapshot by deleting the corresponding file from Pithos.
    """
    user_id = snapshot["owner"]
    log.info("Deleting snapshot '%s'", snapshot["location"])
    with PlanktonBackend(user_id) as pithos_backend:
        pithos_backend.delete_snapshot(snapshot["id"])
    return snapshot

def remove_image_member(request, image_id, member):
    """Remove a member from an image

    Described in:
    3.10. Removing a Member from an Image
    """
    log.debug('remove_image_member %s %s', image_id, member)
    with PlanktonBackend(request.user_uniq) as backend:
        backend.remove_user(image_id, member)
    return HttpResponse(status=204)

def list_images(request, detail=False):
    """Return a list of available images.

    This includes images owned by the user, images shared with the user
    and public images.
    """
    def get_request_params(keys):
        params = {}
        for key in keys:
            val = request.GET.get(key, None)
            if val is not None:
                params[key] = val
        return params

    log.debug('list_public_images detail=%s', detail)

    filters = get_request_params(FILTERS)
    params = get_request_params(PARAMS)

    params.setdefault('sort_key', 'created_at')
    params.setdefault('sort_dir', 'desc')

    if params['sort_key'] not in SORT_KEY_OPTIONS:
        raise faults.BadRequest("Invalid 'sort_key'")
    if params['sort_dir'] not in SORT_DIR_OPTIONS:
        raise faults.BadRequest("Invalid 'sort_dir'")

    if 'size_max' in filters:
        try:
            filters['size_max'] = int(filters['size_max'])
        except ValueError:
            raise faults.BadRequest("Malformed request.")

    if 'size_min' in filters:
        try:
            filters['size_min'] = int(filters['size_min'])
        except ValueError:
            raise faults.BadRequest("Malformed request.")

    with PlanktonBackend(request.user_uniq) as backend:
        images = backend.list_images(filters, params)

    # Remove keys that should not be returned
    fields = DETAIL_FIELDS if detail else LIST_FIELDS
    for image in images:
        for key in image.keys():
            if key not in fields:
                del image[key]

    data = json.dumps(images, indent=settings.DEBUG)
    return HttpResponse(data)

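# Usage sketch (illustrative): the filters/params shape that list_images()
# builds from the query string and forwards to PlanktonBackend, roughly what
# a request like GET /images/detail?name=debian&size_max=10737418240 yields.
# The concrete filter names here are an assumption based on FILTERS/PARAMS.
def example_list_user_images(user_uniq):
    """Hypothetical direct caller of the backend used by list_images()."""
    filters = {"name": "debian", "size_max": 10 * 1024 ** 3}
    params = {"sort_key": "created_at", "sort_dir": "desc"}
    with PlanktonBackend(user_uniq) as backend:
        return backend.list_images(filters, params)
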
def add_image(request):
    """Add a new virtual machine image

    Described in:
    3.6. Adding a New Virtual Machine Image

    Implementation notes:
      * The implementation is very inefficient as it loads the whole image
        in memory.

    Limitations:
      * x-image-meta-id is not supported. Will always return 409 Conflict.

    Extensions:
      * An x-image-meta-location header can be passed with a link to file,
        instead of uploading the data.
    """
    params = headers_to_image_params(request)
    log.debug('add_image %s', params)

    if not set(params.keys()).issubset(set(ADD_FIELDS)):
        raise faults.BadRequest("Invalid parameters")

    name = params.pop('name', None)
    if name is None:
        raise faults.BadRequest("Image 'name' parameter is required")
    elif len(smart_unicode(name, encoding="utf-8")) == 0:
        raise faults.BadRequest("Invalid image name")

    location = params.pop('location', None)
    if location is None:
        raise faults.BadRequest("'location' parameter is required")

    try:
        split_url(location)
    except AssertionError:
        raise faults.BadRequest("Invalid location '%s'" % location)

    validate_fields(params)

    if location:
        with PlanktonBackend(request.user_uniq) as backend:
            image = backend.register(name, location, params)
    else:
        # f = StringIO(request.body)
        # image = backend.put(name, f, params)
        return HttpResponse(status=501)     # Not Implemented

    if not image:
        return HttpResponse('Registration failed', status=500)

    return _create_image_response(image)

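# Illustrative sketch of the client side of add_image(). Judging by the
# ADD_FIELDS check and the x-image-meta-* names in the docstring, metadata
# travels in Glance-style headers; the exact header set below is an
# assumption, not a verified request.
def example_register_headers():
    """Hypothetical headers for registering an image from a Pithos URL."""
    return {
        "X-Image-Meta-Name": "Debian Base",
        "X-Image-Meta-Location":
            "pithos://<user-uuid>/images/debian.diskdump",
        "X-Image-Meta-Disk-Format": "diskdump",
        "X-Image-Meta-Container-Format": "bare",
    }
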
def list_image_members(request, image_id):
    """List image memberships

    Described in:
    3.7. Requesting Image Memberships
    """
    with PlanktonBackend(request.user_uniq) as backend:
        users = backend.list_users(image_id)

    members = [{'member_id': u, 'can_share': False} for u in users]
    data = json.dumps({'members': members}, indent=settings.DEBUG)
    return HttpResponse(data)

def add_image_member(request, image_id, member):
    """Add a member to an image

    Described in:
    3.9. Adding a Member to an Image

    Limitations:
      * Passing a body to enable `can_share` is not supported.
    """
    log.debug('add_image_member %s %s', image_id, member)
    with PlanktonBackend(request.user_uniq) as backend:
        backend.add_user(image_id, member)
    return HttpResponse(status=204)

def handle(self, *args, **options):
    if len(args) != 1:
        raise CommandError("Please provide an image ID")
    image_id = args[0]
    try:
        with PlanktonBackend(None) as backend:
            image = backend.get_image(image_id, check_permissions=False)
    except Exception:
        raise CommandError("An error occurred, verify that image or "
                           "user ID are valid")
    utils.pprint_table(out=self.stdout, table=[image.values()],
                       headers=image.keys(), vertical=True)

def update(snapshot, name=None, description=None):
    """Update a snapshot

    Update the name or description of a snapshot.
    """
    metadata = {}
    if name is not None:
        metadata["name"] = name
    if description is not None:
        metadata["description"] = description
    if not metadata:
        return
    user_id = snapshot["owner"]
    with PlanktonBackend(user_id) as b:
        return b.update_metadata(snapshot["id"], metadata)

def get_image(self, imageid, userid):
    if imageid not in self.images:
        try:
            with PlanktonBackend(userid) as ib:
                image = ib.get_image(imageid)
            properties = image.get("properties")
            os = properties.get("os",
                                properties.get("osfamily", "unknown"))
            owner = ("system" if image["owner"] == self.system_user_uuid
                     else "user")
            self.images[imageid] = owner + ":" + os
        except Exception:
            self.images[imageid] = "unknown:unknown"
    return self.images[imageid]

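# Note (illustrative): the method above memoizes an "owner:os" tag per image
# ID, e.g. "system:debian" for an image owned by the system user, falling
# back to the 'osfamily' property when 'os' is unset; any backend error
# caches "unknown:unknown" so the failing lookup is not retried.
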
def handle(self, *args, **options):
    if len(args) != 1:
        raise CommandError("Please provide a snapshot ID")
    snapshot_id = args[0]
    try:
        with PlanktonBackend(None) as backend:
            snapshot = backend.get_snapshot(snapshot_id,
                                            check_permissions=False)
    except Exception:
        raise CommandError("An error occurred, verify that snapshot and "
                           "user ID are valid")
    utils.pprint_table(out=self.stdout, table=[snapshot.values()],
                       headers=snapshot.keys(), vertical=True)

def handle(self, **options):
    user = options['userid']
    check_perm = user is not None

    with PlanktonBackend(user) as backend:
        images = backend.list_images(user, check_permissions=check_perm)
    if options["public"]:
        images = filter(lambda x: x['is_public'], images)
    images.sort(key=lambda x: x['created_at'], reverse=True)

    headers = ("id", "name", "user.uuid", "public", "snapshot")
    table = []
    for img in images:
        fields = (img["id"], img["name"], img["owner"],
                  str(img["is_public"]), str(img["is_snapshot"]))
        table.append(fields)
    pprint_table(self.stdout, table, headers)

def delete_image(request, image_id):
    """Delete an Image.

    This API call is not described in the Openstack Glance API.

    Implementation notes:
      * The implementation does not delete the Image from the storage
        backend. Instead it unregisters the image by removing all the
        metadata from the plankton metadata domain.
    """
    log.info("delete_image '%s'" % image_id)
    userid = request.user_uniq
    with PlanktonBackend(userid) as backend:
        backend.unregister(image_id)
    log.info("User '%s' deleted image '%s'" % (userid, image_id))
    return HttpResponse(status=204)

def reconcile_unsynced_snapshots(self):
    # Find the biggest ID of the retrieved Ganeti jobs. Reconciliation
    # will be performed for IDs that are smaller than this.
    max_job_id = max(self.gnt_jobs.keys()) if self.gnt_jobs.keys() else 0

    with PlanktonBackend(None) as b:
        snapshots = b.list_snapshots(check_permissions=False)

    unavail_snapshots = [s for s in snapshots
                         if s["status"] == OBJECT_UNAVAILABLE]

    for snapshot in unavail_snapshots:
        uuid = snapshot["id"]
        backend_info = snapshot["backend_info"]
        if backend_info is None:
            self.log.warning("Cannot perform reconciliation for"
                             " snapshot '%s'. Not enough information.",
                             uuid)
            continue
        job_info = json.loads(backend_info)
        backend_id = job_info["ganeti_backend_id"]
        job_id = job_info["ganeti_job_id"]
        if backend_id == self.backend.id and job_id <= max_job_id:
            if job_id in self.gnt_jobs:
                job_status = self.gnt_jobs[job_id]["status"]
                state = \
                    backend_mod.snapshot_state_from_job_status(job_status)
                if state == OBJECT_UNAVAILABLE:
                    continue
            else:
                # Snapshot is unavailable but no job exists. Treat the
                # missing job as failed, so the snapshot ends up in error;
                # "error" is assumed here to be the Ganeti job status that
                # maps to OBJECT_ERROR. Without it, `job_status` would be
                # unbound in the fix-up call below.
                job_status = "error"
                state = OBJECT_ERROR
            self.log.info("Snapshot '%s' is '%s' in Pithos DB but should"
                          " be '%s'", uuid, snapshot["status"], state)
            if self.options["fix_unsynced_snapshots"]:
                backend_mod.update_snapshot(uuid, snapshot["owner"],
                                            job_id=-1,
                                            job_status=job_status,
                                            etime=self.event_time)
                self.log.info("Fixed state of snapshot '%s'.", uuid)

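# Note (illustrative): the "backend_info" JSON consumed above is written by
# the snapshot creation path (see create() below) as
# {"ganeti_job_id": <id>, "ganeti_backend_id": <id>}; it is what ties an
# unavailable snapshot back to the Ganeti job that should have produced it.
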
def handle(self, **options):
    user = options['userid']
    check_perm = user is not None

    with PlanktonBackend(user) as backend:
        snapshots = backend.list_snapshots(user,
                                           check_permissions=check_perm)
    if options['public']:
        snapshots = filter(lambda x: x['is_public'], snapshots)

    headers = ("id", "name", "volume_id", "size", "mapfile", "status",
               "owner", "is_public")
    table = []
    for snap in snapshots:
        fields = (snap["id"], snap["name"], snap["volume_id"],
                  snap["size"], snap["mapfile"], snap["status"],
                  snap["owner"], snap["is_public"])
        table.append(fields)
    pprint_table(self.stdout, table, headers)

def list_shared_images(request, member):
    """Request shared images

    Described in:
    3.8. Requesting Shared Images

    Implementation notes:
      * It is not clear what this method should do. We return the IDs of
        the user's images that are accessible by `member`.
    """
    log.debug('list_shared_images %s', member)

    images = []
    with PlanktonBackend(request.user_uniq) as backend:
        for image in backend.list_shared_images(member=member):
            images.append({'image_id': image["id"], 'can_share': False})

    data = json.dumps({'shared_images': images}, indent=settings.DEBUG)
    return HttpResponse(data)

def update_image(request, image_id):
    """Update an image

    Described in:
    3.6.2. Updating an Image

    Implementation notes:
      * It is not clear which metadata are allowed to be updated. We
        support: name, disk_format, container_format, is_public, owner,
        properties and status.
    """
    meta = headers_to_image_params(request)
    log.debug('update_image %s', meta)

    if not set(meta.keys()).issubset(set(UPDATE_FIELDS)):
        raise faults.BadRequest("Invalid metadata")

    validate_fields(meta)
    with PlanktonBackend(request.user_uniq) as backend:
        image = backend.update_metadata(image_id, meta)
    return _create_image_response(image)

def update_image_members(request, image_id):
    """Replace a membership list for an image

    Described in:
    3.11. Replacing a Membership List for an Image

    Limitations:
      * can_share value is ignored
    """
    log.debug('update_image_members %s', image_id)
    data = api.utils.get_json_body(request)

    members = []
    memberships = api.utils.get_attribute(data, "memberships",
                                          attr_type=list)
    for member in memberships:
        if not isinstance(member, dict):
            raise faults.BadRequest("Invalid 'memberships' field")
        member = api.utils.get_attribute(member, "member_id")
        members.append(member)

    with PlanktonBackend(request.user_uniq) as backend:
        backend.replace_users(image_id, members)
    return HttpResponse(status=204)

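# Illustrative request body for update_image_members() above; per the
# parsing logic, "memberships" must be a list of objects carrying a
# "member_id" key, and any "can_share" value is accepted but ignored:
#
#     {"memberships": [{"member_id": "user-1", "can_share": false},
#                      {"member_id": "user-2"}]}
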
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created
    as a file in Pithos, with the specified metadata to indicate that it
    is a snapshot. Then a job is sent to the Ganeti backend to create the
    actual snapshot of the volume.

    Snapshots are only supported for volumes of ext_ disk template. Also,
    the volume must be attached to some server.
    """
    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)

    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s'"
               " and '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the snapshot counter of the volume; it is used to generate
    # unique snapshot names
    volume.snapshot_counter += 1
    volume.save()
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are used for images. We set 'EXCLUDE_ALL_TASKS' to bypass image
    # customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when
        # the volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                                 .filter(meta_key__in=["OS", "users"])
                                 .values_list("meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from GiB to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except Exception:
            # If we fail to enqueue the job to Ganeti, mark the snapshot
            # as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in
        # order to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id,
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot

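# Worked note (illustrative): volume.size is expressed in GiB while
# register_snapshot() expects bytes, hence `size = volume.size << 30`;
# e.g. a 10 GiB volume registers as 10 << 30 == 10737418240 bytes.
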
def do_create(user_id, volume_id, name, description, metadata, force=False,
              credentials=None):
    volume = util.get_volume(credentials, volume_id, for_update=True,
                             non_deleted=True,
                             exception=faults.BadRequest)
    _check(volume)

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are used for images. We set 'EXCLUDE_ALL_TASKS' to bypass image
    # customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when
        # the volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                                 .filter(meta_key__in=["OS", "users"])
                                 .values_list("meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from GiB to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except Exception:
            # If we fail to enqueue the job to Ganeti, mark the snapshot
            # as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in
        # order to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id,
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot