def create_snapshot(request):
    """Create a new Snapshot."""
    body = utils.get_json_body(request)
    log.debug("create_snapshot %s", body)
    owner = request.user_uniq

    snapshot_args = utils.get_attribute(body, "snapshot", required=True,
                                        attr_type=dict)
    vol_id = utils.get_attribute(snapshot_args, "volume_id", required=True)
    # Lock the source volume; deleted volumes cannot be snapshotted.
    src_volume = util.get_volume(owner, vol_id, for_update=True,
                                 non_deleted=True,
                                 exception=faults.BadRequest)

    meta = utils.get_attribute(snapshot_args, "metadata", required=False,
                               attr_type=dict, default={})
    display_name = utils.get_attribute(
        snapshot_args, "display_name", required=False, attr_type=basestring,
        default="Snapshot of volume '%s'" % vol_id)
    display_description = utils.get_attribute(
        snapshot_args, "display_description", required=False,
        attr_type=basestring, default="")
    # TODO: What to do with force ?
    force = utils.get_attribute(body, "force", required=False, attr_type=bool,
                                default=False)

    snapshot = snapshots.create(user_id=owner, volume=src_volume,
                                name=display_name,
                                description=display_description,
                                metadata=meta, force=force)

    # Render response
    payload = json.dumps(
        dict(snapshot=snapshot_to_dict(snapshot, detail=False)))
    return HttpResponse(payload, status=202)
def update_volume(request, volume_id):
    """Update a volume's name, description or delete_on_termination flag."""
    req = utils.get_json_body(request)
    log.debug("User: %s, Volume: %s Action: update_volume, Request: %s",
              request.user_uniq, volume_id, req)
    # Lock the non-deleted volume row for the duration of the update.
    volume = util.get_volume(request.user_uniq, request.user_projects,
                             volume_id, for_update=True, non_deleted=True)
    vol_req = utils.get_attribute(req, "volume", attr_type=dict,
                                  required=True)
    name = utils.get_attribute(vol_req, "display_name", required=False)
    description = utils.get_attribute(vol_req, "display_description",
                                      required=False)
    delete_on_termination = utils.get_attribute(
        vol_req, "delete_on_termination", attr_type=bool, required=False)

    # Guard clause: at least one updatable attribute must be present.
    if all(attr is None
           for attr in (name, description, delete_on_termination)):
        raise faults.BadRequest("Nothing to update.")

    volume = volumes.update(volume, name, description, delete_on_termination)
    log.info("User %s updated volume %s", request.user_uniq, volume.id)

    payload = json.dumps({'volume': volume_to_dict(volume, detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def update_volume(request, volume_id):
    """Update a volume's name, description or delete_on_termination flag."""
    req = utils.get_json_body(request)
    log.debug('update_volume volume_id: %s, request: %s', volume_id, req)
    # Lock the non-deleted volume row while updating it.
    vol = util.get_volume(request.user_uniq, volume_id, for_update=True,
                          non_deleted=True)
    vol_req = utils.get_attribute(req, "volume", attr_type=dict,
                                  required=True)
    name = utils.get_attribute(vol_req, "display_name", required=False)
    description = utils.get_attribute(vol_req, "display_description",
                                      required=False)
    delete_on_termination = utils.get_attribute(
        vol_req, "delete_on_termination", attr_type=bool, required=False)

    # Guard clause: require at least one attribute to change.
    if all(attr is None
           for attr in (name, description, delete_on_termination)):
        raise faults.BadRequest("Nothing to update.")

    vol = volumes.update(vol, name, description, delete_on_termination)

    payload = json.dumps({'volume': volume_to_dict(vol, detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def get_volume(request, volume_id):
    """Return the details of a single volume (deleted ones included)."""
    log.debug('get_volume volume_id: %s', volume_id)
    vol = util.get_volume(request.user_uniq, volume_id, non_deleted=False)
    payload = json.dumps({'volume': volume_to_dict(vol, detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def reassign_volume(volume_id, project, shared_to_project, credentials,
                    atomic_context=None):
    """Move a volume to another project and/or toggle project sharing.

    Only the owner (or an admin) may reassign, and system (root, index 0)
    volumes are never reassignable.  When the project actually changes, a
    quota commission is issued for the move; when only the sharing flag
    changes, no commission is needed.
    """
    volume = util.get_volume(credentials, volume_id,
                             for_update=True, non_deleted=True)
    if not credentials.is_admin and credentials.userid != volume.userid:
        raise faults.Forbidden("Action 'reassign' is allowed only to the owner"
                               " of the volume.")
    # index == 0 marks the server's root/system volume; it must stay in the
    # server's project.
    if volume.index == 0:
        raise faults.Conflict("Cannot reassign: %s is a system volume"
                              % volume.id)
    server = volume.machine
    if server is not None:
        # The attached server must be in a state that permits REASSIGN.
        commands.validate_server_action(server, "REASSIGN")
    if volume.project == project:
        # Same project: only the sharing flag may change; no quota movement.
        if volume.shared_to_project != shared_to_project:
            log.info("%s volume %s to project %s",
                     "Sharing" if shared_to_project else "Unsharing",
                     volume, project)
            volume.shared_to_project = shared_to_project
            volume.save()
    else:
        # Project changes: record old/new project for the quota commission.
        action_fields = {"to_project": project,
                         "from_project": volume.project}
        log.info("Reassigning volume %s from project %s to %s, shared: %s",
                 volume, volume.project, project, shared_to_project)
        volume.project = project
        volume.shared_to_project = shared_to_project
        volume.save()
        # Save first, then commit the quota move against the updated row.
        quotas.issue_and_accept_commission(volume, action="REASSIGN",
                                           action_fields=action_fields,
                                           atomic_context=atomic_context)
    return volume
def reassign_volume(request, volume_id, args):
    """API view: reassign a volume to another project, optionally sharing."""
    req = utils.get_json_body(request)
    log.debug("User: %s, Volume: %s Action: reassign_volume, Request: %s",
              request.user_uniq, volume_id, args)

    share = args.get("shared_to_project", False)
    if share and not settings.CYCLADES_SHARED_RESOURCES_ENABLED:
        raise faults.Forbidden("Sharing resource to the members of the project"
                               " is not permitted")
    target_project = args.get("project")
    if target_project is None:
        raise faults.BadRequest("Missing 'project' attribute.")

    vol = util.get_volume(request.user_uniq, request.user_projects,
                          volume_id, for_update=True, non_deleted=True)
    # Only the owner may reassign, even if the volume is shared with them.
    if request.user_uniq != vol.userid:
        raise faults.Forbidden("Action 'reassign' is allowed only to the owner"
                               " of the volume.")
    volumes.reassign_volume(vol, target_project, share)
    log.info("User %s reassigned volume %s to project %s, shared: %s",
             request.user_uniq, vol.id, target_project, share)
    return HttpResponse(status=200)
def list_volume_metadata(request, volume_id):
    """Return all metadata key/value pairs of a volume."""
    log.debug('list_volume_meta volume_id: %s', volume_id)
    vol = util.get_volume(request.user_uniq, volume_id,
                          for_update=False, non_deleted=False)
    pairs = vol.metadata.values_list('key', 'value')
    return HttpResponse(json.dumps({"metadata": dict(pairs)}),
                        content_type="application/json", status=200)
def detach_volume(server_id, volume_id, credentials):
    """Detach a volume from a server.

    Both the VM and the volume rows are locked (``for_update=True``) before
    handing the detach over to ``server_attachments``.
    """
    user_id = credentials.userid
    vm = util.get_vm(server_id, credentials, for_update=True,
                     non_deleted=True)
    volume = get_volume(credentials, volume_id, for_update=True,
                        non_deleted=True, exception=faults.BadRequest)
    server_attachments.detach_volume(vm, volume)
    # FIX: the log message previously read "detached volume %s to VM %s"
    # (copy-pasted from the attach path); a volume is detached *from* a VM.
    log.info("User %s detached volume %s from VM %s",
             user_id, volume.id, vm.id)
def delete_volume(request, volume_id):
    """Delete a volume.  Returns 202 since deletion completes asynchronously."""
    log.debug("delete_volume volume_id: %s", volume_id)
    vol = util.get_volume(request.user_uniq, volume_id,
                          for_update=True, non_deleted=True)
    volumes.delete(vol)
    return HttpResponse(status=202)
def check_and_record(volume_id, credentials):
    """Validate a volume for snapshotting and bump its snapshot counter."""
    vol = util.get_volume(credentials, volume_id, for_update=True,
                          non_deleted=True, exception=faults.BadRequest)
    _check(vol)
    # The per-volume counter feeds the generation of unique snapshot names.
    vol.snapshot_counter += 1
    vol.save()
def list_volume_metadata(request, volume_id):
    """Return all metadata key/value pairs of a volume."""
    vol = util.get_volume(request.user_uniq, request.user_projects,
                          volume_id, for_update=False, non_deleted=False)
    pairs = vol.metadata.values_list('key', 'value')
    return HttpResponse(json.dumps({"metadata": dict(pairs)}),
                        content_type="application/json", status=200)
def detach_volume(request, server_id, volume_id):
    """API view: detach a volume from a server."""
    # FIX: the format string was missing the second '%s' placeholder, so
    # volume_id was an unconsumed argument — logging raised an internal
    # formatting error and the volume id never appeared in the log.
    log.debug("detach_volume server_id %s volume_id %s",
              server_id, volume_id)
    user_id = request.user_uniq
    vm = util.get_vm(server_id, user_id, for_update=True, non_deleted=True)
    volume = get_volume(user_id, volume_id, for_update=True,
                        non_deleted=True, exception=faults.BadRequest)
    vm = server_attachments.detach_volume(vm, volume)
    # TODO: Check volume state, send job to detach volume
    return HttpResponse(status=202)
def attach(server_id, volume_id, credentials, atomic_context=None):
    """Attach a volume to a server."""
    vm = util.get_vm(server_id, credentials, for_update=True,
                     non_deleted=True)
    vol = util.get_volume(credentials, volume_id, for_update=True,
                          non_deleted=True, exception=faults.BadRequest)
    server_attachments.attach_volume(vm, vol, atomic_context)
    return vol
def get_volume_info(request, server_id, volume_id):
    """API view: return the attachment info of a volume on a server."""
    # FIX: the format string was missing the second '%s'; volume_id was
    # passed as an extra argument, which breaks the log call.
    log.debug("get_volume_info server_id %s volume_id %s",
              server_id, volume_id)
    user_id = request.user_uniq
    vm = util.get_vm(server_id, user_id, for_update=False)
    volume = get_volume(user_id, volume_id, for_update=False,
                        non_deleted=True, exception=faults.BadRequest)
    # Verify the volume really is attached to this server.
    servers._check_attachment(vm, volume)
    attachment = volume_to_attachment(volume)
    data = json.dumps({'volumeAttachment': attachment})
    return HttpResponse(data, status=200)
def delete_volume_metadata_item(request, volume_id, key):
    """Remove a single metadata key from a volume."""
    log.debug('delete_volume_meta_item volume_id: %s, key: %s',
              volume_id, key)
    vol = util.get_volume(request.user_uniq, volume_id,
                          for_update=False, non_deleted=True)
    try:
        vol.metadata.get(key=key).delete()
    except VolumeMetadata.DoesNotExist:
        raise faults.BadRequest("Metadata key not found")
    return HttpResponse(status=200)
def reassign_volume(request, volume_id, args):
    """API view: move a volume to a different project."""
    req = utils.get_json_body(request)
    log.debug('reassign_volume volume_id: %s, request: %s', volume_id, req)
    target_project = args.get("project")
    if target_project is None:
        raise faults.BadRequest("Missing 'project' attribute.")
    vol = util.get_volume(request.user_uniq, volume_id,
                          for_update=True, non_deleted=True)
    volumes.reassign_volume(vol, target_project)
    return HttpResponse(status=200)
def attach_volume(server_id, volume_id, credentials, atomic_context=None):
    """Attach a volume to a server and log the action."""
    owner = credentials.userid
    vm = util.get_vm(server_id, credentials, for_update=True,
                     non_deleted=True)
    vol = get_volume(credentials, volume_id, for_update=True,
                     non_deleted=True, exception=faults.BadRequest)
    server_attachments.attach_volume(vm, vol, atomic_context)
    log.info("User %s attached volume %s to VM %s", owner, vol.id, vm.id)
    return vol
def create_snapshot(request):
    """Create a new Snapshot."""
    util.assert_snapshots_enabled(request)
    body = utils.get_json_body(request)
    owner = request.user_uniq
    log.debug("User: %s, Action: create_snapshot, Request: %s", owner, body)

    snapshot_args = utils.get_attribute(body, "snapshot", required=True,
                                        attr_type=dict)
    vol_id = utils.get_attribute(snapshot_args, "volume_id", required=True)
    # Lock the source volume; deleted volumes cannot be snapshotted.
    src_volume = util.get_volume(owner, request.user_projects, vol_id,
                                 for_update=True, non_deleted=True,
                                 exception=faults.BadRequest)

    meta = utils.get_attribute(snapshot_args, "metadata", required=False,
                               attr_type=dict, default={})
    display_name = utils.get_attribute(
        snapshot_args, "display_name", required=False, attr_type=basestring,
        default="Snapshot of volume '%s'" % vol_id)
    display_description = utils.get_attribute(
        snapshot_args, "display_description", required=False,
        attr_type=basestring, default="")
    # TODO: What to do with force ?
    force = utils.get_attribute(body, "force", required=False, attr_type=bool,
                                default=False)

    snapshot = snapshots.create(user_id=owner, volume=src_volume,
                                name=display_name,
                                description=display_description,
                                metadata=meta, force=force)
    log.info("User %s created snapshot %s", owner, snapshot["id"])

    # Render response
    payload = json.dumps(
        dict(snapshot=snapshot_to_dict(snapshot, detail=False)))
    return HttpResponse(payload, status=202)
def get_volume_info(request, server_id, volume_id):
    """API view: return the attachment info of a volume on a server."""
    creds = request.credentials
    vm = util.get_vm(server_id, creds, for_update=False)
    vol = get_volume(creds, volume_id, for_update=False,
                     non_deleted=True, exception=faults.BadRequest)
    # Ensure the volume is actually attached to this server.
    server_attachments._check_attachment(vm, vol)
    payload = json.dumps({'volumeAttachment': volume_to_attachment(vol)})
    return HttpResponse(payload, status=200)
def update_volume_metadata(request, volume_id, reset=False):
    """Update a volume's metadata.

    With ``reset=True`` the existing metadata are wiped and replaced by the
    request body; otherwise the request is merged into the existing items
    (updating keys that exist, creating those that do not).  The total number
    of items is capped by ``settings.CYCLADES_VOLUME_MAX_METADATA``.
    """
    req = utils.get_json_body(request)
    log.debug("User: %s, Volume: %s Action: update_metadata, Request: %s",
              request.user_uniq, volume_id, req)
    meta_dict = utils.get_attribute(req, "metadata", required=True,
                                    attr_type=dict)
    # Validate lengths before touching the DB so we fail early.
    for key, value in meta_dict.items():
        check_name_length(key, VolumeMetadata.KEY_LENGTH,
                          "Metadata key is too long.")
        check_name_length(value, VolumeMetadata.VALUE_LENGTH,
                          "Metadata value is too long.")
    volume = util.get_volume(request.user_uniq, request.user_projects,
                             volume_id, for_update=True, non_deleted=True)
    if reset:
        if len(meta_dict) > settings.CYCLADES_VOLUME_MAX_METADATA:
            raise faults.BadRequest("Volumes cannot have more than %s metadata"
                                    " items" %
                                    settings.CYCLADES_VOLUME_MAX_METADATA)
        # Replace wholesale: drop everything, then recreate from the request.
        volume.metadata.all().delete()
        for key, value in meta_dict.items():
            volume.metadata.create(key=key, value=value)
    else:
        # Projected total = new items + existing items - overlapping keys
        # (overlaps are updates, not additions).
        if len(meta_dict) + len(volume.metadata.all()) - \
           len(volume.metadata.all().filter(key__in=meta_dict.keys())) > \
           settings.CYCLADES_VOLUME_MAX_METADATA:
            raise faults.BadRequest("Volumes cannot have more than %s metadata"
                                    " items" %
                                    settings.CYCLADES_VOLUME_MAX_METADATA)
        for key, value in meta_dict.items():
            try:
                # Update existing metadata
                meta = volume.metadata.get(key=key)
                meta.value = value
                meta.save()
            except VolumeMetadata.DoesNotExist:
                # Or create a new one
                volume.metadata.create(key=key, value=value)
    log.info("User %s updated metadata for volume %s",
             request.user_uniq, volume.id)
    metadata = volume.metadata.values_list('key', 'value')
    data = json.dumps({"metadata": dict(metadata)})
    return HttpResponse(data, content_type="application/json", status=200)
def detach(volume_id, credentials, atomic_context=None):
    """Detach a Volume.

    The volume is deliberately fetched twice: first without a row lock, only
    to discover which server it is attached to; then the VM is locked, and
    the volume is re-fetched *with* a lock and re-checked.  This ordering
    (VM before volume) avoids lock inversion against the attach path, and the
    second check guards against a concurrent detach between the two fetches.
    """
    # Unlocked read: just learn the attachment, if any.
    volume = util.get_volume(credentials, volume_id, for_update=False,
                             non_deleted=True, exception=faults.BadRequest)
    server_id = volume.machine_id
    if server_id is None:
        raise faults.BadRequest("Volume is already detached")
    # Lock the VM first, then the volume.
    vm = util.get_vm(server_id, credentials, for_update=True,
                     non_deleted=True)
    volume = util.get_volume(credentials, volume_id, for_update=True,
                             non_deleted=True, exception=faults.BadRequest)
    # Re-check under the lock: someone may have detached it meanwhile.
    server_id = volume.machine_id
    if server_id is None:
        raise faults.BadRequest("Volume is already detached")
    server_attachments.detach_volume(vm, volume)
    log.info("Detaching volume '%s' from server '%s', job: %s",
             volume.id, server_id, volume.backendjobid)
    return volume
def delete_volume(request, volume_id):
    """Delete a volume.  Returns 202 since deletion completes asynchronously."""
    log.debug("User: %s, Volume: %s Action: delete_volume",
              request.user_uniq, volume_id)
    vol = util.get_volume(request.user_uniq, request.user_projects,
                          volume_id, for_update=True, non_deleted=True)
    volumes.delete(vol)
    log.info("User %s deleted volume %s", request.user_uniq, vol.id)
    return HttpResponse(status=202)
def detach_volume(request, server_id, volume_id):
    """API view: detach a volume from a server."""
    log.debug("User %s, VM: %s, Action: detach_volume, Volume: %s",
              request.user_uniq, server_id, volume_id)
    user_id = request.user_uniq
    vm = util.get_vm(server_id, user_id, request.user_projects,
                     for_update=True, non_deleted=True)
    volume = get_volume(user_id, request.user_projects, volume_id,
                        for_update=True, non_deleted=True,
                        exception=faults.BadRequest)
    vm = server_attachments.detach_volume(vm, volume)
    # FIX: the log message previously read "detached volume %s to VM %s"
    # (copy-pasted from the attach path); a volume is detached *from* a VM.
    log.info("User %s detached volume %s from VM %s",
             user_id, volume.id, vm.id)
    # TODO: Check volume state, send job to detach volume
    return HttpResponse(status=202)
def update_volume_metadata(request, volume_id, reset=False):
    """Update a volume's metadata (credentials-based variant).

    With ``reset=True`` the existing metadata are wiped and replaced by the
    request body; otherwise the request is merged into the existing items.
    The total number of items is capped by
    ``settings.CYCLADES_VOLUME_MAX_METADATA``.
    """
    credentials = request.credentials
    req = utils.get_json_body(request)
    log.debug("User: %s, Volume: %s Action: update_metadata, Request: %s",
              credentials.userid, volume_id, req)
    meta_dict = utils.get_attribute(req, "metadata", required=True,
                                    attr_type=dict)
    # Validate lengths before touching the DB so we fail early.
    for key, value in meta_dict.items():
        check_name_length(key, VolumeMetadata.KEY_LENGTH,
                          "Metadata key is too long.")
        check_name_length(value, VolumeMetadata.VALUE_LENGTH,
                          "Metadata value is too long.")
    volume = util.get_volume(request.credentials, volume_id,
                             for_update=True, non_deleted=True)
    if reset:
        if len(meta_dict) > settings.CYCLADES_VOLUME_MAX_METADATA:
            raise faults.BadRequest("Volumes cannot have more than %s metadata"
                                    " items" %
                                    settings.CYCLADES_VOLUME_MAX_METADATA)
        # Replace wholesale: drop everything, then recreate from the request.
        volume.metadata.all().delete()
        for key, value in meta_dict.items():
            volume.metadata.create(key=key, value=value)
    else:
        # Projected total = new items + existing items - overlapping keys
        # (overlaps are updates, not additions).
        if len(meta_dict) + len(volume.metadata.all()) - \
           len(volume.metadata.all().filter(key__in=meta_dict.keys())) > \
           settings.CYCLADES_VOLUME_MAX_METADATA:
            raise faults.BadRequest("Volumes cannot have more than %s metadata"
                                    " items" %
                                    settings.CYCLADES_VOLUME_MAX_METADATA)
        for key, value in meta_dict.items():
            try:
                # Update existing metadata
                meta = volume.metadata.get(key=key)
                meta.value = value
                meta.save()
            except VolumeMetadata.DoesNotExist:
                # Or create a new one
                volume.metadata.create(key=key, value=value)
    log.info("User %s updated metadata for volume %s",
             credentials.userid, volume.id)
    metadata = volume.metadata.values_list('key', 'value')
    data = json.dumps({"metadata": dict(metadata)})
    return HttpResponse(data, content_type="application/json", status=200)
def delete_volume_metadata_item(request, volume_id, key):
    """Remove a single metadata key from a volume."""
    creds = request.credentials
    log.debug("User: %s, Volume: %s Action: delete_metadata, Key: %s",
              creds.userid, volume_id, key)
    vol = util.get_volume(request.credentials, volume_id,
                          for_update=False, non_deleted=True)
    try:
        vol.metadata.get(key=key).delete()
    except VolumeMetadata.DoesNotExist:
        raise faults.BadRequest("Metadata key not found")
    log.info("User %s deleted metadata for volume %s", creds.userid, vol.id)
    return HttpResponse(status=200)
def attach_volume(request, server_id):
    """API view: attach a volume to a server."""
    req = utils.get_json_body(request)
    # FIX: the format string was missing the second '%s' placeholder, so
    # the request body was an unconsumed argument and broke the log call.
    log.debug("attach_volume server_id %s request %s", server_id, req)
    user_id = request.user_uniq
    vm = util.get_vm(server_id, user_id, for_update=True, non_deleted=True)

    attachment_dict = api.utils.get_attribute(req, "volumeAttachment",
                                              required=True)
    # Get volume
    volume_id = api.utils.get_attribute(attachment_dict, "volumeId")
    volume = get_volume(user_id, volume_id, for_update=True,
                        non_deleted=True, exception=faults.BadRequest)
    vm = server_attachments.attach_volume(vm, volume)
    attachment = volume_to_attachment(volume)
    data = json.dumps({'volumeAttachment': attachment})
    return HttpResponse(data, status=202)
def update(volume_id, name=None, description=None, delete_on_termination=None,
           credentials=None):
    """Update a volume's name, description and/or delete_on_termination flag.

    Attributes passed as None are left untouched.  Returns the saved volume.
    """
    vol = util.get_volume(credentials, volume_id, for_update=True,
                          non_deleted=True)
    if name is not None:
        utils.check_name_length(name, Volume.NAME_LENGTH,
                                "Volume name is too long")
        vol.name = name
    if description is not None:
        utils.check_name_length(description, Volume.DESCRIPTION_LENGTH,
                                "Volume description is too long")
        vol.description = description
    if delete_on_termination is not None:
        # The flag must be compatible with the volume's type.
        validate_volume_termination(vol.volume_type, delete_on_termination)
        vol.delete_on_termination = delete_on_termination
    vol.save()
    return vol
def delete(volume_id, credentials, atomic_context=None):
    """Delete a Volume.

    The canonical way of deleting a volume is to send a command to Ganeti to
    remove the volume from a specific server. There are two cases however when
    a volume may not be attached to a server:
    * Case 1: The volume has been created only in DB and was never attached to
      a server. In this case, we can simply mark the volume as deleted without
      using Ganeti to do so.
    * Case 2: The volume has been detached from a VM. This means that there
      are still data in the storage backend. Thus, in order to delete the
      volume safely, we must attach it to a helper VM, thereby handing the
      delete action to the dispatcher.
    """
    volume = util.get_volume(credentials, volume_id,
                             for_update=True, non_deleted=True)
    server_id = volume.machine_id
    if server_id is not None:
        # Canonical path: volume is attached; let Ganeti remove it from the
        # server via the attachments layer.
        server = get_vm(server_id)
        server_attachments.delete_volume(server, volume, atomic_context)
        log.info("Deleting volume '%s' from server '%s', job: %s",
                 volume.id, server_id, volume.backendjobid)
    elif volume.backendjobid is None:
        # Case 1: Uninitialized volume
        if volume.status not in ("AVAILABLE", "ERROR"):
            raise faults.BadRequest("Volume is in invalid state: %s" %
                                    volume.status)
        log.debug("Attempting to delete uninitialized volume %s.", volume)
        # DB-only volume: mark deleted immediately and release its quota.
        util.mark_volume_as_deleted(volume, immediate=True)
        quotas.issue_and_accept_commission(volume, action="DESTROY",
                                           atomic_context=atomic_context)
        log.info("Deleting uninitialized volume '%s'", volume.id)
    else:
        # Case 2: Detached volume
        log.debug("Attempting to delete detached volume %s", volume)
        # Data still exist in the backend; hand off to a helper VM so the
        # dispatcher can perform the actual deletion.
        delete_detached_volume(volume, atomic_context)
        log.info("Deleting volume '%s' from helper server '%s', job: %s",
                 volume.id, volume.machine.id, volume.backendjobid)
    return volume
def attach_volume(request, server_id):
    """API view: attach a volume to a server."""
    body = utils.get_json_body(request)
    owner = request.user_uniq
    log.debug("User %s, VM: %s, Action: attach_volume, Request: %s",
              request.user_uniq, server_id, body)
    vm = util.get_vm(server_id, owner, request.user_projects,
                     for_update=True, non_deleted=True)

    attachment_args = api.utils.get_attribute(body, "volumeAttachment",
                                              required=True)
    # Get volume
    vol_id = api.utils.get_attribute(attachment_args, "volumeId")
    vol = get_volume(owner, request.user_projects, vol_id, for_update=True,
                     non_deleted=True, exception=faults.BadRequest)
    vm = server_attachments.attach_volume(vm, vol)
    payload = json.dumps({'volumeAttachment': volume_to_attachment(vol)})
    log.info("User %s attached volume %s to VM %s", owner, vol.id, vm.id)
    return HttpResponse(payload, status=202)
def get_volume(request, volume_id):
    """Return the details of a single volume (deleted ones included)."""
    vol = util.get_volume(request.credentials, volume_id, non_deleted=False)
    payload = json.dumps({'volume': volume_to_dict(vol, detail=True)})
    return HttpResponse(payload, content_type="application/json", status=200)
def create(userid, name, password, flavor, image_id, metadata={},
           personality=[], networks=None, use_backend=None, project=None,
           volumes=None, helper=False, user_projects=None,
           shared_to_project=False):
    """Create a new server (DB records, ports, volumes, then Ganeti job).

    NOTE(review): ``metadata={}`` and ``personality=[]`` are mutable default
    arguments — safe only as long as no caller/callee mutates them; consider
    switching to ``None`` sentinels.
    """
    utils.check_name_length(name, VirtualMachine.VIRTUAL_MACHINE_NAME_LENGTH,
                            "Server name is too long")

    # Get the image, if any, that is used for the first volume
    vol_image_id = None
    if volumes:
        vol = volumes[0]
        if vol["source_type"] in ["image", "snapshot"]:
            vol_image_id = vol["source_uuid"]

    # Check conflict between server's and volume's image
    if image_id and vol_image_id and image_id != vol_image_id:
        raise faults.BadRequest("The specified server's image is different"
                                " from the the source of the first volume.")
    elif vol_image_id and not image_id:
        # No explicit server image: inherit it from the first volume.
        image_id = vol_image_id
    elif not image_id:
        raise faults.BadRequest("You need to specify either an image or a"
                                " block device mapping.")

    if len(metadata) > settings.CYCLADES_VM_MAX_METADATA:
        raise faults.BadRequest("Virtual Machines cannot have more than %s "
                                "metadata items" %
                                settings.CYCLADES_VM_MAX_METADATA)
    # Get image info
    image = util.get_image_dict(image_id, userid)

    if not volumes:
        # If no volumes are specified, we automatically create a volume with
        # the size of the flavor and filled with the specified image.
        volumes = [{"source_type": "image",
                    "source_uuid": image_id,
                    "size": flavor.disk,
                    "delete_on_termination": True}]

    assert(len(volumes) > 0), "Cannot create server without volumes"

    if volumes[0]["source_type"] == "blank":
        raise faults.BadRequest("Root volume cannot be blank")

    try:
        # Cache a local copy of the image record for bookkeeping purposes.
        is_system = (image["owner"] == settings.SYSTEM_IMAGES_OWNER)
        img, created = Image.objects.get_or_create(uuid=image["id"],
                                                   version=image["version"])
        if created:
            img.owner = image["owner"]
            img.name = image["name"]
            img.location = image["location"]
            img.mapfile = image["mapfile"]
            img.is_public = image["is_public"]
            img.is_snapshot = image["is_snapshot"]
            img.is_system = is_system
            img.os = image["metadata"].get("OS", "unknown")
            img.osfamily = image["metadata"].get("OSFAMILY", "unknown")
            img.save()
    except Exception as e:
        # Image info is not critical. Continue if it fails for any reason
        log.warning("Failed to store image info: %s", e)

    if use_backend is None:
        # Allocate server to a Ganeti backend
        use_backend = allocate_new_server(userid, flavor)

    # Create the ports for the server
    ports = create_instance_ports(userid, user_projects, networks)

    if project is None:
        project = userid

    # We must save the VM instance now, so that it gets a valid
    # vm.backend_vm_id.
    vm = VirtualMachine.objects.create(name=name,
                                       backend=use_backend,
                                       userid=userid,
                                       project=project,
                                       shared_to_project=shared_to_project,
                                       imageid=image["id"],
                                       image_version=image["version"],
                                       flavor=flavor,
                                       operstate="BUILD",
                                       helper=helper)
    log.info("Created entry in DB for VM '%s'", vm)

    # Associate the ports with the server
    for index, port in enumerate(ports):
        associate_port_with_machine(port, vm)
        port.index = index
        port.save()

    # Create instance volumes
    server_vtype = flavor.volume_type
    server_volumes = []
    for index, vol_info in enumerate(volumes):
        if vol_info["source_type"] == "volume":
            # Pre-existing volume: lock it and validate type/state.
            uuid = vol_info["source_uuid"]
            v = get_volume(userid, user_projects, uuid, for_update=True,
                           non_deleted=True, exception=faults.BadRequest)
            if v.volume_type_id != server_vtype.id:
                msg = ("Volume '%s' has type '%s' while flavor's volume type"
                       " is '%s'" % (v.id, v.volume_type_id, server_vtype.id))
                raise faults.BadRequest(msg)
            if v.status != "AVAILABLE":
                raise faults.BadRequest("Cannot use volume while it is in %s"
                                        " status" % v.status)
            v.delete_on_termination = vol_info["delete_on_termination"]
        else:
            # Fresh volume created from the block-device-mapping entry.
            v = _create_volume(user_id=userid, volume_type=server_vtype,
                               project=project, index=index,
                               shared_to_project=shared_to_project,
                               **vol_info)
        assign_volume_to_server(vm, v, index=index)
        server_volumes.append(v)

    # Create instance metadata
    for key, val in metadata.items():
        utils.check_name_length(key, VirtualMachineMetadata.KEY_LENGTH,
                                "Metadata key is too long")
        utils.check_name_length(val, VirtualMachineMetadata.VALUE_LENGTH,
                                "Metadata value is too long")
        VirtualMachineMetadata.objects.create(
            meta_key=key,
            meta_value=val,
            vm=vm)

    # Create the server in Ganeti.
    vm = create_server(vm, ports, server_volumes, flavor, image, personality,
                       password)

    return vm
def do_create(user_id, volume_id, name, description, metadata, force=False,
              credentials=None):
    """Register a snapshot of a volume and enqueue the Ganeti snapshot job.

    NOTE(review): ``metadata`` (the caller's dict) is mutated in place by the
    ``update`` calls below — confirm callers do not reuse it.
    """
    volume = util.get_volume(credentials, volume_id, for_update=True,
                             non_deleted=True, exception=faults.BadRequest)
    _check(volume)

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume
    # from the server that the volume is attached
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                                 .filter(meta_key__in=["OS", "users"])
                                 .values_list("meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
def do_create(user_id, volume_id, name, description, metadata, force=False,
              credentials=None):
    """Register a snapshot of a volume and enqueue the Ganeti snapshot job.

    NOTE(review): this appears to duplicate another ``do_create`` in this
    file with only formatting differences — candidate for consolidation.
    NOTE(review): ``metadata`` (the caller's dict) is mutated in place by the
    ``update`` calls below — confirm callers do not reuse it.
    """
    volume = util.get_volume(credentials, volume_id, for_update=True,
                             non_deleted=True, exception=faults.BadRequest)
    _check(volume)

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume
    # from the server that the volume is attached
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                    "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name, mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
def _db_create_server(
        credentials, name, flavor, image, metadata, networks, use_backend,
        project, volumes, helper, shared_to_project, key_names,
        atomic_context=None):
    """Create all DB objects for a new server.

    Creates the VirtualMachine row (with its rescue properties), the
    network ports and the volumes, stores the instance metadata and issues
    the "BUILD" quota commission.

    Returns a tuple:
        (vm id, list of port ids, list of volume ids,
         {volume id: origin size}).

    Raises faults.BadRequest for volume type/status mismatches.
    """
    rescue_properties = RescueProperties()
    try:
        # NOTE(review): these assignments were previously swapped
        # (os <- "OSFAMILY", os_family <- "OS").  Fixed to agree with the
        # Image bookkeeping in create(), where img.os is taken from the
        # "OS" key and img.osfamily from "OSFAMILY".
        rescue_properties.os = image["metadata"].get("OS", '')
        rescue_properties.os_family = image["metadata"].get("OSFAMILY", '')
    except KeyError as e:
        # Image info is best-effort; log and continue with empty values.
        # (Typo fixed: "iamge" -> "image".)
        log.error("Failed to parse image info: %s", e)
    rescue_properties.save()

    # Create the ports for the server
    ports = create_instance_ports(credentials, networks)

    # We must save the VM instance now, so that it gets a valid
    # vm.backend_vm_id.
    vm = VirtualMachine.objects.create(name=name,
                                       backend=use_backend,
                                       userid=credentials.userid,
                                       project=project,
                                       shared_to_project=shared_to_project,
                                       imageid=image["id"],
                                       image_version=image["version"],
                                       key_names=json.dumps(key_names),
                                       flavor=flavor,
                                       operstate="BUILD",
                                       rescue_properties=rescue_properties,
                                       helper=helper)
    log.info("Created entry in DB for VM '%s'", vm)

    # Associate the ports with the server
    for index, port in enumerate(ports):
        associate_port_with_machine(port, vm)
        port.index = index
        port.save()

    # Create instance volumes
    server_vtype = flavor.volume_type
    server_volumes = []
    for index, vol_info in enumerate(volumes):
        if vol_info["source_type"] == "volume":
            # Attach an existing volume: it must match the flavor's volume
            # type and be available.
            uuid = vol_info["source_uuid"]
            v = get_volume(credentials, uuid, for_update=True,
                           non_deleted=True, exception=faults.BadRequest)
            if v.volume_type_id != server_vtype.id:
                msg = ("Volume '%s' has type '%s' while flavor's volume type"
                       " is '%s'" % (v.id, v.volume_type_id, server_vtype.id))
                raise faults.BadRequest(msg)
            if v.status != "AVAILABLE":
                raise faults.BadRequest("Cannot use volume while it is in %s"
                                        " status" % v.status)
            v.delete_on_termination = vol_info["delete_on_termination"]
        else:
            # Create a fresh volume for this server.
            v = _create_volume(user_id=credentials.userid,
                               volume_type=server_vtype,
                               project=project, index=index,
                               shared_to_project=shared_to_project,
                               **vol_info)
        assign_volume_to_server(vm, v, index=index)
        server_volumes.append(v)

    # Create instance metadata
    for key, val in metadata.items():
        utils.check_name_length(key, VirtualMachineMetadata.KEY_LENGTH,
                                "Metadata key is too long")
        utils.check_name_length(val, VirtualMachineMetadata.VALUE_LENGTH,
                                "Metadata value is too long")
        VirtualMachineMetadata.objects.create(
            meta_key=key,
            meta_value=val,
            vm=vm)

    # Account for the new server against the project's quota.
    quotas.issue_and_accept_commission(vm, action="BUILD",
                                       atomic_context=atomic_context)

    return (vm.id,
            [port.id for port in ports],
            [volume.id for volume in server_volumes],
            {v.id: v.origin_size for v in server_volumes})
def create(userid, name, password, flavor, image_id, metadata=None,
           personality=None, networks=None, use_backend=None, project=None,
           volumes=None):
    """Create a new server (VM) for `userid`.

    Resolves the image from either `image_id` or the first volume's
    source, validates the requested volumes, creates the DB objects
    (VM, ports, volumes, metadata) and finally submits the creation to
    the Ganeti backend via create_server().

    Arguments:
        userid: owner of the new server.
        name: display name (length-checked).
        password: root password forwarded to create_server().
        flavor: flavor object; its volume_type is enforced on all volumes.
        image_id: image to boot from; may be None when the first volume
            supplies an image/snapshot source.
        metadata: optional dict of instance metadata (default: empty).
        personality: optional list of personality files (default: empty).
        networks: forwarded to create_instance_ports().
        use_backend: Ganeti backend; auto-allocated when None.
        project: quota project; defaults to userid.
        volumes: optional block-device mappings; when None a root volume
            of the flavor's disk size is created from the image.

    Returns the created VirtualMachine.
    Raises faults.BadRequest on conflicting/missing image or bad volumes.
    """
    # Avoid the mutable-default-argument pitfall: fresh objects per call.
    if metadata is None:
        metadata = {}
    if personality is None:
        personality = []

    utils.check_name_length(name, VirtualMachine.VIRTUAL_MACHINE_NAME_LENGTH,
                            "Server name is too long")

    # Get the image, if any, that is used for the first volume
    vol_image_id = None
    if volumes:
        vol = volumes[0]
        if vol["source_type"] in ["image", "snapshot"]:
            vol_image_id = vol["source_uuid"]

    # Check conflict between server's and volume's image
    if image_id and vol_image_id and image_id != vol_image_id:
        # (Typo fixed in message: "the the" -> "the".)
        raise faults.BadRequest("The specified server's image is different"
                                " from the source of the first volume.")
    elif vol_image_id and not image_id:
        image_id = vol_image_id
    elif not image_id:
        raise faults.BadRequest("You need to specify either an image or a"
                                " block device mapping.")

    if len(metadata) > settings.CYCLADES_VM_MAX_METADATA:
        raise faults.BadRequest("Virtual Machines cannot have more than %s "
                                "metadata items" %
                                settings.CYCLADES_VM_MAX_METADATA)

    # Get image info
    image = util.get_image_dict(image_id, userid)

    if not volumes:
        # If no volumes are specified, we automatically create a volume with
        # the size of the flavor and filled with the specified image.
        volumes = [{"source_type": "image",
                    "source_uuid": image_id,
                    "size": flavor.disk,
                    "delete_on_termination": True}]
    assert (len(volumes) > 0), "Cannot create server without volumes"

    if volumes[0]["source_type"] == "blank":
        raise faults.BadRequest("Root volume cannot be blank")

    try:
        is_system = (image["owner"] == settings.SYSTEM_IMAGES_OWNER)
        img, created = Image.objects.get_or_create(uuid=image["id"],
                                                   version=image["version"])
        if created:
            img.owner = image["owner"]
            img.name = image["name"]
            img.location = image["location"]
            img.mapfile = image["mapfile"]
            img.is_public = image["is_public"]
            img.is_snapshot = image["is_snapshot"]
            img.is_system = is_system
            img.os = image["metadata"].get("OS", "unknown")
            img.osfamily = image["metadata"].get("OSFAMILY", "unknown")
            img.save()
    except Exception as e:
        # Image info is not critical. Continue if it fails for any reason
        log.warning("Failed to store image info: %s", e)

    if use_backend is None:
        # Allocate server to a Ganeti backend
        use_backend = allocate_new_server(userid, flavor)

    # Create the ports for the server
    ports = create_instance_ports(userid, networks)

    if project is None:
        project = userid

    # We must save the VM instance now, so that it gets a valid
    # vm.backend_vm_id.
    vm = VirtualMachine.objects.create(name=name,
                                       backend=use_backend,
                                       userid=userid,
                                       project=project,
                                       imageid=image["id"],
                                       image_version=image["version"],
                                       flavor=flavor,
                                       operstate="BUILD")
    log.info("Created entry in DB for VM '%s'", vm)

    # Associate the ports with the server
    for index, port in enumerate(ports):
        associate_port_with_machine(port, vm)
        port.index = index
        port.save()

    # Create instance volumes
    server_vtype = flavor.volume_type
    server_volumes = []
    for index, vol_info in enumerate(volumes):
        if vol_info["source_type"] == "volume":
            # Attach an existing volume: it must match the flavor's volume
            # type and be available.
            uuid = vol_info["source_uuid"]
            v = get_volume(userid, uuid, for_update=True, non_deleted=True,
                           exception=faults.BadRequest)
            if v.volume_type_id != server_vtype.id:
                msg = ("Volume '%s' has type '%s' while flavor's volume type"
                       " is '%s'" % (v.id, v.volume_type_id, server_vtype.id))
                raise faults.BadRequest(msg)
            if v.status != "AVAILABLE":
                raise faults.BadRequest("Cannot use volume while it is in %s"
                                        " status" % v.status)
            v.delete_on_termination = vol_info["delete_on_termination"]
            v.machine = vm
            v.index = index
            v.save()
        else:
            # Create a fresh volume for this server.
            v = _create_volume(server=vm, user_id=userid,
                               volume_type=server_vtype, project=project,
                               index=index, **vol_info)
        server_volumes.append(v)

    # Create instance metadata
    for key, val in metadata.items():
        utils.check_name_length(key, VirtualMachineMetadata.KEY_LENGTH,
                                "Metadata key is too long")
        utils.check_name_length(val, VirtualMachineMetadata.VALUE_LENGTH,
                                "Metadata value is too long")
        VirtualMachineMetadata.objects.create(meta_key=key,
                                              meta_value=val,
                                              vm=vm)

    # Create the server in Ganeti.
    vm = create_server(vm, ports, server_volumes, flavor, image, personality,
                       password)

    return vm
def _db_create_server(
        credentials, name, flavor, image, metadata, networks, use_backend,
        project, volumes, helper, shared_to_project, key_names,
        atomic_context=None):
    """Create all DB objects for a new server.

    Creates the VirtualMachine row (with its rescue properties), the
    network ports and the volumes, stores the instance metadata and issues
    the "BUILD" quota commission.

    Returns a tuple:
        (vm id, list of port ids, list of volume ids,
         {volume id: origin size}).

    Raises faults.BadRequest for volume type/status mismatches.
    """
    rescue_properties = RescueProperties()
    try:
        # NOTE(review): these assignments were previously swapped
        # (os <- "OSFAMILY", os_family <- "OS").  Fixed to agree with the
        # Image bookkeeping in create(), where img.os is taken from the
        # "OS" key and img.osfamily from "OSFAMILY".
        rescue_properties.os = image["metadata"].get("OS", '')
        rescue_properties.os_family = image["metadata"].get("OSFAMILY", '')
    except KeyError as e:
        # Image info is best-effort; log and continue with empty values.
        # (Typo fixed: "iamge" -> "image".)
        log.error("Failed to parse image info: %s", e)
    rescue_properties.save()

    # Create the ports for the server
    ports = create_instance_ports(credentials, networks)

    # We must save the VM instance now, so that it gets a valid
    # vm.backend_vm_id.
    vm = VirtualMachine.objects.create(name=name,
                                       backend=use_backend,
                                       userid=credentials.userid,
                                       project=project,
                                       shared_to_project=shared_to_project,
                                       imageid=image["id"],
                                       image_version=image["version"],
                                       key_names=json.dumps(key_names),
                                       flavor=flavor,
                                       operstate="BUILD",
                                       rescue_properties=rescue_properties,
                                       helper=helper)
    log.info("Created entry in DB for VM '%s'", vm)

    # Associate the ports with the server
    for index, port in enumerate(ports):
        associate_port_with_machine(port, vm)
        port.index = index
        port.save()

    # Create instance volumes
    server_vtype = flavor.volume_type
    server_volumes = []
    for index, vol_info in enumerate(volumes):
        if vol_info["source_type"] == "volume":
            # Attach an existing volume: it must match the flavor's volume
            # type and be available.
            uuid = vol_info["source_uuid"]
            v = get_volume(credentials, uuid, for_update=True,
                           non_deleted=True, exception=faults.BadRequest)
            if v.volume_type_id != server_vtype.id:
                msg = ("Volume '%s' has type '%s' while flavor's volume type"
                       " is '%s'" % (v.id, v.volume_type_id, server_vtype.id))
                raise faults.BadRequest(msg)
            if v.status != "AVAILABLE":
                raise faults.BadRequest("Cannot use volume while it is in %s"
                                        " status" % v.status)
            v.delete_on_termination = vol_info["delete_on_termination"]
        else:
            # Create a fresh volume for this server.
            v = _create_volume(user_id=credentials.userid,
                               volume_type=server_vtype,
                               project=project, index=index,
                               shared_to_project=shared_to_project,
                               **vol_info)
        assign_volume_to_server(vm, v, index=index)
        server_volumes.append(v)

    # Create instance metadata
    for key, val in metadata.items():
        utils.check_name_length(key, VirtualMachineMetadata.KEY_LENGTH,
                                "Metadata key is too long")
        utils.check_name_length(val, VirtualMachineMetadata.VALUE_LENGTH,
                                "Metadata value is too long")
        VirtualMachineMetadata.objects.create(meta_key=key,
                                              meta_value=val,
                                              vm=vm)

    # Account for the new server against the project's quota.
    quotas.issue_and_accept_commission(vm, action="BUILD",
                                       atomic_context=atomic_context)

    return (vm.id,
            [port.id for port in ports],
            [volume.id for volume in server_volumes],
            {v.id: v.origin_size for v in server_volumes})