def detach_volume(vm, volume):
    """Detach a Volume from a VM

    The volume must be in 'IN_USE' or 'ERROR' status in order to be
    detached. Also, the root volume of the instance (index=0) can not be
    detached. This function will send the corresponding job to the Ganeti
    backend and update the status of the volume to 'DETACHING'.

    """
    util.assert_detachable_volume_type(volume.volume_type)
    _check_attachment(vm, volume)
    if volume.status not in ["IN_USE", "ERROR"]:
        raise faults.BadRequest("Cannot detach volume while volume is in"
                                " '%s' status." % volume.status)
    if volume.index == 0:
        raise faults.BadRequest("Cannot detach the root volume of server %s."
                                % vm)

    with commands.ServerCommand("DETACH_VOLUME", vm):
        jobid = backend.detach_volume(vm, volume)
        vm.record_job(jobid)
        log.info("Detached volume '%s' from server '%s'. JobID: '%s'",
                 volume.id, volume.machine_id, jobid)
        volume.backendjobid = jobid
        volume.status = "DETACHING"
        volume.save()


def delete_volume(vm, volume, atomic_context):
    """Delete an attached volume and update its status

    The volume must be in 'IN_USE' or 'ERROR' status in order to be
    deleted. Also, the root volume of the instance (index=0) can not be
    deleted. This function will send the corresponding job to the Ganeti
    backend and update the status of the volume to 'DELETING'.

    """
    _check_attachment(vm, volume)
    if volume.status not in ["IN_USE", "ERROR"]:
        raise faults.BadRequest("Cannot delete volume while volume is in"
                                " '%s' status." % volume.status)
    if volume.index == 0:
        raise faults.BadRequest("Cannot delete the root volume of server %s."
                                % vm)

    action_fields = {"disks": [("remove", volume, {})]}
    with commands.ServerCommand("DELETE_VOLUME", vm,
                                atomic_context=atomic_context,
                                action_fields=action_fields,
                                for_user=volume.userid):
        jobid = backend.delete_volume(vm, volume)
        vm.record_job(jobid)
        log.info("Deleted volume '%s' from server '%s'. JobID: '%s'",
                 volume.id, volume.machine_id, jobid)
        volume.backendjobid = jobid
        util.mark_volume_as_deleted(volume)


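# Illustrative sketch (not part of this module's real code path): how a
# caller, e.g. the volumes API layer, might use detach_volume() and
# delete_volume() above. The arguments `vm`, `volume` and `atomic_context`
# are assumed to be obtained elsewhere by that caller.
def _example_remove_data_volume(vm, volume, atomic_context):
    # Both helpers require the volume to be attached to `vm`, in 'IN_USE'
    # or 'ERROR' status, and not the root volume (index 0). Either detach
    # the volume, keeping it around (status -> 'DETACHING') ...
    detach_volume(vm, volume)
    # ... or, alternatively, delete it outright while still attached
    # (status -> 'DELETING'):
    # delete_volume(vm, volume, atomic_context)

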
def unrescue(server_id, credentials=None):
    with commands.ServerCommand("UNRESCUE", server_id,
                                credentials=credentials) as vm:
        log.info("Unrescuing VM %s", vm)
        job_id = backend.unrescue_instance(vm)
        vm.record_job(job_id)
        return vm


def resize(server_id, flavor_id, credentials=None, atomic_context=None):
    vm = util.get_vm(server_id, credentials, for_update=True,
                     non_deleted=True, non_suspended=True)
    flavor = util.get_flavor(flavor_id, credentials, include_deleted=False,
                             for_project=vm.project)
    action_fields = {"beparams": {"vcpus": flavor.cpu,
                                  "maxmem": flavor.ram}}
    with commands.ServerCommand("RESIZE", server_id, credentials,
                                atomic_context,
                                action_fields=action_fields) as vm:
        old_flavor = vm.flavor
        # User requested the same flavor
        if old_flavor.id == flavor.id:
            raise faults.BadRequest("Server '%s' flavor is already '%s'."
                                    % (vm, flavor))
        # Check that resize can be performed
        if old_flavor.disk != flavor.disk:
            raise faults.BadRequest("Cannot change instance's disk size.")
        if old_flavor.volume_type_id != flavor.volume_type_id:
            raise faults.BadRequest("Cannot change instance's volume type.")

        log.info("Resizing VM from flavor '%s' to '%s'", old_flavor, flavor)
        job_id = backend.resize_instance(vm, vcpus=flavor.cpu,
                                         memory=flavor.ram)
        vm.record_job(job_id)
        return vm


def start(server_id, credentials, atomic_context=None):
    with commands.ServerCommand("START", server_id, credentials,
                                atomic_context) as vm:
        log.info("Starting VM %s", vm)
        job_id = backend.startup_instance(vm)
        vm.record_job(job_id)
        return vm


def connect_port(vm, network, port):
    with commands.ServerCommand("CONNECT", vm):
        associate_port_with_machine(port, vm)
        log.info("Creating NIC %s with IPv4 Address %s",
                 port, port.ipv4_address)
        job_id = backend.connect_to_network(vm, port)
        vm.record_job(job_id)
        return vm


def unsuspend(server_id, credentials=None, atomic_context=None):
    if not credentials.is_admin:
        raise faults.Forbidden("Cannot unsuspend vm.")
    with commands.ServerCommand("UNSUSPEND", server_id, credentials,
                                atomic_context) as vm:
        vm.suspended = False
        vm.save()
        log.info("Unsuspended %s", vm)
        return vm


def stop(server_id, shutdown_timeout=None, credentials=None,
         atomic_context=None):
    with commands.ServerCommand("STOP", server_id, credentials,
                                atomic_context) as vm:
        log.info("Stopping VM %s", vm)
        job_id = backend.shutdown_instance(
            vm, shutdown_timeout=shutdown_timeout)
        vm.record_job(job_id)
        return vm


def attach_volume(vm, volume, atomic_context):
    """Attach a volume to a server.

    The volume must be in 'AVAILABLE' or 'CREATING' status in order to be
    attached. Also, the number of volumes attached to the server must remain
    below the 'GANETI_MAX_DISKS_PER_INSTANCE' setting. This function will
    send the corresponding job to the Ganeti backend and update the status
    of an 'AVAILABLE' volume to 'ATTACHING' (a 'CREATING' volume keeps its
    status until the backend job completes).

    """
    # Check volume state
    if volume.status not in ["AVAILABLE", "CREATING"]:
        raise faults.BadRequest("Cannot attach volume while volume is in"
                                " '%s' status." % volume.status)
    elif volume.status == "AVAILABLE":
        util.assert_detachable_volume_type(volume.volume_type)

    # Check that disk templates are the same
    if volume.volume_type.template != vm.flavor.volume_type.template:
        msg = ("Volume and server must have the same volume template. Volume"
               " has volume template '%s' while server has '%s'"
               % (volume.volume_type.template,
                  vm.flavor.volume_type.template))
        raise faults.BadRequest(msg)

    # Check maximum disks per instance hard limit
    vm_volumes_num = vm.volumes.filter(deleted=False).count()
    if vm_volumes_num == settings.GANETI_MAX_DISKS_PER_INSTANCE:
        raise faults.BadRequest("Maximum volumes per server limit reached")

    if volume.status == "CREATING":
        action_fields = {"disks": [("add", volume, {})]}
    else:
        action_fields = None
    with commands.ServerCommand("ATTACH_VOLUME", vm,
                                atomic_context=atomic_context,
                                action_fields=action_fields):
        util.assign_volume_to_server(vm, volume)
        jobid = backend.attach_volume(vm, volume)
        vm.record_job(jobid)
        log.info("Attached volume '%s' to server '%s'. JobID: '%s'",
                 volume.id, volume.machine_id, jobid)
        volume.backendjobid = jobid
        volume.machine = vm
        if volume.status == "AVAILABLE":
            volume.status = "ATTACHING"
        else:
            volume.status = "CREATING"
        volume.save()


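# Illustrative sketch (not part of this module's real code path): how a
# caller might attach a data volume with attach_volume() above; `vm`,
# `volume` and `atomic_context` are assumed to come from the API layer.
def _example_attach_data_volume(vm, volume, atomic_context):
    # attach_volume() enforces that the volume is 'AVAILABLE' (or still
    # 'CREATING'), that it shares the server's disk template, and that the
    # server has not reached GANETI_MAX_DISKS_PER_INSTANCE.
    attach_volume(vm, volume, atomic_context)
    # An 'AVAILABLE' volume is now in 'ATTACHING' status; a 'CREATING' one
    # keeps its status until the backend job completes.

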
def set_firewall_profile(server_id, profile, nic_id, credentials=None,
                         atomic_context=None):
    with commands.ServerCommand("SET_FIREWALL_PROFILE", server_id,
                                credentials, atomic_context) as vm:
        nic = util.get_vm_nic(vm, nic_id)
        log.info("Setting VM %s, NIC %s, firewall %s", vm, nic, profile)
        if profile not in [x[0] for x in NetworkInterface.FIREWALL_PROFILES]:
            raise faults.BadRequest("Unsupported firewall profile")
        backend.set_firewall_profile(vm, profile=profile, nic=nic)
        return vm


def reboot(server_id, reboot_type, shutdown_timeout=None, credentials=None,
           atomic_context=None):
    with commands.ServerCommand("REBOOT", server_id, credentials,
                                atomic_context) as vm:
        if reboot_type not in ("SOFT", "HARD"):
            raise faults.BadRequest("Malformed request. Invalid reboot"
                                    " type %s" % reboot_type)
        log.info("Rebooting VM %s. Type %s", vm, reboot_type)
        job_id = backend.reboot_instance(vm, reboot_type.lower(),
                                         shutdown_timeout=shutdown_timeout)
        vm.record_job(job_id)
        return vm


def destroy(server_id, shutdown_timeout=None, credentials=None,
            atomic_context=None):
    with commands.ServerCommand("DESTROY", server_id, credentials,
                                atomic_context) as vm:
        # XXX: Workaround for race where OP_INSTANCE_REMOVE starts executing
        # on Ganeti before OP_INSTANCE_CREATE. This will be fixed when
        # OP_INSTANCE_REMOVE supports the 'depends' request attribute.
        if (vm.backendopcode == "OP_INSTANCE_CREATE" and
                vm.backendjobstatus not in rapi.JOB_STATUS_FINALIZED and
                backend.job_is_still_running(vm) and
                not backend.vm_exists_in_backend(vm)):
            raise faults.BuildInProgress("Server is being built")
        log.info("Deleting VM %s", vm)
        job_id = backend.delete_instance(vm,
                                         shutdown_timeout=shutdown_timeout)
        vm.record_job(job_id)
        return vm


def rescue(server_id, rescue_image_ref=None, credentials=None):
    with commands.ServerCommand("RESCUE", server_id,
                                credentials=credentials) as vm:
        if rescue_image_ref is None:
            # If the user does not provide an image, the system should
            # decide one based on the VM rescue properties
            rescue_properties = vm.rescue_properties
            rescue_image = util.get_rescue_image(
                properties=rescue_properties)
        else:
            rescue_image = util.get_rescue_image(image_id=rescue_image_ref)

        # The rescue image field acts as a 'RESCUING' state and should be
        # assigned when a rescue action is issued
        location = rescue_image.location
        if rescue_image.location_type == RescueImage.FILETYPE_FILE:
            location = path.join(settings.RESCUE_IMAGE_PATH, location)

        vm.rescue_image = rescue_image
        log.info("Rescuing VM %s with image %s", vm, location)
        job_id = backend.rescue_instance(vm, location)
        vm.record_job(job_id)
        return vm


def disconnect_port(vm, nic):
    with commands.ServerCommand("DISCONNECT", vm):
        log.info("Removing NIC %s from VM %s", nic, vm)
        job_id = backend.disconnect_from_network(vm, nic)
        vm.record_job(job_id)
        return vm