def delete(self):
    """Remove a single backup.

    A backup in the LOST state is only purged from the DB (success is
    returned immediately); otherwise a delete command is dispatched to the
    backup node and the backup is flagged PENDING until the task finishes.
    """
    self._check_bkp(lost_ok=True)
    self._check_bkp_node()
    backup = self.bkp
    target_vm = backup.vm  # May be None - this can be a backup of an already deleted VM
    self.msg = LOG_BKP_DELETE

    # Task log object: the VM when it still exists, the backup node otherwise
    if target_vm:
        self._check_vm(target_vm)
        self.obj = target_vm
    else:
        self.obj = backup.node

    if backup.status == Backup.LOST:
        # No remote task needed - drop the DB record and reply right away
        backup.delete()
        payload = {'message': 'Backup successfully deleted from DB'}
        return SuccessTaskResponse(self.request, payload, msg=LOG_BKP_DELETE, vm=target_vm,
                                   detail=self._detail())

    if self.execute(get_backup_cmd('delete', backup),
                    lock=self.LOCK % (backup.vm_uuid, backup.disk_id)):
        backup.save_status(backup.PENDING)
        return self.task_response

    return self.error_response
def delete(self):
    """Delete multiple backups"""
    # TODO: not documented :(
    bkp_filter = filter_disk_id(None, self.bkp_filter, self.data, default=1)  # vm_disk_id instead of disk_id
    bkps, __ = get_backups(self.request, bkp_filter, self.data)  # Parse data['bkpnames']
    bkps_lost = bkps.filter(status=Backup.LOST)
    bkp = bkps[0]  # Representative backup used for shared attributes (node, vm_uuid, disk)
    vm = bkp.vm  # May be None - backups can outlive their VM
    self.bkp = bkp
    self.disk_id = bkp.array_disk_id

    # Task log object: the VM when it still exists, the backup node otherwise
    if vm:
        self._check_vm(vm)
        obj = vm
    else:
        obj = bkp.node

    if bkps_lost:
        # LOST backups have no remote task - purge them from the DB directly.
        self._check_bkp_node(bkp.node)
        _result = {'message': 'Backups successfully deleted from DB'}
        _detail = self._detail(bkpnames=[i.name for i in bkps_lost])
        bkps_lost.delete()
        res = SuccessTaskResponse(self.request, _result, msg=LOG_BKPS_DELETE, obj=obj, detail=_detail)
        bkps = bkps.filter(status=Backup.OK)  # Work with OK backups from now on

        # NOTE: this early return must stay guarded by `if bkps_lost:` -
        # otherwise `res` could be referenced before assignment when there
        # are neither LOST nor OK backups.
        if not bkps:
            return res

    bkpnames = [i.name for i in bkps]
    self.bkps = bkps
    self.bkpnames = bkpnames
    self._check_bkp()
    self._check_bkp_node(bkp.node)
    self.msg = LOG_BKPS_DELETE
    self.obj = obj

    # One node task deletes all remaining backups; mark them PENDING until it finishes.
    if self.execute(get_backup_cmd('delete', bkp, bkps=bkps),
                    lock=self.LOCK % (bkp.vm_uuid, bkp.disk_id)):
        bkps.update(status=Backup.PENDING)
        return self.task_response

    return self.error_response
def post(self):
    """Create a new backup of a VM disk according to its backup definition."""
    bkp, vm, define = self.bkp, self.vm, self.define
    # Populate the new Backup row from the VM and its backup definition
    bkp.disk_id = self.real_disk_id
    bkp.dc = vm.dc
    bkp.vm = vm
    bkp.json = vm.json_active
    bkp.define = define
    bkp.node = define.node
    bkp.zpool = define.zpool
    bkp.type = define.type
    bkp.status = bkp.PENDING

    ser = BackupSerializer(self.request, bkp, data=self.data)
    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, vm=vm)

    if define.fsfreeze:
        # Store in self.fsfreeze, because it is displayed in response/tasklog detail
        qga_socket = vm.qga_socket_path
        if qga_socket:
            bkp.fsfreeze = True
            if vm.status == vm.STOPPED:
                # Stopped VM has nothing to freeze - keep the flag, skip the socket
                qga_socket = None
    else:
        qga_socket = None

    self._check_bkp_dc_size_limit()  # Issue #chili-848
    self._check_vm(vm)
    self.obj = vm
    self._check_bkp_node()
    bkp.save()
    self.msg = LOG_BKP_CREATE

    if self.execute(get_backup_cmd('create', bkp, define=define, zfs_filesystem=self.zfs_filesystem,
                                   fsfreeze=qga_socket),
                    lock=self.LOCK % (vm.uuid, bkp.disk_id), stdin=bkp.json.dump()):
        return self.task_response

    # Task was not dispatched - roll back the DB record created above
    bkp.delete()
    return self.error_response
def put(self):
    """Restore a backup into a target VM disk (or just update the backup note)."""
    if 'note' in self.data:
        # Changing backup note instead of restore (not logging!)
        return self.save_note()

    ser = BackupRestoreSerializer(data=self.data)
    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors)

    self._check_bkp()
    self._check_bkp_node()

    # Prepare vm for restore
    request, bkp = self.request, self.bkp
    target_vm = get_vm(request, ser.data['target_hostname_or_uuid'], exists_ok=True, noexists_fail=True,
                       check_node_status=None)

    # --- Preconditions on the target VM -----------------------------------
    if target_vm.node.status not in target_vm.node.STATUS_OPERATIONAL:
        raise NodeIsNotOperational
    if target_vm.locked:
        raise VmIsLocked
    if not target_vm.has_compatible_brand(bkp.vm_brand):
        raise PreconditionRequired('VM brand mismatch')

    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, target_vm, self.data,
                                                        key='target_disk_id', default=None)
    target_disk = target_vm.json_active_get_disks()[disk_id - 1]
    if target_disk['size'] != bkp.disk_size:
        raise PreconditionRequired('Disk size mismatch')

    target_ns = target_vm.get_node_storage(real_disk_id)
    # The backup is first restored to a temporary dataset, so it is required to have as much free space
    # as the backup size (which we don't have -> so we use the backup disk size [pessimism])
    if bkp.disk_size > target_ns.storage.size_free:
        raise PreconditionRequired('Not enough free space on target storage')

    if not ser.data['force'] and Snapshot.objects.only('id').filter(vm=target_vm,
                                                                    disk_id=real_disk_id).exists():
        raise ExpectationFailed('VM has snapshots')
    if target_vm.status != target_vm.STOPPED:
        raise VmIsNotOperational(_('VM is not stopped'))
    if target_vm.tasks:
        raise VmHasPendingTasks

    self.msg = LOG_BKP_UPDATE
    self.obj = target_vm
    # Cache apiview and detail
    # noinspection PyUnusedLocal
    apiview = self.apiview  # noqa: F841
    # noinspection PyUnusedLocal
    detail = self.detail  # noqa: F841
    self._detail_ += ", target_hostname='%s', target_disk_id=%s" % (target_vm.hostname, disk_id)
    self._apiview_['target_hostname'] = target_vm.hostname
    self._apiview_['target_disk_id'] = disk_id
    self._apiview_['force'] = ser.data['force']
    # Source VM may already be gone - fall back to an empty hostname
    self._apiview_['source_hostname'] = bkp.vm.hostname if bkp.vm else ''

    target_vm.set_notready()

    if self.execute(get_backup_cmd('restore', bkp, zfs_filesystem=zfs_filesystem, vm=target_vm),
                    lock=self.LOCK % (target_vm.uuid, disk_id)):
        bkp.save_status(bkp.RESTORE)
        return self.task_response

    target_vm.revert_notready()
    return self.error_response