def validate(self, attrs):
    """Cross-field validation: ``target_hostname_or_uuid`` and ``target_disk_id``
    must be supplied together.

    When both are present, resolve the target VM and its disk, storing them on
    ``self.target_vm`` / ``self.target_vm_disk_id`` / ``self.target_vm_real_disk_id`` /
    ``self.target_vm_disk_zfs_filesystem``.  Lookup failures are reported through
    ``self._errors`` instead of raising; ``attrs`` is always returned.
    """
    vm_ref = attrs.get('target_hostname_or_uuid', None)
    disk_ref = attrs.get('target_disk_id', None)

    # One without the other is a validation error on the missing field.
    if vm_ref and not disk_ref:
        self._errors['target_disk_id'] = s.ErrorList(
            [_('This field is required when target_hostname_or_uuid is specified.')])
        return attrs

    if disk_ref and not vm_ref:
        self._errors['target_hostname_or_uuid'] = s.ErrorList(
            [_('This field is required when target_disk_id is specified.')])
        return attrs

    if vm_ref and disk_ref:
        # Node status is irrelevant for the target VM lookup here.
        try:
            self.target_vm = get_vm(self.request, vm_ref, exists_ok=True, noexists_fail=True,
                                    check_node_status=None)
        except ObjectNotFound as exc:
            self._errors['target_hostname_or_uuid'] = s.ErrorList([exc.detail])
        else:
            try:
                (self.target_vm_disk_id,
                 self.target_vm_real_disk_id,
                 self.target_vm_disk_zfs_filesystem) = get_disk_id(self.request, self.target_vm,
                                                                   disk_id=disk_ref)
            except InvalidInput as exc:
                self._errors['target_disk_id'] = s.ErrorList([exc.detail])

    return attrs
def put(self):
    """Sync snapshots in DB with snapshots on compute node and update snapshot
    status and size.

    Dispatches an ``esnapshot list`` command to the compute node; the callback
    task reconciles the DB afterwards.
    """
    request, vm = self.request, self.vm
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, self.data)
    self._check_vm_status()

    # Metadata describing this API call for the task log.
    view_info = {
        'view': 'vm_snapshot_list',
        'method': request.method,
        'hostname': vm.hostname,
        'disk_id': disk_id,
    }
    task_meta = {
        'output': {'returncode': 'returncode', 'stdout': 'data', 'stderr': 'message'},
        'replace_text': ((vm.uuid, vm.hostname),),
        'msg': LOG_SNAPS_SYNC,
        'vm_uuid': vm.uuid,
        'apiview': view_info,
    }
    done_cb = ('api.vm.snapshot.tasks.vm_snapshot_sync_cb',
               {'vm_uuid': vm.uuid, 'disk_id': real_disk_id})

    # Run the list command on the node's fast queue, serialized per VM+disk.
    task_id, err = execute(request, vm.owner.id, 'esnapshot list "%s"' % zfs_filesystem,
                           meta=task_meta, lock=self.LOCK % (vm.uuid, real_disk_id),
                           callback=done_cb, queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)

    return TaskResponse(request, task_id, msg=LOG_SNAPS_SYNC, vm=vm, api_view=view_info,
                        detail='disk_id=%s' % disk_id, data=self.data)
def delete(self):
    """Delete multiple snapshots.

    LOST snapshots are removed from the DB immediately (no node command needed);
    remaining OK snapshots are destroyed on the compute node via a single
    ``esnapshot destroy`` task and marked PENDING until the callback confirms.
    """
    request, data, vm = self.request, self.data, self.vm
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    # Parse data['snapnames']
    snaps, __ = get_snapshots(request, vm, real_disk_id, data)
    self._check_vm_status()
    snaps_lost = snaps.filter(status=Snapshot.LOST)
    msg = LOG_SNAPS_DELETE

    if snaps_lost:
        # LOST snapshots no longer exist on the node -> DB-only delete.
        _result = {'message': 'Snapshots successfully deleted from DB'}
        _detail = "snapnames='%s', disk_id=%s" % (','.join(i.name for i in snaps_lost), disk_id)
        snaps_lost.delete()
        res = SuccessTaskResponse(request, _result, msg=msg, vm=vm, detail=_detail)
        snaps = snaps.filter(status=Snapshot.OK)  # Work with OK snapshots from now on

        if not snaps:
            # Everything requested was LOST; the DB-delete response is the final answer.
            return res

    elif any(i.status != Snapshot.OK for i in snaps):
        # No LOST snapshots, but some are in a transient state (e.g. PENDING).
        raise ExpectationFailed('VM snapshot status is not OK')

    # Task type (a = automatic, e = manual)
    if getattr(request, 'define_id', None):
        tt = TT_AUTO
    else:
        tt = TT_EXEC

    snapnames = [i.name for i in snaps]
    _apiview_ = {'view': 'vm_snapshot_list', 'method': request.method, 'hostname': vm.hostname,
                 'disk_id': disk_id, 'snapnames': snapnames}
    _detail_ = "snapnames='%s', disk_id=%s" % (','.join(snapnames), disk_id)

    snap_ids = [snap.id for snap in snaps]
    # esnapshot accepts a comma-separated list of snapshot names after the '@'.
    zfs_names = ','.join([snap.zfs_name for snap in snaps])
    lock = self.LOCK % (vm.uuid, real_disk_id)
    cmd = 'esnapshot destroy "%s@%s" 2>&1' % (zfs_filesystem, zfs_names)
    callback = ('api.vm.snapshot.tasks.vm_snapshot_list_cb', {'vm_uuid': vm.uuid, 'snap_ids': snap_ids})

    tid, err = execute(request, vm.owner.id, cmd, meta=snap_meta(vm, msg, _apiview_, _detail_), lock=lock,
                       callback=callback, queue=vm.node.fast_queue, tt=tt)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        # Mark as PENDING until the callback task updates the final status.
        snaps.update(status=Snapshot.PENDING)
        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=_apiview_, detail=_detail_, data=self.data)
def __init__(self, request, hostname_or_uuid, bkpname, data):
    """Resolve the Backup object (and, for POST, the BackupDefine and disk) targeted
    by a backup API call.

    For POST, ``bkpname`` is actually a backup definition name and the final backup
    name is generated from the definition.  For other methods the backup is looked
    up by name, either via the VM or via the stored ``vm_hostname``.
    """
    super(VmBackup, self).__init__(request)

    if request.method == 'POST':  # Got bkpdef instead of bkpname
        vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
        disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
        # TODO: check indexes
        define = get_object(request, BackupDefine, {'name': bkpname, 'vm': vm, 'disk_id': real_disk_id},
                            exists_ok=True, noexists_fail=True, sr=('vm', 'node'))
        bkpname = define.generate_backup_name()
        # NOTE(review): vm_disk_id is stored zero-based here (disk_id - 1), while the
        # non-POST path uses filter_disk_id(default=1) — presumably that helper does the
        # same conversion; verify against filter_disk_id's implementation.
        bkp_get = {'name': bkpname, 'vm_hostname': vm.hostname, 'vm_disk_id': disk_id - 1, 'vm': vm}
    else:
        try:
            # ObjectNotFound is (ab)used as local control flow: forcing the original
            # hostname path skips the VM lookup entirely.
            if 'hostname' in data:  # Force original hostname
                raise ObjectNotFound
            # Only target VM status and backup node status are important
            vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True,
                        check_node_status=None)
        except ObjectNotFound:
            # VM is gone (or original hostname forced) -> look the backup up by the
            # hostname recorded at backup time.
            vm = None
            bkp_get = {'name': bkpname, 'vm_hostname': hostname_or_uuid}
        else:
            bkp_get = {'vm': vm, 'name': bkpname}

        define = None
        real_disk_id = None
        zfs_filesystem = None
        bkp_get = filter_disk_id(None, bkp_get, data, default=1)  # vm_disk_id instead of disk_id

    # Backups are DC-bound.
    bkp_get['dc'] = request.dc
    # Backup instance
    self.bkp = bkp = get_object(request, Backup, bkp_get, sr=('node', 'define', 'vm'))
    self.disk_id = bkp.array_disk_id
    self.hostname = bkp.vm_hostname_real
    self.define = define
    self.real_disk_id = real_disk_id
    self.zfs_filesystem = zfs_filesystem
    self.vm = vm
    self.data = data

    # Task type (a = automatic, e = manual)
    if getattr(request, 'define_id', None):
        self.tt = TT_AUTO
    else:
        self.tt = TT_EXEC
def __init__(self, request, hostname_or_uuid, snapname, data):
    """Resolve the target VM, disk and Snapshot object for a snapshot API call."""
    super(VmSnapshot, self).__init__(request)
    self.data = data
    # Node status matters only for POST/DELETE here; put() runs a custom node check.
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True,
                check_node_status=('POST', 'DELETE'))
    self.vm = vm
    self.disk_id, db_disk_id, self.zfs_filesystem = get_disk_id(request, vm, data)
    snap_filter = {'name': snapname, 'vm': vm, 'disk_id': db_disk_id}
    self.snap = get_object(request, Snapshot, snap_filter, sr=('define',))
def create_from_template(cls, request, vm, vm_define_backup, log=logger):
    """Create backup definitions from vm.template.vm_define_backup list.

    Best-effort bulk create: each entry in ``vm_define_backup`` is processed
    independently and a failure (invalid data, duplicate definition, API error)
    is logged but does not abort the remaining entries.

    :param request: API request; its method is forced to POST for the duration.
    :param vm: target Vm object.
    :param vm_define_backup: list of backup-definition dicts from the VM template.
    :param log: logger-like object used for progress/error reporting.
    """
    if vm_define_backup and isinstance(vm_define_backup, list):
        request = set_request_method(request, 'POST')

        for i, data in enumerate(vm_define_backup):
            try:
                # The definition name may be stored under either key.
                try:
                    bkpdef = data['bkpdef']
                except KeyError:
                    bkpdef = data['name']

                disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
                log.info('Creating backup definition [%d] "%s" for vm=%s, disk_id=%d defined by template %s',
                         i, bkpdef, vm, disk_id, vm.template)
                define = get_object(request, BackupDefine, {'name': bkpdef, 'vm': vm, 'disk_id': real_disk_id})
                res = cls(request, data=data).post(vm, define, vm_template=True)

                if res.status_code != scode.HTTP_201_CREATED:
                    raise APIError('vm_define_backup error [%s]: %s' % (res.status_code, res.data))
            except Exception as ex:
                # Broad catch is intentional (best-effort semantics); log and continue.
                # Fixed: logger.warn() is a deprecated alias of logger.warning()
                # (removed in Python 3.13).
                log.warning('Failed to create backup definition [%d] for vm=%s defined by template %s with '
                            'data="%s". Error: %s', i, vm, vm.template, data, ex)
def put(self):
    """Restore a backup onto a target VM disk (or just update the backup note).

    Runs a chain of preconditions (target node/VM state, brand and disk-size
    compatibility, free space, snapshot/force check, VM stopped, no pending
    tasks), then puts the VM into notready state and dispatches the restore
    command.  On dispatch failure the notready state is reverted.
    """
    if 'note' in self.data:
        # Changing backup note instead of restore (not logging!)
        return self.save_note()

    ser = BackupRestoreSerializer(data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors)

    self._check_bkp()
    self._check_bkp_node()

    # Prepare vm for restore
    request, bkp = self.request, self.bkp
    # Target VM lookup skips the node-status check; node status is validated below.
    vm = get_vm(request, ser.data['target_hostname_or_uuid'], exists_ok=True, noexists_fail=True,
                check_node_status=None)

    if vm.node.status not in vm.node.STATUS_OPERATIONAL:
        raise NodeIsNotOperational

    if vm.locked:
        raise VmIsLocked

    if not vm.has_compatible_brand(bkp.vm_brand):
        raise PreconditionRequired('VM brand mismatch')

    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, self.data,
                                                        key='target_disk_id', default=None)
    # disk_id is 1-based (user-facing); the active-json disk list is 0-based.
    tgt_disk = vm.json_active_get_disks()[disk_id - 1]

    if tgt_disk['size'] != bkp.disk_size:
        raise PreconditionRequired('Disk size mismatch')

    target_ns = vm.get_node_storage(real_disk_id)
    # The backup is first restored to a temporary dataset, so it is required to have as much free space
    # as the backup size (which we don't have -> so we use the backup disk size [pessimism])
    if bkp.disk_size > target_ns.storage.size_free:
        raise PreconditionRequired('Not enough free space on target storage')

    # Restoring over a disk with existing snapshots requires force=true.
    if not ser.data['force'] and Snapshot.objects.only('id').filter(vm=vm,
                                                                   disk_id=real_disk_id).exists():
        raise ExpectationFailed('VM has snapshots')

    if vm.status != vm.STOPPED:
        raise VmIsNotOperational(_('VM is not stopped'))

    if vm.tasks:
        raise VmHasPendingTasks

    self.msg = LOG_BKP_UPDATE
    self.obj = vm
    # Cache apiview and detail
    # noinspection PyUnusedLocal
    apiview = self.apiview  # noqa: F841
    # noinspection PyUnusedLocal
    detail = self.detail  # noqa: F841
    self._detail_ += ", target_hostname='%s', target_disk_id=%s" % (vm.hostname, disk_id)
    self._apiview_['target_hostname'] = vm.hostname
    self._apiview_['target_disk_id'] = disk_id
    self._apiview_['force'] = ser.data['force']

    # Source VM may have been deleted since the backup was taken.
    if bkp.vm:
        self._apiview_['source_hostname'] = bkp.vm.hostname
    else:
        self._apiview_['source_hostname'] = ''

    vm.set_notready()

    if self.execute(get_backup_cmd('restore', bkp, zfs_filesystem=zfs_filesystem, vm=vm),
                    lock=self.LOCK % (vm.uuid, disk_id)):
        bkp.save_status(bkp.RESTORE)
        return self.task_response

    # Dispatch failed -> restore the VM's previous state flag.
    vm.revert_notready()
    return self.error_response
def vm_define_snapshot(request, hostname_or_uuid, snapdef, data=None):
    """
    Show (:http:get:`GET </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`),
    create (:http:post:`POST </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`),
    remove (:http:delete:`DELETE </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`) or
    update (:http:put:`PUT </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`)
    a VM snapshot definition and schedule.

    .. http:get:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.extended: Include total number of snapshots (default: false)
        :type data.extended: boolean
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: VM not found / Snapshot definition not found
        :status 412: Invalid disk_id

    .. http:post:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name (predefined: hourly, daily, weekly, monthly)
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.schedule: **required** - Schedule in UTC CRON format (e.g. 30 4 * * 6)
        :type data.schedule: string
        :arg data.retention: **required** - Maximum number of snapshots to keep
        :type data.retention: integer
        :arg data.active: Enable or disable snapshot schedule (default: true)
        :type data.active: boolean
        :arg data.desc: Snapshot definition description
        :type data.desc: string
        :arg data.fsfreeze: Whether to send filesystem freeze command to QEMU agent socket before \
creating snapshot (requires QEMU Guest Agent) (default: false)
        :type data.fsfreeze: boolean
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Snapshot definition already exists
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:put:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.schedule: Schedule in UTC CRON format (e.g. 30 4 * * 6)
        :type data.schedule: string
        :arg data.retention: Maximum number of snapshots to keep
        :type data.retention: integer
        :arg data.active: Enable or disable snapshot schedule
        :type data.active: boolean
        :arg data.desc: Snapshot definition description
        :type data.desc: string
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Snapshot definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:delete:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Snapshot definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational
    """
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    # Optional extra SELECT adding the total snapshot count (data.extended).
    extra = output_extended_snap_count(request, data)
    define = get_object(request, SnapshotDefine, {'name': snapdef, 'vm': vm, 'disk_id': real_disk_id},
                        sr=('vm', 'periodic_task', 'periodic_task__crontab'), extra={'select': extra})

    return SnapshotDefineView(request, data=data).response(vm, define, extended=bool(extra))
def vm_define_backup(request, hostname_or_uuid, bkpdef, data=None):
    """
    Show (:http:get:`GET </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
    create (:http:post:`POST </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
    remove (:http:delete:`DELETE </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`) or
    update (:http:put:`PUT </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`)
    a VM backup definition and schedule.

    .. http:get:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.extended: Include total number of backups (default: false)
        :type data.extended: boolean
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id

    .. http:post:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name (predefined: hourly, daily, weekly, monthly)
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.type: **required** - Backup type (1 - dataset, 2 - file) (default: 1)
        :type: data.type: integer
        :arg data.node: **required** - Name of the backup node
        :type data.node: string
        :arg data.zpool: **required** - The zpool used on the backup node (default: zones)
        :type data.zpool: string
        :arg data.schedule: **required** - Schedule in UTC CRON format (e.g. 30 4 * * 6)
        :type data.schedule: string
        :arg data.retention: **required** - Maximum number of backups to keep
        :type data.retention: integer
        :arg data.active: Enable or disable backup schedule (default: true)
        :type data.active: boolean
        :arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2) (default: 0)
        :type data.compression: integer
        :arg data.bwlimit: Transfer rate limit in bytes (default: null => no limit)
        :type data.bwlimit: integer
        :arg data.desc: Backup definition description
        :type data.desc: string
        :arg data.fsfreeze: Whether to send filesystem freeze command to QEMU agent socket before \
creating backup snapshot (requires QEMU Guest Agent) (default: false)
        :type data.fsfreeze: boolean
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Backup definition already exists
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:put:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.schedule: Schedule in UTC CRON format (e.g. 30 4 * * 6)
        :type data.schedule: string
        :arg data.retention: Maximum number of backups to keep
        :type data.retention: integer
        :arg data.active: Enable or disable backup schedule
        :type data.active: boolean
        :arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2)
        :type data.compression: integer
        :arg data.bwlimit: Transfer rate limit in bytes
        :type data.bwlimit: integer
        :arg data.desc: Backup definition description
        :type data.desc: string
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:delete:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational
    """
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    # Optional extra SELECT adding the total backup count (data.extended).
    extra = output_extended_backup_count(request, data)
    define = get_object(request, BackupDefine, {'name': bkpdef, 'vm': vm, 'disk_id': real_disk_id},
                        sr=('vm', 'vm__dc', 'node', 'periodic_task', 'periodic_task__crontab'),
                        extra={'select': extra})

    return BackupDefineView(request, data=data).response(vm, define, extended=bool(extra))
def image_snapshot(request, hostname_or_uuid, snapname, name, data=None):
    """
    Create (:http:post:`POST </vm/(hostname_or_uuid)/snapshot/(snapname)/image/(name)>`)
    a server disk image from a disk snapshot.

    .. note:: A global image server (:http:put:`VMS_IMAGE_VM </dc/(dc)/settings>`) must be \
configured in the system.

    .. http:post:: /vm/(hostname_or_uuid)/snapshot/(snapname)/image/(name)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |ImageAdmin|
        :Asynchronous?:
            * |async-yes|
        :arg name: **required** - Server disk image name
        :type name: string
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapname: **required** - Snapshot name
        :type snapname: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.alias: Short image name (default: ``name``)
        :type data.alias: string
        :arg data.access: Access type (1 - Public, 3 - Private, 4 - Deleted) (default: 3)
        :type data.access: integer
        :arg data.owner: User that owns the image (default: logged in user)
        :type data.owner: string
        :arg data.desc: Image description
        :type data.desc: string
        :arg data.version: Image version (default: 1.0)
        :type data.version: string
        :arg data.resize: Whether the image is able to resize the disk during an initial start or deploy process \
(default: false)
        :type data.resize: boolean
        :arg data.deploy: Whether the image is able to shut down the server after an initial start (default: false)
        :type data.deploy: boolean
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Image already exists
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational
        :status 417: Image status is not OK / VM snapshot status is not OK
        :status 428: Image server is not available
    """
    # Local imports — presumably to avoid a circular import at module load; verify
    # before hoisting them to the top of the file.
    from api.utils.db import get_object
    from api.vm.utils import get_vm
    from api.vm.snapshot.utils import get_disk_id
    from vms.models import Snapshot

    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    snap = get_object(request, Snapshot, {'name': snapname, 'vm': vm, 'disk_id': real_disk_id},
                      exists_ok=True, noexists_fail=True)
    # Internal consistency check between the disk lookup and the stored snapshot.
    # NOTE(review): assert is stripped under `python -O`; if this invariant can fail
    # at runtime it should raise instead — confirm intent.
    assert zfs_filesystem == snap.zfs_filesystem

    return ImageView(request, name, data).create(vm, snap)