Exemple #1
0
    def delete(self, vm, data, **kwargs):
        """Delete VM definition"""
        if vm.is_deployed():
            raise VmIsNotOperational(_('VM is not notcreated'))

        owner = vm.owner
        dead_vm = vm.log_list
        uuid = vm.uuid
        hostname = vm.hostname
        alias = vm.alias
        zabbix_sync = vm.is_zabbix_sync_active()
        external_zabbix_sync = vm.is_external_zabbix_sync_active()
        task_id = SuccessTaskResponse.gen_task_id(self.request, vm=dead_vm, owner=owner)

        # Every VM NIC could have an association to other tables. Cleanup first:
        for nic in vm.json_get_nics():
            # noinspection PyBroadException
            try:
                nic_ser = VmDefineNicSerializer(self.request, vm, nic)
                nic_ser.delete_ip(task_id)
            except Exception as ex:
                logger.exception(ex)
                continue

        # Finally delete VM
        logger.debug('Deleting VM %s from DB', vm)
        vm.delete()

        try:
            return SuccessTaskResponse(self.request, None, vm=dead_vm, owner=owner, task_id=task_id, msg=LOG_DEF_DELETE)
        finally:
            # Signal!
            vm_undefined.send(TaskID(task_id, request=self.request), vm_uuid=uuid, vm_hostname=hostname, vm_alias=alias,
                              dc=self.request.dc, zabbix_sync=zabbix_sync, external_zabbix_sync=external_zabbix_sync)
Exemple #2
0
    def put(self, vm, data):
        """Revert json_active (undo). Problematic attributes:
            - hostname  - handled by revert_active() + change requires some post configuration
            - alias     - handled by revert_active()
            - owner     - handled by revert_active()
            - template  - handled by revert_active()
            - monitored - handled by revert_active(), but mon_vm_sync task must be run via vm_define_reverted signal
            - tags      - wont be reverted (not saved in json)
            - nics.*.ip - ip reservation is fixed via vm_update_ipaddress_usage()
            - nics.*.dns + ptr - known bug - wont be reverted
        """
        if vm.is_notcreated():
            raise VmIsNotOperational('VM is not created')

        if vm.json == vm.json_active:
            raise ExpectationFailed('VM definition unchanged')

        if vm.tasks:
            raise VmHasPendingTasks

        # Prerequisites
        vm.hostname_is_valid_fqdn(
            cache=False
        )  # Cache vm._fqdn hostname/domain pair and find dns record
        hostname = vm.hostname  # Save hostname configured in DB

        # The magic happens here: get_diff() will run vm.revert_active() and return a diff
        vm_diff = VmDefineView(self.request).get_diff(vm, full=True)

        # Save VM
        hostname_changed = hostname != vm.hostname
        vm.unlock()  # vm saving was locked by revert_active()
        vm.save(update_hostname=hostname_changed,
                update_node_resources=True,
                update_storage_resources=True)

        # Generate response
        detail = 'Reverted VM configuration from %s.\n%s' % (
            vm.changed.strftime('%Y-%m-%d %H:%M:%S%z'),
            self.nice_diff(vm_diff))
        vm_diff['reverted_from'] = vm.changed

        res = SuccessTaskResponse(self.request,
                                  vm_diff,
                                  detail=detail,
                                  msg=LOG_DEF_REVERT,
                                  vm=vm)

        # Post-save stuff
        task_id = TaskID(res.data.get('task_id'), request=self.request)
        vm_update_ipaddress_usage(vm)
        vm_define_reverted.send(task_id, vm=vm)  # Signal!

        if hostname_changed:
            VmDefineHostnameChanged(self.request, vm,
                                    hostname).send()  # Task event for GUI

        return res
Exemple #3
0
    def put(self):
        if 'note' in self.data:
            # Changing snapshot note instead of rollback (not logging)
            return self._update_note()

        request, vm, snap = self.request, self.vm, self.snap

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        if vm.locked:
            raise VmIsLocked

        self._check_vm_status()
        self._check_snap_status()
        apiview, detail = self._get_apiview_detail()

        apiview['force'] = bool(ForceSerializer(data=self.data, default=True))

        if not apiview['force']:
            snaplast = Snapshot.objects.only('id').filter(
                vm=vm, disk_id=snap.disk_id).order_by('-id')[0]
            if snap.id != snaplast.id:
                raise ExpectationFailed('VM has more recent snapshots')

        if vm.status != vm.STOPPED:
            raise VmIsNotOperational('VM is not stopped')

        if vm.tasks:
            raise VmHasPendingTasks

        msg = LOG_SNAP_UPDATE
        lock = self.LOCK % (vm.uuid, snap.disk_id)
        cmd = 'esnapshot rollback "%s@%s" 2>&1' % (self.zfs_filesystem,
                                                   snap.zfs_name)
        vm.set_notready()
        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           meta=snap_meta(vm, msg, apiview, detail),
                           lock=lock,
                           callback=snap_callback(vm, snap),
                           queue=vm.node.fast_queue)

        if err:
            vm.revert_notready()
            return FailureTaskResponse(request, err, vm=vm)
        else:
            snap.save_status(snap.ROLLBACK)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=detail,
                                data=self.data)
Exemple #4
0
    def put(self):
        request, vm = self.request, self.vm

        if vm.locked:
            raise VmIsLocked

        if vm.status not in (vm.STOPPED, vm.RUNNING, vm.NOTCREATED):
            raise VmIsNotOperational(
                'VM is not stopped, running or notcreated')

        if vm.json_changed():
            raise PreconditionRequired(
                'VM definition has changed; Update first')

        ser = VmDcSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if vm.tasks:
            raise VmHasPendingTasks

        old_dc = vm.dc
        dc = ser.dc
        # Change DC for one VM, repeat this for other VM + Recalculate node & storage resources in target and source
        vm.dc = dc
        vm.save(update_node_resources=True, update_storage_resources=True)
        # Change task log entries DC for target VM
        TaskLogEntry.objects.filter(object_pk=vm.uuid).update(dc=dc)
        # Change related VM backup's DC
        Backup.objects.filter(vm=vm).update(dc=dc)

        for ns in ser.nss:  # Issue #chili-885
            for i in (dc, old_dc):
                Backup.update_resources(ns, vm, i)
                Snapshot.update_resources(ns, vm, i)

        detail = 'Successfully migrated VM %s from datacenter %s to datacenter %s' % (
            vm.hostname, old_dc.name, dc.name)
        # Will create task log entry in old DC
        res = SuccessTaskResponse(request,
                                  detail,
                                  vm=vm,
                                  msg=LOG_MIGRATE_DC,
                                  detail=detail)
        # Create task log entry in new DC too
        task_log_success(task_id_from_task_id(res.data.get('task_id'),
                                              dc_id=dc.id),
                         LOG_MIGRATE_DC,
                         obj=vm,
                         detail=detail,
                         update_user_tasks=False)

        return res
Exemple #5
0
    def delete(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        if vm.uuid == ImageVm.get_uuid():
            raise VmIsLocked('VM is image server')

        if vm.locked:
            raise VmIsLocked

        if vm.status not in (vm.STOPPED, vm.FROZEN):
            raise VmIsNotOperational('VM is not stopped')

        if vm.tasks:
            raise VmHasPendingTasks

        apiview = self.apiview
        msg = LOG_VM_DELETE
        cmd = 'vmadm delete ' + vm.uuid
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message'},
            'replace_text': ((vm.uuid, vm.hostname),),
            'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_delete_cb', {'vm_uuid': vm.uuid})

        logger.debug('Deleting VM %s from compute node', vm)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=self.lock, expires=VM_VM_EXPIRES,
                               callback=callback, queue=vm.node.slow_queue)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:
                vm.revert_notready()
Exemple #6
0
    def post(self):
        """Create and initialize slave VM and create replication service"""
        request, vm = self.request, self.vm

        if vm.status not in (vm.STOPPED, vm.RUNNING):
            raise VmIsNotOperational('VM is not stopped or running')

        if vm.json_changed():
            raise PreconditionRequired('VM definition has changed; Update first')

        ser = VmReplicaSerializer(request, self.slave_vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if vm.tasks:
            raise VmHasPendingTasks

        cmd = self.CMD['init'] % self._esrep_init_opts + ' && ' + self.CMD['svc-create'] % self._esrep_svc_opts
        slave_vm = None
        # Set VM to nonready (+"api lock")
        vm.set_notready()

        try:
            # Create slave VM
            slave_vm = ser.save_slave_vm()
            stdin = slave_vm.vm.fix_json(resize=True).dump()
            logger.debug('Creating new slave VM %s on node %s with json: """%s"""', slave_vm, slave_vm.node, stdin)

            return self._run_execute(LOG_REPLICA_CREATE, cmd, stdin=stdin, detail_dict=ser.detail_dict(),
                                     block_key=ser.node_image_import())
        finally:
            if not self._success:
                vm.revert_notready()
                if slave_vm:
                    slave_vm.delete()
Exemple #7
0
    def put(self):  # noqa: R701
        request, vm, action = self.request, self.vm, self.action

        # Cannot change status unless the VM is created on node
        if vm.status not in self.statuses and action != 'current':
            raise VmIsNotOperational

        if action not in self.actions:
            raise ExpectationFailed('Bad action')

        apiview = self.apiview
        f_ser = VmStatusFreezeSerializer(data=self.data)

        if f_ser.is_valid():
            freeze = apiview['freeze'] = f_ser.data['freeze']
            unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
        else:
            return FailureTaskResponse(request, f_ser.errors, vm=vm)

        if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
            pass

        elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
            res = {
                'message':
                'VM %s is already stopped. Changing status to frozen.' %
                vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
            res = {
                'message': 'Removing frozen status for VM %s.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'current':
            # Limit PUT /current/ action to be Admins and SuperAdmins
            if not request.user.is_admin(request):
                raise PermissionDenied

            if vm.status in self.statuses_force_change_allowed:
                return self.get_current_status(force_change=True)
            elif vm.status in self.stuck_statuses_force_change_allowed:
                if vm.tasks:
                    raise VmHasPendingTasks
                else:
                    return self.get_current_status(force_change=True)
            else:
                raise VmIsNotOperational

        else:
            raise ExpectationFailed('Bad action')

        dc_settings = request.dc.settings

        if action in ('stop',
                      'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
            raise PreconditionRequired('Internal VM can\'t be stopped')

        lock = 'vm_status vm:%s' % vm.uuid
        stdin = None
        apiview['update'] = False
        transition_to_stopping = False

        # The update parameter is used by all actions (start, stop, reboot)
        ser_update = VmStatusUpdateJSONSerializer(data=self.data,
                                                  default=(action
                                                           in ('start',
                                                               'reboot')))

        if not ser_update.is_valid():
            return FailureTaskResponse(request, ser_update.errors, vm=vm)

        if vm.json_changed():
            apiview['update'] = ser_update.data['update']
            logger.info('VM %s json != json_active', vm)

            if not apiview['update']:
                logger.info('VM %s json_active update disabled', vm)

        if action == 'start':
            ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

            if not ser.is_valid():
                return FailureTaskResponse(request, ser.errors, vm=vm)

            if ser.data and ser.iso:
                if not request.user.is_admin(request) and vm.is_installed() and \
                        (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                    raise PreconditionRequired('VM is not installed')

                msg = LOG_START_ISO
                iso = ser.iso
                cmd = self._start_cmd(iso=iso,
                                      iso2=ser.iso2,
                                      once=ser.data['cdimage_once'])
            else:
                msg = LOG_START
                iso = None
                cmd = self._start_cmd()

            if apiview['update']:
                if vm.tasks:
                    raise VmHasPendingTasks

                cmd, stdin = self._add_update_cmd(cmd, os_cmd_allowed=False)

                if iso:
                    msg = LOG_START_UPDATE_ISO
                else:
                    msg = LOG_START_UPDATE

        else:
            ser_stop_reboot = VmStatusStopSerializer(request,
                                                     vm,
                                                     data=self.data)

            if not ser_stop_reboot.is_valid():
                return FailureTaskResponse(request,
                                           ser_stop_reboot.errors,
                                           vm=vm)

            update = apiview.get('update',
                                 False)  # VmStatusUpdateJSONSerializer
            force = apiview['force'] = ser_stop_reboot.data.get('force', False)
            timeout = ser_stop_reboot.data.get('timeout', None)

            if not force and timeout:
                apiview['timeout'] = timeout

            if update:
                if vm.tasks:
                    raise VmHasPendingTasks

                # This will always perform a vmadm stop command, followed by a vmadm update command and optionally
                # followed by a vmadm start command (reboot)
                pre_cmd = self._action_cmd('stop',
                                           force=force,
                                           timeout=timeout)

                if action == 'reboot':
                    if force:
                        msg = LOG_REBOOT_FORCE_UPDATE
                    else:
                        msg = LOG_REBOOT_UPDATE

                    post_cmd = self._action_cmd('start')
                else:
                    if force:
                        msg = LOG_STOP_FORCE_UPDATE
                    else:
                        msg = LOG_STOP_UPDATE

                    post_cmd = ''

                cmd, stdin = self._add_update_cmd(post_cmd,
                                                  os_cmd_allowed=True,
                                                  pre_cmd=pre_cmd)
            else:
                cmd = self._action_cmd(action, force=force, timeout=timeout)

                if force:
                    if action == 'reboot':
                        msg = LOG_REBOOT_FORCE
                    else:
                        lock += ' force'
                        msg = LOG_STOP_FORCE
                else:
                    if action == 'reboot':
                        msg = LOG_REBOOT
                    else:
                        msg = LOG_STOP

            if vm.status == Vm.STOPPING:
                if update:
                    raise PreconditionRequired(
                        'Cannot perform update while VM is stopping')
                if not force:
                    raise VmIsNotOperational(
                        'VM is already stopping; try to use force')
            else:
                transition_to_stopping = True

        meta = {
            'output': {
                'returncode': 'returncode',
                'stderr': 'message',
                'stdout': 'json'
            },
            'replace_stderr': ((vm.uuid, vm.hostname), ),
            'detail': self.detail,
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           stdin=stdin,
                           meta=meta,
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            if transition_to_stopping:
                vm.save_status(Vm.STOPPING)

            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=self.detail,
                                data=self.data,
                                api_data={
                                    'status': vm.status,
                                    'status_display': vm.status_display()
                                })
Exemple #8
0
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            stdin = stdin.dump()

            # cmd = zfs set... >&2;
            if cmd1 and vm.snapshot_set.exists():
                raise ExpectationFailed('VM has snapshots')

            # final cmd
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=vm.node.fast_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:
                vm.revert_notready()
Exemple #9
0
    def post(self):
        request, vm = self.request, self.vm
        ser = VmCreateSerializer(data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if not vm.is_kvm():
            if not (vm.dc.settings.VMS_VM_SSH_KEYS_DEFAULT or vm.owner.usersshkey_set.exists()):
                raise PreconditionRequired('VM owner has no SSH keys available')

        apiview = self.apiview
        # noinspection PyTypeChecker
        cmd = 'vmadm create >&2; e=$? %s; vmadm get %s 2>/dev/null; vmadm start %s >&2; exit $e' % (
            self.fix_create(vm), vm.uuid, vm.uuid)

        recreate = apiview['recreate'] = ser.data['recreate']
        # noinspection PyAugmentAssignment
        if recreate:
            # recreate should be available to every vm owner
            if not (request.user and request.user.is_authenticated()):
                raise PermissionDenied

            if vm.locked:
                raise VmIsLocked

            if vm.status != vm.STOPPED:
                raise VmIsNotOperational('VM is not stopped')

            if not ser.data['force']:
                raise ExpectationFailed('Are you sure?')

            msg = LOG_VM_RECREATE
            # noinspection PyAugmentAssignment
            cmd = 'vmadm delete ' + vm.uuid + ' >&2 && sleep 1; ' + cmd

        elif vm.status == vm.NOTCREATED:
            # only admin
            if not (request.user and request.user.is_admin(request)):
                raise PermissionDenied

            if not vm.node:  # we need to find a node for this vm now
                logger.debug('VM %s has no compute node defined. Choosing node automatically', vm)
                VmDefineView(request).choose_node(vm)
                logger.info('New compute node %s for VM %s was chosen automatically.', vm.node, vm)

            msg = LOG_VM_CREATE

        else:
            raise VmIsNotOperational('VM is already created')

        # Check boot flag (KVM) or disk image (OS) (bug #chili-418)
        if not vm.is_bootable():
            raise PreconditionRequired('VM has no bootable disk')

        if vm.tasks:
            raise VmHasPendingTasks

        old_status = vm.status
        deploy = apiview['deploy'] = vm.is_deploy_needed()
        resize = apiview['resize'] = vm.is_resize_needed()

        if not vm.is_blank():
            vm.set_root_pw()

        # Set new status also for blank VM (where deployment is not needed)
        # This status will be changed in vm_status_event_cb (if everything goes well).
        vm.status = vm.CREATING
        vm.save()  # save status / node / vnc_port / root_pw

        stdin = vm.fix_json(deploy=deploy, resize=resize, recreate=recreate).dump()
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),),
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_create_cb', {'vm_uuid': vm.uuid})
        err = True

        try:
            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, vm.json_get_disks())
            logger.debug('Creating new VM %s on node %s with json: """%s"""', vm, vm.node, stdin)
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, expires=VM_VM_EXPIRES, lock=self.lock,
                               callback=callback, queue=vm.node.slow_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                # Inform user about creating
                vm_status_changed(tid, vm, vm.CREATING, save_state=False)
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:  # Revert old status
                vm.status = old_status
                vm.save_status()
Exemple #10
0
    def put(self):
        if 'note' in self.data:  # Changing backup note instead of restore (not logging!)
            return self.save_note()

        ser = BackupRestoreSerializer(data=self.data)
        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors)

        self._check_bkp()
        self._check_bkp_node()

        # Prepare vm for restore
        request, bkp = self.request, self.bkp

        vm = get_vm(request,
                    ser.data['target_hostname_or_uuid'],
                    exists_ok=True,
                    noexists_fail=True,
                    check_node_status=None)

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        if vm.locked:
            raise VmIsLocked

        if not vm.has_compatible_brand(bkp.vm_brand):
            raise PreconditionRequired('VM brand mismatch')

        disk_id, real_disk_id, zfs_filesystem = get_disk_id(
            request, vm, self.data, key='target_disk_id', default=None)
        tgt_disk = vm.json_active_get_disks()[disk_id - 1]

        if tgt_disk['size'] != bkp.disk_size:
            raise PreconditionRequired('Disk size mismatch')

        target_ns = vm.get_node_storage(real_disk_id)
        # The backup is first restored to a temporary dataset, so it is required to have as much free space
        # as the backup size (which we don't have -> so we use the backup disk size [pessimism])
        if bkp.disk_size > target_ns.storage.size_free:
            raise PreconditionRequired(
                'Not enough free space on target storage')

        if not ser.data['force'] and Snapshot.objects.only('id').filter(
                vm=vm, disk_id=real_disk_id).exists():
            raise ExpectationFailed('VM has snapshots')

        if vm.status != vm.STOPPED:
            raise VmIsNotOperational(_('VM is not stopped'))

        if vm.tasks:
            raise VmHasPendingTasks

        self.msg = LOG_BKP_UPDATE
        self.obj = vm
        # Cache apiview and detail
        # noinspection PyUnusedLocal
        apiview = self.apiview  # noqa: F841
        # noinspection PyUnusedLocal
        detail = self.detail  # noqa: F841
        self._detail_ += ", target_hostname='%s', target_disk_id=%s" % (
            vm.hostname, disk_id)
        self._apiview_['target_hostname'] = vm.hostname
        self._apiview_['target_disk_id'] = disk_id
        self._apiview_['force'] = ser.data['force']

        if bkp.vm:
            self._apiview_['source_hostname'] = bkp.vm.hostname
        else:
            self._apiview_['source_hostname'] = ''

        vm.set_notready()

        if self.execute(get_backup_cmd('restore',
                                       bkp,
                                       zfs_filesystem=zfs_filesystem,
                                       vm=vm),
                        lock=self.LOCK % (vm.uuid, disk_id)):
            bkp.save_status(bkp.RESTORE)
            return self.task_response

        vm.revert_notready()
        return self.error_response
Exemple #11
0
    def put(self):
        request, vm = self.request, self.vm

        if vm.locked:
            raise VmIsLocked

        if vm.status not in (vm.STOPPED, vm.RUNNING):
            raise VmIsNotOperational('VM is not stopped or running')

        if vm.json_changed():
            raise PreconditionRequired(
                'VM definition has changed; Update first')

        ser = VmMigrateSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if vm.tasks:
            raise VmHasPendingTasks

        err = True
        ghost_vm = None
        # Set VM to nonready (+"api lock")
        vm.set_notready()

        try:
            # Create a dummy/placeholder VM
            ghost_vm = ser.save_ghost_vm()

            # Possible node_image import task which will block this task on node worker
            block_key = ser.node_image_import()

            # We have a custom detail dict with all necessary api view parameters
            detail_dict = ser.detail_dict()

            # Prepare task data
            apiview = {
                'view': 'vm_migrate',
                'method': request.method,
                'hostname': vm.hostname
            }
            apiview.update(detail_dict)
            lock = 'vm_migrate vm:%s' % vm.uuid
            meta = {
                'output': {
                    'returncode': 'returncode',
                    'stderr': 'message',
                    'stdout': 'json'
                },
                'replace_stderr': ((vm.uuid, vm.hostname), ),
                'msg': LOG_MIGRATE,
                'vm_uuid': vm.uuid,
                'slave_vm_uuid': ghost_vm.uuid,
                'apiview': apiview,
            }
            callback = ('api.vm.migrate.tasks.vm_migrate_cb', {
                'vm_uuid': vm.uuid,
                'slave_vm_uuid': ghost_vm.uuid
            })

            # Execute task
            tid, err = execute(request,
                               vm.owner.id,
                               ser.esmigrate_cmd,
                               meta=meta,
                               lock=lock,
                               callback=callback,
                               queue=vm.node.fast_queue,
                               block_key=block_key)

            if err:  # Error, revert VM status, delete placeholder VM
                return FailureTaskResponse(request, err, vm=vm)
            else:  # Success, task is running
                return TaskResponse(request,
                                    tid,
                                    msg=LOG_MIGRATE,
                                    vm=vm,
                                    api_view=apiview,
                                    detail_dict=detail_dict,
                                    data=self.data)
        finally:
            if err:
                vm.revert_notready()
                if ghost_vm:
                    ghost_vm.delete()
Exemple #12
0
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        node = vm.node
        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))
        queue = vm.node.fast_queue
        new_node_uuid = None
        detail_dict = {}

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            detail_dict['force'] = True
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None
            node_param = self.data.get('node')

            if node_param:
                if not request.user.is_staff:
                    raise PermissionDenied

                node = get_node(request, node_param, dc=request.dc, exists_ok=True, noexists_fail=True)

                if node.hostname == vm.node.hostname:
                    raise InvalidInput('VM already has the requested node set in DB')

                apiview['node'] = detail_dict['node'] = node.hostname
                queue = node.fast_queue
                new_node_uuid = node.uuid

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            self.validate_update(vm, stdin, cmd1)
            stdin = stdin.dump()

            # final cmd
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        # Check compute node status after we know which compute node the task is going to be run on
        # The internal vm.node.status checking is disabled in get_vm() in __init__
        if node.status != node.ONLINE:
            raise NodeIsNotOperational

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid, 'new_node_uuid': new_node_uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data,
                                    detail_dict=detail_dict)
        finally:
            if err:
                vm.revert_notready()
Exemple #13
0
    def put(self):
        if 'note' in self.data:
            # Changing snapshot note instead of rollback (not logging)
            return self._update_note()

        request, vm, snap = self.request, self.vm, self.snap

        ser = SnapshotRestoreSerializer(request, vm, data=self.data)
        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors)

        target_vm, target_vm_disk_id = ser.target_vm, ser.target_vm_disk_id

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        if target_vm.locked:
            raise VmIsLocked

        if target_vm != vm:
            if target_vm.node.status not in target_vm.node.STATUS_OPERATIONAL:
                raise NodeIsNotOperational

            self._check_vm_status(vm=target_vm)

            if not vm.has_compatible_brand(target_vm.brand):
                raise PreconditionRequired('VM brand mismatch')

            source_disk = vm.json_active_get_disks()[self.disk_id - 1]
            target_disk = target_vm.json_active_get_disks()[target_vm_disk_id -
                                                            1]

            if target_disk['size'] != source_disk['size']:
                raise PreconditionRequired('Disk size mismatch')

        self._check_vm_status()
        self._check_snap_status()
        apiview, detail = self._get_apiview_detail()
        apiview['force'] = ser.data['force']

        if target_vm != vm:
            detail += ", source_hostname='%s', target_hostname='%s', target_disk_id=%s" % (
                vm.hostname, target_vm.hostname, target_vm_disk_id)
            apiview['source_hostname'] = vm.hostname
            apiview['target_hostname'] = target_vm.hostname
            apiview['target_disk_id'] = target_vm_disk_id

            if not apiview['force']:
                if Snapshot.objects.only('id').filter(
                        vm=target_vm,
                        disk_id=ser.target_vm_real_disk_id).exists():
                    raise ExpectationFailed('Target VM has snapshots')

        elif not apiview['force']:
            snaplast = Snapshot.objects.only('id').filter(
                vm=vm, disk_id=snap.disk_id).order_by('-id')[0]
            if snap.id != snaplast.id:
                raise ExpectationFailed('VM has more recent snapshots')

        if target_vm.status != vm.STOPPED:
            raise VmIsNotOperational('VM is not stopped')

        if target_vm.tasks:
            raise VmHasPendingTasks

        msg = LOG_SNAP_UPDATE
        lock = self.LOCK % (vm.uuid, snap.disk_id)

        if target_vm == vm:
            cmd = 'esnapshot rollback "%s@%s" 2>&1' % (self.zfs_filesystem,
                                                       snap.zfs_name)
        else:
            cmd = 'esbackup snap-restore -s %s@%s -d %s' % (
                self.zfs_filesystem, snap.zfs_name,
                ser.target_vm_disk_zfs_filesystem)
            if vm.node != target_vm.node:
                cmd += ' -H %s' % target_vm.node.address

            vm.set_notready()

        target_vm.set_notready()
        tid, err = execute(request,
                           target_vm.owner.id,
                           cmd,
                           meta=snap_meta(target_vm, msg, apiview, detail),
                           lock=lock,
                           callback=snap_callback(target_vm, snap),
                           queue=vm.node.fast_queue)

        if err:
            target_vm.revert_notready()
            if vm != target_vm:
                vm.revert_notready()
            return FailureTaskResponse(request, err, vm=target_vm)
        else:
            snap.save_status(snap.ROLLBACK)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=target_vm,
                                api_view=apiview,
                                detail=detail,
                                data=self.data)
Exemple #14
0
    def put(self):
        request, vm, action = self.request, self.vm, self.action

        # Cannot change status unless the VM is created on node
        if vm.status not in self.statuses:
            raise VmIsNotOperational

        if action not in self.actions:
            raise ExpectationFailed('Bad action')

        apiview = self.apiview
        f_ser = VmStatusFreezeSerializer(data=self.data)

        if f_ser.is_valid():
            freeze = apiview['freeze'] = f_ser.data['freeze']
            unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
        else:
            return FailureTaskResponse(request, f_ser.errors, vm=vm)

        if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
            pass

        elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
            res = {
                'message':
                'VM %s is already stopped. Changing status to frozen.' %
                vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
            res = {
                'message': 'Removing frozen status for VM %s.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        else:
            raise ExpectationFailed('Bad action')

        dc_settings = request.dc.settings

        if action in ('stop',
                      'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
            raise PreconditionRequired('Internal VM can\'t be stopped')

        lock = 'vm_status vm:%s' % vm.uuid
        stdin = None
        apiview['update'] = False
        transition_to_stopping = False

        if action == 'start':
            msg = LOG_START
            ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

            if not ser.is_valid():
                return FailureTaskResponse(request, ser.errors, vm=vm)

            if ser.data and ser.iso:
                if not request.user.is_admin(request) and vm.is_installed() and \
                        (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                    raise PreconditionRequired('VM is not installed')

                msg = LOG_START_ISO
                iso = ser.iso
                cmd = self._start_cmd(iso=iso,
                                      iso2=ser.iso2,
                                      once=ser.data['cdimage_once'])
            else:
                iso = None
                cmd = self._start_cmd()

            ser_update = VmStatusUpdateJSONSerializer(data=self.data)

            if ser_update.is_valid():
                if vm.json_changed():
                    apiview['update'] = ser_update.data['update']
                    logger.info('VM %s json != json_active', vm)

                    if apiview['update']:
                        from api.vm.base.vm_manage import VmManage
                        stdin, os_cmd = VmManage.fix_update(vm.json_update())
                        stdin = stdin.dump()

                        if os_cmd:  # Dangerous, explicit update needed
                            # TODO: fix in gui
                            raise PreconditionRequired(
                                'VM must be updated first')

                        if iso:
                            msg = LOG_START_UPDATE_ISO
                        else:
                            msg = LOG_START_UPDATE

                        cmd_update = 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; ' % (
                            vm.uuid, vm.uuid)
                        cmd = cmd_update + cmd + '; exit $e'
                        # logger.info('VM %s json_active is going to be updated with json """%s"""', vm, stdin)
                    else:
                        logger.warning('VM %s json_active update disabled', vm)

            else:
                return FailureTaskResponse(request, ser_update.errors, vm=vm)

        else:
            force = ForceSerializer(data=self.data, default=False).is_true()
            cmd = self._action_cmd(action, force=force)

            if action == 'reboot':
                msg = LOG_REBOOT
            else:
                msg = LOG_STOP

            if force:
                apiview['force'] = True

                if action == 'reboot':
                    msg = LOG_REBOOT_FORCE
                else:
                    lock += ' force'
                    msg = LOG_STOP_FORCE

            elif vm.status == Vm.STOPPING:
                raise VmIsNotOperational(
                    'VM is already stopping; try to use force')
            else:
                transition_to_stopping = True

        meta = {
            'output': {
                'returncode': 'returncode',
                'stderr': 'message',
                'stdout': 'json'
            },
            'replace_stderr': ((vm.uuid, vm.hostname), ),
            'detail': self.detail,
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           stdin=stdin,
                           meta=meta,
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            if transition_to_stopping:
                vm.save_status(Vm.STOPPING)

            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=self.detail,
                                data=self.data,
                                api_data={
                                    'status': vm.status,
                                    'status_display': vm.status_display()
                                })