Example #1
    def _check_img(self):
        img = self.img

        if img.status != img.OK:
            raise ExpectationFailed('Image status is not OK')

        if img.get_ns_status(self.ns) != img.READY:
            raise ExpectationFailed('Image is not ready')
Example #2
def parse_yyyymm(yyyymm, min_value):
    """Process the yyyymm string and return (yyyymm, since, until, current_month) tuple consisting of:
    - validated yyyymm string,
    - since and until datetime objects,
    - current_month boolean.

    Used in SLA views.
    """
    # noinspection PyBroadException
    try:
        yyyymm = str(yyyymm)
        since = datetime(year=int(yyyymm[:4]), month=int(yyyymm[4:]), day=1)
    except Exception:
        raise InvalidInput('Invalid yyyymm')

    now = datetime.now()
    yyyymm = since.strftime('%Y%m')
    current_month = now.strftime('%Y%m') == yyyymm

    if current_month:
        until = now
    else:
        until = since + relativedelta(months=+1)

    if until < min_value or since > now:
        raise ExpectationFailed('Monitoring data not available')

    return yyyymm, since, until, current_month
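
A minimal usage sketch of parse_yyyymm(); the argument values and the min_value cut-off are illustrative assumptions, not taken from the project:

# Hypothetical call: validate "January 2024" against a monitoring-data
# cut-off of 2023-01-01 (both values made up for this sketch; assumes the
# call happens after January 2024, otherwise ExpectationFailed is raised).
from datetime import datetime

yyyymm, since, until, current_month = parse_yyyymm('202401', datetime(2023, 1, 1))
# yyyymm        == '202401'
# since         == datetime(2024, 1, 1, 0, 0)
# until         == since + 1 month, or datetime.now() while January 2024 is the current month
# current_month == True only when the call happens during January 2024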
Example #3
    def validate_update(vm, json_update, os_cmd):
        """Check if (json_update, os_cmd) tuple from fix_update() can be run on a VM"""
        # cmd = zfs set... >&2;
        if os_cmd and vm.snapshot_set.exists():
            raise ExpectationFailed('VM has snapshots')

        return True
Example #4
    def _set_record(self):
        request = self.request
        record_id = self.record_id

        # Check IsSuperAdmin or IsDomainOwner permissions in get_domain
        self.domain = get_domain(request, self.domain_name, exists_ok=True, noexists_fail=True)

        # Records for slave domains cannot be modified
        if request.method != 'GET' and self.domain.type in (Domain.SLAVE, Domain.SUPERSLAVE):
            raise ExpectationFailed(_('Changing DNS records is not allowed for %s domain') % self.domain.type)

        if record_id is None:  # Get many
            records = self.data.get('records', None)
            qs = self.domain.record_set.select_related('domain').order_by(*self.order_by)

            if records is None:
                self.record = qs
            else:
                if not isinstance(records, (tuple, list)):
                    raise InvalidInput('Invalid records')
                self.record = qs.filter(id__in=records)
        else:
            if record_id == 0:  # New record
                self.record = Record(domain=self.domain)
            else:  # Existing record
                self.record = get_object(request, Record, {'domain': self.domain, 'id': record_id}, sr=('domain',),
                                         noexists_fail=True)
Example #5
 def _check_snap_size_limit(self):
     """Issue #chili-848"""
     limit = self.vm.snapshot_size_quota_value
     if limit is not None:
         total = Snapshot.get_total_vm_size(self.vm)
         if total >= limit:
             raise ExpectationFailed('VM snapshot size limit reached')
Example #6
    def put(self, vm, data):
        """Revert json_active (undo). Problematic attributes:
            - hostname  - handled by revert_active() + change requires some post configuration
            - alias     - handled by revert_active()
            - owner     - handled by revert_active()
            - template  - handled by revert_active()
            - monitored - handled by revert_active(), but mon_vm_sync task must be run via vm_define_reverted signal
            - tags      - won't be reverted (not saved in json)
            - nics.*.ip - ip reservation is fixed via vm_update_ipaddress_usage()
            - nics.*.dns + ptr - known bug - won't be reverted
        """
        if vm.is_notcreated():
            raise VmIsNotOperational('VM is not created')

        if vm.json == vm.json_active:
            raise ExpectationFailed('VM definition unchanged')

        if vm.tasks:
            raise VmHasPendingTasks

        # Prerequisites
        vm.hostname_is_valid_fqdn(
            cache=False
        )  # Cache vm._fqdn hostname/domain pair and find dns record
        hostname = vm.hostname  # Save hostname configured in DB

        # The magic happens here: get_diff() will run vm.revert_active() and return a diff
        vm_diff = VmDefineView(self.request).get_diff(vm, full=True)

        # Save VM
        hostname_changed = hostname != vm.hostname
        vm.unlock()  # vm saving was locked by revert_active()
        vm.save(update_hostname=hostname_changed,
                update_node_resources=True,
                update_storage_resources=True)

        # Generate response
        detail = 'Reverted VM configuration from %s.\n%s' % (
            vm.changed.strftime('%Y-%m-%d %H:%M:%S%z'),
            self.nice_diff(vm_diff))
        vm_diff['reverted_from'] = vm.changed

        res = SuccessTaskResponse(self.request,
                                  vm_diff,
                                  detail=detail,
                                  msg=LOG_DEF_REVERT,
                                  vm=vm)

        # Post-save stuff
        task_id = TaskID(res.data.get('task_id'), request=self.request)
        vm_update_ipaddress_usage(vm)
        vm_define_reverted.send(task_id, vm=vm)  # Signal!

        if hostname_changed:
            VmDefineHostnameChanged(self.request, vm,
                                    hostname).send()  # Task event for GUI

        return res
Example #7
    def put(self):
        if 'note' in self.data:
            # Changing snapshot note instead of rollback (not logging)
            return self._update_note()

        request, vm, snap = self.request, self.vm, self.snap

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        if vm.locked:
            raise VmIsLocked

        self._check_vm_status()
        self._check_snap_status()
        apiview, detail = self._get_apiview_detail()

        apiview['force'] = bool(ForceSerializer(data=self.data, default=True))

        if not apiview['force']:
            snaplast = Snapshot.objects.only('id').filter(
                vm=vm, disk_id=snap.disk_id).order_by('-id')[0]
            if snap.id != snaplast.id:
                raise ExpectationFailed('VM has more recent snapshots')

        if vm.status != vm.STOPPED:
            raise VmIsNotOperational('VM is not stopped')

        if vm.tasks:
            raise VmHasPendingTasks

        msg = LOG_SNAP_UPDATE
        lock = self.LOCK % (vm.uuid, snap.disk_id)
        cmd = 'esnapshot rollback "%s@%s" 2>&1' % (self.zfs_filesystem,
                                                   snap.zfs_name)
        vm.set_notready()
        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           meta=snap_meta(vm, msg, apiview, detail),
                           lock=lock,
                           callback=snap_callback(vm, snap),
                           queue=vm.node.fast_queue)

        if err:
            vm.revert_notready()
            return FailureTaskResponse(request, err, vm=vm)
        else:
            snap.save_status(snap.ROLLBACK)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=detail,
                                data=self.data)
Example #8
    def create(self, vm, snap):
        """Create [POST] image from VM snapshot (ImageAdmin).

        This is always a DC-bound task, but the task_id has a DC_UNBOUND task group flag,
        because socket.io will inform any admin regardless of the current admin DC.
        The callback is responsible for attaching the image to the current DC.
        """
        img, data, request = self.img, self.data, self.request

        assert request.dc == vm.dc

        if vm.uuid in settings.VMS_INTERNAL:  # Bug #chili-792
            raise PreconditionRequired('Internal VM can\'t be used for creating images')

        data.pop('dc_bound', None)  # Default DC binding cannot be changed when creating Image for the first time
        img.dc_bound = vm.dc        # Default DC binding set to VM DC (cannot be changed, ^^^)
        img.ostype = vm.ostype      # Default ostype inherited from VM (cannot be changed)
        img.size = snap.disk_size   # Default disk size inherited from VM (cannot be changed)
        img.owner = request.user    # Default user (can be changed)
        img.alias = img.name        # Default alias (can be changed)
        img.status = Image.OK       # Set status for preliminary checks
        # Validate data (manifest info)
        ser = ImageSerializer(request, img, data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, dc_bound=self.dc_bound)

        # Preliminary checks
        self._run_checks(img_server_must_exist=True)  # This sets self.img_server to ImageVm()

        if vm.status not in (vm.RUNNING, vm.STOPPED, vm.STOPPING, vm.FROZEN):
            raise VmIsNotOperational

        if snap.status != snap.OK:
            raise ExpectationFailed('VM snapshot status is not OK')

        # Build manifest and set PENDING status
        # noinspection PyUnusedLocal
        data = ser.data
        img.manifest = img.build_manifest()
        img.status = Image.PENDING
        img.src_vm = vm
        img.src_snap = snap
        img.save()
        # Set snapshot status to PENDING
        snap.save_status(snap.PENDING)
        # Build command
        cmd_add = ' ; e=$?; cat %s/%s/manifest 2>&1; exit $e' % (self.img_server.datasets_dir, img.uuid)
        cmd = 'esimg create -s %s@%s' % (snap.zfs_filesystem, snap.zfs_name)

        if self.img_server.node != vm.node:
            cmd += ' -H %s' % vm.node.address

        return self._run_execute(LOG_IMAGE_CREATE, cmd, stdin=img.manifest.dump(), delete_on_error=True, vm=vm,
                                 snap=snap, error_fun=lambda: snap.save_status(snap.OK), detail_dict=ser.detail_dict(),
                                 cmd_add=cmd_add)
Example #9
    def _check_snap_dc_size_limit(self):
        """Issue #chili-848"""
        limit = self.vm.dc.settings.VMS_VM_SNAPSHOT_DC_SIZE_LIMIT

        if limit is not None:
            limit = int(limit)
            total = Snapshot.get_total_dc_size(self.vm.dc)

            if total >= limit:
                raise ExpectationFailed('DC snapshot size limit reached')
Example #10
    def _check_bkp_dc_size_limit(self):
        """Issue #chili-848"""
        bkp = self.bkp
        limit = bkp.dc.settings.VMS_VM_BACKUP_DC_SIZE_LIMIT

        if limit is not None:
            limit = int(limit)
            total = Backup.get_total_dc_size(bkp.dc)

            if total >= limit:
                raise ExpectationFailed('DC backup size limit reached')
Example #11
 def _check_snap_size_limit(self):
     """Issue #chili-848"""
     try:
         limit = int(self.vm.json_active['internal_metadata']
                     ['snapshot_size_limit'])
     except (TypeError, KeyError, IndexError):
         pass
     else:
         total = Snapshot.get_total_vm_size(self.vm)
         if total >= limit:
             raise ExpectationFailed('VM snapshot size limit reached')
Example #12
 def _check_snap_limit(self):
     try:
         limit = int(
             self.vm.json_active['internal_metadata'][self.limit_key])
     except (TypeError, KeyError, IndexError):
         pass
     else:
         # TODO: check indexes
         total = Snapshot.objects.filter(vm=self.vm,
                                         type=self.snaptype).count()
         if total >= limit:
             raise ExpectationFailed('VM snapshot limit reached')
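
For orientation, a sketch of the json_active layout the two limit checks above read; the key names come from the snippets, while the values and the concrete limit_key are illustrative assumptions:

# Illustrative only -- shows why TypeError/KeyError/IndexError are caught:
# 'internal_metadata' or the specific limit key may be missing or malformed.
json_active = {
    'internal_metadata': {
        'snapshot_size_limit': '10737418240',  # parsed with int() in _check_snap_size_limit
        # the per-type snapshot count limit addressed by self.limit_key would live here as well
    },
}
limit = int(json_active['internal_metadata']['snapshot_size_limit'])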
Example #13
    def delete(self):
        """Delete multiple snapshots"""
        request, data, vm = self.request, self.data, self.vm

        disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
        # Parse data['snapnames']
        snaps, __ = get_snapshots(request, vm, real_disk_id, data)

        self._check_vm_status()

        snaps_lost = snaps.filter(status=Snapshot.LOST)
        msg = LOG_SNAPS_DELETE

        if snaps_lost:
            _result = {'message': 'Snapshots successfully deleted from DB'}
            _detail = "snapnames='%s', disk_id=%s" % (','.join(i.name for i in snaps_lost), disk_id)
            snaps_lost.delete()
            res = SuccessTaskResponse(request, _result, msg=msg, vm=vm, detail=_detail)
            snaps = snaps.filter(status=Snapshot.OK)  # Work with OK snapshots from now on

            if not snaps:
                return res

        elif any(i.status != Snapshot.OK for i in snaps):
            raise ExpectationFailed('VM snapshot status is not OK')

        # Task type (a = automatic, e = manual)
        if getattr(request, 'define_id', None):
            tt = TT_AUTO
        else:
            tt = TT_EXEC

        snapnames = [i.name for i in snaps]
        _apiview_ = {'view': 'vm_snapshot_list', 'method': request.method,
                     'hostname': vm.hostname, 'disk_id': disk_id, 'snapnames': snapnames}
        _detail_ = "snapnames='%s', disk_id=%s" % (','.join(snapnames), disk_id)

        snap_ids = [snap.id for snap in snaps]
        zfs_names = ','.join([snap.zfs_name for snap in snaps])
        lock = self.LOCK % (vm.uuid, real_disk_id)
        cmd = 'esnapshot destroy "%s@%s" 2>&1' % (zfs_filesystem, zfs_names)
        callback = ('api.vm.snapshot.tasks.vm_snapshot_list_cb', {'vm_uuid': vm.uuid, 'snap_ids': snap_ids})

        tid, err = execute(request, vm.owner.id, cmd, meta=snap_meta(vm, msg, _apiview_, _detail_), lock=lock,
                           callback=callback, queue=vm.node.fast_queue, tt=tt)
        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            snaps.update(status=Snapshot.PENDING)
            return TaskResponse(request, tid, msg=msg, vm=vm, api_view=_apiview_, detail=_detail_, data=self.data)
Example #14
    def choose_node(self, vm):
        """Used by POST vm_manage when node needs to be chosen automatically"""
        new_node = Node.choose(vm)
        err = 'Could not find node with free resources'

        if not new_node:
            raise ExpectationFailed(err)

        old_request = self.request
        self.request = set_request_method(old_request, 'PUT')

        try:
            res = self.put(vm, {'node': new_node.hostname})
        finally:
            self.request = old_request

        if res.status_code != scode.HTTP_200_OK:
            try:
                err = res.data['result']['node']
            except Exception as e:
                logger.exception(e)
            raise ExpectationFailed(err)

        return new_node
Example #15
    def delete(self):
        domain = self.domain

        for dc in Dc.objects.all():
            if dc.settings.VMS_VM_DOMAIN_DEFAULT == domain.name:
                raise ExpectationFailed(_('Default VM domain cannot be deleted'))

        owner = domain.owner
        obj = domain.log_list

        # unlink TSIG keys that were defined for this domain
        for linked_key in TsigKey.get_linked_axfr_keys(domain):
            linked_key.unlink_axfr_domain(domain)

        domain.delete()

        return SuccessTaskResponse(self.request, None, obj=obj, owner=owner, msg=LOG_DOMAIN_DELETE, dc_bound=False)
Example #16
    def delete(self):
        dc, domain = self.request.dc, self.domain

        if dc.settings.VMS_VM_DOMAIN_DEFAULT == domain.name:
            raise ExpectationFailed(
                _('Default VM domain cannot be removed from datacenter'))

        ser = DomainSerializer(self.request, domain)
        DomainDc.objects.filter(dc=dc, domain_id=domain.id).delete()
        res = SuccessTaskResponse(self.request,
                                  None,
                                  obj=domain,
                                  detail_dict=ser.detail_dict(),
                                  msg=LOG_DOMAIN_DETACH)
        self._remove_dc_binding(res)

        return res
Example #17
    def import_for_vm(cls, request, ns, img, vm):
        """Import image required by VM. Return block_key or raise a FailedDependency API Exception (424)."""
        node = ns.node
        logger.warn(
            'Image %s required for VM %s must be imported to node=%s, zpool=%s',
            img.name, vm, node, ns.zpool)
        img_ns_status = img.get_ns_status(ns)

        if img_ns_status == img.DELETING:  # Someone is currently removing the image from node pool
            # We can't do anything about this
            raise ExpectationFailed(
                'Required disk image is processed by another task')

        block_key = img.get_block_key(ns)

        if img_ns_status == img.IMPORTING:
            logger.warn(
                'Image %s is being imported to node=%s, zpool=%s; vm_manage will be blocked by block_key=%s',
                img, node, ns.zpool, block_key)
            return block_key

        req = set_request_method(request, 'POST')

        try:
            res = cls(req, ns, img, None).post()
        except Exception as ex:
            res = exception_handler(ex, req)
            if res is None:
                raise ex
            res.exception = True

        if res.status_code in (200, 201):
            logger.warn(
                'POST node_image(%s, %s, %s) was successful: %s; task will be blocked by block_key=%s',
                node.hostname, ns.zpool, img.name, res.data, block_key)
            return block_key
        else:
            logger.error(
                'POST node_image(%s, %s, %s) failed: %s (%s): %s; raising 424 API exception',
                node.hostname, ns.zpool, img.name, res.status_code,
                res.status_text, res.data)
            errmsg = get_task_error_message(res.data)
            raise FailedDependency(
                'Cannot import required image %s to node %s (%s: %s)' %
                (img.name, node.hostname, res.status_code, errmsg))
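
A hedged sketch of how the returned block_key is consumed: the caller forwards it to execute(), as the vm_manage examples further down do, so the VM task queues behind the pending image import on the node worker (the class name and surrounding variables here are placeholders):

# Sketch only -- mirrors the execute(..., block_key=block_key) pattern used
# in the vm_manage PUT/POST examples below; NodeImageView is a placeholder name.
block_key = NodeImageView.import_for_vm(request, ns, img, vm)  # may raise FailedDependency (424)
tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=lock,
                   callback=callback, queue=vm.node.slow_queue, block_key=block_key)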
Example #18
    def delete(self):
        domain = self.domain

        for dc in Dc.objects.all():
            if dc.settings.VMS_VM_DOMAIN_DEFAULT == domain.name:
                raise ExpectationFailed(
                    _('Default VM domain cannot be deleted'))

        owner = domain.owner
        obj = domain.log_list
        domain.delete()

        return SuccessTaskResponse(self.request,
                                   None,
                                   obj=obj,
                                   owner=owner,
                                   msg=LOG_DOMAIN_DELETE,
                                   dc_bound=False)
Example #19
    def get(self):
        request, vm, graph = self.request, self.vm, self.graph_type

        if not vm.is_zabbix_sync_active():
            raise ExpectationFailed('VM monitoring disabled')

        if vm.status not in vm.STATUS_OPERATIONAL:
            raise VmIsNotOperational

        try:
            graph_settings = GRAPH_ITEMS.get_options(graph, vm)
        except KeyError:
            raise InvalidInput('Invalid graph')
        else:
            required_ostype = graph_settings.get('required_ostype', None)

            if required_ostype is not None and vm.ostype not in required_ostype:
                raise InvalidInput('Invalid OS type')

        if graph.startswith(('nic-', 'net-')):
            ser_class = NetworkVmMonHistorySerializer
        elif graph.startswith(('disk-', 'hdd-', 'fs-')):
            ser_class = DiskVmMonHistorySerializer
        else:
            ser_class = MonHistorySerializer

        ser = ser_class(obj=self.vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        return call_mon_history_task(request,
                                     t_mon_vm_history,
                                     view_fun_name='mon_vm_history',
                                     obj=self.vm,
                                     dc_bound=True,
                                     serializer=ser,
                                     data=self.data,
                                     graph=graph,
                                     graph_settings=graph_settings)
Example #20
    def put(self):  # noqa: R701
        request, vm, action = self.request, self.vm, self.action

        # Cannot change status unless the VM is created on node
        if vm.status not in self.statuses and action != 'current':
            raise VmIsNotOperational

        if action not in self.actions:
            raise ExpectationFailed('Bad action')

        apiview = self.apiview
        f_ser = VmStatusFreezeSerializer(data=self.data)

        if f_ser.is_valid():
            freeze = apiview['freeze'] = f_ser.data['freeze']
            unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
        else:
            return FailureTaskResponse(request, f_ser.errors, vm=vm)

        if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
            pass

        elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
            res = {
                'message':
                'VM %s is already stopped. Changing status to frozen.' %
                vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
            res = {
                'message': 'Removing frozen status for VM %s.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'current':
            # Limit the PUT /current/ action to Admins and SuperAdmins
            if not request.user.is_admin(request):
                raise PermissionDenied

            if vm.status in self.statuses_force_change_allowed:
                return self.get_current_status(force_change=True)
            elif vm.status in self.stuck_statuses_force_change_allowed:
                if vm.tasks:
                    raise VmHasPendingTasks
                else:
                    return self.get_current_status(force_change=True)
            else:
                raise VmIsNotOperational

        else:
            raise ExpectationFailed('Bad action')

        dc_settings = request.dc.settings

        if action in ('stop',
                      'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
            raise PreconditionRequired('Internal VM can\'t be stopped')

        lock = 'vm_status vm:%s' % vm.uuid
        stdin = None
        apiview['update'] = False
        transition_to_stopping = False

        # The update parameter is used by all actions (start, stop, reboot)
        ser_update = VmStatusUpdateJSONSerializer(data=self.data,
                                                  default=(action
                                                           in ('start',
                                                               'reboot')))

        if not ser_update.is_valid():
            return FailureTaskResponse(request, ser_update.errors, vm=vm)

        if vm.json_changed():
            apiview['update'] = ser_update.data['update']
            logger.info('VM %s json != json_active', vm)

            if not apiview['update']:
                logger.info('VM %s json_active update disabled', vm)

        if action == 'start':
            ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

            if not ser.is_valid():
                return FailureTaskResponse(request, ser.errors, vm=vm)

            if ser.data and ser.iso:
                if not request.user.is_admin(request) and vm.is_installed() and \
                        (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                    raise PreconditionRequired('VM is not installed')

                msg = LOG_START_ISO
                iso = ser.iso
                cmd = self._start_cmd(iso=iso,
                                      iso2=ser.iso2,
                                      once=ser.data['cdimage_once'])
            else:
                msg = LOG_START
                iso = None
                cmd = self._start_cmd()

            if apiview['update']:
                if vm.tasks:
                    raise VmHasPendingTasks

                cmd, stdin = self._add_update_cmd(cmd, os_cmd_allowed=False)

                if iso:
                    msg = LOG_START_UPDATE_ISO
                else:
                    msg = LOG_START_UPDATE

        else:
            ser_stop_reboot = VmStatusStopSerializer(request,
                                                     vm,
                                                     data=self.data)

            if not ser_stop_reboot.is_valid():
                return FailureTaskResponse(request,
                                           ser_stop_reboot.errors,
                                           vm=vm)

            update = apiview.get('update',
                                 False)  # VmStatusUpdateJSONSerializer
            force = apiview['force'] = ser_stop_reboot.data.get('force', False)
            timeout = ser_stop_reboot.data.get('timeout', None)

            if not force and timeout:
                apiview['timeout'] = timeout

            if update:
                if vm.tasks:
                    raise VmHasPendingTasks

                # This will always perform a vmadm stop command, followed by a vmadm update command and optionally
                # followed by a vmadm start command (reboot)
                pre_cmd = self._action_cmd('stop',
                                           force=force,
                                           timeout=timeout)

                if action == 'reboot':
                    if force:
                        msg = LOG_REBOOT_FORCE_UPDATE
                    else:
                        msg = LOG_REBOOT_UPDATE

                    post_cmd = self._action_cmd('start')
                else:
                    if force:
                        msg = LOG_STOP_FORCE_UPDATE
                    else:
                        msg = LOG_STOP_UPDATE

                    post_cmd = ''

                cmd, stdin = self._add_update_cmd(post_cmd,
                                                  os_cmd_allowed=True,
                                                  pre_cmd=pre_cmd)
            else:
                cmd = self._action_cmd(action, force=force, timeout=timeout)

                if force:
                    if action == 'reboot':
                        msg = LOG_REBOOT_FORCE
                    else:
                        lock += ' force'
                        msg = LOG_STOP_FORCE
                else:
                    if action == 'reboot':
                        msg = LOG_REBOOT
                    else:
                        msg = LOG_STOP

            if vm.status == Vm.STOPPING:
                if update:
                    raise PreconditionRequired(
                        'Cannot perform update while VM is stopping')
                if not force:
                    raise VmIsNotOperational(
                        'VM is already stopping; try to use force')
            else:
                transition_to_stopping = True

        meta = {
            'output': {
                'returncode': 'returncode',
                'stderr': 'message',
                'stdout': 'json'
            },
            'replace_stderr': ((vm.uuid, vm.hostname), ),
            'detail': self.detail,
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           stdin=stdin,
                           meta=meta,
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            if transition_to_stopping:
                vm.save_status(Vm.STOPPING)

            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=self.detail,
                                data=self.data,
                                api_data={
                                    'status': vm.status,
                                    'status_display': vm.status_display()
                                })
Example #21
    def put(self):
        request, vm, action = self.request, self.vm, self.action

        # Cannot change status unless the VM is created on node
        if vm.status not in self.statuses:
            raise VmIsNotOperational

        if action not in self.actions:
            raise ExpectationFailed('Bad action')

        apiview = self.apiview
        f_ser = VmStatusFreezeSerializer(data=self.data)

        if f_ser.is_valid():
            freeze = apiview['freeze'] = f_ser.data['freeze']
            unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
        else:
            return FailureTaskResponse(request, f_ser.errors, vm=vm)

        if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
            pass

        elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
            res = {
                'message':
                'VM %s is already stopped. Changing status to frozen.' %
                vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
            res = {
                'message': 'Removing frozen status for VM %s.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        else:
            raise ExpectationFailed('Bad action')

        dc_settings = request.dc.settings

        if action in ('stop',
                      'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
            raise PreconditionRequired('Internal VM can\'t be stopped')

        lock = 'vm_status vm:%s' % vm.uuid
        stdin = None
        apiview['update'] = False
        transition_to_stopping = False

        if action == 'start':
            msg = LOG_START
            ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

            if not ser.is_valid():
                return FailureTaskResponse(request, ser.errors, vm=vm)

            if ser.data and ser.iso:
                if not request.user.is_admin(request) and vm.is_installed() and \
                        (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                    raise PreconditionRequired('VM is not installed')

                msg = LOG_START_ISO
                iso = ser.iso
                cmd = self._start_cmd(iso=iso,
                                      iso2=ser.iso2,
                                      once=ser.data['cdimage_once'])
            else:
                iso = None
                cmd = self._start_cmd()

            ser_update = VmStatusUpdateJSONSerializer(data=self.data)

            if ser_update.is_valid():
                if vm.json_changed():
                    apiview['update'] = ser_update.data['update']
                    logger.info('VM %s json != json_active', vm)

                    if apiview['update']:
                        from api.vm.base.vm_manage import VmManage
                        stdin, os_cmd = VmManage.fix_update(vm.json_update())
                        stdin = stdin.dump()

                        if os_cmd:  # Dangerous, explicit update needed
                            # TODO: fix in gui
                            raise PreconditionRequired(
                                'VM must be updated first')

                        if iso:
                            msg = LOG_START_UPDATE_ISO
                        else:
                            msg = LOG_START_UPDATE

                        cmd_update = 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; ' % (
                            vm.uuid, vm.uuid)
                        cmd = cmd_update + cmd + '; exit $e'
                        # logger.info('VM %s json_active is going to be updated with json """%s"""', vm, stdin)
                    else:
                        logger.warning('VM %s json_active update disabled', vm)

            else:
                return FailureTaskResponse(request, ser_update.errors, vm=vm)

        else:
            force = ForceSerializer(data=self.data, default=False).is_true()
            cmd = self._action_cmd(action, force=force)

            if action == 'reboot':
                msg = LOG_REBOOT
            else:
                msg = LOG_STOP

            if force:
                apiview['force'] = True

                if action == 'reboot':
                    msg = LOG_REBOOT_FORCE
                else:
                    lock += ' force'
                    msg = LOG_STOP_FORCE

            elif vm.status == Vm.STOPPING:
                raise VmIsNotOperational(
                    'VM is already stopping; try to use force')
            else:
                transition_to_stopping = True

        meta = {
            'output': {
                'returncode': 'returncode',
                'stderr': 'message',
                'stdout': 'json'
            },
            'replace_stderr': ((vm.uuid, vm.hostname), ),
            'detail': self.detail,
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           stdin=stdin,
                           meta=meta,
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            if transition_to_stopping:
                vm.save_status(Vm.STOPPING)

            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=self.detail,
                                data=self.data,
                                api_data={
                                    'status': vm.status,
                                    'status_display': vm.status_display()
                                })
Example #22
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            stdin = stdin.dump()

            # cmd = zfs set... >&2;
            if cmd1 and vm.snapshot_set.exists():
                raise ExpectationFailed('VM has snapshots')

            # final cmd
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=vm.node.fast_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:
                vm.revert_notready()
Example #23
    def post(self):
        request, vm = self.request, self.vm
        ser = VmCreateSerializer(data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if not vm.is_kvm():
            if not (vm.dc.settings.VMS_VM_SSH_KEYS_DEFAULT or vm.owner.usersshkey_set.exists()):
                raise PreconditionRequired('VM owner has no SSH keys available')

        apiview = self.apiview
        # noinspection PyTypeChecker
        cmd = 'vmadm create >&2; e=$? %s; vmadm get %s 2>/dev/null; vmadm start %s >&2; exit $e' % (
            self.fix_create(vm), vm.uuid, vm.uuid)

        recreate = apiview['recreate'] = ser.data['recreate']
        # noinspection PyAugmentAssignment
        if recreate:
            # recreate should be available to every vm owner
            if not (request.user and request.user.is_authenticated()):
                raise PermissionDenied

            if vm.locked:
                raise VmIsLocked

            if vm.status != vm.STOPPED:
                raise VmIsNotOperational('VM is not stopped')

            if not ser.data['force']:
                raise ExpectationFailed('Are you sure?')

            msg = LOG_VM_RECREATE
            # noinspection PyAugmentAssignment
            cmd = 'vmadm delete ' + vm.uuid + ' >&2 && sleep 1; ' + cmd

        elif vm.status == vm.NOTCREATED:
            # only admin
            if not (request.user and request.user.is_admin(request)):
                raise PermissionDenied

            if not vm.node:  # we need to find a node for this vm now
                logger.debug('VM %s has no compute node defined. Choosing node automatically', vm)
                VmDefineView(request).choose_node(vm)
                logger.info('New compute node %s for VM %s was chosen automatically.', vm.node, vm)

            msg = LOG_VM_CREATE

        else:
            raise VmIsNotOperational('VM is already created')

        # Check boot flag (KVM) or disk image (OS) (bug #chili-418)
        if not vm.is_bootable():
            raise PreconditionRequired('VM has no bootable disk')

        if vm.tasks:
            raise VmHasPendingTasks

        old_status = vm.status
        deploy = apiview['deploy'] = vm.is_deploy_needed()
        resize = apiview['resize'] = vm.is_resize_needed()

        if not vm.is_blank():
            vm.set_root_pw()

        # Set new status also for blank VM (where deployment is not needed)
        # This status will be changed in vm_status_event_cb (if everything goes well).
        vm.status = vm.CREATING
        vm.save()  # save status / node / vnc_port / root_pw

        stdin = vm.fix_json(deploy=deploy, resize=resize, recreate=recreate).dump()
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),),
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_create_cb', {'vm_uuid': vm.uuid})
        err = True

        try:
            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, vm.json_get_disks())
            logger.debug('Creating new VM %s on node %s with json: """%s"""', vm, vm.node, stdin)
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, expires=VM_VM_EXPIRES, lock=self.lock,
                               callback=callback, queue=vm.node.slow_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                # Inform user about creating
                vm_status_changed(tid, vm, vm.CREATING, save_state=False)
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:  # Revert old status
                vm.status = old_status
                vm.save_status()
Example #24
    def put(self):
        if 'note' in self.data:  # Changing backup note instead of restore (not logging!)
            return self.save_note()

        ser = BackupRestoreSerializer(data=self.data)
        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors)

        self._check_bkp()
        self._check_bkp_node()

        # Prepare vm for restore
        request, bkp = self.request, self.bkp

        vm = get_vm(request,
                    ser.data['target_hostname_or_uuid'],
                    exists_ok=True,
                    noexists_fail=True,
                    check_node_status=None)

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        if vm.locked:
            raise VmIsLocked

        if not vm.has_compatible_brand(bkp.vm_brand):
            raise PreconditionRequired('VM brand mismatch')

        disk_id, real_disk_id, zfs_filesystem = get_disk_id(
            request, vm, self.data, key='target_disk_id', default=None)
        tgt_disk = vm.json_active_get_disks()[disk_id - 1]

        if tgt_disk['size'] != bkp.disk_size:
            raise PreconditionRequired('Disk size mismatch')

        target_ns = vm.get_node_storage(real_disk_id)
        # The backup is first restored to a temporary dataset, so the target storage needs at least as much
        # free space as the backup size (which we don't know -> so we pessimistically use the backup disk size)
        if bkp.disk_size > target_ns.storage.size_free:
            raise PreconditionRequired(
                'Not enough free space on target storage')

        if not ser.data['force'] and Snapshot.objects.only('id').filter(
                vm=vm, disk_id=real_disk_id).exists():
            raise ExpectationFailed('VM has snapshots')

        if vm.status != vm.STOPPED:
            raise VmIsNotOperational(_('VM is not stopped'))

        if vm.tasks:
            raise VmHasPendingTasks

        self.msg = LOG_BKP_UPDATE
        self.obj = vm
        # Cache apiview and detail
        # noinspection PyUnusedLocal
        apiview = self.apiview  # noqa: F841
        # noinspection PyUnusedLocal
        detail = self.detail  # noqa: F841
        self._detail_ += ", target_hostname='%s', target_disk_id=%s" % (
            vm.hostname, disk_id)
        self._apiview_['target_hostname'] = vm.hostname
        self._apiview_['target_disk_id'] = disk_id
        self._apiview_['force'] = ser.data['force']

        if bkp.vm:
            self._apiview_['source_hostname'] = bkp.vm.hostname
        else:
            self._apiview_['source_hostname'] = ''

        vm.set_notready()

        if self.execute(get_backup_cmd('restore',
                                       bkp,
                                       zfs_filesystem=zfs_filesystem,
                                       vm=vm),
                        lock=self.LOCK % (vm.uuid, disk_id)):
            bkp.save_status(bkp.RESTORE)
            return self.task_response

        vm.revert_notready()
        return self.error_response
Example #25
 def _check_bkp(self, lost_ok=False):
     if not (self.bkp.status == Backup.OK or
             (lost_ok and self.bkp.status == Backup.LOST)):
         raise ExpectationFailed('VM backup status is not OK')
Example #26
 def _check_img(self):
     if self.img.status != Image.OK:
         raise ExpectationFailed('Image status is not OK')
Example #27
 def _check_bkp(self):
     if any(bkp.status != Backup.OK for bkp in self.bkps):
         raise ExpectationFailed('VM backup status is not OK')
Example #28
    def put(self):
        if 'note' in self.data:
            # Changing snapshot note instead of rollback (not logging)
            return self._update_note()

        request, vm, snap = self.request, self.vm, self.snap

        ser = SnapshotRestoreSerializer(request, vm, data=self.data)
        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors)

        target_vm, target_vm_disk_id = ser.target_vm, ser.target_vm_disk_id

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        if target_vm.locked:
            raise VmIsLocked

        if target_vm != vm:
            if target_vm.node.status not in target_vm.node.STATUS_OPERATIONAL:
                raise NodeIsNotOperational

            self._check_vm_status(vm=target_vm)

            if not vm.has_compatible_brand(target_vm.brand):
                raise PreconditionRequired('VM brand mismatch')

            source_disk = vm.json_active_get_disks()[self.disk_id - 1]
            target_disk = target_vm.json_active_get_disks()[target_vm_disk_id -
                                                            1]

            if target_disk['size'] != source_disk['size']:
                raise PreconditionRequired('Disk size mismatch')

        self._check_vm_status()
        self._check_snap_status()
        apiview, detail = self._get_apiview_detail()
        apiview['force'] = ser.data['force']

        if target_vm != vm:
            detail += ", source_hostname='%s', target_hostname='%s', target_disk_id=%s" % (
                vm.hostname, target_vm.hostname, target_vm_disk_id)
            apiview['source_hostname'] = vm.hostname
            apiview['target_hostname'] = target_vm.hostname
            apiview['target_disk_id'] = target_vm_disk_id

            if not apiview['force']:
                if Snapshot.objects.only('id').filter(
                        vm=target_vm,
                        disk_id=ser.target_vm_real_disk_id).exists():
                    raise ExpectationFailed('Target VM has snapshots')

        elif not apiview['force']:
            snaplast = Snapshot.objects.only('id').filter(
                vm=vm, disk_id=snap.disk_id).order_by('-id')[0]
            if snap.id != snaplast.id:
                raise ExpectationFailed('VM has more recent snapshots')

        if target_vm.status != vm.STOPPED:
            raise VmIsNotOperational('VM is not stopped')

        if target_vm.tasks:
            raise VmHasPendingTasks

        msg = LOG_SNAP_UPDATE
        lock = self.LOCK % (vm.uuid, snap.disk_id)

        if target_vm == vm:
            cmd = 'esnapshot rollback "%s@%s" 2>&1' % (self.zfs_filesystem,
                                                       snap.zfs_name)
        else:
            cmd = 'esbackup snap-restore -s %s@%s -d %s' % (
                self.zfs_filesystem, snap.zfs_name,
                ser.target_vm_disk_zfs_filesystem)
            if vm.node != target_vm.node:
                cmd += ' -H %s' % target_vm.node.address

            vm.set_notready()

        target_vm.set_notready()
        tid, err = execute(request,
                           target_vm.owner.id,
                           cmd,
                           meta=snap_meta(target_vm, msg, apiview, detail),
                           lock=lock,
                           callback=snap_callback(target_vm, snap),
                           queue=vm.node.fast_queue)

        if err:
            target_vm.revert_notready()
            if vm != target_vm:
                vm.revert_notready()
            return FailureTaskResponse(request, err, vm=target_vm)
        else:
            snap.save_status(snap.ROLLBACK)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=target_vm,
                                api_view=apiview,
                                detail=detail,
                                data=self.data)
Example #29
    def _check_snap_status(self, lost_ok=False):
        assert self.zfs_filesystem == self.snap.zfs_filesystem

        if not (self.snap.status == Snapshot.OK or
                (lost_ok and self.snap.status == Snapshot.LOST)):
            raise ExpectationFailed('VM snapshot status is not OK')