Example #1
    def get_current_status(self, force_change=False):
        """Get current VM status"""
        request, vm = self.request, self.vm

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        apiview = self.apiview
        msg = LOG_STATUS_GET
        cmd = 'vmadm list -p -H -o state uuid=' + vm.uuid
        lock = 'vm_status_current vm:%s' % vm.uuid
        meta = {
            'output': {
                'returncode': 'returncode',
                'stdout': 'stdout',
                'stderr': 'stderr',
                'hostname': vm.hostname
            },
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_current_cb', {
            'vm_uuid': vm.uuid,
            'force_change': force_change
        })

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           meta=meta,
                           callback=callback,
                           queue=vm.node.fast_queue,
                           lock=lock,
                           lock_timeout=5,
                           nolog=True)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request,
                                tid,
                                vm=vm,
                                api_view=apiview,
                                data=self.data)  # No msg
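All of these examples follow the same contract around execute(): build a shell command, a meta dict that maps the worker's returncode/stdout/stderr onto task result fields, an optional lock and callback, and then branch on the returned (tid, err) pair. A minimal sketch of that shared skeleton, assuming the same helpers (execute, TaskResponse, FailureTaskResponse) that the examples use; the method name and the callback path 'api.example.tasks.example_cb' are placeholders, not part of the real API:

    def run_vm_command(self, msg, cmd):
        """Minimal sketch of the shared execute() pattern (placeholder names)."""
        request, vm = self.request, self.vm
        # meta['output'] maps worker output onto named task result fields
        meta = {
            'output': {'returncode': 'returncode', 'stdout': 'stdout', 'stderr': 'stderr'},
            'msg': msg,
            'vm_uuid': vm.uuid,
        }
        # Callback task to run after the command finishes (placeholder path)
        callback = ('api.example.tasks.example_cb', {'vm_uuid': vm.uuid})
        tid, err = execute(request, vm.owner.id, cmd, meta=meta,
                           callback=callback, queue=vm.node.fast_queue)
        if err:  # The task could not be created
            return FailureTaskResponse(request, err, vm=vm)
        return TaskResponse(request, tid, msg=msg, vm=vm, data=self.data)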
Example #2
    def _run_execute(self, msg, cmd, stdin=None, detail_dict=None, block_key=None, **apiview_kwargs):
        request, master_vm, slave_vm, repname = self.request, self.vm, self.slave_vm, self.slave_vm.name

        if detail_dict is None:
            detail_dict = {'repname': repname}

        # Prepare task data
        apiview = {
            'view': self._api_view_,
            'method': request.method,
            'hostname': master_vm.hostname,
            'repname': repname
        }
        apiview.update(apiview_kwargs)
        meta = {
            'output': {'returncode': 'returncode', 'stdout': 'jsons', 'stderr': 'message'},
            'replace_stdout': ((master_vm.uuid, master_vm.hostname), (slave_vm.uuid, repname)),
            'msg': msg,
            'vm_uuid': master_vm.uuid,
            'slave_vm_uuid': slave_vm.uuid,
            'apiview': apiview,
        }

        lock = 'vm_replica vm:%s' % master_vm.uuid
        callback = (
            'api.vm.replica.tasks.%s_cb' % self._api_view_,
            {'vm_uuid': master_vm.uuid, 'slave_vm_uuid': slave_vm.uuid}
        )
        cmd = cmd.format(
            master_uuid=master_vm.uuid,
            slave_uuid=slave_vm.uuid,
            master_node=master_vm.node.address,
            id=slave_vm.rep_id,
        )

        self._check_slave_vm_node()
        # Execute task
        tid, err = execute(request, master_vm.owner.id, cmd, meta=meta, lock=lock, callback=callback, stdin=stdin,
                           queue=slave_vm.node.fast_queue, block_key=block_key)

        if err:
            return FailureTaskResponse(request, err, vm=master_vm)
        else:
            self._success = True
            return TaskResponse(request, tid, msg=msg, vm=master_vm, api_view=apiview,
                                detail_dict=detail_dict, data=self.data)
Example #3
    def get_current_status(self):
        """Get current VM status"""
        request, vm = self.request, self.vm

        if vm.status not in (Vm.RUNNING, Vm.STOPPED, Vm.STOPPING, Vm.ERROR):
            raise VmIsNotOperational

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        apiview = self.apiview
        msg = LOG_STATUS_GET
        cmd = 'vmadm list -p -H -o state,zoneid uuid=' + vm.uuid
        meta = {
            'output': {
                'returncode': 'returncode',
                'stdout': 'stdout',
                'stderr': 'stderr',
                'hostname': vm.hostname
            },
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_current_cb', {
            'vm_uuid': vm.uuid
        })

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           meta=meta,
                           callback=callback,
                           queue=vm.node.fast_queue,
                           nolog=True)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request,
                                tid,
                                vm=vm,
                                api_view=apiview,
                                data=self.data)  # No msg
Example #4
    def delete(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        if vm.uuid == ImageVm.get_uuid():
            raise VmIsLocked('VM is image server')

        if vm.locked:
            raise VmIsLocked

        if vm.status not in (vm.STOPPED, vm.FROZEN):
            raise VmIsNotOperational('VM is not stopped')

        if vm.tasks:
            raise VmHasPendingTasks

        apiview = self.apiview
        msg = LOG_VM_DELETE
        cmd = 'vmadm delete ' + vm.uuid
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message'},
            'replace_text': ((vm.uuid, vm.hostname),),
            'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_delete_cb', {'vm_uuid': vm.uuid})

        logger.debug('Deleting VM %s from compute node', vm)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=self.lock, expires=VM_VM_EXPIRES,
                               callback=callback, queue=vm.node.slow_queue)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:
                vm.revert_notready()
Example #5
def run_node_img_sources_sync(node,
                              new_img_sources=None,
                              node_img_sources=None):
    """
    Update imgadm sources on a compute node.
    Always called by node_sysinfo_cb after the sysinfo data is processed (even if no node data has changed).
    """
    if new_img_sources is None:
        image_vm = ImageVm()
        new_img_sources = image_vm.sources

    if node_img_sources is not None and new_img_sources == node_img_sources:
        logger.info(
            'Image sources already synced for node %s - skipping update', node)
        return

    logger.warning(
        'Image sources are not synchronized on node %s - creating imgadm sources synchronization task',
        node)
    stdin = ImageVm.get_imgadm_conf(new_img_sources).dump()
    cmd = 'cat /dev/stdin > /var/imgadm/imgadm.conf'
    lock = 'node %s imgadm_sources' % node.uuid

    tid, err = execute(ERIGONES_TASK_USER,
                       None,
                       cmd,
                       stdin=stdin,
                       callback=False,
                       lock=lock,
                       queue=node.fast_queue,
                       expires=180,
                       nolog=True,
                       tg=TG_DC_UNBOUND,
                       ping_worker=False,
                       check_user_tasks=False)
    if err:
        logger.error(
            'Got error (%s) when running task %s for updating imgadm sources on node %s',
            err, tid, node)
    else:
        logger.info('Created task %s for updating imgadm sources on node %s',
                    tid, node)
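Example #5 above, together with Examples #6, #9 and #17 below, shows the internal variant of the same pattern: tasks started by the system itself rather than by an API request. These pass ERIGONES_TASK_USER instead of a request object, disable task logging and user-task bookkeeping, and either skip the callback (callback=False) and log the (tid, err) result, or hand the raw tuple back to the caller. A condensed sketch under those assumptions; the command string and lock name are illustrative:

    # Internal/system task: no request object and no user task log entry;
    # the outcome is only logged, never returned to an API client.
    tid, err = execute(ERIGONES_TASK_USER, None, 'uptime',
                       callback=False,
                       lock='node %s example' % node.uuid,  # illustrative lock name
                       queue=node.fast_queue,
                       expires=180,
                       nolog=True,
                       tg=TG_DC_UNBOUND,
                       ping_worker=False,
                       check_user_tasks=False)
    if err:
        logger.error('Got error (%s) when running task %s on node %s', err, tid, node)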
Example #6
def run_node_authorized_keys_sync():
    """
    Create and update authorized_keys on every compute node.
    """
    dc1_settings = DefaultDc().settings  # erigones.conf.settings

    if not dc1_settings.VMS_NODE_SSH_KEYS_SYNC:
        logger.warning('Node authorized_keys synchronization is disabled!')
        return

    nodes = Node.objects.all().order_by('hostname')
    # Create one authorized_keys list
    authorized_keys = [node.sshkey for node in nodes if node.sshkey]

    # Add user specified compute node SSH keys
    authorized_keys.extend(dc1_settings.VMS_NODE_SSH_KEYS_DEFAULT)

    # Save the authorized_keys file on every compute node to persistent /usbkey/config.inc/ and /root/.ssh locations
    files = '/usbkey/config.inc/authorized_keys /root/.ssh/authorized_keys'
    cmd = 'tee %s; chmod 640 %s' % (files, files)
    stdin = '\n'.join(authorized_keys)

    for node in nodes:
        if node.authorized_keys == stdin:
            logger.info('authorized_keys already synced for node %s - skipping update', node)
            continue

        # We update authorized_keys only on online nodes
        # But we will also run this whenever node status is changed to online
        if not node.is_online():
            logger.warning('Excluding node %s from updating authorized_keys because it is not in online state', node)
            continue

        lock = 'node %s authorized_keys' % node.uuid
        cb = ('api.node.sshkey.tasks.node_authorized_keys_sync_cb', {'node_uuid': node.uuid})
        tid, err = execute(ERIGONES_TASK_USER, None, cmd, stdin=stdin, callback=cb, lock=lock, queue=node.fast_queue,
                           expires=180, nolog=True, tg=TG_DC_UNBOUND, ping_worker=False, check_user_tasks=False)
        if err:
            logger.error('Got error (%s) when running task %s for updating authorized_keys on node %s', err, tid, node)
        else:
            logger.info('Created task %s for updating authorized_keys on node %s', tid, node)
Example #7
    def delete(self):
        self._check_vm_status()
        self._check_snap_status(lost_ok=True)
        request, vm, snap = self.request, self.vm, self.snap
        apiview, detail = self._get_apiview_detail()
        msg = LOG_SNAP_DELETE

        if snap.status == Snapshot.LOST:
            snap.delete()
            res = {'message': 'Snapshot successfully deleted from DB'}
            return SuccessTaskResponse(request,
                                       res,
                                       msg=msg,
                                       vm=vm,
                                       detail=detail)

        SnapshotSerializer(request, snap)
        lock = self.LOCK % (vm.uuid, snap.disk_id)
        cmd = 'esnapshot destroy "%s@%s" 2>&1' % (self.zfs_filesystem,
                                                  snap.zfs_name)
        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           meta=snap_meta(vm, msg, apiview, detail),
                           lock=lock,
                           callback=snap_callback(vm, snap),
                           queue=vm.node.fast_queue,
                           tt=self.tt)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            snap.save_status(snap.PENDING)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=detail,
                                data=self.data)
Example #8
    def post(self):
        request, vm = self.request, self.vm

        if not self.vm.is_kvm():
            raise OperationNotSupported

        if vm.status not in (vm.RUNNING, vm.STOPPING):
            raise VmIsNotOperational

        apiview = {'view': 'vm_screenshot', 'method': request.method, 'hostname': vm.hostname}
        cmd = 'vmadm sysrq %s nmi >&2 && sleep 0.5 && vmadm sysrq %s screenshot >&2 && \
cat /%s/%s/root/tmp/vm.ppm' % (vm.uuid, vm.uuid, vm.zpool, vm.uuid)
        lock = 'vm_screenshot vm:%s' % vm.uuid
        meta = {'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'image'},
                'replace_stderr': ((vm.uuid, vm.hostname),),
                'encode_stdout': True, 'compress_stdout': True, 'apiview': apiview}
        callback = ('api.vm.other.tasks.vm_screenshot_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, callback=callback, queue=vm.node.fast_queue)
        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request, tid, vm=vm, api_view=apiview, data=self.data)  # No msg
Example #9
def vm_deploy(vm, force_stop=False):
    """
    Internal API call used for finishing VM deploy,
    i.e. cleaning the json and starting the VM.
    """
    if force_stop:  # VM is running without OS -> stop
        cmd = 'vmadm stop %s -F >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (
            vm.uuid, vm.uuid)
    else:  # VM is stopped and deployed -> start
        cmd = 'vmadm start %s >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (
            vm.uuid, vm.uuid)

    msg = 'Deploy server'
    lock = 'vmadm deploy ' + vm.uuid
    meta = {
        'output': {
            'returncode': 'returncode',
            'stderr': 'message',
            'stdout': 'json'
        },
        'replace_stderr': ((vm.uuid, vm.hostname), ),
        'msg': msg,
        'vm_uuid': vm.uuid
    }
    callback = ('api.vm.base.tasks.vm_deploy_cb', {'vm_uuid': vm.uuid})

    return execute(ERIGONES_TASK_USER,
                   None,
                   cmd,
                   meta=meta,
                   lock=lock,
                   callback=callback,
                   queue=vm.node.fast_queue,
                   nolog=True,
                   ping_worker=False,
                   check_user_tasks=False)
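Note that vm_deploy() hands the raw (tid, err) pair from execute() straight back instead of wrapping it in a TaskResponse, so the caller has to branch itself. A hypothetical call site; the force_stop condition and log messages are illustrative:

    # Hypothetical caller of vm_deploy(); the branching mirrors the view methods above.
    tid, err = vm_deploy(vm, force_stop=(vm.status == vm.RUNNING))
    if err:
        logger.error('Deploy task for VM %s could not be created: %s', vm, err)
    else:
        logger.info('Created deploy task %s for VM %s', tid, vm)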
Example #10
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        node = vm.node
        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))
        queue = vm.node.fast_queue
        new_node_uuid = None
        detail_dict = {}

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            detail_dict['force'] = True
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None
            node_param = self.data.get('node')

            if node_param:
                if not request.user.is_staff:
                    raise PermissionDenied

                node = get_node(request, node_param, dc=request.dc, exists_ok=True, noexists_fail=True)

                if node.hostname == vm.node.hostname:
                    raise InvalidInput('VM already has the requested node set in DB')

                apiview['node'] = detail_dict['node'] = node.hostname
                queue = node.fast_queue
                new_node_uuid = node.uuid

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            self.validate_update(vm, stdin, cmd1)
            stdin = stdin.dump()

            # final cmd
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        # Check compute node status after we know which compute node the task is going to be run on
        # The internal vm.node.status checking is disabled in get_vm() in __init__
        if node.status != node.ONLINE:
            raise NodeIsNotOperational

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid, 'new_node_uuid': new_node_uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data,
                                    detail_dict=detail_dict)
        finally:
            if err:
                vm.revert_notready()
Example #11
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            stdin = stdin.dump()

            # cmd = zfs set... >&2;
            if cmd1 and vm.snapshot_set.exists():
                raise ExpectationFailed('VM has snapshots')

            # final cmd
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=vm.node.fast_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:
                vm.revert_notready()
Example #12
    def post(self):
        request, vm = self.request, self.vm
        ser = VmCreateSerializer(data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if not vm.is_kvm():
            if not (vm.dc.settings.VMS_VM_SSH_KEYS_DEFAULT or vm.owner.usersshkey_set.exists()):
                raise PreconditionRequired('VM owner has no SSH keys available')

        apiview = self.apiview
        # noinspection PyTypeChecker
        cmd = 'vmadm create >&2; e=$?; %s vmadm get %s 2>/dev/null; vmadm start %s >&2; exit $e' % (
            self.fix_create(vm), vm.uuid, vm.uuid)

        recreate = apiview['recreate'] = ser.data['recreate']
        # noinspection PyAugmentAssignment
        if recreate:
            # recreate should be available to every vm owner
            if not (request.user and request.user.is_authenticated()):
                raise PermissionDenied

            if vm.locked:
                raise VmIsLocked

            if vm.status != vm.STOPPED:
                raise VmIsNotOperational('VM is not stopped')

            if not ser.data['force']:
                raise ExpectationFailed('Are you sure?')

            msg = LOG_VM_RECREATE
            # noinspection PyAugmentAssignment
            cmd = 'vmadm delete ' + vm.uuid + ' >&2 && sleep 1; ' + cmd

        elif vm.status == vm.NOTCREATED:
            # only admin
            if not (request.user and request.user.is_admin(request)):
                raise PermissionDenied

            if not vm.node:  # we need to find a node for this vm now
                logger.debug('VM %s has no compute node defined. Choosing node automatically', vm)
                VmDefineView(request).choose_node(vm)
                logger.info('New compute node %s for VM %s was chosen automatically.', vm.node, vm)

            msg = LOG_VM_CREATE

        else:
            raise VmIsNotOperational('VM is already created')

        # Check boot flag (KVM) or disk image (OS) (bug #chili-418)
        if not vm.is_bootable():
            raise PreconditionRequired('VM has no bootable disk')

        if vm.tasks:
            raise VmHasPendingTasks

        old_status = vm.status
        deploy = apiview['deploy'] = vm.is_deploy_needed()
        resize = apiview['resize'] = vm.is_resize_needed()

        if not vm.is_blank():
            vm.set_root_pw()

        # Set new status also for blank VM (where deployment is not needed)
        # This status will be changed in vm_status_event_cb (if everything goes well).
        vm.status = vm.CREATING
        vm.save()  # save status / node / vnc_port / root_pw

        stdin = vm.fix_json(deploy=deploy, resize=resize, recreate=recreate).dump()
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),),
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_create_cb', {'vm_uuid': vm.uuid})
        err = True

        try:
            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, vm.json_get_disks())
            logger.debug('Creating new VM %s on node %s with json: """%s"""', vm, vm.node, stdin)
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, expires=VM_VM_EXPIRES, lock=self.lock,
                               callback=callback, queue=vm.node.slow_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                # Inform user about creating
                vm_status_changed(tid, vm, vm.CREATING, save_state=False)
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:  # Revert old status
                vm.status = old_status
                vm.save_status()
Example #13
    def put(self):
        request, vm, action = self.request, self.vm, self.action

        # Cannot change status unless the VM is created on node
        if vm.status not in self.statuses:
            raise VmIsNotOperational

        if action not in self.actions:
            raise ExpectationFailed('Bad action')

        apiview = self.apiview
        f_ser = VmStatusFreezeSerializer(data=self.data)

        if f_ser.is_valid():
            freeze = apiview['freeze'] = f_ser.data['freeze']
            unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
        else:
            return FailureTaskResponse(request, f_ser.errors, vm=vm)

        if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
            pass

        elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
            res = {
                'message': 'VM %s is already stopped. Changing status to frozen.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
            res = {
                'message': 'Removing frozen status for VM %s.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        else:
            raise ExpectationFailed('Bad action')

        dc_settings = request.dc.settings

        if action in ('stop', 'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
            raise PreconditionRequired('Internal VM can\'t be stopped')

        lock = 'vm_status vm:%s' % vm.uuid
        stdin = None
        apiview['update'] = False
        transition_to_stopping = False

        if action == 'start':
            msg = LOG_START
            ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

            if not ser.is_valid():
                return FailureTaskResponse(request, ser.errors, vm=vm)

            if ser.data and ser.iso:
                if not request.user.is_admin(request) and vm.is_installed() and \
                        (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                    raise PreconditionRequired('VM is not installed')

                msg = LOG_START_ISO
                iso = ser.iso
                cmd = self._start_cmd(iso=iso,
                                      iso2=ser.iso2,
                                      once=ser.data['cdimage_once'])
            else:
                iso = None
                cmd = self._start_cmd()

            ser_update = VmStatusUpdateJSONSerializer(data=self.data)

            if ser_update.is_valid():
                if vm.json_changed():
                    apiview['update'] = ser_update.data['update']
                    logger.info('VM %s json != json_active', vm)

                    if apiview['update']:
                        from api.vm.base.vm_manage import VmManage
                        stdin, os_cmd = VmManage.fix_update(vm.json_update())
                        stdin = stdin.dump()

                        if os_cmd:  # Dangerous, explicit update needed
                            # TODO: fix in gui
                            raise PreconditionRequired(
                                'VM must be updated first')

                        if iso:
                            msg = LOG_START_UPDATE_ISO
                        else:
                            msg = LOG_START_UPDATE

                        cmd_update = 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; ' % (
                            vm.uuid, vm.uuid)
                        cmd = cmd_update + cmd + '; exit $e'
                        # logger.info('VM %s json_active is going to be updated with json """%s"""', vm, stdin)
                    else:
                        logger.warning('VM %s json_active update disabled', vm)

            else:
                return FailureTaskResponse(request, ser_update.errors, vm=vm)

        else:
            force = ForceSerializer(data=self.data, default=False).is_true()
            cmd = self._action_cmd(action, force=force)

            if action == 'reboot':
                msg = LOG_REBOOT
            else:
                msg = LOG_STOP

            if force:
                apiview['force'] = True

                if action == 'reboot':
                    msg = LOG_REBOOT_FORCE
                else:
                    lock += ' force'
                    msg = LOG_STOP_FORCE

            elif vm.status == Vm.STOPPING:
                raise VmIsNotOperational(
                    'VM is already stopping; try to use force')
            else:
                transition_to_stopping = True

        meta = {
            'output': {
                'returncode': 'returncode',
                'stderr': 'message',
                'stdout': 'json'
            },
            'replace_stderr': ((vm.uuid, vm.hostname), ),
            'detail': self.detail,
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           stdin=stdin,
                           meta=meta,
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            if transition_to_stopping:
                vm.save_status(Vm.STOPPING)

            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=self.detail,
                                data=self.data,
                                api_data={
                                    'status': vm.status,
                                    'status_display': vm.status_display()
                                })
Example #14
    def put(self):
        assert self.request.dc.is_default()

        ser = UpdateSerializer(self.request, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(self.request,
                                       ser.errors,
                                       dc_bound=False)

        node = self.node
        version = ser.data['version']
        key = ser.data.get('key')
        cert = ser.data.get('cert')
        del node.system_version  # Request latest version in next command
        node_version = node.system_version

        if not (isinstance(node_version, text_type) and node_version):
            raise NodeIsNotOperational(
                'Node version information could not be retrieved')

        node_version = node_version.split(':')[-1]  # remove edition prefix

        if version == ('v' + node_version) and not ser.data.get('force'):
            raise PreconditionRequired('Node is already up-to-date')

        if node.status != node.OFFLINE:
            raise NodeIsNotOperational(
                'Unable to perform update on node that is not in maintenance state'
            )

        if node_version.startswith('2.'):
            # Old-style (pre 3.0) update mechanism
            return self._update_v2(version, key=key, cert=cert)

        # Upload key and cert and get command array
        worker = node.worker(Q_FAST)
        update_cmd = worker_command('system_update_command',
                                    worker,
                                    version=version,
                                    key=key,
                                    cert=cert,
                                    force=ser.data.get('force'),
                                    timeout=10)

        if update_cmd is None:
            raise GatewayTimeout('Node worker is not responding')

        if not isinstance(update_cmd, list):
            raise PreconditionRequired(
                'Node update command could not be retrieved')

        msg = LOG_SYSTEM_UPDATE
        _apiview_ = {
            'view': 'system_node_update',
            'method': self.request.method,
            'hostname': node.hostname,
            'version': version,
        }
        meta = {
            'apiview': _apiview_,
            'msg': msg,
            'node_uuid': node.uuid,
            'output': {
                'returncode': 'returncode',
                'stdout': 'message'
            },
            'check_returncode': True,
        }
        lock = self.LOCK % node.hostname
        cmd = '%s 2>&1' % ' '.join(update_cmd)

        tid, err = execute(self.request,
                           node.owner.id,
                           cmd,
                           meta=meta,
                           lock=lock,
                           queue=node.fast_queue,
                           tg=TG_DC_UNBOUND)

        if err:
            return FailureTaskResponse(self.request, err, dc_bound=False)
        else:
            return TaskResponse(self.request,
                                tid,
                                msg=msg,
                                obj=node,
                                api_view=_apiview_,
                                data=self.data,
                                dc_bound=False,
                                detail_dict=ser.detail_dict(force_full=True))
Example #15
    def put(self):
        request, vm = self.request, self.vm

        if vm.locked:
            raise VmIsLocked

        if vm.status not in (vm.STOPPED, vm.RUNNING):
            raise VmIsNotOperational('VM is not stopped or running')

        if vm.json_changed():
            raise PreconditionRequired(
                'VM definition has changed; Update first')

        ser = VmMigrateSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if vm.tasks:
            raise VmHasPendingTasks

        err = True
        ghost_vm = None
        # Set VM to nonready (+"api lock")
        vm.set_notready()

        try:
            # Create a dummy/placeholder VM
            ghost_vm = ser.save_ghost_vm()

            # Possible node_image import task which will block this task on node worker
            block_key = ser.node_image_import()

            # We have a custom detail dict with all necessary api view parameters
            detail_dict = ser.detail_dict()

            # Prepare task data
            apiview = {
                'view': 'vm_migrate',
                'method': request.method,
                'hostname': vm.hostname
            }
            apiview.update(detail_dict)
            lock = 'vm_migrate vm:%s' % vm.uuid
            meta = {
                'output': {
                    'returncode': 'returncode',
                    'stderr': 'message',
                    'stdout': 'json'
                },
                'replace_stderr': ((vm.uuid, vm.hostname), ),
                'msg': LOG_MIGRATE,
                'vm_uuid': vm.uuid,
                'slave_vm_uuid': ghost_vm.uuid,
                'apiview': apiview,
            }
            callback = ('api.vm.migrate.tasks.vm_migrate_cb', {
                'vm_uuid': vm.uuid,
                'slave_vm_uuid': ghost_vm.uuid
            })

            # Execute task
            tid, err = execute(request,
                               vm.owner.id,
                               ser.esmigrate_cmd,
                               meta=meta,
                               lock=lock,
                               callback=callback,
                               queue=vm.node.fast_queue,
                               block_key=block_key)

            if err:  # Error, revert VM status, delete placeholder VM
                return FailureTaskResponse(request, err, vm=vm)
            else:  # Success, task is running
                return TaskResponse(request,
                                    tid,
                                    msg=LOG_MIGRATE,
                                    vm=vm,
                                    api_view=apiview,
                                    detail_dict=detail_dict,
                                    data=self.data)
        finally:
            if err:
                vm.revert_notready()
                if ghost_vm:
                    ghost_vm.delete()
Example #16
    def delete(self):
        """Delete multiple snapshots"""
        # TODO: not documented
        request, data, vm = self.request, self.data, self.vm

        disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
        # Parse data['snapnames']
        snaps, __ = get_snapshots(request, vm, real_disk_id, data)

        self._check_vm_status()

        snaps_lost = snaps.filter(status=Snapshot.LOST)
        msg = LOG_SNAPS_DELETE

        if snaps_lost:
            _result = {'message': 'Snapshots successfully deleted from DB'}
            _detail = "snapnames='%s', disk_id=%s" % (','.join(
                i.name for i in snaps_lost), disk_id)
            snaps_lost.delete()
            res = SuccessTaskResponse(request,
                                      _result,
                                      msg=msg,
                                      vm=vm,
                                      detail=_detail)
            snaps = snaps.filter(
                status=Snapshot.OK)  # Work with OK snapshots from now on

            if not snaps:
                return res

        elif any(i.status != Snapshot.OK for i in snaps):
            raise ExpectationFailed('VM snapshot status is not OK')

        # Task type (a = automatic, e = manual)
        if getattr(request, 'define_id', None):
            tt = TT_AUTO
        else:
            tt = TT_EXEC

        snapnames = [i.name for i in snaps]
        _apiview_ = {
            'view': 'vm_snapshot_list',
            'method': request.method,
            'hostname': vm.hostname,
            'disk_id': disk_id,
            'snapnames': snapnames
        }
        _detail_ = "snapnames='%s', disk_id=%s" % (','.join(snapnames),
                                                   disk_id)

        snap_ids = [snap.id for snap in snaps]
        zfs_names = ','.join([snap.zfs_name for snap in snaps])
        lock = self.LOCK % (vm.uuid, real_disk_id)
        cmd = 'esnapshot destroy "%s@%s" 2>&1' % (zfs_filesystem, zfs_names)
        callback = ('api.vm.snapshot.tasks.vm_snapshot_list_cb', {
            'vm_uuid': vm.uuid,
            'snap_ids': snap_ids
        })

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           meta=snap_meta(vm, msg, _apiview_, _detail_),
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue,
                           tt=tt)
        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            snaps.update(status=Snapshot.PENDING)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=_apiview_,
                                detail=_detail_,
                                data=self.data)
Example #17
def node_overlay_arp_file(task_id,
                          overlay_rule_name,
                          node_exclusive=None,
                          **kwargs):
    """
    Task for generating ARP files for VMs and nodes connected to a specific overlay.
    It is called by various signals (see below).
    """
    if node_exclusive:
        # update rules only on a specific compute node
        if node_exclusive.overlay_rules.get(overlay_rule_name,
                                            {}).get('arp_file'):
            overlay_nodes = [node_exclusive]
        else:
            return
    else:
        # list of nodes where the overlay rule is defined and uses the files search plugin
        overlay_nodes = [
            node for node in Node.objects.all()
            if node.overlay_rules.get(overlay_rule_name, {}).get('arp_file')
        ]
    # list of VM NICs (see VNIC namedtuple above) defined over the overlay
    overlay_vnics = list(
        _get_overlay_vm_vnics(
            overlay_rule_name,
            Vm.objects.select_related('node').filter(
                slavevm__isnull=True,
                node__in=overlay_nodes).exclude(status=Vm.NOTCREATED)))
    # Add list of Node VNICs which are defined over the overlay
    overlay_vnics += list(
        _get_overlay_node_vnics(overlay_rule_name, overlay_nodes))

    for node in overlay_nodes:
        # We update arp files only on online nodes
        # But we will also run this whenever node status is changed to online
        if not node.is_online():
            logger.warning(
                'Excluding node %s from updating arp file for overlay "%s" because it is not in online state',
                node, overlay_rule_name)
            continue

        overlay_arp_file = node.overlay_rules[overlay_rule_name]['arp_file']
        arp_table = json.dumps(_get_overlay_arp_table(node, overlay_rule_name,
                                                      overlay_vnics),
                               indent=2)
        cmd = ('cat /dev/stdin > {arp_file} && '
               'chmod 0400 {arp_file} && '
               'chown netadm:netadm {arp_file} && '
               'svcadm restart network/varpd').format(
                   arp_file=overlay_arp_file)
        lock = 'node:{node_uuid} overlay:{overlay_rule_name}'.format(
            node_uuid=node.uuid, overlay_rule_name=overlay_rule_name)
        queue = node.fast_queue

        tid, err = execute(ERIGONES_TASK_USER,
                           None,
                           cmd,
                           stdin=arp_table,
                           callback=False,
                           lock=lock,
                           queue=queue,
                           expires=300,
                           nolog=True,
                           tg=TG_DC_UNBOUND,
                           ping_worker=False,
                           check_user_tasks=False)
        if err:
            logger.error(
                'Got error (%s) when running task %s for updating overlay ARP file %s on node %s',
                err, tid, overlay_arp_file, node)
        else:
            logger.info(
                'Created task %s for updating overlay ARP file %s on node %s',
                tid, overlay_arp_file, node)
Example #18
    def put(self):
        if 'note' in self.data:
            # Changing snapshot note instead of rollback (not logging)
            return self._update_note()

        request, vm, snap = self.request, self.vm, self.snap

        ser = SnapshotRestoreSerializer(request, vm, data=self.data)
        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors)

        target_vm, target_vm_disk_id = ser.target_vm, ser.target_vm_disk_id

        if vm.node.status not in vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        if target_vm.locked:
            raise VmIsLocked

        if target_vm != vm:
            if target_vm.node.status not in target_vm.node.STATUS_OPERATIONAL:
                raise NodeIsNotOperational

            self._check_vm_status(vm=target_vm)

            if not vm.has_compatible_brand(target_vm.brand):
                raise PreconditionRequired('VM brand mismatch')

            source_disk = vm.json_active_get_disks()[self.disk_id - 1]
            target_disk = target_vm.json_active_get_disks()[target_vm_disk_id - 1]

            if target_disk['size'] != source_disk['size']:
                raise PreconditionRequired('Disk size mismatch')

        self._check_vm_status()
        self._check_snap_status()
        apiview, detail = self._get_apiview_detail()
        apiview['force'] = ser.data['force']

        if target_vm != vm:
            detail += ", source_hostname='%s', target_hostname='%s', target_disk_id=%s" % (
                vm.hostname, target_vm.hostname, target_vm_disk_id)
            apiview['source_hostname'] = vm.hostname
            apiview['target_hostname'] = target_vm.hostname
            apiview['target_disk_id'] = target_vm_disk_id

            if not apiview['force']:
                if Snapshot.objects.only('id').filter(
                        vm=target_vm,
                        disk_id=ser.target_vm_real_disk_id).exists():
                    raise ExpectationFailed('Target VM has snapshots')

        elif not apiview['force']:
            snaplast = Snapshot.objects.only('id').filter(
                vm=vm, disk_id=snap.disk_id).order_by('-id')[0]
            if snap.id != snaplast.id:
                raise ExpectationFailed('VM has more recent snapshots')

        if target_vm.status != vm.STOPPED:
            raise VmIsNotOperational('VM is not stopped')

        if target_vm.tasks:
            raise VmHasPendingTasks

        msg = LOG_SNAP_UPDATE
        lock = self.LOCK % (vm.uuid, snap.disk_id)

        if target_vm == vm:
            cmd = 'esnapshot rollback "%s@%s" 2>&1' % (self.zfs_filesystem,
                                                       snap.zfs_name)
        else:
            cmd = 'esbackup snap-restore -s %s@%s -d %s' % (
                self.zfs_filesystem, snap.zfs_name,
                ser.target_vm_disk_zfs_filesystem)
            if vm.node != target_vm.node:
                cmd += ' -H %s' % target_vm.node.address

            vm.set_notready()

        target_vm.set_notready()
        tid, err = execute(request,
                           target_vm.owner.id,
                           cmd,
                           meta=snap_meta(target_vm, msg, apiview, detail),
                           lock=lock,
                           callback=snap_callback(target_vm, snap),
                           queue=vm.node.fast_queue)

        if err:
            target_vm.revert_notready()
            if vm != target_vm:
                vm.revert_notready()
            return FailureTaskResponse(request, err, vm=target_vm)
        else:
            snap.save_status(snap.ROLLBACK)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=target_vm,
                                api_view=apiview,
                                detail=detail,
                                data=self.data)
Example #19
    def put(self):  # noqa: R701
        request, vm, action = self.request, self.vm, self.action

        # Cannot change status unless the VM is created on node
        if vm.status not in self.statuses and action != 'current':
            raise VmIsNotOperational

        if action not in self.actions:
            raise ExpectationFailed('Bad action')

        apiview = self.apiview
        f_ser = VmStatusFreezeSerializer(data=self.data)

        if f_ser.is_valid():
            freeze = apiview['freeze'] = f_ser.data['freeze']
            unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
        else:
            return FailureTaskResponse(request, f_ser.errors, vm=vm)

        if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
            pass

        elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
            res = {
                'message': 'VM %s is already stopped. Changing status to frozen.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
            res = {
                'message': 'Removing frozen status for VM %s.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'current':
            # Limit PUT /current/ action to be Admins and SuperAdmins
            if not request.user.is_admin(request):
                raise PermissionDenied

            if vm.status in self.statuses_force_change_allowed:
                return self.get_current_status(force_change=True)
            elif vm.status in self.stuck_statuses_force_change_allowed:
                if vm.tasks:
                    raise VmHasPendingTasks
                else:
                    return self.get_current_status(force_change=True)
            else:
                raise VmIsNotOperational

        else:
            raise ExpectationFailed('Bad action')

        dc_settings = request.dc.settings

        if action in ('stop', 'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
            raise PreconditionRequired('Internal VM can\'t be stopped')

        lock = 'vm_status vm:%s' % vm.uuid
        stdin = None
        apiview['update'] = False
        transition_to_stopping = False

        # The update parameter is used by all actions (start, stop, reboot)
        ser_update = VmStatusUpdateJSONSerializer(data=self.data,
                                                  default=(action in ('start', 'reboot')))

        if not ser_update.is_valid():
            return FailureTaskResponse(request, ser_update.errors, vm=vm)

        if vm.json_changed():
            apiview['update'] = ser_update.data['update']
            logger.info('VM %s json != json_active', vm)

            if not apiview['update']:
                logger.info('VM %s json_active update disabled', vm)

        if action == 'start':
            ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

            if not ser.is_valid():
                return FailureTaskResponse(request, ser.errors, vm=vm)

            if ser.data and ser.iso:
                if not request.user.is_admin(request) and vm.is_installed() and \
                        (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                    raise PreconditionRequired('VM is not installed')

                msg = LOG_START_ISO
                iso = ser.iso
                cmd = self._start_cmd(iso=iso,
                                      iso2=ser.iso2,
                                      once=ser.data['cdimage_once'])
            else:
                msg = LOG_START
                iso = None
                cmd = self._start_cmd()

            if apiview['update']:
                if vm.tasks:
                    raise VmHasPendingTasks

                cmd, stdin = self._add_update_cmd(cmd, os_cmd_allowed=False)

                if iso:
                    msg = LOG_START_UPDATE_ISO
                else:
                    msg = LOG_START_UPDATE

        else:
            ser_stop_reboot = VmStatusStopSerializer(request,
                                                     vm,
                                                     data=self.data)

            if not ser_stop_reboot.is_valid():
                return FailureTaskResponse(request,
                                           ser_stop_reboot.errors,
                                           vm=vm)

            update = apiview.get('update', False)  # VmStatusUpdateJSONSerializer
            force = apiview['force'] = ser_stop_reboot.data.get('force', False)
            timeout = ser_stop_reboot.data.get('timeout', None)

            if not force and timeout:
                apiview['timeout'] = timeout

            if update:
                if vm.tasks:
                    raise VmHasPendingTasks

                # This will always perform a vmadm stop command, followed by a vmadm update command and optionally
                # followed by a vmadm start command (reboot)
                pre_cmd = self._action_cmd('stop',
                                           force=force,
                                           timeout=timeout)

                if action == 'reboot':
                    if force:
                        msg = LOG_REBOOT_FORCE_UPDATE
                    else:
                        msg = LOG_REBOOT_UPDATE

                    post_cmd = self._action_cmd('start')
                else:
                    if force:
                        msg = LOG_STOP_FORCE_UPDATE
                    else:
                        msg = LOG_STOP_UPDATE

                    post_cmd = ''

                cmd, stdin = self._add_update_cmd(post_cmd,
                                                  os_cmd_allowed=True,
                                                  pre_cmd=pre_cmd)
            else:
                cmd = self._action_cmd(action, force=force, timeout=timeout)

                if force:
                    if action == 'reboot':
                        msg = LOG_REBOOT_FORCE
                    else:
                        lock += ' force'
                        msg = LOG_STOP_FORCE
                else:
                    if action == 'reboot':
                        msg = LOG_REBOOT
                    else:
                        msg = LOG_STOP

            if vm.status == Vm.STOPPING:
                if update:
                    raise PreconditionRequired(
                        'Cannot perform update while VM is stopping')
                if not force:
                    raise VmIsNotOperational(
                        'VM is already stopping; try to use force')
            else:
                transition_to_stopping = True

        meta = {
            'output': {
                'returncode': 'returncode',
                'stderr': 'message',
                'stdout': 'json'
            },
            'replace_stderr': ((vm.uuid, vm.hostname), ),
            'detail': self.detail,
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           stdin=stdin,
                           meta=meta,
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            if transition_to_stopping:
                vm.save_status(Vm.STOPPING)

            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=self.detail,
                                data=self.data,
                                api_data={
                                    'status': vm.status,
                                    'status_display': vm.status_display()
                                })
Example #20
    def execute(self, *args, **kwargs):
        self.task_id, self.error = execute(self.request, self.obj.owner.id,
                                           *args, **kwargs)
        if self.error:
            return None
        return self.task_id
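Example #20 wraps the same execute() call for handlers that keep the result on self instead of returning a response object. A hypothetical call site, assuming handler is an instance with request/obj already set and meta/queue built as in the earlier examples:

    # Hypothetical usage of the wrapper above; on failure the returned task id
    # is None and the error is kept on the handler.
    task_id = handler.execute('vmadm get %s 2>/dev/null' % handler.obj.uuid,
                              meta=meta, queue=queue)
    if task_id is None:
        logger.error('Task was not created: %s', handler.error)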