Example #1
    def delete(self):
        dc, request = self.dc, self.request

        if dc.is_default():
            raise PreconditionRequired(_('Default datacenter cannot be deleted'))
        if dc.dcnode_set.exists():
            raise PreconditionRequired(_('Datacenter has nodes'))  # implicitly covers DC backups as well
        if dc.vm_set.exists():
            raise PreconditionRequired(_('Datacenter has VMs'))
        if dc.backup_set.exists():
            raise PreconditionRequired(_('Datacenter has backups'))  # should already be covered by the dcnode check above

        dc_id = dc.id
        ser = self.serializer(request, dc)
        dc_bound_objects = dc.get_bound_objects()

        # After deleting a DC the current_dc is automatically set to DefaultDc by the on_delete db field parameter
        ser.object.delete()

        # Remove cached tasklog for this DC (DB tasklog entries will be removed automatically)
        delete_tasklog_cached(dc_id)

        res = SuccessTaskResponse(request, None)  # no msg => won't be logged

        # Every DC-bound object loses its DC => becomes DC-unbound
        task_id = res.data.get('task_id')

        # Update bound virt objects to be DC-unbound after DC removal
        for model, objects in dc_bound_objects.items():
            msg = LOG_VIRT_OBJECT_UPDATE_MESSAGES.get(model, None)
            if objects and msg:
                for obj in objects:
                    if obj.dc_bound:
                        # noinspection PyUnresolvedReferences
                        remove_dc_binding_virt_object(task_id, msg, obj, user=request.user, dc_id=DefaultDc.id)

        return res
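
The comment above about current_dc relies on Django's on_delete field argument: deleting the referenced row rewrites the foreign key on dependent rows instead of cascading. A minimal sketch of that mechanism, with hypothetical model and field names (and assuming the DefaultDc primary key is 1):

from django.db import models

class UserProfile(models.Model):
    # Deleting the referenced Dc rewrites current_dc to the field default
    # (assumed here to be the DefaultDc primary key) instead of cascading.
    current_dc = models.ForeignKey('Dc', on_delete=models.SET_DEFAULT,
                                   default=1, related_name='+')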
Example #2
    def put(self):
        """Update [PUT] image manifest in DB and on image server if needed.

        The task group is always DC unbound, but the current DC depends on the dc_bound flag:
            - dc_bound=False:   task DC is default DC
            - dc_bound=[DC]:    task DC is dc_bound DC
        The callback is responsible for restoring the active manifest if something goes wrong.
        """
        img = self.img
        ser = ImageSerializer(self.request, img, self.data, partial=True)
        img_backup = ser.create_img_backup()

        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors, dc_bound=self.dc_bound)

        # Preliminary checks
        self._run_checks()  # This sets self.img_server to ImageVm()
        ser_data = ser.data

        if ser.update_manifest:
            # Rebuild manifest
            img.manifest = img.build_manifest()

        if self.img_server and ser.update_manifest:
            img.status = Image.PENDING
            img.backup = img_backup
            img.save()

            return self._run_execute(LOG_IMAGE_UPDATE, 'esimg update', stdin=img.manifest.dump(),
                                     recover_on_error=img_backup, detail_dict=ser.detail_dict())
        else:
            # Just save new data
            img.manifest_active = img.manifest
            img.save()

            return SuccessTaskResponse(self.request, ser_data, obj=img, msg=LOG_IMAGE_UPDATE,
                                       detail_dict=ser.detail_dict(), dc_bound=self.dc_bound)
Example #3
    def post(self):
        dc, group = self.dc, self.role

        if group.dc_set.filter(id=dc.id).exists():
            raise ObjectAlreadyExists(model=Role)

        ser = self.serializer(self.request, group)
        group.dc_set.add(dc)
        res = SuccessTaskResponse(self.request,
                                  ser.data,
                                  obj=group,
                                  status=status.HTTP_201_CREATED,
                                  detail_dict=ser.detail_dict(),
                                  msg=LOG_GROUP_ATTACH)
        task_id = res.data.get('task_id')
        connection.on_commit(lambda: group_relationship_changed.send(
            task_id,
            group_name=group.name,
            dc_name=dc.name))  # Signal!
        self._remove_dc_binding(task_id)
        self._remove_user_dc_binding(task_id)
        self._update_affected_users()

        return res
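
The connection.on_commit(...) call above defers the signal until the surrounding database transaction commits, so listeners never observe uncommitted group membership. A minimal standalone sketch of the same pattern, with a stub signal standing in for the project's group_relationship_changed:

from django.db import transaction
from django.dispatch import Signal

group_relationship_changed = Signal()  # stub for the project's signal

def attach_group(task_id, group, dc):
    group.dc_set.add(dc)
    # Freeze the current values via default arguments; a bare closure would
    # late-bind the names and could see values mutated before commit.
    transaction.on_commit(
        lambda t=task_id, g=group.name, d=dc.name:
            group_relationship_changed.send(t, group_name=g, dc_name=d))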
Example #4
    def delete(self):
        self._check_bkp(lost_ok=True)
        self._check_bkp_node()

        bkp = self.bkp
        vm = bkp.vm  # Can be None, because this can be a backup for already deleted VM
        self.msg = LOG_BKP_DELETE

        if vm:
            self._check_vm(vm)
            self.obj = vm
        else:
            self.obj = bkp.node

        if bkp.status == Backup.LOST:
            bkp.delete()
            res = {'message': 'Backup successfully deleted from DB'}
            return SuccessTaskResponse(self.request, res, msg=LOG_BKP_DELETE, vm=vm, detail=self._detail())

        if self.execute(get_backup_cmd('delete', bkp), lock=self.LOCK % (bkp.vm_uuid, bkp.disk_id)):
            bkp.save_status(bkp.PENDING)
            return self.task_response

        return self.error_response
Example #5
    def delete(self, many=False):
        record = self.record

        if many:
            assert not self.record_id

            if not record:  # NOTE: truthiness check on the queryset may issue a SELECT count(*)
                raise ObjectNotFound(model=Record)

            msg = LOG_RECORDS_DELETE
            dd = {'records': [r.desc for r in record]}
        else:
            msg = LOG_RECORD_DELETE
            dd = {'record': record.desc}

        record.delete()

        return SuccessTaskResponse(self.request,
                                   None,
                                   obj=self.domain,
                                   msg=msg,
                                   detail_dict=self._fix_detail_dict(dd),
                                   task_id=self.task_id,
                                   dc_bound=False)
Example #6
    def delete(self, ns):
        """Delete node-storage"""
        ser = NodeStorageSerializer(self.request, ns)
        node = ns.node

        for vm in node.vm_set.all():
            if ns.zpool in vm.get_used_disk_pools():  # active + current
                raise PreconditionRequired(_('Storage is used by some VMs'))

        if node.is_backup:
            if ns.backup_set.exists():
                raise PreconditionRequired(
                    _('Storage is used by some VM backups'))

        obj = ns.log_list
        owner = ns.storage.owner
        ser.object.delete()  # Will delete Storage in post_delete

        return SuccessTaskResponse(self.request,
                                   None,
                                   obj=obj,
                                   owner=owner,
                                   msg=LOG_NS_DELETE,
                                   dc_bound=False)
Example #7
    def put(self, vm, nic_id, nics, nic, data):
        """Update VM nic definition"""
        ser = VmDefineNicSerializer(self.request,
                                    vm,
                                    nic.copy(),
                                    nic_id=nic_id,
                                    data=data,
                                    partial=True)

        if ser.is_valid():
            nics[nic_id].update(ser.jsondata)
            vm.resolvers = ser.resolvers
            vm.save_nics(nics, monitoring_ip=ser.get_monitoring_ip())
            res = SuccessTaskResponse(self.request,
                                      ser.data,
                                      vm=vm,
                                      detail='nic_id=' + str(nic_id + 1),  # user-facing NIC IDs are 1-based
                                      detail_dict=ser.detail_dict(),
                                      msg=LOG_NIC_UPDATE)
            ser.update_ip(res.data.get('task_id'))  # Always update ip.vm

            return res

        return FailureTaskResponse(self.request, ser.errors, vm=vm)
Example #8
    def get(self):
        request, data = self.request, self.data

        # Prepare filter dict
        snap_filter = {'zpool': self.ns}
        self.filter_snapshot_vm(snap_filter, data)
        filter_disk_id(None, snap_filter, data)
        filter_snap_type(snap_filter, data)
        filter_snap_define(snap_filter, data)

        # TODO: check indexes
        snapqs = Snapshot.objects.select_related(
            'vm', 'define',
            'zpool').filter(**snap_filter).order_by(*self.order_by)

        if self.full or self.extended:
            if snapqs:
                res = SnapshotSerializer(request, snapqs, many=True).data
            else:
                res = []
        else:
            res = list(snapqs.values_list('name', flat=True))

        return SuccessTaskResponse(request, res, dc_bound=False)
Example #9
    def get(self, many=False):
        if self.extended:
            self.serializer = ExtendedDcNodeSerializer

        if many or not self.hostname:
            if self.full or self.extended:
                if self.dcnode:
                    res = self.serializer(self.request, self.dcnode,
                                          many=True).data
                else:
                    res = []
            else:
                res = list(self.dcnode.values_list('node__hostname',
                                                   flat=True))
        else:
            if self.extended:
                self.dcnode.vms = self.node.vm_set.filter(
                    dc=self.request.dc).count()
                self.dcnode.real_vms = self.node.vm_set.filter(
                    dc=self.request.dc, slavevm__isnull=True).count()
            res = self.serializer(self.request, self.dcnode).data

        return SuccessTaskResponse(self.request, res)
Example #10
    def post(self):
        iso, request = self.iso, self.request

        if not request.user.is_staff:
            self.data.pop(
                'dc_bound', None
            )  # default DC binding cannot be changed when creating object

        iso.owner = request.user  # just a default
        iso.alias = re.sub(r'\.iso\s*$', '', iso.name)  # just a default
        iso.status = Iso.OK  # TODO: status is not used right now
        ser = IsoSerializer(request, iso, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request,
                                       ser.errors,
                                       obj=iso,
                                       dc_bound=False)

        ser.object.save()
        res = SuccessTaskResponse(request,
                                  ser.data,
                                  status=HTTP_201_CREATED,
                                  obj=iso,
                                  dc_bound=False,
                                  detail_dict=ser.detail_dict(),
                                  msg=LOG_ISO_CREATE)

        if iso.dc_bound:
            attach_dc_virt_object(res.data.get('task_id'),
                                  LOG_ISO_ATTACH,
                                  iso,
                                  iso.dc_bound,
                                  user=request.user)

        return res
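
The alias default above strips a trailing '.iso' (plus any trailing whitespace) from the image name. A quick worked example of that regex, with illustrative names:

import re

for name in ('rescuecd.iso', 'ubuntu-20.04.iso ', 'custom-image'):
    print(re.sub(r'\.iso\s*$', '', name))
# rescuecd
# ubuntu-20.04
# custom-image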
Example #11
    def get(self):
        return SuccessTaskResponse(self.request,
                                   self.repo,
                                   dc_bound=self.dc_bound)
Example #12
    def group_modify(self, update=False):
        group = self.group
        request = self.request

        if update:
            # Users no longer assigned to the group will be removed, so store the original set first;
            # the task log must be updated for each removed user so they can see they were removed from the group
            original_group_users = set(
                group.user_set.select_related('dc_bound', 'default_dc').all())
        else:
            group.alias = group.name  # just a default
            original_group_users = set()

        ser = self.serializer(request, group, data=self.data, partial=update)

        if not ser.is_valid():
            return FailureTaskResponse(request,
                                       ser.errors,
                                       obj=group,
                                       dc_bound=False)

        ser.save()
        if update:
            msg = LOG_GROUP_UPDATE
            status = HTTP_200_OK
        else:
            msg = LOG_GROUP_CREATE
            status = HTTP_201_CREATED

        connection.on_commit(lambda: group_relationship_changed.send(
            group_name=ser.object.name))
        res = SuccessTaskResponse(request,
                                  ser.data,
                                  status=status,
                                  obj=group,
                                  msg=msg,
                                  detail_dict=ser.detail_dict(),
                                  dc_bound=False)

        # let's get the task_id so we use the same one for each log message
        task_id = res.data.get('task_id')
        removed_users = None

        if group.dc_bound and not update:
            attach_dc_virt_object(task_id,
                                  LOG_GROUP_ATTACH,
                                  group,
                                  group.dc_bound,
                                  user=request.user)

        if ser.object._users_to_save is not None:
            # Update Users log that are attached to group
            current_users = set(ser.object._users_to_save)
            added_users = current_users - original_group_users
            removed_users = original_group_users - current_users
            affected_users = current_users.symmetric_difference(
                original_group_users)

            # Remove user.dc_bound flag for newly added users if the group is attached to multiple DCs,
            # or to one DC that is different from user.dc_bound
            if added_users:
                group_dcs_count = group.dc_set.count()

                if group_dcs_count >= 1:
                    if group_dcs_count == 1:
                        dc = group.dc_set.get()
                    else:
                        dc = None

                    for user in added_users:
                        remove_user_dc_binding(task_id, user, dc=dc)

            # Update Users that were removed from group or added to group
            for user in affected_users:
                detail = "groups='%s'" % ','.join(user.roles.all().values_list(
                    'name', flat=True))
                task_log_success(task_id,
                                 LOG_USER_UPDATE,
                                 obj=user,
                                 owner=user,
                                 update_user_tasks=False,
                                 detail=detail)

        # Permission or users for this group were changed, which may affect the cached list of DC admins for DCs which
        # are attached to this group. So we need to clear the list of admins cached for each affected DC
        if ser.object._permissions_to_save is not None or ser.object._users_to_save is not None:
            for dc in group.dc_set.all():
                User.clear_dc_admin_ids(dc)

            # Users removed from this group may lose access to DCs attached to this group,
            # so we reset each affected user's current_dc to their default_dc
            if removed_users:
                for user in removed_users:
                    if not user.is_staff:
                        user.reset_current_dc()

        return res
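
The membership bookkeeping above is plain set arithmetic over the user sets captured before and after the save. A worked example with illustrative user names:

original_group_users = {'alice', 'bob', 'carol'}
current_users = {'bob', 'carol', 'dave'}

added_users = current_users - original_group_users    # {'dave'}
removed_users = original_group_users - current_users  # {'alice'}
# Users whose task log needs updating: added or removed, never unchanged
affected_users = current_users.symmetric_difference(original_group_users)
print(sorted(affected_users))  # ['alice', 'dave']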
Example #13
    def log_detail(self):
        return SuccessTaskResponse.dict_to_detail(self)
Example #14
    def delete(self):
        """Delete node definition"""
        node = self.node

        if node.is_head:
            raise PreconditionRequired('Head node cannot be deleted')

        force = bool(ForceSerializer(data=self.data, default=False))

        # Unless force is used, the node must have no VMs or backups (validated by the serializer below)
        if force:
            # Fetch data for vm_undefined signal
            vms = [{'vm_uuid': vm.uuid, 'dc': vm.dc, 'zabbix_sync': vm.is_zabbix_sync_active(),
                    'external_zabbix_sync': vm.is_external_zabbix_sync_active()}
                   for vm in node.vm_set.select_related('dc').all()]
        else:
            vms = ()
            # Simulate turning compute and backup flags off
            ser = NodeDefineSerializer(self.request, node, data={'is_backup': False, 'is_compute': False}, partial=True)

            if not ser.is_valid():
                return FailureTaskResponse(self.request, ser.errors, obj=node, dc_bound=False)

        if node.tasks:
            raise NodeHasPendingTasks

        if node.has_related_tasks():
            raise NodeHasPendingTasks('Node has related objects with pending tasks')

        uuid = node.uuid
        hostname = node.hostname
        obj = node.log_list
        owner = node.owner
        queues = node.all_queues

        try:
            ip_address = node.ip_address
        except ObjectDoesNotExist:
            ip_address = None

        # Bypass signal handling for VMs (needed when using force)
        #   Fixes: IntegrityError: insert or update on table "vms_nodestorage"
        post_delete.disconnect(Vm.post_delete, sender=Vm, dispatch_uid='post_delete_vm')

        try:
            node.delete()
        finally:
            post_delete.connect(Vm.post_delete, sender=Vm, dispatch_uid='post_delete_vm')

        res = SuccessTaskResponse(self.request, None, obj=obj, owner=owner, detail_dict={'force': force},
                                  msg=LOG_DEF_DELETE, dc_bound=False)
        task_id = TaskID(res.data.get('task_id'), request=self.request)
        node_deleted.send(task_id, node_uuid=uuid, node_hostname=hostname)  # Signal!

        # Force deletion will delete all node related objects (VMs, backups...)
        for vm in vms:
            vm_undefined.send(task_id, **vm)  # Signal! for every vm on deleted node

        try:
            # Delete DNS records associated with node
            self._delete_dns_records(self.request, task_id, node, hostname, ip_address)

            # Delete celery (amqp) task queues (named after node hostname); fail silently
            self._delete_queues(queues, fail_silently=True)

            # Delete IP address associated with node
            self._delete_ip_address(ip_address)
        except Exception as exc:
            logger.exception(exc)

        return res
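
The disconnect/try/finally dance around node.delete() guarantees the Vm.post_delete handler is reconnected even when the delete raises. The same idiom can be packaged as a context manager; a sketch, not part of the original code:

from contextlib import contextmanager

@contextmanager
def signal_disconnected(signal, receiver, sender, dispatch_uid):
    """Temporarily disconnect a Django signal receiver, reconnecting on exit."""
    signal.disconnect(receiver, sender=sender, dispatch_uid=dispatch_uid)
    try:
        yield
    finally:
        signal.connect(receiver, sender=sender, dispatch_uid=dispatch_uid)

# Usage equivalent to the block above:
#   with signal_disconnected(post_delete, Vm.post_delete, Vm, 'post_delete_vm'):
#       node.delete()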
Example #15
    def put(self):
        """Update node definition"""
        node = self.node
        ser = NodeDefineSerializer(self.request, node, data=self.data, partial=True)

        if node.tasks:
            raise NodeHasPendingTasks

        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors, obj=node, dc_bound=False)

        if ser.status_changed == Node.OFFLINE and node.has_related_tasks():
            raise NodeHasPendingTasks('Node has related objects with pending tasks')

        # Changing cpu or ram coefficients can lead to negative numbers in node.cpu/ram_free or dc_node.cpu/ram_free
        update_node_resources = ser.update_node_resources

        try:
            with transaction.atomic():
                ser.object.save(update_resources=update_node_resources, clear_cache=ser.clear_cache)

                if update_node_resources:
                    if node.cpu_free < 0 or node.dcnode_set.filter(cpu_free__lt=0).exists():
                        raise IntegrityError('cpu_check')

                    if node.ram_free < 0 or node.dcnode_set.filter(ram_free__lt=0).exists():
                        raise IntegrityError('ram_check')

        except IntegrityError as exc:
            errors = {}
            exc_error = str(exc)
            # ram or cpu constraint was violated on vms_dcnode (can happen when DcNode strategy is set to RESERVED)
            # OR an exception was raised above
            if 'ram_check' in exc_error:
                errors['ram_coef'] = ser.error_negative_resources
            if 'cpu_check' in exc_error:
                errors['cpu_coef'] = ser.error_negative_resources

            if not errors:
                raise exc

            return FailureTaskResponse(self.request, errors, obj=node, dc_bound=False)

        if update_node_resources:  # cpu_free or ram_free changed
            ser.reload()

        res = SuccessTaskResponse(self.request, ser.data, obj=node, detail_dict=ser.detail_dict(), dc_bound=False,
                                  msg=LOG_DEF_UPDATE)
        task_id = TaskID(res.data.get('task_id'), request=self.request)

        if ser.status_changed:
            node_status_changed.send(task_id, node=node, automatic=False)  # Signal!

            if node.is_online():
                node_online.send(task_id, node=node, automatic=False)  # Signal!
            elif node.is_offline():
                node_offline.send(task_id, node=node)  # Signal!

        if ser.monitoring_changed:
            node_json_changed.send(task_id, node=node)  # Signal!

        return res
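
The transaction.atomic() block above uses a deliberately raised IntegrityError as a rollback trigger: when a post-save resource check fails, raising inside the block undoes the save, and the marker string ('cpu_check'/'ram_check') routes the error to the right serializer field. A minimal sketch of the idiom, with a hypothetical check callable:

from django.db import IntegrityError, transaction

def save_with_resource_check(obj, resources_ok):
    try:
        with transaction.atomic():
            obj.save()
            if not resources_ok():                 # hypothetical post-save check
                raise IntegrityError('cpu_check')  # marker; rolls back the save
    except IntegrityError as exc:
        if 'cpu_check' not in str(exc):
            raise  # a genuine database constraint violation
        return False
    return True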
Example #16
    def post(self):
        node, dcnode = self.node, self.dcnode
        request, data = self.request, self.data

        # Set defaults for Shared strategy (default)
        try:
            strategy = int(data.get('strategy', DcNode.SHARED))
        except ValueError:
            strategy = DcNode.SHARED
        if strategy == DcNode.SHARED:
            dcnode.cpu = dcnode.ram = dcnode.disk = 0  # Value doesn't matter => will be set in save/update_resources
            data.pop('cpu', None)
            data.pop('ram', None)
            data.pop('disk', None)

        # Used in GUI
        try:
            add_storage = int(data.pop('add_storage', DcNode.NS_ATTACH_NONE))
        except ValueError:
            add_storage = DcNode.NS_ATTACH_NONE

        ser = DcNodeSerializer(request, dcnode, data=data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, obj=node)

        ser.object.save(update_resources=False)
        DcNode.update_all(node=node)
        ser.reload()

        if add_storage:
            from api.utils.views import call_api_view
            from api.dc.storage.views import dc_storage
            ns = NodeStorage.objects.filter(node=node)

            if add_storage != DcNode.NS_ATTACH_ALL:
                ns = ns.filter(storage__access=add_storage)

            for zpool in ns.values_list('zpool', flat=True):
                try:
                    zpool_node = '%s@%s' % (zpool, node.hostname)
                    res = call_api_view(request,
                                        'POST',
                                        dc_storage,
                                        zpool_node,
                                        data={},
                                        log_response=True)

                    if res.status_code == 201:
                        logger.info('POST dc_storage(%s) was successful: %s',
                                    zpool_node, res.data)
                    else:
                        logger.error('POST dc_storage(%s) failed: %s: %s',
                                     zpool_node, res.status_code, res.data)
                except Exception as ex:
                    logger.exception(ex)

        return SuccessTaskResponse(request,
                                   ser.data,
                                   status=status.HTTP_201_CREATED,
                                   obj=node,
                                   detail_dict=ser.detail_dict(),
                                   msg=LOG_NODE_ATTACH)
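
The two int(...)/except ValueError blocks above repeat the same defaulting; a small helper makes the intent explicit. A sketch; note that, like the original, it catches only ValueError, so a non-string input such as None would still raise TypeError:

def int_or_default(value, default):
    """Coerce value to int, falling back to default on unparsable input."""
    try:
        return int(value)
    except ValueError:
        return default

# strategy = int_or_default(data.get('strategy', DcNode.SHARED), DcNode.SHARED)
# add_storage = int_or_default(data.pop('add_storage', DcNode.NS_ATTACH_NONE),
#                              DcNode.NS_ATTACH_NONE)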
Example #17
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        node = vm.node
        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))
        queue = vm.node.fast_queue
        new_node_uuid = None
        detail_dict = {}

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            detail_dict['force'] = True
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None
            node_param = self.data.get('node')

            if node_param:
                if not request.user.is_staff:
                    raise PermissionDenied

                node = get_node(request, node_param, dc=request.dc, exists_ok=True, noexists_fail=True)

                if node.hostname == vm.node.hostname:
                    raise InvalidInput('VM already has the requested node set in DB')

                apiview['node'] = detail_dict['node'] = node.hostname
                queue = node.fast_queue
                new_node_uuid = node.uuid

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            self.validate_update(vm, stdin, cmd1)
            stdin = stdin.dump()

            # final cmd
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        # Check compute node status after we know which compute node the task is going to be run on
        # The internal vm.node.status checking is disabled in get_vm() in __init__
        if node.status != node.ONLINE:
            raise NodeIsNotOperational

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid, 'new_node_uuid': new_node_uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data,
                                    detail_dict=detail_dict)
        finally:
            if err:
                vm.revert_notready()
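
The err = True initialisation before the try block is what makes the finally clause safe: any exception raised before execute() returns leaves err truthy, so the VM's notready flag is always reverted on failure. The idiom in isolation, as a sketch with stand-in callables:

def run_guarded(set_notready, revert_notready, run_task):
    err = True  # assume failure until run_task() proves otherwise
    set_notready()
    try:
        tid, err = run_task()  # returns (task_id, error); error is falsy on success
        return tid, err
    finally:
        if err:  # covers both a returned error and an exception mid-flight
            revert_notready()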
Example #18
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            stdin = stdin.dump()

            # cmd = zfs set... >&2;
            if cmd1 and vm.snapshot_set.exists():
                raise ExpectationFailed('VM has snapshots')

            # final cmd
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=vm.node.fast_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:
                vm.revert_notready()
Example #19
    def get(self):
        return SuccessTaskResponse(
            self.request,
            BackupSerializer(self.request, self.bkp).data)
Example #20
    def get(self):
        return SuccessTaskResponse(self.request,
                                   self.get_stats(),
                                   dc_bound=self.dc_bound)
Example #21
    def get(self, many=False):
        res = NodeVersionSerializer(self.node, many=many).data

        return SuccessTaskResponse(self.request, res, dc_bound=self.dc_bound)
Example #22
    def get(self, vm, nic_id, nics, nic, data, many=False):
        """Get VM nic definition"""
        ser = VmDefineNicSerializer(self.request, vm, nic, nic_id=nic_id, many=many)

        return SuccessTaskResponse(self.request, ser.data, vm=vm)
Example #23
    def get(self):
        ser = self.serializer(self.request, self.dc)

        return SuccessTaskResponse(self.request, ser.data)
Example #24
    def delete(self):
        """Delete multiple snapshots"""
        # TODO: not documented
        request, data, vm = self.request, self.data, self.vm

        disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
        # Parse data['snapnames']
        snaps, __ = get_snapshots(request, vm, real_disk_id, data)

        self._check_vm_status()

        snaps_lost = snaps.filter(status=Snapshot.LOST)
        msg = LOG_SNAPS_DELETE

        if snaps_lost:
            _result = {'message': 'Snapshots successfully deleted from DB'}
            _detail = "snapnames='%s', disk_id=%s" % (','.join(
                i.name for i in snaps_lost), disk_id)
            snaps_lost.delete()
            res = SuccessTaskResponse(request,
                                      _result,
                                      msg=msg,
                                      vm=vm,
                                      detail=_detail)
            snaps = snaps.filter(
                status=Snapshot.OK)  # Work with OK snapshots from now on

            if not snaps:
                return res

        elif any(i.status != Snapshot.OK for i in snaps):
            raise ExpectationFailed('VM snapshot status is not OK')

        # Task type (a = automatic, e = manual)
        if getattr(request, 'define_id', None):
            tt = TT_AUTO
        else:
            tt = TT_EXEC

        snapnames = [i.name for i in snaps]
        _apiview_ = {
            'view': 'vm_snapshot_list',
            'method': request.method,
            'hostname': vm.hostname,
            'disk_id': disk_id,
            'snapnames': snapnames
        }
        _detail_ = "snapnames='%s', disk_id=%s" % (','.join(snapnames),
                                                   disk_id)

        snap_ids = [snap.id for snap in snaps]
        zfs_names = ','.join([snap.zfs_name for snap in snaps])
        lock = self.LOCK % (vm.uuid, real_disk_id)
        cmd = 'esnapshot destroy "%s@%s" 2>&1' % (zfs_filesystem, zfs_names)
        callback = ('api.vm.snapshot.tasks.vm_snapshot_list_cb', {
            'vm_uuid': vm.uuid,
            'snap_ids': snap_ids
        })

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           meta=snap_meta(vm, msg, _apiview_, _detail_),
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue,
                           tt=tt)
        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            snaps.update(status=Snapshot.PENDING)
            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=_apiview_,
                                detail=_detail_,
                                data=self.data)
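
Joining the ZFS snapshot names with commas lets a single esnapshot destroy call cover every selected snapshot of the dataset (ZFS accepts the fs@snap1,snap2 form). A worked example of the command assembly, with illustrative names:

zfs_filesystem = 'zones/cc65e75c-disk0'  # illustrative dataset
zfs_names = ','.join(['daily-20230101', 'daily-20230102'])
cmd = 'esnapshot destroy "%s@%s" 2>&1' % (zfs_filesystem, zfs_names)
print(cmd)
# esnapshot destroy "zones/cc65e75c-disk0@daily-20230101,daily-20230102" 2>&1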
Example #25
    def delete(self):
        """Delete [DELETE] image from DB and from Image server.

        The task group is always DC unbound, but the current DC depends on the dc_bound flag:
            - dc_bound=False:   task DC is default DC
            - dc_bound=[DC]:    task DC is dc_bound DC
        The callback is responsible for detaching the image from each DC and deleting it from DB.
        """
        request, img, data = self.request, self.img, self.data

        # Check if image is used by some VMs
        if img.is_used_by_vms():
            raise PreconditionRequired(_('Image is used by some VMs'))

        # Preliminary checks
        self._run_checks()  # This sets self.img_server to ImageVm()

        request.disable_throttling = True
        delete_node_image_tasks = []

        # Run task for removing the image from all NodeStorages which have the image imported locally
        for ns in img.nodestorage_set.select_related('node').all():
            # We need to bypass the permission checks, because node_image can be called by SuperAdmin only
            try:
                res = NodeImageView(request, ns, img, data).delete()
            except Exception as ex:
                res = exception_handler(ex, request)
                if res is None:
                    raise
                res.exception = True
                logger.error('DELETE node_image(%s, %s, %s) failed (%s): %s',
                             ns.node.hostname, ns.zpool, img.name,
                             res.status_code, res.data)
            else:
                logger.info(
                    'DELETE node_image(%s, %s, %s) was successful (%s): %s',
                    ns.node.hostname, ns.zpool, img.name, res.status_code,
                    res.data)

            if res.status_code == 200:
                continue
            elif res.status_code == 201:
                delete_node_image_tasks.append(res.data['task_id'])
            else:
                return res

        if self.img_server:
            # Set PENDING status
            img.save_status(Image.PENDING)

            return self._run_execute(
                LOG_IMAGE_DELETE,
                'esimg delete -u %s' % img.uuid,
                cb_add={'delete_node_image_tasks': delete_node_image_tasks})

        else:
            if wait_for_delete_node_image_tasks(img,
                                                delete_node_image_tasks,
                                                timeout=30):
                obj = img.log_list
                owner = img.owner
                img.delete()

                return SuccessTaskResponse(self.request,
                                           None,
                                           obj=obj,
                                           owner=owner,
                                           msg=LOG_IMAGE_DELETE,
                                           dc_bound=self.dc_bound)
            else:
                raise PreconditionRequired(
                    _('Image is being deleted from compute node storages; Try again later'))
Example #26
    def post(self):
        dc = self.dc
        request = self.request

        if not DefaultDc().settings.VMS_DC_ENABLED:
            raise PermissionDenied

        dc.owner = request.user  # just a default
        dc.alias = dc.name  # just a default
        ser = self.serializer(request, dc, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, obj=dc)

        # Create default custom settings suitable for new DC (without global settings)
        default_custom_settings = DefaultDc().custom_settings.copy()
        for key in DefaultDcSettingsSerializer.get_global_settings():
            try:
                del default_custom_settings[key]
            except KeyError:
                pass

        # Copy custom settings from default DC and save new DC
        ser.object.custom_settings = default_custom_settings
        ser.save()
        connection.on_commit(
            lambda: dc_relationship_changed.send(dc_name=dc.name))

        res = SuccessTaskResponse(request,
                                  ser.data,
                                  status=status.HTTP_201_CREATED,
                                  obj=dc,
                                  detail_dict=ser.detail_dict(),
                                  msg=LOG_DC_CREATE)
        dcs = dc.settings
        task_id = res.data.get('task_id')

        # Changing DC groups affects the group.dc_bound flag
        if dc.roles.exists():
            # Groups that are added to a newly created DC should not be DC-bound anymore
            for group in dc.roles.all():
                if group.dc_bound:
                    remove_dc_binding_virt_object(task_id,
                                                  LOG_GROUP_UPDATE,
                                                  group,
                                                  user=request.user)

        # Creating new DC can affect the dc_bound flag on users (owner + users from dc.groups)
        self._remove_user_dc_binding(task_id,
                                     owner=dc.owner,
                                     groups=dc.roles.all())

        # Create association with default server domain
        if dcs.DNS_ENABLED:
            from api.dc.domain.views import dc_domain
            call_api_view(request,
                          None,
                          dc_domain,
                          dcs.VMS_VM_DOMAIN_DEFAULT,
                          data={'dc': dc},
                          log_response=True)

        # Create association with default rescue CD
        if dcs.VMS_ISO_RESCUECD:
            from api.dc.iso.views import dc_iso
            call_api_view(request,
                          None,
                          dc_iso,
                          dcs.VMS_ISO_RESCUECD,
                          data={'dc': dc},
                          log_response=True)
        return res
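
The try/del/except KeyError loop above prunes global-only keys from the copied settings; dict.pop(key, None) is the equivalent one-liner. A worked example with illustrative keys:

default_custom_settings = {
    'VMS_VM_DOMAIN_DEFAULT': 'lan',  # DC-level setting: keep
    'MON_ZABBIX_SERVER': 'https://zabbix.example.com',  # global: strip
}
global_settings = ('MON_ZABBIX_SERVER',)

for key in global_settings:
    default_custom_settings.pop(key, None)  # same effect as try/del/except KeyError

print(default_custom_settings)  # {'VMS_VM_DOMAIN_DEFAULT': 'lan'}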
Example #27
    def post(self):
        request = self.request
        dc1_settings = DefaultDc().settings
        domain = self.domain
        domain.owner = request.user  # just a default
        domain.type = dc1_settings.DNS_DOMAIN_TYPE_DEFAULT

        if not request.user.is_staff:
            self.data.pop(
                'dc_bound', None
            )  # default DC binding cannot be changed when creating object

        ser = DomainSerializer(request, domain, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request,
                                       ser.errors,
                                       obj=domain,
                                       dc_bound=False)

        ser.object.save()
        res = SuccessTaskResponse(request,
                                  ser.data,
                                  status=HTTP_201_CREATED,
                                  obj=domain,
                                  dc_bound=False,
                                  msg=LOG_DOMAIN_CREATE,
                                  detail_dict=ser.detail_dict())

        # Create SOA and NS records for new MASTER/NATIVE domain
        from api.dns.record.views import dns_record
        try:
            if dc1_settings.DNS_SOA_DEFAULT and dc1_settings.DNS_NAMESERVERS:
                soa_attrs = {
                    'hostmaster':
                    dc1_settings.DNS_HOSTMASTER.replace('@', '.'),
                    'nameserver': dc1_settings.DNS_NAMESERVERS[0]
                }
                soa_data = {
                    'type': Record.SOA,
                    'name': domain.name,
                    'content': dc1_settings.DNS_SOA_DEFAULT.format(**soa_attrs)
                }
                call_api_view(request,
                              'POST',
                              dns_record,
                              domain.name,
                              0,
                              data=soa_data,
                              log_response=True)

            for ns in dc1_settings.DNS_NAMESERVERS:
                ns_data = {
                    'type': Record.NS,
                    'name': domain.name,
                    'content': ns
                }
                call_api_view(request,
                              'POST',
                              dns_record,
                              domain.name,
                              0,
                              data=ns_data,
                              log_response=True)
        except Exception as e:
            logger.exception(e)

        if domain.dc_bound:
            assert request.dc.id == domain.dc_bound
            attach_dc_virt_object(res.data.get('task_id'),
                                  LOG_DOMAIN_ATTACH,
                                  domain,
                                  request.dc,
                                  user=request.user)

        return res
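
The SOA content above comes from str.format over a settings template, with the hostmaster e-mail rewritten to the DNS dotted form. A worked example; the template value here is an assumption for illustration only:

DNS_SOA_DEFAULT = '{nameserver} {hostmaster} 0 28800 7200 604800 86400'  # assumed template
soa_attrs = {
    'hostmaster': 'hostmaster@example.com'.replace('@', '.'),
    'nameserver': 'ns1.example.com',
}
print(DNS_SOA_DEFAULT.format(**soa_attrs))
# ns1.example.com hostmaster.example.com 0 28800 7200 604800 86400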
Example #28
    def get(self):
        ser = SnapshotSerializer(self.request, self.snap)

        return SuccessTaskResponse(self.request, ser.data, vm=self.vm)
Example #29
    def put(self):  # noqa: R701
        request, vm, action = self.request, self.vm, self.action

        # Cannot change status unless the VM is created on node
        if vm.status not in self.statuses and action != 'current':
            raise VmIsNotOperational

        if action not in self.actions:
            raise ExpectationFailed('Bad action')

        apiview = self.apiview
        f_ser = VmStatusFreezeSerializer(data=self.data)

        if f_ser.is_valid():
            freeze = apiview['freeze'] = f_ser.data['freeze']
            unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
        else:
            return FailureTaskResponse(request, f_ser.errors, vm=vm)

        if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
            pass

        elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
            res = {
                'message':
                'VM %s is already stopped. Changing status to frozen.' %
                vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
            if not request.user.is_admin(request):
                raise PermissionDenied

            tid = task_id_from_request(request,
                                       owner_id=vm.owner.id,
                                       dummy=True)
            vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
            res = {
                'message': 'Removing frozen status for VM %s.' % vm.hostname
            }

            return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

        elif action == 'current':
            # Limit the PUT /current/ action to Admins and SuperAdmins
            if not request.user.is_admin(request):
                raise PermissionDenied

            if vm.status in self.statuses_force_change_allowed:
                return self.get_current_status(force_change=True)
            elif vm.status in self.stuck_statuses_force_change_allowed:
                if vm.tasks:
                    raise VmHasPendingTasks
                else:
                    return self.get_current_status(force_change=True)
            else:
                raise VmIsNotOperational

        else:
            raise ExpectationFailed('Bad action')

        dc_settings = request.dc.settings

        if action in ('stop',
                      'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
            raise PreconditionRequired('Internal VM can\'t be stopped')

        lock = 'vm_status vm:%s' % vm.uuid
        stdin = None
        apiview['update'] = False
        transition_to_stopping = False

        # The update parameter is used by all actions (start, stop, reboot)
        ser_update = VmStatusUpdateJSONSerializer(
            data=self.data, default=(action in ('start', 'reboot')))

        if not ser_update.is_valid():
            return FailureTaskResponse(request, ser_update.errors, vm=vm)

        if vm.json_changed():
            apiview['update'] = ser_update.data['update']
            logger.info('VM %s json != json_active', vm)

            if not apiview['update']:
                logger.info('VM %s json_active update disabled', vm)

        if action == 'start':
            ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

            if not ser.is_valid():
                return FailureTaskResponse(request, ser.errors, vm=vm)

            if ser.data and ser.iso:
                if not request.user.is_admin(request) and vm.is_installed() and \
                        (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                    raise PreconditionRequired('VM is not installed')

                msg = LOG_START_ISO
                iso = ser.iso
                cmd = self._start_cmd(iso=iso,
                                      iso2=ser.iso2,
                                      once=ser.data['cdimage_once'])
            else:
                msg = LOG_START
                iso = None
                cmd = self._start_cmd()

            if apiview['update']:
                if vm.tasks:
                    raise VmHasPendingTasks

                cmd, stdin = self._add_update_cmd(cmd, os_cmd_allowed=False)

                if iso:
                    msg = LOG_START_UPDATE_ISO
                else:
                    msg = LOG_START_UPDATE

        else:
            ser_stop_reboot = VmStatusStopSerializer(request,
                                                     vm,
                                                     data=self.data)

            if not ser_stop_reboot.is_valid():
                return FailureTaskResponse(request,
                                           ser_stop_reboot.errors,
                                           vm=vm)

            update = apiview.get('update', False)  # set by VmStatusUpdateJSONSerializer above
            force = apiview['force'] = ser_stop_reboot.data.get('force', False)
            timeout = ser_stop_reboot.data.get('timeout', None)

            if not force and timeout:
                apiview['timeout'] = timeout

            if update:
                if vm.tasks:
                    raise VmHasPendingTasks

                # This will always perform a vmadm stop command, followed by a vmadm update command and optionally
                # followed by a vmadm start command (reboot)
                pre_cmd = self._action_cmd('stop',
                                           force=force,
                                           timeout=timeout)

                if action == 'reboot':
                    if force:
                        msg = LOG_REBOOT_FORCE_UPDATE
                    else:
                        msg = LOG_REBOOT_UPDATE

                    post_cmd = self._action_cmd('start')
                else:
                    if force:
                        msg = LOG_STOP_FORCE_UPDATE
                    else:
                        msg = LOG_STOP_UPDATE

                    post_cmd = ''

                cmd, stdin = self._add_update_cmd(post_cmd,
                                                  os_cmd_allowed=True,
                                                  pre_cmd=pre_cmd)
            else:
                cmd = self._action_cmd(action, force=force, timeout=timeout)

                if force:
                    if action == 'reboot':
                        msg = LOG_REBOOT_FORCE
                    else:
                        lock += ' force'
                        msg = LOG_STOP_FORCE
                else:
                    if action == 'reboot':
                        msg = LOG_REBOOT
                    else:
                        msg = LOG_STOP

            if vm.status == Vm.STOPPING:
                if update:
                    raise PreconditionRequired(
                        'Cannot perform update while VM is stopping')
                if not force:
                    raise VmIsNotOperational(
                        'VM is already stopping; try to use force')
            else:
                transition_to_stopping = True

        meta = {
            'output': {
                'returncode': 'returncode',
                'stderr': 'message',
                'stdout': 'json'
            },
            'replace_stderr': ((vm.uuid, vm.hostname), ),
            'detail': self.detail,
            'msg': msg,
            'vm_uuid': vm.uuid,
            'apiview': apiview,
            'last_status': vm.status,
        }
        callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

        tid, err = execute(request,
                           vm.owner.id,
                           cmd,
                           stdin=stdin,
                           meta=meta,
                           lock=lock,
                           callback=callback,
                           queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            if transition_to_stopping:
                vm.save_status(Vm.STOPPING)

            return TaskResponse(request,
                                tid,
                                msg=msg,
                                vm=vm,
                                api_view=apiview,
                                detail=self.detail,
                                data=self.data,
                                api_data={
                                    'status': vm.status,
                                    'status_display': vm.status_display()
                                })
Example #30
    def post(self):
        """Import [POST] image from URL.

        This is always a DC bound task, but the task_id has a DC_UNBOUND task group flag,
        because socket.io will inform any admin regardless of the current admin DC.
        The callback is responsible for attaching the image into current DC if the image is dc_bound.
        """
        img, data, request = self.img, self.data, self.request

        # ImageImportAdmin permission is required
        if not request.user.has_permission(request,
                                           ImageImportAdminPermission.name):
            raise PermissionDenied

        # Validate URL and file URL
        ser_import = ImportImageSerializer(img, data=data)

        if not ser_import.is_valid():
            return FailureTaskResponse(request,
                                       ser_import.errors,
                                       dc_bound=self.dc_bound)

        if not request.user.is_staff:
            self.data.pop(
                'dc_bound', None
            )  # default DC binding cannot be changed when creating object

        img.manifest = ser_import.manifest  # Load imported manifest
        img.owner = request.user  # Default user (can be changed)
        img.alias = img.name  # Default alias (can be changed)
        img.status = Image.OK  # Set status for preliminary checks

        # More default fields retrieved from the downloaded image manifest
        for img_field in ('version', 'desc', 'resize', 'deploy', 'tags'):
            if img_field not in data:
                def_value = getattr(img, img_field, None)
                if def_value:
                    data[img_field] = def_value

        # Validate data for overriding manifest info
        ser = ImageSerializer(request, img, data)

        if not ser.is_valid():
            return FailureTaskResponse(request,
                                       ser.errors,
                                       dc_bound=self.dc_bound)

        # Preliminary checks
        self._run_checks()
        # Build new manifest
        img.manifest = img.build_manifest()
        # Add URL into detail dict
        ser_data = ser.data
        dd = ser.detail_dict()
        dd.update(ser_import.detail_dict())

        if self.img_server:
            img.status = Image.PENDING
            img.save()

            if ser_import.img_file_url.startswith(self.img_server.repo_url):
                logger.info(
                    'Importing image from local image server - assuming that image exists on server'
                )
                cmd = 'esimg update -c'
            else:
                cmd = 'esimg import -f %s' % ser_import.img_file_url

            return self._run_execute(LOG_IMAGE_IMPORT,
                                     cmd,
                                     stdin=img.manifest.dump(),
                                     delete_on_error=True,
                                     detail_dict=dd)
        else:
            img.status = Image.OK
            img.manifest_active = img.manifest
            img.save()

            return SuccessTaskResponse(self.request,
                                       ser_data,
                                       obj=img,
                                       msg=LOG_IMAGE_IMPORT,
                                       detail_dict=dd,
                                       dc_bound=self.dc_bound)
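
The backfill loop above copies a manifest default into the request data only when the caller did not supply the field and the default is truthy. A self-contained worked example with a stub image object:

class StubImg:  # stands in for the image with a downloaded manifest
    version = '1.0'
    desc = 'base image'
    resize = None  # falsy default: must not be copied

img = StubImg()
data = {'version': '2.0'}  # caller-supplied override wins

for img_field in ('version', 'desc', 'resize'):
    if img_field not in data:
        def_value = getattr(img, img_field, None)
        if def_value:
            data[img_field] = def_value

print(data)  # {'version': '2.0', 'desc': 'base image'}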