Example #1
    def delete(self, vm, data, **kwargs):
        """Delete VM definition"""
        if vm.is_deployed():
            # 'notcreated' is a VM status; a definition can only be deleted before deployment
            raise VmIsNotOperational(_('VM is not notcreated'))

        owner = vm.owner
        dead_vm = vm.log_list  # lightweight log reference that stays usable after the VM row is deleted
        uuid = vm.uuid
        hostname = vm.hostname
        alias = vm.alias
        zabbix_sync = vm.is_zabbix_sync_active()
        external_zabbix_sync = vm.is_external_zabbix_sync_active()
        task_id = SuccessTaskResponse.gen_task_id(self.request, vm=dead_vm, owner=owner)

        # Every VM NIC may have associations to other tables. Clean these up first:
        for nic in vm.json_get_nics():
            # noinspection PyBroadException
            try:
                nic_ser = VmDefineNicSerializer(self.request, vm, nic)
                nic_ser.delete_ip(task_id)
            except Exception as ex:
                logger.exception(ex)
                continue

        # Finally delete VM
        logger.debug('Deleting VM %s from DB', vm)
        vm.delete()

        try:
            return SuccessTaskResponse(self.request, None, vm=dead_vm, owner=owner, task_id=task_id, msg=LOG_DEF_DELETE)
        finally:
            # Signal!
            vm_undefined.send(TaskID(task_id, request=self.request), vm_uuid=uuid, vm_hostname=hostname, vm_alias=alias,
                              dc=self.request.dc, zabbix_sync=zabbix_sync, external_zabbix_sync=external_zabbix_sync)
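
The vm_undefined signal is sent after the response has been generated, with the TaskID as sender, so receivers can tie the event back to the originating task. A minimal sketch of such a receiver, assuming vm_undefined is a standard django.dispatch.Signal (the handler name is hypothetical):

    from django.dispatch import receiver

    @receiver(vm_undefined)
    def on_vm_undefined(sender, vm_uuid=None, vm_hostname=None, vm_alias=None, dc=None,
                        zabbix_sync=False, external_zabbix_sync=False, **kwargs):
        # sender is the TaskID, so the event can be correlated with the delete task
        logger.info('VM %s (%s) undefined by task %s', vm_hostname, vm_uuid, sender)
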
Example #2
    def post(self, vm, data, hostname_or_uuid=None):
        """
        Create VM definition
        In this case, the hostname_or_uuid parameter must always be a hostname, never a uuid
        """
        ser = VmDefineSerializer(self.request,
                                 data=data,
                                 hostname=hostname_or_uuid)

        if ser.is_valid():
            ser.object.save(sync_json=True,
                            update_node_resources=ser.update_node_resources)
            vm = ser.object

            try:
                res = SuccessTaskResponse(self.request,
                                          ser.data,
                                          status=scode.HTTP_201_CREATED,
                                          vm=vm,
                                          msg=LOG_DEF_CREATE,
                                          detail_dict=ser.detail_dict())
                vm_defined.send(TaskID(res.data.get('task_id'),
                                       request=self.request),
                                vm=vm)  # Signal!

                return res
            finally:
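                # note: this finally block runs after the response (and the signal above)
                # has been prepared, but before the return statement hands it to the caller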
                # Create disk/nics if defined in template
                if vm.template:
                    self._create_disks_and_nics(vm)

        return FailureTaskResponse(self.request, ser.errors)
Example #3
    def put(self):
        dc = self.dc
        ser = self.serializer(self.request, dc, data=self.data, partial=True)

        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors, obj=dc)

        old_settings = dc.custom_settings.copy()  # independent snapshot, taken before the update below
        dcs = dc.custom_settings
        dcs.update(ser.settings)
        new_settings = dcs
        dc.custom_settings = dcs
        dc.save()
        data = ser.data  # Prepare ser._data for ser.detail_dict() to work
        res = SuccessTaskResponse(self.request,
                                  data,
                                  obj=dc,
                                  detail_dict=ser.detail_dict(),
                                  msg=LOG_DC_SETTINGS_UPDATE)
        task_id = TaskID(res.data.get('task_id'), request=self.request)

        if old_settings != new_settings:
            dc_settings_changed.send(task_id,
                                     dc=dc,
                                     old_settings=old_settings,
                                     new_settings=new_settings)  # Signal!

        return res
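
The snapshot-and-compare pattern above only works when old_settings is an independent copy, because dict.update() mutates in place. A standalone illustration of the pitfall (not part of the original view):

    settings = {'a': 1}
    old = settings            # alias, NOT a snapshot
    settings.update(b=2)
    assert old == settings    # same object, so the change is invisible

    old = dict(settings)      # independent snapshot
    settings.update(c=3)
    assert old != settings    # now the comparison detects the change
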
Example #4
    def put(self):
        dc = self.dc
        ser = self.serializer(self.request, dc, data=self.data, partial=True)

        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors, obj=dc)

        old_settings = dc.custom_settings.copy()  # independent snapshot, taken before the update below
        dcs = dc.custom_settings
        dcs.update(ser.settings)
        new_settings = dcs
        dc.custom_settings = dcs
        dc.save()
        data = ser.data
        dd = ser.detail_dict()
        res = SuccessTaskResponse(self.request, data, obj=dc, detail_dict=dd, msg=LOG_DC_SETTINGS_UPDATE)

        # Check if monitoring settings have been changed
        if any('MON_ZABBIX' in key for key in dd):
            logger.info('Monitoring settings have been changed in DC %s. Running task for clearing zabbix cache', dc)
            try:
                mon_clear_zabbix_cache.call(dc.id, full=True)
            except Exception as e:
                logger.exception(e)

        # Check if compute node SSH key was added to VMS_NODE_SSH_KEYS_DEFAULT
        task_id = TaskID(res.data.get('task_id'), request=self.request)

        if old_settings != new_settings:
            dc_settings_changed.send(task_id, dc=dc, old_settings=old_settings, new_settings=new_settings)  # Signal

        return res
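
The detail_dict() keys are setting names, so the plain substring test above catches every MON_ZABBIX* option at once. A tiny illustration with hypothetical values:

    dd = {'MON_ZABBIX_SERVER': 'https://zabbix.example.com', 'VMS_VM_CPU_SHARES_DEFAULT': 100}
    assert any('MON_ZABBIX' in key for key in dd)
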
Example #5
    def put(self, vm, data):
        """Revert json_active (undo). Problematic attributes:
            - hostname  - handled by revert_active() + change requires some post configuration
            - alias     - handled by revert_active()
            - owner     - handled by revert_active()
            - template  - handled by revert_active()
            - monitored - handled by revert_active(), but mon_vm_sync task must be run via vm_define_reverted signal
            - tags      - won't be reverted (not saved in json)
            - nics.*.ip - ip reservation is fixed via vm_update_ipaddress_usage()
            - nics.*.dns + ptr - known bug - won't be reverted
        """
        if vm.is_notcreated():
            raise VmIsNotOperational('VM is not created')

        if vm.json == vm.json_active:
            raise ExpectationFailed('VM definition unchanged')

        if vm.tasks:
            raise VmHasPendingTasks

        # Prerequisites
        vm.hostname_is_valid_fqdn(
            cache=False
        )  # Cache vm._fqdn hostname/domain pair and find dns record
        hostname = vm.hostname  # Save hostname configured in DB

        # The magic happens here: get_diff() will run vm.revert_active() and return a diff
        vm_diff = VmDefineView(self.request).get_diff(vm, full=True)

        # Save VM
        hostname_changed = hostname != vm.hostname
        vm.unlock()  # vm saving was locked by revert_active()
        vm.save(update_hostname=hostname_changed,
                update_node_resources=True,
                update_storage_resources=True)

        # Generate response
        detail = 'Reverted VM configuration from %s.\n%s' % (
            vm.changed.strftime('%Y-%m-%d %H:%M:%S%z'),
            self.nice_diff(vm_diff))
        vm_diff['reverted_from'] = vm.changed

        res = SuccessTaskResponse(self.request,
                                  vm_diff,
                                  detail=detail,
                                  msg=LOG_DEF_REVERT,
                                  vm=vm)

        # Post-save stuff
        task_id = TaskID(res.data.get('task_id'), request=self.request)
        vm_update_ipaddress_usage(vm)
        vm_define_reverted.send(task_id, vm=vm)  # Signal!

        if hostname_changed:
            VmDefineHostnameChanged(self.request, vm,
                                    hostname).send()  # Task event for GUI

        return res
Example #6
    def put(self):
        """Update node definition"""
        node = self.node
        ser = NodeDefineSerializer(self.request,
                                   node,
                                   data=self.data,
                                   partial=True)

        if node.tasks:
            raise NodeHasPendingTasks

        if not ser.is_valid():
            return FailureTaskResponse(self.request,
                                       ser.errors,
                                       obj=node,
                                       dc_bound=False)

        if ser.status_changed == Node.OFFLINE and node.has_related_tasks():
            raise NodeHasPendingTasks(
                'Node has related objects with pending tasks')

        # Changing cpu or disk coefficients can lead to negative numbers in node.cpu/ram_free or dc_node.cpu/ram_free;
        # this is solved by running the DB update inside a transaction and checking for negative values (=> rollback);
        # Example #8 below shows the transaction in full
        errors = ser.save()

        if errors:
            return FailureTaskResponse(self.request,
                                       errors,
                                       obj=node,
                                       dc_bound=False)

        res = SuccessTaskResponse(self.request,
                                  ser.data,
                                  obj=node,
                                  detail_dict=ser.detail_dict(),
                                  dc_bound=False,
                                  msg=LOG_DEF_UPDATE)
        task_id = TaskID(res.data.get('task_id'), request=self.request)

        # Delete obsolete IP address and DNS records and create new ones if possible
        self._post_update(task_id, ser)

        # Signals section (should go last)
        if ser.status_changed:
            node_status_changed.send(task_id, node=node,
                                     automatic=False)  # Signal!

            if node.is_online():
                node_online.send(task_id, node=node,
                                 automatic=False)  # Signal!
            elif node.is_offline():
                node_offline.send(task_id, node=node)  # Signal!

        if ser.monitoring_changed or ser.address_changed:
            node_json_changed.send(task_id, node=node)  # Signal!

        return res
Example #7
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            stdin = stdin.dump()

            # cmd1, if non-empty, holds a 'zfs set ... >&2;' prefix produced by fix_update()
            if cmd1 and vm.snapshot_set.exists():
                raise ExpectationFailed('VM has snapshots')

            # final cmd
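            # (vmadm update output goes to stderr, its exit code is saved, the fresh VM JSON
            # is printed to stdout for the callback, and the saved exit code is propagated)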
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True  # truthy sentinel: ensures revert_notready() runs in finally if execute() raises early
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=vm.node.fast_queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
        finally:
            if err:
                vm.revert_notready()
Example #8
    def put(self):
        """Update node definition"""
        node = self.node
        ser = NodeDefineSerializer(self.request, node, data=self.data, partial=True)

        if node.tasks:
            raise NodeHasPendingTasks

        if not ser.is_valid():
            return FailureTaskResponse(self.request, ser.errors, obj=node, dc_bound=False)

        if ser.status_changed == Node.OFFLINE and node.has_related_tasks():
            raise NodeHasPendingTasks('Node has related objects with pending tasks')

        # Changing cpu or disk coefficients can lead to negative numbers in node.cpu/ram_free or dc_node.cpu/ram_free
        update_node_resources = ser.update_node_resources

        try:
            with transaction.atomic():
                ser.object.save(update_resources=update_node_resources, clear_cache=ser.clear_cache)

                if update_node_resources:
                    if node.cpu_free < 0 or node.dcnode_set.filter(cpu_free__lt=0).exists():
                        raise IntegrityError('cpu_check')

                    if node.ram_free < 0 or node.dcnode_set.filter(ram_free__lt=0).exists():
                        raise IntegrityError('ram_check')

        except IntegrityError as exc:
            errors = {}
            exc_error = str(exc)
            # ram or cpu constraint was violated on vms_dcnode (can happen when DcNode strategy is set to RESERVED)
            # OR an exception was raised above
            if 'ram_check' in exc_error:
                errors['ram_coef'] = ser.error_negative_resources
            if 'cpu_check' in exc_error:
                errors['cpu_coef'] = ser.error_negative_resources

            if not errors:
                raise exc

            return FailureTaskResponse(self.request, errors, obj=node, dc_bound=False)

        if update_node_resources:  # cpu_free or ram_free changed
            ser.reload()

        res = SuccessTaskResponse(self.request, ser.data, obj=node, detail_dict=ser.detail_dict(), dc_bound=False,
                                  msg=LOG_DEF_UPDATE)
        task_id = TaskID(res.data.get('task_id'), request=self.request)

        if ser.status_changed:
            node_status_changed.send(task_id, node=node, automatic=False)  # Signal!

            if node.is_online():
                node_online.send(task_id, node=node, automatic=False)  # Signal!
            elif node.is_offline():
                node_offline.send(task_id, node=node)  # Signal!

        if ser.monitoring_changed:
            node_json_changed.send(task_id, node=node)  # Signal!

        return res
Example #9
    def delete(self):
        """Delete node definition"""
        node = self.node

        if node.is_head:
            raise PreconditionRequired('Head node cannot be deleted')

        force = bool(ForceSerializer(data=self.data, default=False))

        # Unless force is used, verify that the node has no VMs or backups
        if force:
            # Fetch data for vm_undefined signal
            vms = [{'vm_uuid': vm.uuid, 'dc': vm.dc, 'zabbix_sync': vm.is_zabbix_sync_active(),
                    'external_zabbix_sync': vm.is_external_zabbix_sync_active()}
                   for vm in node.vm_set.select_related('dc').all()]
        else:
            vms = ()
            # Simulate turning compute and backup flags off
            ser = NodeDefineSerializer(self.request, node, data={'is_backup': False, 'is_compute': False}, partial=True)

            if not ser.is_valid():
                return FailureTaskResponse(self.request, ser.errors, obj=node, dc_bound=False)

        if node.tasks:
            raise NodeHasPendingTasks

        if node.has_related_tasks():
            raise NodeHasPendingTasks('Node has related objects with pending tasks')

        uuid = node.uuid
        hostname = node.hostname
        obj = node.log_list
        owner = node.owner
        queues = node.all_queues

        try:
            ip_address = node.ip_address
        except ObjectDoesNotExist:
            ip_address = None

        # Bypass signal handling for VMs (needed when using force)
        #   Fixes: IntegrityError: insert or update on table "vms_nodestorage"
        post_delete.disconnect(Vm.post_delete, sender=Vm, dispatch_uid='post_delete_vm')

        try:
            node.delete()
        finally:
            post_delete.connect(Vm.post_delete, sender=Vm, dispatch_uid='post_delete_vm')

        res = SuccessTaskResponse(self.request, None, obj=obj, owner=owner, detail_dict={'force': force},
                                  msg=LOG_DEF_DELETE, dc_bound=False)
        task_id = TaskID(res.data.get('task_id'), request=self.request)
        node_deleted.send(task_id, node_uuid=uuid, node_hostname=hostname)  # Signal!

        # Force deletion deletes all node-related objects (VMs, backups...)
        for vm in vms:
            vm_undefined.send(task_id, **vm)  # Signal! for every vm on deleted node

        try:
            # Delete DNS records associated with node
            self._delete_dns_records(self.request, task_id, node, hostname, ip_address)

            # Delete celery (amqp) task queues (named after node hostname); fail silently
            self._delete_queues(queues, fail_silently=True)

            # Delete IP address associated with node
            self._delete_ip_address(ip_address)
        except Exception as exc:
            logger.exception(exc)

        return res
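
The disconnect/reconnect dance around node.delete() can be packaged as a reusable context manager. A minimal sketch, assuming Django's standard Signal API (the helper name is hypothetical):

    from contextlib import contextmanager

    @contextmanager
    def signal_disabled(signal, receiver_fn, sender, dispatch_uid):
        """Temporarily disconnect a signal receiver, reconnecting it on exit."""
        signal.disconnect(receiver_fn, sender=sender, dispatch_uid=dispatch_uid)
        try:
            yield
        finally:
            signal.connect(receiver_fn, sender=sender, dispatch_uid=dispatch_uid)

    # Usage equivalent to the try/finally block above:
    # with signal_disabled(post_delete, Vm.post_delete, Vm, 'post_delete_vm'):
    #     node.delete()
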
Example #10
    def put(self):
        request, vm = self.request, self.vm

        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        node = vm.node
        apiview = self.apiview
        apiview['force'] = bool(ForceSerializer(data=self.data, default=False))
        queue = vm.node.fast_queue
        new_node_uuid = None
        detail_dict = {}

        if vm.status not in (vm.RUNNING, vm.STOPPED):
            raise VmIsNotOperational('VM is not stopped or running')

        if apiview['force']:
            detail_dict['force'] = True
            # final cmd and empty stdin
            cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
            stdin = None
            block_key = None
            node_param = self.data.get('node')

            if node_param:
                if not request.user.is_staff:
                    raise PermissionDenied

                node = get_node(request, node_param, dc=request.dc, exists_ok=True, noexists_fail=True)

                if node.hostname == vm.node.hostname:
                    raise InvalidInput('VM already has the requested node set in DB')

                apiview['node'] = detail_dict['node'] = node.hostname
                queue = node.fast_queue
                new_node_uuid = node.uuid

        elif vm.json_changed():
            if vm.locked:
                raise VmIsLocked

            json_update = vm.json_update()
            self.check_update(json_update)

            if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
                raise VmHasPendingTasks

            # create json suitable for update
            stdin, cmd1 = self.fix_update(json_update)
            self.validate_update(vm, stdin, cmd1)
            stdin = stdin.dump()

            # final cmd
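            # (same stderr/stdout split as in Example #7: update output to stderr,
            # fresh JSON to stdout, original exit code propagated)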
            cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

            # Possible node_image import task which will block this task on node worker
            block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

        else:  # JSON unchanged and not force
            detail = 'Successfully updated VM %s (locally)' % vm.hostname
            res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
            vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

            return res

        # Check compute node status after we know which compute node the task is going to be run on
        # The internal vm.node.status checking is disabled in get_vm() in __init__
        if node.status != node.ONLINE:
            raise NodeIsNotOperational

        msg = LOG_VM_UPDATE
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),), 'msg': msg, 'vm_uuid': vm.uuid, 'apiview': apiview
        }
        callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid, 'new_node_uuid': new_node_uuid})

        logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

        err = True  # truthy sentinel (see Example #7): keeps the finally block safe if execute() raises
        vm.set_notready()

        try:
            tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                               queue=queue, block_key=block_key)

            if err:
                return FailureTaskResponse(request, err, vm=vm)
            else:
                return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data,
                                    detail_dict=detail_dict)
        finally:
            if err:
                vm.revert_notready()