Example 1
    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        self.logger.debug('Task %s("%s") returned %s. Result: """%s"""',
                          self.name, args, status, retval)
        meta = kwargs.get('meta', {})
        nolog = meta.get('nolog', False)

        # In case of emergency, log this task
        if not nolog and not self.all_done:
            if isinstance(retval, dict):
                result = retval.copy()
            else:
                if einfo:
                    result = {'detail': str(einfo.exception)}
                else:
                    result = {'detail': str(retval)}

            if 'meta' not in result:
                result['meta'] = meta

            result['meta']['cb_name'] = LOGTASK
            meta['task_status'] = status
            meta['cleanup'] = True
            t = send_task_forever(task_id,
                                  LOGTASK,
                                  nolog=nolog,
                                  args=(result, task_id),
                                  kwargs=meta,
                                  queue=Q_MGMT,
                                  expires=None,
                                  task_id=task_id_from_task_id(task_id))
            self.logger.warning('Created emergency log task %s', t.id)
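The hook above follows Celery's standard Task.after_return() signature, which the worker invokes after every run regardless of outcome. As a minimal sketch (assuming a plain Celery app; the class and task names below are hypothetical and not taken from the project), such a base class is typically wired up like this:

from celery import Celery, Task

app = Celery('example')


class LoggedTask(Task):
    # Celery calls after_return() after every run, whether the task succeeded
    # or failed, which makes it a natural place for an "emergency" logging
    # fallback like the one above.
    abstract = True
    all_done = False

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        if not self.all_done:
            print('task %s finished with %s: %r' % (task_id, status, retval))


@app.task(base=LoggedTask, bind=True)
def do_work(self, x):
    result = x * 2
    self.all_done = True  # normal completion -> skip the emergency fallback
    return result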
Example 2
    def put(self):
        request, vm = self.request, self.vm

        if vm.locked:
            raise VmIsLocked

        if vm.status not in (vm.STOPPED, vm.RUNNING, vm.NOTCREATED):
            raise VmIsNotOperational(
                'VM is not stopped, running or notcreated')

        if vm.json_changed():
            raise PreconditionRequired(
                'VM definition has changed; Update first')

        ser = VmDcSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if vm.tasks:
            raise VmHasPendingTasks

        old_dc = vm.dc
        dc = ser.dc
        # Change the DC for this VM (repeat for any other VMs as needed)
        # and recalculate node & storage resources in the target and source DC
        vm.dc = dc
        vm.save(update_node_resources=True, update_storage_resources=True)
        # Change the DC of the task log entries for the target VM
        TaskLogEntry.objects.filter(object_pk=vm.uuid).update(dc=dc)
        # Change the DC of the VM's related backups
        Backup.objects.filter(vm=vm).update(dc=dc)

        for ns in ser.nss:  # Issue #chili-885
            for i in (dc, old_dc):
                Backup.update_resources(ns, vm, i)
                Snapshot.update_resources(ns, vm, i)

        detail = 'Successfully migrated VM %s from datacenter %s to datacenter %s' % (
            vm.hostname, old_dc.name, dc.name)
        # Will create task log entry in old DC
        res = SuccessTaskResponse(request,
                                  detail,
                                  vm=vm,
                                  msg=LOG_MIGRATE_DC,
                                  detail=detail)
        # Create task log entry in new DC too
        task_log_success(task_id_from_task_id(res.data.get('task_id'),
                                              dc_id=dc.id),
                         LOG_MIGRATE_DC,
                         obj=vm,
                         detail=detail,
                         update_user_tasks=False)

        return res
Example 3
def vm_snapshot_beat(snap_define_id):
    """
    This is a periodic beat task. It runs POST vm_snapshot according to the snapshot definition.
    """
    from api.vm.snapshot.views import vm_snapshot

    snap_define = SnapshotDefine.objects.get(id=snap_define_id)
    snap_name = snap_define.generate_snapshot_name()
    vm = snap_define.vm
    disk_id = snap_define.array_disk_id
    request = get_dummy_request(vm.dc, method='POST', system_user=True)
    request.define_id = snap_define.id  # Automatic task
    # Go!
    res = call_api_view(request,
                        'POST',
                        vm_snapshot,
                        vm.hostname,
                        snap_name,
                        data={
                            'disk_id': disk_id,
                            'fsfreeze': snap_define.fsfreeze
                        })

    if res.status_code == 201:
        logger.info(
            'POST vm_snapshot(%s, %s, {disk_id=%s}) was successful: %s', vm,
            snap_name, disk_id, res.data)
    else:
        # Need to log this here, because nobody else does (and there is no PENDING task)
        detail = 'snapname=%s, disk_id=%s, type=%s. Error: %s' % (
            snap_name, disk_id, Snapshot.AUTO, get_task_error_message(
                res.data))
        task_log_error(task_id_from_task_id(vm_snapshot_beat.request.id,
                                            dc_id=vm.dc.id),
                       LOG_SNAP_CREATE,
                       vm=vm,
                       detail=detail,
                       update_user_tasks=False)

        if res.status_code == HTTP_423_LOCKED:
            logger.warning(
                'Running POST vm_snapshot(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                vm, snap_name, disk_id, res.status_code, res.status_text,
                res.data)
        else:
            logger.error(
                'Running POST vm_snapshot(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                vm, snap_name, disk_id, res.status_code, res.status_text,
                res.data)
            MonitoringBackend.vm_send_alert(
                vm, 'Automatic snapshot %s/disk-%s@%s failed to start.' %
                (vm.hostname, disk_id, snap_define.name))
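Beat tasks like vm_snapshot_beat are driven by a scheduler; the project presumably builds its schedule dynamically from the SnapshotDefine records. Purely as an illustration (the Celery app, task path and definition id below are hypothetical), a static schedule entry could look like this:

from celery import Celery
from celery.schedules import crontab

app = Celery('example')

app.conf.beat_schedule = {
    # Hypothetical entry: run the snapshot beat task for definition id 42
    # every six hours.
    'vm-snapshot-define-42': {
        'task': 'api.vm.snapshot.tasks.vm_snapshot_beat',
        'schedule': crontab(minute=0, hour='*/6'),
        'args': (42,),
    },
}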
Example 4
def remove_user_dc_binding(task_id, user, dc=None):
    """Remove user.dc_bound flag"""
    from api.task.utils import task_log_success  # circular imports

    if not user.dc_bound or (dc and user.dc_bound == dc):  # Nothing to do
        return None

    dc_id = user.dc_bound.id
    user.dc_bound = None
    user.save(update_fields=('dc_bound',))

    task_id = task_id_from_task_id(task_id, tg=TG_DC_UNBOUND, dc_id=dc_id, keep_task_suffix=True)
    task_log_success(task_id, LOG_USER_UPDATE, obj=user, update_user_tasks=False, detail='dc_bound=false')
Example 5
def vm_backup_beat(bkp_define_id):
    """
    This is a periodic beat task. It runs POST vm_backup according to the backup definition.
    """
    from api.vm.backup.views import vm_backup

    bkp_define = BackupDefine.objects.get(id=bkp_define_id)
    vm = bkp_define.vm
    disk_id = bkp_define.array_disk_id
    defname = bkp_define.name
    request = get_dummy_request(vm.dc, method='POST', system_user=True)
    request.define_id = bkp_define.id  # Automatic task
    # Go!
    res = call_api_view(request,
                        'POST',
                        vm_backup,
                        vm.hostname,
                        defname,
                        data={
                            'disk_id': disk_id,
                            'fsfreeze': bkp_define.fsfreeze
                        })

    if res.status_code == 201:
        logger.info('POST vm_backup(%s, %s, {disk_id=%s}) was successful: %s',
                    vm, defname, disk_id, res.data)
    else:
        # Need to log this here, because nobody else does (and there is no PENDING task)
        detail = 'hostname=%s, bkpname=%s, disk_id=%s, Error: %s' % (
            vm.hostname, bkp_define.generate_backup_name(), disk_id,
            get_task_error_message(res.data))
        task_log_error(task_id_from_task_id(vm_backup_beat.request.id,
                                            dc_id=vm.dc.id),
                       LOG_BKP_CREATE,
                       vm=vm,
                       detail=detail,
                       update_user_tasks=False)

        if res.status_code == HTTP_423_LOCKED:
            logger.warning(
                'Running POST vm_backup(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                vm, defname, disk_id, res.status_code, res.status_text,
                res.data)
        else:
            logger.error(
                'Running POST vm_backup(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                vm, defname, disk_id, res.status_code, res.status_text,
                res.data)
            Zabbix.vm_send_alert(
                vm, 'Automatic backup %s/disk-%s@%s failed to start.' %
                (vm.hostname, disk_id, defname))
Example 6
def attach_dc_virt_object(task_id, msg, obj, dc, user=None):
    """Attach object into DC and log it"""
    from api.task.utils import task_log_success  # circular imports

    if isinstance(obj, Domain):
        DomainDc.objects.create(dc=dc, domain_id=obj.id)
    elif isinstance(obj, Role):
        obj.dc_set.add(dc)
    else:
        obj.dc.add(dc)

    task_id = task_id_from_task_id(task_id, tg=TG_DC_BOUND, dc_id=dc.id, keep_task_suffix=True)
    task_log_success(task_id, msg, obj=obj, owner=getattr(obj, 'owner', None), user=user, update_user_tasks=False,
                     detail="dc='%s'" % dc.name)
Example 7
def remove_dc_binding_virt_object(task_id, msg, obj, user=None, dc_id=None):
    """Detach object from DC and log it"""
    from api.task.utils import task_log_success  # circular imports

    if dc_id is None:
        if isinstance(obj, Domain):
            dc_id = obj.dc_bound
        else:
            dc_id = obj.dc_bound.id

    obj.dc_bound = None
    obj.save(update_fields=('dc_bound', 'changed'))

    task_id = task_id_from_task_id(task_id, tg=TG_DC_UNBOUND, dc_id=dc_id, keep_task_suffix=True)
    task_log_success(task_id, msg, obj=obj, owner=getattr(obj, 'owner', None), user=user, update_user_tasks=False,
                     detail='dc_bound=false')
Example 8
def vm_status_changed(tid,
                      vm,
                      state,
                      old_state=None,
                      save_state=True,
                      deploy_over=False,
                      change_time=None):
    """
    This function is essentially a dummy callback.
    It should be called whenever the VM state is changed from a task.
    """
    if change_time:
        old_change_time = cache.get(Vm.status_change_key(vm.uuid))

        if old_change_time and old_change_time > change_time:
            logger.warning(
                'Ignoring status change %s->%s of VM %s (%s) because it is too old: %s > %s',
                vm.status, state, vm, vm.uuid, old_change_time, change_time)
            return None

    # save to DB and also update cache
    if save_state:
        vm.status = state
        if old_state is not None:  # if cached status != vm.status (don't remember why we need this)
            vm._orig_status = old_state
        vm.save(update_fields=('status', 'status_change', 'uptime',
                               'uptime_changed'),
                status_change_time=change_time)

    if deploy_over:  # deploy process ended
        # Set the deploy_finished flag to inform vm_create_cb
        vm.set_deploy_finished()

    if vm.is_slave_vm():
        logger.info('Detected status change of slave VM %s - "%s"', vm.uuid,
                    vm)
        return None

    # Adjust task ID according to VM parameters
    tid = task_id_from_task_id(tid, owner_id=vm.owner.id, dc_id=vm.dc_id)

    # Signals!
    vm_status_changed_sig.send(tid,
                               vm=vm,
                               old_state=old_state,
                               new_state=state)  # Signal!
    if vm.status == vm.RUNNING:
        vm_running.send(tid, vm=vm, old_state=old_state)  # Signal!
    elif vm.status == vm.STOPPED:
        vm_stopped.send(tid, vm=vm, old_state=old_state)  # Signal!

    # data for next operations
    msg = LOG_STATUS_CHANGE
    task_event = VmStatusChanged(tid, vm)

    # log task
    task_log(tid,
             msg,
             vm=vm,
             owner=vm.owner,
             task_result=task_event.result,
             task_status=states.SUCCESS,
             time=vm.status_change,
             update_user_tasks=False)

    # inform users (VM owners logged in GUI)
    task_event.send()
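If vm_status_changed_sig, vm_running and vm_stopped are ordinary django.dispatch signals (an assumption based on the .send() calls above), a consumer elsewhere in the code base could subscribe with a standard receiver; the handler below is a hypothetical sketch:

import logging

from django.dispatch import receiver

logger = logging.getLogger(__name__)


# vm_running is assumed to be importable from wherever the signals are defined
@receiver(vm_running)
def on_vm_running(sender, vm=None, old_state=None, **kwargs):
    # sender is the adjusted task ID (tid) passed to vm_running.send() above
    logger.info('VM %s entered RUNNING state (task %s, previous state %s)', vm, sender, old_state)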
Example 9
def _execute(self, cmd, stdin, meta=None, callback=None):
    """
    The "real" execute function.
    Just like executing a command in the shell on the compute node.
    Do not use directly. Call the execute() wrapper instead.
    """
    request = self.request

    p = Popen(cmd,
              shell=True,
              bufsize=0,
              close_fds=True,
              stdin=PIPE,
              stdout=PIPE,
              stderr=PIPE,
              preexec_fn=os.setsid)
    exec_time = datetime.utcnow()

    try:
        stdout, stderr = p.communicate(input=stdin)
    except (Terminated, KeyboardInterrupt, SystemExit) as exc:
        # This is mainly used for catching SIGTERM.
        # SIGTERM is caught here as a Terminated exception; SIGKILL can never be caught here.
        sig = _exc_signal(exc)
        logger.error(
            'Task %s received %r exception -> sending signal %d to %d',
            request.id, exc, sig, p.pid)

        try:
            os.killpg(p.pid, sig)  # Send signal to process group
        except OSError:
            pass

        try:
            p.send_signal(sig)  # Send signal to process and wait
            p.wait()
        except (OSError, NoSuchProcess):
            pass

        raise exc

    finish_time = datetime.utcnow()

    if meta is None:
        meta = {}

    elif meta:
        if 'replace_text' in meta:
            for i in meta['replace_text']:
                stdout = stdout.replace(i[0], i[1])
                stderr = stderr.replace(i[0], i[1])
            del meta['replace_text']

        if 'replace_stdout' in meta:
            for i in meta['replace_stdout']:
                stdout = stdout.replace(i[0], i[1])
            del meta['replace_stdout']

        if 'replace_stderr' in meta:
            for i in meta['replace_stderr']:
                stderr = stderr.replace(i[0], i[1])
            del meta['replace_stderr']

        if 'compress_stdout' in meta:
            stdout = compress(stdout)
            del meta['compress_stdout']

        if 'compress_stderr' in meta:
            stderr = compress(stderr)
            del meta['compress_stderr']

        if 'encode_stdout' in meta:
            stdout = b64encode(stdout)
            del meta['encode_stdout']

        if 'encode_stderr' in meta:
            stderr = b64encode(stderr)
            del meta['encode_stderr']

    meta['exec_time'] = exec_time.isoformat()
    meta['finish_time'] = finish_time.isoformat()

    if 'output' in meta:
        result = meta.pop('output', {})
        result['meta'] = meta

        _stdout = result.pop('stdout', None)
        if _stdout:
            result[_stdout] = stdout.strip()

        _stderr = result.pop('stderr', None)
        if _stderr:
            result[_stderr] = stderr.strip()

        _returncode = result.pop('returncode', None)
        if _returncode:
            result[_returncode] = p.returncode

    else:
        result = {
            'returncode': p.returncode,
            'stdout': stdout,
            'stderr': stderr,
            'meta': meta,
        }

    # Implicit logging if no callback is specified
    # Use callback=False to disable automatic logging
    if callback is None:
        callback = [LOGTASK, meta, None]

    if callback:
        nolog = meta.get('nolog', False)
        cb_name = callback[0]
        cb_kwargs = {}
        cb_expire = None

        if len(callback) > 1:
            cb_kwargs = callback[1]
            if len(callback) > 2:
                cb_expire = callback[2]

        t = send_task_forever(request.id,
                              cb_name,
                              nolog=nolog,
                              args=(result, request.id),
                              kwargs=cb_kwargs,
                              queue=Q_MGMT,
                              expires=cb_expire,
                              task_id=task_id_from_task_id(request.id))
        result['meta']['cb_name'] = cb_name
        result['meta']['callback'] = t.id

    # Do not run emergency callback in after_return
    self.all_done = True

    return result
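Based purely on the keys consumed above, a caller-supplied meta/callback pair for _execute() might look like the following sketch (all values and the callback task name are illustrative, not taken from the project):

meta = {
    'nolog': False,                                # forwarded to send_task_forever()
    'replace_text': [('s3cr3t', '***')],           # scrubbed from both stdout and stderr
    'encode_stdout': True,                         # base64-encode stdout before returning it
    'output': {'stdout': 'message', 'returncode': 'rc'},  # remap the result keys
}

# callback is a (task_name, task_kwargs, expires) triple; callback=None falls
# back to the implicit LOGTASK callback and callback=False disables logging.
callback = ['api.example.tasks.hypothetical_log_cb', {'obj_uuid': 'example-uuid'}, None]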