def __init__(self, request, data):
    """Store request data and prepare a dummy, DC-unbound task ID for this view."""
    super(UpdateView, self).__init__(request, force_default_dc=True)
    self.user = request.user
    self.data = data
    # self.request is available here because the parent constructor stored it above
    self.task_id = task_id_from_request(self.request, tt=TT_DUMMY, tg=TG_DC_UNBOUND, dummy=True)
def __init__(self, request, hostname, data):
    """Resolve the target compute node and prepare a dummy, DC-unbound task ID."""
    super(NodeUpdateView, self).__init__(request)
    self.data = data
    self.hostname = hostname
    # get_node() performs the node lookup (and any permission checks it implements)
    self.node = get_node(request, hostname)
    self.task_id = task_id_from_request(self.request, tt=TT_DUMMY, tg=TG_DC_UNBOUND, dummy=True)
def gen_task_id(cls, request, tt=TT_DUMMY, dc_bound=True, **kwargs):
    """Build a dummy task ID for *request*, DC-bound or DC-unbound per *dc_bound*."""
    task_group = TG_DC_BOUND if dc_bound else TG_DC_UNBOUND
    owner_id = cls._get_owner_id(**kwargs)
    return task_id_from_request(request, owner_id=owner_id, tt=tt, tg=task_group, dummy=True)
def on_dc_switch(self):
    """Refresh the request's user after a DC switch and notify the user's other sessions."""
    # Re-fetch the user object from the DB so stale attributes are dropped
    user_model = self.request.user.__class__
    self.request.user = user_model.objects.get(pk=self.request.user.pk)
    self.setup_user()
    self.set_active_user()
    # Emit an internal event so other sessions of this user learn about the DC change
    task_id = task_id_from_request(self.user_id, tt=TT_INTERNAL, tg=TG_DC_UNBOUND, dc_id=self.dc_id)
    self.last_tasks.append(task_id)
    signal('task-for-' + self.user_id).send('_dc_switch', task_id=task_id, event_status='internal')
def task_log_exception(request, exc, task_id=None, **kwargs):
    """Log an API exception into the task log, minting a dummy error task ID if needed."""
    task_id = task_id or task_id_from_request(request, tt=TT_ERROR, dummy=True)
    task_result, task_status = get_task_exception(exc)
    task_log_error(task_id, LOG_API_FAILURE, user=request.user, task_result=task_result,
                   task_status=task_status, update_user_tasks=False, **kwargs)
def __init__(self, request, vm, old_hostname):
    """Build a hostname-changed event for *vm*.

    Creates a task ID owned by the VM owner and passes the event details
    (old/new hostname, alias, human-readable message) to the parent event class.
    """
    siosid = getattr(request, 'siosid', None)
    task_id = task_id_from_request(request, owner_id=vm.owner.id)
    # BUGFIX: interpolate AFTER translating. The original applied '%' inside _(),
    # so the already-formatted string was looked up in the gettext catalog and
    # could never match the translatable template.
    msg = _(
        'Hostname of server %(alias)s changed from %(old_hostname)s to %(new_hostname)s. '
        'Please refresh your browser.'
    ) % {
        'alias': vm.alias,
        'old_hostname': old_hostname,
        'new_hostname': vm.hostname,
    }
    super(VmDefineHostnameChanged, self).__init__(task_id, siosid=siosid, vm_hostname=old_hostname,
                                                  new_hostname=vm.hostname, new_alias=vm.alias, message=msg)
def put(self):
    """Update one repository or all of them, returning a task response per outcome."""
    request = self.request
    task_id = task_id_from_request(request, dummy=True, tt=TT_DUMMY, tg=TG_DC_UNBOUND)
    if self.many:
        # Update every repository and consider the whole call failed if any item errored
        res = [self.update(task_id, repo) for repo in self.repo]
        err = any(bool(repo['error']) for repo in res)
    else:
        res = self.update(task_id, self.repo)
        err = bool(res.error)
    response_class = FailureTaskResponse if err else SuccessTaskResponse
    return response_class(self.request, res, task_id=task_id, dc_bound=self.dc_bound)
def put(self):  # noqa: R701
    """Change VM status: start/stop/reboot the VM, or force-sync its current status.

    Handles freeze/unfreeze pseudo-transitions (admin only), the admin-only
    'current' action, optional json_active updates piggy-backed on the action,
    and finally dispatches the compute-node command via execute().
    """
    request, vm, action = self.request, self.vm, self.action

    # Cannot change status unless the VM is created on node
    if vm.status not in self.statuses and action != 'current':
        raise VmIsNotOperational

    if action not in self.actions:
        raise ExpectationFailed('Bad action')

    apiview = self.apiview
    f_ser = VmStatusFreezeSerializer(data=self.data)

    if f_ser.is_valid():
        freeze = apiview['freeze'] = f_ser.data['freeze']
        unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
    else:
        return FailureTaskResponse(request, f_ser.errors, vm=vm)

    # Valid real transitions fall through; freeze/unfreeze/current are handled
    # inline below and return early; anything else is rejected.
    if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
        pass

    elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
        # Admin-only: mark an already stopped VM as frozen (DB-only change, dummy task)
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
        res = {'message': 'VM %s is already stopped. Changing status to frozen.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
        # Admin-only: lift the frozen flag, returning the VM to stopped state
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
        res = {'message': 'Removing frozen status for VM %s.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'current':
        # Limit PUT /current/ action to be Admins and SuperAdmins
        if not request.user.is_admin(request):
            raise PermissionDenied

        if vm.status in self.statuses_force_change_allowed:
            return self.get_current_status(force_change=True)
        elif vm.status in self.stuck_statuses_force_change_allowed:
            # Stuck statuses may only be force-synced when no tasks are pending
            if vm.tasks:
                raise VmHasPendingTasks
            else:
                return self.get_current_status(force_change=True)
        else:
            raise VmIsNotOperational

    else:
        raise ExpectationFailed('Bad action')

    dc_settings = request.dc.settings

    # Internal/system VMs listed in VMS_NO_SHUTDOWN must never be stopped or rebooted
    if action in ('stop', 'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
        raise PreconditionRequired('Internal VM can\'t be stopped')

    lock = 'vm_status vm:%s' % vm.uuid
    stdin = None
    apiview['update'] = False
    transition_to_stopping = False

    # The update parameter is used by all actions (start, stop, reboot)
    ser_update = VmStatusUpdateJSONSerializer(data=self.data, default=(action in ('start', 'reboot')))

    if not ser_update.is_valid():
        return FailureTaskResponse(request, ser_update.errors, vm=vm)

    if vm.json_changed():
        # Pending config changes exist (json != json_active); honor the update flag
        apiview['update'] = ser_update.data['update']
        logger.info('VM %s json != json_active', vm)

        if not apiview['update']:
            logger.info('VM %s json_active update disabled', vm)

    if action == 'start':
        ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if ser.data and ser.iso:
            # Booting from a CD image: non-admins may only boot an installed VM
            # from the rescue CD
            if not request.user.is_admin(request) and vm.is_installed() and \
                    (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                raise PreconditionRequired('VM is not installed')

            msg = LOG_START_ISO
            iso = ser.iso
            cmd = self._start_cmd(iso=iso, iso2=ser.iso2, once=ser.data['cdimage_once'])
        else:
            msg = LOG_START
            iso = None
            cmd = self._start_cmd()

        if apiview['update']:
            # Refuse to combine start+update with other pending tasks
            if vm.tasks:
                raise VmHasPendingTasks

            cmd, stdin = self._add_update_cmd(cmd, os_cmd_allowed=False)

            if iso:
                msg = LOG_START_UPDATE_ISO
            else:
                msg = LOG_START_UPDATE

    else:
        # stop or reboot
        ser_stop_reboot = VmStatusStopSerializer(request, vm, data=self.data)

        if not ser_stop_reboot.is_valid():
            return FailureTaskResponse(request, ser_stop_reboot.errors, vm=vm)

        update = apiview.get('update', False)  # VmStatusUpdateJSONSerializer
        force = apiview['force'] = ser_stop_reboot.data.get('force', False)
        timeout = ser_stop_reboot.data.get('timeout', None)

        if not force and timeout:
            apiview['timeout'] = timeout

        if update:
            if vm.tasks:
                raise VmHasPendingTasks

            # This will always perform a vmadm stop command, followed by a vmadm update command and optionally
            # followed by a vmadm start command (reboot)
            pre_cmd = self._action_cmd('stop', force=force, timeout=timeout)

            if action == 'reboot':
                if force:
                    msg = LOG_REBOOT_FORCE_UPDATE
                else:
                    msg = LOG_REBOOT_UPDATE

                post_cmd = self._action_cmd('start')
            else:
                if force:
                    msg = LOG_STOP_FORCE_UPDATE
                else:
                    msg = LOG_STOP_UPDATE

                post_cmd = ''

            cmd, stdin = self._add_update_cmd(post_cmd, os_cmd_allowed=True, pre_cmd=pre_cmd)
        else:
            cmd = self._action_cmd(action, force=force, timeout=timeout)

            if force:
                if action == 'reboot':
                    msg = LOG_REBOOT_FORCE
                else:
                    # Forced stop takes a wider lock so it can interrupt a plain stop
                    lock += ' force'
                    msg = LOG_STOP_FORCE
            else:
                if action == 'reboot':
                    msg = LOG_REBOOT
                else:
                    msg = LOG_STOP

        if vm.status == Vm.STOPPING:
            # Already stopping: only a plain forced stop/reboot may proceed
            if update:
                raise PreconditionRequired('Cannot perform update while VM is stopping')
            if not force:
                raise VmIsNotOperational('VM is already stopping; try to use force')
        else:
            transition_to_stopping = True

    # Metadata consumed by the status callback task
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'detail': self.detail,
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
        'last_status': vm.status,
    }
    callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

    tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=lock, callback=callback,
                       queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        if transition_to_stopping:
            vm.save_status(Vm.STOPPING)

        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=self.detail, data=self.data,
                            api_data={'status': vm.status, 'status_display': vm.status_display()})
def call(self, request, owner_id, args, kwargs=None, meta=None, tt=TT_MGMT, tg=TG_DC_BOUND,
         tidlock=None, tidlock_timeout=None, cache_result=None, cache_timeout=None, expires=EXPIRES,
         nolog=False, ping_worker=True, check_user_tasks=True):
    """Create a task in the mgmt queue.

    Returns a (task_id, error_message, cached_result) triple. Exactly one of the
    three is meaningful per outcome: a cache hit returns only cached_result; a
    pending duplicate (tidlock) returns its task_id; failures return only an
    error_message; success returns the new task's id.
    """
    if kwargs is None:
        kwargs = {}

    if meta is None:
        meta = {}

    tid = task_id_from_request(request, owner_id=owner_id, tt=tt, tg=tg)
    # Human-readable label used in all log messages below
    task = 'MgmtTask %s[%s]%s' % (self.name, tid, args[:2])
    tidlock_acquired = False

    if cache_result:
        # Short-circuit: return a previously cached (pickled) result if present
        cache_result = self.CACHE_KEY_TEMPLATE % cache_result
        result = redis.get(cache_result)

        if result is not None:
            try:
                res = pickle.loads(result)
            except pickle.UnpicklingError:
                # Corrupted cache entry — log and fall through to run the task
                logger.critical('%s could not parse cache_result "%s"', task, cache_result)
            else:
                return None, None, res

    if ping_worker:
        # NOTE(review): ping_worker doubles as the ping timeout when truthy
        # (True is used as a numeric timeout here) — confirm intended.
        if not ping(Q_MGMT, timeout=ping_worker, count=2):
            return None, 'Task queue worker (%s) is not responding!' % queue_to_hostnames(Q_MGMT), None

    try:
        if tidlock:
            # De-duplicate identical tasks: if the lock already holds a task id,
            # reuse that pending task instead of creating a new one
            tidlock = self.TIDLOCK_KEY_TEMPLATE % tidlock
            task_lock = TaskLock(tidlock, desc=task)
            _tid = task_lock.get()

            if _tid:
                logger.info('%s found the same pending task %s :)', task, _tid)
                return _tid, None, None

            if tidlock_timeout is None:
                tidlock_timeout = expires

            tidlock_acquired = task_lock.acquire(tid, timeout=tidlock_timeout)

            if not tidlock_acquired:
                return None, 'MgmtTask did not acquire lock', None

        # Control parameters are smuggled to the worker via kwargs
        kwargs['meta'] = meta
        kwargs['tidlock'] = tidlock
        kwargs['cache_result'] = cache_result
        kwargs['cache_timeout'] = cache_timeout
        kwargs['nolog'] = nolog
        kwargs['check_user_tasks'] = check_user_tasks
        # Run task
        t = self.apply_async(args=args, kwargs=kwargs, queue=Q_MGMT, task_id=tid, expires=expires,
                             add_to_parent=False)
    except Exception as e:
        logger.exception(e)
        logger.error('%s could not be created (%s)', task, e)

        if tidlock_acquired:
            # tidlock_acquired will be True, only if task_lock exists
            # noinspection PyUnboundLocalVariable
            task_lock.delete(fail_silently=True, premature=True)

        return None, e, None
    else:
        if nolog:
            logger.debug('%s created', task)
        else:
            logger.info('%s created', task)

        return t.id, None, None
def put(self):
    """Change VM status (start/stop/reboot), including freeze/unfreeze handling.

    Older variant of the status-change view: the optional json_active update is
    wired inline into the start command here instead of via _add_update_cmd().
    """
    request, vm, action = self.request, self.vm, self.action

    # Cannot change status unless the VM is created on node
    if vm.status not in self.statuses:
        raise VmIsNotOperational

    if action not in self.actions:
        raise ExpectationFailed('Bad action')

    apiview = self.apiview
    f_ser = VmStatusFreezeSerializer(data=self.data)

    if f_ser.is_valid():
        freeze = apiview['freeze'] = f_ser.data['freeze']
        unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
    else:
        return FailureTaskResponse(request, f_ser.errors, vm=vm)

    # Valid real transitions fall through; freeze/unfreeze return early;
    # anything else is rejected.
    if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
        pass

    elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
        # Admin-only: mark an already stopped VM as frozen (DB-only change, dummy task)
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
        res = {'message': 'VM %s is already stopped. Changing status to frozen.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
        # Admin-only: lift the frozen flag, returning the VM to stopped state
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
        res = {'message': 'Removing frozen status for VM %s.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    else:
        raise ExpectationFailed('Bad action')

    dc_settings = request.dc.settings

    # Internal/system VMs listed in VMS_NO_SHUTDOWN must never be stopped or rebooted
    if action in ('stop', 'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
        raise PreconditionRequired('Internal VM can\'t be stopped')

    lock = 'vm_status vm:%s' % vm.uuid
    stdin = None
    apiview['update'] = False
    transition_to_stopping = False

    if action == 'start':
        msg = LOG_START
        ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if ser.data and ser.iso:
            # Booting from a CD image: non-admins may only boot an installed VM
            # from the rescue CD
            if not request.user.is_admin(request) and vm.is_installed() and \
                    (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                raise PreconditionRequired('VM is not installed')

            msg = LOG_START_ISO
            iso = ser.iso
            cmd = self._start_cmd(iso=iso, iso2=ser.iso2, once=ser.data['cdimage_once'])
        else:
            iso = None
            cmd = self._start_cmd()

        ser_update = VmStatusUpdateJSONSerializer(data=self.data)

        if ser_update.is_valid():
            if vm.json_changed():
                # Pending config changes exist (json != json_active)
                apiview['update'] = ser_update.data['update']
                logger.info('VM %s json != json_active', vm)

                if apiview['update']:
                    from api.vm.base.vm_manage import VmManage

                    stdin, os_cmd = VmManage.fix_update(vm.json_update())
                    stdin = stdin.dump()

                    if os_cmd:  # Dangerous, explicit update needed
                        # TODO: fix in gui
                        raise PreconditionRequired('VM must be updated first')

                    if iso:
                        msg = LOG_START_UPDATE_ISO
                    else:
                        msg = LOG_START_UPDATE

                    # Prepend vmadm update and fetch the fresh JSON, then start;
                    # preserve the update command's exit code via $e
                    cmd_update = 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; ' % (vm.uuid, vm.uuid)
                    cmd = cmd_update + cmd + '; exit $e'
                    # logger.info('VM %s json_active is going to be updated with json """%s"""', vm, stdin)
                else:
                    logger.warning('VM %s json_active update disabled', vm)
        else:
            return FailureTaskResponse(request, ser_update.errors, vm=vm)

    else:
        # stop or reboot
        force = ForceSerializer(data=self.data, default=False).is_true()
        cmd = self._action_cmd(action, force=force)

        if action == 'reboot':
            msg = LOG_REBOOT
        else:
            msg = LOG_STOP

        if force:
            apiview['force'] = True

            if action == 'reboot':
                msg = LOG_REBOOT_FORCE
            else:
                # Forced stop takes a wider lock so it can interrupt a plain stop
                lock += ' force'
                msg = LOG_STOP_FORCE
        elif vm.status == Vm.STOPPING:
            raise VmIsNotOperational('VM is already stopping; try to use force')
        else:
            transition_to_stopping = True

    # Metadata consumed by the status callback task
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'detail': self.detail,
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
        'last_status': vm.status,
    }
    callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

    tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=lock, callback=callback,
                       queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        if transition_to_stopping:
            vm.save_status(Vm.STOPPING)

        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=self.detail, data=self.data,
                            api_data={'status': vm.status, 'status_display': vm.status_display()})
def execute(request, owner_id, cmd, stdin=None, meta=None, callback=None, lock=None, lock_timeout=None,
            queue=None, expires=EXPIRES, tt=TT_EXEC, tg=TG_DC_BOUND, nolog=False, ping_worker=True,
            check_user_tasks=True, block_key=None):
    """_execute task wrapper. This just looks better and does some locking.

    Returns a (task_id, error_message) pair: on success error_message is None;
    on failure task_id is None — except when the lock is not acquired, where the
    task_id is returned alongside the error message.
    """
    task_id = task_id_from_request(request, owner_id=owner_id, tt=tt, tg=tg)
    # Human-readable label used in log messages; NOTE: rebound to the AsyncResult below
    task = 'Task %s[%s]("%s")' % (_execute.name, task_id, cmd)
    lock_key = lock
    lock_acquired = False

    if meta is None:
        meta = {}

    if ping_worker and queue:
        # callback=None means, that an automatic log task callback will run
        # NOTE(review): ping_worker doubles as the ping timeout when truthy
        # (True is used as a numeric timeout) — confirm intended.
        if callback is not False and queue != Q_MGMT:
            queues = [queue, Q_MGMT]
        else:
            queues = [queue]

        for q in queues:
            if not ping(q, timeout=ping_worker, count=2):
                return None, 'Task queue worker (%s) is not responding!' % queue_to_hostnames(q)

    try:
        if lock_key:
            if lock_timeout is None:
                lock_timeout = expires

            lock_key = KEY_PREFIX + lock
            task_lock = TaskLock(lock_key, desc=task)
            lock_acquired = task_lock.acquire(task_id, timeout=lock_timeout)

            if not lock_acquired:
                # Lock held by another task — the generated task_id is still returned
                return task_id, 'Task did not acquire lock'

        meta['nolog'] = nolog
        args = (cmd, stdin)
        kwargs = {'meta': meta, 'callback': callback, 'lock': lock_key, 'block': block_key,
                  'check_user_tasks': check_user_tasks}
        # Run task
        task = _execute.apply_async(args=args, kwargs=kwargs, queue=queue, task_id=task_id, expires=expires,
                                    add_to_parent=False)
    except Exception as e:
        logger.exception(e)
        logger.error('%s could not be created (%s)', task, e)

        if lock_acquired:
            # lock_acquired will be True, only if task_lock exists
            # noinspection PyUnboundLocalVariable
            task_lock.delete(fail_silently=True, premature=True)

        return None, e
    else:
        if nolog:
            logger.debug('%s created', task)
        else:
            logger.info('%s created', task)

        return task.id, None