def put(self): """ Performs call to execute_sysinfo """ if not self.node.is_online(): raise NodeIsNotOperational() apiview = { 'view': 'node_sysinfo', 'method': self.request.method, 'hostname': self.node.hostname, } meta = { 'apiview': apiview, 'msg': LOG_NODE_UPDATE, 'node_uuid': self.node.uuid, } task_id, err = execute_sysinfo(self.request, self.node.owner.id, queue=self.node.fast_queue, meta=meta, node_uuid=self.node.uuid, check_user_tasks=True) if err: return FailureTaskResponse(self.request, err) else: return TaskResponse(self.request, task_id, api_view=apiview, obj=self.node, msg=LOG_NODE_UPDATE, data=self.data)
def get_current_status(self, force_change=False):
    """Get current VM status"""
    request, vm = self.request, self.vm

    if vm.node.status not in vm.node.STATUS_OPERATIONAL:
        raise NodeIsNotOperational

    apiview = self.apiview
    msg = LOG_STATUS_GET
    cmd = 'vmadm list -p -H -o state uuid=' + vm.uuid
    meta = {
        'output': {'returncode': 'returncode', 'stdout': 'stdout', 'stderr': 'stderr', 'hostname': vm.hostname},
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
        'last_status': vm.status,
    }
    callback = (
        'api.vm.status.tasks.vm_status_current_cb',
        {'vm_uuid': vm.uuid, 'force_change': force_change}
    )

    tid, err = execute(request, vm.owner.id, cmd, meta=meta, callback=callback, queue=vm.node.fast_queue,
                       nolog=True)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, vm=vm, api_view=apiview, data=self.data)  # No msg
def put(self): """Sync snapshots in DB with snapshots on compute node and update snapshot status and size.""" request, vm = self.request, self.vm disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, self.data) self._check_vm_status() # Prepare task data apiview = { 'view': 'vm_snapshot_list', 'method': request.method, 'hostname': vm.hostname, 'disk_id': disk_id, } meta = { 'output': {'returncode': 'returncode', 'stdout': 'data', 'stderr': 'message'}, 'replace_text': ((vm.uuid, vm.hostname),), 'msg': LOG_SNAPS_SYNC, 'vm_uuid': vm.uuid, 'apiview': apiview, } detail = 'disk_id=%s' % disk_id cmd = 'esnapshot list "%s"' % zfs_filesystem lock = self.LOCK % (vm.uuid, real_disk_id) callback = ('api.vm.snapshot.tasks.vm_snapshot_sync_cb', {'vm_uuid': vm.uuid, 'disk_id': real_disk_id}) # Run task tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, callback=callback, queue=vm.node.fast_queue) if err: return FailureTaskResponse(request, err, vm=vm) else: return TaskResponse(request, tid, msg=LOG_SNAPS_SYNC, vm=vm, api_view=apiview, detail=detail, data=self.data)
def task_response(self):
    return TaskResponse(self.request, self.task_id, msg=self.msg, obj=self.obj, api_view=self.apiview,
                        detail=self.detail, data=self.data)
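# A minimal sketch of the dispatch pattern shared by the views in this module (illustrative only: the
# command and the view/msg names below are placeholders; execute(), TaskResponse and FailureTaskResponse
# are the real helpers used throughout). Every view builds an apiview/meta dict, calls execute(), which
# returns a (task_id, error) pair, and wraps the outcome in a task response.
def _example_execute_pattern(request, obj):
    apiview = {'view': 'example_view', 'method': request.method, 'hostname': obj.node.hostname}
    meta = {'apiview': apiview, 'msg': 'Example message'}  # stored with the task log entry

    # 'true' is a hypothetical no-op command; the real views dispatch vmadm/esnapshot/esbackup commands
    tid, err = execute(request, obj.owner.id, 'true', meta=meta, queue=obj.node.fast_queue)

    if err:  # task was not created (e.g. lock already held, node worker unreachable)
        return FailureTaskResponse(request, err, obj=obj)

    return TaskResponse(request, tid, msg='Example message', obj=obj, api_view=apiview)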
def _run_execute(self, msg, cmd, status):
    self._check_img()
    request, ns, img = self.request, self.ns, self.img
    node = ns.node
    detail = 'image=%s' % img.name
    apiview = {
        'view': 'node_image',
        'method': request.method,
        'hostname': node.hostname,
        'zpool': ns.zpool,
        'name': img.name,
    }

    # Set importing/deleting status
    img.set_ns_status(ns, status)

    # Create task
    tid, err = execute(
        request, ns.storage.owner.id, cmd,
        tg=TG_DC_UNBOUND,
        queue=node.image_queue,
        meta={
            'output': {'returncode': 'returncode', 'stdout': 'message'},
            'replace_stdout': ((node.uuid, node.hostname), (img.uuid, img.name)),
            'msg': msg,
            'nodestorage_id': ns.id,
            'apiview': apiview,
        },
        callback=('api.node.image.tasks.node_image_cb',
                  {'nodestorage_id': ns.id, 'zpool': ns.zpool, 'img_uuid': img.uuid}),
        lock='node_image ns:%s img:%s' % (ns.id, img.uuid),  # Lock image per node storage
        expires=IMAGE_TASK_EXPIRES,
    )

    if err:
        img.del_ns_status(ns)
        return FailureTaskResponse(request, err, obj=ns)
    else:
        return TaskResponse(request, tid, msg=msg, obj=ns, api_view=apiview, detail=detail, data=self.data)
def put(self):
    request, vm, command = self.request, self.vm, self.command

    if not vm.is_hvm():
        raise OperationNotSupported

    if vm.status not in (vm.RUNNING, vm.STOPPING):
        raise VmIsNotOperational

    if command not in COMMANDS:
        raise InvalidInput('Invalid command')

    ser = VmQGASerializer(request, command, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=vm)

    apiview = {
        'view': 'vm_qga',
        'method': request.method,
        'hostname': vm.hostname,
        'command': command,
    }
    cmd = 'qga-client %s %s 2>&1' % (vm.qga_socket_path, ' '.join(ser.get_full_command()))
    lock = 'vm_qga vm:%s' % vm.uuid
    meta = {
        'output': {'returncode': 'returncode', 'stdout': 'message'},
        'replace_stdout': ((vm.uuid, vm.hostname),),
        'apiview': apiview,
        'msg': LOG_QGA_COMMAND,
        'vm_uuid': vm.uuid,
        'check_returncode': True,
    }

    # callback=None means that an implicit LOGTASK callback will be used (task_log_cb)
    tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, msg=LOG_QGA_COMMAND, obj=vm, api_view=apiview, data=self.data,
                            detail_dict=ser.detail_dict())
def put(self):
    if 'note' in self.data:
        # Changing snapshot note instead of rollback (not logging)
        return self._update_note()

    request, vm, snap = self.request, self.vm, self.snap

    if vm.node.status not in vm.node.STATUS_OPERATIONAL:
        raise NodeIsNotOperational

    if vm.locked:
        raise VmIsLocked

    self._check_vm_status()
    self._check_snap_status()
    apiview, detail = self._get_apiview_detail()
    apiview['force'] = bool(ForceSerializer(data=self.data, default=True))

    if not apiview['force']:
        snaplast = Snapshot.objects.only('id').filter(vm=vm, disk_id=snap.disk_id).order_by('-id')[0]

        if snap.id != snaplast.id:
            raise ExpectationFailed('VM has more recent snapshots')

    if vm.status != vm.STOPPED:
        raise VmIsNotOperational('VM is not stopped')

    if vm.tasks:
        raise VmHasPendingTasks

    msg = LOG_SNAP_UPDATE
    lock = self.LOCK % (vm.uuid, snap.disk_id)
    cmd = 'esnapshot rollback "%s@%s" 2>&1' % (self.zfs_filesystem, snap.zfs_name)

    vm.set_notready()
    tid, err = execute(request, vm.owner.id, cmd, meta=snap_meta(vm, msg, apiview, detail), lock=lock,
                       callback=snap_callback(vm, snap), queue=vm.node.fast_queue)

    if err:
        vm.revert_notready()
        return FailureTaskResponse(request, err, vm=vm)
    else:
        snap.save_status(snap.ROLLBACK)
        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=detail, data=self.data)
def put(self, internal=False):
    """Sync snapshots in DB with snapshots on compute node storage and update snapshot status and size."""
    request, ns = self.request, self.ns
    node = ns.node

    # Prepare task data
    apiview = {
        'view': 'node_vm_snapshot_list',
        'method': request.method,
        'hostname': node.hostname,
        'zpool': ns.zpool,
    }
    meta = {
        'output': {'returncode': 'returncode', 'stdout': 'data', 'stderr': 'message'},
        'msg': LOG_NS_SNAPS_SYNC,
        'nodestorage_id': ns.id,
        'apiview': apiview,
        'internal': internal,
    }
    cmd = 'esnapshot list "%s"' % ns.zpool
    lock = self.LOCK % ns.id
    callback = ('api.node.snapshot.tasks.node_vm_snapshot_sync_cb', {'nodestorage_id': ns.id})

    if node.status != node.ONLINE:
        raise NodeIsNotOperational

    # Run task
    tid, err = execute(request, ns.storage.owner.id, cmd, tg=TG_DC_UNBOUND, meta=meta, lock=lock,
                       callback=callback, queue=node.fast_queue, check_user_tasks=not internal)

    if internal:
        return tid, err

    if err:
        return FailureTaskResponse(request, err, dc_bound=False)
    else:
        return TaskResponse(request, tid, msg=LOG_NS_SNAPS_SYNC, obj=ns, api_view=apiview, data=self.data)
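# A sketch of how the internal=True path above can be consumed (illustrative only: the caller shown here
# is hypothetical). With internal=True the view skips user-task accounting and returns the raw
# (task_id, error) pair instead of wrapping it in a TaskResponse.
def _example_internal_snapshot_sync(view):
    tid, err = view.put(internal=True)  # raw (task_id, error) pair instead of an HTTP response

    if err:
        logger.warning('Internal snapshot sync could not be started: %s', err)

    return tid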
def post(self):
    self._check_vm_status()
    apiview, detail = self._get_apiview_detail()
    request, vm, snap = self.request, self.vm, self.snap
    snap.status = snap.PENDING
    snap.define_id = self.snap_define_id
    snap.type = self.snaptype
    ser = SnapshotSerializer(request, snap, data=self.data)
    fsfreeze = ''

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, vm=vm)

    if vm.is_hvm() and self.data.get('fsfreeze', False):
        qga_socket = vm.qga_socket_path

        if qga_socket:
            snap.fsfreeze = True

            if vm.status != vm.STOPPED:
                fsfreeze = '"%s"' % qga_socket

    self._check_snap_limit()
    self._check_snap_size_limit()  # Issue #chili-848
    self._check_snap_dc_size_limit()  # Issue #chili-848
    snap.zpool = vm.node.nodestorage_set.get(zpool=self.zpool)
    snap.save()
    detail += ', type=%s, fsfreeze=%s' % (self.snaptype, str(snap.fsfreeze).lower())
    msg = LOG_SNAP_CREATE
    lock = self.LOCK % (vm.uuid, snap.disk_id)
    cmd = 'esnapshot create "%s@%s" "es:snapname=%s" %s 2>&1' % (self.zfs_filesystem, snap.zfs_name,
                                                                 snap.name, fsfreeze)

    tid, err = execute(request, vm.owner.id, cmd, meta=snap_meta(vm, msg, apiview, detail), lock=lock,
                       callback=snap_callback(vm, snap), queue=vm.node.fast_queue, tt=self.tt)

    if err:
        snap.delete()
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=detail, data=self.data)
def delete(self): """Delete multiple snapshots""" request, data, vm = self.request, self.data, self.vm disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data) # Parse data['snapnames'] snaps, __ = get_snapshots(request, vm, real_disk_id, data) self._check_vm_status() snaps_lost = snaps.filter(status=Snapshot.LOST) msg = LOG_SNAPS_DELETE if snaps_lost: _result = {'message': 'Snapshots successfully deleted from DB'} _detail = "snapnames='%s', disk_id=%s" % (','.join(i.name for i in snaps_lost), disk_id) snaps_lost.delete() res = SuccessTaskResponse(request, _result, msg=msg, vm=vm, detail=_detail) snaps = snaps.filter(status=Snapshot.OK) # Work with OK snapshots from now on if not snaps: return res elif any(i.status != Snapshot.OK for i in snaps): raise ExpectationFailed('VM snapshot status is not OK') # Task type (a = automatic, e = manual) if getattr(request, 'define_id', None): tt = TT_AUTO else: tt = TT_EXEC snapnames = [i.name for i in snaps] _apiview_ = {'view': 'vm_snapshot_list', 'method': request.method, 'hostname': vm.hostname, 'disk_id': disk_id, 'snapnames': snapnames} _detail_ = "snapnames='%s', disk_id=%s" % (','.join(snapnames), disk_id) snap_ids = [snap.id for snap in snaps] zfs_names = ','.join([snap.zfs_name for snap in snaps]) lock = self.LOCK % (vm.uuid, real_disk_id) cmd = 'esnapshot destroy "%s@%s" 2>&1' % (zfs_filesystem, zfs_names) callback = ('api.vm.snapshot.tasks.vm_snapshot_list_cb', {'vm_uuid': vm.uuid, 'snap_ids': snap_ids}) tid, err = execute(request, vm.owner.id, cmd, meta=snap_meta(vm, msg, _apiview_, _detail_), lock=lock, callback=callback, queue=vm.node.fast_queue, tt=tt) if err: return FailureTaskResponse(request, err, vm=vm) else: snaps.update(status=Snapshot.PENDING) return TaskResponse(request, tid, msg=msg, vm=vm, api_view=_apiview_, detail=_detail_, data=self.data)
def post(self):
    request, vm = self.request, self.vm

    if not self.vm.is_hvm():
        raise OperationNotSupported

    if vm.status not in (vm.RUNNING, vm.STOPPING):
        raise VmIsNotOperational

    apiview = {
        'view': 'vm_screenshot',
        'method': request.method,
        'hostname': vm.hostname,
    }
    cmd = 'vmadm sysrq %s nmi >&2 && sleep 0.5 && vmadm sysrq %s screenshot >&2 && ' \
          'cat /%s/%s/root/tmp/vm.ppm' % (vm.uuid, vm.uuid, vm.zpool, vm.uuid)
    lock = 'vm_screenshot vm:%s' % vm.uuid
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'image'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'encode_stdout': True,
        'compress_stdout': True,
        'apiview': apiview,
    }
    callback = ('api.vm.other.tasks.vm_screenshot_cb', {'vm_uuid': vm.uuid})

    tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, callback=callback,
                       queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, vm=vm, api_view=apiview, data=self.data)  # No msg
def _run_execute(self, msg, cmd, stdin=None, detail_dict=None, block_key=None, **apiview_kwargs):
    request, master_vm, slave_vm, repname = self.request, self.vm, self.slave_vm, self.slave_vm.name

    if detail_dict is None:
        detail_dict = {'repname': repname}

    # Prepare task data
    apiview = {
        'view': self._api_view_,
        'method': request.method,
        'hostname': master_vm.hostname,
        'repname': repname,
    }
    apiview.update(apiview_kwargs)
    meta = {
        'output': {'returncode': 'returncode', 'stdout': 'jsons', 'stderr': 'message'},
        'replace_stdout': ((master_vm.uuid, master_vm.hostname), (slave_vm.uuid, repname)),
        'msg': msg,
        'vm_uuid': master_vm.uuid,
        'slave_vm_uuid': slave_vm.uuid,
        'apiview': apiview,
    }
    lock = 'vm_replica vm:%s' % master_vm.uuid
    callback = (
        'api.vm.replica.tasks.%s_cb' % self._api_view_,
        {'vm_uuid': master_vm.uuid, 'slave_vm_uuid': slave_vm.uuid}
    )
    cmd = cmd.format(
        master_uuid=master_vm.uuid,
        slave_uuid=slave_vm.uuid,
        master_node=master_vm.node.address,
        id=slave_vm.rep_id,
    )

    self._check_slave_vm_node()

    # Execute task
    tid, err = execute(request, master_vm.owner.id, cmd, meta=meta, lock=lock, callback=callback, stdin=stdin,
                       queue=slave_vm.node.fast_queue, block_key=block_key)

    if err:
        return FailureTaskResponse(request, err, vm=master_vm)
    else:
        self._success = True
        return TaskResponse(request, tid, msg=msg, vm=master_vm, api_view=apiview, detail_dict=detail_dict,
                            data=self.data)
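# A sketch of the caller side of _run_execute() above (illustrative only: the command string and the
# LOG_REPLICA_UPDATE constant are placeholders, not a real replication invocation). The cmd argument is a
# str.format() template; _run_execute() fills in {master_uuid}, {slave_uuid}, {master_node} and {id}
# before dispatching the command to the slave node's fast queue.
def _example_replica_call(self):
    return self._run_execute(LOG_REPLICA_UPDATE,  # assumed log-message constant
                             'replication-tool --master {master_uuid} --slave {slave_uuid} '
                             '--host {master_node} --rep-id {id}')  # hypothetical command template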
def delete(self):
    request, vm = self.request, self.vm

    # only admin
    if not (request.user and request.user.is_admin(request)):
        raise PermissionDenied

    if vm.uuid == ImageVm.get_uuid():
        raise VmIsLocked('VM is image server')

    if vm.locked:
        raise VmIsLocked

    if vm.status not in (vm.STOPPED, vm.FROZEN):
        raise VmIsNotOperational('VM is not stopped')

    if vm.tasks:
        raise VmHasPendingTasks

    apiview = self.apiview
    msg = LOG_VM_DELETE
    cmd = 'vmadm delete ' + vm.uuid
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message'},
        'replace_text': ((vm.uuid, vm.hostname),),
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
    }
    callback = ('api.vm.base.tasks.vm_delete_cb', {'vm_uuid': vm.uuid})

    logger.debug('Deleting VM %s from compute node', vm)

    err = True
    vm.set_notready()
    try:
        tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=self.lock, expires=VM_VM_EXPIRES,
                           callback=callback, queue=vm.node.slow_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
    finally:
        if err:
            vm.revert_notready()
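# The delete/update/create/migrate views share a revert-on-failure idiom worth spelling out (a minimal
# sketch, using the real set_notready()/revert_notready() helpers; cmd and meta are placeholders):
# err starts as True so the finally clause reverts the "notready" state both when execute() reports an
# error and when an exception escapes before err is reassigned.
def _example_revert_on_failure(request, vm, cmd, meta):
    err = True  # assume failure until execute() proves otherwise
    vm.set_notready()
    try:
        tid, err = execute(request, vm.owner.id, cmd, meta=meta, queue=vm.node.fast_queue)

        if err:
            return FailureTaskResponse(request, err, vm=vm)

        return TaskResponse(request, tid, vm=vm)
    finally:
        if err:  # runs on the error response above and on any exception path
            vm.revert_notready()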
def delete(self):
    self._check_vm_status()
    self._check_snap_status(lost_ok=True)
    request, vm, snap = self.request, self.vm, self.snap
    apiview, detail = self._get_apiview_detail()
    msg = LOG_SNAP_DELETE

    if snap.status == Snapshot.LOST:
        snap.delete()
        res = {'message': 'Snapshot successfully deleted from DB'}
        return SuccessTaskResponse(request, res, msg=msg, vm=vm, detail=detail)

    SnapshotSerializer(request, snap)
    lock = self.LOCK % (vm.uuid, snap.disk_id)
    cmd = 'esnapshot destroy "%s@%s" 2>&1' % (self.zfs_filesystem, snap.zfs_name)

    tid, err = execute(request, vm.owner.id, cmd, meta=snap_meta(vm, msg, apiview, detail), lock=lock,
                       callback=snap_callback(vm, snap), queue=vm.node.fast_queue, tt=self.tt)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        snap.save_status(snap.PENDING)
        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=detail, data=self.data)
def put(self):
    request, vm = self.request, self.vm

    # only admin
    if not (request.user and request.user.is_admin(request)):
        raise PermissionDenied

    apiview = self.apiview
    apiview['force'] = bool(ForceSerializer(data=self.data, default=False))

    if vm.status not in (vm.RUNNING, vm.STOPPED):
        raise VmIsNotOperational('VM is not stopped or running')

    if apiview['force']:
        # final cmd and empty stdin
        cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
        stdin = None
        block_key = None

    elif vm.json_changed():
        if vm.locked:
            raise VmIsLocked

        json_update = vm.json_update()
        self.check_update(json_update)

        if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
            raise VmHasPendingTasks

        # create json suitable for update
        stdin, cmd1 = self.fix_update(json_update)
        stdin = stdin.dump()

        # cmd1 = zfs set... >&2;
        if cmd1 and vm.snapshot_set.exists():
            raise ExpectationFailed('VM has snapshots')

        # final cmd
        cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

        # Possible node_image import task which will block this task on node worker
        block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

    else:  # JSON unchanged and not force
        detail = 'Successfully updated VM %s (locally)' % vm.hostname
        res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
        vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

        return res

    msg = LOG_VM_UPDATE
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
    }
    callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid})

    logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

    err = True
    vm.set_notready()
    try:
        tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock,
                           callback=callback, queue=vm.node.fast_queue, block_key=block_key)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
    finally:
        if err:
            vm.revert_notready()
def post(self):
    request, vm = self.request, self.vm
    ser = VmCreateSerializer(data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, vm=vm)

    if not vm.is_kvm():
        if not (vm.dc.settings.VMS_VM_SSH_KEYS_DEFAULT or vm.owner.usersshkey_set.exists()):
            raise PreconditionRequired('VM owner has no SSH keys available')

    apiview = self.apiview
    # noinspection PyTypeChecker
    cmd = 'vmadm create >&2; e=$?; %s vmadm get %s 2>/dev/null; vmadm start %s >&2; exit $e' % (
        self.fix_create(vm), vm.uuid, vm.uuid)
    recreate = apiview['recreate'] = ser.data['recreate']

    if recreate:
        # recreate should be available to every vm owner
        if not (request.user and request.user.is_authenticated()):
            raise PermissionDenied

        if vm.locked:
            raise VmIsLocked

        if vm.status != vm.STOPPED:
            raise VmIsNotOperational('VM is not stopped')

        if not ser.data['force']:
            raise ExpectationFailed('Are you sure?')

        msg = LOG_VM_RECREATE
        # noinspection PyAugmentAssignment
        cmd = 'vmadm delete ' + vm.uuid + ' >&2 && sleep 1; ' + cmd

    elif vm.status == vm.NOTCREATED:
        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        if not vm.node:  # we need to find a node for this vm now
            logger.debug('VM %s has no compute node defined. Choosing node automatically', vm)
            VmDefineView(request).choose_node(vm)
            logger.info('New compute node %s for VM %s was chosen automatically.', vm.node, vm)

        msg = LOG_VM_CREATE

    else:
        raise VmIsNotOperational('VM is already created')

    # Check boot flag (KVM) or disk image (OS) (bug #chili-418)
    if not vm.is_bootable():
        raise PreconditionRequired('VM has no bootable disk')

    if vm.tasks:
        raise VmHasPendingTasks

    old_status = vm.status
    deploy = apiview['deploy'] = vm.is_deploy_needed()
    resize = apiview['resize'] = vm.is_resize_needed()

    if not vm.is_blank():
        vm.set_root_pw()

    # Set new status also for blank VM (where deployment is not needed)
    # This status will be changed in vm_status_event_cb (if everything goes well).
    vm.status = vm.CREATING
    vm.save()  # save status / node / vnc_port / root_pw

    stdin = vm.fix_json(deploy=deploy, resize=resize, recreate=recreate).dump()
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
    }
    callback = ('api.vm.base.tasks.vm_create_cb', {'vm_uuid': vm.uuid})
    err = True

    try:
        # Possible node_image import task which will block this task on node worker
        block_key = self.node_image_import(vm.node, vm.json_get_disks())
        logger.debug('Creating new VM %s on node %s with json: """%s"""', vm, vm.node, stdin)
        tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, expires=VM_VM_EXPIRES,
                           lock=self.lock, callback=callback, queue=vm.node.slow_queue, block_key=block_key)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            # Inform user about creating
            vm_status_changed(tid, vm, vm.CREATING, save_state=False)
            return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
    finally:
        if err:
            # Revert old status
            vm.status = old_status
            vm.save_status()
def put(self):
    request, vm, action = self.request, self.vm, self.action

    # Cannot change status unless the VM is created on node
    if vm.status not in self.statuses:
        raise VmIsNotOperational

    if action not in self.actions:
        raise ExpectationFailed('Bad action')

    apiview = self.apiview
    f_ser = VmStatusFreezeSerializer(data=self.data)

    if f_ser.is_valid():
        freeze = apiview['freeze'] = f_ser.data['freeze']
        unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
    else:
        return FailureTaskResponse(request, f_ser.errors, vm=vm)

    if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
        pass

    elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
        res = {'message': 'VM %s is already stopped. Changing status to frozen.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
        res = {'message': 'Removing frozen status for VM %s.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    else:
        raise ExpectationFailed('Bad action')

    dc_settings = request.dc.settings

    if action in ('stop', 'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
        raise PreconditionRequired('Internal VM can\'t be stopped')

    lock = 'vm_status vm:%s' % vm.uuid
    stdin = None
    apiview['update'] = False
    transition_to_stopping = False

    if action == 'start':
        msg = LOG_START
        ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if ser.data and ser.iso:
            if not request.user.is_admin(request) and vm.is_installed() and \
                    (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                raise PreconditionRequired('VM is not installed')

            msg = LOG_START_ISO
            iso = ser.iso
            cmd = self._start_cmd(iso=iso, iso2=ser.iso2, once=ser.data['cdimage_once'])
        else:
            iso = None
            cmd = self._start_cmd()

        ser_update = VmStatusUpdateJSONSerializer(data=self.data)

        if ser_update.is_valid():
            if vm.json_changed():
                apiview['update'] = ser_update.data['update']
                logger.info('VM %s json != json_active', vm)

                if apiview['update']:
                    from api.vm.base.vm_manage import VmManage

                    stdin, os_cmd = VmManage.fix_update(vm.json_update())
                    stdin = stdin.dump()

                    if os_cmd:  # Dangerous, explicit update needed
                        # TODO: fix in gui
                        raise PreconditionRequired('VM must be updated first')

                    if iso:
                        msg = LOG_START_UPDATE_ISO
                    else:
                        msg = LOG_START_UPDATE

                    cmd_update = 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; ' % (vm.uuid, vm.uuid)
                    cmd = cmd_update + cmd + '; exit $e'
                    # logger.info('VM %s json_active is going to be updated with json """%s"""', vm, stdin)
                else:
                    logger.warning('VM %s json_active update disabled', vm)
        else:
            return FailureTaskResponse(request, ser_update.errors, vm=vm)

    else:
        force = ForceSerializer(data=self.data, default=False).is_true()
        cmd = self._action_cmd(action, force=force)

        if action == 'reboot':
            msg = LOG_REBOOT
        else:
            msg = LOG_STOP

        if force:
            apiview['force'] = True

            if action == 'reboot':
                msg = LOG_REBOOT_FORCE
            else:
                lock += ' force'
                msg = LOG_STOP_FORCE

        elif vm.status == Vm.STOPPING:
            raise VmIsNotOperational('VM is already stopping; try to use force')
        else:
            transition_to_stopping = True

    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'detail': self.detail,
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
        'last_status': vm.status,
    }
    callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

    tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=lock, callback=callback,
                       queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        if transition_to_stopping:
            vm.save_status(Vm.STOPPING)

        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=self.detail,
                            data=self.data, api_data={'status': vm.status,
                                                      'status_display': vm.status_display()})
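# Note on the chained command built in the start-with-update branch above: the shell line saves the exit
# code of vmadm update (e=$?), still emits the current config via vmadm get (so the callback receives the
# JSON on stdout either way), then runs the start command and exits with the saved code -- the task result
# therefore reflects the update outcome, not the trailing commands.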
def _run_execute(self, msg, cmd, recover_on_error=False, delete_on_error=False, error_fun=None, vm=None,
                 snap=None, detail_dict=None, stdin=None, cmd_add=None, cb_add=None):
    exc = None
    img, img_server, request = self.img, self.img_server, self.request
    self.obj = img  # self.execute() requirement

    # noinspection PyBroadException
    try:
        cmd += ' -d %s >&2' % img_server.datasets_dir

        if cmd_add:
            cmd += cmd_add

        lock = 'image_manage %s' % img.uuid
        callback = ('api.image.base.tasks.image_manage_cb', {'image_uuid': img.uuid})
        apiview = {'view': 'image_manage', 'method': request.method, 'name': img.name}
        meta = {
            'msg': msg,
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_text': [(img.uuid, img.name)],
            'image_uuid': img.uuid,
            'apiview': apiview,
        }

        if cb_add:
            callback[1].update(cb_add)

        if vm:  # image_snapshot view
            meta['vm_uuid'] = vm.uuid
            meta['replace_text'].append((vm.uuid, vm.hostname))
            callback[1]['vm_uuid'] = vm.uuid
            callback[1]['snap_id'] = snap.id
            apiview['view'] = 'image_snapshot'
            snap_data = {'hostname': vm.hostname, 'snapname': snap.name, 'disk_id': snap.array_disk_id}
            apiview.update(snap_data)
            detail_dict.update(snap_data)

        if self.execute(cmd, meta=meta, lock=lock, callback=callback, tg=TG_DC_UNBOUND,
                        queue=img_server.node.image_queue, stdin=stdin, expires=IMAGE_TASK_EXPIRES):
            if request.method == 'POST' and img.dc_bound:
                attach_dc_virt_object(self.task_id, LOG_IMAGE_ATTACH, img, img.dc_bound, user=request.user)

            return TaskResponse(request, self.task_id, msg=msg, obj=img, api_view=apiview,
                                detail_dict=detail_dict, data=self.data)
    except Exception as exc:
        pass

    # Rollback + return error response
    if error_fun:
        error_fun()

    if delete_on_error:
        img.delete()
    else:
        if recover_on_error:
            for attr, value in img.backup.items():
                setattr(img, attr, value)

            img.backup = {}  # Remove backup
            img.manifest = img.manifest_active
            img.status = Image.OK
            img.save()
        else:
            img.save_status(Image.OK)

    if exc:  # This should never happen
        raise exc

    return FailureTaskResponse(request, self.error, obj=img, dc_bound=self.dc_bound)
def put(self):
    assert self.request.dc.is_default()

    ser = UpdateSerializer(self.request, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, dc_bound=False)

    node = self.node
    version = ser.data['version']
    key = ser.data.get('key')
    cert = ser.data.get('cert')
    del node.system_version  # Request latest version in next command
    node_version = node.system_version

    if not (isinstance(node_version, text_type) and node_version):
        raise NodeIsNotOperational('Node version information could not be retrieved')

    node_version = node_version.split(':')[-1]  # remove edition prefix

    if version == ('v' + node_version) and not ser.data.get('force'):
        raise PreconditionRequired('Node is already up-to-date')

    if node.status != node.OFFLINE:
        raise NodeIsNotOperational('Unable to perform update on node that is not in maintenance state')

    if node_version.startswith('2.'):
        # Old-style (pre 3.0) update mechanism
        return self._update_v2(version, key=key, cert=cert)

    # Upload key and cert and get command array
    worker = node.worker(Q_FAST)
    update_cmd = worker_command('system_update_command', worker, version=version, key=key, cert=cert,
                                force=ser.data.get('force'), timeout=10)

    if update_cmd is None:
        raise GatewayTimeout('Node worker is not responding')

    if not isinstance(update_cmd, list):
        raise PreconditionRequired('Node update command could not be retrieved')

    msg = LOG_SYSTEM_UPDATE
    _apiview_ = {
        'view': 'system_node_update',
        'method': self.request.method,
        'hostname': node.hostname,
        'version': version,
    }
    meta = {
        'apiview': _apiview_,
        'msg': msg,
        'node_uuid': node.uuid,
        'output': {'returncode': 'returncode', 'stdout': 'message'},
        'check_returncode': True,
    }
    lock = self.LOCK % node.hostname
    cmd = '%s 2>&1' % ' '.join(update_cmd)

    tid, err = execute(self.request, node.owner.id, cmd, meta=meta, lock=lock, queue=node.fast_queue,
                       tg=TG_DC_UNBOUND)

    if err:
        return FailureTaskResponse(self.request, err, dc_bound=False)
    else:
        return TaskResponse(self.request, tid, msg=msg, obj=node, api_view=_apiview_, data=self.data,
                            dc_bound=False, detail_dict=ser.detail_dict(force_full=True))
def put(self):
    request, vm = self.request, self.vm

    if vm.locked:
        raise VmIsLocked

    if vm.status not in (vm.STOPPED, vm.RUNNING):
        raise VmIsNotOperational('VM is not stopped or running')

    if vm.json_changed():
        raise PreconditionRequired('VM definition has changed; Update first')

    ser = VmMigrateSerializer(request, vm, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, vm=vm)

    if vm.tasks:
        raise VmHasPendingTasks

    err = True
    ghost_vm = None
    # Set VM to nonready (+"api lock")
    vm.set_notready()

    try:
        # Create a dummy/placeholder VM
        ghost_vm = ser.save_ghost_vm()

        # Possible node_image import task which will block this task on node worker
        block_key = ser.node_image_import()

        # We have a custom detail dict with all necessary api view parameters
        detail_dict = ser.detail_dict()

        # Prepare task data
        apiview = {'view': 'vm_migrate', 'method': request.method, 'hostname': vm.hostname}
        apiview.update(detail_dict)
        lock = 'vm_migrate vm:%s' % vm.uuid
        meta = {
            'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
            'replace_stderr': ((vm.uuid, vm.hostname),),
            'msg': LOG_MIGRATE,
            'vm_uuid': vm.uuid,
            'slave_vm_uuid': ghost_vm.uuid,
            'apiview': apiview,
        }
        callback = ('api.vm.migrate.tasks.vm_migrate_cb',
                    {'vm_uuid': vm.uuid, 'slave_vm_uuid': ghost_vm.uuid})

        # Execute task
        tid, err = execute(request, vm.owner.id, ser.esmigrate_cmd, meta=meta, lock=lock, callback=callback,
                           queue=vm.node.fast_queue, block_key=block_key)

        if err:  # Error, revert VM status, delete placeholder VM
            return FailureTaskResponse(request, err, vm=vm)
        else:  # Success, task is running
            return TaskResponse(request, tid, msg=LOG_MIGRATE, vm=vm, api_view=apiview,
                                detail_dict=detail_dict, data=self.data)
    finally:
        if err:
            vm.revert_notready()

            if ghost_vm:
                ghost_vm.delete()
def put(self):
    request, vm = self.request, self.vm

    # only admin
    if not (request.user and request.user.is_admin(request)):
        raise PermissionDenied

    node = vm.node
    apiview = self.apiview
    apiview['force'] = bool(ForceSerializer(data=self.data, default=False))
    queue = vm.node.fast_queue
    new_node_uuid = None
    detail_dict = {}

    if vm.status not in (vm.RUNNING, vm.STOPPED):
        raise VmIsNotOperational('VM is not stopped or running')

    if apiview['force']:
        detail_dict['force'] = True
        # final cmd and empty stdin
        cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
        stdin = None
        block_key = None
        node_param = self.data.get('node')

        if node_param:
            if not request.user.is_staff:
                raise PermissionDenied

            node = get_node(request, node_param, dc=request.dc, exists_ok=True, noexists_fail=True)

            if node.hostname == vm.node.hostname:
                raise InvalidInput('VM already has the requested node set in DB')

            apiview['node'] = detail_dict['node'] = node.hostname
            queue = node.fast_queue
            new_node_uuid = node.uuid

    elif vm.json_changed():
        if vm.locked:
            raise VmIsLocked

        json_update = vm.json_update()
        self.check_update(json_update)

        if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
            raise VmHasPendingTasks

        # create json suitable for update
        stdin, cmd1 = self.fix_update(json_update)
        self.validate_update(vm, stdin, cmd1)
        stdin = stdin.dump()

        # final cmd
        cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)

        # Possible node_image import task which will block this task on node worker
        block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

    else:  # JSON unchanged and not force
        detail = 'Successfully updated VM %s (locally)' % vm.hostname
        res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
        vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

        return res

    # Check compute node status after we know which compute node the task is going to be run on
    # The internal vm.node.status checking is disabled in get_vm() in __init__
    if node.status != node.ONLINE:
        raise NodeIsNotOperational

    msg = LOG_VM_UPDATE
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
    }
    callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid, 'new_node_uuid': new_node_uuid})

    logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

    err = True
    vm.set_notready()
    try:
        tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock,
                           callback=callback, queue=queue, block_key=block_key)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data,
                                detail_dict=detail_dict)
    finally:
        if err:
            vm.revert_notready()
def put(self):  # noqa: R701
    request, vm, action = self.request, self.vm, self.action

    # Cannot change status unless the VM is created on node
    if vm.status not in self.statuses and action != 'current':
        raise VmIsNotOperational

    if action not in self.actions:
        raise ExpectationFailed('Bad action')

    apiview = self.apiview
    f_ser = VmStatusFreezeSerializer(data=self.data)

    if f_ser.is_valid():
        freeze = apiview['freeze'] = f_ser.data['freeze']
        unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
    else:
        return FailureTaskResponse(request, f_ser.errors, vm=vm)

    if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
        pass

    elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
        res = {'message': 'VM %s is already stopped. Changing status to frozen.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
        res = {'message': 'Removing frozen status for VM %s.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'current':
        # Limit PUT /current/ action to Admins and SuperAdmins
        if not request.user.is_admin(request):
            raise PermissionDenied

        if vm.status in self.statuses_force_change_allowed:
            return self.get_current_status(force_change=True)
        elif vm.status in self.stuck_statuses_force_change_allowed:
            if vm.tasks:
                raise VmHasPendingTasks
            else:
                return self.get_current_status(force_change=True)
        else:
            raise VmIsNotOperational

    else:
        raise ExpectationFailed('Bad action')

    dc_settings = request.dc.settings

    if action in ('stop', 'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
        raise PreconditionRequired('Internal VM can\'t be stopped')

    lock = 'vm_status vm:%s' % vm.uuid
    stdin = None
    apiview['update'] = False
    transition_to_stopping = False

    # The update parameter is used by all actions (start, stop, reboot)
    ser_update = VmStatusUpdateJSONSerializer(data=self.data, default=(action in ('start', 'reboot')))

    if not ser_update.is_valid():
        return FailureTaskResponse(request, ser_update.errors, vm=vm)

    if vm.json_changed():
        apiview['update'] = ser_update.data['update']
        logger.info('VM %s json != json_active', vm)

        if not apiview['update']:
            logger.info('VM %s json_active update disabled', vm)

    if action == 'start':
        ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if ser.data and ser.iso:
            if not request.user.is_admin(request) and vm.is_installed() and \
                    (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                raise PreconditionRequired('VM is not installed')

            msg = LOG_START_ISO
            iso = ser.iso
            cmd = self._start_cmd(iso=iso, iso2=ser.iso2, once=ser.data['cdimage_once'])
        else:
            msg = LOG_START
            iso = None
            cmd = self._start_cmd()

        if apiview['update']:
            if vm.tasks:
                raise VmHasPendingTasks

            cmd, stdin = self._add_update_cmd(cmd, os_cmd_allowed=False)

            if iso:
                msg = LOG_START_UPDATE_ISO
            else:
                msg = LOG_START_UPDATE

    else:
        ser_stop_reboot = VmStatusStopSerializer(request, vm, data=self.data)

        if not ser_stop_reboot.is_valid():
            return FailureTaskResponse(request, ser_stop_reboot.errors, vm=vm)

        update = apiview.get('update', False)  # VmStatusUpdateJSONSerializer
        force = apiview['force'] = ser_stop_reboot.data.get('force', False)
        timeout = ser_stop_reboot.data.get('timeout', None)

        if not force and timeout:
            apiview['timeout'] = timeout

        if update:
            if vm.tasks:
                raise VmHasPendingTasks

            # This will always perform a vmadm stop command, followed by a vmadm update command and
            # optionally followed by a vmadm start command (reboot)
            pre_cmd = self._action_cmd('stop', force=force, timeout=timeout)

            if action == 'reboot':
                if force:
                    msg = LOG_REBOOT_FORCE_UPDATE
                else:
                    msg = LOG_REBOOT_UPDATE

                post_cmd = self._action_cmd('start')
            else:
                if force:
                    msg = LOG_STOP_FORCE_UPDATE
                else:
                    msg = LOG_STOP_UPDATE

                post_cmd = ''

            cmd, stdin = self._add_update_cmd(post_cmd, os_cmd_allowed=True, pre_cmd=pre_cmd)
        else:
            cmd = self._action_cmd(action, force=force, timeout=timeout)

            if force:
                if action == 'reboot':
                    msg = LOG_REBOOT_FORCE
                else:
                    lock += ' force'
                    msg = LOG_STOP_FORCE
            else:
                if action == 'reboot':
                    msg = LOG_REBOOT
                else:
                    msg = LOG_STOP

        if vm.status == Vm.STOPPING:
            if update:
                raise PreconditionRequired('Cannot perform update while VM is stopping')
            if not force:
                raise VmIsNotOperational('VM is already stopping; try to use force')
        else:
            transition_to_stopping = True

    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'detail': self.detail,
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
        'last_status': vm.status,
    }
    callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

    tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=lock, callback=callback,
                       queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        if transition_to_stopping:
            vm.save_status(Vm.STOPPING)

        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=self.detail,
                            data=self.data, api_data={'status': vm.status,
                                                      'status_display': vm.status_display()})
def put(self):
    if 'note' in self.data:
        # Changing snapshot note instead of rollback (not logging)
        return self._update_note()

    request, vm, snap = self.request, self.vm, self.snap
    ser = SnapshotRestoreSerializer(request, vm, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors)

    target_vm, target_vm_disk_id = ser.target_vm, ser.target_vm_disk_id

    if vm.node.status not in vm.node.STATUS_OPERATIONAL:
        raise NodeIsNotOperational

    if target_vm.locked:
        raise VmIsLocked

    if target_vm != vm:
        if target_vm.node.status not in target_vm.node.STATUS_OPERATIONAL:
            raise NodeIsNotOperational

        self._check_vm_status(vm=target_vm)

        if not vm.has_compatible_brand(target_vm.brand):
            raise PreconditionRequired('VM brand mismatch')

        source_disk = vm.json_active_get_disks()[self.disk_id - 1]
        target_disk = target_vm.json_active_get_disks()[target_vm_disk_id - 1]

        if target_disk['size'] != source_disk['size']:
            raise PreconditionRequired('Disk size mismatch')

    self._check_vm_status()
    self._check_snap_status()
    apiview, detail = self._get_apiview_detail()
    apiview['force'] = ser.data['force']

    if target_vm != vm:
        detail += ", source_hostname='%s', target_hostname='%s', target_disk_id=%s" % (
            vm.hostname, target_vm.hostname, target_vm_disk_id)
        apiview['source_hostname'] = vm.hostname
        apiview['target_hostname'] = target_vm.hostname
        apiview['target_disk_id'] = target_vm_disk_id

        if not apiview['force']:
            if Snapshot.objects.only('id').filter(vm=target_vm, disk_id=ser.target_vm_real_disk_id).exists():
                raise ExpectationFailed('Target VM has snapshots')

    elif not apiview['force']:
        snaplast = Snapshot.objects.only('id').filter(vm=vm, disk_id=snap.disk_id).order_by('-id')[0]

        if snap.id != snaplast.id:
            raise ExpectationFailed('VM has more recent snapshots')

    if target_vm.status != vm.STOPPED:
        raise VmIsNotOperational('VM is not stopped')

    if target_vm.tasks:
        raise VmHasPendingTasks

    msg = LOG_SNAP_UPDATE
    lock = self.LOCK % (vm.uuid, snap.disk_id)

    if target_vm == vm:
        cmd = 'esnapshot rollback "%s@%s" 2>&1' % (self.zfs_filesystem, snap.zfs_name)
    else:
        cmd = 'esbackup snap-restore -s %s@%s -d %s' % (self.zfs_filesystem, snap.zfs_name,
                                                        ser.target_vm_disk_zfs_filesystem)
        if vm.node != target_vm.node:
            cmd += ' -H %s' % target_vm.node.address

    vm.set_notready()
    target_vm.set_notready()

    tid, err = execute(request, target_vm.owner.id, cmd, meta=snap_meta(target_vm, msg, apiview, detail),
                       lock=lock, callback=snap_callback(target_vm, snap), queue=vm.node.fast_queue)

    if err:
        target_vm.revert_notready()

        if vm != target_vm:
            vm.revert_notready()

        return FailureTaskResponse(request, err, vm=target_vm)
    else:
        snap.save_status(snap.ROLLBACK)

        return TaskResponse(request, tid, msg=msg, vm=target_vm, api_view=apiview, detail=detail,
                            data=self.data)