def delete(self): """Delete [DELETE] image from DB and from Image server. The task group is always DC unbound, but the current DC depends on the dc_bound flag: - dc_bound=False: task DC is default DC - dc_bound=[DC]: task DC is dc_bound DC The callback is responsible for detaching the image from each DC and deleting it from DB. """ request, img, data = self.request, self.img, self.data # Check if image is used by som VMs if img.is_used_by_vms(): raise PreconditionRequired(_('Image is used by some VMs')) # Preliminary checks self._run_checks() # This sets self.img_server to ImageVm() request.disable_throttling = True delete_node_image_tasks = [] # Run task for removing the image from all NodeStorages which have the image imported locally for ns in img.nodestorage_set.select_related('node').all(): # We need to bypass the permission checks, because node_image can be called by SuperAdmin only try: res = NodeImageView(request, ns, img, data).delete() except Exception as ex: res = exception_handler(ex, request) if res is None: raise res.exception = True logger.error('DELETE node_image(%s, %s, %s) failed (%s): %s', ns.node.hostname, ns.zpool, img.name, res.status_code, res.data) else: logger.info('DELETE node_image(%s, %s, %s) was successful (%s): %s', ns.node.hostname, ns.zpool, img.name, res.status_code, res.data) if res.status_code == 200: continue elif res.status_code == 201: delete_node_image_tasks.append(res.data['task_id']) else: return res if self.img_server: # Set PENDING status img.save_status(Image.PENDING) return self._run_execute(LOG_IMAGE_DELETE, 'esimg delete -u %s' % img.uuid, cb_add={'delete_node_image_tasks': delete_node_image_tasks}) else: if wait_for_delete_node_image_tasks(img, delete_node_image_tasks, timeout=30): obj = img.log_list owner = img.owner img.delete() return SuccessTaskResponse(self.request, None, obj=obj, owner=owner, msg=LOG_IMAGE_DELETE, dc_bound=self.dc_bound) else: raise PreconditionRequired(_('Image is being deleted from compute node storages; Try again later'))
def delete(self):
    ip = self.ip

    if self.many:
        if not ip:  # SELECT count(*) from IPAddress ???
            raise ObjectNotFound(model=IPAddress)

        for i in ip:  # SELECT * from IPAddress
            if i.vm or i.vms.exists():
                raise PreconditionRequired(_('IP address "%s" is used by VM') % i.ip)
            if i.is_node_address():
                raise PreconditionRequired(_('IP address "%s" is used by Compute node') % i.ip)

        msg = LOG_IPS_DELETE
        dd = {'ips': ','.join(i.ip for i in ip)}
    else:
        if ip.vm or ip.vms.exists():
            raise PreconditionRequired(_('IP address is used by VM'))
        if ip.is_node_address():
            raise PreconditionRequired(_('IP address is used by Compute node'))

        msg = LOG_IP_DELETE
        dd = {'ip': ip.ip}

    ip.delete()  # DELETE from IPAddress

    return SuccessTaskResponse(self.request, None, obj=self.net, msg=msg, detail_dict=dd, dc_bound=False)

def delete(self):
    node, dcnode = self.node, self.dcnode

    if dcnode.dc.vm_set.filter(node=node).exists():
        raise PreconditionRequired(_('Node has VMs in datacenter'))

    if dcnode.dc.backup_set.filter(node=node).exists():
        raise PreconditionRequired(_('Node has VM backups in datacenter'))

    ser = DcNodeSerializer(self.request, dcnode)
    ser.object.delete()
    DcNode.update_all(node=node)
    # noinspection PyStatementEffect
    ser.data  # Evaluate serializer data before the object is gone

    return SuccessTaskResponse(self.request, None, obj=node, detail_dict=ser.detail_dict(), msg=LOG_NODE_DETACH)

def put(self):
    assert self.request.dc.id == DefaultDc().id

    ser = UpdateSerializer(self.request, data=self.data)
    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, task_id=self.task_id, dc_bound=False)

    version = ser.object['version']
    from core.version import __version__ as mgmt_version

    if version == ('v' + mgmt_version):
        raise PreconditionRequired('System is already up-to-date')

    lock = TaskLock(self._lock_key, desc='System task')

    if not lock.acquire(self.task_id, timeout=7200, save_reverse=False):
        raise TaskIsAlreadyRunning

    try:
        return self._update(version, key=ser.object.get('key'), cert=ser.object.get('cert'))
    finally:
        lock.delete(fail_silently=True, delete_reverse=False)

def _check_platform_version(self):
    """Issue #chili-937 & Issue #chili-938"""
    min_version, max_version = self.img.min_platform, self.img.max_platform

    if min_version or max_version:
        node_version = parse(self.ns.node.platform_version)

        if min_version and parse(min_version) > node_version:
            raise PreconditionRequired('Image requires newer node version')

        if max_version and parse(max_version) < node_version:
            raise PreconditionRequired('Image requires older node version')

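# A standalone restatement of the version gate above, assuming parse() is
# packaging.version.parse (the actual import used by this codebase is not shown here):
from packaging.version import parse as parse_version

def platform_version_ok(node_platform, min_platform=None, max_platform=None):
    """Return True if the node platform version satisfies the image's optional bounds."""
    node_version = parse_version(node_platform)
    if min_platform and parse_version(min_platform) > node_version:
        return False  # image requires a newer node
    if max_platform and parse_version(max_platform) < node_version:
        return False  # image requires an older node
    return True

assert platform_version_ok('4.2.1', min_platform='4.0')
assert not platform_version_ok('3.9', min_platform='4.0')
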
def check_update(self, json_update):
    """Changing most of the VM's parameters does not require a VM to be in stopped state.
    VM has to be stopped when changing some disk/NIC parameters or adding/deleting disks/NICs
    - issue #chili-879."""
    vm = self.vm
    must_be_stopped = False

    for key, val in iteritems(json_update):
        if key in ('add_nics', 'remove_nics', 'add_disks', 'remove_disks'):
            must_be_stopped = True
            break

        if key == 'update_disks' and self._check_disk_update(val):
            must_be_stopped = True
            break

        if key == 'update_nics' and self._check_nic_update(val):
            must_be_stopped = True
            break

    if vm.status != vm.STOPPED and must_be_stopped:
        raise PreconditionRequired('VM has to be stopped when updating disks or NICs')

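# A hedged sketch of the kind of predicate _check_disk_update() implements: return True
# when any updated disk property cannot be changed while the VM is running. The property
# set below is illustrative only, not the codebase's actual list:
HOT_UPDATE_UNSAFE_DISK_PROPS = frozenset(('size', 'compression', 'zpool'))  # assumption

def disk_update_requires_stop(update_disks):
    """update_disks: list of vmadm-style update dicts, e.g. [{'path': ..., 'size': ...}]."""
    return any(HOT_UPDATE_UNSAFE_DISK_PROPS.intersection(disk) for disk in update_disks)
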
def _check_vm(self, vm):
    # Basic checks when working with an online VM
    if not (self.request.user.is_admin(self.request) or vm.is_installed()):
        raise PreconditionRequired('VM is not installed')

    if vm.status not in (vm.RUNNING, vm.STOPPED, vm.STOPPING):
        raise VmIsNotOperational

def put(self):
    assert self.request.dc.is_default()

    ser = UpdateSerializer(self.request, data=self.data)
    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, dc_bound=False)

    version = ser.data['version']
    from core.version import __version__ as mgmt_version

    # noinspection PyUnboundLocalVariable
    if version == ('v' + mgmt_version) and not ser.data.get('force'):
        raise PreconditionRequired('System is already up-to-date')

    obj = self.request.dc
    msg = LOG_SYSTEM_UPDATE
    _apiview_ = {'view': 'system_update', 'method': self.request.method, 'version': version}
    meta = {'apiview': _apiview_, 'msg': LOG_SYSTEM_UPDATE}
    task_kwargs = ser.data.copy()
    task_kwargs['dc_id'] = obj.id

    tid, err, res = system_update.call(self.request, None, (), kwargs=task_kwargs, meta=meta,
                                       tg=TG_DC_UNBOUND, tidlock=self.LOCK)

    if err:
        msg = obj = None  # Do not log an error here

    return mgmt_task_response(self.request, tid, err, res, msg=msg, obj=obj, api_view=_apiview_, dc_bound=False,
                              data=self.data, detail_dict=ser.detail_dict(force_full=True))

def _add_update_cmd(self, orig_cmd, os_cmd_allowed=False, pre_cmd=''):
    from api.vm.base.vm_manage import VmManage
    vm = self.vm
    json_update, os_cmd = VmManage.fix_update(vm.json_update())

    if os_cmd:
        if os_cmd_allowed:
            VmManage.validate_update(vm, json_update, os_cmd)
        else:  # Dangerous, explicit update needed
            # TODO: fix in gui
            raise PreconditionRequired('VM must be updated first')
    else:
        os_cmd = ''

    stdin = json_update.dump()
    logger.info('VM %s is going to be updated with json """%s"""', vm, stdin)
    update_cmd = 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; ' % (vm.uuid, vm.uuid)
    cmd = os_cmd + update_cmd + orig_cmd + '; exit $e'

    if pre_cmd:
        cmd = pre_cmd + ' && ' + cmd

    return cmd, stdin

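# For illustration, composing the pieces the way _add_update_cmd() does (with a
# hypothetical uuid and orig_cmd) shows that the pipeline preserves the exit code
# of the vmadm update step:
uuid = 'cc631752-1111-2222-3333-444444444444'  # hypothetical
update_cmd = 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; ' % (uuid, uuid)
print(update_cmd + 'vmadm start %s >&2' % uuid + '; exit $e')
# -> vmadm update <uuid> >&2; e=$?; vmadm get <uuid> 2>/dev/null; vmadm start <uuid> >&2; exit $e
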
def delete(self):
    dc, request = self.dc, self.request

    if dc.is_default():
        raise PreconditionRequired(_('Default datacenter cannot be deleted'))

    if dc.dcnode_set.exists():
        raise PreconditionRequired(_('Datacenter has nodes'))  # also "checks" DC backups

    if dc.vm_set.exists():
        raise PreconditionRequired(_('Datacenter has VMs'))

    if dc.backup_set.exists():
        raise PreconditionRequired(_('Datacenter has backups'))  # should be checked by dcnode check above

    dc_id = dc.id
    ser = self.serializer(request, dc)
    dc_bound_objects = dc.get_bound_objects()

    # After deleting a DC the current_dc is automatically set to DefaultDc by the on_delete db field parameter
    ser.object.delete()
    # Remove cached tasklog for this DC (DB tasklog entries will be removed automatically)
    delete_tasklog_cached(dc_id)
    connection.on_commit(lambda: dc_relationship_changed.send(dc_name=ser.object.name))

    res = SuccessTaskResponse(request, None)  # no msg => won't be logged
    # Every DC-bound object loses its DC => becomes DC-unbound
    task_id = res.data.get('task_id')

    # Update bound virt objects to be DC-unbound after DC removal
    for model, objects in dc_bound_objects.items():
        msg = LOG_VIRT_OBJECT_UPDATE_MESSAGES.get(model, None)

        if objects and msg:
            for obj in objects:
                if obj.dc_bound:
                    # noinspection PyUnresolvedReferences
                    remove_dc_binding_virt_object(task_id, msg, obj, user=request.user, dc_id=DefaultDc().id)

    return res

def _check_vm_status(self):
    request, vm = self.request, self.vm

    if not (request.user.is_admin(request) or vm.is_installed()):
        raise PreconditionRequired('VM is not installed')

    if vm.status not in (vm.RUNNING, vm.STOPPED, vm.STOPPING):
        raise VmIsNotOperational

def delete(self):
    ns, dc = self.ns, self.request.dc

    for vm in dc.vm_set.filter(node=ns.node):
        if ns.zpool in vm.get_used_disk_pools():  # active + current
            raise PreconditionRequired(_('Storage is used by some VMs'))

    if dc.backup_set.filter(zpool=ns).exists():
        raise PreconditionRequired(_('Storage is used by some VM backups'))

    ser = self.serializer(self.request, ns)
    ns.dc.remove(dc)

    return SuccessTaskResponse(self.request, None, obj=ns, detail_dict=ser.detail_dict(), msg=LOG_STORAGE_DETACH)

def create(self, vm, snap):
    """Create [POST] image from VM snapshot (ImageAdmin).

    This is always a DC bound task, but the task_id has a DC_UNBOUND task group flag,
    because socket.io will inform any admin regardless of the current admin DC.
    The callback is responsible for attaching the image into current DC.
    """
    img, data, request = self.img, self.data, self.request
    assert request.dc == vm.dc

    if vm.uuid in settings.VMS_INTERNAL:  # Bug #chili-792
        raise PreconditionRequired('Internal VM can\'t be used for creating images')

    data.pop('dc_bound', None)   # Default DC binding cannot be changed when creating Image for the first time
    img.dc_bound = vm.dc         # Default DC binding set to VM DC (cannot be changed, ^^^)
    img.ostype = vm.ostype       # Default ostype inherited from VM (cannot be changed)
    img.size = snap.disk_size    # Default disk size inherited from VM (cannot be changed)
    img.owner = request.user     # Default user (can be changed)
    img.alias = img.name         # Default alias (can be changed)
    img.status = Image.OK        # Set status for preliminary checks

    # Validate data (manifest info)
    ser = ImageSerializer(request, img, data)
    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, dc_bound=self.dc_bound)

    # Preliminary checks
    self._run_checks(img_server_must_exist=True)  # This sets self.img_server to ImageVm()

    if vm.status not in (vm.RUNNING, vm.STOPPED, vm.STOPPING, vm.FROZEN):
        raise VmIsNotOperational

    if snap.status != snap.OK:
        raise ExpectationFailed('VM snapshot status is not OK')

    # Build manifest and set PENDING status
    # noinspection PyUnusedLocal
    data = ser.data
    img.manifest = img.build_manifest()
    img.status = Image.PENDING
    img.src_vm = vm
    img.src_snap = snap
    img.save()
    # Set snapshot status to PENDING
    snap.save_status(snap.PENDING)

    # Build command
    cmd_add = ' ; e=$?; cat %s/%s/manifest 2>&1; exit $e' % (self.img_server.datasets_dir, img.uuid)
    cmd = 'esimg create -s %s@%s' % (snap.zfs_filesystem, snap.zfs_name)

    if self.img_server.node != vm.node:
        cmd += ' -H %s' % vm.node.address

    return self._run_execute(LOG_IMAGE_CREATE, cmd, stdin=img.manifest.dump(), delete_on_error=True, vm=vm,
                             snap=snap, error_fun=lambda: snap.save_status(snap.OK),
                             detail_dict=ser.detail_dict(), cmd_add=cmd_add)

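# For illustration, the command built above for a hypothetical snapshot when the
# image server runs on a different node than the source VM (all values are made up):
datasets_dir = '/datasets'  # assumed value of self.img_server.datasets_dir
img_uuid = '1f12f2a2-aaaa-bbbb-cccc-dddddddddddd'
cmd = 'esimg create -s %s@%s' % ('zones/cc631752-1111-2222-3333-444444444444-disk0', 'image-snap1')
cmd += ' -H %s' % '192.168.10.11'  # image server node != VM node
cmd_add = ' ; e=$?; cat %s/%s/manifest 2>&1; exit $e' % (datasets_dir, img_uuid)
print(cmd + cmd_add)
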
def put(self):
    request, vm = self.request, self.vm

    if vm.locked:
        raise VmIsLocked

    if vm.status not in (vm.STOPPED, vm.RUNNING, vm.NOTCREATED):
        raise VmIsNotOperational('VM is not stopped, running or notcreated')

    if vm.json_changed():
        raise PreconditionRequired('VM definition has changed; Update first')

    ser = VmDcSerializer(request, vm, data=self.data)
    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, vm=vm)

    if vm.tasks:
        raise VmHasPendingTasks

    old_dc = vm.dc
    dc = ser.dc
    # Change DC for one VM (repeat this for other VMs) + recalculate node & storage resources in target and source
    vm.dc = dc
    vm.save(update_node_resources=True, update_storage_resources=True)
    # Change task log entries DC for target VM
    TaskLogEntry.objects.filter(object_pk=vm.uuid).update(dc=dc)
    # Change related VM backups' DC
    Backup.objects.filter(vm=vm).update(dc=dc)

    for ns in ser.nss:  # Issue #chili-885
        for i in (dc, old_dc):
            Backup.update_resources(ns, vm, i)
            Snapshot.update_resources(ns, vm, i)

    detail = 'Successfully migrated VM %s from datacenter %s to datacenter %s' % (vm.hostname, old_dc.name, dc.name)
    # Will create task log entry in old DC
    res = SuccessTaskResponse(request, detail, vm=vm, msg=LOG_MIGRATE_DC, detail=detail)
    # Create task log entry in new DC too
    task_log_success(task_id_from_task_id(res.data.get('task_id'), dc_id=dc.id), LOG_MIGRATE_DC, obj=vm,
                     detail=detail, update_user_tasks=False)

    return res

def put(self): """Reinitialize old master VM -> reverse replication""" self._check_master_vm() slave_vm = self.slave_vm if not slave_vm.rep_reinit_required: raise PreconditionRequired('Reinitialization is not required') slave_vm.rep_enabled = True # Enable replication service cmd = self.CMD['reinit'] + ' && ' + self.CMD['svc-create'] % self._esrep_svc_opts return self._run_execute(LOG_REPLICA_REINIT, cmd)
def delete(self):
    self._check_node()
    ns, img = self.ns, self.img
    zpool = ns.zpool

    for vm in ns.node.vm_set.all():
        if img.uuid in vm.get_image_uuids(zpool=zpool):
            raise PreconditionRequired(_('Image is used by some VMs'))

    return self._run_execute(LOG_IMG_DELETE, 'imgadm delete -P %s %s 2>&1' % (ns.zpool, img.uuid), img.DELETING)

def delete(self):
    dc, img = self.request.dc, self.img

    if img.is_used_by_vms(dc=dc):
        raise PreconditionRequired(_('Image is used by some VMs'))

    ser = self.serializer(self.request, img)
    img.dc.remove(dc)
    res = SuccessTaskResponse(self.request, None, obj=img, detail_dict=ser.detail_dict(), msg=LOG_IMAGE_DETACH)
    self._remove_dc_binding(res)

    return res

def __init__(self, request, name, data):
    super(DcStorageView, self).__init__(request)
    self.data = data
    self.name = name
    dc = request.dc

    if name:
        try:
            zpool, hostname = name.split('@')
            if not (zpool and hostname):
                raise ValueError
        except ValueError:
            raise ObjectNotFound(model=NodeStorage)

        attrs = {'node__hostname': hostname, 'zpool': zpool}

        if request.method != 'POST':
            attrs['dc'] = dc

        ns = get_object(request, NodeStorage, attrs, sr=('node', 'storage', 'storage__owner'),
                        exists_ok=True, noexists_fail=True)
        ns.set_dc(dc)

        try:  # Bug #chili-525 + checks if node is attached to Dc (must be!)
            ns.set_dc_node(DcNode.objects.get(node=ns.node, dc=dc))
        except DcNode.DoesNotExist:
            raise PreconditionRequired(_('Compute node is not available'))
    else:  # many
        ns = NodeStorage.objects.filter(dc=dc).order_by(*self.order_by)

        if self.full or self.extended:
            dc_nodes = {dn.node.hostname: dn for dn in DcNode.objects.select_related('node').filter(dc=request.dc)}
            ns = ns.select_related('node', 'storage', 'storage__owner')

            for i in ns:  # Bug #chili-525
                i.set_dc_node(dc_nodes.get(i.node.hostname, None))
                i.set_dc(dc)

    self.ns = ns

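# The name parameter is expected in "zpool@hostname" form; a minimal standalone
# version of the parsing performed above:
def parse_storage_name(name):
    try:
        zpool, hostname = name.split('@')
        if not (zpool and hostname):
            raise ValueError
    except ValueError:
        raise ValueError('Expected "zpool@hostname", got %r' % (name,))
    return zpool, hostname

assert parse_storage_name('zones@node01.example.com') == ('zones', 'node01.example.com')
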
def put(self): """Failover to slave VM""" vm, slave_vm = self.vm, self.slave_vm if slave_vm.rep_reinit_required: raise PreconditionRequired('Reinitialization is required') if not slave_vm.reserve_resources: # We need to check whether there is free CPU and RAM on slave VM's node slave_vm_define = SlaveVmDefine(slave_vm) try: slave_vm_define.validate_node_resources(ignore_cpu_ram=False, ignore_disk=True) except APIValidationError: raise PreconditionRequired('Not enough free resources on target node') if slave_vm.sync_status == slave_vm.DIS: # service does not exist cmd = self.CMD['failover'] else: cmd = self.CMD['svc-remove'] + ' && ' + self.CMD['failover'] if vm.tasks: force = ForceSerializer(data=self.data, default=False).is_true() if not force: raise VmHasPendingTasks else: force = False orig_status = vm.status # Set VM to nonready vm.set_notready() try: return self._run_execute(LOG_REPLICA_FAILOVER, cmd, force=force, orig_status=orig_status) finally: if not self._success: vm.revert_notready()
def delete(self, ns): """Update node-storage""" ser = NodeStorageSerializer(self.request, ns) node = ns.node for vm in node.vm_set.all(): if ns.zpool in vm.get_used_disk_pools(): # active + current raise PreconditionRequired(_('Storage is used by some VMs')) if node.is_backup: if ns.backup_set.exists(): raise PreconditionRequired( _('Storage is used by some VM backups')) obj = ns.log_list owner = ns.storage.owner ser.object.delete() # Will delete Storage in post_delete return SuccessTaskResponse(self.request, None, obj=obj, owner=owner, msg=LOG_NS_DELETE, dc_bound=False)
def delete(self):
    dc, vmt = self.request.dc, self.vmt

    if dc.vm_set.filter(template=vmt).exists():
        raise PreconditionRequired(_('Template is used by some VMs'))

    ser = self.serializer(self.request, vmt)
    vmt.dc.remove(dc)
    res = SuccessTaskResponse(self.request, None, obj=vmt, detail_dict=ser.detail_dict(), msg=LOG_TEMPLATE_DETACH)
    self._remove_dc_binding(res)

    return res

def _check_img_server(self, must_exist=False):
    try:
        self.img_server = ImageVm()

        if self.img_server:
            img_vm = self.img_server.vm
            if img_vm.status not in (img_vm.RUNNING, img_vm.STOPPED):
                raise ObjectDoesNotExist
        elif must_exist:
            raise ObjectDoesNotExist
        else:
            logger.warning('Image server is disabled!')
    except ObjectDoesNotExist:
        raise PreconditionRequired(_('Image server is not available'))

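# The `if self.img_server:` truth test above implies that ImageVm evaluates to False
# when no image server is configured. A hedged sketch of that contract (the real
# lookup logic is an assumption):
class ImageVmSketch(object):
    def __init__(self, vm=None):
        self.vm = vm  # None when the image server is disabled

    def __bool__(self):  # Python 3 truth test
        return self.vm is not None
    __nonzero__ = __bool__  # Python 2 alias; the codebase uses six-style compat (iteritems, text_type)
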
def delete(self):
    dc, net = self.request.dc, self.net

    if net.is_used_by_vms(dc=dc):
        raise PreconditionRequired(_('Network is used by some VMs'))

    ser = NetworkSerializer(self.request, net)
    net.dc.remove(dc)
    res = SuccessTaskResponse(self.request, None, obj=net, detail_dict=ser.detail_dict(), msg=LOG_NETWORK_DETACH)
    self._remove_dc_binding(res)

    return res

def delete(self):
    template = self.template
    ser = TemplateSerializer(self.request, template)

    if template.vm_set.exists():
        raise PreconditionRequired(_('Template is used by some VMs'))

    owner = template.owner
    obj = template.log_list
    ser.object.delete()

    return SuccessTaskResponse(self.request, None, obj=obj, owner=owner, msg=LOG_TEMPLATE_DELETE, dc_bound=False)

def delete(self):
    net = self.net
    ser = NetworkSerializer(self.request, net)

    if net.is_used_by_vms():
        raise PreconditionRequired(_('Network is used by some VMs'))

    owner = net.owner
    obj = net.log_list
    ser.object.delete()

    return SuccessTaskResponse(self.request, None, obj=obj, owner=owner, msg=LOG_NET_DELETE, dc_bound=False)

def put(self): """Re-create replication service with new settings""" slave_vm = self.slave_vm self._check_master_vm() if slave_vm.rep_reinit_required: raise PreconditionRequired('Reinitialization is required') # Check this before validating the serializer, because it updates the slave_vm.sync_status if slave_vm.sync_status == SlaveVm.DIS: # service does not exist cmd = '' else: cmd = self.CMD['svc-remove'] + ' && ' ser = VmReplicaSerializer(self.request, slave_vm, data=self.data, partial=True) if not ser.is_valid(): return FailureTaskResponse(self.request, ser.errors, vm=self.vm) dd = ser.detail_dict() only_one_attr_changed = len(dd) == 2 if ser.reserve_resources_changed: # We need to save the reserve_resources attribute into SlaveVm; # However, the current slave_vm object may have other attributes modified by the serializer slave_vm_copy = SlaveVm.objects.get(pk=slave_vm.pk) slave_vm_copy.reserve_resources = slave_vm.reserve_resources slave_vm_copy.save(update_fields=('enc_json',)) slave_vm_copy.vm.save(update_node_resources=True) if only_one_attr_changed: return SuccessTaskResponse(self.request, ser.data, vm=self.vm, msg=LOG_REPLICA_UPDATE, detail_dict=dd, status=scode.HTTP_205_RESET_CONTENT) if cmd and only_one_attr_changed and 'enabled' in dd: # Service exists on node and only status change is requested if slave_vm.rep_enabled: cmd = self.CMD['svc-enable'] else: cmd = self.CMD['svc-disable'] else: cmd += self.CMD['svc-create'] % self._esrep_svc_opts return self._run_execute(LOG_REPLICA_UPDATE, cmd, detail_dict=dd)
def put(self):
    assert self.request.dc.id == DefaultDc().id

    ser = UpdateSerializer(self.request, data=self.data)
    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, task_id=self.task_id, dc_bound=False)

    node = self.node
    version = ser.object['version']
    node_version = node.system_version

    if not (isinstance(node_version, text_type) and node_version):
        raise NodeIsNotOperational('Node version information could not be retrieved')

    if version == ('v' + node_version):
        raise PreconditionRequired('Node is already up-to-date')

    if node.status != node.OFFLINE:
        raise NodeIsNotOperational('Unable to perform update on node that is not in OFFLINE state!')

    lock = TaskLock(self._lock_key, desc='System task')

    if not lock.acquire(self.task_id, timeout=7200, save_reverse=False):
        raise TaskIsAlreadyRunning

    try:
        return self._update(version, key=ser.object.get('key'), cert=ser.object.get('cert'))
    finally:
        lock.delete(fail_silently=True, delete_reverse=False)

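# The acquire/try/finally pattern above (also used in the mgmt update view) reduced
# to a context manager for clarity; TaskLock's signature is taken from the calls
# above, everything else is an assumption:
from contextlib import contextmanager

@contextmanager
def system_task_lock(lock_key, task_id, timeout=7200):
    lock = TaskLock(lock_key, desc='System task')
    if not lock.acquire(task_id, timeout=timeout, save_reverse=False):
        raise TaskIsAlreadyRunning
    try:
        yield lock
    finally:
        lock.delete(fail_silently=True, delete_reverse=False)
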
def post(self): """Create and initialize slave VM and create replication service""" request, vm = self.request, self.vm if vm.status not in (vm.STOPPED, vm.RUNNING): raise VmIsNotOperational('VM is not stopped or running') if vm.json_changed(): raise PreconditionRequired('VM definition has changed; Update first') ser = VmReplicaSerializer(request, self.slave_vm, data=self.data) if not ser.is_valid(): return FailureTaskResponse(request, ser.errors, vm=vm) if vm.tasks: raise VmHasPendingTasks cmd = self.CMD['init'] % self._esrep_init_opts + ' && ' + self.CMD['svc-create'] % self._esrep_svc_opts slave_vm = None # Set VM to nonready (+"api lock") vm.set_notready() try: # Create slave VM slave_vm = ser.save_slave_vm() stdin = slave_vm.vm.fix_json(resize=True).dump() logger.debug('Creating new slave VM %s on node %s with json: """%s"""', slave_vm, slave_vm.node, stdin) return self._run_execute(LOG_REPLICA_CREATE, cmd, stdin=stdin, detail_dict=ser.detail_dict(), block_key=ser.node_image_import()) finally: if not self._success: vm.revert_notready() if slave_vm: slave_vm.delete()
def put(self):  # noqa: R701
    request, vm, action = self.request, self.vm, self.action

    # Cannot change status unless the VM is created on node
    if vm.status not in self.statuses and action != 'current':
        raise VmIsNotOperational

    if action not in self.actions:
        raise ExpectationFailed('Bad action')

    apiview = self.apiview
    f_ser = VmStatusFreezeSerializer(data=self.data)

    if f_ser.is_valid():
        freeze = apiview['freeze'] = f_ser.data['freeze']
        unfreeze = apiview['unfreeze'] = f_ser.data['unfreeze']
    else:
        return FailureTaskResponse(request, f_ser.errors, vm=vm)

    if ((action == 'start' and vm.status == Vm.STOPPED and not freeze) or
            (action == 'reboot' and vm.status == Vm.RUNNING and not freeze) or
            (action == 'stop' and vm.status in (Vm.STOPPING, Vm.RUNNING))):
        pass

    elif action == 'stop' and vm.status == Vm.STOPPED and freeze:
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.FROZEN, save_state=True)
        res = {'message': 'VM %s is already stopped. Changing status to frozen.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'stop' and vm.status == Vm.FROZEN and unfreeze:
        if not request.user.is_admin(request):
            raise PermissionDenied

        tid = task_id_from_request(request, owner_id=vm.owner.id, dummy=True)
        vm_status_changed(tid, vm, vm.STOPPED, save_state=True)
        res = {'message': 'Removing frozen status for VM %s.' % vm.hostname}

        return SuccessTaskResponse(request, res, task_id=tid, vm=vm)

    elif action == 'current':
        # Limit PUT /current/ action to Admins and SuperAdmins
        if not request.user.is_admin(request):
            raise PermissionDenied

        if vm.status in self.statuses_force_change_allowed:
            return self.get_current_status(force_change=True)
        elif vm.status in self.stuck_statuses_force_change_allowed:
            if vm.tasks:
                raise VmHasPendingTasks
            else:
                return self.get_current_status(force_change=True)
        else:
            raise VmIsNotOperational

    else:
        raise ExpectationFailed('Bad action')

    dc_settings = request.dc.settings

    if action in ('stop', 'reboot') and vm.uuid in dc_settings.VMS_NO_SHUTDOWN:
        raise PreconditionRequired('Internal VM can\'t be stopped')

    lock = 'vm_status vm:%s' % vm.uuid
    stdin = None
    apiview['update'] = False
    transition_to_stopping = False

    # The update parameter is used by all actions (start, stop, reboot)
    ser_update = VmStatusUpdateJSONSerializer(data=self.data, default=(action in ('start', 'reboot')))

    if not ser_update.is_valid():
        return FailureTaskResponse(request, ser_update.errors, vm=vm)

    if vm.json_changed():
        apiview['update'] = ser_update.data['update']
        logger.info('VM %s json != json_active', vm)

        if not apiview['update']:
            logger.info('VM %s json_active update disabled', vm)

    if action == 'start':
        ser = VmStatusActionIsoSerializer(request, vm, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, vm=vm)

        if ser.data and ser.iso:
            if not request.user.is_admin(request) and vm.is_installed() and \
                    (ser.iso.name != dc_settings.VMS_ISO_RESCUECD):
                raise PreconditionRequired('VM is not installed')

            msg = LOG_START_ISO
            iso = ser.iso
            cmd = self._start_cmd(iso=iso, iso2=ser.iso2, once=ser.data['cdimage_once'])
        else:
            msg = LOG_START
            iso = None
            cmd = self._start_cmd()

        if apiview['update']:
            if vm.tasks:
                raise VmHasPendingTasks

            cmd, stdin = self._add_update_cmd(cmd, os_cmd_allowed=False)

            if iso:
                msg = LOG_START_UPDATE_ISO
            else:
                msg = LOG_START_UPDATE

    else:
        ser_stop_reboot = VmStatusStopSerializer(request, vm, data=self.data)

        if not ser_stop_reboot.is_valid():
            return FailureTaskResponse(request, ser_stop_reboot.errors, vm=vm)

        update = apiview.get('update', False)  # VmStatusUpdateJSONSerializer
        force = apiview['force'] = ser_stop_reboot.data.get('force', False)
        timeout = ser_stop_reboot.data.get('timeout', None)

        if not force and timeout:
            apiview['timeout'] = timeout

        if update:
            if vm.tasks:
                raise VmHasPendingTasks

            # This will always perform a vmadm stop command, followed by a vmadm update command and optionally
            # followed by a vmadm start command (reboot)
            pre_cmd = self._action_cmd('stop', force=force, timeout=timeout)

            if action == 'reboot':
                if force:
                    msg = LOG_REBOOT_FORCE_UPDATE
                else:
                    msg = LOG_REBOOT_UPDATE

                post_cmd = self._action_cmd('start')
            else:
                if force:
                    msg = LOG_STOP_FORCE_UPDATE
                else:
                    msg = LOG_STOP_UPDATE

                post_cmd = ''

            cmd, stdin = self._add_update_cmd(post_cmd, os_cmd_allowed=True, pre_cmd=pre_cmd)
        else:
            cmd = self._action_cmd(action, force=force, timeout=timeout)

            if force:
                if action == 'reboot':
                    msg = LOG_REBOOT_FORCE
                else:
                    lock += ' force'
                    msg = LOG_STOP_FORCE
            else:
                if action == 'reboot':
                    msg = LOG_REBOOT
                else:
                    msg = LOG_STOP

        if vm.status == Vm.STOPPING:
            if update:
                raise PreconditionRequired('Cannot perform update while VM is stopping')
            if not force:
                raise VmIsNotOperational('VM is already stopping; try to use force')
        else:
            transition_to_stopping = True

    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'detail': self.detail,
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
        'last_status': vm.status,
    }
    callback = ('api.vm.status.tasks.vm_status_cb', {'vm_uuid': vm.uuid})

    tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=lock, callback=callback,
                       queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        if transition_to_stopping:
            vm.save_status(Vm.STOPPING)

        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=self.detail, data=self.data,
                            api_data={'status': vm.status, 'status_display': vm.status_display()})

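# A condensed reading aid for the status/action gate at the top of put(): which
# (action, current status) pairs fall through to command building. Freeze/unfreeze
# and the 'current' action are handled by the dedicated branches above.
ALLOWED_TRANSITIONS = {
    'start': (Vm.STOPPED,),
    'reboot': (Vm.RUNNING,),
    'stop': (Vm.RUNNING, Vm.STOPPING),
}

def action_allowed(action, status, freeze=False):
    if freeze and action in ('start', 'reboot'):
        return False  # start/reboot are refused for freeze requests
    return status in ALLOWED_TRANSITIONS.get(action, ())
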
def post(self):
    request, vm = self.request, self.vm

    ser = VmCreateSerializer(data=self.data)
    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, vm=vm)

    if not vm.is_kvm():
        if not (vm.dc.settings.VMS_VM_SSH_KEYS_DEFAULT or vm.owner.usersshkey_set.exists()):
            raise PreconditionRequired('VM owner has no SSH keys available')

    apiview = self.apiview
    # noinspection PyTypeChecker
    cmd = 'vmadm create >&2; e=$? %s; vmadm get %s 2>/dev/null; vmadm start %s >&2; exit $e' % (
        self.fix_create(vm), vm.uuid, vm.uuid)
    recreate = apiview['recreate'] = ser.data['recreate']

    # noinspection PyAugmentAssignment
    if recreate:
        # recreate should be available to every vm owner
        if not (request.user and request.user.is_authenticated()):
            raise PermissionDenied

        if vm.locked:
            raise VmIsLocked

        if vm.status != vm.STOPPED:
            raise VmIsNotOperational('VM is not stopped')

        if not ser.data['force']:
            raise ExpectationFailed('Are you sure?')

        msg = LOG_VM_RECREATE
        # noinspection PyAugmentAssignment
        cmd = 'vmadm delete ' + vm.uuid + ' >&2 && sleep 1; ' + cmd

    elif vm.status == vm.NOTCREATED:
        # only admin
        if not (request.user and request.user.is_admin(request)):
            raise PermissionDenied

        if not vm.node:  # we need to find a node for this vm now
            logger.debug('VM %s has no compute node defined. Choosing node automatically', vm)
            VmDefineView(request).choose_node(vm)
            logger.info('New compute node %s for VM %s was chosen automatically.', vm.node, vm)

        msg = LOG_VM_CREATE

    else:
        raise VmIsNotOperational('VM is already created')

    # Check boot flag (KVM) or disk image (OS) (bug #chili-418)
    if not vm.is_bootable():
        raise PreconditionRequired('VM has no bootable disk')

    if vm.tasks:
        raise VmHasPendingTasks

    old_status = vm.status
    deploy = apiview['deploy'] = vm.is_deploy_needed()
    resize = apiview['resize'] = vm.is_resize_needed()

    if not vm.is_blank():
        vm.set_root_pw()

    # Set new status also for blank VM (where deployment is not needed)
    # This status will be changed in vm_status_event_cb (if everything goes well).
    vm.status = vm.CREATING
    vm.save()  # save status / node / vnc_port / root_pw

    stdin = vm.fix_json(deploy=deploy, resize=resize, recreate=recreate).dump()
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
    }
    callback = ('api.vm.base.tasks.vm_create_cb', {'vm_uuid': vm.uuid})
    err = True

    try:
        # Possible node_image import task which will block this task on node worker
        block_key = self.node_image_import(vm.node, vm.json_get_disks())
        logger.debug('Creating new VM %s on node %s with json: """%s"""', vm, vm.node, stdin)
        tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, expires=VM_VM_EXPIRES,
                           lock=self.lock, callback=callback, queue=vm.node.slow_queue, block_key=block_key)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            # Inform user about creating
            vm_status_changed(tid, vm, vm.CREATING, save_state=False)
            return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data)
    finally:
        if err:
            # Revert old status
            vm.status = old_status
            vm.save_status()

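# The err-flag pattern above guards the status revert: err starts as True, so any
# exception raised before execute() returns also triggers the revert in finally.
# A minimal standalone demonstration of the same control flow:
def guarded_call(do_work, revert):
    err = True
    try:
        err = do_work()  # expected to return a falsy value on success
        if err:
            return 'failure'
        return 'success'
    finally:
        if err:  # runs both on failure returns and on exceptions
            revert()
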