def put(self, vm, data):
    """Revert json_active (undo a pending VM definition change).

    Problematic attributes:
        - hostname - handled by revert_active() + change requires some post configuration
        - alias - handled by revert_active()
        - owner - handled by revert_active()
        - template - handled by revert_active()
        - monitored - handled by revert_active(), but mon_vm_sync task must be run via
          vm_define_reverted signal
        - tags - wont be reverted (not saved in json)
        - nics.*.ip - ip reservation is fixed via vm_update_ipaddress_usage()
        - nics.*.dns + ptr - known bug - wont be reverted

    Raises VmIsNotOperational / ExpectationFailed / VmHasPendingTasks as guards
    before touching the VM.
    """
    if vm.is_notcreated():
        raise VmIsNotOperational('VM is not created')

    if vm.json == vm.json_active:
        # Nothing to revert - active and pending definitions are identical
        raise ExpectationFailed('VM definition unchanged')

    if vm.tasks:
        # Reverting while a task is modifying the VM would race with it
        raise VmHasPendingTasks

    # Prerequisites
    vm.hostname_is_valid_fqdn(cache=False)  # Cache vm._fqdn hostname/domain pair and find dns record
    hostname = vm.hostname  # Save hostname configured in DB (needed to detect a hostname change below)

    # The magic happens here: get_diff() will run vm.revert_active() and return a diff
    vm_diff = VmDefineView(self.request).get_diff(vm, full=True)

    # Save VM
    hostname_changed = hostname != vm.hostname
    vm.unlock()  # vm saving was locked by revert_active()
    vm.save(update_hostname=hostname_changed, update_node_resources=True, update_storage_resources=True)

    # Generate response
    detail = 'Reverted VM configuration from %s.\n%s' % (vm.changed.strftime('%Y-%m-%d %H:%M:%S%z'),
                                                         self.nice_diff(vm_diff))
    vm_diff['reverted_from'] = vm.changed

    res = SuccessTaskResponse(self.request, vm_diff, detail=detail, msg=LOG_DEF_REVERT, vm=vm)

    # Post-save stuff - fix IP reservations and notify listeners (e.g. monitoring sync)
    task_id = TaskID(res.data.get('task_id'), request=self.request)
    vm_update_ipaddress_usage(vm)
    vm_define_reverted.send(task_id, vm=vm)  # Signal!

    if hostname_changed:
        VmDefineHostnameChanged(self.request, vm, hostname).send()  # Task event for GUI

    return res
def _vm_delete_cb_succeeded(task_id, vm):
    """Helper function used by vm_delete_cb to run proper vm methods in order to delete the VM.

    Marks the VM as not created in the DB, refreshes resource counters and IP usage,
    removes all snapshot/backup objects tied to the VM and emits the vm_notcreated signal.
    The statement order matters: counters depend on status/json_active being reset first.
    """
    vm.status = Vm.NOTCREATED
    vm.json_active = {}
    # Let's update resource counters, because they depend on Vm.is_deployed() status and json_active
    vm.save(update_node_resources=True, update_storage_resources=True)
    vm_update_ipaddress_usage(vm)
    Snapshot.objects.filter(vm=vm).delete()
    SnapshotDefine.objects.filter(vm=vm).delete()
    BackupDefine.objects.filter(vm=vm).delete()
    vm_notcreated.send(task_id, vm=vm)  # Signal!
def vm_update_cb(result, task_id, vm_uuid=None):
    """A callback function for api.vm.base.views.vm_manage.

    On success (returncode 0 and either a forced update or a 'Successfully updated'
    message) it persists the json reported by the compute node as both json and
    json_active, updates resources/IP usage and emits vm_json_active_changed.
    On failure it raises TaskException with the node's error message.

    NOTE(review): a second vm_update_cb with an extra new_node_uuid parameter exists
    later in this file and would shadow this one if both live in the same module -
    confirm which definition is intended to win.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    _vm_update_cb_done(result, task_id, vm)
    msg = result.get('message', '')
    force = result['meta']['apiview']['force']

    if result['returncode'] == 0 and (force or msg.find('Successfully updated') >= 0):
        json = result.pop('json', None)

        try:  # save json from smartos
            # NOTE(review): vm.json appears to be a dict-like wrapper with a load() parser - confirm
            json_active = vm.json.load(json)
            vm_delete_snapshots_of_removed_disks(vm)  # Do this before updating json and json_active
            vm.json_active = json_active
            vm.json = json_active
        except Exception as e:
            logger.exception(e)
            logger.error('Could not parse json output from PUT vm_manage(%s). Error: %s', vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')
        else:
            vm.save(update_node_resources=True, update_storage_resources=True,
                    update_fields=('enc_json', 'enc_json_active', 'changed'))
            vm_update_ipaddress_usage(vm)
            vm_json_active_changed.send(task_id, vm=vm)  # Signal!
    else:
        logger.error('Found nonzero returncode in result from PUT vm_manage(%s). Error: %s', vm_uuid, msg)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
def vm_from_json(request, task_id, json, dc, owner=settings.ADMIN_USER, template=True, save=False,
                 update_ips=True, update_dns=True):
    """Parse json a create new Vm object

    @param dict json: loaded json dictionary obtained via vmadm get
    @param int owner: whether to fetch the vm.owner User object. Also indicates an user id, \
which will be used as fallback
    @param bool template: True if the template should be set according to internal_metadata.template
    @param Dc dc: Dc object for the Vm
    @param bool save: Should the new Vm be saved in DB?
    @param bool update_ips: Update server <-> IpAddress relations. Only performed if save and update_ips are True.
    @param bool update_dns: Try to update/create DNS record for server's primary IP. Only performed if save is True.
    @return: new Vm instance
    """
    # noinspection PyUnresolvedReferences
    hostname_length = VmDefineSerializer.base_fields['hostname'].max_length
    # noinspection PyUnresolvedReferences
    alias_length = VmDefineSerializer.base_fields['alias'].max_length

    # basic information (KeyError)
    vm = Vm(uuid=json['uuid'], hostname=json['hostname'][:hostname_length],
            status=Vm.STATUS_DICT[json['state']], dc=dc)
    vm.new = True
    brand = json['brand']

    # json and json_active
    vm.json = vm.json_active = json

    # node & vnc_port (no check)
    vm.node_id = json.get('server_uuid', None)
    vm.vnc_port = json.get('vnc_port', None)

    # alias - prefer internal_metadata, fall back to top-level alias, then to hostname's first label
    try:
        vm.alias = json['internal_metadata']['alias'][:alias_length]
    except KeyError:
        try:
            alias = json['alias']
        except KeyError:
            alias = vm.hostname
        vm.alias = alias.split('.')[0][:alias_length]
        logger.warning('Alias for new VM %s could not be auto-detected. Fallback to alias=%s', vm, vm.alias)

    # ostype - fall back from internal_metadata to a brand-based guess
    try:
        vm.ostype = int(json['internal_metadata']['ostype'])
    except KeyError:
        if brand == 'kvm':
            vm.ostype = Vm.LINUX
        elif brand == 'lx':
            vm.ostype = Vm.LINUX_ZONE
        else:
            vm.ostype = Vm.SUNOS_ZONE
        logger.warning('OS type for new VM %s could not be auto-detected. Fallback to ostype=%s', vm, vm.ostype)

    # owner - try owner_uuid from json, fall back to the id passed in via the owner parameter
    if owner:
        try:
            vm.owner = User.objects.get(id=int(json['owner_uuid']))
        except (KeyError, ValueError, User.DoesNotExist):
            vm.owner = User.objects.get(id=owner)
            logger.warning('Owner for new VM %s could not be auto-detected. Fallback to owner=%s', vm, vm.owner)

    # template - best effort; missing template name or unknown template leaves vm.template unset
    if template:
        tmpname = None
        try:
            tmpname = json['internal_metadata']['template']
            if tmpname:
                vm.template = VmTemplate.objects.get(name=json['internal_metadata']['template'], dc=dc)
        except (KeyError, VmTemplate.DoesNotExist):
            vm.template = None
            if tmpname:
                logger.warning('Template "%s" for new VM %s could not be auto-detected', tmpname, vm)

    # images - validate that every referenced image exists in this DC
    for img_uuid in vm.get_image_uuids():
        Image.objects.get(uuid=img_uuid, dc=dc)  # May raise Image.DoesNotExist

    # subnets - validate that every referenced network exists in this DC
    for net_uuid in vm.get_network_uuids():
        Subnet.objects.get(uuid=net_uuid, dc=dc)  # May raise Subnet.DoesNotExist

    # Initialize uptime now
    logger.info(vm.update_uptime(force_init=True))

    if save:
        vm.save(sync_json=True, update_node_resources=True, update_storage_resources=True)
        logger.info('Server %s (%s) was saved', vm.uuid, vm)
        exc = None
        primary_ip = None

        if update_ips:
            for i, nic in enumerate(vm.json_get_nics()):
                if 'network_uuid' not in nic:
                    logger.error('Server %s NIC ID %s has no network_uuid defined', vm, i)
                    exc = KeyError('network_uuid')
                    break

                try:
                    net = Subnet.objects.get(uuid=nic['network_uuid'], dc=dc)
                except Subnet.DoesNotExist as e:
                    exc = e
                    break
                else:
                    if net.dhcp_passthrough:
                        # Externally managed network - no IP bookkeeping on our side
                        logger.info('Server %s NIC ID %s uses an externally managed network %s', vm, i, net.name)
                        continue

                    ip, err = _vm_save_ip_from_json(vm, net, nic['ip'], allowed_ips=False)

                    if err:
                        logger.critical(err)
                        exc = SystemError(err)
                        break

                    if i == 0:
                        primary_ip = ip

                    for ipaddress in nic.get('allowed_ips', ()):
                        _, err = _vm_save_ip_from_json(vm, net, ipaddress, allowed_ips=True)

                        if err:
                            logger.critical(err)
                            exc = SystemError(err)
                            # NOTE(review): this break only exits the allowed_ips loop;
                            # the outer NIC loop keeps iterating before exc is raised below - confirm intended
                            break

        if exc:
            # Any NIC/IP failure is fatal: remove the just-saved VM and propagate
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        try:
            vm_update_ipaddress_usage(vm)
        except ValueError as exc:
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        if update_dns and primary_ip:
            # This will fail silently (exception is logged)
            VmDefineNicSerializer.save_a(request, task_id, vm, primary_ip)

            if primary_ip.subnet.ptr_domain:
                # This will fail silently (exception is logged)
                VmDefineNicSerializer.save_ptr(request, task_id, vm, primary_ip, primary_ip.subnet,
                                               content=vm.hostname)

    return vm
def vm_status_cb(result, task_id, vm_uuid=None):
    """A callback function for PUT api.vm.status.views.vm_status.

    Always updates the VM's status in DB. On a successful status change the new
    status is derived from the node's message ('Successfully started' / 'completed
    stop' / 'completed reboot'); on failure the VM may be detected as missing from
    the compute node and handled accordingly before TaskException is raised.
    """
    vm = Vm.objects.get(uuid=vm_uuid)
    msg = result.get('message', '')
    json = result.pop('json', None)

    if result['returncode'] == 0 and msg and msg.find('Successfully') == 0:
        # json was updated
        if result['meta']['apiview']['update'] and msg.find('Successfully updated') == 0:
            try:  # save json from smartos
                json_active = vm.json.load(json)
                vm_delete_snapshots_of_removed_disks(vm)  # Do this before updating json and json_active
                vm.json_active = json_active
                vm.json = json_active
            except Exception as e:
                # Best effort: json parse failure is logged but does not block the status update below
                logger.exception(e)
                logger.error('Could not parse json output from vm_status(%s). Error: %s', vm_uuid, e)
            else:
                vm.save(update_node_resources=True, update_storage_resources=True,
                        update_fields=('enc_json', 'enc_json_active', 'changed'))
                vm_update_ipaddress_usage(vm)
                vm_json_active_changed.send(task_id, vm=vm)  # Signal!

        change_time = _get_task_time(result, 'exec_time')

        if msg.find('Successfully started') >= 0:
            new_status = Vm.RUNNING
        elif msg.find('Successfully completed stop') >= 0:
            if result['meta']['apiview']['freeze']:
                new_status = Vm.FROZEN
                change_time = _get_task_time(result, 'finish_time')  # Force status save
            else:
                new_status = Vm.STOPPED
        elif msg.find('Successfully completed reboot') >= 0:
            new_status = Vm.RUNNING
        else:
            logger.error('Did not find successful status change in result from vm_status(%s). Error: %s',
                         vm_uuid, msg)
            raise TaskException(result, 'Unknown status (%s)' % msg)
    else:
        logger.error('Found nonzero returncode in result from vm_status(%s). Error: %s', vm_uuid, msg)

        if is_vm_missing(vm, msg):
            logger.critical('VM %s has vanished from compute node!', vm_uuid)

            if vm.status == Vm.STOPPING:
                _save_vm_status(task_id, vm, Vm.STOPPED, change_time=_get_task_time(result, 'finish_time'))
        else:
            _vm_status_cb_failed(result, task_id, vm)

        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (result['returncode'], msg))

    # Only reached on the success path - every failure branch above raises
    _save_vm_status(task_id, vm, new_status, change_time=change_time)
    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
def vm_create_cb(result, task_id, vm_uuid=None):
    """A callback function for api.vm.base.views.vm_manage.

    Handles the result of a VM create (or recreate): saves the json reported by the
    compute node, emits vm_created, then waits (polling every 3 seconds) for the
    deploy process to finish before emitting vm_deployed, sending notification
    emails and creating snapshot/backup definitions from the template.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    msg = result.get('message', '')

    if result['returncode'] == 0 and msg.find('Successfully created') >= 0:
        json = result.pop('json', None)
        try:  # save json from smartos
            json_active = vm.json.load(json)
            vm.json_active = json_active
            vm.json = json_active
            if result['meta']['apiview']['recreate']:
                # Recreate wipes all snapshot/backup objects and resets the installed flag
                Snapshot.objects.filter(vm=vm).delete()
                SnapshotDefine.objects.filter(vm=vm).delete()
                BackupDefine.objects.filter(vm=vm).delete()
                vm.save_metadata('installed', False, save=False)
        except Exception as e:
            logger.error('Could not parse json output from POST vm_manage(%s). Error: %s', vm_uuid, e)
            _vm_error(task_id, vm)
            logger.exception(e)
            raise TaskException(result, 'Could not parse json output')
        else:
            # save all
            vm.save(update_node_resources=True, update_storage_resources=True)
            vm_update_ipaddress_usage(vm)
            # vm_json_active_changed.send(task_id, vm=vm)  # Signal! -> not needed because vm_deployed is called below
            vm_created.send(task_id, vm=vm)  # Signal!

            if msg.find('Successfully started') < 0:  # VM was created, but could not be started
                logger.error('VM %s was created, but could not be started! Error: %s', vm_uuid, msg)
                _vm_error(task_id, vm)
                raise TaskException(result, 'Initial start failed (%s)' % msg)

            sendmail(vm.owner, 'vm/base/vm_create_subject.txt', 'vm/base/vm_create_email.txt',
                     extra_context={'vm': vm}, user_i18n=True, dc=vm.dc, fail_silently=True)
    else:
        logger.error('Found nonzero returncode in result from POST vm_manage(%s). Error: %s', vm_uuid, msg)
        # Revert status and inform user
        _vm_create_cb_failed(result, task_id, vm)

        if result['meta']['apiview']['recreate'] and msg.find('Successfully deleted') >= 0:
            _vm_error(task_id, vm)  # Something went terribly wrong

        # and FAIL this task
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (result['returncode'], msg))

    # So far so good. Now wait for deploy_over in vm_status_event_cb
    logger.info('VM %s is waiting for deploy_over...', vm_uuid)
    timer = 0
    repeat = 0

    while not vm.has_deploy_finished():
        if timer > VMS_VM_DEPLOY_TOOLONG:  # 10 minutes is too long
            if repeat == VMS_VM_DEPLOY_TOOLONG_MAX_CYCLES:  # 20 minutes is really too long
                logger.error('VM %s deploy process has timed out!', vm_uuid)
                _vm_error(task_id, vm)
                result['message'] = 'VM %s deploy has timed out' % vm.hostname
                task_log_cb_error(result, task_id, vm=vm, **result['meta'])
                return result

            repeat += 1
            timer = 0
            logger.error('VM %s takes too long to deploy. Sending force stop/start', vm_uuid)
            # noinspection PyUnusedLocal
            tid, err = vm_reset(vm)

        sleep(3.0)
        timer += 3

    logger.info('VM %s is completely deployed!', vm_uuid)
    internal_metadata = vm.json.get('internal_metadata', {}).copy()  # save internal_metadata for email
    vm = Vm.objects.select_related('dc', 'template').get(pk=vm.pk)  # Reload vm
    vm_deployed.send(task_id, vm=vm)  # Signal!
    sendmail(vm.owner, 'vm/base/vm_deploy_subject.txt', 'vm/base/vm_deploy_email.txt', fail_silently=True,
             extra_context={'vm': vm, 'internal_metadata': internal_metadata}, user_i18n=True, dc=vm.dc)

    try:
        result['message'] = '\n'.join(result['message'].strip().split('\n')[:-1])  # Remove "started" stuff
    except Exception as e:
        logger.exception(e)

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    try:
        if vm.template:  # Try to create snapshot/backup definitions defined by template
            vm_define_snapshot, vm_define_backup = vm.template.vm_define_snapshot, vm.template.vm_define_backup

            if vm_define_snapshot or vm_define_backup:
                user = User.objects.get(id=user_id_from_task_id(task_id))
                request = get_dummy_request(vm.dc, method='POST', user=user)
                SnapshotDefineView.create_from_template(request, vm, vm_define_snapshot, log=logger)
                BackupDefineView.create_from_template(request, vm, vm_define_backup, log=logger)
    except Exception as e:
        # Best effort - definition creation failure must not fail the already-successful deploy
        logger.exception(e)

    return result
def vm_update_cb(result, task_id, vm_uuid=None, new_node_uuid=None):
    """A callback function for api.vm.base.views.vm_manage.

    Like the plain update callback, but additionally supports node migration: when
    new_node_uuid is set, it verifies that the harvested json does not differ from
    the DB definition (modulo a whitelist of ignorable attributes) before switching
    the VM's node association inside a DB transaction.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    _vm_update_cb_done(result, task_id, vm)
    msg = result.get('message', '')
    force = result['meta']['apiview']['force']

    if result['returncode'] == 0 and (force or msg.find('Successfully updated') >= 0):
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
        except Exception as e:
            logger.exception(e)
            logger.error('Could not parse json output from PUT vm_manage(%s). Error: %s', vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        vm_delete_snapshots_of_removed_disks(vm)  # Do this before updating json and json_active
        vm.json = json_active
        update_fields = ['enc_json', 'enc_json_active', 'changed']
        # Attributes that may legitimately differ between node and DB during migration
        ignored_changed_vm_attrs = (
            'set_customer_metadata',
            'remove_customer_metadata',
            'create_timestamp',
            'boot_timestamp',
            'autoboot',
            'vnc_port',
            'update_disks',
        )

        if new_node_uuid:
            update_dict = vm.json_update()

            for i in ignored_changed_vm_attrs:
                update_dict.pop(i, None)

            if update_dict:
                raise TaskException(result, 'VM definition on compute node differs from definition in DB in '
                                            'following attributes: %s' % ','.join(update_dict.keys()))
            update_fields.append('node_id')

        old_json_active = vm.json_active
        vm.json_active = json_active

        if new_node_uuid:
            node = Node.objects.get(uuid=new_node_uuid)
            vm.set_node(node)

        with transaction.atomic():
            vm.save(update_node_resources=True, update_storage_resources=True, update_fields=update_fields)
            vm_update_ipaddress_usage(vm)
            vm_json_active_changed.send(task_id, vm=vm, old_json_active=old_json_active)  # Signal!

        if new_node_uuid:
            vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!
            result['message'] = 'Node association successfully changed on VM %s' % vm.hostname

            # NOTE(review): re-running vm_update after migration when the json still differs - confirm nesting
            if vm.json_changed():
                vm_update(vm)
    else:
        logger.error('Found nonzero returncode in result from PUT vm_manage(%s). Error: %s', vm_uuid, msg)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
def vm_from_json(request, task_id, json, dc, owner=settings.ADMIN_USER, template=True, save=False,  # noqa: R701
                 update_ips=True, update_dns=True):
    """Parse json a create new Vm object

    @param dict json: loaded json dictionary obtained via vmadm get
    @param int owner: whether to fetch the vm.owner User object. Also indicates an user id, \
which will be used as fallback
    @param bool template: True if the template should be set according to internal_metadata.template
    @param Dc dc: Dc object for the Vm
    @param bool save: Should the new Vm be saved in DB?
    @param bool update_ips: Update server <-> IpAddress relations. Only performed if save and update_ips are True.
    @param bool update_dns: Try to update/create DNS record for server's primary IP. Only performed if save is True.
    @return: new Vm instance
    """
    # noinspection PyUnresolvedReferences
    hostname_length = VmDefineSerializer.base_fields['hostname'].max_length
    # noinspection PyUnresolvedReferences
    alias_length = VmDefineSerializer.base_fields['alias'].max_length

    # basic information (KeyError)
    vm = Vm(uuid=json['uuid'], hostname=json['hostname'][:hostname_length],
            status=Vm.STATUS_DICT[json['state']], dc=dc)
    vm.new = True
    brand = json['brand']

    # json and json_active
    vm.json = vm.json_active = json

    # node & vnc_port (no check)
    vm.node_id = json.get('server_uuid', None)
    vm.vnc_port = json.get('vnc_port', None)

    # alias - prefer internal_metadata, fall back to top-level alias, then to hostname's first label
    try:
        vm.alias = json['internal_metadata']['alias'][:alias_length]
    except KeyError:
        try:
            alias = json['alias']
        except KeyError:
            alias = vm.hostname
        vm.alias = alias.split('.')[0][:alias_length]
        logger.warning('Alias for new VM %s could not be auto-detected. Fallback to alias=%s', vm, vm.alias)

    # ostype - fall back from internal_metadata to a brand-based guess
    try:
        vm.ostype = int(json['internal_metadata']['ostype'])
    except KeyError:
        if brand == 'kvm':
            vm.ostype = Vm.LINUX
        elif brand == 'lx':
            vm.ostype = Vm.LINUX_ZONE
        else:
            vm.ostype = Vm.SUNOS_ZONE
        logger.warning('OS type for new VM %s could not be auto-detected. Fallback to ostype=%s', vm, vm.ostype)

    # owner - try owner_uuid from json, fall back to the id passed in via the owner parameter
    if owner:
        try:
            vm.owner = User.objects.get(id=int(json['owner_uuid']))
        except (KeyError, ValueError, User.DoesNotExist):
            vm.owner = User.objects.get(id=owner)
            logger.warning('Owner for new VM %s could not be auto-detected. Fallback to owner=%s', vm, vm.owner)

    # template - best effort; missing template name or unknown template leaves vm.template unset
    if template:
        tmpname = None
        try:
            tmpname = json['internal_metadata']['template']
            if tmpname:
                vm.template = VmTemplate.objects.get(name=json['internal_metadata']['template'], dc=dc)
        except (KeyError, VmTemplate.DoesNotExist):
            vm.template = None
            if tmpname:
                logger.warning('Template "%s" for new VM %s could not be auto-detected', tmpname, vm)

    # images - validate that every referenced image exists in this DC
    for img_uuid in vm.get_image_uuids():
        Image.objects.get(uuid=img_uuid, dc=dc)  # May raise Image.DoesNotExist

    # subnets - auto-create networks that the harvested VM uses but are unknown to the DB
    for net_uuid in vm.get_network_uuids():
        if not Subnet.objects.filter(uuid=net_uuid, dc=dc).exists():
            # VM uses subnet that doesn't exist in database. Let's create it.
            # retrieve nic config for the non-existent network from harvested VM
            nic = vm.get_nic_config(net_uuid)
            network = IPAddress.get_net_address(nic['ip'], nic['netmask'])
            # NOTE(review): vm.name - other log calls here pass vm directly; confirm Vm defines .name
            logger.warning('VM "%s" uses undefined network "%s/%s" over nic tag "%s". Trying to create it.',
                           vm.name, network, nic['netmask'], nic['nic_tag'])
            # take the network name from the nic tag
            net_name = nic['nic_tag']

            if Subnet.objects.filter(name=net_name).exists():
                logger.info('Network "%s" already exists, adding number at the end', net_name)
                # BUGFIX: suffix the constant base name instead of the already-suffixed candidate;
                # the original re-used net_name and produced "tag-1", "tag-1-2", "tag-1-2-3", ...
                base_name = net_name

                for net_name_number in range(1, 100):
                    net_name = '%s-%s' % (base_name, net_name_number)

                    if not Subnet.objects.filter(name=net_name).exists():
                        # this name doesn't exist, let's use it
                        break
                # if there really exist 99 networks with name "${base_name}-${net_name_number}",
                # subnet creation will fail

            gateway = nic.get('gateway', None)
            mtu = nic.get('mtu', None)

            # create the network
            new_network = Subnet(name=net_name, uuid=net_uuid, nic_tag=nic['nic_tag'], mtu=mtu, network=network,
                                 netmask=nic['netmask'], gateway=gateway, owner_id=settings.ADMIN_USER,
                                 alias=net_name)
            new_network.save()
            # attach network to specified dc
            new_network.dc.add(dc)

            if dc.name == settings.VMS_DC_ADMIN:
                # when attaching network to admin DC, attach it also to main dc
                main_dc = Dc.objects.get(name=settings.VMS_DC_MAIN)
                new_network.dc.add(main_dc)

    # Initialize uptime now
    logger.info(vm.update_uptime(force_init=True))

    if save:
        vm.save(sync_json=True, update_node_resources=True, update_storage_resources=True)
        logger.info('Server %s (%s) was saved', vm.uuid, vm)
        exc = None
        primary_ip = None

        if update_ips:
            for i, nic in enumerate(vm.json_get_nics()):
                if 'network_uuid' not in nic:
                    logger.error('Server %s NIC ID %s has no network_uuid defined', vm, i)
                    exc = KeyError('network_uuid')
                    break

                try:
                    net = Subnet.objects.get(uuid=nic['network_uuid'], dc=dc)
                except Subnet.DoesNotExist as e:
                    exc = e
                    break
                else:
                    if net.dhcp_passthrough:
                        # Externally managed network - no IP bookkeeping on our side
                        logger.info('Server %s NIC ID %s uses an externally managed network %s', vm, i, net.name)
                        continue

                    ip, err = _vm_save_ip_from_json(vm, net, nic['ip'], allowed_ips=False)

                    if err:
                        logger.critical(err)
                        exc = SystemError(err)
                        break

                    if i == 0:
                        primary_ip = ip

                    for ipaddress in nic.get('allowed_ips', ()):
                        _, err = _vm_save_ip_from_json(vm, net, ipaddress, allowed_ips=True)

                        if err:
                            logger.critical(err)
                            exc = SystemError(err)
                            break

        if exc:
            # Any NIC/IP failure is fatal: remove the just-saved VM and propagate
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        try:
            vm_update_ipaddress_usage(vm)
        except ValueError as exc:
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        if update_dns and primary_ip:
            # This will fail silently (exception is logged)
            VmDefineNicSerializer.save_a(request, task_id, vm, primary_ip)

            if primary_ip.subnet.ptr_domain:
                # This will fail silently (exception is logged)
                VmDefineNicSerializer.save_ptr(request, task_id, vm, primary_ip, primary_ip.subnet,
                                               content=vm.hostname)

    return vm