def vm_status_event_cb(result, task_id):
    """
    Callback task run by erigonesd-vmon service after detecting a VM status change.
    """
    vm_uuid = result['zonename']
    state_cache = cache.get(Vm.status_key(vm_uuid))
    zoneid_cache = cache.get(Vm.zoneid_key(vm_uuid))
    state = Vm.STATUS_DICT[result['state']]
    zoneid = result['zoneid']
    when = result['when']
    change_time = datetime.utcfromtimestamp(float(when) / pow(10, int(log10(when)) - 9)).replace(tzinfo=utc)

    if state == Vm.STOPPED:
        zoneid = Vm.STOPPED_ZONEID  # The sysevent monitor sends the last known zoneid even for stopped VMs

    # Check and eventually save VM's status
    _vm_status_check(task_id, result['node_uuid'], vm_uuid, zoneid, state, state_cache=state_cache,
                     zoneid_cache=zoneid_cache, change_time=change_time)
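
# Illustrative sketch (not part of the original module): the sysevent 'when' value
# is an epoch timestamp of unknown precision (e.g. micro- or nanoseconds).
# int(log10(when)) is the number of digits minus one, so dividing by
# 10 ** (int(log10(when)) - 9) always leaves ten digits (epoch seconds) before
# the decimal point. A stdlib-only equivalent of the conversion used above:
def _example_sysevent_when_to_datetime(when):
    """Normalize a sysevent 'when' counter to an aware UTC datetime (example only)."""
    from datetime import datetime, timezone
    from math import log10

    seconds = float(when) / pow(10, int(log10(when)) - 9)
    return datetime.fromtimestamp(seconds, tz=timezone.utc)

# Both of these hypothetical inputs map to the same epoch second (1467890123):
#   _example_sysevent_when_to_datetime(1467890123456789123)  # nanoseconds
#   _example_sysevent_when_to_datetime(1467890123456789)     # microseconds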
def mon_vm_delete(task_id, sender, vm_uuid=None, vm_hostname=None, vm_alias=None, dc_id=None, zabbix_sync=None,
                  external_zabbix_sync=None, log=LOG, **kwargs):
    """
    Remove host from zabbix.
    """
    assert vm_uuid
    assert dc_id
    assert zabbix_sync is not None
    assert external_zabbix_sync is not None

    # Create dummy VM object - used just to get zabbix_id and log things
    vm = Vm(uuid=vm_uuid, hostname=vm_hostname, alias=vm_alias)
    log.obj = vm.log_list

    if zabbix_sync or external_zabbix_sync:
        dc = Dc.objects.get_by_id(dc_id)
        return getZabbix(dc).vm_delete(Vm(uuid=vm_uuid, hostname=vm_hostname), internal=zabbix_sync,
                                       external=external_zabbix_sync, task_log=log)
    else:
        logger.info('Zabbix synchronization completely disabled for VM %s', vm_uuid)
        return None
def _vm_zoneid_check(task_id, uuid, zoneid, zoneid_cache=None, change_time=None):
    """Check/save new zoneid and zoneid_change"""
    if zoneid_cache is not None:
        zoneid_cache = int(zoneid_cache)

    if zoneid_cache != zoneid:
        # Save new zoneid into cache
        logger.warn('Detected zone ID change %s->%s for vm %s', zoneid_cache, zoneid, uuid)

        if change_time:
            old_change_time = cache.get(Vm.zoneid_change_key(uuid))

            if old_change_time and old_change_time > change_time:
                logger.warn('Ignoring zone ID change %s->%s of VM %s because it is too old: %s > %s',
                            zoneid_cache, zoneid, uuid, old_change_time, change_time)
                return

        cache.set(Vm.zoneid_key(uuid), zoneid)
        cache.set(Vm.zoneid_change_key(uuid), change_time or timezone.now())
        vm_zoneid_changed.send(task_id, vm_uuid=uuid, zoneid=zoneid, old_zoneid=zoneid_cache)  # Signal!
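
# Illustrative sketch (example only) of the change_time guard above: zone ID and
# status events may arrive out of order, so a change is applied only when its
# timestamp is newer than the cached timestamp of the last applied change
# (last-writer-wins):
#
#     from datetime import datetime, timedelta, timezone
#
#     now = datetime.now(timezone.utc)
#     cached_change_time = now                       # last applied change
#     stale_event_time = now - timedelta(seconds=5)  # late-arriving older event
#     assert cached_change_time > stale_event_time   # -> the event is ignored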
def __init__(self, request, vm, *args, **kwargs):
    self.dns = None
    self.dns_content = None

    # Cache vm._fqdn hostname/domain pair and find dns record
    # This will also fill the _available_domains list
    if vm and vm.hostname_is_valid_fqdn(cache=False):  # Will return False if DNS_ENABLED is False
        self.dns = Record.get_records_A(vm.hostname, vm.fqdn_domain)

        if self.dns:
            self.dns_content = ', '.join([d.content for d in self.dns])

    # Initial data
    if vm and 'initial' not in kwargs:
        kwargs['initial'] = self._initial_data(request, vm)

    # Parent constructor
    super(ServerSettingsForm, self).__init__(request, vm, *args, **kwargs)

    if vm:
        # noinspection PyProtectedMember
        domains = [(i, i) for i in vm._available_domains]

        # Invalid (empty) domain must be added into domain choices
        if not vm.fqdn_domain:
            domains.append(('', ''))
            self.fields['domain'].required = False
    else:
        domains = [(i, i) for i in Vm.available_domains(request.dc, request.user)]

        if not request.dc.settings.DNS_ENABLED:
            self.fields['domain'].required = False

    # Set available domains
    domains.sort()
    self.fields['domain'].choices = domains
def vm_status_current_cb(result, task_id, vm_uuid=None):
    """
    A callback function for GET api.vm.status.views.vm_status.
    It is responsible for displaying the actual VM status to the user and optionally changing status in DB.
    """
    stdout = result.pop('stdout', '')
    stderr = result.pop('stderr', '')
    rc = result.pop('returncode')

    if rc != 0:
        logger.error('Found nonzero returncode in result from GET vm_status(%s). Error: %s', vm_uuid, stderr)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (rc, stderr))

    line = stdout.strip().split(':')
    result['status'] = line[0]
    vm = Vm.objects.select_related('node').get(uuid=vm_uuid)

    try:
        state = Vm.STATUS_DICT[result['status']]
        zoneid = int(line[1] or Vm.STOPPED_ZONEID)
    except (KeyError, IndexError):
        result['message'] = 'Unidentified VM status'
    else:
        result['message'] = ''
        state_cache = cache.get(Vm.status_key(vm_uuid))
        zoneid_cache = cache.get(Vm.zoneid_key(vm_uuid))

        if state_cache != state or zoneid_cache != zoneid:
            # Check and eventually save VM's status
            _vm_status_check(task_id, vm.node.uuid, vm_uuid, zoneid, state, state_cache=state_cache,
                             zoneid_cache=zoneid_cache, change_time=_get_task_time(result, 'exec_time'))

    vm.tasks_del(task_id)

    return result
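
# Hedged note (format inferred from the parser above, not confirmed by the
# source): the remote command behind GET vm_status is expected to print a single
# "<state>:<zoneid>" line, e.g. "running:5" or "stopped:" - an empty zoneid
# falls back to Vm.STOPPED_ZONEID.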
def vm_status_current_cb(result, task_id, vm_uuid=None, force_change=False):
    """
    A callback function for GET api.vm.status.views.vm_status.
    It is responsible for displaying the actual VM status to the user and optionally changing status in DB.
    """
    stdout = result.pop('stdout', '')
    stderr = result.pop('stderr', '')
    rc = result.pop('returncode')

    if rc != 0:
        logger.error('Found nonzero returncode in result from GET vm_status(%s). Error: %s', vm_uuid, stderr)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (rc, stderr))

    line = stdout.strip().split(':')
    result['status'] = line[0]
    result['status_changed'] = False
    vm = Vm.objects.select_related('node', 'slavevm').get(uuid=vm_uuid)

    try:
        if result['status']:
            state = Vm.STATUS_DICT[result['status']]
        else:
            # vmadm list returned with rc=0 and empty stdout => VM does not exist on compute node
            state = Vm.NOTCREATED
            result['status'] = dict(Vm.STATUS).get(state)
    except (KeyError, IndexError):
        result['message'] = 'Unidentified VM status'
    else:
        result['message'] = ''
        state_cache = cache.get(Vm.status_key(vm_uuid))

        # Check and eventually save VM's status
        if _vm_status_check(task_id, vm.node.uuid, vm_uuid, state, state_cache=state_cache,
                            force_change=force_change, change_time=_get_task_time(result, 'exec_time')):
            result['status_changed'] = True

    vm.tasks_del(task_id)

    return result
def vm_status_event_cb(result, task_id):
    """
    Callback task run by erigonesd-vmon service after detecting a VM status change.
    """
    vm_uuid = result['zonename']
    state_cache = cache.get(Vm.status_key(vm_uuid))
    state = Vm.STATUS_DICT[result['state']]
    when = result['when']
    change_time = datetime.utcfromtimestamp(float(when) / pow(10, int(log10(when)) - 9)).replace(tzinfo=utc)

    # Check and eventually save VM's status
    _vm_status_check(task_id, result['node_uuid'], vm_uuid, state, state_cache=state_cache, change_time=change_time)
def vm_from_json(request, task_id, json, dc, owner=settings.ADMIN_USER, template=True, save=False,
                 update_ips=True, update_dns=True):
    """Parse json and create a new Vm object

    @param dict json: loaded json dictionary obtained via vmadm get
    @param int owner: whether to fetch the vm.owner User object. Also indicates a user id, \
    which will be used as fallback
    @param bool template: True if the template should be set according to internal_metadata.template
    @param Dc dc: Dc object for the Vm
    @param bool save: Should the new Vm be saved in DB?
    @param bool update_ips: Update server <-> IpAddress relations. Only performed if save and update_ips are True.
    @param bool update_dns: Try to update/create DNS record for server's primary IP. Only performed if save is True.
    @return: new Vm instance
    """
    # noinspection PyUnresolvedReferences
    hostname_length = VmDefineSerializer.base_fields['hostname'].max_length
    # noinspection PyUnresolvedReferences
    alias_length = VmDefineSerializer.base_fields['alias'].max_length

    # basic information (KeyError)
    vm = Vm(uuid=json['uuid'], hostname=json['hostname'][:hostname_length],
            status=Vm.STATUS_DICT[json['state']], dc=dc)
    vm.new = True
    brand = json['brand']

    # json and json_active
    vm.json = vm.json_active = json

    # node & vnc_port (no check)
    vm.node_id = json.get('server_uuid', None)
    vm.vnc_port = json.get('vnc_port', None)

    # alias
    try:
        vm.alias = json['internal_metadata']['alias'][:alias_length]
    except KeyError:
        try:
            alias = json['alias']
        except KeyError:
            alias = vm.hostname
        vm.alias = alias.split('.')[0][:alias_length]
        logger.warning('Alias for new VM %s could not be auto-detected. Fallback to alias=%s', vm, vm.alias)

    # ostype
    try:
        vm.ostype = int(json['internal_metadata']['ostype'])
    except KeyError:
        if brand == 'kvm':
            vm.ostype = Vm.LINUX
        elif brand == 'lx':
            vm.ostype = Vm.LINUX_ZONE
        else:
            vm.ostype = Vm.SUNOS_ZONE
        logger.warning('OS type for new VM %s could not be auto-detected. Fallback to ostype=%s', vm, vm.ostype)

    # owner
    if owner:
        try:
            vm.owner = User.objects.get(id=int(json['owner_uuid']))
        except (KeyError, ValueError, User.DoesNotExist):
            vm.owner = User.objects.get(id=owner)
            logger.warning('Owner for new VM %s could not be auto-detected. Fallback to owner=%s', vm, vm.owner)

    # template
    if template:
        tmpname = None
        try:
            tmpname = json['internal_metadata']['template']
            if tmpname:
                vm.template = VmTemplate.objects.get(name=json['internal_metadata']['template'], dc=dc)
        except (KeyError, VmTemplate.DoesNotExist):
            vm.template = None
            if tmpname:
                logger.warning('Template "%s" for new VM %s could not be auto-detected', tmpname, vm)

    # images
    for img_uuid in vm.get_image_uuids():
        Image.objects.get(uuid=img_uuid, dc=dc)  # May raise Image.DoesNotExist

    # subnets
    for net_uuid in vm.get_network_uuids():
        Subnet.objects.get(uuid=net_uuid, dc=dc)  # May raise Subnet.DoesNotExist

    # Initialize uptime now
    logger.info(vm.update_uptime(force_init=True))

    if save:
        vm.save(sync_json=True, update_node_resources=True, update_storage_resources=True)
        logger.info('Server %s (%s) was saved', vm.uuid, vm)
        exc = None
        primary_ip = None

        if update_ips:
            for i, nic in enumerate(vm.json_get_nics()):
                if 'network_uuid' not in nic:
                    logger.error('Server %s NIC ID %s has no network_uuid defined', vm, i)
                    exc = KeyError('network_uuid')
                    break

                try:
                    net = Subnet.objects.get(uuid=nic['network_uuid'], dc=dc)
                except Subnet.DoesNotExist as e:
                    exc = e
                    break
                else:
                    if net.dhcp_passthrough:
                        logger.info('Server %s NIC ID %s uses an externally managed network %s', vm, i, net.name)
                        continue

                    ip, err = _vm_save_ip_from_json(vm, net, nic['ip'], allowed_ips=False)

                    if err:
                        logger.critical(err)
                        exc = SystemError(err)
                        break

                    if i == 0:
                        primary_ip = ip

                    for ipaddress in nic.get('allowed_ips', ()):
                        _, err = _vm_save_ip_from_json(vm, net, ipaddress, allowed_ips=True)

                        if err:
                            logger.critical(err)
                            exc = SystemError(err)
                            break

        if exc:
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        try:
            vm_update_ipaddress_usage(vm)
        except ValueError as exc:
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        if update_dns and primary_ip:
            # This will fail silently (exception is logged)
            VmDefineNicSerializer.save_a(request, task_id, vm, primary_ip)

            if primary_ip.subnet.ptr_domain:
                # This will fail silently (exception is logged)
                VmDefineNicSerializer.save_ptr(request, task_id, vm, primary_ip, primary_ip.subnet,
                                               content=vm.hostname)

    return vm
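
# Illustrative sketch (hypothetical values): the subset of `vmadm get` JSON keys
# that vm_from_json() reads above; the whole dict is also stored verbatim in
# vm.json / vm.json_active:
#
#     {
#         "uuid": "3e8f7a2c-...", "hostname": "web01.example.com", "state": "running",
#         "brand": "kvm", "server_uuid": "<compute node uuid>", "vnc_port": 42001,
#         "owner_uuid": "1", "alias": "web01.example.com",
#         "internal_metadata": {"alias": "web01", "ostype": "1", "template": "linux-small"}
#     }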
def vm_status_all_cb(result, task_id, node_uuid=None):
    """
    A callback function for api.vm.status.tasks.vm_status_all.
    Compare actual VM status against cached data. If something changes emit a message and update DB.
    """
    tid = vm_status_all_cb.request.id

    if result.get('returncode') != 0:
        logger.warn('Found nonzero returncode for task %s(%s)', 'vm_status_all', node_uuid)
        return

    vms = []
    r_states = redis.pipeline()
    r_zoneids = redis.pipeline()

    for line in result['stdout'].splitlines():
        try:
            i = line.strip().split(':', 3)
            uuid = i[0]
            state = Vm.STATUS_DICT[i[1]]  # int
            zoneid = int(i[2] or Vm.STOPPED_ZONEID)  # int; "-1" means that the VM is not running
            exit_or_boot_timestamp = i[3]
        except (KeyError, IndexError, ValueError):
            try:
                # noinspection PyUnboundLocalVariable
                if i[1] in Vm.STATUS_UNUSED:  # 255
                    logger.info('Ignoring unusable status ("%s") from output of task %s(%s)', line,
                                'vm_status_all', node_uuid)
                    continue
            except IndexError:
                pass
            logger.error('Could not parse line ("%s") from output of task %s(%s)', line, 'vm_status_all', node_uuid)
            continue

        vms.append((uuid, state, zoneid, exit_or_boot_timestamp))
        r_states.get(KEY_PREFIX + Vm.status_key(uuid))
        r_zoneids.get(KEY_PREFIX + Vm.zoneid_key(uuid))

    if not vms:
        return

    vm_states = r_states.execute()
    vm_zoneids = r_zoneids.execute()

    for i, line in enumerate(vms):
        uuid, state, zoneid, exit_or_boot_timestamp = line
        state_cache = vm_states[i]  # None or string
        zoneid_cache = vm_zoneids[i]  # None or string
        # Check and eventually save VM's status
        _vm_status_check(tid, node_uuid, uuid, zoneid, state, state_cache=state_cache, zoneid_cache=zoneid_cache,
                         change_time=_parse_timestamp(exit_or_boot_timestamp))
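
# Hedged note (format inferred from the parser above): each stdout line is
# expected to look like "<uuid>:<state>:<zoneid>:<exit_or_boot_timestamp>",
# e.g. "<uuid>:running:5:<timestamp>"; an empty or "-1" zoneid means the VM is
# not running.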
def _vm_status_check(task_id, node_uuid, uuid, zoneid, state, state_cache=None, zoneid_cache=None, vm=None,
                     change_time=None, **kwargs):
    """Helper function for checking VM's new/actual state used by following callbacks:
    - vm_status_all_cb
    - vm_status_event_cb
    - vm_status_current_cb
    """
    if state_cache is None:
        try:  # vm state not in cache, loading from DB...
            vm = Vm.objects.get(uuid=uuid)
        except Vm.DoesNotExist:
            logger.warn('Got status of undefined vm (%s) - ignoring', uuid)
            return
        else:  # ...and saving to cache
            state_cache = vm.status
            cache.set(Vm.status_key(uuid), vm.status)
    else:
        state_cache = int(state_cache)

    _vm_zoneid_check(task_id, uuid, zoneid, zoneid_cache, change_time=change_time)

    if state_cache == state:
        return

    # vm status changed!!!

    # Deploying stuff
    deploy = False
    deploy_finish = False
    deploy_over = False
    deploy_dummy = False

    if state_cache == Vm.CREATING:
        if state == Vm.RUNNING:
            logger.warn('Detected new status %s for vm %s. We were waiting for this. '
                        'Switching state to (A) "running (2)" or (B) "deploying_start (12)" or '
                        '(C) "deploying_dummy (14)" and running vm_deploy(force_stop=True).', state, uuid)
            deploy = True
        else:
            logger.warn('Detected new status %s for vm %s, but vm waiting for deploy (%s). '
                        'Awaiting running state.', state, uuid, state_cache)
            return

    elif state_cache == Vm.DEPLOYING_DUMMY:
        if state == Vm.STOPPED:
            logger.warn('Detected new status %s for vm %s. We were waiting for this. Dummy deploy is finished. '
                        'Switching state to "stopped".', state, uuid)
            deploy_over = True
        else:
            logger.warn('Detected new status %s for vm %s, but vm is dummy deploying (%s). '
                        'Awaiting stopped state.', state, uuid, state_cache)
            return

    elif state_cache == Vm.DEPLOYING_START:
        if state == Vm.STOPPED:
            logger.warn('Detected new status %s for vm %s. We were waiting for this. '
                        'Switching state to "deploying_finish (13)" and running vm_deploy task.', state, uuid)
            deploy_finish = True
        else:
            logger.warn('Detected new status %s for vm %s, but vm is deploying (%s). '
                        'Awaiting stopped state.', state, uuid, state_cache)
            return

    elif state_cache == Vm.DEPLOYING_FINISH:
        if state == Vm.RUNNING:
            logger.warn('Detected new status %s for vm %s. We were waiting for this. Deploy is finished. '
                        'Switching state to "running".', state, uuid)
            deploy_over = True
        else:
            logger.warn('Detected new status %s for vm %s, but vm waiting for finishing deploy (%s). '
                        'Awaiting running state.', state, uuid, state_cache)
            return

    elif state_cache not in Vm.STATUS_KNOWN:
        logger.debug('Detected unknown cached status %s for vm %s', state_cache, uuid)
        return

    # HERE WE GO
    logger.warn('Detected status change %s->%s for vm %s', state_cache, state, uuid)

    try:  # get VM
        if not vm:
            vm = Vm.objects.select_related('node', 'slavevm').get(uuid=uuid)
    except Vm.DoesNotExist:
        logger.error('Status of undefined vm (%s) changed', uuid)
        return

    if vm.node.uuid != node_uuid:  # double vm protection
        logger.error('Detected status change for vm %s on node %s, but the vm should be on %s!',
                     uuid, vm.node.uuid, node_uuid)
        return

    if deploy:  # deploy process started (VM is running)
        if vm.is_deploy_needed():
            vm.status = Vm.DEPLOYING_START
            vm.save_status(status_change_time=change_time)
            return  # The deploy will be over after VM is stopped by itself from inside
        elif vm.is_blank():  # Empty VM is running -> stop VM via vm_deploy()
            vm.status = Vm.DEPLOYING_DUMMY
            vm.save_status(status_change_time=change_time)
            deploy_dummy = True  # The deploy will be over after VM is stopped by vm_deploy()
        else:  # Deploy is not needed, but VM has an image. We are done here -> VM is running
            deploy_over = True

    if deploy_finish:  # finish deploy process -> the deploy will be over when VM is started by vm_deploy()
        vm.status = Vm.DEPLOYING_FINISH
        vm.save_status(status_change_time=change_time)

    if deploy_finish or deploy_dummy:
        _tid, _err = vm_deploy(vm, force_stop=deploy_dummy)

        if _err:
            logger.error('Got error when creating deploy task. Task: %s. Error: %s.', _tid, _err)
        else:
            logger.warn('Created deploy task: %s.', _tid)

        return

    if vm.is_changing_status():
        logger.warn('Detected running vm_status task (pending state) for vm %s', uuid)

    _save_vm_status(task_id, vm, state, old_state=state_cache, deploy_over=deploy_over, change_time=change_time,
                    **kwargs)
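
# Hedged summary (illustration only) of the deploy-related transitions that
# _vm_status_check() waits for; any other observed state in these cached states
# is logged and ignored until the awaited state arrives:
#
#     CREATING         + running -> DEPLOYING_START (deploy needed), or
#                                   DEPLOYING_DUMMY + vm_deploy(force_stop=True) (blank VM), or
#                                   running (image-based VM, deploy over)
#     DEPLOYING_DUMMY  + stopped -> stopped (dummy deploy over)
#     DEPLOYING_START  + stopped -> DEPLOYING_FINISH + vm_deploy task
#     DEPLOYING_FINISH + running -> running (deploy over)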
def vm_status_changed(tid, vm, state, old_state=None, save_state=True, deploy_over=False, change_time=None):
    """
    This function is something like a dummy callback.
    It should be called when VM state is changed from a task.
    """
    if change_time:
        old_change_time = cache.get(Vm.status_change_key(vm.uuid))

        if old_change_time and old_change_time > change_time:
            logger.warn('Ignoring status change %s->%s of VM %s (%s) because it is too old: %s > %s',
                        vm.status, state, vm, vm.uuid, old_change_time, change_time)
            return None

    # save to DB and also update cache
    if save_state:
        vm.status = state

        if old_state is not None:  # if cached status != vm.status (don't remember why we need this)
            vm._orig_status = old_state

        vm.save(update_fields=('status', 'status_change', 'uptime', 'uptime_changed'),
                status_change_time=change_time)

    if deploy_over:  # deploy process ended
        # Set the deploy_finished flag to inform vm_create_cb
        vm.set_deploy_finished()

    if vm.is_slave_vm():
        logger.info('Detected status change of slave VM %s - "%s"', vm.uuid, vm)
        return None

    # Adjust task ID according to VM parameters
    tid = task_id_from_task_id(tid, owner_id=vm.owner.id, dc_id=vm.dc_id)

    # Signals!
    vm_status_changed_sig.send(tid, vm=vm, old_state=old_state, new_state=state)  # Signal!

    if vm.status == vm.RUNNING:
        vm_running.send(tid, vm=vm, old_state=old_state)  # Signal!
    elif vm.status == vm.STOPPED:
        vm_stopped.send(tid, vm=vm, old_state=old_state)  # Signal!

    # data for next operations
    msg = LOG_STATUS_CHANGE
    task_event = VmStatusChanged(tid, vm)

    # log task
    task_log(tid, msg, vm=vm, owner=vm.owner, task_result=task_event.result, task_status=states.SUCCESS,
             time=vm.status_change, update_user_tasks=False)

    # inform users (VM owners logged in GUI)
    task_event.send()
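
# Illustrative sketch (hypothetical receiver, example only): consumers can react
# to the broadcasts above by connecting to vm_status_changed_sig, vm_running or
# vm_stopped, assuming these are standard django.dispatch signals:
#
#     from django.dispatch import receiver
#
#     @receiver(vm_running)
#     def _on_vm_running(sender, vm=None, old_state=None, **kwargs):
#         logger.info('VM %s entered running state (task %s)', vm, sender)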
def vm_from_json(request, task_id, json, dc, owner=settings.ADMIN_USER, template=True, save=False,  # noqa: R701
                 update_ips=True, update_dns=True):
    """Parse json and create a new Vm object

    @param dict json: loaded json dictionary obtained via vmadm get
    @param int owner: whether to fetch the vm.owner User object. Also indicates a user id, \
    which will be used as fallback
    @param bool template: True if the template should be set according to internal_metadata.template
    @param Dc dc: Dc object for the Vm
    @param bool save: Should the new Vm be saved in DB?
    @param bool update_ips: Update server <-> IpAddress relations. Only performed if save and update_ips are True.
    @param bool update_dns: Try to update/create DNS record for server's primary IP. Only performed if save is True.
    @return: new Vm instance
    """
    # noinspection PyUnresolvedReferences
    hostname_length = VmDefineSerializer.base_fields['hostname'].max_length
    # noinspection PyUnresolvedReferences
    alias_length = VmDefineSerializer.base_fields['alias'].max_length

    # basic information (KeyError)
    vm = Vm(uuid=json['uuid'], hostname=json['hostname'][:hostname_length],
            status=Vm.STATUS_DICT[json['state']], dc=dc)
    vm.new = True
    brand = json['brand']

    # json and json_active
    vm.json = vm.json_active = json

    # node & vnc_port (no check)
    vm.node_id = json.get('server_uuid', None)
    vm.vnc_port = json.get('vnc_port', None)

    # alias
    try:
        vm.alias = json['internal_metadata']['alias'][:alias_length]
    except KeyError:
        try:
            alias = json['alias']
        except KeyError:
            alias = vm.hostname
        vm.alias = alias.split('.')[0][:alias_length]
        logger.warning('Alias for new VM %s could not be auto-detected. Fallback to alias=%s', vm, vm.alias)

    # ostype
    try:
        vm.ostype = int(json['internal_metadata']['ostype'])
    except KeyError:
        if brand == 'kvm':
            vm.ostype = Vm.LINUX
        elif brand == 'lx':
            vm.ostype = Vm.LINUX_ZONE
        else:
            vm.ostype = Vm.SUNOS_ZONE
        logger.warning('OS type for new VM %s could not be auto-detected. Fallback to ostype=%s', vm, vm.ostype)

    # owner
    if owner:
        try:
            vm.owner = User.objects.get(id=int(json['owner_uuid']))
        except (KeyError, ValueError, User.DoesNotExist):
            vm.owner = User.objects.get(id=owner)
            logger.warning('Owner for new VM %s could not be auto-detected. Fallback to owner=%s', vm, vm.owner)

    # template
    if template:
        tmpname = None
        try:
            tmpname = json['internal_metadata']['template']
            if tmpname:
                vm.template = VmTemplate.objects.get(name=json['internal_metadata']['template'], dc=dc)
        except (KeyError, VmTemplate.DoesNotExist):
            vm.template = None
            if tmpname:
                logger.warning('Template "%s" for new VM %s could not be auto-detected', tmpname, vm)

    # images
    for img_uuid in vm.get_image_uuids():
        Image.objects.get(uuid=img_uuid, dc=dc)  # May raise Image.DoesNotExist

    # subnets
    for net_uuid in vm.get_network_uuids():
        if not Subnet.objects.filter(uuid=net_uuid, dc=dc).exists():
            # VM uses subnet that doesn't exist in database. Let's create it.
            # retrieve nic config for the non-existent network from harvested VM
            nic = vm.get_nic_config(net_uuid)
            network = IPAddress.get_net_address(nic['ip'], nic['netmask'])
            logger.warning('VM "%s" uses undefined network "%s/%s" over nic tag "%s". Trying to create it.',
                           vm.name, network, nic['netmask'], nic['nic_tag'])

            # take the network name from the nic tag
            net_name = nic['nic_tag']

            if Subnet.objects.filter(name=net_name).exists():
                logger.info('Network "%s" already exists, adding number at the end', net_name)
                net_name_base = net_name  # always derive candidate names from the original name
                net_name_number = 1

                while net_name_number < 100:
                    net_name = '%s-%s' % (net_name_base, net_name_number)

                    if not Subnet.objects.filter(name=net_name).exists():
                        # this name doesn't exist, let's use it
                        break
                    # if there really exist 99 networks with name "${net_name_base}-${net_name_number}",
                    # subnet creation will fail
                    net_name_number += 1

            gateway = None
            if 'gateway' in nic:
                gateway = nic['gateway']

            mtu = None
            if 'mtu' in nic:
                mtu = nic['mtu']

            # create the network
            new_network = Subnet(name=net_name, uuid=net_uuid, nic_tag=nic['nic_tag'], mtu=mtu, network=network,
                                 netmask=nic['netmask'], gateway=gateway, owner_id=settings.ADMIN_USER,
                                 alias=net_name)
            new_network.save()

            # attach network to specified dc
            new_network.dc.add(dc)

            if dc.name == settings.VMS_DC_ADMIN:
                # when attaching network to admin DC, attach it also to main dc
                main_dc = Dc.objects.get(name=settings.VMS_DC_MAIN)
                new_network.dc.add(main_dc)

    # Initialize uptime now
    logger.info(vm.update_uptime(force_init=True))

    if save:
        vm.save(sync_json=True, update_node_resources=True, update_storage_resources=True)
        logger.info('Server %s (%s) was saved', vm.uuid, vm)
        exc = None
        primary_ip = None

        if update_ips:
            for i, nic in enumerate(vm.json_get_nics()):
                if 'network_uuid' not in nic:
                    logger.error('Server %s NIC ID %s has no network_uuid defined', vm, i)
                    exc = KeyError('network_uuid')
                    break

                try:
                    net = Subnet.objects.get(uuid=nic['network_uuid'], dc=dc)
                except Subnet.DoesNotExist as e:
                    exc = e
                    break
                else:
                    if net.dhcp_passthrough:
                        logger.info('Server %s NIC ID %s uses an externally managed network %s', vm, i, net.name)
                        continue

                    ip, err = _vm_save_ip_from_json(vm, net, nic['ip'], allowed_ips=False)

                    if err:
                        logger.critical(err)
                        exc = SystemError(err)
                        break

                    if i == 0:
                        primary_ip = ip

                    for ipaddress in nic.get('allowed_ips', []):
                        _, err = _vm_save_ip_from_json(vm, net, ipaddress, allowed_ips=True)

                        if err:
                            logger.critical(err)
                            exc = SystemError(err)
                            break

        if exc:
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        try:
            vm_update_ipaddress_usage(vm)
        except ValueError as exc:
            vm.delete()
            logger.info('Server %s was deleted', json['uuid'])
            raise exc

        if update_dns and primary_ip:
            # This will fail silently (exception is logged)
            VmDefineNicSerializer.save_a(request, task_id, vm, primary_ip)

            if primary_ip.subnet.ptr_domain:
                # This will fail silently (exception is logged)
                VmDefineNicSerializer.save_ptr(request, task_id, vm, primary_ip, primary_ip.subnet,
                                               content=vm.hostname)

    return vm
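
# Illustrative sketch (hypothetical values): the per-NIC keys that the subnet
# auto-creation and IP bookkeeping above rely on, as found in the harvested
# VM's nics list:
#
#     {
#         "network_uuid": "c1a2b3d4-...", "nic_tag": "external0", "ip": "203.0.113.10",
#         "netmask": "255.255.255.0", "gateway": "203.0.113.1", "mtu": 1500,
#         "allowed_ips": ["203.0.113.11"]
#     }
#
# gateway, mtu and allowed_ips are optional; a NIC without network_uuid aborts
# the import and deletes the half-created server.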
def vm_status_all_cb(result, task_id, node_uuid=None):
    """
    A callback function for api.vm.status.tasks.vm_status_all.
    Compare actual VM status against cached data. If something changes emit a message and update DB.
    """
    tid = vm_status_all_cb.request.id

    if result.get('returncode') != 0:
        logger.warn('Found nonzero returncode for task %s(%s)', 'vm_status_all', node_uuid)
        return

    vms = []
    r_states = redis.pipeline()

    # The result['stdout'] contains output from `vmadm list -o uuid,state,exit_timestamp,boot_timestamp`
    # Only one property of exit_timestamp and boot_timestamp will be in the command output
    for line in result['stdout'].splitlines():
        i = line.strip().split(':', 2)  # uuid:state:exit_timestamp,boot_timestamp

        if len(i) != 3:
            logger.error('Could not parse line ("%s") from output of task %s(%s)', line, 'vm_status_all', node_uuid)
            continue

        if i[1] in Vm.STATUS_UNUSED:  # 255
            logger.info('Ignoring unusable status ("%s") from output of task %s(%s)', line, 'vm_status_all',
                        node_uuid)
            continue

        if i[1] not in Vm.STATUS_DICT:
            logger.error('Unknown status in vmadm list output ("%s") from output of task %s(%s)', line,
                         'vm_status_all', node_uuid)
            continue

        uuid = i[0]
        state = Vm.STATUS_DICT[i[1]]  # returns int
        exit_or_boot_timestamp = i[2]

        # Save vm status line into vms list for later status check because we need to pair it
        # to the redis result list
        vms.append((uuid, state, exit_or_boot_timestamp))
        r_states.get(KEY_PREFIX + Vm.status_key(uuid))

    if not vms:
        return

    vm_states = r_states.execute()

    for i, line in enumerate(vms):
        uuid, state, exit_or_boot_timestamp = line
        state_cache = vm_states[i]  # None or string
        # Check and eventually save VM's status
        _vm_status_check(tid, node_uuid, uuid, state, state_cache=state_cache,
                         change_time=_parse_timestamp(exit_or_boot_timestamp))