def test_igvm_migration_log(self, performance_value, mock_time):
    for hv in self.hvs:
        hv.dataset_obj['igvm_migration_log'].clear()
        hv.dataset_obj.commit()

    src_hv = self.vm.hypervisor.dataset_obj['hostname']
    cpu_usage_vm_src = self.vm.hypervisor.estimate_vm_cpu_usage(self.vm)
    timestamp = 1234567890

    vm_migrate(
        VM_HOSTNAME,
        offline=True,
        offline_transport='drbd',
    )

    src_hv_obj = Query(
        {'hostname': src_hv}, ['igvm_migration_log'],
    ).get()
    self.assertEqual(
        list(src_hv_obj['igvm_migration_log']),
        ['{} -{}'.format(timestamp, round(cpu_usage_vm_src))],
    )

    with _get_vm(VM_HOSTNAME) as vm:
        dest_hv_obj = Query(
            {'hostname': vm.hypervisor.dataset_obj['hostname']},
            ['igvm_migration_log'],
        ).get()
        cpu_usage_vm_dest = vm.hypervisor.estimate_vm_cpu_usage(vm)
        self.assertEqual(
            list(dest_hv_obj['igvm_migration_log']),
            ['{} +{}'.format(timestamp, round(cpu_usage_vm_dest))],
        )
def get_puppet_ca(vm):
    puppet_ca_type = Query(
        {'hostname': vm['puppet_ca']},
        ['servertype'],
    ).get()['servertype']

    if puppet_ca_type not in ['vm', 'public_domain']:
        raise ConfigError(
            'Servertype {} not supported for puppet_ca'.format(
                puppet_ca_type,
            ),
        )

    if puppet_ca_type == 'vm':
        return vm['puppet_ca']

    ca_query = Query(
        {'domain': vm['puppet_ca']},
        [{'lb_nodes': ['hostname', 'state']}],
    )
    ca_hosts = [
        lb_node['hostname']
        for res in ca_query
        for lb_node in res['lb_nodes']
        if lb_node['state'] in ['online', 'deploy_online']
    ]
    random.shuffle(ca_hosts)

    return ca_hosts[0]
def test_disk_set(self):
    if self.datacenter_type == 'kvm.dct':
        def _get_disk_hv():
            return (self.vm.hypervisor.vm_sync_from_hypervisor(
                self.vm)['disk_size_gib'])

        def _get_disk_vm():
            return parse_size(
                self.vm.run(
                    "df -h / | tail -n+2 | awk '{ print $2 }'").strip(),
                'G')
    elif self.datacenter_type == 'aws.dct':
        def _get_disk_vm():
            partition = self.vm.run('findmnt -nro SOURCE /')
            disk = self.vm.run('lsblk -nro PKNAME {}'.format(partition))
            disk_size = self.vm.run(
                'lsblk -bdnro size /dev/{}'.format(disk))
            disk_size_gib = int(disk_size) / 1024**3
            return disk_size_gib

    # Initial size same as built
    obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
    size = obj['disk_size_gib']
    if self.datacenter_type == 'kvm.dct':
        self.assertEqual(_get_disk_hv(), size)
    self.assertEqual(_get_disk_vm(), size)

    size = size + 1
    disk_set(VM_HOSTNAME, '+1')
    obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
    self.assertEqual(obj['disk_size_gib'], size)
    if self.datacenter_type == 'kvm.dct':
        self.assertEqual(_get_disk_hv(), size)
    self.assertEqual(_get_disk_vm(), size)

    size = obj['disk_size_gib'] + 1
    if self.datacenter_type == 'kvm.dct':
        disk_set(VM_HOSTNAME, '{}GB'.format(size))
        obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
        self.assertEqual(obj['disk_size_gib'], size)
        self.assertEqual(_get_disk_hv(), size)
        self.assertEqual(_get_disk_vm(), size)
    elif self.datacenter_type == 'aws.dct':
        with self.assertRaises(VMError):
            disk_set(VM_HOSTNAME, '{}GB'.format(size))

    if self.datacenter_type == 'kvm.dct':
        with self.assertRaises(Warning):
            disk_set(VM_HOSTNAME, '{}GB'.format(size))

        obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
        size = obj['disk_size_gib']
        with self.assertRaises(NotImplementedError):
            disk_set(VM_HOSTNAME, '{}GB'.format(size - 1))
        with self.assertRaises(NotImplementedError):
            disk_set(VM_HOSTNAME, '-1')
def test_reject_out_of_sync_serveradmin(self):
    obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
    obj['disk_size_gib'] += 1
    obj.commit()

    with self.assertRaises(InconsistentAttributeError):
        vm_migrate(VM_HOSTNAME)
def get_next_address(vm_net, index):
    non_vm_hosts = list(Query({
        'project_network': vm_net,
        'servertype': Not('vm'),
    }, ['intern_ip']))

    offset = 1 if len(non_vm_hosts) > 0 else 0
    subnet_levels = ceil(log(PYTEST_XDIST_WORKER_COUNT + offset, 2))
    project_network = Query({'hostname': vm_net}, ['intern_ip']).get()

    try:
        subnets = list(project_network['intern_ip'].subnets(subnet_levels))
    except ValueError:
        raise IGVMTestError(
            'Can\'t split {} into enough subnets '
            'for {} parallel tests'.format(
                vm_net, PYTEST_XDIST_WORKER_COUNT,
            )
        )

    if len(non_vm_hosts) > subnets[0].num_addresses:
        raise IGVMTestError(
            'Can\'t split {} into enough subnets '
            'for {} parallel tests'.format(
                vm_net, PYTEST_XDIST_WORKER_COUNT,
            )
        )

    return subnets[PYTEST_XDIST_WORKER + 1][index]
def main():
    args = parse_args(sys.argv[1:])

    attribute_ids_to_print = args.attr if args.attr else ['hostname']
    attribute_ids_to_fetch = list(attribute_ids_to_print)
    if args.reset:
        attribute_ids_to_fetch.extend(args.reset)
    if args.update:
        attribute_ids_to_fetch.extend(u[0] for u in args.update)

    # TODO: Avoid .join()
    filters = parse_query(' '.join(args.query))
    query = Query(filters, attribute_ids_to_fetch, args.order)

    if args.one and len(query) > 1:
        raise Exception(
            'Expecting exactly one server, found {} servers'
            .format(len(query))
        )

    for server in query:
        if args.reset:
            apply_resets(server, args.reset)
        if args.update:
            apply_updates(server, args.update)
        print_server(server, attribute_ids_to_print)

    if args.reset or args.update:
        query.commit()
def test_rollback(self):
    obj = Query({'hostname': VM_HOSTNAME}, ['puppet_environment']).get()
    obj['puppet_environment'] = 'doesnotexist'
    obj.commit()

    with self.assertRaises(VMError):
        vm_build(VM_HOSTNAME)

    self.check_vm_absent()
def test_rollback(self):
    # TODO: consider the usage of self.vm_obj instead of new Query
    obj = Query({'hostname': VM_HOSTNAME}, ['puppet_environment']).get()
    obj['puppet_environment'] = 'doesnotexist'
    obj.commit()

    with self.assertRaises(VMError):
        vm_build(VM_HOSTNAME)

    self.check_vm_absent()
def test_rollback_drbd(self):
    obj = Query({'hostname': VM_HOSTNAME}, ['puppet_environment']).get()
    obj['puppet_environment'] = 'doesnotexist'
    obj.commit()

    with self.assertRaises(IGVMError):
        vm_migrate(
            VM_HOSTNAME,
            offline=True,
            run_puppet=True,
            offline_transport='drbd',
        )

    self.check_vm_present()
def test_rollback_netcat(self):
    # TODO: consider the usage of self.vm_obj instead of new Query
    obj = Query({'hostname': VM_HOSTNAME}, ['puppet_environment']).get()
    obj['puppet_environment'] = 'doesnotexist'
    obj.commit()

    with self.assertRaises(IGVMError):
        vm_migrate(
            VM_HOSTNAME,
            offline=True,
            run_puppet=True,
            offline_transport='netcat',
        )

    self.check_vm_present()
def test_new_address(self):
    # We don't have a way to ask for a new IP address from Serveradmin
    # and lock it for us. The method below will usually work fine.
    # When it starts failing, we must develop a retry method.
    new_address = next(
        Query({'hostname': VM_NET}, ['intern_ip']).get_free_ip_addrs())

    change_address(VM_HOSTNAME, new_address, offline=True)

    obj = Query({'hostname': VM_HOSTNAME}, ['intern_ip']).get()
    self.assertEqual(obj['intern_ip'], new_address)
    with _get_vm(VM_HOSTNAME) as vm:
        vm.run(cmd('ip a | grep {}', new_address))
    self.check_vm_present()
def tearDown(self):
    """Forcibly remove the current test's VM from all HVs"""
    for hv in HYPERVISORS:
        hv.get_storage_pool().refresh()
        for domain in hv.conn().listAllDomains():
            if domain.name() == self.uid_name:
                if domain.state()[0] == VIR_DOMAIN_RUNNING:
                    domain.destroy()
                domain.undefine()
        for vol_name in hv.get_storage_pool().listVolumes():
            if vol_name == self.uid_name:
                hv.run(
                    'mount | grep -q "/dev/{vg}/{vm}" && '
                    'umount /dev/{vg}/{vm} || true;'.format(
                        vg=VG_NAME, vm=self.uid_name))
                hv.get_storage_pool().storageVolLookupByName(
                    vol_name,
                ).delete()

    # Clean up certs after tearing down the VM
    obj = Query({
        'hostname': VM_HOSTNAME
    }, [
        'hostname',
        'puppet_ca',
    ]).get()
    clean_cert(obj)
def change_address(
    vm_hostname, new_address,
    offline=False, migrate=False,
    allow_reserved_hv=False,
    offline_transport='drbd',
):
    """Change a VM's IP address

    This is done by changing the data in Serveradmin, running Puppet in
    the VM and rebooting it.
    """
    if not offline:
        raise IGVMError('IP address change can only be performed offline')

    with _get_vm(vm_hostname) as vm:
        new_address = ip_address(new_address)

        if vm.dataset_obj['intern_ip'] == new_address:
            raise ConfigError('New IP address is the same as the old one!')

        if not vm.hypervisor.get_vlan_network(new_address) and not migrate:
            err = 'Current hypervisor does not support new subnet!'
            raise ConfigError(err)

        new_network = Query(
            {
                'servertype': 'route_network',
                'state': 'online',
                'network_type': 'internal',
                'intern_ip': Contains(new_address),
            }
        ).get()['hostname']

        vm_was_running = vm.is_running()

        with Transaction() as transaction:
            if vm_was_running:
                vm.shutdown(
                    transaction=transaction,
                    check_vm_up_on_transaction=False,
                )
            vm.change_address(
                new_address, new_network, transaction=transaction,
            )

            if migrate:
                vm_migrate(
                    vm_object=vm,
                    run_puppet=True,
                    offline=True,
                    no_shutdown=True,
                    allow_reserved_hv=allow_reserved_hv,
                    offline_transport=offline_transport,
                )
            else:
                vm.hypervisor.mount_vm_storage(vm, transaction=transaction)
                vm.run_puppet()
                vm.hypervisor.redefine_vm(vm)
                vm.hypervisor.umount_vm_storage(vm)

            if vm_was_running:
                vm.start()
def test_vcpu_set(self):
    def _get_cpus_hv():
        data = self.vm.hypervisor.vm_sync_from_hypervisor(self.vm)
        return data['num_cpu']

    def _get_cpus_vm():
        return int(
            self.vm.run('cat /proc/cpuinfo | grep vendor_id | wc -l')
            .strip()
        )

    # Online
    self.assertEqual(_get_cpus_hv(), 2)
    self.assertEqual(_get_cpus_vm(), 2)
    obj = Query({'hostname': VM_HOSTNAME}, ['num_cpu']).get()
    self.assertEqual(obj['num_cpu'], 2)

    vcpu_set(VM_HOSTNAME, 3)
    self.assertEqual(_get_cpus_hv(), 3)
    self.assertEqual(_get_cpus_vm(), 3)
    obj = Query({'hostname': VM_HOSTNAME}, ['num_cpu']).get()
    self.assertEqual(obj['num_cpu'], 3)

    with self.assertRaises(Warning):
        vcpu_set(VM_HOSTNAME, 3)

    # Online reduce not implemented yet on KVM
    with self.assertRaises(IGVMError):
        vcpu_set(VM_HOSTNAME, 2)

    # Offline
    vcpu_set(VM_HOSTNAME, 2, offline=True)
    self.assertEqual(_get_cpus_hv(), 2)
    self.assertEqual(_get_cpus_vm(), 2)

    # Impossible amount
    with self.assertRaises(IGVMError):
        vcpu_set(VM_HOSTNAME, 9001)
    with self.assertRaises(IGVMError):
        vcpu_set(VM_HOSTNAME, 0, offline=True)
    with self.assertRaises(IGVMError):
        vcpu_set(VM_HOSTNAME, -5)
    with self.assertRaises(IGVMError):
        vcpu_set(VM_HOSTNAME, -5, offline=True)
def setUp(self):
    self.hardware_models = set([
        x['hardware_model'] for x in Query({
            'servertype': 'hypervisor',
            'project': 'ndco',
            'state': Any('online', 'online_reserved'),
        }, ['hardware_model'])
    ])
def setUp(self): """Initialize VM object before every test Get object from Serveradmin and initialize it to safe defaults. Don't assign VM to any of HVs yet! """ # igvm operates always on hostname of VM and queries it from # Serveradmin whenever it needs. Because of that we must never store # any igvm objects and query things anew each time. obj = Query({ 'hostname': VM_HOSTNAME }, [ 'hostname', 'state', 'backup_disabled', 'disk_size_gib', 'memory', 'num_cpu', 'os', 'environment', 'no_monitoring', 'hypervisor', 'repositories', 'puppet_environment', 'puppet_ca', ]).get() # Fill in defaults in Serveradmin obj['state'] = 'online' obj['disk_size_gib'] = 3 obj['memory'] = 2048 obj['num_cpu'] = 2 obj['os'] = 'stretch' obj['environment'] = 'testing' obj['no_monitoring'] = True obj['hypervisor'] = None obj['repositories'] = [ 'int:basestretch:stable', 'int:innogames:stable', ] obj['puppet_environment'] = None obj['backup_disabled'] = True obj.commit() clean_cert(obj) self.uid_name = '{}_{}'.format(obj['object_id'], obj['hostname'])
def test_vm_define(self):
    vm_dataset_obj = Query({'hostname': VM_HOSTNAME}, VM_ATTRIBUTES).get()
    hv = Hypervisor(vm_dataset_obj['hypervisor'])
    vm = VM(vm_dataset_obj, hv)

    vm_stop(VM_HOSTNAME)
    hv.undefine_vm(vm, keep_storage=True)

    self.check_vm_absent()
    vm_define(VM_HOSTNAME)
    self.check_vm_present()
def test_disk_set(self):
    def _get_disk_hv():
        return (self.vm.hypervisor.vm_sync_from_hypervisor(
            self.vm)['disk_size_gib'])

    def _get_disk_vm():
        return parse_size(
            self.vm.run(
                "df -h / | tail -n+2 | awk '{ print $2 }'").strip(),
            'G')

    # Initial size same as built
    obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
    size = obj['disk_size_gib']
    self.assertEqual(_get_disk_hv(), size)
    self.assertEqual(_get_disk_vm(), size)

    size = size + 1
    disk_set(VM_HOSTNAME, '+1')
    obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
    self.assertEqual(obj['disk_size_gib'], size)
    self.assertEqual(_get_disk_hv(), size)
    self.assertEqual(_get_disk_vm(), size)

    size = 8
    disk_set(VM_HOSTNAME, '{}GB'.format(size))
    obj = Query({'hostname': VM_HOSTNAME}, ['disk_size_gib']).get()
    self.assertEqual(obj['disk_size_gib'], size)
    self.assertEqual(_get_disk_hv(), size)
    self.assertEqual(_get_disk_vm(), size)

    with self.assertRaises(Warning):
        disk_set(VM_HOSTNAME, '{}GB'.format(size))

    with self.assertRaises(NotImplementedError):
        disk_set(VM_HOSTNAME, '{}GB'.format(size - 1))

    with self.assertRaises(NotImplementedError):
        disk_set(VM_HOSTNAME, '-1')
def test_sync(self):
    obj = Query(
        {'hostname': VM_HOSTNAME}, ['disk_size_gib', 'memory'],
    ).get()
    expected_disk_size = obj['disk_size_gib']
    obj['disk_size_gib'] += 10

    expected_memory = obj['memory']
    obj['memory'] += 1024

    obj.commit()

    vm_sync(VM_HOSTNAME)

    obj = Query(
        {'hostname': VM_HOSTNAME}, ['disk_size_gib', 'memory'],
    ).get()
    self.assertEqual(obj['memory'], expected_memory)
    self.assertEqual(obj['disk_size_gib'], expected_disk_size)

    # Shouldn't do anything, but also shouldn't fail
    vm_sync(VM_HOSTNAME)
def _get_vm(hostname, unlock=True, allow_retired=False):
    """Get a server from Serveradmin by hostname to return a VM object

    The function accepts hostnames of any length as long as they resolve
    to a single server in Serveradmin.
    """
    object_id = Query({
        'hostname': Any(hostname, StartsWith(hostname + '.')),
        'servertype': 'vm',
    }, ['object_id']).get()['object_id']

    def vm_query():
        return Query({
            'object_id': object_id,
        }, VM_ATTRIBUTES).get()

    dataset_obj = vm_query()

    hypervisor = None
    if dataset_obj['hypervisor']:
        hypervisor = Hypervisor(dataset_obj['hypervisor'])

        # XXX: Ugly hack until adminapi supports modifying joined objects
        dict.__setitem__(
            dataset_obj, 'hypervisor', dataset_obj['hypervisor']['hostname']
        )

    vm = VM(dataset_obj, hypervisor)
    vm.acquire_lock()

    try:
        if not allow_retired and dataset_obj['state'] == 'retired':
            raise InvalidStateError(
                'VM {} is in state retired, I refuse to work on it!'.format(
                    hostname,
                )
            )
        yield vm
    except (Exception, KeyboardInterrupt):
        VM(vm_query(), hypervisor).release_lock()
        raise
    else:
        # We re-fetch the VM because we can't risk committing any other
        # changes to the VM than unlocking. There can be changes from
        # failed things, like setting memory.
        # Most operations require unlocking; the only exception is deleting
        # a VM. After the object is deleted, it can't be unlocked.
        if unlock:
            VM(vm_query(), hypervisor).release_lock()
def clean_aws(vm_hostname):
    def _get_instance_status():
        response = ec2.describe_instances(
            Filters=[
                {
                    'Name': 'instance-state-code',
                    'Values': [
                        str(AWS_RETURN_CODES['pending']),
                        str(AWS_RETURN_CODES['running']),
                        str(AWS_RETURN_CODES['shutting-down']),
                        str(AWS_RETURN_CODES['terminated']),
                        str(AWS_RETURN_CODES['stopping']),
                        str(AWS_RETURN_CODES['stopped']),
                    ]
                },
            ],
            InstanceIds=[obj['aws_instance_id']],
            DryRun=False)['Reservations'][0]['Instances'][0]['State']['Code']
        return int(response)

    try:
        obj = Query({'hostname': vm_hostname}, ['aws_instance_id']).get()
    except DatasetError:
        # No object to clean up
        return

    if not obj['aws_instance_id']:
        return

    timeout = 120
    ec2 = boto3.client('ec2')
    try:
        ec2.stop_instances(
            InstanceIds=[obj['aws_instance_id']],
            DryRun=False
        )
    except ClientError:
        pass  # Not running

    for _ in range(timeout):
        instance_status = _get_instance_status()
        if AWS_RETURN_CODES['stopped'] == instance_status:
            break
        sleep(1)

    ec2.terminate_instances(InstanceIds=[obj['aws_instance_id']])
    for _ in range(timeout):
        instance_status = _get_instance_status()
        if AWS_RETURN_CODES['terminated'] == instance_status:
            break
        sleep(1)
def get_next_address(vm_net, index):
    global PYTEST_XDIST_WORKER, PYTEST_XDIST_WORKER_COUNT

    subnet_levels = ceil(log(PYTEST_XDIST_WORKER_COUNT, 2))
    project_network = Query({'hostname': vm_net}, ['intern_ip']).get()

    try:
        subnets = project_network['intern_ip'].subnets(subnet_levels)
    except ValueError:
        raise Exception(
            ('Can\'t split {} into enough subnets '
             'for {} parallel tests').format(
                vm_net, PYTEST_XDIST_WORKER_COUNT)
        )

    return [s for s in subnets][PYTEST_XDIST_WORKER][index]
def _get_hypervisor(hostname, allow_reserved=False):
    """Get a server from Serveradmin by hostname to return Hypervisor object"""
    dataset_obj = Query({
        'hostname': hostname,
        'servertype': 'hypervisor',
    }, HYPERVISOR_ATTRIBUTES).get()

    if not allow_reserved and dataset_obj['state'] == 'online_reserved':
        raise InvalidStateError('Server "{0}" is online_reserved.'.format(
            dataset_obj['hostname']))

    hypervisor = Hypervisor(dataset_obj)
    hypervisor.acquire_lock()

    try:
        yield hypervisor
    finally:
        hypervisor.release_lock()
def vm_define(vm_hostname):
    """Define a VM on its hypervisor

    This command executes the necessary code to just define the VM, i.e.
    create the domain.xml for libvirt. It is a convenience command to
    restore a domain in case you lost your SSH session while the domain
    was not defined.

    :param vm_hostname: hostname of the VM
    """
    vm_dataset_obj = Query({'hostname': vm_hostname}, VM_ATTRIBUTES).get()
    hv = Hypervisor(vm_dataset_obj['hypervisor'])
    vm = VM(vm_dataset_obj, hv)

    hv.define_vm(vm)
    vm.start()

    log.info('VM {} defined and booted on {}'.format(
        vm_hostname, vm_dataset_obj['hypervisor']['hostname']))
def _get_best_hypervisor(vm, hypervisor_states, offline=False):
    hypervisors = (Hypervisor(o) for o in Query(
        {
            'servertype': 'hypervisor',
            'environment': environ.get('IGVM_MODE', 'production'),
            'vlan_networks': vm.dataset_obj['route_network'],
            'state': Any(*hypervisor_states),
        },
        HYPERVISOR_ATTRIBUTES))

    for hypervisor in sorted_hypervisors(
        HYPERVISOR_PREFERENCES, vm, hypervisors,
    ):
        # The actual resources are not checked during sorting for
        # performance. We need to validate the hypervisor using the actual
        # values before the final decision.
        try:
            hypervisor.acquire_lock()
        except InvalidStateError as error:
            log.warning(error)
            continue

        try:
            hypervisor.check_vm(vm, offline)
        except libvirtError as error:
            hypervisor.release_lock()
            log.warning('Preferred hypervisor "{}" is skipped: {}'.format(
                hypervisor, error))
            continue
        except HypervisorError as error:
            hypervisor.release_lock()
            log.warning('Preferred hypervisor "{}" is skipped: {}'.format(
                hypervisor, error))
            continue

        try:
            yield hypervisor
        finally:
            hypervisor.release_lock()
        break
    else:
        raise IGVMError('Cannot find a hypervisor')
def clean_all(route_network, datacenter_type, vm_hostname=None):
    # Cancelled builds are forcefully killed by Jenkins. They did not have
    # the opportunity to clean up so we forcibly destroy everything found
    # on any HV which would interrupt our work in the current
    # JENKINS_EXECUTOR.
    hvs = [Hypervisor(o) for o in Query({
        'servertype': 'hypervisor',
        'environment': 'testing',
        'vlan_networks': route_network,
        'state': 'online',
    }, HYPERVISOR_ATTRIBUTES)]

    # If a VM hostname is given, only that will be cleaned from HVs.
    if vm_hostname is None:
        pattern = '^([0-9]+_)?(vm-rename-)?{}$'.format(
            VM_HOSTNAME_PATTERN.format(JENKINS_EXECUTOR, '[0-9]+'),
        )
    else:
        pattern = '^([0-9]+_)?(vm-rename-)?{}$'.format(vm_hostname)

    # Clean HVs one by one.
    if datacenter_type == 'kvm.dct':
        for hv in hvs:
            clean_hv(hv, pattern)

    if datacenter_type == 'aws.dct':
        clean_aws(vm_hostname)

    # Remove all connected Serveradmin objects.
    clean_serveradmin({'hostname': Regexp(pattern)})

    # Try to remove VMs with the same IP in any case because we use custom
    # logic to assign them and we want to avoid IP address conflicts.
    # Index 1 is usually used for the test's subject VM,
    # 2 might be used for testing IP change.
    ips = [get_next_address(VM_NET, i) for i in [1, 2]]
    clean_serveradmin({'intern_ip': Any(*ips)})
def _get_best_hypervisor(
    vm,
    hypervisor_states,
    offline=False,
    enforce_vm_env=False,
    soft_preferences=False,
):
    hv_filter = {
        'servertype': 'hypervisor',
        'vlan_networks': vm.route_network,
        'state': Any(*hypervisor_states),
    }

    # Enforce IGVM_MODE used for tests
    if 'IGVM_MODE' in environ:
        hv_filter['environment'] = environ.get('IGVM_MODE')
    else:
        if enforce_vm_env:
            hv_filter['environment'] = vm.dataset_obj['environment']

    # Get all (theoretically) possible HVs sorted by HV preferences
    hypervisors = (
        Hypervisor(o) for o in Query(hv_filter, HYPERVISOR_ATTRIBUTES)
    )
    hypervisors = sort_by_preference(
        vm,
        HYPERVISOR_PREFERENCES,
        hypervisors,
        soft_preferences,
    )

    possible_hvs = OrderedDict()
    for possible_hv in hypervisors:
        possible_hvs[str(possible_hv)] = possible_hv

    # Check all HVs in parallel. This will check live data on those HVs
    # but without locking them. This allows us to do a real quick first
    # filtering round. Below follows another one on the filtered HVs only.
    chunk_size = 10
    iterations = math.ceil(len(possible_hvs) / chunk_size)
    found_hv = None

    # We are checking HVs in chunks. This will enable us to select HVs early
    # without looping through all of them if unnecessary.
    for i in range(iterations):
        start_idx = i * chunk_size
        end_idx = start_idx + chunk_size
        hv_chunk = dict(list(possible_hvs.items())[start_idx:end_idx])

        results = parallel(
            _check_vm,
            identifiers=list(hv_chunk.keys()),
            args=[
                [possible_hv, vm, offline]
                for possible_hv in hv_chunk.values()
            ],
            workers=chunk_size,
        )

        # Remove unsupported HVs from the list
        for checked_hv, success in results.items():
            if not success:
                hv_chunk.pop(checked_hv)

        # Do another checking iteration, this time with HV locking
        for possible_hv in hv_chunk.values():
            try:
                possible_hv.acquire_lock()
            except InvalidStateError as e:
                log.warning(e)
                continue

            if not _check_vm(possible_hv, vm, offline):
                possible_hv.release_lock()
                continue

            # HV found
            found_hv = possible_hv
            break

        if found_hv:
            break

    if not found_hv:
        # No supported HV was found
        raise IGVMError(
            'Automatically finding the best Hypervisor failed! '
            'Cannot find a suitable hypervisor with the preferences and '
            'the Query: {}'.format(hv_filter))

    # Yield the hypervisor locked for working on it
    try:
        log.info('Picked {} as destination Hypervisor'.format(str(found_hv)))
        yield found_hv
    finally:
        found_hv.release_lock()
def vm_query():
    return Query({
        'object_id': object_id,
    }, VM_ATTRIBUTES).get()
def clean_serveradmin(filters):
    Query(filters).delete().commit()
def clean_aws(vm_hostname):
    def _get_instance_status(
        ec2_client,
        instance_id: Optional[str] = None,
        name_filter: Optional[str] = None,
    ) -> Optional[dict]:
        response = ec2_client.describe_instances(
            Filters=[
                {
                    'Name': 'tag:Name',
                    'Values': [
                        '*' if not name_filter else f'?{name_filter}*'
                    ]
                },
                {
                    'Name': 'instance-state-code',
                    'Values': [
                        str(AWS_RETURN_CODES['pending']),
                        str(AWS_RETURN_CODES['running']),
                        str(AWS_RETURN_CODES['shutting-down']),
                        str(AWS_RETURN_CODES['terminated']),
                        str(AWS_RETURN_CODES['stopping']),
                        str(AWS_RETURN_CODES['stopped']),
                    ]
                },
            ],
            InstanceIds=[instance_id] if instance_id else [],
            DryRun=False)

        if not response['Reservations']:
            return

        instance_status = response['Reservations'][0]['Instances'][0]
        return instance_status

    def _wait_for_state_reached(ec2_client, instance_id: str, state: str,
                                timeout: int) -> None:
        for _ in range(timeout):
            instance_status = _get_instance_status(
                ec2_client, instance_id=instance_id)
            status_code = int(instance_status['State']['Code'])
            if AWS_RETURN_CODES[state] == status_code:
                break
            sleep(1)

    ec2 = boto3.client('ec2')
    timeout = 120

    try:
        obj = Query({'hostname': vm_hostname}, ['aws_instance_id']).get()
    except DatasetError:
        instance_status = _get_instance_status(
            ec2_client=ec2, name_filter=vm_hostname)
        if not instance_status:
            return

        instance_id = instance_status['InstanceId']
        if len(instance_id) == 0:
            return

        ec2.terminate_instances(InstanceIds=[instance_id])
        _wait_for_state_reached(ec2_client=ec2, instance_id=instance_id,
                                state='terminated', timeout=timeout)
        return

    if not obj['aws_instance_id']:
        return

    try:
        ec2.stop_instances(
            InstanceIds=[obj['aws_instance_id']],
            DryRun=False
        )
        _wait_for_state_reached(ec2_client=ec2,
                                instance_id=obj['aws_instance_id'],
                                state='stopped', timeout=timeout)

        ec2.terminate_instances(InstanceIds=[obj['aws_instance_id']])
        _wait_for_state_reached(ec2_client=ec2,
                                instance_id=obj['aws_instance_id'],
                                state='terminated', timeout=timeout)
    except ClientError as e:
        if not any(
            error in str(e)
            for error in ['InvalidInstanceID', 'IncorrectInstanceState']
        ):
            raise