def tearDown(self):
    """Clean up the test VM, including its renamed counterpart"""
    clean_cert(self.vm_obj)
    clean_all(self.route_network, self.datacenter_type, VM_HOSTNAME)

    # As in setUp() we need to take care of the renamed hosts.
    vm = self.vm.dataset_obj

    # Depending on where the test aborted, the VM might still carry the
    # renamed hostname; if not, derive it so its leftovers get cleaned too.
    if 'vm-rename' not in vm['hostname']:
        vm['hostname'] = RenameTest._get_renamed_hostname(vm['hostname'])

    clean_cert(vm)
    clean_all(self.route_network, self.datacenter_type, vm['hostname'])
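# For readability, a minimal sketch of the helper used above. This is an
# assumption based on the 'vm-rename' check in tearDown(), not the actual
# RenameTest implementation; the real naming scheme may differ.
@staticmethod
def _get_renamed_hostname(hostname):
    # Insert the rename marker into the host part of the FQDN (assumed
    # scheme), e.g. 'igvm-vm.example.com' -> 'igvm-vm-rename.example.com'.
    host, _, domain = hostname.partition('.')
    renamed = host + '-rename'
    return '{}.{}'.format(renamed, domain) if domain else renamed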
def setUp(self): """Initialize VM object before every test Get object from Serveradmin and initialize it to safe defaults. Don't assign VM to any of HVs yet! """ super().setUp() # Check that enough HVs are available. self.route_network = Query( { 'hostname': VM_NET }, ['route_network'], ).get()['route_network'] self.datacenter_type = Query( { 'hostname': self.route_network }, ['datacenter_type'], ).get()['datacenter_type'] self.hvs = [ Hypervisor(o) for o in Query( { 'environment': 'testing', 'servertype': 'hypervisor', 'state': 'online', 'vlan_networks': self.route_network, }, HYPERVISOR_ATTRIBUTES) ] if self.datacenter_type == 'kvm.dct': assert len(self.hvs) >= 2, 'Not enough testing hypervisors found' # Cleanup all leftovers from previous tests or failures. clean_all(self.route_network, self.datacenter_type, VM_HOSTNAME) # Create subject VM object self.vm_obj = Query().new_object('vm') self.vm_obj['backup_disabled'] = True self.vm_obj['disk_size_gib'] = 3 self.vm_obj['environment'] = 'testing' self.vm_obj['hostname'] = VM_HOSTNAME self.vm_obj['hypervisor'] = None self.vm_obj['intern_ip'] = get_next_address(VM_NET, 1) self.vm_obj['memory'] = 2048 self.vm_obj['no_monitoring'] = True self.vm_obj['num_cpu'] = 2 self.vm_obj['os'] = 'buster' self.vm_obj['project'] = 'test' self.vm_obj['puppet_environment'] = None self.vm_obj['puppet_ca'] = 'testing-puppetca.innogames.de' self.vm_obj['puppet_master'] = 'puppet-lb.test.innogames.net' self.vm_obj['repositories'] = [ 'int:basebuster:stable', 'int:innogames:stable', ] self.vm_obj['state'] = 'online' if self.datacenter_type == 'aws.dct': self.vm_obj['aws_image_id'] = 'ami-0e2b90ca04cae8da5' # buster self.vm_obj['aws_instance_type'] = 't2.micro' self.vm_obj['aws_key_name'] = 'eu-central-1-key' self.vm_obj['disk_size_gib'] = 8 self.vm_obj.commit() # It would be enough to create SGs in AWS once but with parallel runs # we can't really test if sync has already been performed. if self.datacenter_type == 'aws.dct': fw_api = api.get('firewall') fw_api.update_config([self.route_network]) self.uid_name = '{}_{}'.format( self.vm_obj['object_id'], self.vm_obj['hostname'], ) # Make sure we can make a fresh build clean_cert(self.vm_obj)
def tearDown(self): """Forcibly remove current test's VM from all HVs""" super().tearDown() clean_cert(self.vm_obj) clean_all(self.route_network, self.datacenter_type, VM_HOSTNAME)