def test_force_delete_inst_before_deferred_cleanup(
        self, set_recl_inst_interv, instances, volumes):
    """Force delete of instance before deferred cleanup

    Actions:
    1. Update '/etc/nova/nova.conf' with long 'reclaim_instance_interval'
       and restart Nova on all nodes;
    2. Create net and subnet;
    3. Create and run two instances (vm1, vm2) inside same net;
    4. Create a volume and attach it to an instance vm1;
    5. Delete instance vm1 and check that it's in 'SOFT_DELETE' state;
    6. Delete instance vm1 with 'force' option and check that it's not
       present;
    7. Check that volume is released now and has an Available state;
    8. Attach the volume to vm2 instance to ensure that the volume's
       reuse doesn't call any errors.
    """
    status_timeout = 60  # (sec) how long to wait for a status transition

    first_vm, second_vm = instances

    # Build a volume and plug it into the first instance
    vol = common_functions.create_volume(
        self.os_conn.cinder, image_id=None)
    self.os_conn.nova.volumes.create_server_volume(
        server_id=first_vm.id, volume_id=vol.id, device='/dev/vdb')
    volumes.append(vol)

    # Soft-delete the first instance and confirm the SOFT_DELETED state
    common_functions.delete_instance(self.os_conn.nova, first_vm.id)
    assert first_vm not in self.os_conn.get_servers()
    common_functions.wait(
        lambda: self.os_conn.server_status_is(first_vm, 'SOFT_DELETED'),
        timeout_seconds=status_timeout, sleep_seconds=5,
        waiting_for='instance {0} changes status to SOFT_DELETED'.format(
            first_vm.name))

    # Force-delete the same instance and wait until it is really gone
    common_functions.delete_instance(
        self.os_conn.nova, first_vm.id, force=True)
    common_functions.wait(
        lambda: self.os_conn.is_server_deleted(first_vm.id),
        timeout_seconds=status_timeout, sleep_seconds=5,
        waiting_for='instance {0} to be forced deleted'.format(
            first_vm.name))

    # The volume must be released and available again
    assert common_functions.check_volume_status(
        self.os_conn.cinder, vol.id, 'available', 1)
    # ... and carry no attachments any more
    assert self.os_conn.cinder.volumes.get(vol.id).attachments == []

    # Re-attach the volume to the second instance
    self.os_conn.nova.volumes.create_server_volume(
        server_id=second_vm.id, volume_id=vol.id, device='/dev/vdb')
    # Volume must go 'in-use' and point at the second instance
    assert common_functions.check_volume_status(
        self.os_conn.cinder, vol.id, 'in-use', 1)
    vol = self.os_conn.cinder.volumes.get(vol.id)
    assert vol.attachments[0]['server_id'] == second_vm.id
def test_restore_deleted_instance(
        self, set_recl_inst_interv, instances, volumes):
    """Restore previously deleted instance.

    Actions:
    1. Update '/etc/nova/nova.conf' with 'reclaim_instance_interval=86400'
       and restart Nova on all nodes;
    2. Create net and subnet;
    3. Create and run two instances (vm1, vm2) inside same net;
    4. Check that ping are successful between vms;
    5. Create a volume and attach it to an instance vm1;
    6. Delete instance vm1 and check that it's in 'SOFT_DELETE' state;
    7. Restore vm1 instance and check that it's in 'ACTIVE' state;
    8. Check that ping are successful between vms;
    """
    timeout = 60  # (sec) timeout to wait instance for status change

    # Create two vms
    vm1, vm2 = instances

    # Ping one vm from another.
    # FIX: dict.values() returns a non-indexable view object on Python 3,
    # so `.values()[0]` raises TypeError there; wrapping in list() keeps
    # the test working on both Python 2 and Python 3.
    vm1_ip = list(self.os_conn.get_nova_instance_ips(vm1).values())[0]
    vm2_ip = list(self.os_conn.get_nova_instance_ips(vm2).values())[0]
    network_checks.check_ping_from_vm(
        self.env, self.os_conn, vm1, ip_to_ping=vm2_ip, timeout=60)

    # Create a volume and attach it to an instance vm1
    volume = common_functions.create_volume(
        self.os_conn.cinder, image_id=None)
    self.os_conn.nova.volumes.create_server_volume(
        server_id=vm1.id, volume_id=volume.id, device='/dev/vdb')
    volumes.append(volume)

    # Delete instance vm1 and check that it's in "SOFT_DELETED" state
    common_functions.delete_instance(self.os_conn.nova, vm1.id)
    assert vm1 not in self.os_conn.get_servers()
    common_functions.wait(
        lambda: self.os_conn.server_status_is(vm1, 'SOFT_DELETED'),
        timeout_seconds=timeout, sleep_seconds=5,
        waiting_for='instance {0} changes status to SOFT_DELETED'.format(
            vm1.name))

    # Restore vm1 instance and check that it's in "ACTIVE" state now
    resp = self.os_conn.nova.servers.restore(vm1.id)
    assert resp[0].ok
    common_functions.wait(
        lambda: self.os_conn.is_server_active(vm1.id),
        timeout_seconds=timeout, sleep_seconds=5,
        waiting_for='instance {0} changes status to ACTIVE'.format(
            vm1.name))

    # Ping one vm from another (restored vm1 must be reachable again)
    network_checks.check_ping_from_vm(
        self.env, self.os_conn, vm2, ip_to_ping=vm1_ip, timeout=60)
def tearDown(self):
    """Remove every resource the test registered, in dependency order."""
    # Servers go first so that attached volumes become detachable.
    for server in self.instances:
        common_functions.delete_instance(self.nova, server)
    self.instances = []
    # Floating IPs are independent of the rest.
    for address in self.floating_ips:
        common_functions.delete_floating_ip(self.nova, address)
    self.floating_ips = []
    # Volumes can be removed once no server holds them.
    for vol in self.volumes:
        self.os_conn.delete_volume(vol)
    self.volumes = []
    # Custom flavors created by the test.
    for flv in self.flavors:
        common_functions.delete_flavor(self.nova, flv.id)
    self.flavors = []
    # Keypairs created by the test.
    for keypair in self.keys:
        common_functions.delete_keys(self.nova, keypair.name)
    self.keys = []
    # Finally drop the security group shared by the instances.
    self.os_conn.delete_security_group(self.sec_group)
def tearDown(self):
    # Clean up all resources registered by the test, in dependency order:
    # instances first (releases volume attachments), then floating IPs,
    # volumes, flavors and keypairs, and finally the security group.
    for inst in self.instances:
        common_functions.delete_instance(self.nova, inst)
    self.instances = []
    for fip in self.floating_ips:
        common_functions.delete_floating_ip(self.nova, fip)
    self.floating_ips = []
    for volume in self.volumes:
        common_functions.delete_volume(self.cinder, volume)
    self.volumes = []
    for flavor in self.flavors:
        common_functions.delete_flavor(self.nova, flavor.id)
    self.flavors = []
    for key in self.keys:
        common_functions.delete_keys(self.nova, key.name)
    self.keys = []
    # NOTE(review): passes the group object, not `.id` — presumably the
    # nova client accepts either form; confirm against the client version.
    self.nova.security_groups.delete(self.sec_group)
def test_nova_launch_v_m_from_volume_with_all_flavours(self):
    """Boot an instance from a bootable volume once per available flavor.

    For this test we need node with compute role: 8 VCPUs, 16+GB RAM
    and 160+GB disk for any compute

    Steps:
    1. Create bootable volume
    1. Create a floating ip
    2. Create an instance from an image with some flavor
    3. Add the floating ip to the instance
    4. Ping the instance by the floating ip
    5. Delete the floating ip
    6. delete the instance
    7. Repeat all steps for all types of flavor
    """
    # Locate the TestVM image and the first internal network
    test_vm_images = [image.id for image in self.nova.images.list()
                      if image.name == 'TestVM']
    image_id = test_vm_images[0]
    all_networks = self.neutron.list_networks()['networks']
    internal_nets = [item['id'] for item in all_networks
                     if not item['router:external']]
    net = internal_nets[0]
    # One bootable volume is shared by every boot attempt below
    boot_volume = common_functions.create_volume(self.cinder, image_id)
    self.volumes.append(boot_volume)
    bdm = {'vda': boot_volume.id}
    for flavor in self.nova.flavors.list():
        fip = self.nova.floating_ips.create()
        self.floating_ips.append(fip)
        known_ips = [fip_info.ip
                     for fip_info in self.nova.floating_ips.list()]
        self.assertIn(fip.ip, known_ips)
        inst = common_functions.create_instance(
            self.nova, "inst_543360_{}".format(flavor.name), flavor.id,
            net, [self.sec_group.name], block_device_mapping=bdm,
            inst_list=self.instances)
        inst.add_floating_ip(fip.ip)
        self.assertTrue(
            common_functions.check_ip(self.nova, inst.id, fip.ip))
        # Ping first, delete, then assert, so the instance is cleaned
        # up even when the ping failed
        ping = common_functions.ping_command(fip.ip)
        common_functions.delete_instance(self.nova, inst.id)
        self.assertTrue(ping, "Instance is not reachable")
def tearDown(self):
    # Delete the instance/image/flavor only when the test created them;
    # the flags/handles are set by setUp and the test body.
    if self.instance is not None:
        common_functions.delete_instance(self.nova, self.instance.id)
    if self.image is not None:
        common_functions.delete_image(self.glance, self.image.id)
    if self.our_own_flavor_was_created:
        common_functions.delete_flavor(self.nova, self.expected_flavor_id)
    # delete the floating ip
    self.nova.floating_ips.delete(self.floating_ip)
    # delete the security group (its rules first, then the group itself)
    self.nova.security_group_rules.delete(self.icmp_rule)
    self.nova.security_group_rules.delete(self.tcp_rule)
    self.nova.security_groups.delete(self.the_security_group.id)
    # delete security rules from the 'default' group
    self.nova.security_group_rules.delete(self.icmp_rule_default)
    self.nova.security_group_rules.delete(self.tcp_rule_default)
    # Verify cleanup restored the image count recorded before the test.
    self.assertEqual(self.amount_of_images_before,
                     len(list(self.glance.images.list())),
                     "Length of list with images should be the same")
def test_nova_launch_v_m_from_volume_with_all_flavours(self):
    """Boot a volume-backed instance with every flavor in turn.

    For this test we need node with compute role: 8 VCPUs, 16+GB RAM
    and 160+GB disk for any compute

    Steps:
    1. Create bootable volume
    1. Create a floating ip
    2. Create an instance from an image with some flavor
    3. Add the floating ip to the instance
    4. Ping the instance by the floating ip
    5. Delete the floating ip
    6. delete the instance
    7. Repeat all steps for all types of flavor
    """
    image_id = [img.id for img in self.nova.images.list()
                if img.name == 'TestVM'][0]
    nets = self.neutron.list_networks()['networks']
    internal = [n['id'] for n in nets if not n['router:external']]
    net = internal[0]
    # A single bootable volume reused across all flavors
    volume = common_functions.create_volume(self.cinder, image_id)
    self.volumes.append(volume)
    block_mapping = {'vda': volume.id}
    for flavor in self.nova.flavors.list():
        floating = self.nova.floating_ips.create()
        self.floating_ips.append(floating)
        self.assertIn(
            floating.ip,
            [item.ip for item in self.nova.floating_ips.list()])
        server = common_functions.create_instance(
            self.nova, "inst_543360_{}".format(flavor.name), flavor.id,
            net, [self.sec_group.name],
            block_device_mapping=block_mapping,
            inst_list=self.instances)
        server.add_floating_ip(floating.ip)
        self.assertTrue(
            common_functions.check_ip(self.nova, server.id, floating.ip))
        # Delete the server before asserting so cleanup always happens
        ping = common_functions.ping_command(floating.ip)
        common_functions.delete_instance(self.nova, server.id)
        self.assertTrue(ping, "Instance is not reachable")
def test_dispatch_external_event_inst_not_found(self, instances, os_conn,
                                                env):
    """Dispatch an external event

    Actions:
    1. Create instance with new net and subnet, boot it.
    2. Delete created instance;
    3. Check in nova-api log that the external event
       'network-vif-deleted' has been created for this instance;
    4. Check in nova-api log that the 'Dropping event' message appears
       for previously-deleted instance.
    """
    # Delete the pre-created instance
    server = instances[0]
    common.delete_instance(os_conn.nova, server.id)

    # Shell commands searching nova-api.log for the two expected traces
    # of the 'network-vif-deleted' external event
    cmd_event_created = ('grep "server_external_events'
                         '.*name.*network-vif-deleted'
                         '.*server_uuid.*{0}" '
                         '{1}').format(server.id, self.nova_log)
    cmd_event_dropped = ('grep "server_external_events'
                         '.*Dropping event network-vif-deleted'
                         '.*instance {0}" {1}').format(server.id,
                                                       self.nova_log)

    # A match on any single controller is enough
    creation_logged = False
    drop_logged = False
    for node in env.get_nodes_by_role('controller'):
        with node.ssh() as remote:
            if remote.execute(cmd_event_created, verbose=False).is_ok:
                creation_logged = True
            if remote.execute(cmd_event_dropped, verbose=False).is_ok:
                drop_logged = True

    # Both patterns must have been found somewhere in nova-api logs
    assert creation_logged is True and drop_logged is True, (
        'Grep did not found pattern for deletion '
        'and/or for dropping network-vif-deleted in '
        'nova-api.log')
def test_delete_instance_in_resize_state(self):
    """Delete an instance while it is in resize state

    Steps:
    1. Create a new instance
    2. Resize instance from m1.small to m1.tiny
    3. Delete the instance immediately after vm_state is 'RESIZE'
    4. Check that the instance was successfully deleted
    5. Repeat steps 1-4 some times
    """
    instance_name = 'TestVM_857431_instance_to_resize'
    admin_net = self.get_admin_int_net_id()
    flavor_from = self.nova.flavors.find(name='m1.small')
    flavor_to = self.nova.flavors.find(name='m1.tiny')
    image_id = self.nova.images.find(name='TestVM')
    # Repeat the whole cycle several times to catch races
    for _ in range(10):
        server = common_functions.create_instance(
            self.nova, instance_name, flavor_from, admin_net,
            [self.sec_group.id], image_id=image_id,
            inst_list=self.instances)
        # Kick off the resize and wait until it becomes visible
        server.resize(flavor_to)
        common_functions.wait(
            lambda: (self.os_conn.server_status_is(server, 'RESIZE') or
                     self.os_conn.server_status_is(server,
                                                   'VERIFY_RESIZE')),
            timeout_seconds=2 * 60,
            waiting_for='instance state is RESIZE or VERIFY_RESIZE')
        # Deleting a mid-resize instance must succeed
        common_functions.delete_instance(self.nova, server.id)
        assert server not in self.nova.servers.list()
def test_nova_launch_v_m_from_image_with_all_flavours(self):
    """Launch an instance from the TestVM image once per flavor.

    For this test we need node with compute role: 8 VCPUs, 16+GB RAM
    and 160+GB disk for any compute

    Steps:
    1. Create a floating ip
    2. Create an instance from an image with some flavor
    3. Add the floating ip to the instance
    4. Ping the instance by the floating ip
    5. Delete the floating ip
    6. delete the instance
    7. Repeat all steps for all types of flavor
    """
    net = self.get_admin_int_net_id()
    matching_images = [image.id for image in self.nova.images.list()
                       if image.name == 'TestVM']
    image_id = matching_images[0]
    for flavor in self.nova.flavors.list():
        fip = self.nova.floating_ips.create()
        self.floating_ips.append(fip)
        registered = [fip_info.ip
                      for fip_info in self.nova.floating_ips.list()]
        self.assertIn(fip.ip, registered)
        server = common_functions.create_instance(
            self.nova, "inst_543358_{}".format(flavor.name), flavor.id,
            net, [self.sec_group.id], image_id=image_id,
            inst_list=self.instances)
        server.add_floating_ip(fip.ip)
        self.assertTrue(
            common_functions.check_ip(self.nova, server.id, fip.ip))
        # Ping first, then delete; assert afterwards so the instance is
        # cleaned up even when the ping failed
        ping = common_functions.ping_command(fip.ip)
        common_functions.delete_instance(self.nova, server.id)
        self.assertTrue(ping, "Instance is not reachable")
def instance(self, request, os_conn, keypair, ubuntu_image_id, flavors,
             security_group):
    """Boot an Ubuntu server on a 'nova' zone compute and wait until its
    userdata marker shows up in the console log.

    Returns the refreshed server object; a finalizer force-deletes it.
    """
    zone = os_conn.nova.availability_zones.find(zoneName="nova")
    # FIX: dict.keys() returns a non-indexable view object on Python 3,
    # so `.keys()[0]` raises TypeError there; list(zone.hosts) yields the
    # same first key on Python 2 and works on Python 3 as well.
    compute_fqdn = list(zone.hosts)[0]
    network = os_conn.int_networks[0]
    boot_marker = "nova_856599_boot_done"
    # Userdata echoes the marker once boot-time provisioning is done
    userdata = '\n'.join([
        '#!/bin/bash -v',
        'apt-get install -y qemu-utils',
        'echo {marker}'
    ]).format(marker=boot_marker)
    # create instance (pinned to the chosen compute via availability zone)
    instance = os_conn.create_server(
        name='server-test-ubuntu',
        availability_zone='nova:{}'.format(compute_fqdn),
        key_name=keypair.name,
        image_id=ubuntu_image_id,
        flavor=flavors[0].id,
        userdata=userdata,
        nics=[{'net-id': network['id']}],
        security_groups=[security_group.id],
        wait_for_active=False,
        wait_for_avaliable=False)
    # Force-delete the server when the test using this fixture finishes
    request.addfinalizer(
        lambda: common_functions.delete_instance(os_conn.nova,
                                                 instance.id, True))
    os_conn.wait_servers_active([instance])
    os_conn.wait_marker_in_servers_log([instance], boot_marker)
    # Refresh the server object so callers see up-to-date attributes
    instance.get()
    return instance
def test_inst_deleted_reclaim_interval_timeout(
        self, set_recl_inst_interv, instances, volumes):
    """Check that softly-deleted instance is totally deleted after
    reclaim interval timeout.

    Actions:
    1. Update '/etc/nova/nova.conf' with short 'reclaim_instance_interval'
       and restart Nova on all nodes;
    2. Create net and subnet;
    3. Create and run two instances (vm1, vm2) inside same net;
    4. Create a volume and attach it to an instance vm1;
    5. Delete instance vm1 and check that it's in 'SOFT_DELETE' state;
    6. Wait for the reclaim instance interval to expire and make sure
       the vm1 is deleted;
    7. Check that volume is released now and has an Available state;
    8. Attach the volume to vm2 instance to ensure that the volume's
       reuse doesn't call any errors.

    ~! BUG !~
    https://bugs.launchpad.net/cinder/+bug/1463856
    Cinder volume isn't available after instance soft-deleted timer
    expired while volume is still attached.
    """
    timeout = 60  # (sec) timeout to wait instance for status change

    # Create two vms
    vm1, vm2 = instances

    # Create a volume and attach it to an instance vm1
    volume = common_functions.create_volume(
        self.os_conn.cinder, image_id=None)
    self.os_conn.nova.volumes.create_server_volume(
        server_id=vm1.id, volume_id=volume.id, device='/dev/vdb')
    volumes.append(volume)

    # Delete instance vm1 and check that it's in "SOFT_DELETED" state
    common_functions.delete_instance(self.os_conn.nova, vm1.id)
    assert vm1 not in self.os_conn.get_servers()
    common_functions.wait(
        lambda: self.os_conn.server_status_is(vm1, 'SOFT_DELETED'),
        timeout_seconds=timeout, sleep_seconds=5,
        waiting_for='instance {0} changes status to SOFT_DELETED'.format(
            vm1.name))

    # Sleep past the (short) reclaim interval with a 2.5x safety margin,
    # then verify the periodic cleanup task hard-deleted the instance.
    time_to_sleep = 2.5 * self.recl_interv_short
    logger.debug(('Sleep to wait for 2.5 reclaim_instance_interval ({0})'
                  ).format(time_to_sleep))
    sleep(time_to_sleep)
    # EAFP: a 404 from the detail call proves the instance is gone.
    # NOTE(review): the broad `except Exception` assumes the client
    # raises an exception carrying a `.code` attribute (e.g. a novaclient
    # NotFound) — confirm against the client in use.
    try:
        self.os_conn.get_instance_detail(vm1.id)
    except Exception as e:
        assert e.code == 404
    else:
        raise Exception(('Instance {0} not deleted after '
                         '"reclaim_interval_timeout"').format(vm1.name))

    # Update volume information
    volume = self.os_conn.cinder.volumes.get(volume.id)
    # ~! BUG !~: https://bugs.launchpad.net/cinder/+bug/1463856
    # Check that volume is released now and has an Available state
    assert volume.status == 'available'
    # Check volume is not attached
    assert volume.attachments == []

    # Attach the volume to vm2 instance
    self.os_conn.nova.volumes.create_server_volume(
        server_id=vm2.id, volume_id=volume.id, device='/dev/vdb')
    # Check volume status after re-attach
    assert self.os_conn.cinder.volumes.get(volume.id).status == 'in-use'