def test_delete_resource(self):
    resource_mock = mock.MagicMock()
    self.patch_object(openstack_utils, "resource_removed")
    openstack_utils.delete_resource(resource_mock, 'e01df65a')
    resource_mock.delete.assert_called_once_with('e01df65a')
    self.resource_removed.assert_called_once_with(
        resource_mock, 'e01df65a', 'resource')
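# For reference, a minimal sketch of what openstack_utils.delete_resource is
# expected to do, inferred from test_delete_resource above: call delete() on
# the resource manager, then block until resource_removed confirms the
# resource is gone.  This is an illustrative approximation only, not the
# actual zaza.openstack.utilities.openstack implementation.
def delete_resource(resource, resource_id, msg="resource"):
    """Delete an OpenStack resource and wait for it to disappear (sketch).

    :param resource: Manager exposing delete() for the resource type
    :type resource: Any
    :param resource_id: Identifier of the resource to delete
    :type resource_id: str
    :param msg: Label used when reporting on the resource
    :type msg: str
    """
    resource.delete(resource_id)
    resource_removed(resource, resource_id, msg)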
def launch_guest(self, guest_name, userdata=None):
    """Launch a guest to use in tests.

    Note that it is up to the caller to have set the RESOURCE_PREFIX class
    variable prior to calling this method.

    Also note that this method will remove any already existing instance
    with the same name as the one requested.

    :param guest_name: Name of instance
    :type guest_name: str
    :param userdata: Userdata to attach to instance
    :type userdata: Optional[str]
    :returns: Nova instance object
    :rtype: Server
    """
    instance_name = '{}-{}'.format(self.RESOURCE_PREFIX, guest_name)

    instance = self.retrieve_guest(instance_name)
    if instance:
        logging.info('Removing already existing instance ({}) with '
                     'requested name ({})'
                     .format(instance.id, instance_name))
        openstack_utils.delete_resource(self.nova_client.servers,
                                        instance.id,
                                        msg="server")

    return configure_guest.launch_instance(glance_setup.LTS_IMAGE_NAME,
                                           vm_name=instance_name,
                                           userdata=userdata)
def tearDown(cls):
    """Remove test resources."""
    logging.info('Running teardown')
    for server in cls.nova_client.servers.list():
        if server.name.startswith(cls.RESOURCE_PREFIX):
            openstack_utils.delete_resource(cls.nova_client.servers,
                                            server.id,
                                            msg="server")
def tearDown(cls):
    """Remove test resources."""
    logging.info('Running teardown')
    for snapshot in cls.cinder_client.volume_snapshots.list():
        if snapshot.name.startswith(cls.RESOURCE_PREFIX):
            openstack_utils.delete_resource(
                cls.cinder_client.volume_snapshots,
                snapshot.id,
                msg="snapshot")
    for volume in cls.cinder_client.volumes.list():
        if volume.name.startswith(cls.RESOURCE_PREFIX):
            openstack_utils.delete_resource(cls.cinder_client.volumes,
                                            volume.id,
                                            msg="volume")
def resource_cleanup(self):
    """Remove test resources."""
    try:
        logging.info('Removing instances launched by test ({}*)'
                     .format(self.RESOURCE_PREFIX))
        for server in self.nova_client.servers.list():
            if server.name.startswith(self.RESOURCE_PREFIX):
                openstack_utils.delete_resource(
                    self.nova_client.servers,
                    server.id,
                    msg="server")
    except AttributeError:
        # Test did not define self.RESOURCE_PREFIX, ignore.
        pass
def launch_guest(self, guest_name, userdata=None, use_boot_volume=False,
                 instance_key=None):
    """Launch a guest to use in tests.

    Note that it is up to the caller to have set the RESOURCE_PREFIX class
    variable prior to calling this method.

    Also note that this method will remove any already existing instance
    with the same name as the one requested.

    :param guest_name: Name of instance
    :type guest_name: str
    :param userdata: Userdata to attach to instance
    :type userdata: Optional[str]
    :param use_boot_volume: Whether to boot guest from a shared volume.
    :type use_boot_volume: boolean
    :param instance_key: Key to collect associated config data with.
    :type instance_key: Optional[str]
    :returns: Nova instance object
    :rtype: Server
    """
    instance_key = instance_key or glance_setup.LTS_IMAGE_NAME
    instance_name = '{}-{}'.format(self.RESOURCE_PREFIX, guest_name)

    for attempt in tenacity.Retrying(
            stop=tenacity.stop_after_attempt(3),
            wait=tenacity.wait_exponential(multiplier=1, min=2, max=10)):
        with attempt:
            old_instance_with_same_name = self.retrieve_guest(
                instance_name)
            if old_instance_with_same_name:
                logging.info(
                    'Removing already existing instance ({}) with '
                    'requested name ({})'.format(
                        old_instance_with_same_name.id, instance_name))
                openstack_utils.delete_resource(
                    self.nova_client.servers,
                    old_instance_with_same_name.id,
                    msg="server")

            return configure_guest.launch_instance(
                instance_key,
                vm_name=instance_name,
                use_boot_volume=use_boot_volume,
                userdata=userdata)
def _remove_volumes(cls, volumes):
    """Remove volumes passed as param.

    :param volumes: the volumes to delete
    :type volumes: List[volume objects]
    """
    for volume in volumes:
        if volume.name.startswith(cls.RESOURCE_PREFIX):
            logging.info("removing volume: {}".format(volume.name))
            try:
                openstack_utils.delete_resource(cls.cinder_client.volumes,
                                                volume.id,
                                                msg="volume")
            except Exception as e:
                logging.error("error removing volume: {}".format(str(e)))
                raise
def _remove_snapshots(cls, snapshots):
    """Remove snapshots passed as param.

    :param snapshots: the snapshots to delete
    :type snapshots: List[snapshot objects]
    """
    for snapshot in snapshots:
        if snapshot.name.startswith(cls.RESOURCE_PREFIX):
            logging.info("removing snapshot: {}".format(snapshot.name))
            try:
                openstack_utils.delete_resource(
                    cls.cinder_client.volume_snapshots,
                    snapshot.id,
                    msg="snapshot")
            except Exception as e:
                logging.error("error removing snapshot: {}".format(str(e)))
                raise
def resource_cleanup(cls):
    """Remove test resources."""
    logging.info('Running teardown')
    for alarm in cls.aodh_client.alarm.list():
        if alarm['name'].startswith(cls.RESOURCE_PREFIX):
            logging.info('Removing Alarm {}'.format(alarm['name']))
            telemetry_utils.delete_alarm(
                cls.aodh_client,
                alarm['name'],
                cache_wait=False)
    for server in cls.nova_client.servers.list():
        if server.name.startswith(cls.RESOURCE_PREFIX):
            logging.info('Removing server {}'.format(server.name))
            openstack_utils.delete_resource(
                cls.nova_client.servers,
                server.id,
                msg="server")
def _remove_amphorae_instances(self):
    """Remove amphorae instances forcefully.

    In some situations Octavia is unable to remove load balancer
    resources.  This helper can be used to remove the underlying
    instances.
    """
    result = self.octavia_client.amphora_list()
    for amphora in result.get('amphorae', []):
        for server in self.nova_client.servers.list():
            if ('compute_id' in amphora
                    and server.id == amphora['compute_id']):
                try:
                    openstack_utils.delete_resource(
                        self.nova_client.servers,
                        server.id,
                        msg="server")
                except AssertionError as e:
                    logging.warning(
                        'Gave up waiting for resource cleanup: "{}"'
                        .format(str(e)))
def resource_cleanup(self):
    """Remove test resources."""
    try:
        logging.info('Removing instances launched by test ({}*)'.format(
            self.RESOURCE_PREFIX))
        for server in self.nova_client.servers.list():
            if server.name.startswith(self.RESOURCE_PREFIX):
                openstack_utils.delete_resource(self.nova_client.servers,
                                                server.id,
                                                msg="server")
    except AssertionError as e:
        # Resource failed to be removed within the expected time frame,
        # log this fact and carry on.
        logging.warning(
            'Gave up waiting for resource cleanup: "{}"'.format(str(e)))
    except AttributeError:
        # Test did not define self.RESOURCE_PREFIX, ignore.
        pass
def test_snapshot_workload(self):
    """Ensure that a workload can be created and snapshotted."""
    # Set up a volume and an instance and attach one to the other
    volume = openstack_utils.create_volume(
        self.cinder_client,
        size="1",
        name="{}-100-vol".format(self.RESOURCE_PREFIX),
    )
    instance = guest_utils.launch_instance(
        glance_setup.CIRROS_IMAGE_NAME,
        vm_name="{}-server".format(self.RESOURCE_PREFIX),
    )
    # Trilio needs direct access to Ceph
    openstack_utils.attach_volume(self.nova_client, volume.id, instance.id)

    workloadmgrcli = WorkloadmgrCLIHelper(self.keystone_client)

    # Create workload using instance
    logging.info("Creating workload configuration")
    workload_id = workloadmgrcli.create_workload(instance.id)
    logging.info("Created workload: {}".format(workload_id))

    logging.info("Initiating snapshot")
    snapshot_id = workloadmgrcli.create_snapshot(workload_id)
    logging.info("Snapshot of workload {} created: {}".format(
        workload_id, snapshot_id))

    logging.info("Deleting server and volume ready for restore")
    openstack_utils.delete_resource(self.nova_client.servers,
                                    instance.id,
                                    "deleting instance")
    # NOTE: Trilio leaves a snapshot in place -
    # drop it before volume deletion.
    for volume_snapshot in self.cinder_client.volume_snapshots.list():
        openstack_utils.delete_resource(
            self.cinder_client.volume_snapshots,
            volume_snapshot.id,
            "deleting snapshot",
        )
    openstack_utils.delete_resource(self.cinder_client.volumes,
                                    volume.id,
                                    "deleting volume")

    logging.info("Initiating restore")
    workloadmgrcli.oneclick_restore(snapshot_id)
def test_410_heat_stack_create_delete(self):
    """Create stack, confirm nova compute resource, delete stack."""
    # Verify new image name
    images_list = list(self.glance_client.images.list())
    self.assertEqual(images_list[0].name, IMAGE_NAME,
                     "glance image create failed or unexpected")

    # Create a heat stack from a heat template, verify its status
    logging.info('Creating heat stack...')

    t_name = 'hot_hello_world.yaml'
    if (openstack_utils.get_os_release() <
            openstack_utils.get_os_release('xenial_queens')):
        os_release = 'icehouse'
    else:
        os_release = 'queens'

    # Get location of template files in charm-heat
    bundle_path = charm_lifecycle_utils.BUNDLE_DIR
    if bundle_path[-1:] == "/":
        bundle_path = bundle_path[0:-1]

    file_rel_path = os.path.join(os.path.dirname(bundle_path),
                                 TEMPLATES_PATH, os_release, t_name)
    file_abs_path = os.path.abspath(file_rel_path)
    t_url = urlparse.urlparse(file_abs_path, scheme='file').geturl()
    logging.info('template url: {}'.format(t_url))

    r_req = self.heat_client.http_client
    t_files, template = template_utils.get_template_contents(t_url, r_req)
    env_files, env = template_utils.process_environment_and_files(
        env_path=None)

    fields = {
        'stack_name': STACK_NAME,
        'timeout_mins': '15',
        'disable_rollback': False,
        'parameters': {
            'admin_pass': '******',
            'key_name': nova_utils.KEYPAIR_NAME,
            'image': IMAGE_NAME
        },
        'template': template,
        'files': dict(list(t_files.items()) + list(env_files.items())),
        'environment': env
    }

    # Create the stack
    try:
        stack = self.heat_client.stacks.create(**fields)
        logging.info('Stack data: {}'.format(stack))
        stack_id = stack['stack']['id']
        logging.info('Creating new stack, ID: {}'.format(stack_id))
    except Exception as e:
        # Generally, an api or cloud config error if this is hit.
        msg = 'Failed to create heat stack: {}'.format(e)
        self.fail(msg)

    # Confirm stack reaches COMPLETE status.
    # /!\ Heat stacks reach a COMPLETE status even when nova cannot
    # find resources (a valid hypervisor) to fit the instance, in
    # which case the heat stack self-deletes!  Confirm anyway...
    openstack_utils.resource_reaches_status(self.heat_client.stacks,
                                            stack_id,
                                            expected_status="COMPLETE",
                                            msg="Stack status wait")

    # List stack
    stacks = list(self.heat_client.stacks.list())
    logging.info('All stacks: {}'.format(stacks))

    # Get stack information
    try:
        stack = self.heat_client.stacks.get(STACK_NAME)
    except Exception as e:
        # Generally, a resource availability issue if this is hit.
        msg = 'Failed to get heat stack: {}'.format(e)
        self.fail(msg)

    # Confirm stack name.
    logging.info('Expected, actual stack name: {}, '
                 '{}'.format(STACK_NAME, stack.stack_name))
    self.assertEqual(stack.stack_name, STACK_NAME,
                     'Stack name mismatch, '
                     '{} != {}'.format(STACK_NAME, stack.stack_name))

    # Confirm existence of a heat-generated nova compute resource
    logging.info('Confirming heat stack resource status...')
    resource = self.heat_client.resources.get(STACK_NAME, RESOURCE_TYPE)
    server_id = resource.physical_resource_id
    self.assertTrue(server_id,
                    "Stack failed to spawn a compute resource.")

    # Confirm nova instance reaches ACTIVE status
    openstack_utils.resource_reaches_status(self.nova_client.servers,
                                            server_id,
                                            expected_status="ACTIVE",
                                            msg="nova instance")
    logging.info('Nova instance reached ACTIVE status')

    # Delete stack
    logging.info('Deleting heat stack...')
    openstack_utils.delete_resource(self.heat_client.stacks,
                                    STACK_NAME,
                                    msg="heat stack")