def wait_for_zone_status(client, zone_id, status, headers=None):
    """Poll a zone until its 'status' field equals the requested status.

    Re-fetches the zone every ``client.build_interval`` seconds.
    Raises InvalidStatusError if the zone lands in ERROR, and
    TimeoutException once ``client.build_timeout`` seconds have elapsed.
    """
    LOG.info('Waiting for zone %s to reach %s', zone_id, status)
    _, zone = client.show_zone(zone_id, headers=headers)
    started_at = int(time.time())
    while zone['status'] != status:
        time.sleep(client.build_interval)
        _, zone = client.show_zone(zone_id, headers=headers)
        current = zone['status']
        if current == status:
            LOG.info('Zone %s reached %s', zone_id, status)
            return
        if current == const.ERROR:
            raise exceptions.InvalidStatusError('Zone', zone_id, current)
        if int(time.time()) - started_at >= client.build_timeout:
            details = {'zone_id': zone_id,
                       'status': status,
                       'status_curr': current,
                       'timeout': client.build_timeout}
            message = ('Zone %(zone_id)s failed to reach status=%(status)s '
                       'within the required time (%(timeout)s s). Current '
                       'status: %(status_curr)s' % details)
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
def _log_request(self, method, req_url, resp, secs="", req_headers=None,
                 req_body=None, resp_body=None):
    """Log a finished HTTP request at INFO; full payload details at DEBUG."""
    req_headers = {} if req_headers is None else req_headers
    # Attach the request id so the log record carries it in the right place.
    extra = {'request_id': self._get_request_id(resp)}
    # NOTE(sdague): while we still have 6 callers to this function
    # we're going to just provide work around on who is actually
    # providing timings by gracefully adding no content if they don't.
    # Once we're down to 1 caller, clean this up.
    caller_name = test_utils.find_test_caller()
    if secs:
        secs = " %.3fs" % secs
    self.LOG.info('Request (%s): %s %s %s%s',
                  caller_name, resp['status'], method, req_url, secs,
                  extra=extra)
    # Full request/response bodies are only dumped at DEBUG; to filter
    # this out, don't run at debug.
    if self.LOG.isEnabledFor(logging.DEBUG):
        self._log_request_full(resp, req_headers, req_body, resp_body, extra)
def wrapper(self, *args, **kwargs):
    """Run the wrapped call; on SSH timeout, log diagnostics and re-raise.

    On ``SSHTimeout`` this logs the caller and, when a server and its
    client are available, the server's console output, then re-raises the
    original exception with its original traceback via ``six.reraise``.
    """
    try:
        return function(self, *args, **kwargs)
    except tempest.lib.exceptions.SSHTimeout:
        try:
            # Capture (type, value, traceback) so we can re-raise the
            # original exception after the logging below.
            original_exception = sys.exc_info()
            caller = test_utils.find_test_caller() or "not found"
            if self.server:
                msg = 'Caller: %s. Timeout trying to ssh to server %s'
                LOG.debug(msg, caller, self.server)
                if self.log_console and self.servers_client:
                    # Best effort: console output may itself be
                    # unavailable; never mask the SSH timeout with a
                    # logging failure.
                    try:
                        msg = 'Console log for server %s: %s'
                        console_log = (
                            self.servers_client.get_console_output(
                                self.server['id'])['output'])
                        LOG.debug(msg, self.server['id'], console_log)
                    except Exception:
                        msg = 'Could not get console_log for server %s'
                        LOG.debug(msg, self.server['id'])
            # re-raise the original ssh timeout exception
            six.reraise(*original_exception)
        finally:
            # Delete the traceback to avoid circular references
            # (frame -> exc_info tuple -> traceback -> frame).
            _, _, trace = original_exception
            del trace
def wait_for_ptr_status(client, fip_id, status):
    """Poll the PTR record of a floating IP until it reaches ``status``.

    Raises InvalidStatusError on ERROR and TimeoutException after
    ``client.build_timeout`` seconds.
    """
    LOG.info('Waiting for PTR %s to reach %s', fip_id, status)
    ptr = client.show_ptr_record(fip_id)
    begin = int(time.time())
    while ptr['status'] != status:
        time.sleep(client.build_interval)
        ptr = client.show_ptr_record(fip_id)
        observed = ptr['status']
        if observed == status:
            LOG.info('PTR %s reached %s', fip_id, status)
            return
        if observed == const.ERROR:
            raise exceptions.InvalidStatusError('PTR', fip_id, observed)
        if int(time.time()) - begin >= client.build_timeout:
            message = ('PTR for FIP: %(fip_id)s failed to reach '
                       'status=%(status)s within the required time '
                       '(%(timeout)s s). Current status: %(status_curr)s'
                       % {'fip_id': fip_id,
                          'status': status,
                          'status_curr': observed,
                          'timeout': client.build_timeout})
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
def wrapper(self, *args, **kwargs):
    """Run the wrapped call, logging SSH-init failures with the test caller.

    Non-timeout errors are logged as failed SSH initialization and
    re-raised as-is. On ``SSHTimeout`` the server console output is
    logged (best effort) before the original exception is re-raised.
    """
    try:
        return function(self, *args, **kwargs)
    except Exception as e:
        caller = test_utils.find_test_caller() or "not found"
        if not isinstance(e, tempest.lib.exceptions.SSHTimeout):
            message = ('Initializing SSH connection to %(ip)s failed. '
                       'Error: %(error)s' % {'ip': self.ip_address,
                                             'error': e})
            message = '(%s) %s' % (caller, message)
            LOG.error(message)
            raise
        else:
            try:
                # Capture (type, value, traceback) so the original
                # timeout can be re-raised after logging.
                original_exception = sys.exc_info()
                if self.server:
                    msg = 'Caller: %s. Timeout trying to ssh to server %s'
                    LOG.debug(msg, caller, self.server)
                    if self.console_output_enabled and self.servers_client:
                        # Best effort: never mask the SSH timeout with a
                        # console-log retrieval failure.
                        try:
                            msg = 'Console log for server %s: %s'
                            console_log = (
                                self.servers_client.get_console_output(
                                    self.server['id'])['output'])
                            LOG.debug(msg, self.server['id'], console_log)
                        except Exception:
                            msg = 'Could not get console_log for server %s'
                            LOG.debug(msg, self.server['id'])
                # re-raise the original ssh timeout exception
                six.reraise(*original_exception)
            finally:
                # Delete the traceback to avoid circular references
                _, _, trace = original_exception
                del trace
def wait_for_resource_deletion(self, id, *args, **kwargs):
    """Waits for a resource to be deleted

    This method will loop over is_resource_deleted until either
    is_resource_deleted returns True or the build timeout is reached.
    This depends on is_resource_deleted being implemented

    :param str id: The id of the resource to check
    :raises TimeoutException: If the build_timeout has elapsed and the
                              resource still hasn't been deleted
    """
    start_time = int(time.time())
    while True:
        if self.is_resource_deleted(id, *args, **kwargs):
            return
        if int(time.time()) - start_time >= self.build_timeout:
            # Sample the clock once so end_time and wait_time are
            # guaranteed to be consistent with each other.
            end_time = int(time.time())
            # NOTE: the original message concatenated
            # "...%(end_time)s" directly with "waited for..." with no
            # separator; a ", " is added so the rendered message reads
            # correctly.
            message = ('Failed to delete %(resource_type)s %(id)s within '
                       'the required time (%(timeout)s s). Timer started '
                       'at %(start_time)s. Timer ended at %(end_time)s, '
                       'waited for %(wait_time)s'
                       % {'resource_type': self.resource_type, 'id': id,
                          'timeout': self.build_timeout,
                          'start_time': start_time,
                          'end_time': end_time,
                          'wait_time': end_time - start_time})
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
        time.sleep(self.build_interval)
def wait_for_resource_activation(self, id):
    """Block until ``is_resource_active(id)`` returns True.

    Polls every ``self.build_interval`` seconds; requires
    ``is_resource_active`` to be implemented by the subclass.

    :param str id: The id of the resource to check
    :raises TimeoutException: If the build_timeout has elapsed and the
                              resource still hasn't become active
    """
    started = int(time.time())
    while not self.is_resource_active(id):
        if int(time.time()) - started >= self.build_timeout:
            message = ('Failed to reach active state %(resource_type)s '
                       '%(id)s within the required time (%(timeout)s s).'
                       % {'resource_type': self.resource_type,
                          'id': id,
                          'timeout': self.build_timeout})
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
        time.sleep(self.build_interval)
def wait_for_image_copied_to_stores(client, image_id):
    """Wait until an image copy has completed on every requested store.

    The client should also have build_interval and build_timeout
    attributes. Returns the list of stores on which the copy failed;
    raises ImageKilledException if the image is killed and
    TimeoutException if the copy does not finish in time.
    """
    started = int(time.time())
    remaining = []
    while int(time.time()) - started < client.build_timeout:
        image = client.show_image(image_id)
        remaining = image.get('os_glance_importing_to_stores')
        # NOTE(danms): If os_glance_importing_to_stores is None, then
        # we've raced with the startup of the task and should continue
        # to wait.
        if not remaining and remaining is not None:
            return image['os_glance_failed_import']
        if image['status'].lower() == 'killed':
            raise exceptions.ImageKilledException(image_id=image_id,
                                                  status=image['status'])
        time.sleep(client.build_interval)

    message = ('Image %s failed to finish the copy operation '
               'on stores: %s' % (image_id, str(remaining)))
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise lib_exc.TimeoutException(message)
def wrapper(self, *args, **kwargs):
    """Run the wrapped call, logging command-execution failures.

    Non-timeout errors are logged as failed command execution and
    re-raised as-is. On ``SSHTimeout`` the server console output is
    logged (best effort) before re-raising via a bare ``raise``.
    """
    try:
        return function(self, *args, **kwargs)
    except Exception as e:
        caller = test_utils.find_test_caller() or "not found"
        if not isinstance(e, tempest.lib.exceptions.SSHTimeout):
            message = ('Executing command on %(ip)s failed. '
                       'Error: %(error)s' % {'ip': self.ip_address,
                                             'error': e})
            message = '(%s) %s' % (caller, message)
            LOG.error(message)
            raise
        else:
            try:
                # Capture exc_info so the traceback can be explicitly
                # dropped in the finally block below.
                original_exception = sys.exc_info()
                if self.server:
                    msg = 'Caller: %s. Timeout trying to ssh to server %s'
                    LOG.debug(msg, caller, self.server)
                    if self.console_output_enabled and self.servers_client:
                        # Best effort: never mask the SSH timeout with a
                        # console-log retrieval failure.
                        try:
                            msg = 'Console log for server %s: %s'
                            console_log = (
                                self.servers_client.get_console_output(
                                    self.server['id'])['output'])
                            LOG.debug(msg, self.server['id'], console_log)
                        except Exception:
                            msg = 'Could not get console_log for server %s'
                            LOG.debug(msg, self.server['id'])
                # raise the original ssh timeout exception
                raise
            finally:
                # Delete the traceback to avoid circular references
                _, _, trace = original_exception
                del trace
def wait_for_resource_deletion(self, id):
    """Block until ``is_resource_deleted(id)`` returns True.

    Polls every ``self.build_interval`` seconds; requires
    ``is_resource_deleted`` to be implemented by the subclass.

    :param str id: The id of the resource to check
    :raises TimeoutException: If the build_timeout has elapsed and the
                              resource still hasn't been deleted
    """
    started = int(time.time())
    while not self.is_resource_deleted(id):
        if int(time.time()) - started >= self.build_timeout:
            message = ('Failed to delete %(resource_type)s %(id)s within '
                       'the required time (%(timeout)s s).'
                       % {'resource_type': self.resource_type,
                          'id': id,
                          'timeout': self.build_timeout})
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
        time.sleep(self.build_interval)
def _log_request(self, method, req_url, resp, secs="", req_headers=None,
                 req_body=None, resp_body=None):
    """Log a finished HTTP request at INFO; full payload details at DEBUG."""
    if req_headers is None:
        req_headers = {}
    # if we have the request id, put it in the right part of the log
    extra = dict(request_id=self._get_request_id(resp))
    # NOTE(sdague): while we still have 6 callers to this function
    # we're going to just provide work around on who is actually
    # providing timings by gracefully adding no content if they don't.
    # Once we're down to 1 caller, clean this up.
    caller_name = test_utils.find_test_caller()
    if secs:
        secs = " %.3fs" % secs
    # Pass the values as lazy %-args instead of pre-formatting with '%'
    # so the message is only built when INFO is actually emitted.
    self.LOG.info('Request (%s): %s %s %s%s',
                  caller_name, resp['status'], method, req_url, secs,
                  extra=extra)
    # Also log everything at DEBUG; if you want to filter this
    # out, don't run at debug.
    if self.LOG.isEnabledFor(real_logging.DEBUG):
        self._log_request_full(method, req_url, resp, secs, req_headers,
                               req_body, resp_body, caller_name, extra)
def wait_for_bm_node_status(client, node_id, attr, status):
    """Poll a baremetal node until ``node[attr]`` equals ``status``.

    The client should have a show_node(node_uuid) method to get the node.
    Raises TimeoutException after ``client.build_timeout`` seconds.
    """
    _, node = client.show_node(node_id)
    started = int(time.time())
    while node[attr] != status:
        time.sleep(client.build_interval)
        _, node = client.show_node(node_id)
        current = node[attr]
        if current == status:
            return
        if int(time.time()) - started >= client.build_timeout:
            message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                       'within the required time (%(timeout)s s).'
                       % {'node_id': node_id,
                          'attr': attr,
                          'status': status,
                          'timeout': client.build_timeout})
            message += ' Current state of %s: %s.' % (attr, current)
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
def wait_for_bm_node_status(client, node_id, attr, status):
    """Wait for the given baremetal node attribute to reach a status.

    The client should have a show_node(node_uuid) method to get the node.
    Raises TimeoutException after ``client.build_timeout`` seconds.
    """
    _, node = client.show_node(node_id)
    t0 = int(time.time())
    while node[attr] != status:
        time.sleep(client.build_interval)
        _, node = client.show_node(node_id)
        observed = node[attr]
        if observed == status:
            return
        if int(time.time()) - t0 >= client.build_timeout:
            details = {'node_id': node_id, 'attr': attr, 'status': status,
                       'timeout': client.build_timeout}
            message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                       'within the required time (%(timeout)s s).' % details)
            message += ' Current state of %s: %s.' % (attr, observed)
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
def wait_for_zone_404(client, zone_id):
    """Wait until showing the zone returns a 404 (NotFound).

    Raises TimeoutException once ``client.build_timeout`` seconds pass
    without the zone disappearing.
    """
    LOG.info('Waiting for zone %s to 404', zone_id)
    started = int(time.time())
    while True:
        time.sleep(client.build_interval)
        try:
            _, zone = client.show_zone(zone_id)
        except lib_exc.NotFound:
            LOG.info('Zone %s is 404ing', zone_id)
            return
        if int(time.time()) - started >= client.build_timeout:
            details = {'zone_id': zone_id,
                       'status_curr': zone['status'],
                       'timeout': client.build_timeout}
            message = ('Zone %(zone_id)s failed to 404 within the required '
                       'time (%(timeout)s s). Current status: '
                       '%(status_curr)s' % details)
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
def wait_for_zone_import_status(client, zone_import_id, status):
    """Poll a zone import until it reaches the given status.

    Raises TimeoutException after ``client.build_timeout`` seconds.
    """
    LOG.info('Waiting for zone import %s to reach %s', zone_import_id, status)
    _, zone_import = client.show_zone_import(zone_import_id)
    started_at = int(time.time())
    while zone_import['status'] != status:
        time.sleep(client.build_interval)
        _, zone_import = client.show_zone_import(zone_import_id)
        observed = zone_import['status']
        if observed == status:
            LOG.info('Zone import %s reached %s', zone_import_id, status)
            return
        if int(time.time()) - started_at >= client.build_timeout:
            details = {'zone_import_id': zone_import_id,
                       'status': status,
                       'status_curr': observed,
                       'timeout': client.build_timeout}
            message = ('Zone import %(zone_import_id)s failed to reach '
                       'status=%(status)s within the required time '
                       '(%(timeout)s s). Current '
                       'status: %(status_curr)s' % details)
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
def wait_for_bm_node_status(client, node_id, attr, status, timeout=None,
                            interval=None, abort_on_error_state=False):
    """Waits for a baremetal node attribute to reach given status.

    :param client: an instance of tempest plugin BaremetalClient.
    :param node_id: identifier of the node.
    :param attr: node's API-visible attribute to check status of.
    :param status: desired status. Can be a list of statuses.
    :param timeout: the timeout after which the check is considered as
        failed. Defaults to client.build_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.
    :param abort_on_error_state: whether to abort waiting if the node
        reaches an error state.

    The client should have a show_node(node_id) method to get the node.
    """
    timeout, interval = _determine_and_check_timeout_interval(
        timeout, client.build_timeout, interval, client.build_interval)
    # Normalize to a list so the membership checks below work for both a
    # single status and a list of acceptable statuses.
    if not isinstance(status, list):
        status = [status]

    def is_attr_in_status():
        # Predicate polled by call_until_true: True once the watched
        # attribute is in the accepted set.
        node = utils.get_node(client, node_id=node_id)
        if node[attr] in status:
            return True
        elif (abort_on_error_state and
              node['provision_state'].endswith(' failed')):
            # Fail fast: a '<op> failed' provision state will not
            # transition to the awaited status on its own.
            msg = ('Node %(node)s reached failure state %(state)s while '
                   'waiting for %(attr)s=%(expected)s. '
                   'Error: %(error)s' % {
                       'node': node_id,
                       'state': node['provision_state'],
                       'attr': attr,
                       'expected': status,
                       'error': node.get('last_error')
                   })
            LOG.debug(msg)
            raise lib_exc.TempestException(msg)
        return False

    if not test_utils.call_until_true(is_attr_in_status, timeout, interval):
        message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                   'within the required time (%(timeout)s s).' % {
                       'node_id': node_id,
                       'attr': attr,
                       'status': status,
                       'timeout': timeout
                   })
        caller = test_utils.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        LOG.debug(message)
        raise lib_exc.TimeoutException(message)
def wait_for_zone_404(client, zone_id):
    """Block until the zone show call raises NotFound (404).

    Raises TimeoutException if the zone is still visible after
    ``client.build_timeout`` seconds.
    """
    LOG.info('Waiting for zone %s to 404', zone_id)
    t0 = int(time.time())
    while True:
        time.sleep(client.build_interval)
        try:
            _, zone = client.show_zone(zone_id)
        except lib_exc.NotFound:
            LOG.info('Zone %s is 404ing', zone_id)
            return
        if int(time.time()) - t0 >= client.build_timeout:
            message = ('Zone %(zone_id)s failed to 404 within the required '
                       'time (%(timeout)s s). Current status: '
                       '%(status_curr)s'
                       % {'zone_id': zone_id,
                          'status_curr': zone['status'],
                          'timeout': client.build_timeout})
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
def wait_for_image_imported_to_stores(client, image_id, stores=None):
    """Waits for an image to be imported to all requested stores.

    Short circuits to fail if the server reports failure of any store.
    If stores is None, just wait for status==active.

    The client should also have build_interval and build_timeout
    attributes.

    :raises OtherRestClientException: if any store reports a failed import.
    :raises TimeoutException: if the image does not become active within
        client.build_timeout seconds.
    """
    exc_cls = lib_exc.TimeoutException
    # Start with an empty dict so the failure message below is still
    # well-formed (no NameError) even if build_timeout is non-positive
    # and the polling loop body never runs.
    image = {}
    start = int(time.time())
    while int(time.time()) - start < client.build_timeout:
        image = client.show_image(image_id)
        if image['status'] == 'active' and (stores is None or
                                            image['stores'] == stores):
            return
        if image.get('os_glance_failed_import'):
            # At least one store failed; no point waiting out the rest
            # of the timeout.
            exc_cls = lib_exc.OtherRestClientException
            break
        time.sleep(client.build_interval)

    message = ('Image %s failed to import on stores: %s' %
               (image_id, str(image.get('os_glance_failed_import'))))
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise exc_cls(message)
def wait_for_zone_import_status(client, zone_import_id, status):
    """Block until an imported zone reaches the requested status.

    Raises TimeoutException after ``client.build_timeout`` seconds.
    """
    LOG.info('Waiting for zone import %s to reach %s', zone_import_id, status)
    _, zone_import = client.show_zone_import(zone_import_id)
    t0 = int(time.time())
    while zone_import['status'] != status:
        time.sleep(client.build_interval)
        _, zone_import = client.show_zone_import(zone_import_id)
        current_status = zone_import['status']
        if current_status == status:
            LOG.info('Zone import %s reached %s', zone_import_id, status)
            return
        if int(time.time()) - t0 >= client.build_timeout:
            message = ('Zone import %(zone_import_id)s failed to reach '
                       'status=%(status)s within the required time '
                       '(%(timeout)s s). Current '
                       'status: %(status_curr)s'
                       % {'zone_import_id': zone_import_id,
                          'status': status,
                          'status_curr': current_status,
                          'timeout': client.build_timeout})
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
def get_tenant_network(creds_provider, compute_networks_client,
                       shared_network_name):
    """Get a network usable by the primary tenant

    :param creds_provider: instance of credential provider
    :param compute_networks_client: compute network client. We want to have
           the compute network client so we can have use a common approach
           for both neutron and nova-network cases. If this is not an admin
           network client, set_network_kwargs might fail in case
           fixed_network_name is the network to be used, and it's not
           visible to the tenant
    :param shared_network_name: name of the shared network to be used if no
           tenant network is available in the creds provider
    :returns: a dict with 'id' and 'name' of the network
    """
    caller = test_utils.find_test_caller()
    primary_creds = creds_provider.get_primary_creds()
    network = getattr(primary_creds, 'network', None)
    if not network or not network.get('name'):
        # Fall back to the configured shared network when the credential
        # provider did not hand us a usable one.
        if shared_network_name:
            msg = ('No valid network provided or created, defaulting to '
                   'fixed_network_name')
            if caller:
                msg = '(%s) %s' % (caller, msg)
            LOG.debug(msg)
            try:
                network = get_network_from_name(shared_network_name,
                                                compute_networks_client)
            except exceptions.InvalidTestResource:
                network = {}
    msg = 'Found network %s available for tenant' % network
    if caller:
        msg = '(%s) %s' % (caller, msg)
    LOG.info(msg)
    return network
def log_request(self, request_kwargs, response, user_name):
    """Log one request/response pair at INFO, tagged with the calling test."""
    calling_test = test_utils.find_test_caller()
    request_text = self.stringify_request(request_kwargs, response)
    user_info = '' if user_name is None else "(user={0})".format(user_name)
    LOG.info('Request %s (%s)\n %s', user_info, calling_test, request_text)
def _log_request_start(self, method, req_url, req_headers=None,
                       req_body=None):
    """Log the start of a request at DEBUG when tracing matches the caller."""
    req_headers = {} if req_headers is None else req_headers
    caller_name = test_utils.find_test_caller()
    # Only emit when request tracing is configured and the calling test
    # name matches the configured regex.
    if self.trace_requests and re.search(self.trace_requests, caller_name):
        self.LOG.debug('Starting Request (%s): %s %s'
                       % (caller_name, method, req_url))
def wait_for_deleted_status_or_not_found(show_client, id, status_key,
                                         check_interval, check_timeout,
                                         root_tag=None, **kwargs):
    """Waits for an object to reach a DELETED status or be not found (404).

    :param show_client: The tempest service client show method.
                        Ex. cls.os_primary.servers_client.show_server
    :param id: The id of the object to query.
    :param status_key: The key of the status field in the response.
                       Ex. provisioning_status
    :check_interval: How often to check the status, in seconds.
    :check_timeout: The maximum time, in seconds, to check the status.
    :root_tag: The root tag on the response to remove, if any.
    :raises CommandFailed: Raised if the object goes into ERROR and ERROR
                           was not the desired status.
    :raises TimeoutException: The object did not achieve the status or
                              ERROR in the check_timeout period.
    :returns: None
    """
    start = int(time.time())
    LOG.info('Waiting for {name} status to update to DELETED or be not '
             'found(404)'.format(name=show_client.__name__))
    while True:
        try:
            response = show_client(id, **kwargs)
        except exceptions.NotFound:
            # A 404 counts as fully deleted.
            return
        if root_tag:
            # Unwrap the response envelope, e.g. {'loadbalancer': {...}}.
            object_details = response[root_tag]
        else:
            object_details = response

        if object_details[status_key] == const.DELETED:
            LOG.info('{name}\'s status updated to DELETED.'.format(
                name=show_client.__name__))
            return
        elif int(time.time()) - start >= check_timeout:
            message = (
                '{name} {field} failed to update to DELETED or become not '
                'found (404) within the required time {timeout}. Current '
                'status of {name}: {status}'.format(
                    name=show_client.__name__,
                    timeout=check_timeout,
                    status=object_details[status_key],
                    field=status_key))
            caller = test_utils.find_test_caller()
            if caller:
                message = '({caller}) {message}'.format(caller=caller,
                                                        message=message)
            raise exceptions.TimeoutException(message)
        time.sleep(check_interval)
def resource_cleanup(cls): """Deletes any auto_allocated_network and it's associated resources.""" # Find the auto-allocated router for the tenant. # This is a bit hacky since we don't have a great way to find the # auto-allocated router given the private tenant network we have. routers = cls.routers_client.list_routers().get('routers', []) if len(routers) > 1: # This indicates a race where nova is concurrently calling the # neutron auto-allocated-topology API for multiple server builds # at the same time (it's called from nova-compute when setting up # networking for a server). Neutron will detect duplicates and # automatically clean them up, but there is a window where the API # can return multiple and we don't have a good way to filter those # out right now, so we'll just handle them. LOG.info('(%s) Found more than one router for tenant.', test_utils.find_test_caller()) # Remove any networks, duplicate or otherwise, that these tests # created. All such networks will be in the current tenant. Neutron # will cleanup duplicate resources automatically, so ignore 404s. search_opts = {'tenant_id': cls.networks_client.tenant_id} networks = cls.networks_client.list_networks(**search_opts).get( 'networks', []) for router in routers: # Disassociate the subnets from the router. Because of the race # mentioned above the subnets might not be associated with the # router so ignore any 404. for network in networks: for subnet_id in network['subnets']: test_utils.call_and_ignore_notfound_exc( cls.routers_client.remove_router_interface, router['id'], subnet_id=subnet_id) # Delete the router. cls.routers_client.delete_router(router['id']) for network in networks: # Get and delete the ports for the given network. ports = cls.ports_client.list_ports(network_id=network['id']).get( 'ports', []) for port in ports: test_utils.call_and_ignore_notfound_exc( cls.ports_client.delete_port, port['id']) # Delete the subnets. 
for subnet_id in network['subnets']: test_utils.call_and_ignore_notfound_exc( cls.subnets_client.delete_subnet, subnet_id) # Delete the network. test_utils.call_and_ignore_notfound_exc( cls.networks_client.delete_network, network['id']) super(AutoAllocateNetworkTest, cls).resource_cleanup()
def resource_cleanup(cls): """Deletes any auto_allocated_network and it's associated resources.""" # Find the auto-allocated router for the tenant. # This is a bit hacky since we don't have a great way to find the # auto-allocated router given the private tenant network we have. routers = cls.routers_client.list_routers().get('routers', []) if len(routers) > 1: # This indicates a race where nova is concurrently calling the # neutron auto-allocated-topology API for multiple server builds # at the same time (it's called from nova-compute when setting up # networking for a server). Neutron will detect duplicates and # automatically clean them up, but there is a window where the API # can return multiple and we don't have a good way to filter those # out right now, so we'll just handle them. LOG.info('(%s) Found more than one router for tenant.', test_utils.find_test_caller()) # Remove any networks, duplicate or otherwise, that these tests # created. All such networks will be in the current tenant. Neutron # will cleanup duplicate resources automatically, so ignore 404s. search_opts = {'tenant_id': cls.networks_client.tenant_id} networks = cls.networks_client.list_networks( **search_opts).get('networks', []) for router in routers: # Disassociate the subnets from the router. Because of the race # mentioned above the subnets might not be associated with the # router so ignore any 404. for network in networks: for subnet_id in network['subnets']: test_utils.call_and_ignore_notfound_exc( cls.routers_client.remove_router_interface, router['id'], subnet_id=subnet_id) # Delete the router. cls.routers_client.delete_router(router['id']) for network in networks: # Get and delete the ports for the given network. ports = cls.ports_client.list_ports( network_id=network['id']).get('ports', []) for port in ports: test_utils.call_and_ignore_notfound_exc( cls.ports_client.delete_port, port['id']) # Delete the subnets. 
for subnet_id in network['subnets']: test_utils.call_and_ignore_notfound_exc( cls.subnets_client.delete_subnet, subnet_id) # Delete the network. test_utils.call_and_ignore_notfound_exc( cls.networks_client.delete_network, network['id']) super(AutoAllocateNetworkTest, cls).resource_cleanup()
def get_network_from_name(name, compute_networks_client):
    """Get a full network dict from just a network name

    :param str name: the name of the network to use
    :param NetworksClient compute_networks_client: The network client
        object to use for making the network lists api request
    :return: The full dictionary for the network in question
    :rtype: dict
    :raises InvalidTestResource: If the name provided is invalid, the
        networks list returns a 404, there are no found networks, or the
        found network is invalid
    """
    caller = test_utils.find_test_caller()

    if not name:
        raise exceptions.InvalidTestResource(type='network', name=name)

    networks = compute_networks_client.list_networks()['networks']
    # NOTE(zhufl) compute networks_client uses 'label' as network name field,
    # while neutron networks_client uses 'name' as network name field.
    try:
        networks = [n for n in networks if n['label'] == name]
    except KeyError:
        networks = [n for n in networks if n['name'] == name]

    # Check that a network exists, else raise an InvalidConfigurationException
    if len(networks) == 1:
        # Exactly one match: sorted() on a one-element list is a no-op.
        network = sorted(networks)[0]
    elif len(networks) > 1:
        msg = ("Network with name: %s had multiple matching networks in the "
               "list response: %s\n Unable to specify a single network" % (
                   name, networks))
        if caller:
            msg = '(%s) %s' % (caller, msg)
        LOG.warning(msg)
        raise exceptions.InvalidTestResource(type='network', name=name)
    else:
        msg = "Network with name: %s not found" % name
        if caller:
            msg = '(%s) %s' % (caller, msg)
        LOG.warning(msg)
        raise exceptions.InvalidTestResource(type='network', name=name)
    # To be consistent between neutron and nova network always use name even
    # if label is used in the api response. If neither is present then
    # the returned network is invalid.
    name = network.get('name') or network.get('label')
    if not name:
        msg = "Network found from list doesn't contain a valid name or label"
        if caller:
            msg = '(%s) %s' % (caller, msg)
        LOG.warning(msg)
        raise exceptions.InvalidTestResource(type='network', name=name)
    network['name'] = name
    return network
def get_network_from_name(name, compute_networks_client):
    """Get a full network dict from just a network name

    :param str name: the name of the network to use
    :param NetworksClient compute_networks_client: The network client
        object to use for making the network lists api request
    :return: The full dictionary for the network in question
    :rtype: dict
    :raises InvalidTestResource: If the name provided is invalid, the
        networks list returns a 404, there are no found networks, or the
        found network is invalid
    """
    caller = test_utils.find_test_caller()

    if not name:
        raise exceptions.InvalidTestResource(type='network', name=name)

    networks = compute_networks_client.list_networks()['networks']
    # NOTE(zhufl) compute networks_client uses 'label' as network name field,
    # while neutron networks_client uses 'name' as network name field.
    try:
        networks = [n for n in networks if n['label'] == name]
    except KeyError:
        networks = [n for n in networks if n['name'] == name]

    # Check that a network exists, else raise an InvalidConfigurationException
    if len(networks) == 1:
        # Exactly one match: sorted() on a one-element list is a no-op.
        network = sorted(networks)[0]
    elif len(networks) > 1:
        msg = ("Network with name: %s had multiple matching networks in the "
               "list response: %s\n Unable to specify a single network"
               % (name, networks))
        if caller:
            msg = '(%s) %s' % (caller, msg)
        LOG.warning(msg)
        raise exceptions.InvalidTestResource(type='network', name=name)
    else:
        msg = "Network with name: %s not found" % name
        if caller:
            msg = '(%s) %s' % (caller, msg)
        LOG.warning(msg)
        raise exceptions.InvalidTestResource(type='network', name=name)
    # To be consistent between neutron and nova network always use name even
    # if label is used in the api response. If neither is present then
    # the returned network is invalid.
    name = network.get('name') or network.get('label')
    if not name:
        msg = "Network found from list doesn't contain a valid name or label"
        if caller:
            msg = '(%s) %s' % (caller, msg)
        LOG.warning(msg)
        raise exceptions.InvalidTestResource(type='network', name=name)
    network['name'] = name
    return network
def ping_ip_address(self, ip_address, should_succeed=True,
                    ping_timeout=None, mtu=None):
    """Ping an IP address and assert reachability matches expectation.

    :param ip_address: the address to ping.
    :param should_succeed: True if the ping is expected to succeed.
    :param ping_timeout: overall retry timeout in seconds; defaults to
        CONF.validation.ping_timeout.
    :param mtu: if set, ping with don't-fragment and an ICMP payload
        sized so the full packet matches this MTU.
    :returns: the boolean result of call_until_true (also asserted True).
    """
    # the code is taken from tempest/scenario/manager.py in tempest git
    timeout = ping_timeout or CONF.validation.ping_timeout
    cmd = ['ping', '-c1', '-w1']

    if mtu:
        cmd += [
            # don't fragment
            '-M', 'do',
            # ping receives just the size of ICMP payload
            '-s', str(net_utils.get_ping_payload_size(mtu, 4))
        ]
    cmd.append(ip_address)

    def ping():
        # One ping attempt; the result is compared against the
        # expectation so call_until_true keeps retrying until the
        # *expected* reachability state is observed.
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()

        return (proc.returncode == 0) == should_succeed

    caller = test_utils.find_test_caller()
    LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
              ' expected result is %(should_succeed)s', {
                  'caller': caller, 'ip': ip_address, 'timeout': timeout,
                  'should_succeed':
                      'reachable' if should_succeed else 'unreachable'
              })
    result = test_utils.call_until_true(ping, timeout, 1)

    # To make sure ping_ip_address called by test works
    # as expected.
    self.assertTrue(result)

    LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
              'ping result is %(result)s', {
                  'caller': caller, 'ip': ip_address, 'timeout': timeout,
                  'result': 'expected' if result else 'unexpected'
              })
    return result
def _wait():
    """Poll one instance's status; signal the looping call when done.

    Raises loopingcall.LoopingCallDone() to stop the enclosing
    FixedInterval loop when the instance reaches an expected status or
    is (expectedly) deleted; raises UnexpectedResponseCode if the
    instance goes to ERROR while ERROR is not expected.
    """
    try:
        res = cls.client.get_resource("instances", id)
        cur_status = res["instance"]["status"]
    except exceptions.NotFound:
        # A 404 is success when deletion is what we are waiting for.
        if need_delete or "DELETED" in expected_status:
            LOG.info('Instance %s is deleted', id)
            raise loopingcall.LoopingCallDone()
        return

    if cur_status in expected_status:
        LOG.info('Instance %s becomes %s', id, cur_status)
        raise loopingcall.LoopingCallDone()
    elif "ERROR" not in expected_status and cur_status == "ERROR":
        # If instance status goes to ERROR but is not expected, stop
        # waiting
        res = cls.admin_client.get_resource("instances", id)
        LOG.info(f'Instance fault msg: {res["instance"].get("fault")}')

        # Show trove-guestagent log for debug purpose.
        # Only admin user is able to publish and show the trove guest
        # agent log. Make sure the container is deleted after fetching
        # the log.
        try:
            LOG.info(f"Publishing guest log for instance {id}")
            cls.publish_log(id, 'guest')
            LOG.info(f"Getting guest log content for instance {id}")
            log_gen = cls.log_generator(id, 'guest', lines=0)
            log_content = "".join([chunk for chunk in log_gen()])
            LOG.info(
                f"\n=============================================\n"
                f"Trove guest agent log for instance {id}\n"
                f"=============================================")
            LOG.info(log_content)
        except Exception as err:
            # Best-effort: failure to fetch the log must not mask the
            # original ERROR condition.
            LOG.warning(f"Failed to get guest log for instance {id}, "
                        f"error: {str(err)}")
        finally:
            # Remove the swift container of database logs.
            LOG.info(f"Deleting swift container "
                     f"{CONF.database.database_log_container}")
            cls.delete_swift_containers(
                cls.admin_container_client, cls.admin_object_client,
                CONF.database.database_log_container)

        message = "Instance status is ERROR."
        caller = test_utils.find_test_caller()
        if caller:
            message = '({caller}) {message}'.format(caller=caller,
                                                    message=message)
        raise exceptions.UnexpectedResponseCode(message)
def wait_for_backup_status(cls, id, expected_status=["COMPLETED"],
                           need_delete=False):
    """Wait until backup ``id`` reaches one of ``expected_status``.

    :param id: identifier of the backup.
    :param expected_status: a status string or list of status strings the
        backup should reach. (The mutable default is safe here: the list
        is only ever rebound, never mutated.)
    :param need_delete: if True, delete the backup first and wait for it
        to disappear.
    :raises exceptions.UnexpectedResponseCode: if the backup goes FAILED
        while FAILED is not expected.
    :raises exceptions.TimeoutException: if the expected status is not
        reached within CONF.database.backup_wait_timeout seconds.
    """
    def _wait():
        try:
            res = cls.client.get_resource("backups", id)
            cur_status = res["backup"]["status"]
        except exceptions.NotFound:
            # A 404 is success when deletion is what we are waiting for.
            if need_delete or "DELETED" in expected_status:
                LOG.info('Backup %s is deleted', id)
                raise loopingcall.LoopingCallDone()
            return

        if cur_status in expected_status:
            LOG.info('Backup %s becomes %s', id, cur_status)
            raise loopingcall.LoopingCallDone()
        elif "FAILED" not in expected_status and cur_status == "FAILED":
            # If backup status goes to FAILED but is not expected, stop
            # waiting
            message = "Backup status is FAILED."
            caller = test_utils.find_test_caller()
            if caller:
                message = '({caller}) {message}'.format(caller=caller,
                                                        message=message)
            raise exceptions.UnexpectedResponseCode(message)

    # Normalize a bare status string into a list. isinstance() is the
    # idiomatic check (and also accepts list subclasses), unlike the
    # previous ``type(expected_status) != list`` comparison.
    if not isinstance(expected_status, list):
        expected_status = [expected_status]

    if need_delete:
        # If resource already removed, return
        try:
            cls.client.get_resource("backups", id)
        except exceptions.NotFound:
            LOG.info('Backup %s not found', id)
            return

        LOG.info(f"Deleting backup {id}")
        cls.delete_backup(id, ignore_notfound=True)

    timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_wait)
    try:
        timer.start(interval=10,
                    timeout=CONF.database.backup_wait_timeout).wait()
    except loopingcall.LoopingCallTimeOut:
        message = ("Backup %s is not in the expected status: %s" %
                   (id, expected_status))
        caller = test_utils.find_test_caller()
        if caller:
            message = '({caller}) {message}'.format(caller=caller,
                                                    message=message)
        raise exceptions.TimeoutException(message)
def wait_for_image_status(client, image_id, status):
    """Waits for an image to reach a given status.

    The client should have a show_image(image_id) method to get the image.
    The client should also have build_interval and build_timeout attributes.
    """
    if isinstance(client, images_v1_client.ImagesClient):
        # The v1 'check_image' call returns just the image metadata
        # (headers), which is far cheaper than show_image, which also
        # returns the image payload itself.
        def _show_image_v1(image_id):
            resp = client.check_image(image_id)
            return common_image.get_image_meta_from_headers(resp)

        show_image = _show_image_v1
    else:
        show_image = client.show_image

    observed = 'An unknown status'
    deadline = int(time.time()) + client.build_timeout
    while int(time.time()) < deadline:
        image = show_image(image_id)
        # Compute image client wraps the response in an 'image' element;
        # the Glance image client does not.
        if 'image' in image:
            image = image['image']

        observed = image['status']
        if observed == status:
            return
        if observed.lower() == 'killed':
            raise exceptions.ImageKilledException(image_id=image_id,
                                                  status=status)
        if observed.lower() == 'error':
            raise exceptions.AddImageException(image_id=image_id)

        time.sleep(client.build_interval)

    message = ('Image %(image_id)s failed to reach %(status)s state '
               '(current state %(current_status)s) within the required '
               'time (%(timeout)s s).' % {
                   'image_id': image_id,
                   'status': status,
                   'current_status': observed,
                   'timeout': client.build_timeout
               })
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise lib_exc.TimeoutException(message)
def ping_ip_address(self, ip_address, should_succeed=True,
                    ping_timeout=None, mtu=None, server=None):
    """Ping an IP until the outcome matches *should_succeed*.

    Dumps the server console log on request to help debug failures.
    """
    timeout = ping_timeout or CONF.validation.ping_timeout
    ping_cmd = ['ping', '-c1', '-w1']
    if mtu:
        # '-M do': don't fragment; '-s': ping receives just the size of
        # the ICMP payload derived from the requested MTU.
        ping_cmd.extend(
            ['-M', 'do',
             '-s', str(net_utils.get_ping_payload_size(mtu, 4))])
    ping_cmd.append(ip_address)

    def ping():
        proc = subprocess.Popen(ping_cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        succeeded = proc.returncode == 0
        return succeeded == should_succeed

    caller = test_utils.find_test_caller()
    LOG.debug(
        '%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
        ' expected result is %(should_succeed)s', {
            'caller': caller,
            'ip': ip_address,
            'timeout': timeout,
            'should_succeed':
                'reachable' if should_succeed else 'unreachable'
        })
    result = test_utils.call_until_true(ping, timeout, 1)
    LOG.debug(
        '%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
        'ping result is %(result)s', {
            'caller': caller,
            'ip': ip_address,
            'timeout': timeout,
            'result': 'expected' if result else 'unexpected'
        })
    if server:
        self._log_console_output([server])
    return result
def wait_for_bm_node_status(client, node_id, attr, status, timeout=None,
                            interval=None):
    """Waits for a baremetal node attribute to reach given status.

    :param client: an instance of tempest plugin BaremetalClient.
    :param node_id: identifier of the node.
    :param attr: node's API-visible attribute to check status of.
    :param status: desired status.
    :param timeout: the timeout after which the check is considered as
        failed. Defaults to client.build_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.

    The client should have a show_node(node_id) method to get the node.
    """
    if timeout is None:
        timeout = client.build_timeout
    if interval is None:
        interval = client.build_interval
    if timeout < 0 or interval < 0:
        raise lib_exc.InvalidConfiguration(
            'timeout and interval should be >= 0 or None, current values are: '
            '%(timeout)s, %(interval)s respectively.' % dict(timeout=timeout,
                                                             interval=interval)
        )

    start = int(time.time())
    _, node = client.show_node(node_id)
    while node[attr] != status:
        # (Removed a dead ``if status_curr == status: return`` branch:
        # inside this loop node[attr] != status always holds.)
        status_curr = node[attr]
        if int(time.time()) - start >= timeout:
            message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                       'within the required time (%(timeout)s s).' %
                       {'node_id': node_id,
                        'attr': attr,
                        'status': status,
                        # Bug fix: report the effective timeout, which may
                        # have been passed explicitly, rather than always
                        # client.build_timeout.
                        'timeout': timeout})
            message += ' Current state of %s: %s.' % (attr, status_curr)
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
        time.sleep(interval)
        _, node = client.show_node(node_id)
def wait_for_image_status(client, image_id, status):
    """Waits for an image to reach a given status.

    The client should have a show_image(image_id) method to get the image.
    The client should also have build_interval and build_timeout attributes.
    """
    if isinstance(client, images_v1_client.ImagesClient):
        # v1 'check_image' returns only the image metadata (headers),
        # avoiding the expensive payload transfer done by show_image.
        def _show_image_v1(image_id):
            resp = client.check_image(image_id)
            return common_image.get_image_meta_from_headers(resp)

        show_image = _show_image_v1
    else:
        show_image = client.show_image

    def _fetch_status():
        image = show_image(image_id)
        # Compute image client wraps the body in an 'image' element,
        # Glance image client does not.
        if 'image' in image:
            image = image['image']
        return image['status']

    last_status = 'An unknown status'
    start = int(time.time())
    while int(time.time()) - start < client.build_timeout:
        last_status = _fetch_status()
        if last_status == status:
            return
        lowered = last_status.lower()
        if lowered == 'killed':
            raise exceptions.ImageKilledException(image_id=image_id,
                                                  status=status)
        if lowered == 'error':
            raise exceptions.AddImageException(image_id=image_id)
        time.sleep(client.build_interval)

    message = ('Image %(image_id)s failed to reach %(status)s state '
               '(current state %(current_status)s) within the required '
               'time (%(timeout)s s).' % {'image_id': image_id,
                                          'status': status,
                                          'current_status': last_status,
                                          'timeout': client.build_timeout})
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise exceptions.TimeoutException(message)
def _get_remote_client(self, ip_address, username=None, private_key=None,
                       use_password=False):
    """Get a SSH client to a remote server

    @param ip_address the server floating or fixed IP address to use for
        ssh validation
    @param username name of the Linux account on the remote server
    @param private_key the SSH private key to use
    @return a RemoteClient object
    """
    username = username or CONF.validation.image_ssh_user
    # Set this with 'keypair' or others to log in with keypair or
    # username/password.
    if use_password:
        password = CONF.validation.image_ssh_password
        private_key = None
    else:
        password = None
        if private_key is None:
            private_key = self.keypair['private_key']
    linux_client = remote_client.RemoteClient(ip_address, username,
                                              pkey=private_key,
                                              password=password)
    try:
        linux_client.validate_authentication()
    except Exception as e:
        message = ('Initializing SSH connection to %(ip)s failed. '
                   'Error: %(error)s' % {'ip': ip_address, 'error': e})
        caller = test_utils.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        # Log with traceback and dump the console before re-raising so
        # the failure is debuggable.
        LOG.exception(message)
        self._log_console_output()
        raise
    return linux_client
def wait_for_query(client, name, rdatatype, found=True):
    """Query nameservers until the record of the given name and type is found.

    :param client: A QueryClient
    :param name: The record name for which to query
    :param rdatatype: The record type for which to query
    :param found: If True, wait until the record is found. Else, wait until
        the record disappears.
    """
    state = "found" if found else "removed"
    LOG.info("Waiting for record %s of type %s to be %s on nameservers %s",
             name, rdatatype, state, client.nameservers)
    start = int(time.time())
    while True:
        time.sleep(client.build_interval)

        responses = client.query(name, rdatatype)
        # Each response must agree with the expectation: an answer must be
        # present when waiting for "found", absent when waiting for
        # "removed".
        answers_ok = all(bool(r.answer) == found for r in responses)

        if not client.nameservers or answers_ok:
            LOG.info(
                "Record %s of type %s was successfully %s on nameservers "
                "%s", name, rdatatype, state, client.nameservers)
            return

        if int(time.time()) - start >= client.build_timeout:
            message = ('Record %(name)s of type %(rdatatype)s not %(state)s '
                       'on nameservers %(nameservers)s within the required '
                       'time (%(timeout)s s)' % {
                           'name': name,
                           'rdatatype': rdatatype,
                           'state': state,
                           'nameservers': client.nameservers,
                           'timeout': client.build_timeout
                       })
            caller = test_utils.find_test_caller()
            if caller:
                message = "(%s) %s" % (caller, message)
            raise lib_exc.TimeoutException(message)
def wait_for_bm_node_status(client, node_id, attr, status, timeout=None,
                            interval=None):
    """Waits for a baremetal node attribute to reach given status.

    :param client: an instance of tempest plugin BaremetalClient.
    :param node_id: identifier of the node.
    :param attr: node's API-visible attribute to check status of.
    :param status: desired status. Can be a list of statuses.
    :param timeout: the timeout after which the check is considered as
        failed. Defaults to client.build_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.

    The client should have a show_node(node_id) method to get the node.
    """
    timeout, interval = _determine_and_check_timeout_interval(
        timeout, client.build_timeout, interval, client.build_interval)

    # Accept a single status string as well as a list of them.
    if not isinstance(status, list):
        status = [status]

    def _attr_reached():
        return utils.get_node(client, node_id=node_id)[attr] in status

    if test_utils.call_until_true(_attr_reached, timeout, interval):
        return

    message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
               'within the required time (%(timeout)s s).' % {
                   'node_id': node_id,
                   'attr': attr,
                   'status': status,
                   'timeout': timeout
               })
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise lib_exc.TimeoutException(message)
def wait_for_query(client, name, rdatatype, found=True):
    """Query nameservers until the record of the given name and type is found.

    :param client: A QueryClient
    :param name: The record name for which to query
    :param rdatatype: The record type for which to query
    :param found: If True, wait until the record is found. Else, wait until
        the record disappears.
    """
    state = "found" if found else "removed"
    LOG.info("Waiting for record %s of type %s to be %s on nameservers %s",
             name, rdatatype, state, client.nameservers)
    deadline = int(time.time()) + client.build_timeout
    while True:
        time.sleep(client.build_interval)

        responses = client.query(name, rdatatype)
        has_answer = [bool(r.answer) for r in responses]
        # Waiting for "found": every server must answer.
        # Waiting for "removed": no server may answer.
        all_answers_good = all(has_answer) if found else not any(has_answer)

        if not client.nameservers or all_answers_good:
            LOG.info("Record %s of type %s was successfully %s on nameservers "
                     "%s", name, rdatatype, state, client.nameservers)
            return

        if int(time.time()) >= deadline:
            message = ('Record %(name)s of type %(rdatatype)s not %(state)s '
                       'on nameservers %(nameservers)s within the required '
                       'time (%(timeout)s s)' %
                       {'name': name, 'rdatatype': rdatatype, 'state': state,
                        'nameservers': client.nameservers,
                        'timeout': client.build_timeout})
            caller = test_utils.find_test_caller()
            if caller:
                message = "(%s) %s" % (caller, message)
            raise lib_exc.TimeoutException(message)
def ping_ip_address(self, ip_address, should_succeed=True,
                    ping_timeout=None, mtu=None):
    """Ping an IP and wait until the outcome matches *should_succeed*."""
    timeout = ping_timeout or CONF.validation.ping_timeout
    ping_cmd = ['ping', '-c1', '-w1']
    if mtu:
        # '-M do': don't fragment; '-s': ping receives just the size of
        # the ICMP payload derived from the requested MTU.
        ping_cmd.extend(
            ['-M', 'do',
             '-s', str(net_utils.get_ping_payload_size(mtu, 4))])
    ping_cmd.append(ip_address)

    def ping():
        proc = subprocess.Popen(ping_cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        succeeded = proc.returncode == 0
        return succeeded == should_succeed

    caller = test_utils.find_test_caller()
    expectation = 'reachable' if should_succeed else 'unreachable'
    LOG.debug("{caller} begins to ping {ip} in {timeout} sec and the"
              " expected result is {should_succeed}".format(
                  caller=caller, ip=ip_address, timeout=timeout,
                  should_succeed=expectation))
    result = test_utils.call_until_true(ping, timeout, 1)
    LOG.debug("{caller} finishes ping {ip} in {timeout} sec and the "
              "ping result is {result}".format(
                  caller=caller, ip=ip_address, timeout=timeout,
                  result='expected' if result else 'unexpected'))
    return result
def get_remote_client(self, ip_address, username=None, private_key=None):
    """Get a SSH client to a remote server

    @param ip_address the server floating or fixed IP address to use for
        ssh validation
    @param username name of the Linux account on the remote server
    @param private_key the SSH private key to use
    @return a RemoteClient object
    """
    username = username or CONF.validation.image_ssh_user
    # Set this with 'keypair' or others to log in with keypair or
    # username/password.
    use_keypair = CONF.validation.auth_method == 'keypair'
    if use_keypair:
        password = None
        if private_key is None:
            private_key = self.keypair['private_key']
    else:
        password = CONF.validation.image_ssh_password
        private_key = None
    linux_client = remote_client.RemoteClient(ip_address, username,
                                              pkey=private_key,
                                              password=password)
    try:
        linux_client.validate_authentication()
    except Exception as e:
        message = ("Initializing SSH connection to {ip} failed. "
                   "Error: {error}".format(ip=ip_address, error=e))
        caller = test_utils.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        # Record the traceback and server console before re-raising.
        LOG.exception(message)
        self._log_console_output()
        raise
    return linux_client
def _wait():
    """Poll one backup's status; stop the looping call when resolved."""
    try:
        res = cls.client.get_resource("backups", id)
    except exceptions.NotFound:
        # A 404 counts as success when deletion is what we wait for.
        if need_delete or "DELETED" in expected_status:
            LOG.info('Backup %s is deleted', id)
            raise loopingcall.LoopingCallDone()
        return

    current = res["backup"]["status"]
    if current in expected_status:
        LOG.info('Backup %s becomes %s', id, current)
        raise loopingcall.LoopingCallDone()

    # If backup status goes to FAILED but is not expected, stop
    # waiting
    if current == "FAILED" and "FAILED" not in expected_status:
        message = "Backup status is FAILED."
        caller = test_utils.find_test_caller()
        if caller:
            message = '({caller}) {message}'.format(caller=caller,
                                                    message=message)
        raise exceptions.UnexpectedResponseCode(message)
def wait_for_bm_node_status(client, node_id, attr, status, timeout=None,
                            interval=None):
    """Waits for a baremetal node attribute to reach given status.

    :param client: an instance of tempest plugin BaremetalClient.
    :param node_id: identifier of the node.
    :param attr: node's API-visible attribute to check status of.
    :param status: desired status. Can be a list of statuses.
    :param timeout: the timeout after which the check is considered as
        failed. Defaults to client.build_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.

    The client should have a show_node(node_id) method to get the node.
    """
    timeout, interval = _determine_and_check_timeout_interval(
        timeout, client.build_timeout, interval, client.build_interval)

    # A single status string is treated as a one-element list.
    if not isinstance(status, list):
        status = [status]

    def _check():
        node = utils.get_node(client, node_id=node_id)
        return node[attr] in status

    reached = test_utils.call_until_true(_check, timeout, interval)
    if not reached:
        message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                   'within the required time (%(timeout)s s).' %
                   {'node_id': node_id, 'attr': attr, 'status': status,
                    'timeout': timeout})
        caller = test_utils.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        raise lib_exc.TimeoutException(message)
def ping_ip_address(self, ip_address, should_succeed=True,
                    ping_timeout=None, mtu=None):
    """Ping an IP until reachability matches *should_succeed*."""
    timeout = ping_timeout or CONF.validation.ping_timeout
    cmd = ['ping', '-c1', '-w1']
    if mtu:
        # don't fragment
        cmd.append('-M')
        cmd.append('do')
        # ping receives just the size of ICMP payload
        cmd.append('-s')
        cmd.append(str(net_utils.get_ping_payload_size(mtu, 4)))
    cmd.append(ip_address)

    def ping():
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        return (proc.returncode == 0) == should_succeed

    caller = test_utils.find_test_caller()
    LOG.debug("{caller} begins to ping {ip} in {timeout} sec and the"
              " expected result is {should_succeed}"
              .format(caller=caller,
                      ip=ip_address,
                      timeout=timeout,
                      should_succeed=('reachable' if should_succeed
                                      else 'unreachable')))
    result = test_utils.call_until_true(ping, timeout, 1)
    LOG.debug("{caller} finishes ping {ip} in {timeout} sec and the "
              "ping result is {result}"
              .format(caller=caller,
                      ip=ip_address,
                      timeout=timeout,
                      result='expected' if result else 'unexpected'))
    return result
def ping_ip_address(self, ip_address, should_succeed=True,
                    ping_timeout=None, mtu=None):
    """Ping an IP and report whether the outcome matched expectations.

    the code is taken from tempest/scenario/manager.py in tempest git
    """
    timeout = ping_timeout or CONF.validation.ping_timeout
    ping_args = ['ping', '-c1', '-w1']
    if mtu:
        # '-M do': don't fragment; '-s': ping receives just the size of
        # the ICMP payload derived from the requested MTU.
        ping_args += ['-M', 'do',
                      '-s', str(net_utils.get_ping_payload_size(mtu, 4))]
    ping_args.append(ip_address)

    def ping():
        proc = subprocess.Popen(ping_args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        ok = proc.returncode == 0
        return ok == should_succeed

    caller = test_utils.find_test_caller()
    log_ctx = {
        'caller': caller,
        'ip': ip_address,
        'timeout': timeout,
        'should_succeed':
            'reachable' if should_succeed else 'unreachable'
    }
    LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
              ' expected result is %(should_succeed)s', log_ctx)
    result = test_utils.call_until_true(ping, timeout, 1)
    LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
              'ping result is %(result)s',
              {'caller': caller,
               'ip': ip_address,
               'timeout': timeout,
               'result': 'expected' if result else 'unexpected'})
    return result
def wait_for_resource_status(self, fetch, status, interval=None,
                             timeout=None):
    """
    @summary: Waits for a network resource to reach a status
    @param fetch: the callable to be used to query the resource status
    @type fetch: callable that takes no parameters and returns the resource
    @param status: the status that the resource has to reach
    @type status: String
    @param interval: the number of seconds to wait between each status query
    @type interval: Integer
    @param timeout: the maximum number of seconds to wait for the resource
        to reach the desired status
    @type timeout: Integer
    """
    # Fall back to the client-wide polling defaults when not given.
    if not interval:
        interval = self.build_interval
    if not timeout:
        timeout = self.build_timeout
    start_time = time.time()

    while time.time() - start_time <= timeout:
        resource = fetch()
        if resource['status'] == status:
            return
        time.sleep(interval)

    # At this point, the wait has timed out
    # NOTE(review): if ``timeout`` were ever negative the loop body never
    # runs and ``resource`` would be unbound here — presumably timeouts
    # are always positive; verify against callers.
    message = 'Resource %s' % (str(resource))
    message += ' failed to reach status %s' % status
    message += ' (current: %s)' % resource['status']
    message += ' within the required time %s' % timeout
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise lib_exc.TimeoutException(message)
def _log_request_start(self, method, req_url):
    """Log the start of an outgoing request when request tracing matches.

    :param method: HTTP method of the request about to be issued.
    :param req_url: full URL of the request.
    """
    caller_name = test_utils.find_test_caller()
    # find_test_caller() can return a falsy value when no test caller is
    # found (callers elsewhere use ``or "not found"``); guard it so
    # re.search() is never handed a non-string.
    if (self.trace_requests and caller_name and
            re.search(self.trace_requests, caller_name)):
        self.LOG.debug('Starting Request (%s): %s %s',
                       caller_name, method, req_url)
def test_find_test_caller_test_case(self):
    # Calling it from here should give us the method we're in.
    found = test_utils.find_test_caller()
    self.assertEqual('TestTestUtils:test_find_test_caller_test_case', found)
def setUp():
    """Return what find_test_caller() resolves to from inside setUp."""
    caller = test_utils.find_test_caller()
    return caller
def setUpClass(cls):  # noqa
    """Return what find_test_caller() resolves to from inside setUpClass."""
    caller = test_utils.find_test_caller()
    return caller
def tearDown():
    """Return what find_test_caller() resolves to from inside tearDown."""
    caller = test_utils.find_test_caller()
    return caller
def tearDownClass(cls):  # noqa
    """Return what find_test_caller() resolves to from tearDownClass."""
    caller = test_utils.find_test_caller()
    return caller
def wait_for_server_status(client, server_id, status, ready_wait=True,
                           extra_timeout=0, raise_on_error=True):
    """Waits for a server to reach a given status.

    :param client: compute servers client; must expose show_server(),
        build_interval and build_timeout.
    :param server_id: id of the server to poll.
    :param status: the target server status.
    :param ready_wait: when True, additionally wait until the server has no
        task state in progress before returning.
    :param extra_timeout: seconds added on top of client.build_timeout.
    :param raise_on_error: raise BuildErrorException if the server goes to
        ERROR.
    :raises exceptions.BuildErrorException: server entered ERROR state.
    :raises exceptions.TimeoutException: target status not reached in time.
    """

    # Extract the task state extension attribute, if exposed by the API.
    def _get_task_state(body):
        return body.get('OS-EXT-STS:task_state', None)

    # NOTE(afazekas): UNKNOWN status possible on ERROR
    # or in a very early stage.
    body = client.show_server(server_id)['server']
    old_status = server_status = body['status']
    old_task_state = task_state = _get_task_state(body)
    start_time = int(time.time())
    timeout = client.build_timeout + extra_timeout
    while True:
        # NOTE(afazekas): Now the BUILD status only reached
        # between the UNKNOWN->ACTIVE transition.
        # TODO(afazekas): enumerate and validate the stable status set
        if status == 'BUILD' and server_status != 'UNKNOWN':
            return
        if server_status == status:
            if ready_wait:
                if status == 'BUILD':
                    return
                # NOTE(afazekas): The instance is in "ready for action state"
                # when no task in progress
                if task_state is None:
                    # without state api extension 3 sec usually enough
                    time.sleep(CONF.compute.ready_wait)
                    return
            else:
                return

        time.sleep(client.build_interval)
        body = client.show_server(server_id)['server']
        server_status = body['status']
        task_state = _get_task_state(body)
        # Log every observed status/task-state transition for debugging.
        if (server_status != old_status) or (task_state != old_task_state):
            LOG.info('State transition "%s" ==> "%s" after %d second wait',
                     '/'.join((old_status, str(old_task_state))),
                     '/'.join((server_status, str(task_state))),
                     time.time() - start_time)
        if (server_status == 'ERROR') and raise_on_error:
            # Include the fault detail when the API provides one.
            if 'fault' in body:
                raise exceptions.BuildErrorException(body['fault'],
                                                     server_id=server_id)
            else:
                raise exceptions.BuildErrorException(server_id=server_id)

        timed_out = int(time.time()) - start_time >= timeout
        if timed_out:
            expected_task_state = 'None' if ready_wait else 'n/a'
            message = ('Server %(server_id)s failed to reach %(status)s '
                       'status and task state "%(expected_task_state)s" '
                       'within the required time (%(timeout)s s).'
                       % {'server_id': server_id,
                          'status': status,
                          'expected_task_state': expected_task_state,
                          'timeout': timeout})
            message += ' Current status: %s.' % server_status
            message += ' Current task state: %s.' % task_state
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
        old_status = server_status
        old_task_state = task_state
def find_test_caller(*args, **kwargs):
    """Deprecated alias for tempest.lib.common.utils.test_utils.

    Logs a deprecation warning and delegates to the new location.

    :returns: whatever test_utils.find_test_caller returns (the caller
        name, or None when no test caller is found).
    """
    LOG.warning("tempest.lib.common.utils.misc.find_test_caller is deprecated "
                "in favor of tempest.lib.common.utils.test_utils."
                "find_test_caller")
    # Bug fix: the shim previously dropped the delegated call's return
    # value, so this wrapper always returned None.
    return test_utils.find_test_caller(*args, **kwargs)