def __init__(self, parse_conf=True):
    """Initialize a configuration from a conf directory and conf file."""
    super(TempestConfigPrivate, self).__init__()
    fallback = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    # Environment variables take precedence over the built-in defaults.
    lookup_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                                self.DEFAULT_CONFIG_DIR)
    lookup_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
    path = os.path.join(lookup_dir, lookup_file)
    if not os.path.isfile(path):
        path = fallback

    # Only hand the file to oslo.config when the caller expects one to
    # exist; the config-file up-to-date checker relies on being able to
    # skip parsing.
    default_files = [path] if parse_conf else []
    cfg.CONF([], project='tempest', default_config_files=default_files)

    logging.setup('tempest')
    log = logging.getLogger('tempest')
    log.info("Using tempest config file %s" % path)

    register_opts()
    self._set_attrs()
    if parse_conf:
        cfg.CONF.log_opt_values(log, std_logging.DEBUG)
def __init__(self):
    """Initialize a configuration from a conf directory and conf file."""
    config_files = []
    failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    # Environment variables override defaults...
    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                              self.DEFAULT_CONFIG_DIR)
    conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
    path = os.path.join(conf_dir, conf_file)
    # Fall back to /etc/tempest only when neither env var was set and the
    # computed path is not a regular file.
    if not (os.path.isfile(path) or
            'TEMPEST_CONFIG_DIR' in os.environ or
            'TEMPEST_CONFIG' in os.environ):
        path = failsafe_path
    # A missing config file is reported to stderr but is not fatal:
    # oslo.config then runs purely on registered option defaults.
    if not os.path.exists(path):
        msg = "Config file %s not found" % path
        print(RuntimeError(msg), file=sys.stderr)
    else:
        config_files.append(path)

    # Parse CLI-free (empty argv) so only the config file / defaults apply.
    cfg.CONF([], project='tempest', default_config_files=config_files)

    logging.setup('tempest')
    LOG = logging.getLogger('tempest')
    LOG.info("Using tempest config file %s" % path)

    # Register every option group before any of the group reads below.
    register_compute_opts(cfg.CONF)
    register_identity_opts(cfg.CONF)
    register_image_opts(cfg.CONF)
    register_network_opts(cfg.CONF)
    register_volume_opts(cfg.CONF)
    register_object_storage_opts(cfg.CONF)
    register_orchestration_opts(cfg.CONF)
    register_dashboard_opts(cfg.CONF)
    register_boto_opts(cfg.CONF)
    register_compute_admin_opts(cfg.CONF)
    register_stress_opts(cfg.CONF)
    register_scenario_opts(cfg.CONF)
    register_service_available_opts(cfg.CONF)
    register_debug_opts(cfg.CONF)

    # Expose each parsed group as an attribute; hyphenated group names
    # require the subscript form.
    self.compute = cfg.CONF.compute
    self.identity = cfg.CONF.identity
    self.images = cfg.CONF.image
    self.network = cfg.CONF.network
    self.volume = cfg.CONF.volume
    self.object_storage = cfg.CONF['object-storage']
    self.orchestration = cfg.CONF.orchestration
    self.dashboard = cfg.CONF.dashboard
    self.boto = cfg.CONF.boto
    self.compute_admin = cfg.CONF['compute-admin']
    self.stress = cfg.CONF.stress
    self.scenario = cfg.CONF.scenario
    self.service_available = cfg.CONF.service_available
    self.debug = cfg.CONF.debug

    # Default the compute-admin credentials to the identity admin ones
    # when no dedicated compute-admin user is configured.
    if not self.compute_admin.username:
        self.compute_admin.username = self.identity.admin_username
        self.compute_admin.password = self.identity.admin_password
        self.compute_admin.tenant_name = self.identity.admin_tenant_name
def setUp(self, **kwargs):
    """Resolve the target test class/method and the setUpClass policy.

    Expects kwargs['test_method'] as a dotted path whose last segment is
    the method name; the remainder is imported as the test class.
    """
    parts = kwargs['test_method'].split('.')
    self.test_method = parts.pop()
    dotted_name = '.'.join(parts)
    self.klass = importutils.import_class(dotted_name)
    self.logger = logging.getLogger(dotted_name)

    # valid options are 'process', 'application' , 'action'
    self.class_setup_per = kwargs.get('class_setup_per',
                                      SetUpClassRunTime.process)
    SetUpClassRunTime.validate(self.class_setup_per)

    # 'application' scope runs the class fixture exactly once, up front.
    if self.class_setup_per == SetUpClassRunTime.application:
        self.klass.setUpClass()
    self.setupclass_called = False
def decision_maker():
    """Probe EC2/S3 connectivity and boto image-material availability.

    Returns a dict of flags used to decide which boto tests can run:
    'A_I_IMAGES_READY', 'S3_CAN_CONNECT_ERROR', 'EC2_CAN_CONNECT_ERROR'
    (the *_ERROR entries are None on success, else an error string).
    """
    A_I_IMAGES_READY = True  # ari,ami,aki
    S3_CAN_CONNECT_ERROR = None
    EC2_CAN_CONNECT_ERROR = None
    secret_matcher = re.compile("[A-Za-z0-9+/]{32,}")  # 40 in other system
    id_matcher = re.compile("[A-Za-z0-9]{20,}")

    def all_read(*args):
        # True iff every manifest path is readable by this process.
        return all(map(have_effective_read_access, args))

    config = tempest.config.TempestConfig()
    materials_path = config.boto.s3_materials_path
    ami_path = materials_path + os.sep + config.boto.ami_manifest
    aki_path = materials_path + os.sep + config.boto.aki_manifest
    ari_path = materials_path + os.sep + config.boto.ari_manifest

    A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)

    boto_logger = logging.getLogger('boto')
    level = boto_logger.logger.level
    boto_logger.logger.setLevel(orig_logging.CRITICAL)  # suppress logging
    # for these

    def _cred_sub_check(connection_data):
        # Only called on an error path: raise the most specific reason we
        # can determine; fall through to a generic authentication error.
        if not id_matcher.match(connection_data["aws_access_key_id"]):
            raise Exception("Invalid AWS access Key")
        if not secret_matcher.match(connection_data["aws_secret_access_key"]):
            raise Exception("Invalid AWS secret Key")
        raise Exception("Unknown (Authentication?) Error")

    openstack = tempest.clients.Manager()
    try:
        if urlparse.urlparse(config.boto.ec2_url).hostname is None:
            raise Exception("Failed to get hostname from the ec2_url")
        ec2client = openstack.ec2api_client
        try:
            ec2client.get_all_regions()
        except exception.BotoServerError as exc:
            if exc.error_code is None:
                raise Exception("EC2 target does not looks EC2 service")
            _cred_sub_check(ec2client.connection_data)
    except keystoneclient.exceptions.Unauthorized:
        EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
                                " faild to get them even by keystoneclient"
    except Exception as exc:
        EC2_CAN_CONNECT_ERROR = str(exc)

    try:
        if urlparse.urlparse(config.boto.s3_url).hostname is None:
            raise Exception("Failed to get hostname from the s3_url")
        s3client = openstack.s3_client
        try:
            s3client.get_bucket("^INVALID*#()@INVALID.")
        except exception.BotoServerError as exc:
            if exc.status == 403:
                _cred_sub_check(s3client.connection_data)
    # BUG FIX: the Unauthorized handler must come before the generic
    # Exception handler (as in the EC2 block above); keystoneclient's
    # Unauthorized subclasses Exception, so the original ordering made
    # this clause unreachable.
    except keystoneclient.exceptions.Unauthorized:
        S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
                               " faild to get them even by keystoneclient"
    except Exception as exc:
        S3_CAN_CONNECT_ERROR = str(exc)

    boto_logger.logger.setLevel(level)  # restore the caller's boto level
    return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
            'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
            'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
def setup_logging():
    """Configure tempest logging and (re)bind the module-level LOG.

    Must run before any code reads LOG from this module; the global is
    rebound to a logger named after this module.
    """
    global LOG
    logging.setup(__name__)
    LOG = logging.getLogger(__name__)
def __init__(self):
    """Initialize a configuration from a conf directory and conf file."""
    config_files = []
    failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    # Environment variables override defaults...
    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                              self.DEFAULT_CONFIG_DIR)
    conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
    path = os.path.join(conf_dir, conf_file)
    # Fall back to /etc/tempest only when neither env var was set and the
    # computed path is not a regular file.
    if not (os.path.isfile(path) or
            'TEMPEST_CONFIG_DIR' in os.environ or
            'TEMPEST_CONFIG' in os.environ):
        path = failsafe_path
    # A missing config file is reported to stderr but is not fatal:
    # oslo.config then runs purely on registered option defaults.
    if not os.path.exists(path):
        msg = "Config file %s not found" % path
        print(RuntimeError(msg), file=sys.stderr)
    else:
        config_files.append(path)

    # Parse CLI-free (empty argv) so only the config file / defaults apply.
    cfg.CONF([], project='tempest', default_config_files=config_files)

    logging.setup('tempest')
    LOG = logging.getLogger('tempest')
    LOG.info("Using tempest config file %s" % path)

    # Register every option group before any of the group reads below.
    register_opt_group(cfg.CONF, compute_group, ComputeGroup)
    register_opt_group(cfg.CONF, compute_features_group,
                       ComputeFeaturesGroup)
    register_opt_group(cfg.CONF, identity_group, IdentityGroup)
    register_opt_group(cfg.CONF, image_group, ImageGroup)
    register_opt_group(cfg.CONF, image_feature_group, ImageFeaturesGroup)
    register_opt_group(cfg.CONF, network_group, NetworkGroup)
    register_opt_group(cfg.CONF, volume_group, VolumeGroup)
    register_opt_group(cfg.CONF, volume_feature_group, VolumeFeaturesGroup)
    register_opt_group(cfg.CONF, object_storage_group, ObjectStoreGroup)
    register_opt_group(cfg.CONF, object_storage_feature_group,
                       ObjectStoreFeaturesGroup)
    register_opt_group(cfg.CONF, orchestration_group, OrchestrationGroup)
    register_opt_group(cfg.CONF, dashboard_group, DashboardGroup)
    register_opt_group(cfg.CONF, boto_group, BotoGroup)
    register_opt_group(cfg.CONF, compute_admin_group, ComputeAdminGroup)
    register_opt_group(cfg.CONF, stress_group, StressGroup)
    register_opt_group(cfg.CONF, scenario_group, ScenarioGroup)
    register_opt_group(cfg.CONF, service_available_group,
                       ServiceAvailableGroup)
    register_opt_group(cfg.CONF, debug_group, DebugGroup)

    # Expose each parsed group as an attribute; hyphenated group names
    # require the subscript form.
    self.compute = cfg.CONF.compute
    self.compute_feature_enabled = cfg.CONF['compute-feature-enabled']
    self.identity = cfg.CONF.identity
    self.images = cfg.CONF.image
    self.image_feature_enabled = cfg.CONF['image-feature-enabled']
    self.network = cfg.CONF.network
    self.network_feature_enabled = cfg.CONF['network-feature-enabled']
    self.volume = cfg.CONF.volume
    self.volume_feature_enabled = cfg.CONF['volume-feature-enabled']
    self.object_storage = cfg.CONF['object-storage']
    self.object_storage_feature_enabled = cfg.CONF[
        'object-storage-feature-enabled']
    self.orchestration = cfg.CONF.orchestration
    self.dashboard = cfg.CONF.dashboard
    self.boto = cfg.CONF.boto
    self.compute_admin = cfg.CONF['compute-admin']
    self.stress = cfg.CONF.stress
    self.scenario = cfg.CONF.scenario
    self.service_available = cfg.CONF.service_available
    self.debug = cfg.CONF.debug

    # Default the compute-admin credentials to the identity admin ones
    # when no dedicated compute-admin user is configured.
    if not self.compute_admin.username:
        self.compute_admin.username = self.identity.admin_username
        self.compute_admin.password = self.identity.admin_password
        self.compute_admin.tenant_name = self.identity.admin_tenant_name
def __init__(self, parse_conf=True):
    """Initialize a configuration from a conf directory and conf file."""
    super(TempestConfigPrivate, self).__init__()
    config_files = []
    failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    # Environment variables override defaults...
    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                              self.DEFAULT_CONFIG_DIR)
    conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
    path = os.path.join(conf_dir, conf_file)
    if not os.path.isfile(path):
        path = failsafe_path

    # only parse the config file if we expect one to exist. This is needed
    # to remove an issue with the config file up to date checker.
    if parse_conf:
        config_files.append(path)

    # Parse CLI-free (empty argv) so only the config file / defaults apply.
    cfg.CONF([], project='tempest', default_config_files=config_files)

    logging.setup('tempest')
    LOG = logging.getLogger('tempest')
    LOG.info("Using tempest config file %s" % path)

    # Register every option group before any of the group reads below.
    register_opt_group(cfg.CONF, compute_group, ComputeGroup)
    register_opt_group(cfg.CONF, compute_features_group,
                       ComputeFeaturesGroup)
    register_opt_group(cfg.CONF, identity_group, IdentityGroup)
    register_opt_group(cfg.CONF, identity_feature_group,
                       IdentityFeatureGroup)
    register_opt_group(cfg.CONF, image_group, ImageGroup)
    register_opt_group(cfg.CONF, image_feature_group, ImageFeaturesGroup)
    register_opt_group(cfg.CONF, network_group, NetworkGroup)
    register_opt_group(cfg.CONF, network_feature_group,
                       NetworkFeaturesGroup)
    register_opt_group(cfg.CONF, volume_group, VolumeGroup)
    register_opt_group(cfg.CONF, volume_feature_group, VolumeFeaturesGroup)
    register_opt_group(cfg.CONF, object_storage_group, ObjectStoreGroup)
    register_opt_group(cfg.CONF, object_storage_feature_group,
                       ObjectStoreFeaturesGroup)
    register_opt_group(cfg.CONF, orchestration_group, OrchestrationGroup)
    register_opt_group(cfg.CONF, telemetry_group, TelemetryGroup)
    register_opt_group(cfg.CONF, dashboard_group, DashboardGroup)
    register_opt_group(cfg.CONF, data_processing_group,
                       DataProcessingGroup)
    register_opt_group(cfg.CONF, boto_group, BotoGroup)
    register_opt_group(cfg.CONF, compute_admin_group, ComputeAdminGroup)
    register_opt_group(cfg.CONF, stress_group, StressGroup)
    register_opt_group(cfg.CONF, scenario_group, ScenarioGroup)
    register_opt_group(cfg.CONF, service_available_group,
                       ServiceAvailableGroup)
    register_opt_group(cfg.CONF, debug_group, DebugGroup)
    register_opt_group(cfg.CONF, baremetal_group, BaremetalGroup)
    register_opt_group(cfg.CONF, input_scenario_group, InputScenarioGroup)
    register_opt_group(cfg.CONF, cli_group, CLIGroup)

    # Expose each parsed group as an attribute; hyphenated group names
    # require the subscript form.
    self.compute = cfg.CONF.compute
    self.compute_feature_enabled = cfg.CONF['compute-feature-enabled']
    self.identity = cfg.CONF.identity
    self.identity_feature_enabled = cfg.CONF['identity-feature-enabled']
    self.images = cfg.CONF.image
    self.image_feature_enabled = cfg.CONF['image-feature-enabled']
    self.network = cfg.CONF.network
    self.network_feature_enabled = cfg.CONF['network-feature-enabled']
    self.volume = cfg.CONF.volume
    self.volume_feature_enabled = cfg.CONF['volume-feature-enabled']
    self.object_storage = cfg.CONF['object-storage']
    self.object_storage_feature_enabled = cfg.CONF[
        'object-storage-feature-enabled']
    self.orchestration = cfg.CONF.orchestration
    self.telemetry = cfg.CONF.telemetry
    self.dashboard = cfg.CONF.dashboard
    self.data_processing = cfg.CONF.data_processing
    self.boto = cfg.CONF.boto
    self.compute_admin = cfg.CONF['compute-admin']
    self.stress = cfg.CONF.stress
    self.scenario = cfg.CONF.scenario
    self.service_available = cfg.CONF.service_available
    self.debug = cfg.CONF.debug
    self.baremetal = cfg.CONF.baremetal
    self.input_scenario = cfg.CONF['input-scenario']
    self.cli = cfg.CONF.cli

    # Default the compute-admin credentials to the identity admin ones
    # when no dedicated compute-admin user is configured.
    if not self.compute_admin.username:
        self.compute_admin.username = self.identity.admin_username
        self.compute_admin.password = self.identity.admin_password
        self.compute_admin.tenant_name = self.identity.admin_tenant_name
def __init__(self, manager, max_runs=None, stop_on_error=False):
    """Record the run context and create a logger named after the
    concrete subclass (module.ClassName)."""
    qualified_name = "%s.%s" % (self.__module__, self.__class__.__name__)
    self.logger = logging.getLogger(qualified_name)
    self.manager = manager
    self.max_runs = max_runs
    self.stop_on_error = stop_on_error
class RestClient(object):
    """Base REST client: authenticates against Keystone (v2 or v3),
    resolves a service endpoint from the catalog, and issues HTTP
    requests with logging, 413 retry handling and error mapping.
    """

    # Serialization format; subclasses may override (e.g. "xml").
    TYPE = "json"

    LOG = logging.getLogger(__name__)

    def __init__(self, config, user, password, auth_url, tenant_name=None,
                 auth_version='v2'):
        self.config = config
        self.user = user
        self.password = password
        self.auth_url = auth_url
        self.tenant_name = tenant_name
        self.auth_version = auth_version

        # service / token / base_url are filled lazily by _set_auth().
        self.service = None
        self.token = None
        self.base_url = None
        self.region = {'compute': self.config.identity.region}
        self.endpoint_url = 'publicURL'
        self.headers = {'Content-Type': 'application/%s' % self.TYPE,
                        'Accept': 'application/%s' % self.TYPE}
        self.build_interval = config.compute.build_interval
        self.build_timeout = config.compute.build_timeout
        # Lower-cased header names used by response_checker() to decide
        # whether a 205 response carries entity headers.
        self.general_header_lc = set(('cache-control', 'connection',
                                      'date', 'pragma', 'trailer',
                                      'transfer-encoding', 'via',
                                      'warning'))
        self.response_header_lc = set(('accept-ranges', 'age', 'etag',
                                       'location', 'proxy-authenticate',
                                       'retry-after', 'server',
                                       'vary', 'www-authenticate'))
        dscv = self.config.identity.disable_ssl_certificate_validation
        self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)

    def _set_auth(self):
        """
        Sets the token and base_url used in requests based on the strategy
        type (Identity v2 or v3, chosen from self.auth_version).
        """
        if self.auth_version == 'v3':
            auth_func = self.identity_auth_v3
        else:
            auth_func = self.keystone_auth

        self.token, self.base_url = (
            auth_func(self.user, self.password, self.auth_url,
                      self.service, self.tenant_name))

    def clear_auth(self):
        """
        Can be called to clear the token and base_url so that the next
        request will fetch a new token and base_url.
        """
        self.token = None
        self.base_url = None

    def get_auth(self):
        """Returns the token of the current request or sets the token if
        none.
        """
        if not self.token:
            self._set_auth()

        return self.token

    def basic_auth(self, user, password, auth_url):
        """
        Provides authentication for the target API via a plain GET with
        X-Auth-User / X-Auth-Key headers (pre-Keystone style).
        """
        params = {}
        params['headers'] = {'User-Agent': 'Test-Client', 'X-Auth-User': user,
                             'X-Auth-Key': password}

        resp, body = self.http_obj.request(auth_url, 'GET', **params)
        try:
            return resp['x-auth-token'], resp['x-server-management-url']
        except Exception:
            raise

    def keystone_auth(self, user, password, auth_url, service, tenant_name):
        """
        Provides authentication via Keystone using v2 identity API.

        Returns (token, management_url) for the requested service, picking
        the configured region's endpoint when one is set for the service.
        """
        # Normalize URI to ensure /tokens is in it.
        if 'tokens' not in auth_url:
            auth_url = auth_url.rstrip('/') + '/tokens'

        creds = {
            'auth': {
                'passwordCredentials': {
                    'username': user,
                    'password': password,
                },
                'tenantName': tenant_name,
            }
        }

        headers = {'Content-Type': 'application/json'}
        body = json.dumps(creds)
        self._log_request('POST', auth_url, headers, body)
        resp, resp_body = self.http_obj.request(auth_url, 'POST',
                                                headers=headers, body=body)
        self._log_response(resp, resp_body)

        if resp.status == 200:
            try:
                auth_data = json.loads(resp_body)['access']
                token = auth_data['token']['id']
            except Exception as e:
                print("Failed to obtain token for user: %s" % e)
                raise

            mgmt_url = None
            for ep in auth_data['serviceCatalog']:
                if ep["type"] == service:
                    for _ep in ep['endpoints']:
                        if service in self.region and \
                                _ep['region'] == self.region[service]:
                            mgmt_url = _ep[self.endpoint_url]
                    if not mgmt_url:
                        # No region match: fall back to the first endpoint.
                        mgmt_url = ep['endpoints'][0][self.endpoint_url]
                    break

            if mgmt_url is None:
                raise exceptions.EndpointNotFound(service)

            return token, mgmt_url
        elif resp.status == 401:
            raise exceptions.AuthenticationFailure(user=user,
                                                   password=password,
                                                   tenant=tenant_name)
        raise exceptions.IdentityError('Unexpected status code {0}'.format(
            resp.status))

    def identity_auth_v3(self, user, password, auth_url, service,
                         project_name, domain_id='default'):
        """Provides authentication using Identity API v3.

        Returns (token, management_url); the token arrives in the
        X-Subject-Token response header rather than the body.
        """
        req_url = auth_url.rstrip('/') + '/auth/tokens'

        creds = {
            "auth": {
                "identity": {
                    "methods": ["password"],
                    "password": {
                        "user": {
                            "name": user, "password": password,
                            "domain": {"id": domain_id}
                        }
                    }
                },
                "scope": {
                    "project": {
                        "domain": {"id": domain_id},
                        "name": project_name
                    }
                }
            }
        }

        headers = {'Content-Type': 'application/json'}
        body = json.dumps(creds)
        resp, body = self.http_obj.request(req_url, 'POST',
                                           headers=headers, body=body)

        if resp.status == 201:
            try:
                token = resp['x-subject-token']
            except Exception:
                self.LOG.exception("Failed to obtain token using V3"
                                   " authentication (auth URL is '%s')" %
                                   req_url)
                raise

            catalog = json.loads(body)['token']['catalog']

            mgmt_url = None
            for service_info in catalog:
                if service_info['type'] != service:
                    continue  # this isn't the entry for us.

                endpoints = service_info['endpoints']

                # Look for an endpoint in the region if configured.
                if service in self.region:
                    region = self.region[service]

                    for ep in endpoints:
                        if ep['region'] != region:
                            continue

                        mgmt_url = ep['url']
                        # FIXME(blk-u): this isn't handling endpoint type
                        # (public, internal, admin).
                        break

                if not mgmt_url:
                    # Didn't find endpoint for region, use the first.

                    ep = endpoints[0]
                    mgmt_url = ep['url']
                    # FIXME(blk-u): this isn't handling endpoint type
                    # (public, internal, admin).

                break

            return token, mgmt_url

        elif resp.status == 401:
            raise exceptions.AuthenticationFailure(user=user,
                                                   password=password)
        else:
            self.LOG.error("Failed to obtain token using V3 authentication"
                           " (auth URL is '%s'), the response status is %s" %
                           (req_url, resp.status))
            raise exceptions.AuthenticationFailure(user=user,
                                                   password=password)

    # Thin HTTP-verb wrappers around request().
    def post(self, url, body, headers):
        return self.request('POST', url, headers, body)

    def get(self, url, headers=None):
        return self.request('GET', url, headers)

    def delete(self, url, headers=None):
        return self.request('DELETE', url, headers)

    def patch(self, url, body, headers):
        return self.request('PATCH', url, headers, body)

    def put(self, url, body, headers):
        return self.request('PUT', url, headers, body)

    def head(self, url, headers=None):
        return self.request('HEAD', url, headers)

    def copy(self, url, headers=None):
        return self.request('COPY', url, headers)

    def get_versions(self):
        """Return (resp, version-id list) from the service root URL."""
        resp, body = self.get('')
        body = self._parse_resp(body)
        body = body['versions']
        versions = map(lambda x: x['id'], body)
        return resp, versions

    def _log_request(self, method, req_url, headers, body):
        """Log an outgoing request, masking long auth tokens."""
        self.LOG.info('Request: ' + method + ' ' + req_url)
        if headers:
            print_headers = headers
            if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
                token = headers['X-Auth-Token']
                # Only mask values that actually look like tokens.
                if len(token) > 64 and TOKEN_CHARS_RE.match(token):
                    print_headers = headers.copy()
                    print_headers['X-Auth-Token'] = "<Token omitted>"
            self.LOG.debug('Request Headers: ' + str(print_headers))
        if body:
            str_body = str(body)
            length = len(str_body)
            self.LOG.debug('Request Body: ' + str_body[:2048])
            if length >= 2048:
                self.LOG.debug("Large body (%d) md5 summary: %s", length,
                               hashlib.md5(str_body).hexdigest())

    def _log_response(self, resp, resp_body):
        """Log a response; bodies over 2048 chars are summarized by md5."""
        status = resp['status']
        self.LOG.info("Response Status: " + status)
        headers = resp.copy()
        del headers['status']
        if len(headers):
            self.LOG.debug('Response Headers: ' + str(headers))
        if resp_body:
            str_body = str(resp_body)
            length = len(str_body)
            self.LOG.debug('Response Body: ' + str_body[:2048])
            if length >= 2048:
                self.LOG.debug("Large body (%d) md5 summary: %s", length,
                               hashlib.md5(str_body).hexdigest())

    def _parse_resp(self, body):
        # JSON by default; XML subclasses override this.
        return json.loads(body)

    def response_checker(self, method, url, headers, body, resp, resp_body):
        """Validate that the response body/headers are allowed for the
        status code per RFC 2616 (e.g. no body on 204/205/304/HEAD)."""
        if (resp.status in set((204, 205, 304)) or resp.status < 200 or
                method.upper() == 'HEAD') and resp_body:
            raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
        #NOTE(afazekas):
        # If the HTTP Status Code is 205
        #   'The response MUST NOT include an entity.'
        # A HTTP entity has an entity-body and an 'entity-header'.
        # In the HTTP response specification (Section 6) the 'entity-header'
        # 'generic-header' and 'response-header' are in OR relation.
        # All headers not in the above two group are considered as entity
        # header in every interpretation.

        if (resp.status == 205 and
            0 != len(set(resp.keys()) - set(('status', )) -
                     self.response_header_lc - self.general_header_lc)):
            raise exceptions.ResponseWithEntity()
        #NOTE(afazekas)
        # Now the swift sometimes (delete not empty container)
        # returns with non json error response, we can create new rest class
        # for swift.
        # Usually RFC2616 says error responses SHOULD contain an explanation.
        # The warning is normal for SHOULD/SHOULD NOT case

        # Likely it will cause an error
        if not resp_body and resp.status >= 400:
            self.LOG.warning("status >= 400 response with empty body")

    def _request(self, method, url,
                 headers=None, body=None):
        """A simple HTTP request interface."""
        req_url = "%s/%s" % (self.base_url, url)
        self._log_request(method, req_url, headers, body)
        resp, resp_body = self.http_obj.request(req_url, method,
                                                headers=headers, body=body)
        self._log_response(resp, resp_body)
        self.response_checker(method, url, headers, body, resp, resp_body)

        return resp, resp_body

    def request(self, method, url,
                headers=None, body=None):
        """Authenticated request with rate-limit (413) retry handling.

        Honors the server's Retry-After delay, up to MAX_RECURSION_DEPTH
        retries, then maps error statuses to tempest exceptions.
        """
        retry = 0
        if (self.token is None) or (self.base_url is None):
            self._set_auth()

        if headers is None:
            headers = {}
        headers['X-Auth-Token'] = self.token

        resp, resp_body = self._request(method, url,
                                        headers=headers, body=body)

        while (resp.status == 413 and
               'retry-after' in resp and
                not self.is_absolute_limit(
                    resp, self._parse_resp(resp_body)) and
                retry < MAX_RECURSION_DEPTH):
            retry += 1
            delay = int(resp['retry-after'])
            time.sleep(delay)
            resp, resp_body = self._request(method, url,
                                            headers=headers, body=body)
        self._error_checker(method, url, headers, body,
                            resp, resp_body)
        return resp, resp_body

    def _error_checker(self, method, url,
                       headers, body, resp, resp_body):
        """Translate HTTP error statuses into tempest exception types."""
        # NOTE(mtreinish): Check for httplib response from glance_http. The
        # object can't be used here because importing httplib breaks httplib2.
        # If another object from a class not imported were passed here as
        # resp this could possibly fail
        if str(type(resp)) == "<type 'instance'>":
            ctype = resp.getheader('content-type')
        else:
            try:
                ctype = resp['content-type']
            # NOTE(mtreinish): Keystone delete user responses doesn't have a
            # content-type header. (They don't have a body) So just pretend it
            # is set.
            except KeyError:
                ctype = 'application/json'

        # It is not an error response
        if resp.status < 400:
            return

        JSON_ENC = ['application/json; charset=UTF-8', 'application/json',
                    'application/json; charset=utf-8']
        # NOTE(mtreinish): This is for compatibility with Glance and swift
        # APIs. These are the return content types that Glance api v1
        # (and occasionally swift) are using.
        TXT_ENC = ['text/plain; charset=UTF-8', 'text/html; charset=UTF-8',
                   'text/plain; charset=utf-8']
        XML_ENC = ['application/xml', 'application/xml; charset=UTF-8']

        if ctype in JSON_ENC or ctype in XML_ENC:
            parse_resp = True
        elif ctype in TXT_ENC:
            parse_resp = False
        else:
            raise exceptions.RestClientException(str(resp.status))

        if resp.status == 401 or resp.status == 403:
            raise exceptions.Unauthorized()

        if resp.status == 404:
            raise exceptions.NotFound(resp_body)

        if resp.status == 400:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.BadRequest(resp_body)

        if resp.status == 409:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.Duplicate(resp_body)

        if resp.status == 413:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            if self.is_absolute_limit(resp, resp_body):
                raise exceptions.OverLimit(resp_body)
            else:
                raise exceptions.RateLimitExceeded(resp_body)

        if resp.status == 422:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.UnprocessableEntity(resp_body)

        if resp.status in (500, 501):
            message = resp_body
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
                #I'm seeing both computeFault and cloudServersFault come back.
                #Will file a bug to fix, but leave as is for now.
                if 'cloudServersFault' in resp_body:
                    message = resp_body['cloudServersFault']['message']
                elif 'computeFault' in resp_body:
                    message = resp_body['computeFault']['message']
                elif 'error' in resp_body:  # Keystone errors
                    message = resp_body['error']['message']
                    raise exceptions.IdentityError(message)
                elif 'message' in resp_body:
                    message = resp_body['message']

            raise exceptions.ComputeFault(message)

        if resp.status >= 400:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.RestClientException(str(resp.status))

    def is_absolute_limit(self, resp, resp_body):
        """True when a 413 reflects an absolute quota (not retryable)."""
        if (not isinstance(resp_body, collections.Mapping) or
                'retry-after' not in resp):
            return True
        over_limit = resp_body.get('overLimit', None)
        if not over_limit:
            return True
        return 'exceed' in over_limit.get('message', 'blabla')

    def wait_for_resource_deletion(self, id):
        """Waits for a resource to be deleted.

        Polls is_resource_deleted() every build_interval seconds; raises
        TimeoutException once build_timeout is exceeded.
        """
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(id):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)

    def is_resource_deleted(self, id):
        """
        Subclasses override with specific deletion detection.
        """
        message = ('"%s" does not implement is_resource_deleted'
                   % self.__class__.__name__)
        raise NotImplementedError(message)
def __init__(self):
    """Initialize a configuration from a conf directory and conf file."""
    config_files = []
    failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    # Environment variables override defaults...
    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                              self.DEFAULT_CONFIG_DIR)
    conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
    path = os.path.join(conf_dir, conf_file)
    # Fall back to /etc/tempest only when neither env var was set and the
    # computed path is not a regular file.
    if not (os.path.isfile(path) or
            'TEMPEST_CONFIG_DIR' in os.environ or
            'TEMPEST_CONFIG' in os.environ):
        path = failsafe_path
    # A missing config file is reported to stderr but is not fatal:
    # oslo.config then runs purely on registered option defaults.
    if not os.path.exists(path):
        msg = "Config file %s not found" % path
        print(RuntimeError(msg), file=sys.stderr)
    else:
        config_files.append(path)

    # Parse CLI-free (empty argv) so only the config file / defaults apply.
    cfg.CONF([], project='tempest', default_config_files=config_files)

    logging.setup('tempest')
    LOG = logging.getLogger('tempest')
    LOG.info("Using tempest config file %s" % path)

    # Register every option group before any of the group reads below.
    register_opt_group(cfg.CONF, compute_group, ComputeGroup)
    register_opt_group(cfg.CONF, compute_features_group,
                       ComputeFeaturesGroup)
    register_opt_group(cfg.CONF, identity_group, IdentityGroup)
    register_opt_group(cfg.CONF, image_group, ImageGroup)
    register_opt_group(cfg.CONF, image_feature_group, ImageFeaturesGroup)
    register_opt_group(cfg.CONF, network_group, NetworkGroup)
    register_opt_group(cfg.CONF, volume_group, VolumeGroup)
    register_opt_group(cfg.CONF, volume_feature_group, VolumeFeaturesGroup)
    register_opt_group(cfg.CONF, object_storage_group, ObjectStoreGroup)
    register_opt_group(cfg.CONF, object_storage_feature_group,
                       ObjectStoreFeaturesGroup)
    register_opt_group(cfg.CONF, orchestration_group, OrchestrationGroup)
    register_opt_group(cfg.CONF, dashboard_group, DashboardGroup)
    register_opt_group(cfg.CONF, boto_group, BotoGroup)
    register_opt_group(cfg.CONF, compute_admin_group, ComputeAdminGroup)
    register_opt_group(cfg.CONF, stress_group, StressGroup)
    register_opt_group(cfg.CONF, scenario_group, ScenarioGroup)
    register_opt_group(cfg.CONF, service_available_group,
                       ServiceAvailableGroup)
    register_opt_group(cfg.CONF, debug_group, DebugGroup)
    register_opt_group(cfg.CONF, magnetodb_group, MagnetoDBGroup)

    # Expose each parsed group as an attribute; hyphenated group names
    # require the subscript form.
    self.compute = cfg.CONF.compute
    self.compute_feature_enabled = cfg.CONF['compute-feature-enabled']
    self.identity = cfg.CONF.identity
    self.images = cfg.CONF.image
    self.image_feature_enabled = cfg.CONF['image-feature-enabled']
    self.network = cfg.CONF.network
    self.volume = cfg.CONF.volume
    self.volume_feature_enabled = cfg.CONF['volume-feature-enabled']
    self.object_storage = cfg.CONF['object-storage']
    self.object_storage_feature_enabled = cfg.CONF[
        'object-storage-feature-enabled']
    self.orchestration = cfg.CONF.orchestration
    self.dashboard = cfg.CONF.dashboard
    self.boto = cfg.CONF.boto
    self.compute_admin = cfg.CONF['compute-admin']
    self.stress = cfg.CONF.stress
    self.scenario = cfg.CONF.scenario
    self.service_available = cfg.CONF.service_available
    self.debug = cfg.CONF.debug
    self.magnetodb = cfg.CONF.magnetodb

    # Default the compute-admin credentials to the identity admin ones
    # when no dedicated compute-admin user is configured.
    if not self.compute_admin.username:
        self.compute_admin.username = self.identity.admin_username
        self.compute_admin.password = self.identity.admin_password
        self.compute_admin.tenant_name = self.identity.admin_tenant_name
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from tempest.api.network import base from tempest.common.utils import data_utils from tempest import config from tempest.openstack.common import log as logging from tempest import test CONF = config.CONF LOG = logging.getLogger(__name__) class NetworksTestPortsIPv6JSON(base.BaseNetworkTest): _interface = 'json' _ip_version = 6 @classmethod def setUpClass(cls): super(NetworksTestPortsIPv6JSON, cls).setUpClass() msg = None if not CONF.network_feature_enabled.ipv6: msg = "IPv6 is not enabled" elif not CONF.network_feature_enabled.ipv6_subnet_attributes: msg = "DHCPv6 attributes are not enabled." if msg:
def decision_maker():
    """Probe EC2/S3 connectivity and boto image-material availability.

    Returns a dict of flags used to decide which boto tests can run:
    'A_I_IMAGES_READY', 'S3_CAN_CONNECT_ERROR', 'EC2_CAN_CONNECT_ERROR'
    (the *_ERROR entries are None on success, else an error string).
    """
    A_I_IMAGES_READY = True  # ari,ami,aki
    S3_CAN_CONNECT_ERROR = None
    EC2_CAN_CONNECT_ERROR = None
    secret_matcher = re.compile("[A-Za-z0-9+/]{32,}")  # 40 in other system
    id_matcher = re.compile("[A-Za-z0-9]{20,}")

    def all_read(*args):
        # True iff every manifest path is readable by this process.
        return all(map(file_utils.have_effective_read_access, args))

    materials_path = CONF.boto.s3_materials_path
    ami_path = materials_path + os.sep + CONF.boto.ami_manifest
    aki_path = materials_path + os.sep + CONF.boto.aki_manifest
    ari_path = materials_path + os.sep + CONF.boto.ari_manifest

    A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)

    boto_logger = logging.getLogger('boto')
    level = boto_logger.logger.level
    # suppress logging for boto
    boto_logger.logger.setLevel(orig_logging.CRITICAL)

    def _cred_sub_check(connection_data):
        # Only called on an error path: raise the most specific reason we
        # can determine; fall through to a generic authentication error.
        if not id_matcher.match(connection_data["aws_access_key_id"]):
            raise Exception("Invalid AWS access Key")
        if not secret_matcher.match(connection_data["aws_secret_access_key"]):
            raise Exception("Invalid AWS secret Key")
        raise Exception("Unknown (Authentication?) Error")

    openstack = tempest.clients.Manager()
    try:
        if urlparse.urlparse(CONF.boto.ec2_url).hostname is None:
            raise Exception("Failed to get hostname from the ec2_url")
        ec2client = openstack.ec2api_client
        try:
            ec2client.get_all_regions()
        except exception.BotoServerError as exc:
            if exc.error_code is None:
                raise Exception("EC2 target does not looks EC2 service")
            _cred_sub_check(ec2client.connection_data)
    except keystoneclient.exceptions.Unauthorized:
        EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
                                " faild to get them even by keystoneclient"
    except Exception as exc:
        EC2_CAN_CONNECT_ERROR = str(exc)

    try:
        if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
            raise Exception("Failed to get hostname from the s3_url")
        s3client = openstack.s3_client
        try:
            s3client.get_bucket("^INVALID*#()@INVALID.")
        except exception.BotoServerError as exc:
            if exc.status == 403:
                _cred_sub_check(s3client.connection_data)
    # BUG FIX: the Unauthorized handler must come before the generic
    # Exception handler (as in the EC2 block above); keystoneclient's
    # Unauthorized subclasses Exception, so the original ordering made
    # this clause unreachable.
    except keystoneclient.exceptions.Unauthorized:
        S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
                               " faild to get them even by keystoneclient"
    except Exception as exc:
        S3_CAN_CONNECT_ERROR = str(exc)

    boto_logger.logger.setLevel(level)  # restore the caller's boto level
    return {
        'A_I_IMAGES_READY': A_I_IMAGES_READY,
        'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
        'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR
    }
class RestClient(object):
    """Base REST client: auth-aware HTTP verbs plus response validation.

    Subclasses set ``service``/``TYPE`` and use the verb helpers; all
    requests are routed through the auth provider and every response goes
    through ``response_checker``/``_error_checker``.
    """

    # Wire format of this client; "json" or "xml".
    TYPE = "json"

    # This is used by _parse_resp method
    # Redefine it for purposes of your xml service client
    # List should contain top-xml_tag-names of data, which is like list/array
    # For example, in keystone it is users, roles, tenants and services
    # All of it has children with same tag-names
    list_tags = []

    # This is used by _parse_resp method too
    # Used for selection of dict-like xmls,
    # like metadata for Vms in nova, and volumes in cinder
    dict_tags = ["metadata", ]

    LOG = logging.getLogger(__name__)

    def __init__(self, auth_provider):
        self.auth_provider = auth_provider
        self.endpoint_url = None
        self.service = None
        # The version of the API this client implements
        self.api_version = None
        self._skip_path = False
        self.build_interval = CONF.compute.build_interval
        self.build_timeout = CONF.compute.build_timeout
        # Lower-cased header names used by response_checker to decide
        # whether a 205 response illegally carries entity headers.
        self.general_header_lc = set(('cache-control', 'connection',
                                      'date', 'pragma', 'trailer',
                                      'transfer-encoding', 'via',
                                      'warning'))
        self.response_header_lc = set(('accept-ranges', 'age', 'etag',
                                       'location', 'proxy-authenticate',
                                       'retry-after', 'server',
                                       'vary', 'www-authenticate'))
        dscv = CONF.identity.disable_ssl_certificate_validation
        self.http_obj = http.ClosingHttp(
            disable_ssl_certificate_validation=dscv)

    def _get_type(self):
        return self.TYPE

    def get_headers(self, accept_type=None, send_type=None):
        """Build Content-Type/Accept headers, defaulting to this TYPE."""
        if accept_type is None:
            accept_type = self._get_type()
        if send_type is None:
            send_type = self._get_type()
        return {'Content-Type': 'application/%s' % send_type,
                'Accept': 'application/%s' % accept_type}

    def __str__(self):
        STRING_LIMIT = 80
        str_format = ("config:%s, service:%s, base_url:%s, "
                      "filters: %s, build_interval:%s, build_timeout:%s"
                      "\ntoken:%s..., \nheaders:%s...")
        return str_format % (CONF, self.service, self.base_url,
                             self.filters, self.build_interval,
                             self.build_timeout,
                             str(self.token)[0:STRING_LIMIT],
                             str(self.get_headers())[0:STRING_LIMIT])

    def _get_region(self, service):
        """
        Returns the region for a specific service
        """
        service_region = None
        for cfgname in dir(CONF._config):
            # Find all config.FOO.catalog_type and assume FOO is a service.
            cfg = getattr(CONF, cfgname)
            catalog_type = getattr(cfg, 'catalog_type', None)
            if catalog_type == service:
                service_region = getattr(cfg, 'region', None)
        if not service_region:
            service_region = CONF.identity.region
        return service_region

    def _get_endpoint_type(self, service):
        """
        Returns the endpoint type for a specific service
        """
        # If the client requests a specific endpoint type, then be it
        if self.endpoint_url:
            return self.endpoint_url
        endpoint_type = None
        for cfgname in dir(CONF._config):
            # Find all config.FOO.catalog_type and assume FOO is a service.
            cfg = getattr(CONF, cfgname)
            catalog_type = getattr(cfg, 'catalog_type', None)
            if catalog_type == service:
                endpoint_type = getattr(cfg, 'endpoint_type', 'publicURL')
                break
        # Special case for compute v3 service which hasn't its own
        # configuration group
        else:
            if service == CONF.compute.catalog_v3_type:
                endpoint_type = CONF.compute.endpoint_type
        return endpoint_type

    @property
    def user(self):
        return self.auth_provider.credentials.username

    @property
    def user_id(self):
        return self.auth_provider.credentials.user_id

    @property
    def tenant_name(self):
        return self.auth_provider.credentials.tenant_name

    @property
    def tenant_id(self):
        return self.auth_provider.credentials.tenant_id

    @property
    def password(self):
        return self.auth_provider.credentials.password

    @property
    def base_url(self):
        return self.auth_provider.base_url(filters=self.filters)

    @property
    def token(self):
        return self.auth_provider.get_token()

    @property
    def filters(self):
        # Filters handed to the auth provider for endpoint selection.
        _filters = dict(
            service=self.service,
            endpoint_type=self._get_endpoint_type(self.service),
            region=self._get_region(self.service)
        )
        if self.api_version is not None:
            _filters['api_version'] = self.api_version
        if self._skip_path:
            _filters['skip_path'] = self._skip_path
        return _filters

    def skip_path(self):
        """
        When set, ignore the path part of the base URL from the catalog
        """
        self._skip_path = True

    def reset_path(self):
        """
        When reset, use the base URL from the catalog as-is
        """
        self._skip_path = False

    @classmethod
    def expected_success(cls, expected_code, read_code):
        """Assert that read_code matches expected_code (int or list)."""
        assert_msg = ("This function only allowed to use for HTTP status"
                      "codes which explicitly defined in the RFC 2616. {0}"
                      " is not a defined Success Code!").format(expected_code)
        if isinstance(expected_code, list):
            for code in expected_code:
                assert code in HTTP_SUCCESS, assert_msg
        else:
            assert expected_code in HTTP_SUCCESS, assert_msg

        # NOTE(afazekas): the http status code above 400 is processed by
        # the _error_checker method
        if read_code < 400:
            pattern = """Unexpected http success status code {0},
                         The expected status code is {1}"""
            if ((not isinstance(expected_code, list) and
                 (read_code != expected_code)) or
                (isinstance(expected_code, list) and
                 (read_code not in expected_code))):
                details = pattern.format(read_code, expected_code)
                raise exceptions.InvalidHttpSuccessCode(details)

    def post(self, url, body, headers=None, extra_headers=False):
        return self.request('POST', url, extra_headers, headers, body)

    def get(self, url, headers=None, extra_headers=False):
        return self.request('GET', url, extra_headers, headers)

    def delete(self, url, headers=None, body=None, extra_headers=False):
        return self.request('DELETE', url, extra_headers, headers, body)

    def patch(self, url, body, headers=None, extra_headers=False):
        return self.request('PATCH', url, extra_headers, headers, body)

    def put(self, url, body, headers=None, extra_headers=False):
        return self.request('PUT', url, extra_headers, headers, body)

    def head(self, url, headers=None, extra_headers=False):
        return self.request('HEAD', url, extra_headers, headers)

    def copy(self, url, headers=None, extra_headers=False):
        return self.request('COPY', url, extra_headers, headers)

    def get_versions(self):
        """List the API version ids advertised at the service root."""
        resp, body = self.get('')
        body = self._parse_resp(body)
        # BUG FIX: list comprehension instead of map() so the same (list)
        # value is returned on both Python 2 and Python 3.
        versions = [item['id'] for item in body]
        return resp, versions

    def _get_request_id(self, resp):
        # Different services use different request-id header names.
        for i in ('x-openstack-request-id', 'x-compute-request-id'):
            if i in resp:
                return resp[i]
        return ""

    # BUG FIX: mutable default argument {} replaced with None.
    def _log_request_start(self, method, req_url, req_headers=None,
                           req_body=None):
        caller_name = misc_utils.find_test_caller()
        trace_regex = CONF.debug.trace_requests
        if trace_regex and re.search(trace_regex, caller_name):
            self.LOG.debug('Starting Request (%s): %s %s' %
                           (caller_name, method, req_url))

    # BUG FIX: mutable default argument {} replaced with None; the body
    # mutates req_headers (token scrubbing), so a shared default dict was
    # unsafe.
    def _log_request(self, method, req_url, resp,
                     secs="", req_headers=None,
                     req_body=None, resp_body=None):
        if req_headers is None:
            req_headers = {}
        # if we have the request id, put it in the right part of the log
        extra = dict(request_id=self._get_request_id(resp))
        # NOTE(sdague): while we still have 6 callers to this function
        # we're going to just provide work around on who is actually
        # providing timings by gracefully adding no content if they don't.
        # Once we're down to 1 caller, clean this up.
        caller_name = misc_utils.find_test_caller()
        if secs:
            secs = " %.3fs" % secs
        self.LOG.info(
            'Request (%s): %s %s %s%s' % (
                caller_name,
                resp['status'],
                method,
                req_url,
                secs),
            extra=extra)

        # We intentionally duplicate the info content because in a parallel
        # world this is important to match
        trace_regex = CONF.debug.trace_requests
        if trace_regex and re.search(trace_regex, caller_name):
            if 'X-Auth-Token' in req_headers:
                # never write the real token to the debug log
                req_headers['X-Auth-Token'] = '<omitted>'
            log_fmt = """Request (%s): %s %s %s%s
    Request - Headers: %s
        Body: %s
    Response - Headers: %s
        Body: %s"""

            # NOTE: ''.join(filter(...)) keeps only printable characters and
            # yields a str on both Python 2 and Python 3.
            self.LOG.debug(
                log_fmt % (
                    caller_name,
                    resp['status'],
                    method,
                    req_url, secs,
                    str(req_headers),
                    ''.join(filter(lambda x: x in string.printable,
                                   str(req_body)[:2048])),
                    str(resp),
                    ''.join(filter(lambda x: x in string.printable,
                                   str(resp_body)[:2048]))),
                extra=extra)

    def _parse_resp(self, body):
        """Deserialize a response body per this client's TYPE."""
        # BUG FIX: '==' instead of 'is' for string comparison; identity of
        # string literals is a CPython interning accident.
        if self._get_type() == "json":
            body = json.loads(body)

            # We assume, that if the first value of the deserialized body's
            # item set is a dict or a list, that we just return the first value
            # of deserialized body.
            # Essentially "cutting out" the first placeholder element in a body
            # that looks like this:
            #
            #  {
            #    "users": [
            #      ...
            #    ]
            #  }
            try:
                # Ensure there are not more than one top-level keys
                if len(body.keys()) > 1:
                    return body
                # Just return the "wrapped" element
                # BUG FIX: list(...) so the items view is subscriptable on
                # Python 3 as well (no-op on Python 2).
                first_key, first_item = list(body.items())[0]
                if isinstance(first_item, (dict, list)):
                    return first_item
            except (ValueError, IndexError):
                pass
            return body
        elif self._get_type() == "xml":
            element = etree.fromstring(body)
            if any(s in element.tag for s in self.dict_tags):
                # Parse dictionary-like xmls (metadata, etc)
                dictionary = {}
                for el in element.getchildren():
                    dictionary[u"%s" % el.get("key")] = u"%s" % el.text
                return dictionary
            if any(s in element.tag for s in self.list_tags):
                # Parse list-like xmls (users, roles, etc)
                array = []
                for child in element.getchildren():
                    array.append(common.xml_to_json(child))
                return array

            # Parse one-item-like xmls (user, role, etc)
            return common.xml_to_json(element)

    def response_checker(self, method, resp, resp_body):
        """Sanity-check a response against the HTTP RFC rules."""
        if (resp.status in set((204, 205, 304)) or resp.status < 200 or
                method.upper() == 'HEAD') and resp_body:
            raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
        # NOTE(afazekas):
        # If the HTTP Status Code is 205
        #   'The response MUST NOT include an entity.'
        # A HTTP entity has an entity-body and an 'entity-header'.
        # In the HTTP response specification (Section 6) the 'entity-header'
        # 'generic-header' and 'response-header' are in OR relation.
        # All headers not in the above two group are considered as entity
        # header in every interpretation.

        if (resp.status == 205 and
            0 != len(set(resp.keys()) - set(('status',)) -
                     self.response_header_lc - self.general_header_lc)):
            raise exceptions.ResponseWithEntity()
        # NOTE(afazekas)
        # Now the swift sometimes (delete not empty container)
        # returns with non json error response, we can create new rest class
        # for swift.
        # Usually RFC2616 says error responses SHOULD contain an explanation.
        # The warning is normal for SHOULD/SHOULD NOT case
        # Likely it will cause an error
        if method != 'HEAD' and not resp_body and resp.status >= 400:
            self.LOG.warning("status >= 400 response with empty body")

    def _request(self, method, url, headers=None, body=None):
        """A simple HTTP request interface."""
        # Authenticate the request with the auth provider
        req_url, req_headers, req_body = self.auth_provider.auth_request(
            method, url, headers, body, self.filters)
        # Do the actual request, and time it
        start = time.time()
        self._log_request_start(method, req_url)
        resp, resp_body = self.http_obj.request(
            req_url, method, headers=req_headers, body=req_body)
        end = time.time()
        self._log_request(method, req_url, resp, secs=(end - start),
                          req_headers=req_headers, req_body=req_body,
                          resp_body=resp_body)
        # Verify HTTP response codes
        self.response_checker(method, resp, resp_body)

        return resp, resp_body

    def request(self, method, url, extra_headers=False,
                headers=None, body=None):
        """Issue a request, retrying bounded 413 rate-limits, then check it."""
        # if extra_headers is True
        # default headers would be added to headers
        retry = 0

        if headers is None:
            # NOTE(vponomaryov): if some client do not need headers,
            # it should explicitly pass empty dict
            headers = self.get_headers()
        elif extra_headers:
            try:
                headers = headers.copy()
                headers.update(self.get_headers())
            except (ValueError, TypeError):
                headers = self.get_headers()

        resp, resp_body = self._request(method, url,
                                        headers=headers, body=body)

        while (resp.status == 413 and
               'retry-after' in resp and
                not self.is_absolute_limit(
                    resp, self._parse_resp(resp_body)) and
                retry < MAX_RECURSION_DEPTH):
            retry += 1
            delay = int(resp['retry-after'])
            time.sleep(delay)
            resp, resp_body = self._request(method, url,
                                            headers=headers, body=body)
        self._error_checker(method, url, headers, body,
                            resp, resp_body)
        return resp, resp_body

    def _error_checker(self, method, url,
                       headers, body, resp, resp_body):
        """Translate 4xx/5xx responses into typed tempest exceptions."""
        # NOTE(mtreinish): Check for httplib response from glance_http. The
        # object can't be used here because importing httplib breaks httplib2.
        # If another object from a class not imported were passed here as
        # resp this could possibly fail
        if str(type(resp)) == "<type 'instance'>":
            ctype = resp.getheader('content-type')
        else:
            try:
                ctype = resp['content-type']
            # NOTE(mtreinish): Keystone delete user responses doesn't have a
            # content-type header. (They don't have a body) So just pretend it
            # is set.
            except KeyError:
                ctype = 'application/json'

        # It is not an error response
        if resp.status < 400:
            return

        JSON_ENC = ['application/json', 'application/json; charset=utf-8']
        # NOTE(mtreinish): This is for compatibility with Glance and swift
        # APIs. These are the return content types that Glance api v1
        # (and occasionally swift) are using.
        TXT_ENC = ['text/plain', 'text/html', 'text/html; charset=utf-8',
                   'text/plain; charset=utf-8']
        XML_ENC = ['application/xml', 'application/xml; charset=utf-8']

        if ctype.lower() in JSON_ENC or ctype.lower() in XML_ENC:
            parse_resp = True
        elif ctype.lower() in TXT_ENC:
            parse_resp = False
        else:
            raise exceptions.InvalidContentType(str(resp.status))

        if resp.status == 401 or resp.status == 403:
            raise exceptions.Unauthorized(resp_body)

        if resp.status == 404:
            raise exceptions.NotFound(resp_body)

        if resp.status == 400:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.BadRequest(resp_body)

        if resp.status == 409:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.Conflict(resp_body)

        if resp.status == 413:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            if self.is_absolute_limit(resp, resp_body):
                raise exceptions.OverLimit(resp_body)
            else:
                raise exceptions.RateLimitExceeded(resp_body)

        if resp.status == 422:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.UnprocessableEntity(resp_body)

        if resp.status in (500, 501):
            message = resp_body
            if parse_resp:
                try:
                    resp_body = self._parse_resp(resp_body)
                except ValueError:
                    # If response body is a non-json string message.
                    # Use resp_body as is and raise InvalidResponseBody
                    # exception.
                    raise exceptions.InvalidHTTPResponseBody(message)
                else:
                    if isinstance(resp_body, dict):
                        # I'm seeing both computeFault
                        # and cloudServersFault come back.
                        # Will file a bug to fix, but leave as is for now.
                        if 'cloudServersFault' in resp_body:
                            message = resp_body['cloudServersFault']['message']
                        elif 'computeFault' in resp_body:
                            message = resp_body['computeFault']['message']
                        elif 'error' in resp_body:  # Keystone errors
                            message = resp_body['error']['message']
                            raise exceptions.IdentityError(message)
                        elif 'message' in resp_body:
                            message = resp_body['message']
                    else:
                        message = resp_body

            raise exceptions.ServerFault(message)

        if resp.status >= 400:
            raise exceptions.UnexpectedResponseCode(str(resp.status))

    def is_absolute_limit(self, resp, resp_body):
        """True when a 413 is a quota (absolute) limit, not a rate limit."""
        if (not isinstance(resp_body, collections.Mapping) or
                'retry-after' not in resp):
            return True
        # BUG FIX: '==' instead of 'is' for string comparison.
        if self._get_type() == "json":
            over_limit = resp_body.get('overLimit', None)
            if not over_limit:
                return True
            return 'exceed' in over_limit.get('message', 'blabla')
        elif self._get_type() == "xml":
            return 'exceed' in resp_body.get('message', 'blabla')

    def wait_for_resource_deletion(self, id):
        """Waits for a resource to be deleted."""
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(id):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)

    def is_resource_deleted(self, id):
        """
        Subclasses override with specific deletion detection.
        """
        message = ('"%s" does not implement is_resource_deleted'
                   % self.__class__.__name__)
        raise NotImplementedError(message)

    @classmethod
    def validate_response(cls, schema, resp, body):
        """Validate a response's status, body and headers against a schema."""
        # Only check the response if the status code is a success code
        # TODO(cyeoh): Eventually we should be able to verify that a failure
        # code if it exists is something that we expect. This is explicitly
        # declared in the V3 API and so we should be able to export this in
        # the response schema. For now we'll ignore it.
        if resp.status in HTTP_SUCCESS:
            cls.expected_success(schema['status_code'], resp.status)

            # Check the body of a response
            body_schema = schema.get('response_body')
            if body_schema:
                try:
                    jsonschema.validate(body, body_schema)
                except jsonschema.ValidationError as ex:
                    msg = ("HTTP response body is invalid (%s)") % ex
                    raise exceptions.InvalidHTTPResponseBody(msg)
            else:
                if body:
                    msg = ("HTTP response body should not exist (%s)") % body
                    raise exceptions.InvalidHTTPResponseBody(msg)

            # Check the header of a response
            header_schema = schema.get('response_header')
            if header_schema:
                try:
                    jsonschema.validate(resp, header_schema)
                except jsonschema.ValidationError as ex:
                    msg = ("HTTP response header is invalid (%s)") % ex
                    raise exceptions.InvalidHTTPResponseHeader(msg)
def setUpClass(cls):
    # Attach a logger named after the fully qualified test case before
    # delegating to the regular base-class setup.
    case_name = cls._get_full_case_name()
    cls.LOG = logging.getLogger(case_name)
    super(BaseTestCase, cls).setUpClass()
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from tempest import cli
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging

CONF = config.CONF

LOG = logging.getLogger(__name__)


class SimpleReadOnlyKeystoneClientTest(cli.ClientTestBase):
    """Basic, read-only tests for Keystone CLI client.

    Checks return values and output of read-only commands.
    These tests do not presume any content, nor do they create
    their own. They only verify the structure of output if present.
    """

    def test_admin_fake_action(self):
        # Asking the keystone CLI for a nonexistent subcommand must
        # surface CommandFailed rather than succeed silently.
        bogus_action = 'this-does-not-exist'
        self.assertRaises(exceptions.CommandFailed,
                          self.keystone,
                          bogus_action)
def __init__(self, parse_conf=True):
    """Initialize a configuration from a conf directory and conf file."""
    super(TempestConfigPrivate, self).__init__()
    config_files = []
    failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    # Environment variables override defaults...
    conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                              self.DEFAULT_CONFIG_DIR)
    conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
    path = os.path.join(conf_dir, conf_file)
    if not os.path.isfile(path):
        path = failsafe_path

    # only parse the config file if we expect one to exist. This is needed
    # to remove an issue with the config file up to date checker.
    if parse_conf:
        config_files.append(path)

    cfg.CONF([], project='tempest', default_config_files=config_files)
    logging.setup('tempest')
    LOG = logging.getLogger('tempest')
    LOG.info("Using tempest config file %s" % path)

    # Register every option group with oslo.config, data-driven.
    for group, group_opts in [
        (compute_group, ComputeGroup),
        (compute_features_group, ComputeFeaturesGroup),
        (identity_group, IdentityGroup),
        (identity_feature_group, IdentityFeatureGroup),
        (image_group, ImageGroup),
        (image_feature_group, ImageFeaturesGroup),
        (network_group, NetworkGroup),
        (network_feature_group, NetworkFeaturesGroup),
        (volume_group, VolumeGroup),
        (volume_feature_group, VolumeFeaturesGroup),
        (object_storage_group, ObjectStoreGroup),
        (object_storage_feature_group, ObjectStoreFeaturesGroup),
        (orchestration_group, OrchestrationGroup),
        (telemetry_group, TelemetryGroup),
        (dashboard_group, DashboardGroup),
        (data_processing_group, DataProcessingGroup),
        (boto_group, BotoGroup),
        (compute_admin_group, ComputeAdminGroup),
        (stress_group, StressGroup),
        (scenario_group, ScenarioGroup),
        (service_available_group, ServiceAvailableGroup),
        (debug_group, DebugGroup),
        (baremetal_group, BaremetalGroup),
        (input_scenario_group, InputScenarioGroup),
        (cli_group, CLIGroup),
    ]:
        register_opt_group(cfg.CONF, group, group_opts)

    # Mirror each registered section onto an attribute of this object;
    # sections whose names contain '-' get an underscored attribute name.
    for attr, section in [
        ('compute', 'compute'),
        ('compute_feature_enabled', 'compute-feature-enabled'),
        ('identity', 'identity'),
        ('identity_feature_enabled', 'identity-feature-enabled'),
        ('image', 'image'),
        ('image_feature_enabled', 'image-feature-enabled'),
        ('network', 'network'),
        ('network_feature_enabled', 'network-feature-enabled'),
        ('volume', 'volume'),
        ('volume_feature_enabled', 'volume-feature-enabled'),
        ('object_storage', 'object-storage'),
        ('object_storage_feature_enabled',
         'object-storage-feature-enabled'),
        ('orchestration', 'orchestration'),
        ('telemetry', 'telemetry'),
        ('dashboard', 'dashboard'),
        ('data_processing', 'data_processing'),
        ('boto', 'boto'),
        ('compute_admin', 'compute-admin'),
        ('stress', 'stress'),
        ('scenario', 'scenario'),
        ('service_available', 'service_available'),
        ('debug', 'debug'),
        ('baremetal', 'baremetal'),
        ('input_scenario', 'input-scenario'),
        ('cli', 'cli'),
    ]:
        setattr(self, attr, cfg.CONF[section])

    # The compute admin credentials fall back to the identity admin ones
    # when they are not configured explicitly.
    if not self.compute_admin.username:
        self.compute_admin.username = self.identity.admin_username
        self.compute_admin.password = self.identity.admin_password
        self.compute_admin.tenant_name = self.identity.admin_tenant_name