def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True):
    """Run a command on a remote host over an established SSH connection.

    The command and its output are password-masked before logging or
    being placed in exceptions.

    :param ssh: connected SSH client exposing ``exec_command``.
    :param cmd: command string to run remotely.
    :param process_input: unsupported; must be falsy.
    :param addl_env: unsupported; must be falsy.
    :param check_exit_code: raise on a non-zero remote exit status.
    :returns: tuple ``(stdout, stderr)`` with passwords masked.
    :raises InvalidArgumentError: if addl_env or process_input is given.
    :raises ProcessExecutionError: on unexpected non-zero exit status.
    """
    masked_cmd = strutils.mask_password(cmd)
    LOG.debug('Running cmd (SSH): %s', masked_cmd)

    # Neither environment overrides nor stdin data can be pushed
    # through this SSH execution path.
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    masked_stdout = strutils.mask_password(stdout_stream.read())
    masked_stderr = strutils.mask_password(stderr_stream.read())
    stdin_stream.close()

    exit_status = channel.recv_exit_status()
    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=masked_stdout,
                                        stderr=masked_stderr,
                                        cmd=masked_cmd)

    return (masked_stdout, masked_stderr)
def edit(self, req, id, body, tenant_id):
    """Updates the instance to set or unset one or more attributes.

    :param req: the WSGI request object.
    :param id: identifier of the instance to modify.
    :param body: parsed request body carrying an 'instance' dict.
    :param tenant_id: tenant owning the instance.
    :returns: wsgi.Result with HTTP 202.
    """
    LOG.info(_LI("Editing instance for tenant id %s."), tenant_id)
    # Mask credentials before logging request contents.
    LOG.debug("req: %s", strutils.mask_password(req))
    LOG.debug("body: %s", strutils.mask_password(body))
    context = req.environ[wsgi.CONTEXT_KEY]

    instance = models.Instance.load(context, id)

    payload = body['instance']
    # 'slave_of' is the legacy spelling of 'replica_of'; presence of
    # either key requests a replica detach.
    args = {
        'detach_replica': ('replica_of' in payload or
                           'slave_of' in payload),
    }
    if 'name' in payload:
        args['name'] = payload['name']
    if 'configuration' in payload:
        args['configuration_id'] = self._configuration_parse(context, body)
    if 'datastore_version' in payload:
        args['datastore_version'] = payload.get('datastore_version')

    self._modify_instance(context, req, instance, **args)
    return wsgi.Result(None, 202)
def _gpfs_ssh_execute(self, ssh, cmd, ignore_exit_code=None, check_exit_code=True):
    """Run *cmd* over an open SSH connection, returning masked output.

    :param ssh: connected SSH client exposing ``exec_command``.
    :param cmd: command string to execute remotely.
    :param ignore_exit_code: optional collection of non-zero exit codes
        that should not raise.
    :param check_exit_code: when True, raise for unexpected non-zero
        exit codes.
    :returns: tuple ``(stdout, stderr)`` with passwords masked.
    :raises exception.ProcessExecutionError: on unexpected exit status.
    """
    masked_cmd = strutils.mask_password(cmd)
    LOG.debug('Running cmd (SSH): %s', masked_cmd)

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    masked_stdout = strutils.mask_password(stdout_stream.read())
    masked_stderr = strutils.mask_password(stderr_stream.read())
    stdin_stream.close()

    exit_status = channel.recv_exit_status()
    # recv_exit_status() reports -1 when the remote side sent no status.
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        failed = check_exit_code and exit_status != 0
        tolerated = (ignore_exit_code is not None and
                     exit_status in ignore_exit_code)
        if failed and not tolerated:
            raise exception.ProcessExecutionError(exit_code=exit_status,
                                                  stdout=masked_stdout,
                                                  stderr=masked_stderr,
                                                  cmd=masked_cmd)

    return (masked_stdout, masked_stderr)
def create(self, req, body, tenant_id):
    """Create a new database instance for a tenant.

    Parses datastore, flavor, volume, restore-point, replication and
    configuration settings out of the request body, creates the
    instance, and returns its detail view.

    :param req: the WSGI request object.
    :param body: parsed JSON request body with an 'instance' dict.
    :param tenant_id: tenant owning the new instance.
    :returns: wsgi.Result wrapping an InstanceDetailView, HTTP 200.
    :raises exception.BadRequest: if the supplied users are invalid.
    """
    # TODO(hub-cap): turn this into middleware
    LOG.info(_LI("Creating a database instance for tenant '%s'"), tenant_id)
    # Mask credentials before logging request contents.
    LOG.debug("req : '%s'\n\n", strutils.mask_password(req))
    LOG.debug("body : '%s'\n\n", strutils.mask_password(body))
    context = req.environ[wsgi.CONTEXT_KEY]
    context.notification = notification.DBaaSInstanceCreate(context,
                                                            request=req)
    datastore_args = body['instance'].get('datastore', {})
    datastore, datastore_version = (
        datastore_models.get_datastore_version(**datastore_args))
    image_id = datastore_version.image_id
    name = body['instance']['name']
    flavor_ref = body['instance']['flavorRef']
    flavor_id = utils.get_id_from_href(flavor_ref)

    configuration = self._configuration_parse(context, body)
    databases = populate_validated_databases(
        body['instance'].get('databases', []))
    database_names = [database.get('_name', '') for database in databases]
    users = None
    try:
        users = populate_users(body['instance'].get('users', []),
                               database_names)
    except ValueError as ve:
        raise exception.BadRequest(msg=ve)

    if 'volume' in body['instance']:
        volume_info = body['instance']['volume']
        volume_size = int(volume_info['size'])
        volume_type = volume_info.get('type')
    else:
        volume_size = None
        volume_type = None

    if 'restorePoint' in body['instance']:
        backupRef = body['instance']['restorePoint']['backupRef']
        backup_id = utils.get_id_from_href(backupRef)
    else:
        backup_id = None

    availability_zone = body['instance'].get('availability_zone')
    nics = body['instance'].get('nics')

    # 'slave_of' is the older name for 'replica_of'.
    slave_of_id = body['instance'].get('replica_of',
                                      body['instance'].get('slave_of'))
    replica_count = body['instance'].get('replica_count')
    instance = models.Instance.create(context, name, flavor_id,
                                      image_id, databases, users,
                                      datastore, datastore_version,
                                      volume_size, backup_id,
                                      availability_zone, nics,
                                      configuration, slave_of_id,
                                      replica_count=replica_count,
                                      volume_type=volume_type)

    view = views.InstanceDetailView(instance, req=req)
    # BUG FIX: previously a bare ``return`` was followed by an
    # unreachable ``wsgi.Result(...)`` expression, so callers got None.
    return wsgi.Result(view.data(), 200)
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True, binary=False):
    """Run a command through SSH.

    :param ssh: connected SSH client exposing ``exec_command``.
    :param cmd: command string to run remotely.
    :param process_input: unsupported; must be falsy.
    :param addl_env: unsupported; must be falsy.
    :param check_exit_code: raise on non-zero remote exit status.
    :param binary: when True, return stdout/stderr as bytes.
    :returns: tuple ``(stdout, stderr)`` with passwords masked.
    :raises InvalidArgumentError: if addl_env or process_input is given.
    :raises ProcessExecutionError: on unexpected non-zero exit status.

    .. versionchanged:: 1.9
       Added *binary* optional parameter.
    """
    sanitized_cmd = strutils.mask_password(cmd)
    LOG.debug("Running cmd (SSH): %s", sanitized_cmd)
    if addl_env:
        raise InvalidArgumentError(_("Environment not supported over SSH"))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_("process_input not supported over SSH"))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    if six.PY3:
        # Decode from the locale using the surrogateescape error handler
        # (decoding cannot fail). Decode even if binary is True because
        # mask_password() requires Unicode on Python 3
        stdout = os.fsdecode(stdout)
        stderr = os.fsdecode(stderr)
    stdout = strutils.mask_password(stdout)
    stderr = strutils.mask_password(stderr)

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug("Result was %s" % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=sanitized_cmd)

    if binary:
        if six.PY2:
            # On Python 2, stdout is a bytes string if mask_password() failed
            # to decode it, or an Unicode string otherwise. Encode to the
            # default encoding (ASCII) because mask_password() decodes from
            # the same encoding.
            if isinstance(stdout, unicode):
                stdout = stdout.encode()
            if isinstance(stderr, unicode):
                stderr = stderr.encode()
        else:
            # fsencode() is the reverse operation of fsdecode()
            stdout = os.fsencode(stdout)
            stderr = os.fsencode(stderr)

    return (stdout, stderr)
def wrapper(*args, **kwargs):
    """Debug-log the wrapped method's name and masked arguments, then call it.

    Assumes the first positional argument is the bound instance; the
    remaining args/kwargs are password-masked before logging.
    """
    instance = args[0]
    cls = instance.__class__
    data = {
        "class_name": cls.__module__ + '.' + cls.__name__,
        "method_name": method.__name__,
        "args": strutils.mask_password(args[1:]),
        "kwargs": strutils.mask_password(kwargs),
    }
    LOG.debug('%(class_name)s method %(method_name)s'
              ' called with arguments %(args)s %(kwargs)s', data)
    return method(*args, **kwargs)
def trace_logging_wrapper(*args, **kwargs):
    """Log entry and exit (with masked arguments/results) around ``f``.

    Does nothing beyond calling ``f`` unless DEBUG logging is enabled.
    An optional ``filter_function`` from the decorator kwargs receives
    the resolved call arguments and may suppress the entry/exit lines
    (the timing of the call still happens).
    """
    filter_function = dec_kwargs.get('filter_function')

    if len(args) > 0:
        maybe_self = args[0]
    else:
        maybe_self = kwargs.get('self', None)

    # Attribute log lines to the decorated object's module when possible.
    if maybe_self and hasattr(maybe_self, '__module__'):
        logger = logging.getLogger(maybe_self.__module__)
    else:
        logger = LOG

    # NOTE(ameade): Don't bother going any further if DEBUG log level
    # is not enabled for the logger.
    if not logger.isEnabledFor(py_logging.DEBUG):
        return f(*args, **kwargs)

    all_args = inspect.getcallargs(f, *args, **kwargs)

    pass_filter = filter_function is None or filter_function(all_args)

    if pass_filter:
        logger.debug('==> %(func)s: call %(all_args)r',
                     {'func': func_name,
                      'all_args': strutils.mask_password(
                          six.text_type(all_args))})

    # BUG FIX: round start_time to an int so the elapsed time below is
    # an integer millisecond count; previously start_time stayed a
    # float and total_time became a float.
    start_time = int(round(time.time() * 1000))
    try:
        result = f(*args, **kwargs)
    except Exception as exc:
        total_time = int(round(time.time() * 1000)) - start_time
        logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
                     {'func': func_name,
                      'time': total_time,
                      'exc': exc})
        raise
    total_time = int(round(time.time() * 1000)) - start_time

    # Mask secrets in the result before logging it.
    if isinstance(result, dict):
        mask_result = strutils.mask_dict_password(result)
    elif isinstance(result, six.string_types):
        mask_result = strutils.mask_password(result)
    else:
        mask_result = result

    if pass_filter:
        logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
                     {'func': func_name,
                      'time': total_time,
                      'result': mask_result})
    return result
def create(self, req, body, tenant_id, instance_id):
    """Creates a set of users.

    :param req: the WSGI request object.
    :param body: parsed body whose 'users' list describes the users.
    :param tenant_id: tenant owning the instance.
    :param instance_id: instance the users are created on.
    :returns: wsgi.Result with HTTP 202.
    :raises exception.BadRequest: when the user payload is invalid.
    """
    LOG.info(_("Creating users for instance '%s'") % instance_id)
    # Mask credentials before logging request contents.
    LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req))
    LOG.info(_("body : '%s'\n\n") % strutils.mask_password(body))
    context = req.environ[wsgi.CONTEXT_KEY]
    requested = body['users']
    try:
        models.User.create(context, instance_id, populate_users(requested))
    except (ValueError, AttributeError) as e:
        raise exception.BadRequest(msg=str(e))
    return wsgi.Result(None, 202)
def test_json_message(self):
    """mask_password must leave already-masked JSON payloads unchanged."""
    payloads = [
        """body: {"changePassword": {"adminPass": "******"}}""",
        """body: {"rescue": {"admin_pass": "******"}}""",
        """body: {"rescue": {"admin_password": "******"}}""",
        """body: {"rescue": {"password": "******"}}""",
    ]
    for payload in payloads:
        expected = payload
        self.assertEqual(expected, strutils.mask_password(payload))
def synchronize(self):
    """Reconcile running datasource services with the database records.

    Two passes: (1) for every datasource configured in the db, make the
    cage agree with it (delete disabled ones, reload ones whose config
    changed, add missing enabled ones); (2) remove cage services of type
    'datasource_driver' that no longer have a db record. Datasource
    contents are password-masked before any logging.
    """
    LOG.debug("Synchronizing running datasources")

    cage = d6cage.d6Cage()
    datasources = self.datasource_mgr.get_datasources(filter_secret=False)

    # Look for datasources in the db, but not in the cage.
    for configured_ds in datasources:
        active_ds = cage.service_object(configured_ds['name'])
        if active_ds is not None:
            if not configured_ds['enabled']:
                LOG.info('Datasource %s now disabled, just delete it.',
                         configured_ds['name'])
                # update_db=False: the db row is the source of truth
                # here; only the running service is being changed.
                self.datasource_mgr.delete_datasource(configured_ds['id'],
                                                      update_db=False)
                continue
            active_config = cage.getservice(name=configured_ds['name'])
            if not self._config_eq(configured_ds, active_config):
                LOG.debug('configured and active disagree: (%s) %s %s',
                          strutils.mask_password(active_ds),
                          strutils.mask_password(configured_ds),
                          strutils.mask_password(active_config))

                LOG.info('Reloading datasource: %s',
                         strutils.mask_password(configured_ds))
                # Reload = delete then re-add with the configured state.
                self.datasource_mgr.delete_datasource(configured_ds['id'],
                                                      update_db=False)
                self.datasource_mgr.add_datasource(
                    configured_ds, update_db=False)
        else:
            if configured_ds['enabled']:
                LOG.info('Configured datasource is not active, adding: %s',
                         strutils.mask_password(configured_ds))
                self.datasource_mgr.add_datasource(configured_ds,
                                                   update_db=False)
            else:
                LOG.info('Configured datasource is not active but ' +
                         'disabled, not adding: %s',
                         strutils.mask_password(configured_ds))

    # Look for datasources in the cage, but not in the db. This
    # need not compare the configuration, because the above
    # comparison would have already checked the configuration.
    configured_dicts = dict((ds['name'], ds) for ds in datasources)
    LOG.debug("configured dicts: %s",
              strutils.mask_password(configured_dicts))
    LOG.debug("active services: %s",
              strutils.mask_password(cage.getservices()))
    for name, service in cage.getservices().items():
        LOG.debug('active datasource: %s', service['name'])
        if (service['type'] == 'datasource_driver' and
                not configured_dicts.get(service['name'], None)):
            LOG.info('Active datasource is not configured, removing: %s',
                     service['name'])
            cage.deleteservice(service['name'])
            # Also drop the policy the engine kept for this datasource.
            engine = cage.service_object('engine')
            engine.delete_policy(service['name'])
def log_curl_request(self, method, url, kwargs):
    """Debug-log the outgoing request as an equivalent curl command line.

    Headers become ``-H`` flags, TLS connection parameters become
    ``--key``/``--cert``/``--cacert``/``-k``, and any body is masked
    before being appended as ``-d``.
    """
    pieces = ['curl -i -X %s' % method]

    for key, value in kwargs['headers'].items():
        pieces.append('-H \'%s: %s\'' % (key, value))

    conn_params = self.connection_params[2]
    ssl_flags = (
        ('key_file', '--key %s'),
        ('cert_file', '--cert %s'),
        ('ca_file', '--cacert %s'),
    )
    for key, fmt in ssl_flags:
        value = conn_params.get(key)
        if value:
            pieces.append(fmt % value)
    if conn_params.get('insecure'):
        pieces.append('-k')

    if 'body' in kwargs:
        # Mask any password-like content before it hits the log.
        pieces.append('-d \'%s\'' % strutils.mask_password(kwargs['body']))

    pieces.append(urlparse.urljoin(self.endpoint_trimmed, url))
    LOG.debug(' '.join(pieces))
def update_all(self, req, body, tenant_id, instance_id):
    """Change the password of one or more users.

    :param req: the WSGI request object.
    :param body: parsed body; ``body['users']`` is a list of user dicts.
    :param tenant_id: tenant owning the instance (routing only).
    :param instance_id: instance whose users are updated.
    :returns: wsgi.Result with HTTP 202.
    :raises exception.BadRequest: when the user payload is invalid.
    """
    LOG.info(_LI("Updating user password for instance '%(id)s'\n"
                 "req : '%(req)s'\n\n") %
             {"id": instance_id, "req": strutils.mask_password(req)})
    context = req.environ[wsgi.CONTEXT_KEY]
    self.authorize_target_action(context, 'user:update_all', instance_id)
    context.notification = notification.DBaaSUserChangePassword(
        context, request=req)
    users = body['users']
    usernames = [user['name'] for user in users]
    client = self.create_guest_client(context, instance_id)
    # NOTE(review): usernames are joined with the literal string
    # "******" as separator -- looks like mask residue rather than a
    # deliberate delimiter; confirm the intended separator.
    with StartNotification(context, instance_id=instance_id,
                           username="******".join(usernames)):
        try:
            user_models = self.parse_users_from_request(users)
            # Validate every user before changing any password so the
            # operation fails before partial application.
            for model in user_models:
                user_id = self.get_user_id(model)
                if self.is_reserved_id(user_id):
                    raise exception.ReservedUserId(name=user_id)
                if not self.find_user(client, user_id):
                    raise exception.UserNotFound(uuid=user_id)
            self.change_passwords(client, user_models)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(str(e))
    return wsgi.Result(None, 202)
def update(self, req, body, tenant_id, instance_id, id):
    """Update attributes of a single database user.

    :param req: the WSGI request object.
    :param body: parsed body; ``body['user']`` holds the updates.
    :param tenant_id: tenant owning the instance (routing only).
    :param instance_id: instance the user belongs to.
    :param id: identifier of the user being updated.
    :returns: wsgi.Result with HTTP 202.
    :raises exception.BadRequest: when the update payload is invalid.
    """
    LOG.info(_LI("Updating user attributes for instance '%(id)s'\n"
                 "req : '%(req)s'\n\n") %
             {"id": instance_id, "req": strutils.mask_password(req)})
    context = req.environ[wsgi.CONTEXT_KEY]
    self.authorize_target_action(context, 'user:update', instance_id)
    # The id from the URL may need adjusting against the request.
    user_id = correct_id_with_req(id, req)
    updates = body['user']
    context.notification = notification.DBaaSUserUpdateAttributes(
        context, request=req)
    client = self.create_guest_client(context, instance_id)
    with StartNotification(context, instance_id=instance_id,
                           username=user_id):
        try:
            if self.is_reserved_id(user_id):
                raise exception.ReservedUserId(name=user_id)
            model = self.find_user(client, user_id)
            if not model:
                raise exception.UserNotFound(uuid=user_id)
            # If the update renames the user, the new id must not
            # collide with an existing user.
            new_user_id = self.apply_user_updates(model, updates)
            if (new_user_id is not None and
                    self.find_user(client, new_user_id)):
                raise exception.UserAlreadyExists(name=new_user_id)
            self.update_user(client, user_id, updates)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(str(e))
    return wsgi.Result(None, 202)
def _http_log_response(self, response, logger):
    """Debug-log an HTTP response with password-masked body.

    Bodies are only logged for content types listed in
    _LOG_CONTENT_TYPES; anything else is summarized instead of read.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return

    # NOTE(samueldmq): If the response does not provide enough info about
    # the content type to decide whether it is useful and safe to log it
    # or not, just do not log the body. Trying to read the response body
    # anyways may result on reading a long stream of bytes and getting an
    # unexpected MemoryError. See bug 1616105 for further details.
    content_type = response.headers.get('content-type', None)

    # NOTE(lamt): Per [1], the Content-Type header can be of the form
    # Content-Type := type "/" subtype *[";" parameter]
    # [1] https://www.w3.org/Protocols/rfc1341/4_Content-Type.html
    for log_type in _LOG_CONTENT_TYPES:
        if content_type is not None and content_type.startswith(log_type):
            text = _remove_service_catalog(response.text)
            break
    else:
        # for/else: no loggable content type matched.
        text = ('Omitted, Content-Type is set to %s. Only '
                '%s responses have their bodies logged.')
        text = text % (content_type, ', '.join(_LOG_CONTENT_TYPES))
    string_parts = [
        'RESP:',
        '[%s]' % response.status_code
    ]
    for header in response.headers.items():
        string_parts.append('%s: %s' % self._process_header(header))
    string_parts.append('\nRESP BODY: %s\n' % strutils.mask_password(text))
    logger.debug(' '.join(string_parts))
def _build_policy_check_credentials(self, action, context, kwargs):
    """Build the auth context used for an RBAC policy check.

    Reuses a pre-built auth context from the request environment when
    present; otherwise validates the incoming token and converts it.

    :param action: name of the action being authorized (for logging).
    :param context: request context dict; may carry 'environment' and
        must carry 'token_id' when no auth context is cached.
    :param kwargs: target arguments, masked before logging.
    :returns: an auth context dict.
    :raises exception.Unauthorized: when the token is not found.
    """
    kwargs_str = ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])
    kwargs_str = strutils.mask_password(kwargs_str)
    LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
        'action': action,
        'kwargs': kwargs_str})

    # see if auth context has already been created. If so use it.
    if ('environment' in context and
            authorization.AUTH_CONTEXT_ENV in context['environment']):
        LOG.debug('RBAC: using auth context from the request environment')
        return context['environment'].get(authorization.AUTH_CONTEXT_ENV)

    # There is no current auth context, build it from the incoming token.
    # TODO(morganfainberg): Collapse this logic with AuthContextMiddleware
    # in a sane manner as this just mirrors the logic in
    # AuthContextMiddleware
    try:
        LOG.debug('RBAC: building auth context from the incoming auth token')
        token_ref = token_model.KeystoneToken(
            token_id=context['token_id'],
            token_data=self.token_provider_api.validate_token(
                context['token_id']))
        # NOTE(jamielennox): whilst this maybe shouldn't be within this
        # function it would otherwise need to reload the token_ref from
        # backing store.
        wsgi.validate_token_bind(context, token_ref)
    except exception.TokenNotFound:
        LOG.warning(_LW('RBAC: Invalid token'))
        raise exception.Unauthorized()

    auth_context = authorization.token_to_auth_context(token_ref)
    return auth_context
def create_vim(self, context, vim):
    """Register a new VIM (Virtualized Infrastructure Manager).

    Registers the VIM with its type-specific driver, persists it, and
    then best-effort enables monitoring.

    :param context: request context.
    :param vim: dict whose 'vim' key holds the VIM description
        (must include 'type', 'auth_url', 'auth_cred').
    :returns: the created VIM record.
    """
    LOG.debug('Create vim called with parameters %s',
              strutils.mask_password(vim))
    vim_obj = vim['vim']
    vim_type = vim_obj['type']
    if vim_type == 'openstack':
        # Normalize the auth URL to the Keystone v3 endpoint.
        vim_obj['auth_url'] = utils.get_auth_url_v3(vim_obj['auth_url'])
    vim_obj['id'] = uuidutils.generate_uuid()
    vim_obj['status'] = 'PENDING'
    try:
        self._vim_drivers.invoke(vim_type,
                                 'register_vim',
                                 vim_obj=vim_obj)
        res = super(NfvoPlugin, self).create_vim(context, vim_obj)
    except Exception:
        # On failure, delete the stored credentials;
        # save_and_reraise_exception re-raises the original error.
        with excutils.save_and_reraise_exception():
            self._vim_drivers.invoke(vim_type,
                                     'delete_vim_auth',
                                     vim_id=vim_obj['id'],
                                     auth=vim_obj['auth_cred'])
    try:
        self.monitor_vim(context, vim_obj)
    except Exception:
        # Monitoring is best-effort; creation still succeeds.
        LOG.warning("Failed to set up vim monitoring")
    return res
def execute(*cmd, **kwargs):
    """NB: Raises processutils.ProcessExecutionError on failure."""
    run_as_root = kwargs.pop('run_as_root', False)
    kwargs.pop('root_helper', None)
    try:
        if run_as_root:
            return execute_root(*cmd, **kwargs)
        return putils.execute(*cmd, **kwargs)
    except OSError as e:
        # Note:
        # putils.execute('bogus', run_as_root=True)
        # raises ProcessExecutionError(exit_code=1) (because there's a
        # "sh -c bogus" involved in there somewhere, but:
        # putils.execute('bogus', run_as_root=False)
        # raises OSError(not found).
        #
        # Callers of this library historically catch only
        # ProcessExecutionError, so normalize the OSError into one
        # rather than fixing every call site.
        masked = strutils.mask_password(' '.join(cmd))
        raise putils.ProcessExecutionError(
            cmd=masked, description=six.text_type(e))
def create_datasource_service(self, datasource):
    """Create a new DataService on this node.

    Resolves the driver for *datasource*, builds its constructor
    arguments, and instantiates the driver's plugin class. Skips (and
    returns None for) datasources that are not enabled.

    :param datasource: datasource object to instantiate a service for.
    :returns: the created service, or None when the datasource is
        disabled.
    :raises exception.DriverNotFound: when no driver is loaded for it.
    :raises exception.DataServiceError: when instantiation fails.
    """
    # get the driver info for the datasource
    ds_dict = self.make_datasource_dict(datasource)
    if not ds_dict['enabled']:
        LOG.info("datasource %s not enabled, skip loading",
                 ds_dict['name'])
        return

    driver = self.loaded_drivers.get(ds_dict['driver'])
    if not driver:
        raise exception.DriverNotFound(id=ds_dict['driver'])

    config = ds_dict['config']
    args = ({'ds_id': ds_dict['id']} if config is None
            else dict(config, ds_id=ds_dict['id']))
    kwargs = {'name': ds_dict['name'], 'args': args}
    LOG.info("creating service %s with class %s and args %s",
             ds_dict['name'], driver.plugin,
             strutils.mask_password(kwargs, "****"))

    try:
        service = driver.plugin(**kwargs)
    except Exception:
        msg = ("Error loading instance of module '%s'")
        LOG.exception(msg, driver.plugin)
        raise exception.DataServiceError(msg % driver.plugin)
    return service
def update_all(self, req, body, tenant_id, instance_id):
    """Change the password of one or more users.

    Validates that every named user exists before applying any
    password change.

    :returns: wsgi.Result with HTTP 202.
    :raises exception.UserNotFound: when a listed user does not exist.
    :raises exception.BadRequest: when the user payload is invalid.
    """
    LOG.info(_("Updating user passwords for instance '%s'") % instance_id)
    LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req))
    context = req.environ[wsgi.CONTEXT_KEY]
    changed_users = []
    for entry in body['users']:
        try:
            user = guest_models.MySQLUser()
            user.name = entry['name']
            user.host = entry.get('host')
            user.password = entry['password']
            existing = models.User.load(context, instance_id,
                                        user.name, user.host)
            if not existing:
                label = user.name
                if user.host:
                    label += '@' + user.host
                raise exception.UserNotFound(uuid=label)
            changed_users.append(user)
        except (ValueError, AttributeError) as e:
            raise exception.BadRequest(msg=str(e))
    models.User.change_password(context, instance_id, changed_users)
    return wsgi.Result(None, 202)
def _parse_volume_info(connection_data):
    """Parse device_path and mountpoint as they can be used by XenAPI.

    In particular, the mountpoint (e.g. /dev/sdc) must be translated
    into a numeric literal.

    :param connection_data: mapping with volume/iSCSI target details.
    :returns: dict with 'id', 'target', 'port', 'targetIQN' and, for
        CHAP authentication, 'chapuser'/'chappassword'.
    :raises exception.StorageError: when required fields are missing.
    """
    volume_id = connection_data["volume_id"]
    target_portal = connection_data["target_portal"]
    target_host = _get_target_host(target_portal)
    target_port = _get_target_port(target_portal)
    target_iqn = connection_data["target_iqn"]

    LOG.debug("(vol_id,host,port,iqn): "
              "(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)",
              {"vol_id": volume_id,
               "host": target_host,
               "port": target_port,
               "iqn": target_iqn})

    if volume_id is None or target_host is None or target_iqn is None:
        # Mask secrets (e.g. CHAP credentials) before embedding the
        # connection data in the error message.
        raise exception.StorageError(
            reason=_("Unable to obtain target information %s") %
            strutils.mask_password(connection_data))

    volume_info = {
        "id": volume_id,
        "target": target_host,
        "port": target_port,
        "targetIQN": target_iqn,
    }
    if ("auth_method" in connection_data and
            connection_data["auth_method"] == "CHAP"):
        volume_info["chapuser"] = connection_data["auth_username"]
        volume_info["chappassword"] = connection_data["auth_password"]
    return volume_info
def _convert_with_links(node, url, fields=None, show_password=True):
    """Prepare a node API object for serialization.

    Optionally restricts the node to *fields*, masks driver_info
    passwords when requested, hides the internal chassis id, and adds
    self/bookmark links.
    """
    # NOTE(lucasagomes): Since we are able to return a specified set of
    # fields the "uuid" can be unset, so we need to save it in another
    # variable to use when building the links
    node_uuid = node.uuid
    if fields is not None:
        node.unset_fields_except(fields)
    else:
        node.ports = [link.Link.make_link('self', url, 'nodes',
                                          node_uuid + "/ports"),
                      link.Link.make_link('bookmark', url, 'nodes',
                                          node_uuid + "/ports",
                                          bookmark=True)
                      ]

    if not show_password and node.driver_info != wtypes.Unset:
        # mask_password returns a string; literal_eval turns it back
        # into a dict-like value for the API response.
        node.driver_info = ast.literal_eval(strutils.mask_password(
            node.driver_info,
            "******"))
    # NOTE(lucasagomes): The numeric ID should not be exposed to
    # the user, it's internal only.
    node.chassis_id = wtypes.Unset

    node.links = [link.Link.make_link('self', url, 'nodes',
                                      node_uuid),
                  link.Link.make_link('bookmark', url, 'nodes',
                                      node_uuid, bookmark=True)
                  ]
    return node
def create_service(self, class_path, kwargs):
    """Create a new DataService on this node.

    :param class_path: dotted path to the service class, e.g.
        ``congress.datasources.fake_datasource.FakeDataSource``.
    :param kwargs: constructor keyword arguments for the service class;
        must include 'name' (unique across all services) and 'args'.
    :returns: the instantiated service.
    :raises exception.DataServiceError: when the module cannot be
        imported or the class cannot be instantiated.
    """
    # split class_path into module and class name
    pieces = class_path.split(".")
    module_name = ".".join(pieces[:-1])
    class_name = pieces[-1]

    LOG.info("creating service %s with class %s and args %s",
             kwargs['name'], module_name,
             strutils.mask_password(kwargs, "****"))

    # import the module
    try:
        module = importutils.import_module(module_name)
        service = getattr(module, class_name)(**kwargs)
    except Exception:
        msg = ("Error loading instance of module '%s'")
        # Lazy formatting, consistent with create_datasource_service.
        LOG.exception(msg, class_path)
        raise exception.DataServiceError(msg % class_path)
    return service
def _build_policy_check_credentials(self, action, context, kwargs):
    """Build the auth context used for an RBAC policy check.

    Prefers an auth context already present in the request
    environment; otherwise loads the incoming token.

    :raises exception.Unauthorized: when the token is not found.
    """
    masked_kwargs = strutils.mask_password(
        ', '.join('%s=%s' % (k, v) for k, v in kwargs.items()))
    LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
        'action': action,
        'kwargs': masked_kwargs})

    # Reuse an auth context cached in the request environment, if any.
    if ('environment' in context and
            authorization.AUTH_CONTEXT_ENV in context['environment']):
        LOG.debug('RBAC: using auth context from the request environment')
        return context['environment'].get(authorization.AUTH_CONTEXT_ENV)

    # There is no current auth context, build it from the incoming token.
    # TODO(morganfainberg): Collapse this logic with AuthContextMiddleware
    # in a sane manner as this just mirrors the logic in
    # AuthContextMiddleware
    try:
        token_ref = self.token_api.get_token(context['token_id'])
    except exception.TokenNotFound:
        LOG.warning('RBAC: Invalid token')
        raise exception.Unauthorized()
    return token_ref
def sanitize_secrets(content, mask="****"):
    """Extends oslo_utils strutils to make mask passwords more robust.

    :param content: a dict-like or string payload to sanitize.
    :param mask: replacement string for secret values.
    :returns: the sanitized copy, or None for unsupported types.
    """

    def mask_dict_password(dictionary, secret="***"):
        """Overriding strutils.mask_dict_password.

        Overriding mask_dict_password to accept CaseInsenstiveDict as well.
        """
        out = deepcopy(dictionary)

        for k, v in dictionary.items():
            if is_dict(v):
                # Recurse into nested mappings.
                out[k] = mask_dict_password(v, secret=secret)
                continue
            for sani_key in strutils._SANITIZE_KEYS:
                if sani_key in k:
                    out[k] = secret
                    break
            else:
                # for/else: key name is not secret-like; still mask
                # password patterns inside string values.
                if isinstance(v, six.string_types):
                    out[k] = strutils.mask_password(v, secret=secret)
        return out

    # NOTE(review): this monkey-patches the shared strutils module on
    # every call, changing strutils.mask_dict_password for all other
    # users of that module -- confirm this global side effect is
    # intentional before refactoring.
    strutils.mask_dict_password = mask_dict_password
    if is_dict(content):
        return strutils.mask_dict_password(content, mask)
    if is_string(content):
        return strutils.mask_password(content, mask)
def _api_request(self, path, *args, **kargs):
    """Performs an HTTP request on the device, with locking.

    Raises a DotHillRequestError if the device returned but the status
    is not 0. The device error message will be used in the exception
    message. If the status is OK, returns the XML data for further
    processing.
    """
    url = self._build_request_url(path, *args, **kargs)
    # Don't log the created URL since it may contain chap secret
    LOG.debug("Array Request path: %s, args: %s, kargs: %s (session %s)",
              path, args, strutils.mask_password(kargs), self._session_key)
    headers = {'dataType': 'api', 'sessionKey': self._session_key}
    try:
        xml = requests.get(url, headers=headers,
                           verify=self.ssl_verify, timeout=60)
        tree = etree.XML(xml.text.encode('utf8'))
    except Exception as e:
        message = _("Exception handling URL %(url)s: %(msg)s") % {
            'url': url, 'msg': e}
        raise exception.DotHillConnectionError(message=message)

    # Copy status queries are returned raw; callers parse them.
    if path == "/show/volumecopy-status":
        return tree
    self._assert_response_ok(tree)
    return tree
def _http_log_response(self, response=None, json=None, status_code=None,
                       headers=None, text=None, logger=_logger):
    """Debug-log an HTTP response with a password-masked body.

    Explicit status_code/headers/text arguments take precedence over
    values pulled from *response*; *json* overrides the body text.
    """
    if not logger.isEnabledFor(logging.DEBUG):
        return

    if response:
        status_code = status_code or response.status_code
        headers = headers or response.headers
        text = text or _remove_service_catalog(response.text)
    if json:
        text = jsonutils.dumps(json)

    parts = ['RESP:']
    if status_code:
        parts.append('[%s]' % status_code)
    if headers:
        for header in six.iteritems(headers):
            parts.append('%s: %s' % Session.process_header(header))
    if text:
        parts.append('\nRESP BODY: %s\n' % strutils.mask_password(text))

    logger.debug(' '.join(parts))
def _build_policy_check_credentials(self, action, context, kwargs):
    """Log the authorization attempt and return the cached auth context.

    :param action: name of the action being authorized (for logging).
    :param context: request context dict with an 'environment' key.
    :param kwargs: target arguments, masked before logging.
    :returns: the auth context from the environment, or {}.
    """
    masked_kwargs = strutils.mask_password(
        ', '.join('%s=%s' % (k, v) for k, v in kwargs.items()))
    LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)',
              {'action': action, 'kwargs': masked_kwargs})
    return context['environment'].get(authorization.AUTH_CONTEXT_ENV, {})
def _convert_with_links(node, url, expand=True, show_password=True):
    """Prepare a node API object for serialization.

    Collapsed views keep only a handful of summary fields; expanded
    views add port links and optionally mask driver_info passwords.
    The internal chassis id is always hidden and self/bookmark links
    are attached.
    """
    if expand:
        if not show_password:
            # mask_password returns a string; literal_eval restores the
            # dict-like value for the API response.
            node.driver_info = ast.literal_eval(strutils.mask_password(
                node.driver_info,
                "******"))
        node.ports = [link.Link.make_link('self', url, 'nodes',
                                          node.uuid + "/ports"),
                      link.Link.make_link('bookmark', url, 'nodes',
                                          node.uuid + "/ports",
                                          bookmark=True)
                      ]
    else:
        node.unset_fields_except(['instance_uuid', 'maintenance',
                                  'power_state', 'provision_state',
                                  'uuid', 'name'])

    # NOTE(lucasagomes): The numeric ID should not be exposed to
    # the user, it's internal only.
    node.chassis_id = wtypes.Unset

    node.links = [link.Link.make_link('self', url, 'nodes',
                                      node.uuid),
                  link.Link.make_link('bookmark', url, 'nodes',
                                      node.uuid, bookmark=True)
                  ]
    return node
def custom_execute(*cmd, **kwargs):
    """Run *cmd* via processutils, re-raising failures as CommandError.

    The failing command line is password-masked before being embedded
    in the raised exception.
    """
    try:
        return processutils.execute(*cmd, **kwargs)
    except processutils.ProcessExecutionError as e:
        raise exception.CommandError(
            cmd=strutils.mask_password(' '.join(cmd)),
            error=six.text_type(e))
def get_volume_encryptor(connection_info, **kwargs):
    """Creates a VolumeEncryptor used to encrypt the specified volume.

    :param: the connection information used to attach the volume
    :returns VolumeEncryptor: the VolumeEncryptor for the volume
    """
    encryptor = nop.NoOpEncryptor(connection_info, **kwargs)

    location = kwargs.get('control_location', None)
    if location and location.lower() == 'front-end':  # case insensitive
        provider = kwargs.get('provider')
        # Map bare class names to their fully-qualified module paths.
        module_prefixes = {
            'LuksEncryptor': 'jacket.compute.volume.encryptors.luks.',
            'CryptsetupEncryptor':
                'jacket.compute.volume.encryptors.cryptsetup.',
            'NoOpEncryptor': 'jacket.compute.volume.encryptors.nop.',
        }
        if provider in module_prefixes:
            provider = module_prefixes[provider] + provider
        try:
            encryptor = importutils.import_object(provider,
                                                  connection_info,
                                                  **kwargs)
        except Exception as e:
            LOG.error(_LE("Error instantiating %(provider)s: %(exception)s"),
                      {'provider': provider, 'exception': e})
            raise

    msg = ("Using volume encryptor '%(encryptor)s' for connection: "
           "%(connection_info)s" %
           {'encryptor': encryptor, 'connection_info': connection_info})
    LOG.debug(strutils.mask_password(msg))

    return encryptor
def initialize_app(self, argv): """Global app init bits: * set up API versions * validate authentication info * authenticate against Identity if requested """ # Parent __init__ parses argv into self.options super(OpenStackShell, self).initialize_app(argv) self.log.info("START with options: %s", strutils.mask_password(self.command_options)) self.log.debug("options: %s", strutils.mask_password(self.options)) # Callout for stuff between superclass init and o-c-c self._final_defaults() # Do configuration file handling # Ignore the default value of interface. Only if it is set later # will it be used. try: self.cloud_config = cloud_config.OSC_Config(override_defaults={ 'interface': None, 'auth_type': self._auth_type, }, ) except (IOError, OSError) as e: self.log.critical("Could not read clouds.yaml configuration file") self.print_help_if_requested() raise e # TODO(thowe): Change cliff so the default value for debug # can be set to None. if not self.options.debug: self.options.debug = None # NOTE(dtroyer): Need to do this with validate=False to defer the # auth plugin handling to ClientManager.setup_auth() self.cloud = self.cloud_config.get_one_cloud( cloud=self.options.cloud, argparse=self.options, validate=False, ) self.log_configurator.configure(self.cloud) self.dump_stack_trace = self.log_configurator.dump_trace self.log.debug("defaults: %s", self.cloud_config.defaults) self.log.debug("cloud cfg: %s", strutils.mask_password(self.cloud.config)) # Callout for stuff between o-c-c and ClientManager # self._initialize_app_2(self.options) self._load_plugins() self._load_commands() # Handle deferred help and exit self.print_help_if_requested() self.client_manager = clientmanager.ClientManager( cli_options=self.cloud, api_version=self.api_version, pw_func=prompt_for_password, )
def execute(self, *cmd, **kwargs):
    """Run a command through the rootwrap daemon client.

    NOTE(dims): This method is to provide compatibility with the
    processutils.execute interface, so that calling daemon or direct
    rootwrap honors the same set of flags in kwargs and we don't
    regress any current behavior.

    :param cmd: command and arguments; each element is stringified
    :returns: tuple of (stdout, stderr) from the command
    :raises processutils.ProcessExecutionError: when the exit code is
        not acceptable and exit-code checking is enabled
    """
    cmd = [str(c) for c in cmd]
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    process_input = kwargs.pop('process_input', None)
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    check_exit_code = kwargs.pop('check_exit_code', [0])

    # check_exit_code may be a bool (enable/disable checking), a single
    # int, or a list of acceptable codes; normalize to a list.
    ignore_exit_code = False
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    # The command line may embed passwords; log only the masked form.
    sanitized_cmd = strutils.mask_password(' '.join(cmd))
    LOG.info(_LI('Executing RootwrapDaemonHelper.execute '
                 'cmd=[%(cmd)r] kwargs=[%(kwargs)r]'),
             {'cmd': sanitized_cmd, 'kwargs': kwargs})

    while attempts > 0:
        attempts -= 1
        try:
            start_time = time.time()
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    sanitized_cmd)

            (returncode, out, err) = self.client.execute(
                cmd, process_input)

            end_time = time.time() - start_time
            LOG.log(loglevel,
                    'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
                    'in %(end_time)0.3fs',
                    {'sanitized_cmd': sanitized_cmd,
                     'return_code': returncode,
                     'end_time': end_time})

            if not ignore_exit_code and returncode not in check_exit_code:
                # Mask output as well before embedding it in the
                # exception, since it may echo sensitive values.
                out = strutils.mask_password(out)
                err = strutils.mask_password(err)
                raise processutils.ProcessExecutionError(
                    exit_code=returncode,
                    stdout=out,
                    stderr=err,
                    cmd=sanitized_cmd)
            return (out, err)

        except processutils.ProcessExecutionError as err:
            # if we want to always log the errors or if this is
            # the final attempt that failed and we want to log that.
            if log_errors == processutils.LOG_ALL_ERRORS or (
                    log_errors == processutils.LOG_FINAL_ERROR and
                    not attempts):
                # NOTE: renamed from 'format' to avoid shadowing the
                # builtin of the same name.
                format_str = _('%(desc)r\ncommand: %(cmd)r\n'
                               'exit code: %(code)r\nstdout: %(stdout)r\n'
                               'stderr: %(stderr)r')
                LOG.log(loglevel, format_str,
                        {"desc": err.description,
                         "cmd": err.cmd,
                         "code": err.exit_code,
                         "stdout": err.stdout,
                         "stderr": err.stderr})
            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    time.sleep(random.randint(20, 200) / 100.0)
def create(self, req, body, tenant_id):
    """Create a database instance for the given tenant.

    Reads the instance description from ``body['instance']`` (name,
    flavorRef, optional datastore / volume / restorePoint / replication
    settings) and returns a 200 ``wsgi.Result`` with the detail view.
    """
    # TODO(hub-cap): turn this into middleware
    LOG.info(_LI("Creating a database instance for tenant '%s'"),
             tenant_id)
    # The request/body may carry user passwords; mask before logging.
    LOG.debug("req : '%s'\n\n", strutils.mask_password(req))
    LOG.debug("body : '%s'\n\n", strutils.mask_password(body))
    context = req.environ[wsgi.CONTEXT_KEY]
    datastore_args = body['instance'].get('datastore', {})
    datastore, datastore_version = (datastore_models.get_datastore_version(
        **datastore_args))
    image_id = datastore_version.image_id
    name = body['instance']['name']
    flavor_ref = body['instance']['flavorRef']
    flavor_id = utils.get_id_from_href(flavor_ref)

    configuration = self._configuration_parse(context, body)
    databases = populate_validated_databases(body['instance'].get(
        'databases', []))
    database_names = [database.get('_name', '') for database in databases]
    users = None
    try:
        users = populate_users(body['instance'].get('users', []),
                               database_names)
    except ValueError as ve:
        raise exception.BadRequest(msg=ve)

    # Volume size is optional; None means no volume is provisioned.
    if 'volume' in body['instance']:
        volume_size = int(body['instance']['volume']['size'])
    else:
        volume_size = None

    if 'restorePoint' in body['instance']:
        backupRef = body['instance']['restorePoint']['backupRef']
        backup_id = utils.get_id_from_href(backupRef)
    else:
        backup_id = None

    availability_zone = body['instance'].get('availability_zone')
    nics = body['instance'].get('nics')

    slave_of_id = body['instance'].get('replica_of',
                                       # also check for older name
                                       body['instance'].get('slave_of'))
    replica_count = body['instance'].get('replica_count')
    instance = models.Instance.create(context, name, flavor_id,
                                      image_id, databases, users,
                                      datastore, datastore_version,
                                      volume_size, backup_id,
                                      availability_zone, nics,
                                      configuration, slave_of_id,
                                      replica_count=replica_count)

    view = views.InstanceDetailView(instance, req=req)
    return wsgi.Result(view.data(), 200)
def _get_single_iscsi_data(self, volume, connector, lun_id, chap_secret):
    """Build iSCSI connection properties for a single-path attach.

    :param volume: the volume being attached
    :param connector: connector information for the attaching host
    :param lun_id: LUN id the volume is mapped to
    :param chap_secret: CHAP secret for the host, or None when CHAP is
        not in use
    :returns: dict of iSCSI connection properties
    :raises VolumeDriverException: if vdisk attributes cannot be read
    :raises VolumeBackendAPIException: if expected attributes or I/O
        group nodes are missing
    """
    LOG.debug('enter: _get_single_iscsi_data: volume %(vol)s with '
              'connector %(conn)s lun_id %(lun_id)s',
              {'vol': volume.id, 'conn': connector, 'lun_id': lun_id})

    if volume.display_name == 'backup-snapshot':
        LOG.debug('It is a virtual volume %(vol)s for attach snapshot',
                  {'vol': volume.name})
        volume_name = volume.name
        backend_helper = self._helpers
        node_state = self._state
    else:
        volume_name, backend_helper, node_state = self._get_vol_sys_info(
            volume)

    volume_attributes = backend_helper.get_vdisk_attributes(volume_name)
    if volume_attributes is None:
        msg = (_('_get_single_iscsi_data: Failed to get attributes'
                 ' for volume %s.') % volume_name)
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    try:
        preferred_node = volume_attributes['preferred_node_id']
        IO_group = volume_attributes['IO_group_id']
    except KeyError as e:
        # NOTE: this previously built a (format, dict) tuple (comma
        # instead of '%'), so the message was never interpolated before
        # being logged and raised.
        msg = (_('_get_single_iscsi_data: Did not find expected column'
                 ' name in %(volume)s: %(key)s %(error)s.') %
               {'volume': volume_name, 'key': e.args[0], 'error': e})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # Get preferred node and other nodes in I/O group
    preferred_node_entry = None
    io_group_nodes = []
    for node in node_state['storage_nodes'].values():
        if self.protocol not in node['enabled_protocols']:
            continue
        if node['IO_group'] != IO_group:
            continue
        io_group_nodes.append(node)
        if node['id'] == preferred_node:
            preferred_node_entry = node

    if not len(io_group_nodes):
        msg = (_('_get_single_iscsi_data: No node found in '
                 'I/O group %(gid)s for volume %(vol)s.') %
               {'gid': IO_group, 'vol': volume_name})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    if not preferred_node_entry:
        # Get 1st node in I/O group
        preferred_node_entry = io_group_nodes[0]
        LOG.warning('_get_single_iscsi_data: Did not find a '
                    'preferred node for volume %s.', volume_name)

    properties = {
        'target_discovered': False,
        'target_lun': lun_id,
        'volume_id': volume.id}

    if preferred_node_entry['ipv4']:
        ipaddr = preferred_node_entry['ipv4'][0]
    else:
        ipaddr = preferred_node_entry['ipv6'][0]
    properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
    properties['target_iqn'] = preferred_node_entry['iscsi_name']
    if chap_secret:
        properties.update(auth_method='CHAP',
                          auth_username=connector['initiator'],
                          auth_password=chap_secret,
                          discovery_auth_method='CHAP',
                          discovery_auth_username=connector['initiator'],
                          discovery_auth_password=chap_secret)

    # properties may contain chap secret so must be masked
    LOG.debug('leave: _get_single_iscsi_data:\n volume: %(vol)s\n '
              'connector: %(conn)s\n lun_id: %(lun_id)s\n '
              'properties: %(prop)s',
              {'vol': volume.id, 'conn': connector, 'lun_id': lun_id,
               'prop': strutils.mask_password(properties)})
    return properties
def _do_initialize_connection(self, volume, connector):
    """Perform necessary work to make an iSCSI connection.

    To be able to create an iSCSI connection from a given host to a
    volume, we must:
    1. Translate the given iSCSI name to a host name
    2. Create new host on the storage system if it does not yet exist
    3. Map the volume to the host if it is not already done
    4. Return the connection information for relevant nodes (in the
       proper I/O group)
    """
    LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
              ' %(conn)s', {'vol': volume.id, 'conn': connector})
    # 'backup-snapshot' is a special virtual volume used when attaching
    # a snapshot for backup; it always uses the local helpers/state.
    if volume.display_name == 'backup-snapshot':
        LOG.debug('It is a virtual volume %(vol)s for attach snapshot.',
                  {'vol': volume.id})
        volume_name = volume.name
        backend_helper = self._helpers
        node_state = self._state
    else:
        volume_name, backend_helper, node_state = self._get_vol_sys_info(
            volume)

    host_site = self._get_volume_host_site_from_conf(volume,
                                                     connector,
                                                     iscsi=True)
    is_hyper_volume = backend_helper.is_volume_hyperswap(volume_name)
    # Hyperswap volumes require a configured preferred host site.
    if is_hyper_volume and host_site is None:
        msg = (_('There is no correct storwize_preferred_host_site '
                 'configured for a hyperswap volume %s.') % volume.name)
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    # Check if a host object is defined for this host name
    host_name = backend_helper.get_host_from_connector(connector,
                                                       iscsi=True)
    if host_name is None:
        # Host does not exist - add a new host to Storwize/SVC
        host_name = backend_helper.create_host(connector, iscsi=True,
                                               site=host_site)
    elif is_hyper_volume:
        self._update_host_site_for_hyperswap_volume(host_name, host_site)

    # Lazily provision a CHAP secret when CHAP is enabled but the host
    # has none yet; warn (but keep it) when CHAP is disabled.
    chap_secret = backend_helper.get_chap_secret_for_host(host_name)
    chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
    if chap_enabled and chap_secret is None:
        chap_secret = backend_helper.add_chap_secret_to_host(host_name)
    elif not chap_enabled and chap_secret:
        LOG.warning('CHAP secret exists for host but CHAP is disabled.')

    multihostmap = self.configuration.storwize_svc_multihostmap_enabled
    lun_id = backend_helper.map_vol_to_host(volume_name, host_name,
                                            multihostmap)

    try:
        properties = self._get_single_iscsi_data(volume, connector,
                                                 lun_id, chap_secret)
        multipath = connector.get('multipath', False)
        if multipath:
            properties = self._get_multi_iscsi_data(volume, connector,
                                                    lun_id, properties,
                                                    backend_helper,
                                                    node_state)
    except Exception as ex:
        # Undo the export on any failure, then re-raise the original
        # exception for the caller.
        with excutils.save_and_reraise_exception():
            LOG.error('initialize_connection: Failed to export volume '
                      '%(vol)s due to %(ex)s.', {'vol': volume.name,
                                                 'ex': ex})
            self._do_terminate_connection(volume, connector)
            LOG.error('initialize_connection: Failed '
                      'to collect return '
                      'properties for volume %(vol)s and connector '
                      '%(conn)s.\n', {'vol': volume,
                                      'conn': connector})

    # properties may contain chap secret so must be masked
    LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
              'connector: %(conn)s\n properties: %(prop)s',
              {'vol': volume.id, 'conn': connector,
               'prop': strutils.mask_password(properties)})

    return {'driver_volume_type': 'iscsi', 'data': properties, }
def setup_auth(self, required_scope=True):
    """Set up authentication

    :param required_scope: indicate whether a scoped token is required

    This is deferred until authentication is actually attempted because
    it gets in the way of things that do not require auth.
    """
    # Idempotent: bail out if auth was already configured.
    if self._auth_setup_completed:
        return

    # If no auth type is named by the user, select one based on
    # the supplied options
    self.auth_plugin_name = auth.select_auth_plugin(self._cli_options)

    # Basic option checking to avoid unhelpful error messages
    auth.check_valid_auth_options(self._cli_options,
                                  self.auth_plugin_name,
                                  required_scope=required_scope)

    # Horrible hack alert...must handle prompt for null password if
    # password auth is requested.
    if (self.auth_plugin_name.endswith('password') and
            not self._cli_options.auth.get('password')):
        self._cli_options.auth['password'] = self._pw_callback()

    (auth_plugin, self._auth_params) = auth.build_auth_params(
        self.auth_plugin_name,
        self._cli_options,
    )

    # TODO(mordred): This is a usability improvement that's broadly useful
    # We should port it back up into os-client-config.
    default_domain = self._cli_options.default_domain

    # NOTE(stevemar): If PROJECT_DOMAIN_ID or PROJECT_DOMAIN_NAME is
    # present, then do not change the behaviour. Otherwise, set the
    # PROJECT_DOMAIN_ID to 'OS_DEFAULT_DOMAIN' for better usability.
    if (self._api_version.get('identity') == '3' and
            self.auth_plugin_name.endswith('password') and
            not self._auth_params.get('project_domain_id') and
            not self.auth_plugin_name.startswith('v2') and
            not self._auth_params.get('project_domain_name')):
        self._auth_params['project_domain_id'] = default_domain

    # NOTE(stevemar): If USER_DOMAIN_ID or USER_DOMAIN_NAME is present,
    # then do not change the behaviour. Otherwise, set the USER_DOMAIN_ID
    # to 'OS_DEFAULT_DOMAIN' for better usability.
    if (self._api_version.get('identity') == '3' and
            self.auth_plugin_name.endswith('password') and
            not self.auth_plugin_name.startswith('v2') and
            not self._auth_params.get('user_domain_id') and
            not self._auth_params.get('user_domain_name')):
        self._auth_params['user_domain_id'] = default_domain

    # For compatibility until all clients can be updated
    if 'project_name' in self._auth_params:
        self._project_name = self._auth_params['project_name']
    elif 'tenant_name' in self._auth_params:
        self._project_name = self._auth_params['tenant_name']

    LOG.info('Using auth plugin: %s', self.auth_plugin_name)
    # Auth params may include the password; mask before debug-logging.
    LOG.debug('Using parameters %s',
              strutils.mask_password(self._auth_params))

    self.auth = auth_plugin.load_from_options(**self._auth_params)
    # needed by SAML authentication
    request_session = requests.session()
    self.session = osc_session.TimingSession(
        auth=self.auth,
        session=request_session,
        verify=self._verify,
        user_agent=USER_AGENT,
    )

    self._auth_setup_completed = True
def stringify(self, value):
    """Stringify *value* with any embedded passwords masked out."""
    masked_value = strutils.mask_password(value)
    return super(SensitiveString, self).stringify(masked_value)
def test_mask_password(self):
    """Exercise strutils.mask_password across a table of payloads."""
    # Sentinel meaning "call mask_password without an explicit secret".
    default = object()
    # (payload, expected, secret) triples, checked in order.
    cases = [
        ("test = 'password' : 'aaaaaa'",
         "test = 'password' : '111'", '111'),
        ('mysqld --password "aaaaaa"',
         'mysqld --password "****"', '****'),
        ('mysqld --password aaaaaa',
         'mysqld --password ???', '???'),
        ('mysqld --password = "******"',
         'mysqld --password = "******"', '****'),
        ("mysqld --password = '******'",
         "mysqld --password = '******'", '****'),
        ("mysqld --password = aaaaaa",
         "mysqld --password = ****", '****'),
        ("test = password = aaaaaa",
         "test = password = 111", '111'),
        ("test = password= aaaaaa",
         "test = password= 111", '111'),
        ("test = password =aaaaaa",
         "test = password =111", '111'),
        ("test = password=aaaaaa",
         "test = password=111", '111'),
        ('test = "original_password" : "aaaaaaaaa"',
         'test = "original_password" : "***"', default),
        ('test = "param1" : "value"',
         'test = "param1" : "value"', default),
        (six.text_type("""{'adminPass':'******'}"""),
         """{'adminPass':'******'}""", default),
        (six.text_type("""{'token':'mytoken'}"""),
         """{'token':'***'}""", default),
        ("test = 'node.session.auth.password','-v','TL0EfN33',"
         "'nomask'",
         "test = 'node.session.auth.password','-v','***',"
         "'nomask'", default),
        ("test = 'node.session.auth.password', '--password', "
         "'TL0EfN33', 'nomask'",
         "test = 'node.session.auth.password', '--password', "
         "'***', 'nomask'", default),
        ("test = 'node.session.auth.password', '--password', "
         "'TL0EfN33'",
         "test = 'node.session.auth.password', '--password', "
         "'***'", default),
        ("test = node.session.auth.password -v TL0EfN33 nomask",
         "test = node.session.auth.password -v *** nomask", default),
        ("test = node.session.auth.password --password TL0EfN33 "
         "nomask",
         "test = node.session.auth.password --password *** "
         "nomask", default),
        ("test = node.session.auth.password --password TL0EfN33",
         "test = node.session.auth.password --password ***", default),
        ("test = cmd --password my\xe9\x80\x80pass",
         "test = cmd --password ***", default),
    ]
    for payload, expected, secret in cases:
        if secret is default:
            masked = strutils.mask_password(payload)
        else:
            masked = strutils.mask_password(payload, secret=secret)
        self.assertEqual(expected, masked)
def create(self, req, body, tenant_id):
    """Create a database instance for the given tenant.

    Reads the instance description from ``body['instance']``, enforces
    the relevant policies, and returns a 200 ``wsgi.Result`` with the
    detail view of the new instance.
    """
    # TODO(hub-cap): turn this into middleware
    LOG.info(_LI("Creating a database instance for tenant '%s'"),
             tenant_id)
    # The request/body may carry user passwords; mask before logging.
    LOG.debug("req : '%s'\n\n", strutils.mask_password(req))
    LOG.debug("body : '%s'\n\n", strutils.mask_password(body))
    context = req.environ[wsgi.CONTEXT_KEY]
    policy.authorize_on_tenant(context, 'instance:create')
    context.notification = notification.DBaaSInstanceCreate(context,
                                                            request=req)
    datastore_args = body['instance'].get('datastore', {})
    datastore, datastore_version = (datastore_models.get_datastore_version(
        **datastore_args))
    image_id = datastore_version.image_id
    name = body['instance']['name']
    flavor_ref = body['instance']['flavorRef']
    flavor_id = utils.get_id_from_href(flavor_ref)

    configuration = self._configuration_parse(context, body)
    databases = populate_validated_databases(body['instance'].get(
        'databases', []))
    database_names = [database.get('_name', '') for database in databases]
    users = None
    try:
        users = populate_users(body['instance'].get('users', []),
                               database_names)
    except ValueError as ve:
        raise exception.BadRequest(msg=ve)

    # The following operations have their own API calls.
    # We need to make sure the same policies are enforced when
    # creating an instance.
    # i.e. if attaching configuration group to an existing instance is not
    # allowed, it should not be possible to create a new instance with the
    # group attached either
    if configuration:
        policy.authorize_on_tenant(context, 'configuration:update')
    if users:
        policy.authorize_on_tenant(
            context, 'instance:extension:user:create')
    if databases:
        policy.authorize_on_tenant(
            context, 'instance:extension:database:create')

    if 'volume' in body['instance']:
        volume_info = body['instance']['volume']
        volume_size = int(volume_info['size'])
        volume_type = volume_info.get('type')
    else:
        volume_size = None
        volume_type = None

    if 'restorePoint' in body['instance']:
        backupRef = body['instance']['restorePoint']['backupRef']
        backup_id = utils.get_id_from_href(backupRef)
    else:
        backup_id = None

    availability_zone = body['instance'].get('availability_zone')
    nics = body['instance'].get('nics')

    slave_of_id = body['instance'].get('replica_of',
                                       # also check for older name
                                       body['instance'].get('slave_of'))
    replica_count = body['instance'].get('replica_count')
    modules = body['instance'].get('modules')

    # Locality constrains replica placement; it conflicts with adding
    # replicas to an existing master.
    locality = body['instance'].get('locality')
    if locality:
        locality_domain = ['affinity', 'anti-affinity']
        locality_domain_msg = ("Invalid locality '%s'. "
                               "Must be one of ['%s']" %
                               (locality,
                                "', '".join(locality_domain)))
        if locality not in locality_domain:
            raise exception.BadRequest(msg=locality_domain_msg)
        if slave_of_id:
            dupe_locality_msg = (
                'Cannot specify locality when adding replicas to existing '
                'master.')
            raise exception.BadRequest(msg=dupe_locality_msg)
    region_name = body['instance'].get('region_name', CONF.os_region_name)

    instance = models.Instance.create(context, name, flavor_id,
                                      image_id, databases, users,
                                      datastore, datastore_version,
                                      volume_size, backup_id,
                                      availability_zone, nics,
                                      configuration, slave_of_id,
                                      replica_count=replica_count,
                                      volume_type=volume_type,
                                      modules=modules,
                                      locality=locality,
                                      region_name=region_name)

    view = views.InstanceDetailView(instance, req=req)
    return wsgi.Result(view.data(), 200)
def _process_stack(self, request, action, action_args, content_type,
                   body, accept):
    """Implement the processing stack.

    Resolves the implementing method for *action*, deserializes the
    body, dispatches the call, and serializes the result (or a Fault)
    into a WSGI response with API-version headers applied.
    """

    # Get the implementing method
    try:
        meth = self.get_method(request, action, content_type, body)
    except (AttributeError, TypeError):
        return Fault(webob.exc.HTTPNotFound())
    except KeyError as ex:
        msg = _("There is no such action: %s") % ex.args[0]
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    if body:
        # The body may contain credentials; mask the whole message
        # before logging.
        msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
                "%(body)s") % {'action': action,
                               'body': str(body, 'utf-8'),
                               'meth': str(meth)}
        LOG.debug(strutils.mask_password(msg))
    else:
        LOG.debug("Calling method '%(meth)s'", {'meth': str(meth)})

    # Now, deserialize the request body...
    try:
        contents = self._get_request_content(body, request)
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Update the action args
    action_args.update(contents)

    # Reject requests whose URL project_id disagrees with the
    # authenticated context.
    project_id = action_args.pop("project_id", None)
    context = request.environ.get('nova.context')
    if (context and project_id and (project_id != context.project_id)):
        msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
                " doesn't match Context's project_id"
                " '%(context_project_id)s'") % \
            {'project_id': project_id,
             'context_project_id': context.project_id}
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    response = None
    try:
        with ResourceExceptionHandler():
            action_result = self.dispatch(meth, request, action_args)
    except Fault as ex:
        LOG.debug(f'Request method failure captured:\n'
                  f' request: {request}\n'
                  f' method: {meth}\n'
                  f' exception: {ex}\n'
                  f' action_args: {action_args}\n',
                  exc_info=1)
        response = ex

    if not response:
        # No exceptions; convert action_result into a
        # ResponseObject
        resp_obj = None
        if type(action_result) is dict or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            # Do a preserialize to set up the response object
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code

        if resp_obj and not response:
            response = resp_obj.serialize(request, accept)

    if hasattr(response, 'headers'):
        for hdr, val in list(response.headers.items()):
            if not isinstance(val, str):
                val = str(val)
            # In Py3.X Headers must be a string
            response.headers[hdr] = encodeutils.safe_decode(
                encodeutils.safe_encode(val))

        # Advertise the negotiated microversion on the response.
        if not request.api_version_request.is_null():
            response.headers[API_VERSION_REQUEST_HEADER] = \
                'compute ' + request.api_version_request.get_string()
            response.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \
                request.api_version_request.get_string()
            response.headers.add('Vary', API_VERSION_REQUEST_HEADER)
            response.headers.add('Vary', LEGACY_API_VERSION_REQUEST_HEADER)

    return response
def enforce_call(cls, enforcer=None, action=None, target_attr=None,
                 member_target_type=None, member_target=None,
                 filters=None, build_target=None):
    """Enforce RBAC on the current request.

    This will do some legwork and then instantiate the Enforcer if an
    enforcer is not passed in.

    :param enforcer: A pre-instantiated Enforcer object (optional)
    :type enforcer: :class:`RBACEnforcer`
    :param action: the name of the rule/policy enforcement to be checked
                   against, e.g. `identity:get_user` (optional may be
                   replaced by decorating the method/function with
                   `policy_enforcer_action`.
    :type action: str
    :param target_attr: complete override of the target data. This will
                        replace all other generated target data meaning
                        `member_target_type` and `member_target` are
                        ignored. This will also prevent extraction of
                        data from the X-Subject-Token. The `target` dict
                        should contain a series of key-value pairs such
                        as `{'user': user_ref_dict}`.
    :type target_attr: dict
    :param member_target_type: the type of the target, e.g. 'user'. Both
                               this and `member_target` must be passed if
                               either is passed.
    :type member_target_type: str
    :param member_target: the (dict form) reference of the member object.
                          Both this and `member_target_type` must be
                          passed if either is passed.
    :type member_target: dict
    :param filters: A variable number of optional string filters, these
                    are used to extract values from the query params.
                    The filters are added to the request data that is
                    passed to the enforcer and may be used to determine
                    policy action. In practice these are mainly supplied
                    in the various "list" APIs and are un-used in the
                    default supplied policies.
    :type filters: iterable
    :param build_target: A function to build the target for enforcement.
                         This is explicitly done after authentication
                         in order to not leak existence data before
                         auth.
    :type build_target: function
    """
    # NOTE(morgan) everything in the policy_dict may be used by the policy
    # DSL to action on RBAC and request information/response data.
    policy_dict = {}

    # If "action" has not explicitly been overridden, see if it is set in
    # Flask.g app-context (per-request thread local) meaning the
    # @policy_enforcer_action decorator was used.
    action = action or getattr(flask.g, cls.ACTION_STORE_ATTR, None)
    if action not in _POSSIBLE_TARGET_ACTIONS:
        LOG.warning('RBAC: Unknown enforcement action name `%s`. '
                    'Rejecting as Forbidden, this is a programming error '
                    'and a bug should be filed with as much information '
                    'about the request that caused this as possible.',
                    action)
        # NOTE(morgan): While this is an internal error, a 500 is never
        # desirable, we have handled the case and the most appropriate
        # response here is to issue a 403 (FORBIDDEN) to any API calling
        # enforce_call with an inappropriate action/name to look up the
        # policy rule. This is simply a short-circuit as the enforcement
        # code raises a 403 on an unknown action (in keystone) by default.
        raise exception.Forbidden(
            message=_(
                'Internal RBAC enforcement error, invalid rule (action) '
                'name.'))

    # Mark flask.g as "enforce_call" has been called. This should occur
    # before anything except the "is this a valid action" check, ensuring
    # all proper "after request" checks pass, showing that the API has
    # enforcement.
    setattr(flask.g, _ENFORCEMENT_CHECK_ATTR, True)

    # Assert we are actually authenticated
    cls._assert_is_authenticated()

    # Check if "is_admin", this is in support of the old "admin auth token"
    # middleware with a shared "admin" token for auth
    if cls._shared_admin_auth_token_set():
        LOG.warning('RBAC: Bypassing authorization')
        return

    # NOTE(morgan): !!! ORDER OF THESE OPERATIONS IS IMPORTANT !!!
    # The lowest priority values are set first and the highest priority
    # values are set last.

    # Populate the input attributes (view args) directly to the policy
    # dict. This is to allow the policy engine to have access to the
    # view args for substitution. This is to mirror the old @protected
    # mechanism and ensure current policy files continue to work as
    # expected.
    policy_dict.update(flask.request.view_args)

    # Get the Target Data Set.
    if target_attr is None and build_target is None:
        try:
            policy_dict.update(cls._extract_member_target_data(
                member_target_type, member_target))
        except exception.NotFound:
            # DEBUG LOG and bubble up the 404 error. This is expected
            # behavior. This likely should be specific in each API. This
            # should be revisited in the future and each API should make
            # the explicit "existence" checks before enforcement.
            LOG.debug('Extracting inferred target data resulted in '
                      '"NOT FOUND (404)".')
            raise
        except Exception as e:  # nosec
            # NOTE(morgan): Errors should never bubble up at this point,
            # if there is an error getting the target, log it and move
            # on. Raise an explicit 403, we have failed policy checks.
            LOG.warning('Unable to extract inferred target data during '
                        'enforcement')
            LOG.debug(e, exc_info=True)
            raise exception.ForbiddenAction(action=action)

        # Special Case, extract and add subject_token data.
        subj_token_target_data = cls._extract_subject_token_target_data()
        if subj_token_target_data:
            policy_dict.setdefault('target', {}).update(
                subj_token_target_data)
    else:
        if target_attr and build_target:
            raise ValueError('Programming Error: A target_attr or '
                             'build_target must be provided, but not both')

        policy_dict['target'] = target_attr or build_target()

    # Pull the data from the submitted json body to generate
    # appropriate input/target attributes, we take an explicit copy here
    # to ensure we're not somehow corrupting
    json_input = flask.request.get_json(force=True, silent=True) or {}
    policy_dict.update(json_input.copy())

    # Generate the filter_attr dataset.
    policy_dict.update(cls._extract_filter_values(filters))

    flattened = utils.flatten_dict(policy_dict)
    if LOG.logger.getEffectiveLevel() <= log.DEBUG:
        # LOG the Args
        args_str = ', '.join(
            ['%s=%s' % (k, v) for
             k, v in (flask.request.view_args or {}).items()])
        # View args may carry secrets; mask before debug-logging.
        args_str = strutils.mask_password(args_str)
        LOG.debug('RBAC: Authorizing `%(action)s(%(args)s)`',
                  {'action': action, 'args': args_str})

    ctxt = cls._get_oslo_req_context()
    # Instantiate the enforcer object if needed.
    enforcer_obj = enforcer or cls()
    enforcer_obj._enforce(
        credentials=ctxt, action=action, target=flattened)
    LOG.debug('RBAC: Authorization granted')
def initialize_connection(self, volume, connector):
    """Attach volume to initiator/host.

    During this call VPSA exposes volume to particular Initiator. It
    also creates a 'server' entity for Initiator (if it was not created
    before)

    All necessary connection information is returned, including auth
    data. Connection data (target, LUN) is not stored in the DB.

    :raises ZadaraServerCreateFailure: server entity creation failed
    :raises VolumeNotFound: volume is unknown to the VPSA
    :raises ZadaraVPSANoActiveController: no active controller available
    :raises ZadaraAttachmentsNotFound: attachment list lacks this host
    :raises ZadaraInvalidAttachmentInfo: target or LUN missing in the
        attachment info
    """
    # Get/Create server name for IQN
    initiator_name = connector['initiator']

    vpsa_srv = self._create_vpsa_server(initiator_name)
    if not vpsa_srv:
        raise exception.ZadaraServerCreateFailure(name=initiator_name)

    # Get volume name
    name = self.configuration.zadara_vol_name_template % volume['name']
    vpsa_vol = self._get_vpsa_volume_name(name)
    if not vpsa_vol:
        raise exception.VolumeNotFound(volume_id=volume['id'])

    # Get Active controller details
    ctrl = self._get_active_controller_details()
    if not ctrl:
        raise exception.ZadaraVPSANoActiveController()

    xml_tree = self.vpsa.send_cmd('list_vol_attachments',
                                  vpsa_vol=vpsa_vol)
    attach = self._xml_parse_helper(xml_tree, 'servers',
                                    ('name', vpsa_srv))
    # Attach volume to server
    if attach is None:
        self.vpsa.send_cmd('attach_volume',
                           vpsa_srv=vpsa_srv,
                           vpsa_vol=vpsa_vol)
    # Get connection info
    xml_tree = self.vpsa.send_cmd('list_vol_attachments',
                                  vpsa_vol=vpsa_vol)
    server = self._xml_parse_helper(xml_tree, 'servers',
                                    ('iqn', initiator_name))
    if server is None:
        raise exception.ZadaraAttachmentsNotFound(name=name)

    target = server.findtext('target')
    # NOTE: read the LUN as text and validate presence BEFORE int():
    # the previous code did int(server.findtext('lun')) first, which
    # raised TypeError on a missing <lun> element and made the
    # ZadaraInvalidAttachmentInfo path unreachable for that case.
    lun_text = server.findtext('lun')
    if target is None or lun_text is None:
        raise exception.ZadaraInvalidAttachmentInfo(
            name=name,
            reason=_('target=%(target)s, lun=%(lun)s') %
            {'target': target, 'lun': lun_text})
    lun = int(lun_text)

    properties = {}
    properties['target_discovered'] = False
    properties['target_portal'] = '%s:%s' % (ctrl['ip'], '3260')
    properties['target_iqn'] = target
    properties['target_lun'] = lun
    properties['volume_id'] = volume['id']
    properties['auth_method'] = 'CHAP'
    properties['auth_username'] = ctrl['chap_user']
    properties['auth_password'] = ctrl['chap_passwd']

    # properties contain the CHAP secret, so mask before logging
    LOG.debug('Attach properties: %(properties)s',
              {'properties': strutils.mask_password(properties)})
    return {'driver_volume_type':
            ('iser' if (self.configuration.safe_get('zadara_use_iser'))
             else 'iscsi'),
            'data': properties}
def prepare_request_body(context, body, is_create, resource, attr_info,
                         allow_bulk=False):
    """Verifies required attributes are in request body.

    Also checking that an attribute is only specified if it is allowed
    for the given operation (create/update).

    Attribute with default values are considered to be optional.

    body argument must be the deserialized body.

    :param context: request context, forwarded to tenant population and
        to recursive bulk calls
    :param body: deserialized request body (dict); must be truthy
    :param is_create: True for POST semantics, False for PUT
    :param resource: singular resource name; collection is resource + "s"
    :param attr_info: per-attribute policy dict (allow_post/allow_put,
        default, convert_to, validate)
    :param allow_bulk: whether a {collection: [...]} body is accepted
    :returns: the (possibly mutated) body with defaults filled in
    :raises webob.exc.HTTPBadRequest: on any validation failure
    """
    collection = resource + "s"
    if not body:
        raise webob.exc.HTTPBadRequest(_("Resource body required"))

    # Mask password-like values before logging the raw body.
    LOG.debug("Request body: %(body)s",
              {'body': strutils.mask_password(body)})
    # Recursive helper used for bulk requests; wraps a bare item dict in
    # {resource: item} when needed so each item is validated the same way.
    prep_req_body = lambda x: Controller.prepare_request_body(  # noqa
        context, x if resource in x else {resource: x},
        is_create, resource, attr_info, allow_bulk)
    if collection in body:
        if not allow_bulk:
            raise webob.exc.HTTPBadRequest(_("Bulk operation "
                                             "not supported"))
        bulk_body = [prep_req_body(item) for item in body[collection]]
        if not bulk_body:
            raise webob.exc.HTTPBadRequest(_("Resources required"))
        return {collection: bulk_body}

    res_dict = body.get(resource)
    if res_dict is None:
        msg = _("Unable to find '%s' in request body") % resource
        raise webob.exc.HTTPBadRequest(msg)

    Controller._populate_tenant_id(context, res_dict, is_create)
    Controller._verify_attributes(res_dict, attr_info)

    if is_create:  # POST
        for attr, attr_vals in (attr_info).items():
            if attr_vals['allow_post']:
                # An attribute without a default is mandatory on create.
                if ('default' not in attr_vals and
                        attr not in res_dict):
                    msg = _("Failed to parse request. Required "
                            "attribute '%s' not specified") % attr
                    raise webob.exc.HTTPBadRequest(msg)
                res_dict[attr] = res_dict.get(attr,
                                              attr_vals.get('default'))
            else:
                if attr in res_dict:
                    msg = _("Attribute '%s' not allowed in POST") % attr
                    raise webob.exc.HTTPBadRequest(msg)
    else:  # PUT
        for attr, attr_vals in (attr_info).items():
            if attr in res_dict and not attr_vals['allow_put']:
                msg = _("Cannot update read-only attribute %s") % attr
                raise webob.exc.HTTPBadRequest(msg)

    for attr, attr_vals in (attr_info).items():
        # Unspecified attributes are skipped entirely.
        if (attr not in res_dict or
                res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
            continue
        # Convert values if necessary
        if 'convert_to' in attr_vals:
            res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
        # Check that configured values are correct
        if 'validate' not in attr_vals:
            continue
        for rule in attr_vals['validate']:
            # skip validating vnfd_id when vnfd_template is specified to
            # create vnf
            if (resource == 'vnf') and ('vnfd_template' in body['vnf'])\
                    and (attr == "vnfd_id") and is_create:
                continue
            # skip validating vnffgd_id when vnffgd_template is provided
            if ((resource == 'vnffg') and
                    ('vnffgd_template' in body['vnffg']) and
                    (attr == 'vnffgd_id') and is_create):
                continue
            # skip validating nsd_id when nsd_template is provided
            if (resource == 'ns') and ('nsd_template' in body['ns'])\
                    and (attr == 'nsd_id') and is_create:
                continue
            # Validators return a reason string on failure, falsy on pass.
            res = attributes.validators[rule](res_dict[attr],
                                              attr_vals['validate'][rule])
            if res:
                msg_dict = dict(attr=attr, reason=res)
                msg = _("Invalid input for %(attr)s. "
                        "Reason: %(reason)s.") % msg_dict
                raise webob.exc.HTTPBadRequest(msg)
    return body
def _process_stack(self, request, action, action_args,
                   content_type, body, accept):
    """Implement the processing stack.

    Resolves the method implementing *action*, deserializes the body,
    runs pre-/post-processing extensions around dispatch, serializes the
    result and decorates the response with API-version headers.  Any
    failure is converted into a Fault-wrapped webob error response.
    """
    # Get the implementing method
    try:
        meth, extensions = self.get_method(request, action,
                                           content_type, body)
    except (AttributeError, TypeError):
        return Fault(webob.exc.HTTPNotFound())
    except KeyError as ex:
        msg = _("There is no such action: %s") % ex.args[0]
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    if body:
        # mask_password: the body may carry credentials; never log raw.
        msg = ("Action: '%(action)s', calling method: %(meth)s, body: "
               "%(body)s") % {'action': action,
                              'body': six.text_type(body),
                              'meth': six.text_type(meth)}
        LOG.debug(strutils.mask_password(msg))
    else:
        LOG.debug("Calling method '%(meth)s'",
                  {'meth': six.text_type(meth)})

    # Now, deserialize the request body...
    try:
        if content_type:
            contents = self.deserialize(meth, content_type, body)
        else:
            contents = {}
    except exception.InvalidContentType:
        msg = _("Unsupported Content-Type")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Update the action args
    action_args.update(contents)

    # Reject requests whose URL project_id disagrees with the context.
    project_id = action_args.pop("project_id", None)
    context = request.environ.get('manila.context')
    if (context and project_id and (project_id != context.project_id)):
        msg = _("Malformed request url")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Run pre-processing extensions
    response, post = self.pre_process_extensions(extensions,
                                                 request, action_args)

    if not response:
        try:
            with ResourceExceptionHandler():
                action_result = self.dispatch(meth, request, action_args)
        except Fault as ex:
            response = ex

    if not response:
        # No exceptions; convert action_result into a
        # ResponseObject
        resp_obj = None
        if type(action_result) is dict or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            # Anything else is assumed to already be a response.
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            _set_request_id_header(request, resp_obj)
            # Do a preserialize to set up the response object
            serializers = getattr(meth, 'wsgi_serializers', {})
            resp_obj._bind_method_serializers(serializers)
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code
            resp_obj.preserialize(accept, self.default_serializers)

            # Process post-processing extensions
            response = self.post_process_extensions(post, resp_obj,
                                                    request, action_args)

        if resp_obj and not response:
            response = resp_obj.serialize(request, accept,
                                          self.default_serializers)

    try:
        msg_dict = dict(url=request.url, status=response.status_int)
        msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
    except AttributeError as e:
        # Response object without status_int (e.g. a raw body).
        msg_dict = dict(url=request.url, e=e)
        msg = _("%(url)s returned a fault: %(e)s") % msg_dict

    LOG.info(msg)

    if hasattr(response, 'headers'):
        for hdr, val in response.headers.items():
            # Headers must be utf-8 strings
            response.headers[hdr] = six.text_type(val)

        if not request.api_version_request.is_null():
            response.headers[API_VERSION_REQUEST_HEADER] = (
                request.api_version_request.get_string())
            if request.api_version_request.experimental:
                response.headers[EXPERIMENTAL_API_REQUEST_HEADER] = (
                    request.api_version_request.experimental)
            response.headers['Vary'] = API_VERSION_REQUEST_HEADER

    return response
def test_json(self):
    """mask_password() must replace JSON-style secret values with '***'.

    The original 'w/o spaces' payloads had themselves been clobbered to
    '******' (both payload and expected), which is self-inconsistent:
    mask_password() rewrites any secret value to '***', so the old
    assertions could never pass.  Use a real dummy secret, matching the
    'with spaces' variants that survived intact.
    """
    # All password-style keys share the same dummy-secret pattern.
    for key in ('adminPass', 'admin_pass', 'admin_password',
                'password', 'auth_password'):
        # w/o spaces
        payload = """{'%s':'TL0EfN33'}""" % key
        expected = """{'%s':'***'}""" % key
        self.assertEqual(expected, strutils.mask_password(payload))
        # with spaces
        payload = """{ '%s' : 'TL0EfN33' }""" % key
        expected = """{ '%s' : '***' }""" % key
        self.assertEqual(expected, strutils.mask_password(payload))

    # Test 'secret_uuid' w/o spaces
    payload = """{'secret_uuid':'myuuid'}"""
    expected = """{'secret_uuid':'***'}"""
    self.assertEqual(expected, strutils.mask_password(payload))
    # Test 'secret_uuid' with spaces
    payload = """{ 'secret_uuid' : 'myuuid' }"""
    expected = """{ 'secret_uuid' : '***' }"""
    self.assertEqual(expected, strutils.mask_password(payload))
    # Test 'token' w/o spaces
    payload = """{'token':'token'}"""
    expected = """{'token':'***'}"""
    self.assertEqual(expected, strutils.mask_password(payload))
    # Test 'token' with spaces
    payload = """{ 'token' : 'token' }"""
    expected = """{ 'token' : '***' }"""
    self.assertEqual(expected, strutils.mask_password(payload))
def get_volume_encryptor(root_helper, connection_info, keymgr,
                         execute=None, *args, **kwargs):
    """Creates a VolumeEncryptor used to encrypt the specified volume.

    Defaults to a NoOpEncryptor; when kwargs request front-end control
    ('control_location' == 'front-end', case-insensitive) the 'provider'
    kwarg is resolved — via the legacy class-name map and the
    format-to-encryptor map — and instantiated instead.

    :param root_helper: root helper forwarded to the encryptor
    :param connection_info: the connection information used to attach
        the volume
    :param keymgr: key manager forwarded to the encryptor
    :param execute: execution callable forwarded to the encryptor
    :returns VolumeEncryptor: the VolumeEncryptor for the volume
    :raises Exception: re-raises whatever import_object raises when the
        resolved provider cannot be instantiated
    """
    encryptor = nop.NoOpEncryptor(root_helper=root_helper,
                                  connection_info=connection_info,
                                  keymgr=keymgr,
                                  execute=execute,
                                  *args, **kwargs)

    location = kwargs.get('control_location', None)
    if location and location.lower() == 'front-end':  # case insensitive
        provider = kwargs.get('provider')

        # TODO(lyarwood): Remove the following in Queens and raise an
        # ERROR if provider is not a key in SUPPORTED_ENCRYPTION_PROVIDERS.
        # Until then continue to allow both the class name and path to be
        # used.
        if provider in LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP:
            LOG.warning("Use of the in tree encryptor class %(provider)s"
                        " by directly referencing the implementation class"
                        " will be blocked in the Queens release of"
                        " os-brick.", {'provider': provider})
            provider = LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP[provider]

        if provider in FORMAT_TO_FRONTEND_ENCRYPTOR_MAP:
            provider = FORMAT_TO_FRONTEND_ENCRYPTOR_MAP[provider]
        elif provider is None:
            provider = "os_brick.encryptors.nop.NoOpEncryptor"
        else:
            # Out-of-tree provider path: still imported below, but warned.
            LOG.warning("Use of the out of tree encryptor class "
                        "%(provider)s will be blocked with the Queens "
                        "release of os-brick.", {'provider': provider})

        try:
            encryptor = importutils.import_object(
                provider,
                root_helper,
                connection_info,
                keymgr,
                execute,
                **kwargs)
        except Exception as e:
            LOG.error("Error instantiating %(provider)s: %(exception)s",
                      {'provider': provider, 'exception': e})
            raise

    # Mask any credentials carried in connection_info before logging.
    msg = ("Using volume encryptor '%(encryptor)s' for connection: "
           "%(connection_info)s" %
           {'encryptor': encryptor, 'connection_info': connection_info})
    LOG.debug(strutils.mask_password(msg))

    return encryptor
def test_xml_attribute(self):
    """mask_password() must replace XML-attribute secrets with '***'.

    The original payloads and expectations were all clobbered to
    '******', which mask_password() rewrites to '***', so every
    assertion as written would fail.  Restore a real dummy secret and
    the canonical '***' expectation (same pattern the sibling
    test_json uses for its intact cases).
    """
    for key in ('adminPass', 'admin_pass', 'admin_password', 'password'):
        # w/o spaces
        payload = """%s='TL0EfN33'""" % key
        expected = """%s='***'""" % key
        self.assertEqual(expected, strutils.mask_password(payload))
        # with spaces
        payload = """%s = 'TL0EfN33'""" % key
        expected = """%s = '***'""" % key
        self.assertEqual(expected, strutils.mask_password(payload))
        # with double quotes
        payload = '''%s = "TL0EfN33"''' % key
        expected = '''%s = "***"''' % key
        self.assertEqual(expected, strutils.mask_password(payload))
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True,
                binary=False, timeout=None):
    """Run a command through SSH.

    :param ssh: connected SSH client (paramiko-style ``exec_command``)
    :param cmd: command string to execute remotely
    :param process_input: not supported over SSH; raises if truthy
    :param addl_env: not supported over SSH; raises if truthy
    :param check_exit_code: raise ProcessExecutionError on non-zero exit
    :param binary: return stdout/stderr as bytes rather than text
    :param timeout: passed through to ``ssh.exec_command``
    :returns: (stdout, stderr) with password-like content masked
    :raises InvalidArgumentError: for unsupported addl_env/process_input
    :raises ProcessExecutionError: when check_exit_code is set and the
        remote command exits non-zero

    .. versionchanged:: 1.9
       Added *binary* optional parameter.
    """
    sanitized_cmd = strutils.mask_password(cmd)
    LOG.debug('Running cmd (SSH): %s', sanitized_cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(
        cmd, timeout=timeout)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()

    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    if six.PY3:
        # Decode from the locale using the surrogateescape error handler
        # (decoding cannot fail). Decode even if binary is True because
        # mask_password() requires Unicode on Python 3
        stdout = os.fsdecode(stdout)
        stderr = os.fsdecode(stderr)
    stdout = strutils.mask_password(stdout)
    stderr = strutils.mask_password(stderr)

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        # FIX: use lazy logger interpolation instead of eager '%'
        # formatting, so the string is only built when DEBUG is enabled.
        LOG.debug('Result was %s', exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=sanitized_cmd)

    if binary:
        if six.PY2:
            # On Python 2, stdout is a bytes string if mask_password() failed
            # to decode it, or an Unicode string otherwise. Encode to the
            # default encoding (ASCII) because mask_password() decodes from
            # the same encoding.
            if isinstance(stdout, unicode):
                stdout = stdout.encode()
            if isinstance(stderr, unicode):
                stderr = stderr.encode()
        else:
            # fsencode() is the reverse operation of fsdecode()
            stdout = os.fsencode(stdout)
            stderr = os.fsencode(stderr)

    return (stdout, stderr)
def initialize_connection(self, volume, connector):
    """Map a volume to a host and return target iSCSI information.

    Ensures the host/hostgroup/initiator exist on the backend, maps the
    LUN, and builds connection properties (single-path or multipath
    depending on the connector), including CHAP credentials when the
    backend has CHAP configured for this initiator.
    """
    lun_id, lun_type = self.get_lun_id_and_type(volume)
    initiator_name = connector['initiator']
    LOG.info('initiator name: %(initiator_name)s, '
             'LUN ID: %(lun_id)s.',
             {'initiator_name': initiator_name,
              'lun_id': lun_id})

    (iscsi_iqns,
     target_ips,
     portgroup_id) = self.client.get_iscsi_params(connector)
    LOG.info('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, '
             'target_ip: %(target_ip)s, '
             'portgroup_id: %(portgroup_id)s.',
             {'iscsi_iqn': iscsi_iqns,
              'target_ip': target_ips,
              'portgroup_id': portgroup_id},)

    # Create hostgroup if not exist.
    host_id = self.client.add_host_with_check(connector['host'])

    # Add initiator to the host.
    self.client.ensure_initiator_added(initiator_name,
                                       host_id)
    hostgroup_id = self.client.add_host_to_hostgroup(host_id)

    # Mapping lungroup and hostgroup to view.
    self.client.do_mapping(lun_id, hostgroup_id,
                           host_id, portgroup_id, lun_type)

    hostlun_id = self.client.get_host_lun_id(host_id, lun_id,
                                             lun_type)

    LOG.info("initialize_connection, host lun id is: %s.",
             hostlun_id)

    chapinfo = self.client.find_chap_info(self.client.iscsi_info,
                                          initiator_name)

    # Return iSCSI properties.
    properties = {}
    properties['target_discovered'] = False
    properties['volume_id'] = volume.id
    multipath = connector.get('multipath', False)
    hostlun_id = int(hostlun_id)
    if not multipath:
        # Single path: expose only the first portal/IQN.
        properties['target_portal'] = ('%s:3260' % target_ips[0])
        properties['target_iqn'] = iscsi_iqns[0]
        properties['target_lun'] = hostlun_id
    else:
        # Multipath: one portal per target IP, same host LUN id on each.
        properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
        properties['target_portals'] = [
            '%s:3260' % ip for ip in target_ips]
        properties['target_luns'] = [hostlun_id] * len(target_ips)

    # If use CHAP, return CHAP info.
    # NOTE(review): assumes chapinfo is a 'user;password' string —
    # confirm against find_chap_info; extra ';' would raise ValueError.
    if chapinfo:
        chap_username, chap_password = chapinfo.split(';')
        properties['auth_method'] = 'CHAP'
        properties['auth_username'] = chap_username
        properties['auth_password'] = chap_password

    # Mask CHAP credentials before logging the properties.
    LOG.info("initialize_connection success. Return data: %s.",
             strutils.mask_password(properties))
    return {'driver_volume_type': 'iscsi', 'data': properties}
def __repr__(self):
    """Return a debug representation with password values masked."""
    raw = "<{}:{}>".format(self.__class__.__name__, self.__dict__)
    return strutils.mask_password(raw)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param cwd:             Set the current working directory
    :type cwd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param env_variables:   Environment variables and their values that
                            will be set for the process.
    :type env_variables:    dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be logging.DEBUG or logging.INFO)
    :param log_errors:      Should stdout and stderr be logged on error?
                            Possible values are None=default,
                            LOG_FINAL_ERROR, or LOG_ALL_ERRORS. None
                            implies no logging on errors. The values
                            LOG_FINAL_ERROR and LOG_ALL_ERRORS are
                            relevant when multiple attempts of command
                            execution are requested using the
                            'attempts' parameter. If LOG_FINAL_ERROR
                            is specified then only log an error on the
                            last attempt, and LOG_ALL_ERRORS requires
                            logging on each occurence of an error.
    :type log_errors:       integer.
    :param binary:          On Python 3, return stdout and stderr as bytes
                            if binary is True, as Unicode otherwise.
    :type binary:           boolean
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    :raises:                :class:`OSError`
    """
    cwd = kwargs.pop('cwd', None)
    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    binary = kwargs.pop('binary', False)

    # Normalize check_exit_code to a list of allowed codes; a bare bool
    # means "ignore the exit code entirely" when False.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if log_errors not in [None, LOG_ALL_ERRORS, LOG_FINAL_ERROR]:
        raise InvalidArgumentError(_('Got invalid arg log_errors: %r') %
                                   log_errors)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        if shell:
            # root helper has to be injected into the command string
            cmd = [' '.join((root_helper, cmd[0]))] + list(cmd[1:])
        else:
            # root helper has to be tokenized into argument list
            cmd = shlex.split(root_helper) + list(cmd)

    cmd = [str(c) for c in cmd]
    # Mask any embedded passwords once; reused for all log lines below.
    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    while attempts > 0:
        attempts -= 1
        try:
            start_time = time.time()
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                # preexec_fn/close_fds are not supported on Windows.
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   cwd=cwd,
                                   env=env_variables)
            result = obj.communicate(process_input)

            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            end_time = time.time() - start_time
            LOG.log(loglevel,
                    'CMD "%s" returned: %s in %0.3fs' %
                    (sanitized_cmd, _returncode, end_time))
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                if six.PY3:
                    stdout = os.fsdecode(stdout)
                    stderr = os.fsdecode(stderr)
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            if six.PY3 and not binary and result is not None:
                (stdout, stderr) = result
                # Decode from the locale using the surrogateescape error
                # handler (decoding cannot fail)
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                return (stdout, stderr)
            else:
                return result
        except (ProcessExecutionError, OSError) as err:
            # if we want to always log the errors or if this is
            # the final attempt that failed and we want to log that.
            if log_errors == LOG_ALL_ERRORS or (
                    log_errors == LOG_FINAL_ERROR and not attempts):
                if isinstance(err, ProcessExecutionError):
                    format = _('%(desc)r\ncommand: %(cmd)r\n'
                               'exit code: %(code)r\nstdout: %(stdout)r\n'
                               'stderr: %(stderr)r')
                    LOG.log(loglevel, format, {"desc": err.description,
                                               "cmd": err.cmd,
                                               "code": err.exit_code,
                                               "stdout": err.stdout,
                                               "stderr": err.stderr})
                else:
                    format = _('Got an OSError\ncommand: %(cmd)r\n'
                               'errno: %(errno)r')
                    LOG.log(loglevel, format, {"cmd": sanitized_cmd,
                                               "errno": err.errno})

            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    time.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            # NOTE(bnemec): termie's comment above is probably specific to the
            #               eventlet subprocess module, but since we still
            #               have to support that we're leaving the sleep.  It
            #               won't hurt anything in the stdlib case anyway.
            time.sleep(0)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param cwd:             Set the current working directory
    :type cwd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param env_variables:   Environment variables and their values that
                            will be set for the process.
    :type env_variables:    dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be logging.DEBUG or logging.INFO)
    :param log_errors:      Should stdout and stderr be logged on error?
                            Possible values are
                            :py:attr:`~.LogErrors.DEFAULT`,
                            :py:attr:`~.LogErrors.FINAL`, or
                            :py:attr:`~.LogErrors.ALL`.  Note that the
                            values :py:attr:`~.LogErrors.FINAL` and
                            :py:attr:`~.LogErrors.ALL` are **only**
                            relevant when multiple attempts of command
                            execution are requested using the
                            ``attempts`` parameter.
    :type log_errors:       :py:class:`~.LogErrors`
    :param binary:          On Python 3, return stdout and stderr as bytes
                            if binary is True, as Unicode otherwise.
    :type binary:           boolean
    :param on_execute:      This function will be called upon process
                            creation with the object as a argument.
                            The Purpose of this is to allow the caller of
                            `processutils.execute` to track process creation
                            asynchronously.
    :type on_execute:       function(:class:`subprocess.Popen`)
    :param on_completion:   This function will be called upon process
                            completion with the object as a argument.  The
                            Purpose of this is to allow the caller of
                            `processutils.execute` to track process
                            completion asynchronously.
    :type on_completion:    function(:class:`subprocess.Popen`)
    :param preexec_fn:      This function will be called in the child process
                            just before the child is executed.  WARNING: On
                            windows, we silently drop this preexec_fn as
                            it is not supported by subprocess.Popen on
                            windows (throws a ValueError)
    :type preexec_fn:       function()
    :param prlimit:         Set resource limits on the child process.  See
                            below for a detailed description.
    :type prlimit:          :class:`ProcessLimits`
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    :raises:                :class:`OSError`

    The *prlimit* parameter can be used to set resource limits on the child
    process.  If this parameter is used, the child process will be spawned
    by a wrapper process which will set limits before spawning the command.

    .. versionchanged:: 3.4
       Added *prlimit* optional parameter.

    .. versionchanged:: 1.5
       Added *cwd* optional parameter.

    .. versionchanged:: 1.9
       Added *binary* optional parameter.  On Python 3, *stdout* and
       *stdout* are now returned as Unicode strings by default, or bytes
       if *binary* is true.

    .. versionchanged:: 2.1
       Added *on_execute* and *on_completion* optional parameters.

    .. versionchanged:: 2.3
       Added *preexec_fn* optional parameter.
    """
    cwd = kwargs.pop('cwd', None)
    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    if log_errors is None:
        log_errors = LogErrors.DEFAULT
    binary = kwargs.pop('binary', False)
    on_execute = kwargs.pop('on_execute', None)
    on_completion = kwargs.pop('on_completion', None)
    preexec_fn = kwargs.pop('preexec_fn', None)
    prlimit = kwargs.pop('prlimit', None)

    # Normalize check_exit_code to a list; a bare bool means "ignore the
    # exit code entirely" when False.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if isinstance(log_errors, six.integer_types):
        log_errors = LogErrors(log_errors)
    if not isinstance(log_errors, LogErrors):
        raise InvalidArgumentError(_('Got invalid arg log_errors: %r') %
                                   log_errors)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        if shell:
            # root helper has to be injected into the command string
            cmd = [' '.join((root_helper, cmd[0]))] + list(cmd[1:])
        else:
            # root helper has to be tokenized into argument list
            cmd = shlex.split(root_helper) + list(cmd)

    cmd = [str(c) for c in cmd]

    if prlimit:
        if os.name == 'nt':
            LOG.log(loglevel,
                    _('Process resource limits are ignored as '
                      'this feature is not supported on Windows.'))
        else:
            # Re-exec through the prlimit wrapper module so limits are
            # applied before the real command is spawned.
            args = [sys.executable, '-m', 'oslo_concurrency.prlimit']
            args.extend(prlimit.prlimit_args())
            args.append('--')
            args.extend(cmd)
            cmd = args

    # Mask any embedded passwords once; reused for all log lines below.
    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    watch = timeutils.StopWatch()
    while attempts > 0:
        attempts -= 1
        watch.restart()

        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                # preexec_fn/close_fds are not supported on Windows.
                on_preexec_fn = None
                close_fds = False
            else:
                on_preexec_fn = functools.partial(_subprocess_setup,
                                                  preexec_fn)
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=on_preexec_fn,
                                   shell=shell,
                                   cwd=cwd,
                                   env=env_variables)

            if on_execute:
                on_execute(obj)

            try:
                result = obj.communicate(process_input)

                obj.stdin.close()  # pylint: disable=E1101
                _returncode = obj.returncode  # pylint: disable=E1101
                LOG.log(loglevel, 'CMD "%s" returned: %s in %0.3fs',
                        sanitized_cmd, _returncode, watch.elapsed())
            finally:
                if on_completion:
                    on_completion(obj)

            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                if six.PY3:
                    stdout = os.fsdecode(stdout)
                    stderr = os.fsdecode(stderr)
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            if six.PY3 and not binary and result is not None:
                (stdout, stderr) = result
                # Decode from the locale using the surrogateescape error
                # handler (decoding cannot fail)
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                return (stdout, stderr)
            else:
                return result
        except (ProcessExecutionError, OSError) as err:
            # if we want to always log the errors or if this is
            # the final attempt that failed and we want to log that.
            if log_errors == LOG_ALL_ERRORS or (
                    log_errors == LOG_FINAL_ERROR and not attempts):
                if isinstance(err, ProcessExecutionError):
                    format = _('%(desc)r\ncommand: %(cmd)r\n'
                               'exit code: %(code)r\nstdout: %(stdout)r\n'
                               'stderr: %(stderr)r')
                    LOG.log(loglevel, format, {"desc": err.description,
                                               "cmd": err.cmd,
                                               "code": err.exit_code,
                                               "stdout": err.stdout,
                                               "stderr": err.stderr})
                else:
                    format = _('Got an OSError\ncommand: %(cmd)r\n'
                               'errno: %(errno)r')
                    LOG.log(loglevel, format, {"cmd": sanitized_cmd,
                                               "errno": err.errno})

            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    time.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            # NOTE(bnemec): termie's comment above is probably specific to the
            #               eventlet subprocess module, but since we still
            #               have to support that we're leaving the sleep.  It
            #               won't hurt anything in the stdlib case anyway.
            time.sleep(0)
def _process_stack(self, request, action, action_args,
                   content_type, body, accept):
    """Implement the processing stack.

    Resolves the controller method for *action*, logs the
    (password-masked) request, deserializes the body, validates the
    URL's project_id against the request context, then dispatches
    through the pre/post extension pipeline and serializes the result.

    :param request: the incoming request object.
    :param action: name of the action to invoke on the controller.
    :param action_args: dict of args parsed from the URL; updated in
        place with the deserialized body contents.
    :param content_type: request body content type.
    :param body: raw request body.
    :param accept: negotiated response content type.
    :returns: a serialized response, or a ``Fault`` wrapping an HTTP
        error when resolution/deserialization/validation fails.
    """
    # Get the implementing method
    try:
        meth, extensions = self.get_method(request, action,
                                           content_type, body)
    except (AttributeError, TypeError):
        # No such attribute on the controller, or it is not callable.
        return Fault(webob.exc.HTTPNotFound())
    except KeyError as ex:
        msg = _("There is no such action: %s") % ex.args[0]
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    if body:
        # Mask credentials before the request body reaches the log.
        msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
                "%(body)s") % {'action': action,
                               'body': six.text_type(body, 'utf-8'),
                               'meth': str(meth)}
        LOG.debug(strutils.mask_password(msg))
    else:
        LOG.debug("Calling method '%(meth)s'",
                  {'meth': str(meth)})

    # Now, deserialize the request body...
    try:
        contents = {}
        if self._should_have_body(request):
            # allow empty body with PUT and POST
            if request.content_length == 0:
                contents = {'body': None}
            else:
                contents = self.deserialize(body)
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Update the action args
    action_args.update(contents)

    # Reject requests whose URL project_id disagrees with the
    # authenticated context's project_id.
    project_id = action_args.pop("project_id", None)
    context = request.environ.get('nova.context')
    if (context and project_id and (project_id != context.project_id)):
        msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
                " doesn't match Context's project_id"
                " '%(context_project_id)s'") % \
            {'project_id': project_id,
             'context_project_id': context.project_id}
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Run pre-processing extensions; an extension may short-circuit
    # dispatch by returning a response of its own.
    response, post = self.pre_process_extensions(extensions,
                                                 request, action_args)

    if not response:
        try:
            with ResourceExceptionHandler():
                action_result = self.dispatch(meth, request, action_args)
        except Fault as ex:
            response = ex

    if not response:
        # No exceptions; convert action_result into a ResponseObject
        resp_obj = None
        if type(action_result) is dict or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            # Anything else (e.g. an already-built response) is
            # returned untouched.
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            # Do a preserialize to set up the response object
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code
            # Process post-processing extensions
            response = self.post_process_extensions(post, resp_obj,
                                                    request, action_args)

        if resp_obj and not response:
            response = resp_obj.serialize(request, accept)

    if hasattr(response, 'headers'):
        # list() so mutating the mapping while iterating is safe.
        for hdr, val in list(response.headers.items()):
            # Headers must be utf-8 strings
            response.headers[hdr] = utils.utf8(val)

        # Advertise the negotiated API microversion on the response.
        if not request.api_version_request.is_null():
            response.headers[API_VERSION_REQUEST_HEADER] = \
                'compute ' + request.api_version_request.get_string()
            response.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \
                request.api_version_request.get_string()
            response.headers.add('Vary', API_VERSION_REQUEST_HEADER)
            response.headers.add('Vary', LEGACY_API_VERSION_REQUEST_HEADER)

    return response
def sync(self):
    """Refresh this grid configuration from the grid master.

    Discovers the current configuration for the GM member; when
    something was discovered, copies it onto this object's fields and
    logs the resulting state with passwords masked.
    """
    fetched = self._discover_config(self.get_gm_member())
    if not fetched:
        return
    self._update_fields(fetched)
    LOG.debug(_LI("grid config synced: %s"),
              strutils.mask_password(self.__dict__, secret="********"))
def notify(self, ctxt, message, priority, retry):
    """Emit a notification through the standard logging machinery.

    The target logger is named ``<LOGGER_BASE>.<event_type>``; the
    JSON-encoded, password-masked message is logged at *priority*
    when the logger exposes a matching level method, and silently
    dropped otherwise.
    """
    log_name = '%s.%s' % (self.LOGGER_BASE, message['event_type'])
    emit = getattr(logging.getLogger(log_name), priority.lower(), None)
    if emit is not None:
        emit(strutils.mask_password(jsonutils.dumps(message)))
def _process_stack(self, request, action, action_args,
                   content_type, body, accept):
    """Implement the processing stack.

    Resolves the controller method for *action*, logs the
    (password-masked) request, deserializes the body, dispatches the
    action, serializes the result and utf-8-encodes response headers.

    :param request: the incoming request object.
    :param action: name of the action to invoke on the controller.
    :param action_args: dict of args parsed from the URL; updated in
        place with the deserialized body contents.
    :param content_type: request body content type.
    :param body: raw request body.
    :param accept: negotiated response content type.
    :returns: a serialized response, or a ``Fault`` wrapping an HTTP
        error when resolution or deserialization fails.
    """
    # Get the implementing method
    try:
        meth, extensions = self.get_method(request, action,
                                           content_type, body)
    except (AttributeError, TypeError):
        # No such attribute on the controller, or it is not callable.
        return Fault(webob.exc.HTTPNotFound())
    except KeyError as ex:
        msg = "There is no such action: %s" % ex.args[0]
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = "Malformed request body"
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    if body:
        # Mask credentials before the request body reaches the log.
        msg = ("Action: '%(action)s', calling method: %(meth)s, body: "
               "%(body)s") % {'action': action,
                              'body': six.text_type(body),
                              'meth': six.text_type(meth)}
        LOG.debug(strutils.mask_password(msg))
    else:
        LOG.debug("Calling method '%(meth)s'",
                  {'meth': six.text_type(meth)})

    # Now, deserialize the request body...
    try:
        if content_type:
            contents = self.deserialize(meth, content_type, body)
        else:
            # NOTE(review): without a content type the raw body is
            # passed straight through to the action args.
            contents = body
    except exception.InvalidContentType:
        msg = "Unsupported Content-Type"
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = "Malformed request body"
        return Fault(webob.exc.HTTPBadRequest(explanation=msg))

    # Update the action args
    action_args.update(contents)

    response = None
    try:
        with ResourceExceptionHandler():
            action_result = self.dispatch(meth, request, action_args)
    except Fault as ex:
        response = ex

    if not response:
        # No exceptions; convert action_result into a
        # ResponseObject
        resp_obj = None
        if isinstance(action_result, dict) or action_result is None:
            resp_obj = ResponseObject(action_result)
        elif isinstance(action_result, ResponseObject):
            resp_obj = action_result
        else:
            # Anything else (e.g. an already-built response) is
            # returned untouched.
            response = action_result

        # Run post-processing extensions
        if resp_obj:
            # Do a preserialize to set up the response object
            serializers = getattr(meth, 'wsgi_serializers', {})
            resp_obj._bind_method_serializers(serializers)
            if hasattr(meth, 'wsgi_code'):
                resp_obj._default_code = meth.wsgi_code
            resp_obj.preserialize(accept, self.default_serializers)

        if resp_obj:
            response = resp_obj.serialize(request, accept,
                                          self.default_serializers)

    # Log the outcome; a Fault-like response without status_int falls
    # back to the fault form of the message.
    try:
        msg_dict = dict(url=request.url, status=response.status_int)
        msg = "%(url)s returned with HTTP %(status)d"
    except AttributeError as e:
        msg_dict = dict(url=request.url, e=e)
        msg = "%(url)s returned a fault: %(e)s"

    LOG.info(msg, msg_dict)

    if hasattr(response, 'headers'):
        # NOTE(review): mutates response.headers while iterating its
        # items() view — relies on the mapping tolerating in-place
        # value replacement.
        for hdr, val in response.headers.items():
            # Headers must be utf-8 strings
            try:
                # python 2.x
                response.headers[hdr] = val.encode('utf-8')
            except Exception:  # pylint: disable=broad-except
                # python 3.x
                response.headers[hdr] = six.text_type(val)

    return response
def _allow_access(self, context, share, access):
    """Allow access to the share.

    Grants IP-based access by attaching the VPSA volume backing the
    share to a VPSA server object created (or reused) for the client
    IP, then returns the connection properties — including CHAP
    credentials — for that attachment.

    :param context: request context (not used directly here).
    :param share: share dict; 'share_proto' and 'id' are used.
    :param access: access rule dict with 'access_type', 'access_to'
        (client IP) and 'access_level' ('rw' or read-only).
    :returns: dict with 'driver_volume_type' and 'data' (connection
        properties).
    :raises: Zadara-specific manila exceptions when the access type
        is not 'ip', no active controller exists, the backing volume
        or attachment cannot be found, or server creation fails.
    """
    access_type = access['access_type']
    share_proto = share['share_proto'].upper()
    # The VPSA API names the CIFS protocol 'SMB'.
    if share_proto == 'CIFS':
        share_proto = 'SMB'
    # Only IP-based access rules are supported by this driver.
    if access_type != 'ip':
        raise manila_exception.ZadaraInvalidShareAccessType()
    access_ip = access['access_to']
    # 'YES'/'NO' answer the VPSA's "read only?" flag: default to
    # read-only ('YES'); an 'rw' rule switches it off ('NO').
    access_level = 'YES'
    if access['access_level'] == 'rw':
        access_level = 'NO'

    # First: Check Active controller: if not valid, raise exception
    ctrl = self.vpsa._get_active_controller_details()
    if not ctrl:
        raise manila_exception.ZadaraVPSANoActiveController()

    # Get volume name
    vol_name = self._get_zadara_share_template_name(share['id'])
    vpsa_volume = self.vpsa._get_vpsa_volume(vol_name)
    if not vpsa_volume:
        msg = (_('VPSA volume for share %s '
                 'could not be found.') % share['id'])
        LOG.error(msg)
        raise manila_exception.ZadaraShareNotFound(name=share['id'])

    # Get/Create server name for given IP
    vpsa_srv = self.vpsa._create_vpsa_server(iscsi_ip=access_ip)
    if not vpsa_srv:
        raise manila_exception.ZadaraServerCreateFailure(name=access_ip)

    # Skip the attach command when this server is already attached.
    servers = self.vpsa._get_servers_attached_to_volume(vpsa_volume)
    attach = None
    for server in servers:
        if server == vpsa_srv:
            attach = server
            break
    # Attach volume to server
    if attach is None:
        self.vpsa_send_cmd('attach_volume',
                           vpsa_srv=vpsa_srv,
                           vpsa_vol=vpsa_volume['name'],
                           share_proto=share_proto,
                           read_only=access_level)

    # Re-list attachments to locate the record for the client IP.
    data = self.vpsa_send_cmd('list_vol_attachments',
                              vpsa_vol=vpsa_volume['name'])
    server = None
    servers = data.get('servers', [])
    for srv in servers:
        if srv['iscsi_ip'] == access_ip:
            server = srv
            break

    if server is None:
        raise manila_exception.ZadaraAttachmentsNotFound(
            name=vpsa_volume['name'])

    ctrl_ip = self.vpsa._get_target_host(ctrl['ip'])
    properties = {
        'target_discovered': False,
        'target_portal': (('%s:%s') % (ctrl_ip, '3260')),
        'target_ip': server['iscsi_ip'],
        'id': share['id'],
        'auth_method': 'CHAP',
        'auth_username': ctrl['chap_user'],
        'auth_password': ctrl['chap_passwd']}
    # Mask the CHAP password before the properties reach the log.
    LOG.debug('Attach properties: %(properties)s',
              {'properties': strutils.mask_password(properties)})
    return {'driver_volume_type': share['share_proto'],
            'data': properties}
def initialize_app(self, argv):
    """Global app init bits:

    * set up API versions
    * validate authentication info
    * authenticate against Identity if requested

    Also reads clouds.yaml configuration, configures logging/TLS,
    registers per-API command groups and builds the ClientManager.
    Option values are password-masked before being logged.
    """
    # Parent __init__ parses argv into self.options
    super(ECLClient, self).initialize_app(argv)
    self.log.info("START with options: %s",
                  strutils.mask_password(self.command_options))
    self.log.debug("options: %s",
                   strutils.mask_password(self.options))

    # Set the default plugin to token_endpoint if url and token are given
    # if (self.options.url and self.options.token):
    #     # Use service token authentication
    #     auth_type = 'token_endpoint'
    # else:
    #     auth_type = 'password'
    auth_type = 'password'

    project_id = getattr(self.options, 'project_id', None)
    project_name = getattr(self.options, 'project_name', None)
    tenant_id = getattr(self.options, 'tenant_id', None)
    tenant_name = getattr(self.options, 'tenant_name', None)

    # handle some v2/v3 authentication inconsistencies by just acting like
    # both the project and tenant information are both present. This can
    # go away if we stop registering all the argparse options together.
    if project_id and not tenant_id:
        self.options.tenant_id = project_id
    if project_name and not tenant_name:
        self.options.tenant_name = project_name
    if tenant_id and not project_id:
        self.options.project_id = tenant_id
    if tenant_name and not project_name:
        self.options.project_name = tenant_name

    # Do configuration file handling
    # Ignore the default value of interface. Only if it is set later
    # will it be used.
    try:
        cc = cloud_config.OpenStackConfig(
            override_defaults={
                'interface': None,
                'auth_type': auth_type,
            },
        )
    except (IOError, OSError) as e:
        self.log.critical("Could not read clouds.yaml configuration file")
        self.print_help_if_requested()
        raise e

    # TODO(thowe): Change cliff so the default value for debug
    # can be set to None.
    if not self.options.debug:
        self.options.debug = None

    # cloud='' selects the unnamed/default cloud entry rather than a
    # user-chosen one.
    self.cloud = cc.get_one_cloud(
        # cloud=self.options.cloud,
        cloud='',
        argparse=self.options,
    )

    self.log_configurator.configure(self.cloud)
    self.dump_stack_trace = self.log_configurator.dump_trace
    self.log.debug("defaults: %s", cc.defaults)
    self.log.debug("cloud cfg: %s",
                   strutils.mask_password(self.cloud.config))

    # Set up client TLS
    # NOTE(dtroyer): --insecure is the non-default condition that
    #                overrides any verify setting in clouds.yaml
    #                so check it first, then fall back to any verify
    #                setting provided.
    self.verify = not self.cloud.config.get(
        'insecure',
        not self.cloud.config.get('verify', True),
    )

    # NOTE(dtroyer): Per bug https://bugs.launchpad.net/bugs/1447784
    #                --insecure now overrides any --os-cacert setting,
    #                where before --insecure was ignored if --os-cacert
    #                was set.
    if self.verify and self.cloud.cacert:
        self.verify = self.cloud.cacert

    # Save default domain
    # self.default_domain = self.options.default_domain
    self.default_domain = DEFAULT_DOMAIN

    # Loop through extensions to get API versions
    for mod in clientmanager.PLUGIN_MODULES:
        default_version = getattr(mod, 'DEFAULT_API_VERSION', None)
        option = mod.API_VERSION_OPTION.replace('os_', '')
        version_opt = str(self.cloud.config.get(option, default_version))
        if version_opt:
            api = mod.API_NAME
            self.api_version[api] = version_opt

            # Add a plugin interface to let the module validate the version
            # requested by the user
            skip_old_check = False
            mod_check_api_version = getattr(mod, 'check_api_version', None)
            if mod_check_api_version:
                # this throws an exception if invalid
                skip_old_check = mod_check_api_version(version_opt)

            mod_versions = getattr(mod, 'API_VERSIONS', None)
            if not skip_old_check and mod_versions:
                if version_opt not in mod_versions:
                    self.log.warning(
                        "%s version %s is not in supported versions %s"
                        % (api, version_opt,
                           ', '.join(mod.API_VERSIONS.keys())))

            # Command groups deal only with major versions
            version = '.v' + version_opt.replace('.', '_').split('_')[0]
            cmd_group = 'ecl.' + api.replace('-', '_') + version
            self.command_manager.add_command_group(cmd_group)
            self.log.debug(
                '%(name)s API version %(version)s, cmd group %(group)s',
                {'name': api, 'version': version_opt, 'group': cmd_group})

    # Commands that span multiple APIs
    self.command_manager.add_command_group('ecl.common')
    self.command_manager.add_command_group('ecl.extension')

    # call InitializeXxx() here
    # set up additional clients to stuff in to client_manager??

    # Handle deferred help and exit
    self.print_help_if_requested()

    self.client_manager = clientmanager.ClientManager(
        cli_options=self.cloud,
        verify=self.verify,
        api_version=self.api_version,
        pw_func=prompt_for_password,
    )
def create(self, req, body, tenant_id):
    """Create a new database instance (or replica) for *tenant_id*.

    Parses and validates the request body, enforces the relevant
    policies (``instance:create`` plus the per-feature policies for
    configuration, modules, users and databases), resolves the
    datastore/flavor/image, and hands off to
    ``models.Instance.create``.

    :param req: the incoming request; logged with passwords masked.
    :param body: parsed request body containing the 'instance' dict.
    :param tenant_id: tenant owning the new instance.
    :returns: a 200 ``wsgi.Result`` with the instance detail view.
    :raises exception.BadRequest: for invalid replica, volume, nic,
        locality or user specifications.
    :raises exception.NotFound: when the replica source is missing.
    :raises exception.Forbidden: when replicating a replica.
    """
    LOG.info("Creating a database instance for tenant '%s'", tenant_id)
    # Mask credentials before the request/body reach the debug log.
    LOG.debug("req : '%s'\n\n", strutils.mask_password(req))
    LOG.debug("body : '%s'\n\n", strutils.mask_password(body))
    context = req.environ[wsgi.CONTEXT_KEY]
    policy.authorize_on_tenant(context, 'instance:create')

    context.notification = notification.DBaaSInstanceCreate(context,
                                                            request=req)

    name = body['instance']['name']
    slave_of_id = body['instance'].get('replica_of')
    replica_count = body['instance'].get('replica_count')
    flavor_ref = body['instance'].get('flavorRef')
    datastore_args = body['instance'].get('datastore', {})
    volume_info = body['instance'].get('volume', {})
    availability_zone = body['instance'].get('availability_zone')
    nics = body['instance'].get('nics', [])
    locality = body['instance'].get('locality')
    region_name = body['instance'].get(
        'region_name', CONF.service_credentials.region_name)
    access = body['instance'].get('access', None)

    if slave_of_id:
        # Replicas inherit flavor/datastore/volume/locality from the
        # replica source, so these may not be supplied explicitly.
        if flavor_ref:
            msg = 'Cannot specify flavor when creating replicas.'
            raise exception.BadRequest(message=msg)
        if datastore_args:
            msg = 'Cannot specify datastore when creating replicas.'
            raise exception.BadRequest(message=msg)
        if volume_info:
            msg = 'Cannot specify volume when creating replicas.'
            raise exception.BadRequest(message=msg)
        if locality:
            msg = 'Cannot specify locality when creating replicas.'
            raise exception.BadRequest(message=msg)
        backup_model.verify_swift_auth_token(context)
    else:
        if replica_count and replica_count > 1:
            msg = (f"Replica count only valid when creating replicas. "
                   f"Cannot create {replica_count} instances.")
            raise exception.BadRequest(message=msg)

    flavor_id = utils.get_id_from_href(flavor_ref)

    if volume_info:
        volume_size = int(volume_info.get('size'))
        volume_type = volume_info.get('type')
    else:
        volume_size = None
        volume_type = None

    if slave_of_id:
        try:
            replica_source = models.DBInstance.find_by(context,
                                                       id=slave_of_id,
                                                       deleted=False)
            # Replicas always use the source's flavor.
            flavor_id = replica_source.flavor_id
        except exception.ModelNotFoundError:
            LOG.error(f"Cannot create a replica of {slave_of_id} as that "
                      f"instance could not be found.")
            raise exception.NotFound(uuid=slave_of_id)
        if replica_source.slave_of_id:
            # Chained replication (replica of a replica) is refused.
            raise exception.Forbidden(
                f"Cannot create a replica of a replica {slave_of_id}")

        datastore_version = ds_models.DatastoreVersion.load_by_uuid(
            replica_source.datastore_version_id)
        datastore = ds_models.Datastore.load(
            datastore_version.datastore_id)
    else:
        datastore, datastore_version = ds_models.get_datastore_version(
            **datastore_args)

    # If only image_tags is configured in the datastore version, get
    # the image ID using the tags.
    glance_client = clients.create_glance_client(context)
    image_id = common_glance.get_image_id(
        glance_client, datastore_version.image_id,
        datastore_version.image_tags)
    LOG.info(f'Using image {image_id} for creating instance')

    databases = populate_validated_databases(body['instance'].get(
        'databases', []))
    database_names = [database.get('_name', '') for database in databases]
    users = None
    try:
        users = populate_users(body['instance'].get('users', []),
                               database_names)
    except ValueError as ve:
        raise exception.BadRequest(message=str(ve))

    if slave_of_id and (databases or users):
        raise exception.ReplicaCreateWithUsersDatabasesError()

    configuration = self._configuration_parse(context, body)
    modules = body['instance'].get('modules')

    # The following operations have their own API calls.
    # We need to make sure the same policies are enforced when
    # creating an instance.
    # i.e. if attaching configuration group to an existing instance is not
    # allowed, it should not be possible to create a new instance with the
    # group attached either
    if configuration:
        policy.authorize_on_tenant(context, 'instance:update')
    if modules:
        policy.authorize_on_tenant(context, 'instance:module_apply')
    if users:
        policy.authorize_on_tenant(
            context, 'instance:extension:user:create')
    if databases:
        policy.authorize_on_tenant(
            context, 'instance:extension:database:create')

    if 'restorePoint' in body['instance']:
        backupRef = body['instance']['restorePoint']['backupRef']
        backup_id = utils.get_id_from_href(backupRef)
    else:
        backup_id = None

    # Only 1 nic is allowed as defined in API jsonschema.
    # Use list just for backward compatibility.
    if len(nics) > 0:
        nic = nics[0]
        LOG.info('Checking user provided instance network %s', nic)
        if slave_of_id and nic.get('ip_address'):
            msg = "Cannot specify IP address when creating replicas."
            raise exception.BadRequest(message=msg)
        self._check_nic(context, nic)

    if locality:
        locality_domain = ['affinity', 'anti-affinity']
        locality_domain_msg = ("Invalid locality '%s'. "
                               "Must be one of ['%s']" %
                               (locality,
                                "', '".join(locality_domain)))
        if locality not in locality_domain:
            raise exception.BadRequest(message=locality_domain_msg)

    instance = models.Instance.create(context, name, flavor_id,
                                      image_id, databases, users,
                                      datastore, datastore_version,
                                      volume_size, backup_id,
                                      availability_zone, nics,
                                      configuration, slave_of_id,
                                      replica_count=replica_count,
                                      volume_type=volume_type,
                                      modules=modules,
                                      locality=locality,
                                      region_name=region_name,
                                      access=access)

    view = views.InstanceDetailView(instance, req=req)
    return wsgi.Result(view.data(), 200)