def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.

    """
    match = BYTE_REGEX.search(text)
    if match:
        magnitude = int(match.group(1))
        mult_key_org = match.group(2)
        if not mult_key_org:
            return magnitude
    elif text:
        msg = _('Invalid string format: %s') % text
        raise TypeError(msg)
    else:
        return default
    mult_key = mult_key_org.lower().replace('b', '', 1)
    multiplier = BYTE_MULTIPLIERS.get(mult_key)
    if multiplier is None:
        msg = _('Unknown byte multiplier: %s') % mult_key_org
        raise TypeError(msg)
    return magnitude * multiplier
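
# Illustrative usage of to_bytes (editor's sketch, not part of the original
# module). It assumes BYTE_REGEX / BYTE_MULTIPLIERS follow the usual oslo
# strutils definitions, i.e. 'k' -> 1024, 'm' -> 1024 ** 2, and so on:
#
#     to_bytes('1024')     # -> 1024  (no suffix, magnitude returned as-is)
#     to_bytes('2KB')      # -> 2048  (suffix normalized to 'k')
#     to_bytes('', 512)    # -> 512   (blank input falls back to `default`)
#     to_bytes('bogus')    # raises TypeError('Invalid string format: bogus')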
def inner(*args, **kwargs):
    with lock(name, lock_file_prefix, external, lock_path):
        LOG.debug(_('Got semaphore / lock "%(function)s"'),
                  {'function': f.__name__})
        result = f(*args, **kwargs)
    # Log the release after leaving the lock context; in the original
    # flattened version this debug call sat after the `return` and was
    # unreachable.
    LOG.debug(_('Semaphore / lock released "%(function)s"'),
              {'function': f.__name__})
    return result
def enforce(self, rule, target, creds, do_raise=False,
            exc=None, *args, **kwargs):
    """Checks authorization of a rule against the target and credentials.

    :param rule: A string or BaseCheck instance specifying the rule
                 to evaluate.
    :param target: As much information about the object being operated
                   on as possible, as a dictionary.
    :param creds: As much information about the user performing the
                  action as possible, as a dictionary.
    :param do_raise: Whether to raise an exception or not if the check
                     fails.
    :param exc: Class of the exception to raise if the check fails.
                Any remaining arguments passed to enforce() (both
                positional and keyword arguments) will be passed to the
                exception class. If not specified, PolicyNotAuthorized
                will be used.

    :return: Returns False if the policy does not allow the action and
             exc is not provided; otherwise, returns a value that
             evaluates to True.  Note: for rules using the "case"
             expression, this True value will be the specified string
             from the expression.
    """
    # NOTE(flaper87): Not logging target or creds to avoid
    # potential security issues.
    LOG.debug(_("Rule %s will now be enforced") % rule)

    self.load_rules()

    # Allow the rule to be a Check tree
    if isinstance(rule, BaseCheck):
        result = rule(target, creds, self)
    elif not self.rules:
        # No rules to reference means we're going to fail closed
        result = False
    else:
        try:
            # Evaluate the rule
            result = self.rules[rule](target, creds, self)
        except KeyError:
            LOG.debug(_("Rule [%s] doesn't exist") % rule)
            # If the rule doesn't exist, fail closed
            result = False

    # If it is False, raise the exception if requested
    if do_raise and not result:
        if exc:
            raise exc(*args, **kwargs)

        raise PolicyNotAuthorized(rule)

    return result
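
# Illustrative call pattern for enforce() (editor's sketch; the enforcer
# instance, rule name, and dictionaries below are hypothetical, not taken
# from this source):
#
#     enforcer = Enforcer()
#     target = {'project_id': 'abc123'}
#     creds = {'roles': ['admin'], 'project_id': 'abc123'}
#     # Returns a truthy value when the rule passes; with do_raise=True a
#     # failing check raises PolicyNotAuthorized (or `exc`, if given).
#     enforcer.enforce('compute:start_instance', target, creds,
#                      do_raise=True)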
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "that is not supported by sqlite.")
        raise exception.OpenstackException(message=msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an "
                "instance of sqlalchemy.Column.")
        raise exception.OpenstackException(message=msg % column_name)
    return column
def bool_from_string(subject, strict=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else is considered False.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API
    call.  Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = str(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return False
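
# Illustrative behaviour of bool_from_string (editor's sketch, not part of
# the original module; assumes TRUE_STRINGS / FALSE_STRINGS contain exactly
# the values listed in the docstring):
#
#     bool_from_string('tRuE')                 # -> True
#     bool_from_string('off')                  # -> False
#     bool_from_string('banana')               # -> False (lenient default)
#     bool_from_string('banana', strict=True)  # raises ValueError
#     bool_from_string(None, strict=True)      # str(None) -> 'none', ValueError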
def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise exception.OpenstackException(
        message=_("Unsupported id column type"))
def inner_func(*args, **kwargs):
    last_log_time = 0
    last_exc_message = None
    exc_count = 0
    while True:
        try:
            return infunc(*args, **kwargs)
        except Exception as exc:
            if exc.message == last_exc_message:
                exc_count += 1
            else:
                exc_count = 1
            # Do not log any more frequently than once a minute unless
            # the exception message changes
            cur_time = int(time.time())
            if (cur_time - last_log_time > 60 or
                    exc.message != last_exc_message):
                logging.exception(
                    _('Unexpected exception occurred %d time(s)... '
                      'retrying.') % exc_count)
                last_log_time = cur_time
                last_exc_message = exc.message
                exc_count = 0
            # This should be a very rare event. In case it isn't, do
            # a sleep.
            time.sleep(1)
def deprecated(self, msg, *args, **kwargs):
    stdmsg = _("Deprecated: %s") % msg
    if CONF.fatal_deprecations:
        self.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)
    else:
        self.warn(stdmsg, *args, **kwargs)
def __init__(self, message=None, **kwargs):
    self.kwargs = kwargs

    if 'code' not in self.kwargs:
        try:
            self.kwargs['code'] = self.code
        except AttributeError:
            pass

    if not message:
        try:
            message = self.message % kwargs
        except Exception as e:
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_('Exception in string format operation'))
            for name, value in kwargs.iteritems():
                LOG.error("%s: %s" % (name, value))

            if CONF.fatal_exception_format_errors:
                raise e
            else:
                # at least get the core message out if something happened
                message = self.message

    super(ExException, self).__init__(message)
def __exit__(self, exc_type, exc_val, exc_tb):
    try:
        self.unlock()
        self.lockfile.close()
    except IOError:
        LOG.exception(_("Could not release the acquired lock `%s`"),
                      self.fname)
def _wrap(*args, **kw):
    try:
        return f(*args, **kw)
    except Exception as e:
        if not isinstance(e, Error):
            logging.exception(_('Uncaught exception'))
            raise Error(str(e))
        raise
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is not None:
        logging.error(_('Original exception being dropped: %s'),
                      traceback.format_exception(self.type_,
                                                 self.value,
                                                 self.tb))
        return False
    if self.reraise:
        raise self.type_, self.value, self.tb
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL connections checked out of the pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    try:
        dbapi_conn.cursor().execute('select 1')
    except dbapi_conn.OperationalError as ex:
        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
            LOG.warn(_('Got mysql server has gone away: %s'), ex)
            raise sqla_exc.DisconnectionError("Database server went away")
        else:
            raise
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True)
    old duplicate rows from the table named `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
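
# Illustrative call (editor's sketch; the engine, table, and column names
# below are hypothetical, not taken from this source):
#
#     engine = create_engine(CONF.database.connection)
#     # For every (user_id, key) pair in 'instance_metadata', keep only the
#     # row with the highest id and soft-delete the older duplicates.
#     drop_old_duplicate_entries_from_table(engine, 'instance_metadata',
#                                           True, 'user_id', 'key')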
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
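
# Illustrative behaviour of _parse_check (editor's sketch, not part of the
# original module; the 'role' line assumes such a check has been registered
# in _checks):
#
#     _parse_check('!')              # -> FalseCheck()
#     _parse_check('@')              # -> TrueCheck()
#     _parse_check('role:admin')     # -> _checks['role']('role', 'admin')
#     _parse_check('no-colon-here')  # unsplittable -> logged, FalseCheck()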
def set_rules(self, rules, overwrite=True):
    """Create a new Rules object based on the provided dict of rules.

    :param rules: New rules to use. It should be an instance of dict.
    :param overwrite: Whether to overwrite current rules or update them
                      with the new rules.
    """
    if not isinstance(rules, dict):
        raise TypeError(_("Rules must be an instance of dict or Rules, "
                          "got %s instead") % type(rules))

    if overwrite:
        self.rules = Rules(rules)
    else:
        self.rules.update(rules)
def load_rules(self, force_reload=False):
    """Loads policy_path's rules.

    Policy file is cached and will be reloaded if modified.

    :param force_reload: Whether to reload the rules even if the policy
                         file has not been modified.
    """
    if not self.policy_path:
        self.policy_path = self._get_policy_path()

    reloaded, data = fileutils.read_cached_file(self.policy_path,
                                                force_reload=force_reload)

    if reloaded:
        rules = Rules.load_json(data, self.default_rule)
        self.set_rules(rules)
        LOG.debug(_("Rules successfully reloaded"))
def _find_facility_from_conf():
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)

        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple of (reloaded, data), where `reloaded` is a boolean
              specifying whether the data was re-read from disk.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get("mtime", 0):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info["data"] = fap.read()
        cache_info["mtime"] = mtime
        reloaded = True
    return (reloaded, cache_info["data"])
def _wrap(*args, **kwargs):
    try:
        return f(*args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    # note(boris-42): We should catch unique constraint violation and
    # wrap it by our own DBDuplicateEntry exception. Unique constraint
    # violation is wrapped by IntegrityError.
    except sqla_exc.OperationalError as e:
        _raise_if_deadlock_error(e, get_engine().name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    except sqla_exc.IntegrityError as e:
        # note(boris-42): SqlAlchemy doesn't unify errors from different
        # DBs so we must do this. Also in some tables (for example
        # instance_types) there are more than one unique constraint. This
        # means we should get names of columns, which values violate
        # unique constraint, from error message.
        _raise_if_duplicate_entry_error(e, get_engine().name)
        raise exception.DBError(e)
    except Exception as e:
        LOG.exception(_('DB exception wrapped.'))
        raise exception.DBError(e)
def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """
    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %r") % rule)

        # Fail closed
        return FalseCheck()
def create_engine(sql_connection, sqlite_fk=False):
    """Return a new SQLAlchemy engine."""
    # NOTE(geekinutah): At this point we could be connecting to the normal
    #                   db handle or the slave db handle. Things like
    #                   _wrap_db_error aren't going to work well if their
    #                   backends don't match. Let's check.
    _assert_matching_drivers()
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": CONF.database.idle_timeout,
        "echo": False,
        'convert_unicode': True,
    }

    # Map our SQL debug level to SQLAlchemy's options
    if CONF.database.connection_debug >= 100:
        engine_args['echo'] = 'debug'
    elif CONF.database.connection_debug >= 50:
        engine_args['echo'] = True

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if CONF.database.connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if CONF.database.max_pool_size is not None:
            engine_args['pool_size'] = CONF.database.max_pool_size
        if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow
        if CONF.database.pool_timeout is not None:
            engine_args['pool_timeout'] = CONF.database.pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)

    if 'mysql' in connection_dict.drivername:
        sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
    elif 'sqlite' in connection_dict.drivername:
        if not CONF.sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if (CONF.database.connection_trace and
            engine.dialect.dbapi.__name__ == 'MySQLdb'):
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = CONF.database.max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warn(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(CONF.database.retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
def __init__(self, rule):
    msg = _("Policy doesn't allow %s to be performed.") % rule
    super(PolicyNotAuthorized, self).__init__(msg)
import re
import urllib

import urllib2

from oslo.config import cfg
import six

from mimic.openstack.common import fileutils
from mimic.openstack.common.gettextutils import _  # noqa
from mimic.openstack.common import jsonutils
from mimic.openstack.common import log as logging


policy_opts = [
    cfg.StrOpt('policy_file',
               default='policy.json',
               help=_('JSON file containing policy')),
    cfg.StrOpt('policy_default_rule',
               default='default',
               help=_('Rule enforced when requested rule is not found')),
]

CONF = cfg.CONF
CONF.register_opts(policy_opts)

LOG = logging.getLogger(__name__)

_checks = {}


class PolicyNotAuthorized(Exception):
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to
                      sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
        LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
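
# Illustrative usage of paginate_query (editor's sketch; the Instance model,
# session, and last_seen marker below are hypothetical, not taken from this
# source):
#
#     query = session.query(Instance)
#     # Fetch the next 20 instances ordered by (created_at desc, id desc),
#     # where `last_seen` is the last Instance object of the previous page.
#     page = paginate_query(query, Instance, limit=20,
#                           sort_keys=['created_at', 'id'],
#                           marker=last_seen, sort_dir='desc').all()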
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `semaphore.Semaphore` instance unless external
    is True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to
    provide lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set,
    then CONF.lock_path is used as a default.
    """
    # NOTE(soren): If we ever go natively threaded, this will be racy.
    #              See http://stackoverflow.com/questions/5390569/dyn
    #              amically-allocating-and-destroying-mutexes
    sem = _semaphores.get(name, semaphore.Semaphore())
    if name not in _semaphores:
        # this check is not racy - we're already holding ref locally
        # so GC won't remove the item and there was no IO switch
        # (only valid in greenthreads)
        _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path,
                                              lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)
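
# Illustrative usage of lock() (editor's sketch, not part of the original
# module; it assumes the function is decorated with
# contextlib.contextmanager as in oslo lockutils, which is what makes the
# `with` form below work):
#
#     with lock('my-resource', lock_file_prefix='svc', external=True):
#         # Only one process (and one greenthread within it) runs this
#         # critical section at a time; the lock file lives under
#         # CONF.lock_path unless lock_path is given.
#         do_exclusive_work()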