Code example #1
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
Code example #2
File: policy.py  Project: tantexian/sps-2014-12-4
def _parse_check(rule):
    """
    Parse a single base check rule into an appropriate Check object.
    """

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(rule)s") % locals())
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
Code example #3
File: policy.py  Project: tantexian/sps-2014-12-4
def _parse_check(rule):
    """
    Parse a single base check rule into an appropriate Check object.
    """

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(rule)s") % locals())
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
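
Note on examples #2 and #3: _parse_check() dispatches on the `kind` prefix of a
rule through a module-level _checks registry that the excerpt does not show. The
sketch below is an illustrative assumption of how that registry and the check
classes could fit together (the class bodies and the RoleCheck handler are
invented here, not taken from the project):

class BaseCheck(object):
    def __call__(self, target, creds):
        raise NotImplementedError


class TrueCheck(BaseCheck):
    def __call__(self, target, creds):
        return True             # '@' -- always accept


class FalseCheck(BaseCheck):
    def __call__(self, target, creds):
        return False            # '!' -- always reject


class RoleCheck(BaseCheck):
    # Hypothetical handler for 'role:<match>' rules.
    def __init__(self, kind, match):
        self.kind = kind
        self.match = match

    def __call__(self, target, creds):
        return self.match.lower() in [r.lower() for r in creds.get('roles', [])]


# Registry consulted by _parse_check(); a None key would act as the catch-all
# branch (`_checks[None](kind, match)`) in the excerpt above.
_checks = {'role': RoleCheck}

# With this in place, _parse_check('role:admin') returns RoleCheck('role', 'admin'),
# while a rule without a ':' separator fails closed and yields FalseCheck().
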
Code example #4
 def wait_on_children(self):
     while self.running:
         try:
             pid, status = os.wait()
             if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                 self.logger.info(_('Removing dead child %s') % pid)
                 self.children.remove(pid)
                 if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
                     self.logger.error(
                         _('Not respawning child %d, cannot '
                           'recover from termination') % pid)
                     if not self.children:
                         self.logger.info(
                             _('All workers have terminated. Exiting'))
                         self.running = False
                 else:
                     self.run_child()
         except OSError as err:
             if err.errno not in (errno.EINTR, errno.ECHILD):
                 raise
         except KeyboardInterrupt:
             self.logger.info(_('Caught keyboard interrupt. Exiting.'))
             break
     eventlet.greenio.shutdown_safe(self.sock)
     self.sock.close()
     self.logger.debug('Exited')
Code example #5
File: strutils.py  Project: tantexian/sps-2014-12-4
def to_bytes(text, default=0):
    """Converts a string into an integer of bytes.

    Looks at the last characters of the text to determine
    what conversion is needed to turn the input text into a byte number.
    Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)

    :param text: String input for bytes size conversion.
    :param default: Default return value when text is blank.

    """
    match = BYTE_REGEX.search(text)
    if match:
        magnitude = int(match.group(1))
        mult_key_org = match.group(2)
        if not mult_key_org:
            return magnitude
    elif text:
        msg = _('Invalid string format: %s') % text
        raise TypeError(msg)
    else:
        return default
    mult_key = mult_key_org.lower().replace('b', '', 1)
    multiplier = BYTE_MULTIPLIERS.get(mult_key)
    if multiplier is None:
        msg = _('Unknown byte multiplier: %s') % mult_key_org
        raise TypeError(msg)
    return magnitude * multiplier
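
to_bytes() relies on two module-level helpers, BYTE_REGEX and BYTE_MULTIPLIERS,
that the excerpt omits. A plausible, self-contained reconstruction follows;
treat the exact pattern and factors as assumptions rather than the project's
real definitions:

import re

BYTE_REGEX = re.compile(r'(\d+)([BbKkMmGgTt]?[Bb]?)')
BYTE_MULTIPLIERS = {
    '': 1,
    'k': 1024,
    'm': 1024 ** 2,
    'g': 1024 ** 3,
    't': 1024 ** 4,
}

# With these definitions the function would behave as follows:
#   to_bytes('1024')  -> 1024        (no suffix: the magnitude is returned as-is)
#   to_bytes('2KB')   -> 2048        ('KB' is normalized to 'k' before the lookup)
#   to_bytes('3G')    -> 3221225472
#   to_bytes('')      -> 0           (blank input falls back to `default`)
#   to_bytes('oops')  -> TypeError("Invalid string format: oops")
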
Code example #6
File: wsgi.py  Project: tantexian/sps-2014-12-4
 def wait_on_children(self):
     while self.running:
         try:
             pid, status = os.wait()
             if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                 self.logger.info(_('Removing dead child %s') % pid)
                 self.children.remove(pid)
                 if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
                     self.logger.error(_('Not respawning child %d, cannot '
                                         'recover from termination') % pid)
                     if not self.children:
                         self.logger.info(
                             _('All workers have terminated. Exiting'))
                         self.running = False
                 else:
                     self.run_child()
         except OSError as err:
             if err.errno not in (errno.EINTR, errno.ECHILD):
                 raise
         except KeyboardInterrupt:
             self.logger.info(_('Caught keyboard interrupt. Exiting.'))
             break
     eventlet.greenio.shutdown_safe(self.sock)
     self.sock.close()
     self.logger.debug('Exited')
Code example #7
 def inner(*args, **kwargs):
     try:
         with lock(name, lock_file_prefix, external, lock_path):
             LOG.debug(_('Got semaphore / lock "%(function)s"'),
                       {'function': f.__name__})
             return f(*args, **kwargs)
     finally:
         LOG.debug(_('Semaphore / lock released "%(function)s"'),
                   {'function': f.__name__})
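
The inner() wrapper in example #7 is the body of a decorator whose factory is
not part of the excerpt. Below is a self-contained sketch of how such a
synchronized-style decorator is commonly assembled around a lock() context
manager; the factory name, the simplified stand-in lock(), and the sample
function are assumptions for illustration (a fuller lock() appears in code
example #53):

import functools
import logging
import threading
from contextlib import contextmanager

LOG = logging.getLogger(__name__)
_semaphores = {}


@contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    # Stand-in for the real lock() context manager: a per-name threading
    # lock; the file-lock related parameters are ignored in this sketch.
    sem = _semaphores.setdefault(name, threading.Lock())
    with sem:
        yield sem


def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    # Assumed decorator factory that produces the inner() wrapper shown above.
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug('Got semaphore / lock "%(function)s"',
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug('Semaphore / lock released "%(function)s"',
                          {'function': f.__name__})
        return inner
    return wrap


@synchronized('refresh-cache')
def refresh_cache():
    pass  # hypothetical critical section; only one caller at a time runs it
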
Code example #8
File: api.py  Project: tantexian/sps-2014-12-4
def model_query(context, model, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param use_slave: If true, use slave_connection
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id. If set to 'allow_none',
            restriction includes project_id = None.
    :param base_model: Where model_query is passed a "model" parameter which is
            not a subclass of NovaBase, we should pass an extra base_model
            parameter that is a subclass of NovaBase and corresponds to the
            model parameter.
    """

    use_slave = kwargs.get('use_slave') or False
    if CONF.database.slave_connection == '':
        use_slave = False

    session = kwargs.get('session') or get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    project_only = kwargs.get('project_only', False)

    def issubclassof_sps_base(obj):
        return isinstance(obj, type) and issubclass(obj, models.SpsBase)

    base_model = model
    if not issubclassof_sps_base(base_model):
        base_model = kwargs.get('base_model', None)
        if not issubclassof_sps_base(base_model):
            raise Exception(
                _("model or base_model parameter should be "
                  "subclass of NovaBase"))

    query = session.query(model, *args)

    default_deleted_value = base_model.__mapper__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(base_model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(base_model.deleted != default_deleted_value)
    else:
        raise Exception(
            _("Unrecognized read_deleted value '%s'") % read_deleted)

    if sps.context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            query = query.\
                filter(or_(base_model.project_id == context.project_id,
                           base_model.project_id == None))
        else:
            query = query.filter_by(project_id=context.project_id)

    return query
Code example #9
File: api.py  Project: tantexian/sps-2014-12-4
def model_query(context, model, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param use_slave: If true, use slave_connection
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id. If set to 'allow_none',
            restriction includes project_id = None.
    :param base_model: Where model_query is passed a "model" parameter which is
            not a subclass of NovaBase, we should pass an extra base_model
            parameter that is a subclass of NovaBase and corresponds to the
            model parameter.
    """

    use_slave = kwargs.get('use_slave') or False
    if CONF.database.slave_connection == '':
        use_slave = False

    session = kwargs.get('session') or get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    project_only = kwargs.get('project_only', False)

    def issubclassof_sps_base(obj):
        return isinstance(obj, type) and issubclass(obj, models.SpsBase)

    base_model = model
    if not issubclassof_sps_base(base_model):
        base_model = kwargs.get('base_model', None)
        if not issubclassof_sps_base(base_model):
            raise Exception(_("model or base_model parameter should be "
                              "subclass of NovaBase"))

    query = session.query(model, *args)

    default_deleted_value = base_model.__mapper__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(base_model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(base_model.deleted != default_deleted_value)
    else:
        raise Exception(_("Unrecognized read_deleted value '%s'")
                            % read_deleted)

    if sps.context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            query = query.\
                filter(or_(base_model.project_id == context.project_id,
                           base_model.project_id == None))
        else:
            query = query.filter_by(project_id=context.project_id)

    return query
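
Typical call sites for the model_query() of examples #8 and #9 might look like
the comments below; models.Image and the filters are hypothetical, and
base_model is only needed when the first query argument is not itself a model
class (for example an aggregate expression):

# model_query(context, models.Image).filter_by(id=image_id).first()
# model_query(context, models.Image, read_deleted='yes').all()
# model_query(context, func.count(models.Image.id),
#             base_model=models.Image).scalar()
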
Code example #10
File: utils.py  Project: tantexian/sps-2014-12-4
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite).")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
Code example #11
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite).")
        raise ColumnError(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise ColumnError(msg % column_name)
    return column
Code example #12
File: log.py  Project: tantexian/sps-2014-12-4
 def deprecated(self, msg, *args, **kwargs):
     stdmsg = _("Deprecated: %s") % msg
     if CONF.fatal_deprecations:
         self.critical(stdmsg, *args, **kwargs)
         raise DeprecatedConfig(msg=stdmsg)
     else:
         self.warn(stdmsg, *args, **kwargs)
Code example #13
File: strutils.py  Project: tantexian/sps-2014-12-4
def bool_from_string(subject, strict=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else is considered False.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = str(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return False
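
bool_from_string() references two module constants that the excerpt leaves out.
Reconstructed from its docstring (still an assumption about the exact values),
they and a few sample results would be:

TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

# bool_from_string('YES')               -> True
# bool_from_string('nope')              -> False  (lenient default)
# bool_from_string(None)                -> False  (str(None).lower() == 'none')
# bool_from_string('nope', strict=True) -> ValueError listing the accepted values
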
Code example #14
File: excutils.py  Project: tantexian/sps-2014-12-4
 def inner_func(*args, **kwargs):
     last_log_time = 0
     last_exc_message = None
     exc_count = 0
     while True:
         try:
             return infunc(*args, **kwargs)
         except Exception as exc:
             this_exc_message = six.u(str(exc))
             if this_exc_message == last_exc_message:
                 exc_count += 1
             else:
                 exc_count = 1
             # Do not log any more frequently than once a minute unless
             # the exception message changes
             cur_time = int(time.time())
             if (cur_time - last_log_time > 60 or
                     this_exc_message != last_exc_message):
                 logging.exception(
                     _('Unexpected exception occurred %d time(s)... '
                       'retrying.') % exc_count)
                 last_log_time = cur_time
                 last_exc_message = this_exc_message
                 exc_count = 0
             # This should be a very rare event. In case it isn't, do
             # a sleep.
             time.sleep(1)
Code example #15
class spsException(Exception):
    """
    Base sps Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred")

    def __init__(self, message=None, *args, **kwargs):
        if not message:
            message = self.message
        try:
            if kwargs:
                message = message % kwargs
        except Exception:
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise
            else:
                # at least get the core message out if something happened
                pass
        self.msg = message
        super(spsException, self).__init__(message)

    def __unicode__(self):
        # NOTE(flwang): By default, self.msg is an instance of Message, which
        # can't be converted by str(). Based on the definition of
        # __unicode__, it should return unicode always.
        return six.text_type(self.msg)
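
A minimal usage sketch for the base class above; the subclass and its message
template are hypothetical, and it assumes spsException, _ and
_FATAL_EXCEPTION_FORMAT_ERRORS from the surrounding module are in scope:

class ImageNotFound(spsException):
    message = _("Image %(image_id)s could not be found")

# raise ImageNotFound(image_id='42')  -> "Image 42 could not be found"
# If the keyword interpolation fails and _FATAL_EXCEPTION_FORMAT_ERRORS is
# false, the unformatted class-level template is used instead of raising.
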
Code example #16
File: excutils.py  Project: tantexian/sps-2014-12-4
 def inner_func(*args, **kwargs):
     last_log_time = 0
     last_exc_message = None
     exc_count = 0
     while True:
         try:
             return infunc(*args, **kwargs)
         except Exception as exc:
             this_exc_message = six.u(str(exc))
             if this_exc_message == last_exc_message:
                 exc_count += 1
             else:
                 exc_count = 1
             # Do not log any more frequently than once a minute unless
             # the exception message changes
             cur_time = int(time.time())
             if (cur_time - last_log_time > 60
                     or this_exc_message != last_exc_message):
                 logging.exception(
                     _('Unexpected exception occurred %d time(s)... '
                       'retrying.') % exc_count)
                 last_log_time = cur_time
                 last_exc_message = this_exc_message
                 exc_count = 0
             # This should be a very rare event. In case it isn't, do
             # a sleep.
             time.sleep(1)
Code example #17
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine:       SQLAlchemy engine instance for a given database
    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """

    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    else:
        return versioning_api.downgrade(engine, repository, version)
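
A hypothetical call site for db_sync(); the engine URL and repository path are
made up for illustration:

from sqlalchemy import create_engine

engine = create_engine('sqlite:///sps.sqlite')
repo = '/path/to/sps/db/sqlalchemy/migrate_repo'  # hypothetical path

db_sync(engine, repo)             # upgrade to the latest available version
db_sync(engine, repo, version=5)  # or move (up or down) to an explicit version
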
Code example #18
 def __exit__(self, exc_type, exc_val, exc_tb):
     try:
         self.unlock()
         self.lockfile.close()
     except IOError:
         LOG.exception(_("Could not release the acquired lock `%s`"),
                       self.fname)
Code example #19
File: migration.py  Project: tantexian/sps-2014-12-4
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine:       SQLAlchemy engine instance for a given database
    :param abs_path:     Absolute path to migrate repository.
    :param version:      Database will upgrade/downgrade until this version.
                         If None - database will update to the latest
                         available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    """

    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DbMigrationError(
                message=_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    else:
        return versioning_api.downgrade(engine, repository,
                                        version)
Code example #20
def model_query(context,
                model,
                session,
                args=None,
                project_only=False,
                read_deleted=None):
    """Query helper that accounts for context's `read_deleted` field.

    :param context:      context to query under

    :param model:        Model to query. Must be a subclass of ModelBase.
    :type model:         models.ModelBase

    :param session:      The session to use.
    :type session:       sqlalchemy.orm.session.Session

    :param args:         Arguments to query. If None - model is used.
    :type args:          tuple

    :param project_only: If present and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    :type project_only:  bool

    :param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted:   bool

    Usage:

    .. code:: python

        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
                    context, Node,
                    session=session,
                    args=(func.count(Node.id), func.sum(Node.ram))
                    ).filter_by(project_id=project_id)

    """

    if not read_deleted:
        if hasattr(context, 'read_deleted'):
            # NOTE(viktors): some projects use `read_deleted` attribute in
            # their contexts instead of `show_deleted`.
            read_deleted = context.read_deleted
        else:
            read_deleted = context.show_deleted

    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))

    query = session.query(model) if not args else session.query(*args)
    query = _read_deleted_filter(query, model, read_deleted)
    query = _project_filter(query, model, context, project_only)

    return query
Code example #21
 def run_child(self):
     pid = os.fork()
     if pid == 0:
         signal.signal(signal.SIGHUP, signal.SIG_DFL)
         signal.signal(signal.SIGTERM, signal.SIG_DFL)
         # ignore the interrupt signal to avoid a race whereby
         # a child worker receives the signal before the parent
         # and is respawned unnecessarily as a result
         signal.signal(signal.SIGINT, signal.SIG_IGN)
         self.run_server()
         self.logger.info(_('Child %d exiting normally') % os.getpid())
         # self.pool.waitall() has been called by run_server, so
         # its safe to exit here
         sys.exit(0)
     else:
         self.logger.info(_('Started child %s') % pid)
         self.children.append(pid)
Code example #22
File: utils.py  Project: tantexian/sps-2014-12-4
def _read_deleted_filter(query, db_model, read_deleted):
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(_("There is no `deleted` column in `%s` table. "
                           "Project doesn't use soft-deleted feature.")
                         % db_model.__name__)

    default_deleted_value = db_model.__table__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(db_model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(db_model.deleted != default_deleted_value)
    else:
        raise ValueError(_("Unrecognized read_deleted value '%s'")
                         % read_deleted)
    return query
Code example #23
File: wsgi.py  Project: tantexian/sps-2014-12-4
 def run_child(self):
     pid = os.fork()
     if pid == 0:
         signal.signal(signal.SIGHUP, signal.SIG_DFL)
         signal.signal(signal.SIGTERM, signal.SIG_DFL)
         # ignore the interrupt signal to avoid a race whereby
         # a child worker receives the signal before the parent
         # and is respawned unnecessarily as a result
         signal.signal(signal.SIGINT, signal.SIG_IGN)
         self.run_server()
         self.logger.info(_('Child %d exiting normally') % os.getpid())
         # self.pool.waitall() has been called by run_server, so
         # its safe to exit here
         sys.exit(0)
     else:
         self.logger.info(_('Started child %s') % pid)
         self.children.append(pid)
Code example #24
 def _single_run(self, application, sock):
     """Start a WSGI server in a new green thread."""
     self.logger.info(_("Starting single process server"))
     eventlet.wsgi.server(sock,
                          application,
                          custom_pool=self.pool,
                          log=logging.WritableLogger(self.logger),
                          debug=False)
Code example #25
def _read_deleted_filter(query, db_model, read_deleted):
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(
            _("There is no `deleted` column in `%s` table. "
              "Project doesn't use soft-deleted feature.") % db_model.__name__)

    default_deleted_value = db_model.__table__.c.deleted.default.arg
    if read_deleted == 'no':
        query = query.filter(db_model.deleted == default_deleted_value)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter(db_model.deleted != default_deleted_value)
    else:
        raise ValueError(
            _("Unrecognized read_deleted value '%s'") % read_deleted)
    return query
Code example #26
File: excutils.py  Project: tantexian/sps-2014-12-4
 def __exit__(self, exc_type, exc_val, exc_tb):
     if exc_type is not None:
         logging.error(
             _('Original exception being dropped: %s'),
             traceback.format_exception(self.type_, self.value, self.tb))
         return False
     if self.reraise:
         six.reraise(self.type_, self.value, self.tb)
Code example #27
class LimitExceeded(spsException):
    message = _("The request returned a 413 Request Entity Too Large. This "
                "generally means that rate limiting or a quota threshold was "
                "breached.\n\nThe response body:\n%(body)s")

    def __init__(self, *args, **kwargs):
        self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
                            else None)
        super(LimitExceeded, self).__init__(*args, **kwargs)
Code example #28
class ServiceUnavailable(spsException):
    message = _("The request returned 503 Service Unavilable. This "
                "generally occurs on service overload or other transient "
                "outage.")

    def __init__(self, *args, **kwargs):
        self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
                            else None)
        super(ServiceUnavailable, self).__init__(*args, **kwargs)
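
Both exception classes above carry an optional retry hint parsed from
kwargs['retry']; a hypothetical caller could honor it like this:

# try:
#     client.upload(image_data)        # hypothetical operation
# except LimitExceeded as exc:
#     if exc.retry_after:
#         time.sleep(exc.retry_after)
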
Code example #29
File: excutils.py  Project: tantexian/sps-2014-12-4
 def __exit__(self, exc_type, exc_val, exc_tb):
     if exc_type is not None:
         logging.error(_('Original exception being dropped: %s'),
                       traceback.format_exception(self.type_,
                                                  self.value,
                                                  self.tb))
         return False
     if self.reraise:
         six.reraise(self.type_, self.value, self.tb)
Code example #30
    def start(self, application, default_port):
        """
        Run a WSGI server with the given application.

        :param application: The application to be run in the WSGI server
        :param default_port: Port to bind to if none is specified in conf
        """
        pgid = os.getpid()
        try:
            # NOTE(flaper87): Make sure this process
            # runs in its own process group.
            os.setpgid(pgid, pgid)
        except OSError:
            # NOTE(flaper87): When running sps-control,
            # (sps's functional tests, for example)
            # setpgid fails with EPERM as sps-control
            # creates a fresh session, of which the newly
            # launched service becomes the leader (session
            # leaders may not change process groups)
            #
            # Running sps-(api|registry) is safe and
            # shouldn't raise any error here.
            pgid = 0

        def kill_children(*args):
            """Kills the entire process group."""
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            self.running = False
            os.killpg(pgid, signal.SIGTERM)

        def hup(*args):
            """
            Shuts down the server, but allows running requests to complete
            """
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
            self.running = False

        self.application = application
        self.sock = get_socket(default_port)

        os.umask(0o27)  # ensure files are created with the correct privileges
        self.logger = logging.getLogger('sps.wsgi.server')

        if CONF.workers == 0:
            # Useful for profiling, test, debug etc.
            self.pool = self.create_pool()
            self.pool.spawn_n(self._single_run, self.application, self.sock)
            return
        else:
            self.logger.info(_("Starting %d workers") % CONF.workers)
            signal.signal(signal.SIGTERM, kill_children)
            signal.signal(signal.SIGINT, kill_children)
            signal.signal(signal.SIGHUP, hup)
            while len(self.children) < CONF.workers:
                self.run_child()
Code example #31
File: wsgi.py  Project: tantexian/sps-2014-12-4
    def start(self, application, default_port):
        """
        Run a WSGI server with the given application.

        :param application: The application to be run in the WSGI server
        :param default_port: Port to bind to if none is specified in conf
        """
        pgid = os.getpid()
        try:
            # NOTE(flaper87): Make sure this process
            # runs in its own process group.
            os.setpgid(pgid, pgid)
        except OSError:
            # NOTE(flaper87): When running sps-control,
            # (sps's functional tests, for example)
            # setpgid fails with EPERM as sps-control
            # creates a fresh session, of which the newly
            # launched service becomes the leader (session
            # leaders may not change process groups)
            #
            # Running sps-(api|registry) is safe and
            # shouldn't raise any error here.
            pgid = 0

        def kill_children(*args):
            """Kills the entire process group."""
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            self.running = False
            os.killpg(pgid, signal.SIGTERM)

        def hup(*args):
            """
            Shuts down the server, but allows running requests to complete
            """
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
            self.running = False

        self.application = application
        self.sock = get_socket(default_port)

        os.umask(0o27)  # ensure files are created with the correct privileges
        self.logger = logging.getLogger('sps.wsgi.server')

        if CONF.workers == 0:
            # Useful for profiling, test, debug etc.
            self.pool = self.create_pool()
            self.pool.spawn_n(self._single_run, self.application, self.sock)
            return
        else:
            self.logger.info(_("Starting %d workers") % CONF.workers)
            signal.signal(signal.SIGTERM, kill_children)
            signal.signal(signal.SIGINT, kill_children)
            signal.signal(signal.SIGHUP, hup)
            while len(self.children) < CONF.workers:
                self.run_child()
Code example #32
File: utils.py  Project: tantexian/sps-2014-12-4
def model_query(context, model, session, args=None, project_only=False,
                read_deleted=None):
    """Query helper that accounts for context's `read_deleted` field.

    :param context:      context to query under

    :param model:        Model to query. Must be a subclass of ModelBase.
    :type model:         models.ModelBase

    :param session:      The session to use.
    :type session:       sqlalchemy.orm.session.Session

    :param args:         Arguments to query. If None - model is used.
    :type args:          tuple

    :param project_only: If present and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    :type project_only:  bool

    :param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted:   bool

    Usage:

    .. code:: python

        result = (utils.model_query(context, models.Instance, session=session)
                       .filter_by(uuid=instance_uuid)
                       .all())

        query = utils.model_query(
                    context, Node,
                    session=session,
                    args=(func.count(Node.id), func.sum(Node.ram))
                    ).filter_by(project_id=project_id)

    """

    if not read_deleted:
        if hasattr(context, 'read_deleted'):
            # NOTE(viktors): some projects use `read_deleted` attribute in
            # their contexts instead of `show_deleted`.
            read_deleted = context.read_deleted
        else:
            read_deleted = context.show_deleted

    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))

    query = session.query(model) if not args else session.query(*args)
    query = _read_deleted_filter(query, model, read_deleted)
    query = _project_filter(query, model, context, project_only)

    return query
Code example #33
File: config.py  Project: tantexian/sps-2014-12-4
def _get_deployment_config_file():
    """
    Retrieve the deployment_config_file config item, formatted as an
    absolute pathname.
    """
    path = CONF.paste_deploy.config_file
    if not path:
        path = _get_paste_config_path()
    if not path:
        msg = _("Unable to locate paste config file for %s.") % CONF.prog
        raise RuntimeError(msg)
    return os.path.abspath(path)
Code example #34
File: utils.py  Project: tantexian/sps-2014-12-4
def _project_filter(query, db_model, context, project_only):
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)

    if request_context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            is_none = None
            query = query.filter(or_(db_model.project_id == context.project_id,
                                     db_model.project_id == is_none))
        else:
            query = query.filter(db_model.project_id == context.project_id)

    return query
Code example #35
def _project_filter(query, db_model, context, project_only):
    if project_only and 'project_id' not in db_model.__table__.columns:
        raise ValueError(
            _("There is no `project_id` column in `%s` table.") %
            db_model.__name__)

    if request_context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            is_none = None
            query = query.filter(
                or_(db_model.project_id == context.project_id,
                    db_model.project_id == is_none))
        else:
            query = query.filter(db_model.project_id == context.project_id)

    return query
Code example #36
File: migration.py  Project: tantexian/sps-2014-12-4
def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine:  SQLAlchemy engine instance for a given database

    """

    if engine.name == 'mysql':
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')

        table_names = [res[0] for res in engine.execute(onlyutf8_sql,
                                                        engine.url.database)]
        if len(table_names) > 0:
            raise ValueError(_('Tables "%s" have non utf8 collation, '
                               'please make sure all tables are CHARSET=utf8'
                               ) % ','.join(table_names))
Code example #37
File: config.py  Project: tantexian/sps-2014-12-4
def load_paste_app(app_name, flavor=None, conf_file=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file

    :raises RuntimeError when config file cannot be located or application
            cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)

    if not conf_file:
        conf_file = _get_deployment_config_file()

    try:
        logger = logging.getLogger(__name__)
        logger.debug("Loading %(app_name)s from %(conf_file)s",
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)
Code example #38
def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    :param engine:  SQLAlchemy engine instance for a given database

    """

    if engine.name == 'mysql':
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE "%%utf8%%"')

        table_names = [
            res[0] for res in engine.execute(onlyutf8_sql, engine.url.database)
        ]
        if len(table_names) > 0:
            raise ValueError(
                _('Tables "%s" have non utf8 collation, '
                  'please make sure all tables are CHARSET=utf8') %
                ','.join(table_names))
Code example #39
File: log.py  Project: tantexian/sps-2014-12-4
def _find_facility_from_conf():
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
Code example #40
def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine:  SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(engine, abs_path, version=init_version)
            return versioning_api.db_version(engine, repository)
        else:
            raise exception.DbMigrationError(message=_(
                "The database is not under version control, but has "
                "tables. Please stamp the current version of the schema "
                "manually."))
Code example #41
def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple of (reloaded, data), where `reloaded` indicates
              whether the file was re-read from disk.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return (reloaded, cache_info['data'])
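
A usage sketch for read_cached_file(); the policy-file path and the
parse_rules() consumer are hypothetical:

# reloaded is True only when the file's mtime moved past the cached copy,
# so expensive re-parsing can be skipped for unchanged files.
# reloaded, data = read_cached_file('/etc/sps/policy.json')
# if reloaded:
#     rules = parse_rules(data)
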
Code example #42
File: migration.py  Project: tantexian/sps-2014-12-4
def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine:  SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version:  Initial database version
    """
    repository = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0 or 'alembic_version' in tables:
            db_version_control(engine, abs_path, version=init_version)
            return versioning_api.db_version(engine, repository)
        else:
            raise exception.DbMigrationError(
                message=_(
                    "The database is not under version control, but has "
                    "tables. Please stamp the current version of the schema "
                    "manually."))
Code example #43
File: policy.py  Project: tantexian/sps-2014-12-4
def _parse_text_rule(rule):
    """
    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()
Code example #44
File: policy.py  Project: tantexian/sps-2014-12-4
def _parse_text_rule(rule):
    """
    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_("Failed to understand rule %(rule)r") % locals())

        # Fail closed
        return FalseCheck()
Code example #45
File: wsgi.py  Project: tantexian/sps-2014-12-4
    def run_server(self):
        """Run a WSGI server."""
        if cfg.CONF.pydev_worker_debug_host:
            utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host,
                                           cfg.CONF.pydev_worker_debug_port)

        eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        try:
            eventlet.hubs.use_hub(cfg.CONF.eventlet_hub)
        except Exception:
            msg = _("eventlet '%s' hub is not available on this platform")
            raise exception.WorkerCreationFailure(
                reason=msg % cfg.CONF.eventlet_hub)
        self.pool = self.create_pool()
        try:
            eventlet.wsgi.server(self.sock,
                                 self.application,
                                 log=logging.WritableLogger(self.logger),
                                 custom_pool=self.pool,
                                 debug=False)
        except socket.error as err:
            if err[0] != errno.EINVAL:
                raise
        self.pool.waitall()
Code example #46
    def run_server(self):
        """Run a WSGI server."""
        if cfg.CONF.pydev_worker_debug_host:
            utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host,
                                           cfg.CONF.pydev_worker_debug_port)

        eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        try:
            eventlet.hubs.use_hub(cfg.CONF.eventlet_hub)
        except Exception:
            msg = _("eventlet '%s' hub is not available on this platform")
            raise exception.WorkerCreationFailure(reason=msg %
                                                  cfg.CONF.eventlet_hub)
        self.pool = self.create_pool()
        try:
            eventlet.wsgi.server(self.sock,
                                 self.application,
                                 log=logging.WritableLogger(self.logger),
                                 custom_pool=self.pool,
                                 debug=False)
        except socket.error as err:
            if err[0] != errno.EINVAL:
                raise
        self.pool.waitall()
Code example #47
File: wsgi.py  Project: tantexian/sps-2014-12-4
def get_socket(default_port):
    """
    Bind socket to bind ip:port in conf

    note: Mostly comes from Swift with a few small changes...

    :param default_port: port to bind to if none is specified in conf

    :returns : a socket object as returned from socket.listen or
               ssl.wrap_socket if conf specifies cert_file
    """
    bind_addr = get_bind_addr(default_port)

    # TODO(jaypipes): eventlet's greened socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    address_family = [
        addr[0] for addr in socket.getaddrinfo(bind_addr[0],
                                               bind_addr[1],
                                               socket.AF_UNSPEC,
                                               socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]

    cert_file = CONF.cert_file
    key_file = CONF.key_file
    use_ssl = cert_file or key_file
    if use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    def wrap_ssl(sock):
        utils.validate_key_cert(key_file, cert_file)

        ssl_kwargs = {
            'server_side': True,
            'certfile': cert_file,
            'keyfile': key_file,
            'cert_reqs': ssl.CERT_NONE,
        }

        if CONF.ca_file:
            ssl_kwargs['ca_certs'] = CONF.ca_file
            ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

        return ssl.wrap_socket(sock, **ssl_kwargs)

    sock = utils.get_test_suite_socket()
    retry_until = time.time() + 30

    if sock and use_ssl:
        sock = wrap_ssl(sock)
    while not sock and time.time() < retry_until:
        try:
            sock = eventlet.listen(bind_addr,
                                   backlog=CONF.backlog,
                                   family=address_family)
            if use_ssl:
                sock = wrap_ssl(sock)

        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            eventlet.sleep(0.1)
    if not sock:
        raise RuntimeError(_("Could not bind to %(host)s:%(port)s after"
                             " trying for 30 seconds") %
                           {'host': bind_addr[0],
                            'port': bind_addr[1]})
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    # This option isn't available in the OS X version of eventlet
    if hasattr(socket, 'TCP_KEEPIDLE'):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                        CONF.tcp_keepidle)

    return sock
Code example #48
File: config.py  Project: tantexian/sps-2014-12-4
import logging
import logging.config
import logging.handlers
import os

from oslo.config import cfg
from paste import deploy

from sps.version import version_info as version
from sps.openstack.common.gettextutils import _

paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_('Partial name of a pipeline in your paste configuration '
                      'file with the service name removed. For example, if '
                      'your paste section name is '
                      '[pipeline:sps-api-keystone] use the value '
                      '"keystone"')),
    cfg.StrOpt('config_file',
               help=_('Name of the paste configuration file.')),
]
image_format_opts = [
    cfg.ListOpt('container_formats',
                default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova'],
                help=_("Supported values for the 'container_format' "
                       "image attribute"),
                deprecated_opts=[cfg.DeprecatedOpt('container_formats',
                                                   group='DEFAULT')]),
    cfg.ListOpt('disk_formats',
                default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2',
                         'vdi', 'iso'],
Code example #49
File: utils.py  Project: tantexian/sps-2014-12-4
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[current_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be 'desc' or 'asc'"))
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            for j in range(i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            else:
                crit_attrs.append((model_attr > marker_values[i]))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
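
To make the pagination criteria of example #49 concrete: with
sort_keys=['created_at', 'id'], ascending order, and a marker row M, the filter
built in the marker branch expands as sketched below (query and model names are
illustrative):

# page = (paginate_query(query, models.Instance, limit=100,
#                        sort_keys=['created_at', 'id'],
#                        marker=M, sort_dir='asc')
#         .all())
#
# Resulting WHERE clause (an OR over progressively longer key prefixes):
#   (created_at > M.created_at)
#   OR (created_at = M.created_at AND id > M.id)
# i.e. exactly the rows that follow M in the lexicographic ordering.
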
Code example #50
File: utils.py  Project: tantexian/sps-2014-12-4
def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise ColumnError(_("Unsupported id columns type"))
Code example #51
File: wsgi.py  Project: tantexian/sps-2014-12-4
 def _single_run(self, application, sock):
     """Start a WSGI server in a new green thread."""
     self.logger.info(_("Starting single process server"))
     eventlet.wsgi.server(sock, application, custom_pool=self.pool,
                          log=logging.WritableLogger(self.logger),
                          debug=False)
Code example #52
File: wsgi.py  Project: tantexian/sps-2014-12-4
 def from_json(self, datastring):
     try:
         return json.loads(datastring, object_hook=self._sanitizer)
     except ValueError:
         msg = _('Malformed JSON in request body.')
         raise webob.exc.HTTPBadRequest(explanation=msg)
Code example #53
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
      lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
      should work across multiple processes. This means that if two different
      workers both run a method decorated with @synchronized('mylock',
      external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
      special location for external lock files to live. If nothing is set, then
      CONF.lock_path is used as a default.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)
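
A direct usage sketch for the lock() context manager above; the lock name,
prefix, and guarded function are hypothetical:

# with lock('image-cache', lock_file_prefix='sps', external=True):
#     prune_image_cache()   # at most one process/thread runs this at a time
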