Example #1
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        while True:
            self.handle_signal()
            self._respawn_children()
            if self.sigcaught:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT',
                           signal.SIGHUP: 'SIGHUP'}[self.sigcaught]
                LOG.info(_('Caught %s, stopping children'), signame)
            if self.sigcaught != signal.SIGHUP:
                break

            for pid in self.children:
                os.kill(pid, signal.SIGHUP)
            self.running = True
            self.sigcaught = None

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Example #2
    def _wait_for_exit_or_signal(self):
        status = None
        signo = 0

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT',
                       signal.SIGHUP: 'SIGHUP'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_('Exception during rpc cleanup.'))

        return status, signo
Example #3
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
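This `_inner` is the greenthread body of the fixed-interval looping call in oslo-incubator's loopingcall module. A minimal usage sketch, assuming that module's `FixedIntervalLoopingCall` class (the import path and callback are illustrative):

from openstack.common import loopingcall  # assumed oslo-incubator layout

def _heartbeat():
    # Periodic work; raise loopingcall.LoopingCallDone to stop cleanly.
    pass

timer = loopingcall.FixedIntervalLoopingCall(_heartbeat)
# start() spawns the greenthread and returns an event; wait() blocks
# until LoopingCallDone or an unhandled exception ends the loop.
timer.start(interval=10, initial_delay=5).wait()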
Example #4
    def enforce(self,
                rule,
                target,
                creds,
                do_raise=False,
                exc=None,
                *args,
                **kwargs):
        """Checks authorization of a rule against the target and credentials.

        :param rule: A string or BaseCheck instance specifying the rule
                    to evaluate.
        :param target: As much information about the object being operated
                    on as possible, as a dictionary.
        :param creds: As much information about the user performing the
                    action as possible, as a dictionary.
        :param do_raise: Whether to raise an exception or not if check
                        fails.
        :param exc: Class of the exception to raise if the check fails.
                    Any remaining arguments passed to check() (both
                    positional and keyword arguments) will be passed to
                    the exception class. If not specified, PolicyNotAuthorized
                    will be used.

        :return: Returns False if the policy does not allow the action and
                exc is not provided; otherwise, returns a value that
                evaluates to True.  Note: for rules using the "case"
                expression, this True value will be the specified string
                from the expression.
        """

        # NOTE(flaper87): Not logging target or creds to avoid
        # potential security issues.
        LOG.debug(_("Rule %s will be now enforced") % rule)

        self.load_rules()

        # Allow the rule to be a Check tree
        if isinstance(rule, BaseCheck):
            result = rule(target, creds, self)
        elif not self.rules:
            # No rules to reference means we're going to fail closed
            result = False
        else:
            try:
                # Evaluate the rule
                result = self.rules[rule](target, creds, self)
            except KeyError:
                LOG.debug(_("Rule [%s] doesn't exist") % rule)
                # If the rule doesn't exist, fail closed
                result = False

        # If it is False, raise the exception if requested
        if do_raise and not result:
            if exc:
                raise exc(*args, **kwargs)

            raise PolicyNotAuthorized(rule)

        return result
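A hedged usage sketch of `enforce()`; the `enforcer` instance, rule name, and `MyForbidden` exception are illustrative placeholders, not part of the snippet above:

target = {'project_id': 'p1'}
creds = {'roles': ['admin'], 'project_id': 'p1'}

# Boolean style: returns False (or a truthy value) without raising.
if not enforcer.enforce('compute:start', target, creds):
    pass  # deny the request

# Raising style: keyword arguments are forwarded to the exception class.
class MyForbidden(Exception):
    def __init__(self, msg):
        super(MyForbidden, self).__init__(msg)

enforcer.enforce('compute:start', target, creds,
                 do_raise=True, exc=MyForbidden, msg='not allowed')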
Example #5
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(
                        _('Dynamic looping call sleeping for %.02f '
                          'seconds'), idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
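Here the wrapped callback decides its own schedule by returning the number of seconds to idle, capped by `periodic_interval_max`. A sketch, assuming the surrounding `DynamicLoopingCall` class:

def _poll_pending_tasks():
    # ... do one round of work ...
    return 30  # ask the loop to sleep 30 seconds before the next run

timer = loopingcall.DynamicLoopingCall(_poll_pending_tasks)
timer.start(initial_delay=None, periodic_interval_max=60).wait()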
Example #6
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(
                            _('task run outlasted interval by %s sec') %
                            -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #7
    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one fork per second (over a window of
            # workers * 1 second). This lets workers start up quickly
            # while ensuring we don't keep forking children that die
            # instantly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if signo != signal.SIGHUP:
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid
Example #8
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
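The `ssh` argument only needs an `exec_command()` method returning the three channel streams; a paramiko client fits. A sketch with placeholder host, user, and key path:

import paramiko  # assumed client library

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('node1.example.com', username='stack',
               key_filename='/home/stack/.ssh/id_rsa')
stdout, stderr = ssh_execute(client, 'uptime')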
Example #9
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
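The `os.WIF*`/`os.W*` helpers decode the raw status word from `waitpid()`. A self-contained POSIX demonstration of both branches above:

import os
import signal

# Normal exit with a status code:
pid = os.fork()
if pid == 0:
    os._exit(3)
_, status = os.waitpid(pid, 0)
assert os.WIFEXITED(status) and os.WEXITSTATUS(status) == 3

# Killed by a signal:
pid = os.fork()
if pid == 0:
    signal.pause()  # block until a signal arrives; SIGTERM will kill us
os.kill(pid, signal.SIGTERM)
_, status = os.waitpid(pid, 0)
assert os.WIFSIGNALED(status) and os.WTERMSIG(status) == signal.SIGTERM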
Example #10
def ssh_execute(ssh,
                cmd,
                process_input=None,
                addl_env=None,
                check_exit_code=True):
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
Example #11
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
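`inner` is the wrapper a `synchronized`-style decorator produces around `lock()`. An illustrative application (the decorator name and arguments are assumed from context):

@synchronized('volume-ops', lock_file_prefix='svc-', external=True)
def delete_volume(volume_id):
    # At most one process (external=True) runs this body at a time.
    pass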
Example #12
    def enforce(self, rule, target, creds, do_raise=False,
                exc=None, *args, **kwargs):
        """Checks authorization of a rule against the target and credentials.

        :param rule: A string or BaseCheck instance specifying the rule
                    to evaluate.
        :param target: As much information about the object being operated
                    on as possible, as a dictionary.
        :param creds: As much information about the user performing the
                    action as possible, as a dictionary.
        :param do_raise: Whether to raise an exception or not if check
                        fails.
        :param exc: Class of the exception to raise if the check fails.
                    Any remaining arguments passed to check() (both
                    positional and keyword arguments) will be passed to
                    the exception class. If not specified, PolicyNotAuthorized
                    will be used.

        :return: Returns False if the policy does not allow the action and
                exc is not provided; otherwise, returns a value that
                evaluates to True.  Note: for rules using the "case"
                expression, this True value will be the specified string
                from the expression.
        """

        # NOTE(flaper87): Not logging target or creds to avoid
        # potential security issues.
        LOG.debug(_("Rule %s will be now enforced") % rule)

        self.load_rules()

        # Allow the rule to be a Check tree
        if isinstance(rule, BaseCheck):
            result = rule(target, creds, self)
        elif not self.rules:
            # No rules to reference means we're going to fail closed
            result = False
        else:
            try:
                # Evaluate the rule
                result = self.rules[rule](target, creds, self)
            except KeyError:
                LOG.debug(_("Rule [%s] doesn't exist") % rule)
                # If the rule doesn't exist, fail closed
                result = False

        # If it is False, raise the exception if requested
        if do_raise and not result:
            if exc:
                raise exc(*args, **kwargs)

            raise PolicyNotAuthorized(rule)

        return result
Example #13
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:       A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
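A call sketch matching the message example in the docstring; `ctxt` stands in for whatever request context the service carries:

notify(ctxt, 'compute.host1', 'compute.create_instance', 'WARN',
       {'instance_id': 12})
# Priorities outside the logging levels fail fast:
# notify(ctxt, 'compute.host1', 'compute.create_instance', 'URGENT', {})
# -> BadPriorityException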
Example #14
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "that is unsupported by sqlite.")
        raise exception.OpenstackException(message=msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an "
                "instance of sqlalchemy.Column.")
        raise exception.OpenstackException(message=msg % column_name)
    return column
Example #15
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite).")
        raise exception.OpenstackException(message=msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise exception.OpenstackException(message=msg % column_name)
    return column
Example #16
def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise exception.OpenstackException(
        message=_("Unsupported id column type"))
Example #17
def _get_default_deleted_value(table):
    if isinstance(table.c.id.type, Integer):
        return 0
    if isinstance(table.c.id.type, String):
        return ""
    raise exception.OpenstackException(
        message=_("Unsupported id column type"))
Example #18
def notify(context, message):
    """Deprecated in Grizzly. Please use rpc_notifier instead."""

    LOG.deprecated(
        _("The rabbit_notifier is now deprecated."
          " Please use rpc_notifier instead."))
    rpc_notifier.notify(context, message)
Example #19
    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)
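Example #18 above shows the call pattern in context; a minimal sketch of both behaviors:

LOG.deprecated(_("The rabbit_notifier is now deprecated."))
# Normally logs: "Deprecated: The rabbit_notifier is now deprecated."
# With CONF.fatal_deprecations set, it logs at CRITICAL and raises
# DeprecatedConfig instead.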
Example #20
def _populate_resource_types(resource_type_table):
    default_resource_types = {
        'entry1':
        dict(name='instance',
             default_action_id=2,
             created_at=str(datetime.datetime.now()),
             updated_at=str(datetime.datetime.now())),
        'entry2':
        dict(name='volume',
             default_action_id=1,
             created_at=str(datetime.datetime.now()),
             updated_at=str(datetime.datetime.now())),
        'entry3':
        dict(name='image',
             default_action_id=3,
             created_at=str(datetime.datetime.now()),
             updated_at=str(datetime.datetime.now()))
    }

    try:
        i = resource_type_table.insert()
        type_names = sorted(default_resource_types.keys())
        for index in range(0, len(type_names)):
            values = default_resource_types[type_names[index]]
            i.execute({
                'name': values['name'],
                'default_action_id': values['default_action_id'],
                'created_at': values['created_at'],
                'updated_at': values['updated_at']
            })

    except Exception:
        LOG.info(repr(resource_type_table))
        LOG.exception(_('Exception while seeding resource_types table'))
        raise
Example #21
def downgrade(migrate_engine):

    meta = MetaData()
    meta.bind = migrate_engine

    action_resource = Table('action_resource', meta, autoload=True)
    action_execution = Table('action_execution', meta, autoload=True)
    workload_policy_execution = Table('workload_policy_execution',
                                      meta,
                                      autoload=True)
    workload_policies = Table('workload_policies', meta, autoload=True)
    resources = Table('resources', meta, autoload=True)
    actions = Table('actions', meta, autoload=True)
    resource_type = Table('resource_type', meta, autoload=True)

    tables = [
        action_resource, action_execution, workload_policy_execution,
        workload_policies, resources, actions, resource_type
    ]

    for table in tables:
        try:
            table.drop()
        except Exception:
            LOG.exception(_('Exception while dropping table %s.'), repr(table))
            raise
Example #22
    def deprecated(self, msg, *args, **kwargs):
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)
Example #23
def get(url, allowed_schemes=('http', 'https')):
    '''
    Get the data at the specified URL.

    The URL must use the http: or https: schemes.
    The file: scheme is also supported if you override
    the allowed_schemes argument.
    Raise an IOError if getting the data fails.
    '''
    logger.info(_('Fetching data from %s') % url)

    components = urlparse.urlparse(url)

    if components.scheme not in allowed_schemes:
        raise IOError('Invalid URL scheme %s' % components.scheme)

    if components.scheme == 'file':
        try:
            return urllib2.urlopen(url).read()
        except urllib2.URLError as uex:
            raise IOError('Failed to retrieve template: %s' % str(uex))

    try:
        resp = requests.get(url)
        resp.raise_for_status()
        return resp.text
    except exceptions.RequestException as ex:
        raise IOError('Failed to retrieve template: %s' % str(ex))
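Usage sketch; URLs are placeholders, and the file: scheme only works when callers opt in via `allowed_schemes`:

body = get('https://example.com/templates/app.yaml')

# Local files must be allowed explicitly:
body = get('file:///tmp/app.yaml',
           allowed_schemes=('http', 'https', 'file'))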
Example #24
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                if exc.message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        exc.message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = exc.message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
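`inner_func` is the body of a retry-forever decorator; assuming an outer wrapper along the lines of oslo's `forever_retry_uncaught_exceptions` (the name is inferred, not shown above):

@forever_retry_uncaught_exceptions
def consume_messages():
    # Any uncaught exception is logged (throttled to roughly one
    # traceback per minute per distinct message) and the call retries.
    pass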
Example #25
def _populate_resource_types(resource_type_table):
    default_resource_types = {
        'entry1': dict(name='instance', default_action_id=2,
                       created_at=str(datetime.datetime.now()),
                       updated_at=str(datetime.datetime.now())),
        'entry2': dict(name='volume', default_action_id=1,
                       created_at=str(datetime.datetime.now()),
                       updated_at=str(datetime.datetime.now())),
        'entry3': dict(name='image', default_action_id=3,
                       created_at=str(datetime.datetime.now()),
                       updated_at=str(datetime.datetime.now()))
    }

    try:
        i = resource_type_table.insert()
        type_names = sorted(default_resource_types.keys())
        for index in range(0, len(type_names)):
            values = default_resource_types[type_names[index]]
            i.execute({'name': values['name'],
                       'default_action_id': values['default_action_id'],
                       'created_at': values['created_at'],
                       'updated_at': values['updated_at']
                       })

    except Exception:
        LOG.info(repr(resource_type_table))
        LOG.exception(_('Exception while seeding resource_types table'))
        raise
Example #26
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)
Example #27
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                if exc.message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60
                        or exc.message != last_exc_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = exc.message
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
Example #28
def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
             {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
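A sketch of enabling the backdoor, assuming `backdoor_port` is registered as a string option accepting either a single port or a start:end range (as `_parse_port_range` suggests):

CONF.set_override('backdoor_port', '3000:3100')
port = initialize_if_enabled()
# Then attach with e.g.:  telnet 127.0.0.1 <port>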
Example #29
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            logging.error(
                _('Original exception being dropped: %s'),
                traceback.format_exception(self.type_, self.value, self.tb))
            return False
        if self.reraise:
            raise self.type_, self.value, self.tb
Example #30
    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_('Parent process has died unexpectedly, exiting'))

        sys.exit(1)
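A minimal sketch of the pipe setup this watcher relies on: the parent keeps the write end open for its whole life, so the child's blocking `read()` only returns (EOF) once the parent is gone:

import os

rpipe, wpipe = os.pipe()
pid = os.fork()
if pid == 0:
    os.close(wpipe)                    # child keeps only the read end
    readpipe = os.fdopen(rpipe, 'r')
    readpipe.read()                    # blocks until the parent exits
    os._exit(1)
else:
    os.close(rpipe)                    # parent holds the write end open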
Example #31
def notify(context, message):
    """Passes notification to multiple notifiers in a list."""
    for driver in _get_drivers():
        try:
            driver.notify(context, message)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to send to "
                            "notification driver %(driver)s."), locals())
Example #32
    def stop(self):
        # Close the RPC connection first to prevent new requests.
        LOG.info(_("Attempting to stop engine service..."))
        try:
            self.conn.close()
        except Exception:
            pass
        super(EngineService, self).stop()
Example #33
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            logging.error(_('Original exception being dropped: %s'),
                          traceback.format_exception(self.type_,
                                                     self.value,
                                                     self.tb))
            return False
        if self.reraise:
            raise self.type_, self.value, self.tb
Example #34
class LogConfigError(Exception):

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)
Example #35
def notify(context, message):
    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get("priority", CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.notification_topics:
        topic = "%s.%s" % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception:
            LOG.exception(_("Could not send notification to %(topic)s. " "Payload=%(message)s"), locals())
Example #36
def is_enabled():
    cert_file = CONF.ssl.cert_file
    key_file = CONF.ssl.key_file
    ca_file = CONF.ssl.ca_file
    use_ssl = cert_file or key_file

    if cert_file and not os.path.exists(cert_file):
        raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

    if ca_file and not os.path.exists(ca_file):
        raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

    if key_file and not os.path.exists(key_file):
        raise RuntimeError(_("Unable to find key_file : %s") % key_file)

    if use_ssl and (not cert_file or not key_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    return use_ssl
Example #37
    def __init__(self, task_runner, timeout):
        """
        Initialise with the TaskRunner and a timeout period in seconds.
        """
        message = _('%s Timed out') % task_runner
        super(Timeout, self).__init__(message)

        # Note that we don't attempt to handle leap seconds or large clock
        # jumps here. The latter are assumed to be rare and the former
        # negligible in the context of the timeout. Time zone adjustments,
        # Daylight Savings and the like *are* handled. PEP 418 adds a proper
        # monotonic clock, but only in Python 3.3.
        self._endtime = wallclock() + timeout
Example #38
def db_sync(version=None):
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.Error(_("version should be an integer"))
    current_version = db_version()
    repository = _find_migrate_repo()
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository,
                                        version)
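Call sketch, following the validation and version comparison above:

db_sync()       # no version given: upgrade to the newest migration
db_sync('7')    # coerced to int 7; upgrades or downgrades toward it
db_sync('abc')  # raises exception.Error("version should be an integer")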
Example #39
    def __init__(self, task_runner, timeout):
        """
        Initialise with the TaskRunner and a timeout period in seconds.
        """
        message = _('%s Timed out') % task_runner
        super(Timeout, self).__init__(message)

        # Note that we don't attempt to handle leap seconds or large clock
        # jumps here. The latter are assumed to be rare and the former
        # negligible in the context of the timeout. Time zone adjustments,
        # Daylight Savings and the like *are* handled. PEP 418 adds a proper
        # monotonic clock, but only in Python 3.3.
        self._endtime = wallclock() + timeout
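A companion check this deadline enables, sketched on the assumption that the class exposes one (it is not part of the snippet):

    def expired(self):
        # True once the wall-clock deadline set in __init__ has passed.
        return wallclock() > self._endtime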
Example #40
    def _child_wait_for_exit_or_signal(self, launcher):
        status = None
        signo = 0

        try:
            launcher.wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT',
                       signal.SIGHUP: 'SIGHUP'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo
Example #41
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drop (or mark ad `deleted` if use_soft_delete is True) old
    duplicate rows form table with name `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(
                _("Deleting duplicated row with id: %(id)s from table: "
                  "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
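Invocation sketch; the table and unique-constraint column names are illustrative:

# Soft-delete every row that shares (name, project_id) with a newer one:
drop_old_duplicate_entries_from_table(migrate_engine, 'instance_types',
                                      True, 'name', 'project_id')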
Example #42
def notify(context, message):
    """Sends a notification via RPC."""
    if not context:
        context = req_context.get_admin_context()
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    for topic in CONF.rpc_notifier2.topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message, envelope=True)
        except Exception:
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"), locals())
Example #43
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_column_names.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table with name `table_name`.

    :param migrate_engine:  Sqlalchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
Example #44
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
                                'seconds'), idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #45
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
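The grammar in brief; the 'role' line assumes a RoleCheck-style entry registered under that key in `_checks`:

_parse_check('!')              # -> FalseCheck(): always deny
_parse_check('@')              # -> TrueCheck(): always allow
_parse_check('role:admin')     # -> _checks['role']('role', 'admin')
_parse_check('no-colon-here')  # -> FalseCheck(): unparsable, fail closed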
Example #46
def load_modules(package, ignore_error=False):
    '''Dynamically load all modules from a given package.'''
    path = package.__path__
    pkg_prefix = package.__name__ + '.'

    for importer, module_name, is_package in pkgutil.walk_packages(path,
                                                                   pkg_prefix):
        try:
            module = _import_module(importer, module_name, package)
        except ImportError as ex:
            logger.error(_('Failed to import module %s') % module_name)
            if not ignore_error:
                raise
        else:
            if module is not None:
                yield module
Example #47
    def set_rules(self, rules, overwrite=True):
        """Create a new Rules object based on the provided dict of rules.

        :param rules: New rules to use. It should be an instance of dict.
        :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.
        """

        if not isinstance(rules, dict):
            raise TypeError(_("Rules must be an instance of dict or Rules, "
                            "got %s instead") % type(rules))

        if overwrite:
            self.rules = Rules(rules, self.default_rule)
        else:
            self.rules.update(rules)
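A usage sketch tying this back to `_parse_check` above; the `enforcer` instance is assumed:

enforcer.set_rules({'admin_required': _parse_check('role:admin')})
# Merge instead of replacing what is already loaded:
enforcer.set_rules({'owner_or_admin': _parse_check('rule:admin_required')},
                   overwrite=False)
# Anything other than a dict fails fast with TypeError.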