Example #1
0
File: mail.py Project: yarda/bodhi
def send(to, msg_type, update, sender=None, agent=None):
    """
    Send an update notification email to the given recipients.

    Args:
        to (iterable or basestring): One or more e-mail addresses.
        msg_type (basestring): Key into the MESSAGES template dict.
        update: The Update being mailed about.
        sender (basestring or None): Address for the From: header; send_mail
            chooses its default when None.
        agent (basestring): Username that performed the triggering action.
    Raises:
        AssertionError: If no agent is given.
    """
    assert agent, 'No agent given'

    # Ternary instead of the fragile "cond and a or b" idiom.
    critpath = '[CRITPATH] ' if getattr(update, 'critpath', False) else ''
    headers = {}

    # Buildroot-override mails carry no update metadata headers.
    if msg_type != 'buildroot_override':
        headers = {
            "X-Bodhi-Update-Type": update.type.description,
            "X-Bodhi-Update-Release": update.release.name,
            "X-Bodhi-Update-Status": update.status.description,
            "X-Bodhi-Update-Builds": ",".join([b.nvr for b in update.builds]),
            "X-Bodhi-Update-Title": update.title,
            "X-Bodhi-Update-Pushed": update.pushed,
            "X-Bodhi-Update-Submitter": update.user.name,
        }
        if update.request:
            headers["X-Bodhi-Update-Request"] = update.request.description
        # A stable Message-ID lets follow-up mails thread under the
        # original "new" notification in mail clients.
        initial_message_id = "<bodhi-update-%s-%s-%s@%s>" % (
            update.id, update.user.name, update.release.name,
            config.get('message_id_email_domain'))

        if msg_type == 'new':
            headers["Message-ID"] = initial_message_id
        else:
            headers["References"] = initial_message_id
            headers["In-Reply-To"] = initial_message_id

    # Subject and body do not depend on the recipient; build them once
    # instead of once per recipient.
    subject = '[Fedora Update] %s[%s] %s' % (critpath, msg_type, update.title)
    fields = MESSAGES[msg_type]['fields'](agent, update)
    body = MESSAGES[msg_type]['body'] % fields
    for person in iterate(to):
        send_mail(sender, person, subject, body, headers=headers)
Example #2
0
    def run(self):
        """Configure moksha from our fedmsg config and run its hub."""
        # Mangle fedmsg.d/ style configuration into the shape moksha
        # expects, just like fedmsg.commands.hub does.
        inbound = list(iterate(self.config['relay_inbound']))
        self.config.update({
            'zmq_subscribe_endpoints': ",".join(inbound),
            'zmq_subscribe_method': "bind",
        })

        # Flip the special bit that allows the RelayConsumer to run.
        self.config[RelayConsumer.config_key] = True

        from moksha.hub import main
        # Try each outbound endpoint in turn; the first one we can bind
        # runs the hub (main blocks, so its result is returned directly).
        for endpoint in self.config['endpoints']['relay_outbound']:
            self.config['zmq_publish_endpoints'] = endpoint
            try:
                return main(
                    options=self.config,        # pass in our config dict
                    consumers=[RelayConsumer],  # only run this one consumer
                    framework=False,            # quiet moksha's logging
                )
            except zmq.ZMQError:
                self.log.debug("Failed to bind to %r" % endpoint)

        raise IOError("Failed to bind to any outbound endpoints.")
Example #3
0
    def run(self):
        """Configure moksha from the fedmsg config and run its hub."""
        # Do just like in fedmsg.commands.hub and mangle fedmsg.d/ to work
        # with moksha's expected configuration.
        # relay_inbound may be a single string or a list; iterate() handles both.
        moksha_options = dict(
            zmq_subscribe_endpoints=",".join(
                list(iterate(self.config['relay_inbound']))),
            zmq_subscribe_method="bind",
        )
        self.config.update(moksha_options)

        # Flip the special bit that allows the RelayConsumer to run
        self.config[RelayConsumer.config_key] = True

        from moksha.hub import main
        # Try the outbound endpoints one at a time; the first successful
        # bind runs the hub (main blocks, so we return its result directly).
        for publish_endpoint in self.config['endpoints']['relay_outbound']:
            self.config['zmq_publish_endpoints'] = publish_endpoint
            try:
                return main(
                    # Pass in our config dict
                    options=self.config,
                    # Only run this *one* consumer
                    consumers=[RelayConsumer],
                    # Tell moksha to quiet its logging.
                    framework=False,
                )
            except zmq.ZMQError:
                # Probably another process holds this port; try the next one.
                self.log.debug("Failed to bind to %r" % publish_endpoint)

        raise IOError("Failed to bind to any outbound endpoints.")
Example #4
0
def relay(**kw):
    """ Relay connections from active loggers to the bus.

    ``fedmsg-relay`` is a service which binds to two ports, listens for
    messages on one and emits them on the other.  ``fedmsg-logger``
    requires that an instance of ``fedmsg-relay`` be running *somewhere*
    and that its inbound address be listed in the config as one of the
    entries in :term:`relay_inbound`.

    ``fedmsg-relay`` becomes a necessity for integration points that cannot
    bind consistently to and serve from a port.  See :doc:`topology` for the
    mile-high view.  More specifically, ``fedmsg-relay`` is a
    SUB.bind()->PUB.bind() relay.
    """

    # Mangle fedmsg-config.py values into the shape moksha expects, just
    # like fedmsg.commands.hub does.
    outbound = kw["endpoints"]["relay_outbound"]
    inbound = list(iterate(kw["relay_inbound"]))
    kw.update({
        "zmq_publish_endpoints": ",".join(outbound),
        "zmq_subscribe_endpoints": ",".join(inbound),
        "zmq_subscribe_method": "bind",
    })

    # Flip the special bit that allows the RelayConsumer to run.
    kw[RelayConsumer.config_key] = True

    from moksha.hub import main

    main(options=kw, consumers=[RelayConsumer])
Example #5
0
def relay(**kw):
    """ Relay connections from active loggers to the bus.

    ``fedmsg-relay`` is a service which binds to two ports, listens for
    messages on one and emits them on the other.  ``fedmsg-logger``
    requires that an instance of ``fedmsg-relay`` be running *somewhere*
    and that it's inbound address be listed in the config as one of the entries
    in :term:`relay_inbound`.

    ``fedmsg-relay`` becomes a necessity for integration points that cannot
    bind consistently to and serve from a port.  See :doc:`topology` for the
    mile-high view.  More specifically, ``fedmsg-relay`` is a
    SUB.bind()->PUB.bind() relay.
    """

    # Do just like in fedmsg.commands.hub and mangle fedmsg-config.py to work
    # with moksha's expected configuration.
    # relay_inbound may be a single string or a list; iterate() handles both.
    moksha_options = dict(
        zmq_publish_endpoints=",".join(kw['endpoints']["relay_outbound"]),
        zmq_subscribe_endpoints=",".join(list(iterate(kw['relay_inbound']))),
        zmq_subscribe_method="bind",
    )
    kw.update(moksha_options)

    # Flip the special bit that allows the RelayConsumer to run
    kw[RelayConsumer.config_key] = True

    # Imported late so the hub machinery is only pulled in when relaying.
    from moksha.hub import main

    main(options=kw, consumers=[RelayConsumer])
Example #6
0
File: util.py Project: hanzz/bodhi
def splitter(value):
    """
    Parse a string or list of comma or space delimited builds, returning a list of the values.

    Examples:
        >>> util.splitter('one,two,,three,')
        ['one', 'two', 'three']
        >>> util.splitter(['one,two,,three,,', 'four'])
        ['one', 'two', 'three', 'four']

    Args:
        value (basestring, colander.null, or iterable): The value to interpret as a list.
    Returns:
        list: A list of strings.
    """
    # colander.null signals "no value submitted"; return None in that case.
    if value == colander.null:
        return

    items = []
    for entry in iterate(value):
        if isinstance(entry, six.string_types):
            # Commas become spaces so one split handles both delimiters;
            # split() with no argument discards the empty fields for us.
            items.extend(entry.replace(',', ' ').split())
        elif entry is not None:
            items.append(entry)

    return items
Example #7
0
def send(to, msg_type, update, sender=None, agent=None):
    """ Send an update notification email to a given recipient """
    # agent is the username responsible for the action; it is required.
    assert agent, 'No agent given'

    # Subject prefix for critical-path updates.
    critpath = getattr(update, 'critpath', False) and '[CRITPATH] ' or ''
    headers = {}

    # Buildroot-override mails carry no update metadata headers.
    if msg_type != 'buildroot_override':
        headers = {
            "X-Bodhi-Update-Type": update.type.description,
            "X-Bodhi-Update-Release": update.release.name,
            "X-Bodhi-Update-Status": update.status.description,
            "X-Bodhi-Update-Builds": ",".join([b.nvr for b in update.builds]),
            "X-Bodhi-Update-Title": update.title,
            "X-Bodhi-Update-Pushed": update.pushed,
            "X-Bodhi-Update-Submitter": update.user.name,
        }
        if update.request:
            headers["X-Bodhi-Update-Request"] = update.request.description
        # A stable Message-ID lets follow-up mails thread under the
        # original "new" notification in mail clients.
        initial_message_id = "<bodhi-update-%s-%s-%s@%s>" % (
            update.id, update.user.name, update.release.name,
            config.get('message_id_email_domain'))

        if msg_type == 'new':
            headers["Message-ID"] = initial_message_id
        else:
            headers["References"] = initial_message_id
            headers["In-Reply-To"] = initial_message_id

    subject_template = '[Fedora Update] %s[%s] %s'
    # "to" may be a single address or a list; iterate() handles both.
    for person in iterate(to):
        subject = subject_template  % (critpath, msg_type, update.title)
        fields = MESSAGES[msg_type]['fields'](agent, update)
        body = MESSAGES[msg_type]['body'] % fields
        send_mail(sender, person, subject, body, headers=headers)
Example #8
0
def collectd(**kw):
    """ Print machine-readable information for collectd to monitor the bus. """
    # Initialize the processors before CollectdConsumer is instantiated.
    fedmsg.text.make_processors(**kw)

    # Mangle fedmsg-config.py values into moksha's expected shape, just
    # like fedmsg.commands.hub does.
    kw.update({
        'zmq_publish_endpoints': ",".join(kw['endpoints']["relay_outbound"]),
        'zmq_subscribe_endpoints': ",".join(list(iterate(kw['relay_inbound']))),
        'zmq_subscribe_method': "bind",
    })
    kw[CollectdConsumer.config_key] = True

    # Emit stats on the interval the collectd config asks for.
    CollectdProducer.frequency = datetime.timedelta(
        seconds=kw['collectd_interval']
    )

    # Turn off moksha logging.
    logging.disable(logging.INFO)

    from moksha.hub import main
    main(kw, [CollectdConsumer], [CollectdProducer])
Example #9
0
    def test_iterate(self):
        """Exercise iterutils.iterate over None, scalars, iterables, strings."""
        # None produces an (empty) iterator rather than raising.
        iterutils.iterate(None)
        # Non-iterables come back wrapped as a single-item iterator.
        for item in self.non_iterable_data:
            tools.ok_(list(iterutils.iterate(item)) == [item])

        for item in self.iterable_data[:-1]:
            tools.ok_(list(iterutils.iterate(item)) == list(item))

        # iter() is exhausted after use so we have to test separately
        tools.ok_(list(iterutils.iterate(iter([1, 2, 3]))) == [1, 2, 3])

        # strings
        # By default strings/bytes are treated as scalars; include_string=True
        # iterates their elements (ints for bytes, per the expected values).
        tools.ok_(list(iterutils.iterate(b'abc')) == [b'abc'])
        tools.eq_(list(iterutils.iterate(b'abc', include_string=True)), [ord(b'a'), ord(b'b'), ord(b'c')])
        tools.ok_(list(iterutils.iterate('abc')) == ['abc'])
        tools.ok_(list(iterutils.iterate('abc', include_string=True)) == ['a', 'b', 'c'])
Example #10
0
    def test_iterate(self):
        """Exercise iterutils.iterate over None, scalars, iterables, strings."""
        # None produces an (empty) iterator rather than raising.
        iterutils.iterate(None)
        # Non-iterables come back wrapped as a single-item iterator.
        for item in self.non_iterable_data:
            tools.ok_(list(iterutils.iterate(item)) == [item])

        for item in self.iterable_data[:-1]:
            tools.ok_(list(iterutils.iterate(item)) == list(item))

        # iter() is exhausted after use so we have to test separately
        tools.ok_(list(iterutils.iterate(iter([1, 2, 3]))) == [1, 2, 3])

        # strings
        # By default strings are treated as scalars; include_string=True
        # iterates over their individual characters instead.
        tools.ok_(list(iterutils.iterate('abc')) == ['abc'])
        tools.ok_(list(iterutils.iterate('abc', include_string=True)) == ['a', 'b', 'c'])
        tools.ok_(list(iterutils.iterate(u'abc')) == [u'abc'])
        tools.ok_(list(iterutils.iterate(u'abc', include_string=True)) == [u'a', u'b', u'c'])
Example #11
0
    def test_iterate(self):
        """Exercise iterutils.iterate over None, scalars, iterables, strings."""
        # None produces an (empty) iterator rather than raising.
        iterutils.iterate(None)
        # Non-iterables come back wrapped as a single-item iterator.
        for item in self.non_iterable_data:
            tools.ok_(list(iterutils.iterate(item)) == [item])

        for item in self.iterable_data[:-1]:
            tools.ok_(list(iterutils.iterate(item)) == list(item))

        # iter() is exhausted after use so we have to test separately
        tools.ok_(list(iterutils.iterate(iter([1, 2, 3]))) == [1, 2, 3])

        # strings
        # By default strings/bytes are treated as scalars; include_string=True
        # iterates their elements (ints for bytes, per the expected values).
        tools.ok_(list(iterutils.iterate(b"abc")) == [b"abc"])
        tools.eq_(list(iterutils.iterate(b"abc", include_string=True)), [ord(b"a"), ord(b"b"), ord(b"c")])
        tools.ok_(list(iterutils.iterate("abc")) == ["abc"])
        tools.ok_(list(iterutils.iterate("abc", include_string=True)) == ["a", "b", "c"])
Example #12
0
def amqp_subscribe(topic):
    """ Return a javascript callback that subscribes to a given topic,
        or a list of topics.
    """
    # One copy of this JS fragment is emitted per topic.
    template = """
        moksha.debug("Subscribing to the '%(topic)s' topic");
        var receiver = moksha_amqp_session.receiver('amq.topic/%(topic)s')
        receiver.onReady = raw_msg_callback;
        receiver.capacity(0xFFFFFFFF);
    """
    fragments = [template % {'topic': t} for t in iterate(topic)]
    return ''.join(fragments)
Example #13
0
    def _create_poller(self, topic="", passive=False, **kw):
        """
        Build a zmq.Poller subscribed to every configured endpoint.

        Args:
            topic (str): zmq subscription prefix; "" subscribes to everything.
            passive (bool): When True, bind() to endpoints instead of
                connect()ing to them.
        Returns:
            tuple: (zmq.Poller, dict mapping each subscriber socket to its
            (endpoint name, endpoint url) pair).
        """
        # TODO -- do the zmq_strict logic dance with "topic" here.
        # It is buried in moksha.hub, but we need it to work the same way
        # here.

        # TODO -- the 'passive' here and the 'active' are ambiguous.  They
        # don't actually mean the same thing.  This should be resolved.
        method = 'bind' if passive else 'connect'

        failed_hostnames = []
        subs = {}
        for _name, endpoint_list in six.iteritems(self.c['endpoints']):

            # You never want to actually subscribe to this thing, but sometimes
            # it appears in the endpoints list due to a hack where it gets
            # added in __init__ above.
            if _name == 'relay_inbound':
                continue

            # Listify endpoint_list in case it is a single string
            endpoint_list = iterate(endpoint_list)
            for endpoint in endpoint_list:
                # First, some sanity checking.  zeromq will potentially
                # segfault if we don't do this check.
                hostname = endpoint.split(':')[1][2:]
                if hostname in failed_hostnames:
                    continue

                if hostname != '*':
                    try:
                        socket.gethostbyname_ex(hostname)
                    except socket.error:
                        # Was a bare "except:", which also swallowed
                        # KeyboardInterrupt/SystemExit.  DNS failures raise
                        # socket.gaierror, a subclass of socket.error.
                        failed_hostnames.append(hostname)
                        self.log.warn("Couldn't resolve %r" % hostname)
                        continue

                # OK, sanity checks pass.  Create the subscriber and connect.
                subscriber = self.context.socket(zmq.SUB)
                subscriber.setsockopt(zmq.SUBSCRIBE, topic.encode('utf-8'))

                set_high_water_mark(subscriber, self.c)
                set_tcp_keepalive(subscriber, self.c)
                set_tcp_reconnect(subscriber, self.c)

                getattr(subscriber, method)(endpoint)
                subs[subscriber] = (_name, endpoint)

        # Register the sockets we just built with a zmq Poller.
        poller = zmq.Poller()
        for subscriber in subs:
            poller.register(subscriber, zmq.POLLIN)

        return (poller, subs)
Example #14
0
def splitter(value):
    """Parse a string or list of comma or space delimited builds"""
    # colander.null signals "nothing submitted" -- bail out with None.
    if value == colander.null:
        return

    items = []
    for entry in iterate(value):
        if isinstance(entry, basestring):
            # Commas become spaces so a single split handles both delimiters;
            # split() with no argument drops the empty fields for us.
            items.extend(entry.replace(',', ' ').split())
        elif entry is not None:
            items.append(entry)

    return items
Example #15
0
    def run(self):
        """Configure moksha from the fedmsg config and run its hub."""
        # Do just like in fedmsg.commands.hub and mangle fedmsg-config.py to work
        # with moksha's expected configuration.
        # relay_inbound may be a single string or a list; iterate() handles both.
        moksha_options = dict(
            zmq_publish_endpoints=",".join(self.config['endpoints']["relay_outbound"]),
            zmq_subscribe_endpoints=",".join(list(iterate(self.config['relay_inbound']))),
            zmq_subscribe_method="bind",
        )
        self.config.update(moksha_options)

        # Flip the special bit that allows the RelayConsumer to run
        self.config[RelayConsumer.config_key] = True

        # Imported late so the hub machinery loads only when actually running.
        from moksha.hub import main
        main(options=self.config, consumers=[RelayConsumer])
Example #16
0
def splitter(value):
    """Parse a string or list of comma or space delimited builds"""
    # colander.null means "no value submitted"; return None in that case.
    if value == colander.null:
        return

    items = []
    # iterate() lets a bare string be handled the same as a list of strings.
    for v in iterate(value):
        if isinstance(v, basestring):
            # Treat commas as spaces, then split; empty segments disappear.
            for item in v.replace(',', ' ').split():
                items.append(item)

        elif v is not None:
            items.append(v)

    return items
Example #17
0
def send(to, msg_type, update, sender=None, agent=None):
    """
    Send an update notification email to a given recipient.

    Args:
        to (iterable): An iterable of e-mail addresses to send an update e-mail to.
        msg_type (basestring): The message template to use. Should be one of the keys in the
            MESSAGES template.
        update (bodhi.server.models.Update): The Update we are mailing people about.
        sender (basestring or None): The address to use in the From: header. If None, the
            "bodhi_email" setting will be used as the From: header.
        agent (basestring): The username that performed the action that generated this e-mail.
    """
    assert agent, 'No agent given'

    # Ternary instead of the fragile "cond and a or b" idiom.
    critpath = '[CRITPATH] ' if getattr(update, 'critpath', False) else ''
    headers = {}

    # Buildroot-override mails carry no update metadata headers.
    if msg_type != 'buildroot_override':
        headers = {
            "X-Bodhi-Update-Type": update.type.description,
            "X-Bodhi-Update-Release": update.release.name,
            "X-Bodhi-Update-Status": update.status.description,
            "X-Bodhi-Update-Builds": ",".join([b.nvr for b in update.builds]),
            "X-Bodhi-Update-Title": update.beautify_title(nvr=True),
            "X-Bodhi-Update-Pushed": update.pushed,
            "X-Bodhi-Update-Submitter": update.user.name,
        }
        if update.request:
            headers["X-Bodhi-Update-Request"] = update.request.description
        # A stable Message-ID lets follow-up mails thread under the
        # original "new" notification in mail clients.
        initial_message_id = "<bodhi-update-%s-%s-%s@%s>" % (
            update.id, update.user.name, update.release.name,
            config.get('message_id_email_domain'))

        if msg_type == 'new':
            headers["Message-ID"] = initial_message_id
        else:
            headers["References"] = initial_message_id
            headers["In-Reply-To"] = initial_message_id

    # Subject and body are identical for every recipient; build them once
    # instead of once per recipient.
    subject = u'[Fedora Update] %s[%s] %s' % (critpath, msg_type,
                                              update.beautify_title(nvr=True))
    fields = MESSAGES[msg_type]['fields'](agent, update)
    body = MESSAGES[msg_type]['body'] % fields
    for person in iterate(to):
        send_mail(sender, person, subject, body, headers=headers)
Example #18
0
def _find_config(conf_files=tuple()):
    """
    Return a list of config files that actually exist on the filesystem.

    :arg conf_files: Iterable of manually specified config files (the
        docstring previously referred to a nonexistent ``config_file`` arg).
    :returns: a list of config_files.  Configuration in the last files in the
        list should override the first ones.
    """
    mlog.debug('Entered _find_config()')

    # System-wide and per-user configs come first so that any manually
    # specified files (expanded for ~ and $VARS) can override them.
    paths = itertools.chain((SYSTEM_CONFIG_FILE, USER_CONFIG_FILE),
                            (os.path.expanduser(os.path.expandvars(p))
                             for p in iterate(conf_files)))

    # Keep only the candidates that actually exist on disk.
    config_files = [conf_path for conf_path in paths
                    if os.path.exists(conf_path)]

    mlog.fields(cfg_files=config_files).debug('Leaving _find_config()')
    return config_files
Example #19
0
    def __init__(self, hub):
        """Attach this consumer to *hub*: subscribe its topics, optional DB."""
        self.hub = hub
        self.log = log

        # Use the JSON-decoding wrapper when the consumer asks for it.
        callback = self._consume
        if self.jsonify:
            callback = self._consume_json

        # self.topic may be a single string or a list; iterate() handles both.
        for topic in iterate(self.topic):
            log.debug('Subscribing to consumer topic %s' % topic)
            self.hub.subscribe(topic, callback)

        # If the consumer specifies an 'app', then setup `self.engine` to
        # be a SQLAlchemy engine, along with a configured DBSession
        app = getattr(self, 'app', None)
        self.engine = self.DBSession = None
        if app:
            log.debug("Setting up individual engine for consumer")
            from sqlalchemy.orm import sessionmaker
            self.engine = create_app_engine(app, hub.config)
            self.DBSession = sessionmaker(bind=self.engine)()

        self._initialized = True
Example #20
0
def collectd(**kw):
    """ Print machine-readable information for collectd to monitor the bus. """

    # Initialize the processors before CollectdConsumer is instantiated.
    fedmsg.text.make_processors(**kw)

    # Do just like in fedmsg.commands.hub and mangle fedmsg-config.py to work
    # with moksha's expected configuration.
    # relay_inbound may be a single string or a list; iterate() handles both.
    moksha_options = dict(
        zmq_publish_endpoints=",".join(kw['endpoints']["relay_outbound"]),
        zmq_subscribe_endpoints=",".join(list(iterate(kw['relay_inbound']))),
        zmq_subscribe_method="bind",
    )
    kw.update(moksha_options)
    kw[CollectdConsumer.config_key] = True

    # Emit stats on the interval requested by the collectd config.
    CollectdProducer.frequency = datetime.timedelta(
        seconds=kw['collectd_interval'])

    # Turn off moksha logging.
    logging.disable(logging.INFO)

    from moksha.hub import main
    main(kw, [CollectdConsumer], [CollectdProducer])
Example #21
0
    def __init__(self, hub):
        """Attach this consumer to *hub*: subscribe its topics, optional DB."""
        self.hub = hub
        self.log = log

        # Use the JSON-decoding wrapper when the consumer asks for it.
        callback = self._consume
        if self.jsonify:
            callback = self._consume_json

        # self.topic may be a single string or a list; iterate() handles both.
        for topic in iterate(self.topic):
            log.debug("Subscribing to consumer topic %s" % topic)
            self.hub.subscribe(topic, callback)

        # If the consumer specifies an 'app', then setup `self.engine` to
        # be a SQLAlchemy engine, along with a configured DBSession
        app = getattr(self, "app", None)
        self.engine = self.DBSession = None
        if app:
            log.debug("Setting up individual engine for consumer")
            from sqlalchemy.orm import sessionmaker

            self.engine = create_app_engine(app, hub.config)
            self.DBSession = sessionmaker(bind=self.engine)()

        self._initialized = True
Example #22
0
def load_config(extra_args=None,
                doc=None,
                filenames=None,
                invalidate_cache=False,
                fedmsg_command=False,
                disable_defaults=False):
    """ Setup a runtime config dict by integrating the following sources
    (ordered by precedence):

      - defaults (unless disable_defaults = True)
      - config file
      - command line arguments

    If the ``fedmsg_command`` argument is False, no command line arguments are
    checked.

    """
    global __cache

    if invalidate_cache:
        __cache = {}

    # Results are memoized; repeated calls are cheap until invalidated.
    if __cache:
        return __cache

    # Coerce defaults if arguments are not supplied.
    extra_args = extra_args or []
    doc = doc or ""

    if not disable_defaults:
        config = copy.deepcopy(defaults)
    else:
        config = {}

    config.update(_process_config_file(filenames=filenames))

    # This is optional (and defaults to false) so that only 'fedmsg-*' commands
    # are required to provide these arguments.
    # For instance, the moksha-hub command takes a '-v' argument and internally
    # makes calls to fedmsg.  We don't want to impose all of fedmsg's CLI
    # option constraints on programs that use fedmsg, so we make it optional.
    if fedmsg_command:
        config.update(_process_arguments(extra_args, doc, config))

    # If the user specified a config file on the command line, then start over
    # but read in that file instead.
    if not filenames and config.get('config_filename', None):
        return load_config(extra_args,
                           doc,
                           filenames=[config['config_filename']])

    # Just a little debug option.  :)
    if config.get('print_config'):
        print(pretty_dumps(config))
        sys.exit(0)

    if config.get('environment', 'prod') not in VALID_ENVIRONMENTS:
        raise ValueError("%r not one of %r" %
                         (config['environment'], VALID_ENVIRONMENTS))

    if not disable_defaults and 'endpoints' not in config:
        raise ValueError("No config value 'endpoints' found.")

    if not isinstance(config.get('endpoints', {}), dict):
        raise ValueError("The 'endpoint' config value must be a dict.")

    # "Listify" each endpoint value; it may be a single string or a list.
    if 'endpoints' in config:
        config['endpoints'] = dict([(k, list(iterate(v)))
                                    for k, v in config['endpoints'].items()])

    # Resolve DNS SRV records into concrete tcp:// endpoint urls.
    if 'srv_endpoints' in config and len(config['srv_endpoints']) > 0:
        from dns.resolver import query, NXDOMAIN, Timeout, NoNameservers
        for e in config['srv_endpoints']:
            urls = []
            try:
                records = query('_fedmsg._tcp.{0}'.format(e), 'SRV')
            except NXDOMAIN:
                warnings.warn("There is no appropriate SRV records " +
                              "for {0}".format(e))
                continue
            except Timeout:
                warnings.warn("The DNS query for the SRV records of" +
                              " {0} timed out.".format(e))
                continue
            except NoNameservers:
                warnings.warn("No name server is available, please " +
                              "check the configuration")
                break

            for rec in records:
                urls.append('tcp://{hostname}:{port}'.format(
                    hostname=rec.target.to_text(), port=rec.port))
            config['endpoints'][e] = list(iterate(urls))

    if 'topic_prefix_re' not in config and 'topic_prefix' in config:
        # Turn "org.fedoraproject" into "org\.fedoraproject\.(dev|stg|prod)"
        # Raw strings keep the same value while avoiding the invalid "\."
        # escape-sequence DeprecationWarning on Python 3.6+.
        config['topic_prefix_re'] = config['topic_prefix'].replace('.', r'\.')\
            + r'\.(%s)' % '|'.join(VALID_ENVIRONMENTS)

    __cache = config
    return config
Example #23
0
    def __init__(self, **config):
        """
        Set up the zmq context and (unless muted) the publishing socket.

        Resolves the message-signing cert/key when ``sign_messages`` is
        enabled, then walks the configured endpoints for our ``name``,
        binding or connecting (per ``active``) until one succeeds.

        Raises:
            IOError: If no publishing endpoint could be established.
        """
        super(FedMsgContext, self).__init__()
        self.log = logging.getLogger("fedmsg")

        self.c = config
        self.hostname = socket.gethostname().split('.', 1)[0]

        # Prepare our context and publisher
        self.context = zmq.Context(config['io_threads'])
        method = ['bind', 'connect'][config['active']]

        # If no name is provided, use the calling module's __name__ to decide
        # which publishing endpoint to use.
        if not config.get("name", None):
            module_name = guess_calling_module(default="fedmsg")
            config["name"] = module_name + '.' + self.hostname

            if any(map(config["name"].startswith, ['fedmsg'])):
                config["name"] = None

        # Find my message-signing cert if I need one.
        if self.c.get('sign_messages', False) and config.get("name"):
            # Compute the cert/key index up front.  Previously this was
            # computed only inside the x509 branch below, so the gpg branch
            # always crashed with a NameError on cert_index.
            if 'cert_prefix' in config:
                cert_index = "%s.%s" % (config['cert_prefix'],
                                        self.hostname)
            else:
                cert_index = config['name']
                if cert_index == 'relay_inbound':
                    cert_index = "shell.%s" % self.hostname

            if not config.get("crypto_backend") == "gpg":
                self.c['certname'] = self.c['certnames'][cert_index]
            else:
                self.c['gpg_signing_key'] = self.c['gpg_keys'][cert_index]

        # Do a little special-case mangling.  We never want to "listen" to the
        # relay_inbound address, but in the special case that we want to emit
        # our messages there, we add it to the :term:`endpoints` dict so that
        # the code below where we "Actually set up our publisher" can be
        # simplified.  See Issue #37 - http://bit.ly/KN6dEK
        if config.get('active', False):
            # If the user has called us with "active=True" then presumably they
            # have given us a "name" as well.
            name = config.get("name", "relay_inbound")
            config['endpoints'][name] = config[name]

        # Actually set up our publisher
        if (
            not config.get("mute", False) and
            config.get("name", None) and
            config.get("endpoints", None) and
            config['endpoints'].get(config['name'])
        ):
            # Construct it.
            self.publisher = self.context.socket(zmq.PUB)

            set_high_water_mark(self.publisher, config)
            set_tcp_keepalive(self.publisher, config)

            # Set a zmq_linger, thus doing a little bit more to ensure that our
            # message gets to the fedmsg-relay (*if* we're talking to the relay
            # which is the case when method == 'connect').
            if method == 'connect':
                self.publisher.setsockopt(zmq.LINGER, config['zmq_linger'])

            # "Listify" our endpoints.  If we're given a list, good.  If we're
            # given a single item, turn it into a list of length 1.
            config['endpoints'][config['name']] = list(iterate(
                config['endpoints'][config['name']]))

            # Try endpoint after endpoint in the list of endpoints.  If we
            # succeed in establishing one, then stop.  *That* is our publishing
            # endpoint.
            _established = False
            for endpoint in config['endpoints'][config['name']]:

                if method == 'bind':
                    endpoint = "tcp://*:{port}".format(
                        port=endpoint.rsplit(':')[-1]
                    )

                try:
                    # Call either bind or connect on the new publisher.
                    # This will raise an exception if there's another process
                    # already using the endpoint.
                    getattr(self.publisher, method)(endpoint)
                    # If we can do this successfully, then stop trying.
                    _established = True
                    break
                except zmq.ZMQError:
                    # If we fail to bind or connect, there's probably another
                    # process already using that endpoint port.  Try the next
                    # one.
                    pass

            # If we make it through the loop without establishing our
            # connection, then there are not enough endpoints listed in the
            # config for the number of processes attempting to use fedmsg.
            if not _established:
                raise IOError(
                    "Couldn't find an available endpoint "
                    "for name %r" % config.get("name", None))

        elif config.get('mute', False):
            # Our caller doesn't intend to send any messages.  Pass silently.
            pass
        else:
            # Something is wrong.
            warnings.warn(
                "fedmsg is not configured to send any messages "
                "for name %r" % config.get("name", None))

        # Cleanup.  See http://bit.ly/SaGeOr for discussion.
        weakref.ref(threading.current_thread(), self.destroy)

        # Sleep just to make sure that the socket gets set up before anyone
        # tries anything.  This is a documented zmq 'feature'.
        time.sleep(config['post_init_sleep'])
Example #24
0
    def tail_messages(self, topic="", passive=False, **kw):
        """ Tail messages on the bus.

        Generator that yields tuples of the form:
        ``(name, endpoint, topic, message)``

        :param topic: zmq subscription prefix; the empty string matches
            every topic.
        :param passive: if True, ``bind`` to the configured endpoints
            instead of ``connect``-ing to them.
        """

        # TODO -- do the zmq_strict logic dance with "topic" here.
        # It is buried in moksha.hub, but we need it to work the same way
        # here.

        # TODO -- the 'passive' here and the 'active' are ambiguous.  They
        # don't actually mean the same thing.  This should be resolved.
        method = 'bind' if passive else 'connect'

        # Hostnames that failed DNS resolution; a set gives O(1) membership
        # tests and ensures we only warn once per host.
        failed_hostnames = set()
        subs = {}
        watched_names = {}
        # .items() (not the py2-only .iteritems()) works on both py2 and py3.
        for _name, endpoint_list in self.c['endpoints'].items():
            # Listify endpoint_list in case it is a single string
            endpoint_list = iterate(endpoint_list)
            for endpoint in endpoint_list:
                # First, some sanity checking.  zeromq will potentially
                # segfault if we don't do this check.
                hostname = endpoint.split(':')[1][2:]
                if hostname in failed_hostnames:
                    continue

                if hostname != '*':
                    try:
                        socket.gethostbyname_ex(hostname)
                    except socket.error:
                        # Catch only resolution failures (gaierror derives
                        # from socket.error); a bare except would also
                        # swallow KeyboardInterrupt/SystemExit.
                        failed_hostnames.add(hostname)
                        self.log.warn("Couldn't resolve %r" % hostname)
                        continue

                # OK, sanity checks pass.  Create the subscriber and connect.
                subscriber = self.context.socket(zmq.SUB)
                subscriber.setsockopt(zmq.SUBSCRIBE, topic)

                set_high_water_mark(subscriber, self.c)
                set_tcp_keepalive(subscriber, self.c)

                getattr(subscriber, method)(endpoint)
                subs[subscriber] = (_name, endpoint)
            if _name in self.c.get("replay_endpoints", {}):
                # At first we don't know where the sequence is at.
                watched_names[_name] = -1

        # Register the sockets we just built with a zmq Poller.
        poller = zmq.Poller()
        for subscriber in subs:
            poller.register(subscriber, zmq.POLLIN)

        # TODO -- what if user wants to pass in validate_signatures in **kw?
        validate = self.c.get('validate_signatures', False)

        # Poll that poller.  This is much more efficient than it used to be.
        try:
            while True:
                sockets = dict(poller.poll())
                for s in sockets:
                    _name, ep = subs[s]
                    _topic, message = s.recv_multipart()
                    msg = fedmsg.encoding.loads(message)
                    if not validate or fedmsg.crypto.validate(msg, **self.c):
                        # If there is even a slight chance of replay, use
                        # check_for_replay
                        if len(self.c.get('replay_endpoints', {})) > 0:
                            for m in check_for_replay(
                                    _name, watched_names,
                                    msg, self.c, self.context):

                                # Revalidate all the replayed messages.
                                if not validate or \
                                        fedmsg.crypto.validate(m, **self.c):
                                    yield _name, ep, m['topic'], m
                                else:
                                    warnings.warn("!! invalid message " +
                                                  "received: %r" % msg)
                        else:
                            yield _name, ep, _topic, msg
                    else:
                        # Else.. we are supposed to be validating, but the
                        # message failed validation.

                        # Warn, but don't throw an exception.  Keep tailing.
                        warnings.warn("!! invalid message received: %r" % msg)

        finally:
            # Always close the subscriber sockets, even when the generator
            # is abandoned (GeneratorExit) or an error escapes the loop.
            for subscriber in subs:
                subscriber.close()
Example #25
0
    def __init__(self, **config):
        """ Build a fedmsg context.

        Creates the zmq context and, unless the caller is muted or
        anonymous, a PUB socket attached to the first available endpoint
        configured for this name.

        :raises KeyError: if ``active=True`` but no relay endpoint is
            configured.
        :raises IOError: if none of the configured endpoints could be
            established.
        """
        super(FedMsgContext, self).__init__()
        self.log = logging.getLogger("fedmsg")

        self.c = config
        self.hostname = socket.gethostname().split('.', 1)[0]

        # Prepare our context and publisher
        self.context = zmq.Context(config['io_threads'])
        # Active producers 'connect' (to the relay); passive ones 'bind'.
        # Use .get() so a missing 'active' key defaults to False, matching
        # every other read of that option in this method (the old
        # ['bind', 'connect'][config['active']] raised KeyError instead).
        method = 'connect' if config.get('active', False) else 'bind'

        # If no name is provided, use the calling module's __name__ to decide
        # which publishing endpoint to use (unless active=True, in which case
        # we use "relay_inbound" as set in the subsequent code block).
        if not config.get("name", None) and not config.get('active', False):
            module_name = guess_calling_module(default="fedmsg")
            config["name"] = module_name + '.' + self.hostname

            # Calls originating from fedmsg itself get no name (and thus
            # no publisher is set up below).
            if config["name"].startswith('fedmsg'):
                config["name"] = None

        # Do a little special-case mangling.  We never want to "listen" to the
        # relay_inbound address, but in the special case that we want to emit
        # our messages there, we add it to the :term:`endpoints` dict so that
        # the code below where we "Actually set up our publisher" can be
        # simplified.  See Issue #37 - https://bit.ly/KN6dEK
        if config.get('active', False):
            try:
                name = config['name'] = config.get("name", "relay_inbound")
                config['endpoints'][name] = config[name]
            except KeyError:
                raise KeyError("Could not find endpoint for fedmsg-relay."
                               " Try installing fedmsg-relay.")

        # Actually set up our publisher
        if (not config.get("mute", False) and config.get("name", None)
                and config.get("endpoints", None)
                and config['endpoints'].get(config['name'])):
            # Construct it.
            self.publisher = self.context.socket(zmq.PUB)

            set_high_water_mark(self.publisher, config)
            set_tcp_keepalive(self.publisher, config)

            # Set a zmq_linger, thus doing a little bit more to ensure that our
            # message gets to the fedmsg-relay (*if* we're talking to the relay
            # which is the case when method == 'connect').
            if method == 'connect':
                self.publisher.setsockopt(zmq.LINGER, config['zmq_linger'])

            # "Listify" our endpoints.  If we're given a list, good.  If we're
            # given a single item, turn it into a list of length 1.
            config['endpoints'][config['name']] = list(
                iterate(config['endpoints'][config['name']]))

            # Try endpoint after endpoint in the list of endpoints.  If we
            # succeed in establishing one, then stop.  *That* is our publishing
            # endpoint.
            _established = False
            for endpoint in config['endpoints'][config['name']]:
                self.log.debug("Trying to %s to %s" % (method, endpoint))
                if method == 'bind':
                    endpoint = "tcp://*:{port}".format(
                        port=endpoint.rsplit(':')[-1])

                try:
                    # Call either bind or connect on the new publisher.
                    # This will raise an exception if there's another process
                    # already using the endpoint.
                    getattr(self.publisher, method)(endpoint)
                    # If we can do this successfully, then stop trying.
                    _established = True
                    break
                except zmq.ZMQError:
                    # If we fail to bind or connect, there's probably another
                    # process already using that endpoint port.  Try the next
                    # one.
                    pass

            # If we make it through the loop without establishing our
            # connection, then there are not enough endpoints listed in the
            # config for the number of processes attempting to use fedmsg.
            if not _established:
                raise IOError("Couldn't find an available endpoint "
                              "for name %r" % config.get("name", None))

        elif config.get('mute', False):
            # Our caller doesn't intend to send any messages.  Pass silently.
            pass
        else:
            # Something is wrong.
            warnings.warn("fedmsg is not configured to send any messages "
                          "for name %r" % config.get("name", None))

        # Cleanup.  See https://bit.ly/SaGeOr for discussion.
        weakref.ref(threading.current_thread(), self.destroy)

        # Sleep just to make sure that the socket gets set up before anyone
        # tries anything.  This is a documented zmq 'feature'.
        time.sleep(config['post_init_sleep'])
Example #26
0
    def tail_messages(self, topic="", passive=False, **kw):
        """ Tail messages on the bus.

        Generator that yields tuples of the form:
        ``(name, endpoint, topic, message)``

        :param topic: zmq subscription prefix; the empty string matches
            every topic.
        :param passive: if True, ``bind`` to the configured endpoints
            instead of ``connect``-ing to them.
        """

        # TODO -- do the zmq_strict logic dance with "topic" here.
        # It is buried in moksha.hub, but we need it to work the same way
        # here.

        # TODO -- the 'passive' here and the 'active' are ambiguous.  They
        # don't actually mean the same thing.  This should be resolved.
        method = 'bind' if passive else 'connect'

        # Hostnames that failed DNS resolution; a set gives O(1) membership
        # tests and ensures we only warn once per host.
        failed_hostnames = set()
        subs = {}
        watched_names = {}
        for _name, endpoint_list in six.iteritems(self.c['endpoints']):

            # You never want to actually subscribe to this thing, but sometimes
            # it appears in the endpoints list due to a hack where it gets
            # added in __init__ above.
            if _name == 'relay_inbound':
                continue

            # Listify endpoint_list in case it is a single string
            endpoint_list = iterate(endpoint_list)
            for endpoint in endpoint_list:
                # First, some sanity checking.  zeromq will potentially
                # segfault if we don't do this check.
                hostname = endpoint.split(':')[1][2:]
                if hostname in failed_hostnames:
                    continue

                if hostname != '*':
                    try:
                        socket.gethostbyname_ex(hostname)
                    except socket.error:
                        # Catch only resolution failures (gaierror derives
                        # from socket.error); a bare except would also
                        # swallow KeyboardInterrupt/SystemExit.
                        failed_hostnames.add(hostname)
                        self.log.warn("Couldn't resolve %r" % hostname)
                        continue

                # OK, sanity checks pass.  Create the subscriber and connect.
                subscriber = self.context.socket(zmq.SUB)
                subscriber.setsockopt(zmq.SUBSCRIBE, topic)

                set_high_water_mark(subscriber, self.c)
                set_tcp_keepalive(subscriber, self.c)
                set_tcp_reconnect(subscriber, self.c)

                getattr(subscriber, method)(endpoint)
                subs[subscriber] = (_name, endpoint)
            if _name in self.c.get("replay_endpoints", {}):
                # At first we don't know where the sequence is at.
                watched_names[_name] = -1

        # Register the sockets we just built with a zmq Poller.
        poller = zmq.Poller()
        for subscriber in subs:
            poller.register(subscriber, zmq.POLLIN)

        # TODO -- what if user wants to pass in validate_signatures in **kw?
        validate = self.c.get('validate_signatures', False)

        # Poll that poller.  This is much more efficient than it used to be.
        try:
            while True:
                sockets = dict(poller.poll())
                for s in sockets:
                    _name, ep = subs[s]
                    _topic, message = s.recv_multipart()
                    msg = fedmsg.encoding.loads(message)
                    if not validate or fedmsg.crypto.validate(msg, **self.c):
                        # If there is even a slight chance of replay, use
                        # check_for_replay
                        if len(self.c.get('replay_endpoints', {})) > 0:
                            for m in check_for_replay(_name, watched_names,
                                                      msg, self.c,
                                                      self.context):

                                # Revalidate all the replayed messages.
                                if not validate or \
                                        fedmsg.crypto.validate(m, **self.c):
                                    yield _name, ep, m['topic'], m
                                else:
                                    warnings.warn("!! invalid message " +
                                                  "received: %r" % msg)
                        else:
                            yield _name, ep, _topic, msg
                    else:
                        # Else.. we are supposed to be validating, but the
                        # message failed validation.

                        # Warn, but don't throw an exception.  Keep tailing.
                        warnings.warn("!! invalid message received: %r" % msg)

        finally:
            # Always close the subscriber sockets, even when the generator
            # is abandoned (GeneratorExit) or an error escapes the loop.
            for subscriber in subs:
                subscriber.close()
Example #27
0
def load_config(extra_args=None,
                doc=None,
                filenames=None,
                invalidate_cache=False,
                fedmsg_command=False,
                disable_defaults=False):
    """ Setup a runtime config dict by integrating the following sources
    (ordered by precedence):

      - defaults (unless disable_defaults = True)
      - config file
      - command line arguments

    If the ``fedmsg_command`` argument is False, no command line arguments are
    checked.

    """
    global __cache

    if invalidate_cache:
        __cache = {}

    # Serve repeated calls from the module-level cache.
    if __cache:
        return __cache

    # Coerce defaults if arguments are not supplied.
    extra_args = extra_args or []
    doc = doc or ""

    if not disable_defaults:
        config = copy.deepcopy(defaults)
    else:
        config = {}

    config.update(_process_config_file(filenames=filenames))

    # This is optional (and defaults to false) so that only 'fedmsg-*' commands
    # are required to provide these arguments.
    # For instance, the moksha-hub command takes a '-v' argument and internally
    # makes calls to fedmsg.  We don't want to impose all of fedmsg's CLI
    # option constraints on programs that use fedmsg, so we make it optional.
    if fedmsg_command:
        config.update(_process_arguments(extra_args, doc, config))

    # If the user specified a config file on the command line, then start over
    # but read in that file instead.
    # NOTE(review): fedmsg_command and disable_defaults are not forwarded on
    # this recursive call -- presumably intentional, but confirm before
    # changing.
    if not filenames and config.get('config_filename', None):
        return load_config(extra_args, doc,
                           filenames=[config['config_filename']])

    # Just a little debug option.  :)
    if config.get('print_config'):
        print(pretty_dumps(config))
        sys.exit(0)

    if config.get('environment', 'prod') not in VALID_ENVIRONMENTS:
        raise ValueError("%r not one of %r" % (
            config['environment'], VALID_ENVIRONMENTS))

    if not disable_defaults and 'endpoints' not in config:
        raise ValueError("No config value 'endpoints' found.")

    if not isinstance(config.get('endpoints', {}), dict):
        raise ValueError("The 'endpoint' config value must be a dict.")

    # "Listify" every endpoint value; a lone string becomes a one-item list.
    # (Generator instead of an intermediate list -- no throwaway allocation.)
    if 'endpoints' in config:
        config['endpoints'] = dict(
            (k, list(iterate(v))) for k, v in config['endpoints'].items()
        )

    # Resolve any DNS SRV endpoints into concrete tcp:// URLs.
    if config.get('srv_endpoints'):
        from dns.resolver import query, NXDOMAIN, Timeout, NoNameservers
        for e in config['srv_endpoints']:
            urls = []
            try:
                records = query('_fedmsg._tcp.{0}'.format(e), 'SRV')
            except NXDOMAIN:
                warnings.warn("There is no appropriate SRV records " +
                              "for {0}".format(e))
                continue
            except Timeout:
                warnings.warn("The DNS query for the SRV records of" +
                              " {0} timed out.".format(e))
                continue
            except NoNameservers:
                # Without a resolver, no further SRV lookups can succeed.
                warnings.warn("No name server is available, please " +
                              "check the configuration")
                break

            for rec in records:
                urls.append('tcp://{hostname}:{port}'.format(
                    hostname=rec.target.to_text(),
                    port=rec.port
                ))
            config['endpoints'][e] = list(iterate(urls))

    if 'topic_prefix_re' not in config and 'topic_prefix' in config:
        # Turn "org.fedoraproject" into "org\.fedoraproject\.(dev|stg|prod)"
        # Raw strings avoid the invalid/deprecated '\.' escape sequence.
        config['topic_prefix_re'] = config['topic_prefix'].replace('.', r'\.')\
            + r'\.(%s)' % '|'.join(VALID_ENVIRONMENTS)

    __cache = config
    return config
Example #28
0
    def __init__(self, **config):
        """ Build a fedmsg context.

        Creates the zmq context and, unless the caller is muted or
        anonymous, a PUB socket attached to the first available endpoint
        configured for this name.

        :raises IOError: if none of the configured endpoints could be
            established.
        """
        super(FedMsgContext, self).__init__()

        self.c = config
        self.hostname = socket.gethostname().split('.', 1)[0]

        # Prepare our context and publisher
        self.context = zmq.Context(config['io_threads'])
        # Active producers 'connect' (to a relay); passive ones 'bind'.
        # (The previous `config.get('active', False) or 'bind' and 'connect'`
        # line was dead code -- its result was immediately overwritten and
        # its operator precedence made it wrong anyway.)  Use .get() so a
        # missing 'active' key defaults to False, matching the read below.
        method = 'connect' if config.get('active', False) else 'bind'

        # If no name is provided, use the calling module's __name__ to decide
        # which publishing endpoint to use.
        if not config.get("name", None):
            module_name = self.guess_calling_module(default="fedmsg")
            config["name"] = module_name + '.' + self.hostname

            # Calls originating from fedmsg itself get no name (and thus
            # no publisher is set up below).
            if config["name"].startswith('fedmsg'):
                config["name"] = None

        # Find my message-signing cert if I need one.
        if self.c.get('sign_messages', False) and config.get("name"):
            if 'cert_prefix' in config:
                cert_index = "%s.%s" % (config['cert_prefix'], self.hostname)
            else:
                cert_index = config['name']
                # The relay signs as the local "shell" identity.
                if cert_index == 'relay_inbound':
                    cert_index = "shell.%s" % self.hostname

            self.c['certname'] = self.c['certnames'][cert_index]

        # Do a little special-case mangling.  We never want to "listen" to the
        # relay_inbound address, but in the special case that we want to emit
        # our messages there, we add it to the :term:`endpoints` dict so that
        # the code below where we "Actually set up our publisher" can be
        # simplified.  See Issue #37 - http://bit.ly/KN6dEK
        if config.get('active', False):
            # If the user has called us with "active=True" then presumably they
            # have given us a "name" as well.
            name = config.get("name", "relay_inbound")
            config['endpoints'][name] = config[name]

        # Actually set up our publisher
        if (not config.get("mute", False) and config.get("name", None)
                and config.get("endpoints", None)
                and config['endpoints'].get(config['name'])):
            self.publisher = self.context.socket(zmq.PUB)

            if config['high_water_mark']:
                self.publisher.setsockopt(zmq.HWM, config['high_water_mark'])

            # Linger a little on close so in-flight messages still reach the
            # relay (only relevant when we 'connect', i.e. talk to the relay).
            if method == 'connect':
                self.publisher.setsockopt(zmq.LINGER, config['zmq_linger'])

            # "Listify" our endpoints; a lone string becomes a one-item list.
            config['endpoints'][config['name']] = list(
                iterate(config['endpoints'][config['name']]))

            # Try endpoint after endpoint; the first one that succeeds is our
            # publishing endpoint.
            _established = False
            for endpoint in config['endpoints'][config['name']]:

                if method == 'bind':
                    # Rebind on all interfaces, keeping only the port.
                    endpoint = "tcp://*:{port}".format(
                        port=endpoint.rsplit(':')[-1])

                try:
                    # Call either bind or connect on the new publisher.
                    # This will raise an exception if there's another process
                    # already using the endpoint.
                    getattr(self.publisher, method)(endpoint)
                    # If we can do this successfully, then stop trying.
                    _established = True
                    break
                except zmq.ZMQError:
                    # If we fail to bind or connect, there's probably another
                    # process already using that endpoint port.  Try the next
                    # one.
                    pass

            # If we make it through the loop without establishing our
            # connection, then there are not enough endpoints listed in the
            # config for the number of processes attempting to use fedmsg.
            if not _established:
                raise IOError("Couldn't find an available endpoint.")

        elif config.get('mute', False):
            # Our caller doesn't intend to send any messages.  Pass silently.
            pass
        else:
            # Something is wrong.
            warnings.warn("fedmsg is not configured to send any messages")

        # Cleanup.  See http://bit.ly/SaGeOr for discussion.
        weakref.ref(threading.current_thread(), self.destroy)

        # Sleep just to make sure that the socket gets set up before anyone
        # tries anything.  This is a documented zmq 'feature'.
        time.sleep(config['post_init_sleep'])
Example #29
0
        if expiration:
            if datetime.utcnow() > expiration:
                flash('Cannot set an expiration in the past')
                if request_format() == 'json': return dict()
                raise redirect('/override/new')

        try:
            koji = get_session()
        except Exception, e:
            flash('Unable to connect to Koji')
            if request_format() == 'json':
                return dict()
            raise redirect('/override/new')

        for build in iterate(builds):
            release = None
            n, v, r = get_nvr(build)

            # Make sure the build is tagged correctly
            try:
                tags = [tag['name'] for tag in koji.listTags(build)]
            except Exception, e:
                flash(str(e))
                if request_format() == 'json': return dict()
                raise redirect('/override/new')

            # Determine the release by the tag, and sanity check the builds
            for tag in tags:
                for rel in Release.select():
                    if tag in (rel.candidate_tag, rel.testing_tag):
Example #30
0
def websocket_subscribe(topic):
    """ Return a javascript callback that subscribes to a given topic,
        or a list of topics.
    """
    snippet = "moksha.topic_subscribe('%(topic)s');"
    parts = []
    for t in iterate(topic):
        parts.append(snippet % {'topic': t})
    return ''.join(parts)
Example #31
0
        if expiration:
            if datetime.utcnow() > expiration:
                flash("Cannot set an expiration in the past")
                if request_format() == "json":
                    return dict()
                raise redirect("/override/new")

        try:
            koji = get_session()
        except Exception, e:
            flash("Unable to connect to Koji")
            if request_format() == "json":
                return dict()
            raise redirect("/override/new")

        for build in iterate(builds):
            release = None
            n, v, r = get_nvr(build)

            # Make sure the build is tagged correctly
            try:
                tags = [tag["name"] for tag in koji.listTags(build)]
            except Exception, e:
                flash(str(e))
                if request_format() == "json":
                    return dict()
                raise redirect("/override/new")

            # Determine the release by the tag, and sanity check the builds
            for tag in tags:
                for rel in Release.select():