Code example #1
import logging

import markus


def run(args):
    from django.conf import settings
    import django
    django.setup()

    markus_backends = [{
        'class': 'markus.backends.logging.LoggingMetrics',
        'options': {
            'logger_name': 'markus',
            'leader': 'ELMO_METRICS',
        }
    }]
    if hasattr(settings, 'DATADOG_NAMESPACE'):
        markus_backends.append({
            'class': 'markus.backends.datadog.DatadogMetrics',
            'options': {
                'statsd_namespace': settings.DATADOG_NAMESPACE
            }
        })
    markus.configure(markus_backends)
    logging.getLogger('markus').setLevel(logging.INFO)

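    # `Connection` and `Worker` are assumed to be imported elsewhere in this
    # module (e.g. kombu's Connection and a project-specific Worker class).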
    with Connection(settings.TRANSPORT) as conn:
        try:
            Worker(conn, settings).run()
        except KeyboardInterrupt:
            print('bye bye')
Code example #2
File: socorro_app.py Project: rocketsroger/socorro
import markus


def setup_metrics(config):
    """Set up Markus."""
    backends = []

    for backend in config.metricscfg.markus_backends:
        if backend == "markus.backends.statsd.StatsdMetrics":
            backends.append({
                "class": "markus.backends.statsd.StatsdMetrics",
                "options": {
                    "statsd_host": config.metricscfg.statsd_host,
                    "statsd_port": config.metricscfg.statsd_port,
                },
            })
        elif backend == "markus.backends.datadog.DatadogMetrics":
            backends.append({
                "class": "markus.backends.datadog.DatadogMetrics",
                "options": {
                    "statsd_host": config.metricscfg.statsd_host,
                    "statsd_port": config.metricscfg.statsd_port,
                },
            })
        elif backend == "markus.backends.logging.LoggingMetrics":
            backends.append(
                {"class": "markus.backends.logging.LoggingMetrics"})
        else:
            raise ValueError('Invalid markus backend "%s"' % backend)

    markus.configure(backends=backends)
Code example #3
File: apps.py Project: uglide/socorro
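    # Excerpt from a Django AppConfig.ready(); assumes module-level
    # `import markus` and `from django.conf import settings`.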
    def ready(self):
        if settings.LOCAL_DEV_ENV:
            # If we're in the local development environment, then use the
            # logging and statsd backends
            backends = [{
                'class': 'markus.backends.logging.LoggingMetrics',
            }, {
                'class': 'markus.backends.statsd.StatsdMetrics',
                'options': {
                    'statsd_host': settings.STATSD_HOST,
                    'statsd_port': settings.STATSD_PORT,
                    'statsd_prefix': settings.STATSD_PREFIX,
                }
            }]
        else:
            # Otherwise we're in a server environment and we use the datadog
            # backend there
            backends = [{
                # Log metrics to Datadog
                'class': 'markus.backends.datadog.DatadogMetrics',
                'options': {
                    'statsd_host': settings.STATSD_HOST,
                    'statsd_port': settings.STATSD_PORT,
                    'statsd_namespace': settings.STATSD_PREFIX,
                }
            }]

        markus.configure(backends=backends)
Code example #4
File: socorro_app.py Project: stephendonner/socorro
import markus


def setup_metrics(config, local_unused, args_unused):
    """Set up markus and return a metrics client.

    :returns: Markus MetricsInterface

    """
    backends = []

    for backend in config.metricscfg.markus_backends:
        if backend == 'markus.backends.statsd.StatsdMetrics':
            backends.append({
                'class': 'markus.backends.statsd.StatsdMetrics',
                'options': {
                    'statsd_host': config.metricscfg.statsd_host,
                    'statsd_port': config.metricscfg.statsd_port,
                }
            })
        elif backend == 'markus.backends.datadog.DatadogMetrics':
            backends.append({
                'class': 'markus.backends.datadog.DatadogMetrics',
                'options': {
                    'statsd_host': config.metricscfg.statsd_host,
                    'statsd_port': config.metricscfg.statsd_port,
                }
            })
        elif backend == 'markus.backends.logging.LoggingMetrics':
            backends.append({
                'class': 'markus.backends.logging.LoggingMetrics',
            })
        else:
            raise ValueError('Invalid markus backend "%s"' % backend)

    markus.configure(backends=backends)

    return markus.get_metrics('')
Code example #5
File: apps.py Project: stephendonner/socorro
    def ready(self):
        if settings.LOCAL_DEV_ENV:
            # If we're in the local development environment, then use the
            # logging and statsd backends
            backends = [
                {
                    'class': 'markus.backends.logging.LoggingMetrics',
                },
                {
                    'class': 'markus.backends.statsd.StatsdMetrics',
                    'options': {
                        'statsd_host': settings.STATSD_HOST,
                        'statsd_port': settings.STATSD_PORT,
                        'statsd_prefix': settings.STATSD_PREFIX,
                    }
                }
            ]
        else:
            # Otherwise we're in a server environment and we use the datadog
            # backend there
            backends = [
                {
                    # Log metrics to Datadog
                    'class': 'markus.backends.datadog.DatadogMetrics',
                    'options': {
                        'statsd_host': settings.STATSD_HOST,
                        'statsd_port': settings.STATSD_PORT,
                        'statsd_namespace': settings.STATSD_PREFIX,
                    }
                }
            ]

        markus.configure(backends=backends)
Code example #6
File: test_mock.py Project: bradykieffer/markus
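    # Assumes module-level `import markus` and
    # `from markus.testing import MetricsMock`.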
    def test_configure_doesnt_affect_override(self):
        with MetricsMock() as mm:
            markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}])
            mymetrics = markus.get_metrics("foobar")
            mymetrics.incr("key1", value=1)

            assert mm.has_record(fun_name="incr", stat="foobar.key1", value=1)

            assert not mm.has_record(fun_name="incr", stat="foobar.key1", value=5)
Code example #7
File: app.py Project: mozilla/antenna
import markus


def setup_metrics(metrics_classes, config, logger=None):
    """Initialize and configure the metrics system."""
    logger.info('Setting up metrics: %s', metrics_classes)

    markus_configuration = []
    for cls in metrics_classes:
        backend = cls(config)
        log_config(logger, backend)
        markus_configuration.append(backend.to_markus())

    markus.configure(markus_configuration)
Code example #8
import markus


def setup_metrics(statsd_host):
    markus.configure(backends=[{
        # Log metrics to statsd, then to telegraf, then to influx.
        # We use the datadog backend class to get support for tags and
        # histograms:
        # https://github.com/willkg/markus/blob/master/markus/backends/statsd.py#L58-L64
        'class': 'markus.backends.datadog.DatadogMetrics',
        'options': {
            'statsd_host': statsd_host,
            'statsd_port': 8125,
            'statsd_namespace': 'influx'
        }
    }])
Code example #9
File: conftest.py Project: kutayzorlu/antenna
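# Assumes module-level imports: markus, everett's ConfigManager, and the
# app's setup_logging / reset_hb_funs helpers.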
def pytest_runtest_setup():
    # Make sure we set up logging and metrics to sane default values.
    setup_logging(
        ConfigManager.from_dict({
            "HOST_ID": "",
            "LOGGING_LEVEL": "DEBUG",
            "LOCAL_DEV_ENV": "True"
        }))
    markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}])

    # Wipe any registered heartbeat functions
    reset_hb_funs()
Code example #10
File: conftest.py Project: mozilla/antenna
def pytest_runtest_setup():
    # Make sure we set up logging and metrics to sane default values.
    setup_logging(ConfigManager.from_dict({
        'HOST_ID': '',
        'LOGGING_LEVEL': 'DEBUG',
        'LOCAL_DEV_ENV': 'False',
    }))
    markus.configure([
        {'class': 'markus.backends.logging.LoggingMetrics'}
    ])

    # Wipe any registered heartbeat functions
    reset_hb_funs()
Code example #11
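# Assumed module-level context for this excerpt: `import markus`, a
# decouple-style `config` helper, and `_configured = False`.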
def get_metrics(namespace):
    global _configured
    if not _configured:
        STATSD_HOST = config('STATSD_HOST', 'localhost')
        STATSD_PORT = config('STATSD_PORT', default=8125)
        STATSD_NAMESPACE = config('STATSD_NAMESPACE', default='')

        FILE_METRICS_BASE_DIR = config('MARKUS_FILE_METRICS_BASE_DIR',
                                       default='/tmp')

        # For more options see
        # http://markus.readthedocs.io/en/latest/usage.html#markus-configure
        log_metrics_config = config('LOG_METRICS', default='datadog')
        if log_metrics_config == 'logging':
            markus.configure([{
                'class': 'markus.backends.logging.LoggingMetrics',
                'options': {
                    'logger_name': 'metrics'
                }
            }])
        elif log_metrics_config == 'cloudwatch':
            markus.configure([{
                'class': 'markus.backends.cloudwatch.CloudwatchMetrics',
            }])
        elif log_metrics_config == 'datadog':
            markus.configure([{
                'class': 'markus.backends.datadog.DatadogMetrics',
                'options': {
                    'statsd_host': STATSD_HOST,
                    'statsd_port': STATSD_PORT,
                    'statsd_namespace': STATSD_NAMESPACE,
                }
            }])
        elif log_metrics_config == 'void':
            markus.configure([{
                'class': 'buildhub.configure_markus.VoidMetrics',
            }])
        elif log_metrics_config == 'file':
            markus.configure([{
                'class': 'buildhub.configure_markus.FileMetrics',
                'options': {
                    'base_dir': FILE_METRICS_BASE_DIR,
                }
            }])
        else:
            raise NotImplementedError(
                f'Unrecognized LOG_METRICS value {log_metrics_config}')
        _configured = True

    return markus.get_metrics(namespace)
Code example #12
File: test_mock.py Project: bradykieffer/markus
    def test_histogram_helpers(self):
        with MetricsMock() as mm:
            markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}])
            mymetrics = markus.get_metrics("foobar")
            mymetrics.histogram("key1", value=1)
            mymetrics.histogram("keymultiple", value=1)
            mymetrics.histogram("keymultiple", value=1)

            mm.assert_histogram(stat="foobar.key1")

            mm.assert_histogram_once(stat="foobar.key1")
            with pytest.raises(AssertionError):
                mm.assert_histogram_once(stat="foobar.keymultiple")

            mm.assert_not_histogram(stat="foobar.keynot")
            mm.assert_not_histogram(stat="foobar.key1", value=5)
            with pytest.raises(AssertionError):
                mm.assert_not_histogram(stat="foobar.key1")
Code example #13
File: test_mock.py Project: willkg/markus
    def test_print_on_failure(self, capsys):
        with MetricsMock() as mm:
            markus.configure([{"class": "markus.backends.logging.LoggingMetrics"}])
            mymetrics = markus.get_metrics("foobar")
            mymetrics.histogram("keymultiple", value=1)
            mymetrics.histogram("keymultiple", value=1)

            with pytest.raises(AssertionError):
                mm.assert_histogram_once(stat="foobar.keymultiple")

            # On assertion error, the assert_* methods will print the metrics
            # records to stdout.
            captured = capsys.readouterr()
            expected = (
                "<MetricsRecord type=histogram key=foobar.keymultiple value=1 tags=[]>\n"
                "<MetricsRecord type=histogram key=foobar.keymultiple value=1 tags=[]>\n"
            )
            assert captured.out == expected
Code example #14
File: metrics.py Project: mythmon/normandy
import markus

from django.conf import settings


def register():
    backends = []

    if settings.METRICS_USE_DEBUG_LOGS:
        backends.append({"class": "normandy.base.metrics.DebugLogsBackend"})

    if settings.METRICS_USE_STATSD:
        backends.append(
            {
                "class": "markus.backends.datadog.DatadogMetrics",
                "options": {
                    "statsd_host": settings.METRICS_STATSD_HOST,
                    "statsd_port": settings.METRICS_STATSD_PORT,
                    "statsd_namespace": settings.METRICS_STATSD_NAMESPACE,
                },
            }
        )

    markus.configure(backends=backends)
Code example #15
File: log.py Project: helfi92/ichnaea
import logging

import markus


def configure_stats():
    """Configure Markus for metrics."""
    local_dev_env = settings("local_dev_env")
    if local_dev_env:
        markus.configure(
            backends=[{
                "class": "markus.backends.logging.LoggingMetrics"
            }])
        return

    if settings("statsd_host"):
        markus.configure(backends=[{
            "class": "markus.backends.datadog.DatadogMetrics",
            "options": {
                "statsd_host": settings("statsd_host"),
                "statsd_port": settings("statsd_port"),
                "statsd_namespace": "location",
            },
        }])
    else:
        logging.getLogger(__name__).warning(
            "STATSD_HOST not set; no statsd configured")
Code example #16
File: metrics.py Project: 10allday-services/autopush
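    # Excerpt from autopush's metrics class; assumes module-level
    # `import markus` and `import threading`.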
    def __init__(self,
                 hostname,
                 statsd_host=None,
                 statsd_port=None,
                 namespace="autopush",
                 flush_interval=10):
        markus.configure(backends=[{
            'class': 'markus.backends.datadog.DatadogMetrics',
            'options': {
                'statsd_host': statsd_host,
                'statsd_port': statsd_port,
            }
        }])
        self._client = markus.get_metrics(namespace)
        self._host = hostname
        self._namespace = namespace

        self._metrics = []
        self._flush_interval = flush_interval
        self._thread = None
        self._lock = threading.RLock()

        self.start()
Code example #17
import click
import markus
from markus.backends import BackendBase

import requests

# Maximum number of retry attempts
MAX_ATTEMPTS = 5

# Number of seconds to wait for a response from server
CONNECTION_TIMEOUT = 60


class StdoutMetrics(BackendBase):
    def emit(self, record):
        click.echo("Elapsed time: %s %s %s" %
                   (record.stat_type, record.key, record.value))


markus.configure([{"class": StdoutMetrics}], raise_errors=True)
METRICS = markus.get_metrics()


@click.command()
@click.option(
    "--base-url",
    default="https://symbols.mozilla.org/",
    help="Base url to use for uploading SYM files.",
)
@click.option(
    "--auth-token",
    required=True,
    help="Auth token for uploading SYM files.",
)
@click.argument("symbolsfile")
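# NOTE: the command function decorated by the click options above is
# omitted from this excerpt.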
Code example #18
File: __init__.py Project: Pike/elmo
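# Excerpt from elmo's Django settings module; assumes `import os`,
# `import markus`, and an AUTHENTICATION_BACKENDS list defined earlier.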
# check ldap config
if all('LDAP_{}'.format(s) in globals() for s in ('HOST', 'DN', 'PASSWORD')):
    import ldap  # noqa
    AUTHENTICATION_BACKENDS.append('lib.auth.backends.MozLdapBackend')
if not AUTHENTICATION_BACKENDS:
    import warnings
    warnings.warn("No authentication")

# hook up markus to datadog, if set
if (
        'ELMO_DATADOG_NAMESPACE' in os.environ
        and os.environ['ELMO_DATADOG_NAMESPACE']
):
    markus.configure(backends=[{
        'class': 'markus.backends.datadog.DatadogMetrics',
        'options': {
            'statsd_namespace': os.environ['ELMO_DATADOG_NAMESPACE']
        }
    }])

# generic django settings, good for DEBUG etc
boolmapper = {
    'true': True,
    '1': True,
    'false': False,
    '0': False,
}
for key, value in os.environ.items():
    if not key.startswith('DJANGO_'):
        continue
    globals()[key[len('DJANGO_'):]] = boolmapper.get(value.lower(), value)
Code example #19
def _configure_markus():
    """Must be done once and only once."""
    markus.configure(settings.MARKUS_BACKENDS)
Code example #20
    def ready(self):
        # Import signals kicking off signal registration
        from crashstats.crashstats import signals  # noqa

        # Set up markus metrics
        markus.configure(backends=settings.MARKUS_BACKENDS)
Code example #21
    def ready(self):
        markus.configure(settings.MARKUS_BACKEND)
Code example #22
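# Excerpt from elmo's a10n hg poller. This function assumes many module-level
# imports from its surrounding file: json, logging, os, six, markus, Twisted
# helpers (defer, reactor, task, log, getPage, WebError), kombu (Connection,
# producers, maybe_declare), and project helpers such as pushback_iter,
# hg_queues, hg_exchange, and a module-level `metrics` client.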
def getPoller(options):
    os.environ["DJANGO_SETTINGS_MODULE"] = (options["settings"]
                                            or "a10n.settings")
    import django
    django.setup()
    from django.conf import settings
    from life.models import Repository, Forest, Locale
    from django.db import connection as db_connection
    from django.db.models import Max

    markus_backends = [{
        'class': 'markus.backends.logging.LoggingMetrics',
        'options': {
            'logger_name': 'markus',
            'leader': 'ELMO_METRICS',
        }
    }]
    if hasattr(settings, 'DATADOG_NAMESPACE'):
        markus_backends.append({
            'class': 'markus.backends.datadog.DatadogMetrics',
            'options': {
                'statsd_namespace': settings.DATADOG_NAMESPACE
            }
        })
    markus.configure(markus_backends)
    logging.getLogger('markus').setLevel(logging.INFO)

    class PushPoller(object):
        '''PushPoller stores the state of our cooperative iterator.

        The actual worker is poll().
        '''

        debug = False

        def __init__(self, opts):
            self.limit = int(opts.get('limit', 200))
            self.timeout = 10
            self.repos = []
            self.cache = {}
            self.moredata = {}
            self.latest_push = {}
            self.start_cycle = None
            self.sentry = None
            if hasattr(settings, 'RAVEN_CONFIG'):
                from raven import Client
                self.sentry = Client(**settings.RAVEN_CONFIG)

        def wait_for_empty_queue(self):
            connection = Connection(settings.TRANSPORT)
            d = defer.Deferred()

            def check():
                log.msg('Startup: Checking for empty queues')
                total_count = 0
                for queue in hg_queues:
                    try:
                        count = (queue.bind(connection).queue_declare(
                            passive=True).message_count)
                        log.msg('Queue ({}) size: {}'.format(
                            queue.name, count))
                    except Exception as e:
                        log.msg('Queue ({}) failed: {}'.format(
                            queue.name, repr(e)))
                        count = 1  # bump
                    total_count += count
                if total_count > 0:
                    log.msg('Not empty, waiting a sec')
                    reactor.callLater(1, check)
                else:
                    log.msg('All queues empty, starting to poll')
                    d.callback(None)

            reactor.callLater(0, check)
            return d

        def getURL(self, repo, limit):
            # If we haven't seen this repo yet
            # only store last-known-push if there
            # are existing pushes.
            # Otherwise, let's keep this unset
            # and use that to trigger the initial clone
            # after loading the initial json-pushes.
            # That will then set latest_push to something,
            # maybe even just 0 for an empty/unpushed repo.
            if repo.id not in self.latest_push:
                lkp = repo.last_known_push()
                if lkp:
                    self.latest_push[repo.id] = lkp
            else:
                lkp = self.latest_push[repo.id]
            return '%sjson-pushes?startID=%d&endID=%d' % \
                (repo.url, lkp, lkp + limit)

        def handlePushes(self, repo_id, submits):
            connection = Connection(settings.TRANSPORT)
            msg = {
                'type': 'hg-push',
                'repository_id': repo_id,
                'pushes': submits
            }
            try:
                log.msg("Going to connect to rmq")
                with producers[connection].acquire(block=True,
                                                   timeout=1) as producer:
                    log.msg("Connection acquired")
                    exchange = hg_exchange
                    if not exchange.is_bound:
                        exchange = exchange.bind(producer.channel)
                    maybe_declare(exchange, retry=True)
                    log.msg("Declared to rmq")
                    producer.publish(msg,
                                     exchange=exchange,
                                     retry=True,
                                     routing_key='hg')
                    log.msg("Published to rmq")
            except KeyboardInterrupt:
                raise
            except Exception:
                if self.sentry:
                    self.sentry.captureException()
                raise

        def poll(self):
            '''poll iterates over the repos and updates the local database.

            The actual updates are done in processPushes.

            The iterator stores the latest json from the upstream repos,
            and submits those on the next round. That way, we can add
            all pushes in that timewindow in order, which helps when
            actually polling the db to get the changes in
            chronological order.

            For repos that have more pushes than the current limit, we
            poll them again immediately to get further data.

            This iterator doesn't terminate, but gets killed together
            with the service.
            '''
            yield self.wait_for_empty_queue()
            # get our last-known push IDs for all non-archived repos
            self.latest_push.update(
                Repository.objects.filter(
                    archived=False, push__isnull=False).annotate(
                        last_push=Max('push__push_id')).values_list(
                            'id', 'last_push'))
            while True:
                n = datetime.utcnow()
                if self.start_cycle is not None:
                    lag = n - self.start_cycle
                    log.msg("Cycle took %d seconds" % lag.seconds)
                    metrics.gauge('cycle_time',
                                  lag.seconds * 1000 + lag.microseconds / 1000)
                self.start_cycle = n
                db_connection.close()
                repos = list(
                    Repository.objects.filter(forest__isnull=True,
                                              archived=False))
                nonarchived_forests = (Forest.objects.filter(archived=False))
                self.forests = pushback_iter(nonarchived_forests)
                for forest in self.forests:
                    url = str(forest.url + '?style=json')
                    d = getPage(url, timeout=self.timeout)
                    d.addCallback(self.gotForest, forest, repos)
                    d.addErrback(self.failedForest, forest)
                    yield d
                self.repos = pushback_iter(repos)
                for repo in self.repos:
                    d = None
                    if repo.id in self.cache:
                        pushes = self.cache.pop(repo.id)
                        self.processPushes(pushes, repo)
                        if pushes:
                            if self.debug:
                                log.msg(
                                    "Still have %s left for %s" %
                                    (", ".join(map(str, pushes)), repo.name))
                            self.cache[repo.id] = pushes
                            d = defer.succeed(None)
                    if d is None:
                        jsonurl = self.getURL(repo, self.limit)
                        if self.debug:
                            log.msg(jsonurl)
                        d = getPage(str(jsonurl), timeout=self.timeout)
                        d.addCallback(self.loadJSON, repo)
                        d.addErrback(self.jsonErr, repo)
                    yield d

        def loadJSON(self, page, repo):
            pushes = json.loads(page)
            if not pushes and repo.id in self.latest_push:
                # No new pushes to an existing repo
                return
            log.msg("%s got %d pushes" % (repo.name, len(pushes)))
            # convert pushes to sorted list
            if repo.id not in self.cache:
                self.cache[repo.id] = []
            # pushes maps string keys to pushes, we want to order by number
            push_blobs = [
                dict(list(pushes[id].items()) + [('id', int(id))])
                for id in six.iterkeys(pushes)
            ]
            push_blobs.sort(key=lambda blob: blob['id'])
            self.cache[repo.id] += push_blobs
            if push_blobs:
                self.latest_push[repo.id] = push_blobs[-1]['id']
            else:
                self.latest_push[repo.id] = 0
            # signal to load more data if this push hit the limits
            if len(pushes) == self.limit:
                self.moredata[repo.id] = True

        def processPushes(self, pushes, repo):
            '''Process the pushes for the given repository.

            This also adds all pushes that are older than the newest
            push on this repo, in order. If the number of pushes reaches
            the limit, push the current repo back to get more changes.
            If we drain another repo's push cache, re-poll that repo.
            '''
            if len(pushes) == self.limit:
                self.repos.pushback(repo)
            if self.debug:
                log.msg("submitting %s to %s" %
                        (', '.join(map(str, pushes)), repo.name))

            tips = sorted(((id, p[0]['date'])
                           for id, p in six.iteritems(self.cache) if p),
                          key=lambda t: t[1])
            if not pushes:
                # This is a new repository, let's notify the hg worker
                # to get us a clone
                self.handlePushes(repo.id, [])
            while pushes:
                if tips and pushes[0]['date'] > tips[0][1]:
                    # other repos come first, get them done
                    other = self.cache[tips[0][0]]
                    if len(tips) > 1:
                        stopdate = min(pushes[0]['date'],
                                       self.cache[tips[1][0]][0]['date'])
                    else:
                        stopdate = pushes[0]['date']
                    i = 0
                    while i < len(other) and \
                            other[i]['date'] <= stopdate:
                        i += 1
                    submits = other[:i]
                    if self.debug:
                        log.msg("pushing %s to %d" %
                                (", ".join(map(str, submits)), tips[0][0]))
                    self.handlePushes(tips[0][0], submits)
                    del other[:i]
                    if not other:
                        # other repo is empty
                        self.cache.pop(tips[0][0])
                        if self.debug:
                            log.msg(('Repo id %d fully pushed as part of '
                                     'other repo') % tips[0][0])
                        # let's see if we need to load more
                        if tips[0][0] in self.moredata:
                            self.moredata.pop(tips[0][0])
                            other_repo = Repository.objects.get(id=tips[0][0])
                            self.repos.pushback(other_repo)
                            return
                    tips = sorted(
                        ((id, p[0]['date'])
                         for id, p in six.iteritems(self.cache) if p),
                        key=lambda t: t[1])
                else:
                    i = 0
                    if tips:
                        stopdate = self.cache[tips[0][0]][0]['date']
                        while i < len(pushes) and \
                                pushes[i]['date'] <= stopdate:
                            i += 1
                    else:
                        i = len(pushes)
                    submits = pushes[:i]
                    if self.debug:
                        log.msg("pushing %s to %d" %
                                (", ".join(map(str, submits)), repo.id))
                    self.handlePushes(repo.id, submits)
                    del pushes[:i]

        def gotForest(self, page, forest, repos):
            entries = json.loads(page)['entries']
            locale_codes = [entry['name'] for entry in entries]
            q = forest.repositories.filter(locale__code__in=locale_codes)
            repos += list(q.filter(archived=False))
            known_locales = set(q.values_list('locale__code', flat=True))
            new_locales = set(locale_codes) - known_locales
            for locale in new_locales:
                locale, _ = Locale.objects.get_or_create(code=locale)
                name = forest.name + u'/' + locale.code
                url = u'%s%s/' % (forest.url, locale.code)
                if self.debug:
                    log.msg(u"adding %s: %s" % (name, url))
                # Forests are holding l10n repos, set locale
                r = Repository.objects.create(name=name,
                                              url=url,
                                              locale=locale,
                                              forest=forest)
                repos.append(r)
            # if there's a repo in the forest that's removed upstream,
            # it should be archived
            cnt = (forest.repositories.exclude(
                locale__code__in=locale_codes).exclude(archived=True).update(
                    archived=True))
            if cnt:
                log.msg(u"Archived %s repos in %s" % (cnt, forest.name))

        def failedForest(self, failure, forest):
            if failure.check(task.SchedulerStopped):
                failure.raiseException()
            log.err(failure, "failed to load %s" % forest.name)
            self.forests.pushback(forest)
            return self.backoff()

        def jsonErr(self, failure, repo):
            if failure.check(task.SchedulerStopped):
                failure.raiseException()
            if failure.check(WebError) and failure.value.status == '404':
                repo.archived = True
                repo.save()
                log.msg('Archived %s, it is no longer available upstream' %
                        repo.name)
                return
            log.err(failure,
                    "failed to load json for %s, adding back" % repo.name)
            self.repos.pushback(repo)
            return self.backoff()

        def backoff(self):
            # back off a little
            d = defer.Deferred()
            reactor.callLater(5, lambda: d.callback(None))
            return d

    pp = PushPoller(options)
    return pp.poll()
Code example #23
File: __init__.py Project: mozilla/elmo
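# Excerpt from elmo's Django settings; the first line continues a
# conditional (likely `if 'ELMO_MEMCACHED' in os.environ:`) omitted here.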
    CACHES['default']['LOCATION'] = os.environ['ELMO_MEMCACHED']

AUTHENTICATION_BACKENDS = []
if OIDC_DISABLE:
    # enable local users and passwords
    AUTHENTICATION_BACKENDS.append('django.contrib.auth.backends.ModelBackend')
else:
    # Add 'mozilla_django_oidc' authentication backend
    AUTHENTICATION_BACKENDS.append('lib.auth.backends.ElmoOIDCBackend')

# hook up markus to datadog, if set
if ('ELMO_DATADOG_NAMESPACE' in os.environ
        and os.environ['ELMO_DATADOG_NAMESPACE']):
    markus.configure(backends=[{
        'class': 'markus.backends.datadog.DatadogMetrics',
        'options': {
            'statsd_namespace': os.environ['ELMO_DATADOG_NAMESPACE']
        }
    }])

# create ES_KWARGS, if needed
if 'ES_COMPARE_HOST' in globals():
    import certifi
    ES_KWARGS = {
        'hosts': [globals()['ES_COMPARE_HOST']],
        'verify_certs': True,
        'ca_certs': certifi.where(),
    }

# generic django settings, good for DEBUG etc
boolmapper = {
    'true': True,
Code example #24
import markus


def configure_metrics():
    markus.configure(
        backends=get_markus_options()
    )
Code example #25
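# Excerpt from a Django settings module; it begins mid-assignment (a
# DRF_RENDERERS tuple) and assumes markus, sentry_sdk, and a decouple-style
# `config` helper, plus STATSD_* values defined earlier.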
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.JSONRenderer',
    )
else:
    DRF_RENDERERS = ('rest_framework.renderers.JSONRenderer', )

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES':
    ['rest_framework.permissions.IsAuthenticated'],
    'DEFAULT_RENDERER_CLASSES':
    DRF_RENDERERS,
}

sentry_sdk.init(dsn=config('SENTRY_DSN', None),
                integrations=[DjangoIntegration()],
                debug=DEBUG,
                with_locals=DEBUG)

markus.configure(backends=[{
    'class': 'markus.backends.datadog.DatadogMetrics',
    'options': {
        'statsd_host': STATSD_HOST,
        'statsd_port': STATSD_PORT,
        'statsd_prefix': STATSD_PREFIX,
    }
}])
Code example #26
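# Excerpt from a Flask plugin module (TAAR); assumes module-level imports
# such as json, markus, flask's request, sentry_sdk's capture_exception, and
# project helpers (AppSettings, ContextFilter, acquire_taar_singleton, etc.).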
def configure_plugin(app):  # noqa: C901
    """
    This is a factory function that configures all the routes for
    flask given a particular library.
    """

    markus.configure(backends=[{
        # Log metrics to a local statsd server using the
        # DatadogMetrics client.
        "class": "markus.backends.datadog.DatadogMetrics",
        "options": {
            "statsd_host": AppSettings.STATSD_HOST,
            "statsd_port": AppSettings.STATSD_PORT,
            "statsd_namespace": "",
        },
    }])

    @app.route("/taarlite/api/v1/addon_recommendations/<string:guid>/")
    def taarlite_recommendations(guid):
        """Return a list of recommendations provided a telemetry client_id."""
        # Use the module global PROXY_MANAGER
        global PROXY_MANAGER
        taarlite_recommender = acquire_taarlite_singleton(PROXY_MANAGER)

        cdict = {"guid": guid}
        normalization_type = request.args.get("normalize", None)
        if normalization_type is not None:
            cdict["normalize"] = normalization_type

        def set_extra(record):
            record.url = request.path
            record.guid = guid

        with ContextFilter(taarlite_recommender.logger, set_extra):
            recommendations = taarlite_recommender.recommend(
                client_data=cdict, limit=AppSettings.TAARLITE_MAX_RESULTS)

        if len(recommendations) != AppSettings.TAARLITE_MAX_RESULTS:
            recommendations = []

        # Strip out weights from TAAR results to maintain compatibility
        # with TAAR 1.0
        jdata = {"results": [x[0] for x in recommendations]}

        response = app.response_class(response=json.dumps(jdata),
                                      status=200,
                                      mimetype="application/json")
        return response

    @app.route(
        "/v1/api/client_has_addon/<hashed_client_id>/<addon_id>/",
        methods=["GET"],
    )
    def client_has_addon(hashed_client_id, addon_id):
        # Use the module global PROXY_MANAGER
        global PROXY_MANAGER
        recommendation_manager = acquire_taar_singleton(PROXY_MANAGER)
        pf = recommendation_manager._ctx["profile_fetcher"]

        client_meta = pf.get(hashed_client_id)
        if client_meta is None:
            # no valid client metadata was found for the given
            # clientId
            result = {"results": False, "error": "No client found"}
            response = app.response_class(
                response=json.dumps(result),
                status=200,
                mimetype="application/json",
            )
            return response

        result = {
            "results": addon_id in client_meta.get("installed_addons", [])
        }
        response = app.response_class(response=json.dumps(result),
                                      status=200,
                                      mimetype="application/json")
        return response

    @app.route("/v1/api/recommendations/<hashed_client_id>/",
               methods=["GET", "POST"])
    def recommendations(hashed_client_id):
        """Return a list of recommendations provided a telemetry client_id."""
        # Use the module global PROXY_MANAGER
        global PROXY_MANAGER

        extra_data = {}
        extra_data["options"] = {}
        extra_data["options"]["promoted"] = []

        try:
            if request.method == "POST":
                json_data = request.data
                # request.data may be bytes (Python 3.5+) rather than str;
                # decode before parsing as JSON.
                if isinstance(json_data, bytes):
                    json_data = json_data.decode("utf8")

                if json_data != "":
                    post_data = json.loads(json_data)
                    raw_promoted_guids = post_data.get("options",
                                                       {}).get("promoted", [])
                    promoted_guids = clean_promoted_guids(raw_promoted_guids)
                    extra_data["options"]["promoted"] = promoted_guids

        except Exception as e:
            jdata = {}
            jdata["results"] = []
            jdata["error"] = "Invalid JSON in POST: {}".format(e)
            capture_exception(e)
            return app.response_class(
                response=json.dumps(jdata),
                status=400,
                mimetype="application/json",
            )

        # Coerce the uuid.UUID type into a string
        client_id = str(hashed_client_id)

        locale = request.args.get("locale", None)
        if locale is not None:
            extra_data["locale"] = locale

        platform = request.args.get("platform", None)
        if platform is not None:
            extra_data["platform"] = platform

        recommendation_manager = acquire_taar_singleton(PROXY_MANAGER)

        def set_extra(record):
            record.url = request.path
            if locale:
                record.locale = locale
            if platform:
                record.platform = platform
            record.client_id = client_id
            record.method = request.method

        with ContextFilter(recommendation_manager.logger, set_extra):
            recommendations = recommendation_manager.recommend(
                client_id=client_id,
                limit=AppSettings.TAAR_MAX_RESULTS,
                extra_data=extra_data)

        promoted_guids = extra_data.get("options", {}).get("promoted", [])
        recommendations = merge_promoted_guids(promoted_guids, recommendations)

        # Strip out weights from TAAR results to maintain compatibility
        # with TAAR 1.0
        jdata = {"results": [x[0] for x in recommendations]}

        response = app.response_class(response=json.dumps(jdata),
                                      status=200,
                                      mimetype="application/json")
        return response

    class MyPlugin:
        def set(self, config_options):
            """
            This setter is primarily so that we can instrument the
            cached RecommendationManager implementation under test.

            All plugins should implement this set method to enable
            overwriting configuration options with a TAAR library.
            """
            global PROXY_MANAGER
            if "PROXY_RESOURCE" in config_options:
                PROXY_MANAGER._resource = config_options["PROXY_RESOURCE"]

    return MyPlugin()