Code example #1
File: conftest.py — Project: clarammdantas/services
def mock_stats(mock_config):
    '''
    Mock Datadog authentication and stats management.

    Configures the project-wide ``stats`` helper with a dummy token and an
    ultra-fast flush interval, then attaches an in-memory reporter so unit
    tests can inspect the metrics and events that get flushed.

    Yields:
        MemoryReporter: the in-memory reporter installed on ``stats.api``.
    '''
    # These modules are used below (time.time, itertools.chain) but were
    # never imported in this file — without them the fixture raises
    # NameError at runtime. Import them locally to keep the fix contained.
    import itertools
    import time

    from shipit_static_analysis import stats

    # Configure Datadog with a dummy token
    # and an ultra fast flushing cycle
    stats.auth('test_token')
    stats.api.stop()
    stats.api.start(flush_interval=0.001)
    assert not stats.api._disabled
    assert stats.api._is_auto_flushing

    class MemoryReporter(object):
        '''
        A reporting class that reports to memory for testing.
        Used in datadog unit tests:
        https://github.com/DataDog/datadogpy/blob/master/tests/unit/threadstats/test_threadstats.py
        '''
        def __init__(self, api):
            # Accumulated payloads, appended on each flush cycle.
            self.metrics = []
            self.events = []
            self.api = api

        def flush_metrics(self, metrics):
            self.metrics += metrics

        def flush_events(self, events):
            self.events += events

        def flush(self):
            # Helper for unit tests to force a flush: pass a timestamp far
            # enough in the future that every buffered point is included.
            self.api.flush(time.time() + 20)

        def get_metrics(self, metric_name):
            # Flatten all [timestamp, value] points recorded for the given
            # metric name, scaling each point by its reporting interval.
            return list(
                itertools.chain(*[[[t, point * m['interval']]
                                   for t, point in m['points']]
                                  for m in self.metrics
                                  if m['metric'] == metric_name]))

    # Expose the reporter so unit tests can access collected metrics
    stats.api.reporter = MemoryReporter(stats.api)
    yield stats.api.reporter
Code example #2
def main(
    phabricator,
    mozreview,
    cache_root,
    taskcluster_secret,
    taskcluster_client_id,
    taskcluster_access_token,
):
    '''
    Entry point for the static-analysis runner.

    Loads Taskcluster secrets, configures logging and (optionally) Datadog
    statistics, builds the list of revisions to analyze from either a
    Phabricator or a MozReview source, then runs the analysis workflow on
    each revision, logging any failure instead of aborting the batch.
    '''
    # Exactly one revision source must be given: phabricator or mozreview.
    assert (phabricator is None) ^ (mozreview is None), \
        'Specify a phabricator XOR mozreview parameters'

    # Fetch required configuration, with development-friendly defaults.
    secrets = get_secrets(
        taskcluster_secret,
        config.PROJECT_NAME,
        required=(
            'APP_CHANNEL',
            'REPORTERS',
            'ANALYZERS',
        ),
        existing={
            'APP_CHANNEL': 'development',
            'REPORTERS': [],
            'ANALYZERS': [
                'clang-tidy',
            ],
        },
        taskcluster_client_id=taskcluster_client_id,
        taskcluster_access_token=taskcluster_access_token,
    )

    # Logging backends are all optional; missing secrets disable them.
    init_logger(
        config.PROJECT_NAME,
        PAPERTRAIL_HOST=secrets.get('PAPERTRAIL_HOST'),
        PAPERTRAIL_PORT=secrets.get('PAPERTRAIL_PORT'),
        SENTRY_DSN=secrets.get('SENTRY_DSN'),
        MOZDEF=secrets.get('MOZDEF'),
    )

    # Settings must be ready before statistics are configured.
    settings.setup(secrets['APP_CHANNEL'])

    # Statistics are only enabled when a Datadog API key is available.
    datadog_api_key = secrets.get('DATADOG_API_KEY')
    if datadog_api_key:
        stats.auth(datadog_api_key)

    # Build the reporter instances declared in the secrets.
    reporters = get_reporters(
        secrets['REPORTERS'],
        taskcluster_client_id,
        taskcluster_access_token,
    )

    # Collect the revisions to analyze.
    revisions = []
    if phabricator:
        # Only a single phabricator revision is supported at a time, and it
        # needs the phabricator reporter to publish results.
        phab_api = reporters.get('phabricator')
        assert phab_api is not None, \
            'Cannot use a phabricator revision without a phabricator reporter'
        revisions.append(PhabricatorRevision(phabricator, phab_api))
    if mozreview:
        # Several mozreview revisions may be passed, space-separated.
        for rev in mozreview.split(' '):
            revisions.append(MozReviewRevision(rev))

    workflow = Workflow(cache_root, reporters, secrets['ANALYZERS'])
    for revision in revisions:
        try:
            workflow.run(revision)
        except Exception as failure:
            # A failing revision is logged (reaching papertrail) but does
            # not stop the remaining revisions from being analyzed.
            logger.error(
                'Static analysis failure',
                revision=revision,
                error=failure,
            )