Example #1
def tocsv(filename,
          channel,
          versions=None,
          product='Firefox',
          start_date=None,
          end_date='today',
          duration=30,
          platforms=None):
    with open(filename, 'w') as Out:
        writer = csv.writer(Out, delimiter=',')
        data = get(channel, versions, product, start_date, end_date, duration,
                   platforms)
        data = [(utils.get_date_str(d), data[d]) for d in sorted(data)]
        head = [
            'date', 'adi', 'browser', 'content', 'b+c', 'plugin',
            'browser_rate', 'content_rate', 'b+c_rate', 'plugin_rate'
        ]
        writer.writerow(head)

        for d in data:
            row = [
                d[0], d[1]['adi'], d[1]['browser'], d[1]['content'],
                d[1]['b+c'], d[1]['plugin'], d[1]['browser_rate'],
                d[1]['content_rate'], d[1]['b+c_rate'], d[1]['plugin_rate']
            ]
            writer.writerow(row)
Example #2
def tocsv(filename, channel, versions=None, product='Firefox', start_date=None, end_date='today', duration=30, platforms=None):
    with open(filename, 'w') as Out:
        writer = csv.writer(Out, delimiter=',')
        data = get(channel, versions, product, start_date, end_date, duration, platforms)
        data = [(utils.get_date_str(d), data[d]) for d in sorted(data)]
        head = ['date', 'adi', 'browser', 'content', 'b+c', 'plugin', 'browser_rate', 'content_rate', 'b+c_rate', 'plugin_rate']
        writer.writerow(head)

        for d in data:
            row = [d[0], d[1]['adi'], d[1]['browser'], d[1]['content'], d[1]['b+c'], d[1]['plugin'], d[1]['browser_rate'], d[1]['content_rate'], d[1]['b+c_rate'], d[1]['plugin_rate']]
            writer.writerow(row)
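A minimal usage sketch for the tocsv helper above; the file name, channel and duration are illustrative only, and the call assumes the surrounding module already provides csv, utils and the get function it relies on:

# hypothetical call: dump the last 7 days of release-channel stability data
tocsv('stability_release.csv', 'release', duration=7)
# the file then starts with the header row written above:
# date,adi,browser,content,b+c,plugin,browser_rate,content_rate,b+c_rate,plugin_rate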
Example #3
def get(channel, date, versions=None, product='Firefox', duration=1):
    """Get stability info

    Args:
        channel (str): the channel
        date (str): the final date
        versions (Optional[List[str]]): the versions to treat
        product (Optional[str]): the product
        duration (Optional[int]): the duration to retrieve the data

    Returns:
        dict: contains all the info relative to stability
    """
    channel = channel.lower()
    cycle = duration <= 0
    versions_info = socorro.ProductVersions.get_version_info(versions,
                                                             channel=channel,
                                                             product=product)

    versions = versions_info.keys()
    throttle = set(map(lambda p: p[1], versions_info.values()))
    diff_throttle = len(throttle) != 1
    # normally the throttle is 10% for release and 100% for the other channels
    if not diff_throttle:
        throttle = throttle.pop()

    platforms = socorro.Platforms.get_cached_all()

    end_date_dt = utils.get_date_ymd(date)
    if cycle:
        # get the start date of each version and take the min
        start_date_dt = min(
            map(lambda p: utils.get_date_ymd(p[0]), versions_info.values()))
        duration = (end_date_dt - start_date_dt).days + 1
    else:
        start_date_dt = end_date_dt - timedelta(duration - 1)

    start_date_str = utils.get_date_str(start_date_dt)
    end_date_str = utils.get_date_str(end_date_dt)

    # First, we get the ADI
    adi = socorro.ADI.get(version=versions,
                          product=product,
                          end_date=end_date_str,
                          duration=duration,
                          platforms=platforms)
    adi = [adi[key] for key in sorted(adi.keys(), reverse=False)]

    # Get the khours
    khours = Redash.get_khours(start_date_dt, end_date_dt, channel, versions,
                               product)
    khours = [khours[key] for key in sorted(khours.keys(), reverse=False)]

    # Get the # of crashes (crash pings)
    crash_pings = Redash.get_number_of_crash(start_date_dt, end_date_dt,
                                             channel, versions, product)

    crashes = {}
    stats = {'m+c': 0., 'main': 0., 'content': 0., 'plugin': 0., 'all': 0.}
    for i in range(duration):
        d = end_date_dt - timedelta(i)
        crashes[d] = {}
        crashes[d]['socorro'] = {
            'global': stats.copy(),
            'startup': stats.copy()
        }
        crashes[d]['telemetry'] = crash_pings[d]

    base = {
        'product': product,
        'version': None,
        'date': socorro.SuperSearch.get_search_date(start_date_str,
                                                    end_date_str),
        'release_channel': channel,
        '_results_number': 1,
        '_histogram.date': ['product', 'process_type'],
        '_facets_size': 3
    }

    if diff_throttle:
        # in this case each version could have a different throttle so we need to compute stats for each version
        queries = []
        for v, t in versions_info.items():
            cparams = base.copy()
            cparams['version'] = v
            queries.append(
                Query(socorro.SuperSearch.URL, cparams,
                      functools.partial(__crash_handler, t[1]), crashes))
            cparams = copy.deepcopy(cparams)
            cparams['uptime'] = '<60'
            cparams['_histogram.date'].append('uptime')
            queries.append(
                Query(socorro.SuperSearch.URL, cparams,
                      functools.partial(__crash_handler, t[1]), crashes))
    else:
        base['version'] = versions
        queries = []
        queries.append(
            Query(socorro.SuperSearch.URL, base,
                  functools.partial(__crash_handler, throttle), crashes))
        cparams = copy.deepcopy(base)
        cparams['uptime'] = '<60'
        cparams['_histogram.date'].append('uptime')
        queries.append(
            Query(socorro.SuperSearch.URL, cparams,
                  functools.partial(__crash_handler, throttle), crashes))

    socorro.SuperSearch(queries=queries).wait()
    crashes = [crashes[key] for key in sorted(crashes.keys(), reverse=False)]

    # Now we compute the rates and the averages
    stats = {
        'm+c': [0., 0., 0., 0.],
        'main': [0., 0., 0., 0.],
        'content': [0., 0., 0., 0.],
        'plugin': [0., 0., 0., 0.],
        'all': [0., 0., 0., 0.]
    }
    averages = {}
    averages['socorro'] = {'global': stats, 'startup': copy.deepcopy(stats)}
    averages['telemetry'] = copy.deepcopy(stats)
    N = len(adi)

    # sum
    for i in range(N):
        crash_soc = crashes[i]['socorro']
        for k1, v1 in averages['socorro'].items():
            for k2, av in v1.items():
                c = crash_soc[k1][k2]
                # the rate is computed per 100 ADI
                x = utils.rate(100. * c, adi[i])
                av[0] += x
                av[1] += x**2
                y = utils.rate(c, khours[i])
                av[2] += y
                av[3] += y**2
                crash_soc[k1][k2] = (c, x, y)
        crash_tel = crashes[i]['telemetry']
        for k1, av in averages['telemetry'].items():
            c = crash_tel[k1]
            # the rate is computed per 100 ADI
            x = utils.rate(100. * c, adi[i])
            av[0] += x
            av[1] += x**2
            y = utils.rate(c, khours[i])
            av[2] += y
            av[3] += y**2
            crash_tel[k1] = (c, x, y)

    N = float(N)
    averages_old = {'socorro': {}, 'telemetry': {}}
    averages_new = copy.deepcopy(averages_old)

    # mean & standard deviation
    av_new_soc = averages_new['socorro']
    av_old_soc = averages_old['socorro']
    for k1, v1 in averages['socorro'].items():
        d1 = {}
        av_old_soc[k1] = d1
        d2 = {}
        av_new_soc[k1] = d2
        for k2, av in v1.items():
            m = av[0] / N
            d1[k2] = (m, math.sqrt(av[1] / N - m**2))
            m = av[2] / N
            d2[k2] = (m, math.sqrt(av[3] / N - m**2))

    av_new_tel = averages_new['telemetry']
    av_old_tel = averages_old['telemetry']
    for k1, av in averages['telemetry'].items():
        m = av[0] / N
        av_old_tel[k1] = (m, math.sqrt(av[1] / N - m**2))
        m = av[2] / N
        av_new_tel[k1] = (m, math.sqrt(av[3] / N - m**2))

    return {
        'start_date': start_date_str,
        'end_date': end_date_str,
        'versions': versions,
        'adi': adi,
        'khours': khours,
        'crashes': crashes,
        'averages_old': averages_old,
        'averages_new': averages_new
    }
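The averaging loops above keep, for every crash category, the running sums of x and x**2 of the daily rates and then turn them into a mean and a standard deviation via sqrt(mean(x**2) - mean(x)**2). A small self-contained sketch of that identity, independent of the Socorro data (the sample values are made up):

import math

def mean_and_std(samples):
    # same two running sums the loops above accumulate per category
    n = float(len(samples))
    s1 = sum(samples)                   # sum of x
    s2 = sum(x * x for x in samples)    # sum of x**2
    m = s1 / n
    return m, math.sqrt(s2 / n - m ** 2)

# mean_and_std([2., 4., 4., 4., 5., 5., 7., 9.]) == (5.0, 2.0)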
Example #4
 def test_get_date_str(self):
     date = '1991-04-16'
     self.assertEqual(utils.get_date_str(datetime.datetime.strptime(date, '%Y-%m-%d')), date)
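Given this round-trip test, utils.get_date_str presumably formats a datetime back into a 'YYYY-MM-DD' string. A minimal stand-in with the behaviour the test expects (the real helper in the project's utils module may accept more input types):

def get_date_str(dt):
    # assumption: plain ISO date formatting, as the test above implies
    return dt.strftime('%Y-%m-%d')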
Example #5
def get(channel, date, product='Firefox', duration=11, tc_limit=50, crash_type='all', startup=False):
    """Get crashes info

    Args:
        channel (str): the channel
        date (str): the final date
        product (Optional[str]): the product
        duration (Optional[int]): the duration to retrieve the data
        tc_limit (Optional[int]): the number of topcrashes to load
        crash_type (Optional[str]): 'all' (default) or 'browser' or 'content' or 'plugin'

    Returns:
        dict: contains all the info relative to the crashes
    """
    channel = channel.lower()
    version = v[channel]
    versions_info = socorro.ProductVersions.get_version_info(version, channel=channel, product=product)
    versions = versions_info.keys()
    platforms = socorro.Platforms.get_cached_all()

    if crash_type and isinstance(crash_type, six.string_types):
        crash_type = [crash_type]

    throttle = set(map(lambda p: p[1], versions_info.values()))
    if len(throttle) == 1:
        throttle = throttle.pop()
    else:
        return

    _date = utils.get_date_ymd(date)
    start_date = utils.get_date_str(_date - timedelta(duration - 1))
    end_date = utils.get_date_str(_date)

    # First, we get the ADI
    adi = socorro.ADI.get(version=versions, product=product, end_date=end_date, duration=duration, platforms=platforms)
    adi = [adi[key] for key in sorted(adi.keys(), reverse=True)]

    # get the khours
    khours = Redash.get_khours(utils.get_date_ymd(start_date), utils.get_date_ymd(end_date), channel, versions, product)
    khours = [khours[key] for key in sorted(khours.keys(), reverse=True)]

    overall_crashes_by_day = []
    signatures = {}

    def signature_handler(json):
        for signature in json['facets']['signature']:
            signatures[signature['term']] = [signature['count'], 0, 0, 0, 0]

            for platform in signature['facets']['platform']:
                if platform['term'] == 'Linux':
                    signatures[signature['term']][3] = platform['count']
                elif platform['term'] == 'Windows NT':
                    signatures[signature['term']][1] = platform['count']
                elif platform['term'] == 'Mac OS X':
                    signatures[signature['term']][2] = platform['count']

            for uptime in signature['facets']['uptime']:
                if int(uptime['term']) < 60:
                    signatures[signature['term']][4] += uptime['count']

        for facets in json['facets']['histogram_date']:
            overall_crashes_by_day.insert(0, facets['count'])

    params = {
        'product': product,
        'version': versions,
        'date': socorro.SuperSearch.get_search_date(start_date, end_date),
        'release_channel': channel,
        '_aggs.signature': ['platform', 'uptime'],
        '_results_number': 0,
        '_facets_size': tc_limit,
        '_histogram.date': ['product'],
        '_histogram_interval': 1
    }

    if startup:
        params['uptime'] = '<=60'

    socorro.SuperSearch(params=params, handler=signature_handler).wait()

    bug_flags = ['resolution', 'id', 'last_change_time', 'cf_tracking_firefox' + str(version)]
    for i in range(int(version), int(v['nightly']) + 1):
        bug_flags.append('cf_status_firefox' + str(i))

    # TODO: too many requests... should be improved with chunks
    bugs = {}
    # TODO: Use regexps once the Bugzilla bug that prevents them from working is fixed.
    base = {
        'j_top': 'OR',
        'o1': 'substring',
        'f1': 'cf_crash_signature',
        'v1': None,
        'o2': 'substring',
        'f2': 'cf_crash_signature',
        'v2': None,
        'o3': 'substring',
        'f3': 'cf_crash_signature',
        'v3': None,
        'o4': 'substring',
        'f4': 'cf_crash_signature',
        'v4': None,
        'include_fields': bug_flags
    }

    queries = []
    for sgn in signatures.keys():
        cparams = base.copy()
        cparams['v1'] = '[@' + sgn + ']'
        cparams['v2'] = '[@ ' + sgn + ' ]'
        cparams['v3'] = '[@ ' + sgn + ']'
        cparams['v4'] = '[@' + sgn + ' ]'
        bugs[sgn] = []
        queries.append(Query(Bugzilla.API_URL, cparams, __bug_handler, bugs[sgn]))
    res_bugs = Bugzilla(queries=queries)

    # we have per-signature stats in signatures
    # for each signature, get the number of crashes over the last X days,
    # i.e. the signature trend
    trends = {}
    default_trend = {}
    for i in range(duration):
        default_trend[_date - timedelta(i)] = 0

    base = {'product': product,
            'version': versions,
            'signature': None,
            'date': socorro.SuperSearch.get_search_date(start_date, end_date),
            'release_channel': channel,
            '_results_number': 0,
            '_histogram.date': ['signature'],
            '_histogram_interval': 1}

    queries = []
    for sgns in Connection.chunks(list(map(lambda sgn: '=' + sgn, signatures.keys())), 10):
        sgn_group = []
        for sgn in sgns:
            if sum(len(s) for s in sgn_group) >= 1000:
                cparams = base.copy()
                cparams['signature'] = sgn_group
                queries.append(Query(socorro.SuperSearch.URL, cparams, functools.partial(__trend_handler, default_trend), trends))
                sgn_group = []

            sgn_group.append(sgn)

        if len(sgn_group) > 0:
            cparams = base.copy()
            cparams['signature'] = sgn_group
            queries.append(Query(socorro.SuperSearch.URL, cparams, functools.partial(__trend_handler, default_trend), trends))

    socorro.SuperSearch(queries=queries).wait()

    for sgn, trend in trends.items():
        signatures[sgn] = (signatures[sgn], [trend[key] for key in sorted(trend.keys(), reverse=True)])

    _signatures = {}
    # order signatures by crash count
    l = sorted(signatures.items(), key=lambda x: x[1][0][0], reverse=True)
    i = 1
    for s in l:
        _signatures[s[0]] = i  # top crash rank
        i += 1

    res_bugs.wait()

    # TODO: In the first query to get the bugs, also get dupe_of and avoid the first query
    #       in follow_dup (so modify follow_dup to accept either a bug ID or a bug object).
    queries = []
    for sgn in signatures.keys():
        duplicate_ids = [bug['id'] for bug in bugs[sgn] if bug['resolution'] == 'DUPLICATE']

        # Remove bugs resolved as DUPLICATE from the list of bugs associated to the signature.
        bugs[sgn] = [bug for bug in bugs[sgn] if bug['id'] not in duplicate_ids]

        # Find duplicates for bugs resolved as DUPLICATE.
        duplicates = {k: v for k, v in Bugzilla.follow_dup(duplicate_ids).items() if v is not None}
        duplicate_targets = [bug_id for bug_id in duplicates.values() if int(bug_id) not in [bug['id'] for bug in bugs[sgn]]]
        if len(duplicate_targets) == 0:
            continue

        # Get info about bugs that the DUPLICATE bugs have been duped to.
        params = {
            'id': ','.join(duplicate_targets),
            'include_fields': bug_flags,
        }
        queries.append(Query(Bugzilla.API_URL, params, __bug_handler, bugs[sgn]))
    Bugzilla(queries=queries).wait()

    for sgn, stats in signatures.items():
        # stats is a 2-tuple: ([count, win_count, mac_count, linux_count, startup_count], trend)
        startup_percent = float(stats[0][4]) / float(stats[0][0])
        _signatures[sgn] = {'tc_rank': _signatures[sgn],
                            'crash_count': stats[0][0],
                            'startup_percent': startup_percent,
                            'crash_by_day': stats[1],
                            'bugs': bugs[sgn]}

    return {
        'start_date': start_date,
        'end_date': end_date,
        'versions': list(versions),
        'adi': adi,
        'khours': khours,
        'crash_by_day': overall_crashes_by_day,
        'signatures': _signatures,
        'throttle': float(throttle)
    }
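The signature batching above first chunks the '='-prefixed signatures into groups of at most 10 (Connection.chunks) and additionally starts a new SuperSearch query whenever the text accumulated in sgn_group reaches roughly 1000 characters, presumably to keep each query URL short. A standalone sketch of that budget-based grouping, with the limits taken from the code above as assumptions:

def group_by_budget(items, max_chars=1000):
    # flush the current group once it already holds max_chars or more characters
    groups, current = [], []
    for item in items:
        if sum(len(s) for s in current) >= max_chars:
            groups.append(current)
            current = []
        current.append(item)
    if current:
        groups.append(current)
    return groups

# applied to one 10-signature chunk, this reproduces the sgn_group values
# that the inner loop above passes to each Query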
Example #6
def get(channel, date, versions=None, product='Firefox', duration=1):
    """Get stability info

    Args:
        channel (str): the channel
        date (str): the final date
        versions (Optional[List[str]]): the versions to treat
        product (Optional[str]): the product
        duration (Optional[int]): the duration to retrieve the data

    Returns:
        dict: contains all the info relative to stability
    """
    channel = channel.lower()
    cycle = duration <= 0
    versions_info = socorro.ProductVersions.get_version_info(versions, channel=channel, product=product)

    versions = versions_info.keys()
    throttle = set(map(lambda p: p[1], versions_info.values()))
    diff_throttle = len(throttle) != 1
    # normally the throttle is 10% for release and 100% for the other channels
    if not diff_throttle:
        throttle = throttle.pop()

    platforms = socorro.Platforms.get_cached_all()

    end_date_dt = utils.get_date_ymd(date)
    if cycle:
        # get the start date of each version and take the min
        start_date_dt = min(map(lambda p: utils.get_date_ymd(p[0]), versions_info.values()))
        duration = (end_date_dt - start_date_dt).days + 1
    else:
        start_date_dt = end_date_dt - timedelta(duration - 1)

    start_date_str = utils.get_date_str(start_date_dt)
    end_date_str = utils.get_date_str(end_date_dt)

    # First, we get the ADI
    adi = socorro.ADI.get(version=versions, product=product, end_date=end_date_str, duration=duration, platforms=platforms)
    adi = [adi[key] for key in sorted(adi.keys(), reverse=False)]

    # Get the khours
    khours = Redash.get_khours(start_date_dt, end_date_dt, channel, versions, product)
    khours = [khours[key] for key in sorted(khours.keys(), reverse=False)]

    # Get the # of crashes (crash pings)
    crash_pings = Redash.get_number_of_crash(start_date_dt, end_date_dt, channel, versions, product)

    crashes = {}
    stats = {'m+c': 0.,
             'main': 0.,
             'content': 0.,
             'plugin': 0.,
             'all': 0.}
    for i in range(duration):
        d = end_date_dt - timedelta(i)
        crashes[d] = {}
        crashes[d]['socorro'] = {'global': stats.copy(), 'startup': stats.copy()}
        crashes[d]['telemetry'] = crash_pings[d]

    base = {'product': product,
            'version': None,
            'date': socorro.SuperSearch.get_search_date(start_date_str, end_date_str),
            'release_channel': channel,
            '_results_number': 1,
            '_histogram.date': ['product', 'process_type'],
            '_facets_size': 3}

    if diff_throttle:
        # in this case each version could have a different throttle so we need to compute stats for each version
        queries = []
        for v, t in versions_info.items():
            cparams = base.copy()
            cparams['version'] = v
            queries.append(Query(socorro.SuperSearch.URL, cparams, functools.partial(__crash_handler, t[1]), crashes))
            cparams = copy.deepcopy(cparams)
            cparams['uptime'] = '<60'
            cparams['_histogram.date'].append('uptime')
            queries.append(Query(socorro.SuperSearch.URL, cparams, functools.partial(__crash_handler, t[1]), crashes))
    else:
        base['version'] = versions
        queries = []
        queries.append(Query(socorro.SuperSearch.URL, base, functools.partial(__crash_handler, throttle), crashes))
        cparams = copy.deepcopy(base)
        cparams['uptime'] = '<60'
        cparams['_histogram.date'].append('uptime')
        queries.append(Query(socorro.SuperSearch.URL, cparams, functools.partial(__crash_handler, throttle), crashes))

    socorro.SuperSearch(queries=queries).wait()
    crashes = [crashes[key] for key in sorted(crashes.keys(), reverse=False)]

    # Now we compute the rates and the averages
    stats = {'m+c': [0., 0., 0., 0.],
             'main': [0., 0., 0., 0.],
             'content': [0., 0., 0., 0.],
             'plugin': [0., 0., 0., 0.],
             'all': [0., 0., 0., 0.]}
    averages = {}
    averages['socorro'] = {'global': stats, 'startup': copy.deepcopy(stats)}
    averages['telemetry'] = copy.deepcopy(stats)
    N = len(adi)

    # sum
    for i in range(N):
        crash_soc = crashes[i]['socorro']
        for k1, v1 in averages['socorro'].items():
            for k2, av in v1.items():
                c = crash_soc[k1][k2]
                # the rate is computed per 100 ADI
                x = utils.rate(100. * c, adi[i])
                av[0] += x
                av[1] += x ** 2
                y = utils.rate(c, khours[i])
                av[2] += y
                av[3] += y ** 2
                crash_soc[k1][k2] = (c, x, y)
        crash_tel = crashes[i]['telemetry']
        for k1, av in averages['telemetry'].items():
            c = crash_tel[k1]
            # the rate is computed per 100 ADI
            x = utils.rate(100. * c, adi[i])
            av[0] += x
            av[1] += x ** 2
            y = utils.rate(c, khours[i])
            av[2] += y
            av[3] += y ** 2
            crash_tel[k1] = (c, x, y)

    N = float(N)
    averages_old = {'socorro': {}, 'telemetry': {}}
    averages_new = copy.deepcopy(averages_old)

    # mean & standard deviation
    av_new_soc = averages_new['socorro']
    av_old_soc = averages_old['socorro']
    for k1, v1 in averages['socorro'].items():
        d1 = {}
        av_old_soc[k1] = d1
        d2 = {}
        av_new_soc[k1] = d2
        for k2, av in v1.items():
            m = av[0] / N
            d1[k2] = (m, math.sqrt(av[1] / N - m ** 2))
            m = av[2] / N
            d2[k2] = (m, math.sqrt(av[3] / N - m ** 2))

    av_new_tel = averages_new['telemetry']
    av_old_tel = averages_old['telemetry']
    for k1, av in averages['telemetry'].items():
        m = av[0] / N
        av_old_tel[k1] = (m, math.sqrt(av[1] / N - m ** 2))
        m = av[2] / N
        av_new_tel[k1] = (m, math.sqrt(av[3] / N - m ** 2))

    return {'start_date': start_date_str,
            'end_date': end_date_str,
            'versions': versions,
            'adi': adi,
            'khours': khours,
            'crashes': crashes,
            'averages_old': averages_old,
            'averages_new': averages_new}
Example #7
def reformat_data(data):
    _data = {}
    for k, v in data.items():
        _data[utils.get_date_str(k)] = v
    return _data
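reformat_data simply re-keys a dict from datetime objects to their date strings. A small usage sketch, assuming utils.get_date_str formats dates as 'YYYY-MM-DD' (the keys and values are illustrative):

from datetime import datetime

data = {datetime(2016, 9, 1): 42, datetime(2016, 9, 2): 37}
reformat_data(data)
# expected result: {'2016-09-01': 42, '2016-09-02': 37}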
Example #8
 def test_get_date_str(self):
     date = '1991-04-16'
     self.assertEqual(
         utils.get_date_str(datetime.datetime.strptime(date, '%Y-%m-%d')),
         date)
Example #9
def update_status_flags(info, update=False):
    status_flags_by_channel = info['status_flags']
    base_versions = info['base_versions']
    channel_order = {
        'nightly': 0,
        'aurora': 1,
        'beta': 2,
        'release': 3,
        'esr': 4
    }
    platform_order = {'Windows': 0, 'Mac OS X': 1, 'Linux': 2}
    start_date_by_channel = info['start_dates']

    for c, d in start_date_by_channel.items():
        start_date_by_channel[c] = utils.get_date_str(d)

    bugids = []
    default_volumes = {c: 0 for c in channel_order.keys()}

    for sgn, i in info['signatures'].items():
        if i['firefox']:
            volumes = default_volumes.copy()
            data = {}
            bugid = i['bugid']
            bugids.append(str(bugid))
            for channel, volume in i['affected']:
                data[status_flags_by_channel[channel]] = 'affected'
                volumes[channel] = volume
            for channel, volume in i['leftovers']:
                volumes[channel] = volume
            if volumes:
                comment = 'Crash volume for signature \'%s\':\n' % sgn
                table = []
                for p in sorted(volumes.items(),
                                key=lambda k: channel_order[k[0]]):
                    affected_chan = p[0]
                    affected_version = base_versions[p[0]]
                    start_date = start_date_by_channel[p[0]]
                    volume = p[1]
                    plural = 'es' if volume > 1 else ''
                    table.append([
                        '- %s' % affected_chan,
                        '(version %d):' % affected_version,
                        '%d crash%s from %s.' % (volume, plural, start_date)
                    ])
                comment += __mk_volume_table(table)

                table = []
                empty = False
                N = -1
                for chan, trend in sorted(i['trend'].items(),
                                          key=lambda k: channel_order[k[0]]):
                    if len(trend) >= 1:
                        # we remove data for this week
                        del trend[0]
                    if len(trend) >= 8:  # keep only the last seven weeks
                        trend = trend[:7]

                    if not trend:
                        empty = True
                        break

                    N = max(N, len(trend))
                    row = [str(n) for n in trend]
                    row.insert(0, '- %s' % chan)
                    table.append(row)

                if not empty:
                    comment += '\n\nCrash volume on the last weeks:\n'
                    headers = ['']
                    for w in range(1, N + 1):
                        headers.append('Week N-%d' % w)
                    comment += __mk_volume_table(table, headers=headers)

                platforms = i['platforms']
                if platforms:
                    comment += '\n\nAffected platform'
                    if len(platforms) >= 2:
                        comment += 's'
                        platforms = sorted(platforms,
                                           key=lambda k: platform_order[k])
                    comment += ': ' + ', '.join(platforms)
                print(comment)
                data['comment'] = {'body': comment}
            if update:
                Bugzilla([str(bugid)]).put(data)
                pprint((bugid, data))
            else:
                pprint((bugid, data))

    if update:
        links = '\n'.join(Bugzilla.get_links(bugids))
        print(links)
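__mk_volume_table is not shown here; judging from how it is called (a list of row lists, plus an optional headers row for the weekly-trend table), it presumably renders a column-aligned plain-text table for the bug comment. A hypothetical stand-in for such a helper:

def mk_volume_table(table, headers=None):
    # hypothetical sketch: pad every column to the width of its widest cell
    rows = ([headers] if headers is not None else []) + table
    ncols = max(len(r) for r in rows)
    rows = [list(r) + [''] * (ncols - len(r)) for r in rows]
    widths = [max(len(str(r[i])) for r in rows) for i in range(ncols)]
    return '\n'.join(
        '  '.join(str(c).ljust(w) for c, w in zip(r, widths)).rstrip()
        for r in rows)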
Example #10
def get(product='Firefox',
        limit=1000,
        verbose=False,
        search_start_date='',
        signatures=[],
        bug_ids=[],
        max_bugs=-1):
    """Get crashes info

    Args:
        product (Optional[str]): the product
        limit (Optional[int]): the number of crashes to get from tcbs

    Returns:
        dict: contains all the info about how to update flags
    """
    p = product.lower()
    if p == 'firefox':
        product = 'Firefox'
    elif p == 'fennecandroid':
        product = 'FennecAndroid'

    channel = ['release', 'beta', 'aurora', 'nightly']
    if product == 'Firefox':
        channel.append('esr')

    base_versions = clouseau.versions.get(base=True)
    versions_by_channel = socorro.ProductVersions.get_info_from_major(
        base_versions, product=product)
    channel_by_version = {}
    all_versions = []
    start_date_by_channel = {}
    start_date = utils.get_date_ymd('today')
    for chan, versions in versions_by_channel.iteritems():
        start_date_by_channel[chan] = utils.get_date_ymd('tomorrow')
        for v in versions:
            channel_by_version[v['version']] = chan
            d = utils.get_date_ymd(v['start_date'])
            all_versions.append(v['version'])
            if d < start_date:
                start_date = d
            if d < start_date_by_channel[chan]:
                start_date_by_channel[chan] = d

    __warn('Versions: %s' % ', '.join(all_versions), verbose)
    __warn('Start dates: %s' % start_date_by_channel, verbose)

    end_date = utils.get_date('today')
    if search_start_date:
        search_date = socorro.SuperSearch.get_search_date(
            search_start_date, end_date)
    else:
        search_date = socorro.SuperSearch.get_search_date(
            utils.get_date_str(start_date), end_date)

    signatures = __get_signatures(limit, product, all_versions, channel,
                                  search_date, signatures, bug_ids, verbose)

    __warn('Collected signatures: %d' % len(signatures), verbose)

    # get the bugs for each signatures
    bugs_by_signature = socorro.Bugs.get_bugs(signatures.keys())

    # if we have some bugs in bug_ids, we must remove the other ones for a given signature
    if bug_ids:
        bids = set(bug_ids)
        for s, bugids in bugs_by_signature.items():
            inter = bids.intersection(bugids)
            if inter:
                bugs_by_signature[s] = inter

    __warn('Collected bugs in Socorro: Ok', verbose)

    # we remove duplicate bugs
    # for example, if we have {1,2,3,4,5} and 2 is a dup of 5, the set is reduced to {1,3,4,5}
    bugs = set()
    for v in bugs_by_signature.values():
        bugs = bugs.union(v)
    dups = Bugzilla.follow_dup(bugs, only_final=False)
    bugs_count = 0
    bugs.clear()
    for s, bugids in bugs_by_signature.items():
        _bugids = set(bugids)
        toremove = set()
        for bugid in bugids:
            chain = dups[str(bugid)]
            if chain:
                elems = []
                for e in chain:
                    e = int(e)
                    if e in _bugids:
                        elems.append(e)
                if elems:
                    # keep the chain's final bug and mark the initial one for removal instead
                    elems[-1] = bugid
                    toremove = toremove.union(elems)
        diff = _bugids - toremove
        bugs_by_signature[s] = list(diff)
        bugs_count += len(diff)
        bugs = bugs.union(diff)

    __warn('Remove duplicates: Ok', verbose)
    __warn('Bugs to analyze: %d' % bugs_count, verbose)

    # we filter the bugs to remove meaningless ones
    if not bug_ids:
        bugs = filter_bugs(bugs, product)

    # we get the "better" bug where to update the info
    bugs_history_info = __get_bugs_info(bugs)

    crashes_to_reopen = []
    bugs.clear()
    tomorrow = utils.get_date_ymd('tomorrow')
    for s, v in bugs_by_signature.items():
        info = signatures[s]
        if v:
            min_date = tomorrow
            for i in info['affected_channels']:
                if i[0] != 'esr':
                    d = start_date_by_channel[i[0]]
                    if d < min_date:
                        min_date = d

            bug_to_touch = get_last_bug(v, bugs_history_info, min_date)
            if not bug_to_touch:
                crashes_to_reopen.append(s)
        else:
            bug_to_touch = None

        info['selected_bug'] = bug_to_touch
        info['bugs'] = v
        if bug_to_touch:
            bugs.add(bug_to_touch)

    __warn('Collected last bugs: %d' % len(bugs), verbose)

    # get bug info
    include_fields = ['status', 'id', 'cf_crash_signature']
    status_flags = {}
    for c, v in base_versions.iteritems():
        v = str(v)
        if c != 'esr':
            f1 = 'cf_status_firefox' + v
        else:
            f1 = 'cf_status_firefox_esr' + v
        include_fields.append(f1)
        status_flags[c] = f1

    bug_info = {}

    def bug_handler(bug, data):
        data[str(bug['id'])] = bug

    Bugzilla(list(bugs),
             include_fields=include_fields,
             bughandler=bug_handler,
             bugdata=bug_info).get_data().wait()

    __warn('Collected bug info: Ok', verbose)

    for info in signatures.values():
        bug = info['selected_bug']
        if bug:
            if bug in bug_info:
                info['selected_bug'] = bug_info[bug]
            else:
                info['selected_bug'] = 'private'

    analysis = __analyze(signatures, status_flags)

    if max_bugs > 0:
        __analysis = {}
        count = 0
        for signature, info in analysis.items():
            if info['firefox']:
                __analysis[signature] = info
                count += 1
                if count == max_bugs:
                    analysis = __analysis
                    break

    __warn('Analysis: Ok', verbose)

    # Now get the number of crashes for each signature
    queries = []
    trends = {}
    signatures_by_chan = {}
    default_trend_by_chan = {}
    today = utils.get_date_ymd('today')
    ref_w = today.isocalendar()[1]

    def get_past_week(date):
        isodate = date.isocalendar()
        w = isodate[1]
        if w > ref_w:
            return ref_w - w + 53
        else:
            return ref_w - w

    for chan in channel:
        past_w = get_past_week(start_date_by_channel[chan])
        default_trend_by_chan[chan] = {i: 0 for i in range(past_w + 1)}

    for signature, info in analysis.items():
        if info['firefox']:
            data = {}
            trends[signature] = data
            # for chan, volume in info['affected']:
            for chan in channel:
                if chan in signatures_by_chan:
                    signatures_by_chan[chan].append(signature)
                else:
                    signatures_by_chan[chan] = [signature]
                data[chan] = default_trend_by_chan[chan].copy()

    def handler_ss(chan, json, data):
        for facets in json['facets']['histogram_date']:
            d = utils.get_date_ymd(facets['term'])
            w = get_past_week(d)
            s = facets['facets']['signature']
            for signature in s:
                count = signature['count']
                sgn = signature['term']
                data[sgn][chan][w] += count

    for chan, signatures in signatures_by_chan.items():
        if search_start_date:
            search_date = socorro.SuperSearch.get_search_date(
                search_start_date, end_date)
        else:
            search_date = socorro.SuperSearch.get_search_date(
                utils.get_date_str(start_date_by_channel[chan]), end_date)

        for sgns in Connection.chunks(signatures, 10):
            queries.append(
                Query(socorro.SuperSearch.URL, {
                    'signature': ['=' + s for s in sgns],
                    'product': product,
                    'version': all_versions,
                    'release_channel': chan,
                    'date': search_date,
                    '_histogram.date': 'signature',
                    '_histogram_interval': 1,
                    '_results_number': 0
                },
                      handler=functools.partial(handler_ss, chan),
                      handlerdata=trends))
    socorro.SuperSearch(queries=queries).wait()

    __warn('Collected trends: Ok\n', verbose)

    # replace dictionary containing trends by a list
    for signature, i in trends.items():
        for chan, trend in i.items():
            i[chan] = [
                trend[week] for week in sorted(trend.keys(), reverse=False)
            ]
        analysis[signature]['trend'] = i

    return {
        'status_flags': status_flags,
        'base_versions': base_versions,
        'start_dates': start_date_by_channel,
        'signatures': analysis
    }
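get_past_week above turns a date into "how many ISO weeks before today", adding 53 when the date's week number is larger than the current one (i.e. the date falls in the previous year). A small worked example with a pinned reference date instead of 'today' (the dates are illustrative; 2015 happens to have 53 ISO weeks, so the fixed +53 is exact here):

from datetime import datetime

ref_w = datetime(2016, 1, 20).isocalendar()[1]  # pretend today is in ISO week 3

def weeks_ago(date):
    w = date.isocalendar()[1]
    # same rule as get_past_week: a larger week number means the previous year
    return ref_w - w + 53 if w > ref_w else ref_w - w

weeks_ago(datetime(2016, 1, 13))   # 1 -> one ISO week earlier (week 2)
weeks_ago(datetime(2015, 12, 16))  # 5 -> ISO week 51 of the previous year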
Example #12
def get(channel,
        date,
        product='Firefox',
        duration=11,
        tc_limit=50,
        crash_type='all',
        startup=False):
    """Get crashes info

    Args:
        channel (str): the channel
        date (str): the final date
        product (Optional[str]): the product
        duration (Optional[int]): the duration to retrieve the data
        tc_limit (Optional[int]): the number of topcrashes to load
        crash_type (Optional[str]): 'all' (default) or 'browser' or 'content' or 'plugin'

    Returns:
        dict: contains all the info relative to the crashes
    """
    channel = channel.lower()
    version = v[channel]
    versions_info = socorro.ProductVersions.get_version_info(version,
                                                             channel=channel,
                                                             product=product)
    versions = versions_info.keys()
    platforms = socorro.Platforms.get_cached_all()

    if crash_type and isinstance(crash_type, six.string_types):
        crash_type = [crash_type]

    throttle = set(map(lambda p: p[1], versions_info.values()))
    if len(throttle) == 1:
        throttle = throttle.pop()
    else:
        return

    _date = utils.get_date_ymd(date)
    start_date = utils.get_date_str(_date - timedelta(duration - 1))
    end_date = utils.get_date_str(_date)

    # First, we get the ADI
    adi = socorro.ADI.get(version=versions,
                          product=product,
                          end_date=end_date,
                          duration=duration,
                          platforms=platforms)
    adi = [adi[key] for key in sorted(adi.keys(), reverse=True)]

    # get the khours
    khours = Redash.get_khours(utils.get_date_ymd(start_date),
                               utils.get_date_ymd(end_date), channel, versions,
                               product)
    khours = [khours[key] for key in sorted(khours.keys(), reverse=True)]

    overall_crashes_by_day = []
    signatures = {}

    def signature_handler(json):
        for signature in json['facets']['signature']:
            signatures[signature['term']] = [signature['count'], 0, 0, 0, 0]

            for platform in signature['facets']['platform']:
                if platform['term'] == 'Linux':
                    signatures[signature['term']][3] = platform['count']
                elif platform['term'] == 'Windows NT':
                    signatures[signature['term']][1] = platform['count']
                elif platform['term'] == 'Mac OS X':
                    signatures[signature['term']][2] = platform['count']

            for uptime in signature['facets']['uptime']:
                if int(uptime['term']) < 60:
                    signatures[signature['term']][4] += uptime['count']

        for facets in json['facets']['histogram_date']:
            overall_crashes_by_day.insert(0, facets['count'])

    params = {
        'product': product,
        'version': versions,
        'date': socorro.SuperSearch.get_search_date(start_date, end_date),
        'release_channel': channel,
        '_aggs.signature': ['platform', 'uptime'],
        '_results_number': 0,
        '_facets_size': tc_limit,
        '_histogram.date': ['product'],
        '_histogram_interval': 1
    }

    if startup:
        params['uptime'] = '<=60'

    socorro.SuperSearch(params=params, handler=signature_handler).wait()

    bug_flags = [
        'resolution', 'id', 'last_change_time',
        'cf_tracking_firefox' + str(version)
    ]
    for i in range(int(version), int(v['nightly']) + 1):
        bug_flags.append('cf_status_firefox' + str(i))

    # TODO: too many requests... should be improved with chunks
    bugs = {}
    # TODO: Use regexps once the Bugzilla bug that prevents them from working is fixed.
    base = {
        'j_top': 'OR',
        'o1': 'substring',
        'f1': 'cf_crash_signature',
        'v1': None,
        'o2': 'substring',
        'f2': 'cf_crash_signature',
        'v2': None,
        'o3': 'substring',
        'f3': 'cf_crash_signature',
        'v3': None,
        'o4': 'substring',
        'f4': 'cf_crash_signature',
        'v4': None,
        'include_fields': bug_flags
    }

    queries = []
    for sgn in signatures.keys():
        cparams = base.copy()
        cparams['v1'] = '[@' + sgn + ']'
        cparams['v2'] = '[@ ' + sgn + ' ]'
        cparams['v3'] = '[@ ' + sgn + ']'
        cparams['v4'] = '[@' + sgn + ' ]'
        bugs[sgn] = []
        queries.append(
            Query(Bugzilla.API_URL, cparams, __bug_handler, bugs[sgn]))
    res_bugs = Bugzilla(queries=queries)

    # we have per-signature stats in signatures
    # for each signature, get the number of crashes over the last X days,
    # i.e. the signature trend
    trends = {}
    default_trend = {}
    for i in range(duration):
        default_trend[_date - timedelta(i)] = 0

    base = {
        'product': product,
        'version': versions,
        'signature': None,
        'date': socorro.SuperSearch.get_search_date(start_date, end_date),
        'release_channel': channel,
        '_results_number': 0,
        '_histogram.date': ['signature'],
        '_histogram_interval': 1
    }

    queries = []
    for sgns in Connection.chunks(
            list(map(lambda sgn: '=' + sgn, signatures.keys())), 10):
        sgn_group = []
        for sgn in sgns:
            if sum(len(s) for s in sgn_group) >= 1000:
                cparams = base.copy()
                cparams['signature'] = sgn_group
                queries.append(
                    Query(socorro.SuperSearch.URL, cparams,
                          functools.partial(__trend_handler, default_trend),
                          trends))
                sgn_group = []

            sgn_group.append(sgn)

        if len(sgn_group) > 0:
            cparams = base.copy()
            cparams['signature'] = sgn_group
            queries.append(
                Query(socorro.SuperSearch.URL, cparams,
                      functools.partial(__trend_handler, default_trend),
                      trends))

    socorro.SuperSearch(queries=queries).wait()

    for sgn, trend in trends.items():
        signatures[sgn] = (signatures[sgn], [
            trend[key] for key in sorted(trend.keys(), reverse=True)
        ])

    _signatures = {}
    # order signatures by crash count
    l = sorted(signatures.items(), key=lambda x: x[1][0][0], reverse=True)
    i = 1
    for s in l:
        _signatures[s[0]] = i  # top crash rank
        i += 1

    res_bugs.wait()

    # TODO: In the first query to get the bugs, also get dupe_of and avoid the first query
    #       in follow_dup (so modify follow_dup to accept either a bug ID or a bug object).
    queries = []
    for sgn in signatures.keys():
        duplicate_ids = [
            bug['id'] for bug in bugs[sgn] if bug['resolution'] == 'DUPLICATE'
        ]

        # Remove bugs resolved as DUPLICATE from the list of bugs associated to the signature.
        bugs[sgn] = [
            bug for bug in bugs[sgn] if bug['id'] not in duplicate_ids
        ]

        # Find duplicates for bugs resolved as DUPLICATE.
        duplicates = {
            k: v
            for k, v in Bugzilla.follow_dup(duplicate_ids).items()
            if v is not None
        }
        duplicate_targets = [
            bug_id for bug_id in duplicates.values()
            if int(bug_id) not in [bug['id'] for bug in bugs[sgn]]
        ]
        if len(duplicate_targets) == 0:
            continue

        # Get info about bugs that the DUPLICATE bugs have been duped to.
        params = {
            'id': ','.join(duplicate_targets),
            'include_fields': bug_flags,
        }
        queries.append(
            Query(Bugzilla.API_URL, params, __bug_handler, bugs[sgn]))
    Bugzilla(queries=queries).wait()

    for sgn, stats in signatures.items():
        # stats is a 2-tuple: ([count, win_count, mac_count, linux_count, startup_count], trend)
        startup_percent = float(stats[0][4]) / float(stats[0][0])
        _signatures[sgn] = {
            'tc_rank': _signatures[sgn],
            'crash_count': stats[0][0],
            'startup_percent': startup_percent,
            'crash_by_day': stats[1],
            'bugs': bugs[sgn]
        }

    return {
        'start_date': start_date,
        'end_date': end_date,
        'versions': list(versions),
        'adi': adi,
        'khours': khours,
        'crash_by_day': overall_crashes_by_day,
        'signatures': _signatures,
        'throttle': float(throttle)
    }