Code example #1
def parse_dir(directory, default_settings, revision, rep):
    """Parses bench data from bench logs files.
       revision can be either svn revision or git commit hash.
    """
    revision_data_points = []  # list of BenchDataPoint
    file_list = os.listdir(directory)
    file_list.sort()
    for bench_file in file_list:
        scalar_type = None
        # Scalar type, if any, is in the bench filename after revision
        if (len(revision) > MAX_SVN_REV_LENGTH
                and bench_file.startswith('bench_' + revision + '_')):
            # The revision is GIT commit hash.
            scalar_type = bench_file[len(revision) + len('bench_') + 1:]
        elif (bench_file.startswith('bench_r' + revision + '_')
              and revision.isdigit()):
            # The revision is SVN number
            scalar_type = bench_file[len(revision) + len('bench_r') + 1:]
        else:
            continue

        file_handle = open(directory + '/' + bench_file, 'r')

        default_settings['scalar'] = scalar_type
        revision_data_points.extend(
            bench_util.parse(default_settings, file_handle, rep))
        file_handle.close()
    return revision_data_points
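
All of these examples center on bench_util.parse(settings, lines, representation), whose source is not included on this page; the excerpts also omit their imports (os, re, boto, cStringIO, bench_util, and so on), which the surrounding files provide. Below is a minimal, hypothetical sketch of the interface the callers appear to assume; the field names are inferred from attribute accesses in the examples (point.bench, point.config, point.time_type, point.time, point.tile_layout, point.per_tile_values), not taken from the real bench_util module.

# Hypothetical sketch of the bench_util interface assumed by the examples
# on this page (names inferred from the callers, not from the real module,
# which lives in the Skia repository).
class BenchDataPoint(object):
    def __init__(self, bench, config, time_type, time, settings,
                 tile_layout='', per_tile_values=None):
        self.bench = bench                  # benchmark name
        self.config = config                # e.g. 'gpu', 'viewport_...', 'tile_...'
        self.time_type = time_type          # '' appears to mean walltime
        self.time = time                    # representative time for this point
        self.settings = settings            # copy of the caller's settings dict
        self.tile_layout = tile_layout      # tile grid, used by tile_analyze.py
        self.per_tile_values = per_tile_values or []

def parse(settings, lines, representation=None):
    """Parses bench log lines into a list of BenchDataPoint.

    representation selects how repeated timings for a benchmark are reduced
    to a single value; the accepted names are not shown in these excerpts.
    """
    raise NotImplementedError  # the real implementation is in bench_util.py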
Code example #2
def parse_dir(directory, default_settings, revision, rep):
    """Parses bench data from bench logs files.
       revision can be either svn revision or git commit hash.
    """
    revision_data_points = []  # list of BenchDataPoint
    file_list = os.listdir(directory)
    file_list.sort()
    for bench_file in file_list:
        scalar_type = None
        # Scalar type, if any, is in the bench filename after revision
        if (len(revision) > MAX_SVN_REV_LENGTH and
            bench_file.startswith('bench_' + revision + '_')):
            # The revision is GIT commit hash.
            scalar_type = bench_file[len(revision) + len('bench_') + 1:]
        elif (bench_file.startswith('bench_r' + revision + '_') and
              revision.isdigit()):
            # The revision is SVN number
            scalar_type = bench_file[len(revision) + len('bench_r') + 1:]
        else:
            continue

        file_handle = open(directory + '/' + bench_file, 'r')

        default_settings['scalar'] = scalar_type
        revision_data_points.extend(
                        bench_util.parse(default_settings, file_handle, rep))
        file_handle.close()
    return revision_data_points
Code example #3
def OutputBenchExpectations(bench_type, rev_min, rev_max, representation_alg):
  """Reads bench data from google storage, and outputs expectations.

  Ignores data with revisions outside [rev_min, rev_max] integer range. For
  bench data with multiple revisions, we use higher revisions to calculate
  expected bench values.
  bench_type is either 'micro' or 'skp', according to the flag '-b'.
  Uses the provided representation_alg for calculating bench representations.
  """
  if bench_type not in BENCH_TYPES:
    raise Exception('Not valid bench_type! (%s)' % BENCH_TYPES)
  expectation_dic = {}
  uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
  for obj in uri.get_bucket():
    # Filters out non-bench files.
    if ((not obj.name.startswith('perfdata/%s' % BENCH_BUILDER_PREFIX) and
         not obj.name.startswith(
             'playback/perfdata/%s' % BENCH_BUILDER_PREFIX)) or
         obj.name.find('_data') < 0):
      continue
    if ((bench_type == 'micro' and obj.name.find('_data_skp_') > 0) or
        (bench_type == 'skp' and obj.name.find('_skp_') < 0)):
      # Skips wrong bench type.
      continue
    # Ignores uninterested platforms.
    platform = obj.name.split('/')[1]
    if not platform.startswith(BENCH_BUILDER_PREFIX):
      platform = obj.name.split('/')[2]
    if not platform.startswith(BENCH_BUILDER_PREFIX):
      continue  # Ignores non-platform object
    if platform not in PLATFORMS:
      continue
    # Filters by revision.
    to_filter = True
    for rev in range(rev_min, rev_max + 1):
      if '_r%s_' % rev in obj.name:
        to_filter = False
        break
    if to_filter:
      continue
    contents = cStringIO.StringIO()
    obj.get_file(contents)
    for point in bench_util.parse('', contents.getvalue().split('\n'),
                                  representation_alg):
      if point.config in CONFIGS_TO_FILTER:
        continue

      key = '%s_%s_%s,%s-%s' % (point.bench, point.config, point.time_type,
                                platform, representation_alg)
      # It is fine to have later revisions overwrite earlier benches, since we
      # only use the latest bench within revision range to set expectations.
      expectation_dic[key] = point.time
  keys = expectation_dic.keys()
  keys.sort()
  for key in keys:
    bench_val = expectation_dic[key]
    # Prints out expectation lines.
    print '%s,%.3f,%.3f,%.3f' % (key, bench_val,
                                 bench_val * BENCH_LB - BENCH_ALLOWED_NOISE,
                                 bench_val * BENCH_UB + BENCH_ALLOWED_NOISE)
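
Each line printed by this function is a CSV record of key, expected value, lower bound, and upper bound. As a purely illustrative example, assuming BENCH_LB = 0.9, BENCH_UB = 1.1 and BENCH_ALLOWED_NOISE = 0.1 (the real constants are defined elsewhere in the script), a bench value of 5.0 would print a line of the form:

<bench>_<config>_<time_type>,<platform>-<representation_alg>,5.000,4.400,5.600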
Code example #4
def parse_dir(directory, default_settings, oldest_revision, newest_revision,
              rep):
    """Parses bench data from files like bench_r<revision>_<scalar>.
    
    (str, {str, str}, Number, Number) -> {int:[BenchDataPoints]}"""
    revision_data_points = {}  # {revision : [BenchDataPoints]}
    file_list = os.listdir(directory)
    file_list.sort()
    for bench_file in file_list:
        file_name_match = re.match('bench_r(\d+)_(\S+)', bench_file)
        if (file_name_match is None):
            continue

        revision = int(file_name_match.group(1))
        scalar_type = file_name_match.group(2)

        if (revision < oldest_revision or revision > newest_revision):
            continue

        file_handle = open(directory + '/' + bench_file, 'r')

        if (revision not in revision_data_points):
            revision_data_points[revision] = []
        default_settings['scalar'] = scalar_type
        revision_data_points[revision].extend(
            bench_util.parse(default_settings, file_handle, rep))
        file_handle.close()
    return revision_data_points
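
A hypothetical call site for this version, just to show the shape of the returned dictionary (the directory name, settings dict and representation argument below are illustrative, not taken from the original script):

# Illustrative usage only; argument values are assumptions.
points_by_rev = parse_dir('bench_logs', {'build': 'Release'}, 7500, 7600, 'avg')
for rev in sorted(points_by_rev.keys()):
    print rev, len(points_by_rev[rev]), 'data points'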
Code example #5
File: bench_graph_svg.py  Project: Cue/skia
def parse_dir(directory, default_settings, oldest_revision, newest_revision,
              rep):
    """Parses bench data from files like bench_r<revision>_<scalar>.
    
    (str, {str, str}, Number, Number) -> {int:[BenchDataPoints]}"""
    revision_data_points = {} # {revision : [BenchDataPoints]}
    for bench_file in os.listdir(directory):
        file_name_match = re.match('bench_r(\d+)_(\S+)', bench_file)
        if (file_name_match is None):
            continue

        revision = int(file_name_match.group(1))
        scalar_type = file_name_match.group(2)

        if (revision < oldest_revision or revision > newest_revision):
            continue

        file_handle = open(directory + '/' + bench_file, 'r')

        if (revision not in revision_data_points):
            revision_data_points[revision] = []
        default_settings['scalar'] = scalar_type
        revision_data_points[revision].extend(
                        bench_util.parse(default_settings, file_handle, rep))
        file_handle.close()
    return revision_data_points
Code example #6
def OutputSkpBenchExpectations(rev_min, rev_max, representation_alg):
    """Reads skp bench data from google storage, and outputs expectations.

  Ignores data with revisions outside [rev_min, rev_max] integer range. For
  bench data with multiple revisions, we use higher revisions to calculate
  expected bench values.
  Uses the provided representation_alg for calculating bench representations.
  """
    expectation_dic = {}
    uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
    for obj in uri.get_bucket():
        # Filters out non-skp-bench files.
        if ((not obj.name.startswith('perfdata/Skia_')
             and not obj.name.startswith('playback/perfdata/Skia_'))
                or obj.name.find('_data_skp_') < 0):
            continue
        # Ignores uninterested platforms.
        platform = obj.name.split('/')[1]
        if not platform.startswith('Skia_'):
            platform = obj.name.split('/')[2]
        if not platform.startswith('Skia_'):
            continue  # Ignores non-platform object
        platform = platform[5:]  # Removes "Skia_" prefix.
        if platform not in PLATFORMS:
            continue
        # Filters by revision.
        to_filter = True
        for rev in range(rev_min, rev_max + 1):
            if '_r%s_' % rev in obj.name:
                to_filter = False
                break
        if to_filter:
            continue
        contents = cStringIO.StringIO()
        obj.get_file(contents)
        for point in bench_util.parse('',
                                      contents.getvalue().split('\n'),
                                      representation_alg):
            if point.config in CONFIGS_TO_FILTER:
                continue

            key = '%s_%s_%s,%s-%s' % (point.bench, point.config,
                                      point.time_type, platform,
                                      representation_alg)
            # It is fine to have later revisions overwrite earlier benches, since we
            # only use the latest bench within revision range to set expectations.
            expectation_dic[key] = point.time
    keys = expectation_dic.keys()
    keys.sort()
    for key in keys:
        bench_val = expectation_dic[key]
        # Prints out expectation lines.
        print '%s,%.3f,%.3f,%.3f' % (
            key, bench_val, bench_val * BENCH_LB - BENCH_ALLOWED_NOISE,
            bench_val * BENCH_UB + BENCH_ALLOWED_NOISE)
Code example #7
File: gen_skp_ranges.py  Project: jdm/skia
def OutputSkpBenchExpectations(rev_min, rev_max, representation_alg):
    """Reads skp bench data from google storage, and outputs expectations.

  Ignores data with revisions outside [rev_min, rev_max] integer range. For
  bench data with multiple revisions, we use higher revisions to calculate
  expected bench values.
  Uses the provided representation_alg for calculating bench representations.
  """
    expectation_dic = {}
    uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
    for obj in uri.get_bucket():
        # Filters out non-skp-bench files.
        if (
            not obj.name.startswith("perfdata/Skia_") and not obj.name.startswith("playback/perfdata/Skia_")
        ) or obj.name.find("_data_skp_") < 0:
            continue
        # Ignores uninterested platforms.
        platform = obj.name.split("/")[1]
        if not platform.startswith("Skia_"):
            platform = obj.name.split("/")[2]
        if not platform.startswith("Skia_"):
            continue  # Ignores non-platform object
        platform = platform[5:]  # Removes "Skia_" prefix.
        if platform not in PLATFORMS:
            continue
        # Filters by revision.
        to_filter = True
        for rev in range(rev_min, rev_max + 1):
            if "_r%s_" % rev in obj.name:
                to_filter = False
                break
        if to_filter:
            continue
        contents = cStringIO.StringIO()
        obj.get_file(contents)
        for point in bench_util.parse("", contents.getvalue().split("\n"), representation_alg):
            if point.config in CONFIGS_TO_FILTER:
                continue

            key = "%s_%s_%s,%s-%s" % (point.bench, point.config, point.time_type, platform, representation_alg)
            # It is fine to have later revisions overwrite earlier benches, since we
            # only use the latest bench within revision range to set expectations.
            expectation_dic[key] = point.time
    keys = expectation_dic.keys()
    keys.sort()
    for key in keys:
        bench_val = expectation_dic[key]
        # Prints out expectation lines.
        print "%s,%.3f,%.3f,%.3f" % (
            key,
            bench_val,
            bench_val * BENCH_LB - BENCH_ALLOWED_NOISE,
            bench_val * BENCH_UB + BENCH_ALLOWED_NOISE,
        )
Code example #8
def OutputSkpBenchExpectations(rev_min, rev_max, representation_alg):
    """Reads skp bench data from google storage, and outputs expectations.

  Ignores data with revisions outside [rev_min, rev_max] integer range. For
  bench data with multiple revisions, we use higher revisions to calculate
  expected bench values.
  Uses the provided representation_alg for calculating bench representations.
  """
    expectation_dic = {}
    uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
    for obj in uri.get_bucket():
        # Filters out non-skp-bench files.
        if (not obj.name.startswith('perfdata/Skia_')
                or obj.name.find('_data_skp_') < 0):
            continue
        # Ignores uninterested platforms.
        platform = obj.name.split('/')[1][5:]  # Removes "Skia_" prefix.
        if platform not in PLATFORMS:
            continue
        # Filters by revision.
        for rev in range(rev_min, rev_max + 1):
            if '_r%s_' % rev not in obj.name:
                continue
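        # Note: as written, the revision loop above has no effect; the
        # 'continue' only advances the inner 'for rev' loop, so no objects
        # are actually skipped here. Later versions (code examples #6 and #7)
        # use a to_filter flag plus 'break' to perform this filtering.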

        contents = cStringIO.StringIO()
        obj.get_file(contents)
        for point in bench_util.parse('',
                                      contents.getvalue().split('\n'),
                                      representation_alg):
            if point.config in CONFIGS_TO_FILTER:
                continue
            # TODO(bensong): the filtering below is only needed during skp generation
            # system transitioning. Change it once the new system (bench name starts
            # with http) is stable for the switch-over, and delete it once we
            # deprecate the old ones.
            if point.bench.startswith('http'):
                continue

            key = '%s_%s_%s,%s-%s' % (point.bench, point.config,
                                      point.time_type, platform,
                                      representation_alg)
            # It is fine to have later revisions overwrite earlier benches, since we
            # only use the latest bench within revision range to set expectations.
            expectation_dic[key] = point.time
    keys = expectation_dic.keys()
    keys.sort()
    for key in keys:
        bench_val = expectation_dic[key]
        # Prints out expectation lines.
        print '%s,%.3f,%.3f,%.3f' % (
            key, bench_val, bench_val * BENCH_LB - BENCH_ALLOWED_NOISE,
            bench_val * BENCH_UB + BENCH_ALLOWED_NOISE)
Code example #9
File: gen_skp_ranges.py  Project: Cue/skia
def OutputSkpBenchExpectations(rev_min, rev_max, representation_alg):
  """Reads skp bench data from google storage, and outputs expectations.

  Ignores data with revisions outside [rev_min, rev_max] integer range. For
  bench data with multiple revisions, we use higher revisions to calculate
  expected bench values.
  Uses the provided representation_alg for calculating bench representations.
  """
  expectation_dic = {}
  uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
  for obj in uri.get_bucket():
    # Filters out non-skp-bench files.
    if (not obj.name.startswith('perfdata/Skia_') or
        obj.name.find('_data_skp_') < 0):
      continue
    # Ignores uninterested platforms.
    platform = obj.name.split('/')[1][5:]  # Removes "Skia_" prefix.
    if platform not in PLATFORMS:
      continue
    # Filters by revision.
    for rev in range(rev_min, rev_max + 1):
      if '_r%s_' % rev not in obj.name:
        continue

    contents = cStringIO.StringIO()
    obj.get_file(contents)
    for point in bench_util.parse('', contents.getvalue().split('\n'),
                                  representation_alg):
      if point.config in CONFIGS_TO_FILTER:
        continue
      # TODO(bensong): the filtering below is only needed during skp generation
      # system transitioning. Change it once the new system (bench name starts
      # with http) is stable for the switch-over, and delete it once we
      # deprecate the old ones.
      if point.bench.startswith('http'):
        continue

      key = '%s_%s_%s,%s-%s' % (point.bench, point.config, point.time_type,
                                platform, representation_alg)
      # It is fine to have later revisions overwrite earlier benches, since we
      # only use the latest bench within revision range to set expectations.
      expectation_dic[key] = point.time
  keys = expectation_dic.keys()
  keys.sort()
  for key in keys:
    bench_val = expectation_dic[key]
    # Prints out expectation lines.
    print '%s,%.3f,%.3f,%.3f' % (key, bench_val,
                                 bench_val * BENCH_LB - BENCH_ALLOWED_NOISE,
                                 bench_val * BENCH_UB + BENCH_ALLOWED_NOISE)
Code example #10
    else:
        usage()
        sys.exit(2)

if header:
    print header_format.format(
        bench='bench'
        , config='conf'
        , time_type='time'
        , old_time='old'
        , new_time='new'
        , diff='diff'
        , diffp='diffP'
    )

old_benches = bench_util.parse({}, open(old, 'r'))
new_benches = bench_util.parse({}, open(new, 'r'))

bench_diffs = []
for old_bench in old_benches:
    #filter new_benches for benches that match old_bench
    new_bench_match = [bench for bench in new_benches
        if old_bench.bench == bench.bench and
           old_bench.config == bench.config and
           old_bench.time_type == bench.time_type
    ]
    if (len(new_bench_match) < 1):
        continue
    bench_diffs.append(BenchDiff(old_bench, new_bench_match[0]))

bench_diffs.sort(key=lambda d : [d.diffp,
Code example #11
File: bench_compare.py  Project: AHPlankton/skia
            columns = value
        elif option == "-s":
            stat_type = value
        elif option == "-t":
            use_tabs = True
        elif option == "--match":
            match_bench = value
        else:
            usage()
            assert False, "unhandled option"

    if old is None or new is None:
        usage()
        sys.exit(2)

    old_benches = bench_util.parse({}, open(old, 'r'), stat_type)
    new_benches = bench_util.parse({}, open(new, 'r'), stat_type)

    bench_diffs = []
    for old_bench in old_benches:
        #filter benches by the match criteria
        if match_bench and not old_bench.bench.startswith(match_bench):
            continue

        #filter new_benches for benches that match old_bench
        new_bench_match = [bench for bench in new_benches
            if old_bench.bench == bench.bench and
               old_bench.config == bench.config and
               old_bench.time_type == bench.time_type
        ]
        if (len(new_bench_match) < 1):
Code example #12
def OutputTileAnalysis(rev, representation_alg, bench_dir, platform):
    """Reads skp bench data and outputs tile vs. viewport analysis for the given
  platform.

  Ignores data with revisions other than rev. If bench_dir is not empty, read
  from the local directory instead of Google Storage.
  Uses the provided representation_alg for calculating bench representations.

  Returns (js_codes, body_codes): strings of js/html codes for stats and
  visualization.
  """
    js_codes = ''
    body_codes = ('}</script></head><body>'
                  '<h3>PLATFORM: %s REVISION: %s</h3><br>' % (platform, rev))
    bench_dic = {}  # [bench][config] -> [layout, [values]]
    file_dic = GetFiles(rev, bench_dir, platform)
    for f in file_dic:
        for point in bench_util.parse('', file_dic[f].split('\n'),
                                      representation_alg):
            if point.time_type:  # Ignores non-walltime time_type.
                continue
            bench = point.bench.replace('.skp', '')
            config = point.config.replace('simple_', '')
            components = config.split('_')
            if components[0] == 'viewport':
                bench_dic.setdefault(
                    bench, {})[config] = [components[1], [point.time]]
            else:  # Stores per-tile benches.
                bench_dic.setdefault(bench, {})[config] = [
                    point.tile_layout, point.per_tile_values
                ]
    benches = bench_dic.keys()
    benches.sort()
    for bench in benches:
        body_codes += '<h4>%s</h4><br><table><tr>' % bench
        heat_plots = ''  # For table row of heatmap plots.
        table_plots = ''  # For table row of data table plots.
        # For bar plot legends and values in URL string.
        legends = ''
        values = ''
        keys = bench_dic[bench].keys()
        keys.sort()
        if not keys[-1].startswith(
                'viewport'):  # No viewport to analyze; skip.
            continue
        else:
            # Extracts viewport size, which for all viewport configs is the same.
            viewport = bench_dic[bench][keys[-1]][0]
        for config in keys:
            [layout, value_li] = bench_dic[bench][config]
            if config.startswith(
                    'tile_'):  # For per-tile data, visualize tiles.
                tile_size = config.split('_')[1]
                if (not re.search(DIMENSIONS_RE, layout)
                        or not re.search(DIMENSIONS_RE, tile_size)
                        or not re.search(DIMENSIONS_RE, viewport)):
                    continue  # Skip unrecognized formats.
                [viewport_tile_sum,
                 matrix] = GetTileMatrix(layout, tile_size, value_li, viewport)
                values += '%s|' % viewport_tile_sum
                [this_js, row1,
                 row2] = GetTileVisCodes(config + '_' + bench, matrix)
                heat_plots += row1
                table_plots += row2
                js_codes += this_js
            else:  # For viewport data, there is only one element in value_li.
                values += '%s|' % sum(value_li)
            legends += '%s:%s|' % (config, sum(value_li))
        body_codes += (heat_plots + '</tr><tr>' + table_plots +
                       '</tr></table>' + '<br>' + BAR_CHART_TEMPLATE %
                       (legends[:-1], values[:-1]))

    return (js_codes, body_codes)
Code example #13
            column_format += column_formats[column_char]
            header_format += header_formats[column_char]
        else:
            usage()
            sys.exit(2)

    if header:
        print header_format.format(bench='bench',
                                   config='conf',
                                   time_type='time',
                                   old_time='old',
                                   new_time='new',
                                   diff='diff',
                                   diffp='diffP')

    old_benches = bench_util.parse({}, open(old, 'r'))
    new_benches = bench_util.parse({}, open(new, 'r'))

    bench_diffs = []
    for old_bench in old_benches:
        #filter new_benches for benches that match old_bench
        new_bench_match = [
            bench for bench in new_benches
            if old_bench.bench == bench.bench and old_bench.config ==
            bench.config and old_bench.time_type == bench.time_type
        ]
        if (len(new_bench_match) < 1):
            continue
        bench_diffs.append(BenchDiff(old_bench, new_bench_match[0]))

    bench_diffs.sort(key=lambda d: [
Code example #14
            columns = value
        elif option == "-s":
            stat_type = value
        elif option == "-t":
            use_tabs = True
        elif option == "--match":
            match_bench = value
        else:
            usage()
            assert False, "unhandled option"

    if old is None or new is None:
        usage()
        sys.exit(2)

    old_benches = bench_util.parse({}, open(old, 'r'), stat_type)
    new_benches = bench_util.parse({}, open(new, 'r'), stat_type)

    bench_diffs = []
    for old_bench in old_benches:
        #filter benches by the match criteria
        if match_bench and not old_bench.bench.startswith(match_bench):
            continue

        #filter new_benches for benches that match old_bench
        new_bench_match = [
            bench for bench in new_benches
            if old_bench.bench == bench.bench and old_bench.config ==
            bench.config and old_bench.time_type == bench.time_type
        ]
        if (len(new_bench_match) < 1):
Code example #15
File: tile_analyze.py  Project: DiamondLovesYou/skia
def OutputTileAnalysis(rev, representation_alg, bench_dir, platform):
    """Reads skp bench data and outputs tile vs. viewport analysis for the given
  platform.

  Ignores data with revisions other than rev. If bench_dir is not empty, read
  from the local directory instead of Google Storage.
  Uses the provided representation_alg for calculating bench representations.

  Returns (js_codes, body_codes): strings of js/html codes for stats and
  visualization.
  """
    js_codes = ""
    body_codes = "}</script></head><body>" "<h3>PLATFORM: %s REVISION: %s</h3><br>" % (platform, rev)
    bench_dic = {}  # [bench][config] -> [layout, [values]]
    file_dic = GetFiles(rev, bench_dir, platform)
    for f in file_dic:
        for point in bench_util.parse("", file_dic[f].split("\n"), representation_alg):
            if point.time_type:  # Ignores non-walltime time_type.
                continue
            bench = point.bench.replace(".skp", "")
            config = point.config.replace("simple_", "")
            components = config.split("_")
            if components[0] == "viewport":
                bench_dic.setdefault(bench, {})[config] = [components[1], [point.time]]
            else:  # Stores per-tile benches.
                bench_dic.setdefault(bench, {})[config] = [point.tile_layout, point.per_tile_values]
    benches = bench_dic.keys()
    benches.sort()
    for bench in benches:
        body_codes += "<h4>%s</h4><br><table><tr>" % bench
        heat_plots = ""  # For table row of heatmap plots.
        table_plots = ""  # For table row of data table plots.
        # For bar plot legends and values in URL string.
        legends = ""
        values = ""
        keys = bench_dic[bench].keys()
        keys.sort()
        if not keys[-1].startswith("viewport"):  # No viewport to analyze; skip.
            continue
        else:
            # Extracts viewport size, which for all viewport configs is the same.
            viewport = bench_dic[bench][keys[-1]][0]
        for config in keys:
            [layout, value_li] = bench_dic[bench][config]
            if config.startswith("tile_"):  # For per-tile data, visualize tiles.
                tile_size = config.split("_")[1]
                if (
                    not re.search(DIMENSIONS_RE, layout)
                    or not re.search(DIMENSIONS_RE, tile_size)
                    or not re.search(DIMENSIONS_RE, viewport)
                ):
                    continue  # Skip unrecognized formats.
                [viewport_tile_sum, matrix] = GetTileMatrix(layout, tile_size, value_li, viewport)
                values += "%s|" % viewport_tile_sum
                [this_js, row1, row2] = GetTileVisCodes(config + "_" + bench, matrix)
                heat_plots += row1
                table_plots += row2
                js_codes += this_js
            else:  # For viewport data, there is only one element in value_li.
                values += "%s|" % sum(value_li)
            legends += "%s:%s|" % (config, sum(value_li))
        body_codes += (
            heat_plots
            + "</tr><tr>"
            + table_plots
            + "</tr></table>"
            + "<br>"
            + BAR_CHART_TEMPLATE % (legends[:-1], values[:-1])
        )

    return (js_codes, body_codes)
Code example #16
File: bench_compare.py  Project: ghub/NVprSDK
        sys.exit(2)

    for column_char in columns:
        if column_formats[column_char]:
            column_format += column_formats[column_char]
            header_format += header_formats[column_char]
        else:
            usage()
            sys.exit(2)

    if header:
        print header_format.format(
            bench="bench", config="conf", time_type="time", old_time="old", new_time="new", diff="diff", diffp="diffP"
        )

    old_benches = bench_util.parse({}, open(old, "r"))
    new_benches = bench_util.parse({}, open(new, "r"))

    bench_diffs = []
    for old_bench in old_benches:
        # filter new_benches for benches that match old_bench
        new_bench_match = [
            bench
            for bench in new_benches
            if old_bench.bench == bench.bench
            and old_bench.config == bench.config
            and old_bench.time_type == bench.time_type
        ]
        if len(new_bench_match) < 1:
            continue
        bench_diffs.append(BenchDiff(old_bench, new_bench_match[0]))
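
BenchDiff itself is not shown in any of these excerpts. From the column headers (old, new, diff, diffP) and the sort on d.diffp, it presumably pairs an old and a new BenchDataPoint and records their absolute and relative time difference. A hypothetical sketch follows; every field name beyond what the callers reference is an assumption.

class BenchDiff(object):
    """Hypothetical sketch of BenchDiff, inferred from how bench_compare.py
    uses it; not the actual Skia implementation."""
    def __init__(self, old_bench, new_bench):
        self.bench = old_bench.bench
        self.config = old_bench.config
        self.time_type = old_bench.time_type
        self.old_time = old_bench.time
        self.new_time = new_bench.time
        self.diff = self.new_time - self.old_time
        if self.old_time:
            self.diffp = self.diff / self.old_time
        else:
            self.diffp = 0.0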
Code example #17
def OutputBenchExpectations(bench_type, rev_min, rev_max, representation_alg):
    """Reads bench data from google storage, and outputs expectations.

  Ignores data with revisions outside [rev_min, rev_max] integer range. For
  bench data with multiple revisions, we use higher revisions to calculate
  expected bench values.
  bench_type is either 'micro' or 'skp', according to the flag '-b'.
  Uses the provided representation_alg for calculating bench representations.
  """
    if bench_type not in BENCH_TYPES:
        raise Exception('Not valid bench_type! (%s)' % BENCH_TYPES)
    expectation_dic = {}
    uri = boto.storage_uri(URI_BUCKET, GOOGLE_STORAGE_URI_SCHEME)
    for obj in uri.get_bucket():
        # Filters out non-bench files.
        if ((not obj.name.startswith('perfdata/%s' % BENCH_BUILDER_PREFIX)
             and not obj.name.startswith(
                 'playback/perfdata/%s' % BENCH_BUILDER_PREFIX))
                or obj.name.find('_data') < 0):
            continue
        if ((bench_type == 'micro' and obj.name.find('_data_skp_') > 0)
                or (bench_type == 'skp' and obj.name.find('_skp_') < 0)):
            # Skips wrong bench type.
            continue
        # Ignores uninterested platforms.
        platform = obj.name.split('/')[1]
        if not platform.startswith(BENCH_BUILDER_PREFIX):
            platform = obj.name.split('/')[2]
        if not platform.startswith(BENCH_BUILDER_PREFIX):
            continue  # Ignores non-platform object
        if platform not in PLATFORMS:
            continue
        # Filters by revision.
        to_filter = True
        for rev in range(rev_min, rev_max + 1):
            if '_r%s_' % rev in obj.name:
                to_filter = False
                break
        if to_filter:
            continue
        contents = cStringIO.StringIO()
        obj.get_file(contents)
        for point in bench_util.parse('',
                                      contents.getvalue().split('\n'),
                                      representation_alg):
            if point.config in CONFIGS_TO_FILTER:
                continue

            key = '%s_%s_%s,%s-%s' % (point.bench, point.config,
                                      point.time_type, platform,
                                      representation_alg)
            # It is fine to have later revisions overwrite earlier benches, since we
            # only use the latest bench within revision range to set expectations.
            expectation_dic[key] = point.time
    keys = expectation_dic.keys()
    keys.sort()
    for key in keys:
        bench_val = expectation_dic[key]
        # Prints out expectation lines.
        print '%s,%.3f,%.3f,%.3f' % (
            key, bench_val, bench_val * BENCH_LB - BENCH_ALLOWED_NOISE,
            bench_val * BENCH_UB + BENCH_ALLOWED_NOISE)