def _LoadTimingData(args):
  """Fetch story timing data for one builder and dump it to a JSON file.

  Args:
    args: a (builder, timing_file_path) tuple; packed so this can be used
        with pool-style map APIs that pass a single argument.
  """
  builder, timing_file_path = args
  # Average per-story timings recorded for this builder over the last 5 days.
  timing_data = retrieve_story_timing.FetchAverageStoryTimingData(
      configurations=[builder.name], num_last_days=5)
  # Gtest executables have no recorded story timings, so append synthetic
  # entries based on each executable's estimated runtime.
  for exe in builder.executables:
    entry = {
        u'duration': unicode(float(exe.estimated_runtime)),
        u'name': unicode(exe.name + '/' + bot_platforms.GTEST_STORY_NAME),
    }
    timing_data.append(entry)
  _DumpJson(timing_data, timing_file_path)
  print('Finished retrieving story timing data for %s' % repr(builder.name))
# Example 2 (originally labeled "Beispiel #2"):
def _LoadTimingData(args):
    """Fetch story timing data for one builder and dump it to a JSON file.

    Args:
        args: a (builder, timing_file_path) tuple; packed so this can be
            used with pool-style map APIs that pass a single argument.
    """
    builder, timing_file_path = args
    # Average per-story timings recorded for this builder over the last
    # 5 days.
    timing_data = retrieve_story_timing.FetchAverageStoryTimingData(
        configurations=[builder.name], num_last_days=5)
    if builder.run_reference_build:
        # Running against a reference build doubles our runtime, so double
        # each story's expected duration to account for it. Gtest perf
        # tests can't run against reference builds, so the synthetic gtest
        # entries appended below are deliberately left untouched.
        for story in timing_data:
            doubled = float(story['duration']) * 2.0
            story['duration'] = unicode(doubled)
    # Gtest executables have no recorded story timings, so append synthetic
    # entries based on each executable's estimated runtime.
    for exe in builder.executables:
        entry = {
            u'duration': unicode(float(exe.estimated_runtime)),
            u'name': unicode(exe.name + '/' + bot_platforms.GTEST_STORY_NAME),
        }
        timing_data.append(entry)
    _DumpJson(timing_data, timing_file_path)
    print('Finished retrieving story timing data for %s' % repr(builder.name))