Example #1
def ConvertProtoTracesToJson(trace_processor_path, proto_files, json_path):
    """Concatenate proto traces and convert them to json with trace processor."""
    if not os.path.isfile(trace_processor_path):
        raise RuntimeError("Can't find trace processor executable at %s" %
                           trace_processor_path)

    with tempfile_ext.NamedTemporaryFile() as concatenated_trace:
        for trace_file in proto_files:
            if trace_file.endswith('.pb.gz'):
                with gzip.open(trace_file, 'rb') as f:
                    shutil.copyfileobj(f, concatenated_trace)
            else:
                with open(trace_file, 'rb') as f:
                    shutil.copyfileobj(f, concatenated_trace)
        concatenated_trace.close()

        with tempfile_ext.NamedTemporaryFile() as query_file:
            query_file.write(EXPORT_JSON_QUERY_TEMPLATE %
                             _SqlString(json_path))
            query_file.close()
            subprocess.check_call([
                trace_processor_path,
                concatenated_trace.name,
                '-q',
                query_file.name,
            ])

    logging.info('Converted json trace written to %s', json_path)

    return json_path
Example #2
 def testFoundBrokenExpectations(self):
     test_expectations = ('# tags: [ mac ]\n'
                          '# results: [ Failure ]\n'
                          '[ mac ] a/b/d [ Failure ]\n'
                          'a/c/* [ Failure ]\n')
     options = gpu_helper.GetMockArgs()
     test_class = gpu_integration_test.GpuIntegrationTest
     with tempfile_ext.NamedTemporaryFile() as expectations_file,            \
          mock.patch.object(
              test_class, 'GenerateGpuTests', return_value=[('a/b/c', ())]), \
          mock.patch.object(
              test_class,
              'ExpectationsFiles', return_value=[expectations_file.name]):
         expectations_file.write(test_expectations)
         expectations_file.close()
         with self.assertRaises(AssertionError) as context:
             CheckTestExpectationsAreForExistingTests(
                 self, test_class, options)
         self.assertIn(
             'The following expectations were found to not apply'
             ' to any tests in the GpuIntegrationTest test suite',
             str(context.exception))
         self.assertIn(
             '4: Expectation with pattern \'a/c/*\' does not match'
             ' any tests in the GpuIntegrationTest test suite',
             str(context.exception))
         self.assertIn(
             '3: Expectation with pattern \'a/b/d\' does not match'
             ' any tests in the GpuIntegrationTest test suite',
             str(context.exception))
Example #3
def StartPinpointJobs(state, date):
    """Start new pinpoint jobs for the last commit on the given date."""
    revision, timestamp = GetLastCommitOfDate(date)
    if any(item['revision'] == revision for item in state):
        logging.info('No new jobs to start.')
        return

    # Add a new item to the state with info about jobs for this revision.
    logging.info('Starting jobs for %s (%s):', timestamp[:10], revision)
    item = {'revision': revision, 'timestamp': timestamp, 'jobs': []}
    configs = LoadJsonFile(JOB_CONFIGS_PATH)
    for config in configs:
        config['base_git_hash'] = revision
        with tempfile_ext.NamedTemporaryFile() as tmp:
            json.dump(config, tmp)
            tmp.close()
            output = subprocess.check_output(
                ['vpython', PINPOINT_CLI, 'start-job', tmp.name],
                universal_newlines=True).strip()
        logging.info(output)
        assert 'https://pinpoint' in output
        bot = config['configuration']
        patch = config['patch']
        item['jobs'].append({
            'id': output.split('/')[-1],
            'status': 'queued',
            'bot': bot,
            'patch': patch,
        })
    state.append(item)
    state.sort(key=lambda p: p['timestamp'])  # Keep items sorted by date.
Example #4
def ConvertProtoTraceToJson(trace_processor_path, proto_file, json_path):
    """Convert proto trace to json using trace processor.

  Args:
    trace_processor_path: path to the trace_processor executable.
    proto_file: path to the proto trace file.
    json_path: path to the output file.

  Returns:
    Output path.
  """
    _CheckTraceProcessor(trace_processor_path)
    with tempfile_ext.NamedTemporaryFile() as query_file:
        query_file.write(EXPORT_JSON_QUERY_TEMPLATE % _SqlString(json_path))
        query_file.close()
        subprocess.check_call([
            trace_processor_path,
            proto_file,
            '-q',
            query_file.name,
        ])

    logging.info('Converted json trace written to %s', json_path)

    return json_path
Example #5
def GetLatestPath(binary_name, platform):
    with tempfile_ext.NamedTemporaryFile() as latest_file:
        latest_file.close()
        remote_path = posixpath.join(BINARY_CS_FOLDER, binary_name, platform,
                                     LATEST_FILENAME)
        cloud_storage.Get(BINARY_BUCKET, remote_path, latest_file.name)
        with open(latest_file.name) as latest:
            return latest.read()
Example #6
def _SetLatestPathForBinary(binary_name, platform, latest_path):
    with tempfile_ext.NamedTemporaryFile() as latest_file:
        latest_file.write(latest_path)
        latest_file.close()
        remote_latest_file = posixpath.join(BINARY_CS_FOLDER, binary_name,
                                            platform, LATEST_FILENAME)
        cloud_storage.Insert(BINARY_BUCKET,
                             remote_latest_file,
                             latest_file.name,
                             publicly_readable=True)
Example #7
  def testNamedTemporaryFile(self):
    with tempfile_ext.NamedTemporaryFile() as f:
      self.assertTrue(os.path.isfile(f.name))
      f.write('<data>')
      f.close()
      self.assertTrue(os.path.exists(f.name))
      with open(f.name) as f2:
        self.assertEqual(f2.read(), '<data>')

    self.assertFalse(os.path.exists(f.name))
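
The test above captures the behaviour that all of these examples rely on: the file created by tempfile_ext.NamedTemporaryFile survives an explicit close(), so it can be reopened by name or handed to a subprocess, and it is removed only when the with block exits. A minimal sketch of that write/close/reopen pattern, assuming the catapult py_utils import path:

from py_utils import tempfile_ext

# Write a payload, close the handle so another reader (or a subprocess) can
# open the file by name, and rely on the context manager to delete it on exit.
with tempfile_ext.NamedTemporaryFile(mode='w+') as f:
    f.write('payload')
    f.close()  # The file remains on disk after close().
    with open(f.name) as reader:
        assert reader.read() == 'payload'
# Once the with block exits, f.name has been removed.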
Example #8
    def testUploadTestArtifacts(self, mock_gsh):
        link = self._obj._UploadTestArtifacts(mock.MagicMock(), None)
        self.assertFalse(mock_gsh.called)
        self.assertIsNone(link)

        result = 'A/10/warthog/path'
        mock_gsh.return_value = result
        with tempfile_ext.NamedTemporaryFile() as temp_f:
            link = self._obj._UploadTestArtifacts(mock.MagicMock(), temp_f)
        self.assertTrue(mock_gsh.called)
        self.assertEqual(result, link)
Example #9
def UpdateJobsState(state):
  """Write back the updated state of pinpoint jobs.

  If there were any changes to the state, i.e. new jobs were created or
  existing ones completed, both the local cached copy and the backup in cloud
  storage are updated.
  """
  local_path = CachedFilePath(JOBS_STATE_FILE)
  with tempfile_ext.NamedTemporaryFile() as tmp:
    json.dump(state, tmp, sort_keys=True, indent=2, separators=(',', ': '))
    tmp.close()
    if not os.path.exists(local_path) or not filecmp.cmp(tmp.name, local_path):
      shutil.copyfile(tmp.name, local_path)
      UploadToCloudStorage(local_path)
Example #10
def ProcessHistogramDicts(histogram_dicts, options):
    """Convert histogram dicts to CSV and write output in output_dir."""
    with tempfile_ext.NamedTemporaryFile() as hist_file:
        json.dump(histogram_dicts, hist_file)
        hist_file.close()
        vinn_result = histograms_to_csv.HistogramsToCsv(hist_file.name)
        csv_dicts = _ReadCsv(vinn_result.stdout.splitlines())

    output_file = os.path.join(options.output_dir, OUTPUT_FILENAME)
    if not options.reset_results and os.path.isfile(output_file):
        with open(output_file) as input_stream:
            csv_dicts += _ReadCsv(input_stream)

    with open(output_file, 'w') as output_stream:
        _WriteCsv(csv_dicts, output_stream)
Example #11
def _GetModuleIDFromBinary(dump_syms_path, symbol_binary):
    """Gets module ID of symbol binary.

  Args:
    dump_syms_path: The path to the dump_syms binary that should be run.
    symbol_binary: path to symbol binary.

  Returns:
    Module ID from symbol binary, or None if fails to extract.
  """
    # Creates temp file because |_RunDumpSyms| pipes result into a file.
    # After extracting the module ID, we do not need this output file.
    with tempfile_ext.NamedTemporaryFile(mode='w+') as output_file:
        output_file.close()  # RunDumpsyms opens the file again.
        if not _RunDumpSyms(dump_syms_path,
                            symbol_binary,
                            output_file.name,
                            only_module_header=True):
            return None
        return rename_breakpad.ExtractModuleIdIfValidBreakpad(output_file.name)
Example #12
def ConvertProtoTraceToJson(trace_processor_path, proto_file, json_path):
  """Convert proto trace to json using trace processor.

  Args:
    trace_processor_path: path to the trace_processor executable.
    proto_file: path to the proto trace file.
    json_path: path to the output file.

  Returns:
    Output path.
  """
  trace_processor_path = _EnsureTraceProcessor(trace_processor_path)
  with tempfile_ext.NamedTemporaryFile() as query_file:
    query_file.write(EXPORT_JSON_QUERY_TEMPLATE % _SqlString(json_path))
    query_file.close()
    _RunTraceProcessor(
        trace_processor_path,
        '-q', query_file.name,
        proto_file,
    )

  return json_path
Example #13
def _CalculateHash(remote_path):
    with tempfile_ext.NamedTemporaryFile() as f:
        f.close()
        cloud_storage.Get(BINARY_BUCKET, remote_path, f.name)
        return cloud_storage.CalculateHash(f.name)
Example #14
def RunQuery(trace_processor_path, trace_file, sql_command):
    """Run SQL query on trace using trace processor and return result.

  Args:
    trace_processor_path: path to the trace_processor executable.
    trace_file: path to the trace file.
    sql_command: string SQL command

  Returns:
    SQL query output table when executed on the proto trace as a
    list of dictionaries. Each item in the list represents a row
    in the output table. All values in the dictionary are
    represented as strings. Null is represented as None.
    Booleans are represented as '0' and '1'. Empty queries
    or rows return [].

    For example, for a SQL output table that looks like this:
      | "string_col" | "long_col" | "double_col" | "bool_col" | "maybe_null_col"
      | "StringVal1" |  123       | 12.34        | true       | "[NULL]"
      | "StringVal2" |  124       | 34.56        | false      |  25
      | "StringVal3" |  125       | 68.92        | false      | "[NULL]"

    The list of dictionaries result will look like this:
      [{
        'string_col': 'StringVal1',
        'long_col': '123',
        'double_col': '12.34',
        'bool_col': '1',
        'maybe_null_col': None,
      }, {
        'string_col': 'StringVal2',
        'long_col': '124',
        'double_col': '34.56',
        'bool_col': '0',
        'maybe_null_col': '25',
      }, {
        'string_col': 'StringVal3',
        'long_col': '125',
        'double_col': '68.92',
        'bool_col': '0',
        'maybe_null_col': None,
      }]
  """
    trace_processor_path = _EnsureTraceProcessor(trace_processor_path)

    # Write query to temporary file because trace processor accepts
    # SQL query in a file.
    tp_output = None
    with tempfile_ext.NamedTemporaryFile(mode="w+") as sql_file:
        sql_file.write(sql_command)
        sql_file.close()
        # Run Trace Processor
        command_args = [
            trace_processor_path,
            '--query-file',
            sql_file.name,
            trace_file,
        ]
        tp_output = _RunTraceProcessor(*command_args)

    # Trace Processor returns output string in csv format. Write
    # string to temporary file because reader accepts csv files.
    # Parse csv file into list of dictionaries because DictReader
    # object inconveniently requires open csv file to access data.
    csv_output = []
    # tempfile creates and opens the file
    with tempfile.NamedTemporaryFile(mode="w+") as csv_file:
        csv_file.write(tp_output)
        csv_file.flush()
        csv_file.seek(0)
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            # CSV file represents null values as the string '[NULL]'.
            # Parse these null values to None type.
            row_parsed = dict(row)
            for key, val in row_parsed.items():
                if val == '[NULL]':
                    row_parsed[key] = None
            csv_output.append(row_parsed)

    return csv_output
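
As a usage sketch for the function above (the executable and trace paths, and the query against trace processor's slice table, are illustrative):

rows = RunQuery('/path/to/trace_processor', '/path/to/trace.pb',
                'SELECT name, dur FROM slice LIMIT 10')
for row in rows:
    # Every value comes back as a string; NULLs come back as None.
    print(row['name'], row['dur'])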