Esempio n. 1
0
    def MeasureMemory(self, deterministic_mode=False):
        """Add a memory measurement to the trace being recorded.

        Behaves as a no-op if tracing is not enabled.

        TODO(perezju): Also behave as a no-op if tracing is enabled but
        memory-infra is not.

        Args:
          deterministic_mode: A boolean indicating whether to attempt or not to
              control the environment (force GCs, clear caches) before making
              the measurement in an attempt to obtain more deterministic
              results.

        Returns:
          GUID of the generated dump if one was triggered, None otherwise.
        """
        host_platform = self.tab.browser.platform
        tracing_on = host_platform.tracing_controller.is_tracing_running
        if not tracing_on:
            logging.warning(
                'Tracing is off. No memory dumps are being recorded.')
            return None
        if deterministic_mode:
            # Settle the system, collect garbage and flush caches before
            # dumping, to reduce run-to-run noise.
            self.Wait(_DUMP_WAIT_TIME)
            self.ForceGarbageCollection()
            if host_platform.SupportFlushEntireSystemCache():
                host_platform.FlushEntireSystemCache()
            self.Wait(_DUMP_WAIT_TIME)
        dump_guid = self.tab.browser.DumpMemory()
        if dump_guid:
            return dump_guid
        raise exceptions.Error('Unable to obtain memory dump')
Esempio n. 2
0
def _CrxHashIfChanged(crx_path, extension_info):
    """Checks whether downloaded Crx has been altered.

    Compares stored hash with hash of downloaded Crx. If different, alerts user
    that CRX version has changed and will be updated in CSV file.

    Args:
      crx_path: Path to downloaded CRX.
      extension_info: Info from CSV (including id and previous hash) about CRX.

    Returns:
      New hash and version if extension differed. Otherwise, returns
      (None, None).

    Raises:
      exceptions.Error: If the hash changed but the version did not, which
          suggests the downloaded CRX was altered.
    """
    downloaded_hash = _Base64Hash(crx_path)
    new_version = _GetVersionFromCrx(crx_path)
    if downloaded_hash == extension_info['hash']:
        return (None, None)
    if new_version == extension_info['version']:
        # BUG FIX: the original message contained an unformatted '%s'
        # placeholder; the extension id is now interpolated into it.
        raise exceptions.Error(
            'Extension %s hash from Web Store differs from '
            '\nhash stored in CSV, but versions are the same.' %
            extension_info['id'])
    ans = raw_input(
        '\tWarning: Extension %s version from Web Store differs '
        'from CSV version.\n\tIf continued, script will write '
        'new hash and version to CSV.\n\tContinue? (y/n) ' %
        extension_info['id']).lower()
    if not ans.startswith('y'):
        sys.exit('Web Store extension %s hash differs from hash in CSV.' %
                 extension_info['id'])
    return (downloaded_hash, new_version)
    def StopTracing(self):
        """Stops tracing on every active agent and returns the trace data.

        Each agent (plus the controller itself) is first asked to stop
        tracing, then asked to collect its trace data into the current
        builder. Exceptions from individual agents are deferred so that
        every agent gets a chance to stop and report before any error is
        raised.

        Returns:
          The trace data assembled by the builder (builder.AsData()).

        Raises:
          exceptions.Error: If one or more agents raised while stopping
              or while collecting trace data; the message aggregates all
              of their tracebacks.
        """
        assert self.is_tracing_running, 'Can only stop tracing when tracing is on.'
        self._IssueClockSyncMarker()
        builder = self._current_state.builder

        # Accumulate failures instead of raising immediately, so one bad
        # agent cannot prevent the others from being stopped.
        raised_exception_messages = []
        for agent in self._active_agents_instances + [self]:
            try:
                with trace_event.trace('StopAgentTracing',
                                       agent=str(agent.__class__.__name__)):
                    agent.StopAgentTracing()
            except Exception:  # pylint: disable=broad-except
                raised_exception_messages.append(''.join(
                    traceback.format_exception(*sys.exc_info())))

        # Collection is a separate pass so all agents are stopped before
        # any data is gathered.
        for agent in self._active_agents_instances + [self]:
            try:
                with trace_event.trace('CollectAgentTraceData',
                                       agent=str(agent.__class__.__name__)):
                    agent.CollectAgentTraceData(builder)
            except Exception:  # pylint: disable=broad-except
                raised_exception_messages.append(''.join(
                    traceback.format_exception(*sys.exc_info())))

        # Reset controller state before (possibly) raising, so tracing is
        # left stopped either way.
        self._telemetry_info = None
        self._active_agents_instances = []
        self._current_state = None

        if raised_exception_messages:
            raise exceptions.Error(
                'Exceptions raised when trying to stop tracing:\n' +
                '\n'.join(raised_exception_messages))

        return builder.AsData()
Esempio n. 4
0
  def testRunStoryAndProcessErrorIfNeeded_tryError_finallyException(self):
    """An Error raised while running the story, plus a teardown exception.

    state.WillRunStory raises exceptions.Error('foo') and test.DidRunStory
    raises Exception('bar'); the original 'foo' error must be the one
    re-raised, and teardown must still run in order.
    """
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
      'state.WillRunStory': exceptions.Error('foo'),
      'test.DidRunStory': Exception('bar')
    })

    with self.assertRaisesRegexp(exceptions.Error, 'foo'):
      story_runner._RunStoryAndProcessErrorIfNeeded(
          root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    # The failure is dumped and recorded, and teardown still runs despite
    # the second exception from test.DidRunStory.
    self.assertEquals(root_mock.method_calls, [
      mock.call.test.WillRunStory(root_mock.state.platform),
      mock.call.state.WillRunStory(root_mock.story),
      mock.call.state.DumpStateUponFailure(root_mock.story, root_mock.results),
      mock.call.results.AddValue(FailureValueMatcher('foo')),
      mock.call.state.DidRunStory(root_mock.results),
      mock.call.test.DidRunStory(root_mock.state.platform)
    ])
Esempio n. 5
0
  def testRunStoryAndProcessErrorIfNeeded_tryError_finallyException(self):
    """An Error raised while running the story, plus a teardown exception.

    state.WillRunStory raises exceptions.Error('foo') and test.DidRunStory
    raises Exception('bar'); the original 'foo' error must be the one
    re-raised, and teardown must still run in order.
    """
    root_mock = self._CreateErrorProcessingMock(method_exceptions={
        'state.WillRunStory': exceptions.Error('foo'),
        'test.DidRunStory': Exception('bar')
    })

    with self.assertRaisesRegexp(exceptions.Error, 'foo'):
      story_runner._RunStoryAndProcessErrorIfNeeded(
          root_mock.story, root_mock.results, root_mock.state, root_mock.test)

    # The failure is dumped and recorded, and teardown still runs despite
    # the second exception from test.DidRunStory.
    self.assertEquals(root_mock.method_calls, [
        mock.call.results.CreateArtifact('logs.txt'),
        mock.call.test.WillRunStory(root_mock.state.platform, root_mock.story),
        mock.call.state.WillRunStory(root_mock.story),
        mock.call.state.DumpStateUponStoryRunFailure(root_mock.results),
        mock.call.results.Fail(
            'Exception raised running %s' % root_mock.story.name),
        mock.call.test.DidRunStory(root_mock.state.platform, root_mock.results),
    ])
Esempio n. 6
0
    def _IssueCommand(self, command_string, timeout, retries=None):
        """Sends a command to the ts_proxy server and waits for its status.

        Writes the command to the server's stdin, then reads stdout lines
        until the server answers 'OK' or 'ERROR'.

        Args:
          command_string: The command line to send to ts_proxy.
          timeout: Seconds to wait for the status line.
          retries: Unused; retrying is handled by the decorator.

        Raises:
          exceptions.Error: If the server did not answer 'OK'.
        """
        del retries  # handled by the decorator
        logging.info('Issuing command to ts_proxy_server: %s', command_string)
        command_output = []
        self._proc.stdin.write('%s\n' % command_string)
        self._proc.stdin.flush()
        self._proc.stdout.flush()

        def CommandStatusIsRead():
            command_output.append(self._ReadLineTsProxyStdout(timeout))
            return command_output[-1] == 'OK' or command_output[-1] == 'ERROR'

        py_utils.WaitFor(CommandStatusIsRead, timeout)

        success = 'OK' in command_output
        logging.log(logging.DEBUG if success else logging.ERROR,
                    'TsProxy output:\n%s', '\n'.join(command_output))
        if not success:
            # BUG FIX: exceptions.Error does not %-format its arguments the
            # way logging does; interpolate the command explicitly so the
            # message contains it instead of a literal '%s'.
            raise exceptions.Error('Failed to execute command: %s' %
                                   command_string)
Esempio n. 7
0
    def FlushTracing(self):
        """Flushes trace data on every active agent that supports flushing.

        Exceptions from individual agents are deferred so that every agent
        gets a chance to flush before any error is raised.

        Raises:
          exceptions.Error: If one or more agents raised while flushing;
              the message aggregates all of their tracebacks.
        """
        assert self.is_tracing_running, 'Can only flush tracing when tracing is on.'
        self._IssueClockSyncMarker()

        raised_exception_messages = []
        # Flushing the controller's pytrace is not supported.
        for agent in self._active_agents_instances:
            try:
                if agent.SupportsFlushingAgentTracing():
                    agent.FlushAgentTracing(self._current_state.config,
                                            self._current_state.timeout,
                                            self._current_state.builder)
            except Exception:  # pylint: disable=broad-except
                raised_exception_messages.append(''.join(
                    traceback.format_exception(*sys.exc_info())))

        if raised_exception_messages:
            raise exceptions.Error(
                'Exceptions raised when trying to flush tracing:\n' +
                '\n'.join(raised_exception_messages))
Esempio n. 8
0
    def StartServer(self, timeout=10, retries=None):
        """Start TsProxy server and verify that it started.

        Spawns the tsproxy subprocess on a random available port, switches
        its stdout to non-blocking I/O when fcntl is available, registers a
        shutdown hook, and waits until the server reports its port.

        Args:
          timeout: Seconds to wait for the server to start.
          retries: Unused; retrying is handled by the decorator.

        Raises:
          exceptions.Error: If the server did not start within timeout
              seconds.
        """
        del retries  # handled by the decorator
        cmd_line = [sys.executable, _TSPROXY_PATH]
        # Use port 0 so tsproxy picks a random available port.
        cmd_line.append('--port=0')
        if self._host_ip:
            cmd_line.append('--desthost=%s' % self._host_ip)
        if self._http_port:
            cmd_line.append('--mapports=443:%s,*:%s' %
                            (self._https_port, self._http_port))
        # BUG FIX: pass cmd_line as a lazy logging argument instead of
        # eagerly %-formatting it into the message string.
        logging.info('Tsproxy commandline: %r', cmd_line)
        self._proc = subprocess.Popen(cmd_line,
                                      stdout=subprocess.PIPE,
                                      stdin=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      bufsize=1)
        self._non_blocking = False
        if fcntl:
            logging.info('fcntl is supported, try setting '
                         'non blocking I/O for the ts_proxy process')
            fd = self._proc.stdout.fileno()
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
            self._non_blocking = True

        atexit_with_log.Register(self.StopServer)
        try:
            py_utils.WaitFor(self._IsStarted, timeout)
            logging.info('TsProxy port: %s', self._port)
            self._is_running = True
        except py_utils.TimeoutException:
            # TODO(nedn): remove this debug log once crbug.com/766877 is resolved
            ps_util.ListAllSubprocesses()
            err = self.StopServer()
            if err:
                logging.error('Error stopping WPR server:\n%s', err)
            raise exceptions.Error(
                'Error starting tsproxy: timed out after %s seconds' % timeout)
Esempio n. 9
0
 def PrepareBrowserCache(cls, page, browser, previous_page):
   """Prepares the browser cache before running a page.

   Not supported by this class; always raises.

   Raises:
     exceptions.Error: Always.
   """
   raise exceptions.Error('Prepare browser cache not supported')
Esempio n. 10
0
def _UpdateExtensionsInCloud(local_extensions_dir, extensions_csv, remote_dir):
    """Updates set of extensions in Cloud Storage from a CSV of extension ids.

  From well-formatted CSV file containing some set of extensions
  (extensions_csv), download them, compress into archive, and update
  the remote extension archive under REMOTE_DIR in CHROME-PARTNER-TELEMETRY
  bucket. This script expects 2nd column of CSV file to contain extension ids.

  Args:
      local_extensions_dir: directory to download CRX files into.
      extension_csv: CSV to pull extension_ids from.
      remote_dir: remote directory to put extension archive in cloud storage.

  Raises:
      Exception if a CRX download fails.
  """

    # Download CRX to temp files and compress into archive
    zip_path = os.path.join(local_extensions_dir, ZIP_NAME)
    extension_zip = zipfile.ZipFile(zip_path, 'w')
    update_csv = False
    extensions_info = []
    with open(extensions_csv, 'rb') as csv_file:
        reader = csv.reader(csv_file)
        # Stores comments (in case CSV needs to be updated/rewritten)
        # and skips header line.
        comments = []
        line = ','.join(reader.next())
        while line.startswith('#'):
            comments.append(line)
            line = ','.join(reader.next())
        # Extract info from CSV.
        for row in reader:
            extension_info = {
                'extension_name': row[0],
                'id': row[1],
                'hash': row[2],
                'version': row[3]
            }

            print 'Fetching extension %s...' % extension_info['id']
            crx_path = _DownloadCrxFromCws(extension_info['id'],
                                           local_extensions_dir)
            if crx_path is None:
                raise exceptions.Error(
                    '\tCould not fetch %s.\n\n'
                    'If this extension dl consistently fails, '
                    'remove this entry from %s.' %
                    (extension_info['id'], extensions_csv))
            (new_hash,
             new_version) = _CrxHashIfChanged(crx_path, extension_info)
            if new_hash is not None:
                update_csv = True
                extension_info['hash'] = new_hash
                extension_info['version'] = new_version
            extensions_info.append(extension_info)
            extension_zip.write(crx_path,
                                arcname='%s.crx' % extension_info['id'])
    extension_zip.close()

    if update_csv:
        print 'Updating CSV...'
        _UpdateCsv(comments, extensions_csv, extensions_info)

    print 'Uploading extensions to cloud...'
    remote_zip_path = os.path.join(remote_dir, ZIP_NAME)
    cloud_storage.Insert(cloud_storage.PARTNER_BUCKET, remote_zip_path,
                         zip_path)