Example #1
  def testMediaScrubPerformance(self):
    """Launches HTML test which runs the scrub test and records performance."""
    self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))

    for media in _TEST_MEDIA:
      file_name = self.GetFileURLForDataPath(
          os.path.join(_TEST_MEDIA_PATH, media))

      # Some tests take longer than the default PyAuto call timeout, so we
      # start each test and wait until the 'testDone' flag is set by the test.
      self.CallJavascriptFunc('startTest', [file_name])

      if not self.WaitUntil(self.GetDOMValue, args=['testDone'],
                            retry_sleep=5, timeout=180, debug=False):
        error_msg = 'Scrubbing tests timed out.'
      else:
        error_msg = self.GetDOMValue('errorMsg')
      if error_msg:
        self.fail('Error while running the test: %s' % error_msg)

      forward_scrub_time = float(self.GetDOMValue('forwardScrubTime'))
      backward_scrub_time = float(self.GetDOMValue('backwardScrubTime'))
      mixed_scrub_time = float(self.GetDOMValue('mixedScrubTime'))
      pyauto_utils.PrintPerfResult('scrubbing', os.path.basename(file_name) +
                                   '_forward', forward_scrub_time, 'ms')
      pyauto_utils.PrintPerfResult('scrubbing', os.path.basename(file_name) +
                                   '_backward', backward_scrub_time, 'ms')
      pyauto_utils.PrintPerfResult('scrubbing', os.path.basename(file_name) +
                                   '_mixed', mixed_scrub_time, 'ms')
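All of these examples report their measurements through pyauto_utils.PrintPerfResult(graph_name, trace_name, values, units). The following is only a rough sketch of what such a helper does, assuming the usual 'RESULT graph: trace= value units' log line format scraped by the Chromium perf bots; the real pyauto_utils implementation may differ.

# Sketch only: an illustration of a PrintPerfResult-style helper, assuming the
# 'RESULT <graph>: <trace>= <value> <units>' log format.  Not the real
# pyauto_utils code.
def PrintPerfResultSketch(graph_name, trace_name, values, units):
  if isinstance(values, list):
    value_str = '[%s]' % ','.join(str(value) for value in values)
  else:
    value_str = str(values)
  print('RESULT %s: %s= %s %s' % (graph_name, trace_name, value_str, units))

# Example: PrintPerfResultSketch('scrubbing', 'video.webm_forward', 123.4, 'ms')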
Example #2
  def _ProcessPsnrAndSsimOutput(self, output):
    """Processes the analyzer output to extract the PSNR and SSIM values.

    The frame analyzer produces PSNR and SSIM results for every unique frame
    that has been captured.  This method forms lists of all the PSNR and SSIM
    values and passes them to PrintPerfResult() for printing on the perf graph.

    Args:
      output (string): The output from the frame analyzer to be processed.
    """
    # The output is in the format:
    # BSTATS
    # psnr ssim; psnr ssim; ... psnr ssim;
    # ESTATS
    stats_beginning = output.find('BSTATS')  # Get the beginning of the stats
    stats_ending = output.find('ESTATS')  # Get the end of the stats
    stats_str = output[(stats_beginning + len('BSTATS')):stats_ending]

    stats_list = stats_str.split(';')

    psnr = []
    ssim = []

    for item in stats_list:
      item = item.strip()
      if item != '':
        entry = item.split(' ')
        psnr.append(float(entry[0]))
        ssim.append(float(entry[1]))

    pyauto_utils.PrintPerfResult('PSNR', 'VGA', psnr, '')
    pyauto_utils.PrintPerfResult('SSIM', 'VGA', ssim, '')
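As a concrete illustration of the parsing above, a hypothetical analyzer output (all numbers invented) would yield the following lists:

# Illustration only: invented analyzer output in the BSTATS/ESTATS format.
sample_output = 'BSTATS\n25.4 0.91; 26.0 0.93; 24.8 0.90;\nESTATS'
stats_str = sample_output[sample_output.find('BSTATS') + len('BSTATS'):
                          sample_output.find('ESTATS')]
pairs = [item.strip().split(' ')
         for item in stats_str.split(';') if item.strip()]
psnr_values = [float(pair[0]) for pair in pairs]  # [25.4, 26.0, 24.8]
ssim_values = [float(pair[1]) for pair in pairs]  # [0.91, 0.93, 0.9]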
Example #3
  def RunTask(self, unique_url, task):
    """Runs the specific task on the url given.

    It is assumed that a tab with the unique_url is already loaded.
    Args:
      unique_url: A unique identifier of the test page.
      task: A (series_name, settings, file_name, run_epp) tuple.
    Returns:
      True if the tests run as expected.
    """
    ttp_results = []
    epp_results = []
    # Build video source URL.  Values <= 0 mean the setting is disabled.
    series_name, settings, (file_name, run_epp) = task
    video_url = cns_test_base.GetFileURL(
        file_name, bandwidth=settings[0], latency=settings[1],
        loss=settings[2], new_port=True)

    graph_name = series_name + '_' + os.path.basename(file_name)
    for iter_num in xrange(self._test_iterations):
      # Start the test!
      self.CallJavascriptFunc('startTest', [video_url], url=unique_url)

      # Wait until the necessary metrics have been collected.
      self._metrics['epp'] = self._metrics['ttp'] = -1
      self.WaitUntil(self._HaveMetricOrError, args=['ttp', unique_url],
                     retry_sleep=1, timeout=_TEST_EPP_TIMEOUT, debug=False)
      # Do not wait for epp if ttp is not available.
      if self._metrics['ttp'] >= 0:
        ttp_results.append(self._metrics['ttp'])
        if run_epp:
          self.WaitUntil(
              self._HaveMetricOrError, args=['epp', unique_url], retry_sleep=2,
              timeout=_TEST_EPP_TIMEOUT, debug=False)

          if self._metrics['epp'] >= 0:
            epp_results.append(self._metrics['epp'])

          logging.debug('Iteration:%d - Test %s ended with %d%% of the video '
                        'played.', iter_num, graph_name,
                        self._GetVideoProgress(unique_url),)

      if self._metrics['ttp'] < 0 or (run_epp and self._metrics['epp'] < 0):
        logging.error('Iteration:%d - Test %s failed to end gracefully due '
                      'to time-out or error.\nVideo events fired:\n%s',
                      iter_num, graph_name, self._GetEventsLog(unique_url))

    # End of iterations; print the results.
    pyauto_utils.PrintPerfResult('ttp', graph_name, ttp_results, 'ms')

    if run_epp:
      pyauto_utils.PrintPerfResult('epp', graph_name, epp_results, '%')

    # Check if any of the tests failed to report the metrics.
    return len(ttp_results) == len(epp_results) == self._test_iterations
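For reference, the task handed to RunTask above is a nested tuple; a hypothetical entry (all values invented here) that matches the unpacking at the top of the method would look like this:

# Hypothetical task; every value is invented for illustration.  settings is
# interpreted as (bandwidth, latency, loss) by GetFileURL above, and run_epp
# selects whether the EPP metric is collected in addition to TTP.
task = ('constrained_256kbps',      # series_name
        (256, 100, 2),              # settings: bandwidth, latency, loss
        ('some_video.webm', True))  # (file_name, run_epp)
series_name, settings, (file_name, run_epp) = task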
Example #4
    def MeasureQuality(output_no_silence):
      results = audio_tools.RunPESQ(_REFERENCE_FILE,
                                    output_no_silence,
                                    sample_rate=16000)
      self.assertTrue(results,
                      msg=('Failed to compute PESQ (most likely, we '
                           'recorded only silence)'))
      pyauto_utils.PrintPerfResult('audio_pesq', 'raw_mos', results[0],
                                   'score')
      pyauto_utils.PrintPerfResult('audio_pesq', 'mos_lqo', results[1],
                                   'score')
Example #5
  def _ProcessFramesCountOutput(self, output):
    """Processes the analyzer output for the different frame counts.

    The frame analyzer outputs additional information about the number of
    unique frames captured, the maximum number of repeated frames in a
    sequence, and the maximum number of skipped frames.  These values are then
    written to the perf graph.  (Note: some of the repeated or skipped frames
    are probably due to the imprecision of JavaScript timers.)

    Args:
      output (string): The output from the frame analyzer to be processed.
    """
    # The output from frame analyzer will be in the format:
    # <PSNR and SSIM stats>
    # Unique_frames_count:<value>
    # Max_repeated:<value>
    # Max_skipped:<value>
    unique_fr_pos = output.rfind('Unique_frames_count')
    result_str = output[unique_fr_pos:]

    result_list = result_str.split()

    for result in result_list:
      colon_pos = result.find(':')
      key = result[:colon_pos]
      value = result[colon_pos+1:]
      pyauto_utils.PrintPerfResult(key, 'VGA', value, '')
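With a hypothetical analyzer tail (counts invented), the loop above produces key/value pairs like these:

# Illustration only: an invented analyzer tail in the format described above.
sample_tail = 'Unique_frames_count:50 Max_repeated:2 Max_skipped:1'
pairs = [result.split(':', 1) for result in sample_tail.split()]
# pairs == [['Unique_frames_count', '50'], ['Max_repeated', '2'],
#           ['Max_skipped', '1']]; each pair is reported via PrintPerfResult.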
Example #6
    def testBasicAudioPlaybackRecord(self):
        """Plays an audio file and verifies its output against expected."""
        # The two temp files that may be used by the test.
        temp_file = None
        file_no_silence = None
        # Rebase the test expectation file if the REBASE=1 env variable is set.
        rebase = 'REBASE' in os.environ

        try:
            temp_file = GetTempFilename()
            record_thread = audio_tools.AudioRecorderThread(
                _RECORD_DURATION, temp_file)
            record_thread.start()
            self.NavigateToURL(_TEST_HTML_PATH)
            self.assertTrue(
                self.ExecuteJavascript("startTest('%s');" % _TEST_AUDIO))
            record_thread.join()

            if record_thread.error:
                self.fail(record_thread.error)
            file_no_silence = (_TEST_EXPECTED_AUDIO if rebase
                               else GetTempFilename())
            audio_tools.RemoveSilence(temp_file, file_no_silence)

            # Exit if we just want to rebase expected output.
            if rebase:
                return

            pesq_values = audio_tools.RunPESQ(_TEST_EXPECTED_AUDIO,
                                              file_no_silence)
            if not pesq_values:
                self.fail('Test failed to get pesq results.')
            pyauto_utils.PrintPerfResult('audio_pesq', 'ref', pesq_values[0],
                                         'score')
            pyauto_utils.PrintPerfResult('audio_pesq', 'actual',
                                         pesq_values[1], 'score')
        except:
            # Log the error reported by the page, then re-raise so the failure
            # is not silently swallowed.
            logging.error('Test failed: %s', self.GetDOMValue('error'))
            raise
        finally:
            # Delete the temporary files used by the test.
            if temp_file:
                os.remove(temp_file)
            if not rebase and file_no_silence:
                os.remove(file_no_silence)
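GetTempFilename() is used above but not shown in this snippet; a minimal sketch of such a helper, assuming it only needs to return the path of a writable temporary file (the '.wav' suffix is an assumption based on the audio test), could be:

import os
import tempfile


# Sketch of the GetTempFilename() helper referenced above; the real helper may
# differ.
def GetTempFilename():
  fd, path = tempfile.mkstemp(suffix='.wav')
  os.close(fd)  # The caller reopens the file and is responsible for deleting it.
  return path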
Example #7
    def RunTask(self, unique_url, task):
        """Runs the specific task on the url given.

    It is assumed that a tab with the unique_url is already loaded.
    Args:
      unique_url: A unique identifier of the test page.
      task: A (series_name, settings, file_name) tuple to run the test on.
    """
        series_name, settings, file_name = task

        video_url = cns_test_base.GetFileURL(file_name,
                                             bandwidth=settings[0],
                                             latency=settings[1],
                                             loss=settings[2])

        # Start the test!
        self.CallJavascriptFunc('startTest', [video_url], unique_url)

        logging.debug('Running perf test for %s.', video_url)
        # The timeout depends on (seek time * iterations).  With 3 iterations
        # per seek we get a total of 18 seeks per test.  We expect buffered and
        # cached seeks to be fast; through experimentation, an average of 10
        # secs per seek was found to be adequate.
        if not self.WaitUntil(self.GetDOMValue,
                              args=['endTest', unique_url],
                              retry_sleep=5,
                              timeout=300,
                              debug=False):
            error_msg = 'Seek tests timed out.'
        else:
            error_msg = self.GetDOMValue('errorMsg', unique_url)

        cached_states = self.GetDOMValue("Object.keys(CachedState).join(',')",
                                         unique_url).split(',')
        seek_test_cases = self.GetDOMValue(
            "Object.keys(SeekTestCase).join(',')", unique_url).split(',')

        graph_name = series_name + '_' + os.path.basename(file_name)
        for state in cached_states:
            for seek_case in seek_test_cases:
                values = self.GetDOMValue(
                    "seekRecords[CachedState.%s][SeekTestCase.%s].join(',')" %
                    (state, seek_case), unique_url)
                if values:
                    results = [float(value) for value in values.split(',')]
                else:
                    results = []
                pyauto_utils.PrintPerfResult(
                    'seek', '%s_%s_%s' % (state, seek_case, graph_name),
                    results, 'sec')

        if error_msg:
            logging.error('Error while running %s: %s.', graph_name, error_msg)
            return False
        else:
            return True
Example #8
    def testMediaPerformance(self):
        """Launches HTML test which plays each video and records statistics."""
        for file_name in _TEST_VIDEOS:
            # Append a tab and delete it at the end of the test to free its memory.
            self.AppendTab(
                pyauto.GURL(self.GetFileURLForDataPath(_TEST_HTML_PATH)))

            file_url = self.GetFileURLForDataPath(
                os.path.join(_TEST_MEDIA_PATH, file_name))
            logging.debug('Running perf test for %s.', file_url)

            renderer_process = self._GetChromeRendererProcess()
            # The first get_cpu_percent() call sets the baseline for the CPU
            # usage measured after playback below.
            renderer_process.get_cpu_percent()

            self.assertTrue(
                self.CallJavascriptFunc('startTest', [file_url], tab_index=1))

            cpu_usage = renderer_process.get_cpu_percent()
            mem_usage_kb = renderer_process.get_memory_info()[0] / 1024
            pyauto_utils.PrintPerfResult('cpu', file_name, cpu_usage, '%')
            pyauto_utils.PrintPerfResult('memory', file_name, mem_usage_kb,
                                         'KB')

            decoded_fps = [
                float(value)
                for value in self.GetDOMValue("decodedFPS.join(',')",
                                              tab_index=1).split(',')
            ]
            dropped_frames = self.GetDOMValue('droppedFrames', tab_index=1)
            dropped_fps = [
                float(value)
                for value in self.GetDOMValue("droppedFPS.join(',')",
                                              tab_index=1).split(',')
            ]

            pyauto_utils.PrintPerfResult('fps', file_name, decoded_fps, 'fps')
            pyauto_utils.PrintPerfResult('dropped_fps', file_name, dropped_fps,
                                         'fps')
            pyauto_utils.PrintPerfResult('dropped_frames', file_name,
                                         dropped_frames, 'frames')

            self.GetBrowserWindow(0).GetTab(1).Close(True)
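The _GetChromeRendererProcess() helper used above is not shown; a rough sketch, assuming PyAuto's GetBrowserInfo() lists renderer child processes with 'type' and 'pid' fields and that psutil is installed (the 'Tab' type string is an assumption), might look like this:

import psutil


def _GetChromeRendererProcess(self, tab_index=0):
  """Sketch only: returns a psutil.Process for the renderer behind tab_index.

  Assumes GetBrowserInfo() reports renderers as child processes of type 'Tab',
  in tab order; the real helper may locate the process differently.
  """
  renderers = [proc for proc in self.GetBrowserInfo()['child_processes']
               if proc['type'] == 'Tab']
  return psutil.Process(renderers[tab_index]['pid'])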
Example #9
    def PrintTestResult(self, hostname, snapshot):
        total = snapshot.GetProcessPrivateMemorySize()
        unknown = snapshot.GetUnknownSize()
        logging.info('Got data for: %s, total size = %d, unknown size = %d',
                     hostname, total, unknown)

        graph_name = 'Native Memory Unknown Bytes'
        pyauto_utils.PrintPerfResult(graph_name, hostname + ': total', total,
                                     'bytes')
        pyauto_utils.PrintPerfResult(graph_name, hostname + ': unknown',
                                     unknown, 'bytes')

        if total == 0:
            logging.info('No total process memory size')
            return
        unknown_percent = (float(unknown) / total) * 100
        unknown_percent_string = '%.1f' % unknown_percent
        pyauto_utils.PrintPerfResult('Native Memory Unknown %', hostname,
                                     unknown_percent_string, '%')
Example #10
  def PrintTestResult(self, hostname, snapshot):
    total = snapshot.GetProcessPrivateMemorySize()
    counted_objects = snapshot.GetInstrumentedObjectsCount()
    counted_unknown_objects = snapshot.GetNumberOfInstrumentedObjectsNotInHeap()
    if not counted_objects or not counted_unknown_objects:
      logging.info('No information about number of instrumented objects.')
      return
    logging.info('Got data for: %s, objects count = %d (unknown = %d)',
                 hostname, counted_objects, counted_unknown_objects)
    pyauto_utils.PrintPerfResult('DevTools Unknown Instrumented Objects',
                                 hostname, counted_unknown_objects, 'objects')
Example #11
    def testWebrtcJsep01CallAndMeasureCpu20Seconds(self):
        if not _HAS_CORRECT_PSUTIL_VERSION:
            print(
                'WARNING: Cannot run cpu/mem measurements with this version '
                'of psutil. You must have at least psutil 0.4.1 installed for '
                'the version of python you are running this test with.')
            return

        self.LoadTestPageInTwoTabs(test_page='webrtc_jsep01_test.html')

        # Prepare CPU measurements.
        renderer_process = self._GetChromeRendererProcess(tab_index=0)
        renderer_process.get_cpu_percent()

        self._SimpleWebrtcCall(request_video=True,
                               request_audio=True,
                               duration_seconds=20)

        cpu_usage = renderer_process.get_cpu_percent(interval=0)
        mem_usage_bytes = renderer_process.get_memory_info()[0]
        mem_usage_kb = float(mem_usage_bytes) / 1024
        pyauto_utils.PrintPerfResult('cpu', 'jsep01_call', cpu_usage, '%')
        pyauto_utils.PrintPerfResult('memory', 'jsep01_call', mem_usage_kb,
                                     'KiB')
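_HAS_CORRECT_PSUTIL_VERSION is a module-level flag not shown here; one plausible definition, assuming psutil exposes version_info (it does in recent releases), is:

import psutil

# Sketch of the module-level flag checked above; assumes psutil.version_info
# exists.  Older psutil releases without it are treated as too old.
_HAS_CORRECT_PSUTIL_VERSION = (
    hasattr(psutil, 'version_info') and psutil.version_info >= (0, 4, 1))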
Example #12
    def testMediaJerkyPerformance(self):
        """Launches Jerky tool and records jerkiness for HTML5 videos.

    For each video, the test starts up jerky tool then plays until the Jerky
    tool collects enough information.  Next the capture log is analyzed using
    Jerky's analyze command.  If the computed fps matches the expected fps the
    jerkiness metric is recorded.

    The test will run up to _JERKY_ITERATIONS_MAX times in an attempt to get at
    least _JERKY_ITERATIONS_MIN valid values.  The results are recorded under
    the 'jerkiness' variable for graphing on the bots.
    """
        self.NavigateToURL(self.GetFileURLForDataPath(_TEST_HTML_PATH))

        # Xvfb on the bots is restricted to 1024x768 at present.  Ensure we're using
        # all of the real estate we can.  Jerky tool needs a clear picture of every
        # frame, so we can't clip the video in any way.
        self.SetWindowDimensions(0, 0, 1024, 768)

        for name, width, height, expected_fps in _TEST_VIDEOS:
            jerkiness = []
            torn_frames = []
            file_url = self.GetFileURLForDataPath(
                os.path.join(_TEST_MEDIA_PATH, name))

            # Initialize the calibration area for Jerky tool.
            self.assertTrue(
                self.ExecuteJavascript('initializeTest(%d, %d);' %
                                       (width, height)))

            runs_left = _JERKY_ITERATIONS_MIN
            runs_total = 0
            while runs_left > 0 and runs_total < _JERKY_ITERATIONS_MAX:
                runs_total += 1
                logging.info('Running Jerky perf test #%d for %s.', runs_total,
                             name)

                # Start up Jerky tool in capture mode.
                jerky_process, jerky_log = self.StartJerkyCapture()

                # Start playback of the test video.
                self.assertTrue(
                    self.ExecuteJavascript("startTest('%s');" % file_url))

                # Wait for Jerky tool to finish if it hasn't already.
                self.assertEqual(jerky_process.wait(), 0)

                # Stop playback of the test video so the next run can cleanly find the
                # calibration zone.
                self.assertTrue(self.ExecuteJavascript('stopTest();'))

                # Analyze the results.
                jerky_fps, jerky_percent, jerky_torn_frames = self.AnalyzeJerkyCapture(
                    jerky_log)
                if (jerky_fps is None or jerky_percent is None
                        or jerky_torn_frames is None):
                    logging.error('No metrics recorded for this run.')
                    continue

                # Ensure the results for this run are valid.
                if jerky_fps != expected_fps:
                    logging.error(
                        'Invalid fps detected (actual: %f, expected: %f, jerkiness: %f). '
                        'Discarding results for this run.', jerky_fps,
                        expected_fps, jerky_percent)
                    continue

                jerkiness.append(jerky_percent)
                torn_frames.append(jerky_torn_frames)
                runs_left -= 1

            pyauto_utils.PrintPerfResult('jerkiness', name, jerkiness, '%')
Example #13
  def run(self):
    """Opens tab, starts HTML test, and records metrics for each queue entry.

    No exception handling is done to make sure the main thread exits properly
    during Chrome crashes or other failures.  Doing otherwise has the potential
    to leave the CNS server running in the background.

    For a clean shutdown, put the magic exit value (None, None) in the queue.
    """
    while True:
      series_name, settings = self._tasks.get()

      # Check for magic exit values.
      if (series_name, settings) == (None, None):
        break

      # Build video source URL.  Values <= 0 mean the setting is disabled.
      video_url = [_CNS_BASE_URL, 'f=' + _TEST_VIDEO]
      if settings[0] > 0:
        video_url.append('bandwidth=%d' % settings[0])
      if settings[1] > 0:
        video_url.append('latency=%d' % settings[1])
      if settings[2] > 0:
        video_url.append('loss=%d' % settings[2])
      video_url.append('new_port=true')
      video_url = '&'.join(video_url)

      ttp_results = []
      epp_results = []
      for iter_num in xrange(_TEST_ITERATIONS):
        # Make the test URL unique so we can figure out our tab index later.
        unique_url = '%s?%d' % (self._url, TestWorker._task_id.next())
        # Start the test!
        with self._automation_lock:
          self._pyauto.AppendTab(pyauto.GURL(unique_url))
          self._pyauto.CallJavascriptFunc(
              'startTest', [video_url],
              tab_index=self._FindTabLocked(unique_url))

        # Wait until the necessary metrics have been collected. Okay to not lock
        # here since pyauto.WaitUntil doesn't call into Chrome.
        self._metrics['epp'] = self._metrics['ttp'] = -1
        self._pyauto.WaitUntil(
            self._HaveMetricOrError, args=['ttp', unique_url], retry_sleep=1,
            timeout=10, debug=False)

        # Do not wait for epp if ttp is not available.
        if self._metrics['ttp'] >= 0:
          ttp_results.append(self._metrics['ttp'])
          self._pyauto.WaitUntil(
              self._HaveMetricOrError, args=['epp', unique_url], retry_sleep=2,
              timeout=_TEST_VIDEO_DURATION_SEC * 10, debug=False)

          if self._metrics['epp'] >= 0:
            epp_results.append(self._metrics['epp'])

          logging.debug('Iteration:%d - Test %s ended with %d%% of the video '
                        'played.', iter_num, series_name,
                        self._GetVideoProgress(unique_url),)

        if self._metrics['ttp'] < 0 or self._metrics['epp'] < 0:
          logging.error('Iteration:%d - Test %s failed to end gracefully due '
                        'to time-out or error.\nVideo events fired:\n%s',
                        iter_num, series_name, self._GetEventsLog(unique_url))

        # Close the tab.
        with self._automation_lock:
          self._pyauto.GetBrowserWindow(0).GetTab(
              self._FindTabLocked(unique_url)).Close(True)

      # Check if any of the tests failed to report the metrics.
      if not len(ttp_results) == len(epp_results) == _TEST_ITERATIONS:
        self.fail_count += 1
      # End of iterations; print the results.
      logging.debug('TTP results: %s', ttp_results)
      logging.debug('EPP results: %s', epp_results)
      pyauto_utils.PrintPerfResult('epp', series_name, max(epp_results), '%')
      pyauto_utils.PrintPerfResult('ttp', series_name,
                                   Median(ttp_results), 'ms')

      # TODO(dalecurtis): Check results for regressions.
      self._tasks.task_done()
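Median() is referenced above but not shown; a small sketch of such a helper (the empty-list behavior is an assumption) could be:

def Median(values):
  """Returns the median of a list of numbers, or None for an empty list."""
  if not values:
    return None
  ordered = sorted(values)
  middle = len(ordered) / 2  # Integer division under Python 2.
  if len(ordered) % 2 == 1:
    return ordered[middle]
  return (ordered[middle - 1] + ordered[middle]) / 2.0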