Example #1
0
    def RunTask(self, unique_url, task):
        """Executes one seek-performance test against an already-loaded page.

    It is assumed that a tab with the unique_url is already loaded.
    Args:
      unique_url: A unique identifier of the test page.
      task: A (series_name, settings, file_name) tuple to run the test on.

    Returns:
      True if the test finished without reporting an error.
    """
        series_name, settings, file_name = task

        # Build the constrained-network URL for the media file under test.
        video_url = cns_test_base.GetFileURL(
            file_name, bandwidth=settings[0], latency=settings[1],
            loss=settings[2])

        # Kick the test off inside the page.
        self.CallJavascriptFunc('startTest', [video_url], unique_url)

        logging.debug('Running perf test for %s.', video_url)
        # The timeout scales with (seek time * iterations): 3 iterations per
        # seek gives 18 seeks total.  Buffered/cached seeks should be quick;
        # experimentation showed ~10 seconds per seek on average is enough.
        finished = self.WaitUntil(self.GetDOMValue,
                                  args=['endTest', unique_url],
                                  retry_sleep=5,
                                  timeout=300,
                                  debug=False)
        if finished:
            error_msg = self.GetDOMValue('errorMsg', unique_url)
        else:
            error_msg = 'Seek tests timed out.'

        # Enumerate the state/case matrix directly from the page's JS enums.
        cached_states = self.GetDOMValue(
            "Object.keys(CachedState).join(',')", unique_url).split(',')
        seek_test_cases = self.GetDOMValue(
            "Object.keys(SeekTestCase).join(',')", unique_url).split(',')

        graph_name = series_name + '_' + os.path.basename(file_name)
        for state in cached_states:
            for seek_case in seek_test_cases:
                raw = self.GetDOMValue(
                    "seekRecords[CachedState.%s][SeekTestCase.%s].join(',')" %
                    (state, seek_case), unique_url)
                # An empty string means no samples were recorded for this cell.
                results = [float(v) for v in raw.split(',')] if raw else []
                pyauto_utils.PrintPerfResult(
                    'seek', '%s_%s_%s' % (state, seek_case, graph_name),
                    results, 'sec')

        if not error_msg:
            return True
        logging.error('Error while running %s: %s.', graph_name, error_msg)
        return False
  def RunTask(self, unique_url, task):
    """Runs the specific task on the url given.

    It is assumed that a tab with the unique_url is already loaded.
    Args:
      unique_url: A unique identifier of the test page.
      task: A (series_name, settings, (file_name, run_epp)) tuple.  settings
          is a (bandwidth, latency, loss) triple where values <= 0 disable
          that constraint; run_epp selects whether EPP metrics are collected.
    Returns:
      True if the tests run as expected.
    """
    ttp_results = []
    epp_results = []
    # Build video source URL.  Values <= 0 mean the setting is disabled.
    series_name, settings, (file_name, run_epp) = task
    video_url = cns_test_base.GetFileURL(
        file_name, bandwidth=settings[0], latency=settings[1],
        loss=settings[2], new_port=True)

    graph_name = series_name + '_' + os.path.basename(file_name)
    for iter_num in xrange(self._test_iterations):
      # Start the test!
      self.CallJavascriptFunc('startTest', [video_url], url=unique_url)

      # Wait until the necessary metrics have been collected.
      self._metrics['epp'] = self._metrics['ttp'] = -1
      self.WaitUntil(self._HaveMetricOrError, args=['ttp', unique_url],
                     retry_sleep=1, timeout=_TEST_EPP_TIMEOUT, debug=False)
      # Do not wait for epp if ttp is not available.
      if self._metrics['ttp'] >= 0:
        ttp_results.append(self._metrics['ttp'])
        if run_epp:
          self.WaitUntil(
              self._HaveMetricOrError, args=['epp', unique_url], retry_sleep=2,
              timeout=_TEST_EPP_TIMEOUT, debug=False)

          if self._metrics['epp'] >= 0:
            epp_results.append(self._metrics['epp'])

          logging.debug('Iteration:%d - Test %s ended with %d%% of the video '
                        'played.', iter_num, graph_name,
                        self._GetVideoProgress(unique_url))

      if self._metrics['ttp'] < 0 or (run_epp and self._metrics['epp'] < 0):
        logging.error('Iteration:%d - Test %s failed to end gracefully due '
                      'to time-out or error.\nVideo events fired:\n%s',
                      iter_num, graph_name, self._GetEventsLog(unique_url))

    # End of iterations, print results.
    pyauto_utils.PrintPerfResult('ttp', graph_name, ttp_results, 'ms')

    if run_epp:
      pyauto_utils.PrintPerfResult('epp', graph_name, epp_results, '%')

    # Check if any of the tests failed to report the metrics.  EPP results
    # only count toward success when run_epp is set; otherwise epp_results is
    # intentionally empty and must not cause a failure (the previous
    # `len(ttp_results) == len(epp_results) == self._test_iterations` check
    # always returned False for non-EPP runs).
    if run_epp and len(epp_results) != self._test_iterations:
      return False
    return len(ttp_results) == self._test_iterations