def testPerformanceSummaryEventCollection(self):
    """Test the collection of PerformanceSummary GA events."""
    # PerformanceSummaries are only collected for cp and rsync.
    self.collector.ga_params[metrics._GA_LABEL_MAP['Command Name']] = 'cp'
    # GetDiskCounters is called at initialization of _PerformanceSummaryParams,
    # which occurs during the first call to LogPerformanceSummaryParams.
    with mock.patch('gslib.metrics.GetDiskCounters',
                    return_value={'fake-disk': (0, 0, 0, 0, 0, 0)}):
      metrics.LogPerformanceSummaryParams(
          uses_fan=True, uses_slice=True, avg_throughput=10,
          is_daisy_chain=True, has_file_dst=False, has_cloud_dst=True,
          has_file_src=False, has_cloud_src=True, total_bytes_transferred=100,
          total_elapsed_time=10, thread_idle_time=40, thread_execution_time=10,
          num_processes=2, num_threads=3, num_objects_transferred=3,
          provider_types=['gs'])

    # Log a retryable service error and two retryable network errors.
    service_retry_msg = RetryableErrorMessage(
        apitools_exceptions.CommunicationError(), 0)
    network_retry_msg = RetryableErrorMessage(socket.error(), 0)
    metrics.LogRetryableError(service_retry_msg)
    metrics.LogRetryableError(network_retry_msg)
    metrics.LogRetryableError(network_retry_msg)

    # Log some thread throughput.
    start_file_msg = FileMessage('src', 'dst', 0, size=100)
    end_file_msg = FileMessage('src', 'dst', 10, finished=True)
    start_file_msg.thread_id = end_file_msg.thread_id = 1
    start_file_msg.process_id = end_file_msg.process_id = 1
    metrics.LogPerformanceSummaryParams(file_message=start_file_msg)
    metrics.LogPerformanceSummaryParams(file_message=end_file_msg)
    self.assertEqual(self.collector.perf_sum_params.thread_throughputs[
        (1, 1)].GetThroughput(), 10)

    # GetDiskCounters is called a second time during collection.
    with mock.patch('gslib.metrics.GetDiskCounters',
                    return_value={'fake-disk': (0, 0, 0, 0, 10, 10)}):
      self.collector._CollectPerformanceSummaryMetric()

    # Check for all the expected parameters.
    metric_body = self.collector._metrics[0].body
    label_and_value_pairs = [
        ('Event Category', metrics._GA_PERFSUM_CATEGORY),
        ('Event Action', 'CloudToCloud%2CDaisyChain'), ('Execution Time', '10'),
        ('Parallelism Strategy', 'both'), ('Source URL Type', 'cloud'),
        ('Provider Types', 'gs'), ('Num Processes', '2'), ('Num Threads', '3'),
        ('Number of Files/Objects Transferred', '3'),
        ('Size of Files/Objects Transferred', '100'),
        ('Average Overall Throughput', '10'),
        ('Num Retryable Service Errors', '1'),
        ('Num Retryable Network Errors', '2'),
        ('Thread Idle Time Percent', '0.8'),
        ('Slowest Thread Throughput', '10'),
        ('Fastest Thread Throughput', '10'),
    ]
    if IS_LINUX:  # Disk I/O time is only available on Linux.
      label_and_value_pairs.append(('Disk I/O Time', '20'))
    for label, exp_value in label_and_value_pairs:
      self.assertIn('{0}={1}'.format(metrics._GA_LABEL_MAP[label], exp_value),
                    metric_body)
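
The expected 'Disk I/O Time' of 20 and 'Thread Idle Time Percent' of 0.8 can be reproduced by hand from the mocked inputs. The sketch below is not part of the test; it only shows the arithmetic, and it assumes (based on the values mocked above, not on gsutil documentation) that the last two fields of each GetDiskCounters tuple are cumulative read and write time.

# Hand-worked check of the expected values, using the mocked counter dicts.
start_counters = {'fake-disk': (0, 0, 0, 0, 0, 0)}
end_counters = {'fake-disk': (0, 0, 0, 0, 10, 10)}

# Disk I/O time is the growth in read time plus write time across all disks:
# (10 - 0) + (10 - 0) = 20.
disk_io_time = sum(
    (end[4] - start[4]) + (end[5] - start[5])
    for start, end in zip(start_counters.values(), end_counters.values()))

# Thread idle time percent is idle time over total thread time: 40 / (40 + 10).
thread_idle_percent = 40.0 / (40.0 + 10.0)

print(disk_io_time, thread_idle_percent)  # 20 0.8
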
Example #2
def _LogAllTestMetrics():
    """Logs all the common metrics for a test."""
    metrics.LogCommandParams(command_name='cmd1',
                             subcommands=['action1'],
                             global_opts=[('-y', 'value'), ('-z', ''),
                                          ('-x', '')],
                             sub_opts=[('optb', ''), ('opta', '')])
    metrics.LogRetryableError('retryable_error_type_1')
    metrics.LogRetryableError('retryable_error_type_1')
    metrics.LogRetryableError('retryable_error_type_2')
    metrics.LogFatalError(gslib.exception.CommandException('test'))
Example #3
def _LogAllTestMetrics():
  """Logs all the common metrics for a test."""
  metrics.LogCommandParams(
      command_name='cmd1', subcommands=['action1'],
      global_opts=[('-y', 'value'), ('-z', ''), ('-x', '')],
      sub_opts=[('optb', ''), ('opta', '')])
  retry_msg_1 = RetryableErrorMessage(Exception(), 0)
  retry_msg_2 = RetryableErrorMessage(ValueError(), 0)
  metrics.LogRetryableError(retry_msg_1)
  metrics.LogRetryableError(retry_msg_1)
  metrics.LogRetryableError(retry_msg_2)
  metrics.LogFatalError(gslib.exception.CommandException('test'))
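
Neither variant of this helper asserts anything on its own; it only feeds the metrics module. The following is a minimal, hypothetical sketch of how its effect could be checked in isolation by patching the metrics entry points. The expected call counts follow from the three LogRetryableError calls and the single LogCommandParams and LogFatalError calls above.

from unittest import mock

with mock.patch.object(metrics, 'LogCommandParams') as cmd_mock, \
     mock.patch.object(metrics, 'LogRetryableError') as retry_mock, \
     mock.patch.object(metrics, 'LogFatalError') as fatal_mock:
  _LogAllTestMetrics()

assert cmd_mock.call_count == 1    # One command logged.
assert retry_mock.call_count == 3  # Two of type 1, one of type 2.
assert fatal_mock.call_count == 1  # The CommandException.
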
Example #4
    def testExceptionCatchingDecorator(self):
        """Tests the exception catching decorator CaptureAndLogException."""
        original_log_level = self.root_logger.getEffectiveLevel()
        self.root_logger.setLevel(logging.DEBUG)

        # Test that a wrapped function with an exception doesn't stop the process.
        mock_exc_fn = mock.MagicMock(__name__='mock_exc_fn',
                                     side_effect=Exception())
        wrapped_fn = metrics.CaptureAndLogException(mock_exc_fn)
        wrapped_fn()
        self.assertEqual(1, mock_exc_fn.call_count)
        with open(self.log_handler_file) as f:
            log_output = f.read()
            self.assertIn(
                'Exception captured in mock_exc_fn during metrics '
                'collection', log_output)

        mock_err_fn = mock.MagicMock(__name__='mock_err_fn',
                                     side_effect=TypeError())
        wrapped_fn = metrics.CaptureAndLogException(mock_err_fn)
        wrapped_fn()
        self.assertEqual(1, mock_err_fn.call_count)
        with open(self.log_handler_file) as f:
            log_output = f.read()
            self.assertIn(
                'Exception captured in mock_err_fn during metrics '
                'collection', log_output)

        # Test that exceptions in the unprotected metrics functions are caught.
        with mock.patch.object(MetricsCollector,
                               'GetCollector',
                               return_value='not a collector'):
            # These calls should all fail, but the exceptions shouldn't propagate up.
            metrics.Shutdown()
            metrics.LogCommandParams()
            metrics.LogRetryableError()
            metrics.LogFatalError()
            metrics.LogPerformanceSummary()
            metrics.CheckAndMaybePromptForAnalyticsEnabling('invalid argument')
            with open(self.log_handler_file) as f:
                log_output = f.read()
                self.assertIn(
                    'Exception captured in Shutdown during metrics collection',
                    log_output)
                self.assertIn(
                    'Exception captured in LogCommandParams during metrics collection',
                    log_output)
                self.assertIn(
                    'Exception captured in LogRetryableError during metrics collection',
                    log_output)
                self.assertIn(
                    'Exception captured in LogFatalError during metrics collection',
                    log_output)
                self.assertIn(
                    'Exception captured in LogPerformanceSummary during metrics '
                    'collection', log_output)
                self.assertIn(
                    'Exception captured in CheckAndMaybePromptForAnalyticsEnabling '
                    'during metrics collection', log_output)
        self.root_logger.setLevel(original_log_level)
Example #5
  def testExceptionCatchingDecorator(self):
    """Tests the exception catching decorator CaptureAndLogException."""

    # A wrapped function with an exception should not stop the process.
    mock_exc_fn = mock.MagicMock(__name__='mock_exc_fn',
                                 side_effect=Exception())
    wrapped_fn = metrics.CaptureAndLogException(mock_exc_fn)
    wrapped_fn()

    debug_messages = self.log_handler.messages['debug']
    self.assertIn('Exception captured in mock_exc_fn during metrics collection',
                  debug_messages[0])
    self.log_handler.reset()

    self.assertEqual(1, mock_exc_fn.call_count)

    mock_err_fn = mock.MagicMock(__name__='mock_err_fn',
                                 side_effect=TypeError())
    wrapped_fn = metrics.CaptureAndLogException(mock_err_fn)
    wrapped_fn()
    self.assertEqual(1, mock_err_fn.call_count)

    debug_messages = self.log_handler.messages['debug']
    self.assertIn('Exception captured in mock_err_fn during metrics collection',
                  debug_messages[0])
    self.log_handler.reset()

    # Test that exceptions in the unprotected metrics functions are caught.
    with mock.patch.object(MetricsCollector, 'GetCollector',
                           return_value='not a collector'):
      # These calls should all fail, but the exceptions shouldn't propagate up.
      metrics.Shutdown()
      metrics.LogCommandParams()
      metrics.LogRetryableError()
      metrics.LogFatalError()
      metrics.LogPerformanceSummaryParams()
      metrics.CheckAndMaybePromptForAnalyticsEnabling('invalid argument')

      debug_messages = self.log_handler.messages['debug']
      message_index = 0
      for func_name in ('Shutdown', 'LogCommandParams', 'LogRetryableError',
                        'LogFatalError', 'LogPerformanceSummaryParams',
                        'CheckAndMaybePromptForAnalyticsEnabling'):
        self.assertIn(
            'Exception captured in %s during metrics collection' % func_name,
            debug_messages[message_index])
        message_index += 1

      self.log_handler.reset()
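
For context, the decorator exercised in both variants follows a common pattern: wrap the function, swallow any exception it raises, and log the failure at debug level so that metrics problems never interrupt the user-facing command. The sketch below is a generic illustration of that pattern, not gsutil's actual CaptureAndLogException implementation.

import functools
import logging

def _capture_and_log_exception(func):
  """Generic stand-in: swallows and debug-logs exceptions raised by func."""

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except Exception:  # pylint: disable=broad-except
      logging.getLogger().debug(
          'Exception captured in %s during metrics collection', func.__name__)

  return wrapper
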
Example #6
def put(self, status_item):  # pylint: disable=invalid-name
    if isinstance(status_item, RetryableErrorMessage):
        metrics.LogRetryableError(status_item.error_type)
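
A minimal sketch of exercising this handler in isolation. The _StatusQueueSink class is a hypothetical stand-in for whatever class defines put() in the original code, and the imports mirror the ones the snippets above rely on.

import socket
from unittest import mock

from gslib import metrics
from gslib.thread_message import RetryableErrorMessage


class _StatusQueueSink(object):
    """Hypothetical container for the put() handler shown above."""

    def put(self, status_item):  # pylint: disable=invalid-name
        if isinstance(status_item, RetryableErrorMessage):
            metrics.LogRetryableError(status_item.error_type)


with mock.patch.object(metrics, 'LogRetryableError') as retry_mock:
    _StatusQueueSink().put(RetryableErrorMessage(socket.error(), 0))

# The message's error_type string was forwarded to the metrics module once.
assert retry_mock.call_count == 1
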