def make_same_node(self, seconds):
    """Makes the same wrapped node object with the given timestamp.

    Args:
      seconds: timestamp in seconds since the epoch.

    Returns:
      A wrapped Node object with the given 'timestamp' and 'lastHeartbeatTime'.
    """
    # 'long' existed only in Python 2; in Python 3 'int' is unbounded, so
    # (int, float) is the portable equivalent. Sibling blocks in this file
    # already use Python 3 style isinstance checks (e.g. list, dict).
    assert isinstance(seconds, (int, float))
    return utilities.wrap_object(
        {'uid': KEY,
         'lastHeartbeatTime': utilities.seconds_to_timestamp(seconds)},
        'Node', KEY, seconds)
  def make_different_node(self, seconds):
    """Makes a different wrapped node object with the given timestamp.

    Args:
      seconds: timestamp in seconds since the epoch.

    Returns:
      A wrapped Node object with the given 'timestamp' and 'creationTimestamp'.
    """
    # (int, float) replaces the Python 2-only (int, long, float) check;
    # Python 3 ints are unbounded, so no behavior is lost.
    assert isinstance(seconds, (int, float))
    return utilities.wrap_object(
        {'uid': KEY,
         'creationTimestamp': utilities.seconds_to_timestamp(seconds)},
        'Node', KEY, seconds)
# Example #3
# 0
    def make_different_node(self, seconds):
        """Makes a different wrapped node object with the given timestamp.

        Args:
          seconds: timestamp in seconds since the epoch.

        Returns:
          A wrapped Node object with the given 'timestamp' and
          'creationTimestamp'.
        """
        # (int, float) replaces the Python 2-only (int, long, float) check;
        # Python 3 ints are unbounded, so no behavior is lost.
        assert isinstance(seconds, (int, float))
        return utilities.wrap_object(
            {
                'uid': KEY,
                'creationTimestamp': utilities.seconds_to_timestamp(seconds)
            }, 'Node', KEY, seconds)
# Example #4
# 0
    def make_same_node(self, seconds):
        """Makes the same wrapped node object with the given timestamp.

        Args:
          seconds: timestamp in seconds since the epoch.

        Returns:
          A wrapped Node object with the given 'timestamp' and
          'lastHeartbeatTime'.
        """
        # (int, float) replaces the Python 2-only (int, long, float) check;
        # Python 3 ints are unbounded, so no behavior is lost.
        assert isinstance(seconds, (int, float))
        return utilities.wrap_object(
            {
                'uid': KEY,
                'lastHeartbeatTime': utilities.seconds_to_timestamp(seconds)
            }, 'Node', KEY, seconds)
  def make_fancy_blob(self, name, timestamp_seconds, value):
    """Makes a blob containing "name", "timestamp" and "value" attributes.

    Args:
      name: the name of this object (the value of the 'id' attribute).
      timestamp_seconds: a timestamp in seconds.
      value: a value of any type.

    Returns:
      A dictionary containing 'id', 'timestamp', and 'value' key/value pairs.
    """
    # 'types.StringTypes' is Python 2 only (str + unicode); in Python 3 all
    # text is 'str', which is the portable equivalent.
    assert isinstance(name, str)
    assert isinstance(timestamp_seconds, float)
    return {'id': name,
            'timestamp': utilities.seconds_to_timestamp(timestamp_seconds),
            'value': value}
# Example #6
# 0
    def make_fancy_blob(self, name, timestamp_seconds, value):
        """Makes a blob containing "name", "timestamp" and "value" attributes.

        Args:
          name: the name of this object (the value of the 'id' attribute).
          timestamp_seconds: a timestamp in seconds.
          value: a value of any type.

        Returns:
          A dictionary containing 'id', 'timestamp', and 'value' key/value
          pairs.
        """
        # 'types.StringTypes' is Python 2 only (str + unicode); in Python 3
        # all text is 'str', which is the portable equivalent.
        assert isinstance(name, str)
        assert isinstance(timestamp_seconds, float)
        return {
            'id': name,
            'timestamp': utilities.seconds_to_timestamp(timestamp_seconds),
            'value': value
        }
# Example #7
# 0
def return_elapsed(gs):
    """Returns a description of the elapsed time of recent operations.

    Args:
      gs: global state.

    Returns:
      A dictionary containing the count, minimum elapsed time,
      maximum elapsed time, average elapsed time, and list of elapsed time
      records.
    """
    assert isinstance(gs, global_state.GlobalState)
    items = []
    durations = []
    for record in gs.get_elapsed():
        durations.append(record.elapsed_seconds)
        items.append({
            'start_time':
            utilities.seconds_to_timestamp(record.start_time),
            'what':
            record.what,
            'threadIdentifier':
            record.thread_identifier,
            'elapsed_seconds':
            record.elapsed_seconds
        })

    # min/max/average are None when no elapsed records were collected.
    # sum(..., 0.0) forces a float result, matching the original float
    # accumulator semantics.
    return {
        'count': len(items),
        'min': min(durations) if durations else None,
        'max': max(durations) if durations else None,
        'average': sum(durations, 0.0) / len(durations) if durations else None,
        'items': items
    }
# Example #8
# 0
def return_elapsed(gs):
  """Returns a description of the elapsed time of recent operations.

  Args:
    gs: global state.

  Returns:
  A dictionary containing the count, minimum elapsed time,
  maximum elapsed time, average elapsed time, and list of elapsed time
  records.
  """
  assert isinstance(gs, global_state.GlobalState)
  records = []
  total = 0.0
  low = None
  high = None
  for entry in gs.get_elapsed():
    secs = entry.elapsed_seconds
    records.append(
        {'start_time': utilities.seconds_to_timestamp(entry.start_time),
         'what': entry.what,
         'threadIdentifier': entry.thread_identifier,
         'elapsed_seconds': secs})
    total += secs
    # Track the running minimum and maximum; both start as None so an
    # empty elapsed list yields None for 'min' and 'max'.
    low = secs if low is None else min(low, secs)
    high = secs if high is None else max(high, secs)

  return {'count': len(records),
          'min': low,
          'max': high,
          'average': total / len(records) if records else None,
          'items': records}
  def test_cluster(self):
    """Test the '/cluster' endpoint.

    Exercises the collector twice (the second pass should be served from
    the cache), then invalidates the cached nodes entry and verifies that
    the rebuilt context graph carries updated timestamps.
    """
    start_time = utilities.now()
    end_time = None
    for _ in range(2):
      # Exercise the collector. Read data from golden files and compute
      # a context graph.
      # The second iteration should read from the cache.
      ret_value = self.app.get('/cluster')
      if end_time is None:
        end_time = utilities.now()
      result = json.loads(ret_value.data)
      # The timestamps of the second iteration should be the same as in the
      # first iteration, because the data of the 2nd iteration should be
      # fetched from the cache, and it did not change.
      # Even if fetching the data caused an explicit reading from the files
      # in the second iteration, the data did not change, so it should keep
      # its original timestamp.
      self.verify_resources(result, start_time, end_time)

      # Expected relation counts for the golden test data.
      self.assertEqual(5, self.count_relations(
          result, 'contains', 'Cluster', 'Node'))
      self.assertEqual(6, self.count_relations(
          result, 'contains', 'Cluster', 'Service'))
      self.assertEqual(3, self.count_relations(
          result, 'contains', 'Cluster', 'ReplicationController'))
      self.assertEqual(1, self.count_relations(
          result, 'contains', 'Node', 'Container'))
      self.assertEqual(4, self.count_relations(
          result, 'contains', 'Pod', 'Container'))
      self.assertEqual(7, self.count_relations(
          result, 'contains', 'Container', 'Process'))

      self.assertEqual(26, self.count_relations(result, 'contains'))
      self.assertEqual(3, self.count_relations(result, 'createdFrom'))
      self.assertEqual(7, self.count_relations(result, 'loadBalances'))
      self.assertEqual(6, self.count_relations(result, 'monitors'))
      self.assertEqual(14, self.count_relations(result, 'runs'))

      # Verify that all relations contain a timestamp in the range
      # [start_time, end_time].
      # 'list'/'dict' replace the Python 2-only types.ListType/DictType
      # (they are the same objects in Python 2, so behavior is unchanged),
      # matching the style of the sibling copy of this test in this file.
      self.assertTrue(isinstance(result.get('relations'), list))
      for r in result['relations']:
        self.assertTrue(isinstance(r, dict))
        timestamp = r.get('timestamp')
        self.assertTrue(utilities.valid_string(timestamp))
        self.assertTrue(start_time <= timestamp <= end_time)

      # The overall timestamp must be in the expected range.
      self.assertTrue(utilities.valid_string(result.get('timestamp')))
      self.assertTrue(start_time <= result['timestamp'] <= end_time)

      json_output = json.dumps(result, sort_keys=True)
      self.assertEqual(2, json_output.count('"alternateLabel": '))
      self.assertEqual(99, json_output.count('"createdBy": '))

      # Wait a little to ensure that the current time is greater than
      # end_time
      time.sleep(1)
      self.assertTrue(utilities.now() > end_time)

    # Change the timestamp of the nodes in the cache.
    timestamp_before_update = utilities.now()
    gs = collector.app.context_graph_global_state
    nodes, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(isinstance(nodes, list))
    self.assertTrue(start_time <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    end_time)
    # Change the first node to force the timestamp in the cache to change.
    # We have to change both the properties of the first node and its
    # timestamp, so the cache will store the new value (including the new
    # timestamp).
    self.assertTrue(len(nodes) >= 1)
    self.assertTrue(utilities.is_wrapped_object(nodes[0], 'Node'))
    nodes[0]['properties']['newAttribute123'] = 'the quick brown fox jumps over'
    nodes[0]['timestamp'] = utilities.now()
    gs.get_nodes_cache().update('', nodes)
    timestamp_after_update = utilities.now()
    _, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(timestamp_before_update <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    timestamp_after_update)

    # Build the context graph again.
    ret_value = self.app.get('/cluster')
    result = json.loads(ret_value.data)
    self.verify_resources(result, start_time, timestamp_after_update)

    # Verify that all relations contain a timestamp in the range
    # [start_time, end_time].
    # NOTE(review): the upper bound here is still end_time even though the
    # graph was rebuilt after the cache update; this matches the sibling
    # copy of this test — confirm this is intentional.
    self.assertTrue(isinstance(result.get('relations'), list))
    for r in result['relations']:
      self.assertTrue(isinstance(r, dict))
      timestamp = r.get('timestamp')
      self.assertTrue(utilities.valid_string(timestamp))
      self.assertTrue(start_time <= timestamp <= end_time)

    # The overall timestamp must be in the expected range.
    self.assertTrue(utilities.valid_string(result.get('timestamp')))
    self.assertTrue(timestamp_before_update <= result['timestamp'] <=
                    timestamp_after_update)
# Example #10
# 0
  def test_cluster(self):
    """Test the '/cluster' endpoint.

    Exercises the collector twice (the second pass should be served from
    the cache), then invalidates the cached nodes entry and verifies that
    the rebuilt context graph carries updated timestamps.
    """
    start_time = utilities.now()
    end_time = None
    for _ in range(2):
      # Exercise the collector. Read data from golden files and compute
      # a context graph.
      # The second iteration should read from the cache.
      ret_value = self.app.get('/cluster')
      if end_time is None:
        end_time = utilities.now()
      result = json.loads(ret_value.data)
      # The timestamps of the second iteration should be the same as in the
      # first iteration, because the data of the 2nd iteration should be
      # fetched from the cache, and it did not change.
      # Even if fetching the data caused an explicit reading from the files
      # in the second iteration, the data did not change, so it should keep
      # its original timestamp.
      self.verify_resources(result, start_time, end_time)

      # Expected relation counts for the golden test data.
      self.assertEqual(5, self.count_relations(
          result, 'contains', 'Cluster', 'Node'))
      self.assertEqual(6, self.count_relations(
          result, 'contains', 'Cluster', 'Service'))
      self.assertEqual(3, self.count_relations(
          result, 'contains', 'Cluster', 'ReplicationController'))
      self.assertEqual(16, self.count_relations(
          result, 'contains', 'Pod', 'Container'))

      # Totals per relation type across the whole graph.
      self.assertEqual(30, self.count_relations(result, 'contains'))
      self.assertEqual(16, self.count_relations(result, 'createdFrom'))
      self.assertEqual(7, self.count_relations(result, 'loadBalances'))
      self.assertEqual(6, self.count_relations(result, 'monitors'))
      self.assertEqual(14, self.count_relations(result, 'runs'))

      # Verify that all relations contain a timestamp in the range
      # [start_time, end_time].
      self.assertTrue(isinstance(result.get('relations'), list))
      for r in result['relations']:
        self.assertTrue(isinstance(r, dict))
        timestamp = r.get('timestamp')
        self.assertTrue(utilities.valid_string(timestamp))
        self.assertTrue(start_time <= timestamp <= end_time)

      # The overall timestamp must be in the expected range.
      self.assertTrue(utilities.valid_string(result.get('timestamp')))
      self.assertTrue(start_time <= result['timestamp'] <= end_time)

      # Wait a little to ensure that the current time is greater than
      # end_time
      time.sleep(1)
      self.assertTrue(utilities.now() > end_time)

    # Change the timestamp of the nodes in the cache.
    timestamp_before_update = utilities.now()
    gs = collector.app.context_graph_global_state
    nodes, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(isinstance(nodes, list))
    self.assertTrue(start_time <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    end_time)
    # Change the first node to force the timestamp in the cache to change.
    # We have to change both the properties of the first node and its
    # timestamp, so the cache will store the new value (including the new
    # timestamp).
    self.assertTrue(len(nodes) >= 1)
    self.assertTrue(utilities.is_wrapped_object(nodes[0], 'Node'))
    nodes[0]['properties']['newAttribute123'] = 'the quick brown fox jumps over'
    nodes[0]['timestamp'] = utilities.now()
    gs.get_nodes_cache().update('', nodes)
    timestamp_after_update = utilities.now()
    _, timestamp_seconds = gs.get_nodes_cache().lookup('')
    self.assertTrue(timestamp_before_update <=
                    utilities.seconds_to_timestamp(timestamp_seconds) <=
                    timestamp_after_update)

    # Build the context graph again.
    ret_value = self.app.get('/cluster')
    result = json.loads(ret_value.data)
    self.verify_resources(result, start_time, timestamp_after_update)

    # Verify that all relations contain a timestamp in the range
    # [start_time, end_time].
    # NOTE(review): the upper bound here is still end_time even though the
    # graph was rebuilt after the cache update — confirm this is intentional.
    self.assertTrue(isinstance(result.get('relations'), list))
    for r in result['relations']:
      self.assertTrue(isinstance(r, dict))
      timestamp = r.get('timestamp')
      self.assertTrue(utilities.valid_string(timestamp))
      self.assertTrue(start_time <= timestamp <= end_time)

    # The overall timestamp must be in the expected range.
    self.assertTrue(utilities.valid_string(result.get('timestamp')))
    self.assertTrue(timestamp_before_update <= result['timestamp'] <=
                    timestamp_after_update)