def test_container_id_to_fname(self):
  """Tests container_id_to_fname()."""
  self.assertEqual(
      'k8s-guestbook-node-3-container-8dcdfec8',
      utilities.container_id_to_fname(
          'k8s-guestbook-node-3.c.rising-apricot-840.internal',
          'container',
          'k8s_php-redis.b317029a_guestbook-controller-ls6k1.default.api_'
          'f991d53e-b949-11e4-8246-42010af0c3dd_8dcdfec8'))
  self.assertEqual(
      'k8s-guestbook-node-3-container-cluster-insight',
      utilities.container_id_to_fname(
          'k8s-guestbook-node-3.c.rising-apricot-840.internal',
          'container',
          'cluster-insight'))
  self.assertEqual(
      'k8s-guestbook-node-3-processes-8dcdfec8',
      utilities.container_id_to_fname(
          'k8s-guestbook-node-3',
          'processes',
          'k8s_php-redis.b317029a_guestbook-controller-ls6k1.default.api_'
          'f991d53e-b949-11e4-8246-42010af0c3dd_8dcdfec8'))
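For reference, here is a minimal sketch of what container_id_to_fname() could look like, reconstructed only from the expectations in this test; the project's actual implementation lives in its utilities module and may differ:

def container_id_to_fname(docker_host, obj_type, container_id):
  """Builds a short file name from a host name, object type and container ID.

  A hypothetical reconstruction from the test cases above, not the
  project's real implementation.
  """
  # Keep only the first DNS label of the host, e.g. 'k8s-guestbook-node-3'
  # from 'k8s-guestbook-node-3.c.rising-apricot-840.internal'.
  short_host = docker_host.split('.')[0]
  # Kubernetes container IDs are long '_'-separated strings that end in a
  # short hex suffix ('..._8dcdfec8' -> '8dcdfec8'); short names such as
  # 'cluster-insight' contain no '_' and pass through unchanged.
  short_id = container_id.split('_')[-1]
  return '%s-%s-%s' % (short_host, obj_type, short_id)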
Example #2
def _inspect_container(gs, docker_host, container_id):
  """Fetch detailed information about the given container in the given host.

  Args:
    gs: global state.
    docker_host: Docker host name. Must not be empty.
    container_id: container ID. Must not be empty.

  Returns:
    (container_information, timestamp_in_seconds) if the container was found.
    (None, timestamp_in_seconds) if the container was not found.

  Raises:
    CollectorError in case of failure to fetch data from Docker.
    Other exceptions may be raised due to execution errors.
  """
  url = 'http://{docker_host}:{port}/containers/{container_id}/json'.format(
      docker_host=docker_host, port=gs.get_docker_port(),
      container_id=container_id)
  fname = utilities.container_id_to_fname(
      docker_host, 'container', container_id)
  try:
    result = fetch_data(gs, url, fname, expect_missing=True)
  except ValueError:
    # TODO(vasbala): this container does not exist anymore.
    # What should we do here?
    return (None, time.time())
  except collector_error.CollectorError:
    raise
  except Exception:
    msg = 'fetching %s failed with exception %s' % (url, sys.exc_info()[0])
    gs.logger_exception(msg)
    raise collector_error.CollectorError(msg)

  if not isinstance(result, dict):
    msg = 'fetching %s returned invalid data' % url
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # Sort the "Env" attribute because it tends to contain elements in
  # a different order each time you fetch the container information.
  if isinstance(utilities.get_attribute(result, ['Config', 'Env']), list):
    # Sort the contents of the 'Env' list in place.
    result['Config']['Env'].sort()

  return (result, time.time())
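A hedged usage sketch for _inspect_container(); 'gs' stands for the collector's global state object, the host and container names reuse the illustrative values from the test above, and 'Name', 'Config' and 'Image' are standard fields of Docker's inspect JSON:

container, ts = _inspect_container(gs, 'k8s-guestbook-node-3',
                                   'cluster-insight')
if container is None:
  print('container is gone; checked at %f' % ts)
else:
  print('%s runs image %s' % (container.get('Name'),
                              container['Config'].get('Image')))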
Example #3
def get_processes(gs, docker_host, container_id):
  """Gets the list of all processes in the 'docker_host' and 'container_id'.

  If the container is not found, returns an empty list of processes.

  Args:
    gs: global state.
    docker_host: the Docker host running the container.
    container_id: the container running the processes.

  Returns:
    list of wrapped process objects.
    Each element in the list is the result of
    utilities.wrap_object(process, 'Process', ...)

  Raises:
    CollectorError in case of failure to fetch data from Docker.
    Other exceptions may be raised due to execution errors.
  """
  processes_label = '%s/%s' % (docker_host, container_id)
  processes, timestamp_secs = gs.get_processes_cache().lookup(
      processes_label)
  if timestamp_secs is not None:
    gs.logger_info(
        'get_processes(docker_host=%s, container_id=%s) cache hit',
        docker_host, container_id)
    return processes

  container = get_one_container(gs, docker_host, container_id)
  if container is not None:
    assert utilities.is_wrapped_object(container, 'Container')
    container_short_hex_id = utilities.object_to_hex_id(container['properties'])
    assert utilities.valid_string(container_short_hex_id)
  else:
    # Parent container not found. Container might have crashed while we were
    # looking for it.
    return []

  container_name = utilities.get_container_name(container)
  if not utilities.valid_string(container_name):
    msg = 'Invalid container "Name" attribute in container %s' % container_id
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # NOTE: there is no trailing /json in this URL, unlike the "inspect"
  # endpoint above.
  # The {container_name} in the URL must be the internal container name
  # from container['properties']['Name'][1:], and not the container name
  # in container['id'], which may contain an extra suffix.
  url = ('http://{docker_host}:{port}/containers/{container_name}/top?'
         'ps_args=aux'.format(docker_host=docker_host,
                              port=gs.get_docker_port(),
                              container_name=container_name))
  fname = utilities.container_id_to_fname(
      docker_host, 'processes', container_name)

  try:
    # TODO(vasbala): what should we do in cases where the container is gone
    # (and replaced by a different one)?
    result = fetch_data(gs, url, fname, expect_missing=True)
  except ValueError:
    # This container does not exist anymore.
    return []
  except collector_error.CollectorError:
    raise
  except Exception:
    msg = 'fetching %s failed with exception %s' % (url, sys.exc_info()[0])
    gs.logger_exception(msg)
    raise collector_error.CollectorError(msg)

  if not isinstance(utilities.get_attribute(result, ['Titles']), list):
    invalid_processes(gs, url)
  if not isinstance(utilities.get_attribute(result, ['Processes']), list):
    invalid_processes(gs, url)

  pstats = result['Titles']
  processes = []
  now = time.time()
  for pvalues in result['Processes']:
    process = {}
    if not isinstance(pvalues, list):
      invalid_processes(gs, url)
    if len(pstats) != len(pvalues):
      invalid_processes(gs, url)
    for pstat, pvalue in zip(pstats, pvalues):
      process[pstat] = pvalue

    # Prefix with container ID to ensure uniqueness across the whole graph.
    process_id = '%s/%s' % (container_short_hex_id, process['PID'])
    processes.append(utilities.wrap_object(
        process, 'Process', process_id, now, label=process['PID']))

  ret_value = gs.get_processes_cache().update(
      processes_label, processes, now)
  gs.logger_info(
      'get_processes(docker_host=%s, container_id=%s) returns %d processes',
      docker_host, container_id, len(processes))
  return ret_value
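To make the Titles/Processes handling above concrete, here is a self-contained sketch with made-up values that mimic the shape of Docker's GET /containers/{name}/top?ps_args=aux reply, applying the same zip-based row-to-dict conversion as get_processes():

result = {
    'Titles': ['USER', 'PID', '%CPU', 'COMMAND'],
    'Processes': [
        ['root', '1', '0.0', 'php-fpm'],
        ['root', '42', '1.3', 'redis-server'],
    ],
}
container_short_hex_id = '8dcdfec8'  # short hex ID of the parent container
for pvalues in result['Processes']:
  # Pair each column title with the matching value in this row.
  process = dict(zip(result['Titles'], pvalues))
  # Prefix the PID with the container ID for graph-wide uniqueness, as
  # get_processes() does above.
  process_id = '%s/%s' % (container_short_hex_id, process['PID'])
  print('%s %s' % (process_id, process['COMMAND']))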