Example #1
def assemble():
    """Creates a build/$version directory with the artifacts to bake into the docker image"""

    v     = get_version(env)
    prof  = cfg.profile(env)
    files = env.PROJECT_FILES
    build = cfg.build_dir(v)

    run("mkdir -p %s" % build)

    for f in files:
        expanded_path = f % env
        recurse = "-r " if path.isdir(expanded_path) else " "

        run("cp %s%s %s" % (recurse, expanded_path, build))
Example #2
def get_version():
  """Computes the response of the '/version' endpoint.

  Returns:
    The value returned by docker.get_version() or an error message.
  """
  gs = app.context_graph_global_state
  try:
    version = docker.get_version(gs)
    return flask.jsonify(utilities.make_response(version, 'version'))
  except collector_error.CollectorError as e:
    return flask.jsonify(utilities.make_error(str(e)))
  except:
    msg = ('get_version() failed with exception %s' % sys.exc_info()[0])
    app.logger.exception(msg)
    return flask.jsonify(utilities.make_error(msg))
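
make_response() and make_error() come from the project's utilities module and are not shown; below is a plausible sketch of their shape, assuming they wrap the payload in a small JSON envelope (the real helpers may differ).

# Hypothetical sketch of the response wrappers used by get_version();
# the actual utilities module is not part of this example.
import time

def make_response(value, attribute_name):
    # wrap a successful result under the given attribute name
    return {'success': True,
            'timestamp': time.strftime('%Y-%m-%dT%H:%M:%S'),
            attribute_name: value}

def make_error(error_message):
    # wrap a failure message so the endpoint still returns valid JSON
    return {'success': False,
            'timestamp': time.strftime('%Y-%m-%dT%H:%M:%S'),
            'error_message': error_message}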
Example #3
def get_version():
    """Computes the response of the '/version' endpoint.

    Returns:
      The value returned by docker.get_version() or an error message.
    """
    gs = app.context_graph_global_state
    try:
        version = docker.get_version(gs)
        return flask.jsonify(utilities.make_response(version, 'version'))
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    except:
        msg = ('get_version() failed with exception %s' % sys.exc_info()[0])
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
Example #4
def update_task(task_def):
    """Registers a new revision of the given ECS task definition, pointing it at the docker image for the current version."""
    version = get_version(env)
    yes = env.get("yes") or False

    image = image_name_from_version(version, env.DOCKER_REPO)
    task_def = mutate_image(task_def, image)

    print("Updated the definition to the following:\n%s\n" % pformat(task_def))
    if not yes: confirm("Continue?")

    task_json = json.dumps(task_def, separators=(',', ':'))
    created = check_output([
        "aws", "ecs", "register-task-definition", "--cli-input-json", task_json
    ])
    rev = json.loads(created)["taskDefinition"]["revision"]

    env.revision = rev
    print "Success! Created revision %d" % rev
Example #5
def update_task_def(task_def=None):
    """Copies the most recent task definition (in ECS) to use a given docker image version. Reqs: version"""

    if task_def:
        execute(update_task, task_def=task_def)
        return

    version = get_version(env)

    task_defs_cmd = capture(
        "aws ecs list-task-definitions --family-prefix %s --sort DESC" %
        env.APP)
    all_defs = json.loads(task_defs_cmd)["taskDefinitionArns"]

    app_defs = filter(lambda d: ("task-definition/%s" % env.APP) in d,
                      all_defs)
    last_def = sorted(app_defs, key=task_def_revision)[-1]
    last_pretty = pretty_def(last_def)
    yes = env.get("yes") or False

    print(
        "Creating definition based on\n\n\t%(last_pretty)s\n\nusing docker image version %(version)s"
        % locals())
    if not yes: confirm("Continue?")

    old_def = json.loads(
        capture("aws ecs describe-task-definition --task-definition %s" %
                last_def))
    new_def = updated_def_from_old(
        old_def, image_name_from_version(version, env.DOCKER_REPO))

    print("Updated the definition to the following:\n%s\n" % pformat(new_def))
    if not yes: confirm("Continue?")

    new_def_json = json.dumps(new_def, separators=(',', ':'))
    created = check_output([
        "aws", "ecs", "register-task-definition", "--cli-input-json",
        new_def_json
    ])
    rev = json.loads(created)["taskDefinition"]["revision"]

    env.revision = rev

    print "Success! Created revision %d" % rev
Example #6
def _do_compute_graph(gs, input_queue, output_queue, output_format):
  """Returns the context graph in the specified format.

  Args:
    gs: the global state.
    input_queue: the input queue for the worker threads.
    output_queue: the output queue containing exception data from the worker
        threads.
    output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

  Returns:
    A successful response in the specified format.

  Raises:
    CollectorError: inconsistent or invalid graph data.
  """
  assert isinstance(gs, global_state.GlobalState)
  assert isinstance(input_queue, Queue.PriorityQueue)
  assert isinstance(output_queue, Queue.Queue)
  assert utilities.valid_string(output_format)

  g = ContextGraph()
  g.set_version(docker.get_version(gs))
  g.set_metadata({'timestamp': datetime.datetime.now().isoformat()})

  # Nodes
  nodes_list = kubernetes.get_nodes_with_metrics(gs)
  if not nodes_list:
    return g.dump(gs, output_format)

  # Get the cluster name from the first node.
  # The cluster name is an approximation. It is not a big deal if it
  # is incorrect, since the aggregator knows the cluster name.
  cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
  cluster_guid = 'Cluster:' + cluster_name
  g.set_title(cluster_name)
  g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                 nodes_list[0]['timestamp'], {})

  # Nodes
  for node in nodes_list:
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_node,
        {'gs': gs, 'input_queue': input_queue, 'cluster_guid': cluster_guid,
         'node': node, 'g': g}))

  # Services
  for service in kubernetes.get_services(gs):
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_service,
        {'gs': gs, 'cluster_guid': cluster_guid, 'service': service, 'g': g}))

  # ReplicationControllers
  rcontrollers_list = kubernetes.get_rcontrollers(gs)
  for rcontroller in rcontrollers_list:
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_rcontroller,
        {'gs': gs, 'cluster_guid': cluster_guid, 'rcontroller': rcontroller,
         'g': g}))

  # Wait until worker threads finished processing all outstanding requests.
  # Once we return from the join(), all output was generated already.
  input_queue.join()

  # Convert any exception caught by the worker threads to an exception
  # raised by the current thread.
  if not output_queue.empty():
    msg = output_queue.get_nowait()  # should not fail.
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # Dump the resulting graph
  return g.dump(gs, output_format)
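
Each queue entry above is a (priority, function, kwargs) tuple, so input_queue.join() returns only after the worker threads have processed every item; below is a minimal sketch of the worker loop this implies (the actual worker code is not part of this example).

# Hypothetical sketch of a worker thread draining the priority queue;
# the real worker implementation is not shown here.
def worker(input_queue, output_queue):
    while True:
        _priority, func, kwargs = input_queue.get()
        try:
            func(**kwargs)
        except Exception as e:
            # surface the failure to the caller of _do_compute_graph()
            output_queue.put(str(e))
        finally:
            # lets input_queue.join() return once every item is processed
            input_queue.task_done()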
Example #7
def _do_compute_graph(gs, input_queue, output_queue, output_format):
    """Returns the context graph in the specified format.

    Args:
      gs: the global state.
      input_queue: the input queue for the worker threads.
      output_queue: the output queue containing exception data from the worker
          threads.
      output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

    Returns:
      A successful response in the specified format.

    Raises:
      CollectorError: inconsistent or invalid graph data.
    """
    assert isinstance(gs, global_state.GlobalState)
    assert isinstance(input_queue, Queue.PriorityQueue)
    assert isinstance(output_queue, Queue.Queue)
    assert utilities.valid_string(output_format)

    g = ContextGraph()
    g.set_version(docker.get_version(gs))
    g.set_metadata({'timestamp': utilities.now()})
    g.set_relations_to_timestamps(gs.get_relations_to_timestamps())

    # Nodes
    nodes_list = kubernetes.get_nodes_with_metrics(gs)
    if not nodes_list:
        return g.dump(gs, output_format)

    # Find the timestamp of the oldest node. This will be the timestamp of
    # the cluster.
    oldest_timestamp = utilities.now()
    for node in nodes_list:
        assert utilities.is_wrapped_object(node, 'Node')
        # note: we cannot call min(oldest_timestamp, node['timestamp']) here
        # because min(string) returns the smallest character in the string.
        if node['timestamp'] < oldest_timestamp:
            oldest_timestamp = node['timestamp']

    # Get the cluster name from the first node.
    # The cluster name is an approximation. It is not a big deal if it
    # is incorrect, since the aggregator knows the cluster name.
    cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
    cluster_guid = 'Cluster:' + cluster_name
    g.set_title(cluster_name)
    g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                   oldest_timestamp, {})

    # Nodes
    for node in nodes_list:
        input_queue.put((gs.get_random_priority(), _do_compute_node, {
            'gs': gs,
            'input_queue': input_queue,
            'cluster_guid': cluster_guid,
            'node': node,
            'g': g
        }))

    # Services
    for service in kubernetes.get_services(gs):
        input_queue.put((gs.get_random_priority(), _do_compute_service, {
            'gs': gs,
            'cluster_guid': cluster_guid,
            'service': service,
            'g': g
        }))

    # ReplicationControllers
    rcontrollers_list = kubernetes.get_rcontrollers(gs)
    for rcontroller in rcontrollers_list:
        input_queue.put((gs.get_random_priority(), _do_compute_rcontroller, {
            'gs': gs,
            'cluster_guid': cluster_guid,
            'rcontroller': rcontroller,
            'g': g
        }))

    # Wait until worker threads finished processing all outstanding requests.
    # Once we return from the join(), all output was generated already.
    input_queue.join()

    # Convert any exception caught by the worker threads to an exception
    # raised by the current thread.
    if not output_queue.empty():
        msg = output_queue.get_nowait()  # should not fail.
        gs.logger_error(msg)
        raise collector_error.CollectorError(msg)

    # Keep the relations_to_timestamps mapping for next call.
    gs.set_relations_to_timestamps(g.get_relations_to_timestamps())

    # Dump the resulting graph
    return g.dump(gs, output_format)
Example #8
def _do_compute_graph(gs, input_queue, output_queue, output_format):
  """Returns the context graph in the specified format.

  Args:
    gs: the global state.
    input_queue: the input queue for the worker threads.
    output_queue: the output queue containing exception data from the worker
        threads.
    output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

  Returns:
    A successful response in the specified format.

  Raises:
    CollectorError: inconsistent or invalid graph data.
  """
  assert isinstance(gs, global_state.GlobalState)
  assert isinstance(input_queue, Queue.PriorityQueue)
  assert isinstance(output_queue, Queue.Queue)
  assert utilities.valid_string(output_format)

  g = ContextGraph()
  try:
    version = docker.get_version(gs)
  except Exception as e:
    exc_type, value, _ = sys.exc_info()
    msg = ('get_version() failed with exception %s: %s' %
           (exc_type, value))
    gs.logger_error(msg)
    version = '_unknown_'

  g.set_version(version)
  g.set_relations_to_timestamps(gs.get_relations_to_timestamps())

  # Nodes
  nodes_list = kubernetes.get_nodes_with_metrics(gs)
  if not nodes_list:
    return g.dump(gs, output_format)

  # Find the timestamp of the oldest node. This will be the timestamp of
  # the cluster.
  oldest_timestamp = utilities.now()
  for node in nodes_list:
    assert utilities.is_wrapped_object(node, 'Node')
    # note: we cannot call min(oldest_timestamp, node['timestamp']) here
    # because min(string) returns the smallest character in the string.
    if node['timestamp'] < oldest_timestamp:
      oldest_timestamp = node['timestamp']

  # Get the cluster name from the first node.
  # The cluster name is an approximation. It is not a big deal if it
  # is incorrect, since the aggregator knows the cluster name.
  cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
  cluster_guid = 'Cluster:' + cluster_name
  g.set_title(cluster_name)
  g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                 oldest_timestamp, {})

  # Nodes
  for node in nodes_list:
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_node,
        {'gs': gs, 'input_queue': input_queue, 'cluster_guid': cluster_guid,
         'node': node, 'g': g}))

  # Services
  for service in kubernetes.get_services(gs):
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_service,
        {'gs': gs, 'cluster_guid': cluster_guid, 'service': service, 'g': g}))

  # ReplicationControllers
  rcontrollers_list = kubernetes.get_rcontrollers(gs)
  for rcontroller in rcontrollers_list:
    input_queue.put((
        gs.get_random_priority(),
        _do_compute_rcontroller,
        {'gs': gs, 'cluster_guid': cluster_guid, 'rcontroller': rcontroller,
         'g': g}))

  # Pods running on the master node.
  input_queue.put((
      gs.get_random_priority(),
      _do_compute_master_pods,
      {'gs': gs, 'cluster_guid': cluster_guid, 'nodes_list': nodes_list,
       'oldest_timestamp': oldest_timestamp, 'g': g}))

  # Wait until worker threads finished processing all outstanding requests.
  # Once we return from the join(), all output was generated already.
  input_queue.join()

  # Convert any exception caught by the worker threads to an exception
  # raised by the current thread.
  if not output_queue.empty():
    msg = output_queue.get_nowait()  # should not fail.
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # Keep the relations_to_timestamps mapping for next call.
  gs.set_relations_to_timestamps(g.get_relations_to_timestamps())
  g.set_metadata({'timestamp': g.max_resources_and_relations_timestamp()})

  # Dump the resulting graph
  return g.dump(gs, output_format)