def test_node_id_to_cluster_name(self):
  """Tests node_id_to_cluster_name()."""
  # Table of (expected cluster name, node ID) pairs, checked in order.
  expected_and_node_id = [
      ('guestbook', 'k8s-guestbook-node-1.c.rising-apricot-840.internal'),
      ('guestbook', 'k8s-guestbook-node-1'),
      ('guestbook', 'Node:k8s-guestbook-node-1'),
      ('_unknown_',
       'kubernetes-minion-dlc9.c.spartan-alcove-89517.google.com.'
       'internal'),
      ('_unknown_', 'x.y.z.w'),
      ('_unknown_', 'Node:x.y.z.w'),
      ('_unknown_', 'Node:'),
  ]
  for expected, node_id in expected_and_node_id:
    self.assertEqual(expected, utilities.node_id_to_cluster_name(node_id))

  # An empty node ID is rejected outright.
  with self.assertRaises(AssertionError):
    utilities.node_id_to_cluster_name('')
def test_node_id_to_cluster_name(self):
  """Tests node_id_to_cluster_name()."""
  # Table of (node ID, expected cluster name) pairs, checked in order.
  cases = [
      ('k8s-guestbook-node-1.c.rising-apricot-840.internal', 'guestbook'),
      ('k8s-guestbook-node-1', 'guestbook'),
      ('Node:k8s-guestbook-node-1', 'guestbook'),
      ('kubernetes-minion-dlc9.c.spartan-alcove-89517.google.com.'
       'internal', '_unknown_'),
      ('x.y.z.w', '_unknown_'),
      ('', '_unknown_'),
      ('Node:x.y.z.w', '_unknown_'),
      ('Node:', '_unknown_'),
  ]
  for node_id, expected in cases:
    self.assertEqual(expected, utilities.node_id_to_cluster_name(node_id))
def _do_compute_graph(gs, input_queue, output_queue, output_format):
  """Returns the context graph in the specified format.

  Args:
    gs: the global state.
    input_queue: the input queue for the worker threads.
    output_queue: output queue containing exceptions data from the worker
      threads.
    output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

  Returns:
    A successful response in the specified format.

  Raises:
    CollectorError: inconsistent or invalid graph data.
  """
  assert isinstance(gs, global_state.GlobalState)
  assert isinstance(input_queue, Queue.PriorityQueue)
  assert isinstance(output_queue, Queue.Queue)
  assert utilities.valid_string(output_format)

  graph = ContextGraph()
  graph.set_version(docker.get_version(gs))
  graph.set_metadata({'timestamp': datetime.datetime.now().isoformat()})

  # Nodes
  nodes_list = kubernetes.get_nodes_with_metrics(gs)
  if not nodes_list:
    return graph.dump(gs, output_format)

  # Get the cluster name from the first node.
  # The cluster name is an approximation. It is not a big deal if it
  # is incorrect, since the aggregator knows the cluster name.
  cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
  cluster_guid = 'Cluster:' + cluster_name
  graph.set_title(cluster_name)
  graph.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                     nodes_list[0]['timestamp'], {})

  def enqueue(work_fn, work_args):
    # Schedule one unit of work for the worker threads at a random priority.
    input_queue.put((gs.get_random_priority(), work_fn, work_args))

  # Nodes
  for node in nodes_list:
    enqueue(_do_compute_node,
            {'gs': gs, 'input_queue': input_queue,
             'cluster_guid': cluster_guid, 'node': node, 'g': graph})

  # Services
  for service in kubernetes.get_services(gs):
    enqueue(_do_compute_service,
            {'gs': gs, 'cluster_guid': cluster_guid,
             'service': service, 'g': graph})

  # ReplicationControllers
  for rcontroller in kubernetes.get_rcontrollers(gs):
    enqueue(_do_compute_rcontroller,
            {'gs': gs, 'cluster_guid': cluster_guid,
             'rcontroller': rcontroller, 'g': graph})

  # Wait until worker threads finished processing all outstanding requests.
  # Once we return from the join(), all output was generated already.
  input_queue.join()

  # Convert any exception caught by the worker threads to an exception
  # raised by the current thread.
  if not output_queue.empty():
    msg = output_queue.get_nowait()  # should not fail.
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # Dump the resulting graph
  return graph.dump(gs, output_format)
def _do_compute_graph(gs, input_queue, output_queue, output_format):
  """Returns the context graph in the specified format.

  Args:
    gs: the global state.
    input_queue: the input queue for the worker threads.
    output_queue: output queue containing exceptions data from the worker
      threads.
    output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

  Returns:
    A successful response in the specified format.

  Raises:
    CollectorError: inconsistent or invalid graph data.
  """
  assert isinstance(gs, global_state.GlobalState)
  assert isinstance(input_queue, Queue.PriorityQueue)
  assert isinstance(output_queue, Queue.Queue)
  assert utilities.valid_string(output_format)

  g = ContextGraph()
  g.set_version(docker.get_version(gs))
  g.set_metadata({'timestamp': utilities.now()})
  g.set_relations_to_timestamps(gs.get_relations_to_timestamps())

  # Nodes
  nodes_list = kubernetes.get_nodes_with_metrics(gs)
  if not nodes_list:
    return g.dump(gs, output_format)

  # Find the timestamp of the oldest node. This will be the timestamp of
  # the cluster.
  oldest_timestamp = utilities.now()
  for node in nodes_list:
    assert utilities.is_wrapped_object(node, 'Node')
    # Timestamps are strings; two-argument min() compares them
    # lexicographically, which matches their chronological order here
    # since they share one format (same ordering the '<' operator gives).
    oldest_timestamp = min(oldest_timestamp, node['timestamp'])

  # Get the cluster name from the first node.
  # The cluster name is an approximation. It is not a big deal if it
  # is incorrect, since the aggregator knows the cluster name.
  cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
  cluster_guid = 'Cluster:' + cluster_name
  g.set_title(cluster_name)
  g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                 oldest_timestamp, {})

  # Nodes
  for node in nodes_list:
    input_queue.put((gs.get_random_priority(), _do_compute_node, {
        'gs': gs,
        'input_queue': input_queue,
        'cluster_guid': cluster_guid,
        'node': node,
        'g': g
    }))

  # Services
  for service in kubernetes.get_services(gs):
    input_queue.put((gs.get_random_priority(), _do_compute_service, {
        'gs': gs,
        'cluster_guid': cluster_guid,
        'service': service,
        'g': g
    }))

  # ReplicationControllers
  rcontrollers_list = kubernetes.get_rcontrollers(gs)
  for rcontroller in rcontrollers_list:
    input_queue.put((gs.get_random_priority(), _do_compute_rcontroller, {
        'gs': gs,
        'cluster_guid': cluster_guid,
        'rcontroller': rcontroller,
        'g': g
    }))

  # Wait until worker threads finished processing all outstanding requests.
  # Once we return from the join(), all output was generated already.
  input_queue.join()

  # Convert any exception caught by the worker threads to an exception
  # raised by the current thread.
  if not output_queue.empty():
    msg = output_queue.get_nowait()  # should not fail.
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # Keep the relations_to_timestamps mapping for next call.
  gs.set_relations_to_timestamps(g.get_relations_to_timestamps())

  # Dump the resulting graph
  return g.dump(gs, output_format)
def _do_compute_graph(gs, input_queue, output_queue, output_format):
  """Returns the context graph in the specified format.

  Args:
    gs: the global state.
    input_queue: the input queue for the worker threads.
    output_queue: output queue containing exceptions data from the worker
      threads.
    output_format: one of 'graph', 'dot', 'context_graph', or 'resources'.

  Returns:
    A successful response in the specified format.

  Raises:
    CollectorError: inconsistent or invalid graph data.
  """
  assert isinstance(gs, global_state.GlobalState)
  assert isinstance(input_queue, Queue.PriorityQueue)
  assert isinstance(output_queue, Queue.Queue)
  assert utilities.valid_string(output_format)

  g = ContextGraph()
  # A failure to read the Docker version must not abort the whole graph
  # computation; record a placeholder version instead.
  try:
    version = docker.get_version(gs)
  except Exception:
    exc_type, value, _ = sys.exc_info()
    msg = ('get_version() failed with exception %s: %s' %
           (exc_type, value))
    gs.logger_error(msg)
    version = '_unknown_'

  g.set_version(version)
  g.set_relations_to_timestamps(gs.get_relations_to_timestamps())

  # Nodes
  nodes_list = kubernetes.get_nodes_with_metrics(gs)
  if not nodes_list:
    return g.dump(gs, output_format)

  # Find the timestamp of the oldest node. This will be the timestamp of
  # the cluster.
  oldest_timestamp = utilities.now()
  for node in nodes_list:
    assert utilities.is_wrapped_object(node, 'Node')
    # Timestamps are strings; two-argument min() compares them
    # lexicographically, which matches their chronological order here
    # since they share one format (same ordering the '<' operator gives).
    oldest_timestamp = min(oldest_timestamp, node['timestamp'])

  # Get the cluster name from the first node.
  # The cluster name is an approximation. It is not a big deal if it
  # is incorrect, since the aggregator knows the cluster name.
  cluster_name = utilities.node_id_to_cluster_name(nodes_list[0]['id'])
  cluster_guid = 'Cluster:' + cluster_name
  g.set_title(cluster_name)
  g.add_resource(cluster_guid, {'label': cluster_name}, 'Cluster',
                 oldest_timestamp, {})

  # Nodes
  for node in nodes_list:
    input_queue.put((
        gs.get_random_priority(), _do_compute_node,
        {'gs': gs, 'input_queue': input_queue,
         'cluster_guid': cluster_guid, 'node': node, 'g': g}))

  # Services
  for service in kubernetes.get_services(gs):
    input_queue.put((
        gs.get_random_priority(), _do_compute_service,
        {'gs': gs, 'cluster_guid': cluster_guid,
         'service': service, 'g': g}))

  # ReplicationControllers
  rcontrollers_list = kubernetes.get_rcontrollers(gs)
  for rcontroller in rcontrollers_list:
    input_queue.put((
        gs.get_random_priority(), _do_compute_rcontroller,
        {'gs': gs, 'cluster_guid': cluster_guid,
         'rcontroller': rcontroller, 'g': g}))

  # Pods running on the master node.
  input_queue.put((
      gs.get_random_priority(), _do_compute_master_pods,
      {'gs': gs, 'cluster_guid': cluster_guid,
       'nodes_list': nodes_list,
       'oldest_timestamp': oldest_timestamp, 'g': g}))

  # Wait until worker threads finished processing all outstanding requests.
  # Once we return from the join(), all output was generated already.
  input_queue.join()

  # Convert any exception caught by the worker threads to an exception
  # raised by the current thread.
  if not output_queue.empty():
    msg = output_queue.get_nowait()  # should not fail.
    gs.logger_error(msg)
    raise collector_error.CollectorError(msg)

  # Keep the relations_to_timestamps mapping for next call.
  gs.set_relations_to_timestamps(g.get_relations_to_timestamps())
  g.set_metadata({'timestamp': g.max_resources_and_relations_timestamp()})

  # Dump the resulting graph
  return g.dump(gs, output_format)