def show_cluster_storm_user_metrics(request, id):
    """Render the user-defined Storm metrics board for a cluster.

    Args:
        request: the Django HTTP request.
        id: primary key of the cluster to show.
    Returns:
        The rendered 'monitor/storm_user_board.html' template, or an empty
        HttpResponse when a metricserver task's raw metrics fail to parse
        (best-effort behavior kept from the original).
    """
    # NOTE(review): this view is duplicated elsewhere in this file — consider
    # deduplicating.
    cluster = dbutil.get_cluster(id)
    storm_tasks = dbutil.get_storm_task_by_cluster(cluster)
    # Raw user metrics format: <storm_id, "component_id:task_id", <key, value>>
    storm_metrics = {}
    for storm_task in storm_tasks:
        # Only the metricserver job carries the aggregated raw metrics.
        if storm_task.job.name != 'metricserver':
            continue
        try:
            json_metrics = json.loads(storm_task.last_metrics_raw)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; parse failures still short-circuit.
            logger.warning("Failed to parse metrics of task: %s", storm_task)
            return HttpResponse('')
        for storm_id, topology_metrics in json_metrics.iteritems():
            topology_metrics_dict = storm_metrics.setdefault(storm_id, {})
            for group_name, group_metrics in topology_metrics.iteritems():
                # Skip system and builtin groups; this board shows only
                # user-defined metric groups.
                if (group_name.startswith("STORM_SYSTEM_")
                        or group_name == "STORM_BUILTIN_SPOUT_METRICS"
                        or group_name == "STORM_BUILTIN_BOLT_METRICS"):
                    continue
                # Group names look like "<component_id>:<task_id>"; split once
                # instead of twice (original called split(":") per field).
                name_parts = group_name.split(":")
                group_component_id = name_parts[0]
                group_task_id = name_parts[1]
                group_metrics_dict = topology_metrics_dict.setdefault(
                    group_component_id, {})
                task_metrics_dict = group_metrics_dict.setdefault(
                    group_task_id, {})
                for metrics_name, metrics in group_metrics.iteritems():
                    task_metrics_dict[metrics_name] = metrics
    # storm_metrics is now: <storm_id, <component_id, <task_id, <key, value>>>>
    # Reshape into the table structure the template expects.
    format_storm_metrics = {}
    for storm_id, topology_metrics in storm_metrics.iteritems():
        format_topology_metrics = format_storm_metrics.setdefault(storm_id, {})
        for component_id, group_metrics in topology_metrics.iteritems():
            format_group_metrics = format_topology_metrics.setdefault(
                component_id, [])
            for task_id, metrics in group_metrics.iteritems():
                key_set, value_set = add_key_set_for_format_group_metrics(
                    format_group_metrics, metrics.keys())
                format_metrics_list = [task_id]
                for key in key_set:
                    if key == "TaskID":
                        continue
                    # Blank cell when this task lacks a metric other tasks have.
                    format_metrics_list.append(metrics.get(key, " "))
                value_set.append(format_metrics_list)
    # format_storm_metrics is now:
    # <storm_id, <component_id, [{"key_set": [k1, ..., kn],
    #                             "value_sets": [[v11, ..., v1n], ...,
    #                                            [vm1, ..., vmn]]}, ...]>>
    params = {
        'cluster': cluster,
        'storm_metrics': format_storm_metrics,
    }
    return respond(request, 'monitor/storm_user_board.html', params)
def show_cluster_storm_builtin_metrics(request, id):
    """Render the builtin Spout/Bolt Storm metrics board for a cluster.

    The board to render is selected by the ``type`` query parameter
    ("Spout" or "Bolt"); any other value yields an error response.

    Args:
        request: the Django HTTP request.
        id: primary key of the cluster to show.
    Returns:
        The rendered spout/bolt board template, an empty HttpResponse when
        metrics parsing fails, or an "Unsupported type" message.
    """
    cluster = dbutil.get_cluster(id)
    storm_tasks = dbutil.get_storm_task_by_cluster(cluster)
    # Renamed from `type`, which shadowed the builtin.
    metrics_type = request.GET.get('type')
    type_dict = {
        "Spout": "STORM_BUILTIN_SPOUT_METRICS",
        "Bolt": "STORM_BUILTIN_BOLT_METRICS",
    }
    # None when metrics_type is unsupported; then no group matches below.
    group_key = type_dict.get(metrics_type)
    # Builtin metrics format:
    # <storm_id, STORM_BUILTIN_SPOUT_METRICS|STORM_BUILTIN_BOLT_METRICS,
    #  <key, value>>
    storm_metrics = []
    for storm_task in storm_tasks:
        # Only the metricserver job carries the aggregated raw metrics.
        if storm_task.job.name != 'metricserver':
            continue
        try:
            json_metrics = json.loads(storm_task.last_metrics_raw)
        except Exception:
            # Narrowed from a bare `except:`; keep the original best-effort
            # empty response on parse failure.
            logger.warning("Failed to parse metrics of task: %s", storm_task)
            return HttpResponse('')
        for storm_id, topology_metrics in json_metrics.iteritems():
            element = {"storm_id": storm_id}
            for group_name, group_metrics in topology_metrics.iteritems():
                if group_name == group_key:
                    for metrics_name, metrics in group_metrics.iteritems():
                        # Normalize metric names into template-friendly
                        # identifiers (no leading underscores, no dashes).
                        metrics_name = metrics_name.lstrip('_')
                        metrics_name = metrics_name.replace('-', '_')
                        element[metrics_name] = metrics
            storm_metrics.append(element)
    params = {
        'cluster': cluster,
        'storm_metrics': storm_metrics,
    }
    if metrics_type == "Spout":
        return respond(request, 'monitor/storm_spout_board.html', params)
    elif metrics_type == "Bolt":
        return respond(request, 'monitor/storm_bolt_board.html', params)
    else:
        # str() guards against a missing query param (metrics_type is None),
        # which previously raised TypeError on string concatenation.
        return HttpResponse('Unsupported type: ' + str(metrics_type))
def show_cluster_storm_builtin_metrics(request, id):
    """Render the builtin Spout/Bolt Storm metrics board for a cluster.

    Selects the board via the ``type`` query parameter ("Spout" or "Bolt").

    Args:
        request: the Django HTTP request.
        id: primary key of the cluster to show.
    Returns:
        The rendered spout/bolt board template, an empty HttpResponse when
        metrics parsing fails, or an "Unsupported type" message.
    """
    # NOTE(review): this view is duplicated elsewhere in this file — consider
    # deduplicating.
    cluster = dbutil.get_cluster(id)
    storm_tasks = dbutil.get_storm_task_by_cluster(cluster)
    # Renamed from `type`, which shadowed the builtin.
    metrics_type = request.GET.get('type')
    type_dict = {
        "Spout": "STORM_BUILTIN_SPOUT_METRICS",
        "Bolt": "STORM_BUILTIN_BOLT_METRICS",
    }
    # None for an unsupported metrics_type; then no group matches below.
    wanted_group = type_dict.get(metrics_type)
    # Builtin metrics format:
    # <storm_id, STORM_BUILTIN_SPOUT_METRICS|STORM_BUILTIN_BOLT_METRICS,
    #  <key, value>>
    storm_metrics = []
    for storm_task in storm_tasks:
        # Only the metricserver job carries the aggregated raw metrics.
        if storm_task.job.name != 'metricserver':
            continue
        try:
            json_metrics = json.loads(storm_task.last_metrics_raw)
        except Exception:
            # Narrowed from a bare `except:`; keep the original best-effort
            # empty response on parse failure.
            logger.warning("Failed to parse metrics of task: %s", storm_task)
            return HttpResponse('')
        for storm_id, topology_metrics in json_metrics.iteritems():
            element = {"storm_id": storm_id}
            for group_name, group_metrics in topology_metrics.iteritems():
                if group_name == wanted_group:
                    for metrics_name, metrics in group_metrics.iteritems():
                        # Normalize metric names into template-friendly
                        # identifiers (no leading underscores, no dashes).
                        metrics_name = metrics_name.lstrip('_')
                        metrics_name = metrics_name.replace('-', '_')
                        element[metrics_name] = metrics
            storm_metrics.append(element)
    params = {
        'cluster': cluster,
        'storm_metrics': storm_metrics,
    }
    if metrics_type == "Spout":
        return respond(request, 'monitor/storm_spout_board.html', params)
    elif metrics_type == "Bolt":
        return respond(request, 'monitor/storm_bolt_board.html', params)
    else:
        # str() guards against a missing query param (metrics_type is None),
        # which previously raised TypeError on string concatenation.
        return HttpResponse('Unsupported type: ' + str(metrics_type))
def show_cluster_storm_user_metrics(request, id):
    """Render the user-defined Storm metrics board for a cluster.

    Args:
        request: the Django HTTP request.
        id: primary key of the cluster to show.
    Returns:
        The rendered 'monitor/storm_user_board.html' template, or an empty
        HttpResponse when a metricserver task's raw metrics fail to parse
        (best-effort behavior kept from the original).
    """
    # NOTE(review): this view is duplicated elsewhere in this file — consider
    # deduplicating.
    cluster = dbutil.get_cluster(id)
    storm_tasks = dbutil.get_storm_task_by_cluster(cluster)
    # Raw user metrics format: <storm_id, "component_id:task_id", <key, value>>
    storm_metrics = {}
    for storm_task in storm_tasks:
        # Only the metricserver job carries the aggregated raw metrics.
        if storm_task.job.name != 'metricserver':
            continue
        try:
            json_metrics = json.loads(storm_task.last_metrics_raw)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; parse failures still short-circuit.
            logger.warning("Failed to parse metrics of task: %s", storm_task)
            return HttpResponse('')
        for storm_id, topology_metrics in json_metrics.iteritems():
            topology_metrics_dict = storm_metrics.setdefault(storm_id, {})
            for group_name, group_metrics in topology_metrics.iteritems():
                # Skip system and builtin groups; this board shows only
                # user-defined metric groups.
                if (group_name.startswith("STORM_SYSTEM_")
                        or group_name == "STORM_BUILTIN_SPOUT_METRICS"
                        or group_name == "STORM_BUILTIN_BOLT_METRICS"):
                    continue
                # Group names look like "<component_id>:<task_id>"; split once
                # instead of twice (original called split(":") per field).
                parts = group_name.split(":")
                task_metrics_dict = topology_metrics_dict.setdefault(
                    parts[0], {}).setdefault(parts[1], {})
                for metrics_name, metrics in group_metrics.iteritems():
                    task_metrics_dict[metrics_name] = metrics
    # storm_metrics is now: <storm_id, <component_id, <task_id, <key, value>>>>
    # Reshape into the table structure the template expects.
    format_storm_metrics = {}
    for storm_id, topology_metrics in storm_metrics.iteritems():
        format_topology_metrics = format_storm_metrics.setdefault(storm_id, {})
        for component_id, group_metrics in topology_metrics.iteritems():
            format_group_metrics = format_topology_metrics.setdefault(
                component_id, [])
            for task_id, metrics in group_metrics.iteritems():
                key_set, value_set = add_key_set_for_format_group_metrics(
                    format_group_metrics, metrics.keys())
                format_metrics_list = [task_id]
                for key in key_set:
                    if key == "TaskID":
                        continue
                    # Blank cell when this task lacks a metric other tasks have.
                    format_metrics_list.append(metrics.get(key, " "))
                value_set.append(format_metrics_list)
    # format_storm_metrics is now:
    # <storm_id, <component_id, [{"key_set": [k1, ..., kn],
    #                             "value_sets": [[v11, ..., v1n], ...,
    #                                            [vm1, ..., vmn]]}, ...]>>
    params = {
        'cluster': cluster,
        'storm_metrics': format_storm_metrics,
    }
    return respond(request, 'monitor/storm_user_board.html', params)
def show_cluster_storm_system_metrics(request, id):
    """Render per-worker Storm system metrics (GC, heap, uptime) for a cluster.

    Args:
        request: the Django HTTP request.
        id: primary key of the cluster to show.
    Returns:
        The rendered 'monitor/storm_system_metrics_board.html' template, or an
        empty HttpResponse when a task's raw metrics fail to parse.
    """
    # NOTE(review): unlike the user/builtin views, no 'metricserver' job filter
    # is applied here — confirm that is intentional.
    cluster = dbutil.get_cluster(id)
    storm_tasks = dbutil.get_storm_task_by_cluster(cluster)
    # System metrics format: <storm_id, STORM_SYSTEM_*, <key, value>>;
    # keys may look like "GC/*", "memory/heap:*", "memory/nonHeap:*" or ".*".
    system_prefix = "STORM_SYSTEM_"
    storm_metrics = []
    for storm_task in storm_tasks:
        try:
            json_metrics = json.loads(storm_task.last_metrics_raw)
        except Exception:
            # Narrowed from a bare `except:`; keep the original best-effort
            # empty response on parse failure.
            logger.warning("Failed to parse metrics of task: %s",
                           storm_task.last_metrics_raw)
            return HttpResponse('')
        for storm_id, topology_metrics in json_metrics.iteritems():
            topology_element = []
            for group_name, group_metrics in topology_metrics.iteritems():
                if not group_name.startswith(system_prefix):
                    continue
                # BUGFIX: str.lstrip strips a *character set*, not a prefix, so
                # lstrip("STORM_SYSTEM_") also ate any leading S/T/O/R/M/Y/E/_
                # characters of the worker endpoint. Slice off the prefix.
                element = {"worker_endpoint": group_name[len(system_prefix):]}
                # Collect parts and join once instead of quadratic +=.
                gc_parts = []
                heap_parts = []
                non_heap_parts = []
                for metrics_name, metrics in group_metrics.iteritems():
                    # Same lstrip-vs-prefix fix for each metric family: e.g.
                    # lstrip("GC/") turned "GC/ConcurrentMarkSweep" into
                    # "oncurrentMarkSweep".
                    if metrics_name.startswith("GC/"):
                        gc_parts.append(
                            metrics_name[len("GC/"):] + ":" + str(metrics))
                    if metrics_name.startswith("memory/heap:"):
                        heap_parts.append(
                            metrics_name[len("memory/heap:"):] + ":"
                            + str(metrics))
                    if metrics_name.startswith("memory/nonHeap:"):
                        non_heap_parts.append(
                            metrics_name[len("memory/nonHeap:"):] + ":"
                            + str(metrics))
                    if metrics_name == "startTimeSecs":
                        element["start_time_sec"] = metrics
                    if metrics_name == "uptimeSecs":
                        element["uptime_sec"] = metrics
                element["GC"] = ", \n".join(gc_parts)
                element["memory_heap"] = ", \n".join(heap_parts)
                element["memory_non_heap"] = ", \n".join(non_heap_parts)
                topology_element.append(element)
            storm_metrics.append({
                "storm_id": storm_id,
                "topology_metrics": topology_element,
            })
    params = {
        'cluster': cluster,
        'storm_metrics': storm_metrics,
    }
    return respond(request, 'monitor/storm_system_metrics_board.html', params)
def show_cluster_storm_system_metrics(request, id):
    """Render per-worker Storm system metrics (GC, heap, uptime) for a cluster.

    Args:
        request: the Django HTTP request.
        id: primary key of the cluster to show.
    Returns:
        The rendered 'monitor/storm_system_metrics_board.html' template, or an
        empty HttpResponse when a task's raw metrics fail to parse.
    """
    # NOTE(review): this view is duplicated elsewhere in this file — consider
    # deduplicating. Also, unlike the user/builtin views, no 'metricserver'
    # job filter is applied here — confirm that is intentional.
    cluster = dbutil.get_cluster(id)
    storm_tasks = dbutil.get_storm_task_by_cluster(cluster)
    # System metrics format: <storm_id, STORM_SYSTEM_*, <key, value>>;
    # keys may look like "GC/*", "memory/heap:*", "memory/nonHeap:*" or ".*".
    prefix = "STORM_SYSTEM_"
    storm_metrics = []
    for storm_task in storm_tasks:
        try:
            json_metrics = json.loads(storm_task.last_metrics_raw)
        except Exception:
            # Narrowed from a bare `except:`; keep the original best-effort
            # empty response on parse failure.
            logger.warning("Failed to parse metrics of task: %s",
                           storm_task.last_metrics_raw)
            return HttpResponse('')
        for storm_id, topology_metrics in json_metrics.iteritems():
            topology_element = []
            for group_name, group_metrics in topology_metrics.iteritems():
                if not group_name.startswith(prefix):
                    continue
                # BUGFIX: str.lstrip strips a *character set*, not a prefix, so
                # lstrip("STORM_SYSTEM_") also ate any leading S/T/O/R/M/Y/E/_
                # characters of the worker endpoint. Slice off the prefix.
                element = {"worker_endpoint": group_name[len(prefix):]}
                # Collect parts and join once instead of quadratic +=.
                gc_parts = []
                heap_parts = []
                non_heap_parts = []
                for metrics_name, metrics in group_metrics.iteritems():
                    # Same lstrip-vs-prefix fix for each metric family: e.g.
                    # lstrip("GC/") turned "GC/ConcurrentMarkSweep" into
                    # "oncurrentMarkSweep".
                    if metrics_name.startswith("GC/"):
                        gc_parts.append(
                            metrics_name[len("GC/"):] + ":" + str(metrics))
                    if metrics_name.startswith("memory/heap:"):
                        heap_parts.append(
                            metrics_name[len("memory/heap:"):] + ":"
                            + str(metrics))
                    if metrics_name.startswith("memory/nonHeap:"):
                        non_heap_parts.append(
                            metrics_name[len("memory/nonHeap:"):] + ":"
                            + str(metrics))
                    if metrics_name == "startTimeSecs":
                        element["start_time_sec"] = metrics
                    if metrics_name == "uptimeSecs":
                        element["uptime_sec"] = metrics
                element["GC"] = ", \n".join(gc_parts)
                element["memory_heap"] = ", \n".join(heap_parts)
                element["memory_non_heap"] = ", \n".join(non_heap_parts)
                topology_element.append(element)
            storm_metrics.append({
                "storm_id": storm_id,
                "topology_metrics": topology_element,
            })
    params = {
        'cluster': cluster,
        'storm_metrics': storm_metrics,
    }
    return respond(request, 'monitor/storm_system_metrics_board.html', params)