def _summarize_ram_usage(end_date, steps, nodes):
    """Create a list of datetime points containing the amounts of RAM usage
    per node per hour, day, week up to the given date.

    :param end_date: datetime upper bound of the reporting window
    :param steps: mapping of step name -> {"step": minutes, "times": count}
    :param nodes: iterable of node objects exposing a ``name`` attribute
    :return: {step_name: {node_name: {"info": {...}, "points": [...]}}}
    """
    result = {}
    # One cache handle is enough; avoid re-instantiating per data point.
    cache = StatsCache()
    for step_name, step in steps.iteritems():
        step_size = step.get("step")
        max_points = step.get("times")
        past = end_date - datetime.timedelta(minutes=step_size * max_points)
        result[step_name] = {
            node.name: {
                "info": {
                    "max_points": max_points,
                    "step_size": step_size,
                    "start": past.strftime("%Y-%m-%d %H:%M:%S"),
                    "finish": end_date.strftime("%Y-%m-%d %H:%M:%S"),
                    "max": 100,
                },
                "points": [],
            }
            for node in nodes
        }
        for _ in range(max_points):
            later = past + datetime.timedelta(minutes=step_size)
            for node in nodes:
                # Query for latest entry for current node in given time range
                status = cache.get_stat("status", later, step_size,
                                        key_prefix=node.name)
                if status is None:
                    q = NodeStatus.query.filter(
                        NodeStatus.name == node.name,
                        NodeStatus.timestamp >= past
                    ).order_by(NodeStatus.timestamp.asc()).first()
                    if q is not None:
                        status = q.status
                        cache.update("status", step_size, set_value=status,
                                     set_dt=later, key_prefix=node.name)
                # `not status` already covers None and the empty dict;
                # also skip entries without a memory reading.
                if not status or status.get("memory") is None:
                    continue
                try:
                    memory = int(status.get("memory"))
                except (TypeError, ValueError):
                    # Non-numeric payload (e.g. unexpected structure) — skip.
                    continue
                time_key = later.strftime("%Y-%m-%d %H:%M:%S")
                result[step_name][node.name]["points"].append({
                    "datetime": time_key,
                    "value": memory
                })
            past = later
    return result
def _summarize_cpu_usage(end_date, steps, nodes):
    """Create a list of datetime points containing the amounts of CPU usage
    in percent per node per hour, day, week up to the given date.

    :param end_date: datetime upper bound of the reporting window
    :param steps: mapping of step name -> {"step": minutes, "times": count}
    :param nodes: iterable of node objects exposing a ``name`` attribute
    :return: {step_name: {node_name: {"info": {...}, "points": [...]}}}
    """
    result = {}
    # One cache handle is enough; avoid re-instantiating per data point.
    cache = StatsCache()
    for step_name, step in steps.iteritems():
        step_size = step.get("step")
        max_points = step.get("times")
        past = end_date - datetime.timedelta(minutes=step_size * max_points)
        result[step_name] = {
            node.name: {
                "info": {
                    "max_points": max_points,
                    "step_size": step_size,
                    "start": past.strftime("%Y-%m-%d %H:%M:%S"),
                    "finish": end_date.strftime("%Y-%m-%d %H:%M:%S"),
                    "max": 100
                },
                "points": []
            }
            for node in nodes
        }
        for _ in range(max_points):
            later = past + datetime.timedelta(minutes=step_size)
            for node in nodes:
                # Query for latest entry for current node in given time range
                status = cache.get_stat("status", later, step_size,
                                        key_prefix=node.name)
                if status is None:
                    q = NodeStatus.query.filter(
                        NodeStatus.name == node.name,
                        NodeStatus.timestamp >= past
                    ).order_by(NodeStatus.timestamp.asc()).first()
                    if q is not None:
                        status = q.status
                        cache.update("status", step_size, set_value=status,
                                     set_dt=later, key_prefix=node.name)
                # Also require a non-empty "cpuload"; without this guard a
                # status missing load data crashed on cpu_load[0] below.
                if (not status or not status.get("cpu_count")
                        or not status.get("cpuload")):
                    continue
                cpu_count = status.get("cpu_count")
                cpu_load = status.get("cpuload")
                # Use average load of last minute. See doc (os.getloadavg)
                load = int(cpu_load[0] / cpu_count * 100)
                time_key = later.strftime("%Y-%m-%d %H:%M:%S")
                result[step_name][node.name]["points"].append({
                    "datetime": time_key,
                    "value": load
                })
            past = later
    return result
def _summarize_disk_usage(end_date, steps, nodes):
    """Create a list of datetime points containing the amounts of currently
    used disk space per node by last hour, day, week up to the given date.

    :param end_date: datetime upper bound of the reporting window
    :param steps: mapping of step name -> {"step": minutes, "times": count}
    :param nodes: iterable of node objects exposing a ``name`` attribute
    :return: {step_name: {node_name: {"info": {...}, "points": {...}}}}
    """
    results = {}
    # For each node, determine which storages there are and their total
    # storage volume
    storage_nodes = {}
    for node in nodes:
        node_status = NodeStatus.query.filter(
            NodeStatus.name == node.name
        ).order_by(NodeStatus.timestamp.desc()).first()
        if node_status:
            # Guard against a status row without diskspace information.
            diskspace = node_status.status.get("diskspace") or {}
            storage_nodes[node.name] = {
                disk_n: {"total": val["total"]}
                for disk_n, val in diskspace.iteritems()
            }
    # One cache handle is enough; avoid re-instantiating per data point.
    cache = StatsCache()
    for step_name, step in steps.iteritems():
        step_size = step.get("step")
        max_points = step.get("times")
        past = end_date - datetime.timedelta(minutes=step_size * max_points)
        results[step_name] = {
            node.name: {
                "info": {
                    # .get(): nodes with no NodeStatus row have no entry in
                    # storage_nodes; indexing directly raised KeyError.
                    "disks": storage_nodes.get(node.name, {}),
                    "max_points": max_points,
                    "step_size": step_size,
                    "start": past.strftime("%Y-%m-%d %H:%M:%S"),
                    "finish": end_date.strftime("%Y-%m-%d %H:%M:%S")
                },
                "points": {}
            }
            for node in nodes
        }
        for _ in range(max_points):
            later = past + datetime.timedelta(minutes=step_size)
            for node in nodes:
                # Query for latest entry for current node in given time range
                status = cache.get_stat("status", later, step_size,
                                        key_prefix=node.name)
                if status is None:
                    q = NodeStatus.query.filter(
                        NodeStatus.name == node.name,
                        NodeStatus.timestamp >= past
                    ).order_by(NodeStatus.timestamp.asc()).first()
                    if q is not None:
                        status = q.status
                        cache.update("status", step_size, set_value=status,
                                     set_dt=later, key_prefix=node.name)
                if not status:
                    continue
                diskspace = status.get("diskspace")
                if not diskspace:
                    # No disk data in this sample; previously crashed on
                    # None.iteritems().
                    continue
                time_key = later.strftime("%Y-%m-%d %H:%M:%S")
                current = results[step_name][node.name]["points"]
                for st_name, val in diskspace.iteritems():
                    storage_name = "%s_used" % st_name
                    if storage_name not in current:
                        current[storage_name] = []
                    current[storage_name].append({
                        "datetime": time_key,
                        "value": val["used"]
                    })
            past = later
    return results
def _summarize_ram_usage(end_date, steps, nodes):
    """Create a list of datetime points containing the amounts of RAM usage
    per node per hour, day, week up to the given date.

    :param end_date: datetime upper bound of the reporting window
    :param steps: mapping of step name -> {"step": minutes, "times": count}
    :param nodes: iterable of node objects exposing a ``name`` attribute
    :return: {step_name: {node_name: {"info": {...}, "points": [...]}}}
    """
    result = {}
    # One cache handle is enough; avoid re-instantiating per data point.
    cache = StatsCache()
    for step_name, step in steps.iteritems():
        step_size = step.get("step")
        max_points = step.get("times")
        past = end_date - datetime.timedelta(minutes=step_size * max_points)
        result[step_name] = {
            node.name: {
                "info": {
                    "max_points": max_points,
                    "step_size": step_size,
                    "start": past.strftime("%Y-%m-%d %H:%M:%S"),
                    "finish": end_date.strftime("%Y-%m-%d %H:%M:%S"),
                    "max": 100,
                },
                "points": [],
            }
            for node in nodes
        }
        for _ in range(max_points):
            later = past + datetime.timedelta(minutes=step_size)
            for node in nodes:
                # Query for latest entry for current node in given time range
                status = cache.get_stat("status", later, step_size,
                                        key_prefix=node.name)
                if status is None:
                    q = NodeStatus.query.filter(
                        NodeStatus.name == node.name,
                        NodeStatus.timestamp >= past
                    ).order_by(NodeStatus.timestamp.asc()).first()
                    if q is not None:
                        status = q.status
                        cache.update("status", step_size, set_value=status,
                                     set_dt=later, key_prefix=node.name)
                # `not status` already covers None and the empty dict;
                # also skip entries without a memory reading.
                if not status or status.get("memory") is None:
                    continue
                try:
                    memory = int(status.get("memory"))
                except (TypeError, ValueError):
                    # Non-numeric payload (e.g. unexpected structure) — skip.
                    continue
                time_key = later.strftime("%Y-%m-%d %H:%M:%S")
                result[step_name][node.name]["points"].append({
                    "datetime": time_key,
                    "value": memory
                })
            past = later
    return result
def _summarize_cpu_usage(end_date, steps, nodes):
    """Create a list of datetime points containing the amounts of CPU usage
    in percent per node per hour, day, week up to the given date.

    :param end_date: datetime upper bound of the reporting window
    :param steps: mapping of step name -> {"step": minutes, "times": count}
    :param nodes: iterable of node objects exposing a ``name`` attribute
    :return: {step_name: {node_name: {"info": {...}, "points": [...]}}}
    """
    result = {}
    # One cache handle is enough; avoid re-instantiating per data point.
    cache = StatsCache()
    for step_name, step in steps.iteritems():
        step_size = step.get("step")
        max_points = step.get("times")
        past = end_date - datetime.timedelta(minutes=step_size * max_points)
        result[step_name] = {
            node.name: {
                "info": {
                    "max_points": max_points,
                    "step_size": step_size,
                    "start": past.strftime("%Y-%m-%d %H:%M:%S"),
                    "finish": end_date.strftime("%Y-%m-%d %H:%M:%S"),
                    "max": 100
                },
                "points": []
            }
            for node in nodes
        }
        for _ in range(max_points):
            later = past + datetime.timedelta(minutes=step_size)
            for node in nodes:
                # Query for latest entry for current node in given time range
                status = cache.get_stat("status", later, step_size,
                                        key_prefix=node.name)
                if status is None:
                    q = NodeStatus.query.filter(
                        NodeStatus.name == node.name,
                        NodeStatus.timestamp >= past
                    ).order_by(NodeStatus.timestamp.asc()).first()
                    if q is not None:
                        status = q.status
                        cache.update("status", step_size, set_value=status,
                                     set_dt=later, key_prefix=node.name)
                # Also require a non-empty "cpuload"; without this guard a
                # status missing load data crashed on cpu_load[0] below.
                if (not status or not status.get("cpu_count")
                        or not status.get("cpuload")):
                    continue
                cpu_count = status.get("cpu_count")
                cpu_load = status.get("cpuload")
                # Use average load of last minute. See doc (os.getloadavg)
                load = int(cpu_load[0] / cpu_count * 100)
                time_key = later.strftime("%Y-%m-%d %H:%M:%S")
                result[step_name][node.name]["points"].append({
                    "datetime": time_key,
                    "value": load
                })
            past = later
    return result
def _summarize_disk_usage(end_date, steps, nodes):
    """Create a list of datetime points containing the amounts of currently
    used disk space per node by last hour, day, week up to the given date.

    :param end_date: datetime upper bound of the reporting window
    :param steps: mapping of step name -> {"step": minutes, "times": count}
    :param nodes: iterable of node objects exposing a ``name`` attribute
    :return: {step_name: {node_name: {"info": {...}, "points": {...}}}}
    """
    results = {}
    # For each node, determine which storages there are and their total
    # storage volume
    storage_nodes = {}
    for node in nodes:
        node_status = NodeStatus.query.filter(
            NodeStatus.name == node.name
        ).order_by(NodeStatus.timestamp.desc()).first()
        if node_status:
            # Guard against a status row without diskspace information.
            diskspace = node_status.status.get("diskspace") or {}
            storage_nodes[node.name] = {
                disk_n: {"total": val["total"]}
                for disk_n, val in diskspace.iteritems()
            }
    # One cache handle is enough; avoid re-instantiating per data point.
    cache = StatsCache()
    for step_name, step in steps.iteritems():
        step_size = step.get("step")
        max_points = step.get("times")
        past = end_date - datetime.timedelta(minutes=step_size * max_points)
        results[step_name] = {
            node.name: {
                "info": {
                    # .get(): nodes with no NodeStatus row have no entry in
                    # storage_nodes; indexing directly raised KeyError.
                    "disks": storage_nodes.get(node.name, {}),
                    "max_points": max_points,
                    "step_size": step_size,
                    "start": past.strftime("%Y-%m-%d %H:%M:%S"),
                    "finish": end_date.strftime("%Y-%m-%d %H:%M:%S")
                },
                "points": {}
            }
            for node in nodes
        }
        for _ in range(max_points):
            later = past + datetime.timedelta(minutes=step_size)
            for node in nodes:
                # Query for latest entry for current node in given time range
                status = cache.get_stat("status", later, step_size,
                                        key_prefix=node.name)
                if status is None:
                    q = NodeStatus.query.filter(
                        NodeStatus.name == node.name,
                        NodeStatus.timestamp >= past
                    ).order_by(NodeStatus.timestamp.asc()).first()
                    if q is not None:
                        status = q.status
                        cache.update("status", step_size, set_value=status,
                                     set_dt=later, key_prefix=node.name)
                if not status:
                    continue
                diskspace = status.get("diskspace")
                if not diskspace:
                    # No disk data in this sample; previously crashed on
                    # None.iteritems().
                    continue
                time_key = later.strftime("%Y-%m-%d %H:%M:%S")
                current = results[step_name][node.name]["points"]
                for st_name, val in diskspace.iteritems():
                    storage_name = "%s_used" % st_name
                    if storage_name not in current:
                        current[storage_name] = []
                    current[storage_name].append({
                        "datetime": time_key,
                        "value": val["used"]
                    })
            past = later
    return results