def get_cluster_nodeperform_detail_logic(host_name, start_time, end_time):
    """
    Processing logic for the GET node-performance detail request
    :return: resp, status
             resp: JSON-format response payload
             status: HTTP status code
    """
    data = ''
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    # Pick the sampling granularity matching the query window: raw samples
    # within 1h, 5-minute rollups within 8h, hourly within 24h, daily otherwise
    if is_less_than_nhours(start_time, end_time, 1):
        node_perform_detail = SfoNodePerformMethod.query_node_per_by_host_name(host_name, start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 8):
        node_perform_detail = SfoNodePerform5MinMethod.query_node_per_by_host_name(host_name, start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 24):
        node_perform_detail = SfoNodePerformHourMethod.query_node_per_by_host_name(host_name, start_time, end_time)
    else:
        node_perform_detail = SfoNodePerformDayMethod.query_node_per_by_host_name(host_name, start_time, end_time)
    if node_perform_detail:
        status = 200
        message = 'OK'
        # Return the latest record, carrying the full series on its .list attribute
        node_perform_detail_ins = node_perform_detail[-1]
        node_perform_detail_ins.list = node_perform_detail
        data = node_perform_detail_ins
    else:
        status = 404
        message = 'Not Found Record Between %s and %s Cluster NodePerform By %s' % (start_time, end_time, host_name)
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
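# Hypothetical refactor sketch (not used by this module): the window-size
# ladder repeated in every *_logic function could be driven by a table of
# (max_hours, query callable) pairs. pick_granularity and the ladder
# literal below are illustrative names, not part of the real code.
def pick_granularity(start_time, end_time, ladder, default):
    """Return the first query callable whose hour bound covers the range."""
    for max_hours, query_fn in ladder:
        if is_less_than_nhours(start_time, end_time, max_hours):
            return query_fn
    return default

# Example wiring against the node-perform tables:
# query_fn = pick_granularity(start_time, end_time, [
#     (1, SfoNodePerformMethod.query_node_per_by_host_name),
#     (8, SfoNodePerform5MinMethod.query_node_per_by_host_name),
#     (24, SfoNodePerformHourMethod.query_node_per_by_host_name),
# ], SfoNodePerformDayMethod.query_node_per_by_host_name)
# node_perform_detail = query_fn(host_name, start_time, end_time)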
def get_cluster_cpu_frequency_logic(cluster_name, starttime, endtime):
    """
    Processing logic for the GET cluster CPU usage request
    (average cpu_us per time bucket)
    :return: resp, status
    """
    status = ''
    message = ''
    cpu_frequency_val = {}
    resp = {"status": status, "data": cpu_frequency_val, "message": message}
    # region is the bucket width (seconds) handed to group_data for this window
    if is_less_than_nhours(starttime, endtime, 1):
        region = 60
        data = SfoNodeStatMethod.query_cpu_frequency_region(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 8):
        region = 600
        data = SfoNodeStat5MinMethod.query_cpu_frequency_region(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24):
        region = 7200
        data = SfoNodeStatHourMethod.query_cpu_frequency_region(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 7):
        region = 86400
        data = SfoNodeStatHourMethod.query_cpu_frequency_region(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 31):
        region = 86400 * 30
        data = SfoNodeStatDayMethod.query_cpu_frequency_region(cluster_name, starttime, endtime)
    else:
        region = 86400 * 365
        data = SfoNodeStatDayMethod.query_cpu_frequency_region(cluster_name, starttime, endtime)
    if data:
        try:
            dot_list = []
            dot_group_list, region_time_list = group_data(data, region)
            for host_group in dot_group_list:
                cluster_core_used_sum = 0
                group_list = host_group.group_by_host_name()
                try:
                    for _host_list in group_list.values():
                        # Missing samples default to 0.0
                        cpu_us_list = [float(x.cpu_us) for x in _host_list]
                        # Per-host mean over the bucket (true division, not floor)
                        cluster_core_used_sum += sum(cpu_us_list) / len(_host_list)
                except Exception:
                    dot_list.append('N/A')
                else:
                    # Mean across hosts; guard against an empty group
                    _cpu_frequency_val = cluster_core_used_sum / (len(group_list) or 1)
                    dot_list.append('%0.2f' % _cpu_frequency_val)
            cpu_frequency_val.update({
                "cpu_frq": dot_list,
                "add_time": region_time_list
            })
            status = 200
            message = 'OK'
        except Exception as error:
            status = 501
            message = 'get exception %s from cpu frequency' % str(error)
    else:
        status = 404
        message = 'Not Found Record By %s' % cluster_name
    resp.update({"status": status, "data": cpu_frequency_val, "message": message})
    return resp, status
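# Worked example (standalone, made-up numbers) of the two-stage average
# computed above: cpu_us is first averaged per host within a bucket, then
# the per-host means are averaged across hosts. True division matters
# here; floor division would silently drop the fractional part of each mean.
def two_stage_cpu_mean(samples_by_host):
    """samples_by_host: dict of host name -> list of cpu_us floats."""
    per_host_means = [sum(vals) / len(vals) for vals in samples_by_host.values()]
    return sum(per_host_means) / (len(per_host_means) or 1)

# >>> two_stage_cpu_mean({'node1': [10.0, 20.0], 'node2': [30.0]})
# 22.5  # (15.0 + 30.0) / 2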
def get_requests_count_logic(cluster_name, start_time, end_time):
    """
    Processing logic for the GET cluster request-statistics request
    :param start_time: str, date string
    :param end_time: str, date string
    :return: resp, status
             resp: JSON-format response payload
             status: HTTP status code
    """
    data = {"request_stats": [], "add_times": []}
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    if is_less_than_nhours(start_time, end_time, 1):
        request_stats = SfoProxyStatsDMethod.query_proxt_stat_st2et(start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 8):
        request_stats = SfoProxyStatsD5MinMethod.query_proxt_stat_st2et(start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 24):
        request_stats = SfoProxyStatsDHourMethod.query_proxt_stat_st2et(start_time, end_time)
    else:
        request_stats = SfoProxyStatsDDayMethod.query_proxt_stat_st2et(start_time, end_time)
    sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(cluster_name=cluster_name)
    # Hoisted out of the loop: the cluster's host names, upper-cased once
    cluster_host_names = [node.node_host_name.upper() for node in sfo_nodes]
    for stat in request_stats:
        add_time = stat.add_time
        hostname_httpmethod_map = proxy_server_stat(stat.req_timing)
        # Drop hosts outside this cluster (copy the keys before popping)
        for host_name in list(hostname_httpmethod_map.keys()):
            if host_name.upper() not in cluster_host_names:
                hostname_httpmethod_map.pop(host_name)
        if hostname_httpmethod_map.values():
            # Merge the per-host counters into one per-HTTP-method total
            data['request_stats'].append(
                reduce(sum_times_group_by_httpmethod, hostname_httpmethod_map.values()))
            data['add_times'].append(add_time)
    if request_stats:
        status = 200
        message = 'OK'
        if data['request_stats']:
            # Pivot the list of per-sample dicts into one series per method:
            # [{'GET': 1, 'PUT': 2}, {'GET': 3, 'PUT': 4}]
            #     -> {'GET': (1, 3), 'PUT': (2, 4)}
            data['request_stats'] = dict(
                zip(data['request_stats'][0].keys(),
                    zip(*map(lambda x: x.values(), data['request_stats']))))
        else:
            status = 404
            message = 'Not Found Record By %s' % cluster_name
    else:
        status = 404
        message = 'Not Found Record By %s' % cluster_name
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
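# Standalone illustration (made-up data; _merge_counters is a stand-in for
# sum_times_group_by_httpmethod) of the merge-then-pivot done above:
# per-host counters reduce into one dict per sample, and the list of
# per-sample dicts pivots into one value series per HTTP method.
from functools import reduce  # builtin in Python 2; the import keeps this explicit

def _merge_counters(x, y):
    # key-wise addition of two counter dicts
    return dict((k, x.get(k, 0) + y.get(k, 0)) for k in set(x) | set(y))

per_host = [{'GET': 5, 'PUT': 1}, {'GET': 2, 'PUT': 3}]
samples = [reduce(_merge_counters, per_host), {'GET': 9, 'PUT': 0}]
keys = sorted(samples[0])
series = dict(zip(keys, zip(*[[s[k] for k in keys] for s in samples])))
# series == {'GET': (7, 9), 'PUT': (4, 0)}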
def get_avg_mem_used_rate_logic(cluster_name, starttime, endtime):
    """
    Processing logic for the GET cluster average-memory-usage request
    :return: resp, status
    """
    status = ''
    message = ''
    cluster_mem_total_rate_avg = {}
    resp = {"status": status, "data": cluster_mem_total_rate_avg, "message": message}
    # region is the bucket width (seconds) handed to group_data for this window
    if is_less_than_nhours(starttime, endtime, 1):
        region = 60
        data, host_name_list = SfoNodeStatMethod.query_node_mem_info(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 8):
        region = 600
        data, host_name_list = SfoNodeStat5MinMethod.query_node_mem_info(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24):
        region = 7200
        data, host_name_list = SfoNodeStatHourMethod.query_node_mem_info(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 7):
        region = 86400
        data, host_name_list = SfoNodeStatHourMethod.query_node_mem_info(cluster_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 31):
        region = 86400 * 30
        data, host_name_list = SfoNodeStatDayMethod.query_node_mem_info(cluster_name, starttime, endtime)
    else:
        region = 86400 * 365
        data, host_name_list = SfoNodeStatDayMethod.query_node_mem_info(cluster_name, starttime, endtime)
    if data and host_name_list:
        try:
            dot_list = []
            dot_group_list, region_time_list = group_data(data, region)
            for group in dot_group_list:
                try:
                    mem_avg = group.avg_mem_used()
                except Exception:
                    dot_list.append('N/A')
                else:
                    dot_list.append(round(mem_avg, 2))
            cluster_mem_total_rate_avg.update({
                "avg_mem": dot_list,
                "add_time": region_time_list
            })
            status = 200
            message = 'OK'
        except Exception as error:
            status = 501
            message = 'get exception %s from avg mem used' % str(error)
    else:
        status = 404
        message = 'Not Found Record By %s' % cluster_name
    resp.update({"status": status, "data": cluster_mem_total_rate_avg, "message": message})
    return resp, status
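# Small standalone illustration of the try/except/else pattern used in the
# bucket loops above: compute an aggregate per bucket, emit the 'N/A'
# placeholder on failure, and append the rounded value only on success.
def bucket_values(groups, aggregate):
    out = []
    for group in groups:
        try:
            val = aggregate(group)
        except Exception:
            out.append('N/A')
        else:
            out.append(round(val, 2))
    return out

# >>> bucket_values([[1.0, 2.0], []], lambda g: sum(g) / len(g))
# [1.5, 'N/A']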
def server_net_used(node_list, starttime, endtime):
    """Return per-bucket network send/recv rates (bytes/s) for the given nodes."""
    send_bytes = []
    recv_bytes = []
    if is_less_than_nhours(starttime, endtime, 1):
        region = 60
        data, host_name_list = SfoNodeStatMethod.query_net_used_by_cluster_name(node_list, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 8):
        region = 600
        data, host_name_list = SfoNodeStat5MinMethod.query_net_used_by_cluster_name(node_list, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24):
        region = 7200
        data, host_name_list = SfoNodeStatHourMethod.query_net_used_by_cluster_name(node_list, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 7):
        region = 86400
        data, host_name_list = SfoNodeStatHourMethod.query_net_used_by_cluster_name(node_list, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 31):
        region = 86400 * 30
        data, host_name_list = SfoNodeStatDayMethod.query_net_used_by_cluster_name(node_list, starttime, endtime)
    else:
        region = 86400 * 365
        data, host_name_list = SfoNodeStatDayMethod.query_net_used_by_cluster_name(node_list, starttime, endtime)
    dot_group_list, region_time_list = group_data(data, region)
    for group in dot_group_list:
        try:
            cluster_net_total_send, cluster_net_total_recv = group.server_net_used()
            # Bucket totals divided by the bucket width give bytes per second
            cluster_net_total_send = round(cluster_net_total_send / region, 2)
            cluster_net_total_recv = round(cluster_net_total_recv / region, 2)
        except Exception:
            send_bytes.append('N/A')
            recv_bytes.append('N/A')
        else:
            send_bytes.append(cluster_net_total_send)
            recv_bytes.append(cluster_net_total_recv)
    return send_bytes, recv_bytes, region_time_list
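# Rough sketch (an assumption inferred from call sites, not the real
# helper) of what group_data() does: partition time-ordered samples into
# buckets of `region` seconds and return the buckets plus one
# representative timestamp per bucket. The real helper returns group
# objects carrying aggregate methods such as server_net_used(); this
# sketch returns plain lists.
def group_data_sketch(samples, region, ts_of):
    """samples: time-ordered records; ts_of: record -> unix timestamp."""
    groups, times = [], []
    current, bucket_start = [], None
    for sample in samples:
        ts = ts_of(sample)
        if bucket_start is None or ts - bucket_start >= region:
            if current:
                groups.append(current)
                times.append(bucket_start)
            current, bucket_start = [], ts
        current.append(sample)
    if current:
        groups.append(current)
        times.append(bucket_start)
    return groups, times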
def get_cluster_async_pending_logic(cluster_name, starttime, endtime):
    """
    Processing logic for the GET cluster async-pending request
    :return: resp, status
             resp: JSON-format response payload
             status: HTTP status code
    """
    data = {
        'sync_num_list': [],
        "add_time_list": [],
        'cluster_condition': '',
        "cluster_virtual": ''
    }
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    if is_less_than_nhours(starttime, endtime, 1):
        cluster_set = SfoClusterInfoMethod.query_start2end_region_list_info(
            cluster_name=cluster_name, start_time=starttime, end_time=endtime)
    elif is_less_than_nhours(starttime, endtime, 24):
        cluster_set = SfoClusterInfoHourMethod.query_start2end_region_list_info(
            cluster_name=cluster_name, start_time=starttime, end_time=endtime)
    else:
        cluster_set = SfoClusterInfoDayMethod.query_start2end_region_list_info(
            cluster_name=cluster_name, start_time=starttime, end_time=endtime)
    instance = SfoClusterInfoMethod.query_by_cluster_name(cluster_name=cluster_name)
    if cluster_set:
        status = 200
        message = 'OK'
        data['sync_num_list'] = cluster_set
        data['add_time_list'] = cluster_set
        if instance:
            data['cluster_condition'] = instance
            data['cluster_virtual'] = instance
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
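def cluster_async_pending_view(cluster_name, starttime, endtime):
    # Hypothetical adapter showing how the (resp, status) pairs returned
    # by the *_logic functions map onto an HTTP response. Flask and this
    # view name are assumptions; the module itself defines no routes.
    from flask import jsonify  # assumption: available in the web layer
    resp, status = get_cluster_async_pending_logic(cluster_name, starttime, endtime)
    return jsonify(resp), status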
def get_cluster_disk_logic(host_name, starttime, endtime):
    """
    Processing logic for the GET node disk-performance request
    :return: resp, status
             resp: JSON-format response payload
             status: HTTP status code
    """
    data = []
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    if is_less_than_nhours(starttime, endtime, 1):
        last_disk_perform_set = SfoDiskPerformMethod.query_disk_list_by_hostname(host_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 8):
        last_disk_perform_set = SfoDiskPerform5MinMethod.query_disk_list_by_hostname(host_name, starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24):
        last_disk_perform_set = SfoDiskPerformHourMethod.query_disk_list_by_hostname(host_name, starttime, endtime)
    else:
        last_disk_perform_set = SfoDiskPerformDayMethod.query_disk_list_by_hostname(host_name, starttime, endtime)
    # Group the samples by disk name
    disk_dict = {}
    for disk in last_disk_perform_set:
        disk_dict.setdefault(str(disk.disk_name), []).append(disk)
    # Thin the timestamps: drop ticks closer together than the refresh
    # interval (minus a 3-second tolerance)
    add_time_list = []
    for add_time in map(lambda x: str(x.add_time), last_disk_perform_set):
        if not add_time_list:
            add_time_list.append(add_time)
            continue
        if add_time not in add_time_list:
            time_diff = int(strft_2_timestamp(add_time)) - int(strft_2_timestamp(add_time_list[-1]))
            if time_diff > config.disk_refresh - 3:
                add_time_list.append(add_time)
    for disk_name, disk_list in disk_dict.items():
        obj = {'disk': {}}
        read_bytes_list = []
        read_count_list = []
        write_count_list = []
        write_bytes_list = []
        await_list = []
        series_label = '%s %s' % (host_name, disk_name)
        for idx, disk_obj in enumerate(disk_list):
            if idx == len(disk_list) - 1:
                # Last sample: export its public attributes as the disk summary
                for key in filter(lambda x: not x.startswith('_'), disk_obj.__dict__):
                    obj['disk'].update({key: disk_obj.__dict__[key]})
                continue
            # Counters are cumulative, so per-interval values come from
            # diffs against the neighboring sample
            read_bytes = round(disk_obj.read_bytes_diff(disk_list[idx + 1]), 2)
            write_bytes = round(disk_obj.write_bytes_diff(disk_list[idx + 1]), 2)
            read_counts = round(disk_obj.read_count_diff(disk_list[idx + 1]))
            write_counts = round(disk_obj.write_count_diff(disk_list[idx + 1]))
            await_time = round(disk_obj.await_diff(disk_list[idx + 1]), 2)
            read_bytes_list.append([add_time_list[idx], read_bytes, series_label])
            write_bytes_list.append([add_time_list[idx], write_bytes, series_label])
            read_count_list.append([add_time_list[idx], read_counts, series_label])
            write_count_list.append([add_time_list[idx], write_counts, series_label])
            # await: I/O response time
            await_list.append([add_time_list[idx], await_time, series_label])
        obj['disk'].update({
            'read': read_bytes_list,
            "write": write_bytes_list,
            "read_count": read_count_list,
            "write_count": write_count_list,
            "await": await_list
        })
        data.append(obj)
    if last_disk_perform_set:
        status = 200
        message = 'OK'
    else:
        status = 404
        message = 'Not Found Record Cluster Disk By %s' % host_name
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
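# Standalone illustration of the add_time thinning in
# get_cluster_disk_logic: consecutive timestamps closer together than the
# refresh interval (minus a 3-second tolerance) collapse onto one tick.
# The strptime-based conversion stands in for strft_2_timestamp, whose
# exact format is an assumption here.
import time

def thin_timestamps(stamps, refresh_interval, fmt='%Y-%m-%d %H:%M:%S'):
    def to_ts(stamp):
        return int(time.mktime(time.strptime(stamp, fmt)))
    kept = []
    for stamp in stamps:
        if not kept:
            kept.append(stamp)
        elif stamp not in kept and to_ts(stamp) - to_ts(kept[-1]) > refresh_interval - 3:
            kept.append(stamp)
    return kept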
def cluster_disk_io(cluster_name, starttime, endtime):
    """Return per-bucket cluster-wide disk IOPS, throughput and await series."""
    read_list = []
    write_list = []
    read_mbps_list = []
    write_mbps_list = []
    await_list = []
    nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(cluster_name=cluster_name)
    if is_less_than_nhours(starttime, endtime, 1):
        region = 60
        data = SfoDiskPerformMethod.query_disk_io(starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 8):
        region = 600
        data = SfoDiskPerform5MinMethod.query_disk_io(starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24):
        region = 7200
        data = SfoDiskPerformHourMethod.query_disk_io(starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 7):
        region = 86400
        data = SfoDiskPerformHourMethod.query_disk_io(starttime, endtime)
    elif is_less_than_nhours(starttime, endtime, 24 * 31):
        region = 86400 * 30
        data = SfoDiskPerformDayMethod.query_disk_io(starttime, endtime)
    else:
        region = 86400 * 365
        data = SfoDiskPerformDayMethod.query_disk_io(starttime, endtime)
    if not nodes:
        raise ValueError('Cluster Not Found nodes')
    # Keep only samples from hosts that belong to this cluster
    node_names = set(node.node_host_name for node in nodes)
    data = filter(lambda x: x.host_name in node_names, data)
    if data:
        dot_group_list, region_time_list = group_data(data, region)
        for idx, group in enumerate(dot_group_list):
            clu_disk_read, clu_disk_write = group.disk_io_count()
            clu_disk_read_mbps, clu_disk_write_mbps = group.disk_io_used()
            clu_disk_await = group.disk_io_await()
            # Bucket totals divided by the bucket width give per-second rates
            read_list.append([region_time_list[idx], clu_disk_read // region, cluster_name])
            write_list.append([region_time_list[idx], clu_disk_write // region, cluster_name])
            read_mbps_list.append([region_time_list[idx], clu_disk_read_mbps // region, cluster_name])
            write_mbps_list.append([region_time_list[idx], clu_disk_write_mbps // region, cluster_name])
            await_list.append([region_time_list[idx], clu_disk_await, cluster_name])
        return {"read_list": read_list,
                "write_list": write_list,
                "read_mbps_list": read_mbps_list,
                "await_list": await_list,
                "write_mbps_list": write_mbps_list}
    return None
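# Simplified model (made-up numbers) of the bucket-rate arithmetic above:
# a bucket's total divided by the bucket width in seconds yields a
# per-second rate. cluster_disk_io floor-divides its counters; true
# division is shown here to keep the fractions visible.
def counter_rate(bucket_total, region):
    return round(bucket_total / float(region), 2)

# >>> counter_rate(1200, 60)
# 20.0
# >>> counter_rate(90, 60)
# 1.5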
def get_cluster_nodestat_detail_logic(host_name, start_time, end_time):
    """
    Processing logic for the GET node stat detail request
    :return: resp, status
             resp: JSON-format response payload
             status: HTTP status code
    """
    data = ''
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    if is_less_than_nhours(start_time, end_time, 1):
        node_stat_detail = SfoNodeStatMethod.query_node_stat_list_by_hostname(host_name, start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 8):
        node_stat_detail = SfoNodeStat5MinMethod.query_node_stat_list_by_hostname(host_name, start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 24):
        node_stat_detail = SfoNodeStatHourMethod.query_node_stat_list_by_hostname(host_name, start_time, end_time)
    else:
        node_stat_detail = SfoNodeStatDayMethod.query_node_stat_list_by_hostname(host_name, start_time, end_time)
    # Accumulate per-NIC send/recv deltas between neighboring samples
    net_map = {"net_send_bytes": {}, "net_recv_bytes": {}}
    for idx, node in enumerate(node_stat_detail):
        if idx == len(node_stat_detail) - 1:
            continue
        node_send_bytes_map = node.diff_net_card_send_bytes(node_stat_detail[idx + 1])
        node_recv_bytes_map = node.diff_net_card_recv_bytes(node_stat_detail[idx + 1])
        for net_card in node_send_bytes_map:
            # First sight of this card starts a series; later deltas extend it
            if net_card not in net_map['net_send_bytes']:
                net_map['net_send_bytes'][net_card] = [node_send_bytes_map[net_card]]
                net_map['net_recv_bytes'][net_card] = [node_recv_bytes_map[net_card]]
            else:
                net_map['net_send_bytes'][net_card].append(node_send_bytes_map[net_card])
                net_map['net_recv_bytes'][net_card].append(node_recv_bytes_map[net_card])
    if node_stat_detail:
        status = 200
        message = 'OK'
        node_stat_detail_deepcopy = deepcopy(node_stat_detail)
        net_map_deepcopy = deepcopy(net_map)
        # Prefer the freshest single record; fall back to the last one in range
        ins = SfoNodeStatMethod.query_last_node_stat(host_name)
        node_stat_ins = ins if ins else node_stat_detail_deepcopy[-1]
        node_stat_ins.list = node_stat_detail_deepcopy
        node_stat_ins.map = net_map_deepcopy
        data = node_stat_ins
    else:
        status = 404
        message = 'Not Found Record Cluster NodeStat By %s' % host_name
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
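# Minimal standalone version (made-up data) of the per-NIC accumulation in
# get_cluster_nodestat_detail_logic: a new card key starts a series, an
# existing key extends it; setdefault expresses both branches at once.
def accumulate_nic_series(series_by_card, diffs_by_card):
    for card, value in diffs_by_card.items():
        series_by_card.setdefault(card, []).append(value)
    return series_by_card

# >>> accumulate_nic_series({}, {'eth0': 120, 'eth1': 40})
# {'eth0': [120], 'eth1': [40]}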