def start(self):
    """Start the monitor server front-end and back-end processing loop.

    Builds a DataHandler from the global settings and hands control to
    its blocking loop; this call does not return under normal operation.
    """
    handler = data_processing.DataHandler(settings)
    handler.looping()
def service_report(request):
    """Receive a service monitoring report posted by a client.

    Expects POST fields ``client_id``, ``service_name`` and ``data``
    (a JSON document). Stores the report via DataStore and immediately
    evaluates the host's triggers so alerting reacts in real time.

    Fixes over the original:
    - Guard on ``request.method == 'POST'`` — a GET request used to hit
      ``request.POST['data']`` and raise an uncaught KeyError (HTTP 500).
    - Catch KeyError (missing 'data' field) and ValueError (malformed
      JSON from ``json.loads``) in addition to IndexError; the old
      handler only caught IndexError, which the body never raises alone.
    - Dropped the unused ``data_saveing_obj`` binding — DataStore is
      invoked for its side effects only.

    :param request: Django HttpRequest from the reporting client.
    :return: HttpResponse with a JSON-encoded acknowledgement string.
    """
    print("client data:", request.POST)
    if request.method == 'POST':
        try:
            print(
                'host=%s, services=%s' % (request.POST.get('client_id'),
                                          request.POST.get('service_name')))
            data = json.loads(request.POST['data'])
            client_id = request.POST.get('client_id')
            service_name = request.POST.get('service_name')
            # Persist/optimize the report (DataStore acts in its constructor).
            data_optimization.DataStore(client_id, service_name, data, REDIS_OBJ)
            # Evaluate this host's triggers against the freshly stored data.
            host_obj = models.Host.objects.get(id=client_id)
            service_triggers = get_host_triggers(host_obj)
            trigger_handler = data_processing.DataHandler(settings,
                                                          connect_redis=False)
            for trigger in service_triggers:
                trigger_handler.load_service_data_and_calulating(
                    host_obj, trigger, REDIS_OBJ)
            print("service trigger::", service_triggers)
        except (KeyError, IndexError, ValueError) as e:
            # ValueError covers json.JSONDecodeError on malformed payloads.
            print('------>err:', e)
    return HttpResponse(json.dumps("---report success---"))
def service_data_report(request):
    """Handle a client's service monitoring report.

    POST-only (the URL is expected to be @csrf_exempt since clients are
    not browsers). For each report:

    1. run the data-optimization/storage pass (DataStore),
    2. evaluate every trigger attached to the reporting host so alarms
       fire in real time,
    3. refresh the host-alive flag in Redis with a short TTL so a host
       that stops reporting is detected as down.

    :param request: Django HttpRequest carrying client_id, service_name
        and a JSON 'data' payload.
    :return: HttpResponse with a JSON-encoded acknowledgement string.
    """
    if request.method == 'POST':
        try:
            print('---->host=%s, service=%s' % (request.POST.get('client_id'),
                                                request.POST.get('service_name')))
            payload = json.loads(request.POST['data'])
            client_id = request.POST.get('client_id')
            service_name = request.POST.get('service_name')
            # Every report triggers one optimization pass; DataStore
            # decides whether the data meets the optimization criteria.
            data_optimization.DataStore(client_id, service_name, payload,
                                        REDIS_OBJ)
            # Real-time alerting: walk the triggers bound to this host
            # and let the handler decide whether any alarm fires.
            host = models.Host.objects.get(id=client_id)
            triggers = get_host_triggers(host)
            handler = data_processing.DataHandler(settings,
                                                  connect_redis=False)
            for trig in triggers:
                handler.load_service_data_and_calulating(host, trig,
                                                         REDIS_OBJ)
            print("service trigger:", triggers)
            # Refresh the host-alive marker; the 3 s TTL means a silent
            # host drops off quickly.
            alive_key = "HostAliveFlag_%s" % client_id
            REDIS_OBJ.set(alive_key, time.time())
            REDIS_OBJ.expire(alive_key, 3)
        except IndexError as e:
            print('----->err:', e)
    return HttpResponse(json.dumps("{'success':1,'data':'report success'}"))
def service_report(request):
    """Receive a service monitoring report posted by a client.

    Stores the report via DataStore and immediately evaluates the
    host's triggers for alerting.

    Fixes over the original:
    - Removed the debug leftover ``REDIS_OBJ.set("test_alex", 'hahaha')``
      that wrote a junk key into Redis on every single report.
    - Catch KeyError (missing 'data') and ValueError (malformed JSON
      from ``json.loads``) in addition to IndexError; the old handler
      only caught IndexError, so real parse errors produced HTTP 500s.
    - Dropped the unused ``data_saveing_obj`` binding and the dead
      commented-out code.

    :param request: Django HttpRequest from the reporting client.
    :return: HttpResponse with a JSON-encoded acknowledgement string.
    """
    print("client data:", request.POST)
    if request.method == 'POST':
        print("---->", request.POST)
        try:
            print('host=%s, service=%s' % (request.POST.get('client_id'),
                                           request.POST.get('service_name')))
            data = json.loads(request.POST['data'])
            client_id = request.POST.get('client_id')
            service_name = request.POST.get('service_name')
            # Persist/optimize the report (DataStore acts in its constructor).
            data_optimization.DataStore(
                client_id, service_name, data, REDIS_OBJ)
            # Evaluate this host's triggers against the freshly stored data.
            host_obj = models.Host.objects.get(id=client_id)
            service_triggers = get_host_triggers(host_obj)
            trigger_handler = data_processing.DataHandler(settings,
                                                          connect_redis=False)
            for trigger in service_triggers:
                trigger_handler.load_service_data_and_calulating(
                    host_obj, trigger, REDIS_OBJ)
            print("service trigger::", service_triggers)
        except (KeyError, IndexError, ValueError) as e:
            # ValueError covers json.JSONDecodeError on malformed payloads.
            print('----->err:', e)
    return HttpResponse(json.dumps("---report success---"))
def service_data_report(request):
    """Handle a client's service monitoring report (POST only).

    Stores and optimizes the report, evaluates the host's triggers for
    real-time alerting, and refreshes the host-alive flag in Redis.

    Fixes over the original:
    - ``HostAliveFlag_%s`` was set but never given a TTL, so once a host
      reported a single time it appeared alive forever. Added the same
      3-second ``expire`` used by the sibling handler in this project —
      TODO confirm the TTL against the client report interval.
    - Catch KeyError (missing 'data') and ValueError (malformed JSON)
      in addition to IndexError, which the body never raises alone.

    :param request: Django HttpRequest carrying client_id, service_name
        and a JSON 'data' payload.
    :return: HttpResponse with a JSON-encoded acknowledgement string.
    """
    if request.method == 'POST':
        try:
            data = json.loads(request.POST['data'])
            client_id = request.POST.get('client_id')
            service_name = request.POST.get('service_name')
            # Optimize and persist the reported data.
            data_saving_obj = data_optimization.DataStore(
                client_id, service_name, data, REDIS_OBJ)
            data_saving_obj.process_and_save()
            # Real-time alerting: evaluate every trigger bound to the host.
            host_obj = models.Host.objects.get(id=client_id)
            service_triggers = get_host_triggers(host_obj)
            trigger_handler = data_processing.DataHandler(settings,
                                                          connect_redis=False)
            for trigger in service_triggers:
                trigger_handler.load_service_data_and_calulating(
                    host_obj, trigger, REDIS_OBJ)
            print("service trigger::", service_triggers)
            # Refresh the host-alive flag; expire it so a host that
            # stops reporting is detected as down.
            host_alive_key = "HostAliveFlag_%s" % client_id
            REDIS_OBJ.set(host_alive_key, time.time())
            REDIS_OBJ.expire(host_alive_key, 3)
        except (KeyError, IndexError, ValueError) as e:
            # ValueError covers json.JSONDecodeError on malformed payloads.
            print('----->err:', e)
    return HttpResponse(json.dumps("---report success---"))
def start(self):
    """Start listening on triggers: checks whether hosts and their
    services are running normally.

    Delegates to DataHandler's blocking loop; does not return under
    normal operation.
    """
    handler = data_processing.DataHandler(settings)
    handler.looping()
# _*_coding:utf-8_*_
"""Standalone entry point: bootstraps Django and runs the monitor loop.

Fix: the original called ``reactor.loopping()`` (double 'p'); every
other call site in this project invokes ``DataHandler.looping()``, so
the typo raised AttributeError on startup.
"""
import os
import sys

import django

# Project root is three directory levels above this file; make it
# importable before touching any project module.
BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CrazyMonitor.settings")

from CrazyMonitor import settings

django.setup()  # must run before importing any app models

from monitor import models
from monitor.backends import data_processing

if __name__ == '__main__':
    reactor = data_processing.DataHandler(settings)
    reactor.looping()
def start():
    """Announce startup and run the data-processing loop.

    Prints a start marker, then blocks inside DataHandler.looping().
    """
    print("start")
    handler = data_processing.DataHandler(settings)
    handler.looping()
def main():
    """Entry point: build a DataHandler and run its blocking loop."""
    handler = data_processing.DataHandler(settings)
    handler.looping()