def instnaceInfo():
    """Instance information endpoint.

    Reads ``token`` from the query string, enumerates servers, and for each
    instance reports measured usage (cpu/memory/disk) plus flavor sizes and
    the owning project id. Instances whose measurement fails are still
    reported with flavor/project info only (usage keys absent).
    """
    print("/instanceInfo <- ")
    token = request.args.get('token')
    # Fetch the list of instances (names and UUIDs).
    server_names, server_uuid = oa.get_server_list(token)
    data = []
    for name, uuid in zip(server_names, server_uuid):
        element = {'name': name}
        # Flavor sizes and project id are reported even when measurement
        # fails, so collect them unconditionally (previously duplicated in
        # both the try and except branches).
        (element['flavor_cpu'],
         element['flavor_memory'],
         element['flavor_storage']) = oa.get_resource_size(token, uuid)
        element['project_id'] = \
            oa.get_server_info(token, uuid)['server']['tenant_id']
        try:
            # Resource list -> measured usage for this instance.
            res = oa.get_resource_list(token, uuid)
            temp = list(oa.get_mesuare_list(token, res))
            element['cpu'] = round(temp[0], 0)
            element['memory'] = round(temp[1] * 100, 0)
            element['disk'] = round(temp[2] * 100, 0)
        except Exception as e:
            # Measurement failed: keep the partial element, log and continue.
            print(e)
        data.append(element)
    jsonResult = {'data': data}
    res = make_response(jsonResult)
    print("/instanceInfo -> ")
    # print(resJson)
    return res
def instnaceInfo():
    """Instance information endpoint (usage-only variant).

    Reads ``token`` from the query string and returns measured
    cpu/memory/disk usage (as percentages) for every instance; instances
    whose measurement fails are skipped.
    """
    print("/instanceInfo <- ")
    token = request.args.get('token')
    server_names, server_uuid = oa.get_server_list(token)
    data = []
    # NOTE: dropped `print(token)` — never log authentication tokens.
    for i in range(len(server_uuid)):
        try:
            res = oa.get_resource_list(token, server_uuid[i])
            temp = list(oa.get_mesuare_list(token, res))
            element = {
                'name': server_names[i],
                'cpu': round(temp[0] * 100, 0),
                'memory': round(temp[1] * 100, 0),
                'disk': round(temp[2] * 100, 0),
            }
            data.append(element)
        except Exception as e:
            # Log the failure instead of silently swallowing it; the
            # instance is still skipped, as before.
            print(e)
    jsonResult = {'data': data}
    res = make_response(jsonResult)
    resJson = json.dumps(jsonResult)
    print("/instanceInfo -> ")
    print(resJson)
    return res
def alarmAlter():
    """Alarm webhook handler.

    Resolves the alarmed instance from ``alarm_id``, loads the most recent
    saved rating-log entry for it, predicts required CPU/memory/disk from
    current usage plus that rating, and triggers a stack update.
    Returns ``{'result': 'False'}`` when the admin token or rating log is
    missing, the (JSON-encoded) stack-update result on success, and
    ``{'result': False}`` on prediction/update failure.
    """
    print("/alarmAlter <- ")
    print(request.get_json())
    data = request.get_json()
    alarm_id = data['alarm_id']
    # Load the saved admin token.
    if os.path.isfile('token.json'):
        with open('token.json') as feedsjson:
            feeds = json.load(feedsjson)
        token = feeds['token']
    else:
        # No stored admin token available.
        res = {'result': 'False'}
        return res
    # Resolve the instance that triggered this alarm from the alarm id.
    server_id = oa.get_server_id_by_alarm(alarm_id, token)
    filePath = os.getcwd() + '/rating_log/' + str(server_id) + '.json'
    if os.path.isfile(filePath):
        # Load the rating log of the alarmed instance.
        with open(filePath, "r") as feedsjson:
            feeds = json.load(feedsjson)
    else:
        # No rating log has been saved for this instance.
        res = {'result': 'False'}
        return res
    # Use the most recent rating-log entry.
    instance_info = feeds[-1]
    project_id = instance_info['project_id']
    rating = instance_info['rating']
    server_name = instance_info['server_name']
    with graph.as_default():  # same flow as stackUpdate
        set_session(sess)
        try:
            res = oa.get_resource_list(token, server_id)
            temp = list(oa.get_mesuare_list(token, res))
            cpu = round(temp[0], 0)
            memory = round(temp[1] * 100, 0)
            storage = round(temp[2] * 100, 0)
            print(cpu, memory, storage)
            with graph.as_default():
                try:
                    # Predict required CPU/memory/disk from current usage
                    # and the user's feedback rating.
                    pred_cpu, pred_memory, pred_storage = [
                        round(x, 1) for x in om.predict(
                            cpu, memory, storage, rating, model)]
                    print(pred_cpu, pred_memory, pred_storage)
                    # Update the stack based on the predicted requirements.
                    res = oa.stackUpdate(token, project_id, server_id,
                                         server_name, pred_cpu, pred_memory,
                                         pred_storage, rating)
                    res = json.dumps(res)
                    return res
                except Exception as e:
                    print(e)
                    # key was misspelled 'reslut'; fixed for consistency
                    return {'result': False}
        except Exception as e:
            print(e)
            # key was misspelled 'reslut'; fixed for consistency
            return {'result': False}
def stackUpdate():
    """Rating-driven stack update (JSON-body API).

    Records the user's rating for an instance into its rating log, predicts
    required CPU/memory/disk from current usage plus that rating, and
    updates the instance's stack accordingly. Returns the stack-update
    result on success, or ``{'result': False}`` on failure.
    """
    print("/stackUpdate <- ")
    global model
    global sess
    global graph
    with graph.as_default():
        set_session(sess)
        body = request.get_json()
        token = body['token']
        server_name = body['server_name']
        rating = int(body['rating'])
        project_id = body['project_id']
        # Resolve the instance UUID from its name.
        server_id = oa.get_server_id(token, server_name)
        filePath = os.getcwd() + '/rating_log/' + str(server_id) + '.json'
        feeds = []
        # If a previous log exists for this instance, load it first.
        if os.path.isfile(filePath):
            with open(filePath, "r") as feedsjson:
                feeds = json.load(feedsjson)
        # Append this rating entry and rewrite the instance's log file.
        with open(filePath, "w") as json_file:
            entry = {'time': str(datetime.datetime.now()), 'rating': rating,
                     'token': token, 'project_id': project_id,
                     'server_name': server_name}
            feeds.append(entry)
            json.dump(feeds, json_file)
        try:
            res = oa.get_resource_list(token, server_id)
            temp = list(oa.get_mesuare_list(token, res))
            cpu = round(temp[0], 0)
            memory = round(temp[1] * 100, 0)
            storage = round(temp[2] * 100, 0)
            # cpu, memory, storage = 30, 80, 30  # test values
            print(cpu, memory, storage)
            with graph.as_default():
                try:
                    # Predict required CPU/memory/disk from current usage
                    # and the feedback rating.
                    pred_cpu, pred_memory, pred_storage = [
                        round(x, 1) for x in om.predict(
                            cpu, memory, storage, rating, model)]
                    print(pred_cpu, pred_memory, pred_storage)
                    # Update the stack based on the predicted requirements.
                    res = oa.stackUpdate(token, project_id, server_id,
                                         server_name, pred_cpu, pred_memory,
                                         pred_storage, rating)
                    return res
                except Exception as e:
                    print(e)
                    # key was misspelled 'reslut'; fixed for consistency
                    return {'result': False}
        except Exception as e:
            print(e)
            # key was misspelled 'reslut'; fixed for consistency
            return {'result': False}
def get_resource_info(token, uuid):
    """Fetch the resource list for *uuid* and return its measurements."""
    resource_list = sa.get_resource_list(token, uuid)
    measurements = sa.get_mesuare_list(token, resource_list)
    return measurements
def stackUpdate():
    """Rating-driven stack update (query-string API variant).

    Predicts required resources for an instance from current usage and a
    user rating, creates a matching flavor, and resizes the instance via
    its Heat template. Returns ``{'result': True}`` on success and
    ``{'result': False}`` on failure.
    """
    print("/stackUpdate <- ")
    global model
    global sess
    global graph
    with graph.as_default():
        set_session(sess)
        server_name = request.args.get('name')
        server_uuid = request.args.get('uuid')
        # Query-string values are strings; convert once so the numeric
        # comparison `rating <= 20` below doesn't raise TypeError, and so
        # om.predict receives an int as in the JSON-body variant.
        rating = int(request.args.get('rating'))
        token = request.args.get('token')
        project_id = request.args.get('project_id')
        # print(oh.extractTemplate("admin", "test", "admin-openrc.sh", token))
        try:
            res = oa.get_resource_list(token, server_uuid)
            temp = list(oa.get_mesuare_list(token, res))
            cpu = round(temp[0] * 100, 0)
            memory = round(temp[1] * 100, 0)
            storage = round(temp[2] * 100, 0)
            # FIXME(review): leftover test override — these discard the
            # measurements computed above; remove once verified.
            cpu = 80
            memory = 80
            storage = 80
            # data store ( Object file ) Swift
            print(cpu, memory, storage)
            with graph.as_default():
                try:
                    pred_cpu, pred_memory, pred_storage = [
                        round(x, 1) for x in om.predict(
                            cpu, memory, storage, rating, model)]
                    print(pred_cpu, pred_memory, pred_storage)
                    if pred_cpu != 1 or pred_memory != 1 or pred_storage != 1:
                        print("Need to Change")
                        print(oa.get_resource_size(token, server_uuid))
                        # Scale the current flavor sizes by the predictions.
                        cpu, memory, storage = oa.get_resource_size(
                            token, server_uuid)
                        cpu *= pred_cpu.round()
                        memory *= pred_memory.round(1)
                        storage *= pred_storage
                        # presumably GiB -> MiB for the flavor API; confirm
                        memory = memory.round(1) * 1024
                        storage = storage.round(1)
                        print(cpu, memory, storage)
                        print("Asdf")
                        try:
                            print(oa.create_flavor(token, 'tetttt', int(cpu),
                                                   int(memory), int(storage)))
                        except Exception as e:
                            # Flavor creation may fail (e.g. already exists);
                            # continue to the resize as before.
                            print(e)
                        print("hihi")
                        # Resize the instance to the new flavor.
                        oh.resizeTemplate(project_id, server_name,
                                          server_uuid, 'tetttt', token)
                    else:
                        if rating <= 20:
                            print("Need to copy and move")
                        else:
                            print("Don't need change")
                    jsonResult = {'pred_cpu': pred_cpu,
                                  'pred_memory': pred_memory,
                                  'pred_disk': pred_storage}
                    resJson = json.dumps(str(jsonResult))
                    print("/stackUpdate -> ")
                    print(resJson)
                    res = {'result': True}
                    return res
                except Exception as e:
                    print(e)
                    # key was misspelled 'reslut'; fixed for consistency
                    return {'result': False}
        except Exception as e:
            # key was misspelled 'reslut'; fixed for consistency
            return {'result': False}
def get_resource_info(token, uuid):
    """Look up the resource list for *uuid*, log it, and return its
    measurements."""
    print("get_resource_info")
    resource_list = sa.get_resource_list(token, uuid)
    print(resource_list)
    measurements = sa.get_mesuare_list(token, resource_list)
    return measurements