Пример #1
0
def getStatusServers(data):
    """Return per-server status plus rack-level averages for one rack.

    Reads ``rackId`` and ``roomId`` from the request query string and
    responds with JSON containing each server's status/color/anomaly and
    the rack-wide average temperature, energy and utilization.
    """
    rack_id = int(request.args.get('rackId', '').split(",")[0])
    room_id = int(request.args.get('roomId', '').split(",")[0])

    # DAO over the machine status metrics stored in Cassandra.
    status_dao = machinesDAO.MachinesDAOImplementation()
    status_dao.create_session(ip_cassandra)
    status_dao.set_logger()
    status_dao.load_keyspace(keyspace)

    # DAO over detected anomalies.
    anomalies_dao = anomaliesDAO.AnomaliesInMachinesDAOImpl()
    anomalies_dao.create_session(ip_cassandra)
    anomalies_dao.set_logger()
    anomalies_dao.load_keyspace(keyspace)

    # Evaluation window: the last settings.minutes_ago_5 minutes.
    window_end = settings.current_time
    window_start = window_end - datetime.timedelta(
        minutes=settings.minutes_ago_5)

    server_ids = status_dao.select_server_ids_by_roomAndRackid(
        room_id, rack_id)
    if not server_ids:
        return jsonify([])

    metrics = ["temperature", "energy", "utilization"]
    rack_averages = {metric: 0 for metric in metrics}
    server_count = len(server_ids)
    servers_payload = []
    for server_id in server_ids:
        (status_by_metric, color, anomaly_type,
         anomaly_value) = status_machines_processor.get_status_servers(
             status_dao,
             anomalies_dao,
             room_id,
             rack_id,
             server_id,
             window_start,
             window_end,
             metrics=metrics)
        # Running average: every server contributes 1/N of its value.
        for metric in metrics:
            rack_averages[metric] += status_by_metric[metric] / server_count
        servers_payload.append({
            "server_id": server_id,
            "server_status": {
                "status": status_by_metric["total"],
                "color": color
            },
            "anomaly": {
                "type": anomaly_type,
                "value": anomaly_value
            }
        })

    # Fields per server: server_id, status, color, anomaly info.
    return jsonify({
        "rack_id": rack_id,
        "server_status": servers_payload,
        "avg_performance": rack_averages
    })
Пример #2
0
def getStatusDC():
    """Return the overall data-center (DC) status as JSON.

    The DC score is the average, over all racks, of the *worst* server
    status inside each rack. Responds with
    ``{"DC_status": <avg>, "color": <color>}``, or ``[]`` when there is
    no room/rack information (or no racks at all) to aggregate.
    """
    status_dao = machinesDAO.MachinesDAOImplementation()
    status_dao.create_session(ip_cassandra)
    status_dao.set_logger()
    status_dao.load_keyspace(keyspace)

    # Evaluation window: the last settings.minutes_ago_5 minutes.
    window_end = settings.current_time
    window_start = window_end - datetime.timedelta(
        minutes=settings.minutes_ago_5)

    # Mapping room_id -> collection of rack ids in that room.
    rooms_to_racks = status_dao.select_all_roomAndRacksIds()
    if not rooms_to_racks:
        return jsonify([])

    status_sum = 0
    num_racks = 0
    for room_id, racks_in_room in rooms_to_racks.items():
        for rack_id in racks_in_room:
            rack_status = 100  # start optimistic; the worst server drags it down
            server_ids = status_dao.select_server_ids_by_roomAndRackid(
                room_id, rack_id)
            for server_id in server_ids:
                status_by_metric, _color, _, _ = status_machines_processor.get_status_servers(
                    status_dao,
                    None,
                    room_id,
                    rack_id,
                    server_id,
                    window_start,
                    window_end,
                    metrics=["temperature", "energy", "utilization"])
                # We keep the worst server status as the rack status.
                if rack_status > status_by_metric["total"]:
                    rack_status = status_by_metric["total"]
            # End of servers for this rack -> fold into the DC aggregate.
            status_sum += rack_status
            num_racks += 1

    # Fix: previously this raised ZeroDivisionError when rooms existed but
    # contained no racks; report "no data" instead of crashing.
    if num_racks == 0:
        return jsonify([])

    avg_DC = status_sum / num_racks  # avg of the worst server of each rack
    color_DC = statusAndAnomDetect.color_status(avg_DC)
    return jsonify({"DC_status": avg_DC, "color": color_DC})
Пример #3
0
def getStatusRackInTime(data):
    """Return the per-timestamp maximum temperature over a rack's servers.

    Query parameters ``rackId``, ``roomId``, ``initial_time`` and
    ``final_time`` (times as Unix timestamps) select the rack and the
    time window. Responds with a JSON list of
    ``{"date": ..., "temperature": ...}`` points.
    """
    rack_id = int(request.args.get('rackId', '').split(",")[0])
    room_id = int(request.args.get('roomId', '').split(",")[0])
    # num_samples = int(request.args.get('num_samples', '').split(",")[0] or '50')
    window_start = datetime.datetime.fromtimestamp(
        float(request.args.get('initial_time', '').split(",")[0]))
    window_end = datetime.datetime.fromtimestamp(
        float(request.args.get('final_time', '').split(",")[0]))

    status_dao = machinesDAO.MachinesDAOImplementation()
    status_dao.create_session(ip_cassandra)
    status_dao.set_logger()
    status_dao.load_keyspace(keyspace)
    # Spark is required for the aggregated MAX-per-date query below.
    status_dao.create_spark_context(ip_cassandra)
    status_dao.load_spark_table()

    metric = "temperature"
    server_ids = status_dao.select_server_ids_by_roomAndRackid(
        room_id, rack_id)
    # Aggregated query: max metric value per date for the given servers.
    results = status_dao.select_MAXmetric_and_date(
        room_id,
        rack_id,
        list(server_ids),
        window_start,
        window_end,
        metric_type=metric)
    if not results:
        return jsonify([])
    # Materialize the Spark result and reshape it for the GUI.
    return jsonify([{
        "date": row["new_date"],
        metric: row["max(metric_value)"]
    } for row in results.rdd.collect()])
Пример #4
0
    # Commented-out experiments with other DAOs, kept for reference:
    # #daoAlexaQuestions.delete_table()
    # #daoAlexaQuestions.delete_keyspace('prueba')
    #
    # print(daoAlexaQuestions.select_question(1))
    #
    # daoStatus = dao.EmployeeDAOImpl()
    # daoStatus.createsession('localhost')
    # daoStatus.setlogger()
    # daoStatus.loadkeyspace('cyberOps')
    # currentTime = datetime.datetime.now()
    # someMinutesAgo = currentTime - datetime.timedelta(minutes=5)
    # print(currentTime)
    # print(someMinutesAgo)
    # daoStatus.select_hr_inRange('Cris', someMinutesAgo, currentTime)

    # Machines DAO backed by Cassandra, also queried through Spark.
    # NOTE(review): "machiniesDAO" looks like a typo of "machinesDAO" used
    # elsewhere — confirm against this file's imports.
    daoServerStatus = machiniesDAO.MachinesDAOImplementation(
    )  # dao.MachinesDAOImplementation()
    daoServerStatus.create_session('10.40.39.33')  # hard-coded ip_dcos address
    daoServerStatus.set_logger()
    daoServerStatus.load_keyspace(keyspace)
    daoServerStatus.create_table()
    daoServerStatus.create_spark_context('10.40.39.33')  # ip_dcos
    daoServerStatus.load_spark_table()
    # Time window: now and 20 minutes ago, plus Unix-timestamp variants.
    currentTime = datetime.datetime.now()  # + datetime.timedelta(hours=2)
    cuTime = time.time()
    someMinutesAgo = currentTime - datetime.timedelta(minutes=20)
    ts = time.mktime(currentTime.timetuple())
    print(time.mktime(currentTime.timetuple()))

    # Sample insert, kept commented for manual testing:
    # newDate = currentTime - datetime.timedelta(minutes=1)
    # metric_log = [0,'test','Core5', 'temperature', 3,  newDate]#.strftime("%Y-%m-%d %H:%M:%S")
    # daoServerStatus.insert_metric(1, 1,metric_log=metric_log)
Пример #5
0
def getStatusRacks():
    """Return, per room, the status of every rack in the data center.

    A rack's status/color is that of its worst server, and the rack is
    flagged anomalous when any of its servers reports an anomaly.
    Responds with a JSON list of
    ``{"room_id": ..., "rack_status": [...]}`` objects.
    """
    status_dao = machinesDAO.MachinesDAOImplementation()
    status_dao.create_session(ip_cassandra)
    status_dao.set_logger()
    status_dao.load_keyspace(keyspace)

    # DAO over detected anomalies.
    anomalies_dao = anomaliesDAO.AnomaliesInMachinesDAOImpl()
    anomalies_dao.create_session(ip_cassandra)
    anomalies_dao.set_logger()
    anomalies_dao.load_keyspace(keyspace)

    # Evaluation window: the last settings.minutes_ago_5 minutes.
    window_end = settings.current_time
    window_start = window_end - datetime.timedelta(
        minutes=settings.minutes_ago_5)

    rooms_to_racks = status_dao.select_all_roomAndRacksIds()
    if not rooms_to_racks:
        return jsonify([])

    rooms_payload = []
    for room_id, racks_in_room in rooms_to_racks.items():
        racks_payload = []
        for rack_id in racks_in_room:
            # Start values; the worst server overwrites status and color.
            rack_status = 100
            rack_color = "red"
            rack_anomaly_type = "None"
            rack_anomaly_value = False
            server_ids = status_dao.select_server_ids_by_roomAndRackid(
                room_id, rack_id)
            for server_id in server_ids:
                (status_by_metric, color, anomaly_type,
                 anomaly_value) = status_machines_processor.get_status_servers(
                     status_dao,
                     anomalies_dao,
                     room_id,
                     rack_id,
                     server_id,
                     window_start,
                     window_end,
                     metrics=["temperature", "energy", "utilization"])
                if rack_status > status_by_metric["total"]:
                    rack_status = status_by_metric["total"]
                    rack_color = color
                if anomaly_value:
                    # A single anomaly type per rack is a simplification:
                    # each server could report a different one (the last
                    # anomalous server seen wins).
                    rack_anomaly_type = anomaly_type
                    rack_anomaly_value = anomaly_value
            racks_payload.append({
                "rack_id": rack_id,
                "status": rack_status,
                "color": rack_color,
                "anomaly_type": rack_anomaly_type,
                "anomaly_value": rack_anomaly_value
            })
        rooms_payload.append({
            "room_id": room_id,
            "rack_status": racks_payload
        })
    return jsonify(rooms_payload)
Пример #6
0
import pandas as pd
import time
import settings
from datetime import timedelta
# Force this process to UTC so timestamps are read consistently here and in
# the scripts that depend on this module.
os.environ[
    'TZ'] = 'UTC'  # fixes picking up the correct hour for this script
# NOTE(review, translated from Spanish): still to verify this also works with
# the anomaly model and that data lands in the anomalies table so the GUI
# reads the correct hour back; initial/current times appear "shifted by 2
# hours" but this avoids subtracting offsets everywhere.

# Path to the machines anomaly directory, derived from this file's location.
PATH = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))).rsplit(
        settings.operative_system, 1)[0], "MAQUINAS", "Anomaly")
# import model_params
ip_cassandra = settings.ip_DCOS_cassandra  # Cassandra contact point
keyspace = settings.keyspace_cassandra  # Cassandra keyspace name

# DAO over the raw machine metrics (also exposed through Spark).
databaseallData = DAO_Maquinas.MachinesDAOImplementation()
databaseallData.create_session(ip_cassandra)
databaseallData.set_logger()
databaseallData.load_keyspace(keyspace)
databaseallData.create_table()
databaseallData.create_spark_context(ip_cassandra)
databaseallData.load_spark_table()

# DAO used to persist detected anomalies.
daoServerStatus = daoAnomaly.AnomaliesInMachinesDAOImpl(
)  # dao.MachinesDAOImplementation()
daoServerStatus.create_session(ip_cassandra)  # e.g. '10.40.39.33'
daoServerStatus.set_logger()
daoServerStatus.load_keyspace(keyspace)
daoServerStatus.create_table()

# Metrics monitored by the anomaly model ("energy" currently excluded).
metrics = ["temperature", "utilization"]
Пример #7
0
import settings as settings

# nodetool tablestats -H proteus_prueba  -> Table info (esp. byte size)

if __name__ == "__main__":
    # --- Configuration -----------------------------------------------------
    cassandra_ip = settings.ip_DCOS_cassandra
    cassandra_keyspace = settings.keyspace_cassandra
    kafka_topic = 'collectd'
    extract_field = 'machines'

    # Keyspace replication settings (kept for reference).
    strategy = 'SimpleStrategy'
    replication_factor = 2

    # --- Cassandra ---------------------------------------------------------
    machines_dao = dao.MachinesDAOImplementation()
    machines_dao.create_session(cassandra_ip)
    machines_dao.set_logger()
    machines_dao.load_keyspace(cassandra_keyspace)
    machines_dao.create_table()  # only if the table was not created previously

    # --- Kafka consumer ----------------------------------------------------
    # Consumes machine metrics from Kafka and persists them through the DAO.
    machines_consumer = consumer.Consumer(topic=kafka_topic,
                                          field2Extract=extract_field,
                                          DAO=machines_dao,
                                          ip_kafka_DCOS=settings.ip_kafka_DCOS)
    machines_consumer.run()

    # Leftover debugging calls, kept for reference:
    #daoStatus.select_all_roomAndRacksIds()
    # end = datetime.datetime.now()