Example #1
def dfs_stop():
    '''
    Stops dfs services (namenode, datanode, zkfc, journalnode)
    :return: a success payload if dfs_stop_cmd runs successfully, otherwise an error message
    '''
    log.info("\nStopping DFS Services\n")
    header_key = request.headers.get('API-KEY')
    api_status = check_apiKey(header_key)
    if api_status == 'success':
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select status from hdfs_hdfs where type=1"
        cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        conn.close()
        status_list = [' '.join(item) for item in rows]
        if "RUNNING" in status_list:
            result = run_services.run_bigdata_services(dfs_stop_cmd, 'Stopped DFS Services', 'Error Stopping DFS Services')
            if json.loads(result)["success"]:
                update_namenode_info()
            return result  # return the error payload too instead of falling through to None
        else:
            update_namenode_info()
            return '{"success": 1}'
    else:
        return api_status
Example #2
def kill():
    try:
        updated_at = int(time.time())  # epoch seconds, same as the old str-split idiom
        header_key = request.headers.get('API-KEY')
        api_status = check_apiKey(header_key)
        if api_status == 'success':
            conn = get_postgres_connection()
            cur = conn.cursor()
            loaded_json = json.loads(request.data.decode())
            service_name = loaded_json['service_name']
            node_id = loaded_json['node_id']
            table_name = loaded_json['table_name']
            kill_result = kill_service(service_name, user_pass)
            kill_result = json.loads(kill_result)
            if kill_result["success"]:
                sql = """UPDATE %s set status='SHUTDOWN', updated_at=%d where id=%d;""" % (table_name, updated_at, int(node_id))
                cur.execute(sql)
                conn.commit()
                cur.close()
                conn.close()
                return '{"success": 1, "msg": ["%s killed forcefully"]}' % service_name
            else:
                sql = """UPDATE %s set status='RUNNING' where id=%d;""" % (table_name, int(node_id))
                cur.execute(sql)
                conn.commit()
                cur.close()
                conn.close()
                return '{"success": 0, "msg": ["Error killing %s forcefully"]}' % service_name
        else:
            return api_status
    except Exception as e:
        log.error("Error in kill()")
        log.error(e)
        return '{"success": 0, "msg": ["%s"]}' % e
Example #3
def hdfs_deadnode_update(deadnode):
    cur = conn = None
    try:
        xferaddr_deadnode = json_deadnodes[deadnode]['xferaddr']
        ip = xferaddr_deadnode.split(":")[0]

        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select id from hdfs_hdfs where ip='%s' and type=0 and cluster_id='%s'" % (
            ip, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()
        if row is None:
            sql = """INSERT INTO hdfs_hdfs (ip, type, status, state, cluster_id, updated_at, web_port, rpyc_port, safemode) VALUES('{0}', {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}) RETURNING id;""" \
                .format(ip, 0, "SHUTDOWN", 0, cluster_id, updated_at, 0, 0, 0)
        else:
            node_id = row[0]
            sql = """UPDATE hdfs_hdfs set status='{0}', updated_at={1} WHERE id={2} RETURNING id;""". \
                format("SHUTDOWN", updated_at, node_id)

        node_id = execute_hdfs_sql(sql)

        metrics_sql = """INSERT INTO hdfs_metrics (capacity, non_dfs_used, num_blocks, used_space, decommissioned, 
            updated_at, node_id) VALUES({0}, {1}, {2}, {3}, {4}, {5}, {6}) RETURNING id;""".format(
            0, 0, 0, 0, 0, updated_at, node_id)
        execute_hdfs_sql(metrics_sql)
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
Example #4
def stop_spark():
    '''
    Stops spark
    :return: a success payload if spark_stop_cmd executes successfully, otherwise an error message
    '''
    log.info("\nStopping spark\n")
    header_key = request.headers.get('API-KEY')
    api_status = check_apiKey(header_key)
    if api_status == 'success':
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select status from spark_spark where type=1"
        cur.execute(sql)
        rows = cur.fetchall()
        status_list = [' '.join(item) for item in rows]
        if "RUNNING" in status_list:
            result = run_services.run_bigdata_services(spark_stop_cmd,
                                                       'spark Stopped',
                                                       'Error Stopping spark')
            if json.loads(result)["success"]:
                sql = """UPDATE spark_spark set status='SHUTDOWN' where type=1 and state=1;"""
                cur.execute(sql)
                conn.commit()
            cur.close()
            conn.close()
            return result  # close and return in the error case too instead of leaking the connection
        else:
            cur.close()
            conn.close()
            return '{"success": 1}'
    else:
        return api_status
Example #5
def stop_all_hbase():
    header_key = request.headers.get('API-KEY')
    api_status = check_apiKey(header_key)
    if api_status == 'success':
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select status from hbase_hbase where type=1"
        cur.execute(sql)
        rows = cur.fetchall()
        status_list = [' '.join(item) for item in rows]
        if "RUNNING" in status_list:
            result = run_services.run_bigdata_services(
                hbase_stop_cmd, 'HBase Service Stopped',
                'Error Stopping HBase Services')
            update_hbase_master()
            if json.loads(result)["success"]:
                cur.execute(sql)
                rows = cur.fetchall()
                status_list = [' '.join(item) for item in rows]
                if "RUNNING" in status_list:
                    result = '{"success": 2, "msg": ["HBase Service not Stopped!!!"]}'
            cur.close()
            conn.close()
            return result
        else:
            cur.close()
            conn.close()
            return '{"success": 1}'
    else:
        return api_status
Example #6
def execute_psql(sql):
    conn = get_postgres_connection()
    cur = conn.cursor()
    cur.execute(sql)
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows
Example #7
def execute_spark_sql(sql):
    conn = get_postgres_connection()
    cur = conn.cursor()
    cur.execute(sql)
    conn.commit()
    id = (cur.fetchone())[0]
    cur.close()
    conn.close()
    return id
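Examples #6 and #7 repeat the same open/execute/close choreography with slightly different fetch and commit behavior. A consolidated sketch (assuming get_postgres_connection() returns a standard DB-API connection) using contextlib.closing so the cursor and connection are released on any exit path:

from contextlib import closing

def run_sql(sql, params=None, fetch="all", commit=False):
    # Sketch: covers both the execute_psql pattern (fetch="all") and the
    # execute_spark_sql pattern (commit=True, fetch="one" for RETURNING id).
    with closing(get_postgres_connection()) as conn:
        with closing(conn.cursor()) as cur:
            cur.execute(sql, params)
            if commit:
                conn.commit()
            if fetch == "one":
                row = cur.fetchone()
                return row[0] if row else None
            return cur.fetchall()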
Example #8
def drop_master(sql):
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        cur.close()
        conn.close()
    except Exception as e:
        log.error(e)
Example #9
def get_hbase_db_info(ip):
    conn = get_postgres_connection()
    cur = conn.cursor()
    sql = "select id, cluster_id from hbase_hbase where ip='%s' and type=1" % ip

    cur.execute(sql)
    row = cur.fetchone()
    cur.close()
    conn.close()
    return row
Example #10
def get_service_status(sql):
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        cur.execute(sql)
        row = cur.fetchone()
        cur.close()
        conn.close()
        return row
    except Exception as e:
        log.error(e)
Example #11
def change_service_status(table_name, status, id):
    updated_at = int(time.time())  # epoch seconds
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = """UPDATE {0} set status='{1}', updated_at={2} WHERE id={3}""".format(table_name, status, updated_at, id)
        cur.execute(sql)
        conn.commit()
        cur.close()
        conn.close()
    except Exception as e:
        log.error(e)
Example #12
def get_es_nodes_info():
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select ip from elastic_search_elastic_search where type=0 and state=0"
        cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        conn.close()
        return rows
    except Exception as e:
        log.error(e)
Example #13
def update_rm_info(updated_at=None):
    cur = conn = None
    try:
        if updated_at is None:
            updated_at = int(time.time())  # epoch seconds

        result = subprocess.check_output([get_yarn_master_cmd],
                                         stderr=subprocess.STDOUT,
                                         shell=True).decode("utf-8")
        result = result.strip().split("\n")

        # filter the retry noise into a new list; the old remove()-while-enumerating
        # comprehension skipped elements after every removal
        result = [s for s in result if 'Retrying connect to server' not in s]

        result = result[-2:]
        for r in result:
            # split on any whitespace run instead of a fixed-width 30-space gap
            fields = r.split()
            host = fields[0].split(":")[0].strip()
            state = fields[-1].strip()

            ip = get_ip(host)

            conn = get_postgres_connection()
            cur = conn.cursor()
            sql = "select id, cluster_id from yarn_yarn where ip='%s' and type=1" % ip
            cur.execute(sql)
            row = cur.fetchone()
            id = row[0]
            global cluster_id
            cluster_id = row[1]

            cur.close()
            conn.close()

            if state == "active" or state == "standby":
                status = "RUNNING"
            else:
                status = "SHUTDOWN"

            sql = """UPDATE yarn_yarn set status='{0}', state={1}, updated_at={2} WHERE id={3} RETURNING id;""". \
                format(status, get_rm_state(state),
                       updated_at,
                       id)

            execute_yarn_sql(sql)
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
Example #14
def insert_update_yarn(ip, node_info, rpyc_port, web_port):
    cur = conn = None
    try:
        type = 0
        status = node_info.get("Node-State")
        state = 0
        cpu_capacity = float((node_info.get("CPU-Capacity")).split()[0])
        cpu_used = float((node_info.get("CPU-Used")).split()[0])
        last_health_update = node_info.get("Last-Health-Update")
        memory_capacity = [
            float(s) for s in re.findall(r'-?\d+\.?\d*',
                                         str(node_info.get("Memory-Capacity")))
        ][0]
        memory_used = [
            float(s) for s in re.findall(r'-?\d+\.?\d*',
                                         str(node_info.get("Memory-Used")))
        ][0]
        rack = node_info.get("Rack")
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select id from yarn_yarn where ip='%s' and cluster_id=%d and type=0" % (
            ip, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()
        if row is None:
            # Inserting yarn_yarn table with yarn jmx values.

            sql = """INSERT INTO yarn_yarn (ip, type, status, state, web_port, rpyc_port,
                    cluster_id, updated_at) VALUES
                    ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(
                ip, type, status, state, web_port, rpyc_port, cluster_id,
                updated_at)
        else:
            # Updating yarn_yarn table with yarn jmx values.
            id = row[0]

            sql = """UPDATE yarn_yarn set type={0}, status='{1}', state={2}, updated_at={3} where id={4} RETURNING id;""" \
                .format(type, status, state, updated_at, id)

        yarn_id = execute_yarn_sql(sql)

        metrics_sql = """INSERT INTO yarn_metrics (cpu_capacity, cpu_used, last_health_update, memory_capacity, memory_used, 
    rack, updated_at, node_id) VALUES({0}, {1}, '{2}', {3}, {4}, '{5}', {6}, {7}) RETURNING id;""".format(
            cpu_capacity, cpu_used, last_health_update, memory_capacity,
            memory_used, rack, updated_at, yarn_id)
        execute_yarn_sql(metrics_sql)
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
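The re.findall(r'-?\d+\.?\d*', ...) calls above pull the first numeric token out of the Memory-Capacity and Memory-Used strings. A quick illustration of what they match (the "8.00 GB" sample format is an assumption; the real node report may differ):

import re

sample = "8.00 GB"  # hypothetical Memory-Capacity value
numbers = [float(s) for s in re.findall(r'-?\d+\.?\d*', sample)]
print(numbers)  # [8.0] -> the code above takes numbers[0]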
Example #15
def get_es_db_standby():
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select ip, cluster_id, web_port from elastic_search_elastic_search " \
              "where type=1 and state=0"
        cur.execute(sql)
        row = cur.fetchone()
        cur.close()
        conn.close()
        return row
    except Exception as e:
        log.error(e)
Example #16
def spark_jmx_update(worker):
    cur = conn = None
    try:
        worker_host = worker['host']
        worker_state = worker['state']
        if worker_state == 'ALIVE':
            worker_status = "RUNNING"
        elif worker_state == 'DEAD':
            worker_status = "DEAD"
        else:
            worker_status = "SHUTDOWN"
        worker_cores = float(worker['cores'])
        worker_coresused = float(worker['coresused'])
        worker_coresfree = float(worker['coresfree'])
        worker_memory = float(worker['memory'] / 1024)
        worker_memoryused = float(worker['memoryused'])
        worker_lastheartbeat = worker['lastheartbeat']
        rpyc_port = int(worker['port'])
        web_port = int((worker['webuiaddress']).split(":")[-1])

        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select id from spark_spark where ip='{0}' and type=0 and cluster_id={1}".format(
            worker_host, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()
        if row is None:
            sql = """INSERT INTO spark_spark (ip, type, status, web_port, rpyc_port, state, cluster_id, updated_at) 
    VALUES ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(
                worker_host, 0, worker_status, web_port, rpyc_port, 0,
                cluster_id, updated_at)
        else:
            id = row[0]
            sql = """UPDATE spark_spark set status='{0}', state={1}, updated_at={2} where id={3} RETURNING id;""".format(
                worker_status, 0, updated_at, id)

        spark_id = execute_spark_sql(sql)
        if worker_status != 'DEAD':  # "is not" compared object identity, not string value
            metrics_sql = """INSERT INTO spark_metrics (cores_free, cores_used, last_heartbeat, memory_used, total_cores, updated_at, total_memory, node_id) 
        VALUES ({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(
                worker_coresfree, worker_coresused, worker_lastheartbeat,
                worker_memoryused, worker_cores, updated_at, worker_memory,
                spark_id)
            execute_spark_sql(metrics_sql)
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
Example #17
def get_spark_db_info():
    cur = conn = None
    row = None
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select ip, cluster_id, web_port from spark_spark where type=1 and state=1"
        cur.execute(sql)
        row = cur.fetchone()
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
    return row  # return after cleanup; the early return used to skip the close calls
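Several helpers in this file repeat the "if cur is not None" cleanup block, which still depends on control reaching it. A sketch of the same lookup using try/finally, which guarantees the close calls run even on an early return or an unexpected exception:

def get_spark_db_info_v2():
    # Sketch: same query as get_spark_db_info, cleanup guaranteed by finally.
    conn = get_postgres_connection()
    cur = conn.cursor()
    try:
        cur.execute("select ip, cluster_id, web_port from spark_spark "
                    "where type=1 and state=1")
        return cur.fetchone()
    except Exception as e:
        log.error(e)
        return None
    finally:
        cur.close()
        conn.close()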
Example #18
def yarn_jmx_update(r):
    cur = conn = None
    try:
        r = r.strip().split('\t')
        r = r[:-1]
        host = r[0].strip().split(':')[0]
        ip = get_ip(host)
        ipc_port = r[0].strip().split(':')[1]
        status = r[1].strip()
        web_port = r[2].strip().split(':')[1]

        if ipc_port == rpyc_port:
            node_info = get_yarn_info(host, rpyc_port)
            node_info = ast.literal_eval(node_info)
            insert_update_yarn(ip, node_info, rpyc_port, web_port)
        else:
            conn = get_postgres_connection()
            cur = conn.cursor()
            sql = "select id from yarn_yarn where ip='%s' and cluster_id=%d and type=0" % (
                ip, cluster_id)
            cur.execute(sql)
            row = cur.fetchone()
            if row is None:
                sql = """INSERT INTO yarn_yarn (ip, type, status, state, web_port, rpyc_port,
                                    cluster_id, updated_at) VALUES
                                    ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(
                    ip, 0, status, 0, web_port, ipc_port, cluster_id,
                    updated_at)
            else:
                # Updating yarn_yarn table with yarn jmx values.
                id = row[0]

                sql = """UPDATE yarn_yarn set type={0}, status='{1}', state={2}, updated_at={3} where id={4} RETURNING id;""" \
                    .format(0, status, 0, updated_at, id)

            yarn_id = execute_yarn_sql(sql)

            metrics_sql = """INSERT INTO yarn_metrics (cpu_capacity, cpu_used, last_health_update, memory_capacity, memory_used, 
                    rack, updated_at, node_id) VALUES({0}, {1}, '{2}', {3}, {4}, '{5}', {6}, {7}) RETURNING id;""".format(
                0, 0, 0, 0, 0, 0, updated_at, yarn_id)
            execute_yarn_sql(metrics_sql)
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
Example #19
def execute_yarn_sql(sql):
    cur = conn = None
    id = None
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        id = (cur.fetchone())[0]
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()

    return id
Example #20
def get_apiKey():
    encrypted_api_key = None
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select key from security_api_key"
        cur.execute(sql)
        row = cur.fetchone()
        key = ''.join(row)
        key = key.strip()

        encrypted_api_key = hashlib.md5(key.encode()).hexdigest()

        cur.close()
        conn.close()  # close the connection too, not just the cursor
    except Exception as e:
        log.error(e)
    return encrypted_api_key
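check_apiKey is called in every endpoint above but is not shown in this set. Given that get_apiKey() returns the MD5 hex digest of the stored key, a plausible counterpart would compare that digest against the API-KEY header (a hypothetical sketch, not the project's actual implementation):

def check_apiKey(header_key):
    # Hypothetical sketch: callers treat the string 'success' as a pass
    # and return anything else to the client verbatim.
    if not header_key:
        return '{"success": 0, "msg": ["API-KEY header missing"]}'
    if header_key == get_apiKey():
        return 'success'
    return '{"success": 0, "msg": ["Invalid API key"]}'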
Example #21
def hdfs_liveserver_update(livenode):
    cur = conn = None
    try:
        gb_value = 1073741824

        xferaddr = json_livenodes[livenode]['xferaddr']
        ip = xferaddr.split(":")[0]

        capacity = json_livenodes[livenode]['capacity']
        capacity = float(capacity / gb_value)

        nondfsusedspace = json_livenodes[livenode]['nonDfsUsedSpace']
        nondfsusedspace = float(nondfsusedspace / gb_value)

        numblocks = json_livenodes[livenode]['numBlocks']

        usedspace = json_livenodes[livenode]['usedSpace']
        usedspace = float(usedspace / gb_value)
        # fraction of total capacity; guard against a zero-capacity report
        usedspace_oftotal = usedspace / capacity if capacity else 0.0

        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select id from hdfs_hdfs where ip='%s' and type=0 and cluster_id='%s'" % (
            ip, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()
        if row is None:
            sql = """INSERT INTO hdfs_hdfs (ip, type, status, state, cluster_id, updated_at, web_port, rpyc_port, safemode) VALUES('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}, {8}) RETURNING id;""" \
                .format(ip, 0, "RUNNING", 0, cluster_id, updated_at, 0, 0, 0)
        else:
            id = row[0]
            sql = """UPDATE hdfs_hdfs set type={0}, status='{1}', state={2}, updated_at={3} where id={4} RETURNING id;""" \
                .format(0, "RUNNING", 0, updated_at, id)

        node_id = execute_hdfs_sql(sql)
        metrics_sql = """INSERT INTO hdfs_metrics (capacity, non_dfs_used, num_blocks, used_space, decommissioned, 
        updated_at, node_id) VALUES({0}, {1}, {2}, {3}, {4}, {5}, {6}) RETURNING id;""".format(
            capacity, nondfsusedspace, numblocks, usedspace_oftotal, 0,
            updated_at, node_id)
        execute_hdfs_sql(metrics_sql)
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
Example #22
def yarn_stop():
    log.info("\nStopping Yarn\n")
    header_key = request.headers.get('API-KEY')
    api_status = check_apiKey(header_key)
    if api_status == 'success':
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select status from yarn_yarn where type=1"
        cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        conn.close()
        status_list = [' '.join(item) for item in rows]
        if "RUNNING" in status_list:
            result = run_services.run_bigdata_services(
                yarn_stop_cmd, 'Stopped Yarn Services',
                'Error Stopping Yarn Services')
            if json.loads(result)["success"]:
                update_rm_info()
            return result  # surface the error payload too instead of returning None
        else:
            return '{"success": 1}'
    else:
        return api_status
Example #23
def get_hdfs():
    while True:
        global updated_at
        updated_at = int(time.time())  # epoch seconds

        # Updating Namenode Info
        update_namenode_info()

        beans = None
        try:
            url = "https://%s:%s/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo" % (
                master_ip, master_web_port)
            r = requests.get(url, verify=False)
            response_json = r.json()
            beans = response_json['beans']
            r.close()
        except Exception as e:
            log.error(e)

        try:
            if beans:
                for bean in beans:
                    safemode = bean['Safemode']
                    in_safemode = 1 if safemode != "" else 0
                    sql = """UPDATE hdfs_hdfs set safemode={0}, updated_at={1} WHERE id={2} RETURNING id;""". \
                        format(in_safemode, updated_at, master_id)
                    execute_hdfs_sql(sql)

                    decommissioning_nodes = bean['DecomNodes']
                    json_decomnodes = json.loads(decommissioning_nodes)
                    # iterate the decommissioning nodes; indexing the dict with
                    # the raw JSON string (as before) raised a KeyError
                    for decomnode in json_decomnodes:
                        xferaddr = json_decomnodes[decomnode]['xferaddr']
                        ip = xferaddr.split(":")[0]

                        conn = get_postgres_connection()
                        cur = conn.cursor()
                        # look up the node in hdfs_hdfs (hdfs_metrics has no ip column)
                        sql = "select id from hdfs_hdfs where ip='%s' and type=0 and cluster_id='%s'" % (
                            ip, cluster_id)
                        cur.execute(sql)
                        row = cur.fetchone()
                        if row is None:
                            sql = """INSERT INTO hdfs_hdfs (ip, type, status, state, cluster_id, updated_at, web_port, rpyc_port, safemode) VALUES('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}, {8}) RETURNING id;""" \
                                .format(ip, 0, "SHUTDOWN", 0, cluster_id, updated_at, 0, 0, 0)
                            hdfs_id = execute_hdfs_sql(sql)
                        else:
                            hdfs_id = row[0]

                        metrics_sql = """INSERT INTO hdfs_metrics (decommissioned, updated_at, node_id) VALUES({0}, {1}, {2}) RETURNING id;""" \
                            .format(1, updated_at, hdfs_id)
                        execute_hdfs_sql(metrics_sql)
                        cur.close()
                        conn.close()

                    livenodes = bean['LiveNodes']
                    global json_livenodes
                    json_livenodes = json.loads(livenodes)

                    if json_livenodes:
                        no_of_livenodes_threadpool = int(
                            len(json_livenodes) / 10) + 1

                        pool = ThreadPool(no_of_livenodes_threadpool)
                        pool.map(hdfs_liveserver_update, json_livenodes, 10)
                        pool.close()

                    deadnodes = bean['DeadNodes']
                    global json_deadnodes
                    json_deadnodes = json.loads(deadnodes)
                    if json_deadnodes:
                        no_of_deadnode_threadpool = int(
                            len(json_deadnodes) / 10) + 1

                        pool = ThreadPool(no_of_deadnode_threadpool)
                        pool.map(hdfs_deadnode_update, json_deadnodes, 10)
                        pool.close()
        except Exception as e:
            log.error(e)

        time.sleep(10)
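pool.map() already blocks until every node update returns, but pool.close() without pool.join() leaves worker threads to be reaped later. A sketch of the live/dead-node fan-out with the pool managed as a context manager (multiprocessing.pool supports this on Python 3.3+):

from multiprocessing.pool import ThreadPool

def fan_out(update_fn, nodes, chunk=10):
    # Sketch: mirrors the sizing above -- roughly one worker per 10 nodes.
    if not nodes:
        return
    workers = len(nodes) // chunk + 1
    with ThreadPool(workers) as pool:            # __exit__ terminates the pool
        pool.map(update_fn, list(nodes), chunk)  # blocks until all updates finish

# e.g. fan_out(hdfs_liveserver_update, json_livenodes)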
Example #24
def es_jmx_update(a):
    roles = response_json[a]["roles"]
    cpu_percent = response_json[a]["os"]["cpu"]["percent"]
    ip = response_json[a]["ip"].split(":")[0]

    total_mem = response_json[a]["os"]["mem"]["total_in_bytes"] / (1024 * 1024)
    free_mem = response_json[a]["os"]["mem"]["free_percent"]
    used_mem = response_json[a]["os"]["mem"]["used_percent"]

    swap_total_mem = response_json[a]["os"]["swap"]["total_in_bytes"] / (1024 *
                                                                         1024)
    swap_free_mem = response_json[a]["os"]["swap"]["free_in_bytes"] / (1024 *
                                                                       1024)
    swap_used_mem = response_json[a]["os"]["swap"]["used_in_bytes"] / (1024 *
                                                                       1024)

    conn = get_postgres_connection()
    cur = conn.cursor()

    if 'master' in roles:
        if master == ip:
            # master_state = "active"
            master_state = 1
        else:
            # master_state = "inactive"
            master_state = 0

        sql = "select id from elastic_search_elastic_search where ip='{0}' and type=1 and cluster_id={1}".format(
            ip, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()

        if row is None:
            sql = """INSERT INTO elastic_search_elastic_search (ip, type, status, web_port, rpyc_port, state, cluster_id, updated_at) 
        VALUES ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(
                ip, 1, "RUNNING", master_web_port, master_web_port,
                master_state, cluster_id, updated_at)
        else:
            id = row[0]
            sql = """UPDATE elastic_search_elastic_search set status='{0}', state={1}, updated_at={2} where id={3} RETURNING id;""" \
                .format("RUNNING", master_state, updated_at, id)

        execute_es_sql(sql)
        cur.close()  # the master branch must release the connection too
        conn.close()
    else:
        sql = "select id from elastic_search_elastic_search where ip='{0}' and type=0 and cluster_id={1}".format(
            ip, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()
        if row is None:
            sql = """INSERT INTO elastic_search_elastic_search (ip, type, status, web_port, rpyc_port, state, cluster_id, updated_at) 
    VALUES ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(
                ip, 0, "RUNNING", 0, 0, 0, cluster_id, updated_at)
        else:
            id = row[0]
            sql = """UPDATE elastic_search_elastic_search set status='{0}', state={1}, updated_at={2} where id={3} RETURNING id;""" \
                .format("RUNNING", 0, updated_at, id)

        es_id = execute_es_sql(sql)
        metrics_sql = """INSERT INTO elastic_search_metrics (cpu_percent, total_memory, free_memory, used_memory, swap_total_memory, 
    swap_used_memory, swap_free_memory, updated_at, node_id) VALUES ({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}) RETURNING id;""".format(
            cpu_percent, total_mem, free_mem, used_mem, swap_total_mem,
            swap_used_mem, swap_free_mem, updated_at, es_id)
        execute_es_sql(metrics_sql)
        cur.close()
        conn.close()
Example #25
def hbase_liveserver_update(value):
    cur = conn = None
    try:
        data = value.split("=")

        if len(data) == 22:
            live_region_data = {
                data[1].split(", ")[1]: data[2].split(", ")[0],
                data[2].split(", ")[1]: data[3].split(", ")[0],
                data[3].split(", ")[1]: data[4].split(", ")[0],
                data[4].split(", ")[1]: data[5].split(", ")[0],
                data[5].split(", ")[1]: data[6].split(", ")[0],
                data[7].split(", ")[1]: data[8].split(", ")[0],
                data[9].split(", ")[1]: data[10].split(", ")[0],
                data[11].split(", ")[1]: data[12].split(", ")[0],
                data[12].split(", ")[1]: data[13].split(", ")[0],
                data[20].split(", ")[-1]: data[21],
            }
        else:
            live_region_data = {
                data[1].split(", ")[1]: data[2].split(", ")[0],
                data[2].split(", ")[1]: data[3].split(", ")[0],
                data[3].split(", ")[1]: data[4].split(", ")[0],
                data[4].split(", ")[1]: data[5].split(", ")[0],
                data[5].split(", ")[1]: data[6].split(", ")[0],
                data[7].split(", ")[1]: data[8].split(", ")[0],
                data[8].split(", ")[1]: data[9].split(", ")[0],
                data[10].split(", ")[1]: data[11].split(", ")[0],
                data[11].split(", ")[1]: data[12].split(", ")[0],
                data[19].split(", ")[-1]: data[20]
            }

        ip = (live_region_data.get("ip")).split(":")[0]
        rpyc_port = int((live_region_data.get("ip")).split(":")[1])
        web_port = 0
        status = "RUNNING"
        state = 0
        type = 0
        maxHeapMB = float(live_region_data.get("maxHeapMB"))
        memstoreSizeMB = float(live_region_data.get("memstoreSizeMB"))
        numberOfOnlineRegions = float(
            live_region_data.get("numberOfOnlineRegions"))
        readRequestsCount = float(live_region_data.get("readRequestsCount"))
        numberOfStorefiles = float(live_region_data.get("numberOfStorefiles"))
        numberOfStores = float(live_region_data.get("numberOfStores"))
        usedHeapMB = float(live_region_data.get("usedHeapMB"))
        writeRequestsCount = float(live_region_data.get("writeRequestsCount"))
        storefileSizeMB = float(live_region_data.get("storefileSizeMB"))

        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select id from hbase_hbase where ip='{0}' and cluster_id={1} and type=0".format(
            ip, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()
        if row is None:
            # Inserting hbase_hbase table with hbase jmx values.

            sql = """INSERT INTO hbase_hbase (ip, type, status, state, web_port, rpyc_port, cluster_id, updated_at) VALUES
                    ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(
                ip, type, status, state, web_port, rpyc_port, cluster_id,
                updated_at)
        else:
            # Updating hbase_hbase table with hbase jmx values.
            id = row[0]

            sql = """UPDATE hbase_hbase set status='{0}', updated_at={1} where id={2} RETURNING id;""".format(
                status, updated_at, id)
        hbase_id = execute_hbase_sql(sql)
        metrics_sql = """INSERT INTO hbase_metrics (max_heap, mem_store_size, online_region, read_request, no_store_files, 
    no_stores, updated_at, used_heap, write_request, store_file_size, node_id) VALUES
                        ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}) RETURNING id;""".format(
            maxHeapMB, memstoreSizeMB, numberOfOnlineRegions,
            readRequestsCount, numberOfStorefiles, numberOfStores, updated_at,
            usedHeapMB, writeRequestsCount, storefileSizeMB, hbase_id)
        execute_hbase_sql(metrics_sql)
    except Exception as e:
        log.error(e)

    if cur is not None and conn is not None:
        cur.close()
        conn.close()
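The positional split("=") parsing above is why a 22-field line needs its own branch: any added or removed field shifts every index. A hedged alternative that builds the dict by pattern instead of position (a sketch, assuming the status line is a comma-separated run of key=value pairs):

import re

def parse_live_region(value):
    # Sketch: collect every key=value pair; values run to the next comma
    # or closing bracket. Returns e.g. {"maxHeapMB": "3958", ...}.
    return dict(re.findall(r'(\w+)=([^,\]]+)', value))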
Example #26
    else:
        return True


if __name__ == '__main__':
    '''
    Runs the Flask app, binding 0.0.0.0 on the first free port at or above 11605
    '''
    try:
        service_thread = threading.Thread(target=activate_job)
        service_thread.start()

        port = 11605
        while check_running_port(port):
            port = port + 1

        update_port_sql = """UPDATE administer_nodes set port={0} where ip='{1}' RETURNING id;""" \
                .format(port, system_ip)

        conn = get_postgres_connection()
        cur = conn.cursor()
        cur.execute(update_port_sql)
        conn.commit()
        cur.close()
        conn.close()

        custom_app.run(host='0.0.0.0', port=port)
        # app.run(debug=True, host='0.0.0.0', port=11605)
    except Exception as e:
        log.error(e)
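check_running_port is used above to probe for a free port but is not shown in this set. A plausible sketch (hypothetical; the real helper may inspect netstat or a process table instead) using a TCP connect test:

import socket

def check_running_port(port, host='127.0.0.1'):
    # Hypothetical sketch: True if something is already listening on port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(0.5)
        return s.connect_ex((host, port)) == 0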