def stop_all_hbase():
    """Stop all HBase services across the cluster.

    Requires a valid API-KEY request header. If any master row (type=1) in
    hbase_hbase is RUNNING, runs hbase_stop_cmd, refreshes master info, and
    re-queries the table to confirm the services actually went down.

    :return: JSON string -- the stop-command result on success/failure,
             '{"success": 2, ...}' if services are still RUNNING after the
             stop, '{"success": 1}' if nothing was running, or the API-key
             failure message.
    """
    header_key = request.headers.get('API-KEY')
    api_status = check_apiKey(header_key)
    if api_status != 'success':
        return api_status
    conn = get_postgres_connection()
    cur = conn.cursor()
    # try/finally guarantees the cursor/connection are released on every
    # path -- the original leaked them on the "nothing running" branch and
    # on any exception raised between open and close.
    try:
        sql = "select status from hbase_hbase where type=1"
        cur.execute(sql)
        rows = cur.fetchall()
        status_list = [' '.join(item) for item in rows]
        if "RUNNING" not in status_list:
            return '{"success": 1}'
        result = run_services.run_bigdata_services(
            hbase_stop_cmd, 'HBase Service Stopped',
            'Error Stopping HBase Services')
        update_hbase_master()
        if not json.loads(result)["success"]:
            return result
        # Re-query to confirm the masters really shut down.
        cur.execute(sql)
        rows = cur.fetchall()
        status_list = [' '.join(item) for item in rows]
        if "RUNNING" in status_list:
            return '{"success": 2, "msg": ["HBase Service not Stopped!!!"]}'
        return result
    finally:
        cur.close()
        conn.close()
def dfs_stop():
    """Stop the DFS services (namenode, datanode, zkfc, journalnode).

    :return: the stop-command result JSON if a namenode was RUNNING,
             '{"success": 1}' if nothing was running, or the API-key
             failure message.
    """
    log.info("\nStopping DFS Services\n")
    api_status = check_apiKey(request.headers.get('API-KEY'))
    if api_status != 'success':
        return api_status
    conn = get_postgres_connection()
    cur = conn.cursor()
    cur.execute("select status from hdfs_hdfs where type=1")
    rows = cur.fetchall()
    cur.close()
    conn.close()
    statuses = [' '.join(row) for row in rows]
    if "RUNNING" not in statuses:
        # Nothing to stop; still refresh the cached namenode info.
        update_namenode_info()
        return '{"success": 1}'
    result = run_services.run_bigdata_services(
        dfs_stop_cmd, 'Stopped DFS Services', 'Error Stopping DFS Services')
    if json.loads(result)["success"]:
        update_namenode_info()
    return result
def stop_spark():
    """Stop spark.

    :return: the stop-command result JSON if spark was RUNNING,
             '{"success": 1}' if nothing was running, or the API-key
             failure message.
    """
    log.info("\nStopping spark\n")
    api_status = check_apiKey(request.headers.get('API-KEY'))
    if api_status != 'success':
        return api_status
    conn = get_postgres_connection()
    cur = conn.cursor()
    cur.execute("select status from spark_spark where type=1")
    rows = cur.fetchall()
    statuses = [' '.join(row) for row in rows]
    if "RUNNING" in statuses:
        result = run_services.run_bigdata_services(
            spark_stop_cmd, 'spark Stopped', 'Error Stopping spark')
        if json.loads(result)["success"]:
            # Record the shutdown for the active master rows.
            cur.execute("""UPDATE spark_spark set status='SHUTDOWN' where type=1 and state=1;""")
            conn.commit()
        cur.close()
        conn.close()
        return result
    cur.close()
    conn.close()
    return '{"success": 1}'
def dn_stop(cluster_id):
    """Stop the datanode running on this host for the given cluster.

    :param cluster_id: id of the cluster whose datanode should be stopped
    :return: JSON string from confirm_stop, or an "already stopped" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from hdfs_hdfs "
           "where ip='%s' and type=0 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["Datanode Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        datanode_stop_cmd, 'Datanode Stopped', 'Error Stopping Datanode')
    return run_services.confirm_stop(result, "hdfs_hdfs", service_id,
                                     web_port, rpyc_port)
def dfs_start():
    """Start the DFS services (namenode, datanode, zkfc, journalnode).

    :return: the start-command result JSON, or the API-key failure message
    """
    log.info("\nStarting DFS Services\n")
    api_status = check_apiKey(request.headers.get('API-KEY'))
    if api_status != 'success':
        return api_status
    result = run_services.run_bigdata_services(
        dfs_start_cmd, 'Started DFS Service', 'Error Starting DFS Services')
    update_namenode_info()
    return result
def nn_start(cluster_id):
    """Start the namenode on this host for the given cluster.

    :param cluster_id: id of the cluster whose namenode should be started
    :return: JSON string from confirm_start, or an "already running" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from hdfs_hdfs "
           "where ip='%s' and type=1 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Namenode Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        namenode_start_cmd, 'Namenode Started', 'Error Starting Namenode')
    update_namenode_info()
    return run_services.confirm_start(result, "hdfs_hdfs", service_id,
                                      web_port, rpyc_port)
def es_start(cluster_id):
    """Start elasticsearch on this host for the given cluster.

    Chooses the start command based on how ES is managed on this host
    (systemd, sysv-init, or the bundled binary).

    :param cluster_id: id of the cluster whose ES node should be started
    :return: JSON string from confirm_start, or an "already running" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from elastic_search_elastic_search "
           "where ip='%s' and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["ES Already Running!!!"]}'
    process_name = get_es_process_name()
    if process_name == "systemd":
        start_cmd = es_start_systemd_cmd
    elif process_name == "sysv_init":
        start_cmd = es_start_sysvinit_cmd
    else:
        start_cmd = es_start_bin_cmd
    result = run_services.run_bigdata_services(
        start_cmd, 'ES Started', 'Error Starting ES')
    return run_services.confirm_start(result, "elastic_search_elastic_search",
                                      service_id, web_port, rpyc_port)
def yarn_start():
    """Start the yarn services (nodemanager, resourcemanager).

    :return: the start-command result JSON, or the API-key failure message
    """
    log.info("\nStarting Yarn\n")
    api_status = check_apiKey(request.headers.get('API-KEY'))
    if api_status != 'success':
        return api_status
    result = run_services.run_bigdata_services(
        yarn_start_cmd, 'Started Yarn Services', 'Error Starting Yarn Services')
    update_rm_info()
    return result
def nm_stop(cluster_id):
    """Stop the node manager on this host for the given cluster.

    :param cluster_id: id of the cluster whose node manager should be stopped
    :return: JSON string from confirm_stop, or an "already stopped" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from yarn_yarn "
           "where ip='%s' and type=0 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["Node Manager Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        nm_stop_cmd, 'Node manager Stopped', 'Error Stopping Node manager')
    update_rm_info()
    return run_services.confirm_stop(result, "yarn_yarn", service_id,
                                     web_port, rpyc_port)
def sps_stop(cluster_id):
    """Stop the spark slave on this host for the given cluster.

    :param cluster_id: id of the cluster whose spark slave should be stopped
    :return: JSON string from confirm_stop, or an "already stopped" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from spark_spark "
           "where ip='%s' and type=0 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["Spark Slave Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        stop_spark_slave_cmd, 'spark slave stopped', 'Error Stopping Spark Slave')
    return run_services.confirm_stop(result, "spark_spark", service_id,
                                     web_port, rpyc_port)
def spm_start(cluster_id):
    """Start the spark master on this host for the given cluster.

    :param cluster_id: id of the cluster whose spark master should be started
    :return: JSON string from confirm_start, or an "already running" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from spark_spark "
           "where ip='%s' and type=1 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Spark Master Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        start_spark_master_cmd, 'spark master started', 'Error Starting Spark Master')
    return run_services.confirm_start(result, "spark_spark", service_id,
                                      web_port, rpyc_port)
def master_stop(cluster_id):
    """Stop the HBase master on this host for the given cluster.

    :param cluster_id: id of the cluster whose HMaster should be stopped
    :return: JSON string from confirm_stop, or an "already stopped" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from hbase_hbase "
           "where ip='%s' and type=1 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["HMaster Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        hmaster_stop_cmd, 'HMaster Stopped', 'Error Stopping HBase Master')
    update_hbase_master()
    return run_services.confirm_stop(result, "hbase_hbase", service_id,
                                     web_port, rpyc_port)
def rm_start(cluster_id):
    """Start the resource manager on this host for the given cluster.

    :param cluster_id: id of the cluster whose resource manager to start
    :return: JSON string from confirm_start, or an "already running" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from yarn_yarn "
           "where ip='%s' and type=1 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Resource Manager Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        rm_start_cmd, 'Resource manager Started', 'Error Starting Resource manager')
    update_rm_info()
    return run_services.confirm_start(result, "yarn_yarn", service_id,
                                      web_port, rpyc_port)
def regionserver_start(cluster_id):
    """Start the HBase regionserver on this host for the given cluster.

    :param cluster_id: id of the cluster whose regionserver to start
    :return: JSON string from confirm_start, or an "already running" message
    """
    ip = get_system_ip()
    sql = ("select id, status, web_port, rpyc_port from hbase_hbase "
           "where ip='%s' and type=0 and cluster_id=%d" % (ip, cluster_id))
    row = run_services.get_service_status(sql)
    service_id, status, web_port, rpyc_port = row[0], row[1], row[2], row[3]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Regionserver Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        hregionserver_start_cmd, 'Regionserver Started', 'Error Starting Regionserver')
    update_hbase_master()
    return run_services.confirm_start(result, "hbase_hbase", service_id,
                                      web_port, rpyc_port)
def hbase_start():
    """Start the hbase services (hbase master, hbase regionserver).

    :return: the start-command result JSON, the serialized master-update
             failure if refreshing master info reports success == 0, or
             the API-key failure message.
    """
    log.info("\nStarting HBase\n")
    api_status = check_apiKey(request.headers.get('API-KEY'))
    if api_status != 'success':
        return api_status
    result = run_services.run_bigdata_services(
        hbase_start_cmd, 'HBase Services Started',
        'Error Starting HBase Services')
    up_status = update_hbase_master()
    # Surface a failed master-info refresh instead of the raw command result.
    if up_status is not None and up_status["success"] == 0:
        return json.dumps(up_status)
    return result
def yarn_stop():
    """Stop the yarn services (nodemanager, resourcemanager).

    :return: the stop-command result JSON if yarn was RUNNING,
             '{"success": 1}' if nothing was running, or the API-key
             failure message.
    """
    log.info("\nStopping Yarn\n")
    api_status = check_apiKey(request.headers.get('API-KEY'))
    if api_status != 'success':
        return api_status
    conn = get_postgres_connection()
    cur = conn.cursor()
    cur.execute("select status from yarn_yarn where type=1")
    rows = cur.fetchall()
    cur.close()
    conn.close()
    statuses = [' '.join(row) for row in rows]
    if "RUNNING" not in statuses:
        return '{"success": 1}'
    result = run_services.run_bigdata_services(
        yarn_stop_cmd, 'Stopped Yarn Services', 'Error Stopping Yarn Services')
    if json.loads(result)["success"]:
        update_rm_info()
    return result