def dn_stop(cluster_id):
    ip = get_system_ip()
    sql = "select id, status, web_port, rpyc_port from hdfs_hdfs where ip='%s' and type=0 and cluster_id=%d" % (
        ip, cluster_id)
    rs = run_services.get_service_status(sql)
    id, status, web_port, rpyc_port = rs[0], rs[1], rs[2], rs[3]
    if status == "RUNNING":
        result = run_services.run_bigdata_services(
            datanode_stop_cmd, 'Datanode Stopped', 'Error Stopping Datanode')
        return run_services.confirm_stop(result, "hdfs_hdfs", id, web_port, rpyc_port)
    else:
        return '{"success": 1, "msg": ["Datanode Already Stopped!!!"]}'
def sps_stop(cluster_id):
    ip = get_system_ip()
    sql = "select id, status, web_port, rpyc_port from spark_spark where ip='%s' and type=0 and cluster_id=%d" % (
        ip, cluster_id)
    rs = run_services.get_service_status(sql)
    id, status, web_port, rpyc_port = rs[0], rs[1], rs[2], rs[3]
    if status == "RUNNING":
        result = run_services.run_bigdata_services(
            stop_spark_slave_cmd, 'spark slave stopped', 'Error Stopping Spark Slave')
        return run_services.confirm_stop(result, "spark_spark", id, web_port, rpyc_port)
    else:
        return '{"success": 1, "msg": ["Spark Slave Already Stopped!!!"]}'
def nm_stop(cluster_id):
    ip = get_system_ip()
    sql = "select id, status, web_port, rpyc_port from yarn_yarn where ip='%s' and type=0 and cluster_id=%d" % (
        ip, cluster_id)
    rs = run_services.get_service_status(sql)
    id, status, web_port, rpyc_port = rs[0], rs[1], rs[2], rs[3]
    if status == "RUNNING":
        result = run_services.run_bigdata_services(
            nm_stop_cmd, 'Node manager Stopped', 'Error Stopping Node manager')
        update_rm_info()
        return run_services.confirm_stop(result, "yarn_yarn", id, web_port, rpyc_port)
    else:
        return '{"success": 1, "msg": ["Node Manager Already Stopped!!!"]}'
def master_stop(cluster_id):
    ip = get_system_ip()
    sql = "select id, status, web_port, rpyc_port from hbase_hbase where ip='%s' and type=1 and cluster_id=%d" % (
        ip, cluster_id)
    rs = run_services.get_service_status(sql)
    id, status, web_port, rpyc_port = rs[0], rs[1], rs[2], rs[3]
    if status == "RUNNING":
        result = run_services.run_bigdata_services(
            hmaster_stop_cmd, 'HMaster Stopped', 'Error Stopping HBase Master')
        update_hbase_master()
        return run_services.confirm_stop(result, "hbase_hbase", id, web_port, rpyc_port)
    else:
        return '{"success": 1, "msg": ["HMaster Already Stopped!!!"]}'
def es_stop(cluster_id):
    ip = get_system_ip()
    sql = "select id, status, web_port, rpyc_port from elastic_search_elastic_search where ip='%s' and cluster_id=%d" % (
        ip, cluster_id)
    rs = run_services.get_service_status(sql)
    id, status, web_port, rpyc_port = rs[0], rs[1], rs[2], rs[3]
    if status == "RUNNING":
        # Pick the stop command based on how Elasticsearch is being managed on this host.
        es_process_name = get_es_process_name()
        if es_process_name == "systemd":
            result = run_services.run_bigdata_services(
                es_stop_systemd_cmd, 'ES Stopped', 'Error Stopping ES')
        elif es_process_name == "sysv_init":
            result = run_services.run_bigdata_services(
                es_stop_sysvinit_cmd, 'ES Stopped', 'Error Stopping ES')
        else:
            # No init-system unit found; fall back to killing the process directly.
            result = kill_service("elasticsearch")
        return run_services.confirm_stop(result, "elastic_search_elastic_search", id, web_port, rpyc_port)
    else:
        return '{"success": 1, "msg": ["ES Already Stopped!!!"]}'
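# Note: the *_stop() functions above all repeat the same query-status ->
# run-stop-command -> confirm pattern. A shared helper along the following lines
# could factor that out. This is only a sketch: the name _stop_service and its
# node_type/post_stop parameters are hypothetical, and it assumes
# run_services.get_service_status / run_bigdata_services / confirm_stop behave
# as they are used above. It does not cover the Elasticsearch fallback path.
def _stop_service(cluster_id, table, stop_cmd, label, node_type=None, post_stop=None):
    ip = get_system_ip()
    type_clause = "" if node_type is None else " and type=%d" % node_type
    sql = "select id, status, web_port, rpyc_port from %s where ip='%s'%s and cluster_id=%d" % (
        table, ip, type_clause, cluster_id)
    rs = run_services.get_service_status(sql)
    id, status, web_port, rpyc_port = rs[0], rs[1], rs[2], rs[3]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["%s Already Stopped!!!"]}' % label
    result = run_services.run_bigdata_services(
        stop_cmd, '%s Stopped' % label, 'Error Stopping %s' % label)
    if post_stop is not None:
        post_stop()  # e.g. update_rm_info or update_hbase_master
    return run_services.confirm_stop(result, table, id, web_port, rpyc_port)

# dn_stop() could then be written, for example, as:
#     return _stop_service(cluster_id, "hdfs_hdfs", datanode_stop_cmd,
#                          "Datanode", node_type=0)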