# Example 1 (scraper marker: "Esempio n. 1" / "0")
def dn_stop(cluster_id):
    """Stop the Datanode running on this host for *cluster_id*.

    Looks up the Datanode row (type=0) in ``hdfs_hdfs`` for the local IP,
    and stops it only if its recorded status is RUNNING.

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): SQL built by string interpolation; acceptable only while
    # ip/cluster_id are trusted internal values -- parameterize if
    # get_service_status ever accepts bind parameters.
    sql = "select id, status, web_port, rpyc_port from hdfs_hdfs where ip='%s' and type=0 and cluster_id=%d" % (ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status != "RUNNING":
        # Nothing to do -- report success without touching the service.
        return '{"success": 1, "msg": ["Datanode Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        datanode_stop_cmd, 'Datanode Stopped', 'Error Stopping Datanode')
    return run_services.confirm_stop(result, "hdfs_hdfs", service_id,
                                     web_port, rpyc_port)
# Example 2 (scraper marker: "Esempio n. 2" / "0")
def nn_start(cluster_id):
    """Start the Namenode on this host for *cluster_id*.

    Looks up the Namenode row (type=1) in ``hdfs_hdfs`` for the local IP,
    starts it only if it is not already RUNNING, then refreshes the
    Namenode bookkeeping via update_namenode_info().

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from hdfs_hdfs where ip='%s' and type=1 and cluster_id=%d" % (ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Namenode Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        namenode_start_cmd, 'Namenode Started', 'Error Starting Namenode')
    update_namenode_info()
    return run_services.confirm_start(result, "hdfs_hdfs", service_id,
                                      web_port, rpyc_port)
# Example 3 (scraper marker: "Esempio n. 3" / "0")
def es_start(cluster_id):
    """Start Elasticsearch on this host for *cluster_id*.

    Chooses the start command based on the init system reported by
    get_es_process_name() (systemd, SysV init, or a raw binary launch),
    and only starts the service if it is not already RUNNING.

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from elastic_search_elastic_search where ip='%s' and cluster_id=%d" % (ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["ES Already Running!!!"]}'
    # Pick the start command matching the host's service manager; an
    # explicit if/elif chain replaces the original nested ternary.
    es_process_name = get_es_process_name()
    if es_process_name == "systemd":
        es_start_cmd = es_start_systemd_cmd
    elif es_process_name == "sysv_init":
        es_start_cmd = es_start_sysvinit_cmd
    else:
        es_start_cmd = es_start_bin_cmd
    result = run_services.run_bigdata_services(es_start_cmd, 'ES Started',
                                               'Error Starting ES')
    return run_services.confirm_start(result, "elastic_search_elastic_search",
                                      service_id, web_port, rpyc_port)
def nm_stop(cluster_id):
    """Stop the YARN Node Manager on this host for *cluster_id*.

    Looks up the Node Manager row (type=0) in ``yarn_yarn`` for the local
    IP, stops it only if RUNNING, then refreshes Resource Manager
    bookkeeping via update_rm_info().

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from yarn_yarn where ip='%s' and type=0 and cluster_id=%d" % (
        ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["Node Manager Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        nm_stop_cmd, 'Node manager Stopped', 'Error Stopping Node manager')
    update_rm_info()
    return run_services.confirm_stop(result, "yarn_yarn", service_id,
                                     web_port, rpyc_port)
def sps_stop(cluster_id):
    """Stop the Spark slave (worker) on this host for *cluster_id*.

    Looks up the slave row (type=0) in ``spark_spark`` for the local IP
    and stops it only if its recorded status is RUNNING.

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from spark_spark where ip='%s' and type=0 and cluster_id=%d" % (
        ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["Spark Slave Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        stop_spark_slave_cmd, 'spark slave stopped',
        'Error Stopping Spark Slave')
    return run_services.confirm_stop(result, "spark_spark", service_id,
                                     web_port, rpyc_port)
def spm_start(cluster_id):
    """Start the Spark master on this host for *cluster_id*.

    Looks up the master row (type=1) in ``spark_spark`` for the local IP
    and starts it only if it is not already RUNNING.

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from spark_spark where ip='%s' and type=1 and cluster_id=%d" % (
        ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Spark Master Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        start_spark_master_cmd, 'spark master started',
        'Error Starting Spark Master')
    return run_services.confirm_start(result, "spark_spark", service_id,
                                      web_port, rpyc_port)
# Example 7 (scraper marker: "Esempio n. 7" / "0")
def master_stop(cluster_id):
    """Stop the HBase master on this host for *cluster_id*.

    Looks up the HMaster row (type=1) in ``hbase_hbase`` for the local IP,
    stops it only if RUNNING, then refreshes master bookkeeping via
    update_hbase_master().

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from hbase_hbase where ip='%s' and type=1 and cluster_id=%d" % (
        ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status != "RUNNING":
        return '{"success": 1, "msg": ["HMaster Already Stopped!!!"]}'
    result = run_services.run_bigdata_services(
        hmaster_stop_cmd, 'HMaster Stopped', 'Error Stopping HBase Master')
    update_hbase_master()
    return run_services.confirm_stop(result, "hbase_hbase", service_id,
                                     web_port, rpyc_port)
def rm_start(cluster_id):
    """Start the YARN Resource Manager on this host for *cluster_id*.

    Looks up the Resource Manager row (type=1) in ``yarn_yarn`` for the
    local IP, starts it only if not already RUNNING, then refreshes the
    Resource Manager bookkeeping via update_rm_info().

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from yarn_yarn where ip='%s' and type=1 and cluster_id=%d" % (
        ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Resource Manager Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        rm_start_cmd, 'Resource manager Started',
        'Error Starting Resource manager')
    update_rm_info()
    return run_services.confirm_start(result, "yarn_yarn", service_id,
                                      web_port, rpyc_port)
# Example 9 (scraper marker: "Esempio n. 9" / "0")
def regionserver_start(cluster_id):
    """Start the HBase RegionServer on this host for *cluster_id*.

    Looks up the RegionServer row (type=0) in ``hbase_hbase`` for the
    local IP, starts it only if not already RUNNING, then refreshes
    master bookkeeping via update_hbase_master().

    Returns a JSON string describing the outcome.
    """
    ip = get_system_ip()
    # NOTE(review): interpolated SQL -- safe only for trusted internal values.
    sql = "select id, status, web_port, rpyc_port from hbase_hbase where ip='%s' and type=0 and cluster_id=%d" % (
        ip, cluster_id)
    # Unpack the row directly; avoids shadowing the builtin `id`.
    service_id, status, web_port, rpyc_port = run_services.get_service_status(sql)[:4]
    if status == "RUNNING":
        return '{"success": 1, "msg": ["Regionserver Already Running!!!"]}'
    result = run_services.run_bigdata_services(
        hregionserver_start_cmd, 'Regionserver Started',
        'Error Starting Regionserver')
    update_hbase_master()
    return run_services.confirm_start(result, "hbase_hbase", service_id,
                                      web_port, rpyc_port)