def get_kafka_log_dirs():
    ah = AmbariHelper()
    json_output = ah.query_url(
        "clusters/" + ah.cluster_name() +
        "/configurations/service_config_versions?service_name.in(KAFKA)&is_current=true"
    )
    kafka_brokers_configs = [
        element for element in json_output["items"][0]["configurations"]
        if element["type"] == KAFKA_BROKER
    ][0]
    return kafka_brokers_configs["properties"]["log.dirs"].split(',')
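Note that KAFKA_BROKER is referenced but never defined in this excerpt. A minimal sketch of the assumed constant plus a usage call; "kafka-broker" is the usual Ambari configuration type for broker properties, but treat the value as an assumption here:

# Assumed module-level constant: the Ambari config type holding broker
# properties such as log.dirs (value is an assumption, not from the source).
KAFKA_BROKER = "kafka-broker"

# Usage sketch: each entry is one configured Kafka log directory.
for log_dir in get_kafka_log_dirs():
    print(log_dir)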
Example #2
def get_cluster_topology_json(cluster_manifest):
    settings = cluster_manifest.settings
    if "cluster_topology_json_url" in settings:
        json_url = settings["cluster_topology_json_url"]
        r = requests.get(json_url)
        topology_info = r.text
        return topology_info
    else:
        raise_error("Failed to get cluster_topology_json_url from cluster manifest")

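parse_topo_info below relies on three key constants that this excerpt does not define. Plausible stand-ins follow, with the caveat that UPDATE_DOMAIN and FQDN must match the actual field names in the HDInsight topology JSON, so the values here are hypothetical:

# Assumed key constants (hypothetical values; the real definitions live
# elsewhere in the original module).
UPDATE_DOMAIN = "updateDomain"
FQDN = "fqdn"
RACK = "rack"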
def parse_topo_info(cluster_topology_json, fqdn_suffix):
    print cluster_topology_json
    workernode_info = json.loads(cluster_topology_json)["hostGroups"]["workernode"]
    host_info = []
    for node in workernode_info:
        host = {
            UPDATE_DOMAIN: str(node[UPDATE_DOMAIN]),
            FQDN: node[FQDN] + "." + str(fqdn_suffix),
            RACK: "/rack" + str(node[UPDATE_DOMAIN])
        }
        host_info.append(host)
    return host_info

ambariHelper = AmbariHelper()
cluster_topology_json = get_cluster_topology_json(ambariHelper.get_cluster_manifest())
host_info = parse_topo_info(cluster_topology_json, ambariHelper.get_fqdn().split('.',1)[1])
cluster_name = ambariHelper.cluster_name()
for node in host_info:
    ambariHelper.request_url(
        "clusters/" + cluster_name + "/hosts/" + str(node[FQDN]), "PUT",
        "{\"Hosts\":{\"rack_info\":\"" + str(node[RACK]) + "\"}}")
ambariHelper.restart_all_stale_services()
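The rack_info payload above is assembled with hand-escaped quotes. An equivalent construction with json.dumps avoids the manual escaping; a sketch, assuming request_url accepts the same JSON string (rack_info_payload is a hypothetical helper, not from the source):

import json

def rack_info_payload(rack):
    # Produces the same JSON body as the hand-built string above.
    return json.dumps({"Hosts": {"rack_info": str(rack)}})

# e.g. rack_info_payload("/rack2") -> '{"Hosts": {"rack_info": "/rack2"}}'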

Example #3
from hdinsight_common.AmbariHelper import AmbariHelper
import requests, json, re
import subprocess, sys
import pty

if len(sys.argv) < 2:
    print "Error: Usage: restartdeadregionserverswithlocalhostissue.py <ssh-username>"
    sys.exit(1)

a = AmbariHelper()
result = a.query_url("clusters/" + a.cluster_name() + "/hosts")

for i in result['items']:
    if i['Hosts']['host_name'].startswith("zk"):
        result_hbase_metrics = a.query_url(
            "clusters/" + a.cluster_name() + "/hosts/" +
            i['Hosts']['host_name'] + "/host_components/HBASE_MASTER")
        if result_hbase_metrics['metrics']['hbase']['master']['IsActiveMaster'] == 'true':
            # Query the active master's JMX endpoint for the dead region server list.
            url = ("http://" + i['Hosts']['host_name'] +
                   ":16010/jmx?qry=Hadoop:service=HBase,name=Master,sub=Server")
            req = requests.get(url)
            res = req.json()
            deadregionservers = res['beans'][0]['tag.deadRegionServers'].split(';')

BASH_PATH="/bin/bash"
HBASE_DAEMON_PATH="/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh"
HBASE_CONFIG_PATH="/usr/hdp/current/hbase-regionserver/conf"

COMMANDLOGTAIL="sudo tail -n100 /var/log/hbase/hbase-hbase-regionserver-wn*.log"
COMMANDSTOP="sudo -u hbase " + BASH_PATH + " " + HBASE_DAEMON_PATH + " --config " + HBASE_CONFIG_PATH + " stop regionserver"
COMMANDSTART="sudo -u hbase " + BASH_PATH + " " + HBASE_DAEMON_PATH + " --config " + HBASE_CONFIG_PATH + " start regionserver"
LOCALHOSTISSUESTRING = 'WARN  \[regionserver/localhost/127.0.0.1:16020\] regionserver.HRegionServer: error telling master we are up'
SSHUSER = sys.argv[1]
for deadregionserver in filter(None, deadregionservers):
    # The rest of the loop body was truncated in this listing. A hedged
    # reconstruction from the constants above: SSH to the dead host, check
    # the regionserver log tail for the localhost issue, and restart the
    # regionserver if it is found.
    hostname = deadregionserver.split(',')[0]
    log_tail = subprocess.check_output(
        ["ssh", SSHUSER + "@" + hostname, COMMANDLOGTAIL])
    if re.search(LOCALHOSTISSUESTRING, log_tail):
        subprocess.call(["ssh", SSHUSER + "@" + hostname, COMMANDSTOP])
        subprocess.call(["ssh", SSHUSER + "@" + hostname, COMMANDSTART])
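For reference, each tag.deadRegionServers entry is expected to follow HBase's ServerName encoding, hostname,port,startcode (an assumption based on HBase conventions, not confirmed by this excerpt), which is why splitting on the first comma recovers the hostname:

# Hypothetical entry illustrating the assumed ServerName format.
entry = "wn3-example.internal.cloudapp.net,16020,1520000000000"
hostname = entry.split(',')[0]  # -> "wn3-example.internal.cloudapp.net"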